Dataset columns: text (string, lengths 78 to 104k) · score (float64, range 0 to 0.18)
def template_exists(template_name):
    '''
    Determine if a given template exists so that it can be loaded if so,
    or a default alternative can be used if not.
    '''
    try:
        template.loader.get_template(template_name)
        return True
    except template.TemplateDoesNotExist:
        return False
0.003067
def build_vep_annotation(csq_info, reference, alternatives, vep_columns):
    """
    Build a dictionary with the vep information from the vep annotation.

    Indels are handled differently by vep depending on the number of
    alternative alleles there are for a variant.

    If only one alternative:
        Insertion: vep represents the alternative by removing the first
                   base from the vcf alternative.
        Deletion: vep represents the alternative with '-'

    If there are several alternatives:
        Insertion: vep represents the alternative by removing the first
                   base from the vcf alternative (like above).
        Deletion: If there are multiple alternative deletions, vep represents
                  them by removing the first base from the vcf alternative.
                  If the vcf line looks like:
                      1   970549  .   TGGG    TG,TGG
                  the vep annotation for the alternatives will be: G,GG

    Args:
        csq_info (list): A list with the raw vep annotations from the vcf line.
        reference (str): A string that represents the vcf reference.
        alternatives (list): A list of strings that represents the vcf
                             formatted alternatives.
        vep_columns (list): A list of strings that represents the vep columns
                            defined in the vcf header.

    Returns:
        vep_dict (dict): A dictionary with the alternative alleles (in vcf
                         form) as keys and a list of annotations for each
                         alternative allele. One key named 'gene_ids' whose
                         value is a set with the genes found.
    """
    logger = getLogger(__name__)
    # The keys in the vep dict are the vcf formatted alternatives, values are
    # the dictionaries with vep annotations
    vep_dict = {}
    # If we have several alternatives we need to check what types of
    # alternatives we have
    vep_to_vcf = {}
    number_of_deletions = 0
    for alternative in alternatives:
        if len(alternative) < len(reference):
            number_of_deletions += 1
    logger.debug("Number of deletions found: {0}".format(number_of_deletions))

    for alternative in alternatives:
        # We store the annotations with keys from the vcf alternatives
        vep_dict[alternative] = []
        # If substitution, reference and alternative have the same length
        if len(alternative) == len(reference):
            vep_to_vcf[alternative] = alternative
        # Otherwise the alternative is an indel
        else:
            # If there is a deletion then the alternative will be '-' in the
            # vep entry
            if len(alternative) == 1:
                vep_to_vcf['-'] = alternative
            else:
                vep_to_vcf[alternative[1:]] = alternative

    for vep_annotation in csq_info:
        logger.debug("Parsing vep annotation: {0}".format(vep_annotation))
        splitted_vep = vep_annotation.split('|')
        if len(splitted_vep) != len(vep_columns):
            raise SyntaxError("Csq info for variant does not match csq info in "
                              "header. {0}, {1}".format(
                                  '|'.join(splitted_vep),
                                  '|'.join(vep_columns)))
        # Build the vep dict:
        vep_info = dict(zip(vep_columns, splitted_vep))
        # If no allele is found we can not determine which allele
        if vep_info.get('Allele', None):
            vep_allele = vep_info['Allele']
            try:
                vcf_allele = vep_to_vcf[vep_allele]
            except KeyError:
                vcf_allele = vep_allele
            if vcf_allele in vep_dict:
                vep_dict[vcf_allele].append(vep_info)
            else:
                vep_dict[vcf_allele] = [vep_info]
        else:
            logger.warning("No allele found in vep annotation! Skipping annotation")

    return vep_dict
0.009238
def show_shortcuts(self):
    """Print all shortcuts."""
    gui_name = self.gui.name
    actions_name = self.name
    name = ('{} - {}'.format(gui_name, actions_name)
            if actions_name else gui_name)
    _show_shortcuts(self.shortcuts, name)
0.007246
def is_parseable(self):
    """Check if content is parseable for recursion.

    @return: True if content is parseable
    @rtype: bool
    """
    if self.is_directory():
        return True
    if firefox.has_sqlite and firefox.extension.search(self.url):
        return True
    if self.content_type in self.ContentMimetypes:
        return True
    log.debug(LOG_CHECK, "File with content type %r is not parseable.",
              self.content_type)
    return False
0.007921
def run_and_measure(self, quil_program: Program, qubits: List[int] = None,
                    trials: int = 1, memory_map: Any = None) -> np.ndarray:
    """
    Run a Quil program once to determine the final wavefunction, and measure
    multiple times.

    Alternatively, consider using ``wavefunction`` and calling
    ``sample_bitstrings`` on the resulting object.

    For a large wavefunction and a low-medium number of trials, use this
    function. On the other hand, if you're sampling a small system many times
    you might want to use ``Wavefunction.sample_bitstrings``.

    .. note:: If your program contains measurements or noisy gates, this
        method may not do what you want. If the execution of ``quil_program``
        is **non-deterministic** then the final wavefunction from which the
        returned bitstrings are sampled itself only represents a
        stochastically generated sample and the outcomes sampled from
        *different* ``run_and_measure`` calls *generally sample different
        bitstring distributions*.

    :param quil_program: The program to run and measure
    :param qubits: An optional list of qubits to measure. The order of this
        list is respected in the returned bitstrings. If not provided, all
        qubits used in the program will be measured and returned in their
        sorted order.
    :param int trials: Number of times to sample from the prepared
        wavefunction.
    :param memory_map: An assignment of classical registers to values,
        representing an initial state for the QAM's classical memory. This is
        expected to be of type Dict[str, List[Union[int, float]]], where the
        keys are memory region names and the values are arrays of
        initialization data. For now, we also support input of type
        Dict[MemoryReference, Any], but this is deprecated and will be
        removed in a future release.
    :return: An array of measurement results (0 or 1) of shape
        (trials, len(qubits))
    """
    if qubits is None:
        qubits = sorted(quil_program.get_qubits(indices=True))
    if memory_map is not None:
        quil_program = self.augment_program_with_memory_values(quil_program,
                                                               memory_map)
    return self.connection._run_and_measure(quil_program=quil_program,
                                            qubits=qubits,
                                            trials=trials,
                                            random_seed=self.random_seed)
0.009091
def multitaper_cross_spectrum(self, clm, slm, k, convention='power',
                              unit='per_l', **kwargs):
    """
    Return the multitaper cross-spectrum estimate and standard error.

    Usage
    -----
    mtse, sd = x.multitaper_cross_spectrum(clm, slm, k, [convention, unit,
                                           lmax, taper_wt, clat, clon,
                                           coord_degrees])

    Returns
    -------
    mtse : ndarray, shape (lmax-lwin+1)
        The localized multitaper cross-spectrum estimate, where lmax is the
        smaller of the two spherical-harmonic bandwidths of clm and slm, and
        lwin is the spherical-harmonic bandwidth of the localization windows.
    sd : ndarray, shape (lmax-lwin+1)
        The standard error of the localized multitaper cross-spectrum
        estimate.

    Parameters
    ----------
    clm : SHCoeffs class instance
        SHCoeffs class instance containing the spherical harmonic
        coefficients of the first global field to analyze.
    slm : SHCoeffs class instance
        SHCoeffs class instance containing the spherical harmonic
        coefficients of the second global field to analyze.
    k : int
        The number of tapers to be utilized in performing the multitaper
        spectral analysis.
    convention : str, optional, default = 'power'
        The type of output spectra: 'power' for power spectra, and 'energy'
        for energy spectra.
    unit : str, optional, default = 'per_l'
        The units of the output spectra. If 'per_l', the spectra contain the
        total contribution for each spherical harmonic degree l. If 'per_lm',
        the spectra contain the average contribution for each coefficient at
        spherical harmonic degree l.
    lmax : int, optional, default = min(clm.lmax, slm.lmax)
        The maximum spherical-harmonic degree of the input coefficients
        to use.
    taper_wt : ndarray, optional, default = None
        The weights used in calculating the multitaper cross-spectral
        estimates and standard error.
    clat, clon : float, optional, default = 90., 0.
        Latitude and longitude of the center of the spherical-cap
        localization windows.
    coord_degrees : bool, optional, default = True
        True if clat and clon are in degrees.
    """
    return self._multitaper_cross_spectrum(clm, slm, k,
                                           convention=convention,
                                           unit=unit, **kwargs)
0.001061
def create_match_bool(words_as_string, eval_function, eval_function_args):
    """
    Evaluates components of words_as_string step by step into a single
    boolean value, using eval_function to evaluate each separate component of
    words_as_string into booleans.

    :param words_as_string: String to evaluate
    :param eval_function: Function for evaluating each component of the string
    :param eval_function_args: Arguments for eval_function as list or tuple
    :return: Boolean
    :raises: SyntaxError if eval raises one.
    """
    new_set = []
    count = 0
    words_as_string = words_as_string.replace(",", " or ")
    words = words_as_string.split(" ")
    # First we combine items that contain spaces back into single entries
    for i, word in enumerate(words):
        if count != 0:
            count -= 1
            continue
        if "'" in word:
            # Combine stuff between quotes into a single entry
            combined_data, count = _create_combined_words(words, i)
            if combined_data is None:
                raise SyntaxError("Missing closing quote for: {}".format(word))
            # Count needed to skip over the N next items while we continue
            new_set.append(combined_data)
        else:
            new_set.append(word)
    newest_set = []
    # Then we combine elements inside parentheses into single entries.
    for i, word in enumerate(new_set):
        if count != 0:
            count -= 1
            continue
        if "(" in word:
            # Combine stuff between parentheses into a single entry that can
            # be reduced later.
            combined_data, count = _create_combined_set(new_set, i)
            if combined_data is None:
                raise SyntaxError("Missing closing parenthesis for: {}".format(word))
            # Count needed to skip over the N next items while we continue
            newest_set.append(combined_data)
        else:
            newest_set.append(word)
    # Finally we enter deeper inside the parenthesis structure and start
    # working upwards
    for i, word in enumerate(newest_set):
        if "(" in word:
            # Still parentheses remaining, we must go deeper.
            result = create_match_bool(word[1:len(word) - 1], eval_function,
                                       eval_function_args)
            newest_set[i] = str(result)
    for i, word in enumerate(newest_set):
        if word not in ["True", "False", "or", "and", "not"]:
            newest_set[i] = str(eval_function(word.replace("'", ""),
                                              eval_function_args))
    result = eval(" ".join(newest_set))  # pylint: disable=eval-used
    return result
0.00266
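A minimal usage sketch for create_match_bool; the word_in_tags evaluator is a hypothetical example, and plain inputs like these never invoke the quote/parenthesis helpers, which are only called when those characters appear:

def word_in_tags(word, args):
    # args is the eval_function_args tuple; its first element is a tag list
    tags = args[0]
    return word in tags

print(create_match_bool("red and blue", word_in_tags, (["red", "blue"],)))   # True
print(create_match_bool("red and green", word_in_tags, (["red", "blue"],)))  # False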
async def join(self):
    """Block until all items in the queue have been gotten and processed.

    The count of unfinished tasks goes up whenever an item is added to the
    queue. The count goes down whenever a consumer calls task_done() to
    indicate that the item was retrieved and all work on it is complete.
    When the count of unfinished tasks drops to zero, join() unblocks.
    """
    while True:
        with self._parent._sync_mutex:
            if self._parent._unfinished_tasks == 0:
                break
        await self._parent._finished.wait()
0.003268
def _make_intermediate_dirs(sftp_client, remote_directory):
    """
    Create all the intermediate directories in a remote host.

    :param sftp_client: A Paramiko SFTP client.
    :param remote_directory: Absolute path of the directory containing the file
    :return: None
    """
    if remote_directory == '/':
        sftp_client.chdir('/')
        return
    if remote_directory == '':
        return
    try:
        sftp_client.chdir(remote_directory)
    except IOError:
        dirname, basename = os.path.split(remote_directory.rstrip('/'))
        _make_intermediate_dirs(sftp_client, dirname)
        sftp_client.mkdir(basename)
        sftp_client.chdir(basename)
        return
0.001453
def weather_update(station, pub_sites, interval):
    '''
    Main execution loop. Query weather data and post to online services.
    '''
    station.parse()  # read weather data

    # sanity check weather data
    if station.fields['TempOut'] > 200:
        raise NoSensorException(
            'Out of range temperature value: %.1f, check sensors' %
            (station.fields['TempOut'],))

    gust, gust_dir = WindGust.get(station, interval)

    # upload data in the following order:
    for ps in pub_sites:
        try:  # try block necessary to attempt every publisher
            ps.set(
                pressure=station.fields['Pressure'],
                dewpoint=station.fields['DewPoint'],
                humidity=station.fields['HumOut'],
                tempf=station.fields['TempOut'],
                rainin=station.fields['RainRate'],
                rainday=station.fields['RainDay'],
                dateutc=station.fields['DateStampUtc'],
                windspeed=station.fields['WindSpeed10Min'],
                winddir=station.fields['WindDir'],
                windgust=gust,
                windgustdir=gust_dir,
            )
            ps.publish()
        except Exception as e:
            log.warn('publisher %s: %s' % (ps.__class__.__name__, e))
0.039201
def get_reservation_ports(session, reservation_id,
                          model_name='Generic Traffic Generator Port'):
    """ Get all Generic Traffic Generator Port resources in the reservation.

    :return: list of all Generic Traffic Generator Port resource objects in
        the reservation
    """
    reservation_ports = []
    reservation = session.GetReservationDetails(reservation_id).ReservationDescription
    for resource in reservation.Resources:
        if resource.ResourceModelName == model_name:
            reservation_ports.append(resource)
    return reservation_ports
0.007366
def _set_circuit_type(self, v, load=False):
    """
    Setter method for circuit_type, mapped from YANG variable
    /isis_state/interface_brief/isis_intf_brief/circuit_type (isis-circ-type)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_circuit_type is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_circuit_type() directly.

    YANG Description: Type of ISIS Circuit
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=unicode,
                restriction_type="dict_key",
                restriction_arg={
                    u'is-circ-lan': {'value': 2},
                    u'is-circ-ptpt': {'value': 1},
                    u'is-circ-unknown': {'value': 0},
                },
            ),
            is_leaf=True,
            yang_name="circuit-type",
            rest_name="circuit-type",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace='urn:brocade.com:mgmt:brocade-isis-operational',
            defining_module='brocade-isis-operational',
            yang_type='isis-circ-type',
            is_config=False,
        )
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """circuit_type must be of a type compatible with isis-circ-type""",
            'defined-type': "brocade-isis-operational:isis-circ-type",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'is-circ-lan': {'value': 2}, u'is-circ-ptpt': {'value': 1}, u'is-circ-unknown': {'value': 0}},), is_leaf=True, yang_name="circuit-type", rest_name="circuit-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='isis-circ-type', is_config=False)""",
        })

    self.__circuit_type = t
    if hasattr(self, '_set'):
        self._set()
0.004826
def setup(app):
    ''' Required Sphinx extension setup function. '''
    app.add_autodocumenter(ColorDocumenter)
    app.add_autodocumenter(EnumDocumenter)
    app.add_autodocumenter(PropDocumenter)
    app.add_autodocumenter(ModelDocumenter)
0.004115
def parse_source(self):
    """Parse source text to find executable lines, excluded lines, etc.

    Return values are 1) a set of executable line numbers, and 2) a set of
    excluded line numbers.

    Reported line numbers are normalized to the first line of multi-line
    statements.
    """
    try:
        self._raw_parse()
    except (tokenize.TokenError, IndentationError):
        _, tokerr, _ = sys.exc_info()
        msg, lineno = tokerr.args
        raise NotPython(
            "Couldn't parse '%s' as Python source: '%s' at %s" %
            (self.filename, msg, lineno)
        )

    excluded_lines = self.first_lines(self.excluded)
    lines = self.first_lines(
        self.statement_starts,
        excluded_lines,
        self.docstrings
    )
    return lines, excluded_lines
0.003348
def _add_rule(self, state, rule):
    """Parse rule and add it to machine (for internal use)."""
    if rule.strip() == "-":
        parsed_rule = None
    else:
        parsed_rule = rule.split(',')
        if (len(parsed_rule) != 3 or
                parsed_rule[1] not in ['L', 'N', 'R'] or
                len(parsed_rule[2]) > 1):
            raise SyntaxError('Wrong format of rule: ' + rule)
        if parsed_rule[0] == "":
            parsed_rule[0] = self.alphabet[len(self.states[state])]
        if parsed_rule[2] == "":
            parsed_rule[2] = state
    self.states[state].append(parsed_rule)
0.004518
def string_to_char(l):
    '''Convert 1-D list of strings to 2-D list of chars.'''
    if not l:
        return []
    if l == ['']:
        l = [' ']
    maxlen = reduce(max, map(len, l))
    ll = [x.ljust(maxlen) for x in l]
    result = []
    for s in ll:
        result.append([x for x in s])
    return result
0.012579
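A quick demonstration of string_to_char: shorter strings are padded with spaces so every row of the 2-D char list has equal length. (Under Python 3, the function above additionally needs `from functools import reduce` to run.)

print(string_to_char(['ab', 'c']))   # [['a', 'b'], ['c', ' ']]
print(string_to_char(['']))          # [[' ']]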
def create_timeline(self, timeline, scope_identifier, hub_name, plan_id):
    """CreateTimeline.

    :param :class:`<Timeline> <azure.devops.v5_0.task.models.Timeline>` timeline:
    :param str scope_identifier: The project GUID to scope the request
    :param str hub_name: The name of the server hub: "build" for the Build
        server or "rm" for the Release Management server
    :param str plan_id:
    :rtype: :class:`<Timeline> <azure.devops.v5_0.task.models.Timeline>`
    """
    route_values = {}
    if scope_identifier is not None:
        route_values['scopeIdentifier'] = self._serialize.url('scope_identifier', scope_identifier, 'str')
    if hub_name is not None:
        route_values['hubName'] = self._serialize.url('hub_name', hub_name, 'str')
    if plan_id is not None:
        route_values['planId'] = self._serialize.url('plan_id', plan_id, 'str')
    content = self._serialize.body(timeline, 'Timeline')
    response = self._send(http_method='POST',
                          location_id='83597576-cc2c-453c-bea6-2882ae6a1653',
                          version='5.0',
                          route_values=route_values,
                          content=content)
    return self._deserialize('Timeline', response)
0.006074
def parallel(fsms, test):
    '''
    Crawl several FSMs in parallel, mapping the states of a larger meta-FSM.
    To determine whether a state in the larger FSM is final, pass all of
    the finality statuses (e.g. [True, False, False]) to `test`.
    '''
    alphabet = set().union(*[fsm.alphabet for fsm in fsms])
    initial = dict([(i, fsm.initial) for (i, fsm) in enumerate(fsms)])

    # dedicated function accepts a "superset" and returns the next "superset"
    # obtained by following this transition in the new FSM
    def follow(current, symbol):
        next = {}
        for i in range(len(fsms)):
            if symbol not in fsms[i].alphabet and anything_else in fsms[i].alphabet:
                actual_symbol = anything_else
            else:
                actual_symbol = symbol
            if i in current \
                    and current[i] in fsms[i].map \
                    and actual_symbol in fsms[i].map[current[i]]:
                next[i] = fsms[i].map[current[i]][actual_symbol]
        if len(next.keys()) == 0:
            raise OblivionError
        return next

    # Determine the "is final?" condition of each substate, then pass it to
    # the test to determine finality of the overall FSM.
    def final(state):
        accepts = [i in state and state[i] in fsm.finals
                   for (i, fsm) in enumerate(fsms)]
        return test(accepts)

    return crawl(alphabet, initial, final, follow).reduce()
0.026337
def generate_querystring(params):
    """
    Generate a querystring suitable for use in the v2 api.

    The Requests library doesn't know how to generate querystrings that
    encode dictionaries using square brackets:
    https://api.mollie.com/v2/methods?amount[value]=100.00&amount[currency]=USD

    Note: we use `sorted()` to work around a difference in iteration
    behaviour between Python 2 and 3. This makes the output predictable, and
    ordering of querystring parameters shouldn't matter.
    """
    if not params:
        return None
    parts = []
    for param, value in sorted(params.items()):
        if not isinstance(value, dict):
            parts.append(urlencode({param: value}))
        else:
            # encode dictionary with square brackets
            for key, sub_value in sorted(value.items()):
                composed = '{param}[{key}]'.format(param=param, key=key)
                parts.append(urlencode({composed: sub_value}))
    if parts:
        return '&'.join(parts)
0.003988
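A sketch of the bracket encoding generate_querystring produces, assuming `urlencode` comes from Python 3's urllib.parse in the surrounding module:

params = {'locale': 'nl_NL', 'amount': {'value': '100.00', 'currency': 'USD'}}
print(generate_querystring(params))
# amount%5Bcurrency%5D=USD&amount%5Bvalue%5D=100.00&locale=nl_NL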
def _checkremove_que(self, word):
    """If word ends in -que and if word is not in pass list, strip -que"""
    in_que_pass_list = False
    que_pass_list = ['atque', 'quoque', 'neque', 'itaque', 'absque', 'apsque',
                     'abusque', 'adaeque', 'adusque', 'denique', 'deque',
                     'susque', 'oblique', 'peraeque', 'plenisque',
                     'quandoque', 'quisque', 'quaeque', 'cuiusque', 'cuique',
                     'quemque', 'quamque', 'quaque', 'quique', 'quorumque',
                     'quarumque', 'quibusque', 'quosque', 'quasque',
                     'quotusquisque', 'quousque', 'ubique', 'undique',
                     'usque', 'uterque', 'utique', 'utroque', 'utribique',
                     'torque', 'coque', 'concoque', 'contorque', 'detorque',
                     'decoque', 'excoque', 'extorque', 'obtorque', 'optorque',
                     'retorque', 'recoque', 'attorque', 'incoque', 'intorque',
                     'praetorque']

    if word not in que_pass_list:
        word = re.sub(r'que$', '', word)
    else:
        in_que_pass_list = True

    return word, in_que_pass_list
0.02473
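Example behaviour of _checkremove_que; `stemmer` is a hypothetical instance of the class this method belongs to, and 'laudatque' is an illustrative input, not from the source:

# the enclitic -que is stripped unless the word is a known exception
print(stemmer._checkremove_que('laudatque'))  # ('laudat', False)
print(stemmer._checkremove_que('quoque'))     # ('quoque', True)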
def get_generation_code(self):
    """Return python code that generates all drawn gates."""
    if len(self.gates) < 1:
        code = ''
    else:
        import_list = set([gate._gencode_gate_class for gate in self.gates])
        import_list = 'from FlowCytometryTools import ' + ', '.join(import_list)
        code_list = [gate.get_generation_code() for gate in self.gates]
        code_list.sort()
        code_list = '\n'.join(code_list)
        code = import_list + 2 * '\n' + code_list

    self.callback(Event('generated_code', {'code': code}))
    return code
0.006202
def get_cfn_parameters(self):
    """Return a dictionary of variables with `type` :class:`CFNType`.

    Returns:
        dict: variables that need to be submitted as CloudFormation
            Parameters.
    """
    variables = self.get_variables()
    output = {}
    for key, value in variables.items():
        if hasattr(value, "to_parameter_value"):
            output[key] = value.to_parameter_value()
    return output
0.004237
def __generate_method(name):
    """
    Wraps the DataFrame's original method by name to return the derived
    class instance.
    """
    try:
        func = getattr(DataFrame, name)
    except AttributeError as e:
        # PySpark version is too old. Rebind the exception so the closure can
        # re-raise it under Python 3, where `e` is cleared after this block
        # (the original closed over `e` directly, which fails on Python 3).
        error = e

        def func(self, *args, **kwargs):
            raise error

        return func

    wraps = getattr(functools, "wraps", lambda _: lambda f: f)  # py3.4+

    @wraps(func)
    def _wrapper(self, *args, **kwargs):
        dataframe = func(self, *args, **kwargs)
        if self.__class__ != SourcedDataFrame \
                and isinstance(self, SourcedDataFrame) \
                and isinstance(dataframe, DataFrame):
            return self.__class__(dataframe._jdf, self._session, self._implicits)
        return dataframe

    return _wrapper
0.004556
def sentence_similarity(sentence1, sentence2):
    """ compute the sentence similarity using Wordnet """
    # Tokenize and tag
    sentence1 = pos_tag(word_tokenize(sentence1))
    sentence2 = pos_tag(word_tokenize(sentence2))

    # Get the synsets for the tagged words
    synsets1 = [tagged_to_synset(*tagged_word) for tagged_word in sentence1]
    synsets2 = [tagged_to_synset(*tagged_word) for tagged_word in sentence2]

    # Filter out the Nones
    synsets1 = [ss for ss in synsets1 if ss]
    synsets2 = [ss for ss in synsets2 if ss]

    # print(synsets1)
    # print(synsets2)

    score, count = 0.0, 0.0

    # For each word in the first sentence
    for synset in synsets1:
        # Get the similarity value of the most similar word in the other
        # sentence
        best_score = [synset.path_similarity(ss) for ss in synsets2
                      if synset.path_similarity(ss)]

        # Check that the similarity could have been computed
        if best_score:
            score += max(best_score)
            count += 1

    # Average the values
    if count > 0:
        score /= count
    else:
        score = 0
    return score
0.005348
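A hedged usage sketch: running this requires NLTK with its tokenizer, tagger, and WordNet data downloaded, plus the tagged_to_synset helper from the surrounding module, so it is shown commented out:

# print(sentence_similarity("Dogs are awesome.", "Cats are beautiful."))
# prints a value in [0, 1]; the exact number depends on the WordNet version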
def ensure_us_time_resolution(val):
    """Convert val out of numpy time, for use in to_dict.

    Needed because of numpy bug GH#7619.
    """
    if np.issubdtype(val.dtype, np.datetime64):
        val = val.astype('datetime64[us]')
    elif np.issubdtype(val.dtype, np.timedelta64):
        val = val.astype('timedelta64[us]')
    return val
0.002967
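A small check of ensure_us_time_resolution with NumPy scalars: both datetime64 and timedelta64 inputs are coerced to microsecond resolution.

import numpy as np

val = np.datetime64('2000-01-01', 'ns')
print(ensure_us_time_resolution(val).dtype)                      # datetime64[us]
print(ensure_us_time_resolution(np.timedelta64(1, 's')).dtype)   # timedelta64[us]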
def load(self, filename, append=False):
    """Load collection from pickled file *filename*.

    *append* determines if the saved collection is added to the current one
    or if it replaces the current content.

    If no extension is provided, ".collection" is appended.
    """
    tmp = cPickle.load(open(self._canonicalize(filename), 'rb'))
    if append:
        self.extend(tmp)
    else:
        self[:] = tmp[:]
    del tmp
0.004193
def render(self, xml, context, raise_on_errors=True):
    """Render xml string and apply XSLT transformation with context"""
    if xml:
        self.xml = xml
        # render XSL
        self.render_xsl(self.root, context)
        # create root XSL sheet
        xsl_ns = self.namespaces['xsl']
        rootName = etree.QName(xsl_ns, 'stylesheet')
        root = etree.Element(rootName, nsmap={'xsl': xsl_ns})
        sheet = etree.ElementTree(root)
        template = etree.SubElement(root, etree.QName(xsl_ns, "template"),
                                    match='/')
        # put OpenOffice tree into XSLT sheet
        template.append(self.root)
        self.root = root
        # drop XSL styles
        self.remove_style()
        # self.debug(self.xml)
        try:
            # transform XSL
            xsl = etree.XSLT(self.root)
            self.root = xsl(context)
        except etree.Error as e:
            # log errors
            for l in e.error_log:
                self.error("XSLT error at line %s col %s:" % (l.line, l.column))
                self.error("  message: %s" % l.message)
                self.error("  domain: %s (%d)" % (l.domain_name, l.domain))
                self.error('  type: %s (%d)' % (l.type_name, l.type))
                self.error('  level: %s (%d)' % (l.level_name, l.level))
                self.error('  filename: %s' % l.filename)
            if raise_on_errors:
                raise
        return self.xml
    else:
        return xml
0.004326
def from_msm(cls, msm, n_macrostates, filter=1.1, save_all_maps=True,
             n_proc=1, chunk_size=100):
    """Create and fit lumped model from pre-existing MSM.

    Parameters
    ----------
    msm : MarkovStateModel
        The input microstate msm to use.
    n_macrostates : int
        The number of macrostates

    Returns
    -------
    lumper : cls
        The fit MVCA object.
    """
    params = msm.get_params()
    lumper = cls(n_macrostates, filter, save_all_maps, n_proc, chunk_size,
                 **params)

    lumper.transmat_ = msm.transmat_
    lumper.populations_ = msm.populations_
    lumper.mapping_ = msm.mapping_
    lumper.countsmat_ = msm.countsmat_
    lumper.n_states_ = msm.n_states_

    if n_macrostates is not None:
        lumper._do_lumping()

    return lumper
0.003311
def create_superfile(cookie, path, block_list):
    '''Merge the temporary files produced by slice_upload().

    path - absolute path of the file on the server.
    block_list - list of the MD5 checksums of these file slices.

    Returns the complete pcs info of the file.
    '''
    url = ''.join([
        const.PCS_URL_C,
        'file?method=createsuperfile&app_id=250528',
        '&path=', encoder.encode_uri_component(path),
        '&', cookie.sub_output('BDUSS'),
    ])
    param = {'block_list': block_list}
    data = 'param=' + json.dumps(param)
    req = net.urlopen(url, headers={'Cookie': cookie.header_output()},
                      data=data.encode())
    if req:
        return json.loads(req.data.decode())
    else:
        return None
0.00155
def metrique_object(_oid, _id=None, _hash=None, _start=None, _end=None,
                    _e=None, _v=None, id=None, __v__=None, **kwargs):
    '''
    Function which takes a dictionary (Mapping) object as input and returns
    back a metrique object.

    Special meta properties are added to each object::
        _oid: ...
        _start: ...
        ...

    FIXME
    '''
    # NOTE: we completely ignore incoming 'id' keys!
    # id is RESERVED and ALWAYS expected to be 'autoincrement'
    # upon insertion into DB (though, it's optional, depending
    # on backend storage behavior).
    if id:
        warnings.warn('non-null "id" keys detected, ignoring them!')

    _e = dict(_e or {})  # expecting a dict with copy() attr
    _v = int(_v or 0)

    if not isinstance(_start, float):
        _start = dt2ts(_start) if _start else utcnow(as_datetime=False)
    assert _start is not None, "_start (%s) must be set!" % _start

    if not isinstance(_end, float):
        _end = dt2ts(_end) if _end else None

    _err_msg = "_end(%s) must be >= _start(%s) or None!" % (_end, _start)
    assert _end is None or bool(_end >= _start), _err_msg

    # these meta fields are used to generate unique object _hash
    kwargs['_oid'] = _oid
    kwargs['_v'] = _v
    kwargs['_id'] = gen_id(_oid, _start, _end)  # ignore passed in _id
    # generate unique, consistent object _hash based on 'frozen' obj contents
    # FIXME: make _hash == None valid
    # kwargs['_hash'] = jsonhash(kwargs) if _hash else None
    kwargs['_hash'] = jsonhash(kwargs)

    # add some additional non-hashable meta data
    kwargs['_start'] = _start
    kwargs['_end'] = _end
    kwargs['__v__'] = __v__ or __version__
    kwargs['_e'] = _e
    return kwargs
0.001147
def user(self, message):
    """ Creates a user log (if user logging is turned on)

    Uses the log path defined by ``Debug.setUserLogFile()``. If no log file
    is defined, sends to STDOUT.

    Note: Does *not* use Java string formatting like Sikuli. Format your
    message with Python ``basestring.format()`` instead.
    """
    if Settings.UserLogs:
        self._write_log(Settings.UserLogPrefix, Settings.UserLogTime, message)
0.008565
def load_labware_by_name(
        self, labware_name: str, location: types.DeckLocation,
        label: str = None) -> Labware:
    """ A convenience function to specify a piece of labware by name.

    For labware already defined by Opentrons, this is a convenient way to
    collapse the two stages of labware initialization (creating the labware
    and adding it to the protocol) into one.

    This function returns the created and initialized labware for use later
    in the protocol.

    :param str labware_name: The name of the labware to load
    :param location: The slot into which to load the labware such as
                     1 or '1'
    :type location: int or str
    :param str label: An optional special name to give the labware. If
                      specified, this is the name the labware will appear as
                      in the run log and the calibration view in the
                      Opentrons app.
    """
    labware = load(labware_name,
                   self._deck_layout.position_for(location),
                   label)
    return self.load_labware(labware, location)
0.001674
def acquire(self, blocking=True, delay=DELAY_INCREMENT, max_delay=MAX_DELAY,
            timeout=None):
    """Attempt to acquire the given lock.

    :param blocking: whether to wait forever to try to acquire the lock
    :type blocking: bool
    :param delay: when blocking this is the delay time in seconds that
                  will be added after each failed acquisition
    :type delay: int/float
    :param max_delay: the maximum delay to have (this limits the
                      accumulated delay(s) added after each failed
                      acquisition)
    :type max_delay: int/float
    :param timeout: an optional timeout (limits how long blocking
                    will occur for)
    :type timeout: int/float
    :returns: whether or not the acquisition succeeded
    :rtype: bool
    """
    if delay < 0:
        raise ValueError("Delay must be greater than or equal to zero")
    if timeout is not None and timeout < 0:
        raise ValueError("Timeout must be greater than or equal to zero")
    if delay >= max_delay:
        max_delay = delay
    self._do_open()
    watch = _utils.StopWatch(duration=timeout)
    r = _utils.Retry(delay, max_delay, sleep_func=self.sleep_func,
                     watch=watch)
    with watch:
        gotten = r(self._try_acquire, blocking, watch)
    if not gotten:
        self.acquired = False
        return False
    else:
        self.acquired = True
        self.logger.log(_utils.BLATHER,
                        "Acquired file lock `%s` after waiting %0.3fs [%s"
                        " attempts were required]", self.path,
                        watch.elapsed(), r.attempts)
        return True
0.002169
def sam_conversions(self, sam_file, depth=True):
    """
    Convert sam files to bam files, then sort and index them for later use.

    :param bool depth: also calculate coverage over each position
    """
    cmd = self.tools.samtools + " view -bS " + sam_file + " > " + sam_file.replace(".sam", ".bam") + "\n"
    cmd += self.tools.samtools + " sort " + sam_file.replace(".sam", ".bam") + " -o " + sam_file.replace(".sam", "_sorted.bam") + "\n"
    cmd += self.tools.samtools + " index " + sam_file.replace(".sam", "_sorted.bam") + "\n"
    if depth:
        cmd += self.tools.samtools + " depth " + sam_file.replace(".sam", "_sorted.bam") + " > " + sam_file.replace(".sam", "_sorted.depth") + "\n"
    return cmd
0.007926
def restore_env_from_file(env_file):
    '''Restore the current environment from an environment stored in a yaml
    file.

    :param env_file: Path to environment yaml file.
    '''
    with open(env_file, 'r') as f:
        env_dict = yaml.load(f.read())

    restore_env(env_dict)
0.003448
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'text') and self.text is not None:
        _dict['text'] = self.text
    if hasattr(self, 'keywords') and self.keywords is not None:
        _dict['keywords'] = [x._to_dict() for x in self.keywords]
    return _dict
0.005571
def _fail(self, request_id, failure, duration):
    """Publish a CommandFailedEvent."""
    self.listeners.publish_command_failure(
        duration, failure, self.name, request_id,
        self.sock_info.address, self.op_id)
0.008299
def get(self, sid):
    """
    Constructs a RecordingContext

    :param sid: The unique string that identifies the resource

    :returns: twilio.rest.api.v2010.account.recording.RecordingContext
    :rtype: twilio.rest.api.v2010.account.recording.RecordingContext
    """
    return RecordingContext(self._version,
                            account_sid=self._solution['account_sid'],
                            sid=sid, )
0.007519
def RdatasetsBM(database, host=rbiomart_host):
    """
    Lists BioMart datasets through an RPY2 connection.

    :param database: a database listed in RdatabasesBM()
    :param host: address of the host server, default='www.ensembl.org'

    :returns: nothing
    """
    biomaRt = importr("biomaRt")
    ensemblMart = biomaRt.useMart(database, host=host)
    print(biomaRt.listDatasets(ensemblMart))
0.007519
def appendtsv(table, source=None, encoding=None, errors='strict',
              write_header=False, **csvargs):
    """
    Convenience function, as :func:`petl.io.csv.appendcsv` but with different
    default dialect (tab delimited).
    """
    csvargs.setdefault('dialect', 'excel-tab')
    return appendcsv(table, source=source, encoding=encoding, errors=errors,
                     write_header=write_header, **csvargs)
0.002342
def moment(self, axis, channel=0, moment=1, *, resultant=None):
    """Take the nth moment of the dataset along one axis, adding lower rank
    channels.

    New channels have names ``<channel name>_<axis name>_moment_<moment num>``.

    Moment 0 is the integral of the slice. Moment 1 is the weighted average
    or "Center of Mass", normalized by the integral. Moment 2 is the
    variance, the central moment about the center of mass, normalized by the
    integral. Moments 3+ are central moments about the center of mass,
    normalized by the integral and by the standard deviation to the power of
    the moment.

    Moments, especially higher order moments, are susceptible to noise and
    baseline. It is recommended when used with real data to use
    :meth:`WrightTools.data.Channel.clip` in conjunction with moments to
    reduce effects of noise.

    Parameters
    ----------
    axis : int or str
        The axis to take the moment along. If given as an integer, the axis
        with that index is used. If given as a string, the axis with that
        name is used. The axis must exist, and be a 1D array-aligned axis
        (i.e. have a shape with a single value which is not ``1``). The
        collapsed axis must be monotonic to produce correct results. The
        axis to collapse along is inferred from the shape of the axis.
    channel : int or str
        The channel to take the moment. If given as an integer, the channel
        with that index is used. If given as a string, the channel with that
        name is used. The channel must have values along the axis (i.e. its
        shape must not be ``1`` in the dimension for which the axis is not
        ``1``). Default is 0, the first channel.
    moment : int or tuple of int
        The moments to take. One channel will be created for each number
        given. Default is 1, the center of mass.
    resultant : tuple of int
        The resultant shape after the moment operation. By default, it is
        intuited by the axis along which the moment is being taken. This
        default only works if that axis is 1D, so resultant is required if a
        multidimensional axis is passed as the first argument. The
        requirement of monotonicity applies on a per pixel basis.

    See Also
    --------
    collapse
        Reduce dimensionality by some mathematical operation.
    clip
        Set values above/below a threshold to a particular value.
    WrightTools.kit.joint_shape
        Useful for setting `resultant` kwarg based off of axes not collapsed.
    """
    # get axis index ----------------------------------------------------
    axis_index = None
    if resultant is not None:
        for i, (s, r) in enumerate(zip(self.shape, resultant)):
            if s != r and r == 1 and axis_index is None:
                axis_index = i
            elif s == r:
                continue
            else:
                raise wt_exceptions.ValueError(
                    f"Invalid resultant shape '{resultant}' for shape {self.shape}. "
                    + "Consider using `wt.kit.joint_shape` to join non-collapsed axes."
                )
    index = wt_kit.get_index(self.axis_names, axis)
    if axis_index is None:
        axes = [i for i in range(self.ndim) if self.axes[index].shape[i] > 1]
        if len(axes) > 1:
            raise wt_exceptions.MultidimensionalAxisError(axis, "moment")
        elif len(axes) == 0:
            raise wt_exceptions.ValueError(
                "Axis {} is a single point, cannot compute moment".format(axis)
            )
        axis_index = axes[0]
    warnings.warn("moment", category=wt_exceptions.EntireDatasetInMemoryWarning)

    channel_index = wt_kit.get_index(self.channel_names, channel)
    channel = self.channel_names[channel_index]
    if self[channel].shape[axis_index] == 1:
        raise wt_exceptions.ValueError(
            "Channel '{}' has a single point along Axis '{}', cannot compute moment".format(
                channel, axis
            )
        )
    new_shape = list(self[channel].shape)
    new_shape[axis_index] = 1
    channel = self[channel]
    axis_inp = axis
    axis = self.axes[index]
    x = axis[:]
    if np.any(np.isnan(x)):
        raise wt_exceptions.ValueError("Axis '{}' includes NaN".format(axis_inp))
    y = np.nan_to_num(channel[:])

    try:
        moments = tuple(moment)
    except TypeError:
        moments = (moment,)

    multiplier = 1
    if 0 in moments:
        # May be possible to optimize, probably doesn't need the sum
        # only matters for integral, all others normalize by integral
        multiplier = np.sign(
            np.sum(np.diff(x, axis=axis_index), axis=axis_index, keepdims=True)
        )

    for moment in moments:
        about = 0
        norm = 1
        if moment > 0:
            norm = np.trapz(y, x, axis=axis_index)
            norm = np.array(norm)
            norm.shape = new_shape
        if moment > 1:
            about = np.trapz(x * y, x, axis=axis_index)
            about = np.array(about)
            about.shape = new_shape
            about /= norm
        if moment > 2:
            sigma = np.trapz((x - about) ** 2 * y, x, axis=axis_index)
            sigma = np.array(sigma)
            sigma.shape = new_shape
            sigma /= norm
            sigma **= 0.5
            norm *= sigma ** moment

        values = np.trapz((x - about) ** moment * y, x, axis=axis_index)
        values = np.array(values)
        values.shape = new_shape
        values /= norm

        if moment == 0:
            values *= multiplier
        self.create_channel(
            "{}_{}_{}_{}".format(channel.natural_name, axis_inp, "moment", moment),
            values=values,
        )
0.003655
def get_type(full_path):
    """Get the type (socket, file, dir, symlink, ...) for the provided path"""
    status = {'type': []}
    if os.path.ismount(full_path):
        status['type'] += ['mount-point']
    elif os.path.islink(full_path):
        status['type'] += ['symlink']
    if os.path.isfile(full_path):
        status['type'] += ['file']
    elif os.path.isdir(full_path):
        status['type'] += ['dir']
    if not status['type']:
        # The original read status['mode'] (never set) and os.stat.S_ISSOCK
        # (which doesn't exist); the mode bits come from os.stat() and the
        # S_IS* predicates live in the stat module.
        mode = os.stat(full_path).st_mode
        if stat.S_ISSOCK(mode):
            status['type'] += ['socket']
        elif stat.S_ISCHR(mode):
            status['type'] += ['special']
        elif stat.S_ISBLK(mode):
            status['type'] += ['block-device']
        elif stat.S_ISFIFO(mode):
            status['type'] += ['pipe']
    if not status['type']:
        status['type'] += ['unknown']
    elif status['type'] and status['type'][-1] == 'symlink':
        status['type'] += ['broken']
    return status['type']
0.001013
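Minimal check of get_type, assuming the os and stat imports used by the function; run as a script so __file__ resolves:

print(get_type('.'))        # ['dir'] (or ['mount-point', 'dir'] at a mount point)
print(get_type(__file__))   # ['file']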
def regex_replace(regex, repl, text):
    r"""
    thin wrapper around re.sub

    MULTILINE and DOTALL are on by default in all util_regex functions

    Args:
        regex (str): pattern to find
        repl (str): replace pattern with this
        text (str): text to modify

    Returns:
        str: modified text

    Example1:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_regex import *  # NOQA
        >>> regex = r'\(.*\):'
        >>> repl = '(*args)'
        >>> text = '''def foo(param1,
        ...                  param2,
        ...                  param3):'''
        >>> result = regex_replace(regex, repl, text)
        >>> print(result)
        def foo(*args)

    Example2:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_regex import *  # NOQA
        >>> import utool as ut
        >>> regex = ut.named_field_regex([('keyword', 'def'), ' ', ('funcname', '.*'), '\(.*\):'])
        >>> repl = ut.named_field_repl([('funcname',), ('keyword',)])
        >>> text = '''def foo(param1,
        ...                  param2,
        ...                  param3):'''
        >>> result = regex_replace(regex, repl, text)
        >>> print(result)
        foodef
    """
    return re.sub(regex, repl, text, **RE_KWARGS)
0.000782
def from_email_message(cls, message, local_id=None):
    '''
    Convert an :class:`email.message.Message` or compatible message object
    into a CERP XML :class:`eulxml.xmlmap.cerp.Message`. If an id is
    specified, it will be stored in the Message <LocalId>.

    :param message: `email.message.Message` object
    :param local_id: optional message id to be set as `local_id`

    :returns: :class:`eulxml.xmlmap.cerp.Message` instance populated with
        message information
    '''
    result = cls()
    if local_id is not None:
        # note: the original assigned the builtin `id` here, which looks
        # like a bug; the parameter is what should be stored
        result.local_id = local_id

    message_id = message.get('Message-Id')
    if message_id:
        result.message_id_supplied = True
        result.message_id = message_id

    result.mime_version = message.get('MIME-Version')

    dates = message.get_all('Date', [])
    result.orig_date_list.extend([parse_mail_date(d) for d in dates])

    result.from_list.extend(message.get_all('From', []))
    result.sender_list.extend(message.get_all('From', []))
    try:
        result.to_list.extend(message.get_all('To', []))
    except UnicodeError:
        print(repr(message['To']))
        raise
    result.cc_list.extend(message.get_all('Cc', []))
    result.bcc_list.extend(message.get_all('Bcc', []))
    result.in_reply_to_list.extend(message.get_all('In-Reply-To', []))
    result.references_list.extend(message.get_all('References', []))
    result.subject_list.extend(message.get_all('Subject', []))
    result.comments_list.extend(message.get_all('Comments', []))
    result.keywords_list.extend(message.get_all('Keywords', []))

    headers = [Header(name=key, value=val) for key, val in message.items()]
    result.headers.extend(headers)

    # FIXME: skip multipart messages for now
    if not message.is_multipart():
        result.create_single_body()
        # FIXME: this is a small subset of the actual elements CERP allows.
        # we should add the rest of them, too.

        # message.get_content_type() always returns something. only
        # put it in the CERP if a Content-Type was explicitly specified.
        if message['Content-Type']:
            result.single_body.content_type_list.append(message.get_content_type())
        if message.get_content_charset():
            result.single_body.charset_list.append(message.get_content_charset())
        if message.get_filename():
            result.single_body.content_name_list.append(message.get_filename())

        # FIXME: attaching the body_content only makes sense for text
        # content types. we'll eventually need a better solution for
        # non-text messages
        result.single_body.create_body_content()
        payload = message.get_payload(decode=False)

        # if not unicode, attempt to convert
        if isinstance(payload, six.binary_type):
            charset = message.get_charset()
            # decode according to the specified character set, if any
            if charset is not None:
                charset_decoder = codecs.getdecoder(str(charset))
                payload, length = charset_decoder(payload)
            # otherwise, just try to convert
            else:
                payload = u(payload)

        # remove any control characters not allowed in XML
        control_char_map = dict.fromkeys(range(32))
        for i in [9, 10, 13]:
            # preserve horizontal tab, line feed, carriage return
            del control_char_map[i]
        payload = u(payload).translate(control_char_map)

        result.single_body.body_content.content = payload
    else:
        # TODO: handle multipart
        logger.warn('CERP conversion does not yet handle multipart')

    # assume we've normalized newlines:
    result.eol = EOLMAP[os.linesep]

    return result
0.002984
def image_tasks(self):
    """
    Returns a json-schema document that represents a container of tasks
    entities.
    """
    uri = "/%s/tasks" % self.uri_base
    resp, resp_body = self.api.method_get(uri)
    return resp_body
0.007752
def html(self, text=TEXT):
    """ Generate an HTML file from the report data. """
    self.logger.debug("Generating the HTML report{}..."
                      .format(["", " (text only)"][text]))
    html = []
    for piece in self._pieces:
        if isinstance(piece, string_types):
            html.append(markdown2.markdown(piece, extras=["tables"]))
        elif isinstance(piece, Element):
            html.append(piece.html())
    return "\n\n".join(html)
0.003968
def delete(self, ids):
    """
    Method to delete vrfs by their ids.

    :param ids: Identifiers of vrfs
    :return: None
    """
    url = build_uri_with_ids('api/v3/vrf/%s/', ids)
    return super(ApiVrf, self).delete(url)
0.007722
def get_job_errors(self, job_id):
    """GetJobErrors
    https://apidocs.joyent.com/manta/api.html#GetJobErrors
    """
    log.debug("GetJobErrors %r", job_id)
    path = "/%s/jobs/%s/live/err" % (self.account, job_id)
    res, content = self._request(path, "GET")
    if res["status"] != "200":
        raise errors.MantaAPIError(res, content)
    return self._job_errors_from_content(content)
0.00463
def retrieve_cluster(self, df, cluster_no):
    """
    Extracts the cluster at the given index from the input dataframe.

    :param df: the dataframe that contains the clusters
    :param cluster_no: the cluster number
    :return: returns the extracted cluster
    """
    if self.is_pyclustering_instance(self.model):
        clusters = self.model.get_clusters()
        mask = []
        for i in range(0, df.shape[0]):
            mask.append(i in clusters[cluster_no])
    else:
        mask = self.model.labels_ == cluster_no  # a boolean mask
    return df[mask]
0.003195
def OnPreferences(self, event):
    """Preferences event handler that launches preferences dialog"""
    preferences = self.interfaces.get_preferences_from_user()
    if preferences:
        for key in preferences:
            if type(config[key]) in (type(u""), type("")):
                config[key] = preferences[key]
            else:
                config[key] = ast.literal_eval(preferences[key])
        self.main_window.grid.grid_renderer.cell_cache.clear()
        self.main_window.grid.ForceRefresh()
0.003663
def fmt_repr(obj):
    """Return an orphaned string representation of an object without the
    clutter of its parent object.
    """
    items = ["%s = %r" % (k, v)
             for k, v in list(exclude_fields(obj).items())]
    return "<%s: {%s}>" % (obj.__class__.__name__, ', '.join(items))
0.014815
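A tiny illustration of fmt_repr; exclude_fields is defined in the surrounding module, so the expected output below assumes it returns the instance's attribute dict:

class Point(object):
    def __init__(self):
        self.x, self.y = 1, 2

# print(fmt_repr(Point()))  # e.g. <Point: {x = 1, y = 2}>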
def get_container_update_kwargs(self, action, container_name, update_values,
                                kwargs=None):
    """
    Generates keyword arguments for the Docker client to update the
    HostConfig of an existing container.

    :param action: Action configuration.
    :type action: ActionConfig
    :param container_name: Container name or id.
    :type container_name: unicode | str
    :param update_values: Dictionary of values to update; i.e. keyword
        arguments to the Docker client.
    :type update_values: dict[unicode | str, unicode | str | int | float | decimal.Decimal]
    :param kwargs: Additional keyword arguments to complement or override
        the configuration-based values.
    :type kwargs: dict | NoneType
    :return: Resulting keyword arguments.
    :rtype: dict
    """
    c_kwargs = dict(container=container_name)
    update_kwargs(c_kwargs, update_values, kwargs)
    return c_kwargs
0.007384
def alloc(self):
    """
    Allocate an ID value and return it.

    Raises:
        ValueError: Out of capacity in ID pool.
    """
    if not self._free:
        self._expand()
    id = self._free.pop()
    self._used.add(id)
    return id
0.007092
def remove_listener(self, registration_id):
    """
    Removes the specified item listener. Returns silently if the specified
    listener was not added before.

    :param registration_id: (str), id of the listener to be deleted.
    :return: (bool), ``true`` if the item listener is removed, ``false``
        otherwise.
    """
    return self._stop_listening(
        registration_id,
        lambda i: list_remove_listener_codec.encode_request(self.name, i))
0.010893
def visual(title, X, activation):
    '''create a grid of images and save it as a final image

    title : grid image name
    X : array of images
    '''
    assert len(X.shape) == 4
    X = X.transpose((0, 2, 3, 1))
    if activation == 'sigmoid':
        X = np.clip((X) * (255.0), 0, 255).astype(np.uint8)
    elif activation == 'tanh':
        X = np.clip((X + 1.0) * (255.0 / 2.0), 0, 255).astype(np.uint8)
    n = np.ceil(np.sqrt(X.shape[0]))
    buff = np.zeros((int(n * X.shape[1]), int(n * X.shape[2]), int(X.shape[3])),
                    dtype=np.uint8)
    for i, img in enumerate(X):
        fill_buf(buff, i, img, X.shape[1:3])
    cv2.imwrite('%s.jpg' % (title), buff)
0.003063
def split_python_text_into_lines(text):
    """
    # TODO: make it so this function returns text so one statement is on one
    # line; that means no splitting up of things like function definitions
    # into multiple lines
    """
    # import jedi
    # script = jedi.Script(text, line=1, column=None, path='')

    def parenthesis_are_balanced(line):
        """ helper

        References:
            http://stackoverflow.com/questions/18007995/recursive-paren-balance
        """
        def balanced(str_, i=0, cnt=0, left='(', right=')'):
            if i == len(str_):
                return cnt == 0
            if cnt < 0:
                return False
            if str_[i] == left:
                return balanced(str_, i + 1, cnt + 1)
            elif str_[i] == right:
                return balanced(str_, i + 1, cnt - 1)
            return balanced(str_, i + 1, cnt)
        return balanced(line)

    lines = text.split('\n')
    new_lines = []
    current_line = ''
    for line in lines:
        current_line += line
        if parenthesis_are_balanced(current_line):
            new_lines.append(current_line)
            current_line = ''
    # the original returned the unmerged `lines` here, which left new_lines
    # unused; returning the merged statements matches the stated intent
    return new_lines
0.004241
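A demonstration of the merge behaviour (with the corrected return value above): a call split across lines only balances its parentheses once the lines are joined.

text = "result = foo(1,\n             2)\nprint(result)"
print(split_python_text_into_lines(text))
# ['result = foo(1,             2)', 'print(result)']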
def clip_out_of_image(self):
    """
    Clip off all parts from all bounding boxes that are outside of the image.

    Returns
    -------
    imgaug.BoundingBoxesOnImage
        Bounding boxes, clipped to fall within the image dimensions.
    """
    bbs_cut = [bb.clip_out_of_image(self.shape)
               for bb in self.bounding_boxes
               if bb.is_partly_within_image(self.shape)]
    return BoundingBoxesOnImage(bbs_cut, shape=self.shape)
0.008282
def delete_metadata(self, loadbalancer, keys=None, node=None):
    """
    Deletes metadata items specified by the 'keys' parameter. If no value
    for 'keys' is provided, all metadata is deleted. If 'node' is supplied,
    the metadata for that node is deleted instead of the load balancer.
    """
    if keys and not isinstance(keys, (list, tuple)):
        keys = [keys]
    md = self.get_metadata(loadbalancer, node=node, raw=True)
    if keys:
        md = [dct for dct in md if dct["key"] in keys]
    if not md:
        # Nothing to do; log it? Raise an error?
        return
    id_list = "&".join(["id=%s" % itm["id"] for itm in md])
    if node:
        uri = "/loadbalancers/%s/nodes/%s/metadata?%s" % (
            utils.get_id(loadbalancer), utils.get_id(node), id_list)
    else:
        uri = "/loadbalancers/%s/metadata?%s" % (
            utils.get_id(loadbalancer), id_list)
    resp, body = self.api.method_delete(uri)
    return body
0.001903
def getPointsForInterpolation(self, EndOfPrdvP, aLvlNow):
    '''
    Finds endogenous interpolation points (x,m) for the expenditure function.

    Parameters
    ----------
    EndOfPrdvP : np.array
        Array of end-of-period marginal values.
    aLvlNow : np.array
        Array of end-of-period asset values that yield the marginal values
        in EndOfPrdvP.

    Returns
    -------
    x_for_interpolation : np.array
        Total expenditure points for interpolation.
    m_for_interpolation : np.array
        Corresponding market resource points for interpolation.
    p_for_interpolation : np.array
        Corresponding permanent income points for interpolation.
    '''
    # Get size of each state dimension
    mCount = aLvlNow.shape[1]
    pCount = aLvlNow.shape[0]
    MedCount = self.MedShkVals.size

    # Calculate endogenous gridpoints and controls
    cLvlNow = np.tile(np.reshape(self.uPinv(EndOfPrdvP), (1, pCount, mCount)),
                      (MedCount, 1, 1))
    MedBaseNow = np.tile(np.reshape(self.uMedPinv(self.MedPrice * EndOfPrdvP),
                                    (1, pCount, mCount)),
                         (MedCount, 1, 1))
    MedShkVals_tiled = np.tile(np.reshape(self.MedShkVals ** (1.0 / self.CRRAmed),
                                          (MedCount, 1, 1)),
                               (1, pCount, mCount))
    MedLvlNow = MedShkVals_tiled * MedBaseNow
    aLvlNow_tiled = np.tile(np.reshape(aLvlNow, (1, pCount, mCount)),
                            (MedCount, 1, 1))
    xLvlNow = cLvlNow + self.MedPrice * MedLvlNow
    mLvlNow = xLvlNow + aLvlNow_tiled

    # Limiting consumption is zero as m approaches the natural borrowing
    # constraint
    x_for_interpolation = np.concatenate((np.zeros((MedCount, pCount, 1)),
                                          xLvlNow), axis=-1)
    temp = np.tile(self.BoroCnstNat(np.reshape(self.pLvlGrid,
                                               (1, self.pLvlGrid.size, 1))),
                   (MedCount, 1, 1))
    m_for_interpolation = np.concatenate((temp, mLvlNow), axis=-1)

    # Make a 3D array of permanent income for interpolation
    p_for_interpolation = np.tile(np.reshape(self.pLvlGrid, (1, pCount, 1)),
                                  (MedCount, 1, mCount + 1))

    # Store for use by cubic interpolator
    self.cLvlNow = cLvlNow
    self.MedLvlNow = MedLvlNow
    self.MedShkVals_tiled = np.tile(np.reshape(self.MedShkVals,
                                               (MedCount, 1, 1)),
                                    (1, pCount, mCount))

    return x_for_interpolation, m_for_interpolation, p_for_interpolation
0.025021
def get_debug_info():
    """Return a list of lines with backend info."""
    from . import __version__
    d = OrderedDict()
    d['Version'] = '%s' % __version__
    for key, val in PyVisaLibrary.get_session_classes().items():
        key_name = '%s %s' % (key[0].name.upper(), key[1])
        try:
            d[key_name] = getattr(val, 'session_issue').split('\n')
        except AttributeError:
            d[key_name] = 'Available ' + val.get_low_level_info()
    return d
0.003752
def save_catalog(filename, catalog, meta=None, prefix=None):
    """
    Save a catalogue of sources using filename as a model. Meta data can be
    written to some file types (fits, votable).

    Each type of source will be in a separate file:

    - base_comp.ext :class:`AegeanTools.models.OutputSource`
    - base_isle.ext :class:`AegeanTools.models.IslandSource`
    - base_simp.ext :class:`AegeanTools.models.SimpleSource`

    Where filename = `base.ext`

    Parameters
    ----------
    filename : str
        Name of file to write, format is determined by extension.
    catalog : list
        A list of sources to write. Sources must be of type
        :class:`AegeanTools.models.OutputSource`,
        :class:`AegeanTools.models.SimpleSource`, or
        :class:`AegeanTools.models.IslandSource`.
    prefix : str
        Prepend each column name with "prefix_". Default is to prepend
        nothing.
    meta : dict
        Meta data to be written to the output file. Support for metadata
        depends on file type.

    Returns
    -------
    None
    """
    ascii_table_formats = {'csv': 'csv', 'tab': 'tab', 'tex': 'latex',
                           'html': 'html'}
    # .ann and .reg are handled by me
    meta = update_meta_data(meta)
    extension = os.path.splitext(filename)[1][1:].lower()
    if extension in ['ann', 'reg']:
        writeAnn(filename, catalog, extension)
    elif extension in ['db', 'sqlite']:
        writeDB(filename, catalog, meta)
    elif extension in ['hdf5', 'fits', 'vo', 'vot', 'xml']:
        write_catalog(filename, catalog, extension, meta, prefix=prefix)
    elif extension in ascii_table_formats.keys():
        write_catalog(filename, catalog, fmt=ascii_table_formats[extension],
                      meta=meta, prefix=prefix)
    else:
        log.warning("extension not recognised {0}".format(extension))
        log.warning("You get tab format")
        write_catalog(filename, catalog, fmt='tab', prefix=prefix)
    return
0.003655
def execute_one_to_many_job(parent_class=None,
                            get_unfinished_kwargs=None,
                            get_unfinished_limit=None,
                            parser_func=None,
                            parser_func_kwargs=None,
                            build_url_func_kwargs=None,
                            downloader_func=None,
                            downloader_func_kwargs=None,
                            post_process_response_func=None,
                            post_process_response_func_kwargs=None,
                            process_item_func_kwargs=None,
                            logger=None,
                            sleep_time=None):
    """
    A standard one-to-many crawling workflow.

    :param parent_class:
    :param get_unfinished_kwargs:
    :param get_unfinished_limit:
    :param parser_func: html parser function.
    :param parser_func_kwargs: other keyword arguments for ``parser_func``
    :param build_url_func_kwargs: other keyword arguments for
        ``parent_class().build_url(**build_url_func_kwargs)``
    :param downloader_func: a function taking ``url`` as its first argument
        that makes the http request and returns a response/html.
    :param downloader_func_kwargs: other keyword arguments for
        ``downloader_func``
    :param post_process_response_func: a callback function taking the
        response/html as its first argument. You can put any logic in it.
        For example, you can make it sleep if you detect that you got banned.
    :param post_process_response_func_kwargs: other keyword arguments for
        ``post_process_response_func``
    :param process_item_func_kwargs: other keyword arguments for
        ``ParseResult().process_item(**process_item_func_kwargs)``
    :param logger:
    :param sleep_time: default 0, wait time before making each request.
    """
    # prepare arguments
    get_unfinished_kwargs = prepare_kwargs(get_unfinished_kwargs)
    parser_func_kwargs = prepare_kwargs(parser_func_kwargs)
    build_url_func_kwargs = prepare_kwargs(build_url_func_kwargs)
    downloader_func_kwargs = prepare_kwargs(downloader_func_kwargs)
    post_process_response_func_kwargs = prepare_kwargs(
        post_process_response_func_kwargs)
    process_item_func_kwargs = prepare_kwargs(process_item_func_kwargs)

    if post_process_response_func is None:
        def post_process_response_func(response, **kwargs):
            pass

    if not isinstance(logger, SpiderLogger):
        raise TypeError

    if sleep_time is None:
        sleep_time = 0

    # do the real job
    query_set = parent_class.get_all_unfinished(**get_unfinished_kwargs)
    if get_unfinished_limit is not None:
        query_set = query_set.limit(get_unfinished_limit)
    todo = list(query_set)
    logger.log_todo_volumn(todo)

    for parent_instance in todo:
        url = parent_instance.build_url(**build_url_func_kwargs)
        logger.log_to_crawl_url(url)

        logger.log_sleeper(sleep_time)
        time.sleep(sleep_time)

        try:
            response_or_html = downloader_func(url, **downloader_func_kwargs)
            if isinstance(response_or_html, string_types):
                parser_func_kwargs["html"] = response_or_html
            else:
                parser_func_kwargs["response"] = response_or_html
            post_process_response_func(response_or_html,
                                       **post_process_response_func_kwargs)
        except Exception as e:
            logger.log_error(e)
            continue

        try:
            parse_result = parser_func(parent=parent_instance,
                                       **parser_func_kwargs)
            parse_result.process_item(**process_item_func_kwargs)
            logger.log_status(parse_result)
        except Exception as e:
            logger.log_error(e)
            continue
0.000521
def _send_get_request(self, path, params, headers):
    """
    Sends the GET request to the Route53 endpoint.

    :param str path: The path to tack on to the endpoint URL for the query.
    :param dict params: Key/value pairs to send.
    :param dict headers: A dict of headers to send with the request.
    :rtype: str
    :returns: The body of the response.
    """
    r = requests.get(self.endpoint + path, params=params, headers=headers)
    r.raise_for_status()
    return r.text
0.003676
def erfcc(x):
    """Complementary error function."""
    z = abs(x)
    t = 1 / (1 + 0.5 * z)
    r = t * math.exp(-z * z - 1.26551223 + t * (1.00002368 + t * (.37409196 +
        t * (.09678418 + t * (-.18628806 + t * (.27886807 + t * (-1.13520398 +
        t * (1.48851587 + t * (-.82215223 + t * .17087277)))))))))
    if (x >= 0.):
        return r
    else:
        return 2. - r
0.001706
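A quick sanity check for erfcc: Python 3.2+ ships math.erfc, and the rational approximation above is accurate to roughly 1.2e-7 everywhere, so a loose tolerance suffices:

import math

for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    assert abs(erfcc(x) - math.erfc(x)) < 1e-6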
def read_method(self): """ Read a method from the peer. """ self._next_method() m = self.queue.get() if isinstance(m, Exception): raise m return m
0.009302
def ravel(self, name=None): """ Convert 2D histogram into 1D histogram with the y-axis repeated along the x-axis, similar to NumPy's ravel(). """ nbinsx = self.nbins(0) nbinsy = self.nbins(1) left_edge = self.xedgesl(1) right_edge = self.xedgesh(nbinsx) out = Hist(nbinsx * nbinsy, left_edge, nbinsy * (right_edge - left_edge) + left_edge, type=self.TYPE, name=name, title=self.title, **self.decorators) for i, bin in enumerate(self.bins(overflow=False)): out.SetBinContent(i + 1, bin.value) out.SetBinError(i + 1, bin.error) return out
0.002692
def IsErrorSuppressedByNolint(category, linenum): """Returns true if the specified error category is suppressed on this line. Consults the global error_suppressions map populated by ParseNolintSuppressions/ProcessGlobalSuppresions/ResetNolintSuppressions. Args: category: str, the category of the error. linenum: int, the current line number. Returns: bool, True iff the error should be suppressed due to a NOLINT comment or global suppression. """ return (_global_error_suppressions.get(category, False) or linenum in _error_suppressions.get(category, set()) or linenum in _error_suppressions.get(None, set()))
0.004525
def send_once(remote, codes, count=None, device=None, address=None): """ All parameters are passed to irsend. See the man page for irsend for details about their usage. Parameters ---------- remote: str codes: [str] count: int device: str address: str Notes ----- No attempt is made to catch or handle errors. See the documentation for subprocess.check_output to see the types of exceptions it may raise. """ args = ['send_once', remote] + codes _call(args, count, device, address)
0.001812
def write(name, value): """Write a raw env value. A ``None`` value clears the environment variable. Args: name: The environment variable name value: The value to write """ if value is not None: environ[name] = builtins.str(value) elif environ.get(name): del environ[name]
0.00304
def get_objectives(self): """Gets the objective list resulting from the search. return: (osid.learning.ObjectiveList) - the objective list raise: IllegalState - list already retrieved *compliance: mandatory -- This method must be implemented.* """ if self.retrieved: raise errors.IllegalState('List has already been retrieved.') self.retrieved = True return objects.ObjectiveList(self._results, runtime=self._runtime)
0.004032
def get_mmol(code, mmol_number=None, outfile=None):
    """
    Get mmol file from PDBe and return its content as a string. Write to file if outfile given.

    Parameters
    ----------
    code : str
        PDB code.
    mmol_number : int
        mmol number (biological assembly number) of file to download. Numbers from PDBe.
        If None, defaults to the preferred biological assembly listed for code on the PDBe.
    outfile : str
        Filepath. Writes returned value to this file.

    Returns
    -------
    mmol_string : str, or None
        Content of the mmol file as a string.
        None if there are no pdbe files to download, as determined by pdbe_status_code().
        None if unable to download the mmol_file from the pdbe site.

    Raises
    ------
    ValueError
        If the number of mmols for code is stored in mmols_numbers and if mmol_number is larger than this value.
    """
    if not mmol_number:
        try:
            mmol_number = preferred_mmol(code=code)
        except (ValueError, TypeError, IOError):
            print("No mmols for {0}".format(code))
            return None

    # sanity check
    if mmols_numbers:
        if code in mmols_numbers.keys():
            num_mmols = mmols_numbers[code][0]
            if mmol_number > num_mmols:
                raise ValueError('There are only {0} mmols for code {1}. mmol_number {2} is too big'
                                 .format(num_mmols, code, mmol_number))

    # Download mmol file from the PDBE webserver.
    pdbe_url = "http://www.ebi.ac.uk/pdbe/entry-files/download/{0}_{1}.mmol".format(code, mmol_number)
    r = requests.get(pdbe_url)
    if r.status_code == 200:
        mmol_string = r.text
    else:
        # Download gz pdb file from the PDB.
        pdb_url = "http://www.rcsb.org/pdb/files/{0}.pdb{1}.gz".format(code.upper(), mmol_number)
        r = requests.get(pdb_url)
        if r.status_code == 200:
            temp_gz = tempfile.NamedTemporaryFile()
            temp_gz.write(r.content)
            temp_gz.flush()  # ensure the payload is on disk before gzip re-reads the file
            with gzip.open(temp_gz.name, 'rb') as foo:
                mmol_string = foo.read().decode()
        else:
            print("Could not download mmol file for {0}.\n Got requests status_code {1}".format(code, r.status_code))
            return None

    # Write to file
    if outfile and mmol_string:
        with open(outfile, 'w') as foo:
            foo.write(mmol_string)

    return mmol_string
0.004119
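An example call, assuming network access to the PDBe/RCSB endpoints used above; '1ek9' is just a sample PDB code:

mmol = get_mmol('1ek9', mmol_number=1, outfile='1ek9_1.mmol')
if mmol is not None:
    print(mmol.splitlines()[0])  # first record of the downloaded assembly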
def latex(source: str): """ Add a mathematical equation in latex math-mode syntax to the display. Instead of the traditional backslash escape character, the @ character is used instead to prevent backslash conflicts with Python strings. For example, \\delta would be @delta. :param source: The string representing the latex equation to be rendered. """ r = _get_report() if 'katex' not in r.library_includes: r.library_includes.append('katex') r.append_body(render_texts.latex(source.replace('@', '\\'))) r.stdout_interceptor.write_source('[ADDED] Latex equation\n')
0.001592
def __complete_refers(self, value: str) -> Iterable[str]: """Return an iterable of possible completions matching the given prefix from the list of referred Vars.""" return map( lambda entry: f"{entry[0].name}", filter( Namespace.__completion_matcher(value), [(s, v) for s, v in self.refers] ), )
0.007916
def Convert(self, wave, flux, target_units, area=None): """Perform unit conversion. Parameters ---------- wave, flux : number or array_like Wavelength and flux values to be used for conversion. target_units : str Unit to convert to. area : number or `None` Telescope :ref:`area <pysynphot-area>`, if applicable. This is only needed for conversions involving counts or ``obmag``. Returns ------- result : number or array_like Converted values. Raises ------ TypeError Conversion to given unit is not allowed. """ try: return self.Dispatch[target_units](wave, flux, area=area) except KeyError: raise TypeError("%s is not a valid flux unit" % (target_units))
0.002281
def tokenize_paragraphs(cls, text):
    """Convert an input string into a list of paragraphs."""
    paragraphs = []
    paragraphs_first_pass = text.split('\n')
    for p in paragraphs_first_pass:
        # raw string: '\s' is a regex character class, not a string escape
        paragraphs_second_pass = re.split(r'\s{4,}', p)
        paragraphs += paragraphs_second_pass

    # Remove empty strings from list
    paragraphs = [p for p in paragraphs if p]

    return paragraphs
0.006834
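The two passes split on newlines first, then on runs of four or more whitespace characters, dropping empty strings. Assuming the method is a classmethod on an enclosing class, here called Tokenizer:

text = "First paragraph.\nSecond one.      Third, split on wide spacing."
print(Tokenizer.tokenize_paragraphs(text))
# ['First paragraph.', 'Second one.', 'Third, split on wide spacing.']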
def _parse_addr(self, addr):
    '''
    Parses address and returns IP version. Raises exception on invalid argument
    '''
    ipv = 0
    try:
        socket.inet_pton(socket.AF_INET6, addr)
        # Convert ::FFFF:x.y.z.y to IPv4
        if addr.lower().startswith('::ffff:'):
            try:
                # validate the embedded dotted quad, e.g. '::ffff:1.2.3.4' -> '1.2.3.4'
                socket.inet_pton(socket.AF_INET, addr.split(':')[-1])
                ipv = 4
            except socket.error:
                ipv = 6
        else:
            ipv = 6
    except socket.error:
        socket.inet_pton(socket.AF_INET, addr)
        ipv = 4
    return ipv
0.009901
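Expected behavior, assuming a parser instance p: plain IPv4 and IPv4-mapped IPv6 addresses report version 4, native IPv6 reports 6, and anything else raises socket.error:

assert p._parse_addr('192.168.0.1') == 4
assert p._parse_addr('::ffff:192.168.0.1') == 4
assert p._parse_addr('2001:db8::1') == 6
# p._parse_addr('not-an-ip') raises socket.error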
def find_file(path): """ Given a path to a part in a zip file, return a path to the file and the path to the part. Assuming /foo.zipx exists as a file, >>> find_file('/foo.zipx/dir/part') # doctest: +SKIP ('/foo.zipx', '/dir/part') >>> find_file('/foo.zipx') # doctest: +SKIP ('/foo.zipx', '') """ path_components = split_all(path) def get_assemblies(): """ Enumerate the various combinations of file paths and part paths """ for n in range(len(path_components), 0, -1): file_c = path_components[:n] part_c = path_components[n:] or [''] yield (os.path.join(*file_c), posixpath.join(*part_c)) for file_path, part_path in get_assemblies(): if os.path.isfile(file_path): return file_path, part_path
0.030014
def deserialize_organization(organization_dict): """ Organization dict-to-object serialization """ return models.Organization( id=organization_dict.get('id'), name=organization_dict.get('name', ''), short_name=organization_dict.get('short_name', ''), description=organization_dict.get('description', ''), logo=organization_dict.get('logo', '') )
0.002469
def setReturnParameter(self, name, type, namespace=None, element_type=0): """Set the return parameter description for the call info.""" parameter = ParameterInfo(name, type, namespace, element_type) self.retval = parameter return parameter
0.00738
def pch_emitter(target, source, env): """Adds the object file target.""" validate_vars(env) pch = None obj = None for t in target: if SCons.Util.splitext(str(t))[1] == '.pch': pch = t if SCons.Util.splitext(str(t))[1] == '.obj': obj = t if not obj: obj = SCons.Util.splitext(str(pch))[0]+'.obj' target = [pch, obj] # pch must be first, and obj second for the PCHCOM to work return (target, source)
0.006211
def reduce_logsumexp(x, reduced_dim, extra_logit=None, name=None): """Numerically stable version of log(reduce_sum(exp(x))). Unlike other reductions, the output has the same shape as the input. Note: with a minor change, we could allow multiple reduced dimensions. Args: x: a Tensor reduced_dim: a dimension in x extra_logit: an optional Tensor broadcastable to (x.shape - reduced_dim) name: an optional string Returns: a Tensor with the same shape and dtype as x. """ reduced_dim = convert_to_dimension(reduced_dim) with tf.variable_scope(name, default_name="reduce_logsumexp"): reduced_shape = x.shape - reduced_dim max_logit = reduce_max(stop_gradient(x), output_shape=reduced_shape) if extra_logit is not None: if isinstance(extra_logit, Tensor): extra_logit = stop_gradient(extra_logit) max_logit = maximum(max_logit, extra_logit) x -= max_logit exp_x = exp(x) sum_exp_x = reduce_sum(exp_x, output_shape=reduced_shape) if extra_logit is not None: sum_exp_x += exp(extra_logit - max_logit) return log(sum_exp_x) + max_logit
0.006233
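The same max-subtraction trick in plain NumPy, for reference: subtracting the per-axis max before exponentiating keeps exp() from overflowing, and adding it back after the log restores the exact value.

import numpy as np

def logsumexp(x, axis):
    m = np.max(x, axis=axis, keepdims=True)
    return np.squeeze(m, axis=axis) + np.log(np.sum(np.exp(x - m), axis=axis))

x = np.array([[1000.0, 1000.0], [0.0, 0.0]])
print(logsumexp(x, axis=1))  # [1000.69314718  0.69314718], no overflow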
def _create_hidden_port(self, context, network_id, device_id, fixed_ips, port_type=DEVICE_OWNER_ROUTER_INTF): """Creates port used specially for HA purposes.""" port = {'port': { 'tenant_id': '', # intentionally not set 'network_id': network_id, 'mac_address': ATTR_NOT_SPECIFIED, 'fixed_ips': fixed_ips, 'device_id': device_id, 'device_owner': port_type, 'admin_state_up': True, 'name': ''}} if extensions.is_extension_supported(self._core_plugin, "dns-integration"): port['port'].update(dns_name='') core_plugin = bc.get_plugin() return core_plugin.create_port(context, port)
0.004981
def getProcessStats(self):
    """Return stats for running and blocked processes, forks,
    context switches and interrupts.

    @return: Dictionary of stats.

    """
    info_dict = {}
    try:
        fp = open(cpustatFile, 'r')
        data = fp.read()
        fp.close()
    except (IOError, OSError):
        raise IOError('Failed reading stats from file: %s' % cpustatFile)
    for line in data.splitlines():
        arr = line.split()
        if len(arr) > 1 and arr[0] in ('ctxt', 'intr', 'softirq',
                                       'processes', 'procs_running',
                                       'procs_blocked'):
            info_dict[arr[0]] = arr[1]
    return info_dict
0.009126
def drag(self, node):
    """ Drags given node to mouse location.
    """
    dx = self.mouse.x - self.graph.x
    dy = self.mouse.y - self.graph.y
    # A dashed line indicates the drag vector.
    s = self.graph.styles.default
    self._ctx.nofill()
    self._ctx.nostroke()
    if s.stroke:
        self._ctx.strokewidth(s.strokewidth)
        self._ctx.stroke(
            s.stroke.r,
            s.stroke.g,
            s.stroke.b,  # was s.stroke.g twice; blue channel restored
            0.75
        )
    p = self._ctx.line(node.x, node.y, dx, dy, draw=False)
    try:
        p._nsBezierPath.setLineDash_count_phase_([2, 4], 2, 50)
    except Exception:
        # dashed strokes are only available on the Cocoa (NodeBox) backend
        pass
    self._ctx.drawpath(p)
    r = node.__class__(None).r * 0.75
    self._ctx.oval(dx - r / 2, dy - r / 2, r, r)
    node.vx = dx / self.graph.d
    node.vy = dy / self.graph.d
0.012277
def copyto(self, other): """Copies the value of this array to another array. If ``other`` is a ``NDArray`` object, then ``other.shape`` and ``self.shape`` should be the same. This function copies the value from ``self`` to ``other``. If ``other`` is a context, a new ``NDArray`` will be first created on the target context, and the value of ``self`` is copied. Parameters ---------- other : NDArray or Context The destination array or context. Returns ------- NDArray, CSRNDArray or RowSparseNDArray The copied array. If ``other`` is an ``NDArray``, then the return value and ``other`` will point to the same ``NDArray``. Examples -------- >>> x = mx.nd.ones((2,3)) >>> y = mx.nd.zeros((2,3), mx.gpu(0)) >>> z = x.copyto(y) >>> z is y True >>> y.asnumpy() array([[ 1., 1., 1.], [ 1., 1., 1.]], dtype=float32) >>> y.copyto(mx.gpu(0)) <NDArray 2x3 @gpu(0)> """ if isinstance(other, NDArray): if other.handle is self.handle: warnings.warn('You are attempting to copy an array to itself', RuntimeWarning) return False return _internal._copyto(self, out=other) elif isinstance(other, Context): hret = NDArray(_new_alloc_handle(self.shape, other, True, self.dtype)) return _internal._copyto(self, out=hret) else: raise TypeError('copyto does not support type ' + str(type(other)))
0.003043
def main(*args):
    r"""Bootstrap Python projects and libraries with virtualenv and pip.

    Also check system requirements before bootstrap and run post bootstrap
    hook if any.

    :param \*args: Command line arguments list.
    """
    # Create parser, read arguments from direct input or command line
    with disable_error_handler():
        args = parse_args(args or sys.argv[1:])

    # Read current config from file and command line arguments
    config = read_config(args.config, args)
    if config is None:
        return True
    bootstrap = config[__script__]

    # Check pre-requirements
    if not check_pre_requirements(bootstrap['pre_requirements']):
        return True

    # Create virtual environment
    env_args = prepare_args(config['virtualenv'], bootstrap)
    if not create_env(
        bootstrap['env'],
        env_args,
        bootstrap['recreate'],
        bootstrap['ignore_activated'],
        bootstrap['quiet']
    ):
        # Exit if couldn't create virtual environment
        return True

    # And install library or project here
    pip_args = prepare_args(config['pip'], bootstrap)
    if not install(
        bootstrap['env'],
        bootstrap['requirements'],
        pip_args,
        bootstrap['ignore_activated'],
        bootstrap['install_dev_requirements'],
        bootstrap['quiet']
    ):
        # Exit if couldn't install requirements into venv
        return True

    # Run post-bootstrap hook
    run_hook(bootstrap['hook'], bootstrap, bootstrap['quiet'])

    # All OK!
    if not bootstrap['quiet']:
        print_message('All OK!')

    # False means everything went alright, exit code: 0
    return False
0.000597
def cdl_addmon(self, source_url, save_path = '/', timeout = 3600): ''' Usage: cdl_addmon <source_url> [save_path] [timeout] - add an offline (cloud) download task and monitor the download progress source_url - the URL to download file from. save_path - path on PCS to save file to. default is to save to root directory '/'. timeout - timeout in seconds. default is 3600 seconds. ''' rpath = self.__get_cdl_dest(source_url, save_path) return self.__cdl_addmon(source_url, rpath, timeout)
0.028
def _transform_col(self, x, i):
    """Encode one categorical column into average target values.

    Args:
        x (pandas.Series): a categorical column to encode
        i (int): column index

    Returns:
        x (pandas.Series): the column encoded into average target values,
            with unseen or missing categories falling back to the global
            target mean.
    """
    return x.fillna(NAN_INT).map(self.target_encoders[i]).fillna(self.target_mean)
0.007916
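A self-contained sketch of the target (mean) encoding that _transform_col applies; the NAN_INT sentinel value and the category -> mean-target mapping below are assumptions mirroring the snippet above, not the library's actual internals:

import pandas as pd

NAN_INT = -1  # assumed sentinel for missing categories
x = pd.Series(['a', 'b', 'a', None])
y = pd.Series([1.0, 0.0, 0.0, 1.0])

encoder = y.groupby(x.fillna(NAN_INT)).mean()  # 'a' -> 0.5, 'b' -> 0.0, -1 -> 1.0
encoded = x.fillna(NAN_INT).map(encoder).fillna(y.mean())
print(encoded.tolist())  # [0.5, 0.0, 0.5, 1.0]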
def join_path(self, *path): """ Unite entries to generate a single path :param path: path items to unite :return: str """ path = self.directory_sep().join(path) return self.normalize_path(path)
0.038647
def runExperiment(args, model=None):
  """
  Run a single OPF experiment.

  .. note:: The caller is responsible for initializing python logging before
     calling this function (e.g., import :mod:`nupic.support`;
     :meth:`nupic.support.initLogging`)

  See also: :meth:`.initExperimentPrng`.

  :param args: (string) Experiment command-line args list. To see all options,
         run with ``--help``:

         .. code-block:: text

           Options:
             -h, --help           show this help message and exit
             -c <CHECKPOINT>      Create a model and save it under the given
                                  <CHECKPOINT> name, but don't run it
             --listCheckpoints    List all available checkpoints
             --listTasks          List all task labels in description.py
             --load=<CHECKPOINT>  Load a model from the given <CHECKPOINT> and run
                                  it. Run with --listCheckpoints flag for more
                                  details.
             --newSerialization   Use new capnproto serialization
             --tasks              Run the tasks with the given TASK LABELS in the
                                  order they are given. Either end of arg-list, or
                                  a standalone dot ('.') arg or the next short or
                                  long option name (-a or --blah) terminates the
                                  list. NOTE: FAILS TO RECOGNIZE task label names
                                  with one or more leading dashes. [default: run
                                  all of the tasks in description.py]
             --testMode           Reduce iteration count for testing
             --noCheckpoint       Don't checkpoint the model after running each
                                  task.

  :param model: (:class:`~nupic.frameworks.opf.model.Model`) For testing, may
         pass in an existing OPF Model to use instead of creating a new one.

  :returns: (:class:`~nupic.frameworks.opf.model.Model`) reference to OPF Model
            instance that was constructed (this is provided to aid with
            debugging) or None, if none was created.
  """
  # Parse command-line options
  opt = _parseCommandLineOptions(args)

  model = _runExperimentImpl(opt, model)

  return model
0.007583
def partitions_for_topic(self, topic): """Return set of all partitions for topic (whether available or not) Arguments: topic (str): topic to check for partitions Returns: set: {partition (int), ...} """ if topic not in self._partitions: return None return set(self._partitions[topic].keys())
0.005305
def calc_nfw(self, rbins, offsets=None, numTh=200, numRoff=200, numRinner=20, factorRouter=3): """Calculates Sigma and DeltaSigma profiles. Generates the surface mass density (sigma_nfw attribute of parent object) and differential surface mass density (deltasigma_nfw attribute of parent object) profiles of each cluster, assuming a spherical NFW model. Optionally includes the effect of cluster miscentering offsets. Parameters ---------- rbins : array_like Radial bins (in Mpc) for calculating cluster profiles. Should be 1D, optionally with astropy.units of Mpc. offsets : array_like, optional Parameter describing the width (in Mpc) of the Gaussian distribution of miscentering offsets. Should be 1D, optionally with astropy.units of Mpc. Other Parameters ------------------- numTh : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins to use for integration over theta, for calculating offset profiles (no effect for offsets=None). Default 200. numRoff : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins to use for integration over R_off, for calculating offset profiles (no effect for offsets=None). Default 200. numRinner : int, optional Parameter to pass to SurfaceMassDensity(). Number of bins at r < min(rbins) to use for integration over Sigma(<r), for calculating DeltaSigma (no effect for Sigma ever, and no effect for DeltaSigma if offsets=None). Default 20. factorRouter : int, optional Parameter to pass to SurfaceMassDensity(). Factor increase over number of rbins, at min(r) < r < max(r), of bins that will be used at for integration over Sigma(<r), for calculating DeltaSigma (no effect for Sigma, and no effect for DeltaSigma if offsets=None). Default 3. """ if offsets is None: self._sigoffset = np.zeros(self.number) * units.Mpc else: self._sigoffset = utils.check_units_and_type(offsets, units.Mpc, num=self.number) self.rbins = utils.check_units_and_type(rbins, units.Mpc) rhoc = self._rho_crit.to(units.Msun / units.pc**2 / units.Mpc) smd = SurfaceMassDensity(self.rs, self.delta_c, rhoc, offsets=self._sigoffset, rbins=self.rbins, numTh=numTh, numRoff=numRoff, numRinner=numRinner, factorRouter=factorRouter) self.sigma_nfw = smd.sigma_nfw() self.deltasigma_nfw = smd.deltasigma_nfw()
0.001004
def remove_exclude_regions(orig_bed, base_file, items, remove_entire_feature=False): """Remove centromere and short end regions from an existing BED file of regions to target. """ from bcbio.structural import shared as sshared out_bed = os.path.join("%s-noexclude.bed" % (utils.splitext_plus(base_file)[0])) if not utils.file_uptodate(out_bed, orig_bed): exclude_bed = sshared.prepare_exclude_file(items, base_file) with file_transaction(items[0], out_bed) as tx_out_bed: pybedtools.BedTool(orig_bed).subtract(pybedtools.BedTool(exclude_bed), A=remove_entire_feature, nonamecheck=True).saveas(tx_out_bed) if utils.file_exists(out_bed): return out_bed else: return orig_bed
0.007557
def archive_model(serialization_dir: str,
                  weights: str = _DEFAULT_WEIGHTS,
                  files_to_archive: Dict[str, str] = None,
                  archive_path: str = None) -> None:
    """
    Archive the model weights, its training configuration, and its
    vocabulary to `model.tar.gz`. Include the additional ``files_to_archive``
    if provided.

    Parameters
    ----------
    serialization_dir: ``str``
        The directory where the weights and vocabulary are written out.
    weights: ``str``, optional (default=_DEFAULT_WEIGHTS)
        Which weights file to include in the archive. The default is ``best.th``.
    files_to_archive: ``Dict[str, str]``, optional (default=None)
        A mapping {flattened_key -> filename} of supplementary files to include
        in the archive. That is, if you wanted to include ``params['model']['weights']``
        then you would specify the key as `"model.weights"`.
    archive_path : ``str``, optional, (default = None)
        A full path to serialize the model to. The default is "model.tar.gz" inside the
        serialization_dir. If you pass a directory here, we'll serialize the model
        to "model.tar.gz" inside the directory.
    """
    weights_file = os.path.join(serialization_dir, weights)
    if not os.path.exists(weights_file):
        logger.error("weights file %s does not exist, unable to archive model", weights_file)
        return

    config_file = os.path.join(serialization_dir, CONFIG_NAME)
    if not os.path.exists(config_file):
        logger.error("config file %s does not exist, unable to archive model", config_file)
        return

    # If there are files we want to archive, write out the mapping
    # so that we can use it during de-archiving.
    if files_to_archive:
        fta_filename = os.path.join(serialization_dir, _FTA_NAME)
        with open(fta_filename, 'w') as fta_file:
            fta_file.write(json.dumps(files_to_archive))

    if archive_path is not None:
        archive_file = archive_path
        if os.path.isdir(archive_file):
            archive_file = os.path.join(archive_file, "model.tar.gz")
    else:
        archive_file = os.path.join(serialization_dir, "model.tar.gz")
    logger.info("archiving weights and vocabulary to %s", archive_file)
    with tarfile.open(archive_file, 'w:gz') as archive:
        archive.add(config_file, arcname=CONFIG_NAME)
        archive.add(weights_file, arcname=_WEIGHTS_NAME)
        archive.add(os.path.join(serialization_dir, "vocabulary"),
                    arcname="vocabulary")

        # If there are supplemental files to archive:
        if files_to_archive:
            # Archive the { flattened_key -> original_filename } mapping.
            archive.add(fta_filename, arcname=_FTA_NAME)
            # And add each requested file to the archive.
            for key, filename in files_to_archive.items():
                archive.add(filename, arcname=f"fta/{key}")
0.002374
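A typical call after training finishes; all paths here are illustrative:

archive_model(
    serialization_dir='/tmp/my_model',
    weights='best.th',
    files_to_archive={'model.weights': '/tmp/my_model/extra/embeddings.txt'},
)
# writes /tmp/my_model/model.tar.gz containing the config, weights,
# vocabulary directory, the files-to-archive mapping, and fta/model.weights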
def remove_janitor(self, janitor):
    """Remove janitor from the room"""
    if not self.owner and not self.admin:
        raise RuntimeError("Not enough street cred to do this")
    janitor = janitor.strip().lower()
    if not janitor:
        raise ValueError("Empty strings cannot be janitors")
    if janitor not in self.config.janitors:
        return
    self.config.janitors.remove(janitor)
    self.__set_config_value("janitors", self.config.janitors)
0.00396
def set_orientation(self, orientation='landscape'): """Set the video orientation. Return a coroutine. """ if orientation not in ALLOWED_ORIENTATIONS: _LOGGER.debug('%s is not a valid orientation', orientation) return False return self.change_setting('orientation', orientation)
0.005848
def _subset(subset, superset):
    """True if subset is a subset of superset.

    :param dict subset: subset to compare.
    :param dict superset: superset to compare.

    :return: True iff all pairs (key, value) of subset are in superset.
    :rtype: bool
    """
    result = True

    for k in subset:
        result = k in superset and subset[k] == superset[k]
        if not result:
            break

    return result
0.002358
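_subset checks containment pair by pair; for example:

assert _subset({'a': 1}, {'a': 1, 'b': 2})
assert not _subset({'a': 2}, {'a': 1, 'b': 2})
assert _subset({}, {'a': 1})  # the empty dict is a subset of anything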
def _patch_expand_paths(self, settings, name, value): """ Apply ``SettingsPostProcessor._patch_expand_path`` to each element in list. Args: settings (dict): Current settings. name (str): Setting name. value (list): List of paths to patch. Returns: list: Patched path list to an absolute path. """ return [self._patch_expand_path(settings, name, item) for item in value]
0.004065
def call_async(func): """Decorates a function to be called async on the loop thread""" @wraps(func) def wrapper(self, *args, **kw): """Wraps instance method to be called on loop thread""" def call(): """Calls function on loop thread""" try: func(self, *args, **kw) except Exception: logger.exception( "failed to call async [%r] with [%r] [%r]", func, args, kw ) self.loop.call_soon_threadsafe(call) return wrapper
0.001783
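A usage sketch for call_async: the decorated method body runs on self.loop's thread, so it is safe to invoke from any other thread. The class and attribute names below are assumptions; the decorator only requires the instance to expose a `loop` attribute holding a running asyncio event loop.

import asyncio

class Connection:
    def __init__(self, loop):
        self.loop = loop  # asyncio loop running on another thread

    @call_async
    def send(self, data):
        # executed on the loop thread via loop.call_soon_threadsafe
        print('sending', data)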
def mathjax_for_markdown(pelicanobj, mathjax_script, mathjax_settings):
    """Instantiates a customized markdown extension for handling mathjax
    related content"""

    # Create the configuration for the markdown template
    config = {}
    config['mathjax_script'] = mathjax_script
    config['math_tag_class'] = 'math'
    config['auto_insert'] = mathjax_settings['auto_insert']

    # Instantiate markdown extension and append it to the current extensions
    try:
        if isinstance(pelicanobj.settings.get('MD_EXTENSIONS'), list):  # pelican 3.6.3 and earlier
            pelicanobj.settings['MD_EXTENSIONS'].append(PelicanMathJaxExtension(config))
        else:
            pelicanobj.settings['MARKDOWN'].setdefault('extensions', []).append(PelicanMathJaxExtension(config))
    except Exception:
        sys.excepthook(*sys.exc_info())
        sys.stderr.write("\nError - the pelican mathjax markdown extension failed to configure. MathJax is non-functional.\n")
        sys.stderr.flush()
0.007049