def adjust_prior(self, index, prior):
    """ Adjusts priors for the latent variables

    Parameters
    ----------
    index : int or list[int]
        Which latent variable index/indices to be altered

    prior : Prior object
        Which prior distribution? E.g. Normal(0,1)

    Returns
    ----------
    None (changes priors in LatentVariables object)
    """
    self.latent_variables.adjust_prior(index=index, prior=prior)
def resize_volume(self, volumeObj, sizeInGb, bsize=1000):
    """
    Resize a volume to new GB size, must be larger than original.

    :param volumeObj: ScaleIO Volume Object
    :param sizeInGb: New size in GB (has to be larger than original)
    :param bsize: Size multiplier used to convert GB to KB (default: 1000)
    :return: POST request response
    :rtype: Requests POST response object
    """
    current_vol = self.get_volume_by_id(volumeObj.id)
    if current_vol.size_kb > (sizeInGb * bsize * bsize):
        raise RuntimeError(
            "resize_volume() - New size needs to be bigger than: %d KBs" % current_vol.size_kb)

    resizeDict = {'sizeInGB': str(sizeInGb)}
    response = self.conn.connection._do_post("{}/{}{}/{}".format(
        self.conn.connection._api_url, "instances/Volume::", volumeObj.id, 'action/setVolumeSize'),
        json=resizeDict)
    return response
def run_on(*, event: str):
    """A decorator to store and link a callback to an event."""
    def decorator(callback):
        @functools.wraps(callback)
        def decorator_wrapper():
            RTMClient.on(event=event, callback=callback)

        # Register the callback now, then hand the original callback back so
        # the decorated name remains callable (the original code returned
        # ``decorator_wrapper()``, i.e. ``None``, which replaced the
        # decorated function).
        decorator_wrapper()
        return callback
    return decorator
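# A minimal usage sketch (the "message" event name is illustrative, not taken
# from the source); the callback is registered on RTMClient at import time:
#
#     @run_on(event="message")
#     def handle_message(**payload):
#         print(payload.get("data"))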
def _get_parser(extra_args):
    """Return ArgumentParser with any extra arguments."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    dirs = appdirs.AppDirs('hangups', 'hangups')
    default_token_path = os.path.join(dirs.user_cache_dir, 'refresh_token.txt')
    parser.add_argument(
        '--token-path', default=default_token_path,
        help='path used to store OAuth refresh token'
    )
    parser.add_argument(
        '-d', '--debug', action='store_true',
        help='log detailed debugging messages'
    )
    for extra_arg in extra_args:
        parser.add_argument(extra_arg, required=True)
    return parser
def get_codemirror_parameters(self, name):
    """
    Return CodeMirror parameters for given configuration name.

    This is a reduced configuration from internal parameters.

    Arguments:
        name (string): Config name from available ones in
            ``settings.CODEMIRROR_SETTINGS``.

    Returns:
        dict: Parameters.
    """
    config = self.get_config(name)
    return {k: config[k] for k in config if k not in self._internal_only}
def show_driver(devname):
    '''
    Queries the specified network device for associated driver information

    CLI Example:

    .. code-block:: bash

        salt '*' ethtool.show_driver <devname>
    '''
    try:
        module = ethtool.get_module(devname)
    except IOError:
        log.error('Driver information not implemented on %s', devname)
        return 'Not implemented'

    try:
        businfo = ethtool.get_businfo(devname)
    except IOError:
        log.error('Bus information not available on %s', devname)
        return 'Not available'

    ret = {
        'driver': module,
        'bus_info': businfo,
    }
    return ret
def unorm(data, ord=None, axis=None, keepdims=False):
    """Matrix or vector norm that preserves units

    This is a wrapper around np.linalg.norm that preserves units. See
    the documentation for that function for descriptions of the keyword
    arguments.

    Examples
    --------
    >>> from unyt import km
    >>> data = [1, 2, 3]*km
    >>> print(unorm(data))
    3.7416573867739413 km
    """
    norm = np.linalg.norm(data, ord=ord, axis=axis, keepdims=keepdims)
    if norm.shape == ():
        return unyt_quantity(norm, data.units)
    return unyt_array(norm, data.units)
def prepare_adiabatic_limit(slh, k=None):
    r"""Prepare the adiabatic elimination on an SLH object

    Args:
        slh: The SLH object to take the limit for
        k: The scaling parameter $k \rightarrow \infty$. The default is a
            positive symbol 'k'

    Returns:
        tuple: The objects ``Y, A, B, F, G, N`` necessary to compute the
        limiting system.
    """
    if k is None:
        k = symbols('k', positive=True)
    Ld = slh.L.dag()
    LdL = (Ld * slh.L)[0, 0]
    K = (-LdL / 2 + I * slh.H).expand().simplify_scalar()
    N = slh.S.dag()
    B, A, Y = K.series_expand(k, 0, 2)
    G, F = Ld.series_expand(k, 0, 1)
    return Y, A, B, F, G, N
def request(self, url, method=u"get", data=None, headers=None, **kwargs):
    """ public method for doing the live request """
    url, method, data, headers, kwargs = self._pre_request(
        url, method=method, data=data, headers=headers, **kwargs)
    response = self._request(url, method=method, data=data, headers=headers, **kwargs)
    response = self._post_request(response)
    # raises the appropriate exceptions
    response = self._handle_response(response)
    return response
def _parse_typed_parameter_typed_value(values):
    '''
    Creates Arguments in a TypedParametervalue.
    '''
    type_, value = _expand_one_key_dictionary(values)
    _current_parameter_value.type = type_

    if _is_simple_type(value):
        arg = Argument(value)
        _current_parameter_value.add_argument(arg)
    elif isinstance(value, list):
        for idx in value:
            arg = Argument(idx)
            _current_parameter_value.add_argument(arg)
def msg(cls, error=None, debug=True, trace=True):
    """
    prints the error message

    :param error: the error message
    :param debug: only prints it if debug is set to true
    :param trace: if true prints the trace
    :return:
    """
    if debug and error is not None:
        print(error)
    # Only print the trace when debug is also enabled (the original code
    # printed it unconditionally, which its own TODO flagged as a bug).
    if debug and trace:
        print(traceback.format_exc())
def src2ast(src: str) -> Expression:
    """Return ast.Expression created from source code given in `src`."""
    try:
        return ast.parse(src, mode='eval')
    except SyntaxError:
        raise ValueError("Not a valid expression.") from None
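# A quick usage sketch: the Expression node returned above can be handed
# straight to compile() and evaluated.
tree = src2ast("1 + 2 * 3")
assert eval(compile(tree, "<expr>", mode="eval")) == 7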
def take_snapshot(obj, store=True, **kw):
    """Takes a snapshot of the passed in object

    :param obj: Content object
    :returns: New snapshot
    """
    logger.debug("📷 Take new snapshot for {}".format(repr(obj)))

    # get the object data
    snapshot = get_object_data(obj)

    # get the metadata
    metadata = get_object_metadata(obj, **kw)

    # store the metadata
    snapshot["__metadata__"] = metadata

    # convert the snapshot to JSON
    data = json.dumps(snapshot)

    # return immediately
    if not store:
        return snapshot

    # get the snapshot storage
    storage = get_storage(obj)

    # store the snapshot data
    storage.append(data)

    # Mark the content as auditable
    alsoProvides(obj, IAuditable)

    return snapshot
async def _set_rev_reg(self, rr_id: str, rr_size: int) -> None:
    """
    Move precomputed revocation registry data from hopper into place within tails directory.

    :param rr_id: revocation registry identifier
    :param rr_size: revocation registry size, in case creation required
    """
    LOGGER.debug('Issuer._set_rev_reg >>> rr_id: %s, rr_size: %s', rr_id, rr_size)
    assert self.rrbx

    dir_hopper_rr_id = join(self.rrb.dir_tails_hopper, rr_id)

    while Tails.linked(dir_hopper_rr_id, rr_id) is None:
        await asyncio.sleep(1)
    await self._send_rev_reg_def(rr_id)

    cd_id = rev_reg_id2cred_def_id(rr_id)
    (next_tag, rr_size_suggested) = Tails.next_tag(self.dir_tails, cd_id)
    rr_id = rev_reg_id(cd_id, next_tag)
    self.rrb.mark_in_progress(rr_id, rr_size or rr_size_suggested)

    LOGGER.debug('Issuer._set_rev_reg <<<')
def do_state_tomography(preparation_program, nsamples, cxn, qubits=None, use_run=False):
    """
    Method to perform both a QPU and QVM state tomography, and use the latter as
    a reference to calculate the fidelity of the former.

    :param Program preparation_program: Program to execute.
    :param int nsamples: Number of samples to take for the program.
    :param QVMConnection|QPUConnection cxn: Connection on which to run the program.
    :param list qubits: List of qubits for the program to use in the tomography analysis.
    :param bool use_run: If ``True``, append measurements on all qubits and use
        ``cxn.run`` instead of ``cxn.run_and_measure``.
    :return: The state tomogram.
    :rtype: StateTomography
    """
    return tomography._do_tomography(preparation_program, nsamples, cxn, qubits,
                                     tomography.MAX_QUBITS_STATE_TOMO,
                                     StateTomography, state_tomography_programs,
                                     DEFAULT_STATE_TOMO_SETTINGS, use_run=use_run)
def fix_chrome_mac_platform(platform):
    """
    Chrome on Mac OS adds minor version number and uses underscores instead
    of dots. E.g. platform for Firefox will be: 'Intel Mac OS X 10.11'
    but for Chrome it will be 'Intel Mac OS X 10_11_6'.

    :param platform: string like "Macintosh; Intel Mac OS X 10.8"
    :return: platform with version number including minor number and formatted
        with underscores, e.g. "Macintosh; Intel Mac OS X 10_8_2"
    """
    ver = platform.split('OS X ')[1]
    build_range = range(*MACOSX_CHROME_BUILD_RANGE[ver])
    build = choice(build_range)
    mac_ver = ver.replace('.', '_') + '_' + str(build)
    return 'Macintosh; Intel Mac OS X %s' % mac_ver
def sub_template(template, template_tag, substitution):
    '''make a substitution for a template_tag in a template
    '''
    template = template.replace(template_tag, substitution)
    return template
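# For example, with an illustrative placeholder token (any substring works):
assert sub_template('Hello [[NAME]]!', '[[NAME]]', 'World') == 'Hello World!'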
def BuildCampaignOperations(batch_job_helper, budget_operations,
                            number_of_campaigns=1):
    """Builds the operations needed to create a new Campaign.

    Note: When the Campaigns are created, they will have a different Id than
    those generated here as a temporary Id. This is just used to identify them
    in the BatchJobService.

    Args:
        batch_job_helper: a BatchJobHelper instance.
        budget_operations: a list containing the operation that will add the
            budget used by these Campaigns.
        number_of_campaigns: an int number defining the number of campaigns to
            be created.

    Returns:
        a list containing the operations to create the desired number of
        Campaigns.
    """
    # Grab the temporary budgetId to associate with the new Campaigns.
    budget_id = budget_operations[0]['operand']['budgetId']

    campaign_operations = [
        {
            # The xsi_type of the operation can usually be guessed by the API
            # because a given service only handles one type of operation.
            # However, batch jobs process operations of different types, so
            # the xsi_type must always be explicitly defined for these
            # operations.
            'xsi_type': 'CampaignOperation',
            'operand': {
                'name': 'Batch Campaign #%s' % uuid.uuid4(),
                # Recommendation: Set the campaign to PAUSED when creating it
                # to stop the ads from immediately serving. Set to ENABLED
                # once you've added targeting and the ads are ready to serve.
                'status': 'PAUSED',
                # This is a temporary Id used by the BatchJobService to
                # identify the Campaigns for operations that require a
                # campaignId.
                'id': batch_job_helper.GetId(),
                'advertisingChannelType': 'SEARCH',
                # Note that only the budgetId is required
                'budget': {
                    'budgetId': budget_id
                },
                'biddingStrategyConfiguration': {
                    'biddingStrategyType': 'MANUAL_CPC'
                }
            },
            'operator': 'ADD'
        }
        for _ in range(number_of_campaigns)]

    return campaign_operations
def get_interface_detail_output_interface_if_state(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_interface_detail = ET.Element("get_interface_detail")
    config = get_interface_detail
    output = ET.SubElement(get_interface_detail, "output")
    interface = ET.SubElement(output, "interface")
    interface_type_key = ET.SubElement(interface, "interface-type")
    interface_type_key.text = kwargs.pop('interface_type')
    interface_name_key = ET.SubElement(interface, "interface-name")
    interface_name_key.text = kwargs.pop('interface_name')
    if_state = ET.SubElement(interface, "if-state")
    if_state.text = kwargs.pop('if_state')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def expand_path(path):
    """Expands directories and globs in given path."""
    paths = []
    path = os.path.expanduser(path)
    path = os.path.expandvars(path)

    if os.path.isdir(path):
        for (dir, dirs, files) in os.walk(path):
            for file in files:
                paths.append(os.path.join(dir, file))
    else:
        paths.extend(glob(path))

    return paths
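# Usage sketch (paths are illustrative): a directory is walked recursively,
# anything else is treated as a glob pattern.
#
#     expand_path('~/logs')        # every file under ~/logs, recursively
#     expand_path('/tmp/*.txt')    # whatever matches the glob in /tmp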
def render(self, *args, **kwargs):
    '''
    Creates a <title> tag if not present and renders the DOCTYPE and tag tree.
    '''
    r = []
    # Prepend the doctype if one was set
    if self.doctype:
        r.append(self.doctype)
        r.append('\n')
    r.append(super(document, self).render(*args, **kwargs))
    return u''.join(r)
def callback(self, provider):
    """
    Handles 3rd party callback and processes its data
    """
    provider = self.get_provider(provider)
    try:
        return provider.authorized_handler(self.login)(provider=provider)
    except OAuthException as ex:
        logging.error("Data: %s", ex.data)
        raise
def get(self, sid):
    """
    Constructs a SigningKeyContext

    :param sid: The sid

    :returns: twilio.rest.api.v2010.account.signing_key.SigningKeyContext
    :rtype: twilio.rest.api.v2010.account.signing_key.SigningKeyContext
    """
    return SigningKeyContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
def extract_concepts(self, sentences=None, ids=None,
                     composite_phrase=4, filename=None,
                     file_format='sldi', allow_acronym_variants=False,
                     word_sense_disambiguation=False, allow_large_n=False,
                     strict_model=False, relaxed_model=False,
                     allow_overmatches=False, allow_concept_gaps=False,
                     term_processing=False, no_derivational_variants=False,
                     derivational_variants=False, ignore_word_order=False,
                     unique_acronym_variants=False,
                     prefer_multiple_concepts=False,
                     ignore_stop_phrases=False, compute_all_mappings=False,
                     mm_data_version=False, exclude_sources=[],
                     restrict_to_sources=[], restrict_to_sts=[],
                     exclude_sts=[]):
    """ extract_concepts takes a list of sentences and ids(optional)
        then returns a list of Concept objects extracted via MetaMap.

        Supported Options:
            Composite Phrase -Q
            Word Sense Disambiguation -y
            use strict model -A
            use relaxed model -C
            allow large N -l
            allow overmatches -o
            allow concept gaps -g
            term processing -z
            No Derivational Variants -d
            All Derivational Variants -D
            Ignore Word Order -i
            Allow Acronym Variants -a
            Unique Acronym Variants -u
            Prefer Multiple Concepts -Y
            Ignore Stop Phrases -K
            Compute All Mappings -b
            MM Data Version -V
            Exclude Sources -e
            Restrict to Sources -R
            Restrict to Semantic Types -J
            Exclude Semantic Types -k

        For information about the available options visit
        http://metamap.nlm.nih.gov/.

        Note: If an error is encountered the process will be closed
              and whatever was processed, if anything, will be
              returned along with the error found.
    """
    if allow_acronym_variants and unique_acronym_variants:
        raise ValueError("You can't use both allow_acronym_variants and "
                         "unique_acronym_variants.")
    if (sentences is not None and filename is not None) or \
            (sentences is None and filename is None):
        raise ValueError("You must either pass a list of sentences "
                         "OR a filename.")
    if file_format not in ['sldi', 'sldiID']:
        raise ValueError("file_format must be either sldi or sldiID")

    input_file = None
    if sentences is not None:
        input_file = tempfile.NamedTemporaryFile(mode="wb", delete=False)
    else:
        input_file = open(filename, 'r')

    output_file = tempfile.NamedTemporaryFile(mode="r", delete=False)
    error = None
    try:
        if sentences is not None:
            if ids is not None:
                for identifier, sentence in zip(ids, sentences):
                    input_file.write('{0!r}|{1!r}\n'.format(identifier, sentence).encode('utf8'))
            else:
                for sentence in sentences:
                    input_file.write('{0!r}\n'.format(sentence).encode('utf8'))
            input_file.flush()

        command = [self.metamap_filename, '-N']
        command.append('-Q')
        command.append(str(composite_phrase))
        if mm_data_version is not False:
            if mm_data_version not in ['Base', 'USAbase', 'NLM']:
                raise ValueError("mm_data_version must be Base, USAbase, or NLM.")
            command.append('-V')
            command.append(str(mm_data_version))
        if word_sense_disambiguation:
            command.append('-y')
        if strict_model:
            command.append('-A')
        if relaxed_model:
            command.append('-C')
        if allow_large_n:
            command.append('-l')
        if allow_overmatches:
            command.append('-o')
        if allow_concept_gaps:
            command.append('-g')
        if term_processing:
            command.append('-z')
        if no_derivational_variants:
            command.append('-d')
        if derivational_variants:
            command.append('-D')
        if ignore_word_order:
            command.append('-i')
        if allow_acronym_variants:
            command.append('-a')
        if unique_acronym_variants:
            command.append('-u')
        if prefer_multiple_concepts:
            command.append('-Y')
        if ignore_stop_phrases:
            command.append('-K')
        if compute_all_mappings:
            command.append('-b')
        if len(exclude_sources) > 0:
            command.append('-e')
            command.append(str(','.join(exclude_sources)))
        if len(restrict_to_sources) > 0:
            command.append('-R')
            command.append(str(','.join(restrict_to_sources)))
        if len(restrict_to_sts) > 0:
            command.append('-J')
            command.append(str(','.join(restrict_to_sts)))
        if len(exclude_sts) > 0:
            command.append('-k')
            command.append(str(','.join(exclude_sts)))
        if ids is not None or (file_format == 'sldiID' and sentences is None):
            command.append('--sldiID')
        else:
            command.append('--sldi')
        command.append(input_file.name)
        command.append(output_file.name)

        metamap_process = subprocess.Popen(command, stdout=subprocess.PIPE)
        while metamap_process.poll() is None:
            stdout = str(metamap_process.stdout.readline())
            if 'ERROR' in stdout:
                metamap_process.terminate()
                error = stdout.rstrip()

        output = str(output_file.read())
    finally:
        if sentences is not None:
            os.remove(input_file.name)
        else:
            input_file.close()
        os.remove(output_file.name)

    concepts = Corpus.load(output.splitlines())
    return (concepts, error)
def update_data(self):
    """This is a method that will be called every time a packet is
    opened from the roaster."""
    time_elapsed = datetime.datetime.now() - self.start_time
    crntTemp = self.roaster.current_temp
    targetTemp = self.roaster.target_temp
    heaterLevel = self.roaster.heater_level
    # print(
    #     "Time: %4.6f, crntTemp: %d, targetTemp: %d, heaterLevel: %d" %
    #     (time_elapsed.total_seconds(), crntTemp, targetTemp, heaterLevel))
    self.file.write(
        "%4.6f,%d,%d,%d\n" %
        (time_elapsed.total_seconds(), crntTemp, targetTemp, heaterLevel))
def _as_published_topic(self):
    """This stream as a PublishedTopic if it is published otherwise None"""
    oop = self.get_operator_output_port()
    if not hasattr(oop, 'export'):
        return
    export = oop.export
    if export['type'] != 'properties':
        return
    seen_export_type = False
    topic = None
    for p in export['properties']:
        if p['type'] != 'rstring':
            continue
        if p['name'] == '__spl_exportType':
            if p['values'] == ['"topic"']:
                seen_export_type = True
            else:
                return
        if p['name'] == '__spl_topic':
            topic = p['values'][0]
    if seen_export_type and topic is not None:
        schema = None
        if hasattr(oop, 'tupleAttributes'):
            ta_url = oop.tupleAttributes
            ta_resp = self.rest_client.make_request(ta_url)
            schema = streamsx.topology.schema.StreamSchema(ta_resp['splType'])
        return PublishedTopic(topic[1:-1], schema)
    return
def _vmomentsurfaceIntegrand(vz, vR, vT, R, z, df, sigmaR1, gamma, sigmaz1, n, m, o):  # pragma: no cover because this is too slow; a warning is shown
    """Internal function that is the integrand for the vmomentsurface
    mass integration"""
    return vR**n * vT**m * vz**o * df(R, vR*sigmaR1, vT*sigmaR1*gamma, z, vz*sigmaz1,
                                      use_physical=False)
def main():
    """Measure capnp serialization performance of Random
    """
    r = Random(42)

    # Measure serialization
    startSerializationTime = time.time()
    for i in xrange(_SERIALIZATION_LOOPS):
        # NOTE pycapnp's builder.from_dict (used in nupic.bindings) leaks
        # memory if called on the same builder more than once, so we construct
        # a fresh builder here
        builderProto = RandomProto.new_message()
        r.write(builderProto)
    elapsedSerializationTime = time.time() - startSerializationTime

    builderBytes = builderProto.to_bytes()

    # Measure deserialization
    startDeserializationTime = time.time()
    deserializationCount = 0
    while deserializationCount < _DESERIALIZATION_LOOPS:
        # NOTE: periodically create a new reader to avoid "Exceeded message
        # traversal limit" error
        readerProto = RandomProto.from_bytes(
            builderBytes,
            traversal_limit_in_words=_TRAVERSAL_LIMIT_IN_WORDS,
            nesting_limit=_NESTING_LIMIT)

        numReads = min(_DESERIALIZATION_LOOPS - deserializationCount,
                       _MAX_DESERIALIZATION_LOOPS_PER_READER)
        for _ in xrange(numReads):
            r.read(readerProto)
        deserializationCount += numReads
    elapsedDeserializationTime = time.time() - startDeserializationTime

    # Print report
    print _SERIALIZATION_LOOPS, "Serialization loops in", \
        elapsedSerializationTime, "seconds."
    print "\t", elapsedSerializationTime / _SERIALIZATION_LOOPS, "seconds per loop."

    print deserializationCount, "Deserialization loops in", \
        elapsedDeserializationTime, "seconds."
    print "\t", elapsedDeserializationTime / deserializationCount, "seconds per loop."
def delete(queue, items):
    '''
    Delete an item or items from a queue
    '''
    with _conn(commit=True) as cur:
        if isinstance(items, dict):
            cmd = str("""DELETE FROM {0} WHERE data = '{1}'""").format(  # future lint: disable=blacklisted-function
                queue,
                salt.utils.json.dumps(items))
            log.debug('SQL Query: %s', cmd)
            cur.execute(cmd)
            return True
        if isinstance(items, list):
            items = [(salt.utils.json.dumps(el),) for el in items]
            cmd = 'DELETE FROM {0} WHERE data = %s'.format(queue)
            log.debug('SQL Query: %s', cmd)
            cur.executemany(cmd, items)
        return True
def load_config():
    """Load configuration file containing API KEY and other settings.

    :rtype: dict
    """
    configfile = get_configfile()
    if not os.path.exists(configfile):
        data = {
            'apikey': 'GET KEY AT: https://www.filemail.com/apidoc/ApiKey.aspx'
        }
        save_config(data)

    with open(configfile, 'rb') as f:
        return json.load(f)
def insertFromMimeData(self, source):
    """
    Inserts the information from the inputted source.

    :param      source | <QMimeData>
    """
    lines = projex.text.nativestring(source.text()).splitlines()
    for i in range(1, len(lines)):
        if not lines[i].startswith('... '):
            lines[i] = '... ' + lines[i]

    if len(lines) > 1:
        lines.append('... ')

    self.insertPlainText('\n'.join(lines))
def _get_mapping_for_table(self, table):
    """ Returns the first mapping for a table name """
    for mapping in self.mappings.values():
        if mapping["table"] == table:
            return mapping
def __focus(self, item):
    """Called when focus item has changed"""
    cols = self.__get_display_columns()
    for col in cols:
        self.__event_info = (col, item)
        self.event_generate('<<TreeviewInplaceEdit>>')
        if col in self._inplace_widgets:
            w = self._inplace_widgets[col]
            w.bind('<Key-Tab>', lambda e: w.tk_focusNext().focus_set())
            w.bind('<Shift-Key-Tab>', lambda e: w.tk_focusPrev().focus_set())
def print_(*objects, **kwargs):
    """print_(*objects, sep=None, end=None, file=None, flush=False)

    Args:
        objects (object): zero or more objects to print
        sep (str): Object separator to use, defaults to ``" "``
        end (str): Trailing string to use, defaults to ``"\\n"``.
            If end is ``"\\n"`` then `os.linesep` is used.
        file (object): A file-like object, defaults to `sys.stdout`
        flush (bool): If the file stream should be flushed
    Raises:
        EnvironmentError

    Like print(), but:

    * Supports printing filenames under Unix + Python 3 and Windows + Python 2
    * Emulates ANSI escape sequence support under Windows
    * Never fails due to encoding/decoding errors. Tries hard to get
      everything on screen as is, but will fall back to "?" if all fails.

    This does not conflict with ``colorama``, but will not use it on Windows.
    """
    sep = kwargs.get("sep")
    sep = sep if sep is not None else " "
    end = kwargs.get("end")
    end = end if end is not None else "\n"
    file = kwargs.get("file")
    file = file if file is not None else sys.stdout
    flush = bool(kwargs.get("flush", False))

    if is_win:
        _print_windows(objects, sep, end, file, flush)
    else:
        _print_unix(objects, sep, end, file, flush)
def _merge_outfile_fname(out_file, bam_files, work_dir, batch):
    """Derive correct name of BAM file based on batching.
    """
    if out_file is None:
        out_file = os.path.join(work_dir, os.path.basename(sorted(bam_files)[0]))
    if batch is not None:
        base, ext = os.path.splitext(out_file)
        out_file = "%s-b%s%s" % (base, batch, ext)
    return out_file
def extractValue(self, model, item):
    """
    Get the path referenced by this column's attribute.

    @param model: Either a TabularDataModel or a ScrollableView, depending
        on what this column is part of.

    @param item: A port item instance (as defined by L{xmantissa.port}).

    @rtype: C{unicode}
    """
    certPath = super(CertificateColumn, self).extractValue(model, item)
    return certPath.path.decode('utf-8', 'replace')
def Refresh():
    """looks up symbols within the inferior and caches their names / values.

    If debugging information is only partial, this method does its best to
    find as much information as it can, validation can be done using
    IsSymbolFileSane.
    """
    try:
        GdbCache.DICT = gdb.lookup_type('PyDictObject').pointer()
        GdbCache.TYPE = gdb.lookup_type('PyTypeObject').pointer()
    except gdb.error as err:
        # The symbol file we're using doesn't seem to provide type information.
        pass
    interp_head_name = GdbCache.FuzzySymbolLookup('interp_head')
    if interp_head_name:
        GdbCache.INTERP_HEAD = gdb.parse_and_eval(interp_head_name)
    else:
        # As a last resort, ask the inferior about it.
        GdbCache.INTERP_HEAD = gdb.parse_and_eval('PyInterpreterState_Head()')
    GdbCache.PENDINGBUSY = GdbCache.FuzzySymbolLookup('pendingbusy')
    GdbCache.PENDINGCALLS_TO_DO = GdbCache.FuzzySymbolLookup('pendingcalls_to_do')
def __mark(self, element, mark_set):
    """
    Marks an element

    :param element: The element to mark
    :param mark_set: The set corresponding to the mark
    :return: True if the element was known
    """
    try:
        # The given element can be of a different type than the original
        # one (JID instead of str, ...), so we retrieve the original one
        original = self.__elements.pop(element)
        mark_set.add(original)
    except KeyError:
        return False
    else:
        if not self.__elements:
            # No more elements to wait for
            self.__call()
        return True
def data_transforms_mnist(args, mnist_mean=None, mnist_std=None):
    """ data_transforms for mnist dataset
    """
    if mnist_mean is None:
        mnist_mean = [0.5]

    if mnist_std is None:
        mnist_std = [0.5]

    train_transform = transforms.Compose(
        [
            transforms.RandomCrop(28, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(mnist_mean, mnist_std),
        ]
    )
    if args.cutout:
        train_transform.transforms.append(Cutout(args.cutout_length))

    valid_transform = transforms.Compose(
        [transforms.ToTensor(), transforms.Normalize(mnist_mean, mnist_std)]
    )
    return train_transform, valid_transform
def sign_statement(self, statement, node_name, key_file, node_id, id_attr):
    """
    Sign an XML statement.

    :param statement: The statement to be signed
    :param node_name: string like 'urn:oasis:names:...:Assertion'
    :param key_file: The file where the key can be found
    :param node_id:
    :param id_attr: The attribute name for the identifier, normally one of
        'id','Id' or 'ID'
    :return: The signed statement
    """
    if isinstance(statement, SamlBase):
        statement = str(statement)

    _, fil = make_temp(
        statement,
        suffix='.xml',
        decode=False,
        delete=self._xmlsec_delete_tmpfiles,
    )

    com_list = [
        self.xmlsec,
        '--sign',
        '--privkey-pem', key_file,
        '--id-attr:{id_attr_name}'.format(id_attr_name=id_attr), node_name,
    ]
    if node_id:
        com_list.extend(['--node-id', node_id])

    try:
        (stdout, stderr, output) = self._run_xmlsec(com_list, [fil])
    except XmlsecError as e:
        raise SignatureError(com_list)

    # this does not work if --store-signatures is used
    if output:
        return output.decode("utf-8")
    if stdout:
        return stdout.decode("utf-8")
    raise SignatureError(stderr)
def get_core_name_without_suffix(file_path):
    # type: (AnyStr) -> AnyStr
    r"""Return core file name without suffix.

    Examples:
        >>> FileClass.get_core_name_without_suffix(r'/home/zhulj/1990.01.30/test.01.tif')
        'test.01'
        >>> FileClass.get_core_name_without_suffix(r'C:\zhulj\igsnrr\lreis.txt')
        'lreis'
        >>> FileClass.get_core_name_without_suffix(r'C:\\zhulj\\igsnrr\\lreis.txt')
        'lreis'
        >>> FileClass.get_core_name_without_suffix(r'C:/zhulj/igsnrr/lreis.txt')
        'lreis'
        >>> FileClass.get_core_name_without_suffix(r'/home/zhulj/dta/taudem/aread8')
        'aread8'
        >>> FileClass.get_core_name_without_suffix('singlename')
        'singlename'
        >>> FileClass.get_core_name_without_suffix('singlename.txt')
        'singlename'
    """
    if '\\' in file_path:
        file_path = file_path.replace('\\', '/')
    file_name = os.path.basename(file_path)
    core_names = file_name.split('.')
    if len(core_names) > 1:
        core_names = core_names[:-1]
    if isinstance(core_names, list):
        return str('.'.join(core_names))
    else:
        return str(core_names)
def concatenate(ctx, *text):
    """
    Joins text strings into one text string
    """
    result = ''
    for arg in text:
        result += conversions.to_string(arg, ctx)
    return result
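# For instance (the ctx argument comes from the surrounding expression
# engine, so it is only sketched here):
#
#     concatenate(ctx, "foo", "bar")   # -> "foobar"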
def encode_string(data, encoding='hex'):
    '''
    Encode string

    :param data: string to encode
    :param encoding: encoding to use (default: 'hex')
    :return: encoded string
    '''
    if six.PY2:
        return data.encode(encoding)
    else:
        if isinstance(data, str):
            data = bytes(data, 'utf-8')
        return codecs.encode(data, encoding).decode('ascii')
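# For example, hex-encoding a short ASCII string (value checked by hand:
# 's'=0x73, 'a'=0x61, 'l'=0x6c, 't'=0x74):
assert encode_string('salt') == '73616c74'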
def find(self, nameFilter=None, typeFilter=None, bindingModeFilter=None,
         boundFilter=None):
    """
    Gets the list of services that the Watson IoT Platform can connect to.
    The list can include a mixture of services that are either bound or unbound.

    Parameters:
        - nameFilter(string) - Filter the results by the specified name
        - typeFilter(string) - Filter the results by the specified type,
          Available values : cloudant, eventstreams
        - bindingModeFilter(string) - Filter the results by the specified
          binding mode, Available values : automatic, manual
        - boundFilter(boolean) - Filter the results by the bound flag

    Throws APIException on failure.
    """
    queryParms = {}
    if nameFilter:
        queryParms["name"] = nameFilter
    if typeFilter:
        queryParms["type"] = typeFilter
    if bindingModeFilter:
        queryParms["bindingMode"] = bindingModeFilter
    if boundFilter:
        queryParms["bound"] = boundFilter

    return IterableServiceBindingsList(self._apiClient, filters=queryParms)
def create(cls, cash_register_id, tab_uuid, description,
           monetary_account_id=None, ean_code=None,
           avatar_attachment_uuid=None, tab_attachment=None, quantity=None,
           amount=None, custom_headers=None):
    """
    Create a new TabItem for a given Tab.

    :type user_id: int
    :type monetary_account_id: int
    :type cash_register_id: int
    :type tab_uuid: str
    :param description: The TabItem's brief description. Can't be empty and
        must be no longer than 100 characters
    :type description: str
    :param ean_code: The TabItem's EAN code.
    :type ean_code: str
    :param avatar_attachment_uuid: An AttachmentPublic UUID that used as an
        avatar for the TabItem.
    :type avatar_attachment_uuid: str
    :param tab_attachment: A list of AttachmentTab attached to the TabItem.
    :type tab_attachment: list[int]
    :param quantity: The quantity of the TabItem. Formatted as a number
        containing up to 15 digits, up to 15 decimals and using a dot.
    :type quantity: str
    :param amount: The money amount of the TabItem. Will not change the value
        of the corresponding Tab.
    :type amount: object_.Amount
    :type custom_headers: dict[str, str]|None

    :rtype: BunqResponseInt
    """
    if custom_headers is None:
        custom_headers = {}

    request_map = {
        cls.FIELD_DESCRIPTION: description,
        cls.FIELD_EAN_CODE: ean_code,
        cls.FIELD_AVATAR_ATTACHMENT_UUID: avatar_attachment_uuid,
        cls.FIELD_TAB_ATTACHMENT: tab_attachment,
        cls.FIELD_QUANTITY: quantity,
        cls.FIELD_AMOUNT: amount
    }
    request_map_string = converter.class_to_json(request_map)
    request_map_string = cls._remove_field_for_request(request_map_string)

    api_client = client.ApiClient(cls._get_api_context())
    request_bytes = request_map_string.encode()
    endpoint_url = cls._ENDPOINT_URL_CREATE.format(
        cls._determine_user_id(),
        cls._determine_monetary_account_id(monetary_account_id),
        cash_register_id, tab_uuid)
    response_raw = api_client.post(endpoint_url, request_bytes, custom_headers)

    return BunqResponseInt.cast_from_bunq_response(
        cls._process_for_id(response_raw)
    )
def hazeDriver():
    """
    Process the command line arguments and run the appropriate haze subcommand.

    We want to be able to do git-style handoffs to subcommands where if we
    do `haze aws foo bar` and the executable haze-aws-foo exists, we'll call
    it with the argument bar.

    We deliberately don't do anything with the arguments other than hand them
    off to the haze subcommand. Subcommands are responsible for their own
    argument parsing.
    """
    try:
        (command, args) = findSubCommand(sys.argv)
        # If we can't construct a subcommand from sys.argv, it'll still be
        # able to find this haze driver script, and re-running ourself isn't
        # useful.
        if os.path.basename(command) == "haze":
            print "Could not find a subcommand for %s" % " ".join(sys.argv)
            sys.exit(1)
    except StandardError:
        print "Could not find a subcommand for %s" % " ".join(sys.argv)
        sys.exit(1)
    check_call([command] + args)
def bump_version(version, bump='patch'):
    """Bump a 'major.minor.patch' version string; bump is one of: patch, minor, major."""
    try:
        # list() so the result is indexable under Python 3 as well
        parts = list(map(int, version.split('.')))
    except ValueError:
        fail('Current version is not numeric')
    if bump == 'patch':
        parts[2] += 1
    elif bump == 'minor':
        parts[1] += 1
        parts[2] = 0
    elif bump == 'major':
        parts[0] += 1
        parts[1] = 0
        parts[2] = 0
    return '.'.join(map(str, parts))
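# Worked examples, derived directly from the logic above:
assert bump_version('1.2.3') == '1.2.4'
assert bump_version('1.2.3', bump='minor') == '1.3.0'
assert bump_version('1.2.3', bump='major') == '2.0.0'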
def get_glibc_version():
    """
    Returns:
        Version as a pair of ints (major, minor) or None
    """
    # TODO: Look into a nicer way to get the version
    try:
        out = subprocess.Popen(['ldd', '--version'],
                               stdout=subprocess.PIPE).communicate()[0]
    except OSError:
        return
    match = re.search(r'([0-9]+)\.([0-9]+)\.?[0-9]*', out)
    try:
        return map(int, match.groups())
    except AttributeError:
        return
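# Usage sketch: gate a feature on a minimum glibc version. The (2, 17)
# threshold is illustrative only.
#
#     version = get_glibc_version()
#     if version is not None and tuple(version) >= (2, 17):
#         pass  # safe to rely on the newer glibc behaviour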
def refresh(self, module=None):
    """Recompute the salience values of the Activations on the Agenda
    and then reorder the agenda.

    The Python equivalent of the CLIPS refresh-agenda command.

    If no Module is specified, the current one is used.
    """
    module = module._mdl if module is not None else ffi.NULL
    lib.EnvRefreshAgenda(self._env, module)
def commit_manually(using=None):
    """
    Decorator that activates manual transaction control. It just disables
    automatic transaction control and doesn't do any commit/rollback of its
    own -- it's up to the user to call the commit and rollback functions
    themselves.
    """
    def entering(using):
        enter_transaction_management(using=using)

    def exiting(exc_value, using):
        leave_transaction_management(using=using)

    return _transaction_func(entering, exiting, using)
def UpdateStatus(self, is_complete):
    '''Mock method to update the transfer status.

    If is_complete is False, this marks the transfer as active; otherwise it
    marks the transfer as complete. It is an error to call this method after
    calling it with is_complete as True.

    In both cases, it updates the number of bytes transferred to be the
    current size of the transfer file (whose filename was emitted in the
    TransferCreated signal).
    '''
    status = 'complete' if is_complete else 'active'
    transferred = os.path.getsize(self.props[TRANSFER_IFACE]['Filename'])
    self.props[TRANSFER_IFACE]['Status'] = status
    self.props[TRANSFER_IFACE]['Transferred'] = dbus.UInt64(transferred, variant_level=1)

    self.EmitSignal(dbus.PROPERTIES_IFACE, 'PropertiesChanged', 'sa{sv}as', [
        TRANSFER_IFACE,
        {
            'Status': dbus.String(status, variant_level=1),
            'Transferred': dbus.UInt64(transferred, variant_level=1),
        },
        [],
    ])
def alter_object(self, obj):
    """
    Alters all the attributes in an individual object.

    If it returns False, the object will not be saved
    """
    for attname, field, replacer in self.replacers:
        currentval = getattr(obj, attname)
        replacement = replacer(self, obj, field, currentval)
        setattr(obj, attname, replacement)
def check(self):
    """Add platform specific checks"""
    if not self.is_valid:
        raise PolyaxonDeploymentConfigError(
            'Deployment type `{}` not supported'.format(self.deployment_type))

    check = False
    if self.is_kubernetes:
        check = self.check_for_kubernetes()
    elif self.is_docker_compose:
        check = self.check_for_docker_compose()
    elif self.is_docker:
        check = self.check_for_docker()
    elif self.is_heroku:
        check = self.check_for_heroku()

    if not check:
        raise PolyaxonDeploymentConfigError(
            'Deployment `{}` is not valid'.format(self.deployment_type))
def invalidate(self, name):
    # type: (str) -> None
    """
    Invalidates the given component

    :param name: Name of the component to invalidate
    :raise ValueError: Invalid component name
    """
    with self.__instances_lock:
        try:
            stored_instance = self.__instances[name]
        except KeyError:
            raise ValueError(
                "Unknown component instance '{0}'".format(name)
            )
        else:
            # Call back the component during the invalidation
            stored_instance.invalidate(True)
def gather(self, cmd):
    """
    Runs a command and returns rc,stdout,stderr as a tuple.

    If called while the `Dir` context manager is in effect, guarantees that
    the process is executed in that directory, even if it is no longer the
    current directory of the process (i.e. it is thread-safe).

    :param cmd: The command and arguments to execute
    :return: (rc,stdout,stderr)
    """
    if not isinstance(cmd, list):
        cmd_list = shlex.split(cmd)
    else:
        cmd_list = cmd
    cwd = pushd.Dir.getcwd()
    cmd_info = '[cwd={}]: {}'.format(cwd, cmd_list)

    self.logger.debug("Executing:gather {}".format(cmd_info))
    proc = subprocess.Popen(
        cmd_list, cwd=cwd,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    rc = proc.returncode
    self.logger.debug(
        "Process {}: exited with: {}\nstdout>>{}<<\nstderr>>{}<<\n".
        format(cmd_info, rc, out, err))
    return rc, out, err
def identifyApplication(self, unProcessId, pchAppKey):
    """
    Identifies a running application. OpenVR can't always tell which process
    started in response to a URL. This function allows a URL handler (or the
    process itself) to identify the app key for the now running application.
    Passing a process ID of 0 identifies the calling process. The application
    must be one that's known to the system via a call to
    AddApplicationManifest.
    """
    fn = self.function_table.identifyApplication
    result = fn(unProcessId, pchAppKey)
    return result
def _create_filter(self):
    """Create a filter of all of the dependency products that we have selected."""
    self._product_filter = {}

    for chip in itertools.chain(iter(self._family.targets(self._tile.short_name)),
                                iter([self._family.platform_independent_target()])):
        for key, prods in chip.property('depends', {}).items():
            name, _, _ = key.partition(',')

            for prod in prods:
                if prod not in self._product_filter:
                    self._product_filter[prod] = set()

                self._product_filter[prod].add(name)
def Serialize(self, writer):
    """
    Serialize full object.

    Args:
        writer (neo.IO.BinaryWriter):
    """
    writer.WriteVarBytes(self.Script)
    writer.WriteVarBytes(self.ParameterList)
    writer.WriteByte(self.ReturnType)
def _generate_typevars(self):
    # type: () -> None
    """
    Creates type variables that are used by the type signatures for
    _process_custom_annotations.
    """
    self.emit("T = TypeVar('T', bound=bb.AnnotationType)")
    self.emit("U = TypeVar('U')")
    self.import_tracker._register_typing_import('TypeVar')
    self.emit()
def run_samtools(align_bams, items, ref_file, assoc_files, region=None,
                 out_file=None):
    """Detect SNPs and indels with samtools mpileup and bcftools.
    """
    return shared_variantcall(_call_variants_samtools, "samtools", align_bams,
                              ref_file, items, assoc_files, region, out_file)
def project_decrease_permissions(object_id, input_params={}, always_retry=True, **kwargs):
    """
    Invokes the /project-xxxx/decreasePermissions API method.

    For more info, see: https://wiki.dnanexus.com/API-Specification-v1.0.0/Project-Permissions-and-Sharing#API-method%3A-%2Fproject-xxxx%2FdecreasePermissions
    """
    return DXHTTPRequest('/%s/decreasePermissions' % object_id, input_params,
                         always_retry=always_retry, **kwargs)
def _add_reference(self, obj, ident=0):
    """
    Adds a read reference to the marshaler storage

    :param obj: Reference to add
    :param ident: Log indentation level
    """
    log_debug(
        "## New reference handle 0x{0:X}: {1} -> {2}".format(
            len(self.references) + self.BASE_REFERENCE_IDX,
            type(obj).__name__,
            repr(obj),
        ),
        ident,
    )
    self.references.append(obj)
def _process_message(self, message: amqp.Message) -> None:
    """Processes the message received from the queue."""
    if self.shutdown_pending.is_set():
        return

    try:
        if isinstance(message.body, bytes):
            message.body = message.body.decode()
        description = json.loads(message.body)
    except Exception:
        logger.error("Cannot decode message. Dropping. Message: %r", message.body)
        traceback.print_exc()
        message.channel.basic_reject(message.delivery_tag, requeue=False)
    else:
        logger.info("Processing task: %r", description)
        self._process_description(message, description)
def change_wavelength(self, wavelength):
    '''
    Changes the wavelength of the structure.

    This will affect the mode solver and potentially the refractive
    indices used (provided functions were provided as refractive
    indices).

    Args:
        wavelength (float): The new wavelength.
    '''
    for name, slab in self.slabs.items():
        const_args = slab._const_args
        mat_args = slab._mat_params

        const_args[8] = wavelength

        s = Slab(*const_args)
        for mat_arg in mat_args:
            s.add_material(*mat_arg)

        self.slabs[name] = s

    self._wl = wavelength
def set_perms(path, grant_perms=None, deny_perms=None, inheritance=True,
              reset=False):
    '''
    Set permissions for the given path

    Args:

        path (str): The full path to the directory.

        grant_perms (dict): A dictionary containing the user/group and the
            basic permissions to grant, ie:
            ``{'user': {'perms': 'basic_permission'}}``. You can also set
            the ``applies_to`` setting here. The default for ``applies_to``
            is ``this_folder_subfolders_files``. Specify another
            ``applies_to`` setting like this:

            .. code-block:: yaml

                {'user': {'perms': 'full_control', 'applies_to': 'this_folder'}}

            To set advanced permissions use a list for the ``perms``
            parameter, ie:

            .. code-block:: yaml

                {'user': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder'}}

            To see a list of available attributes and applies to settings
            see the documentation for salt.utils.win_dacl.

            A value of ``None`` will make no changes to the ``grant`` portion
            of the DACL. Default is ``None``.

        deny_perms (dict): A dictionary containing the user/group and
            permissions to deny along with the ``applies_to`` setting. Use
            the same format used for the ``grant_perms`` parameter. Remember,
            deny permissions supersede grant permissions.

            A value of ``None`` will make no changes to the ``deny`` portion
            of the DACL. Default is ``None``.

        inheritance (bool): If ``True`` the object will inherit permissions
            from the parent, if ``False``, inheritance will be disabled.
            Inheritance setting will not apply to parent directories if they
            must be created. Default is ``False``.

        reset (bool): If ``True`` the existing DACL will be cleared and
            replaced with the settings defined in this function. If
            ``False``, new entries will be appended to the existing DACL.
            Default is ``False``.

            .. versionadded:: 2018.3.0

    Returns:
        bool: True if successful

    Raises:
        CommandExecutionError: If unsuccessful

    CLI Example:

    .. code-block:: bash

        # To grant the 'Users' group 'read & execute' permissions.
        salt '*' file.set_perms C:\\Temp\\ "{'Users': {'perms': 'read_execute'}}"

        # Locally using salt call
        salt-call file.set_perms C:\\Temp\\ "{'Users': {'perms': 'read_execute', 'applies_to': 'this_folder_only'}}"

        # Specify advanced attributes with a list
        salt '*' file.set_perms C:\\Temp\\ "{'jsnuffy': {'perms': ['read_attributes', 'read_ea'], 'applies_to': 'this_folder_only'}}"
    '''
    return __utils__['dacl.set_perms'](obj_name=path,
                                       obj_type='file',
                                       grant_perms=grant_perms,
                                       deny_perms=deny_perms,
                                       inheritance=inheritance,
                                       reset=reset)
def group_by_month(self): """Return a dictionary of this collection's values grouped by each month. Key values are between 1-12. """ data_by_month = OrderedDict() for d in xrange(1, 13): data_by_month[d] = [] for v, dt in zip(self._values, self.datetimes): data_by_month[dt.month].append(v) return data_by_month
Return a dictionary of this collection's values grouped by each month. Key values are between 1-12.
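A standalone sketch of the same grouping logic using plain lists, so the behaviour can be checked outside the collection class:

from collections import OrderedDict
from datetime import datetime

values = [10, 20, 30]
datetimes = [datetime(2024, 1, 5), datetime(2024, 1, 20), datetime(2024, 2, 1)]
# Pre-seed keys 1-12 so empty months still appear, as in group_by_month
by_month = OrderedDict((m, []) for m in range(1, 13))
for v, dt in zip(values, datetimes):
    by_month[dt.month].append(v)
assert by_month[1] == [10, 20] and by_month[2] == [30]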
def require_iterable_of(objs, types, name=None, type_name=None, truncate_at=80): """ Raise an exception if objs is not an iterable with each element an instance of one of the specified types. See `require_instance` for descriptions of the other parameters. """ # Fast pass for common case where all types are correct. # This avoids the more expensive loop below. A typical speedup from this # optimization is 6.6 sec -> 1.7 sec, for testing a list of size 10,000,000. try: if all(isinstance(obj, types) for obj in objs): return except TypeError: # We don't require that objs is a list in this function, just that it's # iterable. We specify 'list' below as a convenient way to throw the # desired error. require_instance(objs, list, name, "iterable", truncate_at) # Some type isn't correct. We reuse the require_instance function to raise # the exception. prefix = ("%s: " % name) if name else "" for (i, obj) in enumerate(objs): element_name = prefix + ("element at index %d" % i) require_instance(obj, types, element_name, type_name, truncate_at) assert False, "Shouldn't reach here."
Raise an exception if objs is not an iterable with each element an instance of one of the specified types. See `require_instance` for descriptions of the other parameters.
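A hypothetical usage sketch; it assumes ``require_instance`` (defined elsewhere in the same module) raises ``TypeError`` on a mismatch:

require_iterable_of([1, 2, 3], int, name="counts")        # fast path, passes
try:
    require_iterable_of([1, "x", 3], int, name="counts")  # fails at index 1
except TypeError as e:
    print(e)  # message names 'counts: element at index 1'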
def delete_attachment(self, attachment):
    """Delete attachment from the AR or Analysis

    The attachment will only be deleted if it is not referenced by another
    AR/Analysis.
    """

    # Get the holding parent of this attachment
    parent = None
    if attachment.getLinkedRequests():
        # Holding parent is an AR
        parent = attachment.getRequest()
    elif attachment.getLinkedAnalyses():
        # Holding parent is an Analysis
        parent = attachment.getAnalysis()

    if parent is None:
        logger.warn(
            "Attachment {} is nowhere assigned. This should never happen!"
            .format(repr(attachment)))
        return False

    # Get the other attachments of the holding parent
    attachments = parent.getAttachment()

    # New attachments to set
    if attachment in attachments:
        attachments.remove(attachment)

    # Set the attachments without the current attachment
    parent.setAttachment(attachments)

    retain = False

    # Attachment is referenced by another Analysis
    if attachment.getLinkedAnalyses():
        holder = attachment.getAnalysis()
        logger.info("Attachment {} referenced by {} -> RETAIN"
                    .format(repr(attachment), repr(holder)))
        retain = True

    # Attachment is referenced by another AR
    if attachment.getLinkedRequests():
        holder = attachment.getRequest()
        logger.info("Attachment {} referenced by {} -> RETAIN"
                    .format(repr(attachment), repr(holder)))
        retain = True

    # Finally, delete the attachment
    if retain is False:
        client = api.get_parent(attachment)
        client.manage_delObjects([attachment.getId(), ])
Delete attachment from the AR or Analysis

The attachment will only be deleted if it is not referenced by another AR/Analysis.
def change_port_speed(self, instance_id, public, speed): """Allows you to change the port speed of a virtual server's NICs. Example:: #change the Public interface to 10Mbps on instance 12345 result = mgr.change_port_speed(instance_id=12345, public=True, speed=10) # result will be True or an Exception :param int instance_id: The ID of the VS :param bool public: Flag to indicate which interface to change. True (default) means the public interface. False indicates the private interface. :param int speed: The port speed to set. .. warning:: A port speed of 0 will disable the interface. """ if public: return self.client.call('Virtual_Guest', 'setPublicNetworkInterfaceSpeed', speed, id=instance_id) else: return self.client.call('Virtual_Guest', 'setPrivateNetworkInterfaceSpeed', speed, id=instance_id)
Allows you to change the port speed of a virtual server's NICs. Example:: #change the Public interface to 10Mbps on instance 12345 result = mgr.change_port_speed(instance_id=12345, public=True, speed=10) # result will be True or an Exception :param int instance_id: The ID of the VS :param bool public: Flag to indicate which interface to change. True (default) means the public interface. False indicates the private interface. :param int speed: The port speed to set. .. warning:: A port speed of 0 will disable the interface.
def _add_fold_decoration(self, block, region):
    """
    Add fold decorations (boxes around a folded block in the editor
    widget).
    """
    deco = TextDecoration(block)
    deco.signals.clicked.connect(self._on_fold_deco_clicked)
    deco.tooltip = region.text(max_lines=25)
    deco.draw_order = 1
    deco.block = block
    deco.select_line()
    deco.set_outline(drift_color(
        self._get_scope_highlight_color(), 110))
    deco.set_background(self._get_scope_highlight_color())
    deco.set_foreground(QtGui.QColor('#808080'))
    self._block_decos.append(deco)
    self.editor.decorations.append(deco)
Add fold decorations (boxes around a folded block in the editor widget).
def zoom_to(self, zoomlevel, no_reset=False): """Set zoom level in a channel. This only changes the relevant settings; The image is not modified. Also see :meth:`scale_to`. .. note:: In addition to the given zoom level, other zoom settings are defined for the channel in preferences. Parameters ---------- zoomlevel : int The zoom level to zoom the image. Negative value to zoom out; positive to zoom in. no_reset : bool Do not reset ``autozoom`` setting. """ scale_x, scale_y = self.zoom.calc_scale(zoomlevel) self._scale_to(scale_x, scale_y, no_reset=no_reset)
Set zoom level in a channel. This only changes the relevant settings; The image is not modified. Also see :meth:`scale_to`. .. note:: In addition to the given zoom level, other zoom settings are defined for the channel in preferences. Parameters ---------- zoomlevel : int The zoom level to zoom the image. Negative value to zoom out; positive to zoom in. no_reset : bool Do not reset ``autozoom`` setting.
def import_device(self, directory): """Imports a device from the given directory. You export the device by using device.export() There are two special cases: user and meta devices. If the device name is meta, import_device will not do anything. If the device name is "user", import_device will overwrite the user device even if it exists already. """ # read the device's info with open(os.path.join(directory, "device.json"), "r") as f: ddata = json.load(f) d = self[ddata["name"]] dname = ddata["name"] del ddata["name"] if dname == "meta": return elif dname == "user": d.set(ddata) elif d.exists(): raise ValueError("The device " + d.name + " already exists") else: d.create(**ddata) # Now import all of the streams for name in os.listdir(directory): sdir = os.path.join(directory, name) if os.path.isdir(sdir): d.import_stream(sdir)
Imports a device from the given directory. You export the device by using device.export() There are two special cases: user and meta devices. If the device name is meta, import_device will not do anything. If the device name is "user", import_device will overwrite the user device even if it exists already.
def deployed_devices(self): """ :returns: Version deployed_devices of preview :rtype: twilio.rest.preview.deployed_devices.DeployedDevices """ if self._deployed_devices is None: self._deployed_devices = DeployedDevices(self) return self._deployed_devices
:returns: Version deployed_devices of preview :rtype: twilio.rest.preview.deployed_devices.DeployedDevices
def get_distribution(self, name): """ Looks for a named distribution on the path. This function only returns the first result found, as no more than one value is expected. If nothing is found, ``None`` is returned. :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` or ``None`` """ result = None name = name.lower() if not self._cache_enabled: for dist in self._yield_distributions(): if dist.key == name: result = dist break else: self._generate_cache() if name in self._cache.name: result = self._cache.name[name][0] elif self._include_egg and name in self._cache_egg.name: result = self._cache_egg.name[name][0] return result
Looks for a named distribution on the path. This function only returns the first result found, as no more than one value is expected. If nothing is found, ``None`` is returned. :rtype: :class:`InstalledDistribution`, :class:`EggInfoDistribution` or ``None``
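This method lives on distlib's ``DistributionPath``; a short usage sketch (the package name is illustrative):

from distlib.database import DistributionPath

dist = DistributionPath().get_distribution('pip')
if dist is not None:
    print(dist.name, dist.version)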
def get_root_modules(): """ Returns a list containing the names of all the modules available in the folders of the pythonpath. """ ip = get_ipython() if 'rootmodules' in ip.db: return ip.db['rootmodules'] t = time() store = False modules = list(sys.builtin_module_names) for path in sys.path: modules += module_list(path) if time() - t >= TIMEOUT_STORAGE and not store: store = True print("\nCaching the list of root modules, please wait!") print("(This will only be done once - type '%rehashx' to " "reset cache!)\n") sys.stdout.flush() if time() - t > TIMEOUT_GIVEUP: print("This is taking too long, we give up.\n") ip.db['rootmodules'] = [] return [] modules = set(modules) if '__init__' in modules: modules.remove('__init__') modules = list(modules) if store: ip.db['rootmodules'] = modules return modules
Returns a list containing the names of all the modules available in the folders of the pythonpath.
def match_criterion(self, tag): '''Override. Determine if a tag has the desired name and kind attribute value. Args: tag: A BeautifulSoup Tag. Returns: True if tag has the desired name and kind, otherwise False. ''' return tag.name == self.reference_tag_name and \ tag.attrs.get('kind', '') == self.reference_tag_kind
Override. Determine if a tag has the desired name and kind attribute value. Args: tag: A BeautifulSoup Tag. Returns: True if tag has the desired name and kind, otherwise False.
def dbRestore(self, db_value, context=None):
    """
    Converts a stored database value to Python.

    :param db_value: <variant>
    :param context: <orb.Context>

    :return: <variant>
    """
    if db_value is not None:
        return yaml.load(projex.text.nativestring(db_value))
    else:
        return db_value
Converts a stored database value to Python.

:param db_value: <variant>
:param context: <orb.Context>

:return: <variant>
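A standalone illustration of the restore step. Note that ``yaml.load`` without an explicit ``Loader`` is deprecated in newer PyYAML; ``yaml.safe_load`` is the safer choice when the stored value is untrusted:

import yaml

db_value = "{a: 1, b: [2, 3]}"
restored = yaml.safe_load(db_value)  # parses the stored YAML back into Python
assert restored == {'a': 1, 'b': [2, 3]}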
def _append_value(self, v_values, next_value, v_idx=None, n_vals=1): """Update a list of parsed values with a new value.""" for _ in range(n_vals): if v_idx: try: v_i = next(v_idx) except StopIteration: # Repeating commas are null-statements and can be ignored # Otherwise, we warn the user that this is a bad namelist if next_value is not None: warnings.warn('f90nml: warning: Value {0} is not assigned to ' 'any variable and has been removed.' ''.format(next_value)) # There are more values than indices, so we stop here break v_s = [self.default_start_index if idx is None else idx for idx in v_idx.first] if not self.row_major: v_i = v_i[::-1] v_s = v_s[::-1] # Multidimensional arrays if not self.sparse_arrays: pad_array(v_values, list(zip(v_i, v_s))) # We iterate inside the v_values and inspect successively # deeper lists within the list tree. If the requested index is # missing, we re-size that particular entry. # (NOTE: This is unnecessary when sparse_arrays is disabled.) v_subval = v_values for (i_v, i_s) in zip(v_i[:-1], v_s[:-1]): try: v_subval = v_subval[i_v - i_s] except IndexError: size = len(v_subval) v_subval.extend([] for _ in range(size, i_v - i_s + 1)) v_subval = v_subval[i_v - i_s] # On the deepest level, we explicitly assign the value i_v, i_s = v_i[-1], v_s[-1] try: v_subval[i_v - i_s] = next_value except IndexError: size = len(v_subval) v_subval.extend(None for _ in range(size, i_v - i_s + 1)) v_subval[i_v - i_s] = next_value else: v_values.append(next_value)
Update a list of parsed values with a new value.
def _get_queue(self): """Gets the actual location of the queue, or None. """ if self._queue is None: self._links = [] queue, depth = self._resolve_queue(self.queue, links=self._links) if queue is None and depth > 0: raise QueueLinkBroken self._queue = queue return self._queue
Gets the actual location of the queue, or None.
def _expectation(p, rbf_kern, feat1, lin_kern, feat2, nghp=None):
    """
    Compute the expectation:
    expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
        - Ka_{.,.} :: RBF kernel
        - Kb_{.,.} :: Linear kernel

    Different Z1 and Z2 are handled if p is diagonal and K_lin and K_rbf have disjoint
    active_dims, in which case the joint expectations simplify into a product of expectations

    :return: NxM1xM2
    """
    if rbf_kern.on_separate_dims(lin_kern) and isinstance(p, DiagonalGaussian):
        # no joint expectations required
        eKxz1 = expectation(p, (rbf_kern, feat1))
        eKxz2 = expectation(p, (lin_kern, feat2))
        return eKxz1[:, :, None] * eKxz2[:, None, :]

    if feat1 != feat2:
        raise NotImplementedError("Features have to be the same for both kernels.")

    if rbf_kern.active_dims != lin_kern.active_dims:
        raise NotImplementedError("active_dims have to be the same for both kernels.")

    with params_as_tensors_for(rbf_kern, lin_kern, feat1, feat2):
        # use only active dimensions
        Xcov = rbf_kern._slice_cov(tf.matrix_diag(p.cov) if isinstance(p, DiagonalGaussian) else p.cov)
        Z, Xmu = rbf_kern._slice(feat1.Z, p.mu)

        N = tf.shape(Xmu)[0]
        D = tf.shape(Xmu)[1]

        lin_kern_variances = lin_kern.variance if lin_kern.ARD \
            else tf.zeros((D,), dtype=settings.float_type) + lin_kern.variance

        rbf_kern_lengthscales = rbf_kern.lengthscales if rbf_kern.ARD \
            else tf.zeros((D,), dtype=settings.float_type) + rbf_kern.lengthscales

        ## Begin RBF eKxz code:
        chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(rbf_kern_lengthscales ** 2) + Xcov)  # NxDxD

        Z_transpose = tf.transpose(Z)
        all_diffs = Z_transpose - tf.expand_dims(Xmu, 2)  # NxDxM
        exponent_mahalanobis = tf.matrix_triangular_solve(chol_L_plus_Xcov, all_diffs, lower=True)  # NxDxM
        exponent_mahalanobis = tf.reduce_sum(tf.square(exponent_mahalanobis), 1)  # NxM
        exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis)  # NxM

        sqrt_det_L = tf.reduce_prod(rbf_kern_lengthscales)
        sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1))
        determinants = sqrt_det_L / sqrt_det_L_plus_Xcov  # N

        eKxz_rbf = rbf_kern.variance * (determinants[:, None] * exponent_mahalanobis)  ## NxM <- End RBF eKxz code

        tiled_Z = tf.tile(tf.expand_dims(Z_transpose, 0), (N, 1, 1))  # NxDxM
        z_L_inv_Xcov = tf.matmul(tiled_Z, Xcov / rbf_kern_lengthscales[:, None] ** 2., transpose_a=True)  # NxMxD

        cross_eKzxKxz = tf.cholesky_solve(
            chol_L_plus_Xcov, (lin_kern_variances * rbf_kern_lengthscales ** 2.)[..., None] * tiled_Z)  # NxDxM

        cross_eKzxKxz = tf.matmul((z_L_inv_Xcov + Xmu[:, None, :]) * eKxz_rbf[..., None], cross_eKzxKxz)  # NxMxM
        return cross_eKzxKxz
Compute the expectation:
expectation[n] = <Ka_{Z1, x_n} Kb_{x_n, Z2}>_p(x_n)
    - Ka_{.,.} :: RBF kernel
    - Kb_{.,.} :: Linear kernel

Different Z1 and Z2 are handled if p is diagonal and K_lin and K_rbf have disjoint active_dims, in which case the joint expectations simplify into a product of expectations

:return: NxM1xM2
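In conventional notation, the closed-form quantity evaluated above is (a sketch, with the Gaussian marginal written explicitly):

\mathrm{expectation}[n] = \int k_{\mathrm{rbf}}(Z_1, x_n)\, k_{\mathrm{lin}}(x_n, Z_2)\, \mathcal{N}(x_n \mid \mu_n, \Sigma_n)\, \mathrm{d}x_n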
def getNameFromPath(filePath): """ Returns the filename of the specified path without its extensions. This is usually how we derive the default name for a given object. """ if len(filePath) == 0: raise ValueError("Cannot have empty path for name") fileName = os.path.split(os.path.normpath(filePath))[1] # We need to handle things like .fa.gz, so we can't use # os.path.splitext ret = fileName.split(".")[0] assert ret != "" return ret
Returns the filename of the specified path without its extensions. This is usually how we derive the default name for a given object.
def _execute(self, *args): """ Execute a given taskwarrior command with arguments Returns a 2-tuple of stdout and stderr (respectively). """ command = ( [ 'task', 'rc:%s' % self.config_filename, ] + self.get_configuration_override_args() + [six.text_type(arg) for arg in args] ) # subprocess is expecting bytestrings only, so nuke unicode if present # and remove control characters for i in range(len(command)): if isinstance(command[i], six.text_type): command[i] = ( taskw.utils.clean_ctrl_chars(command[i].encode('utf-8'))) try: proc = subprocess.Popen( command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, ) stdout, stderr = proc.communicate() except OSError as e: if e.errno == errno.ENOENT: raise OSError("Unable to find the 'task' command-line tool.") raise if proc.returncode != 0: raise TaskwarriorError(command, stderr, stdout, proc.returncode) # We should get bytes from the outside world. Turn those into unicode # as soon as we can. # Everything going into and coming out of taskwarrior *should* be # utf-8, but there are weird edge cases where something totally unusual # made it in.. so we need to be able to handle (or at least try to # handle) whatever. Kitchen tries its best. try: stdout = stdout.decode(self.config.get('encoding', 'utf-8')) except UnicodeDecodeError as e: stdout = kitchen.text.converters.to_unicode(stdout) try: stderr = stderr.decode(self.config.get('encoding', 'utf-8')) except UnicodeDecodeError as e: stderr = kitchen.text.converters.to_unicode(stderr) # strip any crazy terminal escape characters like bells, backspaces, # and form feeds for c in ('\a', '\b', '\f', ''): stdout = stdout.replace(c, '?') stderr = stderr.replace(c, '?') return stdout, stderr
Execute a given taskwarrior command with arguments Returns a 2-tuple of stdout and stderr (respectively).
def set_ylabel(self, s, delay_draw=False): "set plot ylabel" self.conf.relabel(ylabel=s, delay_draw=delay_draw)
set plot ylabel
def autoconfig_url_from_preferences():
    """
    Get the PAC ``AutoConfigURL`` value from the macOS System Preferences.
    This setting is visible as the "URL" field in
    System Preferences > Network > Advanced... > Proxies > Automatic Proxy Configuration.

    :return: The value from System Preferences, or None if the value isn't configured or available.
        Note that it may be a local filesystem path instead of a URL.
    :rtype: str|None
    :raises NotDarwinError: If called on a non-macOS/OSX platform.
    """
    if not ON_DARWIN:
        raise NotDarwinError()

    try:
        config = SystemConfiguration.SCDynamicStoreCopyProxies(None)
    except AttributeError:
        return  # Key or value not found.

    if all(('ProxyAutoConfigEnable' in config,
            'ProxyAutoConfigURLString' in config,
            not config.get('ProxyAutoDiscoveryEnable', 0))):
        # Only return a value if it is enabled, not empty, and auto discovery is disabled.
        return str(config['ProxyAutoConfigURLString'])
Get the PAC ``AutoConfigURL`` value from the macOS System Preferences. This setting is visible as the "URL" field in System Preferences > Network > Advanced... > Proxies > Automatic Proxy Configuration.

:return: The value from System Preferences, or None if the value isn't configured or available. Note that it may be a local filesystem path instead of a URL.
:rtype: str|None
:raises NotDarwinError: If called on a non-macOS/OSX platform.
def getPositionFromState(pState):
    """Return the position of a particle given its state dict.

    Returns:
    --------------------------------------------------------------
    retval:      dict() of particle position, keys are the variable names,
                 values are their positions
    """
    result = dict()
    for (varName, value) in pState['varStates'].iteritems():
        result[varName] = value['position']

    return result
Return the position of a particle given its state dict.

Returns:
--------------------------------------------------------------
retval:      dict() of particle position, keys are the variable names,
             values are their positions
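A minimal check with a hand-built state dict (note ``iteritems`` is Python 2; under Python 3 it would be ``.items()``):

pState = {'varStates': {'alpha': {'position': 0.5},
                        'beta': {'position': 3}}}
assert getPositionFromState(pState) == {'alpha': 0.5, 'beta': 3}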
def _normalised_numpy(self): """Normalised data points using numpy.""" dx = (self.screen.width / float(len(self.points))) oy = (self.screen.height) points = np.array(self.points) - self.minimum points = points * 4.0 / self.extents * self.size.y for x, y in enumerate(points): yield Point(( dx * x, min(oy, oy - y), ))
Normalised data points using numpy.
def instantiateSong(fileName): """Create an AudioSegment with the data from the given file""" ext = detectFormat(fileName) if(ext == "mp3"): return pd.AudioSegment.from_mp3(fileName) elif(ext == "wav"): return pd.AudioSegment.from_wav(fileName) elif(ext == "ogg"): return pd.AudioSegment.from_ogg(fileName) elif(ext == "flv"): return pd.AudioSegment.from_flv(fileName) elif(ext == "m4a"): return pd.AudioSegment.from_file(fileName, "mp4") else: return pd.AudioSegment.from_file(fileName, ext)
Create an AudioSegment with the data from the given file
def CheckFile(self, path): """Validates the definition in a file. Args: path (str): path of the definition file. Returns: bool: True if the file contains valid definitions. """ print('Checking: {0:s}'.format(path)) definitions_registry = registry.DataTypeDefinitionsRegistry() definitions_reader = reader.YAMLDataTypeDefinitionsFileReader() result = False try: definitions_reader.ReadFile(definitions_registry, path) result = True except KeyError as exception: logging.warning(( 'Unable to register data type definition in file: {0:s} with ' 'error: {1:s}').format(path, exception)) except errors.FormatError as exception: logging.warning( 'Unable to validate file: {0:s} with error: {1:s}'.format( path, exception)) return result
Validates the definition in a file. Args: path (str): path of the definition file. Returns: bool: True if the file contains valid definitions.
def county_state_alerts(self, county, state): """Given a county and state, return alerts""" samecode = self.geo.lookup_samecode(county, state) return self.samecode_alerts(samecode)
Given a county and state, return alerts
def return_letters_from_string(text): """Get letters from string only.""" out = "" for letter in text: if letter.isalpha(): out += letter return out
Get letters from string only.
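A quick check of the behaviour:

assert return_letters_from_string("a1b2-c3!") == "abc"
assert return_letters_from_string("42") == ""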
def cubic_lattice( a, b, c, spacing ): """ Generate a cubic lattice. Args: a (Int): Number of lattice repeat units along x. b (Int): Number of lattice repeat units along y. c (Int): Number of lattice repeat units along z. spacing (Float): Distance between lattice sites. Returns: (Lattice): The new lattice """ grid = np.array( list( range( 1, a * b * c + 1 ) ) ).reshape( a, b, c, order='F' ) it = np.nditer( grid, flags=[ 'multi_index' ] ) sites = [] while not it.finished: x, y, z = it.multi_index r = np.array( [ x, y, z ] ) * spacing neighbours = [ np.roll( grid, +1, axis=0 )[x,y,z], np.roll( grid, -1, axis=0 )[x,y,z], np.roll( grid, +1, axis=1 )[x,y,z], np.roll( grid, -1, axis=1 )[x,y,z], np.roll( grid, +1, axis=2 )[x,y,z], np.roll( grid, -1, axis=2 )[x,y,z] ] sites.append( lattice_site.Site( int( it[0] ), r, neighbours, 0.0, 'L' ) ) it.iternext() return lattice.Lattice( sites, cell_lengths = np.array( [ a, b, c ] ) * spacing )
Generate a cubic lattice. Args: a (Int): Number of lattice repeat units along x. b (Int): Number of lattice repeat units along y. c (Int): Number of lattice repeat units along z. spacing (Float): Distance between lattice sites. Returns: (Lattice): The new lattice
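A standalone illustration of the periodic-neighbour trick used above: ``np.roll`` wraps indices, so sites on one face of the grid see the opposite face as neighbours.

import numpy as np

grid = np.arange(1, 9).reshape(2, 2, 2, order='F')
x, y, z = 0, 0, 0
plus_x_neighbour = np.roll(grid, -1, axis=0)[x, y, z]  # wraps around in x
assert plus_x_neighbour == grid[1, 0, 0]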
async def stream( self, version="1.1", keep_alive=False, keep_alive_timeout=None ): """Streams headers, runs the `streaming_fn` callback that writes content to the response body, then finalizes the response body. """ headers = self.get_headers( version, keep_alive=keep_alive, keep_alive_timeout=keep_alive_timeout, ) self.protocol.push_data(headers) await self.protocol.drain() await self.streaming_fn(self) self.protocol.push_data(b"0\r\n\r\n")
Streams headers, runs the `streaming_fn` callback that writes content to the response body, then finalizes the response body.
def _viewport_default(self): """ Trait initialiser """ viewport = Viewport(component=self.canvas, enable_zoom=True) viewport.tools.append(ViewportPanTool(viewport)) return viewport
Trait initialiser
def command(name): """Create a command, using the wrapped function as the handler. Args ---- name : str Name given to the created Command instance. Returns ------- Command A new instance of Command, with handler set to the wrapped function. """ # TODO(nick): It would be nice if this didn't transform the handler. That # way, handlers could be used and tested independently of this system. # Unfortunately that's one of the better properties of the previous # system that wasn't preserved in this rewrite. def wrapper(func): title, description = _parse_doc(func.__doc__) command = Command(name=name, title=title, description=description) command.add_handler(func) argparse_args_list = getattr(func, 'ARGPARSE_ARGS_LIST', []) for args, kwargs in argparse_args_list: command.add_argument_tuple((args, kwargs)) return command return wrapper
Create a command, using the wrapped function as the handler. Args ---- name : str Name given to the created Command instance. Returns ------- Command A new instance of Command, with handler set to the wrapped function.
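A hypothetical usage sketch, assuming the ``Command`` class defined elsewhere in this module:

@command('greet')
def greet(name):
    """Greet someone.

    Prints a friendly greeting for the given name.
    """
    print('Hello, %s!' % name)

# ``greet`` is now a Command instance (not the original function),
# with title/description parsed from the docstring.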
def _port_action_vlan(self, port, segment, func, vni): """Verify configuration and then process event.""" # Verify segment. if not self._is_valid_segment(segment): return device_id = self._get_port_uuid(port) if nexus_help.is_baremetal(port): host_id = port.get('dns_name') else: host_id = port.get(bc.portbindings.HOST_ID) vlan_id = segment.get(api.SEGMENTATION_ID) is_provider = nxos_db.is_provider_vlan(vlan_id) settings = {"vlan_id": vlan_id, "device_id": device_id, "host_id": host_id} missing_fields = [field for field, value in settings.items() if (field != 'host_id' and not value)] if not missing_fields: func(port, vlan_id, device_id, host_id, vni, is_provider) else: raise excep.NexusMissingRequiredFields( fields=' '.join(missing_fields))
Verify configuration and then process event.
def channel_is_settled( self, participant1: Address, participant2: Address, block_identifier: BlockSpecification, channel_identifier: ChannelID, ) -> bool: """ Returns true if the channel is in a settled state, false otherwise. """ try: channel_state = self._get_channel_state( participant1=participant1, participant2=participant2, block_identifier=block_identifier, channel_identifier=channel_identifier, ) except RaidenRecoverableError: return False return channel_state >= ChannelState.SETTLED
Returns true if the channel is in a settled state, false otherwise.
def info(self, **kwargs):
    """
    Get the basic information for an account.

    Call this method first, before calling other Account methods.

    Returns:
        A dict representation of the JSON returned from the API.
    """
    path = self._get_path('info')

    kwargs.update({'session_id': self.session_id})
    response = self._GET(path, kwargs)
    self.id = response['id']
    self._set_attrs_to_values(response)
    return response
Get the basic information for an account.

Call this method first, before calling other Account methods.

Returns:
    A dict representation of the JSON returned from the API.
def command(cls, command, stdin=None, shell=False):
    """
    Runs the specified command. The command can be fed data on stdin via the
    ``stdin`` parameter. The command can also be treated as a shell command
    with the ``shell`` parameter. Please refer to subprocess.Popen for how
    this works.

    :returns: Run() instance with resulting data
    """
    if not shell and isinstance(command, str):
        command = cls.shlex.split(command)
    collate_original = None
    try:
        collate_original = cls.os.environ['LC_ALL']
    except KeyError:
        pass
    cls.os.environ['LC_ALL'] = "C"  # Because of my Czech locale, YUCK! :)
    try:
        process = cls.subprocess.Popen(command,
                                       stdout=cls.subprocess.PIPE,
                                       stderr=cls.subprocess.PIPE,
                                       stdin=cls.subprocess.PIPE,
                                       shell=shell)
        (stdout, stderr) = process.communicate(stdin)
    finally:
        if collate_original:
            cls.os.environ['LC_ALL'] = collate_original
        else:
            del cls.os.environ['LC_ALL']
    return cls(stdout, stderr, stdin, process.returncode, command)
Runs the specified command. The command can be fed data on stdin via the ``stdin`` parameter. The command can also be treated as a shell command with the ``shell`` parameter. Please refer to subprocess.Popen for how this works.

:returns: Run() instance with resulting data
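A minimal usage sketch; the attribute names on the returned object are assumptions inferred from the constructor call above:

result = Run.command('echo hello')
print(result.stdout)       # assumed attribute holding captured stdout
print(result.return_code)  # assumed attribute holding the exit status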
def set_memcached_backend(self, config): """ Select the most suitable Memcached backend based on the config and on what's installed """ # This is the preferred backend as it is the fastest and most fully # featured, so we use this by default config['BACKEND'] = 'django_pylibmc.memcached.PyLibMCCache' if is_importable(config['BACKEND']): return # Otherwise, binary connections can use this pure Python implementation if config.get('BINARY') and is_importable('django_bmemcached'): config['BACKEND'] = 'django_bmemcached.memcached.BMemcached' return # For text-based connections without any authentication we can fall # back to Django's core backends if the supporting libraries are # installed if not any([config.get(key) for key in ('BINARY', 'USERNAME', 'PASSWORD')]): if is_importable('pylibmc'): config['BACKEND'] = \ 'django.core.cache.backends.memcached.PyLibMCCache' elif is_importable('memcached'): config['BACKEND'] = \ 'django.core.cache.backends.memcached.MemcachedCache'
Select the most suitable Memcached backend based on the config and on what's installed
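A rough sketch of an ``is_importable`` helper like the one assumed above; the real helper's semantics may differ, and this version only tries importing the module part of a dotted backend path:

import importlib

def is_importable(dotted_path):
    # Drop a trailing class name, if any, then try the module part
    module_path = dotted_path.rsplit('.', 1)[0] if '.' in dotted_path else dotted_path
    try:
        importlib.import_module(module_path)
        return True
    except ImportError:
        return False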
def view_umatrix(self, figsize=None, colormap=cm.Spectral_r, colorbar=False, bestmatches=False, bestmatchcolors=None, labels=None, zoom=None, filename=None): """Plot the U-matrix of the trained map. :param figsize: Optional parameter to specify the size of the figure. :type figsize: (int, int) :param colormap: Optional parameter to specify the color map to be used. :type colormap: matplotlib.colors.Colormap :param colorbar: Optional parameter to include a colormap as legend. :type colorbar: bool. :param bestmatches: Optional parameter to plot best matching units. :type bestmatches: bool. :param bestmatchcolors: Optional parameter to specify the color of each best matching unit. :type bestmatchcolors: list of int. :param labels: Optional parameter to specify the label of each point. :type labels: list of str. :param zoom: Optional parameter to zoom into a region on the map. The first two coordinates of the tuple are the row limits, the second tuple contains the column limits. :type zoom: ((int, int), (int, int)) :param filename: If specified, the plot will not be shown but saved to this file. :type filename: str. """ if self.umatrix is None: raise Exception("The U-matrix is not available. Either train a map" " or load a U-matrix from a file") return self._view_matrix(self.umatrix, figsize, colormap, colorbar, bestmatches, bestmatchcolors, labels, zoom, filename)
Plot the U-matrix of the trained map. :param figsize: Optional parameter to specify the size of the figure. :type figsize: (int, int) :param colormap: Optional parameter to specify the color map to be used. :type colormap: matplotlib.colors.Colormap :param colorbar: Optional parameter to include a colormap as legend. :type colorbar: bool. :param bestmatches: Optional parameter to plot best matching units. :type bestmatches: bool. :param bestmatchcolors: Optional parameter to specify the color of each best matching unit. :type bestmatchcolors: list of int. :param labels: Optional parameter to specify the label of each point. :type labels: list of str. :param zoom: Optional parameter to zoom into a region on the map. The first two coordinates of the tuple are the row limits, the second tuple contains the column limits. :type zoom: ((int, int), (int, int)) :param filename: If specified, the plot will not be shown but saved to this file. :type filename: str.