def _build_action_bound_constraints_table(self):
    '''Builds the lower and upper action bound constraint expressions.'''
    self.action_lower_bound_constraints = {}
    self.action_upper_bound_constraints = {}

    for name, preconds in self.local_action_preconditions.items():
        for precond in preconds:
            expr_type = precond.etype
            expr_args = precond.args

            bounds_expr = None
            if expr_type == ('aggregation', 'forall'):
                inner_expr = expr_args[1]
                if inner_expr.etype[0] == 'relational':
                    bounds_expr = inner_expr
            elif expr_type[0] == 'relational':
                bounds_expr = precond

            if bounds_expr:
                # lower bound
                bound = self._extract_lower_bound(name, bounds_expr)
                if bound is not None:
                    self.action_lower_bound_constraints[name] = bound
                else:
                    # upper bound
                    bound = self._extract_upper_bound(name, bounds_expr)
                    if bound is not None:
                        self.action_upper_bound_constraints[name] = bound
def insert(self, key, value, ttl=0, format=None,
           persist_to=0, replicate_to=0):
    """Store an object in Couchbase unless it already exists.

    Follows the same conventions as :meth:`upsert` but the value is
    stored only if it does not exist already. Conversely, the value is
    not stored if the key already exists.

    Notably missing from this method is the `cas` parameter, this is
    because `insert` will only succeed if a key does not already exist
    on the server (and thus can have no CAS).

    :raise: :exc:`.KeyExistsError` if the key already exists

    .. seealso:: :meth:`upsert`, :meth:`insert_multi`
    """
    return _Base.insert(self, key, value, ttl=ttl, format=format,
                        persist_to=persist_to, replicate_to=replicate_to)
def cluster(self, method, **kwargs):
    """
    Cluster the tribe.

    Cluster templates within a tribe: returns multiple tribes, each of
    which could be stacked.

    :type method: str
    :param method:
        Method of stacking, see :mod:`eqcorrscan.utils.clustering`

    :return: List of tribes.

    .. rubric:: Example
    """
    from eqcorrscan.utils import clustering
    tribes = []
    func = getattr(clustering, method)
    if method in ['space_cluster', 'space_time_cluster']:
        cat = Catalog([t.event for t in self.templates])
        groups = func(cat, **kwargs)
        for group in groups:
            new_tribe = Tribe()
            for event in group:
                new_tribe.templates.extend(
                    [t for t in self.templates if t.event == event])
            tribes.append(new_tribe)
    return tribes
def get_box_files(self, box_key):
    '''Gets the file infos in a single box.

    Args:
        box_key     key for the box

    return (status code, list of file info dicts)
    '''
    uri = '/'.join([self.api_uri,
                    self.boxes_suffix,
                    box_key,
                    self.files_suffix])
    return self._req('get', uri)
def get_reply_visibility(self, status_dict):
    """Given a status dict, return the visibility that should be used.

    This behaves like Mastodon does by default.
    """
    # Visibility rankings (higher is more limited)
    visibility = ("public", "unlisted", "private", "direct")
    default_visibility = visibility.index(self.default_visibility)
    status_visibility = visibility.index(status_dict["visibility"])
    return visibility[max(default_visibility, status_visibility)]
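The rule above resolves to whichever of the two visibilities is more restrictive. A minimal standalone sketch of that comparison (the free function and its argument names are illustrative, not part of the original class):

VISIBILITY = ("public", "unlisted", "private", "direct")

def reply_visibility(default_visibility, status_visibility):
    # Rank each visibility by its tuple index; the reply uses the more
    # limited (higher-indexed) of the bot default and the status' own.
    return VISIBILITY[max(VISIBILITY.index(default_visibility),
                          VISIBILITY.index(status_visibility))]

assert reply_visibility("unlisted", "private") == "private"
assert reply_visibility("private", "public") == "private"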
def has_args():
    '''Returns True if the decorator invocation had arguments passed
    to it before being sent a function to decorate.'''
    no_args_syntax = '@overload'
    args_syntax = no_args_syntax + '('
    args, no_args = [(-1, -1)], [(-1, -1)]
    for i, line in enumerate(Overload.traceback_lines()):
        if args_syntax in line:
            args.append((i, line.find(args_syntax)))
        if no_args_syntax in line:
            no_args.append((i, line.find(no_args_syntax)))
    args, no_args = max(args), max(no_args)
    if sum(args) + sum(no_args) == -4:
        # couldn't find the invocation
        return False
    return args >= no_args
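The (line, column) pairs collected above compare lexicographically, which is what makes both the `max` call and the `== -4` sentinel test work; a quick illustration:

# Tuples compare element-wise, so `max` picks the match occurring last in
# the traceback; two untouched (-1, -1) sentinels sum to -4, which is how
# the function detects that no invocation line was found at all.
assert max([(-1, -1), (3, 10)]) == (3, 10)
assert sum((-1, -1)) + sum((-1, -1)) == -4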
def getoptS(X, Y, M_E, E):
    '''Find Sopt given X, Y.'''
    n, r = X.shape
    C = np.dot(np.dot(X.T, M_E), Y)
    C = C.flatten()

    A = np.zeros((r * r, r * r))
    for i in range(r):
        for j in range(r):
            ind = j * r + i
            temp = np.dot(
                np.dot(X.T, np.dot(X[:, i, None], Y[:, j, None].T) * E), Y)
            A[:, ind] = temp.flatten()

    S = np.linalg.solve(A, C)
    return np.reshape(S, (r, r)).T
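A hypothetical usage sketch, assuming `getoptS` and `np` (NumPy) are in scope as above: build a rank-2 matrix observed through a mask `E` and recover an estimate of the r x r core matrix by least squares.

import numpy as np

rng = np.random.RandomState(0)
n, m, r = 20, 15, 2
X = np.linalg.qr(rng.randn(n, r))[0]        # orthonormal column factors
Y = np.linalg.qr(rng.randn(m, r))[0]
S_true = rng.randn(r, r)
E = (rng.rand(n, m) < 0.5).astype(float)    # observation mask
M_E = (X @ S_true @ Y.T) * E                # observed entries only
S_est = getoptS(X, Y, M_E, E)               # least-squares estimate of S_true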
def clear_plot(self):
    """Clear plot display."""
    self.tab_plot.clear()
    self.tab_plot.draw()
    self.save_plot.set_enabled(False)
def get_score(self, fmap='', importance_type='weight'):
    """Get feature importance of each feature.
    Importance type can be defined as:

    * 'weight': the number of times a feature is used to split the data
      across all trees.
    * 'gain': the average gain across all splits the feature is used in.
    * 'cover': the average coverage across all splits the feature is used in.
    * 'total_gain': the total gain across all splits the feature is used in.
    * 'total_cover': the total coverage across all splits the feature is
      used in.

    .. note:: Feature importance is defined only for tree boosters

        Feature importance is only defined when the decision tree model is
        chosen as base learner (`booster=gbtree`). It is not defined for
        other base learner types, such as linear learners (`booster=gblinear`).

    Parameters
    ----------
    fmap: str (optional)
        The name of feature map file.
    importance_type: str, default 'weight'
        One of the importance types defined above.
    """
    if getattr(self, 'booster', None) is not None and self.booster not in {'gbtree', 'dart'}:
        raise ValueError('Feature importance is not defined for Booster type {}'
                         .format(self.booster))

    allowed_importance_types = ['weight', 'gain', 'cover', 'total_gain', 'total_cover']
    if importance_type not in allowed_importance_types:
        msg = ("importance_type mismatch, got '{}', expected one of " +
               repr(allowed_importance_types))
        raise ValueError(msg.format(importance_type))

    # if it's weight, then omap stores the number of missing values
    if importance_type == 'weight':
        # do a simpler tree dump to save time
        trees = self.get_dump(fmap, with_stats=False)

        fmap = {}
        for tree in trees:
            for line in tree.split('\n'):
                # look for the opening square bracket
                arr = line.split('[')
                # if no opening bracket (leaf node), ignore this line
                if len(arr) == 1:
                    continue

                # extract feature name from string between []
                fid = arr[1].split(']')[0].split('<')[0]

                if fid not in fmap:
                    # if the feature hasn't been seen yet
                    fmap[fid] = 1
                else:
                    fmap[fid] += 1

        return fmap

    average_over_splits = True
    if importance_type == 'total_gain':
        importance_type = 'gain'
        average_over_splits = False
    elif importance_type == 'total_cover':
        importance_type = 'cover'
        average_over_splits = False

    trees = self.get_dump(fmap, with_stats=True)

    importance_type += '='
    fmap = {}
    gmap = {}
    for tree in trees:
        for line in tree.split('\n'):
            # look for the opening square bracket
            arr = line.split('[')
            # if no opening bracket (leaf node), ignore this line
            if len(arr) == 1:
                continue

            # look for the closing bracket, extract only info within that bracket
            fid = arr[1].split(']')

            # extract gain or cover from string after closing bracket
            g = float(fid[1].split(importance_type)[1].split(',')[0])

            # extract feature name from string before closing bracket
            fid = fid[0].split('<')[0]

            if fid not in fmap:
                # if the feature hasn't been seen yet
                fmap[fid] = 1
                gmap[fid] = g
            else:
                fmap[fid] += 1
                gmap[fid] += g

    # calculate average value (gain/cover) for each feature
    if average_over_splits:
        for fid in gmap:
            gmap[fid] = gmap[fid] / fmap[fid]

    return gmap
def power_down(self):
    """
    Turn off the HX711.

    :return: always True
    :rtype: bool
    """
    GPIO.output(self._pd_sck, False)
    GPIO.output(self._pd_sck, True)
    time.sleep(0.01)
    return True
def qop(self):
    """Indicates what "quality of protection" the client has applied to
    the message for HTTP digest auth."""
    def on_update(header_set):
        if not header_set and 'qop' in self:
            del self['qop']
        elif header_set:
            self['qop'] = header_set.to_header()
    return parse_set_header(self.get('qop'), on_update)
def kmer_counter(seq, k=4):
    """Return a Counter of all the unique substrings (k-mers or q-grams)
    within a short (<128 symbol) string.

    Used for algorithms like UniqTag for genome unique identifier
    locality sensitive hashing. jellyfish is a C implementation of
    k-mer counting.

    If seq is a string, generate a sequence of k-mer strings.
    If seq is a sequence of strings, then generate a sequence of
    generators or sequences of k-mer strings.
    If seq is a sequence of sequences of strings, generate a sequence
    of sequences of generators ...

    Default k = 4 because that's the length of a gene base-pair?

    >>> kmer_counter('AGATAGATAGACACAGAAATGGGACCACAC') == Counter({'ACAC': 2, 'ATAG': 2, 'CACA': 2,
    ...     'TAGA': 2, 'AGAT': 2, 'GATA': 2, 'AGAC': 1, 'ACAG': 1, 'AGAA': 1, 'AAAT': 1, 'TGGG': 1, 'ATGG': 1,
    ...     'ACCA': 1, 'GGAC': 1, 'CCAC': 1, 'CAGA': 1, 'GAAA': 1, 'GGGA': 1, 'GACA': 1, 'GACC': 1, 'AATG': 1})
    True
    """
    # Only the plain-string case is handled in this snippet.
    if isinstance(seq, basestring):
        return Counter(generate_kmers(seq, k))
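`generate_kmers` is not shown in this snippet; a minimal sketch of what it presumably does (a sliding window of length k over the string) is:

def generate_kmers(seq, k=4):
    # Yield every length-k substring (k-mer) of seq, left to right.
    for i in range(len(seq) - k + 1):
        yield seq[i:i + k]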
def _allocate_address(self, instance, network_ids):
    """
    Allocates a floating/public ip address to the given instance.

    :param instance: instance to assign address to

    :param list network_ids: List of IDs (as strings) of networks
        where to request allocation of the floating IP.

    :return: public ip address
    """
    with OpenStackCloudProvider.__node_start_lock:
        try:
            # Use the `novaclient` API (works with python-novaclient <8.0.0)
            free_ips = [ip for ip in self.nova_client.floating_ips.list()
                        if not ip.fixed_ip]
            if not free_ips:
                free_ips.append(self.nova_client.floating_ips.create())
        except AttributeError:
            # Use the `neutronclient` API
            #
            # for some obscure reason, using `fixed_ip_address=None` in the
            # call to `list_floatingips()` returns *no* results (not even,
            # in fact, those with `fixed_ip_address: None`) whereas
            # `fixed_ip_address=''` acts as a wildcard and lists *all* the
            # addresses... so filter them out with a list comprehension
            free_ips = [
                ip for ip in
                self.neutron_client.list_floatingips(fixed_ip_address='')['floatingips']
                if ip['fixed_ip_address'] is None]
            if not free_ips:
                # FIXME: OpenStack Network API v2 requires that we specify
                # a network ID along with the request for a floating IP.
                # However, ElastiCluster configuration allows for multiple
                # networks to be connected to a VM, but does not give any
                # hint as to which one(s) should be used for such requests.
                # So we try them all, ignoring errors until one request
                # succeeds and hope that it's the OK. One can imagine
                # scenarios where this is *not* correct, but: (1) these
                # scenarios are unlikely, and (2) the old novaclient code
                # above has not even had the concept of multiple networks
                # for floating IPs and no-one has complained in 5 years...
                allocated_ip = None
                for network_id in network_ids:
                    log.debug(
                        "Trying to allocate floating IP on network %s ...",
                        network_id)
                    try:
                        allocated_ip = self.neutron_client.create_floatingip(
                            {'floatingip': {'floating_network_id': network_id}})
                    except BadNeutronRequest as err:
                        log.debug(
                            "Failed allocating floating IP on network %s: %s",
                            network_id, err)
                    if allocated_ip:
                        free_ips.append(allocated_ip)
                        break
                    else:
                        continue  # try next network
        if free_ips:
            ip = free_ips.pop()
        else:
            raise RuntimeError(
                "Could not allocate floating IP for VM {0}"
                .format(instance.id))  # fixed: was `vm.id`, but no `vm` is in scope
        instance.add_floating_ip(ip)
    return ip.ip
def DeleteDatabase(self, database_link, options=None):
    """Deletes a database.

    :param str database_link:
        The link to the database.
    :param dict options:
        The request options for the request.

    :return:
        The deleted Database.
    :rtype:
        dict
    """
    if options is None:
        options = {}

    path = base.GetPathFromLink(database_link)
    database_id = base.GetResourceIdOrFullNameFromLink(database_link)
    return self.DeleteResource(path, 'dbs', database_id, None, options)
def get_masters(ppgraph):
    """From a protein-peptide graph dictionary (keys proteins, values
    peptides), return master proteins, i.e. those which have no proteins
    whose peptides are supersets of them. If shared master proteins are
    found, report only the first; we will sort the whole protein group
    later anyway. In that case, the master reported here may be
    temporary."""
    masters = {}
    for protein, peps in ppgraph.items():
        ismaster = True
        peps = set(peps)
        multimaster = set()
        for subprotein, subpeps in ppgraph.items():
            if protein == subprotein:
                continue
            if peps.issubset(subpeps):
                if peps.union(subpeps) > peps:
                    ismaster = False
                    break
                elif peps.intersection(subpeps) == peps:
                    multimaster.update({protein, subprotein})
        if not ismaster:
            continue
        elif multimaster:
            premaster = sorted(list(multimaster))[0]
        else:
            premaster = protein
        for pep in peps:
            try:
                masters[pep].add(premaster)
            except KeyError:
                masters[pep] = {premaster}
    return masters
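A tiny worked example of the mapping this produces (the protein and peptide names are chosen purely for illustration):

ppgraph = {
    'P1': ['a', 'b', 'c'],   # no other protein's peptides are a superset -> master
    'P2': ['a', 'b'],        # strictly subsumed by P1 -> not a master
    'P3': ['d'],             # master of its own peptide
}
# get_masters(ppgraph) maps each peptide to its master protein(s):
# {'a': {'P1'}, 'b': {'P1'}, 'c': {'P1'}, 'd': {'P3'}}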
def construct_codons_dict(alphabet_file=None):
    """Generate the sub_codons_right dictionary of codon suffixes.

    syntax of custom alphabet_files:
    char: list,of,amino,acids,or,codons,separated,by,commas

    Parameters
    ----------
    alphabet_file : str
        File name for a custom alphabet definition. If no file is provided,
        the default alphabet is used, i.e. standard amino acids,
        undetermined amino acids (B, J, X, and Z), and single codon symbols.

    Returns
    -------
    codons_dict : dict
        Dictionary, keyed by the allowed 'amino acid' symbols with the
        values being lists of codons corresponding to the symbol.
    """
    # Some symbols can't be used in the CDR3 sequences in order to allow for
    # regular expression parsing and general manipulation.
    protected_symbols = [' ', '\t', '\n', '\x0b', '\x0c', '\r', ':', ',', ';',
                         '[', ']', '{', '}',
                         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

    # construct list of all 64 codons
    codons = [i + j + k for i in 'ACGT' for j in 'ACGT' for k in 'ACGT']

    codons_dict = {}
    # add standard amino acid symbols to the dict (i.e. 'ACDEFGHIKLMNPQRSTVWY*').
    # these symbols CANNOT be overwritten by custom alphabet files
    for codon in codons:
        codons_dict[nt2aa(codon)] = codons_dict.get(nt2aa(codon), []) + [codon]

    # add single codon symbols to allow for inframe ntseq pgen computation
    # '\x80\x81\x82\x83\x84\x85\x86\x87\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f\x90\x91\x92\x93\x94\x95\x96\x97\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7\xa8\xa9\xaa\xab\xac\xad\xae\xaf\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf'
    # these symbols CANNOT be overwritten by custom alphabet files
    for codon in codons:
        codons_dict[nt2codon_rep(codon)] = [codon]

    # Check to see if custom alphabet file is supplied, else use default alphabet.
    # Include standard ambiguous amino acids.
    # these symbols CAN be overwritten by custom alphabet files
    expanded_alphabet = {}
    expanded_alphabet['B'] = ['D', 'N']
    expanded_alphabet['J'] = ['I', 'L']
    expanded_alphabet['X'] = ['A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L',
                              'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'V', 'W', 'Y']
    expanded_alphabet['Z'] = ['E', 'Q']

    if alphabet_file is not None:  # Use custom alphabet file definitions
        alphabet_f = open(alphabet_file, 'r')
        for line in alphabet_f:
            # assumed syntax of a line is:
            # s: a1, a2, a3, a4, a5, ..., aN
            # where s is a single character symbol that isn't reserved, and all
            # of the a's are either amino acid symbols or codons. Whitespace
            # will be stripped, as will brackets if the a's are presented as a
            # list.
            c_symbol = line.split(':', 1)[0].strip(''.join(protected_symbols))
            # Note there shouldn't be any additional colons -- this is a protected symbol.
            c_aa_codon_list_str = line.split(':', 1)[1]
            expanded_alphabet[c_symbol] = [x.strip(''.join(protected_symbols))
                                           for x in c_aa_codon_list_str.split(',')]
        alphabet_f.close()

    for symbol in expanded_alphabet.keys():
        # Double check that the symbol isn't already used (important
        # particularly for the single codon representation)
        if symbol in codons_dict.keys():
            print(symbol + " is already used as an 'amino acid' symbol for codons: ")
            print(codons_dict[symbol])
            continue
        elif not len(symbol) == 1:
            # Check that the custom symbol is a single character
            print("Can't use " + symbol + " as a custom 'amino acid' definition as such symbols must be single characters.")
            continue
        elif symbol in protected_symbols:
            # This elif shouldn't trigger due to the stripping of protected symbols.
            print(symbol + " is a protected character")
            continue  # assumed: skip protected symbols, matching the other branches

        current_codon_collection = set()
        for x in expanded_alphabet[symbol]:
            if x in codons_dict.keys():
                # Reference to an amino acid or other amino acid symbol:
                # add those codons to the new collection
                current_codon_collection = current_codon_collection.union(codons_dict[x])
            elif x.upper() in codons:
                # Specifies a single codon: add it to the new collection
                current_codon_collection.add(x.upper())
            elif len(x) == 0:  # fully stripped away
                continue
            else:
                # Unrecognized addition -- skip it
                print('Unfamiliar amino acid symbol or codon: ' + x)
                continue
        codons_dict[symbol] = list(current_codon_collection)

    return codons_dict
def singleton(*args, **kwargs):
    '''
    a lazy init singleton pattern.

    usage:

    ``` py
    @singleton()
    class X: ...
    ```

    `args` and `kwargs` will pass to ctor of `X` as args.
    '''
    def decorator(cls: type) -> Callable[[], object]:
        if issubclass(type(cls), _SingletonMetaClassBase):
            raise TypeError('cannot inherit from another singleton class.')

        box = _Box()
        factory = None
        lock = Lock()

        def metaclass_call(_):
            if box.value is None:
                with lock:
                    if box.value is None:
                        instance = cls(*args, **kwargs)
                        instance.__class__ = factory
                        # use tuple to handle `cls()` returning `None`
                        box.value = (instance, )
            return box.value[0]

        def _is_init(*_):
            return box.value is not None

        SingletonMetaClass = type('SingletonMetaClass',
                                  (type(cls), _SingletonMetaClassBase), {
            '__slots__': (),
            '__call__': metaclass_call
        })

        factory = SingletonMetaClass(cls.__name__, (cls, ), {
            '__slots__': (),
            '_is_init': _is_init
        })

        return update_wrapper(factory, cls, updated=())

    return decorator
def get_icloud_folder_location():
    """
    Try to locate the iCloud Drive folder.

    Returns:
        (str) Full path to the iCloud Drive folder.
    """
    yosemite_icloud_path = '~/Library/Mobile Documents/com~apple~CloudDocs/'
    icloud_home = os.path.expanduser(yosemite_icloud_path)

    if not os.path.isdir(icloud_home):
        error('Unable to find your iCloud Drive =(')

    return str(icloud_home)
def unassigned(data, as_json=False):
    """
    https://sendgrid.com/docs/API_Reference/api_v3.html#ip-addresses

    The /ips rest endpoint returns information about the IP addresses
    and the usernames assigned to an IP.

    unassigned returns a listing of the IP addresses that are allocated
    but have 0 users assigned.

    data (response.body from sg.client.ips.get())
    as_json  False -> get list of dicts
             True  -> get json object

    example:

        sg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
        params = {
            'subuser': 'test_string',
            'ip': 'test_string',
            'limit': 1,
            'exclude_whitelabels': 'true',
            'offset': 1
        }
        response = sg.client.ips.get(query_params=params)
        if response.status_code == 201:
            data = response.body
            unused = unassigned(data)
    """
    no_subusers = set()

    if not isinstance(data, list):
        return format_ret(no_subusers, as_json=as_json)

    for current in data:
        num_subusers = len(current["subusers"])
        if num_subusers == 0:
            current_ip = current["ip"]
            no_subusers.add(current_ip)

    ret_val = format_ret(no_subusers, as_json=as_json)
    return ret_val
def count_objects_by_tags(self, metric, scraper_config):
    """ Count objects by whitelisted tags and submit counts as gauges. """
    config = self.object_count_params[metric.name]
    metric_name = "{}.{}".format(scraper_config['namespace'], config['metric_name'])
    object_counter = Counter()

    for sample in metric.samples:
        tags = [
            self._label_to_tag(l, sample[self.SAMPLE_LABELS], scraper_config)
            for l in config['allowed_labels']
        ] + scraper_config['custom_tags']
        object_counter[tuple(sorted(tags))] += sample[self.SAMPLE_VALUE]

    for tags, count in iteritems(object_counter):
        self.gauge(metric_name, count, tags=list(tags))
def _set_copy(self, v, load=False):
    """
    Setter method for copy, mapped from YANG variable /copy (container)

    If this variable is read-only (config: false) in the source YANG file,
    then _set_copy is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_copy()
    directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(
            v, base=copy.copy, is_container='container', presence=False,
            yang_name="copy", rest_name="copy", parent=self,
            path_helper=self._path_helper, extmethods=self._extmethods,
            register_paths=True,
            extensions={u'tailf-common': {u'info': u'RAS copy operation',
                                          u'action': u'support-interactive'}},
            namespace='urn:brocade.com:mgmt:brocade-ras',
            defining_module='brocade-ras', yang_type='container',
            is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """copy must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=copy.copy, is_container='container', presence=False, yang_name="copy", rest_name="copy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'RAS copy operation', u'action': u'support-interactive'}}, namespace='urn:brocade.com:mgmt:brocade-ras', defining_module='brocade-ras', yang_type='container', is_config=True)""",
        })

    self.__copy = t
    if hasattr(self, '_set'):
        self._set()
def has_subdirectories(path, include, exclude, show_all):
    """Return True if path has subdirectories"""
    try:
        # > 1 because of '..'
        return len(listdir(path, include, exclude, show_all,
                           folders_only=True)) > 1
    except (IOError, OSError):
        return False
def rvs(self, size=1, param=None):
    """Gives a set of random values drawn from this distribution.

    Parameters
    ----------
    size : {1, int}
        The number of values to generate; default is 1.
    param : {None, string}
        If provided, will just return values for the given parameter.
        Otherwise, returns random values for each parameter.

    Returns
    -------
    structured array
        The random values in a numpy structured array. If a param was
        specified, the array will only have an element corresponding to the
        given parameter. Otherwise, the array will have an element for each
        parameter in self's params.
    """
    if param is not None:
        dtype = [(param, float)]
    else:
        dtype = [(p, float) for p in self.params]
    arr = numpy.zeros(size, dtype=dtype)
    for (p, _) in dtype:
        log_high = numpy.log10(self._bounds[p][0])
        log_low = numpy.log10(self._bounds[p][1])
        arr[p] = 10.0 ** (numpy.random.uniform(log_low, log_high, size=size))
    return arr
def gifs_categories_category_get(self, api_key, category, **kwargs):
    """
    Category Tags Endpoint.
    Returns a list of tags for a given category. NOTE `limit` and `offset`
    must both be set; otherwise they're ignored.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.gifs_categories_category_get(api_key, category, callback=callback_function)

    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str api_key: Giphy API Key. (required)
    :param str category: Filters results by category. (required)
    :param int limit: The maximum number of records to return.
    :param int offset: An optional results offset. Defaults to 0.
    :return: InlineResponse2004
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.gifs_categories_category_get_with_http_info(api_key, category, **kwargs)
    else:
        (data) = self.gifs_categories_category_get_with_http_info(api_key, category, **kwargs)
        return data
def get_hash(self):
    """Generate and return the dict index hash of the given queue item.

    Note:
        Cookies should not be included in the hash calculation because
        otherwise requests are crawled multiple times with e.g. different
        session keys, causing infinite crawling recursion.

    Note:
        At this moment the keys do not actually get hashed since it works
        perfectly without and since hashing the keys requires us to build
        hash collision management.

    Returns:
        str: The hash of the given queue item.
    """
    if self.__index_hash:
        return self.__index_hash

    key = self.request.method
    key += URLHelper.get_protocol(self.request.url)
    key += URLHelper.get_subdomain(self.request.url)
    key += URLHelper.get_hostname(self.request.url)
    key += URLHelper.get_tld(self.request.url)
    key += URLHelper.get_path(self.request.url)
    key += str(URLHelper.get_ordered_params(self.request.url))

    if self.request.data is not None:
        key += str(self.request.data.keys())

    self.__index_hash = key
    return self.__index_hash
def page_strip(page, versioned):
    """Remove bits in content results to minimize memory utilization.

    TODO: evolve this to a key filter on metadata, like date
    """
    # page strip filtering should be conditional
    page.pop('ResponseMetadata', None)
    contents_key = versioned and 'Versions' or 'Contents'
    contents = page.get(contents_key, ())

    # aggressive size
    if versioned:
        keys = []
        for k in contents:
            if k['IsLatest']:
                keys.append((k['Key'], k['VersionId'], True))
            else:
                keys.append((k['Key'], k['VersionId']))
        return keys
    else:
        return [k['Key'] for k in contents]

    # NOTE: the code below is unreachable -- both branches above return.
    if not contents:
        return page

    # Depending on use case we may want these
    for k in contents:
        k.pop('Owner', None)
        k.pop('LastModified', None)
        k.pop('ETag', None)
        k.pop('StorageClass', None)
        k.pop('Size', None)

    return page
def _wrapinstance(ptr, base=None):
    """Enable implicit cast of pointer to most suitable class

    This behaviour is available in sip per default.

    Based on http://nathanhorne.com/pyqtpyside-wrap-instance

    Usage:
        This mechanism kicks in under these circumstances.
        1. Qt.py is using PySide 1 or 2.
        2. A `base` argument is not provided.

        See :func:`QtCompat.wrapInstance()`

    Arguments:
        ptr (long): Pointer to QObject in memory
        base (QObject, optional): Base class to wrap with. Defaults to
            QObject, which should handle anything.
    """
    assert isinstance(ptr, long), "Argument 'ptr' must be of type <long>"
    assert (base is None) or issubclass(base, Qt.QtCore.QObject), (
        "Argument 'base' must be of type <QObject>")

    if Qt.IsPyQt4 or Qt.IsPyQt5:
        func = getattr(Qt, "_sip").wrapinstance
    elif Qt.IsPySide2:
        func = getattr(Qt, "_shiboken2").wrapInstance
    elif Qt.IsPySide:
        func = getattr(Qt, "_shiboken").wrapInstance
    else:
        raise AttributeError("'module' has no attribute 'wrapInstance'")

    if base is None:
        q_object = func(long(ptr), Qt.QtCore.QObject)
        meta_object = q_object.metaObject()
        class_name = meta_object.className()
        super_class_name = meta_object.superClass().className()

        if hasattr(Qt.QtWidgets, class_name):
            base = getattr(Qt.QtWidgets, class_name)
        elif hasattr(Qt.QtWidgets, super_class_name):
            base = getattr(Qt.QtWidgets, super_class_name)
        else:
            base = Qt.QtCore.QObject

    return func(long(ptr), base)
def _init_client():
    '''Setup client and init datastore.'''
    global client, path_prefix
    if client is not None:
        return

    etcd_kwargs = {
        'host': __opts__.get('etcd.host', '127.0.0.1'),
        'port': __opts__.get('etcd.port', 2379),
        'protocol': __opts__.get('etcd.protocol', 'http'),
        'allow_reconnect': __opts__.get('etcd.allow_reconnect', True),
        'allow_redirect': __opts__.get('etcd.allow_redirect', False),
        'srv_domain': __opts__.get('etcd.srv_domain', None),
        'read_timeout': __opts__.get('etcd.read_timeout', 60),
        'username': __opts__.get('etcd.username', None),
        'password': __opts__.get('etcd.password', None),
        'cert': __opts__.get('etcd.cert', None),
        'ca_cert': __opts__.get('etcd.ca_cert', None),
    }
    path_prefix = __opts__.get('etcd.path_prefix', _DEFAULT_PATH_PREFIX)
    if path_prefix != "":
        path_prefix = '/{0}'.format(path_prefix.strip('/'))
    log.info("etcd: Setting up client with params: %r", etcd_kwargs)
    client = etcd.Client(**etcd_kwargs)
    try:
        client.read(path_prefix)
    except etcd.EtcdKeyNotFound:
        log.info("etcd: Creating dir %r", path_prefix)
        client.write(path_prefix, None, dir=True)
def logtrick_minimizer(minimizer):
    r"""
    Log-Trick decorator for optimizers.

    This decorator implements the "log trick" for optimizing positive
    bounded variables. It will apply this trick for any variables that
    correspond to a Positive() bound.

    Examples
    --------
    >>> from scipy.optimize import minimize as sp_min
    >>> from ..btypes import Bound, Positive

    Here is an example where we may want to enforce a particular parameter
    or parameters to be strictly greater than zero,

    >>> def cost(w, lambda_):
    ...     sq_norm = w.T.dot(w)
    ...     return .5 * lambda_ * sq_norm, lambda_ * w

    Now let's enforce that the `w` are positive,

    >>> bounds = [Positive(), Positive(), Positive()]
    >>> new_min = logtrick_minimizer(sp_min)

    Initial values

    >>> w_0 = np.array([.5, .1, .2])
    >>> lambda_0 = .25

    >>> res = new_min(cost, w_0, args=(lambda_0,), bounds=bounds,
    ...               method='L-BFGS-B', jac=True)
    >>> res.x >= 0
    array([ True,  True,  True], dtype=bool)

    Note
    ----
    This decorator only works on unstructured optimizers. However, it can
    be used with structured_minimizer, so long as it is the inner wrapper.
    """
    @wraps(minimizer)
    def new_minimizer(fun, x0, jac=True, bounds=None, **minimizer_kwargs):

        if bounds is None:
            return minimizer(fun, x0, jac=jac, bounds=bounds,
                             **minimizer_kwargs)

        logx, expx, gradx, bounds = _logtrick_gen(bounds)

        # Intercept gradient
        if callable(jac):
            def new_jac(x, *fargs, **fkwargs):
                return gradx(jac(expx(x), *fargs, **fkwargs), x)
        else:
            new_jac = jac

        # Intercept objective
        if (not callable(jac)) and bool(jac):
            def new_fun(x, *fargs, **fkwargs):
                o, g = fun(expx(x), *fargs, **fkwargs)
                return o, gradx(g, x)
        else:
            def new_fun(x, *fargs, **fkwargs):
                return fun(expx(x), *fargs, **fkwargs)

        # Transform the final result
        result = minimizer(new_fun, logx(x0), jac=new_jac, bounds=bounds,
                           **minimizer_kwargs)
        result['x'] = expx(result['x'])
        return result

    return new_minimizer
r""" Log-Trick decorator for optimizers. This decorator implements the "log trick" for optimizing positive bounded variables. It will apply this trick for any variables that correspond to a Positive() bound. Examples -------- >>> from scipy.optimize import minimize as sp_min >>> from ..btypes import Bound, Positive Here is an example where we may want to enforce a particular parameter or parameters to be strictly greater than zero, >>> def cost(w, lambda_): ... sq_norm = w.T.dot(w) ... return .5 * lambda_ * sq_norm, lambda_ * w Now let's enforce that the `w` are positive, >>> bounds = [Positive(), Positive(), Positive()] >>> new_min = logtrick_minimizer(sp_min) Initial values >>> w_0 = np.array([.5, .1, .2]) >>> lambda_0 = .25 >>> res = new_min(cost, w_0, args=(lambda_0,), bounds=bounds, ... method='L-BFGS-B', jac=True) >>> res.x >= 0 array([ True, True, True], dtype=bool) Note ---- This decorator only works on unstructured optimizers. However, it can be use with structured_minimizer, so long as it is the inner wrapper.
def get_path(self):
    """
    Returns a temporary file path based on an MD5 hash generated with the
    task's name and its arguments
    """
    md5_hash = hashlib.md5(self.task_id.encode()).hexdigest()
    logger.debug('Hash %s corresponds to task %s', md5_hash, self.task_id)

    return os.path.join(self.temp_dir, str(self.unique.value), md5_hash)
def diri(table):
    """
    from SparCC - "randomly draw from the corresponding posterior
    Dirichlet distribution with a uniform prior"
    """
    t = []
    for i in table:
        a = [j + 1 for j in i]
        t.append(np.ndarray.tolist(np.random.mtrand.dirichlet(a)))
    return t
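A hypothetical usage sketch, assuming `diri` and `np` (NumPy) are in scope as above: each row of counts yields one posterior draw whose entries sum to 1.

import numpy as np

counts = [[10, 0, 5], [1, 2, 3]]
draws = diri(counts)   # one Dirichlet draw per row, add-one (uniform) prior
assert all(abs(sum(row) - 1.0) < 1e-9 for row in draws)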
def _Build(self, storage_file):
    """Builds the event tag index.

    Args:
        storage_file (BaseStorageFile): storage file.
    """
    self._index = {}
    for event_tag in storage_file.GetEventTags():
        self.SetEventTag(event_tag)
def process_tls(self, data, name):
    """
    Remote TLS processing - one address:port per line

    :param data:
    :param name:
    :return:
    """
    ret = []
    try:
        lines = [x.strip() for x in data.split('\n')]
        for idx, line in enumerate(lines):
            if line == '':
                continue

            sub = self.process_host(line, name, idx)
            if sub is not None:
                ret.append(sub)

    except Exception as e:
        logger.error('Error in file processing %s : %s' % (name, e))
        self.roca.trace_logger.log(e)
    return ret
def get_sdk_dir(self):
    """Return the MS SDK directory given the version string."""
    try:
        return self._sdk_dir
    except AttributeError:
        sdk_dir = self.find_sdk_dir()
        self._sdk_dir = sdk_dir
        return sdk_dir
def fetch(bank, key):
    '''
    Fetch a key value.
    '''
    c_key = '{0}/{1}'.format(bank, key)
    try:
        _, value = api.kv.get(c_key)
        if value is None:
            return {}
        return __context__['serial'].loads(value['Value'])
    except Exception as exc:
        raise SaltCacheError(
            'There was an error reading the key, {0}: {1}'.format(
                c_key, exc
            )
        )
def pluralize(data_type):
    """Adds 's' to the data type, or uses the correct English plural form."""
    known = {
        u"address": u"addresses",
        u"company": u"companies",
    }
    if data_type in known.keys():
        return known[data_type]
    else:
        return u"%ss" % data_type
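Usage is straightforward; the table handles the irregular cases and everything else just gets an 's' appended:

assert pluralize(u"company") == u"companies"   # irregular, from the table
assert pluralize(u"contact") == u"contacts"    # regular fallback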
def __get_pid_by_scanning(self):
    'Internally used by get_pid().'
    dwProcessId = None
    dwThreadId = self.get_tid()
    with win32.CreateToolhelp32Snapshot(win32.TH32CS_SNAPTHREAD) as hSnapshot:
        te = win32.Thread32First(hSnapshot)
        while te is not None:
            if te.th32ThreadID == dwThreadId:
                dwProcessId = te.th32OwnerProcessID
                break
            te = win32.Thread32Next(hSnapshot)
    if dwProcessId is None:
        msg = "Cannot find thread ID %d in any process" % dwThreadId
        raise RuntimeError(msg)
    return dwProcessId
def create_app(config_name):
    """ Factory Function """
    app = Flask(__name__)
    app.config.from_object(CONFIG[config_name])
    BOOTSTRAP.init_app(app)
    # call controllers
    from flask_seguro.controllers.main import main as main_blueprint
    app.register_blueprint(main_blueprint)
    return app
async def promote_chat_member(self,
                              chat_id: typing.Union[base.Integer, base.String],
                              user_id: base.Integer,
                              can_change_info: typing.Union[base.Boolean, None] = None,
                              can_post_messages: typing.Union[base.Boolean, None] = None,
                              can_edit_messages: typing.Union[base.Boolean, None] = None,
                              can_delete_messages: typing.Union[base.Boolean, None] = None,
                              can_invite_users: typing.Union[base.Boolean, None] = None,
                              can_restrict_members: typing.Union[base.Boolean, None] = None,
                              can_pin_messages: typing.Union[base.Boolean, None] = None,
                              can_promote_members: typing.Union[base.Boolean, None] = None) -> base.Boolean:
    """
    Use this method to promote or demote a user in a supergroup or a channel.
    The bot must be an administrator in the chat for this to work and must
    have the appropriate admin rights.
    Pass False for all boolean parameters to demote a user.

    Source: https://core.telegram.org/bots/api#promotechatmember

    :param chat_id: Unique identifier for the target chat or username of the target channel
    :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
    :param user_id: Unique identifier of the target user
    :type user_id: :obj:`base.Integer`
    :param can_change_info: Pass True, if the administrator can change chat title, photo and other settings
    :type can_change_info: :obj:`typing.Union[base.Boolean, None]`
    :param can_post_messages: Pass True, if the administrator can create channel posts, channels only
    :type can_post_messages: :obj:`typing.Union[base.Boolean, None]`
    :param can_edit_messages: Pass True, if the administrator can edit messages of other users, channels only
    :type can_edit_messages: :obj:`typing.Union[base.Boolean, None]`
    :param can_delete_messages: Pass True, if the administrator can delete messages of other users
    :type can_delete_messages: :obj:`typing.Union[base.Boolean, None]`
    :param can_invite_users: Pass True, if the administrator can invite new users to the chat
    :type can_invite_users: :obj:`typing.Union[base.Boolean, None]`
    :param can_restrict_members: Pass True, if the administrator can restrict, ban or unban chat members
    :type can_restrict_members: :obj:`typing.Union[base.Boolean, None]`
    :param can_pin_messages: Pass True, if the administrator can pin messages, supergroups only
    :type can_pin_messages: :obj:`typing.Union[base.Boolean, None]`
    :param can_promote_members: Pass True, if the administrator can add new administrators
        with a subset of his own privileges or demote administrators that he has promoted,
        directly or indirectly (promoted by administrators that were appointed by him)
    :type can_promote_members: :obj:`typing.Union[base.Boolean, None]`
    :return: Returns True on success
    :rtype: :obj:`base.Boolean`
    """
    payload = generate_payload(**locals())
    result = await self.request(api.Methods.PROMOTE_CHAT_MEMBER, payload)

    return result
def copy(
    ctx,
    opts,
    owner_repo_package,
    destination,
    skip_errors,
    wait_interval,
    no_wait_for_sync,
    sync_attempts,
):
    """
    Copy a package to another repository.

    This requires appropriate permissions for both the source
    repository/package and the destination repository.

    - OWNER/REPO/PACKAGE: Specify the OWNER namespace (i.e. user or org),
      the REPO name where the package is stored, and the PACKAGE name (slug)
      of the package itself. All separated by a slash.

      Example: 'your-org/awesome-repo/better-pkg'.

    - DEST: Specify the DEST (destination) repository to copy the package
      to. This *must* be in the same namespace as the source repository.

      Example: 'other-repo'

    Full CLI example:

      $ cloudsmith cp your-org/awesome-repo/better-pkg other-repo
    """
    owner, source, slug = owner_repo_package

    click.echo(
        "Copying %(slug)s package from %(source)s to %(dest)s ... "
        % {
            "slug": click.style(slug, bold=True),
            "source": click.style(source, bold=True),
            "dest": click.style(destination, bold=True),
        },
        nl=False,
    )

    context_msg = "Failed to copy package!"
    with handle_api_exceptions(
        ctx, opts=opts, context_msg=context_msg, reraise_on_error=skip_errors
    ):
        with maybe_spinner(opts):
            _, new_slug = copy_package(
                owner=owner, repo=source, identifier=slug, destination=destination
            )

    click.secho("OK", fg="green")

    if no_wait_for_sync:
        return

    wait_for_package_sync(
        ctx=ctx,
        opts=opts,
        owner=owner,
        repo=destination,
        slug=new_slug,
        wait_interval=wait_interval,
        skip_errors=skip_errors,
        attempts=sync_attempts,
    )
def _has_fileno(stream):
    """Returns whether the stream object seems to have a working fileno()

    Tells whether _redirect_stderr is likely to work.

    Parameters
    ----------
    stream : IO stream object

    Returns
    -------
    has_fileno : bool
        True if stream.fileno() exists and doesn't raise OSError or
        UnsupportedOperation
    """
    try:
        stream.fileno()
    except (AttributeError, OSError, IOError, io.UnsupportedOperation):
        return False
    return True
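A quick check of the two cases, assuming `_has_fileno` is in scope as above:

import io
import sys

assert _has_fileno(io.StringIO()) is False   # in-memory buffer: no real fd
# A real console stdout typically yields True; under capturing test runners
# it may be replaced by an object without a file descriptor, hence the helper.
print(_has_fileno(sys.stdout))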
def log_interp1d(self, xx, yy, kind='linear'):
    """
    Performs a log space 1d interpolation.

    :param xx: the x values.
    :param yy: the y values.
    :param kind: the type of interpolation to apply (as per scipy interp1d)
    :return: the interpolation function.
    """
    logx = np.log10(xx)
    logy = np.log10(yy)
    lin_interp = interp1d(logx, logy, kind=kind)
    log_interp = lambda zz: np.power(10.0, lin_interp(np.log10(zz)))
    return log_interp
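A hypothetical usage sketch: a power law y = x**2 is linear in log-log space, so the log-space interpolant reproduces it essentially exactly between knots. Here `self` is unused and None is passed; `np` and `interp1d` must be importable as in the snippet above.

import numpy as np
from scipy.interpolate import interp1d

xx = np.logspace(0, 3, 10)
f = log_interp1d(None, xx, xx ** 2)
assert np.allclose(f(np.array([5.0, 50.0])), [25.0, 2500.0])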
def protocol_version_to_kmip_version(value):
    """
    Convert a ProtocolVersion struct to its KMIPVersion enumeration
    equivalent.

    Args:
        value (ProtocolVersion): A ProtocolVersion struct to be converted
            into a KMIPVersion enumeration.

    Returns:
        KMIPVersion: The enumeration equivalent of the struct. If the
            struct cannot be converted to a valid enumeration, None is
            returned.
    """
    if not isinstance(value, ProtocolVersion):
        return None

    if value.major == 1:
        if value.minor == 0:
            return enums.KMIPVersion.KMIP_1_0
        elif value.minor == 1:
            return enums.KMIPVersion.KMIP_1_1
        elif value.minor == 2:
            return enums.KMIPVersion.KMIP_1_2
        elif value.minor == 3:
            return enums.KMIPVersion.KMIP_1_3
        elif value.minor == 4:
            return enums.KMIPVersion.KMIP_1_4
        else:
            return None
    else:
        return None
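A short usage sketch, assuming `ProtocolVersion` is constructible from major and minor values:

assert protocol_version_to_kmip_version(ProtocolVersion(1, 2)) == \
    enums.KMIPVersion.KMIP_1_2
assert protocol_version_to_kmip_version(ProtocolVersion(2, 0)) is None  # unmapped
assert protocol_version_to_kmip_version("1.2") is None  # not a struct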
def plot_element_profile(self, element, comp, show_label_index=None,
                         xlim=5):
    """
    Draw the element profile plot for a composition varying different
    chemical potential of an element.

    X value is the negative value of the chemical potential reference to
    elemental chemical potential. For example, if choose Element("Li"),
    X= -(µLi-µLi0), which corresponds to the voltage versus metal anode.
    Y values represent for the number of element uptake in this composition
    (unit: per atom). All reactions are printed to help choosing the
    profile steps you want to show label in the plot.

    Args:
        element (Element): An element of which the chemical potential is
            considered. It also must be in the phase diagram.
        comp (Composition): A composition.
        show_label_index (list of integers): The labels for reaction
            products you want to show in the plot. Default to None (not
            showing any annotation for reaction products). For the profile
            steps you want to show the labels, just add it to the
            show_label_index. The profile step counts from zero. For
            example, you can set show_label_index=[0, 2, 5] to label
            profile step 0, 2, 5.
        xlim (float): The max x value. x value is from 0 to xlim. Default
            to 5 eV.

    Returns:
        Plot of element profile evolution by varying the chemical
        potential of an element.
    """
    plt = pretty_plot(12, 8)
    pd = self._pd
    evolution = pd.get_element_profile(element, comp)
    num_atoms = evolution[0]["reaction"].reactants[0].num_atoms
    element_energy = evolution[0]['chempot']
    for i, d in enumerate(evolution):
        v = -(d["chempot"] - element_energy)
        print("index= %s, -\u0394\u03BC=%.4f(eV)," % (i, v), d["reaction"])
        if i != 0:
            plt.plot([x2, x2], [y1, d["evolution"] / num_atoms],
                     'k', linewidth=2.5)
        x1 = v
        y1 = d["evolution"] / num_atoms
        if i != len(evolution) - 1:
            x2 = -(evolution[i + 1]["chempot"] - element_energy)
        else:
            x2 = 5.0
        if show_label_index is not None and i in show_label_index:
            products = [re.sub(r"(\d+)", r"$_{\1}$", p.reduced_formula)
                        for p in d["reaction"].products
                        if p.reduced_formula != element.symbol]
            plt.annotate(", ".join(products), xy=(v + 0.05, y1 + 0.05),
                         fontsize=24, color='r')
            plt.plot([x1, x2], [y1, y1], 'r', linewidth=3)
        else:
            plt.plot([x1, x2], [y1, y1], 'k', linewidth=2.5)

    plt.xlim((0, xlim))
    plt.xlabel("-$\\Delta{\\mu}$ (eV)")
    plt.ylabel("Uptake per atom")

    return plt
def create_presentation(self):
    """
    Create the presentation.

    The audio track is mixed with the slides. The resulting file is saved
    as self.output.

    DownloadError is raised if some resources cannot be fetched.
    ConversionError is raised if the final video cannot be created.
    """
    # Avoid wasting time and bandwidth if we know that conversion will fail.
    if not self.overwrite and os.path.exists(self.output):
        raise ConversionError("File %s already exists and --overwrite not specified" % self.output)

    video = self.download_video()
    raw_slides = self.download_slides()

    # ffmpeg does not support SWF
    png_slides = self._convert_slides(raw_slides)
    # Create one frame per second using the time code information
    frame_pattern = self._prepare_frames(png_slides)

    return self._assemble(video, frame_pattern)
async def getStickerSet(self, name):
    """ See: https://core.telegram.org/bots/api#getstickerset """
    p = _strip(locals())
    return await self._api_request('getStickerSet', _rectify(p))
def fromDatetime(klass, dtime):
    """Return a new Time instance from a datetime.datetime instance.

    If the datetime instance does not have an associated timezone, it is
    assumed to be UTC.
    """
    self = klass.__new__(klass)
    if dtime.tzinfo is not None:
        self._time = dtime.astimezone(FixedOffset(0, 0)).replace(tzinfo=None)
    else:
        self._time = dtime
    self.resolution = datetime.timedelta.resolution
    return self
def fit(self, Z, **fit_params):
    """Fit all the transforms one after the other and transform the
    data, then fit the transformed data using the final estimator.

    Parameters
    ----------
    Z : ArrayRDD, TupleRDD or DictRDD
        Input data in blocked distributed format.

    Returns
    -------
    self : SparkPipeline
    """
    Zt, fit_params = self._pre_transform(Z, **fit_params)
    self.steps[-1][-1].fit(Zt, **fit_params)
    Zt.unpersist()
    return self
def request(self, method, url, params=None, **aio_kwargs): """Make a request to provider.""" oparams = { 'oauth_consumer_key': self.consumer_key, 'oauth_nonce': sha1(str(RANDOM()).encode('ascii')).hexdigest(), 'oauth_signature_method': self.signature.name, 'oauth_timestamp': str(int(time.time())), 'oauth_version': self.version, } oparams.update(params or {}) if self.oauth_token: oparams['oauth_token'] = self.oauth_token url = self._get_url(url) if urlsplit(url).query: raise ValueError( 'Request parameters should be in the "params" parameter, ' 'not inlined in the URL') oparams['oauth_signature'] = self.signature.sign( self.consumer_secret, method, url, oauth_token_secret=self.oauth_token_secret, **oparams) self.logger.debug("%s %s", url, oparams) return self._request(method, url, params=oparams, **aio_kwargs)
Make a request to provider.
def heading_title(self): """ Makes the Article Title for the Heading. Metadata element, content derived from FrontMatter """ art_title = self.article.root.xpath('./front/article-meta/title-group/article-title')[0] article_title = deepcopy(art_title) article_title.tag = 'h1' article_title.attrib['id'] = 'title' article_title.attrib['class'] = 'article-title' return article_title
Makes the Article Title for the Heading. Metadata element, content derived from FrontMatter
def create_new_dispatch(self, dispatch): """ Create a new dispatch :param dispatch: is the new dispatch that the client wants to create """ self._validate_uuid(dispatch.dispatch_id) # Create new dispatch url = "/notification/v1/dispatch" post_response = NWS_DAO().postURL( url, self._write_headers(), self._json_body(dispatch.json_data())) if post_response.status != 200: raise DataFailureException( url, post_response.status, post_response.data) return post_response.status
Create a new dispatch :param dispatch: is the new dispatch that the client wants to create
def _get_function_name(self, fn, default="None"): """ Return name of function, using default value if function not defined """ if fn is None: fn_name = default else: fn_name = fn.__name__ return fn_name
Return name of function, using default value if function not defined
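A standalone sketch of the same helper, lifted out of its class purely for illustration:

def get_function_name(fn, default="None"):
    # Mirrors _get_function_name: fall back to the default when fn is None.
    return default if fn is None else fn.__name__

def reducer(a, b):
    return a + b

print(get_function_name(reducer))  # reducer
print(get_function_name(None))     # None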
def _get_ssh_client(self): """Return a new or existing SSH client for given ip.""" return ipa_utils.get_ssh_client( self.instance_ip, self.ssh_private_key_file, self.ssh_user, timeout=self.timeout )
Return a new or existing SSH client for given ip.
def default(self, request, exception):
    """
    Provide a default behavior for the objects of :class:`ErrorHandler`.
    If a developer chooses to extend the :class:`ErrorHandler` they can
    provide a custom implementation for this method to behave in a way
    they see fit.

    :param request: Incoming request
    :param exception: Exception object

    :type request: :class:`sanic.request.Request`
    :type exception: :class:`sanic.exceptions.SanicException` or
        :class:`Exception`
    :return:
    """
    self.log(format_exc())
    try:
        url = repr(request.url)
    except AttributeError:
        url = "unknown"

    response_message = "Exception occurred while handling uri: %s"
    logger.exception(response_message, url)

    if issubclass(type(exception), SanicException):
        return text(
            "Error: {}".format(exception),
            status=getattr(exception, "status_code", 500),
            headers=getattr(exception, "headers", dict()),
        )
    elif self.debug:
        html_output = self._render_traceback_html(exception, request)
        return html(html_output, status=500)
    else:
        return html(INTERNAL_SERVER_ERROR_HTML, status=500)
Provide a default behavior for the objects of :class:`ErrorHandler`. If a developer chooses to extend the :class:`ErrorHandler` they can provide a custom implementation for this method to behave in a way they see fit.

:param request: Incoming request
:param exception: Exception object

:type request: :class:`sanic.request.Request`
:type exception: :class:`sanic.exceptions.SanicException` or :class:`Exception`
:return:
def _nth_of_quarter(self, nth, day_of_week):
    """
    Modify to the given occurrence of a given day of the week in the
    current quarter. If the calculated occurrence is outside the scope
    of the current quarter, then return False and no modifications are
    made. Use the supplied consts to indicate the desired day_of_week,
    ex. pendulum.MONDAY.

    :type nth: int

    :type day_of_week: int or None

    :rtype: Date
    """
    if nth == 1:
        return self.first_of("quarter", day_of_week)

    dt = self.replace(self.year, self.quarter * 3, 1)
    last_month = dt.month
    year = dt.year
    dt = dt.first_of("quarter")
    for i in range(nth - (1 if dt.day_of_week == day_of_week else 0)):
        dt = dt.next(day_of_week)

    if last_month < dt.month or year != dt.year:
        return False

    return self.set(self.year, dt.month, dt.day)
Modify to the given occurrence of a given day of the week in the current quarter. If the calculated occurrence is outside the scope of the current quarter, then return False and no modifications are made. Use the supplied consts to indicate the desired day_of_week, ex. pendulum.MONDAY.

:type nth: int

:type day_of_week: int or None

:rtype: Date
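A hedged usage sketch: pendulum reaches this helper through the public nth_of() wrapper; constant names and signatures follow pendulum 2.x conventions and may differ between versions.

import pendulum

d = pendulum.date(2024, 2, 15)                  # somewhere in Q1 2024
print(d.nth_of("quarter", 2, pendulum.MONDAY))  # second Monday of the quarter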
def add(self, element, multiplicity=1): """Adds an element to the multiset. >>> ms = Multiset() >>> ms.add('a') >>> sorted(ms) ['a'] An optional multiplicity can be specified to define how many of the element are added: >>> ms.add('b', 2) >>> sorted(ms) ['a', 'b', 'b'] This extends the :meth:`MutableSet.add` signature to allow specifying the multiplicity. Args: element: The element to add to the multiset. multiplicity: The multiplicity i.e. count of elements to add. """ if multiplicity < 1: raise ValueError("Multiplicity must be positive") self._elements[element] += multiplicity self._total += multiplicity
Adds an element to the multiset. >>> ms = Multiset() >>> ms.add('a') >>> sorted(ms) ['a'] An optional multiplicity can be specified to define how many of the element are added: >>> ms.add('b', 2) >>> sorted(ms) ['a', 'b', 'b'] This extends the :meth:`MutableSet.add` signature to allow specifying the multiplicity. Args: element: The element to add to the multiset. multiplicity: The multiplicity i.e. count of elements to add.
def _unpack_basis_label_or_index(self, label_or_index):
    """return tuple (label, ind) from `label_or_index`

    If `label_or_index` is a :class:`.SymbolicLabelBase` sub-instance, it will be stored in the `label` attribute, and the `ind` attribute will return the value of the label's :attr:`.FockIndex.fock_index` attribute. No checks are performed for symbolic labels.

    :meth:`_check_basis_label_type` is called on `label_or_index`.

    Raises:
        ValueError: if `label_or_index` is a :class:`str` referencing an invalid basis state; or, if `label_or_index` is an :class:`int` < 0 or >= the dimension of the Hilbert space
        BasisNotSetError: if `label_or_index` is a :class:`str`, but the Hilbert space has no defined basis
        TypeError: if `label_or_index` is not a :class:`str`, :class:`int`, or :class:`.SymbolicLabelBase`, or more generally whatever types are allowed through the `_basis_label_types` attribute of the Hilbert space.
    """
    self._check_basis_label_type(label_or_index)
    if isinstance(label_or_index, str):
        label = label_or_index
        try:
            ind = self.basis_labels.index(label)
            # the above line may also raise BasisNotSetError, which we
            # don't want to catch here
        except ValueError:
            # a less confusing error message:
            raise ValueError(
                "%r is not one of the basis labels %r"
                % (label, self.basis_labels))
    elif isinstance(label_or_index, int):
        ind = label_or_index
        if ind < 0:
            raise ValueError("Index %d must be >= 0" % ind)
        if self.has_basis:
            if ind >= self.dimension:
                raise ValueError(
                    "Index %s must be < the dimension %d of Hilbert "
                    "space %s" % (ind, self.dimension, self))
            label = self.basis_labels[label_or_index]
        else:
            label = str(label_or_index)
    elif isinstance(label_or_index, SymbolicLabelBase):
        label = label_or_index
        try:
            ind = label_or_index.fock_index
        except AttributeError:
            raise TypeError(
                "label_or_index must define a fock_index attribute in "
                "order to be used for identifying a level in a Hilbert "
                "space")
    else:
        raise TypeError(
            "label_or_index must be an int or str, or SymbolicLabelBase, "
            "not %s" % type(label_or_index))
    return label, ind
return tuple (label, ind) from `label_or_index`

If `label_or_index` is a :class:`.SymbolicLabelBase` sub-instance, it will be stored in the `label` attribute, and the `ind` attribute will return the value of the label's :attr:`.FockIndex.fock_index` attribute. No checks are performed for symbolic labels.

:meth:`_check_basis_label_type` is called on `label_or_index`.

Raises:
    ValueError: if `label_or_index` is a :class:`str` referencing an invalid basis state; or, if `label_or_index` is an :class:`int` < 0 or >= the dimension of the Hilbert space
    BasisNotSetError: if `label_or_index` is a :class:`str`, but the Hilbert space has no defined basis
    TypeError: if `label_or_index` is not a :class:`str`, :class:`int`, or :class:`.SymbolicLabelBase`, or more generally whatever types are allowed through the `_basis_label_types` attribute of the Hilbert space.
def p_primary_expr_no_brace_4(self, p): """primary_expr_no_brace : LPAREN expr RPAREN""" if isinstance(p[2], self.asttypes.GroupingOp): # this reduces the grouping operator to one. p[0] = p[2] else: p[0] = self.asttypes.GroupingOp(expr=p[2]) p[0].setpos(p)
primary_expr_no_brace : LPAREN expr RPAREN
def get_accent_string(string): """ Get the first accent from the right of a string. """ accents = list(filter(lambda accent: accent != Accent.NONE, map(get_accent_char, string))) return accents[-1] if accents else Accent.NONE
Get the first accent from the right of a string.
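A self-contained sketch with a toy accent table; the real Accent enum and get_accent_char come from the surrounding module and are stubbed here for illustration.

class Accent:
    NONE = None
    ACUTE = "acute"
    GRAVE = "grave"

TOY_TABLE = {"á": Accent.ACUTE, "à": Accent.GRAVE}  # illustration only

def get_accent_char(ch):
    return TOY_TABLE.get(ch, Accent.NONE)

def get_accent_string(string):
    accents = [a for a in map(get_accent_char, string) if a != Accent.NONE]
    return accents[-1] if accents else Accent.NONE

print(get_accent_string("àbá"))  # acute -- the rightmost accented char wins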
def is_modified(self): """ Returns whether model is modified or not """ if len(self.__modified_data__) or len(self.__deleted_fields__): return True for value in self.__original_data__.values(): try: if value.is_modified(): return True except AttributeError: pass return False
Returns whether model is modified or not
def ajax_preview(request, **kwargs): """ Currently only supports markdown """ data = { "html": render_to_string("pinax/blog/_preview.html", { "content": parse(request.POST.get("markup")) }) } return JsonResponse(data)
Currently only supports markdown
def from_array(self, coeffs, r0, errors=None, normalization='schmidt', csphase=1, lmax=None, copy=True):
    """
    Initialize the class with spherical harmonic coefficients from an input array.

    Usage
    -----
    x = SHMagCoeffs.from_array(array, r0, [errors, normalization, csphase, lmax, copy])

    Returns
    -------
    x : SHMagCoeffs class instance.

    Parameters
    ----------
    array : ndarray, shape (2, lmaxin+1, lmaxin+1).
        The input spherical harmonic coefficients.
    r0 : float
        The reference radius of the spherical harmonic coefficients.
    errors : ndarray, optional, default = None
        The uncertainties of the spherical harmonic coefficients.
    normalization : str, optional, default = 'schmidt'
        '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized, orthonormalized, Schmidt semi-normalized, or unnormalized coefficients, respectively.
    csphase : int, optional, default = 1
        Condon-Shortley phase convention: 1 to exclude the phase factor, or -1 to include it.
    lmax : int, optional, default = None
        The maximum spherical harmonic degree to include in the returned class instance. This must be less than or equal to lmaxin.
    copy : bool, optional, default = True
        If True, make a copy of array when initializing the class instance. If False, initialize the class instance with a reference to array.

    Notes
    -----
    The coefficients in the input array are assumed to have units of nT.
    """
    if _np.iscomplexobj(coeffs):
        raise TypeError('The input array must be real.')

    if type(normalization) != str:
        raise ValueError('normalization must be a string. '
                         'Input type was {:s}'
                         .format(str(type(normalization))))

    if normalization.lower() not in ('4pi', 'ortho', 'schmidt', 'unnorm'):
        raise ValueError(
            "The normalization must be '4pi', 'ortho', 'schmidt', "
            "or 'unnorm'. Input value was {:s}."
            .format(repr(normalization))
            )

    if csphase != 1 and csphase != -1:
        raise ValueError(
            "csphase must be either 1 or -1. Input value was {:s}."
            .format(repr(csphase))
            )

    if errors is not None:
        if coeffs.shape != errors.shape:
            raise ValueError(
                "The shape of coeffs and errors must be the same. "
                "Shape of coeffs = {:s}, shape of errors = {:s}"
                .format(repr(coeffs.shape), repr(errors.shape))
                )

    lmaxin = coeffs.shape[1] - 1
    if lmax is None:
        lmax = lmaxin
    else:
        if lmax > lmaxin:
            lmax = lmaxin

    if normalization.lower() == 'unnorm' and lmax > 85:
        _warnings.warn("Calculations using unnormalized coefficients "
                       "are stable only for degrees less than or equal "
                       "to 85. lmax for the coefficients will be set to "
                       "85. Input value was {:d}.".format(lmax),
                       category=RuntimeWarning)
        lmax = 85

    if errors is not None:
        clm = SHMagRealCoeffs(coeffs[:, 0:lmax+1, 0:lmax+1], r0=r0,
                              errors=errors[:, 0:lmax+1, 0:lmax+1],
                              normalization=normalization.lower(),
                              csphase=csphase, copy=copy)
    else:
        clm = SHMagRealCoeffs(coeffs[:, 0:lmax+1, 0:lmax+1], r0=r0,
                              normalization=normalization.lower(),
                              csphase=csphase, copy=copy)
    return clm
Initialize the class with spherical harmonic coefficients from an input array. Usage ----- x = SHMagCoeffs.from_array(array, r0, [errors, normalization, csphase, lmax, copy]) Returns ------- x : SHMagCoeffs class instance. Parameters ---------- array : ndarray, shape (2, lmaxin+1, lmaxin+1). The input spherical harmonic coefficients. r0 : float The reference radius of the spherical harmonic coefficients. errors : ndarray, optional, default = None The uncertainties of the spherical harmonic coefficients. normalization : str, optional, default = 'schmidt' '4pi', 'ortho', 'schmidt', or 'unnorm' for geodesy 4pi normalized, orthonormalized, Schmidt semi-normalized, or unnormalized coefficients, respectively. csphase : int, optional, default = 1 Condon-Shortley phase convention: 1 to exclude the phase factor, or -1 to include it. lmax : int, optional, default = None The maximum spherical harmonic degree to include in the returned class instance. This must be less than or equal to lmaxin. copy : bool, optional, default = True If True, make a copy of array when initializing the class instance. If False, initialize the class instance with a reference to array. Notes ----- The coefficients in the input array are assumed to have units of nT.
def is_complete(self): """Do all required parameters have values?""" return all(p.name in self.values for p in self.parameters if p.required)
Do all required parameters have values?
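A quick check of the same predicate with hypothetical stand-ins; the real parameter objects only need .name and .required attributes for this check to work.

from collections import namedtuple

Parameter = namedtuple("Parameter", ["name", "required"])
parameters = [Parameter("host", True), Parameter("port", False)]
values = {"host": "example.org"}

# Every required parameter has a value; optional ones may be missing.
print(all(p.name in values for p in parameters if p.required))  # True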
def get_assessment_part_item_session(self, *args, **kwargs): """Gets the ``OsidSession`` associated with the assessment part item service. return: (osid.assessment.authoring.AssessmentPartItemSession) - an ``AssessmentPartItemSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_part_item()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_part_lookup()`` is ``true``.* """ if not self.supports_assessment_part_lookup(): # This is kludgy, but only until Tom fixes spec raise errors.Unimplemented() if self._proxy_in_args(*args, **kwargs): raise errors.InvalidArgument('A Proxy object was received but not expected.') # pylint: disable=no-member return sessions.AssessmentPartItemSession(runtime=self._runtime)
Gets the ``OsidSession`` associated with the assessment part item service. return: (osid.assessment.authoring.AssessmentPartItemSession) - an ``AssessmentPartItemSession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_assessment_part_item()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_part_lookup()`` is ``true``.*
def create_list_stories( list_id_stories, number_of_stories, shuffle, max_threads ): """Show in a formatted way the stories for each item of the list.""" list_stories = [] with ThreadPoolExecutor(max_workers=max_threads) as executor: futures = { executor.submit(get_story, new) for new in list_id_stories[:number_of_stories] } for future in tqdm( as_completed(futures), desc='Getting results', unit=' news', ): list_stories.append(future.result()) if shuffle: random.shuffle(list_stories) return list_stories
Show in a formatted way the stories for each item of the list.
def GetAll(alias=None,location=None,session=None): """Gets a list of anti-affinity policies within a given account. https://t3n.zendesk.com/entries/44657214-Get-Anti-Affinity-Policies >>> clc.v2.AntiAffinity.GetAll() [<clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65e910>, <clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65ec90>] """ if not alias: alias = clc.v2.Account.GetAlias(session=session) policies = [] policy_resp = clc.v2.API.Call('GET','antiAffinityPolicies/%s' % alias,{},session=session) for k in policy_resp: r_val = policy_resp[k] for r in r_val: if r.get('location'): if location and r['location'].lower()!=location.lower(): continue servers = [obj['id'] for obj in r['links'] if obj['rel'] == "server"] policies.append(AntiAffinity(id=r['id'],name=r['name'],location=r['location'],servers=servers,session=session)) return(policies)
Gets a list of anti-affinity policies within a given account. https://t3n.zendesk.com/entries/44657214-Get-Anti-Affinity-Policies >>> clc.v2.AntiAffinity.GetAll() [<clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65e910>, <clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65ec90>]
def conversations_replies(self, *, channel: str, ts: str, **kwargs) -> SlackResponse: """Retrieve a thread of messages posted to a conversation Args: channel (str): Conversation ID to fetch thread from. e.g. 'C1234567890' ts (str): Unique identifier of a thread's parent message. e.g. '1234567890.123456' """ kwargs.update({"channel": channel, "ts": ts}) return self.api_call("conversations.replies", http_verb="GET", params=kwargs)
Retrieve a thread of messages posted to a conversation Args: channel (str): Conversation ID to fetch thread from. e.g. 'C1234567890' ts (str): Unique identifier of a thread's parent message. e.g. '1234567890.123456'
def _find_value(key, *args): """Find a value for 'key' in any of the objects given as 'args'""" for arg in args: v = _get_value(arg, key) if v is not None: return v
Find a value for 'key' in any of the objects given as 'args'
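_get_value is not shown above; a plausible reading, made explicit as an assumption here, is attribute access with a mapping fallback.

def _get_value(obj, key):
    # Assumed behavior for illustration: attribute first, then mapping
    # access, else None.
    if hasattr(obj, key):
        return getattr(obj, key)
    try:
        return obj.get(key)
    except AttributeError:
        return None

def find_value(key, *args):
    for arg in args:
        v = _get_value(arg, key)
        if v is not None:
            return v

print(find_value("timeout", {}, {"timeout": 30}, {"timeout": 60}))  # 30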
def initialize(self): """Create the laboratory directories.""" mkdir_p(self.archive_path) mkdir_p(self.bin_path) mkdir_p(self.codebase_path) mkdir_p(self.input_basepath)
Create the laboratory directories.
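mkdir_p is imported in the original and conventionally mimics `mkdir -p`; on Python >= 3.2 a one-line stand-in suffices.

import os

def mkdir_p(path):
    os.makedirs(path, exist_ok=True)  # no error if the directory exists

mkdir_p("/tmp/lab/archive")  # hypothetical path, for illustration only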
def propose_value(self, value): ''' Sets the proposal value for this node iff this node is not already aware of a previous proposal value. If the node additionally believes itself to be the current leader, an Accept message will be returned ''' if self.proposed_value is None: self.proposed_value = value if self.leader: self.current_accept_msg = Accept(self.network_uid, self.proposal_id, value) return self.current_accept_msg
Sets the proposal value for this node iff this node is not already aware of a previous proposal value. If the node additionally believes itself to be the current leader, an Accept message will be returned
def handle_exc(exc): """ Given a database exception determine how to fail Attempt to lookup a known error & abort on a meaningful error. Otherwise issue a generic DatabaseUnavailable exception. :param exc: psycopg2 exception """ err = ERRORS_TABLE.get(exc.pgcode) if err: abort(exceptions.InvalidQueryParams(**{ 'detail': err, 'parameter': 'filter', })) abort(exceptions.DatabaseUnavailable)
Given a database exception determine how to fail Attempt to lookup a known error & abort on a meaningful error. Otherwise issue a generic DatabaseUnavailable exception. :param exc: psycopg2 exception
def is_unwrapped(f): """If `f` was imported and then unwrapped, this function might return True. .. |is_unwrapped| replace:: :py:func:`is_unwrapped`""" try: g = look_up(object_name(f)) return g != f and unwrap(g) == f except (AttributeError, TypeError, ImportError): return False
If `f` was imported and then unwrapped, this function might return True. .. |is_unwrapped| replace:: :py:func:`is_unwrapped`
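A sketch of the wrap/unwrap relationship the function tests for; its look_up and object_name helpers are not shown, so this only demonstrates the unwrap(g) == f half.

import functools
import inspect

def trace(fn):
    @functools.wraps(fn)  # records fn on wrapper.__wrapped__
    def wrapper(*args, **kwargs):
        return fn(*args, **kwargs)
    return wrapper

@trace
def greet():
    return "hi"

# inspect.unwrap follows the __wrapped__ chain back to the original
# function -- the g != f and unwrap(g) == f relationship above.
print(inspect.unwrap(greet) is greet.__wrapped__)  # True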
def put_metadata(self, key, value, namespace='default'): """ Add metadata to the current active trace entity. Metadata is not indexed but can be later retrieved by BatchGetTraces API. :param str namespace: optional. Default namespace is `default`. It must be a string and prefix `AWS.` is reserved. :param str key: metadata key under specified namespace :param object value: any object that can be serialized into JSON string """ entity = self.get_trace_entity() if entity and entity.sampled: entity.put_metadata(key, value, namespace)
Add metadata to the current active trace entity. Metadata is not indexed but can be later retrieved by BatchGetTraces API. :param str namespace: optional. Default namespace is `default`. It must be a string and prefix `AWS.` is reserved. :param str key: metadata key under specified namespace :param object value: any object that can be serialized into JSON string
def to_json(self): """ Serialize object to json dict :return: dict """ res = dict() res['Count'] = self.count res['Messages'] = self.messages res['ForcedState'] = self.forced res['ForcedKeyboard'] = self.keyboard res['Entities'] = list() for item in self.entities: res['Entities'].append(item.to_json()) res['ForcedMessage'] = self.forced_message return res
Serialize object to json dict :return: dict
def page_url(self) -> str: """(:class:`str`) The canonical url of the page.""" url = self.attributes['canonicalurl'] assert isinstance(url, str) return url
(:class:`str`) The canonical url of the page.
def main():
    """
    Read the options given on the command line and do the required actions.

    This method is used in the entry_point `cast`.
    """
    opts = docopt(__doc__, version="cast 0.1")
    cast = pychromecast.PyChromecast(CHROMECAST_HOST)
    ramp = cast.get_protocol(pychromecast.PROTOCOL_RAMP)

    # Wait for ramp connection to be initted.
    time.sleep(SLEEP_TIME)

    if ramp is None:
        print('Chromecast is not up or current app does not handle RAMP.')
        return 1

    if opts['next']:
        ramp.next()
    elif opts['pause']:
        ramp.pause()
    elif opts['play']:
        ramp.play()
    elif opts['toggle']:
        ramp.playpause()
    elif opts['seek']:
        ramp.seek(opts['<second>'])
    elif opts['rewind']:
        ramp.rewind()
    elif opts['status']:
        _status_command(cast, ramp)
    elif opts['volume']:
        _volume_command(ramp, opts['<value>'])

    # Wait for command to be sent.
    time.sleep(SLEEP_TIME)
Read the options given on the command line and do the required actions. This method is used in the entry_point `cast`.
def batch(self, timelimit=None): """ Run the flow in batch mode, return exit status of the job script. Requires a manager.yml file and a batch_adapter adapter. Args: timelimit: Time limit (int with seconds or string with time given with the slurm convention: "days-hours:minutes:seconds"). If timelimit is None, the default value specified in the `batch_adapter` entry of `manager.yml` is used. """ from .launcher import BatchLauncher # Create a batch dir from the flow.workdir. prev_dir = os.path.join(*self.workdir.split(os.path.sep)[:-1]) prev_dir = os.path.join(os.path.sep, prev_dir) workdir = os.path.join(prev_dir, os.path.basename(self.workdir) + "_batch") return BatchLauncher(workdir=workdir, flows=self).submit(timelimit=timelimit)
Run the flow in batch mode, return exit status of the job script. Requires a manager.yml file and a batch_adapter adapter. Args: timelimit: Time limit (int with seconds or string with time given with the slurm convention: "days-hours:minutes:seconds"). If timelimit is None, the default value specified in the `batch_adapter` entry of `manager.yml` is used.
def describe_change_set(awsclient, change_set_name, stack_name): """Print out the change_set to console. This needs to run create_change_set first. :param awsclient: :param change_set_name: :param stack_name: """ client = awsclient.get_client('cloudformation') status = None while status not in ['CREATE_COMPLETE', 'FAILED']: response = client.describe_change_set( ChangeSetName=change_set_name, StackName=stack_name) status = response['Status'] # print('##### %s' % status) if status == 'FAILED': print(response['StatusReason']) elif status == 'CREATE_COMPLETE': for change in response['Changes']: print(json2table(change['ResourceChange']))
Print out the change_set to console. This needs to run create_change_set first. :param awsclient: :param change_set_name: :param stack_name:
def rpc_get_usages(self, filename, source, offset): """Return the uses of the symbol at offset. Returns a list of occurrences of the symbol, as dicts with the fields name, filename, and offset. """ line, column = pos_to_linecol(source, offset) uses = run_with_debug(jedi, 'usages', source=source, line=line, column=column, path=filename, encoding='utf-8') if uses is None: return None result = [] for use in uses: if use.module_path == filename: offset = linecol_to_pos(source, use.line, use.column) elif use.module_path is not None: with open(use.module_path) as f: text = f.read() offset = linecol_to_pos(text, use.line, use.column) result.append({"name": use.name, "filename": use.module_path, "offset": offset}) return result
Return the uses of the symbol at offset. Returns a list of occurrences of the symbol, as dicts with the fields name, filename, and offset.
def birch(args): """ %prog birch seqids layout Plot birch macro-synteny, with an embedded phylogenetic tree to the right. """ p = OptionParser(birch.__doc__) opts, args, iopts = p.set_image_options(args, figsize="8x6") if len(args) != 2: sys.exit(not p.print_help()) seqids, layout = args fig = plt.figure(1, (iopts.w, iopts.h)) root = fig.add_axes([0, 0, 1, 1]) K = Karyotype(fig, root, seqids, layout) L = K.layout xs = .79 dt = dict(rectangle=False, circle=False) # Embed a phylogenetic tree to the right coords = {} coords["Amborella"] = (xs, L[0].y) coords["Vitis"] = (xs, L[1].y) coords["Prunus"] = (xs, L[2].y) coords["Betula"] = (xs, L[3].y) coords["Populus"] = (xs, L[4].y) coords["Arabidopsis"] = (xs, L[5].y) coords["fabids"] = join_nodes(root, coords, "Prunus", "Betula", xs, **dt) coords["malvids"] = join_nodes(root, coords, \ "Populus", "Arabidopsis", xs, **dt) coords["rosids"] = join_nodes(root, coords, "fabids", "malvids", xs, **dt) coords["eudicots"] = join_nodes(root, coords, "rosids", "Vitis", xs, **dt) coords["angiosperm"] = join_nodes(root, coords, \ "eudicots", "Amborella", xs, **dt) # Show branch length branch_length(root, coords["Amborella"], coords["angiosperm"], ">160.0") branch_length(root, coords["eudicots"], coords["angiosperm"], ">78.2", va="top") branch_length(root, coords["Vitis"], coords["eudicots"], "138.5") branch_length(root, coords["rosids"], coords["eudicots"], "19.8", va="top") branch_length(root, coords["Prunus"], coords["fabids"], "104.2", ha="right", va="top") branch_length(root, coords["Arabidopsis"], coords["malvids"], "110.2", va="top") branch_length(root, coords["fabids"], coords["rosids"], "19.8", ha="right", va="top") branch_length(root, coords["malvids"], coords["rosids"], "8.5", va="top") root.set_xlim(0, 1) root.set_ylim(0, 1) root.set_axis_off() pf = "birch" image_name = pf + "." + iopts.format savefig(image_name, dpi=iopts.dpi, iopts=iopts)
%prog birch seqids layout Plot birch macro-synteny, with an embedded phylogenetic tree to the right.
def p_DictionaryMember(p): """DictionaryMember : Type IDENTIFIER Default ";" """ p[0] = model.DictionaryMember(type=p[1], name=p[2], default=p[3])
DictionaryMember : Type IDENTIFIER Default ";"
def url(self): """Return the admin url of the object.""" return urlresolvers.reverse( "admin:%s_%s_change" % (self.content_type.app_label, self.content_type.model), args = (self.get_object().uid,))
Return the admin url of the object.
def _reprJSON(self):
    """Returns a JSON serializable representation of a ``MzmlPrecursor`` class instance. Use :func:`maspy.core.MzmlPrecursor._fromJSON()` to generate a new ``MzmlPrecursor`` instance from the return value.

    :returns: a JSON serializable python object
    """
    return {'__MzmlPrecursor__': (self.spectrumRef, self.activation,
                                  self.isolationWindow,
                                  self.selectedIonList
                                  )
            }
Returns a JSON serializable representation of a ``MzmlPrecursor`` class instance. Use :func:`maspy.core.MzmlPrecursor._fromJSON()` to generate a new ``MzmlPrecursor`` instance from the return value.

:returns: a JSON serializable python object
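A hedged round-trip sketch of the serialize/deserialize pair implied by the docstring; the field contents are placeholders, not real mzML data, and the real _fromJSON would rebuild a class instance rather than a tuple.

import json

payload = {'__MzmlPrecursor__': ('scan=101', {'type': 'CID'},
                                 {'target mz': 445.12}, [])}
text = json.dumps(payload)

def object_hook(d):
    # A _fromJSON-style hook would dispatch on the marker key.
    if '__MzmlPrecursor__' in d:
        return tuple(d['__MzmlPrecursor__'])  # stand-in for the class
    return d

print(json.loads(text, object_hook=object_hook))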
def _fast_read(self, infile): """Function for fast reading from sensor files.""" infile.seek(0) return(int(infile.read().decode().strip()))
Function for fast reading from sensor files.
def load_directory(self, top_path, followlinks): """ Traverse top_path directory and save patterns in any .ddsignore files found. :param top_path: str: directory name we should traverse looking for ignore files :param followlinks: boolean: should we traverse symbolic links """ for dir_name, child_dirs, child_files in os.walk(top_path, followlinks=followlinks): for child_filename in child_files: if child_filename == DDS_IGNORE_FILENAME: pattern_lines = self._read_non_empty_lines(dir_name, child_filename) self.add_patterns(dir_name, pattern_lines)
Traverse top_path directory and save patterns in any .ddsignore files found. :param top_path: str: directory name we should traverse looking for ignore files :param followlinks: boolean: should we traverse symbolic links
def tokens2ids(tokens: Iterable[str], vocab: Dict[str, int]) -> List[int]: """ Returns sequence of integer ids given a sequence of tokens and vocab. :param tokens: List of string tokens. :param vocab: Vocabulary (containing UNK symbol). :return: List of word ids. """ return [vocab.get(w, vocab[C.UNK_SYMBOL]) for w in tokens]
Returns sequence of integer ids given a sequence of tokens and vocab. :param tokens: List of string tokens. :param vocab: Vocabulary (containing UNK symbol). :return: List of word ids.
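A tiny worked example; "<unk>" stands in for C.UNK_SYMBOL, whose actual value lives in the surrounding constants module.

UNK = "<unk>"
vocab = {UNK: 0, "the": 1, "cat": 2}
tokens = ["the", "dog", "cat"]
print([vocab.get(w, vocab[UNK]) for w in tokens])  # [1, 0, 2]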
def run_npm(self): """ h/t https://github.com/elbaschid/virtual-node/blob/master/setup.py """ for name, version in npm_dependencies.items(): # packages are installed globally to make sure that they are # installed in the virtualenv rather than the current directory. # it is also necessary for packages containing scripts, e.g. less dep_name = '%s@%s' % (name, version) self.run_cmd(['npm', 'install', '-g', dep_name])
h/t https://github.com/elbaschid/virtual-node/blob/master/setup.py
def add_members(self, users=None, role=TeamRoles.MEMBER):
    """Add members to the team.

    :param users: list of users, either `User` objects or user ids
    :type users: List of `User` or List of pk
    :param role: (optional) role of the users to add (default `TeamRoles.MEMBER`)
    :type role: basestring
    :raises IllegalArgumentError: when providing incorrect roles

    Example
    -------
    >>> my_team = client.team(name='My own team')
    >>> other_user = client.users(name='That other person')
    >>> myself = client.users(name='myself')
    >>> my_team.add_members([myself], role=TeamRoles.MANAGER)
    >>> my_team.add_members([other_user], role=TeamRoles.MEMBER)
    """
    if role and role not in TeamRoles.values():
        raise IllegalArgumentError("role should be one of `TeamRoles` {}, got '{}'".format(TeamRoles.values(), role))
    if not users or not isinstance(users, (list, tuple, set)):
        raise IllegalArgumentError("users should be a list of user_ids or `User` objects, got '{}'".
                                   format(users))
    update_dict = dict(role=role)
    if all(isinstance(user, int) for user in users):
        update_dict['users'] = users
    elif all(isinstance(user, User) for user in users):
        update_dict['users'] = [user.id for user in users]
    else:
        raise IllegalArgumentError("All users should be a list of user_ids or `User` objects, got '{}'".
                                   format(users))

    self._update('team_add_members', team_id=self.id, update_dict=update_dict)
Add members to the team.

:param users: list of users, either `User` objects or user ids
:type users: List of `User` or List of pk
:param role: (optional) role of the users to add (default `TeamRoles.MEMBER`)
:type role: basestring
:raises IllegalArgumentError: when providing incorrect roles

Example
-------
>>> my_team = client.team(name='My own team')
>>> other_user = client.users(name='That other person')
>>> myself = client.users(name='myself')
>>> my_team.add_members([myself], role=TeamRoles.MANAGER)
>>> my_team.add_members([other_user], role=TeamRoles.MEMBER)
def single_violation(self, column=None, value=None, **kwargs): """ A single event violation is a one-time event that occurred on a fixed date, and is associated with one permitted facility. >>> PCS().single_violation('single_event_viol_date', '16-MAR-01') """ return self._resolve_call('PCS_SINGLE_EVENT_VIOL', column, value, **kwargs)
A single event violation is a one-time event that occurred on a fixed date, and is associated with one permitted facility. >>> PCS().single_violation('single_event_viol_date', '16-MAR-01')
def _add_genetic_models(self, variant_obj, info_dict): """Add the genetic models found Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary """ genetic_models_entry = info_dict.get('GeneticModels') if genetic_models_entry: genetic_models = [] for family_annotation in genetic_models_entry.split(','): for genetic_model in family_annotation.split(':')[-1].split('|'): genetic_models.append(genetic_model) logger.debug("Updating genetic models to: {0}".format( ', '.join(genetic_models))) variant_obj.genetic_models = genetic_models
Add the genetic models found Args: variant_obj (puzzle.models.Variant) info_dict (dict): A info dictionary
def _prep_acl_for_compare(ACL): ''' Prepares the ACL returned from the AWS API for comparison with a given one. ''' ret = copy.deepcopy(ACL) ret['Owner'] = _normalize_user(ret['Owner']) for item in ret.get('Grants', ()): item['Grantee'] = _normalize_user(item.get('Grantee')) return ret
Prepares the ACL returned from the AWS API for comparison with a given one.
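_normalize_user is not shown; a plausible normalization, stated here as an assumption, keeps only the fields S3 treats as identity (DisplayName can be absent or localized, so comparing it would cause false mismatches).

import copy

def _normalize_user(user):
    # Assumed behavior for illustration: drop the unstable DisplayName.
    return {k: v for k, v in (user or {}).items() if k != 'DisplayName'}

acl = {'Owner': {'ID': 'abc', 'DisplayName': 'Alice'}, 'Grants': []}
prepared = copy.deepcopy(acl)  # deepcopy so the caller's ACL is untouched
prepared['Owner'] = _normalize_user(prepared['Owner'])
print(prepared)  # {'Owner': {'ID': 'abc'}, 'Grants': []}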
def handle(client_message, handle_event_imap_invalidation=None, handle_event_imap_batch_invalidation=None, to_object=None): """ Event handler """ message_type = client_message.get_message_type() if message_type == EVENT_IMAPINVALIDATION and handle_event_imap_invalidation is not None: key = None if not client_message.read_bool(): key = client_message.read_data() handle_event_imap_invalidation(key=key) if message_type == EVENT_IMAPBATCHINVALIDATION and handle_event_imap_batch_invalidation is not None: keys_size = client_message.read_int() keys = [] for _ in range(0, keys_size): keys_item = client_message.read_data() keys.append(keys_item) handle_event_imap_batch_invalidation(keys=keys)
Event handler
def apply_operation(self, symmop): """ Apply a symmetry operation to the molecule. Args: symmop (SymmOp): Symmetry operation to apply. """ def operate_site(site): new_cart = symmop.operate(site.coords) return Site(site.species, new_cart, properties=site.properties) self._sites = [operate_site(s) for s in self._sites]
Apply a symmetry operation to the molecule. Args: symmop (SymmOp): Symmetry operation to apply.
def handle_label_relation(self, line: str, position: int, tokens: ParseResults) -> ParseResults: """Handle statements like ``p(X) label "Label for X"``. :raises: RelabelWarning """ subject_node_dsl = self.ensure_node(tokens[SUBJECT]) description = tokens[OBJECT] if self.graph.has_node_description(subject_node_dsl): raise RelabelWarning( line_number=self.get_line_number(), line=line, position=position, node=self.graph.node, old_label=self.graph.get_node_description(subject_node_dsl), new_label=description ) self.graph.set_node_description(subject_node_dsl, description) return tokens
Handle statements like ``p(X) label "Label for X"``. :raises: RelabelWarning
def _validate_bag(self, bag, **kwargs): """ Validate BagIt (checksums, payload.oxum etc) """ failed = None try: bag.validate(**kwargs) except BagValidationError as e: failed = e # for d in e.details: # if isinstance(d, ChecksumMismatch): # log.error("Validation Error: expected %s to have %s checksum of %s but found %s", d.path, d.algorithm, d.expected, d.found) # else: # log.error("Validation Error: %s", d) if failed: raise BagValidationError("%s" % failed)
Validate BagIt (checksums, payload.oxum etc)
def _process_added_port_event(self, port_name): """Callback for added ports.""" LOG.info("Hyper-V VM vNIC added: %s", port_name) self._added_ports.add(port_name)
Callback for added ports.
def parse_tweet(raw_tweet, source, now=None): """ Parses a single raw tweet line from a twtxt file and returns a :class:`Tweet` object. :param str raw_tweet: a single raw tweet line :param Source source: the source of the given tweet :param Datetime now: the current datetime :returns: the parsed tweet :rtype: Tweet """ if now is None: now = datetime.now(timezone.utc) raw_created_at, text = raw_tweet.split("\t", 1) created_at = parse_iso8601(raw_created_at) if created_at > now: raise ValueError("Tweet is from the future") return Tweet(click.unstyle(text.strip()), created_at, source)
Parses a single raw tweet line from a twtxt file and returns a :class:`Tweet` object. :param str raw_tweet: a single raw tweet line :param Source source: the source of the given tweet :param Datetime now: the current datetime :returns: the parsed tweet :rtype: Tweet
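A worked example of the line format the parser expects: an ISO 8601 timestamp, a tab, then the text (Tweet, Source and parse_iso8601 are twtxt internals and are not needed for the split itself).

raw = "2017-02-05T12:30:00+00:00\tHello, twtxt!"
raw_created_at, text = raw.split("\t", 1)
print(raw_created_at)  # 2017-02-05T12:30:00+00:00
print(text)            # Hello, twtxt!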
def attention_lm_moe_large(): """Large model for distributed training. Over 1B parameters, so requires multi-gpu training due to memory requirements. on lm1b_32k: After 45K steps on 8 GPUs (synchronous): eval_log_ppl_per_token = 3.18 eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9 Returns: an hparams object. """ hparams = attention_lm_moe_base() hparams.num_hidden_layers = 5 hparams.moe_layers = "3" hparams.hidden_size = 1024 hparams.num_heads = 16 hparams.filter_size = 4096 hparams.moe_hidden_sizes = "4096" hparams.moe_num_experts = 128 hparams.layer_prepostprocess_dropout = 0.2 return hparams
Large model for distributed training. Over 1B parameters, so requires multi-gpu training due to memory requirements. on lm1b_32k: After 45K steps on 8 GPUs (synchronous): eval_log_ppl_per_token = 3.18 eval_ppl_per_word = exp(1.107893 * eval_log_ppl_per_token) = 33.9 Returns: an hparams object.
def _get_app_version(self, app_config):
    """
    Some plugins ship multiple applications and extensions. However, all
    of them share the same version, because they are released together.
    That's why only the top-level module is used to fetch version
    information.
    """
    base_name = app_config.__module__.split('.')[0]
    module = __import__(base_name)
    return getattr(module, '__version__', 'N/A')
Some plugins ship multiple applications and extensions. However, all of them share the same version, because they are released together. That's why only the top-level module is used to fetch version information.
def get_portchannel_info_by_intf_output_lacp_actor_brcd_state(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_portchannel_info_by_intf = ET.Element("get_portchannel_info_by_intf") config = get_portchannel_info_by_intf output = ET.SubElement(get_portchannel_info_by_intf, "output") lacp = ET.SubElement(output, "lacp") actor_brcd_state = ET.SubElement(lacp, "actor-brcd-state") actor_brcd_state.text = kwargs.pop('actor_brcd_state') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code