code: string (lengths 75 to 104k)
docstring: string (lengths 1 to 46.9k)
def Run(self, unused_arg): """This kills us with no cleanups.""" logging.debug("Disabling service") msg = "Service disabled." if hasattr(sys, "frozen"): grr_binary = os.path.abspath(sys.executable) elif __file__: grr_binary = os.path.abspath(__file__) try: os.remove(grr_binary) except OSError: msg = "Could not remove binary." try: os.remove(config.CONFIG["Client.plist_path"]) except OSError: if "Could not" in msg: msg += " Could not remove plist file." else: msg = "Could not remove plist file." # Get the directory we are running in from pyinstaller. This is either the # GRR directory which we should delete (onedir mode) or a generated temp # directory which we can delete without problems in onefile mode. directory = getattr(sys, "_MEIPASS", None) if directory: shutil.rmtree(directory, ignore_errors=True) self.SendReply(rdf_protodict.DataBlob(string=msg))
This kills us with no cleanups.
def from_taxtable(cls, taxtable_fp): """ Generate a node from an open handle to a taxtable, as generated by ``taxit taxtable`` """ r = csv.reader(taxtable_fp) headers = next(r) rows = (collections.OrderedDict(list(zip(headers, i))) for i in r) row = next(rows) root = cls(rank=row['rank'], tax_id=row[ 'tax_id'], name=row['tax_name']) path_root = headers.index('root') root.ranks = headers[path_root:] for row in rows: rank, tax_id, name = [ row[i] for i in ('rank', 'tax_id', 'tax_name')] path = [_f for _f in list(row.values())[path_root:] if _f] parent = root.path(path[:-1]) parent.add_child(cls(rank, tax_id, name=name)) return root
Generate a node from an open handle to a taxtable, as generated by ``taxit taxtable``
def vi_return_param(self, index): """ Wrapper function for selecting appropriate latent variable for variational inference Parameters ---------- index : int 0 or 1 depending on which latent variable Returns ---------- The appropriate indexed parameter """ if index == 0: return self.mu0 elif index == 1: return np.log(self.sigma0)
Wrapper function for selecting appropriate latent variable for variational inference Parameters ---------- index : int 0 or 1 depending on which latent variable Returns ---------- The appropriate indexed parameter
def decode_call(self, call): """ Replace callable tokens with callable names. :param call: Encoded callable name :type call: string :rtype: string """ # Callable name is None when callable is part of exclude list if call is None: return None itokens = call.split(self._callables_separator) odict = {} for key, value in self._clut.items(): if value in itokens: odict[itokens[itokens.index(value)]] = key return self._callables_separator.join([odict[itoken] for itoken in itokens])
Replace callable tokens with callable names. :param call: Encoded callable name :type call: string :rtype: string
def parse_number_factory(alg, sep, pre_sep): """ Create a function that will format a number into a tuple. Parameters ---------- alg : ns enum Indicate how to format the *bytes*. sep : str The string character to be inserted before the number in the returned tuple. pre_sep : str In the event that *alg* contains ``UNGROUPLETTERS``, this string will be placed in a single-element tuple at the front of the returned nested tuple. Returns ------- func : callable A function that accepts numeric input (e.g. *int* or *float*) and returns a tuple containing the number with the leading string *sep*. Intended to be used as the *num_func* argument to *natsort_key*. See Also -------- natsort_key """ nan_replace = float("+inf") if alg & ns.NANLAST else float("-inf") def func(val, _nan_replace=nan_replace, _sep=sep): """Given a number, place it in a tuple with a leading null string.""" return _sep, _nan_replace if val != val else val # Return the function, possibly wrapping in tuple if PATH is selected. if alg & ns.PATH and alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA: return lambda x: (((pre_sep,), func(x)),) elif alg & ns.UNGROUPLETTERS and alg & ns.LOCALEALPHA: return lambda x: ((pre_sep,), func(x)) elif alg & ns.PATH: return lambda x: (func(x),) else: return func
Create a function that will format a number into a tuple. Parameters ---------- alg : ns enum Indicate how to format the *bytes*. sep : str The string character to be inserted before the number in the returned tuple. pre_sep : str In the event that *alg* contains ``UNGROUPLETTERS``, this string will be placed in a single-element tuple at the front of the returned nested tuple. Returns ------- func : callable A function that accepts numeric input (e.g. *int* or *float*) and returns a tuple containing the number with the leading string *sep*. Intended to be used as the *num_func* argument to *natsort_key*. See Also -------- natsort_key
def add_association_to_graph(self): """ Overrides Association by including bnode support The reified relationship between a genotype (or any genotype part) and a phenotype is decorated with some provenance information. This makes the assumption that both the genotype and phenotype are classes. currently hardcoded to map the annotation to the monarch namespace :param g: :return: """ Assoc.add_association_to_graph(self) # make a blank stage if self.start_stage_id or self.end_stage_id is not None: stage_process_id = '-'.join((str(self.start_stage_id), str(self.end_stage_id))) stage_process_id = '_:'+re.sub(r':', '', stage_process_id) self.model.addIndividualToGraph( stage_process_id, None, self.globaltt['developmental_process']) self.graph.addTriple( stage_process_id, self.globaltt['starts during'], self.start_stage_id) self.graph.addTriple( stage_process_id, self.globaltt['ends during'], self.end_stage_id) self.stage_process_id = stage_process_id self.graph.addTriple( self.assoc_id, self.globaltt['has_qualifier'], self.stage_process_id) if self.environment_id is not None: self.graph.addTriple( self.assoc_id, self.globaltt['has_qualifier'], self.environment_id) return
Overrides Association by including bnode support The reified relationship between a genotype (or any genotype part) and a phenotype is decorated with some provenance information. This makes the assumption that both the genotype and phenotype are classes. currently hardcoded to map the annotation to the monarch namespace :param g: :return:
def tau_reduction(ms, rate, n_per_decade): """Reduce the number of taus to maximum of n per decade (Helper function) takes in a tau list and reduces the number of taus to a maximum amount per decade. This is only useful if more than the "decade" and "octave" but less than the "all" taus are wanted. E.g. to show certain features of the data one might want 100 points per decade. NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and will also remove some points in this range, which is usually fine. Typical use would be something like: (data,m,taus)=tau_generator(data,rate,taus="all") (m,taus)=tau_reduction(m,rate,n_per_decade) Parameters ---------- ms: array of integers List of m values (assumed to be an "all" list) to remove points from. rate: float Sample rate of data in Hz. Time interval between measurements is 1/rate seconds. Used to convert to taus. n_per_decade: int Number of ms/taus to keep per decade. Returns ------- m: np.array Reduced list of m values taus: np.array Reduced list of tau values """ ms = np.int64(ms) keep = np.bool8(np.rint(n_per_decade*np.log10(ms[1:])) - np.rint(n_per_decade*np.log10(ms[:-1]))) # Adjust ms size to fit above-defined mask ms = ms[:-1] assert len(ms) == len(keep) ms = ms[keep] taus = ms/float(rate) return ms, taus
Reduce the number of taus to maximum of n per decade (Helper function) takes in a tau list and reduces the number of taus to a maximum amount per decade. This is only useful if more than the "decade" and "octave" but less than the "all" taus are wanted. E.g. to show certain features of the data one might want 100 points per decade. NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and will also remove some points in this range, which is usually fine. Typical use would be something like: (data,m,taus)=tau_generator(data,rate,taus="all") (m,taus)=tau_reduction(m,rate,n_per_decade) Parameters ---------- ms: array of integers List of m values (assumed to be an "all" list) to remove points from. rate: float Sample rate of data in Hz. Time interval between measurements is 1/rate seconds. Used to convert to taus. n_per_decade: int Number of ms/taus to keep per decade. Returns ------- m: np.array Reduced list of m values taus: np.array Reduced list of tau values
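A numpy-only sketch of the decade mask used above, independent of the surrounding library (the sample values are arbitrary):
import numpy as np

ms = np.arange(1, 1001)  # an "all"-style list of averaging factors
n_per_decade = 10
# keep a point wherever the rounded value of n_per_decade * log10(m) changes
keep = (np.rint(n_per_decade * np.log10(ms[1:]))
        - np.rint(n_per_decade * np.log10(ms[:-1]))).astype(bool)
reduced = ms[:-1][keep]
print(reduced[:12])  # small m values are all kept, as the docstring notes
print(len(reduced))  # roughly n_per_decade points per decade further out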
def p_reset(self, program): """ reset : RESET primary """ program[0] = node.Reset([program[2]]) self.verify_reg(program[2], 'qreg')
reset : RESET primary
def _update_card_file_location(self, card_name, new_directory): """ Moves card to new gssha working directory """ with tmp_chdir(self.gssha_directory): file_card = self.project_manager.getCard(card_name) if file_card: if file_card.value: original_location = file_card.value.strip("'").strip('"') new_location = os.path.join(new_directory, os.path.basename(original_location)) file_card.value = '"{0}"'.format(os.path.basename(original_location)) try: move(original_location, new_location) except OSError as ex: log.warning(ex) pass
Moves card to new gssha working directory
def multiple_packaged_versions(package_name): """ Look through built package directory and see if there are multiple versions there """ dist_files = os.listdir('dist') versions = set() for filename in dist_files: version = funcy.re_find(r'{}-(.+).tar.gz'.format(package_name), filename) if version: versions.add(version) return len(versions) > 1
Look through built package directory and see if there are multiple versions there
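The same check can be exercised with only the standard-library `re` module; the package name and file listing below are made up for illustration:
import re

dist_files = ["mypkg-1.0.0.tar.gz", "mypkg-1.0.1.tar.gz", "mypkg-1.0.1-py3-none-any.whl"]
versions = set()
for filename in dist_files:
    match = re.search(r"mypkg-(.+)\.tar\.gz", filename)
    if match:
        versions.add(match.group(1))
print(len(versions) > 1)  # True: both 1.0.0 and 1.0.1 sdists are present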
def write_metadata(self, key, values): """ write out a meta data array to the key as a fixed-format Series Parameters ---------- key : string values : ndarray """ values = Series(values) self.parent.put(self._get_metadata_path(key), values, format='table', encoding=self.encoding, errors=self.errors, nan_rep=self.nan_rep)
write out a meta data array to the key as a fixed-format Series Parameters ---------- key : string values : ndarray
def create_tables(database): '''Create all tables in the given database''' logging.getLogger(__name__).debug("Creating missing database tables") database.connect() database.create_tables([User, Group, UserToGroup, GroupToCapability, Capability], safe=True)
Create all tables in the given database
def _run_up(self, path, migration_file, batch, pretend=False): """ Run "up" a migration instance. :type migration_file: str :type batch: int :type pretend: bool """ migration = self._resolve(path, migration_file) if pretend: return self._pretend_to_run(migration, 'up') migration.up() self._repository.log(migration_file, batch) self._note('<info>✓ Migrated</info> %s' % migration_file)
Run "up" a migration instance. :type migration_file: str :type batch: int :type pretend: bool
def solar(filename_solar, solar_factor): ''' read solar abundances from filename_solar. Parameters ---------- filename_solar : string The file name. solar_factor : float The correction factor to apply, in case filename_solar is not solar, but some file used to get initial abundances at metallicity lower than solar. However, notice that this is really rude, since alpha-enahncements and things like that are not properly considered. Only H and He4 are not multiplied. So, for publications PLEASE use proper filename_solar at...solar, and use solar_factor = 1. Marco ''' f0=open(filename_solar) sol=f0.readlines() f0.close sol[0].split(" ") # Now read in the whole file and create a hashed array: global names_sol names_sol=[] global z_sol z_sol=[] yps=np.zeros(len(sol)) mass_number=np.zeros(len(sol)) for i in range(len(sol)): z_sol.append(int(sol[i][1:3])) names_sol.extend([sol[i].split(" ")[0][4:]]) yps[i]=float(sol[i].split(" ")[1]) * solar_factor try: mass_number[i]=int(names_sol[i][2:5]) except ValueError: print("WARNING:") print("This initial abundance file uses an element name that does") print("not contain the mass number in the 3rd to 5th position.") print("It is assumed that this is the proton and we will change") print("the name to 'h 1' to be consistent with the notation used in") print("iniab.dat files") names_sol[i]='h 1' mass_number[i]=int(names_sol[i][2:5]) if mass_number[i] == 1 or mass_number[i] == 4: yps[i] = old_div(yps[i],solar_factor) # convert 'h 1' in prot, not needed any more?? #names_sol[0] = 'prot ' # now zip them together: global solar_abundance solar_abundance={} for a,b in zip(names_sol,yps): solar_abundance[a] = b z_bismuth = 83 global solar_elem_abund solar_elem_abund = np.zeros(z_bismuth) for i in range(z_bismuth): dummy = 0. for j in range(len(solar_abundance)): if z_sol[j] == i+1: dummy = dummy + float(solar_abundance[names_sol[j]]) solar_elem_abund[i] = dummy
read solar abundances from filename_solar. Parameters ---------- filename_solar : string The file name. solar_factor : float The correction factor to apply, in case filename_solar is not solar, but some file used to get initial abundances at metallicity lower than solar. However, notice that this is really crude, since alpha-enhancements and things like that are not properly considered. Only H and He4 are not multiplied. So, for publications PLEASE use proper filename_solar at...solar, and use solar_factor = 1. Marco
def show_status(self): """ dumps the status of the agent """ txt = 'Agent Status:\n' print(txt) txt += "start_x = " + str(self.start_x) + "\n" txt += "start_y = " + str(self.start_y) + "\n" txt += "target_x = " + str(self.target_x) + "\n" txt += "target_y = " + str(self.target_y) + "\n" txt += "current_x = " + str(self.current_x) + "\n" txt += "current_y = " + str(self.current_y) + "\n" print(self.grd) return txt
dumps the status of the agent
def bar_amplitude(self): "Return bar amplitude" res = (self.high - self.low) / self.low res.name = 'bar_amplitude' return res
Return bar amplitude
def distance(self, there): """ Calculate the distance from this location to there. Parameters ---------- there : Location Returns ------- distance_in_m : float """ return haversine_distance((self.latitude, self.longitude), (there.latitude, there.longitude))
Calculate the distance from this location to there. Parameters ---------- there : Location Returns ------- distance_in_m : float
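The method delegates to a `haversine_distance` helper that is not shown in this row; a self-contained sketch of the usual great-circle formula, with example coordinates chosen arbitrarily, might look like:
import math

def haversine_distance(origin, destination):
    """Great-circle distance in meters between two (latitude, longitude) pairs."""
    lat1, lon1 = origin
    lat2, lon2 = destination
    radius_m = 6371000.0  # mean Earth radius
    dphi = math.radians(lat2 - lat1)
    dlam = math.radians(lon2 - lon1)
    a = (math.sin(dphi / 2) ** 2
         + math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) * math.sin(dlam / 2) ** 2)
    return 2 * radius_m * math.asin(math.sqrt(a))

print(haversine_distance((52.5200, 13.4050), (48.1351, 11.5820)))  # Berlin to Munich, ~504 km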
def login(self, username, password, load=True): """ Set the authentication data in the object, and if load is True (default is True) it also retrieves the ip list and the vm list in order to build the internal objects list. @param (str) username: username of the cloud @param (str) password: password of the cloud @param (bool) load: whether to pre-cache the objects. @return: None """ self.auth = Auth(username, password) if load is True: self.get_ip() self.get_servers()
Set the authentication data in the object, and if load is True (default is True) it also retrieves the ip list and the vm list in order to build the internal objects list. @param (str) username: username of the cloud @param (str) password: password of the cloud @param (bool) load: whether to pre-cache the objects. @return: None
def drawDisplay(self, painter, option, rect, text): """ Overloads the drawDisplay method to render HTML if the rich text \ information is set to true. :param painter | <QtGui.QPainter> option | <QtGui.QStyleOptionItem> rect | <QtCore.QRect> text | <str> """ if self.showRichText(): # create the document doc = QtGui.QTextDocument() doc.setTextWidth(float(rect.width())) doc.setHtml(text) # draw the contents painter.translate(rect.x(), rect.y()) doc.drawContents(painter, QtCore.QRectF(0, 0, float(rect.width()), float(rect.height()))) painter.translate(-rect.x(), -rect.y()) else: if type(text).__name__ not in ('str', 'unicode', 'QString'): text = nativestring(text) metrics = QtGui.QFontMetrics(option.font) text = metrics.elidedText(text, QtCore.Qt.TextElideMode(option.textElideMode), rect.width()) painter.setFont(option.font) painter.drawText(rect, int(option.displayAlignment), text)
Overloads the drawDisplay method to render HTML if the rich text \ information is set to true. :param painter | <QtGui.QPainter> option | <QtGui.QStyleOptionItem> rect | <QtCore.QRect> text | <str>
def _http_response(self, url, method, data=None, content_type=None, schema=None, **kwargs): """url -> full target url method -> method from requests data -> request body kwargs -> url formatting args """ if schema is None: schema = self.schema_2 header = { 'content-type': content_type or 'application/json', 'Accept': schema, } # Token specific part. We add the token in the header if necessary auth = self.auth token_required = auth.token_required token = auth.token desired_scope = auth.desired_scope scope = auth.scope if token_required: if not token or desired_scope != scope: logger.debug("Getting new token for scope: %s", desired_scope) auth.get_new_token() header['Authorization'] = 'Bearer %s' % self.auth.token if data and not content_type: data = json.dumps(data) path = url.format(**kwargs) logger.debug("%s %s", method.__name__.upper(), path) response = method(self.host + path, data=data, headers=header, **self.method_kwargs) logger.debug("%s %s", response.status_code, response.reason) response.raise_for_status() return response
url -> full target url method -> method from requests data -> request body kwargs -> url formatting args
def truncate_volume(self, volume, size): """Truncate a volume to a new, smaller size. :param volume: Name of the volume to truncate. :type volume: str :param size: Size in bytes, or string representing the size of the volume to be created. :type size: int or str :returns: A dictionary mapping "name" to volume and "size" to the volume's new size in bytes. :rtype: ResponseDict .. warning:: Data may be irretrievably lost in this operation. .. note:: A snapshot of the volume in its previous state is taken and immediately destroyed, but it is available for recovery for the 24 hours following the truncation. """ return self.set_volume(volume, size=size, truncate=True)
Truncate a volume to a new, smaller size. :param volume: Name of the volume to truncate. :type volume: str :param size: Size in bytes, or string representing the size of the volume to be created. :type size: int or str :returns: A dictionary mapping "name" to volume and "size" to the volume's new size in bytes. :rtype: ResponseDict .. warning:: Data may be irretrievably lost in this operation. .. note:: A snapshot of the volume in its previous state is taken and immediately destroyed, but it is available for recovery for the 24 hours following the truncation.
def absorb(self, other): """ For attributes of other whose value is not None, assign them to self. Update this document with the data from another document, if and only if the value is not None. """ if not isinstance(other, self.__class__): raise TypeError("`other` has to be an instance of %s!" % self.__class__) for attr, value in other.items(): if value is not None: setattr(self, attr, deepcopy(value))
For attributes of other whose value is not None, assign them to self. Update this document with the data from another document, if and only if the value is not None.
def _populate_input_for_name_id(self, config, record, context, data): """ Use a record found in LDAP to populate input for NameID generation. """ user_id = "" user_id_from_attrs = config['user_id_from_attrs'] for attr in user_id_from_attrs: if attr in record["attributes"]: value = record["attributes"][attr] if isinstance(value, list): # Use a default sort to ensure some predictability since the # LDAP directory server may return multi-valued attributes # in any order. value.sort() user_id += "".join(value) satosa_logging( logger, logging.DEBUG, "Added attribute {} with values {} to input for NameID".format(attr, value), context.state ) else: user_id += value satosa_logging( logger, logging.DEBUG, "Added attribute {} with value {} to input for NameID".format(attr, value), context.state ) if not user_id: satosa_logging( logger, logging.WARNING, "Input for NameID is empty so not overriding default", context.state ) else: data.subject_id = user_id satosa_logging( logger, logging.DEBUG, "Input for NameID is {}".format(data.subject_id), context.state )
Use a record found in LDAP to populate input for NameID generation.
def slang_date(self, locale="en"): """Returns human slang representation of date. Keyword Arguments: locale -- locale to translate to, e.g. 'fr' for French. (default: 'en' - English) """ dt = pendulum.instance(self.datetime()) try: return _translate(dt, locale) except KeyError: pass delta = humanize.time.abs_timedelta( timedelta(seconds=(self.epoch - now().epoch))) format_string = "DD MMM" if delta.days >= 365: format_string += " YYYY" return dt.format(format_string, locale=locale).title()
Returns human slang representation of date. Keyword Arguments: locale -- locale to translate to, e.g. 'fr' for French. (default: 'en' - English)
def dump(self, obj): """ Dumps the given object in the Java serialization format """ self.references = [] self.object_obj = obj self.object_stream = BytesIO() self._writeStreamHeader() self.writeObject(obj) return self.object_stream.getvalue()
Dumps the given object in the Java serialization format
def get_object(self, object_ids): """Get the value or values in the object store associated with the IDs. Return the values from the local object store for object_ids. This will block until all the values for object_ids have been written to the local object store. Args: object_ids (List[object_id.ObjectID]): A list of the object IDs whose values should be retrieved. """ # Make sure that the values are object IDs. for object_id in object_ids: if not isinstance(object_id, ObjectID): raise TypeError( "Attempting to call `get` on the value {}, " "which is not an ray.ObjectID.".format(object_id)) # Do an initial fetch for remote objects. We divide the fetch into # smaller fetches so as to not block the manager for a prolonged period # of time in a single call. plain_object_ids = [ plasma.ObjectID(object_id.binary()) for object_id in object_ids ] for i in range(0, len(object_ids), ray._config.worker_fetch_request_size()): self.raylet_client.fetch_or_reconstruct( object_ids[i:(i + ray._config.worker_fetch_request_size())], True) # Get the objects. We initially try to get the objects immediately. final_results = self.retrieve_and_deserialize(plain_object_ids, 0) # Construct a dictionary mapping object IDs that we haven't gotten yet # to their original index in the object_ids argument. unready_ids = { plain_object_ids[i].binary(): i for (i, val) in enumerate(final_results) if val is plasma.ObjectNotAvailable } if len(unready_ids) > 0: # Try reconstructing any objects we haven't gotten yet. Try to # get them until at least get_timeout_milliseconds # milliseconds passes, then repeat. while len(unready_ids) > 0: object_ids_to_fetch = [ plasma.ObjectID(unready_id) for unready_id in unready_ids.keys() ] ray_object_ids_to_fetch = [ ObjectID(unready_id) for unready_id in unready_ids.keys() ] fetch_request_size = ray._config.worker_fetch_request_size() for i in range(0, len(object_ids_to_fetch), fetch_request_size): self.raylet_client.fetch_or_reconstruct( ray_object_ids_to_fetch[i:(i + fetch_request_size)], False, self.current_task_id, ) results = self.retrieve_and_deserialize( object_ids_to_fetch, max([ ray._config.get_timeout_milliseconds(), int(0.01 * len(unready_ids)), ]), ) # Remove any entries for objects we received during this # iteration so we don't retrieve the same object twice. for i, val in enumerate(results): if val is not plasma.ObjectNotAvailable: object_id = object_ids_to_fetch[i].binary() index = unready_ids[object_id] final_results[index] = val unready_ids.pop(object_id) # If there were objects that we weren't able to get locally, # let the raylet know that we're now unblocked. self.raylet_client.notify_unblocked(self.current_task_id) assert len(final_results) == len(object_ids) return final_results
Get the value or values in the object store associated with the IDs. Return the values from the local object store for object_ids. This will block until all the values for object_ids have been written to the local object store. Args: object_ids (List[object_id.ObjectID]): A list of the object IDs whose values should be retrieved.
def register_listener(self, address, func): """Adds a listener for messages received on a specific address When a KNX message for this address is received from the KNX bus, the listener will be called as func(address, data). There can be multiple listeners for a given address """ try: listeners = self.address_listeners[address] except KeyError: listeners = [] self.address_listeners[address] = listeners if func not in listeners: listeners.append(func) return True
Adds a listener for messages received on a specific address When a KNX message for this address is received from the KNX bus, the listener will be called as func(address, data). There can be multiple listeners for a given address
def share_vm_image(self, vm_image_name, permission): ''' Share an already replicated OS image. This operation is only for publishers. You have to be registered as image publisher with Windows Azure to be able to call this. vm_image_name: The name of the virtual machine image to share permission: The sharing permission: public, msdn, or private ''' _validate_not_none('vm_image_name', vm_image_name) _validate_not_none('permission', permission) path = self._get_sharing_path_using_vm_image_name(vm_image_name) query = '&permission=' + permission path = path + '?' + query.lstrip('&') return self._perform_put( path, None, as_async=True, x_ms_version='2015-04-01' )
Share an already replicated OS image. This operation is only for publishers. You have to be registered as image publisher with Windows Azure to be able to call this. vm_image_name: The name of the virtual machine image to share permission: The sharing permission: public, msdn, or private
def get_rank_value(cls, name): """Returns the integer constant value for the given rank name. :param string name: the string rank name (E.g., 'HARDCODED'). :returns: the integer constant value of the rank. :rtype: int """ if name in cls._RANK_NAMES.values(): return getattr(cls, name, None) return None
Returns the integer constant value for the given rank name. :param string name: the string rank name (E.g., 'HARDCODED'). :returns: the integer constant value of the rank. :rtype: int
def datum_to_value(self, instance, datum): """Convert a given MAAS-side datum to a Python-side value. :param instance: The `Object` instance on which this field is currently operating. This method should treat it as read-only, for example to perform validation with regards to other fields. :param datum: The MAAS-side datum to validate and convert into a Python-side value. :return: A set of `cls` from the given datum. """ datum = self.map_func(instance, datum) if datum is None: return None local_data = None if self.reverse is not None: local_data = {} if self.reverse is undefined: local_data[instance.__class__.__name__.lower()] = instance else: local_data[self.reverse] = instance # Get the class from the bound origin. bound = getattr(instance._origin, self.cls) return bound(datum, local_data=local_data)
Convert a given MAAS-side datum to a Python-side value. :param instance: The `Object` instance on which this field is currently operating. This method should treat it as read-only, for example to perform validation with regards to other fields. :param datum: The MAAS-side datum to validate and convert into a Python-side value. :return: A set of `cls` from the given datum.
def from_zenity_tuple_str(zenity_tuple_str: str): """ Parser for Zenity output, which outputs a named tuple-like string: "rgb(R, G, B)", where R, G, B are base10 integers. @param zenity_tuple_str: tuple-like string: "rgb(r, g, b), where r, g, b are base10 integers. @return: ColourData instance @rtype: ColourData """ components = zenity_tuple_str.strip("rgb()").split(",") return ColourData(*map(int, components))
Parser for Zenity output, which outputs a named tuple-like string: "rgb(R, G, B)", where R, G, B are base10 integers. @param zenity_tuple_str: tuple-like string: "rgb(r, g, b), where r, g, b are base10 integers. @return: ColourData instance @rtype: ColourData
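A quick illustration of the `strip`/`split` parsing above on a representative Zenity string (the colour value is invented):
zenity_output = "rgb(18, 52, 86)"
components = zenity_output.strip("rgb()").split(",")
print(tuple(map(int, components)))  # (18, 52, 86); int() tolerates the leading spaces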
def get_config(self, name, default=_MISSING): """Get a configuration setting from this DeviceAdapter. See :meth:`AbstractDeviceAdapter.get_config`. """ val = self._config.get(name, default) if val is _MISSING: raise ArgumentError("DeviceAdapter config {} did not exist and no default".format(name)) return val
Get a configuration setting from this DeviceAdapter. See :meth:`AbstractDeviceAdapter.get_config`.
def _fetchBlock(self): """ internal use only. get a block of rows from the server and put in standby block. future enhancements: (1) locks for multithreaded access (protect from multiple calls) (2) allow for prefetch by use of separate thread """ # make sure that another block request is not standing if self._blockRequestInProgress : # need to wait here before returning... (TODO) return # make sure another block request has not completed meanwhile if self._standbyBlock is not None: return self._blockRequestInProgress = True fetchReq = TFetchResultsReq(operationHandle=self.operationHandle, orientation=TFetchOrientation.FETCH_NEXT, maxRows=self.arraysize) self._standbyBlock = self._fetch([],fetchReq) self._blockRequestInProgress = False return
internal use only. get a block of rows from the server and put in standby block. future enhancements: (1) locks for multithreaded access (protect from multiple calls) (2) allow for prefetch by use of separate thread
def protect(self, password=None, read_protect=False, protect_from=0): """Set password protection or permanent lock bits. If the *password* argument is None, all memory pages will be protected by setting the relevant lock bits (note that lock bits can not be reset). If valid NDEF management data is found, protect() also sets the NDEF write flag to read-only. All Tags of the NTAG21x family can alternatively be protected by password. If a *password* argument is provided, the protect() method writes the first 4 byte of the *password* string into the Tag's password (PWD) memory bytes and the following 2 byte of the *password* string into the password acknowledge (PACK) memory bytes. Factory default values are used if the *password* argument is an empty string. Lock bits are not set for password protection. The *read_protect* and *protect_from* arguments are only evaluated if *password* is not None. If *read_protect* is True, the memory protection bit (PROT) is set to require password verification also for reading of protected memory pages. The value of *protect_from* determines the first password protected memory page (one page is 4 byte) with the exception that the smallest set value is page 3 even if *protect_from* is smaller. """ args = (password, read_protect, protect_from) return super(NTAG21x, self).protect(*args)
Set password protection or permanent lock bits. If the *password* argument is None, all memory pages will be protected by setting the relevant lock bits (note that lock bits can not be reset). If valid NDEF management data is found, protect() also sets the NDEF write flag to read-only. All Tags of the NTAG21x family can alternatively be protected by password. If a *password* argument is provided, the protect() method writes the first 4 byte of the *password* string into the Tag's password (PWD) memory bytes and the following 2 byte of the *password* string into the password acknowledge (PACK) memory bytes. Factory default values are used if the *password* argument is an empty string. Lock bits are not set for password protection. The *read_protect* and *protect_from* arguments are only evaluated if *password* is not None. If *read_protect* is True, the memory protection bit (PROT) is set to require password verification also for reading of protected memory pages. The value of *protect_from* determines the first password protected memory page (one page is 4 byte) with the exception that the smallest set value is page 3 even if *protect_from* is smaller.
def createNetwork(dataSource): """Creates and returns a new Network with a sensor region reading data from 'dataSource'. There are two hierarchical levels, each with one SP and one TM. @param dataSource - A RecordStream containing the input data @returns a Network ready to run """ network = Network() # Create and add a record sensor and a SP region sensor = createRecordSensor(network, name=_RECORD_SENSOR, dataSource=dataSource) createSpatialPooler(network, name=_L1_SPATIAL_POOLER, inputWidth=sensor.encoder.getWidth()) # Link the SP region to the sensor input linkType = "UniformLink" linkParams = "" network.link(_RECORD_SENSOR, _L1_SPATIAL_POOLER, linkType, linkParams) # Create and add a TM region l1temporalMemory = createTemporalMemory(network, _L1_TEMPORAL_MEMORY) # Link SP region to TM region in the feedforward direction network.link(_L1_SPATIAL_POOLER, _L1_TEMPORAL_MEMORY, linkType, linkParams) # Add a classifier classifierParams = { # Learning rate. Higher values make it adapt faster. 'alpha': 0.005, # A comma separated list of the number of steps the # classifier predicts in the future. The classifier will # learn predictions of each order specified. 'steps': '1', # The specific implementation of the classifier to use # See SDRClassifierFactory#create for options 'implementation': 'py', # Diagnostic output verbosity control; # 0: silent; [1..6]: increasing levels of verbosity 'verbosity': 0} l1Classifier = network.addRegion(_L1_CLASSIFIER, "py.SDRClassifierRegion", json.dumps(classifierParams)) l1Classifier.setParameter('inferenceMode', True) l1Classifier.setParameter('learningMode', True) network.link(_L1_TEMPORAL_MEMORY, _L1_CLASSIFIER, linkType, linkParams, srcOutput="bottomUpOut", destInput="bottomUpIn") network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams, srcOutput="categoryOut", destInput="categoryIn") network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams, srcOutput="bucketIdxOut", destInput="bucketIdxIn") network.link(_RECORD_SENSOR, _L1_CLASSIFIER, linkType, linkParams, srcOutput="actValueOut", destInput="actValueIn") # Second Level l2inputWidth = l1temporalMemory.getSelf().getOutputElementCount("bottomUpOut") createSpatialPooler(network, name=_L2_SPATIAL_POOLER, inputWidth=l2inputWidth) network.link(_L1_TEMPORAL_MEMORY, _L2_SPATIAL_POOLER, linkType, linkParams) createTemporalMemory(network, _L2_TEMPORAL_MEMORY) network.link(_L2_SPATIAL_POOLER, _L2_TEMPORAL_MEMORY, linkType, linkParams) l2Classifier = network.addRegion(_L2_CLASSIFIER, "py.SDRClassifierRegion", json.dumps(classifierParams)) l2Classifier.setParameter('inferenceMode', True) l2Classifier.setParameter('learningMode', True) network.link(_L2_TEMPORAL_MEMORY, _L2_CLASSIFIER, linkType, linkParams, srcOutput="bottomUpOut", destInput="bottomUpIn") network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams, srcOutput="categoryOut", destInput="categoryIn") network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams, srcOutput="bucketIdxOut", destInput="bucketIdxIn") network.link(_RECORD_SENSOR, _L2_CLASSIFIER, linkType, linkParams, srcOutput="actValueOut", destInput="actValueIn") return network
Creates and returns a new Network with a sensor region reading data from 'dataSource'. There are two hierarchical levels, each with one SP and one TM. @param dataSource - A RecordStream containing the input data @returns a Network ready to run
def init_app(self, app, sessionstore=None, register_blueprint=False): """Flask application initialization. :param app: The Flask application. :param sessionstore: store for sessions. Passed to ``flask-kvsession``. If ``None`` then Redis is configured. (Default: ``None``) :param register_blueprint: If ``True``, the application registers the blueprints. (Default: ``False``) """ return super(InvenioAccountsREST, self).init_app( app, sessionstore=sessionstore, register_blueprint=register_blueprint, )
Flask application initialization. :param app: The Flask application. :param sessionstore: store for sessions. Passed to ``flask-kvsession``. If ``None`` then Redis is configured. (Default: ``None``) :param register_blueprint: If ``True``, the application registers the blueprints. (Default: ``False``)
def _build_youtube_dl_coprocessor(cls, session: AppSession, proxy_port: int): '''Build youtube-dl coprocessor.''' # Test early for executable wpull.processor.coprocessor.youtubedl.get_version(session.args.youtube_dl_exe) coprocessor = session.factory.new( 'YoutubeDlCoprocessor', session.args.youtube_dl_exe, (session.args.proxy_server_address, proxy_port), root_path=session.args.directory_prefix, user_agent=session.args.user_agent or session.default_user_agent, warc_recorder=session.factory.get('WARCRecorder'), inet_family=session.args.inet_family, # Proxy will always present a invalid MITM cert #check_certificate=session.args.check_certificate check_certificate=False ) return coprocessor
Build youtube-dl coprocessor.
def dedents(self, s, stacklevel=3): """ Dedent a string and substitute with the :attr:`params` attribute Parameters ---------- s: str string to dedent and insert the sections of the :attr:`params` attribute stacklevel: int The stacklevel for the warning raised in :func:`safe_modulo` when encountering an invalid key in the string""" s = dedents(s) return safe_modulo(s, self.params, stacklevel=stacklevel)
Dedent a string and substitute with the :attr:`params` attribute Parameters ---------- s: str string to dedent and insert the sections of the :attr:`params` attribute stacklevel: int The stacklevel for the warning raised in :func:`safe_modulo` when encountering an invalid key in the string
def remove_event(self, name=None, time=None, chan=None): """Action: remove single event.""" self.annot.remove_event(name=name, time=time, chan=chan) self.update_annotations()
Action: remove single event.
def _permute(self, ordering: np.ndarray) -> None: """ Permute all the attributes in the collection Remarks: This permutes the order of the values for each attribute in the file """ for key in self.keys(): self[key] = self[key][ordering]
Permute all the attributes in the collection Remarks: This permutes the order of the values for each attribute in the file
def set_volume(self, volume=50): """ Allows changing the volume. :param int volume: volume to be set for the current device [0..100] (default: 50) """ assert(volume in range(101)) log.debug("setting volume...") cmd, url = DEVICE_URLS["set_volume"] json_data = { "volume": volume, } return self._exec(cmd, url, json_data=json_data)
Allows changing the volume. :param int volume: volume to be set for the current device [0..100] (default: 50)
def main(): """The entry point for the HNV Agent.""" neutron_config.register_agent_state_opts_helper(CONF) common_config.init(sys.argv[1:]) neutron_config.setup_logging() hnv_agent = HNVAgent() # Start everything. LOG.info("Agent initialized successfully, now running... ") hnv_agent.daemon_loop()
The entry point for the HNV Agent.
def _parse_response(self, resp): """Gets the authentication information from the returned JSON.""" super(RaxIdentity, self)._parse_response(resp) user = resp["access"]["user"] defreg = user.get("RAX-AUTH:defaultRegion") if defreg: self._default_region = defreg
Gets the authentication information from the returned JSON.
def prepareToSolve(self): ''' Perform preparatory work before calculating the unconstrained consumption function. Parameters ---------- none Returns ------- none ''' self.setAndUpdateValues(self.solution_next,self.IncomeDstn,self.LivPrb,self.DiscFac) self.defBoroCnst(self.BoroCnstArt)
Perform preparatory work before calculating the unconstrained consumption function. Parameters ---------- none Returns ------- none
def make_grid(self): """ return grid """ changes = None # if there is a MagicDataFrame, extract data from it if isinstance(self.magic_dataframe, cb.MagicDataFrame): # get columns and reorder slightly col_labels = list(self.magic_dataframe.df.columns) for ex_col in self.exclude_cols: col_labels.pop(ex_col) if self.grid_type == 'ages': levels = ['specimen', 'sample', 'site', 'location'] for label in levels[:]: if label in col_labels: col_labels.remove(label) else: levels.remove(label) col_labels[:0] = levels else: if self.parent_type: if self.parent_type[:-1] in col_labels: col_labels.remove(self.parent_type[:-1]) col_labels[:0] = [self.parent_type[:-1]] if self.grid_type[:-1] in col_labels: col_labels.remove(self.grid_type[:-1]) col_labels[:0] = (self.grid_type[:-1],) for col in col_labels: if col not in self.magic_dataframe.df.columns: self.magic_dataframe.df[col] = None self.magic_dataframe.df = self.magic_dataframe.df[col_labels] self.magic_dataframe.sort_dataframe_cols() col_labels = list(self.magic_dataframe.df.columns) row_labels = self.magic_dataframe.df.index # make sure minimum defaults are present for header in self.reqd_headers: if header not in col_labels: changes = set([1]) col_labels.append(header) # if there is no pre-existing MagicDataFrame, # make a blank grid with do some defaults: else: # default headers #col_labels = list(self.data_model.get_headers(self.grid_type, 'Names')) #col_labels[:0] = self.reqd_headers col_labels = list(self.reqd_headers) if self.grid_type in ['specimens', 'samples', 'sites']: col_labels.extend(['age', 'age_sigma']) ## use the following line if you want sorted column labels: #col_labels = sorted(set(col_labels)) # defaults are different for ages if self.grid_type == 'ages': levels = ['specimen', 'sample', 'site', 'location'] for label in levels: if label in col_labels: col_labels.remove(label) col_labels[:0] = levels else: if self.parent_type: col_labels.remove(self.parent_type[:-1]) col_labels[:0] = [self.parent_type[:-1]] col_labels.remove(self.grid_type[:-1]) col_labels[:0] = [self.grid_type[:-1]] # make sure all reqd cols are in magic_dataframe for col in col_labels: if col not in self.magic_dataframe.df.columns: self.magic_dataframe.df[col] = None # make the grid if not self.huge: grid = magic_grid.MagicGrid(parent=self.panel, name=self.grid_type, row_labels=[], col_labels=col_labels) # make the huge grid else: row_labels = self.magic_dataframe.df.index grid = magic_grid.HugeMagicGrid(parent=self.panel, name=self.grid_type, row_labels=row_labels, col_labels=col_labels) grid.do_event_bindings() grid.changes = changes self.grid = grid return grid
return grid
def __json(self): """ Using the exclude lists, convert fields to a string. """ if self.exclude_list is None: self.exclude_list = [] fields = {} for key, item in vars(self).items(): if hasattr(self, '_sa_instance_state'): # load only deferred objects if len(orm.attributes.instance_state(self).unloaded) > 0: mapper = inspect(self) for column in mapper.attrs: column.key column.value if str(key).startswith('_') or key in self.exclude_list: continue fields[key] = item obj = Json.safe_object(fields) return str(obj)
Using the exclude lists, convert fields to a string.
def add_mark_at(string, index, mark): """ Add mark to the index-th character of the given string. Return the new string after applying the change. Notice: index > 0 """ if index == -1: return string # Python can handle the case where index is out of range of the given string return string[:index] + add_mark_char(string[index], mark) + string[index+1:]
Add mark to the index-th character of the given string. Return the new string after applying the change. Notice: index > 0
def forward(self, layer_input: torch.Tensor, layer_output: torch.Tensor, layer_index: int = None, total_layers: int = None) -> torch.Tensor: # pylint: disable=arguments-differ """ Apply dropout to this layer, for this whole mini-batch. dropout_prob = layer_index / total_layers * undecayed_dropout_prob if layer_idx and total_layers is specified, else it will use the undecayed_dropout_prob directly. Parameters ---------- layer_input ``torch.FloatTensor`` required The input tensor of this layer. layer_output ``torch.FloatTensor`` required The output tensor of this layer, with the same shape as the layer_input. layer_index ``int`` The layer index, starting from 1. This is used to calcuate the dropout prob together with the `total_layers` parameter. total_layers ``int`` The total number of layers. Returns ------- output: ``torch.FloatTensor`` A tensor with the same shape as `layer_input` and `layer_output`. """ if layer_index is not None and total_layers is not None: dropout_prob = 1.0 * self.undecayed_dropout_prob * layer_index / total_layers else: dropout_prob = 1.0 * self.undecayed_dropout_prob if self.training: if torch.rand(1) < dropout_prob: return layer_input else: return layer_output + layer_input else: return (1 - dropout_prob) * layer_output + layer_input
Apply dropout to this layer, for this whole mini-batch. dropout_prob = layer_index / total_layers * undecayed_dropout_prob if layer_index and total_layers are specified, else it will use the undecayed_dropout_prob directly. Parameters ---------- layer_input ``torch.FloatTensor`` required The input tensor of this layer. layer_output ``torch.FloatTensor`` required The output tensor of this layer, with the same shape as the layer_input. layer_index ``int`` The layer index, starting from 1. This is used to calculate the dropout prob together with the `total_layers` parameter. total_layers ``int`` The total number of layers. Returns ------- output: ``torch.FloatTensor`` A tensor with the same shape as `layer_input` and `layer_output`.
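A standalone sketch of the same linearly decayed residual-dropout rule, assuming PyTorch is available; the function and argument names here are illustrative rather than the module's own:
import torch

def decayed_residual_dropout(layer_input, layer_output, layer_index, total_layers,
                             undecayed_prob=0.2, training=True):
    # drop probability grows linearly with depth
    dropout_prob = undecayed_prob * layer_index / total_layers
    if training:
        # during training, occasionally skip the layer's contribution entirely
        if torch.rand(1).item() < dropout_prob:
            return layer_input
        return layer_output + layer_input
    # at inference, scale the layer output by its expected keep probability
    return (1 - dropout_prob) * layer_output + layer_input

x, y = torch.randn(2, 8), torch.randn(2, 8)
print(decayed_residual_dropout(x, y, layer_index=3, total_layers=6).shape)  # torch.Size([2, 8])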
def disable(gandi, resource, backend, port, probe): """ Disable a backend or a probe on a webaccelerator """ result = [] if backend: backends = backend for backend in backends: if 'port' not in backend: if not port: backend['port'] = click.prompt('Please set a port for ' 'backends. If you want a ' 'different port for ' 'each backend, use `-b ' 'ip:port`', type=int) else: backend['port'] = port result = gandi.webacc.backend_disable(backend) if probe: if not resource: gandi.echo('You need to indicate the Webaccelerator name') return result = gandi.webacc.probe_disable(resource) return result
Disable a backend or a probe on a webaccelerator
def pack_nibbles(nibbles): """pack nibbles to binary :param nibbles: a nibbles sequence. may have a terminator """ if nibbles[-1] == NIBBLE_TERMINATOR: flags = 2 nibbles = nibbles[:-1] else: flags = 0 oddlen = len(nibbles) % 2 flags |= oddlen # set lowest bit if odd number of nibbles if oddlen: nibbles = [flags] + nibbles else: nibbles = [flags, 0] + nibbles o = b'' for i in range(0, len(nibbles), 2): o += ascii_chr(16 * nibbles[i] + nibbles[i + 1]) return o
pack nibbles to binary :param nibbles: a nibbles sequence. may have a terminator
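A self-contained illustration of the flag-nibble packing described above; `NIBBLE_TERMINATOR = 16` and the use of `bytes([...])` are assumptions standing in for the module's own constants and helpers:
NIBBLE_TERMINATOR = 16  # assumed sentinel appended to terminated keys

def pack_nibbles(nibbles):
    # flag nibble: bit 1 set when the key was terminated, bit 0 set for odd length
    if nibbles and nibbles[-1] == NIBBLE_TERMINATOR:
        flags, nibbles = 2, nibbles[:-1]
    else:
        flags = 0
    oddlen = len(nibbles) % 2
    flags |= oddlen
    nibbles = [flags] + nibbles if oddlen else [flags, 0] + nibbles
    packed = b""
    for i in range(0, len(nibbles), 2):
        packed += bytes([16 * nibbles[i] + nibbles[i + 1]])
    return packed

print(pack_nibbles([1, 2, 3]).hex())                     # '1123' (odd length, no terminator)
print(pack_nibbles([1, 2, 3, NIBBLE_TERMINATOR]).hex())  # '3123' (odd length, terminated)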
def add_scope(self, scope_type, scope_name, scope_start, is_method=False): """we identified a scope and add it to positions.""" if self._curr is not None: self._curr['end'] = scope_start - 1 # close last scope self._curr = { 'type': scope_type, 'name': scope_name, 'start': scope_start, 'end': scope_start } if is_method and self._positions: last = self._positions[-1] if not 'methods' in last: last['methods'] = [] last['methods'].append(self._curr) else: self._positions.append(self._curr)
we identified a scope and add it to positions.
def search(cls, session, queries, out_type): """Search for a record given a domain. Args: session (requests.sessions.Session): Authenticated session. queries (helpscout.models.Domain or iter): The queries for the domain. If a ``Domain`` object is provided, it will simply be returned. Otherwise, a ``Domain`` object will be generated from the complex queries. In this case, the queries should conform to the interface in :func:`helpscout.domain.Domain.from_tuple`. out_type (helpscout.BaseModel): The type of record to output. This should be provided by child classes, by calling super. Returns: RequestPaginator(output_type=helpscout.BaseModel): Results iterator of the ``out_type`` that is defined. """ cls._check_implements('search') domain = cls.get_search_domain(queries) return cls( '/search/%s.json' % cls.__endpoint__, data={'query': str(domain)}, session=session, out_type=out_type, )
Search for a record given a domain. Args: session (requests.sessions.Session): Authenticated session. queries (helpscout.models.Domain or iter): The queries for the domain. If a ``Domain`` object is provided, it will simply be returned. Otherwise, a ``Domain`` object will be generated from the complex queries. In this case, the queries should conform to the interface in :func:`helpscout.domain.Domain.from_tuple`. out_type (helpscout.BaseModel): The type of record to output. This should be provided by child classes, by calling super. Returns: RequestPaginator(output_type=helpscout.BaseModel): Results iterator of the ``out_type`` that is defined.
def update_function(old, new): """Upgrade the code object of a function""" for name in func_attrs: try: setattr(old, name, getattr(new, name)) except (AttributeError, TypeError): pass
Upgrade the code object of a function
def options(cls, obj, options=None, **kwargs): """ Context-manager for temporarily setting options on an object (if options is None, no options will be set) . Once the context manager exits, both the object and the Store will be left in exactly the same state they were in before the context manager was used. See holoviews.core.options.set_options function for more information on the options specification format. """ if (options is None) and kwargs == {}: yield else: Store._options_context = True optstate = cls.state(obj) groups = Store.options().groups.keys() options = cls.merge_options(groups, options, **kwargs) cls.set_options(obj, options) yield if options is not None: Store._options_context = True cls.state(obj, state=optstate)
Context-manager for temporarily setting options on an object (if options is None, no options will be set) . Once the context manager exits, both the object and the Store will be left in exactly the same state they were in before the context manager was used. See holoviews.core.options.set_options function for more information on the options specification format.
def ResolveForCreate(self, document): """Resolves the collection for creating the document based on the partition key. :param dict document: The document to be created. :return: Collection Self link or Name based link which should handle the Create operation. :rtype: str """ if document is None: raise ValueError("document is None.") partition_key = self.partition_key_extractor(document) return self.consistent_hash_ring.GetCollectionNode(partition_key)
Resolves the collection for creating the document based on the partition key. :param dict document: The document to be created. :return: Collection Self link or Name based link which should handle the Create operation. :rtype: str
def clear_scroll(self, scroll_id = None, body = '', params = {}, callback = None, **kwargs ): """ Clear the scroll request created by specifying the scroll parameter to search. `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_ :arg scroll_id: The scroll ID or a list of scroll IDs :arg body: A comma-separated list of scroll IDs to clear if none was specified via the scroll_id parameter """ url = self.mk_url(*['_search', 'scroll', scroll_id]) self.client.fetch( self.mk_req(url, method='DELETE', body=body, **kwargs), callback = callback )
Clear the scroll request created by specifying the scroll parameter to search. `<http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/search-request-scroll.html>`_ :arg scroll_id: The scroll ID or a list of scroll IDs :arg body: A comma-separated list of scroll IDs to clear if none was specified via the scroll_id parameter
def hasDependencyRecursively(self, name, target=None, test_dependencies=False): ''' Check if this module, or any of its dependencies, has a dependency with the specified name in their dependencies, or in their targetDependencies corresponding to the specified target. Note that if recursive dependencies are not installed, this test may return a false-negative. ''' # checking dependencies recursively isn't entirely straightforward, so # use the existing method to resolve them all before checking: dependencies = self.getDependenciesRecursive( target = target, test = test_dependencies ) return (name in dependencies)
Check if this module, or any of its dependencies, has a dependency with the specified name in their dependencies, or in their targetDependencies corresponding to the specified target. Note that if recursive dependencies are not installed, this test may return a false-negative.
def doc_stream(path): """ Generator to feed tokenized documents (treating each line as a document). """ with open(path, 'r') as f: for line in f: if line.strip(): yield line
Generator to feed tokenized documents (treating each line as a document).
def create_module(name, path): """ Returns module created *on the fly*. Returned module would have name same as given ``name`` and would contain code read from file at the given ``path`` (it may also be a zip or package containing *__main__* module). """ module = imp.new_module(name) module.__file__ = path execfile(path, module.__dict__) return module
Returns module created *on the fly*. Returned module would have name same as given ``name`` and would contain code read from file at the given ``path`` (it may also be a zip or package containing *__main__* module).
def single(self): """ Obtain the next and only remaining record from this result. A warning is generated if more than one record is available but the first of these is still returned. :returns: the next :class:`.Record` or :const:`None` if none remain :warns: if more than one record is available """ records = list(self) size = len(records) if size == 0: return None if size != 1: warn("Expected a result with a single record, but this result contains %d" % size) return records[0]
Obtain the next and only remaining record from this result. A warning is generated if more than one record is available but the first of these is still returned. :returns: the next :class:`.Record` or :const:`None` if none remain :warns: if more than one record is available
def fromFile(cls, filename): """ Load a suffix array instance from filename, a file created by toFile. Accept any filename following the _open conventions. """ self = cls.__new__(cls) # new instance which does not call __init__ start = _time() savedData = _loads(_open(filename, "r").read()) # load common attributes self.string, self.unit, self.voc, self.vocSize, self.SA, features = savedData[:6] self.length = len(self.SA) # determine token delimiter if self.unit == UNIT_WORD: self.tokSep = " " elif self.unit in (UNIT_CHARACTER, UNIT_BYTE): self.tokSep = "" else: raise Exception("Unknown unit type identifier:", self.unit) # recompute tokId based on voc self.tokId = dict((char, iChar) for iChar, char in enumerate(self.voc)) self.nbSentences = self.string.count(self.tokId.get("\n", 0)) # Load features self.features = [] for featureName, (featureValues, featureDefault) in zip(features, savedData[6:]): self.addFeatureSA((lambda _: featureValues), name=featureName, default=featureDefault) self.fromFileTime = _time() - start if _trace: print >> _stderr, "fromFileTime %.2fs" % self.fromFileTime return self
Load a suffix array instance from filename, a file created by toFile. Accept any filename following the _open conventions.
def format_interface_name(intf_type, port, ch_grp=0): """Method to format interface name given type, port. Given interface type, port, and channel-group, this method formats an interface name. If channel-group is non-zero, then port-channel is configured. :param intf_type: Such as 'ethernet' or 'port-channel' :param port: unique identification -- 1/32 or 1 :ch_grp: If non-zero, ignore other params and format port-channel<ch_grp> :returns: the full formatted interface name. ex: ethernet:1/32, port-channel:1 """ if ch_grp > 0: return 'port-channel:%s' % str(ch_grp) return '%s:%s' % (intf_type.lower(), port)
Method to format interface name given type, port. Given interface type, port, and channel-group, this method formats an interface name. If channel-group is non-zero, then port-channel is configured. :param intf_type: Such as 'ethernet' or 'port-channel' :param port: unique identification -- 1/32 or 1 :ch_grp: If non-zero, ignore other params and format port-channel<ch_grp> :returns: the full formatted interface name. ex: ethernet:1/32, port-channel:1
def as_completed(fs, timeout=None): """An iterator over the given futures that yields each as it completes. Args: fs: The sequence of Futures (possibly created by different Executors) to iterate over. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. Returns: An iterator that yields the given Futures as they complete (finished or cancelled). Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout. """ with _AcquireFutures(fs): finished = set(f for f in fs if f._state in [CANCELLED_AND_NOTIFIED, FINISHED]) pending = set(fs) - finished waiter = _create_and_install_waiters(fs, _AS_COMPLETED) timer = Timeout(timeout) timer.start() try: for future in finished: yield future while pending: waiter.event.wait() with waiter.lock: finished = waiter.finished_futures waiter.finished_futures = [] waiter.event.clear() for future in finished: yield future pending.remove(future) except Timeout as e: if timer is not e: raise raise TimeoutError('%d (of %d) futures unfinished' % (len(pending), len(fs))) finally: timer.cancel() for f in fs: f._waiters.remove(waiter)
An iterator over the given futures that yields each as it completes. Args: fs: The sequence of Futures (possibly created by different Executors) to iterate over. timeout: The maximum number of seconds to wait. If None, then there is no limit on the wait time. Returns: An iterator that yields the given Futures as they complete (finished or cancelled). Raises: TimeoutError: If the entire result iterator could not be generated before the given timeout.
def build_docs(directory):
    """Builds sphinx docs from a given directory."""
    os.chdir(directory)
    process = subprocess.Popen(["make", "html"], cwd=directory)
    process.communicate()
Builds sphinx docs from a given directory.
def past_date(start='-30d'):
    """
    Returns a ``date`` object in the past between 1 day ago and the
    specified ``start``. ``start`` can be a string, another date, or a
    timedelta. If it's a string, it must start with `-`, followed by an
    integer and a unit, Eg: ``'-30d'``. Defaults to `'-30d'`

    Valid units are:

    * ``'years'``, ``'y'``
    * ``'weeks'``, ``'w'``
    * ``'days'``, ``'d'``
    * ``'hours'``, ``'h'``
    * ``'minutes'``, ``'m'``
    * ``'seconds'``, ``'s'``
    """
    return lambda n, f: f.past_date(
        start_date=start,
        tzinfo=get_timezone(),
    )
Returns a ``date`` object in the past between 1 day ago and the specified ``start``. ``start`` can be a string, another date, or a timedelta. If it's a string, it must start with `-`, followed by an integer and a unit, Eg: ``'-30d'``. Defaults to `'-30d'` Valid units are: * ``'years'``, ``'y'`` * ``'weeks'``, ``'w'`` * ``'days'``, ``'d'`` * ``'hours'``, ``'h'`` * ``'minutes'``, ``'m'`` * ``'seconds'``, ``'s'``
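A minimal sketch of how the returned factory might be called. This assumes the surrounding fixture library passes an index and a Faker instance to it (the two-argument call is inferred from the lambda's signature) and that the module's own ``get_timezone`` helper is importable alongside it:

# Sketch only: ``n`` (index) and ``f`` (Faker instance) are assumptions
# about what the fixture machinery supplies to the returned callable.
from faker import Faker

date_builder = past_date('-7d')   # dates between 7 days ago and yesterday
fake = Faker()
print(date_builder(0, fake))      # e.g. datetime.date(2024, 1, 3) -- illustrative output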
def handle_cannot(reference_answers: List[str]):
    """
    Process a list of reference answers.
    If half or more of the reference answers are "CANNOTANSWER", take it as gold.
    Otherwise, return answers that are not "CANNOTANSWER".
    """
    num_cannot = 0
    num_spans = 0
    for ref in reference_answers:
        if ref == 'CANNOTANSWER':
            num_cannot += 1
        else:
            num_spans += 1
    if num_cannot >= num_spans:
        reference_answers = ['CANNOTANSWER']
    else:
        reference_answers = [x for x in reference_answers if x != 'CANNOTANSWER']
    return reference_answers
Process a list of reference answers. If half or more of the reference answers are "CANNOTANSWER", take that as the gold answer. Otherwise, return the answers that are not "CANNOTANSWER".
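A short illustration of the majority rule; note that ties go to "CANNOTANSWER" because the comparison uses ``>=``:

refs = ['CANNOTANSWER', 'a span', 'CANNOTANSWER']
print(handle_cannot(refs))   # ['CANNOTANSWER'] -- 2 of 3 references are CANNOTANSWER

refs = ['a span', 'another span', 'CANNOTANSWER']
print(handle_cannot(refs))   # ['a span', 'another span']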
def read(self, offset, length, min_length=0, unbuffered=False, wait=True,
         send=True):
    """
    Reads from an opened file or pipe

    Supports an out of band send function: call this function with
    send=False to return a tuple of (SMB2ReadRequest, receive_func)
    instead of sending the request and waiting for the response. The
    receive_func can be used to get the response from the server by
    passing in the Request that was used to send it out of band.

    :param offset: The offset to start the read of the file.
    :param length: The number of bytes to read from the offset.
    :param min_length: The minimum number of bytes to be read for a
        successful operation.
    :param unbuffered: Whether to request that the server does not cache
        the read data at intermediate layers; only valid for SMB 3.0.2
        or newer.
    :param wait: If send=True, whether to wait for a response if
        STATUS_PENDING was received from the server or fail.
    :param send: Whether to send the request in the same call or return
        the message to the caller and the unpack function.
    :return: A byte string of the bytes read
    """
    if length > self.connection.max_read_size:
        raise SMBException("The requested read length %d is greater than "
                           "the maximum negotiated read size %d"
                           % (length, self.connection.max_read_size))

    read = SMB2ReadRequest()
    read['length'] = length
    read['offset'] = offset
    read['minimum_count'] = min_length
    read['file_id'] = self.file_id
    read['padding'] = b"\x50"

    if unbuffered:
        if self.connection.dialect < Dialects.SMB_3_0_2:
            raise SMBUnsupportedFeature(self.connection.dialect,
                                        Dialects.SMB_3_0_2,
                                        "SMB2_READFLAG_READ_UNBUFFERED",
                                        True)
        read['flags'].set_flag(ReadFlags.SMB2_READFLAG_READ_UNBUFFERED)

    if not send:
        return read, self._read_response

    log.info("Session: %s, Tree Connect ID: %s - sending SMB2 Read "
             "Request for file %s" % (self.tree_connect.session.username,
                                      self.tree_connect.share_name,
                                      self.file_name))
    log.debug(str(read))
    request = self.connection.send(read,
                                   self.tree_connect.session.session_id,
                                   self.tree_connect.tree_connect_id)
    return self._read_response(request, wait)
Reads from an opened file or pipe Supports an out of band send function: call this function with send=False to return a tuple of (SMB2ReadRequest, receive_func) instead of sending the request and waiting for the response. The receive_func can be used to get the response from the server by passing in the Request that was used to send it out of band. :param offset: The offset to start the read of the file. :param length: The number of bytes to read from the offset. :param min_length: The minimum number of bytes to be read for a successful operation. :param unbuffered: Whether to request that the server does not cache the read data at intermediate layers; only valid for SMB 3.0.2 or newer. :param wait: If send=True, whether to wait for a response if STATUS_PENDING was received from the server or fail. :param send: Whether to send the request in the same call or return the message to the caller and the unpack function. :return: A byte string of the bytes read
def download_url(url, destination):
    """
    Download an external URL to the destination
    """
    from settings import VALID_IMAGE_EXTENSIONS

    base_name, ext = os.path.splitext(url)
    ext = ext.lstrip('.')
    if ext not in VALID_IMAGE_EXTENSIONS:
        raise Exception("Invalid image extension")

    base_path, filename = os.path.split(destination)
    os.makedirs(base_path)
    urllib.urlretrieve(url, destination)
Download an external URL to the destination
def qtaax(mt, x, t, q, m=1):
    """Geometric annuity."""
    q = float(q)
    j = (mt.i - q) / (1 + q)
    mtj = Actuarial(nt=mt.nt, i=j)
    return taax(mtj, x, t) - ((float(m - 1) / float(m * 2)) * (1 - nEx(mt, x, t)))
Geometric annuity.
def property(func):
    """Wrap a function as a property.

    This differs from attribute by identifying properties explicitly listed
    in the class definition rather than named attributes defined on instances
    of a class at init time.
    """
    attr = abc.abstractmethod(func)
    attr.__iproperty__ = True
    attr = Property(attr)
    return attr
Wrap a function as a property. This differs from attribute by identifying properties explicitly listed in the class definition rather than named attributes defined on instances of a class at init time.
def alias_tags(tags_list, alias_map):
    """
    update tags to new values

    Args:
        tags_list (list):
        alias_map (dict): maps each original tag to its replacement;
            tags mapped to None are dropped

    Returns:
        list: updated tags

    CommandLine:
        python -m utool.util_tags alias_tags --show

    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_tags import *  # NOQA
        >>> import utool as ut
        >>> tags_list = [['t1', 't2'], [], ['t3'], ['t4', 't5']]
        >>> alias_map = ut.build_alias_map()
        >>> result = alias_tags(tags_list, alias_map)
        >>> print(result)
    """
    def _alias_dict(tags):
        tags_ = [alias_map.get(t, t) for t in tags]
        return list(set([t for t in tags_ if t is not None]))
    tags_list_ = [_alias_dict(tags) for tags in tags_list]
    return tags_list_
update tags to new values Args: tags_list (list): alias_map (dict): maps each original tag to its replacement; tags mapped to None are dropped Returns: list: updated tags CommandLine: python -m utool.util_tags alias_tags --show Example: >>> # DISABLE_DOCTEST >>> from utool.util_tags import * # NOQA >>> import utool as ut >>> tags_list = [['t1', 't2'], [], ['t3'], ['t4', 't5']] >>> alias_map = ut.build_alias_map() >>> result = alias_tags(tags_list, alias_map) >>> print(result)
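A concrete illustration with an explicit alias_map; the dict literal below is made up for the example, only alias_tags itself is assumed:

alias_map = {'t2': 't1', 't5': None}   # hypothetical mapping for illustration
tags_list = [['t1', 't2'], [], ['t3'], ['t4', 't5']]
print(alias_tags(tags_list, alias_map))
# [['t1'], [], ['t3'], ['t4']]  -- 't2' collapsed into 't1', 't5' dropped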
def read_git_branch():
    """Obtain the current branch name from the Git repository.

    If on Travis CI, use the ``TRAVIS_BRANCH`` environment variable.
    """
    if os.getenv('TRAVIS'):
        return os.getenv('TRAVIS_BRANCH')
    else:
        try:
            repo = git.repo.base.Repo(search_parent_directories=True)
            return repo.active_branch.name
        except Exception:
            return ''
Obtain the current branch name from the Git repository. If on Travis CI, use the ``TRAVIS_BRANCH`` environment variable.
def load_config():
    '''try loading config file from a default directory'''
    cfg_path = '/usr/local/etc/freelan'
    cfg_file = 'freelan.cfg'
    if not os.path.isdir(cfg_path):
        print("Can not find default freelan config directory.")
        return
    cfg_file_path = os.path.join(cfg_path, cfg_file)
    if not os.path.isfile(cfg_file_path):
        print("Can not find default freelan config file.")
        return
    return _load_config(cfg_file_path)
try loading config file from a default directory
def start_of_chunk(prev_tag, tag, prev_type, type_):
    """Checks if a chunk started between the previous and current word.

    Args:
        prev_tag: previous chunk tag.
        tag: current chunk tag.
        prev_type: previous type.
        type_: current type.

    Returns:
        chunk_start: boolean.
    """
    chunk_start = False

    if tag == 'B':
        chunk_start = True
    if tag == 'S':
        chunk_start = True

    if prev_tag == 'E' and tag == 'E':
        chunk_start = True
    if prev_tag == 'E' and tag == 'I':
        chunk_start = True
    if prev_tag == 'S' and tag == 'E':
        chunk_start = True
    if prev_tag == 'S' and tag == 'I':
        chunk_start = True
    if prev_tag == 'O' and tag == 'E':
        chunk_start = True
    if prev_tag == 'O' and tag == 'I':
        chunk_start = True

    if tag != 'O' and tag != '.' and prev_type != type_:
        chunk_start = True

    return chunk_start
Checks if a chunk started between the previous and current word. Args: prev_tag: previous chunk tag. tag: current chunk tag. prev_type: previous type. type_: current type. Returns: chunk_start: boolean.
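Worked examples of the rules above on IOBES-style tags; only the function itself is assumed, the tag values are illustrative:

print(start_of_chunk(prev_tag='O', tag='B', prev_type='PER', type_='PER'))  # True: 'B' always starts a chunk
print(start_of_chunk(prev_tag='B', tag='I', prev_type='PER', type_='PER'))  # False: continuation of the same chunk
print(start_of_chunk(prev_tag='I', tag='I', prev_type='PER', type_='LOC'))  # True: the entity type changed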
def get_float(prompt=None):
    """
    Read a line of text from standard input and return the equivalent float
    as precisely as possible; if text does not represent a double, user is
    prompted to retry. If line can't be read, return None.
    """
    while True:
        s = get_string(prompt)
        if s is None:
            return None
        if len(s) > 0 and re.search(r"^[+-]?\d*(?:\.\d*)?$", s):
            try:
                return float(s)
            except ValueError:
                pass

        # Temporarily here for backwards compatibility
        if prompt is None:
            print("Retry: ", end="")
Read a line of text from standard input and return the equivalent float as precisely as possible; if text does not represent a double, user is prompted to retry. If line can't be read, return None.
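The regular expression above is what decides whether an input line is even attempted as a float. A few examples of what it accepts and rejects; this only exercises the pattern, not the interactive prompt:

import re

pattern = r"^[+-]?\d*(?:\.\d*)?$"
for s in ["3.14", "-2", "+.5", "1e3", "abc"]:
    print(s, bool(re.search(pattern, s)))
# 3.14 True, -2 True, +.5 True, 1e3 False (scientific notation is rejected), abc False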
def reset(self):
    """
    Indicates the start of a new sequence. Clears any predictions and makes
    sure synapses don't grow to the currently active cells in the next time
    step.
    """
    self.activeCells = []
    self.winnerCells = []
    self.activeSegments = []
    self.matchingSegments = []
Indicates the start of a new sequence. Clears any predictions and makes sure synapses don't grow to the currently active cells in the next time step.
def MessageSetItemSizer(field_number):
    """Returns a sizer for extensions of MessageSet.

    The message set message looks like this:
      message MessageSet {
        repeated group Item = 1 {
          required int32 type_id = 2;
          required string message = 3;
        }
      }
    """
    static_size = (_TagSize(1) * 2 + _TagSize(2) + _VarintSize(field_number) +
                   _TagSize(3))
    local_VarintSize = _VarintSize

    def FieldSize(value):
        l = value.ByteSize()
        return static_size + local_VarintSize(l) + l

    return FieldSize
Returns a sizer for extensions of MessageSet. The message set message looks like this: message MessageSet { repeated group Item = 1 { required int32 type_id = 2; required string message = 3; } }
def _maybe_cast_to_float64(da):
    """Cast DataArrays to np.float64 if they are of type np.float32.

    Parameters
    ----------
    da : xr.DataArray
        Input DataArray

    Returns
    -------
    DataArray
    """
    if da.dtype == np.float32:
        logging.warning('Datapoints were stored using the np.float32 datatype. '
                        'For accurate reduction operations using bottleneck, '
                        'datapoints are being cast to the np.float64 datatype.'
                        ' For more information see: https://github.com/pydata/'
                        'xarray/issues/1346')
        return da.astype(np.float64)
    else:
        return da
Cast DataArrays to np.float64 if they are of type np.float32. Parameters ---------- da : xr.DataArray Input DataArray Returns ------- DataArray
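A quick check of the promotion behaviour, assuming xarray and numpy are available; the variable names are illustrative only:

import numpy as np
import xarray as xr

da32 = xr.DataArray(np.arange(3, dtype=np.float32))
print(_maybe_cast_to_float64(da32).dtype)   # float64 (cast, with a warning logged)

da64 = xr.DataArray(np.arange(3, dtype=np.float64))
print(_maybe_cast_to_float64(da64).dtype)   # float64, returned unchanged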
def save_profile(self, **params):
    """Save the given parameters into profile settings.

    Parameters
    ----------
    params : dict
        Keywords and values to be saved.
    """
    image = self.get_image()
    if (image is None):
        return

    profile = image.get('profile', None)
    if profile is None:
        # If image has no profile then create one
        profile = Settings.SettingGroup()
        image.set(profile=profile)

    self.logger.debug("saving to image profile: params=%s" % (
        str(params)))
    profile.set(**params)
    return profile
Save the given parameters into profile settings. Parameters ---------- params : dict Keywords and values to be saved.
def _count_elements(mapping, iterable):
    'Tally elements from the iterable.'
    mapping_get = mapping.get
    for elem in iterable:
        mapping[elem] = mapping_get(elem, 0) + 1
Tally elements from the iterable.
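Minimal use of the tally helper; it mutates the mapping in place, which is how collections.Counter uses its pure-Python fallback:

counts = {}
_count_elements(counts, "abracadabra")
print(counts)   # {'a': 5, 'b': 2, 'r': 2, 'c': 1, 'd': 1}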
def get_acl(self, key_name='', headers=None, version_id=None):
    """returns a bucket's acl. We include a version_id argument
       to support a polymorphic interface for callers, however,
       version_id is not relevant for Google Cloud Storage buckets
       and is therefore ignored here."""
    return self.get_acl_helper(key_name, headers, STANDARD_ACL)
returns a bucket's acl. We include a version_id argument to support a polymorphic interface for callers, however, version_id is not relevant for Google Cloud Storage buckets and is therefore ignored here.
def waitget(self, key, maxwaittime=60):
    """ Wait (poll) for a key to get a value

    Will wait for `maxwaittime` seconds before raising a KeyError.
    The call exits normally if the `key` field in db gets a value
    within the timeout period.

    Use this for synchronizing different processes or for ensuring
    that an unfortunately timed "db['key'] = newvalue" operation
    in another process (which causes all 'get' operation to cause a
    KeyError for the duration of pickling) won't screw up your program
    logic.
    """
    wtimes = [0.2] * 3 + [0.5] * 2 + [1]
    tries = 0
    waited = 0
    while 1:
        try:
            val = self[key]
            return val
        except KeyError:
            pass

        if waited > maxwaittime:
            raise KeyError(key)

        time.sleep(wtimes[tries])
        waited += wtimes[tries]
        if tries < len(wtimes) - 1:
            tries += 1
Wait (poll) for a key to get a value Will wait for `maxwaittime` seconds before raising a KeyError. The call exits normally if the `key` field in db gets a value within the timeout period. Use this for synchronizing different processes or for ensuring that an unfortunately timed "db['key'] = newvalue" operation in another process (which causes all 'get' operation to cause a KeyError for the duration of pickling) won't screw up your program logic.
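A sketch of the intended synchronization pattern, assuming ``db`` is an instance of this dict-like, pickle-backed store shared by two processes; the key name and the producer function are made up for the example:

# --- process A (producer); compute_result() is a placeholder ---
db['result'] = compute_result()

# --- process B (consumer): block until process A has published the value,
# raising KeyError after maxwaittime seconds (60 by default) ---
result = db.waitget('result')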
def from_xdr_object(cls, asset_xdr_object):
    """Create a :class:`Asset` from an XDR Asset object.

    :param asset_xdr_object: The XDR Asset object.
    :return: A new :class:`Asset` object from the given XDR Asset object.
    """
    if asset_xdr_object.type == Xdr.const.ASSET_TYPE_NATIVE:
        return Asset.native()
    elif asset_xdr_object.type == Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM4:
        issuer = encode_check(
            'account', asset_xdr_object.alphaNum4.issuer.ed25519).decode()
        code = asset_xdr_object.alphaNum4.assetCode.decode().rstrip('\x00')
    else:
        issuer = encode_check(
            'account', asset_xdr_object.alphaNum12.issuer.ed25519).decode()
        code = (
            asset_xdr_object.alphaNum12.assetCode.decode().rstrip('\x00'))
    return cls(code, issuer)
Create a :class:`Asset` from an XDR Asset object. :param asset_xdr_object: The XDR Asset object. :return: A new :class:`Asset` object from the given XDR Asset object.
def _create_namespace(namespace, apiserver_url):
    '''
    create namespace on the defined k8s cluster
    '''
    # Prepare URL
    url = "{0}/api/v1/namespaces".format(apiserver_url)
    # Prepare data
    data = {
        "kind": "Namespace",
        "apiVersion": "v1",
        "metadata": {
            "name": namespace,
        }
    }
    log.trace("namespace creation requests: %s", data)
    # Make request
    ret = _kpost(url, data)
    log.trace("result is: %s", ret)
    # Check requests status
    return ret
create namespace on the defined k8s cluster
def pretty_unicode(string):
    """
    Make sure string is unicode, try to decode with utf8, or unicode
    escaped string if failed.
    """
    if isinstance(string, six.text_type):
        return string
    try:
        return string.decode("utf8")
    except UnicodeDecodeError:
        return string.decode('Latin-1').encode('unicode_escape').decode("utf8")
Make sure string is unicode, try to decode with utf8, or unicode escaped string if failed.
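Behaviour on the three kinds of input, with example byte strings chosen for illustration:

print(pretty_unicode(u'd\u00e9j\u00e0 vu'))        # already unicode, returned as-is
print(pretty_unicode(b'd\xc3\xa9j\xc3\xa0'))       # valid UTF-8 bytes -> decoded normally
print(pretty_unicode(b'\xff\xfe'))                 # not UTF-8 -> escaped form: '\\xff\\xfe'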
def _num_values(self, zVar, varNum):
    '''
    Determines the number of values in a record.
    Set zVar=True if this is a zvariable.
    '''
    values = 1
    if (zVar == True):
        numDims = self.zvarsinfo[varNum][2]
        dimSizes = self.zvarsinfo[varNum][3]
        dimVary = self.zvarsinfo[varNum][4]
    else:
        numDims = self.rvarsinfo[varNum][2]
        dimSizes = self.rvarsinfo[varNum][3]
        dimVary = self.rvarsinfo[varNum][4]
    if (numDims < 1):
        return values
    else:
        for x in range(0, numDims):
            if (zVar == True):
                values = values * dimSizes[x]
            else:
                if (dimVary[x] != 0):
                    values = values * dimSizes[x]
        return values
Determines the number of values in a record. Set zVar=True if this is a zvariable.
def eval_basis(self, x, regularize=True):
    """
    basis_mat = C.eval_basis(x)

    Evaluates self's basis functions on x and returns them stacked
    in a matrix. basis_mat[i,j] gives basis function i evaluated at
    x[j,:].
    """
    if regularize:
        x = regularize_array(x)

    out = zeros((self.n, x.shape[0]), dtype=float, order='F')

    for i in xrange(self.n):
        out[i] = self.basis[i](x, **self.params)

    return out
basis_mat = C.eval_basis(x) Evaluates self's basis functions on x and returns them stacked in a matrix. basis_mat[i,j] gives basis function i evaluated at x[j,:].
def setup_package():
    """Package setup"""
    setup(
        name='pyviews',
        version=_get_version(),
        description='Base package for xml views',
        long_description=_get_long_description(),
        long_description_content_type='text/markdown',
        url='https://github.com/eumis/pyviews',
        author='eumis(Eugen Misievich)',
        author_email='[email protected]',
        license='MIT',
        classifiers=[
            #   2 - Pre-Alpha
            #   3 - Alpha
            #   4 - Beta
            #   5 - Production/Stable
            'Development Status :: 5 - Production/Stable',
            'Intended Audience :: Developers',
            'Topic :: Software Development :: Libraries',
            'License :: OSI Approved :: MIT License',
            'Programming Language :: Python :: 3.6'
        ],
        python_requires='>=3.6',
        keywords='binding pyviews python mvvm tkviews wxviews',
        packages=find_packages(exclude=['*.tests']))
Package setup
def sr1(x, promisc=None, filter=None, iface=None, nofilter=0, *args, **kargs):
    """Send packets at layer 3 and return only the first answer"""
    s = conf.L3socket(promisc=promisc, filter=filter,
                      nofilter=nofilter, iface=iface)
    ans, _ = sndrcv(s, x, *args, **kargs)
    s.close()
    if len(ans) > 0:
        return ans[0][1]
    else:
        return None
Send packets at layer 3 and return only the first answer
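A typical Scapy-style call; the destination address is a placeholder and the snippet assumes the usual IP/ICMP layers are importable from scapy:

from scapy.all import IP, ICMP

# timeout is forwarded to sndrcv via **kargs
reply = sr1(IP(dst="192.0.2.1") / ICMP(), timeout=2)
if reply is not None:
    reply.show()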
def apply(self, config, raise_on_unknown_key=True):
    # type: (Dict[str, Any], bool) -> None
    """Apply additional configuration from a dictionary

    This will look for dictionary items that exist in the base_config
    and apply their values on the current configuration object
    """
    _recursive_merge(self._data, config, raise_on_unknown_key)
Apply additional configuration from a dictionary This will look for dictionary items that exist in the base_config and apply their values on the current configuration object
def edgepaths(self):
    """
    Returns the fixed EdgePaths or computes direct connections
    between supplied nodes.
    """
    edgepaths = super(TriMesh, self).edgepaths
    edgepaths.crs = self.crs
    return edgepaths
Returns the fixed EdgePaths or computes direct connections between supplied nodes.
def make_repr(*args, **kwargs):
    """Returns __repr__ method which returns ASCII
    representation of the object with given fields.

    Without arguments, ``make_repr`` generates a method
    which outputs all object's non-protected (non-underscored)
    arguments which are not callables.

    Accepts ``*args``, which should be the names of object's
    attributes to be included in the output::

        __repr__ = make_repr('foo', 'bar')

    If you want to generate attribute's content on the fly,
    then you should use keyword arguments and pass a callable
    of one argument::

        __repr__ = make_repr(foo=lambda obj: obj.blah + 100500)
    """
    def method(self):
        cls_name = self.__class__.__name__

        if args:
            field_names = args
        else:
            def undercored(name):
                return name.startswith('_')

            def is_method(name):
                return callable(getattr(self, name))

            def good_name(name):
                return not undercored(name) and not is_method(name)

            field_names = filter(good_name, dir(self))
            field_names = sorted(field_names)

        # at this stage, we turn field_names into attribute getters
        field_getters = zip(field_names,
                            map(attrgetter, field_names))

        # now process keyword args; they must contain callables
        # of one argument, and the callable should return a field's value
        field_getters = chain(
            field_getters,
            kwargs.items())

        fields = ((name, format_value(getter(self)))
                  for name, getter in field_getters)

        # prepare key strings
        fields = ((u'{0}='.format(name), value)
                  for name, value in fields)

        # join values with their respective keys
        fields = list(starmap(serialize_text, fields))

        beginning = u'<{cls_name} '.format(
            cls_name=cls_name,
        )
        result = serialize_list(
            beginning,
            fields)
        # append closing bracket
        result += u'>'

        if ON_PYTHON2:
            # on python 2.x repr returns bytes, but on python3 - unicode strings
            result = result.encode('utf-8')

        return result

    return method
Returns __repr__ method which returns ASCII representation of the object with given fields. Without arguments, ``make_repr`` generates a method which outputs all object's non-protected (non-underscored) arguments which are not callables. Accepts ``*args``, which should be the names of object's attributes to be included in the output:: __repr__ = make_repr('foo', 'bar') If you want to generate attribute's content on the fly, then you should use keyword arguments and pass a callable of one argument:: __repr__ = make_repr(foo=lambda obj: obj.blah + 100500)
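How a class would typically wire this in; the class and attribute names are illustrative only, and the exact output depends on the library's serialize helpers:

class Point(object):
    def __init__(self, x, y):
        self.x = x
        self.y = y

    __repr__ = make_repr('x', 'y')

print(repr(Point(1, 2)))   # something like <Point x=1 y=2>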
def _filter_valid_arguments(self, arguments, binary="mongod", config=False):
    """
    Return the accepted arguments joined into a single string.

    Check which arguments in list are accepted by the specified binary
    (mongod, mongos). If an argument does not start with '-' but its
    preceding argument was accepted, then it is accepted as well. Example:
    with ['--slowms', '1000'], both arguments would be accepted for a
    mongod.
    """
    # get the help list of the binary
    if self.args and self.args['binarypath']:
        binary = os.path.join(self.args['binarypath'], binary)
    ret = (subprocess.Popen(['%s' % binary, '--help'],
                            stderr=subprocess.STDOUT,
                            stdout=subprocess.PIPE, shell=False))
    out, err = ret.communicate()

    accepted_arguments = []

    # extract all arguments starting with a '-'
    for line in [option for option in out.decode('utf-8').split('\n')]:
        line = line.lstrip()
        if line.startswith('-'):
            argument = line.split()[0]
            # exception: don't allow unsupported config server arguments
            if config and argument in ['--oplogSize', '--storageEngine',
                                       '--smallfiles', '--nojournal']:
                continue
            accepted_arguments.append(argument)

    # add undocumented options
    accepted_arguments.append('--setParameter')
    if binary.endswith('mongod'):
        accepted_arguments.append('--wiredTigerEngineConfigString')

    # filter valid arguments
    result = []
    for i, arg in enumerate(arguments):
        if arg.startswith('-'):
            # check if the binary accepts this argument
            # or special case -vvv for any number of v
            argname = arg.split('=', 1)[0]
            if argname in accepted_arguments or re.match(r'-v+', arg):
                result.append(arg)
            elif (binary.endswith('mongod') and
                  argname in self.UNDOCUMENTED_MONGOD_ARGS):
                result.append(arg)
            elif self.ignored_arguments.get(binary + argname) is None:
                # warn once for each combination of binary and unknown arg
                self.ignored_arguments[binary + argname] = True
                if not (binary.endswith("mongos") and
                        arg in self.UNSUPPORTED_MONGOS_ARGS):
                    print("warning: ignoring unknown argument %s for %s"
                          % (arg, binary))
        elif i > 0 and arguments[i - 1] in result:
            # if it doesn't start with a '-', it could be the value of
            # the last argument, e.g. `--slowms 1000`
            result.append(arg)

    # return valid arguments as joined string
    return ' '.join(result)
Return the accepted arguments joined into a single string. Check which arguments in list are accepted by the specified binary (mongod, mongos). If an argument does not start with '-' but its preceding argument was accepted, then it is accepted as well. Example: with ['--slowms', '1000'], both arguments would be accepted for a mongod.
def _notify_new_tick_event(self, tick):
    """Push a tick event to the event engine."""
    if tick.time == '':
        return

    event = Event(type_=EVENT_TINY_TICK)
    event.dict_['data'] = tick
    self._event_engine.put(event)
Push a tick event to the event engine.
def _flush_data(self):
    """
    If this relation's local unit data has been modified, publish it on
    the relation. This should be automatically called.
    """
    if self._data and self._data.modified:
        hookenv.relation_set(self.relation_id, dict(self.to_publish.data))
If this relation's local unit data has been modified, publish it on the relation. This should be automatically called.
def accept(self):
    '''
    Accepts all children fields, collects resulting values into dict and
    passes that dict to converter.

    Returns result of converter as separate value in parent `python_data`
    '''
    result = dict(self.python_data)
    for field in self.fields:
        if field.writable:
            result.update(field.accept())
        else:
            # readonly field
            field.set_raw_value(self.form.raw_data,
                                field.from_python(result[field.name]))
    self.clean_value = self.conv.accept(result)
    return {self.name: self.clean_value}
Accepts all children fields, collects resulting values into dict and passes that dict to converter. Returns result of converter as separate value in parent `python_data`
def edit_wiki_page(self, subreddit, page, content, reason=''):
    """Create or edit a wiki page with title `page` for `subreddit`.

    :returns: The json response from the server.
    """
    data = {'content': content,
            'page': page,
            'r': six.text_type(subreddit),
            'reason': reason}
    evict = self.config['wiki_page'].format(
        subreddit=six.text_type(subreddit), page=page.lower())
    self.evict(evict)
    return self.request_json(self.config['wiki_edit'], data=data)
Create or edit a wiki page with title `page` for `subreddit`. :returns: The json response from the server.
def get_span_column_count(span):
    """
    Find the length of a colspan.

    Parameters
    ----------
    span : list of lists of int
        The [row, column] pairs that make up the span

    Returns
    -------
    columns : int
        The number of columns included in the span

    Example
    -------
    Consider this table::

        +------+------------------+
        | foo  | bar              |
        +------+--------+---------+
        | spam | goblet | berries |
        +------+--------+---------+

    ::

        >>> span = [[0, 1], [0, 2]]
        >>> print(get_span_column_count(span))
        2
    """
    columns = 1
    first_column = span[0][1]

    for i in range(len(span)):
        if span[i][1] > first_column:
            columns += 1
            first_column = span[i][1]

    return columns
Find the length of a colspan. Parameters ---------- span : list of lists of int The [row, column] pairs that make up the span Returns ------- columns : int The number of columns included in the span Example ------- Consider this table:: +------+------------------+ | foo | bar | +------+--------+---------+ | spam | goblet | berries | +------+--------+---------+ :: >>> span = [[0, 1], [0, 2]] >>> print(get_span_column_count(span)) 2
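A complementary example to the one in the docstring: a span that covers two rows but only one column still counts a single column, since only distinct column indices are tallied.

span = [[0, 0], [1, 0]]   # two rows, same column
print(get_span_column_count(span))   # 1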
def verify(expr, params=None):
    """
    Determine if expression can be successfully translated to execute
    on MapD
    """
    try:
        compile(expr, params=params)
        return True
    except com.TranslationError:
        return False
Determine if expression can be successfully translated to execute on MapD
def flavor_list_paged(request, is_public=True, get_extras=False, marker=None,
                      paginate=False, sort_key="name", sort_dir="desc",
                      reversed_order=False):
    """Get the list of available instance sizes (flavors)."""
    has_more_data = False
    has_prev_data = False
    if paginate:
        if reversed_order:
            sort_dir = 'desc' if sort_dir == 'asc' else 'asc'
        page_size = utils.get_page_size(request)
        flavors = _nova.novaclient(request).flavors.list(is_public=is_public,
                                                         marker=marker,
                                                         limit=page_size + 1,
                                                         sort_key=sort_key,
                                                         sort_dir=sort_dir)
        flavors, has_more_data, has_prev_data = update_pagination(
            flavors, page_size, marker, reversed_order)
    else:
        flavors = _nova.novaclient(request).flavors.list(is_public=is_public)

    if get_extras:
        for flavor in flavors:
            flavor.extras = flavor_get_extras(request, flavor.id, True, flavor)

    return (flavors, has_more_data, has_prev_data)
Get the list of available instance sizes (flavors).