Dataset columns:
code: string, lengths 75 to 104k characters
docstring: string, lengths 1 to 46.9k characters
def green(fn=None, consume_green_mode=True):
    """Make a function green. Can be used as a decorator."""

    def decorator(fn):
        @wraps(fn)
        def greener(obj, *args, **kwargs):
            args = (obj,) + args
            wait = kwargs.pop('wait', None)
            timeout = kwargs.pop('timeout', None)
            access = kwargs.pop if consume_green_mode else kwargs.get
            green_mode = access('green_mode', None)
            executor = get_object_executor(obj, green_mode)
            return executor.run(fn, args, kwargs, wait=wait, timeout=timeout)
        return greener

    if fn is None:
        return decorator
    return decorator(fn)
Make a function green. Can be used as a decorator.
def get(self, measurementId):
    """
    Analyses the measurement with the given parameters
    :param measurementId:
    :return:
    """
    logger.info('Loading raw data for ' + measurementId)
    measurement = self._measurementController.getMeasurement(measurementId, MeasurementStatus.COMPLETE)
    if measurement is not None:
        if measurement.inflate():
            data = {
                name: {
                    'raw': {
                        'x': self._jsonify(data.raw('x')),
                        'y': self._jsonify(data.raw('y')),
                        'z': self._jsonify(data.raw('z'))
                    },
                    'vibration': {
                        'x': self._jsonify(data.vibration('x')),
                        'y': self._jsonify(data.vibration('y')),
                        'z': self._jsonify(data.vibration('z'))
                    },
                    'tilt': {
                        'x': self._jsonify(data.tilt('x')),
                        'y': self._jsonify(data.tilt('y')),
                        'z': self._jsonify(data.tilt('z'))
                    }
                }
                for name, data in measurement.data.items()
            }
            return data, 200
        else:
            return None, 404
    else:
        return None, 404
Analyses the measurement with the given parameters :param measurementId: :return:
def _handle_sub_action(self, input_dict, handler):
    """
    Handles resolving replacements in the Sub action based on the handler that is passed as an input.

    :param input_dict: Dictionary to be resolved
    :param supported_values: One of several different objects that contain the supported values that
        need to be changed. See each method above for specifics on these objects.
    :param handler: handler that is specific to each implementation.
    :return: Resolved value of the Sub dictionary
    """
    if not self.can_handle(input_dict):
        return input_dict

    key = self.intrinsic_name
    sub_value = input_dict[key]

    input_dict[key] = self._handle_sub_value(sub_value, handler)

    return input_dict
Handles resolving replacements in the Sub action based on the handler that is passed as an input. :param input_dict: Dictionary to be resolved :param supported_values: One of several different objects that contain the supported values that need to be changed. See each method above for specifics on these objects. :param handler: handler that is specific to each implementation. :return: Resolved value of the Sub dictionary
def search(self, **kwargs):
    """
    Method to search neighbors based on extends search.

    :param search: Dict containing QuerySets to find neighbors.
    :param include: Array containing fields to include on response.
    :param exclude: Array containing fields to exclude on response.
    :param fields: Array containing fields to override default fields.
    :param kind: Determine if result will be detailed ('detail') or basic ('basic').
    :return: Dict containing neighbors
    """
    return super(ApiV4Neighbor, self).get(self.prepare_url(
        'api/v4/neighbor/', kwargs))
Method to search neighbors based on extends search. :param search: Dict containing QuerySets to find neighbors. :param include: Array containing fields to include on response. :param exclude: Array containing fields to exclude on response. :param fields: Array containing fields to override default fields. :param kind: Determine if result will be detailed ('detail') or basic ('basic'). :return: Dict containing neighbors
def namedb_create(path, genesis_block):
    """
    Create a sqlite3 db at the given path.
    Create all the tables and indexes we need.
    """
    global BLOCKSTACK_DB_SCRIPT

    if os.path.exists(path):
        raise Exception("Database '%s' already exists" % path)

    lines = [l + ";" for l in BLOCKSTACK_DB_SCRIPT.split(";")]
    con = sqlite3.connect(path, isolation_level=None, timeout=2**30)

    for line in lines:
        db_query_execute(con, line, ())

    con.row_factory = namedb_row_factory

    # create genesis block
    namedb_create_token_genesis(con, genesis_block['rows'], genesis_block['history'])
    return con
Create a sqlite3 db at the given path. Create all the tables and indexes we need.
def generate_id_name_map(sdk, reverse=False): """ Generate the ID-NAME map dict :param sdk: CloudGenix API constructor :param reverse: Generate reverse name-> ID map as well, return tuple with both. :return: ID Name dictionary """ global_id_name_dict = {} global_name_id_dict = {} # system struct system_list = [] # Global lookup dictionary for sub items if_id_to_name = {} global_swi_id = {} global_ln_id = {} swi_to_wan_network_dict = {} swi_to_site_dict = {} wan_network_to_swi_dict = {} all_anynets = {} all_vpns = {} swi_id_name_dict = {} site_swi_dict = {} path_id_to_name = {} vpn_id_to_anynet_id = {} # Create xlation dicts and lists. logger.info("Caching Operators..") id_operator_dict, operator_id_dict = operators_to_name_dict(sdk) if id_operator_dict: global_id_name_dict.update(id_operator_dict) global_name_id_dict.update(operator_id_dict) if operator_id_dict: global_name_id_dict.update(operator_id_dict) logger.info("Caching Sites..") id_site_dict, site_id_dict, site_id_list, site_info_dict = siteid_to_name_dict(sdk) global_id_name_dict.update(id_site_dict) global_name_id_dict.update(site_id_dict) logger.info("Caching Elements..") id_element_dict, element_id_dict, element_site_dict, element_id_list = elements_to_name_dict(sdk) global_id_name_dict.update(id_element_dict) global_name_id_dict.update(element_id_dict) logger.info("Caching WAN Networks..") id_wannetwork_dict, name_wannetwork_id_dict, wannetwork_id_list, wannetwork_type_dict = wan_network_dicts(sdk) global_id_name_dict.update(id_wannetwork_dict) global_name_id_dict.update(name_wannetwork_id_dict) logger.info("Caching Circuit Catagories..") id_circuit_categories, name_circuit_categories = circuit_categories_dicts(sdk) global_id_name_dict.update(id_circuit_categories) global_name_id_dict.update(name_circuit_categories) logger.info("Caching Network Contexts..") id_network_contexts, name_circuit_contexts = network_context_dicts(sdk) global_id_name_dict.update(id_network_contexts) global_name_id_dict.update(name_circuit_contexts) logger.info("Caching Appdefs..") id_appdef_dict, name_appdef_dict, appdef_id_list = appdefs_to_name_dict(sdk) global_id_name_dict.update(id_appdef_dict) global_name_id_dict.update(name_appdef_dict) logger.info("Caching Policysets..") id_policyset_dict, name_policyset_dict, policyset_id_list = policyset_to_name_dict(sdk) global_id_name_dict.update(id_policyset_dict) global_name_id_dict.update(name_policyset_dict) logger.info("Caching Security Policysets..") id_securitypolicyset_dict, name_securitypolicyset_dict, \ securitypolicyset_id_list = securitypolicyset_to_name_dict(sdk) global_id_name_dict.update(id_securitypolicyset_dict) global_name_id_dict.update(name_securitypolicyset_dict) logger.info("Caching Security Zones..") id_securityzone_dict, securityzone_id_dict, securityzone_id_list = securityzone_to_name_dict(sdk) global_id_name_dict.update(id_securityzone_dict) global_name_id_dict.update(securityzone_id_dict) id_interface_dict = {} logger.info("Filling Network Site->Element->Interface table..") for site in site_id_list: elements = [] swi_id_dict = {} ln_id_dict = {} # enumerate elements for element in element_id_list: # Is this element bound to a site? site_in = element_site_dict.get(element, None) # if it is bound, and bound to this site, add to list. 
if site_in and site_in == site: # Query interfaces interfaces_list, if_id_to_name_item, if_name_to_id_item, _, \ _, if_id_data_entry = interface_query(site, element, sdk) # add the element to the list elements.append({ 'id': element, 'name': id_element_dict.get(element, ""), 'interfaces': interfaces_list }) # add the if id name mapping to the main dict if_id_to_name.update(if_id_to_name_item) # update grand interface list id_interface_dict.update(if_id_data_entry) system_list.append({ 'id': site, 'name': id_site_dict.get(site, ""), 'elements': elements }) # query Site WAN Interface info resp = sdk.get.waninterfaces(site) swi_status = resp.cgx_status swi_query = resp.cgx_content if swi_status: # iterate all the site wan interfaces for current_swi in swi_query.get('items', []): # get the WN bound to the SWI. wan_network_id = current_swi.get('network_id', "") swi_id = current_swi.get('id', "") name = current_swi.get('name') if name and swi_id: swi_id_name_dict[swi_id] = name elif swi_id and wan_network_id: # Handle SWI with no name. wan_network_name = id_wannetwork_dict.get(wan_network_id, wan_network_id) swi_id_name_dict[swi_id] = "Circuit to {0}".format(wan_network_name) if swi_id: # update SWI -> Site xlation dict swi_to_site_dict[swi_id] = site # get the SWIs if wan_network_id and swi_id: logger.debug('SWI_ID = SITE: {0} = {1}'.format(swi_id, site)) # query existing wan_network_to_swi dict if entry exists. existing_swi_list = wan_network_to_swi_dict.get(wan_network_id, []) # update swi -> WN xlate dict swi_to_wan_network_dict[swi_id] = wan_network_id # update WN -> swi xlate dict existing_swi_list.append(swi_id) wan_network_to_swi_dict[wan_network_id] = existing_swi_list # add to global global_swi_id.update(swi_id_name_dict) # query LAN Network info resp = sdk.get.lannetworks(site) ln_status = resp.cgx_status ln_query = resp.cgx_content if ln_status: for ln in ln_query.get('items'): ln_id = ln.get('id') ln_name = ln.get('name') if ln_id and ln_name: ln_id_dict[ln_id] = ln_name # add to global global_ln_id.update(ln_id_dict) logger.info("Loading VPN topology information for {0} sites, please wait...".format(len(site_id_list))) # add all interface IDs # note - can't reliably make reverse name to ID items here, as they are not global. global_id_name_dict.update(if_id_to_name) global_id_name_dict.update(global_swi_id) global_id_name_dict.update(global_ln_id) for site in site_id_list: site_swi_list = [] query = { "type": "basenet", "nodes": [ site ] } status = False rest_call_retry = 0 resp = sdk.post.topology(query) status = resp.cgx_status topology = resp.cgx_content if status and topology: # iterate topology. We need to iterate all of the matching SWIs, and existing anynet connections (sorted). logger.debug("TOPOLOGY: {0}".format(json.dumps(topology, indent=4))) for link in topology.get('links', []): link_type = link.get('type', "") # if an anynet link (SWI to SWI) if link_type in ["anynet", "public-anynet", "private-anynet"]: # vpn record, check for uniqueness. 
# 4.4.1 source_swi = link.get('source_wan_if_id') if not source_swi: # 4.3.x compatibility source_swi = link.get('source_wan_path_id') if source_swi: link['source_wan_if_id'] = source_swi # 4.4.1 dest_swi = link.get('target_wan_if_id') if not dest_swi: # 4.3.x compatibility dest_swi = link.get('target_wan_path_id') if dest_swi: link['target_wan_if_id'] = dest_swi # create anynet lookup key # anynet_lookup_key = "_".join(sorted([source_swi, dest_swi])) # use path ID anynet_lookup_key = link.get('path_id') if not all_anynets.get(anynet_lookup_key, None): # path is not in current anynets, add all_anynets[anynet_lookup_key] = link else: # path_id already seen. pass elif link_type in ['vpn']: vpn_lookup_key = link.get('path_id') if not all_vpns.get(vpn_lookup_key, None): # path is not in VPNs, add. all_vpns[vpn_lookup_key] = link else: # Bail out logger.info("ERROR: could not query site ID {0}. Continuing.".format(site)) # update all_anynets with site info. Can't do this above, because xlation table not finished when needed. for anynet_key, link in all_anynets.items(): # 4.4.1 source_swi = link.get('source_wan_if_id') if not source_swi: # 4.3.x compatibility source_swi = link.get('source_wan_path_id') # 4.4.1 dest_swi = link.get('target_wan_if_id') if not dest_swi: # 4.3.x compatibility dest_swi = link.get('target_wan_path_id') source_site_id = swi_to_site_dict.get(source_swi, 'UNKNOWN (Unable to map SWI to Site ID)') target_site_id = swi_to_site_dict.get(dest_swi, 'UNKNOWN (Unable to map SWI to Site ID)') source_wan_network_name = link.get("source_wan_network") target_wan_network_name = link.get("target_wan_network") # update struct in case it's needed later link['source_site_id'] = source_site_id link['target_site_id'] = target_site_id # get names. source_site_name = id_site_dict.get(source_site_id, source_site_id) target_site_name = id_site_dict.get(target_site_id, target_site_id) source_swi_name = swi_id_name_dict.get(source_swi, source_swi) target_swi_name = swi_id_name_dict.get(dest_swi, dest_swi) # build text map. anynet_text = "{0} ('{1}' via '{2}') <-> ('{4}' via '{5}') {3}".format( source_site_name, source_wan_network_name, source_swi_name, target_site_name, target_wan_network_name, target_swi_name, ) # update pathid to name dict path_id_to_name[anynet_key] = anynet_text logger.info("SWI -> WN xlate ({0}): {1}".format(len(swi_to_wan_network_dict), json.dumps(swi_to_wan_network_dict, indent=4))) logger.info("All Anynets ({0}): {1}".format(len(all_anynets), json.dumps(all_anynets, indent=4))) logger.info("All VPNs ({0}): {1}".format(len(all_vpns), json.dumps(all_vpns, indent=4))) logger.info("Site -> SWI construct ({0}): {1}".format(len(site_swi_dict), json.dumps(site_swi_dict, indent=4))) logger.info("WN to SWI xlate ({0}): {1}".format(len(wan_network_to_swi_dict), json.dumps(wan_network_to_swi_dict, indent=4))) logger.info("SWI -> SITE xlate ({0}): {1}".format(len(swi_to_site_dict), json.dumps(swi_to_site_dict, indent=4))) # create VPN to anynet maps AND update text mappings. 
for vpn_key, link in all_vpns.items(): anynet_link_id = link.get("anynet_link_id") source_element_id = link.get("source_node_id") target_element_id = link.get("target_node_id") # update vpn -> anynet table vpn_id_to_anynet_id[vpn_key] = anynet_link_id # get names source_element_name = id_element_dict.get(source_element_id, source_element_id) target_element_name = id_element_dict.get(target_element_id, target_element_id) anynet_text = path_id_to_name.get(anynet_link_id, anynet_link_id) vpn_text = "[{0}] : {1} : [{2}]".format( source_element_name, anynet_text, target_element_name ) # update path mapping path_id_to_name[vpn_key] = vpn_text # done, update global global_id_name_dict.update(path_id_to_name) if reverse: # return both id_name and what we can get of name_id. return global_id_name_dict, global_name_id_dict return global_id_name_dict
Generate the ID-NAME map dict :param sdk: CloudGenix API constructor :param reverse: Generate reverse name-> ID map as well, return tuple with both. :return: ID Name dictionary
def qs(schema):
    """ Decorate a function with a query string schema. """
    def wrapper(func):
        setattr(func, QS, schema)
        return func
    return wrapper
Decorate a function with a query string schema.
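A minimal, self-contained usage sketch of the decorator above. The real module-level QS constant is not shown in the source, so a stand-in name is defined here, and USER_QUERY_SCHEMA is a made-up schema object.

# Stand-in for the module-level constant used by the decorator; the real
# attribute name is not given in the source.
QS = '_query_string_schema'

def qs(schema):
    def wrapper(func):
        setattr(func, QS, schema)
        return func
    return wrapper

USER_QUERY_SCHEMA = {'page': int, 'per_page': int}   # made-up schema

@qs(USER_QUERY_SCHEMA)
def list_users(request):
    return []

# The decorator only stores the schema on the function, so a framework can
# later retrieve it with getattr().
assert getattr(list_users, QS) is USER_QUERY_SCHEMA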
def fractional(value):
    '''
    There will be some cases where one might not want to show ugly decimal
    places for floats and decimals.
    This function returns a human readable fractional number in form of
    fractions and mixed fractions.
    Pass in a string, or a number or a float, and this function returns a
    string representation of a fraction or whole number or a mixed fraction
    Examples:
        fractional(0.3) will return '1/3'
        fractional(1.3) will return '1 3/10'
        fractional(float(1/3)) will return '1/3'
        fractional(1) will return '1'
    This will always return a string.
    '''
    try:
        number = float(value)
    except (TypeError, ValueError):
        return value
    wholeNumber = int(number)
    frac = Fraction(number - wholeNumber).limit_denominator(1000)
    numerator = frac._numerator
    denominator = frac._denominator
    if wholeNumber and not numerator and denominator == 1:
        # this means that an integer was passed in (or variants of that integer like 1.0000)
        return '%.0f' % wholeNumber
    elif not wholeNumber:
        return '%.0f/%.0f' % (numerator, denominator)
    else:
        return '%.0f %.0f/%.0f' % (wholeNumber, numerator, denominator)
There will be some cases where one might not want to show ugly decimal places for floats and decimals. This function returns a human readable fractional number in form of fractions and mixed fractions. Pass in a string, or a number or a float, and this function returns a string representation of a fraction or whole number or a mixed fraction Examples: fractional(0.3) will return '1/3' fractional(1.3) will return '1 3/10' fractional(float(1/3)) will return '1/3' fractional(1) will return '1' This will always return a string.
def _GetDiscoveryDocFromFlags(args):
    """Get the discovery doc from flags."""
    if args.discovery_url:
        try:
            return util.FetchDiscoveryDoc(args.discovery_url)
        except exceptions.CommunicationError:
            raise exceptions.GeneratedClientError(
                'Could not fetch discovery doc')

    infile = os.path.expanduser(args.infile) or '/dev/stdin'
    with io.open(infile, encoding='utf8') as f:
        return json.loads(util.ReplaceHomoglyphs(f.read()))
Get the discovery doc from flags.
def get_trace(self, frame, tb): """Get a dict of the traceback for wdb.js use""" import linecache frames = [] stack, _ = self.get_stack(frame, tb) current = 0 for i, (stack_frame, lno) in enumerate(stack): code = stack_frame.f_code filename = code.co_filename or '<unspecified>' line = None if filename[0] == '<' and filename[-1] == '>': line = get_source_from_byte_code(code) fn = filename else: fn = os.path.abspath(filename) if not line: linecache.checkcache(filename) line = linecache.getline(filename, lno, stack_frame.f_globals) if not line: line = self.compile_cache.get(id(code), '') line = to_unicode_string(line, filename) line = line and line.strip() startlnos = dis.findlinestarts(code) lastlineno = list(startlnos)[-1][1] if frame == stack_frame: current = i frames.append({ 'file': fn, 'function': code.co_name, 'flno': code.co_firstlineno, 'llno': lastlineno, 'lno': lno, 'code': line, 'level': i, 'current': frame == stack_frame }) # While in exception always put the context to the top return stack, frames, current
Get a dict of the traceback for wdb.js use
def setref(graphtable=None, comptable=None, thermtable=None, area=None, waveset=None): """Set default graph and component tables, primary area, and wavelength set. This is similar to setting ``refdata`` in IRAF STSDAS SYNPHOT. If all parameters set to `None`, they are reverted to software default. If any of the parameters are not `None`, they are set to desired values while the rest (if any) remain at current setting. Parameters ---------- graphtable, comptable, thermtable : str or `None` Graph, component, and thermal table names, respectively, for `~pysynphot.observationmode` throughput look-up. Do not use "*" wildcard. area : float or `None` Telescope collecting area, i.e., the primary mirror, in :math:`\\textnormal{cm}^{2}`. waveset : tuple or `None` Parameters for :func:`set_default_waveset` as follow: * ``(minwave, maxwave, num)`` - This assumes log scale. * ``(minwave, maxwave, num, 'log')`` * ``(minwave, maxwave, num, 'linear')`` Raises ------ ValueError Invalid ``waveset`` parameters. """ global GRAPHTABLE, COMPTABLE, THERMTABLE, PRIMARY_AREA, GRAPHDICT, COMPDICT, THERMDICT GRAPHDICT = {} COMPDICT = {} THERMDICT = {} #Check for all None, which means reset kwds=set([graphtable,comptable,thermtable,area,waveset]) if kwds == set([None]): #then we should reset everything. _set_default_refdata() return #Otherwise, check them all separately if graphtable is not None: GRAPHTABLE = irafconvert(graphtable) if comptable is not None: COMPTABLE = irafconvert(comptable) if thermtable is not None: THERMTABLE = irafconvert(thermtable) #Area is a bit different: if area is not None: PRIMARY_AREA = area if waveset is not None: if len(waveset) not in (3, 4): raise ValueError('waveset tuple must contain 3 or 4 values') minwave = waveset[0] maxwave = waveset[1] num = waveset[2] if len(waveset) == 3: log = True elif len(waveset) == 4: if waveset[3].lower() == 'log': log = True elif waveset[3].lower() == 'linear': log = False else: raise ValueError('fourth waveset option must be "log" or "linear"') set_default_waveset(minwave,maxwave,num,log=log) #That's it. return
Set default graph and component tables, primary area, and wavelength set. This is similar to setting ``refdata`` in IRAF STSDAS SYNPHOT. If all parameters set to `None`, they are reverted to software default. If any of the parameters are not `None`, they are set to desired values while the rest (if any) remain at current setting. Parameters ---------- graphtable, comptable, thermtable : str or `None` Graph, component, and thermal table names, respectively, for `~pysynphot.observationmode` throughput look-up. Do not use "*" wildcard. area : float or `None` Telescope collecting area, i.e., the primary mirror, in :math:`\\textnormal{cm}^{2}`. waveset : tuple or `None` Parameters for :func:`set_default_waveset` as follow: * ``(minwave, maxwave, num)`` - This assumes log scale. * ``(minwave, maxwave, num, 'log')`` * ``(minwave, maxwave, num, 'linear')`` Raises ------ ValueError Invalid ``waveset`` parameters.
def _resolve_input(variable, variable_name, config_key, config):
    """
    Resolve input entered as option values with config values

    If option values are provided (passed in as `variable`), then they are
    returned unchanged. If `variable` is None, then we first look for a config
    value to use. If no config value is found, then raise an error.

    Parameters
    ----------
    variable: string or numeric
        value passed in as input by the user
    variable_name: string
        name of the variable, for clarity in the error message
    config_key: string
        key in the config whose value could be used to fill in the variable
    config: ConfigParser
        contains keys/values in .apparatecfg
    """
    if variable is None:
        try:
            variable = config.get(PROFILE, config_key)
        except NoOptionError:
            raise ValueError((
                'no {} found - either provide a command line argument or '
                'set up a default by running `apparate configure`'
            ).format(variable_name))
    return variable
Resolve input entered as option values with config values If option values are provided (passed in as `variable`), then they are returned unchanged. If `variable` is None, then we first look for a config value to use. If no config value is found, then raise an error. Parameters ---------- variable: string or numeric value passed in as input by the user variable_name: string name of the variable, for clarity in the error message config_key: string key in the config whose value could be used to fill in the variable config: ConfigParser contains keys/values in .apparatecfg
def view_packages(self, *args):
    """:View slackbuild packages with version and arch
    args[0] package color
    args[1] package
    args[2] version
    args[3] arch
    """
    ver = GetFromInstalled(args[1]).version()
    print(" {0}{1}{2}{3} {4}{5} {6}{7}{8}{9}{10}{11:>11}{12}".format(
        args[0], args[1] + ver, self.meta.color["ENDC"],
        " " * (23 - len(args[1] + ver)),
        args[2], " " * (18 - len(args[2])),
        args[3], " " * (15 - len(args[3])),
        "", "", "SBo", "", "").rstrip())
:View slackbuild packages with version and arch args[0] package color args[1] package args[2] version args[3] arch
def _mark_image_file_deleted(cls, mapper, connection, target):
    """When the session flushes, marks images as deleted.
    The files of this marked images will be actually deleted
    in the image storage when the ongoing transaction succeeds.
    If it fails the :attr:`_deleted_images` queue will be just empty.
    """
    cls._deleted_images.add((target, get_current_store()))
When the session flushes, marks images as deleted. The files of this marked images will be actually deleted in the image storage when the ongoing transaction succeeds. If it fails the :attr:`_deleted_images` queue will be just empty.
def pivot(self):
    """ transposes rows and columns """
    self.op_data = [list(i) for i in zip(*self.ip_data)]
transposes rows and columns
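A tiny self-contained sketch of the transpose this method performs. The owning class is not shown in the source, so _Table below is a stand-in that only assumes the ip_data/op_data attributes.

class _Table:
    """Stand-in for the class that owns pivot(); only ip_data/op_data are assumed."""
    def __init__(self, rows):
        self.ip_data = rows
        self.op_data = None

    def pivot(self):
        # zip(*rows) pairs up the i-th element of every row, i.e. the columns
        self.op_data = [list(i) for i in zip(*self.ip_data)]

t = _Table([[1, 2, 3],
            [4, 5, 6]])
t.pivot()
print(t.op_data)   # [[1, 4], [2, 5], [3, 6]]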
def fill_view(self, view):
    """
    Fill this histogram from a view of another histogram
    """
    other = view.hist
    _other_x_center = other.axis(0).GetBinCenter
    _other_y_center = other.axis(1).GetBinCenter
    _other_z_center = other.axis(2).GetBinCenter
    _other_get = other.GetBinContent
    _other_get_bin = super(_HistBase, other).GetBin
    other_sum_w2 = other.GetSumw2()
    _other_sum_w2_at = other_sum_w2.At
    _find = self.FindBin
    sum_w2 = self.GetSumw2()
    _sum_w2_at = sum_w2.At
    _sum_w2_setat = sum_w2.SetAt
    _set = self.SetBinContent
    _get = self.GetBinContent
    for x, y, z in view.points:
        idx = _find(
            _other_x_center(x),
            _other_y_center(y),
            _other_z_center(z))
        other_idx = _other_get_bin(x, y, z)
        _set(idx, _get(idx) + _other_get(other_idx))
        _sum_w2_setat(
            _sum_w2_at(idx) + _other_sum_w2_at(other_idx), idx)
Fill this histogram from a view of another histogram
def total_branches(self):
    """How many total branches are there?"""
    exit_counts = self.parser.exit_counts()
    return sum([count for count in exit_counts.values() if count > 1])
How many total branches are there?
def call_fn(fn: TransitionOperator, args: Union[Tuple[Any], Any]) -> Any:
    """Calls a transition operator with args, unpacking args if its a sequence.

    Args:
      fn: A `TransitionOperator`.
      args: Arguments to `fn`

    Returns:
      ret: Return value of `fn`.
    """
    if isinstance(args, (list, tuple)) and not mcmc_util.is_namedtuple_like(args):
        args = args  # type: Tuple[Any]
        return fn(*args)
    else:
        return fn(args)
Calls a transition operator with args, unpacking args if its a sequence. Args: fn: A `TransitionOperator`. args: Arguments to `fn` Returns: ret: Return value of `fn`.
def identify_needed_data(curr_exe_job, link_job_instance=None): """ This function will identify the length of data that a specific executable needs to analyse and what part of that data is valid (ie. inspiral doesn't analyse the first or last 64+8s of data it reads in). In addition you can supply a second job instance to "link" to, which will ensure that the two jobs will have a one-to-one correspondence (ie. one template bank per one matched-filter job) and the corresponding jobs will be "valid" at the same times. Parameters ----------- curr_exe_job : Job An instance of the Job class that has a get_valid times method. link_job_instance : Job instance (optional), Coordinate the valid times with another executable. Returns -------- dataLength : float The amount of data (in seconds) that each instance of the job must read in. valid_chunk : glue.segment.segment The times within dataLength for which that jobs output **can** be valid (ie. for inspiral this is (72, dataLength-72) as, for a standard setup the inspiral job cannot look for triggers in the first 72 or last 72 seconds of data read in.) valid_length : float The maximum length of data each job can be valid for. If not using link_job_instance this is abs(valid_segment), but can be smaller than that if the linked job only analyses a small amount of data (for e.g.). """ # Set up the condorJob class for the current executable data_lengths, valid_chunks = curr_exe_job.get_valid_times() # Begin by getting analysis start and end, and start and end of time # that the output file is valid for valid_lengths = [abs(valid_chunk) for valid_chunk in valid_chunks] if link_job_instance: # FIXME: Should we remove this, after testing is complete?? # EURGHH! What we are trying to do here is, if this option is given, # line up the template bank and inspiral jobs so that there is one # template bank for each inspiral job. This is proving a little messy # and probably still isn't perfect. # What data does the linked exe use? link_data_length, link_valid_chunk = link_job_instance.get_valid_times() if len(link_data_length) > 1 or len(valid_lengths) > 1: raise ValueError('Linking job instances for tiling is not supported' ' between jobs that allow variable tile size') # What data is lost at start of both jobs? Take the maximum. start_data_loss = max(valid_chunks[0][0], link_valid_chunk[0][0]) # What data is lost at end of both jobs? Take the maximum. end_data_loss = max(data_lengths[0] - valid_chunks[0][1],\ link_data_length[0] - link_valid_chunk[0][1]) # Calculate valid_segments for both jobs based on the combined data # loss. valid_chunks[0] = segments.segment(start_data_loss, \ data_lengths[0] - end_data_loss) link_valid_chunk = segments.segment(start_data_loss, \ link_data_length[0] - end_data_loss) # The maximum valid length should be the minimum of the two link_valid_length = abs(link_valid_chunk) # Which one is now longer? Use this is valid_length if link_valid_length < valid_lengths[0]: valid_lengths[0] = link_valid_length return data_lengths, valid_chunks, valid_lengths
This function will identify the length of data that a specific executable needs to analyse and what part of that data is valid (ie. inspiral doesn't analyse the first or last 64+8s of data it reads in). In addition you can supply a second job instance to "link" to, which will ensure that the two jobs will have a one-to-one correspondence (ie. one template bank per one matched-filter job) and the corresponding jobs will be "valid" at the same times. Parameters ----------- curr_exe_job : Job An instance of the Job class that has a get_valid times method. link_job_instance : Job instance (optional), Coordinate the valid times with another executable. Returns -------- dataLength : float The amount of data (in seconds) that each instance of the job must read in. valid_chunk : glue.segment.segment The times within dataLength for which that jobs output **can** be valid (ie. for inspiral this is (72, dataLength-72) as, for a standard setup the inspiral job cannot look for triggers in the first 72 or last 72 seconds of data read in.) valid_length : float The maximum length of data each job can be valid for. If not using link_job_instance this is abs(valid_segment), but can be smaller than that if the linked job only analyses a small amount of data (for e.g.).
def find_out_var(self, varnames=[]):
    """ This function will read the standard out of the program, catch
    variables and return the values
    EG. #varname=value
    """
    if self.wdir != '':
        stdout = "%s/%s" % (self.wdir, self.stdout)
    else:
        stdout = self.stdout
    response = [None] * len(varnames)
    # First we check if the file we want to print does exists
    if os.path.exists(stdout):
        with open_(stdout, 'r') as f:
            for line in f:
                if '=' in line:
                    var = line.strip('#').split('=')
                    value = var[1].strip()
                    var = var[0].strip()
                    if var in varnames:
                        response[varnames.index(var)] = value
    else:
        # FILE DOESN'T EXIST
        debug.log("Error: The stdout file %s does not exist!" % (stdout))
    return response
This function will read the standard out of the program, catch variables and return the values EG. #varname=value
def handle(self):
    """
    Executes the command.
    """
    database = self.option("database")

    self.resolver.set_default_connection(database)

    repository = DatabaseMigrationRepository(self.resolver, "migrations")
    migrator = Migrator(repository, self.resolver)

    if not migrator.repository_exists():
        return self.error("No migrations found")

    self._prepare_database(migrator, database)

    path = self.option("path")
    if path is None:
        path = self._get_migration_path()

    ran = migrator.get_repository().get_ran()

    migrations = []
    for migration in migrator._get_migration_files(path):
        if migration in ran:
            migrations.append(["<fg=cyan>%s</>" % migration, "<info>Yes</>"])
        else:
            migrations.append(["<fg=cyan>%s</>" % migration, "<fg=red>No</>"])

    if migrations:
        table = self.table(["Migration", "Ran?"], migrations)
        table.render()
    else:
        return self.error("No migrations found")

    for note in migrator.get_notes():
        self.line(note)
Executes the command.
def rate_of_return(period_ret, base_period):
    """
    Convert returns to 'one_period_len' rate of returns: that is the value the
    returns would have every 'one_period_len' if they had grown at a steady rate

    Parameters
    ----------
    period_ret: pd.DataFrame
        DataFrame containing returns values with column headings representing
        the return period.
    base_period: string
        The base period length used in the conversion
        It must follow pandas.Timedelta constructor format (e.g. '1 days',
        '1D', '30m', '3h', '1D1h', etc)

    Returns
    -------
    pd.DataFrame
        DataFrame in same format as input but with 'one_period_len' rate of
        returns values.
    """
    period_len = period_ret.name
    conversion_factor = (pd.Timedelta(base_period) /
                         pd.Timedelta(period_len))
    return period_ret.add(1).pow(conversion_factor).sub(1)
Convert returns to 'one_period_len' rate of returns: that is the value the returns would have every 'one_period_len' if they had grown at a steady rate Parameters ---------- period_ret: pd.DataFrame DataFrame containing returns values with column headings representing the return period. base_period: string The base period length used in the conversion It must follow pandas.Timedelta constructor format (e.g. '1 days', '1D', '30m', '3h', '1D1h', etc) Returns ------- pd.DataFrame DataFrame in same format as input but with 'one_period_len' rate of returns values.
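A small worked sketch of the conversion arithmetic used above, with made-up numbers. It uses a pandas Series, which also carries the .name attribute the function reads for the period length.

import pandas as pd

# 5-day cumulative returns for three assets (made-up numbers); the Series
# name carries the return period, as the function above expects.
five_day = pd.Series([0.10, -0.05, 0.02], name='5D')

factor = pd.Timedelta('1D') / pd.Timedelta('5D')    # 0.2
one_day_rate = five_day.add(1).pow(factor).sub(1)

# e.g. (1 + 0.10) ** 0.2 - 1 is about 0.0192, i.e. roughly 1.9% per day
# compounds to 10% over 5 days.
print(one_day_rate)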
def read(self, size=-1):
    """! @brief Return bytes read from the connection."""
    if self.connected is None:
        return None

    # Extract requested amount of data from the read buffer.
    data = self._get_input(size)
    return data
! @brief Return bytes read from the connection.
def find_or_graft(self, board):
    """Build a tree with each level corresponding to a fixed position on board.

    A path of tiles is stored for each board. If any two boards have the same
    path, then they are the same board. If there is any difference, a new
    branch will be created to store that path.

    Return: True if board already exists in the tree; False otherwise
    """
    is_duplicate_board = True  # assume same until find a difference
    # compare each position
    node = self
    for p, new_tile in board.positions_with_tile():
        found_tile = False  # assume no tile in same position until found
        for child in node.children:
            if child.tile == new_tile:
                # same tile found in this position --> continue this branch
                node = child
                found_tile = True
                break
        if found_tile:
            pass  # go on to the next position
        else:
            # different tile --> start new branch and mark not exact match
            child = _DuplicateTree(new_tile)
            node.graft_child(child)
            node = child
            is_duplicate_board = False  # this will get set many times. ok
    return is_duplicate_board
Build a tree with each level corresponding to a fixed position on board. A path of tiles is stored for each board. If any two boards have the same path, then they are the same board. If there is any difference, a new branch will be created to store that path. Return: True if board already exists in the tree; False otherwise
def InitUI(self): """ initialize window """ self.main_sizer = wx.BoxSizer(wx.VERTICAL) self.init_grid_headers() self.grid_builder = GridBuilder(self.er_magic, self.grid_type, self.grid_headers, self.panel, self.parent_type) self.grid = self.grid_builder.make_grid() self.grid.InitUI() ## Column management buttons self.add_cols_button = wx.Button(self.panel, label="Add additional columns", name='add_cols_btn') self.Bind(wx.EVT_BUTTON, self.on_add_cols, self.add_cols_button) self.remove_cols_button = wx.Button(self.panel, label="Remove columns", name='remove_cols_btn') self.Bind(wx.EVT_BUTTON, self.on_remove_cols, self.remove_cols_button) ## Row management buttons self.remove_row_button = wx.Button(self.panel, label="Remove last row", name='remove_last_row_btn') self.Bind(wx.EVT_BUTTON, self.on_remove_row, self.remove_row_button) many_rows_box = wx.BoxSizer(wx.HORIZONTAL) self.add_many_rows_button = wx.Button(self.panel, label="Add row(s)", name='add_many_rows_btn') self.rows_spin_ctrl = wx.SpinCtrl(self.panel, value='1', initial=1, name='rows_spin_ctrl') many_rows_box.Add(self.add_many_rows_button, flag=wx.ALIGN_CENTRE) many_rows_box.Add(self.rows_spin_ctrl) self.Bind(wx.EVT_BUTTON, self.on_add_rows, self.add_many_rows_button) self.deleteRowButton = wx.Button(self.panel, id=-1, label='Delete selected row(s)', name='delete_row_btn') self.Bind(wx.EVT_BUTTON, lambda event: self.on_remove_row(event, False), self.deleteRowButton) self.deleteRowButton.Disable() ## Data management buttons self.importButton = wx.Button(self.panel, id=-1, label='Import MagIC-format file', name='import_btn') self.Bind(wx.EVT_BUTTON, self.onImport, self.importButton) self.exitButton = wx.Button(self.panel, id=-1, label='Save and close grid', name='save_and_quit_btn') self.Bind(wx.EVT_BUTTON, self.onSave, self.exitButton) self.cancelButton = wx.Button(self.panel, id=-1, label='Cancel', name='cancel_btn') self.Bind(wx.EVT_BUTTON, self.onCancelButton, self.cancelButton) ## Help message and button # button self.toggle_help_btn = wx.Button(self.panel, id=-1, label="Show help", name='toggle_help_btn') self.Bind(wx.EVT_BUTTON, self.toggle_help, self.toggle_help_btn) # message self.help_msg_boxsizer = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='help_msg_boxsizer'), wx.VERTICAL) self.default_msg_text = 'Edit {} here.\nYou can add or remove both rows and columns, however required columns may not be deleted.\nControlled vocabularies are indicated by **, and will have drop-down-menus.\nTo edit all values in a column, click the column header.\nYou can cut and paste a block of cells from an Excel-like file.\nJust click the top left cell and use command "v".\nColumns that pertain to interpretations will be marked with "++".'.format(self.grid_type + 's') txt = '' if self.grid_type == 'location': txt = '\n\nNote: you can fill in location start/end latitude/longitude here.\nHowever, if you add sites in step 2, the program will calculate those values automatically,\nbased on site latitudes/logitudes.\nThese values will be written to your upload file.' if self.grid_type == 'sample': txt = "\n\nNote: you can fill in lithology, class, and type for each sample here.\nHowever, if the sample's class, lithology, and type are the same as its parent site,\nthose values will propagate down, and will be written to your sample file automatically." 
if self.grid_type == 'specimen': txt = "\n\nNote: you can fill in lithology, class, and type for each specimen here.\nHowever, if the specimen's class, lithology, and type are the same as its parent sample,\nthose values will propagate down, and will be written to your specimen file automatically." if self.grid_type == 'age': txt = "\n\nNote: only ages for which you provide data will be written to your upload file." self.default_msg_text += txt self.msg_text = wx.StaticText(self.panel, label=self.default_msg_text, style=wx.TE_CENTER, name='msg text') self.help_msg_boxsizer.Add(self.msg_text) self.help_msg_boxsizer.ShowItems(False) ## Code message and button # button self.toggle_codes_btn = wx.Button(self.panel, id=-1, label="Show method codes", name='toggle_codes_btn') self.Bind(wx.EVT_BUTTON, self.toggle_codes, self.toggle_codes_btn) # message self.code_msg_boxsizer = pw.MethodCodeDemystifier(self.panel, vocab) self.code_msg_boxsizer.ShowItems(False) ## Add content to sizers self.hbox = wx.BoxSizer(wx.HORIZONTAL) col_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Columns', name='manage columns'), wx.VERTICAL) row_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Rows', name='manage rows'), wx.VERTICAL) main_btn_vbox = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Manage data', name='manage data'), wx.VERTICAL) col_btn_vbox.Add(self.add_cols_button, 1, flag=wx.ALL, border=5) col_btn_vbox.Add(self.remove_cols_button, 1, flag=wx.ALL, border=5) row_btn_vbox.Add(many_rows_box, 1, flag=wx.ALL, border=5) row_btn_vbox.Add(self.remove_row_button, 1, flag=wx.ALL, border=5) row_btn_vbox.Add(self.deleteRowButton, 1, flag=wx.ALL, border=5) main_btn_vbox.Add(self.importButton, 1, flag=wx.ALL, border=5) main_btn_vbox.Add(self.exitButton, 1, flag=wx.ALL, border=5) main_btn_vbox.Add(self.cancelButton, 1, flag=wx.ALL, border=5) self.hbox.Add(col_btn_vbox, 1) self.hbox.Add(row_btn_vbox, 1) self.hbox.Add(main_btn_vbox, 1) self.panel.Bind(wx.grid.EVT_GRID_LABEL_LEFT_CLICK, self.onLeftClickLabel, self.grid) self.Bind(wx.EVT_KEY_DOWN, self.on_key_down) self.panel.Bind(wx.EVT_TEXT_PASTE, self.do_fit) # add actual data! 
self.grid_builder.add_data_to_grid(self.grid, self.grid_type) if self.grid_type == 'age': self.grid_builder.add_age_data_to_grid() # add drop_down menus if self.parent_type: belongs_to = sorted(self.er_magic.data_lists[self.parent_type][0], key=lambda item: item.name) else: belongs_to = '' self.drop_down_menu = drop_down_menus.Menus(self.grid_type, self, self.grid, belongs_to) self.grid_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, name='grid container'), wx.VERTICAL) self.grid_box.Add(self.grid, 1, flag=wx.ALL|wx.EXPAND, border=5) # a few special touches if it is a location grid if self.grid_type == 'location': lat_lon_dict = self.er_magic.get_min_max_lat_lon(self.er_magic.locations) for loc in self.er_magic.locations: # try to fill in min/max latitudes/longitudes from sites d = lat_lon_dict[loc.name] col_labels = [self.grid.GetColLabelValue(col) for col in range(self.grid.GetNumberCols())] row_labels = [self.grid.GetCellValue(row, 0) for row in range(self.grid.GetNumberRows())] for key, value in list(d.items()): if value: if str(loc.er_data[key]) == str(value): # no need to update pass else: # update loc.er_data[key] = value col_ind = col_labels.index(key) row_ind = row_labels.index(loc.name) self.grid.SetCellValue(row_ind, col_ind, str(value)) if not self.grid.changes: self.grid.changes = set([row_ind]) else: self.grid.changes.add(row_ind) # a few special touches if it is an age grid if self.grid_type == 'age': self.remove_row_button.Disable() self.add_many_rows_button.Disable() self.grid.SetColLabelValue(0, 'er_site_name') toggle_box = wx.StaticBoxSizer(wx.StaticBox(self.panel, -1, label='Ages level', name='Ages level'), wx.VERTICAL) levels = ['specimen', 'sample', 'site', 'location'] age_level = pw.radio_buttons(self.panel, levels, 'Choose level to assign ages') level_ind = levels.index(self.er_magic.age_type) age_level.radio_buttons[level_ind].SetValue(True) toggle_box.Add(age_level) self.Bind(wx.EVT_RADIOBUTTON, self.toggle_ages) self.hbox.Add(toggle_box) # a few special touches if it is a result grid if self.grid_type == 'result': # populate specimen_names, sample_names, etc. 
self.drop_down_menu.choices[2] = [sorted([spec.name for spec in self.er_magic.specimens if spec]), False] self.drop_down_menu.choices[3] = [sorted([samp.name for samp in self.er_magic.samples if samp]), False] self.drop_down_menu.choices[4] = [sorted([site.name for site in self.er_magic.sites if site]), False] self.drop_down_menu.choices[5] = [sorted([loc.name for loc in self.er_magic.locations if loc]), False] for row in range(self.grid.GetNumberRows()): result_name = self.grid.GetCellValue(row, 0) result = self.er_magic.find_by_name(result_name, self.er_magic.results) if result: if result.specimens: self.grid.SetCellValue(row, 2, ' : '.join([pmag.get_attr(spec) for spec in result.specimens])) if result.samples: self.grid.SetCellValue(row, 3, ' : '.join([pmag.get_attr(samp) for samp in result.samples])) if result.sites: self.grid.SetCellValue(row, 4, ' : '.join([pmag.get_attr(site) for site in result.sites])) if result.locations: self.grid.SetCellValue(row, 5, ' : '.join([pmag.get_attr(loc) for loc in result.locations])) self.drop_down_menu.choices[5] = [sorted([loc.name for loc in self.er_magic.locations if loc]), False] # final layout, set size self.main_sizer.Add(self.hbox, flag=wx.ALL|wx.ALIGN_CENTER|wx.SHAPED, border=20) self.main_sizer.Add(self.toggle_help_btn, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5) self.main_sizer.Add(self.help_msg_boxsizer, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=10) self.main_sizer.Add(self.toggle_codes_btn, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5) self.main_sizer.Add(self.code_msg_boxsizer, .5, flag=wx.BOTTOM|wx.ALIGN_CENTRE|wx.SHAPED, border=5) self.main_sizer.Add(self.grid_box, 2, flag=wx.ALL|wx.EXPAND, border=10) self.panel.SetSizer(self.main_sizer) self.main_sizer.Fit(self) ## this keeps sizing correct if the user resizes the window manually #self.Bind(wx.EVT_SIZE, self.do_fit) self.Centre() self.Show()
initialize window
def code_from_ipynb(nb, markdown=False):
    """
    Get the code for a given notebook

    nb is passed in as a dictionary that's a parsed ipynb file
    """
    code = PREAMBLE
    for cell in nb['cells']:
        if cell['cell_type'] == 'code':
            # transform the input to executable Python
            code += ''.join(cell['source'])
        if cell['cell_type'] == 'markdown':
            code += '\n# ' + '# '.join(cell['source'])
        # We want a blank newline after each cell's output.
        # And the last line of source doesn't have a newline usually.
        code += '\n\n'
    return code
Get the code for a given notebook nb is passed in as a dictionary that's a parsed ipynb file
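A short usage sketch for the function above. The notebook path is a placeholder, and code_from_ipynb is assumed to be importable from whatever module defines it; an .ipynb file is plain JSON, so it can be loaded with the standard library.

import json

# 'analysis.ipynb' is a placeholder path.
with open('analysis.ipynb', encoding='utf8') as f:
    nb = json.load(f)              # parsed notebook as a dict

script = code_from_ipynb(nb)       # concatenated source of the notebook cells

with open('analysis.py', 'w', encoding='utf8') as f:
    f.write(script)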
def animation_dialog(images, delay_s=1., loop=True, **kwargs): ''' .. versionadded:: v0.19 Parameters ---------- images : list Filepaths to images or :class:`gtk.Pixbuf` instances. delay_s : float, optional Number of seconds to display each frame. Default: ``1.0``. loop : bool, optional If ``True``, restart animation after last image has been displayed. Default: ``True``. Returns ------- gtk.MessageDialog Message dialog with animation displayed in `gtk.Image` widget when dialog is run. ''' def _as_pixbuf(image): if isinstance(image, types.StringTypes): return gtk.gdk.pixbuf_new_from_file(image) else: return image pixbufs = map(_as_pixbuf, images) # Need this to support background thread execution with GTK. gtk.gdk.threads_init() dialog = gtk.MessageDialog(**kwargs) # Append image to dialog content area. image = gtk.Image() content_area = dialog.get_content_area() content_area.pack_start(image) content_area.show_all() stop_animation = threading.Event() def _stop_animation(*args): stop_animation.set() def _animate(dialog): def __animate(): if loop: frames = it.cycle(pixbufs) else: frames = pixbufs for pixbuf_i in frames: gobject.idle_add(image.set_from_pixbuf, pixbuf_i) if stop_animation.wait(delay_s): break thread = threading.Thread(target=__animate) thread.daemon = True thread.start() dialog.connect('destroy', _stop_animation) dialog.connect('show', _animate) return dialog
.. versionadded:: v0.19 Parameters ---------- images : list Filepaths to images or :class:`gtk.Pixbuf` instances. delay_s : float, optional Number of seconds to display each frame. Default: ``1.0``. loop : bool, optional If ``True``, restart animation after last image has been displayed. Default: ``True``. Returns ------- gtk.MessageDialog Message dialog with animation displayed in `gtk.Image` widget when dialog is run.
def write_puml(self, filename=''): """ Writes PUML from the system. If filename is given, stores result in the file. Otherwise returns result as a string. """ def get_type(o): type = 'program' if isinstance(o, AbstractSensor): type = 'sensor' elif isinstance(o, AbstractActuator): type = 'actuator' return type if filename: s = open(filename, 'w') else: s = io.StringIO() s.write('@startuml\n') s.write('skinparam state {\n') for k, v in list(self.background_colors.items()): s.write('BackGroundColor<<%s>> %s\n' % (k, v)) s.write('}\n') for o in self.system.objects: if isinstance(o, DefaultProgram) or o.hide_in_uml: continue if isinstance(o, ProgrammableSystemObject): s.write('state "%s" as %s <<%s>>\n' % (o, o, get_type(o))) s.write('%s: %s\n' % (o, o.class_name)) if isinstance(o, AbstractActuator): for p in reversed(o.program_stack): s.write('%s: %s :: %s\n' % (o, p, o.program_status.get(p, '-'))) elif hasattr(o, 'status'): s.write('%s: Status: %s\n' % (o, o.status)) if getattr(o, 'is_program', False): s.write('%s: Priority: %s\n' % (o, o.priority)) for t in o.actual_triggers: if isinstance(t, DefaultProgram) or t.hide_in_uml: continue s.write('%s -[%s]-> %s\n' % (t, self.arrow_colors['trigger'], o)) for t in o.actual_targets: if t.hide_in_uml: continue if o.active: color = 'active_target' else: color = 'inactive_target' if getattr(t, 'program', None) == o: color = 'controlled_target' s.write('%s -[%s]-> %s\n' % (o, self.arrow_colors[color], t)) s.write('@enduml\n') if filename: s.close() else: return s.getvalue()
Writes PUML from the system. If filename is given, stores result in the file. Otherwise returns result as a string.
def encrypt(passwd):
    """
    Encrypts the incoming password after adding some salt to store
    it in the database.

    @param passwd: password portion of user credentials
    @type passwd: string

    @returns: encrypted/salted string
    """
    m = sha1()
    salt = hexlify(os.urandom(salt_len))
    m.update(unicode2bytes(passwd) + salt)
    crypted = bytes2unicode(salt) + m.hexdigest()
    return crypted
Encrypts the incoming password after adding some salt to store it in the database. @param passwd: password portion of user credentials @type passwd: string @returns: encrypted/salted string
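A hedged sketch of how a value produced by the function above could be checked later. It assumes the stored layout implied by the code (hex salt string followed by a 40-character SHA-1 hex digest) and uses plain UTF-8/ASCII encoding in place of the unicode2bytes/bytes2unicode helpers.

from hashlib import sha1

def check_password(passwd, stored):
    """Sketch only: assumes stored == salt_hex + sha1(passwd_bytes + salt_hex).hexdigest()."""
    salt = stored[:-40]                 # a SHA-1 hex digest is always 40 chars
    digest = sha1(passwd.encode('utf-8') + salt.encode('ascii')).hexdigest()
    return stored == salt + digest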
def find_first_file_with_ext(base_paths, prefix, exts):
    """Runs through the given list of file extensions and returns the first
    file with the given base path and extension combination that actually
    exists.

    Args:
        base_paths: The base paths in which to search for files.
        prefix: The filename prefix of the file for which to search.
        exts: An ordered list of file extensions for which to search.

    Returns:
        On success, a 2-tuple containing the base path in which the file was
        found, and the extension of the file. On failure, returns
        (None, None).
    """
    for base_path in base_paths:
        for ext in exts:
            filename = os.path.join(base_path, "%s%s" % (prefix, ext))
            if os.path.exists(filename) and os.path.isfile(filename):
                logger.debug("Found first file with relevant extension: %s", filename)
                return base_path, ext

    logger.debug("No files found for prefix %s, extensions %s", prefix, ", ".join(exts))
    return None, None
Runs through the given list of file extensions and returns the first file with the given base path and extension combination that actually exists. Args: base_paths: The base paths in which to search for files. prefix: The filename prefix of the file for which to search. exts: An ordered list of file extensions for which to search. Returns: On success, a 2-tuple containing the base path in which the file was found, and the extension of the file. On failure, returns (None, None).
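A brief usage sketch for the helper above, with made-up search locations and extensions; find_first_file_with_ext is assumed to be in scope.

import os

# Made-up locations in which to look for "config.*", in priority order.
base_paths = ['/etc/myapp', os.path.expanduser('~/.myapp')]
exts = ['.yml', '.yaml', '.json']

base_path, ext = find_first_file_with_ext(base_paths, 'config', exts)
if base_path is not None:
    config_file = os.path.join(base_path, 'config' + ext)
else:
    config_file = None   # nothing found in any location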
def update(self, dt): """Update the shape's position by moving it forward according to its velocity. Parameters ---------- dt : float """ self.translate(dt * self.velocity) self.rotate(dt * self.angular_velocity)
Update the shape's position by moving it forward according to its velocity. Parameters ---------- dt : float
def parse_bug_activity(raw_html):
    """Parse a Bugzilla bug activity HTML stream.

    This method extracts the information about activity from the given HTML
    stream. The bug activity is stored into a HTML table. Each parsed
    activity event is returned into a dictionary.

    If the given HTML is invalid, the method will raise a ParseError exception.

    :param raw_html: HTML string to parse

    :returns: a generator of parsed activity events

    :raises ParseError: raised when an error occurs parsing the given
        HTML stream
    """
    def is_activity_empty(bs):
        EMPTY_ACTIVITY = "No changes have been made to this (?:bug|issue) yet."
        tag = bs.find(text=re.compile(EMPTY_ACTIVITY))
        return tag is not None

    def find_activity_table(bs):
        # The first table with 5 columns is the table of activity
        tables = bs.find_all('table')

        for tb in tables:
            nheaders = len(tb.tr.find_all('th', recursive=False))
            if nheaders == 5:
                return tb

        raise ParseError(cause="Table of bug activity not found.")

    def remove_tags(bs):
        HTML_TAGS_TO_REMOVE = ['a', 'i', 'span']

        for tag in bs.find_all(HTML_TAGS_TO_REMOVE):
            tag.replaceWith(tag.text)

    def format_text(bs):
        strings = [s.strip(' \n\t') for s in bs.stripped_strings]
        s = ' '.join(strings)
        return s

    # Parsing starts here
    bs = bs4.BeautifulSoup(raw_html, 'html.parser')

    if is_activity_empty(bs):
        fields = []
    else:
        activity_tb = find_activity_table(bs)
        remove_tags(activity_tb)
        fields = activity_tb.find_all('td')

    while fields:
        # First two fields: 'Who' and 'When'.
        who = fields.pop(0)
        when = fields.pop(0)

        # The attribute 'rowspan' of 'who' field tells how many
        # changes were made on the same date.
        n = int(who.get('rowspan'))

        # Next fields are split into chunks of three elements:
        # 'What', 'Removed' and 'Added'. These chunks share
        # 'Who' and 'When' values.
        for _ in range(n):
            what = fields.pop(0)
            removed = fields.pop(0)
            added = fields.pop(0)

            event = {'Who': format_text(who),
                     'When': format_text(when),
                     'What': format_text(what),
                     'Removed': format_text(removed),
                     'Added': format_text(added)}

            yield event
Parse a Bugzilla bug activity HTML stream. This method extracts the information about activity from the given HTML stream. The bug activity is stored into a HTML table. Each parsed activity event is returned into a dictionary. If the given HTML is invalid, the method will raise a ParseError exception. :param raw_html: HTML string to parse :returns: a generator of parsed activity events :raises ParseError: raised when an error occurs parsing the given HTML stream
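A minimal sketch of the HTML shape the parser above expects: the first table whose header row has five cells, with each 'Who' cell carrying a rowspan count. The activity row is made up, and parse_bug_activity is assumed to be in scope (it is a generator, so the result is wrapped in list()).

raw_html = """
<html><body>
  <table>
    <tr><th>Who</th><th>When</th><th>What</th><th>Removed</th><th>Added</th></tr>
    <tr>
      <td rowspan="1">alice@example.com</td>
      <td>2020-01-01 10:00 UTC</td>
      <td>Status</td>
      <td>NEW</td>
      <td>RESOLVED</td>
    </tr>
  </table>
</body></html>
"""

events = list(parse_bug_activity(raw_html))
# [{'Who': 'alice@example.com', 'When': '2020-01-01 10:00 UTC',
#   'What': 'Status', 'Removed': 'NEW', 'Added': 'RESOLVED'}]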
def respond(self, output):
    """Generates server response."""
    response = {'exit_code': output.code, 'command_output': output.log}
    self.send_response(200)
    self.send_header('Content-type', 'application/json')
    self.end_headers()
    self.wfile.write(bytes(json.dumps(response), "utf8"))
Generates server response.
def V(self, brightest=False):
    """
    http://www.aerith.net/astro/color_conversion.html
    """
    mags = self.get_photometry(brightest=brightest, convert=False)
    VT, dVT = mags['VT']
    BT, dBT = mags['BT']

    if (-0.25 < BT - VT < 2.0):
        (a, b, c, d) = (0.00097, 0.1334, 0.05486, 0.01998)
        V = (VT + a - b * (BT - VT) + c * (BT - VT)**2 - d * (BT - VT)**3)
        dVdVT = 1 + b - 2*c*(BT-VT) + 3*d*(BT-VT)**2
        dVdBT = -b + 2*c*(BT-VT) - 3*d*(BT-VT)**2
        dV = np.sqrt((dVdVT**2 * dVT**2) + (dVdBT**2 * dBT**2))
    else:
        raise ValueError('BT-VT outside of range to convert')

    return V, dV
http://www.aerith.net/astro/color_conversion.html
def extend(self, *args, **kwargs):
    """Generic import function for any type of header-like object.
    Adapted version of MutableMapping.update in order to insert items
    with self.add instead of self.__setitem__
    """
    if len(args) > 1:
        raise TypeError("extend() takes at most 1 positional "
                        "arguments ({0} given)".format(len(args)))
    other = args[0] if len(args) >= 1 else ()

    if isinstance(other, HTTPHeaderDict):
        for key, val in other.iteritems():
            self.add(key, val)
    elif isinstance(other, Mapping):
        for key in other:
            self.add(key, other[key])
    elif hasattr(other, "keys"):
        for key in other.keys():
            self.add(key, other[key])
    else:
        for key, value in other:
            self.add(key, value)

    for key, value in kwargs.items():
        self.add(key, value)
Generic import function for any type of header-like object. Adapted version of MutableMapping.update in order to insert items with self.add instead of self.__setitem__
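A hedged usage sketch assuming the method above belongs to a case-insensitive, multi-value header mapping in the style of urllib3's HTTPHeaderDict; the import path below is the one used by urllib3 1.x and may differ in other versions.

from urllib3._collections import HTTPHeaderDict

h = HTTPHeaderDict()
h.add('Set-Cookie', 'a=1')

# extend() appends values via add(), so existing headers are kept rather than
# overwritten the way a plain update() would overwrite them.
h.extend({'Set-Cookie': 'b=2'}, Accept='application/json')

print(h.getlist('Set-Cookie'))   # ['a=1', 'b=2']
print(h['Accept'])               # 'application/json'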
def isclose(a, b, rtol=4*np.finfo(float).eps, atol=0.0, equal_nan=False): """ Returns a boolean array where two arrays are element-wise equal within a tolerance. This function is essentially a copy of the `numpy.isclose` function, with different default tolerances and one minor changes necessary to deal correctly with quaternions. The tolerance values are positive, typically very small numbers. The relative difference (`rtol` * abs(`b`)) and the absolute difference `atol` are added together to compare against the absolute difference between `a` and `b`. Parameters ---------- a, b : array_like Input arrays to compare. rtol : float The relative tolerance parameter (see Notes). atol : float The absolute tolerance parameter (see Notes). equal_nan : bool Whether to compare NaN's as equal. If True, NaN's in `a` will be considered equal to NaN's in `b` in the output array. Returns ------- y : array_like Returns a boolean array of where `a` and `b` are equal within the given tolerance. If both `a` and `b` are scalars, returns a single boolean value. See Also -------- allclose Notes ----- For finite values, isclose uses the following equation to test whether two floating point values are equivalent: absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) The above equation is not symmetric in `a` and `b`, so that `isclose(a, b)` might be different from `isclose(b, a)` in some rare cases. Examples -------- >>> quaternion.isclose([1e10*quaternion.x, 1e-7*quaternion.y], [1.00001e10*quaternion.x, 1e-8*quaternion.y], ... rtol=1.e-5, atol=1.e-8) array([True, False]) >>> quaternion.isclose([1e10*quaternion.x, 1e-8*quaternion.y], [1.00001e10*quaternion.x, 1e-9*quaternion.y], ... rtol=1.e-5, atol=1.e-8) array([True, True]) >>> quaternion.isclose([1e10*quaternion.x, 1e-8*quaternion.y], [1.0001e10*quaternion.x, 1e-9*quaternion.y], ... rtol=1.e-5, atol=1.e-8) array([False, True]) >>> quaternion.isclose([quaternion.x, np.nan*quaternion.y], [quaternion.x, np.nan*quaternion.y]) array([True, False]) >>> quaternion.isclose([quaternion.x, np.nan*quaternion.y], [quaternion.x, np.nan*quaternion.y], equal_nan=True) array([True, True]) """ def within_tol(x, y, atol, rtol): with np.errstate(invalid='ignore'): result = np.less_equal(abs(x-y), atol + rtol * abs(y)) if np.isscalar(a) and np.isscalar(b): result = bool(result) return result x = np.array(a, copy=False, subok=True, ndmin=1) y = np.array(b, copy=False, subok=True, ndmin=1) # Make sure y is an inexact type to avoid bad behavior on abs(MIN_INT). # This will cause casting of x later. Also, make sure to allow subclasses # (e.g., for numpy.ma). try: dt = np.result_type(y, 1.) except TypeError: dt = np.dtype(np.quaternion) y = np.array(y, dtype=dt, copy=False, subok=True) xfin = np.isfinite(x) yfin = np.isfinite(y) if np.all(xfin) and np.all(yfin): return within_tol(x, y, atol, rtol) else: finite = xfin & yfin cond = np.zeros_like(finite, subok=True) # Because we're using boolean indexing, x & y must be the same shape. # Ideally, we'd just do x, y = broadcast_arrays(x, y). It's in # lib.stride_tricks, though, so we can't import it here. x = x * np.ones_like(cond) y = y * np.ones_like(cond) # Avoid subtraction with infinite/nan values... cond[finite] = within_tol(x[finite], y[finite], atol, rtol) # Check for equality of infinite values... cond[~finite] = (x[~finite] == y[~finite]) if equal_nan: # Make NaN == NaN both_nan = np.isnan(x) & np.isnan(y) cond[both_nan] = both_nan[both_nan] if np.isscalar(a) and np.isscalar(b): return bool(cond) else: return cond
Returns a boolean array where two arrays are element-wise equal within a tolerance. This function is essentially a copy of the `numpy.isclose` function, with different default tolerances and one minor changes necessary to deal correctly with quaternions. The tolerance values are positive, typically very small numbers. The relative difference (`rtol` * abs(`b`)) and the absolute difference `atol` are added together to compare against the absolute difference between `a` and `b`. Parameters ---------- a, b : array_like Input arrays to compare. rtol : float The relative tolerance parameter (see Notes). atol : float The absolute tolerance parameter (see Notes). equal_nan : bool Whether to compare NaN's as equal. If True, NaN's in `a` will be considered equal to NaN's in `b` in the output array. Returns ------- y : array_like Returns a boolean array of where `a` and `b` are equal within the given tolerance. If both `a` and `b` are scalars, returns a single boolean value. See Also -------- allclose Notes ----- For finite values, isclose uses the following equation to test whether two floating point values are equivalent: absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`)) The above equation is not symmetric in `a` and `b`, so that `isclose(a, b)` might be different from `isclose(b, a)` in some rare cases. Examples -------- >>> quaternion.isclose([1e10*quaternion.x, 1e-7*quaternion.y], [1.00001e10*quaternion.x, 1e-8*quaternion.y], ... rtol=1.e-5, atol=1.e-8) array([True, False]) >>> quaternion.isclose([1e10*quaternion.x, 1e-8*quaternion.y], [1.00001e10*quaternion.x, 1e-9*quaternion.y], ... rtol=1.e-5, atol=1.e-8) array([True, True]) >>> quaternion.isclose([1e10*quaternion.x, 1e-8*quaternion.y], [1.0001e10*quaternion.x, 1e-9*quaternion.y], ... rtol=1.e-5, atol=1.e-8) array([False, True]) >>> quaternion.isclose([quaternion.x, np.nan*quaternion.y], [quaternion.x, np.nan*quaternion.y]) array([True, False]) >>> quaternion.isclose([quaternion.x, np.nan*quaternion.y], [quaternion.x, np.nan*quaternion.y], equal_nan=True) array([True, True])
def create(cls, second_line, name_on_card, alias=None, type_=None, pin_code_assignment=None, monetary_account_id_fallback=None, custom_headers=None): """ Create a new debit card request. :type user_id: int :param second_line: The second line of text on the card, used as name/description for it. It can contain at most 17 characters and it can be empty. :type second_line: str :param name_on_card: The user's name as it will be on the card. Check 'card-name' for the available card names for a user. :type name_on_card: str :param alias: The pointer to the monetary account that will be connected at first with the card. Its IBAN code is also the one that will be printed on the card itself. The pointer must be of type IBAN. :type alias: object_.Pointer :param type_: The type of card to order. Can be MAESTRO or MASTERCARD. :type type_: str :param pin_code_assignment: Array of Types, PINs, account IDs assigned to the card. :type pin_code_assignment: list[object_.CardPinAssignment] :param monetary_account_id_fallback: ID of the MA to be used as fallback for this card if insufficient balance. Fallback account is removed if not supplied. :type monetary_account_id_fallback: int :type custom_headers: dict[str, str]|None :rtype: BunqResponseCardDebit """ if custom_headers is None: custom_headers = {} request_map = { cls.FIELD_SECOND_LINE: second_line, cls.FIELD_NAME_ON_CARD: name_on_card, cls.FIELD_ALIAS: alias, cls.FIELD_TYPE: type_, cls.FIELD_PIN_CODE_ASSIGNMENT: pin_code_assignment, cls.FIELD_MONETARY_ACCOUNT_ID_FALLBACK: monetary_account_id_fallback } request_map_string = converter.class_to_json(request_map) request_map_string = cls._remove_field_for_request(request_map_string) api_client = client.ApiClient(cls._get_api_context()) request_bytes = request_map_string.encode() request_bytes = security.encrypt(cls._get_api_context(), request_bytes, custom_headers) endpoint_url = cls._ENDPOINT_URL_CREATE.format(cls._determine_user_id()) response_raw = api_client.post(endpoint_url, request_bytes, custom_headers) return BunqResponseCardDebit.cast_from_bunq_response( cls._from_json(response_raw, cls._OBJECT_TYPE_POST) )
Create a new debit card request. :type user_id: int :param second_line: The second line of text on the card, used as name/description for it. It can contain at most 17 characters and it can be empty. :type second_line: str :param name_on_card: The user's name as it will be on the card. Check 'card-name' for the available card names for a user. :type name_on_card: str :param alias: The pointer to the monetary account that will be connected at first with the card. Its IBAN code is also the one that will be printed on the card itself. The pointer must be of type IBAN. :type alias: object_.Pointer :param type_: The type of card to order. Can be MAESTRO or MASTERCARD. :type type_: str :param pin_code_assignment: Array of Types, PINs, account IDs assigned to the card. :type pin_code_assignment: list[object_.CardPinAssignment] :param monetary_account_id_fallback: ID of the MA to be used as fallback for this card if insufficient balance. Fallback account is removed if not supplied. :type monetary_account_id_fallback: int :type custom_headers: dict[str, str]|None :rtype: BunqResponseCardDebit
def H(self, phase, T): """ Calculate the enthalpy of a phase of the compound at a specified temperature. :param phase: A phase of the compound, e.g. 'S', 'L', 'G'. :param T: [K] temperature :returns: [J/mol] Enthalpy. """ try: return self._phases[phase].H(T) except KeyError: raise Exception("The phase '{}' was not found in compound '{}'." .format(phase, self.formula))
Calculate the enthalpy of a phase of the compound at a specified temperature. :param phase: A phase of the compound, e.g. 'S', 'L', 'G'. :param T: [K] temperature :returns: [J/mol] Enthalpy.
def add_entity(self): ''' Add the entity. All the information is taken from the post data. ''' post_data = self.get_post_data() if 'kind' in post_data: if post_data['kind'] == '1': self.add_pic(post_data) elif post_data['kind'] == '2': self.add_pdf(post_data) elif post_data['kind'] == '3': self.add_url(post_data) else: pass else: self.add_pic(post_data)
Add the entity. All the information is taken from the post data.
def __construct_claim_json(self): """ Writes the properties from self.data to a new or existing json in self.wd_json_representation :return: None """ def handle_qualifiers(old_item, new_item): if not new_item.check_qualifier_equality: old_item.set_qualifiers(new_item.get_qualifiers()) def is_good_ref(ref_block): if len(WDItemEngine.databases) == 0: WDItemEngine._init_ref_system() prop_nrs = [x.get_prop_nr() for x in ref_block] values = [x.get_value() for x in ref_block] good_ref = True prop_value_map = dict(zip(prop_nrs, values)) # if self.good_refs has content, use these to determine good references if self.good_refs and len(self.good_refs) > 0: found_good = True for rblock in self.good_refs: if not all([k in prop_value_map for k, v in rblock.items()]): found_good = False if not all([v in prop_value_map[k] for k, v in rblock.items() if v]): found_good = False if found_good: return True return False # stated in, title, retrieved ref_properties = ['P248', 'P1476', 'P813'] # 'P407' language of work, for v in values: if prop_nrs[values.index(v)] == 'P248': return True elif v == 'P698': return True for p in ref_properties: if p not in prop_nrs: return False for ref in ref_block: pn = ref.get_prop_nr() value = ref.get_value() if pn == 'P248' and value not in WDItemEngine.databases and 'P854' not in prop_nrs: return False elif pn == 'P248' and value in WDItemEngine.databases: db_props = WDItemEngine.databases[value] if not any([False if x not in prop_nrs else True for x in db_props]) and 'P854' not in prop_nrs: return False return good_ref def handle_references(old_item, new_item): """ Local function to handle references :param old_item: An item containing the data as currently in WD :type old_item: A child of WDBaseDataType :param new_item: An item containing the new data which should be written to WD :type new_item: A child of WDBaseDataType """ # stated in, title, language of work, retrieved, imported from ref_properties = ['P248', 'P1476', 'P407', 'P813', 'P143'] new_references = new_item.get_references() old_references = old_item.get_references() if any([z.overwrite_references for y in new_references for z in y]) \ or sum(map(lambda z: len(z), old_references)) == 0 \ or self.global_ref_mode == 'STRICT_OVERWRITE': old_item.set_references(new_references) elif self.global_ref_mode == 'STRICT_KEEP' or new_item.statement_ref_mode == 'STRICT_KEEP': pass elif self.global_ref_mode == 'STRICT_KEEP_APPEND' or new_item.statement_ref_mode == 'STRICT_KEEP_APPEND': old_references.extend(new_references) old_item.set_references(old_references) elif self.global_ref_mode == 'CUSTOM' or new_item.statement_ref_mode == 'CUSTOM': self.ref_handler(old_item, new_item) elif self.global_ref_mode == 'KEEP_GOOD' or new_item.statement_ref_mode == 'KEEP_GOOD': keep_block = [False for x in old_references] for count, ref_block in enumerate(old_references): stated_in_value = [x.get_value() for x in ref_block if x.get_prop_nr() == 'P248'] if is_good_ref(ref_block): keep_block[count] = True new_ref_si_values = [x.get_value() if x.get_prop_nr() == 'P248' else None for z in new_references for x in z] for si in stated_in_value: if si in new_ref_si_values: keep_block[count] = False refs = [x for c, x in enumerate(old_references) if keep_block[c]] refs.extend(new_references) old_item.set_references(refs) # sort the incoming data according to the WD property number self.data.sort(key=lambda z: z.get_prop_nr().lower()) # collect all statements which should be deleted statements_for_deletion = [] for item in self.data: if 
item.get_value() == '' and isinstance(item, WDBaseDataType): statements_for_deletion.append(item.get_prop_nr()) if self.create_new_item: self.statements = copy.copy(self.data) else: for stat in self.data: prop_nr = stat.get_prop_nr() prop_data = [x for x in self.statements if x.get_prop_nr() == prop_nr] prop_pos = [x.get_prop_nr() == prop_nr for x in self.statements] prop_pos.reverse() insert_pos = len(prop_pos) - (prop_pos.index(True) if any(prop_pos) else 0) # If value should be appended, check if values exists, if not, append if prop_nr in self.append_value: equal_items = [stat == x for x in prop_data] if True not in equal_items: self.statements.insert(insert_pos + 1, stat) else: # if item exists, modify rank current_item = prop_data[equal_items.index(True)] current_item.set_rank(stat.get_rank()) handle_references(old_item=current_item, new_item=stat) handle_qualifiers(old_item=current_item, new_item=stat) continue # set all existing values of a property for removal for x in prop_data: # for deletion of single statements, do not set all others to delete if hasattr(stat, 'remove'): break elif x.get_id() and not hasattr(x, 'retain'): # keep statements with good references if keep_good_ref_statements is True if self.keep_good_ref_statements: if any([is_good_ref(r) for r in x.get_references()]): setattr(x, 'retain', '') else: setattr(x, 'remove', '') match = [] for i in prop_data: if stat == i and hasattr(stat, 'remove'): match.append(True) setattr(i, 'remove', '') elif stat == i: match.append(True) setattr(i, 'retain', '') if hasattr(i, 'remove'): delattr(i, 'remove') handle_references(old_item=i, new_item=stat) handle_qualifiers(old_item=i, new_item=stat) i.set_rank(rank=stat.get_rank()) # if there is no value, do not add an element, this is also used to delete whole properties. elif i.get_value(): match.append(False) if True not in match and not hasattr(stat, 'remove'): self.statements.insert(insert_pos + 1, stat) # For whole property deletions, add remove flag to all statements which should be deleted for item in copy.deepcopy(self.statements): if item.get_prop_nr() in statements_for_deletion and item.get_id() != '': setattr(item, 'remove', '') elif item.get_prop_nr() in statements_for_deletion: self.statements.remove(item) # regenerate claim json self.wd_json_representation['claims'] = {} for stat in self.statements: prop_nr = stat.get_prop_nr() if prop_nr not in self.wd_json_representation['claims']: self.wd_json_representation['claims'][prop_nr] = [] self.wd_json_representation['claims'][prop_nr].append(stat.get_json_representation())
Writes the properties from self.data to a new or existing json in self.wd_json_representation :return: None
def isNonPairTag(self, isnonpair=None): """ True if element is listed in nonpair tag table (``br`` for example) or if it ends with ``/>`` (``<hr />`` for example). You can also change state from pair to nonpair if you use this as a setter. Args: isnonpair (bool, default None): If set, internal nonpair state is changed. Returns: bool: True if tag is nonpair. """ if isnonpair is None: return self._isnonpairtag if not self._istag: return if isnonpair: self.endtag = None self.childs = [] self._isnonpairtag = isnonpair
True if element is listed in nonpair tag table (``br`` for example) or if it ends with ``/>`` (``<hr />`` for example). You can also change state from pair to nonpair if you use this as a setter. Args: isnonpair (bool, default None): If set, internal nonpair state is changed. Returns: bool: True if tag is nonpair.
def save(self, sc, path): """ Save this model to the given path. """ java_model = sc._jvm.org.apache.spark.mllib.classification.LogisticRegressionModel( _py2java(sc, self._coeff), self.intercept, self.numFeatures, self.numClasses) java_model.save(sc._jsc.sc(), path)
Save this model to the given path.
def get_rup_array(ebruptures, srcfilter=nofilter): """ Convert a list of EBRuptures into a numpy composite array, by filtering out the ruptures far away from every site """ if not BaseRupture._code: BaseRupture.init() # initialize rupture codes rups = [] geoms = [] nbytes = 0 offset = 0 for ebrupture in ebruptures: rup = ebrupture.rupture mesh = surface_to_array(rup.surface) sy, sz = mesh.shape[1:] # sanity checks assert sy < TWO16, 'Too many multisurfaces: %d' % sy assert sz < TWO16, 'The rupture mesh spacing is too small' points = mesh.reshape(3, -1).T # shape (n, 3) minlon = points[:, 0].min() minlat = points[:, 1].min() maxlon = points[:, 0].max() maxlat = points[:, 1].max() if srcfilter.integration_distance and len(srcfilter.close_sids( (minlon, minlat, maxlon, maxlat), rup.tectonic_region_type, rup.mag)) == 0: continue hypo = rup.hypocenter.x, rup.hypocenter.y, rup.hypocenter.z rate = getattr(rup, 'occurrence_rate', numpy.nan) tup = (ebrupture.serial, ebrupture.srcidx, ebrupture.grp_id, rup.code, ebrupture.n_occ, rup.mag, rup.rake, rate, minlon, minlat, maxlon, maxlat, hypo, offset, offset + len(points), sy, sz) offset += len(points) rups.append(tup) geoms.append(numpy.array([tuple(p) for p in points], point3d)) nbytes += rupture_dt.itemsize + mesh.nbytes if not rups: return () dic = dict(geom=numpy.concatenate(geoms), nbytes=nbytes) # TODO: PMFs for nonparametric ruptures are not converted return hdf5.ArrayWrapper(numpy.array(rups, rupture_dt), dic)
Convert a list of EBRuptures into a numpy composite array, by filtering out the ruptures far away from every site
def fixed_poch(a, n): """Implementation of the Pochhammer symbol :math:`(a)_n` which handles negative integer arguments properly. Need conditional statement because scipy's implementation of the Pochhammer symbol is wrong for negative integer arguments. This function uses the definition from http://functions.wolfram.com/GammaBetaErf/Pochhammer/02/ Parameters ---------- a : float The argument. n : nonnegative int The order. """ # Old form, calls gamma function: # if a < 0.0 and a % 1 == 0 and n <= -a: # p = (-1.0)**n * scipy.misc.factorial(-a) / scipy.misc.factorial(-a - n) # else: # p = scipy.special.poch(a, n) # return p if (int(n) != n) or (n < 0): raise ValueError("Parameter n must be a nonnegative int!") n = int(n) # Direct form based on product: terms = [a + k for k in range(0, n)] return scipy.prod(terms)
Implementation of the Pochhammer symbol :math:`(a)_n` which handles negative integer arguments properly. Need conditional statement because scipy's implementation of the Pochhammer symbol is wrong for negative integer arguments. This function uses the definition from http://functions.wolfram.com/GammaBetaErf/Pochhammer/02/ Parameters ---------- a : float The argument. n : nonnegative int The order.
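A quick standalone check of the same product-form definition (illustrative only, not part of the library): for a negative integer argument the product terminates, e.g. (-3)_2 = (-3)(-2) = 6, while (-3)_4 contains the zero factor (-3 + 3).

# Self-contained sketch of (a)_n = a * (a+1) * ... * (a+n-1), for illustration.
def poch_product(a, n):
    result = 1.0
    for k in range(n):
        result *= a + k
    return result

assert poch_product(-3, 2) == 6.0       # (-3)(-2)
assert poch_product(-3, 4) == 0.0       # contains the factor (-3 + 3) = 0
assert poch_product(2.5, 3) == 2.5 * 3.5 * 4.5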
def calculate_start_time(df): """Calculate the start_time per read. Time data is either a "time" (in seconds, derived from summary files) or a "timestamp" (in UTC, derived from the fastq_rich format) and has to be converted appropriately into a datetime column time_arr. For both, time_zero is the minimal value of time_arr, which is then subtracted from all other times. In the case of method=track (and dataset is a column in the df), this subtraction is done per dataset. """ if "time" in df: df["time_arr"] = pd.Series(df["time"], dtype='datetime64[s]') elif "timestamp" in df: df["time_arr"] = pd.Series(df["timestamp"], dtype="datetime64[ns]") else: return df if "dataset" in df: for dset in df["dataset"].unique(): time_zero = df.loc[df["dataset"] == dset, "time_arr"].min() df.loc[df["dataset"] == dset, "start_time"] = \ df.loc[df["dataset"] == dset, "time_arr"] - time_zero else: df["start_time"] = df["time_arr"] - df["time_arr"].min() return df.drop(["time", "timestamp", "time_arr"], axis=1, errors="ignore")
Calculate the start_time per read. Time data is either a "time" (in seconds, derived from summary files) or a "timestamp" (in UTC, derived from the fastq_rich format) and has to be converted appropriately into a datetime column time_arr. For both, time_zero is the minimal value of time_arr, which is then subtracted from all other times. In the case of method=track (and dataset is a column in the df), this subtraction is done per dataset.
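A minimal pandas sketch of the per-dataset zeroing described above; the column names mirror the ones the function expects, and pd.to_datetime is used here simply as an equivalent way to get a datetime column.

import pandas as pd

df = pd.DataFrame({"time": [10, 20, 5, 15],
                   "dataset": ["a", "a", "b", "b"]})
df["time_arr"] = pd.to_datetime(df["time"], unit="s")   # seconds since epoch -> datetime64
df["start_time"] = df["time_arr"] - df.groupby("dataset")["time_arr"].transform("min")
# start_time is now 0s/10s within each dataset rather than relative to the global minimum.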
def load_time_series(filename, delimiter=r'\s+'): r"""Import a time series from an annotation file. The file should consist of two columns of numeric values corresponding to the time and value of each sample of the time series. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- times : np.ndarray array of timestamps (float) values : np.ndarray array of corresponding numeric values (float) """ # Use our universal function to load in the events times, values = load_delimited(filename, [float, float], delimiter) times = np.array(times) values = np.array(values) return times, values
r"""Import a time series from an annotation file. The file should consist of two columns of numeric values corresponding to the time and value of each sample of the time series. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- times : np.ndarray array of timestamps (float) values : np.ndarray array of corresponding numeric values (float)
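The expected annotation format is simply two whitespace-separated numeric columns per line (time, value); a self-contained sketch of that format and its parsing, using plain Python in place of the library's load_delimited helper:

import io
import re
import numpy as np

sample = io.StringIO("0.0 1.50\n0.5 2.00\n1.0 1.25\n")   # made-up annotation content
times, values = [], []
for line in sample:
    t, v = re.split(r'\s+', line.strip())
    times.append(float(t))
    values.append(float(v))
times, values = np.array(times), np.array(values)         # two arrays of shape (3,)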
def find_category(self, parent_alias, title): """Searches parent category children for the given title (case-insensitive). :param str parent_alias: :param str title: :rtype: Category|None :return: None if not found; otherwise - found Category """ found = None child_ids = self.get_child_ids(parent_alias) for cid in child_ids: category = self.get_category_by_id(cid) if category.title.lower() == title.lower(): found = category break return found
Searches parent category children for the given title (case-insensitive). :param str parent_alias: :param str title: :rtype: Category|None :return: None if not found; otherwise - found Category
def init( dist='dist', minver=None, maxver=None, use_markdown_readme=True, use_stdeb=False, use_distribute=False, ): """Imports and returns a setup function. If use_markdown_readme is set, then README.md is added to setuptools READMES list. If use_stdeb is set on a Debian based system, then module stdeb is imported. Stdeb supports building deb packages on Debian based systems. The package should only be installed on the same system version it was built on, though. See http://github.com/astraw/stdeb. If use_distribute is set, then distribute_setup.py is imported. """ if not minver == maxver == None: import sys if not minver <= sys.version < (maxver or 'Any'): sys.stderr.write( '%s: requires python version in <%s, %s), not %s\n' % ( sys.argv[0], minver or 'any', maxver or 'any', sys.version.split()[0])) sys.exit(1) if use_distribute: from distribute_setup import use_setuptools use_setuptools(to_dir=dist) from setuptools import setup else: try: from setuptools import setup except ImportError: from distutils.core import setup if use_markdown_readme: try: import setuptools.command.sdist setuptools.command.sdist.READMES = tuple(list(getattr(setuptools.command.sdist, 'READMES', ())) + ['README.md']) except ImportError: pass if use_stdeb: import platform if 'debian' in platform.dist(): try: import stdeb except ImportError: pass return setup
Imports and returns a setup function. If use_markdown_readme is set, then README.md is added to setuptools READMES list. If use_stdeb is set on a Debian based system, then module stdeb is imported. Stdeb supports building deb packages on Debian based systems. The package should only be installed on the same system version it was built on, though. See http://github.com/astraw/stdeb. If use_distribute is set, then distribute_setup.py is imported.
def calc_percentile_interval(self, conf_percentage): """ Calculates percentile bootstrap confidence intervals for one's model. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. Returns ------- None. Will store the percentile intervals as `self.percentile_interval` Notes ----- Must have already called `self.generate_bootstrap_replicates`. """ # Get the alpha % that corresponds to the given confidence percentage. alpha = bc.get_alpha_from_conf_percentage(conf_percentage) # Create the column names for the dataframe of confidence intervals single_column_names =\ ['{:.3g}%'.format(alpha / 2.0), '{:.3g}%'.format(100 - alpha / 2.0)] # Calculate the desired confidence intervals. conf_intervals =\ bc.calc_percentile_interval(self.bootstrap_replicates.values, conf_percentage) # Store the desired confidence intervals self.percentile_interval =\ pd.DataFrame(conf_intervals.T, index=self.mle_params.index, columns=single_column_names) return None
Calculates percentile bootstrap confidence intervals for one's model. Parameters ---------- conf_percentage : scalar in the interval (0.0, 100.0). Denotes the confidence-level for the returned endpoints. For instance, to calculate a 95% confidence interval, pass `95`. Returns ------- None. Will store the percentile intervals as `self.percentile_interval` Notes ----- Must have already called `self.generate_bootstrap_replicates`.
def monkey_patch(enabled=True): """Monkey patching PIL.Image.open method Args: enabled (bool): If the monkey patch should be activated or deactivated. """ if enabled: Image.open = imdirect_open else: Image.open = pil_open
Monkey patching PIL.Image.open method Args: enabled (bool): If the monkey patch should be activated or deactivated.
def _variants(self, case_id, gemini_query): """Return variants found in the gemini database Args: case_id (str): The case for which we want to see information gemini_query (str): What variants should be chosen filters (dict): A dictionary with filters Yields: variant_obj (dict): A Variant formatted dictionary """ individuals = [] # Get the individuals for the case case_obj = self.case(case_id) for individual in case_obj.individuals: individuals.append(individual) self.db = case_obj.variant_source self.variant_type = case_obj.variant_type gq = GeminiQuery(self.db) gq.run(gemini_query) index = 0 for gemini_variant in gq: variant = None # Check if variant is non ref in the individuals is_variant = self._is_variant(gemini_variant, individuals) if self.variant_type == 'snv' and not is_variant: variant = None else: index += 1 logger.debug("Updating index to: {0}".format(index)) variant = self._format_variant( case_id=case_id, gemini_variant=gemini_variant, individual_objs=individuals, index=index ) if variant: yield variant
Return variants found in the gemini database Args: case_id (str): The case for which we want to see information gemini_query (str): What variants should be chosen filters (dict): A dictionary with filters Yields: variant_obj (dict): A Variant formatted dictionary
def init(self, force_deploy=False, client=None): """Reserves and deploys the nodes according to the resources section. In comparison to the vagrant provider, networks must be characterized as in the networks key. Args: force_deploy (bool): True iff the environment must be redeployed Raises: MissingNetworkError: If one network is missing in comparison to what is claimed. NotEnoughNodesError: If the `min` constraints can't be met. """ _force_deploy = self.provider_conf.force_deploy self.provider_conf.force_deploy = _force_deploy or force_deploy self._provider_conf = self.provider_conf.to_dict() r = api.Resources(self._provider_conf, client=client) r.launch() roles = r.get_roles() networks = r.get_networks() return (_to_enos_roles(roles), _to_enos_networks(networks))
Reserves and deploys the nodes according to the resources section. In comparison to the vagrant provider, networks must be characterized as in the networks key. Args: force_deploy (bool): True iff the environment must be redeployed Raises: MissingNetworkError: If one network is missing in comparison to what is claimed. NotEnoughNodesError: If the `min` constraints can't be met.
def path(self): """Returns the build root for the current workspace.""" if self._root_dir is None: # This env variable is for testing purpose. override_buildroot = os.environ.get('PANTS_BUILDROOT_OVERRIDE', None) if override_buildroot: self._root_dir = override_buildroot else: self._root_dir = os.path.realpath(self.find_buildroot()) if PY2: self._root_dir = self._root_dir.decode('utf-8') return self._root_dir
Returns the build root for the current workspace.
def _check_params(self,params): """ Print a warning if params contains something that is not a Parameter of the overridden object. """ overridden_object_params = list(self._overridden.param) for item in params: if item not in overridden_object_params: self.param.warning("'%s' will be ignored (not a Parameter).",item)
Print a warning if params contains something that is not a Parameter of the overridden object.
def validate_backup_window(window): """Validate PreferredBackupWindow for DBInstance""" hour = r'[01]?[0-9]|2[0-3]' minute = r'[0-5][0-9]' r = ("(?P<start_hour>%s):(?P<start_minute>%s)-" "(?P<end_hour>%s):(?P<end_minute>%s)") % (hour, minute, hour, minute) range_regex = re.compile(r) m = range_regex.match(window) if not m: raise ValueError("DBInstance PreferredBackupWindow must be in the " "format: hh24:mi-hh24:mi") start_ts = (int(m.group('start_hour')) * 60) + int(m.group('start_minute')) end_ts = (int(m.group('end_hour')) * 60) + int(m.group('end_minute')) if abs(end_ts - start_ts) < 30: raise ValueError("DBInstance PreferredBackupWindow must be at least " "30 minutes long.") return window
Validate PreferredBackupWindow for DBInstance
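Illustrative calls against the validator above (window values made up; the function needs `import re` in scope): a 45-minute window passes, while a 15-minute window is rejected as shorter than 30 minutes.

import re   # required by validate_backup_window above

validate_backup_window("03:00-03:45")        # returns "03:00-03:45"
try:
    validate_backup_window("03:00-03:15")    # only 15 minutes long
except ValueError as exc:
    print(exc)                               # "...must be at least 30 minutes long."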
def get_message_headers(self, section: Sequence[int] = None, subset: Collection[bytes] = None, inverse: bool = False) -> Writeable: """Get the headers from the message or a ``message/rfc822`` sub-part of the message. The ``section`` argument can index a nested sub-part of the message. For example, ``[2, 3]`` would get the 2nd sub-part of the message and then index it for its 3rd sub-part. Args: section: Optional nested list of sub-part indexes. subset: Subset of headers to get. inverse: If ``subset`` is given, this flag will invert it so that the headers *not* in ``subset`` are returned. """ ...
Get the headers from the message or a ``message/rfc822`` sub-part of the message. The ``section`` argument can index a nested sub-part of the message. For example, ``[2, 3]`` would get the 2nd sub-part of the message and then index it for its 3rd sub-part. Args: section: Optional nested list of sub-part indexes. subset: Subset of headers to get. inverse: If ``subset`` is given, this flag will invert it so that the headers *not* in ``subset`` are returned.
def get_stack_refs(refs: list): # copy pasted from Senza """ Returns a list of stack references with name and version. """ refs = list(refs) refs.reverse() stack_refs = [] last_stack = None while refs: ref = refs.pop() if last_stack is not None and re.compile(r'v[0-9][a-zA-Z0-9-]*$').match(ref): stack_refs.append(StackReference(last_stack, ref)) else: try: with open(ref) as fd: data = yaml.safe_load(fd) ref = data['SenzaInfo']['StackName'] except (OSError, IOError): # It's still possible that the ref is a regex pass if refs: version = refs.pop() else: version = None stack_refs.append(StackReference(ref, version)) last_stack = ref return stack_refs
Returns a list of stack references with name and version.
def getBinding(self): """Return the Binding object that is referenced by this port.""" wsdl = self.getService().getWSDL() return wsdl.bindings[self.binding]
Return the Binding object that is referenced by this port.
def ReadClientCrashInfo(self, client_id): """Reads the latest client crash record for a single client.""" history = self.crash_history.get(client_id, None) if not history: return None ts = max(history) res = rdf_client.ClientCrash.FromSerializedString(history[ts]) res.timestamp = ts return res
Reads the latest client crash record for a single client.
def _objective_decorator(func): """Decorate an objective function Converts an objective function using the typical sklearn metrics signature so that it is usable with ``xgboost.training.train`` Parameters ---------- func: callable Expects a callable with signature ``func(y_true, y_pred)``: y_true: array_like of shape [n_samples] The target values y_pred: array_like of shape [n_samples] The predicted values Returns ------- new_func: callable The new objective function as expected by ``xgboost.training.train``. The signature is ``new_func(preds, dmatrix)``: preds: array_like, shape [n_samples] The predicted values dmatrix: ``DMatrix`` The training set from which the labels will be extracted using ``dmatrix.get_label()`` """ def inner(preds, dmatrix): """internal function""" labels = dmatrix.get_label() return func(labels, preds) return inner
Decorate an objective function Converts an objective function using the typical sklearn metrics signature so that it is usable with ``xgboost.training.train`` Parameters ---------- func: callable Expects a callable with signature ``func(y_true, y_pred)``: y_true: array_like of shape [n_samples] The target values y_pred: array_like of shape [n_samples] The predicted values Returns ------- new_func: callable The new objective function as expected by ``xgboost.training.train``. The signature is ``new_func(preds, dmatrix)``: preds: array_like, shape [n_samples] The predicted values dmatrix: ``DMatrix`` The training set from which the labels will be extracted using ``dmatrix.get_label()``
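A hedged sketch of the signature conversion described above, with a tiny stand-in for xgboost.DMatrix (the stub class and the numbers are made up for illustration):

import numpy as np

def squared_error_obj(y_true, y_pred):       # sklearn-style signature
    grad = y_pred - y_true
    hess = np.ones_like(y_pred)
    return grad, hess

class _FakeDMatrix(object):                  # stand-in for xgboost.DMatrix
    def __init__(self, labels):
        self._labels = labels
    def get_label(self):
        return self._labels

wrapped = _objective_decorator(squared_error_obj)   # now takes (preds, dmatrix)
grad, hess = wrapped(np.array([0.5, 1.5]), _FakeDMatrix(np.array([1.0, 1.0])))
# grad == array([-0.5, 0.5]); hess == array([1.0, 1.0])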
def almost_unitary(gate: Gate) -> bool: """Return true if gate tensor is (almost) unitary""" res = (gate @ gate.H).asoperator() N = gate.qubit_nb return np.allclose(asarray(res), np.eye(2**N), atol=TOLERANCE)
Return true if gate tensor is (almost) unitary
def zsymDecorator(odd): """Decorator to deal with zsym=True input; set odd=True if the function is an odd function of z (like zforce)""" def wrapper(func): @wraps(func) def zsym_wrapper(*args,**kwargs): if args[0]._zsym: out= func(args[0],args[1],numpy.fabs(args[2]),**kwargs) else: out= func(*args,**kwargs) if odd and args[0]._zsym: return sign(args[2])*out else: return out return zsym_wrapper return wrapper
Decorator to deal with zsym=True input; set odd=True if the function is an odd function of z (like zforce)
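A self-contained toy showing the effect (the Toy class is purely illustrative): with _zsym set, the wrapped method is evaluated at |z| and, because odd=True, the sign of z is restored afterwards; the imports are the ones the decorator itself relies on.

import numpy
from numpy import sign
from functools import wraps

class Toy(object):
    def __init__(self, zsym):
        self._zsym = zsym
    @zsymDecorator(odd=True)
    def zforce(self, R, z):
        return -z                      # an odd function of z

assert Toy(True).zforce(1.0, -2.0) == Toy(False).zforce(1.0, -2.0) == 2.0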
def _parseSections(self, data, imageDosHeader, imageNtHeaders, parse_header_only=False): """Parses the sections in the memory and returns a list of them""" sections = [] optional_header_offset = imageDosHeader.header.e_lfanew + 4 + sizeof(IMAGE_FILE_HEADER) offset = optional_header_offset + imageNtHeaders.header.FileHeader.SizeOfOptionalHeader # start reading behind the dos- and ntheaders image_section_header_size = sizeof(IMAGE_SECTION_HEADER) for sectionNo in range(imageNtHeaders.header.FileHeader.NumberOfSections): ishdr = IMAGE_SECTION_HEADER.from_buffer(data, offset) if parse_header_only: raw = None bytes_ = bytearray() else: size = ishdr.SizeOfRawData raw = (c_ubyte * size).from_buffer(data, ishdr.PointerToRawData) bytes_ = bytearray(raw) sections.append(SectionData(header=ishdr, name=ishdr.Name.decode('ASCII', errors='ignore'), bytes=bytes_, raw=raw)) offset += image_section_header_size return sections
Parses the sections in the memory and returns a list of them
def see(obj=DEFAULT_ARG, *args, **kwargs): """ see(obj=anything) Show the features and attributes of an object. This function takes a single argument, ``obj``, which can be of any type. A summary of the object is printed immediately in the Python interpreter. For example:: >>> see([]) [] in + += * *= < <= == != > >= dir() hash() help() iter() len() repr() reversed() str() .append() .clear() .copy() .count() .extend() .index() .insert() .pop() .remove() .reverse() .sort() If this function is run without arguments, it will instead list the objects that are available in the current scope. :: >>> see() os random see() sys The return value is an instance of :class:`SeeResult`. """ use_locals = obj is DEFAULT_ARG if use_locals: # Get the local scope from the caller's stack frame. # Typically this is the scope of an interactive Python session. obj = Namespace(inspect.currentframe().f_back.f_locals) tokens = [] attrs = dir(obj) if not use_locals: for name, func in INSPECT_FUNCS: if func(obj): tokens.append(name) for feature in FEATURES: if feature.match(obj, attrs): tokens.append(feature.symbol) for attr in filter(lambda a: not a.startswith('_'), attrs): try: prop = getattr(obj, attr) except (AttributeError, Exception): # pylint: disable=broad-except prop = SeeError() action = output.display_name(name=attr, obj=prop, local=use_locals) tokens.append(action) if args or kwargs: tokens = handle_deprecated_args(tokens, args, kwargs) return output.SeeResult(tokens)
see(obj=anything) Show the features and attributes of an object. This function takes a single argument, ``obj``, which can be of any type. A summary of the object is printed immediately in the Python interpreter. For example:: >>> see([]) [] in + += * *= < <= == != > >= dir() hash() help() iter() len() repr() reversed() str() .append() .clear() .copy() .count() .extend() .index() .insert() .pop() .remove() .reverse() .sort() If this function is run without arguments, it will instead list the objects that are available in the current scope. :: >>> see() os random see() sys The return value is an instance of :class:`SeeResult`.
def set_evernote_spec(): """ set the spec of the notes :return: spec """ spec = NoteStore.NotesMetadataResultSpec() spec.includeTitle = True spec.includeAttributes = True return spec
set the spec of the notes :return: spec
def _firmware_update(firmwarefile='', host='', directory=''): ''' Update firmware for a single host ''' dest = os.path.join(directory, firmwarefile[7:]) __salt__['cp.get_file'](firmwarefile, dest) username = __pillar__['proxy']['admin_user'] password = __pillar__['proxy']['admin_password'] __salt__['dracr.update_firmware'](dest, host=host, admin_username=username, admin_password=password)
Update firmware for a single host
def vm_detach_nic(name, kwargs=None, call=None): ''' Detaches a network interface (NIC) from a virtual machine. .. versionadded:: 2016.3.0 name The name of the VM from which to detach the network interface. nic_id The ID of the nic to detach. CLI Example: .. code-block:: bash salt-cloud -a vm_detach_nic my-vm nic_id=1 ''' if call != 'action': raise SaltCloudSystemExit( 'The vm_detach_nic action must be called with -a or --action.' ) if kwargs is None: kwargs = {} nic_id = kwargs.get('nic_id', None) if nic_id is None: raise SaltCloudSystemExit( 'The vm_detach_nic function requires a \'nic_id\' to be provided.' ) server, user, password = _get_xml_rpc() auth = ':'.join([user, password]) vm_id = int(get_vm_id(kwargs={'name': name})) response = server.one.vm.detachnic(auth, vm_id, int(nic_id)) data = { 'action': 'vm.detachnic', 'nic_detached': response[0], 'vm_id': response[1], 'error_code': response[2], } return data
Detaches a network interface (NIC) from a virtual machine. .. versionadded:: 2016.3.0 name The name of the VM from which to detach the network interface. nic_id The ID of the nic to detach. CLI Example: .. code-block:: bash salt-cloud -a vm_detach_nic my-vm nic_id=1
def calculate_z2pt5_ngaw2(vs30): ''' Reads an array of vs30 values (in m/s) and returns the depth to the 2.5 km/s velocity horizon (in km) Ref: Campbell, K.W. & Bozorgnia, Y., 2014. 'NGA-West2 ground motion model for the average horizontal components of PGA, PGV, and 5pct damped linear acceleration response spectra.' Earthquake Spectra, 30(3), pp.1087–1114. :param vs30: the shear wave velocity (in m/s) at a depth of 30 m ''' c1 = 7.089 c2 = -1.144 z2pt5 = numpy.exp(c1 + numpy.log(vs30) * c2) return z2pt5
Reads an array of vs30 values (in m/s) and returns the depth to the 2.5 km/s velocity horizon (in km) Ref: Campbell, K.W. & Bozorgnia, Y., 2014. 'NGA-West2 ground motion model for the average horizontal components of PGA, PGV, and 5pct damped linear acceleration response spectra.' Earthquake Spectra, 30(3), pp.1087–1114. :param vs30: the shear wave velocity (in m/s) at a depth of 30 m
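Equivalently, z2.5 = exp(7.089 - 1.144 * ln(vs30)); as a quick sanity check (illustrative value), vs30 = 760 m/s gives roughly 0.6 km:

import numpy
z2pt5_760 = numpy.exp(7.089 - 1.144 * numpy.log(760.0))   # ~0.61 km for vs30 = 760 m/s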
def crick_angles(p, reference_axis, tag=True, reference_axis_name='ref_axis'): """Returns the Crick angle for each CA atom in the `Polymer`. Notes ----- The final value is in the returned list is `None`, since the angle calculation requires pairs of points on both the primitive and reference_axis. Parameters ---------- p : ampal.Polymer Reference `Polymer`. reference_axis : list(numpy.array or tuple or list) Length of reference_axis must equal length of the Polymer. Each element of reference_axis represents a point in R^3. tag : bool, optional If `True`, tags the `Polymer` with the reference axis coordinates and each Residue with its Crick angle. Crick angles are stored at the Residue level, but are calculated using the CA atom. reference_axis_name : str, optional Used to name the keys in tags at Chain and Residue level. Returns ------- cr_angles : list(float) The crick angles in degrees for each CA atom of the Polymer. Raises ------ ValueError If the Polymer and the reference_axis have unequal length. """ if not len(p) == len(reference_axis): raise ValueError( "The reference axis must contain the same number of points" " as the Polymer primitive.") prim_cas = p.primitive.coordinates p_cas = p.get_reference_coords() ref_points = reference_axis.coordinates cr_angles = [ dihedral(ref_points[i], prim_cas[i], prim_cas[i + 1], p_cas[i]) for i in range(len(prim_cas) - 1)] cr_angles.append(None) if tag: p.tags[reference_axis_name] = reference_axis monomer_tag_name = 'crick_angle_{0}'.format(reference_axis_name) for m, c in zip(p._monomers, cr_angles): m.tags[monomer_tag_name] = c return cr_angles
Returns the Crick angle for each CA atom in the `Polymer`. Notes ----- The final value in the returned list is `None`, since the angle calculation requires pairs of points on both the primitive and reference_axis. Parameters ---------- p : ampal.Polymer Reference `Polymer`. reference_axis : list(numpy.array or tuple or list) Length of reference_axis must equal length of the Polymer. Each element of reference_axis represents a point in R^3. tag : bool, optional If `True`, tags the `Polymer` with the reference axis coordinates and each Residue with its Crick angle. Crick angles are stored at the Residue level, but are calculated using the CA atom. reference_axis_name : str, optional Used to name the keys in tags at Chain and Residue level. Returns ------- cr_angles : list(float) The Crick angles in degrees for each CA atom of the Polymer. Raises ------ ValueError If the Polymer and the reference_axis have unequal length.
def digest(self): """Terminate the message-digest computation and return digest. Return the digest of the strings passed to the update() method so far. This is a 16-byte string which may contain non-ASCII characters, including null bytes. """ A = self.A B = self.B C = self.C D = self.D input = [] + self.input count = [] + self.count index = (self.count[0] >> 3) & 0x3f if index < 56: padLen = 56 - index else: padLen = 120 - index padding = [b'\200'] + [b'\000'] * 63 self.update(padding[:padLen]) # Append length (before padding). bits = _bytelist2long(self.input[:56]) + count self._transform(bits) # Store state in digest. digest = struct.pack("<IIII", self.A, self.B, self.C, self.D) self.A = A self.B = B self.C = C self.D = D self.input = input self.count = count return digest
Terminate the message-digest computation and return digest. Return the digest of the strings passed to the update() method so far. This is a 16-byte string which may contain non-ASCII characters, including null bytes.
def canonicalize_id(reference_id): """\ Returns the canonicalized form of the provided reference_id. WikiLeaks provides some malformed cable identifiers. If the provided `reference_id` is not valid, this method returns the valid reference identifier equivalent. If the reference identifier is valid, the reference id is returned unchanged. Note: The returned canonicalized identifier may not be a valid WikiLeaks identifier anymore. In most cases the returned canonical form is identical to the WikiLeaks identifier, but for malformed cable identifiers like "09SECTION01OF03SANJOSE525" it is not (becomes "09SANJOSE525"). `reference_id` The cable identifier to canonicalize """ if u'EMBASSY' in reference_id: return reference_id.replace(u'EMBASSY', u'') m = _C14N_PATTERN.match(reference_id) if m: origin = m.group(1) return reference_id.replace(origin, _C14N_FIXES[origin]) return MALFORMED_CABLE_IDS.get(reference_id, INVALID_CABLE_IDS.get(reference_id, reference_id))
\ Returns the canonicalized form of the provided reference_id. WikiLeaks provides some malformed cable identifiers. If the provided `reference_id` is not valid, this method returns the valid reference identifier equivalent. If the reference identifier is valid, the reference id is returned unchanged. Note: The returned canonicalized identifier may not be a valid WikiLeaks identifier anymore. In most cases the returned canonical form is identical to the WikiLeaks identifier, but for malformed cable identifiers like "09SECTION01OF03SANJOSE525" it is not (becomes "09SANJOSE525"). `reference_id` The cable identifier to canonicalize
def decode_list(self, integers): """List of ints to list of str.""" integers = list(np.squeeze(integers)) return self.encoders["inputs"].decode_list(integers)
List of ints to list of str.
def encode(self, raw=False): """Convert message to on-the-wire FIX format. :param raw: If True, encode pairs exactly as provided. Unless 'raw' is set, this function will calculate and correctly set the BodyLength (9) and Checksum (10) fields, and ensure that the BeginString (8), Body Length (9), Message Type (35) and Checksum (10) fields are in the right positions. This function does no further validation of the message content.""" buf = b"" if raw: # Walk pairs, creating string. for tag, value in self.pairs: buf += tag + b'=' + value + SOH_STR return buf # Cooked. for tag, value in self.pairs: if int(tag) in (8, 9, 35, 10): continue buf += tag + b'=' + value + SOH_STR # Prepend the message type. if self.message_type is None: raise ValueError("No message type set") buf = b"35=" + self.message_type + SOH_STR + buf # Calculate body length. # # From first byte after body length field, to the delimiter # before the checksum (which shouldn't be there yet). body_length = len(buf) # Prepend begin-string and body-length. if not self.begin_string: raise ValueError("No begin string set") buf = b"8=" + self.begin_string + SOH_STR + \ b"9=" + fix_val("%u" % body_length) + SOH_STR + \ buf # Calculate and append the checksum. checksum = 0 for c in buf: checksum += ord(c) if sys.version_info[0] == 2 else c buf += b"10=" + fix_val("%03u" % (checksum % 256,)) + SOH_STR return buf
Convert message to on-the-wire FIX format. :param raw: If True, encode pairs exactly as provided. Unless 'raw' is set, this function will calculate and correctly set the BodyLength (9) and Checksum (10) fields, and ensure that the BeginString (8), Body Length (9), Message Type (35) and Checksum (10) fields are in the right positions. This function does no further validation of the message content.
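A standalone illustration of the trailer arithmetic in the final step (the sample message bytes are made up): the checksum is the byte sum of everything before tag 10, modulo 256, zero-padded to three digits.

# Minimal made-up message: begin string, body length, message type, each SOH-terminated.
msg = b"8=FIX.4.2\x019=5\x0135=0\x01"
checksum = sum(msg) % 256                              # byte sum of the message so far
trailer = ("10=%03d\x01" % checksum).encode("ascii")   # e.g. b"10=NNN\x01"
wire = msg + trailer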
def destroyTempFiles(self): """Destroys the entire temp file hierarchy, getting rid of all files. """ os.system("rm -rf %s" % self.rootDir) logger.debug("Temp files created: %s, temp files actively destroyed: %s" % (self.tempFilesCreated, self.tempFilesDestroyed))
Destroys the entire temp file hierarchy, getting rid of all files.
def forward(self, src_seq, tgt_seq, src_valid_length=None, tgt_valid_length=None): #pylint: disable=arguments-differ """Generate the prediction given the src_seq and tgt_seq. This is used in training an NMT model. Parameters ---------- src_seq : NDArray tgt_seq : NDArray src_valid_length : NDArray or None tgt_valid_length : NDArray or None Returns ------- outputs : NDArray Shape (batch_size, tgt_length, tgt_word_num) additional_outputs : list of list Additional outputs of encoder and decoder, e.g, the attention weights """ additional_outputs = [] encoder_outputs, encoder_additional_outputs = self.encode(src_seq, valid_length=src_valid_length) decoder_states = self.decoder.init_state_from_encoder(encoder_outputs, encoder_valid_length=src_valid_length) outputs, _, decoder_additional_outputs =\ self.decode_seq(tgt_seq, decoder_states, tgt_valid_length) additional_outputs.append(encoder_additional_outputs) additional_outputs.append(decoder_additional_outputs) return outputs, additional_outputs
Generate the prediction given the src_seq and tgt_seq. This is used in training an NMT model. Parameters ---------- src_seq : NDArray tgt_seq : NDArray src_valid_length : NDArray or None tgt_valid_length : NDArray or None Returns ------- outputs : NDArray Shape (batch_size, tgt_length, tgt_word_num) additional_outputs : list of list Additional outputs of encoder and decoder, e.g, the attention weights
def returner(ret): ''' Parse the return data and return metrics to Librato. ''' librato_conn = _get_librato(ret) q = librato_conn.new_queue() if ret['fun'] == 'state.highstate': log.debug('Found returned Highstate data.') # Calculate the runtimes and number of failed states. stats = _calculate_runtimes(ret['return']) log.debug('Batching Metric retcode with %s', ret['retcode']) q.add('saltstack.highstate.retcode', ret['retcode'], tags={'Name': ret['id']}) log.debug( 'Batching Metric num_failed_jobs with %s', stats['num_failed_states'] ) q.add('saltstack.highstate.failed_states', stats['num_failed_states'], tags={'Name': ret['id']}) log.debug( 'Batching Metric num_passed_states with %s', stats['num_passed_states'] ) q.add('saltstack.highstate.passed_states', stats['num_passed_states'], tags={'Name': ret['id']}) log.debug('Batching Metric runtime with %s', stats['runtime']) q.add('saltstack.highstate.runtime', stats['runtime'], tags={'Name': ret['id']}) log.debug( 'Batching Metric runtime with %s', stats['num_failed_states'] + stats['num_passed_states'] ) q.add('saltstack.highstate.total_states', stats[ 'num_failed_states'] + stats['num_passed_states'], tags={'Name': ret['id']}) log.info('Sending metrics to Librato.') q.submit()
Parse the return data and return metrics to Librato.
def extractInputForTP(self, tm): """ Extract inputs for TP from the state of temporal memory. Three pieces of information are extracted: 1. correctly predicted cells 2. all active cells 3. bursting cells (unpredicted input) """ # bursting cells in layer 4 burstingColumns = tm.activeState["t"].sum(axis=1) burstingColumns[ burstingColumns < tm.cellsPerColumn ] = 0 burstingColumns[ burstingColumns == tm.cellsPerColumn ] = 1 # print "Bursting column indices=",burstingColumns.nonzero()[0] # correctly predicted cells in layer 4 correctlyPredictedCells = numpy.zeros(self._inputDimensions).astype(realDType) idx = (tm.predictedState["t-1"] + tm.activeState["t"]) == 2 idx = idx.reshape(self._inputDimensions) correctlyPredictedCells[idx] = 1.0 # print "Predicted->active cell indices=",correctlyPredictedCells.nonzero()[0] # all currently active cells in layer 4 spInputVector = tm.learnState["t"].reshape(self._inputDimensions) # spInputVector = tm.activeState["t"].reshape(self._inputDimensions) return (correctlyPredictedCells, spInputVector, burstingColumns)
Extract inputs for TP from the state of temporal memory. Three pieces of information are extracted: 1. correctly predicted cells 2. all active cells 3. bursting cells (unpredicted input)
def fromfits(infile, hdu = 0, verbose = True): """ Factory function that reads a FITS file and returns a f2nimage object. Use hdu to specify which HDU you want (primary = 0) """ pixelarray, hdr = ft.getdata(infile, hdu, header=True) pixelarray = np.asarray(pixelarray).transpose() #print pixelarray pixelarrayshape = pixelarray.shape if verbose : print "Input shape : (%i, %i)" % (pixelarrayshape[0], pixelarrayshape[1]) print "Input file BITPIX : %s" % (hdr["BITPIX"]) pixelarrayshape = np.asarray(pixelarrayshape) if verbose : print "Internal array type :", pixelarray.dtype.name return f2nimage(pixelarray, verbose = verbose)
Factory function that reads a FITS file and returns a f2nimage object. Use hdu to specify which HDU you want (primary = 0)
def rpush(self, key, value, *values): """Insert all the specified values at the tail of the list stored at key. """ return self.execute(b'RPUSH', key, value, *values)
Insert all the specified values at the tail of the list stored at key.
def connection_from_promised_list(data_promise, args=None, **kwargs): ''' A version of `connectionFromArray` that takes a promised array, and returns a promised connection. ''' return data_promise.then(lambda data: connection_from_list(data, args, **kwargs))
A version of `connectionFromArray` that takes a promised array, and returns a promised connection.
def get_endpoint_path(self, endpoint_id): '''Return the first full path to a folder in the endpoint, based on expanding the user's home from the Globus config file. This function is fragile, but I don't see any other way to do it. Parameters ========== endpoint_id: the endpoint id to look up the path for ''' config = os.path.expanduser("~/.globusonline/lta/config-paths") if not os.path.exists(config): bot.error('%s not found for a local Globus endpoint.' % config) sys.exit(1) path = None # Read in the config and get the root path config = [x.split(',')[0] for x in read_file(config)] for path in config: if os.path.exists(path): break # If we don't have an existing path, exit if path is None: bot.error('No path was found for a local Globus endpoint.') sys.exit(1) return path
Return the first full path to a folder in the endpoint, based on expanding the user's home from the Globus config file. This function is fragile, but I don't see any other way to do it. Parameters ========== endpoint_id: the endpoint id to look up the path for
def on_disconnect(self, client, userdata, result_code): """ Callback when the MQTT client is disconnected. In this case, the server waits five seconds before trying to reconnect. :param client: the client being disconnected. :param userdata: unused. :param result_code: result code. """ self.log_info("Disconnected with result code " + str(result_code)) self.state_handler.set_state(State.goodbye) time.sleep(5) self.thread_handler.run(target=self.start_blocking)
Callback when the MQTT client is disconnected. In this case, the server waits five seconds before trying to reconnect. :param client: the client being disconnected. :param userdata: unused. :param result_code: result code.
def shutdown(): '''Shut down the SDK. :returns: An exception object if an error occurred, a falsy value otherwise. :rtype: Exception ''' global _sdk_ref_count #pylint:disable=global-statement global _sdk_instance #pylint:disable=global-statement global _should_shutdown #pylint:disable=global-statement with _sdk_ref_lk: logger.debug("shutdown: ref count = %d, should_shutdown = %s", \ _sdk_ref_count, _should_shutdown) nsdk = nativeagent.try_get_sdk() if not nsdk: logger.warning('shutdown: SDK not initialized or already shut down') _sdk_ref_count = 0 return None if _sdk_ref_count > 1: logger.debug('shutdown: reference count is now %d', _sdk_ref_count) _sdk_ref_count -= 1 return None logger.info('shutdown: Shutting down SDK.') try: if _should_shutdown: _rc = nsdk.shutdown() if _rc == ErrorCode.NOT_INITIALIZED: logger.warning('shutdown: native SDK was not initialized') else: nativeagent.checkresult(nsdk, _rc, 'shutdown') _should_shutdown = False except SDKError as e: logger.warning('shutdown failed', exc_info=sys.exc_info()) return e _sdk_ref_count = 0 _sdk_instance = None nativeagent._force_initialize(None) #pylint:disable=protected-access logger.debug('shutdown: completed') return None
Shut down the SDK. :returns: An exception object if an error occurred, a falsy value otherwise. :rtype: Exception
def getmembers_static(cls): """Gets members (vars) from all scopes using ONLY static information. You most likely want to use ScopeStack.getmembers instead. Returns: Set of available vars. Raises: NotImplementedError if any scope fails to implement 'getmembers'. """ names = set() for scope in cls.scopes: names.update(structured.getmembers_static(scope)) return names
Gets members (vars) from all scopes using ONLY static information. You most likely want to use ScopeStack.getmembers instead. Returns: Set of available vars. Raises: NotImplementedError if any scope fails to implement 'getmembers'.
def html_to_rgb(html): """Convert the HTML color to (r, g, b). Parameters: :html: the HTML definition of the color (#RRGGBB or #RGB or a color name). Returns: The color as an (r, g, b) tuple in the range: r[0...1], g[0...1], b[0...1] Throws: :ValueError: If html is neither a known color name or a hexadecimal RGB representation. >>> '(%g, %g, %g)' % html_to_rgb('#ff8000') '(1, 0.501961, 0)' >>> '(%g, %g, %g)' % html_to_rgb('ff8000') '(1, 0.501961, 0)' >>> '(%g, %g, %g)' % html_to_rgb('#f60') '(1, 0.4, 0)' >>> '(%g, %g, %g)' % html_to_rgb('f60') '(1, 0.4, 0)' >>> '(%g, %g, %g)' % html_to_rgb('lemonchiffon') '(1, 0.980392, 0.803922)' """ html = html.strip().lower() if html[0]=='#': html = html[1:] elif html in NAMED_COLOR: html = NAMED_COLOR[html][1:] if len(html)==6: rgb = html[:2], html[2:4], html[4:] elif len(html)==3: rgb = ['%c%c' % (v,v) for v in html] else: raise ValueError("input #%s is not in #RRGGBB format" % html) return tuple(((int(n, 16) / 255.0) for n in rgb))
Convert the HTML color to (r, g, b). Parameters: :html: the HTML definition of the color (#RRGGBB or #RGB or a color name). Returns: The color as an (r, g, b) tuple in the range: r[0...1], g[0...1], b[0...1] Throws: :ValueError: If html is neither a known color name or a hexadecimal RGB representation. >>> '(%g, %g, %g)' % html_to_rgb('#ff8000') '(1, 0.501961, 0)' >>> '(%g, %g, %g)' % html_to_rgb('ff8000') '(1, 0.501961, 0)' >>> '(%g, %g, %g)' % html_to_rgb('#f60') '(1, 0.4, 0)' >>> '(%g, %g, %g)' % html_to_rgb('f60') '(1, 0.4, 0)' >>> '(%g, %g, %g)' % html_to_rgb('lemonchiffon') '(1, 0.980392, 0.803922)'
def _impl(lexer): """Return an Implies expression.""" p = _sumterm(lexer) tok = next(lexer) # SUMTERM '=>' IMPL if isinstance(tok, OP_rarrow): q = _impl(lexer) return ('implies', p, q) # SUMTERM '<=>' IMPL elif isinstance(tok, OP_lrarrow): q = _impl(lexer) return ('equal', p, q) # SUMTERM else: lexer.unpop_token(tok) return p
Return an Implies expression.
def tracebacks_from_lines(lines_iter): """Generator that yields tracebacks found in a lines iterator The lines iterator can be: - a file-like object - a list (or deque) of lines. - any other iterable sequence of strings """ tbgrep = TracebackGrep() for line in lines_iter: tb = tbgrep.process(line) if tb: yield tb
Generator that yields tracebacks found in a lines iterator The lines iterator can be: - a file-like object - a list (or deque) of lines. - any other iterable sequence of strings
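Typical use might look like the following (the log lines are made up, and TracebackGrep is assumed to come from the same module as the generator above and to behave as its name suggests):

log_lines = [
    "INFO starting\n",
    "Traceback (most recent call last):\n",
    '  File "job.py", line 10, in run\n',
    "    raise ValueError('boom')\n",
    "ValueError: boom\n",
    "INFO recovered\n",
]
for tb in tracebacks_from_lines(log_lines):   # any iterable of lines works
    print(tb)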
def get_command(self, ctx, name): """Retrieve the appropriate method from the Resource, decorate it as a click command, and return that method. """ # Sanity check: Does a method exist corresponding to this # command? If not, None is returned for click to raise # exception. if not hasattr(self.resource, name): return None # Get the method. method = getattr(self.resource, name) # Get any attributes that were given at command-declaration # time. attrs = getattr(method, '_cli_command_attrs', {}) # If the help message comes from the docstring, then # convert it into a message specifically for this resource. help_text = inspect.getdoc(method) attrs['help'] = self._auto_help_text(help_text or '') # On some methods, we ignore the defaults, which are intended # for writing and not reading; process this. ignore_defaults = attrs.pop('ignore_defaults', False) # Wrap the method, such that it outputs its final return # value rather than returning it. new_method = self._echo_method(method) # Soft copy the "__click_params__", if any exist. # This is the internal holding method that the click library # uses to store @click.option and @click.argument directives # before the method is converted into a command. # # Because self._echo_method uses @functools.wraps, this is # actually preserved; the purpose of copying it over is # so we can get our resource fields at the top of the help; # the easiest way to do this is to load them in before the # conversion takes place. (This is a happy result of Armin's # work to get around Python's processing decorators # bottom-to-top.) click_params = getattr(method, '__click_params__', []) new_method.__click_params__ = copy(click_params) new_method = with_global_options(new_method) # Write options based on the fields available on this resource. fao = attrs.pop('use_fields_as_options', True) if fao: for field in reversed(self.resource.fields): if not field.is_option: continue # If we got an iterable rather than a boolean, # then it is a list of fields to use; check for # presence in that list. if not isinstance(fao, bool) and field.name not in fao: continue # Create the initial arguments based on the # option value. If we have a different key to use # (which is what gets routed to the Tower API), # ensure that is the first argument. args = [field.option] if field.key: args.insert(0, field.key) # short name aliases for common flags short_fields = { 'name': 'n', 'description': 'd', 'inventory': 'i', 'extra_vars': 'e' } if field.name in short_fields: args.append('-'+short_fields[field.name]) # Apply the option to the method. option_help = field.help if isinstance(field.type, StructuredInput): option_help += ' Use @ to get JSON or YAML from a file.' if field.required: option_help = '[REQUIRED] ' + option_help elif field.read_only: option_help = '[READ ONLY] ' + option_help option_help = '[FIELD]' + option_help click.option( *args, default=field.default if not ignore_defaults else None, help=option_help, type=field.type, show_default=field.show_default, multiple=field.multiple, is_eager=False )(new_method) # Make a click Command instance using this method # as the callback, and return it. cmd = click.command(name=name, cls=ActionSubcommand, **attrs)(new_method) # If this method has a `pk` positional argument, # then add a click argument for it. code = six.get_function_code(method) if 'pk' in code.co_varnames: click.argument('pk', nargs=1, required=False, type=str, metavar='[ID]')(cmd) # Done; return the command. return cmd
Retrieve the appropriate method from the Resource, decorate it as a click command, and return that method.
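The key trick in get_command is that click decorators are ordinary callables, so options and commands can be attached to a function at runtime instead of with literal @-syntax. A minimal standalone sketch of that pattern; the greet function and --name option are hypothetical stand-ins, not part of the Resource class:

import click

def greet(name):
    """Print a greeting for the given name."""
    click.echo('Hello, %s!' % name)

# Apply the option and command decorators programmatically.
greet = click.option('--name', '-n', default='world', help='Who to greet.')(greet)
cmd = click.command(name='greet')(greet)

if __name__ == '__main__':
    cmd()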
def isVideo(self):
    """ Is the stream labelled as a video stream. """
    val = False
    if self.__dict__['codec_type']:
        if self.codec_type == 'video':
            val = True
    return val
Is the stream labelled as a video stream.
def geometry_from_grid(self, grid, pixel_centres, pixel_neighbors, pixel_neighbors_size, buffer=1e-8):
    """Determine the geometry of the Voronoi pixelization, by aligning it with the outer-most coordinates on a \
    grid plus a small buffer.

    Parameters
    ----------
    grid : ndarray
        The (y,x) grid of coordinates which determine the Voronoi pixelization's geometry.
    pixel_centres : ndarray
        The (y,x) centre of every Voronoi pixel in arc-seconds.
    pixel_neighbors : ndarray
        An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \
        the Voronoi grid (entries of -1 correspond to no neighbor).
    pixel_neighbors_size : ndarray
        An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \
        Voronoi grid.
    buffer : float
        The size of the buffer (in arc-seconds) added around the outer-most grid coordinates.
    """
    y_min = np.min(grid[:, 0]) - buffer
    y_max = np.max(grid[:, 0]) + buffer
    x_min = np.min(grid[:, 1]) - buffer
    x_max = np.max(grid[:, 1]) + buffer
    shape_arcsec = (y_max - y_min, x_max - x_min)
    origin = ((y_max + y_min) / 2.0, (x_max + x_min) / 2.0)
    return self.Geometry(shape_arcsec=shape_arcsec, pixel_centres=pixel_centres, origin=origin,
                         pixel_neighbors=pixel_neighbors, pixel_neighbors_size=pixel_neighbors_size)
Determine the geometry of the Voronoi pixelization, by aligning it with the outer-most coordinates on a \
grid plus a small buffer.

Parameters
----------
grid : ndarray
    The (y,x) grid of coordinates which determine the Voronoi pixelization's geometry.
pixel_centres : ndarray
    The (y,x) centre of every Voronoi pixel in arc-seconds.
pixel_neighbors : ndarray
    An array of length (voronoi_pixels) which provides the index of all neighbors of every pixel in \
    the Voronoi grid (entries of -1 correspond to no neighbor).
pixel_neighbors_size : ndarray
    An array of length (voronoi_pixels) which gives the number of neighbors of every pixel in the \
    Voronoi grid.
buffer : float
    The size of the buffer (in arc-seconds) added around the outer-most grid coordinates.
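The core of the geometry calculation is just a buffered bounding box around the (y,x) grid. A standalone sketch with a made-up grid (the coordinate values are illustrative, not from the library):

import numpy as np

grid = np.array([[0.0, 0.0], [1.0, 2.0], [-1.0, 0.5]])  # (y, x) coordinates
buffer = 1e-8

y_min, y_max = grid[:, 0].min() - buffer, grid[:, 0].max() + buffer
x_min, x_max = grid[:, 1].min() - buffer, grid[:, 1].max() + buffer

shape_arcsec = (y_max - y_min, x_max - x_min)            # extent of the pixelization
origin = ((y_max + y_min) / 2.0, (x_max + x_min) / 2.0)  # centre of that extent
print(shape_arcsec, origin)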
def set_site_energies( self, energies ):
    """
    Set the energies for every site in the lattice according to the site labels.

    Args:
        energies (Dict(Str:Float)): Dictionary of energies for each site label, e.g.::

            { 'A' : 1.0, 'B' : 0.0 }

    Returns:
        None
    """
    self.site_energies = energies
    for site_label in energies:
        for site in self.sites:
            if site.label == site_label:
                site.energy = energies[ site_label ]
Set the energies for every site in the lattice according to the site labels.

Args:
    energies (Dict(Str:Float)): Dictionary of energies for each site label, e.g.::

        { 'A' : 1.0, 'B' : 0.0 }

Returns:
    None
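A hedged usage sketch: the Site and Lattice classes below are minimal stand-ins written for illustration, not the library's actual classes, but they show how the label-to-energy mapping is applied to every matching site.

class Site:
    def __init__(self, label):
        self.label = label
        self.energy = None

class Lattice:
    def __init__(self, sites):
        self.sites = sites

    def set_site_energies(self, energies):
        self.site_energies = energies
        for site_label in energies:
            for site in self.sites:
                if site.label == site_label:
                    site.energy = energies[site_label]

lattice = Lattice([Site('A'), Site('B'), Site('A')])
lattice.set_site_energies({'A': 1.0, 'B': 0.0})
print([site.energy for site in lattice.sites])  # [1.0, 0.0, 1.0]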
def ReadFileObject(self, file_object): """Reads artifact definitions from a file-like object. Args: file_object (file): file-like object to read from. Yields: ArtifactDefinition: an artifact definition. Raises: FormatError: if the format of the JSON artifact definition is not set or incorrect. """ # TODO: add try, except? json_definitions = json.loads(file_object.read()) last_artifact_definition = None for json_definition in json_definitions: try: artifact_definition = self.ReadArtifactDefinitionValues(json_definition) except errors.FormatError as exception: error_location = 'At start' if last_artifact_definition: error_location = 'After: {0:s}'.format(last_artifact_definition.name) raise errors.FormatError( '{0:s} {1!s}'.format(error_location, exception)) yield artifact_definition last_artifact_definition = artifact_definition
Reads artifact definitions from a file-like object. Args: file_object (file): file-like object to read from. Yields: ArtifactDefinition: an artifact definition. Raises: FormatError: if the format of the JSON artifact definition is not set or incorrect.
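The detail worth noting is how the reader remembers the last successfully parsed definition so a later failure can be located ("At start" vs. "After: <name>"). A generic sketch of that pattern, independent of the artifacts library and its classes:

def parse_all(items, parse_one):
    """Yield parsed items; on failure, report where in the sequence it happened."""
    last_ok = None
    for item in items:
        try:
            parsed = parse_one(item)
        except ValueError as exception:
            where = 'At start' if last_ok is None else 'After: {0}'.format(last_ok)
            raise ValueError('{0} {1}'.format(where, exception))
        yield parsed
        last_ok = parsed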
def unfreeze(self):
    """ Unfreeze model layers """
    for child in self.model.children():
        mu.unfreeze_layer(child)
Unfreeze model layers
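The helper mu.unfreeze_layer is not shown here. In PyTorch, "unfreezing" a layer usually just means re-enabling gradients on its parameters, so a plausible stand-in looks like the sketch below; this is an assumption about the helper, not the library's actual implementation.

def unfreeze_layer(layer):
    """Re-enable gradient updates for every parameter in the given module."""
    for param in layer.parameters():
        param.requires_grad = True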
def start(self): """Execution happening on jhubctl.""" # Get specified resource. resource_list = getattr(self, f'{self.resource_type}_list') resource_action = getattr(resource_list, self.resource_action) resource_action(self.resource_name)
Execution happening on jhubctl.
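start is a small getattr-based dispatcher: the resource type selects an attribute, and the action selects a method on it. A standalone sketch of the same pattern with made-up resource names (ClusterList, 'demo', etc. are illustrative only):

class ClusterList:
    def create(self, name):
        print('creating cluster %s' % name)

class App:
    def __init__(self):
        self.cluster_list = ClusterList()
        self.resource_type = 'cluster'
        self.resource_action = 'create'
        self.resource_name = 'demo'

    def start(self):
        resource_list = getattr(self, f'{self.resource_type}_list')
        resource_action = getattr(resource_list, self.resource_action)
        resource_action(self.resource_name)

App().start()  # creating cluster demo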
def _make_3d(field, twod): """Add a dimension to field if necessary. Args: field (numpy.array): the field that need to be 3d. twod (str): 'XZ', 'YZ' or None depending on what is relevant. Returns: numpy.array: reshaped field. """ shp = list(field.shape) if twod and 'X' in twod: shp.insert(1, 1) elif twod: shp.insert(0, 1) return field.reshape(shp)
Add a dimension to field if necessary. Args: field (numpy.array): the field that need to be 3d. twod (str): 'XZ', 'YZ' or None depending on what is relevant. Returns: numpy.array: reshaped field.
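A quick demonstration of the reshaping with plain numpy (the field values and shapes are illustrative):

import numpy as np

field = np.zeros((4, 5))        # a 2D field
xz = field.reshape((4, 1, 5))   # twod='XZ': new axis inserted at position 1
yz = field.reshape((1, 4, 5))   # twod='YZ': new axis inserted at position 0
print(xz.shape, yz.shape)       # (4, 1, 5) (1, 4, 5)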
def mlp(feature, hparams, name="mlp"): """Multi layer perceptron with dropout and relu activation.""" with tf.variable_scope(name, "mlp", values=[feature]): num_mlp_layers = hparams.num_mlp_layers mlp_dim = hparams.mlp_dim for _ in range(num_mlp_layers): feature = common_layers.dense(feature, mlp_dim, activation=tf.nn.relu) feature = tf.nn.dropout(feature, keep_prob=1.-hparams.dropout) return feature
Multi layer perceptron with dropout and relu activation.
def typing(self, *, channel: str): """Sends a typing indicator to the specified channel. This indicates that this app is currently writing a message to send to a channel. Args: channel (str): The channel id. e.g. 'C024BE91L' Raises: SlackClientNotConnectedError: Websocket connection is closed. """ payload = {"id": self._next_msg_id(), "type": "typing", "channel": channel} self.send_over_websocket(payload=payload)
Sends a typing indicator to the specified channel. This indicates that this app is currently writing a message to send to a channel. Args: channel (str): The channel id. e.g. 'C024BE91L' Raises: SlackClientNotConnectedError: Websocket connection is closed.
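A brief usage sketch, assuming an already-connected RTM client instance named rtm_client (that name is an assumption; the channel id is the docstring's own example). Note that channel is keyword-only:

rtm_client.typing(channel='C024BE91L')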
def getJson(url): """Download json and return simplejson object""" site = urllib2.urlopen(url, timeout=300) return json.load(site)
Download json and return simplejson object
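The function targets Python 2 (urllib2). For reference, a roughly equivalent Python 3 sketch using only the standard library:

import json
from urllib.request import urlopen

def get_json(url):
    """Download JSON from url and return the decoded object."""
    with urlopen(url, timeout=300) as site:
        return json.load(site)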
def create_vpn_gateway(self, type, availability_zone=None):
    """
    Create a new Vpn Gateway

    :type type: str
    :param type: Type of VPN Connection.  Only valid value currently is 'ipsec.1'

    :type availability_zone: str
    :param availability_zone: The Availability Zone where you want the VPN gateway.

    :rtype: The newly created VpnGateway
    :return: A :class:`boto.vpc.vpngateway.VpnGateway` object
    """
    params = {'Type' : type}
    if availability_zone:
        params['AvailabilityZone'] = availability_zone
    return self.get_object('CreateVpnGateway', params, VpnGateway)
Create a new Vpn Gateway

:type type: str
:param type: Type of VPN Connection.  Only valid value currently is 'ipsec.1'

:type availability_zone: str
:param availability_zone: The Availability Zone where you want the VPN gateway.

:rtype: The newly created VpnGateway
:return: A :class:`boto.vpc.vpngateway.VpnGateway` object
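A hedged usage sketch with boto 2; the region and availability zone names are placeholders, not values from the source:

import boto.vpc

conn = boto.vpc.connect_to_region('us-east-1')
gateway = conn.create_vpn_gateway('ipsec.1', availability_zone='us-east-1a')
print(gateway.id)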
def create_blocking_connection(host): """ Return properly created blocking connection. Args: host (str): Host as it is defined in :func:`.get_amqp_settings`. Uses :func:`edeposit.amqp.amqpdaemon.getConParams`. """ return pika.BlockingConnection( amqpdaemon.getConParams( settings.get_amqp_settings()[host.lower()]["vhost"] ) )
Return properly created blocking connection. Args: host (str): Host as it is defined in :func:`.get_amqp_settings`. Uses :func:`edeposit.amqp.amqpdaemon.getConParams`.
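getConParams and the settings module are edeposit-specific. For reference, a plain pika equivalent built by hand; the host, virtual host, and credentials below are placeholders, not the project's real configuration:

import pika

params = pika.ConnectionParameters(
    host='localhost',
    virtual_host='harvester',
    credentials=pika.PlainCredentials('guest', 'guest'),
)
connection = pika.BlockingConnection(params)
channel = connection.channel()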