text: string (lengths 78 to 104k) · score: float64 (range 0 to 0.18)
def report_progress(stream=None):
    """Report progress from any currently installed reporters.

    Args:
        stream: The text stream (default: sys.stderr) to which
            progress will be reported.
    """
    if stream is None:
        stream = sys.stderr
    for reporter in _reporters:
        reporter(stream)
0.003077
def get_post_reference_section_keyword_patterns():
    """Return a list of compiled regex patterns used to search for various
       keywords that can often be found after, and therefore suggest the end of,
       a reference section in a full-text document.
       @return: (list) of compiled regex patterns.
    """
    compiled_patterns = []
    patterns = [u'(' + _create_regex_pattern_add_optional_spaces_to_word_characters(u'prepared') +
                ur'|' + _create_regex_pattern_add_optional_spaces_to_word_characters(u'created') +
                ur').*(AAS\s*)?\sLATEX',
                ur'AAS\s+?LATEX\s+?' +
                _create_regex_pattern_add_optional_spaces_to_word_characters(u'macros') + u'v',
                ur'^\s*' +
                _create_regex_pattern_add_optional_spaces_to_word_characters(u'This paper has been produced using'),
                ur'^\s*' +
                _create_regex_pattern_add_optional_spaces_to_word_characters(u'This article was processed by the author using Springer-Verlag') +
                u' LATEX']
    for p in patterns:
        compiled_patterns.append(re.compile(p, re.I | re.UNICODE))
    return compiled_patterns
0.009942
def format(self, text, width=78, indent=4):
    """Apply textwrap to a given text string"""
    return textwrap.fill(
        text,
        width=width,
        initial_indent=' ' * indent,
        subsequent_indent=' ' * indent,
    )
0.007605
def manage_schedule(self, tag, data):
    '''
    Manage the schedule: add, modify, delete, enable or disable jobs,
    or query schedule state.
    '''
    func = data.get('func', None)
    name = data.get('name', None)
    schedule = data.get('schedule', None)
    where = data.get('where', None)
    persist = data.get('persist', None)

    funcs = {'delete': ('delete_job', (name, persist)),
             'add': ('add_job', (schedule, persist)),
             'modify': ('modify_job', (name, schedule, persist)),
             'enable': ('enable_schedule', ()),
             'disable': ('disable_schedule', ()),
             'enable_job': ('enable_job', (name, persist)),
             'disable_job': ('disable_job', (name, persist)),
             'postpone_job': ('postpone_job', (name, data)),
             'skip_job': ('skip_job', (name, data)),
             'reload': ('reload', (schedule,)),
             'list': ('list', (where,)),
             'save_schedule': ('save_schedule', ()),
             'get_next_fire_time': ('get_next_fire_time', (name,))}

    # Call the appropriate schedule function; an unknown func yields None,
    # whose unpacking raises the TypeError caught below.
    try:
        alias, params = funcs.get(func)
        getattr(self.schedule, alias)(*params)
    except TypeError:
        log.error('Function "%s" is unavailable in salt.utils.scheduler', func)
0.001386
def _plain_auth_stage2(self, _unused):
    """Do the second stage (<iq type='set'/>) of legacy "plain"
    authentication.

    [client only]"""
    iq = Iq(stanza_type="set")
    q = iq.new_query("jabber:iq:auth")
    q.newTextChild(None, "username", to_utf8(self.my_jid.node))
    q.newTextChild(None, "resource", to_utf8(self.my_jid.resource))
    q.newTextChild(None, "password", to_utf8(self.password))
    self.send(iq)
    self.set_response_handlers(iq, self.auth_finish, self.auth_error)
    iq.free()
0.022181
def parse_manifest(manifest):
    """
    Return a list of dicts containing an rpm name, version and release
    e.g.: [{'name': 'httpd', 'version': '1.3.39', 'release': '1'}]
    """
    regex = re.compile('(.*)-(.*)')
    manifest = os.path.expanduser(manifest)
    if not os.path.exists(manifest):
        raise JuicerManifestError('File not found: %s' % manifest)

    rpm_list = []
    fd = open(manifest)
    data = yaml.load(fd)
    if data is None:
        raise JuicerManifestError('%s contains no items' % manifest)

    for pkg_name, version in data.iteritems():
        if version == 'absent' or version == 'purged':
            juicer.utils.Log.log_debug('%s is absent/purged. Skipping...' % pkg_name)
        elif version == 'latest':
            juicer.utils.Log.log_debug('%s is set to latest. Finding...' % pkg_name)
            lversion, release = juicer.utils.find_latest(pkg_name)
            if not lversion and not release:
                # package wasn't found in repo so don't add it to the list
                continue
            juicer.utils.Log.log_debug('Adding %s version %s release %s' % (pkg_name, lversion, release))
            rpm_list.append({'name': pkg_name, 'version': lversion, 'release': release})
        else:
            try:
                _m = regex.match(version)
                version = _m.group(1)
                release = _m.group(2)
                rpm_list.append({'name': pkg_name, 'version': version, 'release': release})
                juicer.utils.Log.log_debug('Adding %s version %s release %s' % (pkg_name, version, release))
            except AttributeError:
                # regex did not match, so _m is None
                raise JuicerManifestError('The manifest %s is improperly formatted' % manifest)

    return rpm_list
0.005114
def generateVectors():
    """Convert the known ra/decs of the channel corners into unit vectors.

    This code creates the contents of the function loadOriginVectors() (below)
    """
    ra_deg = 290.66666667
    dec_deg = +44.5
    # rollAngle_deg = 33.0
    rollAngle_deg = +123.

    boresight = r.vecFromRaDec(ra_deg, dec_deg)

    # Read in prime mission coords and convert to vectors
    inFile = "../etc/fov.txt"
    inFile = np.loadtxt(inFile)
    radecs = np.zeros((len(inFile), 3))
    rotate = radecs * 0
    for i, row in enumerate(inFile):
        radecs[i, :] = r.vecFromRaDec(inFile[i, 4], inFile[i, 5])
        rotate[i, :] = r.rotateAroundVector(radecs[i], boresight,
                                            -rollAngle_deg)

    # Slew to ra/dec of zero
    Ra = r.rightAscensionRotationMatrix(-ra_deg)
    Rd = r.declinationRotationMatrix(-dec_deg)
    R = np.dot(Rd, Ra)

    origin = rotate * 0
    for i, row in enumerate(rotate):
        origin[i] = np.dot(R, rotate[i])

    mp.plot(origin[:, 0], origin[:, 1])

    # Print out the results
    # import pdb; pdb.set_trace()
    print("[")
    for i in range(len(inFile)):
        ch = channelFromModOut(inFile[i, 0], inFile[i, 1])
        print("[%3i., %3i., %3i., %13.7f, %13.7f, %13.7f], \\" % (
            inFile[i, 0], inFile[i, 1], ch,
            origin[i, 0], origin[i, 1], origin[i, 2]))
    print("]")
0.014043
def iter_xCharts(self):
    """
    Generate each xChart child element in document.
    """
    plot_tags = (
        qn('c:area3DChart'), qn('c:areaChart'), qn('c:bar3DChart'),
        qn('c:barChart'), qn('c:bubbleChart'), qn('c:doughnutChart'),
        qn('c:line3DChart'), qn('c:lineChart'), qn('c:ofPieChart'),
        qn('c:pie3DChart'), qn('c:pieChart'), qn('c:radarChart'),
        qn('c:scatterChart'), qn('c:stockChart'), qn('c:surface3DChart'),
        qn('c:surfaceChart')
    )
    for child in self.iterchildren():
        if child.tag not in plot_tags:
            continue
        yield child
0.00299
def set_branding(self, asset_ids):
    """Sets the branding.

    arg:    asset_ids (osid.id.Id[]): the new assets
    raise:  InvalidArgument - ``asset_ids`` is invalid
    raise:  NoAccess - ``Metadata.isReadOnly()`` is ``true``
    raise:  NullArgument - ``asset_ids`` is ``null``
    *compliance: mandatory -- This method must be implemented.*

    """
    if asset_ids is None:
        raise NullArgument('asset_ids cannot be None')
    if self.get_branding_metadata().is_read_only():
        raise NoAccess()
    if not isinstance(asset_ids, list):
        raise InvalidArgument('asset_ids must be a list')
    if not self.my_osid_object_form._is_valid_input(asset_ids,
                                                    self.get_branding_metadata(),
                                                    array=True):
        raise InvalidArgument()
    branding_ids = []
    for asset_id in asset_ids:
        branding_ids.append(str(asset_id))
    self.my_osid_object_form._my_map['brandingIds'] = branding_ids
0.002725
def put_contacts(self, uid, **kwargs):
    """
    Assign contacts to the specified list.

    :Example:

    client.lists.put_contacts(uid=1901010, contacts="1723812,1239912")

    :param int uid: The unique id of the List. Required.
    :param str contacts: Contact ID(s), separated by comma. Required.
    """
    return self.update_subresource_instance(uid,
                                            body=kwargs,
                                            subresource=None,
                                            slug="contacts")
0.00339
def estimate_noise_std(img, average=True):
    """Estimate standard deviation of noise in ``img``.

    The algorithm, given in [Immerkaer1996], estimates the noise in an image.

    Parameters
    ----------
    img : array-like
        Array to estimate noise in.
    average : bool
        If ``True``, return the mean noise in the image, otherwise give a
        pointwise estimate.

    Returns
    -------
    noise : float

    Examples
    --------
    Create image with noise 1.0, verify result

    >>> img = np.random.randn(10, 10)
    >>> result = estimate_noise_std(img)  # should be about 1

    Also works with higher dimensional arrays

    >>> img = np.random.randn(3, 3, 3)
    >>> result = estimate_noise_std(img)

    The method can also estimate the noise pointwise (but with high
    uncertainty):

    >>> img = np.random.randn(3, 3, 3)
    >>> result = estimate_noise_std(img, average=False)

    References
    ----------
    [Immerkaer1996] Immerkaer, J. *Fast Noise Variance Estimation*.
    Computer Vision and Image Understanding, 1996.
    """
    import scipy.signal
    import functools
    img = np.asarray(img, dtype='float')

    M = functools.reduce(np.add.outer, [[-1, 2, -1]] * img.ndim)

    convolved = scipy.signal.fftconvolve(img, M, mode='valid')
    if average:
        conv_var = np.sum(convolved ** 2) / convolved.size
    else:
        conv_var = convolved ** 2
        # Pad in order to retain shape
        conv_var = np.pad(conv_var, pad_width=1, mode='edge')

    scale = np.sum(np.square(M))
    sigma = np.sqrt(conv_var / scale)
    return sigma
0.000625
def parse(self, resource=None):
    """ Parse a list of directories.

    :param resource: List of folders
    """
    if resource is None:
        resource = self.__resources__

    self.inventory = self.dispatcher.collection
    try:
        self._parse(resource)
    except MyCapytain.errors.UndispatchedTextError as E:
        if self.RAISE_ON_UNDISPATCHED is True:
            raise UndispatchedTextError(E)

    self.inventory = self.dispatcher.collection
    return self.inventory
0.003683
def process(self, frames, eod, spec_range=120.0):
    """ Returns a tuple containing the spectral centroid and the spectrum
    (dB scales) of the input audio frames. FFT window sizes are adaptable
    to the input frame size."""
    samples = frames[:, 0]
    nsamples = len(frames[:, 0])
    if nsamples != self.blocksize:
        self.window = self.window_function(nsamples)
    samples *= self.window

    while nsamples > self.fft_size:
        self.fft_size = 2 * self.fft_size

    zeros_p = numpy.zeros(self.fft_size / 2 - int(nsamples / 2))
    if nsamples % 2:
        zeros_n = numpy.zeros(self.fft_size / 2 - int(nsamples / 2) - 1)
    else:
        zeros_n = numpy.zeros(self.fft_size / 2 - int(nsamples / 2))
    samples = numpy.concatenate((zeros_p, samples, zeros_n), axis=0)

    fft = numpy.fft.fft(samples)
    # normalized abs(FFT) between 0 and 1
    spectrum = numpy.abs(fft[:fft.shape[0] / 2 + 1]) / float(nsamples)
    length = numpy.float64(spectrum.shape[0])

    # scale the db spectrum from [-spec_range dB .. 0 dB] to [0..1]
    db_spectrum = ((20 * (numpy.log10(spectrum + 1e-30)))
                   .clip(-spec_range, 0.0) + spec_range) / spec_range

    energy = spectrum.sum()
    spectral_centroid = 0

    if energy > 1e-20:
        # calculate the spectral centroid
        if self.spectrum_range is None:
            self.spectrum_range = numpy.arange(length)
        spectral_centroid = (spectrum * self.spectrum_range).sum() / \
                            (energy * (length - 1)) * \
                            self.samplerate * 0.5
        # clip > log10 > scale between 0 and 1
        spectral_centroid = (math.log10(self.clip(spectral_centroid,
                                                  self.lower, self.higher)) -
                             self.lower_log) / (self.higher_log - self.lower_log)

    return (spectral_centroid, db_spectrum)
0.000945
def check_subprocess(self):
    """
    Make sure the process didn't exit with an error and run the checks.

    :rtype: bool
    :return: the actual check status
    :raise ProcessExitedWithError: when the main process exits with
        an error
    """
    exit_code = self.process.poll()
    if exit_code is not None and exit_code != 0:
        # The main process exited with an error. Clean up the children
        # if any.
        self._kill_all_kids(self._sig_kill)
        self._clear_process()
        raise ProcessExitedWithError(self, exit_code)
    return self.after_start_check()
0.003058
def get_file_search(self, query):
    """Performs advanced search on samples, matching certain
    binary/metadata/detection criteria.

    Possible queries: file size, file type, first or last submission to VT,
    number of positives, binary content, etc.

    Args:
        query: dictionary with search arguments
            Example: 'query': 'type:peexe size:90kb+ positives:5+ behaviour:"taskkill"'
    Returns:
        A dict with the VT report.
    """
    api_name = 'virustotal-file-search'

    (all_responses, query) = self._bulk_cache_lookup(api_name, query)
    response_chunks = self._request_reports("query", query, 'file/search')
    self._extract_response_chunks(all_responses, response_chunks, api_name)

    return all_responses
0.003686
def _get_initial_step(parameters, lower_bounds, upper_bounds, max_step_sizes):
    """Get an initial step size to use for every parameter.

    This chooses the step sizes based on the maximum step size and the
    lower and upper bounds.

    Args:
        parameters (ndarray): The parameters at which to evaluate the
            gradient. A (d, p) matrix with d problems and p parameters.
        lower_bounds (list): lower bounds
        upper_bounds (list): upper bounds
        max_step_sizes (list or None): the maximum step size, or the maximum
            step size per parameter. Defaults to 0.1.

    Returns:
        ndarray: for every problem instance the vector with the initial step
            size for each parameter.
    """
    nmr_params = parameters.shape[1]

    initial_step = np.zeros_like(parameters)

    if max_step_sizes is None:
        max_step_sizes = 0.1

    if isinstance(max_step_sizes, Number):
        max_step_sizes = [max_step_sizes] * nmr_params

    max_step_sizes = np.array(max_step_sizes)

    for ind in range(parameters.shape[1]):
        minimum_allowed_step = np.minimum(np.abs(parameters[:, ind] - lower_bounds[ind]),
                                          np.abs(upper_bounds[ind] - parameters[:, ind]))
        initial_step[:, ind] = np.minimum(minimum_allowed_step, max_step_sizes[ind])

    return initial_step / 2.
0.005922
def datastream_etag(request, pid, dsid, repo=None, as_of_date=None, **kwargs):
    '''Method suitable for use as an etag function with
    :class:`django.views.decorators.http.condition`.  Takes the same
    arguments as :meth:`~eulfedora.views.raw_datastream`.
    '''
    # if a range is requested and it is not for the entire file,
    # do *NOT* return an etag
    # NOTE: using api directly here instead of object/ds models
    # to avoid making unneeded api calls
    try:
        if repo is None:
            repo = Repository()
        resp = repo.api.getDatastream(pid, dsid, asOfDateTime=as_of_date)
        dsprofile = parse_xml_object(DatastreamProfile, resp.content, resp.url)
        if dsprofile and dsprofile.checksum_type != 'DISABLED':
            return dsprofile.checksum
    except RequestFailed:
        pass

    return None
0.003517
def normalize_to_unit_range(values):
    """Bring a 1D NumPy array with at least two values in `values` to a
    linearly normalized range of [0, 1]."""
    if not isinstance(values, np.ndarray) or values.ndim != 1:
        raise ValueError('`values` must be a 1D NumPy array')
    if len(values) < 2:
        raise ValueError('`values` must contain at least two values')

    min_ = np.min(values)
    max_ = np.max(values)
    range_ = max_ - min_

    if range_ == 0:
        raise ValueError('range of `values` is 0 -- cannot normalize')

    return (values - min_) / range_
0.003478
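For context, a minimal usage sketch of the normalize_to_unit_range snippet above (an illustration added here, not part of the dataset row; assumes numpy is available):

    import numpy as np

    vals = np.array([2.0, 4.0, 6.0])
    # min maps to 0, max to 1, values in between scale linearly
    print(normalize_to_unit_range(vals))  # [0.  0.5 1. ]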
def _retrieve(self):
    """
    Return the current content of the inactive-db.json file.
    """
    if PyFunceble.CONFIGURATION["inactive_database"]:
        # The database subsystem is activated.

        # We get, format and initiate the historical database file.
        self._reformat_historical_formating_error()

        if PyFunceble.path.isfile(self.inactive_db_path):
            # The database file exists.

            # We merge our current database into the already initiated one.
            self._merge()
0.003565
def _widening_points(self, function_address):
    """
    Return the ordered widening points for a specific function.

    :param int function_address: Address of the querying function.
    :return: A list of sorted merge points (addresses).
    :rtype: list
    """
    # we are entering a new function. now it's time to figure out how to
    # optimally traverse the control flow graph by generating the sorted
    # merge points
    try:
        new_function = self.kb.functions[function_address]
    except KeyError:
        # the function does not exist
        return [ ]

    if function_address not in self._function_widening_points:
        if not new_function.normalized:
            new_function.normalize()
        widening_points = CFGUtils.find_widening_points(function_address,
                                                        new_function.endpoints,
                                                        new_function.graph)
        self._function_widening_points[function_address] = widening_points

    return self._function_widening_points[function_address]
0.006301
def get_plaintext_citations(file):
    """
    Parse a plaintext file to get a clean list of plaintext citations. The \
            file should have one citation per line.

    :param file: Either the path to the plaintext file or the content of a \
            plaintext file.
    :returns: A list of cleaned plaintext citations.
    """
    # Handle path or content
    if os.path.isfile(file):
        with open(file, 'r') as fh:
            content = fh.readlines()
    else:
        content = file.splitlines()
    # Clean every line to have plaintext
    cleaned_citations = [tools.clean_whitespaces(line) for line in content]
    return cleaned_citations
0.001511
def array_values(expr):
    """Given an expression expr denoting a list of values, array_values(expr)
       returns a list of values for that expression."""
    if isinstance(expr, Array):
        return expr.get_elems(all_subs(expr._bounds))
    elif isinstance(expr, list):
        vals = [array_values(x) for x in expr]
        return flatten(vals)
    else:
        return [expr]
0.005195
async def requirements(client: Client, search: str) -> dict:
    """
    GET list of requirements for a given UID/Public key

    :param client: Client to connect to the api
    :param search: UID or public key
    :return:
    """
    return await client.get(MODULE + '/requirements/%s' % search,
                            schema=REQUIREMENTS_SCHEMA)
0.006154
def get_version_status(
        package_descriptors, targets, repos_data,
        strip_version=False, strip_os_code_name=False):
    """
    For each package and target check if it is affected by a sync.

    This is the case when the package version in the testing repo is
    different from the version in the main repo.

    :return: a dict indexed by package names containing
      dicts indexed by targets containing a list of status strings
      (one for each repo)
    """
    status = {}
    for package_descriptor in package_descriptors.values():
        pkg_name = package_descriptor.pkg_name
        debian_pkg_name = package_descriptor.debian_pkg_name
        ref_version = package_descriptor.version
        if strip_version:
            ref_version = _strip_version_suffix(ref_version)

        status[pkg_name] = {}
        for target in targets:
            statuses = []
            for repo_data in repos_data:
                version = repo_data.get(target, {}).get(debian_pkg_name, None)
                if strip_version:
                    version = _strip_version_suffix(version)
                if strip_os_code_name:
                    version = _strip_os_code_name_suffix(
                        version, target.os_code_name)

                if ref_version:
                    if not version:
                        statuses.append('missing')
                    elif version.startswith(ref_version):  # including equal
                        statuses.append('equal')
                    else:
                        if _version_is_gt_other(version, ref_version):
                            statuses.append('higher')
                        else:
                            statuses.append('lower')
                else:
                    if not version:
                        statuses.append('ignore')
                    else:
                        statuses.append('obsolete')
            status[pkg_name][target] = statuses
    return status
0.000503
def add_completions(
    replace_list: list, belstr: str, replace_span: Span, completion_text: str
) -> List[Mapping[str, Any]]:
    """Create completions to return given replacement list

    Args:
        replace_list: list of completion replacement values
        belstr: BEL String
        replace_span: start, stop of belstr to replace
        completion_text: text to use for completion - used for creating highlight

    Returns:
        [{
            "replacement": replacement,
            "cursor_loc": cursor_loc,
            "highlight": highlight,
            "label": label,
        }]
    """
    completions = []
    for r in replace_list:
        # if '(' not in belstr:
        #     replacement = f'{r["replacement"]}()'
        #     cursor_loc = len(replacement) - 1  # inside parenthesis
        # elif r['type'] == 'Function' and replace_span[1] == len(belstr) - 1:

        if len(belstr) > 0:
            belstr_end = len(belstr) - 1
        else:
            belstr_end = 0

        log.debug(
            f'Replace list {r} Replace_span {replace_span} BELstr: {belstr} Len: {belstr_end} Test1 {r["type"] == "Function"} Test2 {replace_span[1] + 1 == len(belstr)}'
        )

        # Put a space between comma and following function arg
        if (
            r["type"] == "Function"
            and replace_span[0] > 0
            and belstr[replace_span[0] - 1] == ","
        ):
            log.debug("prior char is a comma")
            replacement = (
                belstr[0 : replace_span[0]]
                + " "
                + f"{r['replacement']}()"
                + belstr[replace_span[1] + 1 :]
            )
            cursor_loc = len(
                belstr[0 : replace_span[0]] + " " + f"{r['replacement']}()"
            )
        # Put a space between comma and following NSArg or StrArg
        elif replace_span[0] > 0 and belstr[replace_span[0] - 1] == ",":
            log.debug("prior char is a comma")
            replacement = (
                belstr[0 : replace_span[0]]
                + " "
                + r["replacement"]
                + belstr[replace_span[1] + 1 :]
            )
            cursor_loc = len(belstr[0 : replace_span[0]] + " " + r["replacement"])
        # Add function to end of belstr
        elif r["type"] == "Function" and replace_span[1] >= belstr_end:
            replacement = belstr[0 : replace_span[0]] + f"{r['replacement']}()"
            cursor_loc = len(replacement) - 1  # inside parenthesis
            log.debug(f"Replacement: {replacement}")
        # Insert replacement in beginning or middle of belstr
        else:
            replacement = (
                belstr[0 : replace_span[0]]
                + r["replacement"]
                + belstr[replace_span[1] + 1 :]
            )
            cursor_loc = len(
                belstr[0 : replace_span[0]] + r["replacement"]
            )  # move cursor just past replacement

        completions.append(
            {
                "replacement": replacement,
                "cursor_loc": cursor_loc,
                "highlight": r["highlight"],
                "label": r["label"],
            }
        )

    return completions
0.004371
def switch(self, gen_mode:bool=None):
    "Switch the model, if `gen_mode` is provided, in the desired mode."
    self.gen_mode = (not self.gen_mode) if gen_mode is None else gen_mode
    self.opt.opt = self.opt_gen.opt if self.gen_mode else self.opt_critic.opt
    self._set_trainable()
    self.model.switch(gen_mode)
    self.loss_func.switch(gen_mode)
0.015831
def __get_favorites(self, favorite_type, start=0, max_items=100):
    """ Helper method for `get_favorite_radio_*` methods.

    Args:
        favorite_type (str): Specify either `RADIO_STATIONS` or
            `RADIO_SHOWS`.
        start (int): Which number to start the retrieval from. Used for
            paging.
        max_items (int): The total number of results to return.

    """
    if favorite_type not in (RADIO_SHOWS, RADIO_STATIONS):
        favorite_type = SONOS_FAVORITES

    response = self.contentDirectory.Browse([
        ('ObjectID',
         'FV:2' if favorite_type is SONOS_FAVORITES
         else 'R:0/{0}'.format(favorite_type)),
        ('BrowseFlag', 'BrowseDirectChildren'),
        ('Filter', '*'),
        ('StartingIndex', start),
        ('RequestedCount', max_items),
        ('SortCriteria', '')
    ])
    result = {}
    favorites = []
    results_xml = response['Result']

    if results_xml != '':
        # Favorites are returned in DIDL-Lite format
        metadata = XML.fromstring(really_utf8(results_xml))

        for item in metadata.findall(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}container'
                if favorite_type == RADIO_SHOWS else
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}item'):
            favorite = {}
            favorite['title'] = item.findtext(
                '{http://purl.org/dc/elements/1.1/}title')
            favorite['uri'] = item.findtext(
                '{urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/}res')
            if favorite_type == SONOS_FAVORITES:
                favorite['meta'] = item.findtext(
                    '{urn:schemas-rinconnetworks-com:metadata-1-0/}resMD')
            favorites.append(favorite)

    result['total'] = response['TotalMatches']
    result['returned'] = len(favorites)
    result['favorites'] = favorites

    return result
0.00097
def makeAggShkDstn(self):
    '''
    Creates the attributes TranShkAggDstn, PermShkAggDstn, and AggShkDstn.
    Draws on attributes TranShkAggStd, PermShkAggStd, TranShkAggCount,
    PermShkAggCount.

    Parameters
    ----------
    None

    Returns
    -------
    None
    '''
    self.TranShkAggDstn = approxMeanOneLognormal(sigma=self.TranShkAggStd,
                                                 N=self.TranShkAggCount)
    self.PermShkAggDstn = approxMeanOneLognormal(sigma=self.PermShkAggStd,
                                                 N=self.PermShkAggCount)
    self.AggShkDstn = combineIndepDstns(self.PermShkAggDstn, self.TranShkAggDstn)
0.014827
def is_persistent(arg):
    '''
    is_persistent(x) yields True if x is a persistent object and False if not.
    Note that this persistence can only be checked by the pimms library, so
    immutable/persistent structures not known to pimms or defined in terms of
    pimms's immutables library cannot be evaluated correctly. Additionally,
    is_persistent(x) checks only x; if x is a tuple of mutable objects, it is
    still considered a persistent object.
    '''
    from .immutable import (is_imm, imm_is_persistent)
    if is_imm(arg):
        return imm_is_persistent(arg)
    elif isinstance(arg, (np.generic, np.ndarray)):
        return not arg.flags.writeable
    elif is_quantity(arg) and isinstance(mag(arg), (np.generic, np.ndarray)):
        return not mag(arg).flags.writeable
    elif is_str(arg):
        return True
    elif is_number(arg):
        return True
    elif is_pmap(arg):
        return True
    elif isinstance(arg, frozenset):
        return True
    elif isinstance(arg, (ps.PVector, ps.PSet, ps.PList, ps.PRecord)):
        return True
    else:
        return False
0.013462
def qos_map_dscp_cos_mark_to(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    qos = ET.SubElement(config, "qos", xmlns="urn:brocade.com:mgmt:brocade-qos")
    map = ET.SubElement(qos, "map")
    dscp_cos = ET.SubElement(map, "dscp-cos")
    dscp_cos_map_name_key = ET.SubElement(dscp_cos, "dscp-cos-map-name")
    dscp_cos_map_name_key.text = kwargs.pop('dscp_cos_map_name')
    mark = ET.SubElement(dscp_cos, "mark")
    dscp_in_values_key = ET.SubElement(mark, "dscp-in-values")
    dscp_in_values_key.text = kwargs.pop('dscp_in_values')
    to = ET.SubElement(mark, "to")
    to.text = kwargs.pop('to')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.003802
def _get_subword_units(token, gram):
    """Return subword-units presentation, given a word/token.
    """
    if token == '</s>':  # special token for padding purpose.
        return [token]
    t = '#' + token + '#'
    return [t[i:i + gram] for i in range(0, len(t) - gram + 1)]
0.003559
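A doctest-style sketch of _get_subword_units above (an illustration added here; the expected outputs follow directly from the definition):

    >>> _get_subword_units('hello', 3)   # '#hello#' sliced into 3-grams
    ['#he', 'hel', 'ell', 'llo', 'lo#']
    >>> _get_subword_units('</s>', 3)    # the padding token passes through unchanged
    ['</s>']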
def resize_droplet(self, droplet_id, size):
    """
    This method allows you to resize a specific droplet to a different
    size. This will affect the number of processors and memory allocated
    to the droplet.

    Required parameters:

        droplet_id:
            Integer, this is the id of your droplet that you want to resize

        size, one of
            size_id: Numeric, this is the id of the size with which you
                would like the droplet created
            size_slug: String, this is the slug of the size with which you
                would like the droplet created
    """
    if not droplet_id:
        raise DOPException('droplet_id is required to resize a droplet!')

    params = {}
    size_id = size.get('size_id')
    if size_id:
        params.update({'size_id': size_id})
    else:
        size_slug = size.get('size_slug')
        if size_slug:
            params.update({'size_slug': size_slug})
        else:
            msg = 'size_id or size_slug are required to resize a droplet!'
            raise DOPException(msg)

    json = self.request('/droplets/%s/resize' % droplet_id, method='GET',
                        params=params)
    status = json.get('status')
    if status == 'OK':
        return json.get('event_id')
    else:
        message = json.get('message')
        raise DOPException('[%s]: %s' % (status, message))
0.002633
def get(self):
    """Return the bars."""
    frac, whole = modf(self.size * self.percent / 100.0)
    ret = curses_bars[8] * int(whole)
    if frac > 0:
        ret += curses_bars[int(frac * 8)]
        whole += 1
    ret += self.__empty_char * int(self.size - whole)
    if self.__with_text:
        ret = '{}{:5.1f}%'.format(ret, self.percent)
    return ret
0.004988
def _validate(self, val):
    """
    Checks that the list is of the right length and has the right contents.
    Otherwise, an exception is raised.
    """
    if self.allow_None and val is None:
        return

    if not isinstance(val, list):
        raise ValueError("List '%s' must be a list." % (self.name))

    if self.bounds is not None:
        min_length, max_length = self.bounds
        l = len(val)
        if min_length is not None and max_length is not None:
            if not (min_length <= l <= max_length):
                raise ValueError("%s: list length must be between %s and %s (inclusive)" % (self.name, min_length, max_length))
        elif min_length is not None:
            if not min_length <= l:
                raise ValueError("%s: list length must be at least %s." % (self.name, min_length))
        elif max_length is not None:
            if not l <= max_length:
                raise ValueError("%s: list length must be at most %s." % (self.name, max_length))

    self._check_type(val)
0.015539
def kill(self):
    """Kill the current process."""
    # safety measure in case the current process has been killed in
    # meantime and the kernel reused its PID
    if not self.is_running():
        name = self._platform_impl._process_name
        raise NoSuchProcess(self.pid, name)
    if os.name == 'posix':
        self.send_signal(signal.SIGKILL)
    else:
        self._platform_impl.kill_process()
0.004464
def reflect_table_data(table, mapping=None, engine_name='default'):
    """
    Write table to Model dict
    """
    table = reflect_table(table, engine_name)
    mapping = mapping or {}
    from uliweb.utils.sorteddict import SortedDict
    field_type_map = {'VARCHAR': 'str', 'VARCHAR2': 'str',
                      'INTEGER': 'int', 'FLOAT': 'float'}

    meta = {}
    columns = SortedDict()
    # write columns
    _primary_key = None
    for k, v in table.columns.items():
        column_type = v.type
        type_name = column_type.__class__.__name__.lower()
        kwargs = SortedDict()
        field_type = type_name.upper()
        if type_name in ('char', 'varchar'):
            kwargs['max_length'] = column_type.length
        elif type_name in ('text', 'blob', 'integer', 'float', 'bigint'):
            pass
        elif type_name == 'long':
            field_type = 'bigint'
        elif type_name in ('clob',):
            field_type = 'TEXT'
        elif type_name in ('decimal', 'float'):
            kwargs['precision'] = v.type.precision
            kwargs['scale'] = v.type.scale
        elif type_name == 'raw':  # oracle
            field_type = 'binary'
            kwargs['max_length'] = column_type.length
        elif type_name == 'number':
            if v.type.scale:
                kwargs['precision'] = v.type.precision
                kwargs['scale'] = v.type.scale
                field_type = 'DECIMAL'
            else:
                field_type = 'int'
        elif type_name == 'numeric':
            field_type = 'DECIMAL'
            kwargs['precision'] = v.type.precision
            kwargs['scale'] = v.type.scale
        elif type_name in ('timestamp',):
            field_type = 'TIMESTAMP'
        elif type_name in ('datetime', 'date', 'time'):
            pass
        # tinyint will be treated as bool
        elif type_name in ('tinyint', 'boolean'):
            field_type = 'bool'
        else:
            raise ValueError("Don't support column [{0}] for type [{1}] when parsing {2}".format(k, type_name, table.name))

        if v.primary_key:
            kwargs['primary_key'] = True
            _primary_key = k
        if v.autoincrement:
            kwargs['autoincrement'] = True
        if not v.nullable:
            kwargs['nullable'] = False
        if v.server_default:
            server_default = v.server_default.arg
            kwargs['server_default'] = server_default
        if v.index:
            kwargs['index'] = True
        if v.unique:
            kwargs['unique'] = True
        if sa_version >= '1.2' and v.comment:
            kwargs['verbose_name'] = v.comment

        # convert field_type to common python data type
        field_type = field_type_map.get(field_type, field_type)
        columns[k] = field_type, kwargs
    meta['columns'] = columns

    indexes = []
    indexes_names = []
    for index in table.indexes:
        cols = list(index.columns)
        _len = len(cols)
        # if only one column it'll be set to Property
        if _len == 1:
            column_name = cols[0].name
            d = {'index': True}
            if index.unique:
                d['unique'] = index.unique
            columns[column_name][1].update(d)
        else:
            if not index.name in indexes_names:
                indexes.append({'name': index.name,
                                'columns': [x.name for x in index.columns],
                                'unique': index.unique})
                indexes_names.append(index.name)
    meta['indexes'] = indexes
    return meta
0.005675
def _insert(self, name, value, timestamp, intervals, **kwargs):
    '''
    Insert the new value.
    '''
    # TODO: confirm that this is in fact using the indices correctly.
    for interval, config in self._intervals.items():
        timestamps = self._normalize_timestamps(timestamp, intervals, config)
        for tstamp in timestamps:
            self._insert_data(name, value, tstamp, interval, config, **kwargs)
0.009756
def confirmation(self, *args, **kwargs):
    """Upstream packet, send to current terminal."""
    if not self.current_terminal:
        raise RuntimeError("no active terminal")
    if not isinstance(self.current_terminal, Client):
        raise RuntimeError("current terminal not a client")
    self.current_terminal.confirmation(*args, **kwargs)
0.005391
def from_dicts(cls, ds: List[dict], force_snake_case: bool=True,
               force_cast: bool=False, restrict: bool=True) -> TList[T]:
    """From list of dict to list of instance

    :param ds: List of dict
    :param force_snake_case: Keys are transformed to snake case in order to be compliant with PEP8 if True
    :param force_cast: Cast forcibly if True
    :param restrict: Prohibit extra parameters if True
    :return: List of instance

    Usage:

        >>> from owlmixin.samples import Human
        >>> humans: TList[Human] = Human.from_dicts([
        ...    {"id": 1, "name": "Tom", "favorites": [{"name": "Apple"}]},
        ...    {"id": 2, "name": "John", "favorites": [{"name": "Orange"}]}
        ... ])
        >>> humans[0].name
        'Tom'
        >>> humans[1].name
        'John'
    """
    return TList([cls.from_dict(d, force_snake_case=force_snake_case,
                                force_cast=force_cast, restrict=restrict) for d in ds])
0.009813
def _dspace(
        irez,
        d2201, d2211, d3210, d3222, d4410, d4422, d5220, d5232,
        d5421, d5433, dedt, del1, del2, del3, didt, dmdt,
        dnodt, domdt, argpo, argpdot, t, tc, gsto, xfact, xlamo,
        no,
        atime, em, argpm, inclm, xli, mm, xni, nodem, nm,
        ):

    fasx2 = 0.13130908;
    fasx4 = 2.8843198;
    fasx6 = 0.37448087;
    g22 = 5.7686396;
    g32 = 0.95240898;
    g44 = 1.8014998;
    g52 = 1.0508330;
    g54 = 4.4108898;
    rptim = 4.37526908801129966e-3;  # equates to 7.29211514668855e-5 rad/sec
    stepp = 720.0;
    stepn = -720.0;
    step2 = 259200.0;

    #  ----------- calculate deep space resonance effects -----------
    dndt = 0.0;
    theta = (gsto + tc * rptim) % twopi
    em = em + dedt * t;

    inclm = inclm + didt * t;
    argpm = argpm + domdt * t;
    nodem = nodem + dnodt * t;
    mm = mm + dmdt * t;

    """
    //   sgp4fix for negative inclinations
    //   the following if statement should be commented out
    //  if (inclm < 0.0)
    // {
    //    inclm = -inclm;
    //    argpm = argpm - pi;
    //    nodem = nodem + pi;
    //  }

    /* - update resonances : numerical (euler-maclaurin) integration - */
    /* ------------------------- epoch restart ----------------------  */
    //   sgp4fix for propagator problems
    //   the following integration works for negative time steps and periods
    //   the specific changes are unknown because the original code was so convoluted

    // sgp4fix take out atime = 0.0 and fix for faster operation
    """
    ft = 0.0;
    if irez != 0:

        #  sgp4fix streamline check
        if atime == 0.0 or t * atime <= 0.0 or fabs(t) < fabs(atime):
            atime = 0.0;
            xni = no;
            xli = xlamo;

        # sgp4fix move check outside loop
        if t > 0.0:
            delt = stepp;
        else:
            delt = stepn;

        iretn = 381;  # added for do loop
        iret = 0;  # added for loop
        while iretn == 381:

            #  ------------------- dot terms calculated -------------
            #  ----------- near - synchronous resonance terms -------
            if irez != 2:
                xndt = (del1 * sin(xli - fasx2) +
                        del2 * sin(2.0 * (xli - fasx4)) +
                        del3 * sin(3.0 * (xli - fasx6)));
                xldot = xni + xfact;
                xnddt = (del1 * cos(xli - fasx2) +
                         2.0 * del2 * cos(2.0 * (xli - fasx4)) +
                         3.0 * del3 * cos(3.0 * (xli - fasx6)));
                xnddt = xnddt * xldot;
            else:
                # --------- near - half-day resonance terms --------
                xomi = argpo + argpdot * atime;
                x2omi = xomi + xomi;
                x2li = xli + xli;
                xndt = (d2201 * sin(x2omi + xli - g22) + d2211 * sin(xli - g22) +
                        d3210 * sin(xomi + xli - g32) + d3222 * sin(-xomi + xli - g32) +
                        d4410 * sin(x2omi + x2li - g44) + d4422 * sin(x2li - g44) +
                        d5220 * sin(xomi + xli - g52) + d5232 * sin(-xomi + xli - g52) +
                        d5421 * sin(xomi + x2li - g54) + d5433 * sin(-xomi + x2li - g54));
                xldot = xni + xfact;
                xnddt = (d2201 * cos(x2omi + xli - g22) + d2211 * cos(xli - g22) +
                         d3210 * cos(xomi + xli - g32) + d3222 * cos(-xomi + xli - g32) +
                         d5220 * cos(xomi + xli - g52) + d5232 * cos(-xomi + xli - g52) +
                         2.0 * (d4410 * cos(x2omi + x2li - g44) +
                                d4422 * cos(x2li - g44) +
                                d5421 * cos(xomi + x2li - g54) +
                                d5433 * cos(-xomi + x2li - g54)));
                xnddt = xnddt * xldot;

            #  ----------------------- integrator -------------------
            #  sgp4fix move end checks to end of routine
            if fabs(t - atime) >= stepp:
                iret = 0;
                iretn = 381;
            else:
                ft = t - atime;
                iretn = 0;

            if iretn == 381:
                xli = xli + xldot * delt + xndt * step2;
                xni = xni + xndt * delt + xnddt * step2;
                atime = atime + delt;

        nm = xni + xndt * ft + xnddt * ft * ft * 0.5;
        xl = xli + xldot * ft + xndt * ft * ft * 0.5;
        if irez != 1:
            mm = xl - 2.0 * nodem + 2.0 * theta;
            dndt = nm - no;
        else:
            mm = xl - nodem - argpm + theta;
            dndt = nm - no;

        nm = no + dndt;

    return (
        atime, em, argpm, inclm, xli,
        mm, xni, nodem, dndt, nm,
        )
0.038865
def chk_associations(self, fout_err="gaf.err"):
    """Check that fields are legal in GAF"""
    obj = GafData("2.1")
    return obj.chk(self.associations, fout_err)
0.011299
def date_to_solr(d):
    """ converts DD-MM-YYYY to YYYY-MM-DDT00:00:00Z """
    return "{y}-{m}-{day}T00:00:00Z".format(day=d[:2], m=d[3:5], y=d[6:]) if d else d
0.012422
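A quick doctest-style check of date_to_solr above (an illustration added here; the input date is a made-up example):

    >>> date_to_solr('25-12-2020')
    '2020-12-25T00:00:00Z'
    >>> date_to_solr('')   # falsy input is returned as-is
    ''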
def from_config(cls, gitlab_id=None, config_files=None):
    """Create a Gitlab connection from configuration files.

    Args:
        gitlab_id (str): ID of the configuration section.
        config_files list[str]: List of paths to configuration files.

    Returns:
        (gitlab.Gitlab): A Gitlab connection.

    Raises:
        gitlab.config.GitlabDataError: If the configuration is not correct.
    """
    config = gitlab.config.GitlabConfigParser(gitlab_id=gitlab_id,
                                              config_files=config_files)
    return cls(config.url, private_token=config.private_token,
               oauth_token=config.oauth_token,
               ssl_verify=config.ssl_verify, timeout=config.timeout,
               http_username=config.http_username,
               http_password=config.http_password,
               api_version=config.api_version,
               per_page=config.per_page)
0.002014
def ask_for_board_id(self):
    """Factored out in case interface isn't keyboard"""
    board_id = raw_input("paste in board id or url: ").strip()
    m = re.search(r"(?:https?://)?(?:trello.com)?/?b?/?([a-zA-Z]{8})/(?:.*)", board_id)
    if m:
        board_id = m.group(1)
    return board_id
0.009434
def make_full_path(basedir, outkey, origname):
    """Make a full file path by combining tokens

    Parameters
    ----------
    basedir : str
        The top level output area
    outkey : str
        The key for the particular instance of the analysis
    origname : str
        Template for the output file name

    Returns
    -------
    outpath : str
        This will be <basedir>:<outkey>:<newname>.fits
        Where newname = origname.replace('.fits', '_<outkey>.fits')
    """
    return os.path.join(basedir, outkey,
                        os.path.basename(origname).replace('.fits',
                                                           '_%s.fits' % outkey))
0.004367
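And a short usage sketch of make_full_path above (an illustration added here; the paths are hypothetical):

    >>> make_full_path('/data/out', 'fit_v2', 'counts.fits')
    '/data/out/fit_v2/counts_fit_v2.fits'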
def get_fullpath(self, fullname=None, relative_to=None):
    """
    Return the original full path if full path is specified, otherwise
    search in the case file path
    """
    # if is an empty path
    if not fullname:
        return fullname

    isabs = os.path.isabs(fullname)
    path, name = os.path.split(fullname)

    if not name:  # path to a folder
        return None
    else:  # path to a file
        if isabs:
            return fullname
        else:
            return os.path.join(self.case_path, path, name)
0.003356
def userBrowser(self, request, tag):
    """
    Render a TDB of local users.
    """
    f = LocalUserBrowserFragment(self.browser)
    f.docFactory = webtheme.getLoader(f.fragmentName)
    f.setFragmentParent(self)
    return f
0.007782
def cree_local_DB(scheme):
    """Create an empty DB according to the given scheme:
    dict { table : [ (column_name, column_type), .. ] }

    Useful at installation of the application (and for development).
    """
    conn = LocalConnexion()
    req = ""
    for table, fields in scheme.items():
        req += f"DROP TABLE IF EXISTS {table};"
        req_fields = ", ".join(f'{c_name} {c_type}' for c_name, c_type in fields)
        req += f"""CREATE TABLE {table}  ( {req_fields} ) ;"""
    cur = conn.cursor()
    cur.executescript(req)
    conn.connexion.commit()
    conn.connexion.close()
    logging.info("Database created with success.")
0.00471
def get_classification_node(self, project, structure_group, path=None, depth=None):
    """GetClassificationNode.
    Gets the classification node for a given node path.
    :param str project: Project ID or project name
    :param TreeStructureGroup structure_group: Structure group of the classification node, area or iteration.
    :param str path: Path of the classification node.
    :param int depth: Depth of children to fetch.
    :rtype: :class:`<WorkItemClassificationNode> <azure.devops.v5_0.work_item_tracking.models.WorkItemClassificationNode>`
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if structure_group is not None:
        route_values['structureGroup'] = self._serialize.url('structure_group', structure_group, 'TreeStructureGroup')
    if path is not None:
        route_values['path'] = self._serialize.url('path', path, 'str')
    query_parameters = {}
    if depth is not None:
        query_parameters['$depth'] = self._serialize.query('depth', depth, 'int')
    response = self._send(http_method='GET',
                          location_id='5a172953-1b41-49d3-840a-33f79c3ce89f',
                          version='5.0',
                          route_values=route_values,
                          query_parameters=query_parameters)
    return self._deserialize('WorkItemClassificationNode', response)
0.005902
def erase_key_value(self):
    """
    Erase key-value represented fields.

    :rtype: Column

    :Example:

    >>> new_ds = df.erase_key_value('f1 f2')
    """
    field_name = self.name
    new_df = copy_df(self)
    new_df._perform_operation(op.FieldKVConfigOperation({field_name: None}))
    return new_df
0.008523
def _build_fluent_table(self):
    '''Builds the fluent table for each RDDL pvariable.'''
    self.fluent_table = collections.OrderedDict()

    for name, size in zip(self.domain.non_fluent_ordering, self.non_fluent_size):
        non_fluent = self.domain.non_fluents[name]
        self.fluent_table[name] = (non_fluent, size)

    for name, size in zip(self.domain.state_fluent_ordering, self.state_size):
        fluent = self.domain.state_fluents[name]
        self.fluent_table[name] = (fluent, size)

    for name, size in zip(self.domain.action_fluent_ordering, self.action_size):
        fluent = self.domain.action_fluents[name]
        self.fluent_table[name] = (fluent, size)

    for name, size in zip(self.domain.interm_fluent_ordering, self.interm_size):
        fluent = self.domain.intermediate_fluents[name]
        self.fluent_table[name] = (fluent, size)
0.006466
def main(reactor, argv=sys.argv[1:], env=os.environ,
         acme_url=LETSENCRYPT_DIRECTORY.asText()):
    """
    A tool to automatically request, renew and distribute Let's Encrypt
    certificates for apps running on Marathon and served by marathon-lb.
    """
    parser = argparse.ArgumentParser(
        description='Automatically manage ACME certificates for Marathon apps')
    parser.add_argument('-a', '--acme',
                        help='The address for the ACME Directory Resource '
                             '(default: %(default)s)',
                        default=acme_url)
    parser.add_argument('-e', '--email',
                        help='An email address to register with the ACME '
                             'service (optional)')
    parser.add_argument('-m', '--marathon', metavar='MARATHON[,MARATHON,...]',
                        help='The addresses for the Marathon HTTP API '
                             '(default: %(default)s)',
                        default='http://marathon.mesos:8080')
    parser.add_argument('-l', '--lb', metavar='LB[,LB,...]',
                        help='The addresses for the marathon-lb HTTP API '
                             '(default: %(default)s)',
                        default='http://marathon-lb.marathon.mesos:9090')
    parser.add_argument('-g', '--group',
                        help='The marathon-lb group to issue certificates for '
                             '(default: %(default)s)',
                        default='external')
    parser.add_argument('--allow-multiple-certs',
                        help=('Allow multiple certificates for a single app '
                              'port. This allows multiple domains for an app, '
                              'but is not recommended.'),
                        action='store_true')
    parser.add_argument('--listen',
                        help='The address for the port to listen on (default: '
                             '%(default)s)',
                        default=':8000')
    parser.add_argument('--marathon-timeout',
                        help=('Amount of time in seconds to wait for HTTP '
                              'response headers to be received for all '
                              'requests to Marathon. Set to 0 to disable. '
                              '(default: %(default)s)'),
                        type=float, default=10)
    parser.add_argument('--sse-timeout',
                        help=('Amount of time in seconds to wait for some '
                              'event data to be received from Marathon. Set '
                              'to 0 to disable. (default: %(default)s)'),
                        type=float, default=60)
    parser.add_argument('--log-level',
                        help='The minimum severity level to log messages at '
                             '(default: %(default)s)',
                        choices=['debug', 'info', 'warn', 'error', 'critical'],
                        default='info')
    parser.add_argument('--vault',
                        help=('Enable storage of certificates in Vault. This '
                              'can be further configured with VAULT_-style '
                              'environment variables.'),
                        action='store_true')
    parser.add_argument('storage_path', metavar='storage-path',
                        help=('Path for storing certificates. If --vault is '
                              'used then this is the mount path for the '
                              'key/value engine in Vault. If not, this is the '
                              'path to a directory.'))
    parser.add_argument('--version', action='version', version=__version__)

    args = parser.parse_args(argv)

    # Set up logging
    init_logging(args.log_level)

    # Set up marathon-acme
    marathon_addrs = args.marathon.split(',')
    mlb_addrs = args.lb.split(',')

    sse_timeout = args.sse_timeout if args.sse_timeout > 0 else None

    acme_url = URL.fromText(_to_unicode(args.acme))

    endpoint_description = parse_listen_addr(args.listen)

    log_args = [
        ('storage-path', args.storage_path),
        ('vault', args.vault),
        ('acme', acme_url),
        ('email', args.email),
        ('allow-multiple-certs', args.allow_multiple_certs),
        ('marathon', marathon_addrs),
        ('sse-timeout', sse_timeout),
        ('lb', mlb_addrs),
        ('group', args.group),
        ('endpoint-description', endpoint_description),
    ]
    log_args = ['{}={!r}'.format(k, v) for k, v in log_args]
    log.info('Starting marathon-acme {} with: {}'.format(
        __version__, ', '.join(log_args)))

    if args.vault:
        key_d, cert_store = init_vault_storage(
            reactor, env, args.storage_path)
    else:
        key_d, cert_store = init_file_storage(args.storage_path)

    # Once we have the client key, create the txacme client creator
    key_d.addCallback(create_txacme_client_creator, reactor, acme_url)

    # Once we have the client creator, create the service
    key_d.addCallback(
        create_marathon_acme, cert_store, args.email,
        args.allow_multiple_certs, marathon_addrs, args.marathon_timeout,
        sse_timeout, mlb_addrs, args.group, reactor)

    # Finally, run the thing
    return key_d.addCallback(lambda ma: ma.run(endpoint_description))
0.000184
def plot_counts(df, theme):
    """ plot the counts of a given theme from a created database over time """
    dates, counts = df['date-observation'], df[theme + "_count"]
    fig, ax = plt.subplots()
    ax.set_ylabel("{} pixel counts".format(" ".join(theme.split("_"))))
    ax.set_xlabel("observation date")
    ax.plot(dates, counts, '.')
    fig.autofmt_xdate()
    plt.show()
0.002632
def validate_axiscolor(value):
    """Validate a dictionary containing axiscolor definitions

    Parameters
    ----------
    value: dict
        see :attr:`psyplot.plotter.baseplotter.axiscolor`

    Returns
    -------
    dict

    Raises
    ------
    ValueError"""
    validate = try_and_error(validate_none, validate_color)
    possible_keys = {'right', 'left', 'top', 'bottom'}
    try:
        value = dict(value)
        false_keys = set(value) - possible_keys
        if false_keys:
            raise ValueError("Wrong keys (%s)!" % (', '.join(false_keys)))
        for key, val in value.items():
            value[key] = validate(val)
    except:
        value = dict(zip(possible_keys, repeat(validate(value))))
    return value
0.002692
def build_chunk(oscillators):
    """
    Build an audio chunk and progress the oscillator states.

    Args:
        oscillators (list): A list of oscillator.Oscillator objects
            to build chunks from

    Returns:
        str: a string of audio sample bytes ready to be written to a wave file
    """
    step_random_processes(oscillators)
    subchunks = []
    for osc in oscillators:
        osc.amplitude.step_amp()
        osc_chunk = osc.get_samples(config.CHUNK_SIZE)
        if osc_chunk is not None:
            subchunks.append(osc_chunk)
    if len(subchunks):
        new_chunk = sum(subchunks)
    else:
        new_chunk = numpy.zeros(config.CHUNK_SIZE)

    # If we exceed the maximum amplitude, handle it gracefully
    chunk_amplitude = amplitude.find_amplitude(new_chunk)
    if chunk_amplitude > config.MAX_AMPLITUDE:
        # Normalize the amplitude chunk to mitigate immediate clipping
        new_chunk = amplitude.normalize_amplitude(new_chunk,
                                                  config.MAX_AMPLITUDE)
        # Pick some of the offending oscillators (and some random others)
        # and lower their drift targets
        avg_amp = (sum(osc.amplitude.value for osc in oscillators) /
                   len(oscillators))
        for osc in oscillators:
            if (osc.amplitude.value > avg_amp and rand.prob_bool(0.1) or
                    rand.prob_bool(0.01)):
                osc.amplitude.drift_target = rand.weighted_rand(
                    [(-5, 1), (0, 10)])
                osc.amplitude.change_rate = rand.weighted_rand(
                    osc.amplitude.change_rate_weights)
    return new_chunk.astype(config.SAMPLE_DATA_TYPE).tostring()
0.000586
def split(self, frac):
    """
    Split the DataFrame into two DataFrames with a certain ratio.

    :param frac: Split ratio
    :type frac: float
    :return: two split DataFrame objects
    :rtype: list[DataFrame]
    """
    from .. import preprocess

    split_obj = getattr(preprocess, '_Split')(fraction=frac)
    return split_obj.transform(self)
0.005102
def sanity_check_ir_blocks_from_frontend(ir_blocks, query_metadata_table):
    """Assert that IR blocks originating from the frontend do not have nonsensical structure.

    Args:
        ir_blocks: list of BasicBlocks representing the IR to sanity-check
        query_metadata_table: metadata table for the query, holding the registered locations

    Raises:
        AssertionError, if the IR has unexpected structure.
        If the IR produced by the front-end cannot be successfully and correctly used to
        generate MATCH or Gremlin due to a bug, this is the method that should catch the problem.
    """
    if not ir_blocks:
        raise AssertionError(u'Received no ir_blocks: {}'.format(ir_blocks))

    _sanity_check_fold_scope_locations_are_unique(ir_blocks)
    _sanity_check_no_nested_folds(ir_blocks)
    _sanity_check_query_root_block(ir_blocks)
    _sanity_check_output_source_follower_blocks(ir_blocks)
    _sanity_check_block_pairwise_constraints(ir_blocks)
    _sanity_check_mark_location_preceding_optional_traverse(ir_blocks)
    _sanity_check_every_location_is_marked(ir_blocks)
    _sanity_check_coerce_type_outside_of_fold(ir_blocks)
    _sanity_check_all_marked_locations_are_registered(ir_blocks, query_metadata_table)
    _sanity_check_registered_locations_parent_locations(query_metadata_table)
0.004045
def show_queue():
    '''
    Show contents of the mail queue

    CLI Example:

    .. code-block:: bash

        salt '*' postfix.show_queue
    '''
    cmd = 'mailq'
    out = __salt__['cmd.run'](cmd).splitlines()
    queue = []

    queue_pattern = re.compile(r"(?P<queue_id>^[A-Z0-9]+)\s+(?P<size>\d+)\s(?P<timestamp>\w{3}\s\w{3}\s\d{1,2}\s\d{2}\:\d{2}\:\d{2})\s+(?P<sender>.+)")
    recipient_pattern = re.compile(r"^\s+(?P<recipient>.+)")
    for line in out:
        if re.match('^[-|postqueue:|Mail]', line):
            # discard in-queue wrapper
            continue
        if re.match(queue_pattern, line):
            m = re.match(queue_pattern, line)
            queue_id = m.group('queue_id')
            size = m.group('size')
            timestamp = m.group('timestamp')
            sender = m.group('sender')
        elif re.match(recipient_pattern, line):  # recipient/s
            m = re.match(recipient_pattern, line)
            recipient = m.group('recipient')
        elif not line:  # end of record
            queue.append({'queue_id': queue_id, 'size': size,
                          'timestamp': timestamp, 'sender': sender,
                          'recipient': recipient})

    return queue
0.00256
def clean(tf_matrix, tf_matrix_gene_names, target_gene_name):
    """
    :param tf_matrix: numpy array. The full transcription factor matrix.
    :param tf_matrix_gene_names: the full list of transcription factor names,
        corresponding to the tf_matrix columns.
    :param target_gene_name: the target gene to remove from the tf_matrix and
        tf_names.
    :return: a tuple of (matrix, names) equal to the specified ones minus the
        target_gene_name if the target happens to be one of the transcription
        factors. If not, the specified (tf_matrix, tf_names) is returned
        verbatim.
    """
    if target_gene_name not in tf_matrix_gene_names:
        clean_tf_matrix = tf_matrix
    else:
        clean_tf_matrix = np.delete(tf_matrix,
                                    tf_matrix_gene_names.index(target_gene_name), 1)

    clean_tf_names = [tf for tf in tf_matrix_gene_names if tf != target_gene_name]

    assert clean_tf_matrix.shape[1] == len(clean_tf_names)  # sanity check

    return clean_tf_matrix, clean_tf_names
0.006917
def create_action(self):
    """Create actions associated with Annotations."""
    actions = {}

    act = QAction('New Annotations', self)
    act.triggered.connect(self.new_annot)
    actions['new_annot'] = act

    act = QAction('Load Annotations', self)
    act.triggered.connect(self.load_annot)
    actions['load_annot'] = act

    act = QAction('Clear Annotations...', self)
    act.triggered.connect(self.clear_annot)
    actions['clear_annot'] = act

    act = QAction('New...', self)
    act.triggered.connect(self.new_rater)
    actions['new_rater'] = act

    act = QAction('Rename...', self)
    act.triggered.connect(self.rename_rater)
    actions['rename_rater'] = act

    act = QAction('Delete...', self)
    act.triggered.connect(self.delete_rater)
    actions['del_rater'] = act

    act = QAction(QIcon(ICON['bookmark']), 'New Bookmark', self)
    act.setCheckable(True)
    actions['new_bookmark'] = act

    act = QAction(QIcon(ICON['new_eventtype']), 'New Event Type', self)
    act.triggered.connect(self.new_eventtype)
    actions['new_eventtype'] = act

    act = QAction(QIcon(ICON['del_eventtype']), 'Delete Event Type', self)
    act.triggered.connect(self.delete_eventtype)
    actions['del_eventtype'] = act

    act = QAction('Rename Event Type', self)
    act.triggered.connect(self.rename_eventtype)
    actions['rename_eventtype'] = act

    act = QAction('New Name', self)
    act.triggered.connect(self.markers_to_events)
    actions['m2e_newname'] = act

    act = QAction('Keep Marker Names', self)
    act.triggered.connect(partial(self.markers_to_events, True))
    actions['m2e_keepname'] = act

    act = QAction('Merge Events...', self)
    act.triggered.connect(self.parent.show_merge_dialog)
    actions['merge_events'] = act

    act = QAction(QIcon(ICON['event']), 'Event Mode', self)
    act.setCheckable(True)
    actions['new_event'] = act

    uncheck_new_event = lambda: actions['new_event'].setChecked(False)
    uncheck_new_bookmark = lambda: actions['new_bookmark'].setChecked(False)
    actions['new_event'].triggered.connect(uncheck_new_bookmark)
    actions['new_bookmark'].triggered.connect(uncheck_new_event)

    act = {}
    for one_stage, one_shortcut in zip(STAGE_NAME, STAGE_SHORTCUT):
        act[one_stage] = QAction('Score as ' + one_stage, self.parent)
        act[one_stage].setShortcut(one_shortcut)
        stage_idx = STAGE_NAME.index(one_stage)
        act[one_stage].triggered.connect(partial(self.get_sleepstage,
                                                 stage_idx))
        self.addAction(act[one_stage])
    actions['stages'] = act

    act = {}
    for one_qual, one_shortcut in zip(QUALIFIERS, QUALITY_SHORTCUT):
        act[one_qual] = QAction('Score as ' + one_qual, self.parent)
        act[one_qual].setShortcut(one_shortcut)
        qual_idx = QUALIFIERS.index(one_qual)
        act[one_qual].triggered.connect(partial(self.get_quality, qual_idx))
        self.addAction(act[one_qual])
    actions['quality'] = act

    act = QAction('Set Cycle Start', self)
    act.setShortcut('Ctrl+[')
    act.triggered.connect(self.get_cycle_mrkr)
    actions['cyc_start'] = act

    act = QAction('Set Cycle End', self)
    act.setShortcut('Ctrl+]')
    act.triggered.connect(partial(self.get_cycle_mrkr, end=True))
    actions['cyc_end'] = act

    act = QAction('Remove Cycle Marker', self)
    act.triggered.connect(self.remove_cycle_mrkr)
    actions['remove_cyc'] = act

    act = QAction('Clear Cycle Markers', self)
    act.triggered.connect(self.clear_cycle_mrkrs)
    actions['clear_cyc'] = act

    act = QAction('Domino', self)
    act.triggered.connect(partial(self.import_staging, 'domino'))
    actions['import_domino'] = act

    act = QAction('Alice', self)
    act.triggered.connect(partial(self.import_staging, 'alice'))
    actions['import_alice'] = act

    act = QAction('Sandman', self)
    act.triggered.connect(partial(self.import_staging, 'sandman'))
    actions['import_sandman'] = act

    act = QAction('RemLogic', self)
    act.triggered.connect(partial(self.import_staging, 'remlogic'))
    actions['import_remlogic'] = act

    act = QAction('Compumedics', self)
    act.triggered.connect(partial(self.import_staging, 'compumedics'))
    actions['import_compumedics'] = act

    act = QAction('PRANA', self)
    act.triggered.connect(partial(self.import_staging, 'prana'))
    actions['import_prana'] = act

    act = QAction('DeltaMed', self)
    act.triggered.connect(partial(self.import_staging, 'deltamed'))
    actions['import_deltamed'] = act

    act = QAction('FASST', self)
    act.triggered.connect(self.import_fasst)
    actions['import_fasst'] = act

    act = QAction('Domino', self)
    act.triggered.connect(partial(self.import_staging, 'domino',
                                  as_qual=True))
    actions['import_domino_qual'] = act

    act = QAction('Alice', self)
    act.triggered.connect(partial(self.import_staging, 'alice',
                                  as_qual=True))
    actions['import_alice_qual'] = act

    act = QAction('Sandman', self)
    act.triggered.connect(partial(self.import_staging, 'sandman',
                                  as_qual=True))
    actions['import_sandman_qual'] = act

    act = QAction('RemLogic', self)
    act.triggered.connect(partial(self.import_staging, 'remlogic',
                                  as_qual=True))
    actions['import_remlogic_qual'] = act

    act = QAction('Compumedics', self)
    act.triggered.connect(partial(self.import_staging, 'compumedics',
                                  as_qual=True))
    actions['import_compumedics_qual'] = act

    act = QAction('PRANA', self)
    act.triggered.connect(partial(self.import_staging, 'prana',
                                  as_qual=True))
    actions['import_prana_qual'] = act

    act = QAction('DeltaMed', self)
    act.triggered.connect(partial(self.import_staging, 'deltamed',
                                  as_qual=True))
    actions['import_deltamed_qual'] = act

    act = QAction('Wonambi', self)
    act.triggered.connect(partial(self.import_events, 'wonambi'))
    actions['import_events_wonambi'] = act

    act = QAction('RemLogic', self)
    act.triggered.connect(partial(self.import_events, 'remlogic'))
    actions['import_events_remlogic'] = act

    act = QAction('CSV', self)
    act.triggered.connect(partial(self.export, xformat='csv'))
    actions['export_to_csv'] = act

    act = QAction('RemLogic', self)
    act.triggered.connect(partial(self.export, xformat='remlogic'))
    actions['export_to_remlogic'] = act

    act = QAction('RemLogic FR', self)
    act.triggered.connect(partial(self.export, xformat='remlogic_fr'))
    actions['export_to_remlogic_fr'] = act

    act = QAction('Export Events...', self)
    act.triggered.connect(self.parent.show_export_events_dialog)
    actions['export_events'] = act

    act = QAction('Spindle...', self)
    act.triggered.connect(self.parent.show_spindle_dialog)
    act.setShortcut('Ctrl+Shift+s')
    act.setEnabled(False)
    actions['spindle'] = act

    act = QAction('Slow Wave...', self)
    act.triggered.connect(self.parent.show_slow_wave_dialog)
    act.setShortcut('Ctrl+Shift+w')
    act.setEnabled(False)
    actions['slow_wave'] = act

    act = QAction('Analysis Console', self)
    act.triggered.connect(self.parent.show_analysis_dialog)
    act.setShortcut('Ctrl+Shift+a')
    act.setEnabled(False)
    actions['analyze'] = act

    act = QAction('Sleep Statistics', self)
    act.triggered.connect(self.export_sleeps_stats)
    actions['export_sleepstats'] = act

    self.action = actions
0.002837
def connect_all_networks(self, action, container_name, **kwargs):
    """
    Connects a container to all of its configured networks. Assuming that
    this is typically used after container creation, where the first
    endpoint is already defined, this skips the first configuration. Pass
    ``skip_first`` as ``False`` to change this.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param container_name: Container names or id.
    :type container_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
    :type kwargs: dict
    """
    kwargs.setdefault('skip_first', True)
    self.connect_networks(action, container_name, action.config.networks, **kwargs)
0.007203
def convert_vocab(vocab_file): """GluonNLP specific code to convert the original vocabulary to nlp.vocab.BERTVocab.""" original_vocab = load_vocab(vocab_file) token_to_idx = dict(original_vocab) num_tokens = len(token_to_idx) idx_to_token = [None] * len(original_vocab) for word in original_vocab: idx = int(original_vocab[word]) idx_to_token[idx] = word def swap(token, target_idx, token_to_idx, idx_to_token, swap_idx): original_idx = token_to_idx[token] original_token = idx_to_token[target_idx] token_to_idx[token] = target_idx token_to_idx[original_token] = original_idx idx_to_token[target_idx] = token idx_to_token[original_idx] = original_token swap_idx.append((original_idx, target_idx)) reserved_tokens = [gluonnlp.vocab.BERTVocab.PADDING_TOKEN, gluonnlp.vocab.BERTVocab.CLS_TOKEN, gluonnlp.vocab.BERTVocab.SEP_TOKEN, gluonnlp.vocab.BERTVocab.MASK_TOKEN] unknown_token = gluonnlp.vocab.BERTVocab.UNKNOWN_TOKEN padding_token = gluonnlp.vocab.BERTVocab.PADDING_TOKEN swap_idx = [] assert unknown_token in token_to_idx assert padding_token in token_to_idx swap(unknown_token, 0, token_to_idx, idx_to_token, swap_idx) for i, token in enumerate(reserved_tokens): swap(token, i + 1, token_to_idx, idx_to_token, swap_idx) # sanity checks assert len(token_to_idx) == num_tokens assert len(idx_to_token) == num_tokens assert None not in idx_to_token assert len(set(idx_to_token)) == num_tokens bert_vocab_dict = {} bert_vocab_dict['idx_to_token'] = idx_to_token bert_vocab_dict['token_to_idx'] = token_to_idx bert_vocab_dict['reserved_tokens'] = reserved_tokens bert_vocab_dict['unknown_token'] = unknown_token bert_vocab_dict['padding_token'] = padding_token bert_vocab_dict['bos_token'] = None bert_vocab_dict['eos_token'] = None bert_vocab_dict['mask_token'] = gluonnlp.vocab.BERTVocab.MASK_TOKEN bert_vocab_dict['sep_token'] = gluonnlp.vocab.BERTVocab.SEP_TOKEN bert_vocab_dict['cls_token'] = gluonnlp.vocab.BERTVocab.CLS_TOKEN json_str = json.dumps(bert_vocab_dict) converted_vocab = gluonnlp.vocab.BERTVocab.from_json(json_str) return converted_vocab, swap_idx
0.001731
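A self-contained illustration of the nested swap bookkeeping, run on a toy vocabulary with plain dicts and lists (no gluonnlp needed): after the swaps, [UNK] sits at index 0 and the reserved tokens at indices 1-4, the layout the function establishes.

token_to_idx = {'the': 0, '[PAD]': 1, 'cat': 2, '[UNK]': 3,
                '[CLS]': 4, '[SEP]': 5, '[MASK]': 6}
idx_to_token = sorted(token_to_idx, key=token_to_idx.get)

def swap(token, target_idx):
    i, j = token_to_idx[token], target_idx
    other = idx_to_token[j]          # token currently at the target slot
    token_to_idx[token], token_to_idx[other] = j, i
    idx_to_token[i], idx_to_token[j] = other, token

swap('[UNK]', 0)
for pos, tok in enumerate(['[PAD]', '[CLS]', '[SEP]', '[MASK]'], start=1):
    swap(tok, pos)

assert idx_to_token[:5] == ['[UNK]', '[PAD]', '[CLS]', '[SEP]', '[MASK]']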
def cells(self): """The number of cells in the MOC. This gives the total number of cells at all orders, with cells from every order counted equally. >>> m = MOC(0, (1, 2)) >>> m.cells 2 """ n = 0 for (order, cells) in self: n += len(cells) return n
0.005797
def call_set_attr(node: Node, key: str, value): """Calls node setter""" node.set_attr(key, value)
0.009524
def PackageVariable(key, help, default, searchfunc=None):
    # NB: searchfunc is currently undocumented and unsupported
    """
    The input parameters describe a 'package' option, thus they
    are returned with the correct converter and validator appended. The
    result is usable for input to opts.Add() .

    A 'package' option may either be enabled ('yes' and synonyms),
    disabled ('no' and synonyms), or given as the path where the
    package is installed.
    """
    help = '\n    '.join(
        (help, '( yes | no | /path/to/%s )' % key))
    return (key, help, default,
            lambda k, v, e: _validator(k,v,e,searchfunc),
            _converter)
0.00639
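Typical use inside an SConstruct looks like the following (the option name and default path are illustrative); the converter maps enable/disable synonyms to booleans and passes a path through unchanged:

vars = Variables()  # Variables/Environment are in scope inside an SConstruct
vars.Add(PackageVariable('x11', 'use X11 installed here', '/usr/X11'))
env = Environment(variables=vars)

# scons x11=no        -> env['x11'] == False
# scons x11=yes       -> env['x11'] == True
# scons x11=/opt/X11  -> env['x11'] == '/opt/X11'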
def create_upload(self, project_id, path_data, hash_data, remote_filename=None, storage_provider_id=None): """ Create a chunked upload id to pass to create_file_chunk_url to create upload urls. :param project_id: str: uuid of the project :param path_data: PathData: holds file system data about the file we are uploading :param hash_data: HashData: contains hash alg and value for the file we are uploading :param remote_filename: str: name to use for our remote file (defaults to path_data basename otherwise) :param storage_provider_id: str: optional storage provider id :return: str: uuid for the upload """ upload_response = self._create_upload(project_id, path_data, hash_data, remote_filename=remote_filename, storage_provider_id=storage_provider_id, chunked=True) return upload_response['id']
0.009646
def _set_instance(self, v, load=False): """ Setter method for instance, mapped from YANG variable /protocol/spanning_tree/mstp/instance (list) If this variable is read-only (config: false) in the source YANG file, then _set_instance is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_instance() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("id",instance.instance, yang_name="instance", rest_name="instance", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions={u'tailf-common': {u'info': u'MST instance', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'inst-config'}}), is_container='list', yang_name="instance", rest_name="instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MST instance', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'inst-config'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """instance must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("id",instance.instance, yang_name="instance", rest_name="instance", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='id', extensions={u'tailf-common': {u'info': u'MST instance', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'inst-config'}}), is_container='list', yang_name="instance", rest_name="instance", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'MST instance', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'cli-suppress-key-abbreviation': None, u'cli-incomplete-command': None, u'callpoint': u'inst-config'}}, namespace='urn:brocade.com:mgmt:brocade-xstp', defining_module='brocade-xstp', yang_type='list', is_config=True)""", }) self.__instance = t if hasattr(self, '_set'): self._set()
0.004041
def absent(name, user=None, config=None):
    '''
    Verifies that the specified host is not known by the given user

    name
        The host name

        Note that only single host names are supported. If foo.example.com
        and bar.example.com are the same machine and you need to exclude
        both, you will need one Salt state for each.

    user
        The user who owns the ssh known_hosts file to modify

    config
        The location of the known_hosts file relative to the user's home
        directory, defaults to ".ssh/known_hosts". If no user is specified,
        defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
        absolute path when a user is not specified.
    '''
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}

    if not user:
        config = config or '/etc/ssh/ssh_known_hosts'
    else:
        config = config or '.ssh/known_hosts'

    if not user and not os.path.isabs(config):
        comment = 'If not specifying a "user", specify an absolute "config".'
        ret['result'] = False
        return dict(ret, comment=comment)

    known_host = __salt__['ssh.get_known_host_entries'](user=user,
                                                        hostname=name,
                                                        config=config)
    if not known_host:
        return dict(ret, comment='Host is already absent')

    if __opts__['test']:
        comment = 'Key for {0} is set to be removed from {1}'.format(name,
                                                                     config)
        ret['result'] = None
        return dict(ret, comment=comment)

    rm_result = __salt__['ssh.rm_known_host'](user=user,
                                              hostname=name,
                                              config=config)
    if rm_result['status'] == 'error':
        return dict(ret, result=False, comment=rm_result['error'])
    else:
        return dict(ret,
                    changes={'old': known_host, 'new': None},
                    result=True,
                    comment=rm_result['comment'])
0.001535
def compare_vm_configs(new_config, current_config): ''' Compares virtual machine current and new configuration, the current is the one which is deployed now, and the new is the target config. Returns the differences between the objects in a dictionary, the keys are the configuration parameter keys and the values are differences objects: either list or recursive difference new_config: New config dictionary with every available parameter current_config Currently deployed configuration ''' diffs = {} keys = set(new_config.keys()) # These values identify the virtual machine, comparison is unnecessary keys.discard('name') keys.discard('datacenter') keys.discard('datastore') for property_key in ('version', 'image'): if property_key in keys: single_value_diff = recursive_diff( {property_key: current_config[property_key]}, {property_key: new_config[property_key]}) if single_value_diff.diffs: diffs[property_key] = single_value_diff keys.discard(property_key) if 'cpu' in keys: keys.remove('cpu') cpu_diff = recursive_diff(current_config['cpu'], new_config['cpu']) if cpu_diff.diffs: diffs['cpu'] = cpu_diff if 'memory' in keys: keys.remove('memory') _convert_units([current_config['memory']]) _convert_units([new_config['memory']]) memory_diff = recursive_diff(current_config['memory'], new_config['memory']) if memory_diff.diffs: diffs['memory'] = memory_diff if 'advanced_configs' in keys: keys.remove('advanced_configs') key = 'advanced_configs' advanced_diff = recursive_diff(current_config[key], new_config[key]) if advanced_diff.diffs: diffs[key] = advanced_diff if 'disks' in keys: keys.remove('disks') _convert_units(current_config['disks']) _convert_units(new_config['disks']) disk_diffs = list_diff(current_config['disks'], new_config['disks'], 'address') # REMOVE UNSUPPORTED DIFFERENCES/CHANGES # If the disk already exist, the backing properties like eagerly scrub # and thin provisioning # cannot be updated, and should not be identified as differences disk_diffs.remove_diff(diff_key='eagerly_scrub') # Filename updates are not supported yet, on VSAN datastores the # backing.fileName points to a uid + the vmdk name disk_diffs.remove_diff(diff_key='filename') # The adapter name shouldn't be changed disk_diffs.remove_diff(diff_key='adapter') if disk_diffs.diffs: diffs['disks'] = disk_diffs if 'interfaces' in keys: keys.remove('interfaces') interface_diffs = list_diff(current_config['interfaces'], new_config['interfaces'], 'mac') # The adapter name shouldn't be changed interface_diffs.remove_diff(diff_key='adapter') if interface_diffs.diffs: diffs['interfaces'] = interface_diffs # For general items where the identification can be done by adapter for key in keys: if key not in current_config or key not in new_config: raise ValueError('A general device {0} configuration was ' 'not supplied or it was not retrieved from ' 'remote configuration'.format(key)) device_diffs = list_diff(current_config[key], new_config[key], 'adapter') if device_diffs.diffs: diffs[key] = device_diffs return diffs
0.000259
def get(self, *args, **kwargs): """Get a relationship details""" self.before_get(args, kwargs) relationship_field, model_relationship_field, related_type_, related_id_field = self._get_relationship_data() obj, data = self._data_layer.get_relationship(model_relationship_field, related_type_, related_id_field, kwargs) result = {'links': {'self': request.path, 'related': self.schema._declared_fields[relationship_field].get_related_url(obj)}, 'data': data} qs = QSManager(request.args, self.schema) if qs.include: schema = compute_schema(self.schema, dict(), qs, qs.include) serialized_obj = schema.dump(obj) result['included'] = serialized_obj.data.get('included', dict()) final_result = self.after_get(result) return final_result
0.003795
def to_json(self, is_admin=False):
        """Returns a dict representation of the object

        Args:
            is_admin (`bool`): If true, include information about the account
            that should be available only to admins

        Returns:
            `dict`
        """
        if is_admin:
            return {
                'accountId': self.account_id,
                'accountName': self.account_name,
                'accountType': self.account_type,
                'contacts': self.contacts,
                'enabled': True if self.enabled == 1 else False,
                'requiredRoles': self.required_roles,
                'properties': {to_camelcase(prop.name): prop.value for prop in self.account.properties}
            }
        else:
            return {
                'accountId': self.account_id,
                'accountName': self.account_name,
                'contacts': self.contacts
            }
0.004324
def partition(self, id_): """Get a partition by the id number. Arguments: id_ -- a partition id value Returns: A partitions.Partition object Throws: a Sqlalchemy exception if the partition either does not exist or is not unique Because this method works on the bundle, the id_ ( without version information ) is equivalent to the vid ( with version information ) """ from ..orm import Partition as OrmPartition from sqlalchemy import or_ from ..identity import PartialPartitionName if isinstance(id_, PartitionIdentity): id_ = id_.id_ elif isinstance(id_, PartialPartitionName): id_ = id_.promote(self.bundle.identity.name) session = self.bundle.dataset._database.session q = session\ .query(OrmPartition)\ .filter(OrmPartition.d_vid == self.bundle.dataset.vid)\ .filter(or_(OrmPartition.id == str(id_).encode('ascii'), OrmPartition.vid == str(id_).encode('ascii'))) try: orm_partition = q.one() return self.bundle.wrap_partition(orm_partition) except NoResultFound: orm_partition = None if not orm_partition: q = session\ .query(OrmPartition)\ .filter(OrmPartition.d_vid == self.bundle.dataset.vid)\ .filter(OrmPartition.name == str(id_).encode('ascii')) try: orm_partition = q.one() return self.bundle.wrap_partition(orm_partition) except NoResultFound: orm_partition = None return orm_partition
0.001712
def check(self, radl): """Check the features in this application.""" SIMPLE_FEATURES = { "name": (str, lambda x, _: bool(x.value)), "path": (str, lambda x, _: bool(x.value)), "version": (str, is_version), "preinstalled": (str, ["YES", "NO"]) } self.check_simple(SIMPLE_FEATURES, radl)
0.005495
def remove_entity(self, entity, second=False): ''' Removes entity from world and kills entity ''' if entity in self._entities: if second: for group in self._groups.keys(): if entity in self._groups[group]: self.deregister_entity_from_group(entity, group) self._entities.remove(entity) else: entity.kill() else: raise UnmanagedEntityError(entity)
0.003899
def compose_later(self, *things):
        """
        Register a list of things for composition using compose().

        compose_later takes a list of fsts; the last element specifies the
        base module as a string. The things are composed directly after the
        base module is imported by application code.
        """
        if len(things) == 1:
            return things[0]
        module_name = things[-1]
        if module_name in sys.modules:
            raise CompositionError(
                'compose_later call after module has been imported: ' +
                module_name
            )
        LazyComposerHook.add(module_name, things[:-1], self)
0.004491
def get_broks_from_satellites(self):  # pragma: no cover - not used!
        """Get broks from all my known internal satellite links

        The arbiter gets the broks from ALL the known satellites

        :return: None
        """
        for satellites in [self.conf.brokers, self.conf.schedulers,
                           self.conf.pollers, self.conf.reactionners,
                           self.conf.receivers]:
            for satellite in satellites:
                # Get only if reachable...
                if not satellite.reachable:
                    continue
                logger.debug("Getting broks from: %s", satellite.name)
                new_broks = satellite.get_and_clear_broks()
                if new_broks:
                    logger.debug("Got %d broks from: %s", len(new_broks), satellite.name)
                for brok in new_broks:
                    self.add(brok)
0.004608
def get_for_file( fp, hash_mode="md5" ): r""" Returns a hash string for the given file path. :param fp: Path to the file. :param hash_mode: Can be either one of 'md5', 'sha1', 'sha256' or 'sha512'. Defines the algorithm used to generate the resulting hash string. Default is 'md5'. """ with _get_file_handle(fp) as f: file_hash_digest = get_for_handle(f, hash_mode) return file_hash_digest
0.009785
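`_get_file_handle` and `get_for_handle` are defined elsewhere; a plausible self-contained equivalent built on hashlib, reading in chunks so large files never load fully into memory, would be:

import hashlib


def get_for_handle(f, hash_mode='md5', chunk_size=8192):
    """Hash an open binary file handle in chunks."""
    h = hashlib.new(hash_mode)  # accepts 'md5', 'sha1', 'sha256', 'sha512'
    for chunk in iter(lambda: f.read(chunk_size), b''):
        h.update(chunk)
    return h.hexdigest()


with open('/etc/hostname', 'rb') as f:  # binary mode: hashing needs bytes
    print(get_for_handle(f, 'sha256'))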
def build_GTK_KDE(self): """Build the Key Data Encapsulation for GTK KeyID: 0 Ref: 802.11i p81 """ return b''.join([ b'\xdd', # Type KDE chb(len(self.gtk_full) + 6), b'\x00\x0f\xac', # OUI b'\x01', # GTK KDE b'\x00\x00', # KeyID - Tx - Reserved x2 self.gtk_full, ])
0.005141
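The layout being built is the standard 802.11 vendor-specific KDE: element ID 0xdd, a length byte covering the six header bytes plus the key, the 00-0F-AC OUI, data type 1 for GTK, then the KeyID/Tx flags and the key itself. A standalone check with a 16-byte toy key (scapy's chb() replaced by bytes([...])):

gtk = bytes(16)  # toy 16-byte group temporal key
kde = b''.join([
    b'\xdd',                # element ID: vendor specific
    bytes([len(gtk) + 6]),  # length = OUI(3) + type(1) + flags(2) + key
    b'\x00\x0f\xac',        # OUI
    b'\x01',                # KDE data type: GTK
    b'\x00\x00',            # KeyID=0, Tx=0, reserved
    gtk,
])
assert kde[1] == 22 and kde[8:] == gtk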
def guesstype(timestr):
    """Tries to guess whether a string represents a time or a time delta and
    returns the appropriate object.

    :param timestr: (required) The string to be analyzed
    """
    timestr_full = " {} ".format(timestr)
    if timestr_full.find(" in ") != -1 or timestr_full.find(" ago ") != -1:
        return Chronyk(timestr)

    comps = ["second", "minute", "hour", "day", "week", "month", "year"]
    for comp in comps:
        if timestr_full.find(comp) != -1:
            return ChronykDelta(timestr)

    return Chronyk(timestr)
0.001761
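In other words, strings phrased relative to now come back as absolute Chronyk times, while bare durations come back as deltas; illustrative calls, traced directly through the branches above:

type(guesstype("in 2 hours"))     # Chronyk -- " in " marks a point in time
type(guesstype("5 minutes ago"))  # Chronyk -- " ago " likewise
type(guesstype("2 hours"))        # ChronykDelta -- a bare duration
type(guesstype("2014-01-01"))     # Chronyk -- fallback for everything else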
def _bool_encode(self, d): """ Converts bool values to lowercase strings """ for k, v in d.items(): if isinstance(v, bool): d[k] = str(v).lower() return d
0.016667
def advertise(
        self,
        routers=None,
        name=None,
        timeout=None,
        router_file=None,
        jitter=None,
    ):
        """Make a service available on the Hyperbahn routing mesh.

        This will make contact with a Hyperbahn host from a list of known
        Hyperbahn routers. Additional Hyperbahn connections will be
        established once contact has been made with the network.

        :param routers:
            A seed list of addresses of Hyperbahn routers, e.g.,
            ``["127.0.0.1:23000"]``.

        :param name:
            The identity of this service on the Hyperbahn.

            This is usually unnecessary, as it defaults to the name given
            when initializing the :py:class:`TChannel` (which is used as your
            identity as a caller).

        :returns:
            A future that resolves to the remote server's response after the
            first advertise finishes.

            Advertisement will continue to happen periodically.
        """
        name = name or self.name

        if not self.is_listening():
            self.listen()

        return hyperbahn.advertise(
            self,
            name,
            routers,
            timeout,
            router_file,
            jitter,
        )
0.002322
def select_event( event = None, selection = "ejets" ): """ Select a HEP event. """ if selection == "ejets": # Require single lepton. # Require >= 4 jets. if \ 0 < len(event.el_pt) < 2 and \ len(event.jet_pt) >= 4 and \ len(event.ljet_m) >= 1: return True else: return False
0.020151
def command(epilog=None, help=None, width=140, **attrs): """Same as `@click.command()`, but with common settings (ie: "-h" for help, epilog, slightly larger help display)""" if epilog is None: epilog = _get_caller_doc() attrs = settings(epilog=epilog, help=help, width=width, **attrs) return click.command(**attrs)
0.005917
def list_statistics(self, begin_date, end_date, shop_id=-1):
        """
        Wi-Fi data statistics

        For details, see
        http://mp.weixin.qq.com/wiki/8/dfa2b756b66fca5d9b1211bc18812698.html

        :param begin_date: Start date; the span between begin and end may be
                           at most 30 days
        :param end_date: End date; the span between begin and end may be at
                         most 30 days
        :param shop_id: Optional. Shop ID to restrict the search to a single
                        shop; -1 returns aggregate statistics over all shops
        :return: The JSON data returned by the API
        """
        if isinstance(begin_date, (datetime, date)):
            begin_date = begin_date.strftime('%Y-%m-%d')
        if isinstance(end_date, (datetime, date)):
            end_date = end_date.strftime('%Y-%m-%d')
        res = self._post(
            'statistics/list',
            data={
                'begin_date': begin_date,
                'end_date': end_date,
                'shop_id': shop_id
            },
            result_processor=lambda x: x['data']
        )
        return res
0.002326
def diversity_coef_sign(W, ci):
    '''
    The Shannon-entropy based diversity coefficient measures the diversity
    of intermodular connections of individual nodes and ranges from 0 to 1.

    Parameters
    ----------
    W : NxN np.ndarray
        undirected connection matrix with positive and negative weights
    ci : Nx1 np.ndarray
        community affiliation vector

    Returns
    -------
    Hpos : Nx1 np.ndarray
        diversity coefficient based on positive connections
    Hneg : Nx1 np.ndarray
        diversity coefficient based on negative connections
    '''
    n = len(W)  # number of nodes

    _, ci = np.unique(ci, return_inverse=True)
    ci += 1

    m = np.max(ci)  # number of modules

    def entropy(w_):
        S = np.sum(w_, axis=1)  # strength
        Snm = np.zeros((n, m))  # node-to-module degree
        for i in range(m):
            Snm[:, i] = np.sum(w_[:, ci == i + 1], axis=1)
        pnm = Snm / (np.tile(S, (m, 1)).T)
        pnm[np.isnan(pnm)] = 0
        pnm[np.logical_not(pnm)] = 1
        return -np.sum(pnm * np.log(pnm), axis=1) / np.log(m)

    # explicitly ignore the numpy runtime warning for division by zero
    with np.errstate(invalid='ignore'):
        Hpos = entropy(W * (W > 0))
        Hneg = entropy(-W * (W < 0))

    return Hpos, Hneg
0.001544
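A worked instance of the entropy: with m = 2 modules, a node whose strength is split evenly has pnm = [0.5, 0.5] and H = -(2 · 0.5 · log 0.5)/log 2 = 1, while a node attached to a single module gets H = 0 after the zero-to-one substitution the code applies so that the log is defined:

import numpy as np

m = 2
entropy = lambda p: -np.sum(p * np.log(p)) / np.log(m)

pnm = np.array([0.5, 0.5])  # strength split evenly over both modules
print(entropy(pnm))         # 1.0 -> maximal diversity

pnm = np.array([1.0, 0.0])
pnm[pnm == 0] = 1           # the code's trick: log(1) = 0 drops empty modules
print(entropy(pnm))         # -0.0, i.e. zero diversity: one module only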
def register_custom_adapter(cls, target_class, adapter): """ :type target_class: type :type adapter: JsonAdapter|type :rtype: None """ class_name = target_class.__name__ if adapter.can_serialize(): cls._custom_serializers[class_name] = adapter if adapter.can_deserialize(): cls._custom_deserializers[class_name] = adapter
0.004843
def ftypes(self): """ Return the ftypes (indication of sparse/dense and dtype) in DataFrame. This returns a Series with the data type of each column. The result's index is the original DataFrame's columns. Columns with mixed types are stored with the ``object`` dtype. See :ref:`the User Guide <basics.dtypes>` for more. Returns ------- pandas.Series The data type and indication of sparse/dense of each column. See Also -------- DataFrame.dtypes: Series with just dtype information. SparseDataFrame : Container for sparse tabular data. Notes ----- Sparse data should have the same dtypes as its dense representation. Examples -------- >>> arr = np.random.RandomState(0).randn(100, 4) >>> arr[arr < .8] = np.nan >>> pd.DataFrame(arr).ftypes 0 float64:dense 1 float64:dense 2 float64:dense 3 float64:dense dtype: object >>> pd.SparseDataFrame(arr).ftypes 0 float64:sparse 1 float64:sparse 2 float64:sparse 3 float64:sparse dtype: object """ from pandas import Series return Series(self._data.get_ftypes(), index=self._info_axis, dtype=np.object_)
0.001439
def reqfile(filepath):
    """Turns a pip requirements file into a list of requirement strings
    (one element per line), skipping blank lines and comments and reducing
    URL requirements to their #egg= name."""
    result = []
    import re
    url_re = re.compile(".+:.+#egg=(.+)")
    with open(filepath, "r") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue
            mo = url_re.match(line)
            if mo is not None:
                line = mo.group(1)
            result.append(line)
    return result
0.002114
def status_favourite(self, id): """ Favourite a status. Returns a `toot dict`_ with the favourited status. """ id = self.__unpack_id(id) url = '/api/v1/statuses/{0}/favourite'.format(str(id)) return self.__api_request('POST', url)
0.006969
def min_conn_k(traj_exp):
    '''
    Function returns the minimum number of connections, k, that are required
    to form a fully connected graph based on gene expression data

    :param traj_exp: ndarray representing gene expression
    :return k: int of the minimum number of connections needed for a
        minimally connected graph
    '''
    # gene expression euclidean distance calculation
    traj_dist = sps.distance.squareform(sps.distance.pdist(traj_exp))
    conn_comp = 2  # starting value for the number of connected components
    k = 0  # starting value for the number of neighbors required in the graph

    # each pass increases the number of neighbors by 1 until the graph has a
    # single component (indicating a fully connected graph)
    while conn_comp > 1:
        k = k + 1
        adj_mat = adaptive_knn_graph(traj_dist, k)  # build an adjacency matrix
        traj_graph = nx.Graph(adj_mat)  # build a graph from that matrix
        conn_comp = nx.number_connected_components(traj_graph)

    return k
0.027753
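`adaptive_knn_graph` is defined elsewhere; the loop's idea can be reproduced with a plain, symmetrized kNN graph (an assumption standing in for the adaptive variant) — keep raising k until networkx reports a single component:

import networkx as nx
import numpy as np
import scipy.spatial as sps


def min_conn_k_plain(traj_exp):
    """Smallest k whose symmetric kNN graph is fully connected."""
    dist = sps.distance.squareform(sps.distance.pdist(traj_exp))
    n = dist.shape[0]
    for k in range(1, n):
        # k nearest neighbors per row; col 0 is the point itself
        # (assumes distinct rows, so self-distance 0 is the unique minimum)
        idx = np.argsort(dist, axis=1)[:, 1:k + 1]
        adj = np.zeros_like(dist, dtype=bool)
        adj[np.arange(n)[:, None], idx] = True
        adj |= adj.T  # symmetrize: an edge in either direction connects
        if nx.number_connected_components(nx.Graph(adj.astype(int))) == 1:
            return k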
def first_container_with_errors(self, errors): """ Returns the first container with errors, otherwise returns None. """ for tab in self.fields: errors_here = any(error in tab for error in errors) if errors_here: return tab return None
0.006369
def initialize_all_switch_interfaces(self, interfaces,
                                         switch_ip=None, replay=True):
        """Configure Nexus interface and get port channel number.

        Called during switch replay or just init if no replay
        is configured. For the latter case, only configured interfaces
        are affected by this method.

        During switch replay, the change group from the host mapping
        database is used. There is no attempt to relearn port-channel
        from the Nexus switch. What we last knew it to be will persist.

        :param interfaces:  List of interfaces for a given switch.
                            ch_grp can be altered as last arg
                            to each interface. If no ch_grp,
                            this arg will be zero.
        :param switch_ip: IP address of Nexus switch
        :param replay: Whether in replay path
        """
        if not interfaces:
            return

        starttime = time.time()

        if replay:
            try:
                vpcs = nxos_db.get_active_switch_vpc_allocs(switch_ip)
            except cexc.NexusVPCAllocNotFound:
                vpcs = []
            for vpc in vpcs:
                # if this is an allocated vpc, then recreate it
                if not vpc.learned:
                    self.create_port_channel(switch_ip, vpc.vpc_id)

        for i, (nexus_host, intf_type, nexus_port, is_native,
            ch_grp) in enumerate(interfaces):
            if replay and ch_grp != 0:
                try:
                    vpc = nxos_db.get_switch_vpc_alloc(switch_ip, ch_grp)
                    self.add_ch_grp_to_interface(
                        nexus_host, intf_type, nexus_port, ch_grp)
                except cexc.NexusVPCAllocNotFound:
                    pass
                # if channel-group exists, switch to port-channel
                # instead of the provided ethernet interface
                intf_type = 'port-channel'
                nexus_port = str(ch_grp)

                # substitute content of ch_grp
                no_chgrp_len = len(interfaces[i]) - 1
                interfaces[i] = interfaces[i][:no_chgrp_len] + (ch_grp,)

            trunk_mode_present, vlan_present = (
                self._get_interface_switch_trunk_present(
                    nexus_host, intf_type, nexus_port))
            if not vlan_present:
                self.send_enable_vlan_on_trunk_int(
                    nexus_host, "", intf_type, nexus_port, False,
                    not trunk_mode_present)
            elif not trunk_mode_present:
                LOG.warning(TRUNK_MODE_NOT_FOUND, nexus_host,
                            nexus_help.format_interface_name(
                                intf_type, nexus_port))

        self.capture_and_print_timeshot(
            starttime, "get_allif",
            switch=nexus_host)
0.002076
def signature_split(signatures: bytes, pos: int) -> Tuple[int, int, int]: """ :param signatures: signatures in form of {bytes32 r}{bytes32 s}{uint8 v} :param pos: position of the signature :return: Tuple with v, r, s """ signature_pos = 65 * pos v = signatures[64 + signature_pos] r = int.from_bytes(signatures[signature_pos:32 + signature_pos], 'big') s = int.from_bytes(signatures[32 + signature_pos:64 + signature_pos], 'big') return v, r, s
0.004124
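A quick round-trip check of the layout — pack arbitrary test values into the 65-byte-per-signature wire format and recover them with the function above:

r, s, v = 12345, 67890, 27
packed = r.to_bytes(32, 'big') + s.to_bytes(32, 'big') + bytes([v])
signatures = packed * 2  # two concatenated signatures

assert signature_split(signatures, 0) == (27, 12345, 67890)
assert signature_split(signatures, 1) == (27, 12345, 67890)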
def mtf_unitransformer_base(): """Hyperparameters for single-stack Transformer.""" hparams = mtf_transformer2_base() hparams.add_hparam("autoregressive", True) # HYPERPARAMETERS FOR THE SINGLE LAYER STACK hparams.add_hparam("layers", ["self_att", "drd"] * 6) # number of heads in multihead attention hparams.add_hparam("num_heads", 8) # default of 0 for standard transformer behavior # 1 means a single set of keys and values that are read by all query heads hparams.add_hparam("num_memory_heads", 0) # share attention keys and values hparams.add_hparam("shared_kv", False) # if nonzero then use local attention hparams.add_hparam("local_attention_radius", 128) return hparams
0.022663
def __verify_minion(self, id_, token):
        '''
        Take a minion id and a string signed with the minion private key
        The string needs to verify as 'salt' with the minion public key

        :param str id_: A minion ID
        :param str token: A string signed with the minion private key

        :rtype: bool
        :return: Boolean indicating whether or not the token can be verified.
        '''
        if not salt.utils.verify.valid_id(self.opts, id_):
            return False
        pub_path = os.path.join(self.opts['pki_dir'], 'minions', id_)

        try:
            pub = salt.crypt.get_rsa_pub_key(pub_path)
        except (IOError, OSError):
            log.warning(
                'Salt minion claiming to be %s attempted to communicate with '
                'master, but key could not be read and verification was denied.',
                id_
            )
            return False
        except (ValueError, IndexError, TypeError) as err:
            log.error('Unable to load public key "%s": %s', pub_path, err)
            # without a loaded key the token cannot be checked
            return False
        try:
            if salt.crypt.public_decrypt(pub, token) == b'salt':
                return True
        except ValueError as err:
            log.error('Unable to decrypt token: %s', err)

        log.error(
            'Salt minion claiming to be %s has attempted to communicate with '
            'the master and could not be verified',
            id_
        )
        return False
0.002091
def register(linter): ''' Register the transformation functions. ''' try: MANAGER.register_transform(nodes.Class, rootlogger_transform) except AttributeError: MANAGER.register_transform(nodes.ClassDef, rootlogger_transform)
0.003861
def iter_package_families(paths=None): """Iterate over package families, in no particular order. Note that multiple package families with the same name can be returned. Unlike packages, families later in the searchpath are not hidden by earlier families. Args: paths (list of str, optional): paths to search for package families, defaults to `config.packages_path`. Returns: `PackageFamily` iterator. """ for path in (paths or config.packages_path): repo = package_repository_manager.get_repository(path) for resource in repo.iter_package_families(): yield PackageFamily(resource)
0.00149
def getFlags(self, ifname):
        """Get the flags for an interface"""
        try:
            result = self._ioctl(self.SIOCGIFFLAGS, self._getifreq(ifname))
        except IOError as msg:
            log.warn(LOG_CHECK,
                     "error getting flags for interface %r: %s", ifname, msg)
            return 0
        # extract the interface's flags from the return value
        flags, = struct.unpack('H', result[16:18])
        # return the whole flags word; callers mask out bits such as IFF_UP
        return flags
0.008333
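Callers presumably mask the returned word themselves; on Linux the relevant bits from <net/if.h> include IFF_UP (0x1) and IFF_RUNNING (0x40), so a usage sketch (hypothetical instance and interface name) is:

IFF_UP = 0x1        # interface is administratively up
IFF_RUNNING = 0x40  # interface has a carrier

flags = iface.getFlags('eth0')  # hypothetical instance / interface name
is_up = bool(flags & IFF_UP)
has_carrier = bool(flags & IFF_RUNNING)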
def _forward_iterator(self): "Returns a forward iterator over the trie" path = [(self, 0, Bits())] while path: node, idx, prefix = path.pop() if idx==0 and node.value is not None and not node.prune_value: yield (self._unpickle_key(prefix), self._unpickle_value(node.value)) if idx<len(node.children): path.append((node, idx+1, prefix)) link = node.children[idx] if not link.pruned: path.append((link.node, 0, prefix + link.prefix))
0.008696
def predict(self, Xnew, full_cov=False, Y_metadata=None, kern=None,
                likelihood=None, include_likelihood=True):
        """
        Predict the function(s) at the new point(s) Xnew. This includes the
        likelihood variance added to the predicted underlying function
        (usually referred to as f).

        In order to predict without adding in the likelihood give
        `include_likelihood=False`, or refer to self.predict_noiseless().

        :param Xnew: The points at which to make a prediction
        :type Xnew: np.ndarray (Nnew x self.input_dim)
        :param full_cov: whether to return the full covariance matrix, or
                         just the diagonal
        :type full_cov: bool
        :param Y_metadata: metadata about the predicting point to pass to
                           the likelihood
        :param kern: The kernel to use for prediction (defaults to the model
                     kern). This is useful for examining e.g. subprocesses.
        :param include_likelihood: Whether or not to add likelihood noise to
                                   the predicted underlying latent function f.
        :type include_likelihood: bool

        :returns: (mean, var):
            mean: posterior mean, a Numpy array, Nnew x self.input_dim
            var: posterior variance, a Numpy array, Nnew x 1 if full_cov=False,
                 Nnew x Nnew otherwise

            If full_cov and self.input_dim > 1, the return shape of var is
            Nnew x Nnew x self.input_dim. If self.input_dim == 1, the return
            shape is Nnew x Nnew. This is to allow for different normalizations
            of the output dimensions.

        Note: If you want the predictive quantiles (e.g. 95% confidence
        interval) use :py:func:`~GPy.core.gp.GP.predict_quantiles`.
        """
        # Predict the latent function values
        mean, var = self._raw_predict(Xnew, full_cov=full_cov, kern=kern)

        if include_likelihood:
            # now push through likelihood
            if likelihood is None:
                likelihood = self.likelihood
            mean, var = likelihood.predictive_values(mean, var, full_cov,
                                                     Y_metadata=Y_metadata)

        if self.normalizer is not None:
            mean = self.normalizer.inverse_mean(mean)

            # We need to create 3d array for the full covariance matrix with
            # multiple outputs.
            if full_cov & (mean.shape[1] > 1):
                var = self.normalizer.inverse_covariance(var)
            else:
                var = self.normalizer.inverse_variance(var)

        return mean, var
0.00112
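Typical use with a GPy regression model (standard GPy workflow; the data here is synthetic):

import numpy as np
import GPy

X = np.random.uniform(-3., 3., (20, 1))
Y = np.sin(X) + np.random.randn(20, 1) * 0.05
m = GPy.models.GPRegression(X, Y)
m.optimize()

Xnew = np.linspace(-3., 3., 50)[:, None]
mu, var = m.predict(Xnew)  # posterior mean/variance incl. likelihood noise
mu_f, var_f = m.predict(Xnew, include_likelihood=False)  # latent f only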