text (string, lengths 78 to 104k)
score (float64, range 0 to 0.18)
def ensure_timezone(dt, tz=None): """ Make sure the datetime <dt> has a timezone set, using timezone <tz> if it doesn't. <tz> defaults to the local timezone. """ if dt.tzinfo is None: return dt.replace(tzinfo=tz or tzlocal()) else: return dt
0.003559
def module_for_loader(fxn): """Decorator to handle selecting the proper module for loaders. The decorated function is passed the module to use instead of the module name. The module passed in to the function is either from sys.modules if it already exists or is a new module. If the module is new, then __name__ is set the first argument to the method, __loader__ is set to self, and __package__ is set accordingly (if self.is_package() is defined) will be set before it is passed to the decorated function (if self.is_package() does not work for the module it will be set post-load). If an exception is raised and the decorator created the module it is subsequently removed from sys.modules. The decorator assumes that the decorated function takes the module name as the second argument. """ warnings.warn('The import system now takes care of this automatically.', DeprecationWarning, stacklevel=2) @functools.wraps(fxn) def module_for_loader_wrapper(self, fullname, *args, **kwargs): with _module_to_load(fullname) as module: module.__loader__ = self try: is_package = self.is_package(fullname) except (ImportError, AttributeError): pass else: if is_package: module.__package__ = fullname else: module.__package__ = fullname.rpartition('.')[0] # If __package__ was not set above, __import__() will do it later. return fxn(self, module, *args, **kwargs) return module_for_loader_wrapper
0.001809
def extract_statements(self): """Process the table to extract Statements.""" for _, (tf, target, effect, refs) in self.df.iterrows(): tf_agent = get_grounded_agent(tf) target_agent = get_grounded_agent(target) if effect == 'Activation': stmt_cls = IncreaseAmount elif effect == 'Repression': stmt_cls = DecreaseAmount else: continue pmids = refs.split(';') for pmid in pmids: stmt = make_stmt(stmt_cls, tf_agent, target_agent, pmid) self.statements.append(stmt)
0.00312
def reqTickByTickData( self, contract: Contract, tickType: str, numberOfTicks: int = 0, ignoreSize: bool = False) -> Ticker: """ Subscribe to tick-by-tick data and return the Ticker that holds the ticks in ticker.tickByTicks. https://interactivebrokers.github.io/tws-api/tick_data.html Args: contract: Contract of interest. tickType: One of 'Last', 'AllLast', 'BidAsk' or 'MidPoint'. numberOfTicks: Number of ticks or 0 for unlimited. ignoreSize: Ignore bid/ask ticks that only update the size. """ reqId = self.client.getReqId() ticker = self.wrapper.startTicker(reqId, contract, tickType) self.client.reqTickByTickData( reqId, contract, tickType, numberOfTicks, ignoreSize) return ticker
0.002336
def fromFile(cls, filepath): """ Creates a proxy instance from the inputted registry file. :param filepath | <str> :return <PluginProxy> || None """ xdata = ElementTree.parse(nstr(filepath)) xroot = xdata.getroot() # collect variable information name = xroot.get('name') ver = float(xroot.get('version', '1.0')) if not name: name = os.path.basename(filepath).split('.') if name == '__init__': name = os.path.normpath(filepath).split(os.path.sep)[-2] name = projex.text.pretty(name) icon = xroot.get('icon', './icon.png') ximport = xroot.find('import') if ximport is not None: importpath = ximport.get('path', './__init__.py') else: importpath = './__init__.py' params = {'description': '', 'author': '', 'email': '', 'url': ''} for param, default in params.items(): xdata = xroot.find(param) if xdata is not None: params[param] = xdata.text # generate the proxy information proxy = PluginProxy(cls, name, ver) proxy.setImportPath(importpath) proxy.setDescription(params['description']) proxy.setAuthor(params['author']) proxy.setEmail(params['email']) proxy.setUrl(params['url']) proxy.setFilepath(filepath) return proxy
0.002706
def reduce(self, values, inplace=True): """ Reduces the distribution to the context of the given variable values. The formula for the obtained conditional distribution is given by - For, .. math:: N(X_j | X_i = x_i) ~ N(mu_{j.i} ; sig_{j.i}) where, .. math:: mu_{j.i} = mu_j + sig_{j, i} * {sig_{i, i}^{-1}} * (x_i - mu_i) .. math:: sig_{j.i} = sig_{j, j} - sig_{j, i} * {sig_{i, i}^{-1}} * sig_{i, j} Parameters ---------- values: list, array-like A list of tuples of the form (variable_name, variable_value). inplace: boolean If inplace=True it will modify the factor itself, else would return a new ContinuosFactor object. Returns ------- GaussianDistribution or None: if inplace=True (default) returns None if inplace=False returns a new GaussianDistribution instance. Examples -------- >>> import numpy as np >>> from pgmpy.factors.distributions import GaussianDistribution as GD >>> dis = GD(variables=['x1', 'x2', 'x3'], ... mean=[1, -3, 4], ... cov=[[4, 2, -2], ... [2, 5, -5], ... [-2, -5, 8]]) >>> dis.variables ['x1', 'x2', 'x3'] >>> dis.mean array([[ 1.], [-3.], [ 4.]]) >>> dis.covariance array([[ 4., 2., -2.], [ 2., 5., -5.], [-2., -5., 8.]]) >>> dis.reduce([('x1', 7)]) >>> dis.variables ['x2', 'x3'] >>> dis.mean array([[ 0.], [ 1.]]) >>> dis.covariance array([[ 4., -4.], [-4., 7.]]) """ if not isinstance(values, list): raise TypeError("values: Expected type list or array-like, ", "got type {var_type}".format( var_type=type(values))) phi = self if inplace else self.copy() var_to_reduce = [var for var, value in values] # index_to_keep -> j vector index_to_keep = [self.variables.index(var) for var in self.variables if var not in var_to_reduce] # index_to_reduce -> i vector index_to_reduce = [self.variables.index(var) for var in var_to_reduce] mu_j = self.mean[index_to_keep] mu_i = self.mean[index_to_reduce] x_i = np.array([value for var, value in values]).reshape(len(index_to_reduce), 1) sig_i_j = self.covariance[np.ix_(index_to_reduce, index_to_keep)] sig_j_i = self.covariance[np.ix_(index_to_keep, index_to_reduce)] sig_i_i_inv = np.linalg.inv(self.covariance[np.ix_(index_to_reduce, index_to_reduce)]) sig_j_j = self.covariance[np.ix_(index_to_keep, index_to_keep)] phi.variables = [self.variables[index] for index in index_to_keep] phi.mean = mu_j + np.dot(np.dot(sig_j_i, sig_i_i_inv), x_i - mu_i) phi.covariance = sig_j_j - np.dot(np.dot(sig_j_i, sig_i_i_inv), sig_i_j) phi._precision_matrix = None if not inplace: return phi
0.002146
def split_sentences(self, text): """ Split input text into sentences that match CoreNLP's default format, but are not yet processed. :param text: The text of the parent paragraph of the sentences :return: """ if self.model.has_pipe("sentence_boundary_detector"): self.model.remove_pipe(name="sentence_boundary_detector") if not self.model.has_pipe("sentencizer"): sentencizer = self.model.create_pipe("sentencizer") # add sentencizer self.model.add_pipe(sentencizer) try: doc = self.model(text, disable=["parser", "tagger", "ner"]) except ValueError: # temporarily increase character limit of spacy # 'Probably safe' according to spacy, as no parser or NER is used previous_max_length = self.model.max_length self.model.max_length = 100_000_000 self.logger.warning( f"Temporarily increased spacy maximum " f"character limit to {self.model.max_length} to split sentences." ) doc = self.model(text, disable=["parser", "tagger", "ner"]) self.model.max_length = previous_max_length self.logger.warning( f"Spacy maximum " f"character limit set back to {self.model.max_length}." ) doc.is_parsed = True position = 0 for sent in doc.sents: parts = defaultdict(list) text = sent.text for i, token in enumerate(sent): parts["words"].append(str(token)) parts["lemmas"].append(token.lemma_) parts["pos_tags"].append(token.pos_) parts["ner_tags"].append("") # placeholder for later NLP parsing parts["char_offsets"].append(token.idx) parts["abs_char_offsets"].append(token.idx) parts["dep_parents"].append(0) # placeholder for later NLP parsing parts["dep_labels"].append("") # placeholder for later NLP parsing # make char_offsets relative to start of sentence parts["char_offsets"] = [ p - parts["char_offsets"][0] for p in parts["char_offsets"] ] parts["position"] = position parts["text"] = text position += 1 yield parts
0.00289
def _make_single_run(self): """ Modifies the trajectory for single runs executed by the environment """ self._is_run = False # to be able to use f_set_crun self._new_nodes = OrderedDict() self._new_links = OrderedDict() self._is_run = True return self
0.013378
def getATR(reader): """Return the ATR of the card inserted into the reader.""" connection = reader.createConnection() atr = "" try: connection.connect() atr = smartcard.util.toHexString(connection.getATR()) connection.disconnect() except smartcard.Exceptions.NoCardException: atr = "no card inserted" return atr
0.002725
def fix_hp_addrs(server): """ Works around hpcloud's peculiar "all ip addresses are returned as private even though one is public" bug. This is also what the official hpfog gem does in the ``Fog::Compute::HP::Server#public_ip_address`` method. :param dict server: Contains the server ID, a list of public IP addresses, and a list of private IP addresses. """ fixed = {A.server.ID: server[A.server.ID]} both = server.get(A.server.PRIVATE_IPS) if both: fixed[A.server.PUBLIC_IPS] = [both[1]] fixed[A.server.PRIVATE_IPS] = [both[0]] return fixed
0.001639
def integral(self, xbin1=1, xbin2=-2, ybin1=1, ybin2=-2, zbin1=1, zbin2=-2, width=False, error=False, overflow=False): """ Compute the integral and error over a range of bins """ if xbin1 is None: xbin1 = 0 if overflow else 1 if xbin2 is None: xbin2 = -1 if overflow else -2 if ybin1 is None: ybin1 = 0 if overflow else 1 if ybin2 is None: ybin2 = -1 if overflow else -2 if zbin1 is None: zbin1 = 0 if overflow else 1 if zbin2 is None: zbin2 = -1 if overflow else -2 nbinsx = self.nbins(axis=0, overflow=True) xbin1 %= nbinsx xbin2 %= nbinsx nbinsy = self.nbins(axis=1, overflow=True) ybin1 %= nbinsy ybin2 %= nbinsy nbinsz = self.nbins(axis=2, overflow=True) zbin1 %= nbinsz zbin2 %= nbinsz options = 'width' if width else '' if error: error = ROOT.Double() integral = super(_Hist3D, self).IntegralAndError( xbin1, xbin2, ybin1, ybin2, zbin1, zbin2, error, options) return integral, error return super(_Hist3D, self).Integral( xbin1, xbin2, ybin1, ybin2, zbin1, zbin2, options)
0.00578
def Click(x: int, y: int, waitTime: float = OPERATION_WAIT_TIME) -> None: """ Simulate mouse click at point x, y. x: int. y: int. waitTime: float. """ SetCursorPos(x, y) screenWidth, screenHeight = GetScreenSize() mouse_event(MouseEventFlag.LeftDown | MouseEventFlag.Absolute, x * 65535 // screenWidth, y * 65535 // screenHeight, 0, 0) time.sleep(0.05) mouse_event(MouseEventFlag.LeftUp | MouseEventFlag.Absolute, x * 65535 // screenWidth, y * 65535 // screenHeight, 0, 0) time.sleep(waitTime)
0.005545
def seven_zip(archive, items, self_extracting=False): """Create a 7z archive.""" if not isinstance(items, (list, tuple)): items = [items] if self_extracting: return er(_get_sz(), "a", "-ssw", "-sfx", archive, *items) else: return er(_get_sz(), "a", "-ssw", archive, *items)
0.003195
def _decoder(self, obj): """ Decode a toc element leaf-node """ if '__class__' in obj: elem = eval(obj['__class__'])() elem.ident = obj['ident'] elem.group = str(obj['group']) elem.name = str(obj['name']) elem.ctype = str(obj['ctype']) elem.pytype = str(obj['pytype']) elem.access = obj['access'] return elem return obj
0.004556
def eplotter(task, data): # CRYSTAL, VASP, EXCITING ''' eplotter is like bdplotter but less complicated ''' results, color, fdata = [], None, [] if task == 'optstory': color = '#CC0000' clickable = True for n, i in enumerate(data): fdata.append([n, i[4]]) fdata = array(fdata) fdata[:,1] -= min(fdata[:,1]) # this normalizes values to minimum (by 2nd col) fdata = fdata.tolist() elif task == 'convergence': color = '#0066CC' clickable = False for n, i in enumerate(data): fdata.append([n, i]) for n in range(len(fdata)): #fdata[n][1] = "%10.5f" % fdata[n][1] fdata[n][1] = round(fdata[n][1], 5) results.append({'color': color, 'clickable': clickable, 'data': fdata}) return results
0.008393
def to(self, unit): """Convert to a given unit. Parameters ---------- unit : str Name of the unit to convert to. Returns ------- u : Unit new Unit object with the requested unit and computed value. """ u = Unit("0cm") u.value = self.value/self.per_inch[self.unit]*self.per_inch[unit] u.unit = unit return u
0.004684
def getFilterNames(header, filternames=None): """ Returns a comma-separated string of filter names extracted from the input header (PyFITS header object). This function has been hard-coded to support the following instruments: ACS, WFPC2, STIS This function relies on the 'INSTRUME' keyword to define what instrument has been used to generate the observation/header. The 'filternames' parameter allows the user to provide a list of keyword names for their instrument, in the case their instrument is not supported. """ # Define the keyword names for each instrument _keydict = { 'ACS': ['FILTER1', 'FILTER2'], 'WFPC2': ['FILTNAM1', 'FILTNAM2'], 'STIS': ['OPT_ELEM', 'FILTER'], 'NICMOS': ['FILTER', 'FILTER2'], 'WFC3': ['FILTER', 'FILTER2'] } # Find out what instrument the input header came from, based on the # 'INSTRUME' keyword if 'INSTRUME' in header: instrument = header['INSTRUME'] else: raise ValueError('Header does not contain INSTRUME keyword.') # Check to make sure this instrument is supported in _keydict if instrument in _keydict: _filtlist = _keydict[instrument] else: _filtlist = filternames # At this point, we know what keywords correspond to the filter names # in the header. Now, get the values associated with those keywords. # Build a list of all filter name values, with the exception of the # blank keywords. Values containing 'CLEAR' or 'N/A' are valid. _filter_values = [] for _key in _filtlist: if _key in header: _val = header[_key] else: _val = '' if _val.strip() != '': _filter_values.append(header[_key]) # Return the comma-separated list return ','.join(_filter_values)
0.000538
def eeg_microstates_clustering(data, n_microstates=4, clustering_method="kmeans", n_jobs=1, n_init=25, occurence_rejection_treshold=0.05, max_refitting=5, verbose=True): """ Fit the clustering algorithm. """ # Create training set training_set = data.copy() if verbose is True: print("- Initializing the clustering algorithm...") if clustering_method == "kmeans": algorithm = sklearn.cluster.KMeans(init='k-means++', n_clusters=n_microstates, n_init=n_init, n_jobs=n_jobs) elif clustering_method == "spectral": algorithm = sklearn.cluster.SpectralClustering(n_clusters=n_microstates, n_init=n_init, n_jobs=n_jobs) elif clustering_method == "agglom": algorithm = sklearn.cluster.AgglomerativeClustering(n_clusters=n_microstates, linkage="complete") elif clustering_method == "dbscan": algorithm = sklearn.cluster.DBSCAN(min_samples=100) elif clustering_method == "affinity": algorithm = sklearn.cluster.AffinityPropagation(damping=0.5) else: print("NeuroKit Error: eeg_microstates(): clustering_method must be 'kmeans', 'spectral', 'dbscan', 'affinity' or 'agglom'") refitting = 0 # Initialize the number of refittings good_fit_achieved = False while good_fit_achieved is False: good_fit_achieved = True if verbose is True: print("- Fitting the classifier...") # Fit the algorithm algorithm.fit(training_set) if verbose is True: print("- Clustering back the initial data...") # Predict the more likely cluster for each observation predicted = algorithm.fit_predict(training_set) if verbose is True: print("- Check for abnormalities...") # Check for abnormalities and prune the training set until none found occurences = dict(collections.Counter(predicted)) masks = [np.array([True]*len(training_set))] for microstate in occurences: # is the frequency of one microstate inferior to a treshold if occurences[microstate] < len(data)*occurence_rejection_treshold: good_fit_achieved = False refitting += 1 # Increment the refitting print("NeuroKit Warning: eeg_microstates(): detected some outliers: refitting the classifier (n=" + str(refitting) + ").") masks.append(predicted!=microstate) mask = np.all(masks, axis=0) training_set = training_set[mask] return(algorithm)
0.00356
def bokeh_tree(name, rawtext, text, lineno, inliner, options=None, content=None): ''' Link to a URL in the Bokeh GitHub tree, pointing to appropriate tags for releases, or to master otherwise. The link text is simply the URL path supplied, so typical usage might look like: .. code-block:: none All of the examples are located in the :bokeh-tree:`examples` subdirectory of your Bokeh checkout. Returns 2 part tuple containing list of nodes to insert into the document and a list of system messages. Both are allowed to be empty. ''' app = inliner.document.settings.env.app tag = app.env.config['version'] if '-' in tag: tag = 'master' url = "%s/tree/%s/%s" % (_BOKEH_GH, tag, text) options = options or {} set_classes(options) node = nodes.reference(rawtext, text, refuri=url, **options) return [node], []
0.00221
def _update_limits_from_api(self): """ Call the service's API action to retrieve limit/quota information, and update AwsLimit objects in ``self.limits`` with this information. """ self.connect_resource() summary = self.resource_conn.AccountSummary() for k, v in sorted(summary.summary_map.items()): if k in self.API_TO_LIMIT_NAME: # this is a usage for one of our limits lname = self.API_TO_LIMIT_NAME[k] # if len(self.limits[lname].get_current_usage()) < 1: self.limits[lname]._add_current_usage(v) elif k.endswith('Quota') and k[:-5] in self.API_TO_LIMIT_NAME: # quota for one of our limits lname = self.API_TO_LIMIT_NAME[k[:-5]] self.limits[lname]._set_api_limit(v) else: logger.debug("Ignoring IAM AccountSummary attribute: %s", k)
0.002094
def _set_igmp_statistics(self, v, load=False): """ Setter method for igmp_statistics, mapped from YANG variable /igmp_snooping_state/igmp_statistics/igmp_statistics (list) If this variable is read-only (config: false) in the source YANG file, then _set_igmp_statistics is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_igmp_statistics() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("interface_name",igmp_statistics_.igmp_statistics, yang_name="igmp-statistics", rest_name="igmp-statistics", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-name', extensions={u'tailf-common': {u'callpoint': u'mc_hms-igmp-interface-statistics-igmp-statistics-2'}}), is_container='list', yang_name="igmp-statistics", rest_name="igmp-statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc_hms-igmp-interface-statistics-igmp-statistics-2'}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='list', is_config=False) except (TypeError, ValueError): raise ValueError({ 'error-string': """igmp_statistics must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("interface_name",igmp_statistics_.igmp_statistics, yang_name="igmp-statistics", rest_name="igmp-statistics", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='interface-name', extensions={u'tailf-common': {u'callpoint': u'mc_hms-igmp-interface-statistics-igmp-statistics-2'}}), is_container='list', yang_name="igmp-statistics", rest_name="igmp-statistics", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'mc_hms-igmp-interface-statistics-igmp-statistics-2'}}, namespace='urn:brocade.com:mgmt:brocade-mc-hms-operational', defining_module='brocade-mc-hms-operational', yang_type='list', is_config=False)""", }) self.__igmp_statistics = t if hasattr(self, '_set'): self._set()
0.004647
def as_dict(self): """ create a dict based on class attributes """ odict = OrderedDict() for name in self._order: attr_value = getattr(self, name) if isinstance(attr_value, List): _list = [] for item in attr_value: _list.append((item.as_dict() if isinstance(item, Entity) else item)) odict[name] = _list elif isinstance(attr_value, Entity): odict[name] = attr_value.as_dict() else: odict[name] = getattr(self, name) return odict
0.004902
def tar(self, appname, appversion): """ Given an app name and version to be used in the tarball name, create a tar.bz2 file with all of this folder's contents inside. Return a Build object with attributes for appname, appversion, time, and path. """ name_tmpl = '%(app)s-%(version)s-%(time)s.tar.bz2' time = utc.now() name = name_tmpl % {'app': appname, 'version': appversion, 'time': time.strftime('%Y-%m-%dT%H-%M')} if not os.path.exists(TARBALL_HOME): os.mkdir(TARBALL_HOME) tarball = os.path.join(TARBALL_HOME, name) tar_params = {'filename': tarball, 'folder': self.folder} tar_result = run('tar -C %(folder)s -cjf %(filename)s .' % tar_params) tar_result.raise_for_status() return Build(appname, appversion, time, tarball)
0.002179
def Ravipudi_Godbold(m, x, D, rhol, rhog, Cpl, kl, mug, mu_b, mu_w=None): r'''Calculates the two-phase non-boiling heat transfer coefficient of a liquid and gas flowing inside a tube of any inclination, as in [1]_ and reviewed in [2]_. .. math:: Nu = \frac{h_{TP} D}{k_l} = 0.56 \left(\frac{V_{gs}}{V_{ls}} \right)^{0.3}\left(\frac{\mu_g}{\mu_l}\right)^{0.2} Re_{ls}^{0.6} Pr_l^{1/3}\left(\frac{\mu_b}{\mu_w}\right)^{0.14} Parameters ---------- m : float Mass flow rate [kg/s] x : float Quality at the specific tube interval [-] D : float Diameter of the tube [m] rhol : float Density of the liquid [kg/m^3] rhog : float Density of the gas [kg/m^3] Cpl : float Constant-pressure heat capacity of liquid [J/kg/K] kl : float Thermal conductivity of liquid [W/m/K] mug : float Viscosity of gas [Pa*s] mu_b : float Viscosity of liquid at bulk conditions (average of inlet/outlet temperature) [Pa*s] mu_w : float, optional Viscosity of liquid at wall temperature [Pa*s] Returns ------- h : float Heat transfer coefficient [W/m^2/K] Notes ----- If the viscosity at the wall temperature is not given, the liquid viscosity correction is not applied. Developed with a vertical pipe, superficial gas/liquid velocity ratios of 1-90, in the froth regime, and for fluid mixtures of air and water, toluene, benzene, and methanol. Examples -------- >>> Ravipudi_Godbold(m=1, x=.9, D=.3, rhol=1000, rhog=2.5, Cpl=2300, kl=.6, mug=1E-5, mu_b=1E-3, mu_w=1.2E-3) 299.3796286459285 References ---------- .. [1] Ravipudi, S., and Godbold, T., The Effect of Mass Transfer on Heat Transfer Rates for Two-Phase Flow in a Vertical Pipe, Proceedings 6th International Heat Transfer Conference, Toronto, V. 1, p. 505-510, 1978. .. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L. Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with Seven Sets of Experimental Data, Including Flow Pattern and Tube Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1, 1999): 15-40. doi:10.1080/014576399271691. ''' Vgs = m*x/(rhog*pi/4*D**2) Vls = m*(1-x)/(rhol*pi/4*D**2) Prl = Prandtl(Cp=Cpl, mu=mu_b, k=kl) Rels = D*Vls*rhol/mu_b Nu = 0.56*(Vgs/Vls)**0.3*(mug/mu_b)**0.2*Rels**0.6*Prl**(1/3.) if mu_w: Nu *= (mu_b/mu_w)**0.14 return Nu*kl/D
0.006114
def to_aes_key(password): """Compute/Derive a key to be used in AES encryption from the password. To maintain compatibility with the reference implementation the resulting key should be a sha256 hash of the sha256 hash of the password """ password_hash = hashlib.sha256(password.encode('utf-8')).digest() return hashlib.sha256(password_hash).digest()
0.00266
def report(issues, show_urls=False): """Summary report about a list of issues, printing number and title. """ # titles may have unicode in them, so we must encode everything below if show_urls: for i in issues: print(u'#%d: %s' % (i['number'], i['title'].replace(u'`', u'``'))) else: for i in issues: print(u'* %d: %s' % (i['number'], i['title'].replace(u'`', u'``')))
0.004292
def set_number(self, key, value): """ set a key's value """ storage = self.storage if not isinstance(value, int): logger.error("set_number: Value must be an integer") return try: lock.acquire() storage[key] = value finally: self.storage._p_changed = True lock.release() return storage[key]
0.004762
def get_default_config_help(self): """ Returns the help text for the configuration options for this handler """ config = super(StatsiteHandler, self).get_default_config_help() config.update({ 'host': '', 'tcpport': '', 'udpport': '', 'timeout': '', }) return config
0.005391
def generate_hash_comment(file_path): """ Read the file at the given file_path and return a string of the format # SHA1:da39a3ee5e6b4b0d3255bfef95601890afd80709 which is the hex representation of the SHA1 hash of the file content """ with open(file_path, 'rb') as fp: hexdigest = hashlib.sha1(fp.read().strip()).hexdigest() return "# SHA1:{0}\n".format(hexdigest)
0.002646
def max(self): """Return the maximum of ``self``. See Also -------- numpy.amax min """ results = [x.ufuncs.max() for x in self.elem] return np.max(results)
0.009091
def run(self): """Run loading of movie appearances. The wiki page structure for this part cannot be easily handled by simple xpath queries. We need to iterate over the respective portion of the page and parse appearances. """ # make all requests via a cache instance request_cache = cache.get_request_cache() # DB session to operate in session = client.get_client().create_session() # clear completion flag for this task self.mark_incomplete() # list of universes seen in character appearances universes = [] # don't auto-flush the session for queries, this causes issues with the 'id' field of newly # created MovieAppearance instances with session.no_autoflush: # get all movies movies = session.query(models.Movie).all() # iterate over all movies and build appearance objects for movie in movies: # retrieve movie article, keep main article content only, parse article = request_cache.get("http://marvel.wikia.com" + movie.url, xpath="//article[@id='WikiaMainContent']", rate_limit=0.5) doc = html.fromstring(article) # find heading for appearances, this is a span inside an h2; go to the h2 node = doc.xpath("//span[@id='Appearances']")[0] node = node.getparent() # Appearance type is given by <p><b>... some text ...</b></p> tags. Sometimes the first # group of appearances carries no such label, assume it's the featured characters. appearance_type = "Featured Characters" # walk along the tree; character lists are in <ul>s, labels in <p>s; # the next h2 ends the character listing node = node.getnext() while node is not None and node.tag != 'h2': if node.tag == 'ul' and ('characters' in appearance_type.lower() or 'villains' in appearance_type.lower()): # starts a new list of stuff; only enter here if the previous label was for characters; # use iter() to iterate over all 'li' items (also those of nested lists) for li in node.iter('li'): # inside the list element, find all 'a's; iterate over child nodes, don't use iter(), # since we want don't want to find 'a's of sub-elements in a nested list here for a in li: if a.tag != 'a': continue # there are 'a's in the list that wrap imags, don't use these; also don't use # links that lead to somewhere else than the wiki if "image" in a.get("class", "") or not a.get("href").startswith("/wiki/"): continue match = re.search(r'\(.*?\)', a.get('href')) if match: universes.append(match.group()[1:-1]) # accept the first matching href, build a new appearance object, then skip to next li try: character = session.query(models.Character) \ .filter(models.Character.url == a.get("href")) \ .one() # -- start documentation include: many-to-many-generation appearance = models.MovieAppearance(movie_id=movie.id, character_id=character.id, appearance_type=appearance_type) session.add(appearance) # -- end documentation include: many-to-many-generation except NoResultFound: # none found, ignore pass # break looping over 'a's once we have found one, go to next 'li' break elif node.tag == 'p': # new character class (or label for locations, items, ...) appearance_type = " ".join(node.itertext()).strip().strip(':').strip() node = node.getnext() print("\nNumber of character appearances per universe: ") print(pd.Series(data=universes).value_counts()) # done, save all data, finalize task session.commit() session.close() self.mark_complete()
0.006295
def sensor_offsets_encode(self, mag_ofs_x, mag_ofs_y, mag_ofs_z, mag_declination, raw_press, raw_temp, gyro_cal_x, gyro_cal_y, gyro_cal_z, accel_cal_x, accel_cal_y, accel_cal_z): ''' Offsets and calibrations values for hardware sensors. This makes it easier to debug the calibration process. mag_ofs_x : magnetometer X offset (int16_t) mag_ofs_y : magnetometer Y offset (int16_t) mag_ofs_z : magnetometer Z offset (int16_t) mag_declination : magnetic declination (radians) (float) raw_press : raw pressure from barometer (int32_t) raw_temp : raw temperature from barometer (int32_t) gyro_cal_x : gyro X calibration (float) gyro_cal_y : gyro Y calibration (float) gyro_cal_z : gyro Z calibration (float) accel_cal_x : accel X calibration (float) accel_cal_y : accel Y calibration (float) accel_cal_z : accel Z calibration (float) ''' return MAVLink_sensor_offsets_message(mag_ofs_x, mag_ofs_y, mag_ofs_z, mag_declination, raw_press, raw_temp, gyro_cal_x, gyro_cal_y, gyro_cal_z, accel_cal_x, accel_cal_y, accel_cal_z)
0.00545
def receive(self, timeout=None): """Receive data through websocket""" log.debug('Receiving') if not self._socket: log.warn('No connection') return try: if timeout: rv = self._socket.poll(timeout) if not rv: log.info('Connection timeouted') return 'Quit' data = self._socket.recv_bytes() except Exception: log.error('Connection lost') return 'Quit' log.debug('Got %s' % data) return data.decode('utf-8')
0.003317
def view_package_info(self, package: str='') -> str: '''View package detail information.''' output, _ = self._execute( '-s', self.device_sn, 'shell', 'dumpsys', 'package', package) return output
0.017391
def traverse_imports(names): """ Walks over all the names imported in a dotted_as_names node. """ pending = [names] while pending: node = pending.pop() if node.type == token.NAME: yield node.value elif node.type == syms.dotted_name: yield "".join([ch.value for ch in node.children]) elif node.type == syms.dotted_as_name: pending.append(node.children[0]) elif node.type == syms.dotted_as_names: pending.extend(node.children[::-2]) else: raise AssertionError("unknown node type")
0.001653
def import_seaborn(): '''import seaborn and handle deprecation of apionly module''' with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") try: import seaborn.apionly as sns if (w and issubclass(w[-1].category, UserWarning) and ("seaborn.apionly module" in str(w[-1].message))): raise ImportError except ImportError: import seaborn as sns finally: warnings.resetwarnings() return sns
0.001866
def update_director(self, service_id, version_number, name_key, **kwargs): """Update the director for a particular service and version.""" body = self._formdata(kwargs, FastlyDirector.FIELDS) content = self._fetch("/service/%s/version/%d/director/%s" % (service_id, version_number, name_key), method="PUT", body=body) return FastlyDirector(self, content)
0.019337
def get(cls, id=None, condition=None, fields=None, cache=False, engine_name=None, **kwargs): """ Get object from Model, if given fields, then only fields will be loaded into object, other properties will be Lazy if cache is True or defined __cacheable__=True in Model class, it'll use cache first """ if id is None and condition is None: return None can_cacheable = (cache or getattr(cls, '__cacheable__', None)) and \ isinstance(id, (int, long, str, unicode)) if can_cacheable: #send 'get_object' topic to get cached object obj = dispatch.get(cls, 'get_object', id) if obj: return obj if condition is not None: _cond = condition else: if is_condition(id): _cond = id else: _cond = cls.c[cls._primary_field] == id #todo # if isinstance(id, (int, long)): # _cond = cls.c.id==id # elif isinstance(id, (str, unicode)) and id.isdigit(): # _cond = cls.c.id==int(id) # else: # _cond = id #if there is no cached object, then just fetch from database obj = cls.filter(_cond, **kwargs).fields(*(fields or [])).one() if obj and cache or getattr(cls, '__cacheable__', None): dispatch.call(cls, 'set_object', instance=obj) return obj
0.007218
def get_all_requisite_objectives(self, objective_id=None): """Gets a list of Objectives that are the requisites for the given Objective including the requisites of the requisites, and so on. In plenary mode, the returned list contains all of the immediate requisites, or an error results if an Objective is not found or inaccessible. Otherwise, inaccessible Objectives may be omitted from the list and may present the elements in any order including returning a unique set. arg: objective_id (osid.id.Id): Id of the Objective return: (osid.learning.ObjectiveList) - the returned Objective list raise: NotFound - objective_id not found raise: NullArgument - objective_id is null raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure compliance: mandatory - This method must be implemented. """ # This should be re-implemented if and when handcar supports # getting all requisites directly requisites = list() requisite_ids = list() all_requisites = self._get_requisites_recursively(objective_id, requisites, requisite_ids) return objects.ObjectiveList(all_requisites)
0.002299
def reverse( self, query, reverse_geocode_preference=('StreetAddress', ), maximum_responses=25, filtering='', exactly_one=DEFAULT_SENTINEL, timeout=DEFAULT_SENTINEL, ): """ Return an address by location point. :param query: The coordinates for which you wish to obtain the closest human-readable addresses. :type query: :class:`geopy.point.Point`, list or tuple of ``(latitude, longitude)``, or string as ``"%(latitude)s, %(longitude)s"``. :param list reverse_geocode_preference: Enable to set expected results type. It can be `StreetAddress` or `PositionOfInterest`. Default is set to `StreetAddress`. :param int maximum_responses: The maximum number of responses to ask to the API in the query body. :param str filtering: Provide string that help setting geocoder filter. It contains an XML string. See examples in documentation and ignfrance.py file in directory tests. :param bool exactly_one: Return one result or a list of results, if available. .. versionchanged:: 1.14.0 Default value for ``exactly_one`` was ``False``, which differs from the conventional default across geopy. Please always pass this argument explicitly, otherwise you would get a warning. In geopy 2.0 the default value will become ``True``. :param int timeout: Time, in seconds, to wait for the geocoding service to respond before raising a :class:`geopy.exc.GeocoderTimedOut` exception. Set this only if you wish to override, on this call only, the value set during the geocoder's initialization. :rtype: ``None``, :class:`geopy.location.Location` or a list of them, if ``exactly_one=False``. """ if exactly_one is DEFAULT_SENTINEL: warnings.warn('%s.reverse: default value for `exactly_one` ' 'argument will become True in geopy 2.0. ' 'Specify `exactly_one=False` as the argument ' 'explicitly to get rid of this warning.' % type(self).__name__, DeprecationWarning, stacklevel=2) exactly_one = False sub_request = """ <ReverseGeocodeRequest> {reverse_geocode_preference} <Position> <gml:Point> <gml:pos>{query}</gml:pos> </gml:Point> {filtering} </Position> </ReverseGeocodeRequest> """ xml_request = self.xml_request.format( method_name='ReverseGeocodeRequest', sub_request=sub_request, maximum_responses=maximum_responses ) for pref in reverse_geocode_preference: if pref not in ('StreetAddress', 'PositionOfInterest'): raise GeocoderQueryError( '`reverse_geocode_preference` must contain ' 'one or more of: StreetAddress, PositionOfInterest' ) point = self._coerce_point_to_string(query, "%(lat)s %(lon)s") reverse_geocode_preference = '\n'.join(( '<ReverseGeocodePreference>%s</ReverseGeocodePreference>' % pref for pref in reverse_geocode_preference )) request_string = xml_request.format( maximum_responses=maximum_responses, query=point, reverse_geocode_preference=reverse_geocode_preference, filtering=filtering ) url = "?".join((self.api, urlencode({'xls': request_string}))) logger.debug("%s.reverse: %s", self.__class__.__name__, url) raw_xml = self._request_raw_content(url, timeout) return self._parse_xml( raw_xml, exactly_one=exactly_one, is_reverse=True, is_freeform='false' )
0.001209
def check_float_param(self, param, low, high, name): """ Check if the value of the given parameter is in the given range and a float. Designed for testing parameters like `mu` and `eps`. To pass this function the variable `param` must be able to be converted into a float with a value between `low` and `high`. **Args:** * `param` : parameter to check (float or similar) * `low` : lowest allowed value (float), or None * `high` : highest allowed value (float), or None * `name` : name of the parameter (string), it is used for an error message **Returns:** * `param` : checked parameter converted to float """ try: param = float(param) except: raise ValueError( 'Parameter {} is not float or similar'.format(name) ) if low != None or high != None: if not low <= param <= high: raise ValueError('Parameter {} is not in range <{}, {}>' .format(name, low, high)) return param
0.008651
def resolve(self, value=None): """ Resolve the current expression against the supplied value """ # If we still have an uninitialized matcher init it now if self.matcher: self._init_matcher() # Evaluate the current set of matchers forming the expression matcher = self.evaluate() try: value = self._transform(value) self._assertion(matcher, value) except AssertionError as ex: # By re-raising here the exception we reset the traceback raise ex finally: # Reset the state of the object so we can use it again if self.deferred: self.reset()
0.002841
def get_build_work_items_refs_from_commits(self, commit_ids, project, build_id, top=None): """GetBuildWorkItemsRefsFromCommits. Gets the work items associated with a build, filtered to specific commits. :param [str] commit_ids: A comma-delimited list of commit IDs. :param str project: Project ID or project name :param int build_id: The ID of the build. :param int top: The maximum number of work items to return, or the number of commits to consider if no commit IDs are specified. :rtype: [ResourceRef] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if build_id is not None: route_values['buildId'] = self._serialize.url('build_id', build_id, 'int') query_parameters = {} if top is not None: query_parameters['$top'] = self._serialize.query('top', top, 'int') content = self._serialize.body(commit_ids, '[str]') response = self._send(http_method='POST', location_id='5a21f5d2-5642-47e4-a0bd-1356e6731bee', version='5.0', route_values=route_values, query_parameters=query_parameters, content=content) return self._deserialize('[ResourceRef]', self._unwrap_collection(response))
0.006143
def handle_tick(self): """Internal callback every time 1 second has passed.""" self.uptime += 1 for name, interval in self.ticks.items(): if interval == 0: continue self.tick_counters[name] += 1 if self.tick_counters[name] == interval: self.graph_input(self.TICK_STREAMS[name], self.uptime) self.tick_counters[name] = 0
0.004651
def update_progress(cls, progress, starttime): """ Display an ascii progress bar while processing operation. """ width, _height = click.get_terminal_size() if not width: return duration = datetime.utcnow() - starttime hours, remainder = divmod(duration.seconds, 3600) minutes, seconds = divmod(remainder, 60) size = int(width * .6) status = "" if isinstance(progress, int): progress = float(progress) if not isinstance(progress, float): progress = 0 status = 'error: progress var must be float\n' cls.echo(type(progress)) if progress < 0: progress = 0 status = 'Halt...\n' if progress >= 1: progress = 1 # status = 'Done...\n' block = int(round(size * progress)) text = ('\rProgress: [{0}] {1:.2%} {2} {3:0>2}:{4:0>2}:{5:0>2} ' ''.format('#' * block + '-' * (size - block), progress, status, hours, minutes, seconds)) sys.stdout.write(text) sys.stdout.flush()
0.001754
def retry(self, index=None, params=None): """ `<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-retry-policy.html>`_ :arg index: The name of the indices (comma-separated) whose failed lifecycle step is to be retry """ return self.transport.perform_request( "POST", _make_path(index, "_ilm", "retry"), params=params )
0.00489
def fire_event(self, evt_name, *args, **kwargs): """Fire an event. :params evt_name: event name :params args: arguments passed to the event listeners :params kwargs: keyword arguments passed to the event listeners """ listeners = self.__get_listeners(evt_name) evt = self.generate_event(evt_name) for listener in listeners: listener(evt, *args, **kwargs)
0.005764
def prepare_static_data(self, data): """ If the user defined static fields, then process them into their visible (display) values """ d = self.obj.to_dict() d.update(data.copy()) for f in self.get_fields(): if f['static'] and f['name'] in d: v = make_view_field(f, self.obj, self.types_convert_map, self.fields_convert_map, d[f['name']]) d[f['name']] = v['display'] return d
0.006466
def complete_json_get(self, cmd_param_text, full_cmd, *rest): """ TODO: prefetch & parse znodes & suggest keys """ complete_keys = partial(complete_values, ["key1", "key2", "#{key1.key2}"]) completers = [self._complete_path, complete_keys, complete_labeled_boolean("recursive")] return complete(completers, cmd_param_text, full_cmd, *rest)
0.010782
def size(self, width=None, height=None): u'''Set/get window size.''' sc = System.Console if width is not None and height is not None: sc.BufferWidth, sc.BufferHeight = width,height else: return sc.BufferWidth, sc.BufferHeight if width is not None and height is not None: sc.WindowWidth, sc.WindowHeight = width,height else: return sc.WindowWidth - 1, sc.WindowHeight - 1
0.008368
async def handshake( self, origins: Optional[Sequence[Optional[Origin]]] = None, available_extensions: Optional[Sequence[ServerExtensionFactory]] = None, available_subprotocols: Optional[Sequence[Subprotocol]] = None, extra_headers: Optional[HeadersLikeOrCallable] = None, ) -> str: """ Perform the server side of the opening handshake. If provided, ``origins`` is a list of acceptable HTTP Origin values. Include ``None`` if the lack of an origin is acceptable. If provided, ``available_extensions`` is a list of supported extensions in the order in which they should be used. If provided, ``available_subprotocols`` is a list of supported subprotocols in order of decreasing preference. If provided, ``extra_headers`` sets additional HTTP response headers. It can be a :class:`~websockets.http.Headers` instance, a :class:`~collections.abc.Mapping`, an iterable of ``(name, value)`` pairs, or a callable taking the request path and headers in arguments and returning one of the above. Raise :exc:`~websockets.exceptions.InvalidHandshake` if the handshake fails. Return the path of the URI of the request. """ path, request_headers = await self.read_http_request() # Hook for customizing request handling, for example checking # authentication or treating some paths as plain HTTP endpoints. early_response = self.process_request(path, request_headers) if isinstance(early_response, Awaitable): early_response = await early_response # Change the response to a 503 error if the server is shutting down. if not self.ws_server.is_serving(): early_response = ( http.HTTPStatus.SERVICE_UNAVAILABLE, [], b"Server is shutting down.\n", ) if early_response is not None: raise AbortHandshake(*early_response) key = check_request(request_headers) self.origin = self.process_origin(request_headers, origins) extensions_header, self.extensions = self.process_extensions( request_headers, available_extensions ) protocol_header = self.subprotocol = self.process_subprotocol( request_headers, available_subprotocols ) response_headers = Headers() build_response(response_headers, key) if extensions_header is not None: response_headers["Sec-WebSocket-Extensions"] = extensions_header if protocol_header is not None: response_headers["Sec-WebSocket-Protocol"] = protocol_header if extra_headers is not None: if callable(extra_headers): extra_headers = extra_headers(path, self.request_headers) if isinstance(extra_headers, Headers): extra_headers = extra_headers.raw_items() elif isinstance(extra_headers, collections.abc.Mapping): extra_headers = extra_headers.items() for name, value in extra_headers: response_headers[name] = value response_headers.setdefault("Date", email.utils.formatdate(usegmt=True)) response_headers.setdefault("Server", USER_AGENT) self.write_http_response(http.HTTPStatus.SWITCHING_PROTOCOLS, response_headers) self.connection_open() return path
0.00171
def colorify_logo(cls, home=False): """ Print the colored logo based on global results. :param home: Tell us if we have to print the initial coloration. :type home: bool """ if not PyFunceble.CONFIGURATION["quiet"]: # The quiet mode is not activated. to_print = [] if home: # We have to print the initial logo. for line in PyFunceble.ASCII_PYFUNCEBLE.split("\n"): # We loop through each lines of the ASCII representation # of PyFunceble. # And we append to the data to print the currently read # line with the right coloration. to_print.append( PyFunceble.Fore.YELLOW + line + PyFunceble.Fore.RESET ) elif PyFunceble.INTERN["counter"]["percentage"]["up"] >= 50: # The percentage of up is greater or equal to 50%. for line in PyFunceble.ASCII_PYFUNCEBLE.split("\n"): # We loop through each lines of the ASCII representation # of PyFunceble. # And we append to the data to print the currently read # line with the right coloration. to_print.append( PyFunceble.Fore.GREEN + line + PyFunceble.Fore.RESET ) else: # The percentage of up is less than 50%. for line in PyFunceble.ASCII_PYFUNCEBLE.split("\n"): # We loop through each lines of the ASCII representation # of PyFunceble. # And we append to the data to print the currently read # line with the right coloration. to_print.append(PyFunceble.Fore.RED + line + PyFunceble.Fore.RESET) print("\n".join(to_print))
0.001518
def load_output_meta(self): """ Load descriptive output meta data from a JSON file in the input directory. """ options = self.options file_path = os.path.join(options.inputdir, 'output.meta.json') with open(file_path) as infile: return json.load(infile)
0.009585
def _attrs_to_tuple(obj, attrs): """ Create a tuple of all values of *obj*'s *attrs*. """ return tuple(getattr(obj, a.name) for a in attrs)
0.006452
def get_proxy(self, proxystr=''): """ Get the proxy given the option passed on the command line. If an empty string is passed it looks at the HTTP_PROXY environment variable. """ if not proxystr: proxystr = os.environ.get('HTTP_PROXY', '') if proxystr: if '@' in proxystr: user_password, server_port = proxystr.split('@', 1) if ':' in user_password: user, password = user_password.split(':', 1) else: user = user_password prompt = 'Password for %s@%s: ' % (user, server_port) password = urllib.quote(getpass.getpass(prompt)) return '%s:%s@%s' % (user, password, server_port) else: return proxystr else: return None
0.002257
def event_key_pressed(self, event): """ Do an "invert shift" for user inputs: Convert all lowercase letters to uppercase and vice versa. """ char = event.char if not char: return if char in string.ascii_letters: char = invert_shift(char) self.user_input_queue.put(char) # Don't insert the char in the text widget, because it will be echoed # back from the machine! return "break"
0.00409
def inject(fun: Callable) -> Callable: """ A decorator for injection dependencies into functions/methods, based on their type annotations. .. code-block:: python class SomeClass: @inject def __init__(self, my_dep: DepType) -> None: self.my_dep = my_dep .. important:: On the opposite to :class:`~haps.Inject`, dependency is injected at the moment of method invocation. In case of decorating `__init__`, dependency is injected when `SomeClass` instance is created. :param fun: callable with annotated parameters :return: decorated callable """ sig = inspect.signature(fun) injectables: Dict[str, Any] = {} for name, param in sig.parameters.items(): type_ = param.annotation if name == 'self': continue else: injectables[name] = type_ @wraps(fun) def _inner(*args, **kwargs): container = Container() for n, t in injectables.items(): if n not in kwargs: kwargs[n] = container.get_object(t) return fun(*args, **kwargs) return _inner
0.000858
def last_year(today: datetime=None, tz=None): """ Returns last year begin (inclusive) and end (exclusive). :param today: Some date (defaults current datetime) :param tz: Timezone (defaults pytz UTC) :return: begin (inclusive), end (exclusive) """ if today is None: today = datetime.utcnow() end = datetime(day=1, month=1, year=today.year) end_incl = end - timedelta(seconds=1) begin = datetime(day=1, month=1, year=end_incl.year) return localize_time_range(begin, end, tz)
0.005725
def save(self, target=None, shp=None, shx=None, dbf=None): """Save the shapefile data to three files or three file-like objects. SHP and DBF files can also be written exclusively using saveShp, saveShx, and saveDbf respectively.""" # TODO: Create a unique filename for target if None. if shp: self.saveShp(shp) if shx: self.saveShx(shx) if dbf: self.saveDbf(dbf) elif target: self.saveShp(target) self.shp.close() self.saveShx(target) self.shx.close() self.saveDbf(target) self.dbf.close()
0.004425
def install_sql_hook(): """If installed this causes Django's queries to be captured.""" try: from django.db.backends.utils import CursorWrapper except ImportError: from django.db.backends.util import CursorWrapper try: real_execute = CursorWrapper.execute real_executemany = CursorWrapper.executemany except AttributeError: # XXX(mitsuhiko): On some very old django versions (<1.6) this # trickery would have to look different but I can't be bothered. return def record_sql(vendor, alias, start, duration, sql, params): def processor(data): real_sql, real_params = format_sql(sql, params) if real_params: real_sql = real_sql % tuple(real_params) # maybe category to 'django.%s.%s' % (vendor, alias or # 'default') ? data.update({ 'message': real_sql, 'category': 'query', }) breadcrumbs.record(processor=processor) def record_many_sql(vendor, alias, start, sql, param_list): duration = time.time() - start for params in param_list: record_sql(vendor, alias, start, duration, sql, params) def execute(self, sql, params=None): start = time.time() try: return real_execute(self, sql, params) finally: record_sql(self.db.vendor, getattr(self.db, 'alias', None), start, time.time() - start, sql, params) def executemany(self, sql, param_list): start = time.time() try: return real_executemany(self, sql, param_list) finally: record_many_sql(self.db.vendor, getattr(self.db, 'alias', None), start, sql, param_list) CursorWrapper.execute = execute CursorWrapper.executemany = executemany breadcrumbs.ignore_logger('django.db.backends')
0.000513
def expire_data(self): """Expire data within the samples collection.""" # Do we need to start deleting stuff? while self.sample_storage_size() > self.samples_cap: # This should return the 'oldest' record in samples record = self.database[self.sample_collection].find().sort('import_time',pymongo.ASCENDING).limit(1)[0] self.remove_sample(record['md5'])
0.009662
def checkmagic(self): """Verify that self is a valid CArchive. Magic signature is at end of the archive.""" #magic is at EOF; if we're embedded, we need to figure where that is if self.len: self.lib.seek(self.start+self.len, 0) else: self.lib.seek(0, 2) filelen = self.lib.tell() if self.len: self.lib.seek(self.start+self.len-self.TRLLEN, 0) else: self.lib.seek(-self.TRLLEN, 2) (magic, totallen, tocpos, toclen, pyvers) = struct.unpack(self.TRLSTRUCT, self.lib.read(self.TRLLEN)) if magic != self.MAGIC: raise RuntimeError("%s is not a valid %s archive file" % (self.path, self.__class__.__name__)) self.pkgstart = filelen - totallen if self.len: if totallen != self.len or self.pkgstart != self.start: raise RuntimeError, "Problem with embedded archive in %s" % self.path self.tocpos, self.toclen = tocpos, toclen
0.00653
def makeW(r1, r2, r3, r4=0): """ matrix involved in quaternion rotation """ W = np.asarray([ [r4, r3, -r2, r1], [-r3, r4, r1, r2], [r2, -r1, r4, r3], [-r1, -r2, -r3, r4]]) return W
0.00431
def _compute_dk_dtau(self, tau, n): r"""Evaluate :math:`dk/d\tau` at the specified locations with the specified derivatives. Parameters ---------- tau : :py:class:`Matrix`, (`M`, `D`) `M` inputs with dimension `D`. n : :py:class:`Array`, (`D`,) Degree of derivative with respect to each dimension. Returns ------- dk_dtau : :py:class:`Array`, (`M`,) Specified derivative at specified locations. """ # Construct the derivative pattern: # For each dimension, this will contain the index of the dimension # repeated a number of times equal to the order of derivative with # respect to that dimension. # Example: For d^3 k(x, y, z) / dx^2 dy, n would be [2, 1, 0] and # deriv_pattern should be [0, 0, 1]. For k(x, y, z) deriv_pattern is []. deriv_pattern = [] for idx in xrange(0, len(n)): deriv_pattern.extend(n[idx] * [idx]) deriv_pattern = scipy.asarray(deriv_pattern, dtype=int) # Handle non-derivative case separately for efficiency: if len(deriv_pattern) == 0: return self._compute_k(tau) else: # Compute all partitions of the deriv_pattern: deriv_partitions = generate_set_partitions(deriv_pattern) # Compute the requested derivative using the multivariate Faa di Bruno's equation: dk_dtau = scipy.zeros(tau.shape[0]) # Loop over the partitions: for partition in deriv_partitions: dk_dtau += self._compute_dk_dtau_on_partition(tau, partition) return dk_dtau
0.004084
def decons_obs_group_ids(comp_ids, obs_ids, shape, labels, xnull): """ reconstruct labels from observed group ids Parameters ---------- xnull: boolean, if nulls are excluded; i.e. -1 labels are passed through """ if not xnull: lift = np.fromiter(((a == -1).any() for a in labels), dtype='i8') shape = np.asarray(shape, dtype='i8') + lift if not is_int64_overflow_possible(shape): # obs ids are deconstructable! take the fast route! out = decons_group_index(obs_ids, shape) return out if xnull or not lift.any() \ else [x - y for x, y in zip(out, lift)] i = unique_label_indices(comp_ids) i8copy = lambda a: a.astype('i8', subok=False, copy=True) return [i8copy(lab[i]) for lab in labels]
0.002516
def name(self): """ Returns name of the digest """ if not hasattr(self, 'digest_name'): self.digest_name = Oid(libcrypto.EVP_MD_type(self.digest) ).longname() return self.digest_name
0.011905
def eventReminder(self, thread_id, time, title, location="", location_id=""): """ Deprecated. Use :func:`fbchat.Client.createPlan` instead """ plan = Plan(time=time, title=title, location=location, location_id=location_id) self.createPlan(plan=plan, thread_id=thread_id)
0.009677
def set_keepalive(self, interval): """ Set a keepalive to occur every ``interval`` on this connection. """ pinger = functools.partial(self.ping, 'keep-alive') self.reactor.scheduler.execute_every(period=interval, func=pinger)
0.007547
def touch(ctx, key, policy, admin_pin, force): """ Manage touch policy for OpenPGP keys. \b KEY Key slot to set (sig, enc or aut). POLICY Touch policy to set (on, off or fixed). """ controller = ctx.obj['controller'] old_policy = controller.get_touch(key) if old_policy == TOUCH_MODE.FIXED: ctx.fail('A FIXED policy cannot be changed!') force or click.confirm('Set touch policy of {.name} key to {.name}?'.format( key, policy), abort=True, err=True) if admin_pin is None: admin_pin = click.prompt('Enter admin PIN', hide_input=True, err=True) controller.set_touch(key, policy, admin_pin.encode('utf8'))
0.002933
def save(self, other: merkle_tree.MerkleTree): """Save this tree into a dumb data object for serialisation. The object must have attributes tree_size:int and hashes:list. """ other.__tree_size = self.__tree_size other.__hashes = self.__hashes
0.007067
def graph(args): """ %prog graph best.edges Convert Celera Assembler's "best.edges" to a GEXF which can be used to feed into Gephi to check the topology of the best overlapping graph. Mutual best edges are represented as thicker edges. Reference: https://github.com/PacificBiosciences/Bioinformatics-Training/blob/master/scripts/CeleraToGephi.py """ p = OptionParser(graph.__doc__) p.add_option("--query", default=-1, type="int", help="Search from node, -1 to select random node, 0 to disable") p.add_option("--contig", help="Search from contigs, use comma to separate") p.add_option("--largest", default=0, type="int", help="Only show largest components") p.add_option("--maxsize", default=500, type="int", help="Max graph size") p.add_option("--nomutualbest", default=False, action="store_true", help="Do not plot mutual best edges as heavy") add_graph_options(p) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) bestedges, = args query = opts.query contig = opts.contig largest = opts.largest frgctg = opts.frgctg edgeweight = not opts.nomutualbest G = read_graph(bestedges, maxerr=opts.maxerr) if largest: H = list(nx.connected_component_subgraphs(G)) c = min(len(H), largest) logging.debug("{0} components found, {1} retained".format(len(H), c)) G = nx.Graph() for x in H[:c]: G.add_edges_from(x.edges()) if query: if query == -1: query = choice(G.nodes()) reads_to_ctgs = parse_ctgs(bestedges, frgctg) if contig: contigs = set(contig.split(",")) core = [k for k, v in reads_to_ctgs.items() if v in contigs] else: ctg = reads_to_ctgs.get(query) core = [k for k, v in reads_to_ctgs.items() if v == ctg] logging.debug("Reads ({0}) extended from the same contig {1}".\ format(len(core), ctg)) # Extract a local neighborhood SG = nx.Graph() H = graph_local_neighborhood(G, query=core, maxsize=opts.maxsize) SG.add_edges_from(H.edges(data=edgeweight)) G = SG seen = [] for n, attrib in G.nodes_iter(data=True): contig = reads_to_ctgs.get(n, "na") attrib['label'] = contig seen.append(contig) c = Counter(seen) cc = ["{0}({1})".format(k, v) for k, v in c.most_common()] print("Contigs: {0}".format(" ".join(cc)), file=sys.stderr) gexf = "best" if query >= 0: gexf += ".{0}".format(query) gexf += ".gexf" nx.write_gexf(G, gexf) logging.debug("Graph written to `{0}` (|V|={1}, |E|={2})".\ format(gexf, len(G), G.size()))
0.002458
def _read_elem_elements(self, fid):
    """Read all FE elements from the file stream. Elements are stored in the
    self.element_data dict. The keys refer to the element types:

    *  3: Triangular grid (three nodes)
    *  8: Quadrangular grid (four nodes)
    * 11: Mixed boundary element
    * 12: Neumann (no-flow) boundary element

    """
    elements = {}
    # read elements
    for element_type in range(0, self.header['nr_element_types']):
        element_list = []
        for element_coordinates in range(
                0, self.header['element_infos'][element_type, 1]):
            element_coordinates_line = fid.readline().lstrip()
            tmp_element = self.element()
            tmp_element.nodes = np.fromstring(element_coordinates_line,
                                              dtype=int, sep=' ')
            tmp_element.xcoords = self.nodes['presort'][tmp_element.nodes - 1, 1]
            tmp_element.zcoords = self.nodes['presort'][tmp_element.nodes - 1, 2]
            element_list.append(tmp_element)
        element_type_number = self.header['element_infos'][element_type, 0]
        elements[element_type_number] = element_list
    self.element_data = elements
0.001412
def decode(s, checksum=True):
    """Convert base58 to binary using BASE58_ALPHABET."""
    v, prefix = to_long(
        BASE58_BASE, lambda c: BASE58_LOOKUP[c], s.encode("utf8"))
    data = from_long(v, prefix, 256, lambda x: x)
    if checksum:
        data, the_hash = data[:-4], data[-4:]
        if utils.hash256(data)[:4] == the_hash:
            return data
        raise ValueError("hashed base58 has bad checksum %s" % s)
    return data
0.002227
def center_cell_text(cell):
    """
    Horizontally center the text within a cell's grid

    Like this::

        +---------+     +---------+
        | foo     | --> |   foo   |
        +---------+     +---------+

    Parameters
    ----------
    cell : dashtable.data2rst.Cell

    Returns
    -------
    cell : dashtable.data2rst.Cell
    """
    lines = cell.text.split('\n')
    cell_width = len(lines[0]) - 2

    truncated_lines = ['']
    for i in range(1, len(lines) - 1):
        truncated = lines[i][2:len(lines[i]) - 2].rstrip()
        truncated_lines.append(truncated)
    truncated_lines.append('')

    max_line_length = get_longest_line_length('\n'.join(truncated_lines))
    remainder = cell_width - max_line_length
    left_width = math.floor(remainder / 2)
    left_space = left_width * ' '
    for i in range(len(truncated_lines)):
        truncated_lines[i] = left_space + truncated_lines[i]
        right_width = cell_width - len(truncated_lines[i])
        truncated_lines[i] += right_width * ' '

    for i in range(1, len(lines) - 1):
        lines[i] = ''.join([
            lines[i][0], truncated_lines[i], lines[i][-1]
        ])

    cell.text = '\n'.join(lines)
    return cell
0.000822
def error(self, message, code=1):
    """
    Prints the error, and exits with the given code.
    """
    sys.stderr.write(message)
    sys.exit(code)
0.012903
def json_decode(value: Union[str, bytes]) -> Any:
    """Returns Python objects for the given JSON string.

    Supports both `str` and `bytes` inputs.
    """
    return json.loads(to_basestring(value))
0.004926
def unquote(cls, string):
    """ Removes quotes from a quoted string.

    Splunk search command quote rules are applied. The enclosing double-quotes, if present, are removed. Escaped
    double-quotes ('\"' or '""') are replaced by a single double-quote ('"').

    **NOTE**

    We are not using a json.JSONDecoder because Splunk quote rules are different than JSON quote rules. A
    json.JSONDecoder does not recognize a pair of double-quotes ('""') as an escaped quote ('"') and will decode
    single-quoted strings ("'") in addition to double-quoted ('"') strings.

    """
    if len(string) == 0:
        return ''

    if string[0] != '"':
        return string

    if len(string) == 1:
        return string

    if string[-1] != '"':
        raise ValueError("Poorly formed string literal: %s" % string)

    def replace(match):
        value = match.group(0)
        if value == '\\\\':
            return '\\'
        if value == '\\"':
            return '"'
        if value == '""':
            return '"'
        if len(value) != 2:
            raise ValueError("Poorly formed string literal: %s" % string)
        return value  # consistent with python handling

    result = re.sub(cls._escaped_quote_re, replace, string[1:-1])
    return result
0.002144
def _fit(self, dataset):
    """Trains a TensorFlow model and returns a TFModel instance with the same args/params pointing to a checkpoint or saved_model on disk.

    Args:
      :dataset: A Spark DataFrame with columns that will be mapped to TensorFlow tensors.

    Returns:
      A TFModel representing the trained model, backed on disk by a TensorFlow checkpoint or saved_model.
    """
    sc = SparkContext.getOrCreate()

    logging.info("===== 1. train args: {0}".format(self.args))
    logging.info("===== 2. train params: {0}".format(self._paramMap))
    local_args = self.merge_args_params()
    logging.info("===== 3. train args + params: {0}".format(local_args))

    if local_args.input_mode == TFCluster.InputMode.TENSORFLOW:
        if dfutil.isLoadedDF(dataset):
            # if just a DataFrame loaded from tfrecords, just point to original source path
            logging.info("Loaded DataFrame of TFRecord.")
            local_args.tfrecord_dir = dfutil.loadedDF[dataset]
        else:
            # otherwise, save as tfrecords and point to save path
            assert local_args.tfrecord_dir, "Please specify --tfrecord_dir to export DataFrame to TFRecord."
            if self.getInputMapping():
                # if input mapping provided, filter only required columns before exporting
                dataset = dataset.select(list(self.getInputMapping()))
            logging.info("Exporting DataFrame {} as TFRecord to: {}".format(dataset.dtypes, local_args.tfrecord_dir))
            dfutil.saveAsTFRecords(dataset, local_args.tfrecord_dir)
            logging.info("Done saving")

    tf_args = self.args.argv if self.args.argv else local_args
    cluster = TFCluster.run(sc, self.train_fn, tf_args, local_args.cluster_size,
                            local_args.num_ps, local_args.tensorboard,
                            local_args.input_mode,
                            driver_ps_nodes=local_args.driver_ps_nodes)
    if local_args.input_mode == TFCluster.InputMode.SPARK:
        # feed data, using a deterministic order for input columns (lexicographic by key)
        input_cols = sorted(self.getInputMapping())
        cluster.train(dataset.select(input_cols).rdd, local_args.epochs)
    cluster.shutdown(grace_secs=30)

    # Run export function, if provided
    if self.export_fn:
        assert local_args.export_dir, "Export function requires --export_dir to be set"
        logging.info("Exporting saved_model (via export_fn) to: {}".format(local_args.export_dir))

        def _export(iterator, fn, args):
            single_node_env(args)
            fn(args)

        # Run on a single exeucutor
        sc.parallelize([1], 1).foreachPartition(lambda it: _export(it, self.export_fn, tf_args))

    return self._copyValues(TFModel(self.args))
0.009782
def code_timer(reset=False):
    '''Sets a global variable for tracking the timer accross multiple files
    '''
    global CODE_TIMER
    if reset:
        CODE_TIMER = CodeTimer()
    else:
        if CODE_TIMER is None:
            return CodeTimer()
        else:
            return CODE_TIMER
0.003356
def dist_sift4(src, tar, max_offset=5, max_distance=0):
    """Return the normalized "common" Sift4 distance between two terms.

    This is a wrapper for :py:meth:`Sift4.dist`.

    Parameters
    ----------
    src : str
        Source string for comparison
    tar : str
        Target string for comparison
    max_offset : int
        The number of characters to search for matching letters
    max_distance : int
        The distance at which to stop and exit

    Returns
    -------
    float
        The normalized Sift4 distance

    Examples
    --------
    >>> round(dist_sift4('cat', 'hat'), 12)
    0.333333333333
    >>> dist_sift4('Niall', 'Neil')
    0.4
    >>> dist_sift4('Colin', 'Cuilen')
    0.5
    >>> dist_sift4('ATCG', 'TAGC')
    0.5

    """
    return Sift4().dist(src, tar, max_offset, max_distance)
0.001205
def _check_algorithm_values(item):
    """Check for misplaced inputs in the algorithms.

    - Identify incorrect boolean values where a choice is required.
    """
    problems = []
    for k, v in item.get("algorithm", {}).items():
        if v is True and k not in ALG_ALLOW_BOOLEANS:
            problems.append("%s set as true" % k)
        elif v is False and (k not in ALG_ALLOW_BOOLEANS and k not in ALG_ALLOW_FALSE):
            problems.append("%s set as false" % k)
    if len(problems) > 0:
        raise ValueError("Incorrect settings in 'algorithm' section for %s:\n%s"
                         "\nSee configuration documentation for supported options:\n%s\n"
                         % (item["description"], "\n".join(problems), ALG_DOC_URL))
0.006605
def _as_rdf_xml(self, ns):
    """
    Return identity details for the element as XML nodes
    """
    self.rdf_identity = self._get_identity(ns)
    elements = []
    elements.append(ET.Element(NS('sbol', 'persistentIdentity'),
                    attrib={NS('rdf', 'resource'): self._get_persistent_identitity(ns)}))
    if self.name is not None:
        name = ET.Element(NS('dcterms', 'title'))
        name.text = self.name
        elements.append(name)
    if self.display_id is not None:
        display_id = ET.Element(NS('sbol', 'displayId'))
        display_id.text = self.display_id
        elements.append(display_id)
    if self.version is not None:
        version = ET.Element(NS('sbol', 'version'))
        version.text = self.version
        elements.append(version)
    if self.was_derived_from is not None:
        elements.append(ET.Element(NS('prov', 'wasDerivedFrom'),
                        attrib={NS('rdf', 'resource'): self.was_derived_from}))
    if self.description is not None:
        description = ET.Element(NS('dcterms', 'description'))
        description.text = self.description
        elements.append(description)
    for a in self.annotations:
        elements.append(a._as_rdf_xml(ns))
    return elements
0.002803
def get_deployments(self, prefix=""):
    """ This endpoint lists all deployments.

        https://www.nomadproject.io/docs/http/deployments.html

        optional_arguments:
          - prefix, (default "") Specifies a string to filter deployments on based on an index prefix.
            This is specified as a querystring parameter.

        returns: list of dicts
        raises:
          - nomad.api.exceptions.BaseNomadException
          - nomad.api.exceptions.URLNotFoundNomadException
    """
    params = {"prefix": prefix}
    return self.request(params=params, method="get").json()
0.004615
def estimate_row_means(
        self,
        X,
        observed,
        column_means,
        column_scales):
    """
    row_center[i] =
        sum{j in observed[i, :]}{
            (1 / column_scale[j]) * (X[i, j] - column_center[j])
        }
        ------------------------------------------------------------
        sum{j in observed[i, :]}{1 / column_scale[j]}
    """
    n_rows, n_cols = X.shape
    column_means = np.asarray(column_means)
    if len(column_means) != n_cols:
        raise ValueError("Expected length %d but got shape %s" % (
            n_cols, column_means.shape))
    X = X - column_means.reshape((1, n_cols))

    column_weights = 1.0 / column_scales
    X *= column_weights.reshape((1, n_cols))

    row_means = np.zeros(n_rows, dtype=X.dtype)
    row_residual_sums = np.nansum(X, axis=1)
    for i in range(n_rows):
        row_mask = observed[i, :]
        sum_weights = column_weights[row_mask].sum()
        row_means[i] = row_residual_sums[i] / sum_weights
    return row_means
0.001802
def _remove_vlan_from_all_service_profiles(self, handle, vlan_id, ucsm_ip):
    """Deletes VLAN Profile config from server's ethernet ports."""
    service_profile_list = []
    for key, value in six.iteritems(self.ucsm_sp_dict):
        if (ucsm_ip in key) and value:
            service_profile_list.append(value)

    if not service_profile_list:
        # Nothing to do
        return

    try:
        handle.StartTransaction()
        for service_profile in service_profile_list:
            virtio_port_list = (
                CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
            eth_port_paths = ["%s%s" % (service_profile, ep)
                              for ep in virtio_port_list]

            # 1. From the Service Profile config, access the
            #    configuration for its ports.
            # 2. Check if that Vlan has been configured on each port
            # 3. If Vlan config found, remove it.
            obj = handle.GetManagedObject(
                None,
                self.ucsmsdk.LsServer.ClassId(),
                {self.ucsmsdk.LsServer.DN: service_profile})

            if obj:
                # Check if this vlan_id has been configured on the
                # ports in this Service profile
                for eth_port_path in eth_port_paths:
                    eth = handle.GetManagedObject(
                        obj, self.ucsmsdk.VnicEther.ClassId(),
                        {self.ucsmsdk.VnicEther.DN: eth_port_path}, True)
                    if eth:
                        vlan_name = self.make_vlan_name(vlan_id)
                        vlan_path = eth_port_path + "/if-" + vlan_name
                        vlan = handle.GetManagedObject(
                            eth, self.ucsmsdk.VnicEtherIf.ClassId(),
                            {self.ucsmsdk.VnicEtherIf.DN: vlan_path})
                        if vlan:
                            # Found vlan config. Now remove it.
                            handle.RemoveManagedObject(vlan)
        handle.CompleteTransaction()
    except Exception as e:
        # Raise a Neutron exception. Include a description of
        # the original exception.
        raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
                                          ucsm_ip=ucsm_ip,
                                          exc=e)
0.001968
def dirinfo(path, opts=None):
    '''
    Return information on a directory located on the Moose

    CLI Example:

    .. code-block:: bash

        salt '*' moosefs.dirinfo /path/to/dir/ [-[n][h|H]]
    '''
    cmd = 'mfsdirinfo'
    ret = {}
    if opts:
        cmd += ' -' + opts
    cmd += ' ' + path
    out = __salt__['cmd.run_all'](cmd, python_shell=False)
    output = out['stdout'].splitlines()
    for line in output:
        if not line:
            continue
        comps = line.split(':')
        ret[comps[0].strip()] = comps[1].strip()
    return ret
0.001761
def regression(fname="regression.png"):
    """
    Create figures for regression models
    """
    _, axes = plt.subplots(ncols=2, figsize=(18, 6))
    alphas = np.logspace(-10, 1, 300)
    data = load_concrete(split=True)

    # Plot prediction error in the middle
    oz = PredictionError(LassoCV(alphas=alphas), ax=axes[0])
    oz.fit(data.X.train, data.y.train)
    oz.score(data.X.test, data.y.test)
    oz.finalize()

    # Plot residuals on the right
    oz = ResidualsPlot(RidgeCV(alphas=alphas), ax=axes[1])
    oz.fit(data.X.train, data.y.train)
    oz.score(data.X.test, data.y.test)
    oz.finalize()

    # Save figure
    path = os.path.join(FIGURES, fname)
    plt.tight_layout()
    plt.savefig(path)
0.001393
def add_private_note(self, private_notes, source=None):
    """Add private notes.

    :param private_notes: hidden notes for the current document
    :type private_notes: string

    :param source: source for the given private notes
    :type source: string
    """
    self._append_to('_private_notes', self._sourced_dict(
        source,
        value=private_notes,
    ))
0.004808
def set_current_context(self, name):
    """Set the current context in kubeconfig."""
    if self.context_exists(name):
        self.data['current-context'] = name
    else:
        raise KubeConfError("Context does not exist.")
0.008065
def from_environment_or_defaults(cls, environment=None):
    """Create a Run object taking values from the local environment where possible.

    The run ID comes from WANDB_RUN_ID or is randomly generated.
    The run mode ("dryrun", or "run") comes from WANDB_MODE or defaults to "dryrun".
    The run directory comes from WANDB_RUN_DIR or is generated from the run ID.

    The Run will have a .config attribute but its run directory won't be
    set by default.
    """
    if environment is None:
        environment = os.environ
    run_id = environment.get(env.RUN_ID)
    resume = environment.get(env.RESUME)
    storage_id = environment.get(env.RUN_STORAGE_ID)
    mode = environment.get(env.MODE)
    disabled = InternalApi().disabled()
    if not mode and disabled:
        mode = "dryrun"
    elif disabled and mode != "dryrun":
        wandb.termlog(
            "WARNING: WANDB_MODE is set to run, but W&B was disabled. Run `wandb on` to remove this message")
    elif disabled:
        wandb.termlog(
            'W&B is disabled in this directory. Run `wandb on` to enable cloud syncing.')

    group = environment.get(env.RUN_GROUP)
    job_type = environment.get(env.JOB_TYPE)
    run_dir = environment.get(env.RUN_DIR)
    sweep_id = environment.get(env.SWEEP_ID)
    program = environment.get(env.PROGRAM)
    description = environment.get(env.DESCRIPTION)
    args = env.get_args()
    wandb_dir = env.get_dir()
    tags = env.get_tags()
    config = Config.from_environment_or_defaults()
    run = cls(run_id, mode, run_dir, group, job_type,
              config, sweep_id, storage_id, program=program,
              description=description, args=args,
              wandb_dir=wandb_dir, tags=tags,
              resume=resume)
    return run
0.004692
def homepage(request):
    '''
    Context:
        all_metadata

    Templates:
        - billy/web/public/homepage.html
    '''
    all_metadata = db.metadata.find()
    return render(request, templatename('homepage'),
                  dict(all_metadata=all_metadata))
0.003676
def get_model_spec_ting(atomic_number):
    """ X_u_template[0:2] are teff, logg, vturb in km/s
    X_u_template[:,3] -> onward, put atomic number
    atomic_number is 6 for C, 7 for N """
    DATA_DIR = "/Users/annaho/Data/LAMOST/Mass_And_Age"
    temp = np.load("%s/X_u_template_KGh_res=1800.npz" %DATA_DIR)
    X_u_template = temp["X_u_template"]
    wl = temp["wavelength"]
    grad_spec = X_u_template[:,atomic_number]
    return wl, grad_spec
0.010917
def hourly(place):
    """return data as list of dicts with all data filled in."""
    # time in utc?
    lat, lon = place
    url = "https://api.forecast.io/forecast/%s/%s,%s?solar" % (APIKEY, lat, lon)
    w_data = json.loads(urllib2.urlopen(url).read())
    hourly_data = w_data['hourly']['data']
    mangled = []
    for i in hourly_data:
        mangled.append(mangle(i))
    return mangled
0.002183
def get_teams(self):
    """
    Return json current roster of team
    """
    return self.make_request(host="erikberg.com", sport='nba', method="teams",
                             id=None, format="json", parameters={})
0.006849
def _make_plan(plan_dict):
    """ Construct a Plan or ProfiledPlan from a dictionary of metadata values.

    :param plan_dict:
    :return:
    """
    operator_type = plan_dict["operatorType"]
    identifiers = plan_dict.get("identifiers", [])
    arguments = plan_dict.get("args", [])
    children = [_make_plan(child) for child in plan_dict.get("children", [])]
    if "dbHits" in plan_dict or "rows" in plan_dict:
        db_hits = plan_dict.get("dbHits", 0)
        rows = plan_dict.get("rows", 0)
        return ProfiledPlan(operator_type, identifiers, arguments, children, db_hits, rows)
    else:
        return Plan(operator_type, identifiers, arguments, children)
0.002963
def load(self, updates):
    """Load configuration data"""
    # Go through in order and override the config (`.mbed_cloud_config.json` loader)
    for path in self.paths():
        if not path:
            continue
        abs_path = os.path.abspath(os.path.expanduser(path))
        if not os.path.isfile(abs_path):
            self._using_paths.append('missing: %s' % abs_path)
            continue
        self._using_paths.append(' exists: %s' % abs_path)
        with open(abs_path) as fh:
            self.update(json.load(fh))

    # New dotenv loader - requires explicit instructions to use current working directory
    load_dotenv(find_dotenv(usecwd=True))

    # Pluck config values out of the environment
    for env_var, key in {ENVVAR_API_HOST: 'host', ENVVAR_API_KEY: 'api_key'}.items():
        env_value = os.getenv(env_var)
        if env_value is not None:
            self[key] = env_value

    if updates:
        self.update(updates)

    self.validate()
0.004704
def count_nonzero(data, mapper=None, blen=None, storage=None,
                  create='array', **kwargs):
    """Count the number of non-zero elements."""
    return reduce_axis(data, reducer=np.count_nonzero,
                       block_reducer=np.add, mapper=mapper,
                       blen=blen, storage=storage, create=create, **kwargs)
0.00289
def parse_spec(self, spec):
    """Parse the given spec into a `specs.Spec` object.

    :param spec: a single spec string.
    :return: a single specs.Specs object.
    :raises: CmdLineSpecParser.BadSpecError if the address selector could not be parsed.
    """
    if spec.endswith('::'):
        spec_path = spec[:-len('::')]
        return DescendantAddresses(self._normalize_spec_path(spec_path))
    elif spec.endswith(':'):
        spec_path = spec[:-len(':')]
        return SiblingAddresses(self._normalize_spec_path(spec_path))
    else:
        spec_parts = spec.rsplit(':', 1)
        spec_path = self._normalize_spec_path(spec_parts[0])
        name = spec_parts[1] if len(spec_parts) > 1 else os.path.basename(spec_path)
        return SingleAddress(spec_path, name)
0.015666
def create(self, email, verify=None, components=None):
    """Create a new subscriber

    :param str email: Email address to subscribe
    :param bool verify: Whether to send verification email
    :param list components: Components ID list, defaults to all
    :return: Created subscriber data (:class:`dict`)

    .. seealso:: https://docs.cachethq.io/reference#subscribers
    """
    data = ApiParams()
    data['email'] = email
    data['verify'] = verify
    data['components'] = components
    return self._post('subscribers', data=data)['data']
0.003328
def find_prop_overlap(rdf, prop1, prop2):
    """Generate (subject,object) pairs connected by two properties."""
    for s, o in sorted(rdf.subject_objects(prop1)):
        if (s, prop2, o) in rdf:
            yield (s, o)
0.004505