text: string, lengths 78 to 104k
score: float64, range 0 to 0.18
def run(self, inputs): """Run many steps of the simulation. The argument is a list of input mappings for each step, and its length is the number of steps to be executed. """ steps = len(inputs) # create i/o arrays of the appropriate length ibuf_type = ctypes.c_uint64*(steps*self._ibufsz) obuf_type = ctypes.c_uint64*(steps*self._obufsz) ibuf = ibuf_type() obuf = obuf_type() # these array will be passed to _crun self._crun.argtypes = [ctypes.c_uint64, ibuf_type, obuf_type] # build the input array for n, inmap in enumerate(inputs): for w in inmap: if isinstance(w, WireVector): name = w.name else: name = w start, count = self._inputpos[name] start += n*self._ibufsz val = inmap[w] if val >= 1 << self._inputbw[name]: raise PyrtlError( 'Wire {} has value {} which cannot be represented ' 'using its bitwidth'.format(name, val)) # pack input for pos in range(start, start+count): ibuf[pos] = val & ((1 << 64)-1) val >>= 64 # run the simulation self._crun(steps, ibuf, obuf) # save traced wires for name in self.tracer.trace: rname = self._probe_mapping.get(name, name) if rname in self._outputpos: start, count = self._outputpos[rname] buf, sz = obuf, self._obufsz elif rname in self._inputpos: start, count = self._inputpos[rname] buf, sz = ibuf, self._ibufsz else: raise PyrtlInternalError('Untraceable wire in tracer') res = [] for n in range(steps): val = 0 # unpack output for pos in reversed(range(start, start+count)): val <<= 64 val |= buf[pos] res.append(val) start += sz self.tracer.trace[name].extend(res)
0.000894
def stop(id):
    """
    Stop a running job.
    """
    try:
        experiment = ExperimentClient().get(normalize_job_name(id))
    except FloydException:
        experiment = ExperimentClient().get(id)

    if experiment.state not in ["queued", "queue_scheduled", "running"]:
        floyd_logger.info("Job in {} state cannot be stopped".format(experiment.state))
        sys.exit(1)

    if not ExperimentClient().stop(experiment.id):
        floyd_logger.error("Failed to stop job")
        sys.exit(1)

    floyd_logger.info("Experiment shutdown request submitted. Check status to confirm shutdown")
0.00495
def _parse_the_ned_position_results( self, ra, dec, nedResults): """ *parse the ned results* **Key Arguments:** - ``ra`` -- the search ra - ``dec`` -- the search dec **Return:** - ``results`` -- list of result dictionaries """ self.log.info('starting the ``_parse_the_ned_results`` method') results = [] resultLen = 0 if nedResults: # OPEN THE RESULT FILE FROM NED pathToReadFile = nedResults try: self.log.debug("attempting to open the file %s" % (pathToReadFile,)) readFile = codecs.open( pathToReadFile, encoding='utf-8', mode='rb') thisData = readFile.read() readFile.close() except IOError, e: message = 'could not open the file %s' % (pathToReadFile,) self.log.critical(message) raise IOError(message) readFile.close() # CHECK FOR ERRORS if "Results from query to NASA/IPAC Extragalactic Database" not in thisData: print "something went wrong with the NED query" self.log.error( "something went wrong with the NED query" % locals()) sys.exit(0) # SEARCH FROM MATCHES IN RESULTS FILE matchObject = re.search( r"No\.\|Object Name.*?\n(.*)", thisData, re.S) if matchObject: theseLines = string.split(matchObject.group(), '\n') resultLen = len(theseLines) csvReader = csv.DictReader( theseLines, dialect='excel', delimiter='|', quotechar='"') for row in csvReader: thisEntry = {"searchRa": ra, "searchDec": dec, "matchName": row["Object Name"].strip()} results.append(thisEntry) if self.nearestOnly: break self.log.info('completed the ``_parse_the_ned_results`` method') return results, resultLen
0.001336
def uninstalled(name, version=None, uninstall_args=None, override_args=False): ''' Uninstalls a package name The name of the package to be uninstalled version Uninstalls a specific version of the package. Defaults to latest version installed. uninstall_args A list of uninstall arguments you want to pass to the uninstallation process i.e product key or feature list override_args Set to true if you want to override the original uninstall arguments ( for the native uninstaller)in the package and use your own. When this is set to False uninstall_args will be appended to the end of the default arguments .. code-block: yaml Removemypackage: chocolatey.uninstalled: - name: mypackage - version: '21.5' ''' ret = {'name': name, 'result': True, 'changes': {}, 'comment': ''} # Get list of currently installed packages pre_uninstall = __salt__['chocolatey.list'](local_only=True) # Determine if package is installed if name.lower() in [package.lower() for package in pre_uninstall.keys()]: try: ret['changes'] = { name: '{0} version {1} will be removed'.format( name, pre_uninstall[name][0] ) } except KeyError: ret['changes'] = {name: '{0} will be removed'.format(name)} else: ret['comment'] = 'The package {0} is not installed'.format(name) return ret if __opts__['test']: ret['result'] = None ret['comment'] = 'The uninstall was tested' return ret # Uninstall the package result = __salt__['chocolatey.uninstall'](name, version, uninstall_args, override_args) if 'Running chocolatey failed' not in result: ret['result'] = True else: ret['result'] = False if not ret['result']: ret['comment'] = 'Failed to uninstall the package {0}'.format(name) # Get list of installed packages after 'chocolatey.uninstall' post_uninstall = __salt__['chocolatey.list'](local_only=True) ret['changes'] = salt.utils.data.compare_dicts(pre_uninstall, post_uninstall) return ret
0.000834
def minvar(X, order, sampling=1., NFFT=default_NFFT): r"""Minimum Variance Spectral Estimation (MV) This function computes the minimum variance spectral estimate using the Musicus procedure. The Burg algorithm from :func:`~spectrum.burg.arburg` is used for the estimation of the autoregressive parameters. The MV spectral estimator is given by: .. math:: P_{MV}(f) = \frac{T}{e^H(f) R^{-1}_p e(f)} where :math:`R^{-1}_p` is the inverse of the estimated autocorrelation matrix (Toeplitz) and :math:`e(f)` is the complex sinusoid vector. :param X: Array of complex or real data samples (length N) :param int order: Dimension of correlation matrix (AR order = order - 1 ) :param float T: Sample interval (PSD scaling) :param int NFFT: length of the final PSD :return: * PSD - Power spectral density values (two-sided) * AR - AR coefficients (Burg algorithm) * k - Reflection coefficients (Burg algorithm) .. note:: The MV spectral estimator is not a true PSD function because the area under the MV estimate does not represent the total power in the measured process. MV minimises the variance of the output of a narrowband filter and adpats itself to the spectral content of the input data at each frequency. :Example: The following example computes a PSD estimate using :func:`minvar` The output PSD is transformed to a ``centerdc`` PSD and plotted. .. plot:: :width: 80% :include-source: from spectrum import * from pylab import plot, log10, linspace, xlim psd, A, k = minvar(marple_data, 15) psd = twosided_2_centerdc(psd) # switch positive and negative freq f = linspace(-0.5, 0.5, len(psd)) plot(f, 10 * log10(psd/max(psd))) xlim(-0.5, 0.5 ) .. seealso:: * External functions used are :meth:`~spectrum.burg.arburg` and numpy.fft.fft * :class:`pminvar`, a Class dedicated to MV method. :Reference: [Marple]_ """ errors.is_positive_integer(order) errors.is_positive_integer(NFFT) psi = np.zeros(NFFT, dtype=complex) # First, we need to compute the AR values (note that order-1) A, P, k = arburg (X, order - 1) # add the order 0 A = np.insert(A, 0, 1.+0j) # We cannot compare the output with those of MARPLE in a precise way. # Indeed the burg algorithm is only single precision in fortram code # So, the AR values are slightly differnt. # The followign values are those from Marple """A[1] = 2.62284255-0.701703191j A[2] = 4.97930574-2.32781982j A[3] = 6.78445101-5.02477741j A[4] =7.85207081-8.01284409j A[5] =7.39412165-10.7684202j A[6] =6.03175116-12.7067814j A[7] =3.80106878-13.6808891j A[8] =1.48207295-13.2265558j A[9] =-0.644280195-11.4574194j A[10] =-2.02386642-8.53268814j A[11] =-2.32437634-5.25636244j A[12] =-1.75356281-2.46820402j A[13] =-0.888899028-0.781434655j A[14] =-0.287197977-0.0918145925j P = 0.00636525545 """ # if we use exactly the same AR coeff and P from Marple Burg output, then # we can compare the following code. This has been done and reveals that # the FFT in marple is also slightly different (precision) from this one. # However, the results are sufficiently close (when NFFT is small) that # we are confident the following code is correct. # Compute the psi coefficients for K in range(0, order): SUM = 0. MK = order-K # Correlate the autoregressive parameters for I in range(0, order - K): SUM = SUM + float(MK-2*I) * A[I].conjugate()*A[I+K] # Eq. (12.25) SUM = SUM/P if K != 0: psi[NFFT-K] = SUM.conjugate() psi[K] = SUM # Compute FFT of denominator psi = fft(psi, NFFT) # Invert the psi terms at this point to get PSD values PSD = sampling / np.real(psi) return PSD, A, k
0.001492
def _get_bucket_name(**values):
    """
    Generates the bucket name for url_for.
    """
    app = current_app
    # manage other special values, all have no meaning for static urls
    values.pop('_external', False)  # external has no meaning here
    values.pop('_anchor', None)  # anchor as well
    values.pop('_method', None)  # method too

    url_style = get_setting('FLASKS3_URL_STYLE', app)
    if url_style == 'host':
        url_format = '{bucket_name}.{bucket_domain}'
    elif url_style == 'path':
        url_format = '{bucket_domain}/{bucket_name}'
    else:
        raise ValueError('Invalid S3 URL style: "{}"'.format(url_style))

    if get_setting('FLASKS3_CDN_DOMAIN', app):
        bucket_path = '{}'.format(get_setting('FLASKS3_CDN_DOMAIN', app))
    else:
        bucket_path = url_format.format(
            bucket_name=get_setting('FLASKS3_BUCKET_NAME', app),
            bucket_domain=get_setting('FLASKS3_BUCKET_DOMAIN', app),
        )

    bucket_path += _get_statics_prefix(app).rstrip('/')

    return bucket_path, values
0.000948
def split_token(output):
    """
    Split an output into token tuple, real output tuple.

    :param output:
    :return: tuple, tuple
    """
    output = ensure_tuple(output)
    flags, i, len_output, data_allowed = set(), 0, len(output), True
    while i < len_output and isflag(output[i]):
        if output[i].must_be_first and i:
            raise ValueError("{} flag must be first.".format(output[i]))
        if i and output[i - 1].must_be_last:
            raise ValueError("{} flag must be last.".format(output[i - 1]))
        if output[i] in flags:
            raise ValueError("Duplicate flag {}.".format(output[i]))
        flags.add(output[i])
        data_allowed &= output[i].allows_data
        i += 1
    output = output[i:]
    if not data_allowed and len(output):
        raise ValueError("Output data provided after a flag that does not allow data.")
    return flags, output
0.002217
def implied_feature (implicit_value):
    """ Returns the implicit feature associated with the given implicit value.
    """
    assert isinstance(implicit_value, basestring)
    components = implicit_value.split('-')

    if components[0] not in __implicit_features:
        raise InvalidValue ("'%s' is not a value of an implicit feature" % implicit_value)

    return __implicit_features[components[0]]
0.009877
def values(self):
    """
    Returns the labels, strings or relation-values.

    :return: all the values, None if not NOMINAL, STRING, or RELATION
    :rtype: list
    """
    enm = javabridge.call(self.jobject, "enumerateValues", "()Ljava/util/Enumeration;")
    if enm is None:
        return None
    else:
        return typeconv.enumeration_to_list(enm)
0.0075
def get_family(families):
    """Return the first installed font family in family list"""
    if not isinstance(families, list):
        families = [families]
    for family in families:
        if font_is_installed(family):
            return family
    else:
        print("Warning: None of the following fonts is installed: %r" % families)  # spyder: test-skip
        return QFont().family()
0.010076
def writerow(self, cells):
    """
    Write a row of cells into the default sheet of the spreadsheet.

    :param cells: A list of cells (most basic Python types supported).
    :return: Nothing.
    """
    if self.default_sheet is None:
        self.default_sheet = self.new_sheet()
    self.default_sheet.writerow(cells)
0.005634
def get_organization_events(self, org):
    """
    :calls: `GET /users/:user/events/orgs/:org <http://developer.github.com/v3/activity/events>`_
    :param org: :class:`github.Organization.Organization`
    :rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Event.Event`
    """
    assert isinstance(org, github.Organization.Organization), org
    return github.PaginatedList.PaginatedList(
        github.Event.Event,
        self._requester,
        "/users/" + self.login + "/events/orgs/" + org.login,
        None
    )
0.006745
def p_Value(self, p):
    """Value : valueofObjectSyntax
             | '{' BitsValue '}'"""
    n = len(p)
    if n == 2:
        p[0] = p[1]
    elif n == 4:
        p[0] = p[2]
0.009662
def set_page_label(self, page_id, label):
    """
    Set a label on the page

    :param page_id: content_id format
    :param label: label to add
    :return:
    """
    url = 'rest/api/content/{page_id}/label'.format(page_id=page_id)
    data = {'prefix': 'global', 'name': label}
    return self.post(path=url, data=data)
0.005305
def pad_dialogues(self, dialogues):
    """
    Pad the entire dataset.

    This involves adding padding at the end of each sentence, and in the case
    of a hierarchical model, it also involves adding padding at the end of each
    dialogue, so that every training sample (dialogue) has the same dimension.
    """
    self.log('info', 'Padding the dialogues ...')
    empty_turn = [self.config['pad-d']] * (self.properties['max-utterance-length'] + 1)

    for i, d in enumerate(dialogues):
        for j, u in enumerate(d):
            dif = self.properties['max-utterance-length'] - len(u) + 1
            dialogues[i][j] += [self.config['pad-u']] * dif

        # only pad the dialogue if we're training a hierarchical model
        if self.config['hierarchical']:
            dif = self.properties['max-dialogue-length'] - len(d)
            dialogues[i] += [empty_turn] * dif

    return dialogues
0.009775
def _downloaded_filename(self): """Download the package's archive if necessary, and return its filename. --no-deps is implied, as we have reimplemented the bits that would ordinarily do dependency resolution. """ # Peep doesn't support requirements that don't come down as a single # file, because it can't hash them. Thus, it doesn't support editable # requirements, because pip itself doesn't support editable # requirements except for "local projects or a VCS url". Nor does it # support VCS requirements yet, because we haven't yet come up with a # portable, deterministic way to hash them. In summary, all we support # is == requirements and tarballs/zips/etc. # TODO: Stop on reqs that are editable or aren't ==. # If the requirement isn't already specified as a URL, get a URL # from an index: link = self._link() or self._finder.find_requirement(self._req, upgrade=False) if link: lower_scheme = link.scheme.lower() # pip lower()s it for some reason. if lower_scheme == 'http' or lower_scheme == 'https': file_path = self._download(link) return basename(file_path) elif lower_scheme == 'file': # The following is inspired by pip's unpack_file_url(): link_path = url_to_path(link.url_without_fragment) if isdir(link_path): raise UnsupportedRequirementError( "%s: %s is a directory. So that it can compute " "a hash, peep supports only filesystem paths which " "point to files" % (self._req, link.url_without_fragment)) else: copy(link_path, self._temp_path) return basename(link_path) else: raise UnsupportedRequirementError( "%s: The download link, %s, would not result in a file " "that can be hashed. Peep supports only == requirements, " "file:// URLs pointing to files (not folders), and " "http:// and https:// URLs pointing to tarballs, zips, " "etc." % (self._req, link.url)) else: raise UnsupportedRequirementError( "%s: couldn't determine where to download this requirement from." % (self._req,))
0.001982
def get_block(self, x, y, z):
    """Get a block from relative x,y,z."""
    sy, by = divmod(y, 16)
    section = self.get_section(sy)
    if section == None:
        return None
    return section.get_block(x, by, z)
0.016598
def list_alarms(self, limit=None, marker=None, return_next=False):
    """
    Returns a list of all the alarms created on this entity.
    """
    return self._alarm_manager.list(limit=limit, marker=marker,
                                    return_next=return_next)
0.011364
def fillPelicanHole(site, username, password, tstat_name, start_time, end_time): """Fill a hole in a Pelican thermostat's data stream. Arguments: site -- The thermostat's Pelican site name username -- The Pelican username for the site password -- The Pelican password for the site tstat_name -- The name of the thermostat, as identified by Pelican start_time -- The start of the data hole in UTC, e.g. "2018-01-29 15:00:00" end_time -- The end of the data hole in UTC, e.g. "2018-01-29 16:00:00" Returns: A Pandas dataframe with historical Pelican data that falls between the specified start and end times. Note that this function assumes the Pelican thermostat's local time zone is US/Pacific. It will properly handle PST vs. PDT. """ start = datetime.strptime(start_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time) end = datetime.strptime(end_time, _INPUT_TIME_FORMAT).replace(tzinfo=pytz.utc).astimezone(_pelican_time) heat_needs_fan = _lookupHeatNeedsFan(site, username, password, tstat_name) if heat_needs_fan is None: return None # Pelican's API only allows a query covering a time range of up to 1 month # So we may need run multiple requests for historical data history_blocks = [] while start < end: block_start = start block_end = min(start + timedelta(days=30), end) blocks = _lookupHistoricalData(site, username, password, tstat_name, block_start, block_end) if blocks is None: return None history_blocks.extend(blocks) start += timedelta(days=30, minutes=1) output_rows = [] for block in history_blocks: runStatus = block.find("runStatus").text if runStatus.startswith("Heat"): fanState = (heatNeedsFan == "Yes") else: fanState = (runStatus != "Off") api_time = datetime.strptime(block.find("timestamp").text, "%Y-%m-%dT%H:%M").replace(tzinfo=_pelican_time) # Need to convert seconds to nanoseconds timestamp = int(api_time.timestamp() * 10**9) output_rows.append({ "temperature": float(block.find("temperature").text), "relative_humidity": float(block.find("humidity").text), "heating_setpoint": float(block.find("heatSetting").text), "cooling_setpoint": float(block.find("coolSetting").text), # Driver explicitly uses "Schedule" field, but we don't have this in history "override": block.find("setBy").text != "Schedule", "fan": fanState, "mode": _mode_name_mappings[block.find("system").text], "state": _state_mappings.get(runStatus, 0), "time": timestamp, }) df = pd.DataFrame(output_rows) df.drop_duplicates(subset="time", keep="first", inplace=True) return df
0.002729
def dfa_projection(dfa: dict, symbols_to_remove: set) -> dict: """ Returns a NFA that reads the language recognized by the input DFA where all the symbols in **symbols_to_project** are projected out of the alphabet. Projection in a DFA is the operation that existentially removes from a word all occurrence of symbols in a set X. Given a dfa :math:`A = (Σ, S, s_0 , ρ, F )`, we can define an NFA :math:`A_{πX}` that recognizes the language :math:`πX(L(A))` as :math:`A_{πX}= ( Σ−X, S, S_0 , ρ_X , F )` where • :math:`S_0 = \{s | (s_0 , s) ∈ ε_X \}` • :math:`(s,a,s_y ) ∈ ρ_X` iff there exist :math:`(t, t_y)` s.t. :math:`(s,t) ∈ ε_X , t_y = ρ(t,a)` and :math:`(t_y , s_y ) ∈ ε_X` :param dict dfa: input DFA; :param set symbols_to_remove: set containing symbols ∈ dfa[ 'alphabet'] to be projected out from DFA. :return: *(dict)* representing a NFA. """ nfa = { 'alphabet': dfa['alphabet'].difference(symbols_to_remove), 'states': dfa['states'].copy(), 'initial_states': {dfa['initial_state']}, 'accepting_states': dfa['accepting_states'].copy(), 'transitions': dict() } current_nfa_transitions = None current_e_x = None e_x = dict() # equivalence relation dictionary # while no more changes are possible while current_nfa_transitions != nfa['transitions'] or current_e_x != e_x: current_nfa_transitions = nfa['transitions'].copy() current_e_x = deepcopy(e_x) for (state, a) in dfa['transitions']: next_state = dfa['transitions'][state, a] if a in symbols_to_remove: # mark next_state as equivalent to state e_x.setdefault(state, set()).add(next_state) app_set = set() for equivalent in e_x[state]: # mark states equivalent to next_states also to state if equivalent in e_x: app_set.update(e_x[equivalent]) # add all transitions of equivalent states to state for act in nfa['alphabet']: if (equivalent, act) in dfa['transitions']: equivalent_next = dfa['transitions'][ equivalent, act] nfa['transitions'].setdefault( (state, act), set()).add(equivalent_next) # if equivalent_next has equivalent states if equivalent_next in e_x: # the transition leads also to these states nfa['transitions'][state, act].update( e_x[equivalent_next]) e_x[state].update(app_set) else: # add the transition to the NFA nfa['transitions'].setdefault((state, a), set()).add( next_state) # if next_state has equivalent states if next_state in e_x: # the same transition arrive also to all these other states nfa['transitions'][state, a].update(e_x[next_state]) # Add all state equivalent to the initial one to NFA initial states set if dfa['initial_state'] in e_x: nfa['initial_states'].update(e_x[dfa['initial_state']]) return nfa
0.000867
def run_lint_command():
    """
    Run lint command in the shell and save results to lint-result.xml
    """
    lint, app_dir, lint_result, ignore_layouts = parse_args()
    if not lint_result:
        if not distutils.spawn.find_executable(lint):
            raise Exception(
                '`%s` executable could not be found and path to lint result not specified. See --help' % lint)
        lint_result = os.path.join(app_dir, 'lint-result.xml')
        call_result = subprocess.call([lint, app_dir, '--xml', lint_result])
        if call_result > 0:
            print('Running the command failed with result %s. Try running it from the console.'
                  ' Arguments for subprocess.call: %s' % (call_result, [lint, app_dir, '--xml', lint_result]))
    else:
        if not os.path.isabs(lint_result):
            lint_result = os.path.join(app_dir, lint_result)

    lint_result = os.path.abspath(lint_result)
    return lint_result, app_dir, ignore_layouts
0.004111
def name():
    """
    Generates a random person's name which has the following structure
    <optional prefix> <first name> <second name> <optional suffix>

    :return: a random name.
    """
    result = ""

    if RandomBoolean.chance(3, 5):
        result += random.choice(_name_prefixes) + " "

    result += random.choice(_first_names) + " " + random.choice(_last_names)

    if RandomBoolean.chance(5, 10):
        result += " " + random.choice(_name_suffixes)

    return result
0.005597
def revoke_session(self, sid='', token=''):
    """
    Mark session as revoked but also explicitly revoke all issued tokens

    :param token: any token connected to the session
    :param sid: Session identifier
    """
    if not sid:
        if token:
            sid = self.handler.sid(token)
        else:
            raise ValueError('Need one of "sid" or "token"')

    for typ in ['access_token', 'refresh_token', 'code']:
        try:
            self.revoke_token(self[sid][typ], typ)
        except KeyError:  # If no such token has been issued
            pass

    self.update(sid, revoked=True)
0.002972
def unbind(self):
    """Unlisten and close each bound item."""
    for variable in self.variables:
        self.__unbind_variable(variable)
    for result in self.results:
        self.__unbind_result(result)
0.008734
def _random_edge_iterator(graph, n_edges: int) -> Iterable[Tuple[BaseEntity, BaseEntity, int, Mapping]]:
    """Get a random set of edges from the graph and randomly samples a key from each.

    :type graph: pybel.BELGraph
    :param n_edges: Number of edges to randomly select from the given graph
    """
    edges = list(graph.edges())
    edge_sample = random.sample(edges, n_edges)
    for u, v in edge_sample:
        keys = list(graph[u][v])
        k = random.choice(keys)
        yield u, v, k, graph[u][v][k]
0.00578
def commit_history(self, branch, limit=None, days=None, ignore_globs=None, include_globs=None): """ Returns a pandas DataFrame containing all of the commits for a given branch. The results from all repositories are appended to each other, resulting in one large data frame of size <limit>. If a limit is provided, it is divided by the number of repositories in the project directory to find out how many commits to pull from each project. Future implementations will use date ordering across all projects to get the true most recent N commits across the project. Included in that DataFrame will be the columns: * repository * date (index) * author * committer * message * lines * insertions * deletions * net :param branch: the branch to return commits for :param limit: (optional, default=None) a maximum number of commits to return, None for no limit :param days: (optional, default=None) number of days to return if limit is None :param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing :param include_globs: (optinal, default=None) a list of globs to include, default of None includes everything. :return: DataFrame """ if limit is not None: limit = int(limit / len(self.repo_dirs)) df = pd.DataFrame(columns=['author', 'committer', 'message', 'lines', 'insertions', 'deletions', 'net']) for repo in self.repos: try: ch = repo.commit_history(branch, limit=limit, days=days, ignore_globs=ignore_globs, include_globs=include_globs) ch['repository'] = repo.repo_name df = df.append(ch) except GitCommandError: print('Warning! Repo: %s seems to not have the branch: %s' % (repo, branch)) df.reset_index() return df
0.006993
def handle_sub_rectangles(self, images, sub_rectangles): """ handle_sub_rectangles(images) Handle the sub-rectangle stuff. If the rectangles are given by the user, the values are checked. Otherwise the subrectangles are calculated automatically. """ image_info = [im.info for im in images] if isinstance(sub_rectangles, (tuple, list)): # xy given directly # Check xy xy = sub_rectangles if xy is None: xy = (0, 0) if hasattr(xy, '__len__'): if len(xy) == len(images): xy = [xxyy for xxyy in xy] else: raise ValueError("len(xy) doesn't match amount of images.") else: xy = [xy for im in images] xy[0] = (0, 0) else: # Calculate xy using some basic image processing # Check Numpy if np is None: raise RuntimeError("Need Numpy to use auto-sub_rectangles.") # First make numpy arrays if required for i in range(len(images)): im = images[i] if isinstance(im, Image.Image): tmp = im.convert() # Make without palette a = np.asarray(tmp) if len(a.shape) == 0: raise MemoryError("Too little memory to convert PIL image to array") images[i] = a # Determine the sub rectangles images, xy = self.get_sub_rectangles(images) # Done return images, xy, image_info
0.001807
def RegisterAt(cls, *args, **kwargs): """ **RegisterAt** RegisterAt(n, f, library_path, alias=None, original_name=None, doc=None, wrapped=None, explanation="", method_type=utils.identity, explain=True, _return_type=None) Most of the time you don't want to register an method as such, that is, you don't care about the `self` builder object, instead you want to register a function that transforms the value being piped down the DSL. For this you can use `RegisterAt` so e.g. def some_fun(obj, arg1, arg2): # code @MyBuilder.RegisterMethod("my_lib.") def some_fun_wrapper(self, arg1, arg2): return self.ThenAt(1, some_fun, arg1, arg2) can be written directly as @MyBuilder.RegisterAt(1, "my_lib.") def some_fun(obj, arg1, arg2): # code For this case you can just use `Register` which is a shortcut for `RegisterAt(1, ...)` @MyBuilder.Register("my_lib.") def some_fun(obj, arg1, arg2): # code **Also See** * `phi.builder.Builder.RegisterMethod` """ unpack_error = True try: n, f, library_path = args unpack_error = False cls._RegisterAt(n, f, library_path, **kwargs) except: if not unpack_error: raise def register_decorator(f): n, library_path = args cls._RegisterAt(n, f, library_path, **kwargs) return f return register_decorator
0.004043
def _references(self, i, sequence=False):
    """Handle references."""
    value = ''
    c = next(i)
    if c == '\\':
        # \\
        if sequence and self.bslash_abort:
            raise PathNameException
        value = r'\\'
        if self.bslash_abort:
            if not self.in_list:
                value = self.get_path_sep() + _ONE_OR_MORE
                self.set_start_dir()
            else:
                value = self._restrict_extended_slash() + value
    elif c == '/':
        # \/
        if sequence and self.pathname:
            raise PathNameException
        if self.pathname:
            value = r'\\'
            if self.in_list:
                value = self._restrict_extended_slash() + value
            i.rewind(1)
        else:
            value = re.escape(c)
    else:
        # \a, \b, \c, etc.
        value = re.escape(c)
        if c == '.' and self.after_start and self.in_list:
            self.allow_special_dir = True

    self.reset_dir_track()
    return value
0.001745
def _formatparam(param, value=None, quote=True):
    """Convenience function to format and return a key=value pair.

    This will quote the value if needed or if quote is true.  If value is a
    three tuple (charset, language, value), it will be encoded according
    to RFC2231 rules.  If it contains non-ascii characters it will likewise
    be encoded according to RFC2231 rules, using the utf-8 charset and
    a null language.
    """
    if value is not None and len(value) > 0:
        # A tuple is used for RFC 2231 encoded parameter values where items
        # are (charset, language, value).  charset is a string, not a Charset
        # instance.  RFC 2231 encoded values are never quoted, per RFC.
        if isinstance(value, tuple):
            # Encode as per RFC 2231
            param += '*'
            value = utils.encode_rfc2231(value[2], value[0], value[1])
            return '%s=%s' % (param, value)
        else:
            try:
                value.encode('ascii')
            except UnicodeEncodeError:
                param += '*'
                value = utils.encode_rfc2231(value, 'utf-8', '')
                return '%s=%s' % (param, value)
        # BAW: Please check this.  I think that if quote is set it should
        # force quoting even if not necessary.
        if quote or tspecials.search(value):
            return '%s="%s"' % (param, utils.quote(value))
        else:
            return '%s=%s' % (param, value)
    else:
        return param
0.000671
def tripledes_cbc_pkcs5_decrypt(key, data, iv):
    """
    Decrypts 3DES ciphertext in CBC mode using either the 2 or 3 key variant
    (16 or 24 byte long key) and PKCS#5 padding.

    :param key: The encryption key - a byte string 16 or 24 bytes long (2 or 3 key mode)
    :param data: The ciphertext - a byte string
    :param iv: The initialization vector - a byte string 8-bytes long
    :raises:
        ValueError - when any of the parameters contain an invalid value
        TypeError - when any of the parameters are of the wrong type
        OSError - when an error is returned by OpenSSL
    :return: A byte string of the plaintext
    """
    if len(key) != 16 and len(key) != 24:
        raise ValueError(pretty_message(
            '''
            key must be 16 bytes (2 key) or 24 bytes (3 key) long - is %s
            ''',
            len(key)
        ))

    if len(iv) != 8:
        raise ValueError(pretty_message(
            '''
            iv must be 8 bytes long - is %s
            ''',
            len(iv)
        ))

    cipher = 'tripledes_3key'
    # Expand 2-key to actual 24 byte byte string used by cipher
    if len(key) == 16:
        key = key + key[0:8]
        cipher = 'tripledes_2key'

    return _decrypt(cipher, key, data, iv, True)
0.001524
def after(self, *nodes: Union[AbstractNode, str]) -> None:
    """Append nodes after this node.

    If nodes contains ``str``, it will be converted to Text node.
    """
    if self.parentNode:
        node = _to_node_list(nodes)
        _next_node = self.nextSibling
        if _next_node is None:
            self.parentNode.appendChild(node)
        else:
            self.parentNode.insertBefore(node, _next_node)
0.004367
def invoke_webhook_handlers(self):
    """
    Invokes any webhook handlers that have been registered for this event
    based on event type or event sub-type.

    See event handlers registered in the ``djstripe.event_handlers`` module
    (or handlers registered in djstripe plugins or contrib packages).
    """
    webhooks.call_handlers(event=self)

    signal = WEBHOOK_SIGNALS.get(self.type)
    if signal:
        return signal.send(sender=Event, event=self)
0.027027
def response(uri, method, res, token='', keyword='', content='', raw_flag=False): """Response of tonicdns_client request Arguments: uri: TonicDNS API URI method: TonicDNS API request method res: Response of against request to TonicDNS API token: TonicDNS API token keyword: Processing keyword content: JSON data raw_flag: True is return responsed raw data, False is pretty print """ if method == 'GET' or (method == 'PUT' and not token): # response body data = res.read() data_utf8 = data.decode('utf-8') if token: datas = json.loads(data_utf8) else: token = json.loads(data_utf8)['hash'] return token if keyword == 'serial': # filtering with keyword record = search_record(datas, 'SOA')[0] # if SOA record, remove priority unnecessary del record['priority'] # override ttl record['ttl'] = int(record['ttl']) c = JSONConverter(content['domain']) new_record = c.get_soa(record, content) return record, new_record elif keyword: # '--search' option of 'get' subcommand records = search_record(datas, keyword) datas.update({"records": records}) if uri.split('/')[3] == 'template': # 'tmpl_get' subcommand if len(uri.split('/')) == 5: # when specify template identfier #print_formatted(datas) utils.pretty_print(datas) else: # when get all templates for data in datas: #print_formatted(data) utils.pretty_print(datas) else: # 'get' subcommand if raw_flag: return datas else: #print_formatted(datas) if len(uri.split('zone/')) > 1: domain = uri.split('zone/')[1] else: domain = '' utils.pretty_print(datas, keyword, domain) else: # response non JSON data data = res.read() print(data)
0.001751
def calc_outputs_v1(self): """Performs the actual interpolation or extrapolation. Required control parameters: |XPoints| |YPoints| Required derived parameter: |NmbPoints| |NmbBranches| Required flux sequence: |Input| Calculated flux sequence: |Outputs| Examples: As a simple example, assume a weir directing all discharge into `branch1` until the capacity limit of 2 m³/s is reached. The discharge exceeding this threshold is directed into `branch2`: >>> from hydpy.models.hbranch import * >>> parameterstep() >>> xpoints(0., 2., 4.) >>> ypoints(branch1=[0., 2., 2.], ... branch2=[0., 0., 2.]) >>> model.parameters.update() Low discharge example (linear interpolation between the first two supporting point pairs): >>> fluxes.input = 1. >>> model.calc_outputs_v1() >>> fluxes.outputs outputs(branch1=1.0, branch2=0.0) Medium discharge example (linear interpolation between the second two supporting point pairs): >>> fluxes.input = 3. >>> model.calc_outputs_v1() >>> print(fluxes.outputs) outputs(branch1=2.0, branch2=1.0) High discharge example (linear extrapolation beyond the second two supporting point pairs): >>> fluxes.input = 5. >>> model.calc_outputs_v1() >>> fluxes.outputs outputs(branch1=2.0, branch2=3.0) Non-monotonous relationships and balance violations are allowed, e.g.: >>> xpoints(0., 2., 4., 6.) >>> ypoints(branch1=[0., 2., 0., 0.], ... branch2=[0., 0., 2., 4.]) >>> model.parameters.update() >>> fluxes.input = 7. >>> model.calc_outputs_v1() >>> fluxes.outputs outputs(branch1=0.0, branch2=5.0) """ con = self.parameters.control.fastaccess der = self.parameters.derived.fastaccess flu = self.sequences.fluxes.fastaccess # Search for the index of the two relevant x points... for pdx in range(1, der.nmbpoints): if con.xpoints[pdx] > flu.input: break # ...and use it for linear interpolation (or extrapolation). for bdx in range(der.nmbbranches): flu.outputs[bdx] = ( (flu.input-con.xpoints[pdx-1]) * (con.ypoints[bdx, pdx]-con.ypoints[bdx, pdx-1]) / (con.xpoints[pdx]-con.xpoints[pdx-1]) + con.ypoints[bdx, pdx-1])
0.000385
def parse(cls, datestr):
    """Parse a <DATE_VALUE> string and make a :py:class:`DateValue`
    instance out of it.

    :param str datestr: String with GEDCOM date, range, period, etc.
    """
    # some apps generate DATE records without any value, which is
    # non-standard, return empty DateValue for those
    if not datestr:
        return cls()
    for regex, tmpl in DATES:
        m = regex.match(datestr)
        if m is not None:
            groups = {}
            for key, val in m.groupdict().items():
                if key != 'phrase':
                    val = CalendarDate.parse(val)
                groups[key] = val
            return cls(tmpl, groups)
    # if cannot parse string assume it is a phrase
    return cls("($phrase)", dict(phrase=datestr))
0.002342
def get_code(self):
    """Returns code representation of value of widget"""
    selection = self.GetSelection()
    if selection == wx.NOT_FOUND:
        selection = 0

    # Return code string
    return self.styles[selection][1]
0.007782
def query(self, sql, *args, **kwargs):
    """Executes an SQL SELECT query, returning a result set as a Statement object.

    :param sql: query to execute
    :param args: parameters iterable
    :param kwargs: parameters iterable
    :return: result set as a Statement object
    :rtype: pydbal.statement.Statement
    """
    self.ensure_connected()
    stmt = Statement(self)
    stmt.execute(sql, *args, **kwargs)
    return stmt
0.006276
def press(self):
    '''
    press key via name or key code. Supported key name includes:
    home, back, left, right, up, down, center, menu, search, enter,
    delete(or del), recent(recent apps), volume_up, volume_down,
    volume_mute, camera, power.
    Usage:
    d.press.back()  # press back key
    d.press.menu()  # press home key
    d.press(89)     # press keycode
    '''
    @param_to_property(
        key=["home", "back", "left", "right", "up", "down", "center",
             "menu", "search", "enter", "delete", "del", "recent",
             "volume_up", "volume_down", "volume_mute", "camera", "power"]
    )
    def _press(key, meta=None):
        if isinstance(key, int):
            return self.server.jsonrpc.pressKeyCode(key, meta) if meta else self.server.jsonrpc.pressKeyCode(key)
        else:
            return self.server.jsonrpc.pressKey(str(key))
    return _press
0.003067
def generate_association_rules(patterns, confidence_threshold):
    """
    Given a set of frequent itemsets, return a dict
    of association rules in the form
    {(left): ((right), confidence)}
    """
    rules = {}
    for itemset in patterns.keys():
        upper_support = patterns[itemset]

        for i in range(1, len(itemset)):
            for antecedent in itertools.combinations(itemset, i):
                antecedent = tuple(sorted(antecedent))
                consequent = tuple(sorted(set(itemset) - set(antecedent)))

                if antecedent in patterns:
                    lower_support = patterns[antecedent]
                    confidence = float(upper_support) / lower_support

                    if confidence >= confidence_threshold:
                        rules[antecedent] = (consequent, confidence)

    return rules
0.001172
def auto_decode(data):
    # type: (bytes) -> Text
    """Check a bytes string for a BOM to correctly detect the encoding

    Fallback to locale.getpreferredencoding(False) like open() on Python3"""
    for bom, encoding in BOMS:
        if data.startswith(bom):
            return data[len(bom):].decode(encoding)
    # Lets check the first two lines as in PEP263
    for line in data.split(b'\n')[:2]:
        if line[0:1] == b'#' and ENCODING_RE.search(line):
            encoding = ENCODING_RE.search(line).groups()[0].decode('ascii')
            return data.decode(encoding)
    return data.decode(
        locale.getpreferredencoding(False) or sys.getdefaultencoding(),
    )
0.001466
def matches(self, s):
    """Whether the pattern matches anywhere in the string s."""
    regex_matches = self.compiled_regex.search(s) is not None
    return not regex_matches if self.inverted else regex_matches
0.004717
def vector_angle_cos(u, v):
    '''
    vector_angle_cos(u, v) yields the cosine of the angle between the two
    vectors u and v. If u or v (or both) is a (d x n) matrix of n vectors,
    the result will be a length n vector of the cosines.
    '''
    u = np.asarray(u)
    v = np.asarray(v)
    return (u * v).sum(0) / np.sqrt((u ** 2).sum(0) * (v ** 2).sum(0))
0.008242
def _aodata(echo, columns, xnxq=None, final_exam=False):
    """
    Generate the data used for the POST request.

    :param echo: an int used to check that the response matches the request
    :type echo: int
    :param columns: a list of all column names
    :type columns: list
    :param xnxq: semester string
    :type xnxq: string
    :param final_exam: whether this is the final exam
    :type final_exam: bool
    :return: valid data to POST in order to get data
    """
    ao_data = [{"name": "sEcho", "value": echo},
               {"name": "iColumns", "value": len(columns)},
               {"name": "sColumns", "value": ""},
               {"name": "iDisplayStart", "value": 0},
               {"name": "iDisplayLength", "value": -1},
               ]
    if xnxq:
        if final_exam:
            ao_data.append(
                {"name": "ksrwid", "value": "000000005bf6cb6f015bfac609410d4b"})
        ao_data.append({"name": "xnxq", "value": xnxq})
    for index, value in enumerate(columns):
        ao_data.append(
            {"name": "mDataProp_{}".format(index), "value": value})
        ao_data.append(
            {"name": "bSortable_{}".format(index), "value": False})
    return urlencode({"aoData": ao_data})
0.002423
def query(self):
    """Return all start records for this dataset, grouped by the start record"""
    return self._session.query(Process).filter(Process.d_vid == self._d_vid)
0.02139
def kl_prep(self,mlt_df): """ prepare KL based parameterizations Parameters ---------- mlt_df : pandas.DataFrame a dataframe with multiplier array information Note ---- calls pyemu.helpers.setup_kl() """ if len(self.kl_props) == 0: return if self.kl_geostruct is None: self.logger.warn("kl_geostruct is None,"\ " using ExpVario with contribution=1 and a=(10.0*max(delr,delc))") kl_dist = 10.0 * float(max(self.m.dis.delr.array.max(), self.m.dis.delc.array.max())) v = pyemu.geostats.ExpVario(contribution=1.0,a=kl_dist) self.kl_geostruct = pyemu.geostats.GeoStruct(variograms=v) kl_df = mlt_df.loc[mlt_df.suffix==self.kl_suffix,:] layers = kl_df.layer.unique() #kl_dict = {l:list(kl_df.loc[kl_df.layer==l,"prefix"].unique()) for l in layers} # big assumption here - if prefix is listed more than once, use the lowest layer index #for i,l in enumerate(layers): # p = set(kl_dict[l]) # for ll in layers[i+1:]: # pp = set(kl_dict[ll]) # d = pp - p # kl_dict[ll] = list(d) kl_prefix = list(kl_df.loc[:,"prefix"]) kl_array_file = {p:m for p,m in zip(kl_df.prefix,kl_df.mlt_file)} self.logger.statement("kl_prefix: {0}".format(str(kl_prefix))) fac_file = os.path.join(self.m.model_ws, "kl.fac") self.log("calling kl_setup() with factors file {0}".format(fac_file)) kl_df = kl_setup(self.kl_num_eig,self.m.sr,self.kl_geostruct,kl_prefix, factors_file=fac_file,basis_file=fac_file+".basis.jcb", tpl_dir=self.m.model_ws) self.logger.statement("{0} kl parameters created". format(kl_df.shape[0])) self.logger.statement("kl 'pargp':{0}". format(','.join(kl_df.pargp.unique()))) self.log("calling kl_setup() with factors file {0}".format(fac_file)) kl_mlt_df = mlt_df.loc[mlt_df.suffix==self.kl_suffix] for prefix in kl_df.prefix.unique(): prefix_df = kl_df.loc[kl_df.prefix==prefix,:] in_file = os.path.split(prefix_df.loc[:,"in_file"].iloc[0])[-1] assert prefix in mlt_df.prefix.values,"{0}:{1}".format(prefix,mlt_df.prefix) mlt_df.loc[mlt_df.prefix==prefix,"pp_file"] = in_file mlt_df.loc[mlt_df.prefix==prefix,"fac_file"] = os.path.split(fac_file)[-1] print(kl_mlt_df) mlt_df.loc[mlt_df.suffix == self.kl_suffix, "tpl_file"] = np.NaN self.par_dfs[self.kl_suffix] = kl_df
0.012608
def wait_for_mouse_move_from(self, origin_x, origin_y):
    """
    Wait for the mouse to move from a location. This function will block
    until the condition has been satisfied.

    :param origin_x: the X position you expect the mouse to move from
    :param origin_y: the Y position you expect the mouse to move from
    """
    _libxdo.xdo_wait_for_mouse_move_from(self._xdo, origin_x, origin_y)
0.004651
def add_file_path_in_work_tree(self, path, work_tree, verbose=True):
    """
    Add a new file as blob in the storage and add its tree entry into the index.
    """
    args = ['--work-tree', work_tree, 'add', '-f']
    if verbose:
        args.append('--verbose')
    args.append(path)

    self.command_exec(args, show_output=verbose)
0.008152
def ret_list_minions(self):
    '''
    Return minions that match via list
    '''
    tgt = _tgt_set(self.tgt)
    return self._ret_minions(tgt.intersection)
0.011236
def color(x, y):
    """triangles.

    Colors:
    - http://paletton.com/#uid=70l150klllletuehUpNoMgTsdcs shade 2
    """
    if (x-4) > (y-4) and -(y-4) <= (x-4):  # right
        return "#CDB95B"
    elif (x-4) > (y-4) and -(y-4) > (x-4):  # top
        return "#CD845B"
    elif (x-4) <= (y-4) and -(y-4) <= (x-4):  # bottom
        return "#57488E"
    elif (x-4) <= (y-4) and -(y-4) > (x-4):  # left
        return "#3B8772"
    # should not happen
    return "black"
0.001988
def get_benchmark_returns(symbol):
    """
    Get a Series of benchmark returns from IEX associated with `symbol`.
    Default is `SPY`.

    Parameters
    ----------
    symbol : str
        Benchmark symbol for which we're getting the returns.

    The data is provided by IEX (https://iextrading.com/), and we can
    get up to 5 years worth of data.
    """
    r = requests.get(
        'https://api.iextrading.com/1.0/stock/{}/chart/5y'.format(symbol)
    )
    data = r.json()

    df = pd.DataFrame(data)

    df.index = pd.DatetimeIndex(df['date'])
    df = df['close']

    return df.sort_index().tz_localize('UTC').pct_change(1).iloc[1:]
0.001538
def on_change_plot_cursor(self,event): """ If mouse is over data point making it selectable change the shape of the cursor @param: event -> the wx Mouseevent for that click """ if not self.xdata or not self.ydata: return pos=event.GetPosition() width, height = self.canvas.get_width_height() pos[1] = height - pos[1] xpick_data,ypick_data = pos xdata_org = self.xdata ydata_org = self.ydata data_corrected = self.map.transData.transform(vstack([xdata_org,ydata_org]).T) xdata,ydata = data_corrected.T xdata = list(map(float,xdata)) ydata = list(map(float,ydata)) e = 4e0 if self.plot_setting == "Zoom": self.canvas.SetCursor(wx.Cursor(wx.CURSOR_CROSS)) else: self.canvas.SetCursor(wx.Cursor(wx.CURSOR_ARROW)) for i,(x,y) in enumerate(zip(xdata,ydata)): if 0 < sqrt((x-xpick_data)**2. + (y-ypick_data)**2.) < e: self.canvas.SetCursor(wx.Cursor(wx.CURSOR_HAND)) break event.Skip()
0.01355
def group(self, groupId):
    """ gets a group based on its ID """
    url = "%s/%s" % (self.root, groupId)
    return Group(url=url,
                 securityHandler=self._securityHandler,
                 proxy_url=self._proxy_url,
                 proxy_port=self._proxy_port,
                 initalize=False)
0.005587
def p_For(p):
    '''
    For : FOR Expression IN Expression COLON Terminator Block
        | FOR Expression COMMA Expression IN Expression COLON Terminator Block
    '''
    if len(p) <= 8:
        p[0] = For(p[2], None, p[4], p[6], p[7])
    else:
        p[0] = For(p[2], p[4], p[6], p[8], p[9])
0.003356
def create_features(bam_in, loci_file, reference, out_dir): """ Use feature extraction module from CoRaL """ lenvec_plus = op.join(out_dir, 'genomic_lenvec.plus') lenvec_minus = op.join(out_dir, 'genomic_lenvec.minus') compute_genomic_cmd = ("compute_genomic_lenvectors " "{bam_in} {lenvec_plus} " "{lenvec_minus} " "{min_len} " "{max_len} ") index_genomic_cmd = ("index_genomic_lenvectors " "{lenvec} ") genomic_lenvec = op.join(out_dir, 'genomic_lenvec') feat_len_file = op.join(out_dir, 'feat_lengths.txt') compute_locus_cmd = ("compute_locus_lenvectors " "{loci_file} " "{genomic_lenvec} " "{min_len} " "{max_len} " "> {feat_len_file}") cov_S_file = op.join(out_dir, 'loci.cov_anti') coverage_anti_cmd = ("coverageBed -S -counts -b " "{bam_in} -a {loci_file} " "> {cov_S_file}") feat_posentropy = op.join(out_dir, 'feat_posentropy.txt') entropy_cmd = ("compute_locus_entropy.rb " "{counts_reads} " "> {feat_posentropy}") with utils.chdir(out_dir): run(compute_genomic_cmd.format(min_len=min_trimmed_read_len, max_len=max_trimmed_read_len, **locals()), "Run compute_genomic") run(index_genomic_cmd.format(lenvec=lenvec_plus), "Run index in plus") run(index_genomic_cmd.format(lenvec=lenvec_minus), "Run index in minus") run(compute_locus_cmd.format(min_len=min_trimmed_read_len, max_len=max_trimmed_read_len, **locals()), "Run compute locus") run(coverage_anti_cmd.format(**locals()), "Run coverage antisense") feat_antisense = _order_antisense_column(cov_S_file, min_trimmed_read_len) counts_reads = _reads_per_position(bam_in, loci_file, out_dir) run(entropy_cmd.format(**locals()), "Run entropy") rnafold = calculate_structure(loci_file, reference)
0.002343
def ParseAttributes(self, problems): """Parse all attributes, calling problems as needed. Return True if all of the values are valid. """ if util.IsEmpty(self.shape_id): problems.MissingValue('shape_id') return try: if not isinstance(self.shape_pt_sequence, int): self.shape_pt_sequence = \ util.NonNegIntStringToInt(self.shape_pt_sequence, problems) elif self.shape_pt_sequence < 0: problems.InvalidValue('shape_pt_sequence', self.shape_pt_sequence, 'Value should be a number (0 or higher)') except (TypeError, ValueError): problems.InvalidValue('shape_pt_sequence', self.shape_pt_sequence, 'Value should be a number (0 or higher)') return try: if not isinstance(self.shape_pt_lat, (int, float)): self.shape_pt_lat = util.FloatStringToFloat(self.shape_pt_lat, problems) if abs(self.shape_pt_lat) > 90.0: problems.InvalidValue('shape_pt_lat', self.shape_pt_lat) return except (TypeError, ValueError): problems.InvalidValue('shape_pt_lat', self.shape_pt_lat) return try: if not isinstance(self.shape_pt_lon, (int, float)): self.shape_pt_lon = util.FloatStringToFloat(self.shape_pt_lon, problems) if abs(self.shape_pt_lon) > 180.0: problems.InvalidValue('shape_pt_lon', self.shape_pt_lon) return except (TypeError, ValueError): problems.InvalidValue('shape_pt_lon', self.shape_pt_lon) return if abs(self.shape_pt_lat) < 1.0 and abs(self.shape_pt_lon) < 1.0: problems.InvalidValue('shape_pt_lat', self.shape_pt_lat, 'Point location too close to 0, 0, which means ' 'that it\'s probably an incorrect location.', type=problems_module.TYPE_WARNING) return if self.shape_dist_traveled == '': self.shape_dist_traveled = None if (self.shape_dist_traveled is not None and not isinstance(self.shape_dist_traveled, (int, float))): try: self.shape_dist_traveled = \ util.FloatStringToFloat(self.shape_dist_traveled, problems) except (TypeError, ValueError): problems.InvalidValue('shape_dist_traveled', self.shape_dist_traveled, 'This value should be a positive number.') return if self.shape_dist_traveled is not None and self.shape_dist_traveled < 0: problems.InvalidValue('shape_dist_traveled', self.shape_dist_traveled, 'This value should be a positive number.') return return True
0.009276
def update_metadata_statement(self, metadata_statement, receiver='',
                              federation=None, context=''):
    """
    Update a metadata statement by:

    * adding signed metadata statements or uris pointing to signed
      metadata statements.
    * adding the entities signing keys
    * create metadata statements one per signed metadata statement or uri
      sign these and add them to the metadata statement

    :param metadata_statement: A :py:class:`fedoidcmsg.MetadataStatement` instance
    :param receiver: The intended receiver of the metadata statement
    :param federation:
    :param context:
    :return: An augmented metadata statement
    """
    self.add_sms_spec_to_request(metadata_statement, federation=federation,
                                 context=context)
    self.add_signing_keys(metadata_statement)
    metadata_statement = self.self_sign(metadata_statement, receiver)
    # These are unprotected here so can as well be removed
    del metadata_statement['signing_keys']
    return metadata_statement
0.002588
def proportions( self, axis=None, weighted=True, include_transforms_for_dims=None, include_mr_cat=False, prune=False, ): """Return percentage values for cube as `numpy.ndarray`. This function calculates the proportions across the selected axis of a crunch cube. For most variable types, it means the value divided by the margin value. For a multiple-response variable, the value is divided by the sum of selected and non-selected slices. *axis* (int): base axis of proportions calculation. If no axis is provided, calculations are done across the entire table. *weighted* (bool): Specifies weighted or non-weighted proportions. *include_transforms_for_dims* (list): Also include headings and subtotals transformations for the provided dimensions. If the dimensions have the transformations, they'll be included in the resulting numpy array. If the dimensions don't have the transformations, nothing will happen (the result will be the same as if the argument weren't provided). *include_transforms_for_dims* (list): Include headers and subtotals (H&S) across various dimensions. The dimensions are provided as list elements. For example: "include_transforms_for_dims=[0, 1]" instructs the CrunchCube to return H&S for both rows and columns (if it's a 2D cube). *include_mr_cat* (bool): Include MR categories. *prune* (bool): Instructs the CrunchCube to prune empty rows/cols. Emptiness is determined by the state of the margin (if it's either 0 or nan at certain index). If it is, the corresponding row/col is not included in the result. Example 1:: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.proportions() np.array([ [0.3333333, 0.1333333], [0.3333333, 0.2000000], ]) Example 2:: >>> cube = CrunchCube(fixt_cat_x_cat) np.array([ [5, 2], [5, 3], ]) >>> cube.proportions(axis=0) np.array([ [0.5, 0.4], [0.5, 0.6], ]) """ # Calculate numerator from table (include all H&S dimensions). table = self._measure(weighted).raw_cube_array num = self._apply_subtotals( self._apply_missings(table), include_transforms_for_dims ) proportions = num / self._denominator( weighted, include_transforms_for_dims, axis ) if not include_mr_cat: proportions = self._drop_mr_cat_dims(proportions) # Apply correct mask (based on the as_array shape) arr = self.as_array( prune=prune, include_transforms_for_dims=include_transforms_for_dims ) if isinstance(arr, np.ma.core.MaskedArray): proportions = np.ma.masked_array(proportions, arr.mask) return proportions
0.001258
def from_nid(cls, lib, nid):
    """
    Instantiate a new :py:class:`_EllipticCurve` associated with the given
    OpenSSL NID.

    :param lib: The OpenSSL library binding object.

    :param nid: The OpenSSL NID the resulting curve object will represent.
        This must be a curve NID (and not, for example, a hash NID) or
        subsequent operations will fail in unpredictable ways.
    :type nid: :py:class:`int`

    :return: The curve object.
    """
    return cls(lib, nid, _ffi.string(lib.OBJ_nid2sn(nid)).decode("ascii"))
0.003442
def cudnnCreateTensorDescriptor():
    """
    Create a Tensor descriptor object.

    Allocates a cudnnTensorDescriptor_t structure and returns a pointer to it.

    Returns
    -------
    tensor_descriptor : int
        Tensor descriptor.
    """
    tensor = ctypes.c_void_p()
    status = _libcudnn.cudnnCreateTensorDescriptor(ctypes.byref(tensor))
    cudnnCheckStatus(status)
    return tensor.value
0.002457
def stop(self, labels=None):
    """Stop specified timer(s).

    Parameters
    ----------
    labels : string or list, optional (default None)
      Specify the label(s) of the timer(s) to be stopped. If it is
      ``None``, stop the default timer with label specified by the
      ``dfltlbl`` parameter of :meth:`__init__`. If it is equal to the
      string specified by the ``alllbl`` parameter of :meth:`__init__`,
      stop all timers.
    """
    # Get current time
    t = timer()
    # Default label is self.dfltlbl
    if labels is None:
        labels = self.dfltlbl
    # All timers are affected if label is equal to self.alllbl,
    # otherwise only the timer(s) specified by label
    if labels == self.alllbl:
        labels = self.t0.keys()
    elif not isinstance(labels, (list, tuple)):
        labels = [labels,]
    # Iterate over specified label(s)
    for lbl in labels:
        if lbl not in self.t0:
            raise KeyError('Unrecognized timer key %s' % lbl)
        # If self.t0[lbl] is None, the corresponding timer is
        # already stopped, so no action is required
        if self.t0[lbl] is not None:
            # Increment time accumulator from the elapsed time
            # since most recent start call
            self.td[lbl] += t - self.t0[lbl]
            # Set start time to None to indicate timer is not running
            self.t0[lbl] = None
0.001967
def _get_node(name: str, args: str):
    """Get node from object name and arg string

    Not Used. Left for future reference purpose.
    """
    obj = get_object(name)
    args = ast.literal_eval(args)
    if not isinstance(args, tuple):
        args = (args,)
    return obj.node(*args)
0.003448
def set_courses(self, course_ids=None):
    """Sets the courses.

    arg:    courseIds (osid.id.Id): the course Ids
    raise:  INVALID_ARGUMENT - courseIds is invalid
    raise:  NullArgument - courseIds is null
    raise:  NoAccess - metadata.is_read_only() is true
    compliance: mandatory - This method must be implemented.
    """
    if course_ids is None:
        raise NullArgument()
    metadata = Metadata(**settings.METADATA['course_ids'])
    if metadata.is_read_only():
        raise NoAccess()
    if self._is_valid_input(course_ids, metadata, array=True):
        for course_id in course_ids:
            self._my_map['courseIds'].append(str(course_id))
    else:
        raise InvalidArgument
0.002567
def bulk_remove(self, named_graph, add, size=DEFAULT_CHUNK_SIZE):
    """
    Remove batches of statements in n-sized chunks.
    """
    return self.bulk_update(named_graph, add, size, is_add=False)
0.009302
def show_raslog_output_show_all_raslog_number_of_entries(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    show_raslog = ET.Element("show_raslog")
    config = show_raslog
    output = ET.SubElement(show_raslog, "output")
    show_all_raslog = ET.SubElement(output, "show-all-raslog")
    number_of_entries = ET.SubElement(show_all_raslog, "number-of-entries")
    number_of_entries.text = kwargs.pop('number_of_entries')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
0.003401
def get_parent_label(self, treepos):
    """Given the treeposition of a node, return the label of its parent.
    Returns None, if the tree has no parent.
    """
    parent_pos = self.get_parent_treepos(treepos)
    if parent_pos is not None:
        parent = self.dgtree[parent_pos]
        return parent.label()
    else:
        return None
0.005263
def get_locale_with_proxy(proxy):
    """Given a Proxy, returns the Locale

    This assumes that instantiating a dlkit.mongo.locale.objects.Locale
    without constructor arguments will return the default Locale.
    """
    from .locale.objects import Locale
    if proxy is not None:
        locale = proxy.get_locale()
        if locale is not None:
            return locale
    return Locale()
0.004854
def splitBy(data, num):
    """ Split a list into a list of lists, each of length at most num """
    return [data[i:i + num] for i in range(0, len(data), num)]
0.007937
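A quick example of the chunking behaviour (the sample list and chunk size are arbitrary):

# Split a 7-element list into chunks of 3; the last chunk keeps the remainder.
print(splitBy([1, 2, 3, 4, 5, 6, 7], 3))   # [[1, 2, 3], [4, 5, 6], [7]]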
def read_from_file( self, filename, negative_occupancies='warn' ): """ Reads the projected wavefunction character of each band from a VASP PROCAR file. Args: filename (str): Filename of the PROCAR file. negative_occupancies (:obj:Str, optional): Sets the behaviour for handling negative occupancies. Default is `warn`. Returns: None Note: Valid options for `negative_occupancies` are: `warn` (default): Warn that some partial occupancies are negative, but do not alter any values. `raise`: Raise an AttributeError. `ignore`: Do nothing. `zero`: Negative partial occupancies will be set to zero. """ valid_negative_occupancies = [ 'warn', 'raise', 'ignore', 'zero' ] if negative_occupancies not in valid_negative_occupancies: raise ValueError( '"{}" is not a valid value for the keyword `negative_occupancies`.'.format( negative_occupancies ) ) with open( filename, 'r' ) as file_in: file_in.readline() self.number_of_k_points, self.number_of_bands, self.number_of_ions = [ int( f ) for f in get_numbers_from_string( file_in.readline() ) ] self.read_in = file_in.read() self.parse_k_points() self.parse_bands() self.parse_occupancy() if np.any( self.occupancy[:,1] < 0 ): # Handle negative occupancies if negative_occupancies == 'warn': warnings.warn( "One or more occupancies in your PROCAR file are negative." ) elif negative_occupancies == 'raise': raise ValueError( "One or more occupancies in your PROCAR file are negative." ) elif negative_occupancies == 'zero': self.occupancy[ self.occupancy < 0 ] = 0.0 self.parse_projections() self.sanity_check() self.read_in = None if self.calculation[ 'spin_polarised' ]: self.data = self.projection_data.reshape( self.spin_channels, self.number_of_k_points, self.number_of_bands, self.number_of_ions + 1, self.number_of_projections )[:,:,:,:,1:].swapaxes( 0, 1).swapaxes( 1, 2 ) else: self.data = self.projection_data.reshape( self.number_of_k_points, self.number_of_bands, self.spin_channels, self.number_of_ions + 1, self.number_of_projections )[:,:,:,:,1:]
0.022745
def powered_up(self): """ Returns True whether the card is "powered up". """ if not self.data.scripts.powered_up: return False for script in self.data.scripts.powered_up: if not script.check(self): return False return True
0.045082
def _valid_baremetal_port(port):
    """Check if port is a baremetal port with exactly one security group"""
    if port.get(portbindings.VNIC_TYPE) != portbindings.VNIC_BAREMETAL:
        return False
    sgs = port.get('security_groups', [])
    if len(sgs) == 0:
        # Nothing to do
        return False
    if len(sgs) > 1:
        LOG.warning('SG provisioning failed for %(port)s. Only one '
                    'SG may be applied per port.',
                    {'port': port['id']})
        return False
    return True
0.003279
def Star(inner_rule, loc=None): """ A rule that accepts a sequence of tokens satisfying ``inner_rule`` zero or more times, and returns the returned values in a :class:`list`. """ @llrule(loc, lambda parser: []) def rule(parser): results = [] while True: data = parser._save() result = inner_rule(parser) if result is unmatched: parser._restore(data, rule=inner_rule) return results results.append(result) return rule
0.003704
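A standalone sketch of the zero-or-more pattern that Star builds; the parser framework details (llrule, _save/_restore, the unmatched sentinel) are replaced here with a plain (value, position) protocol for illustration:

def star(inner):
    # Repeatedly apply `inner` until it fails, collecting its results.
    def rule(tokens, pos):
        results = []
        while True:
            out = inner(tokens, pos)
            if out is None:          # stand-in for the `unmatched` sentinel
                return results, pos
            value, pos = out
            results.append(value)
    return rule

# Example: match a run of digit tokens from the start of a token list.
digit = lambda toks, i: (toks[i], i + 1) if i < len(toks) and toks[i].isdigit() else None
print(star(digit)(["1", "2", "x"], 0))   # (['1', '2'], 2)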
def _normalize_port(scheme, port): """Return port if it is not default port, else None. >>> _normalize_port('http', '80') >>> _normalize_port('http', '8080') '8080' """ if not scheme: return port if port and port != DEFAULT_PORT[scheme]: return port
0.00339
def get_hash(input_string): """ Return the hash of the movie depending on the input string. If the input string looks like a symbolic link to a movie in a Kolekto tree, return its movies hash, else, return the input directly in lowercase. """ # Check if the input looks like a link to a movie: if os.path.islink(input_string): directory, movie_hash = os.path.split(os.readlink(input_string)) input_string = movie_hash return input_string.lower()
0.002033
def first_produced_mesh(self): """The first produced mesh. :return: the first produced mesh :rtype: knittingpattern.Mesh.Mesh :raises IndexError: if no mesh is produced .. seealso:: :attr:`number_of_produced_meshes` """ for instruction in self.instructions: if instruction.produces_meshes(): return instruction.first_produced_mesh raise IndexError("{} produces no meshes".format(self))
0.004175
def create_model(schema, collection, class_name=None): """ Main entry point to creating a new mongothon model. Both schema and Pymongo collection objects must be provided. Returns a new class which can be used as a model class. The class name of the model class by default is inferred from the provided collection (converted to camel case). Optionally, a class_name argument can be provided to override this. """ if not class_name: class_name = camelize(str(collection.name)) model_class = type(class_name, (Model,), dict(schema=schema, _collection_factory=staticmethod(lambda: collection))) # Since we are dynamically creating this class here, we modify __module__ on the # created class to point back to the module from which `create_model` was called model_class.__module__ = _module_name_from_previous_frame(1) return model_class
0.004202
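A hedged usage sketch; the Schema constructor, the Model save() call, and the connection details below are assumptions about the surrounding library rather than verified API:

# from pymongo import MongoClient
# schema = Schema({'name': {'type': str}})      # illustrative schema spec
# collection = MongoClient().mydb.users         # any pymongo collection
# User = create_model(schema, collection)       # class name inferred as 'Users'
# User({'name': 'Ada'}).save()                  # assumed Model behaviour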
def purge(self, queue, nowait=True, ticket=None, cb=None): ''' Purge all messages in a queue. ''' nowait = nowait and self.allow_nowait() and not cb args = Writer() args.write_short(ticket or self.default_ticket).\ write_shortstr(queue).\ write_bit(nowait) self.send_frame(MethodFrame(self.channel_id, 50, 30, args)) if not nowait: self._purge_cb.append(cb) return self.channel.add_synchronous_cb(self._recv_purge_ok)
0.003766
def _consolidate_auth(ssh_password=None,
                      ssh_pkey=None,
                      ssh_pkey_password=None,
                      allow_agent=True,
                      host_pkey_directories=None,
                      logger=None):
    """
    Make sure authentication information is in place.

    ``ssh_pkey`` may be of classes:
        - ``str`` - in this case it represents a private key file; public
          key will be obtained from it
        - ``paramiko.Pkey`` - it will be transparently added to loaded keys
    """
    ssh_loaded_pkeys = SSHTunnelForwarder.get_keys(
        logger=logger,
        host_pkey_directories=host_pkey_directories,
        allow_agent=allow_agent
    )

    if isinstance(ssh_pkey, string_types):
        ssh_pkey_expanded = os.path.expanduser(ssh_pkey)
        if os.path.exists(ssh_pkey_expanded):
            ssh_pkey = SSHTunnelForwarder.read_private_key_file(
                pkey_file=ssh_pkey_expanded,
                pkey_password=ssh_pkey_password or ssh_password,
                logger=logger
            )
        elif logger:
            logger.warning('Private key file not found: {0}'
                           .format(ssh_pkey))
    if isinstance(ssh_pkey, paramiko.pkey.PKey):
        ssh_loaded_pkeys.insert(0, ssh_pkey)

    if not ssh_password and not ssh_loaded_pkeys:
        raise ValueError('No password or public key available!')
    return (ssh_password, ssh_loaded_pkeys)
0.004408
def plot_volume_exposures_gross(grossed_threshold, percentile, ax=None): """ Plots outputs of compute_volume_exposures as line graphs Parameters ---------- grossed_threshold : pd.Series Series of grossed volume exposures (output of compute_volume_exposures). percentile : float Percentile to use when computing and plotting volume exposures - See full explanation in create_risk_tear_sheet """ if ax is None: ax = plt.gca() ax.plot(grossed_threshold.index, grossed_threshold, color='b', label='gross') ax.axhline(0, color='k') ax.set(title='Gross exposure to illiquidity', ylabel='{}th percentile of \n proportion of volume (%)' .format(100 * percentile)) ax.legend(frameon=True, framealpha=0.5) return ax
0.001196
def set_disk_cache(self, results, key=None): """Store result in disk cache with key matching model state.""" if not getattr(self, 'disk_cache_location', False): self.init_disk_cache() disk_cache = shelve.open(self.disk_cache_location) key = self.model.hash if key is None else key disk_cache[key] = results disk_cache.close()
0.005195
def im_open(self, *, user: str, **kwargs) -> SlackResponse: """Opens a direct message channel. Args: user (str): The user id to open a DM with. e.g. 'W1234567890' """ kwargs.update({"user": user}) return self.api_call("im.open", json=kwargs)
0.006803
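A hedged usage sketch for the method above; the client class name and the response layout are assumptions:

# client = WebClient(token='xoxb-...')            # token placeholder
# response = client.im_open(user='W1234567890')
# channel_id = response['channel']['id']          # assumed response layout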
def to_query(self): """ Returns a json-serializable representation. """ return { "geo_shape": { self.name: { "indexed_shape": { "index": self.index_name, "type": self.doc_type, "id": self.shape_id, "path": self.path } } } }
0.004386
def significance_fdr(p, alpha): """Calculate significance by controlling for the false discovery rate. This function determines which of the p-values in `p` can be considered significant. Correction for multiple comparisons is performed by controlling the false discovery rate (FDR). The FDR is the maximum fraction of p-values that are wrongly considered significant [1]_. Parameters ---------- p : array, shape (channels, channels, nfft) p-values. alpha : float Maximum false discovery rate. Returns ------- s : array, dtype=bool, shape (channels, channels, nfft) Significance of each p-value. References ---------- .. [1] Y. Benjamini, Y. Hochberg. Controlling the false discovery rate: a practical and powerful approach to multiple testing. J. Royal Stat. Soc. Series B 57(1): 289-300, 1995. """ i = np.argsort(p, axis=None) m = i.size - np.sum(np.isnan(p)) j = np.empty(p.shape, int) j.flat[i] = np.arange(1, i.size + 1) mask = p <= alpha * j / m if np.sum(mask) == 0: return mask # find largest k so that p_k <= alpha*k/m k = np.max(j[mask]) # reject all H_i for i = 0...k s = j <= k return s
0.000787
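A small worked example with synthetic p-values; the array shape and the planted effect are arbitrary:

import numpy as np

rng = np.random.default_rng(0)
p = rng.uniform(size=(4, 4, 16))        # fake p-values: 4x4 channels, 16 frequency bins
p[0, 1, :] = 1e-4                       # plant one strongly significant connection
mask = significance_fdr(p, alpha=0.05)
print(mask[0, 1].all())                 # True with this seed: the planted connection survives FDR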
def install(client, force): """Install Git hooks.""" import pkg_resources from git.index.fun import hook_path as get_hook_path for hook in HOOKS: hook_path = Path(get_hook_path(hook, client.repo.git_dir)) if hook_path.exists(): if not force: click.echo( 'Hook already exists. Skipping {0}'.format(str(hook_path)), err=True ) continue else: hook_path.unlink() # Make sure the hooks directory exists. hook_path.parent.mkdir(parents=True, exist_ok=True) Path(hook_path).write_bytes( pkg_resources.resource_string( 'renku.data', '{hook}.sh'.format(hook=hook) ) ) hook_path.chmod(hook_path.stat().st_mode | stat.S_IEXEC)
0.001166
def determine_repo_dir(template, abbreviations, clone_to_dir, checkout,
                       no_input, password=None):
    """
    Locate the repository directory from a template reference.

    Applies repository abbreviations to the template reference.
    If the template refers to a repository URL, clone it.
    If the template is a path to a local repository, use it.

    :param template: A directory containing a project template directory,
        or a URL to a git repository.
    :param abbreviations: A dictionary of repository abbreviation
        definitions.
    :param clone_to_dir: The directory to clone the repository into.
    :param checkout: The branch, tag or commit ID to checkout after clone.
    :param no_input: Prompt the user at command line for manual
        configuration?
    :param password: The password to use when extracting the repository.
    :return: A tuple containing the cookiecutter template directory, and
        a boolean describing whether that directory should be cleaned up
        after the template has been instantiated.
    :raises: `RepositoryNotFound` if a repository directory could not be found.
    """
    template = expand_abbreviations(template, abbreviations)

    if is_zip_file(template):
        unzipped_dir = unzip(
            zip_uri=template,
            is_url=is_repo_url(template),
            clone_to_dir=clone_to_dir,
            no_input=no_input,
            password=password
        )
        repository_candidates = [unzipped_dir]
        cleanup = True
    elif is_repo_url(template):
        cloned_repo = clone(
            repo_url=template,
            checkout=checkout,
            clone_to_dir=clone_to_dir,
            no_input=no_input,
        )
        repository_candidates = [cloned_repo]
        cleanup = False
    else:
        repository_candidates = [
            template,
            os.path.join(clone_to_dir, template)
        ]
        cleanup = False

    for repo_candidate in repository_candidates:
        if repository_has_cookiecutter_json(repo_candidate):
            return repo_candidate, cleanup

    raise RepositoryNotFound(
        'A valid repository for "{}" could not be found in the following '
        'locations:\n{}'.format(
            template,
            '\n'.join(repository_candidates)
        )
    )
0.00043
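A hedged usage sketch; the abbreviation map and template reference are illustrative values, not project defaults:

# abbreviations = {'gh': 'https://github.com/{0}.git'}
# repo_dir, cleanup = determine_repo_dir(
#     template='gh:audreyr/cookiecutter-pypackage',
#     abbreviations=abbreviations,
#     clone_to_dir='~/.cookiecutters',
#     checkout=None,
#     no_input=True,
# )
# # `cleanup` is True only for zip templates that were unpacked temporarily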
def _parser(result): ''' parses the output into a dictionary ''' # regexes to match _total_time = re.compile(r'total time:\s*(\d*.\d*s)') _total_execution = re.compile(r'event execution:\s*(\d*.\d*s?)') _min_response_time = re.compile(r'min:\s*(\d*.\d*ms)') _max_response_time = re.compile(r'max:\s*(\d*.\d*ms)') _avg_response_time = re.compile(r'avg:\s*(\d*.\d*ms)') _per_response_time = re.compile(r'95 percentile:\s*(\d*.\d*ms)') # extracting data total_time = re.search(_total_time, result).group(1) total_execution = re.search(_total_execution, result).group(1) min_response_time = re.search(_min_response_time, result).group(1) max_response_time = re.search(_max_response_time, result).group(1) avg_response_time = re.search(_avg_response_time, result).group(1) per_response_time = re.search(_per_response_time, result) if per_response_time is not None: per_response_time = per_response_time.group(1) # returning the data as dictionary return { 'total time': total_time, 'total execution time': total_execution, 'minimum response time': min_response_time, 'maximum response time': max_response_time, 'average response time': avg_response_time, '95 percentile': per_response_time }
0.000752
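A worked example of the sysbench-style text the regexes expect (numbers invented):

sample = '''
total time:          10.0001s
event execution:     9.9876s
min:                 1.02ms
avg:                 2.50ms
max:                 9.99ms
95 percentile:       4.20ms
'''
print(_parser(sample)['average response time'])   # '2.50ms'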
def __check_suc_cookie(self, components): ''' This is only called if we're on a known sucuri-"protected" site. As such, if we do *not* have a sucuri cloudproxy cookie, we can assume we need to do the normal WAF step-through. ''' netloc = components.netloc.lower() for cookie in self.cj: if cookie.domain_specified and (cookie.domain.lower().endswith(netloc) or (cookie.domain.lower().endswith("127.0.0.1") and ( components.path == "/sucuri_shit_3" or components.path == "/sucuri_shit_2" ))): # Allow testing if "sucuri_cloudproxy_uuid_" in cookie.name: return self.log.info("Missing cloudproxy cookie for known sucuri wrapped site. Doing a pre-emptive chromium fetch.") raise Exceptions.SucuriWrapper("WAF Shit", str(components))
0.027273
def send_build_close(params,response_url): '''send build close sends a final response (post) to the server to bring down the instance. The following must be included in params: repo_url, logfile, repo_id, secret, log_file, token ''' # Finally, package everything to send back to shub response = {"log": json.dumps(params['log_file']), "repo_url": params['repo_url'], "logfile": params['logfile'], "repo_id": params['repo_id'], "container_id": params['container_id']} body = '%s|%s|%s|%s|%s' %(params['container_id'], params['commit'], params['branch'], params['token'], params['tag']) signature = generate_header_signature(secret=params['token'], payload=body, request_type="finish") headers = {'Authorization': signature } finish = requests.post(response_url,data=response, headers=headers) bot.debug("FINISH POST TO SINGULARITY HUB ---------------------") bot.debug(finish.status_code) bot.debug(finish.reason) return finish
0.005573
def uv(self, values): """ Set the UV coordinates. Parameters -------------- values : (n, 2) float Pixel locations on a texture per- vertex """ if values is None: self._data.clear() else: self._data['uv'] = np.asanyarray(values, dtype=np.float64)
0.005814
def ls(serial=None): """ List the files on the micro:bit. If no serial object is supplied, microfs will attempt to detect the connection itself. Returns a list of the files on the connected device or raises an IOError if there's a problem. """ out, err = execute([ 'import os', 'print(os.listdir())', ], serial) if err: raise IOError(clean_error(err)) return ast.literal_eval(out.decode('utf-8'))
0.002146
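A hedged usage sketch; assumes a micro:bit is attached and can be auto-detected:

# files = ls()            # no serial object: microfs detects the device itself
# print(files)            # e.g. ['main.py', 'data.csv']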
def fill(self): """Parse all the paths (['Lcom/example/myclass/MyActivity$1;', ...]) and build a tree using the QTreeWidgetItem insertion method.""" log.debug("Fill classes tree") for idx, filename, digest, classes in self.session.get_classes(): for c in sorted(classes, key=lambda c: c.name): sig = Signature(c) path_node = self.root_path_node path = None if not sig.class_path: path = '.' if path not in path_node[0]: path_node[0][path] = ( {}, HashableQTreeWidgetItem(path_node[1])) path_node[0][path][1].setText(0, path) path_node = path_node[0][path] else: # Namespaces for path in sig.class_path: if path not in path_node[0]: path_node[0][path] = ( {}, HashableQTreeWidgetItem(path_node[1])) path_node[0][path][1].setText(0, path) path_node = path_node[0][path] # Class path_node[0][path] = ({}, HashableQTreeWidgetItem(path_node[1])) class_name = sig.class_name if idx > 0: class_name += "@%d" % idx c.current_title = class_name self._reverse_cache[path_node[0][path][1]] = (c, filename, digest) path_node[0][path][1].setText(0, class_name)
0.001778
def validate_request_table(self, request): ''' Validates that all requests have the same table name. Set the table name if it is the first request for the batch operation. request: the request to insert, update or delete entity ''' if self.batch_table: if self.get_request_table(request) != self.batch_table: raise AzureBatchValidationError(_ERROR_INCORRECT_TABLE_IN_BATCH) else: self.batch_table = self.get_request_table(request)
0.005566
def RFC3339(self): """RFC3339. `Link to RFC3339.`__ __ https://www.ietf.org/rfc/rfc3339.txt """ # get timezone offset delta_sec = time.timezone m, s = divmod(delta_sec, 60) h, m = divmod(m, 60) # timestamp format_string = "%Y-%m-%dT%H:%M:%S.%f" out = self.datetime.strftime(format_string) # timezone if delta_sec == 0.: out += "Z" else: if delta_sec > 0: sign = "+" elif delta_sec < 0: sign = "-" def as_string(num): return str(np.abs(int(num))).zfill(2) out += sign + as_string(h) + ":" + as_string(m) return out
0.002685
def get(cls, context, path, out_fp):
        """
        Streamily download a file from the connection multiplexer process in
        the controller.

        :param mitogen.core.Context context:
            Reference to the context hosting the FileService that will be used
            to fetch the file.
        :param bytes path:
            FileService registered name of the input file.
        :param out_fp:
            Writable file object on the local disk that receives the fetched
            bytes.
        :returns:
            Tuple of (`ok`, `metadata`), where `ok` is :data:`True` on
            success, or :data:`False` if the transfer was interrupted and the
            output should be discarded.

            `metadata` is a dictionary of file metadata as documented in
            :meth:`fetch`.
        """
        LOG.debug('get_file(): fetching %r from %r', path, context)
        t0 = time.time()
        recv = mitogen.core.Receiver(router=context.router)
        metadata = context.call_service(
            service_name=cls.name(),
            method_name='fetch',
            path=path,
            sender=recv.to_sender(),
        )

        received_bytes = 0
        for chunk in recv:
            s = chunk.unpickle()
            LOG.debug('get_file(%r): received %d bytes', path, len(s))
            context.call_service_async(
                service_name=cls.name(),
                method_name='acknowledge',
                size=len(s),
            ).close()
            out_fp.write(s)
            received_bytes += len(s)

        ok = received_bytes == metadata['size']
        if received_bytes < metadata['size']:
            LOG.error('get_file(%r): receiver was closed early, controller '
                      'may be shutting down, or the file was truncated '
                      'during transfer. Expected %d bytes, received %d.',
                      path, metadata['size'], received_bytes)
        elif received_bytes > metadata['size']:
            LOG.error('get_file(%r): the file appears to have grown '
                      'while transfer was in progress. Expected %d '
                      'bytes, received %d.',
                      path, metadata['size'], received_bytes)

        LOG.debug('target.get_file(): fetched %d bytes of %r from %r in %dms',
                  metadata['size'], path, context, 1000 * (time.time() - t0))
        return ok, metadata
0.000836
def sign_create_withdrawal(withdrawal_params, key_pair): """ Function to create the withdrawal request by signing the parameters necessary for withdrawal. Execution of this function is as follows:: sign_create_withdrawal(withdrawal_params=signable_params, private_key=eth_private_key) The expected return result for this function is as follows:: { 'blockchain': 'neo', 'asset_id': 'SWTH', 'amount': '100', 'timestamp': 1542090737236, 'contract_hash': 'a195c1549e7da61b8da315765a790ac7e7633b82', 'address': 'fea2b883725ef2d194c9060f606cd0a0468a2c59', 'signature': 'f66d604c0a80940bf70ce9e13c0fd47bc79de....' } :param withdrawal_params: Dictionary specifications for withdrawal from the Switcheo Smart Contract. :type withdrawal_params: dict :param key_pair: The NEO key pair to be used to sign messages for the NEO Blockchain. :type key_pair: KeyPair :return: Dictionary of parameters to be sent to the Switcheo API """ encoded_message = encode_message(withdrawal_params) create_params = withdrawal_params.copy() create_params['address'] = neo_get_scripthash_from_private_key(private_key=key_pair.PrivateKey).ToString() create_params['signature'] = sign_message(encoded_message=encoded_message, private_key_hex=private_key_to_hex(key_pair=key_pair)) return create_params
0.004707
def export_configuration_generator(self, sql, sql_args):
        """
        Generator for :class:`meteorpi_model.ExportConfiguration`

        :param sql:
            A SQL statement which must return rows describing export configurations
        :param sql_args:
            Any variables required to populate the query provided in 'sql'
        :return:
            A list of :class:`meteorpi_model.ExportConfiguration` instances built from the supplied SQL,
            with any opened cursors closed on completion. (Despite the name, results are accumulated
            and returned in full rather than yielded lazily.)
        """
        self.con.execute(sql, sql_args)
        results = self.con.fetchall()
        output = []
        for result in results:
            if result['exportType'] == "observation":
                search = mp.ObservationSearch.from_dict(json.loads(result['searchString']))
            elif result['exportType'] == "file":
                search = mp.FileRecordSearch.from_dict(json.loads(result['searchString']))
            else:
                search = mp.ObservatoryMetadataSearch.from_dict(json.loads(result['searchString']))
            conf = mp.ExportConfiguration(target_url=result['targetURL'], user_id=result['targetUser'],
                                          password=result['targetPassword'], search=search,
                                          name=result['exportName'], description=result['description'],
                                          enabled=result['active'],
                                          config_id=result['exportConfigId'])
            output.append(conf)
        return output
0.007185
def replace_namespaced_cron_job(self, name, namespace, body, **kwargs): # noqa: E501 """replace_namespaced_cron_job # noqa: E501 replace the specified CronJob # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.replace_namespaced_cron_job(name, namespace, body, async_req=True) >>> result = thread.get() :param async_req bool :param str name: name of the CronJob (required) :param str namespace: object name and auth scope, such as for teams and projects (required) :param V2alpha1CronJob body: (required) :param str pretty: If 'true', then the output is pretty printed. :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed :return: V2alpha1CronJob If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.replace_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501 else: (data) = self.replace_namespaced_cron_job_with_http_info(name, namespace, body, **kwargs) # noqa: E501 return data
0.0013
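A hedged usage sketch for the generated client method; the client/config imports and the BatchV2alpha1Api class name are assumptions about the surrounding package:

# from kubernetes import client, config
# config.load_kube_config()
# api = client.BatchV2alpha1Api()                        # assumed API class for v2alpha1 CronJobs
# job = api.read_namespaced_cron_job('nightly', 'default')
# job.spec.schedule = '0 3 * * *'                        # bump the schedule
# api.replace_namespaced_cron_job('nightly', 'default', job)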
def _prune_fields(field_dict, only): """Filter fields data **in place** with `only` list. Example:: self._prune_fields(field_dict, ['slug', 'text']) self._prune_fields(field_dict, [MyModel.slug]) """ fields = [(isinstance(f, str) and f or f.name) for f in only] for f in list(field_dict.keys()): if f not in fields: field_dict.pop(f) return field_dict
0.004415
def _grow_to(self, width, height, top_tc=None): """ Grow this cell to *width* grid columns and *height* rows by expanding horizontal spans and creating continuation cells to form vertical spans. """ def vMerge_val(top_tc): if top_tc is not self: return ST_Merge.CONTINUE if height == 1: return None return ST_Merge.RESTART top_tc = self if top_tc is None else top_tc self._span_to_width(width, top_tc, vMerge_val(top_tc)) if height > 1: self._tc_below._grow_to(width, height-1, top_tc)
0.003135
def storedata(self, fieldName, values, data_type, vName, vClass): """Create and initialize a single field vdata, returning the vdata reference number. Args:: fieldName Name of the single field in the vadata to create values Sequence of values to store in the field;. Each value can itself be a sequence, in which case the field will be multivalued (all second-level sequences must be of the same length) data_type Values type (one of HC.xxx constants). All values must be of the same type vName Name of the vdata to create vClass Vdata class (string) Returns:: vdata reference number C library equivalent : VHstoredata / VHstoredatam """ # See if the field is multi-valued. nrecs = len(values) if type(values[0]) in [list, tuple]: order = len(values[0]) # Replace input list with a flattened list. newValues = [] for el in values: for e in el: newValues.append(e) values = newValues else: order = 1 n_values = nrecs * order if data_type == HC.CHAR8: buf = _C.array_byte(n_values) # Allow values to be passed as a string. # Noop if a list is passed. values = list(values) for n in range(n_values): values[n] = ord(values[n]) elif data_type in [HC.UCHAR8, HC.UINT8]: buf = _C.array_byte(n_values) elif data_type == HC.INT8: # SWIG refuses negative values here. We found that if we # pass them as byte values, it will work. buf = _C.array_int8(n_values) values = list(values) for n in range(n_values): v = values[n] if v >= 0: v &= 0x7f else: v = abs(v) & 0x7f if v: v = 256 - v else: v = 128 # -128 in 2s complement values[n] = v elif data_type == HC.INT16: buf = _C.array_int16(n_values) elif data_type == HC.UINT16: buf = _C.array_uint16(n_values) elif data_type == HC.INT32: buf = _C.array_int32(n_values) elif data_type == HC.UINT32: buf = _C.array_uint32(n_values) elif data_type == HC.FLOAT32: buf = _C.array_float32(n_values) elif data_type == HC.FLOAT64: buf = _C.array_float64(n_values) else: raise HDF4Error("storedata: illegal or unimplemented data_type") for n in range(n_values): buf[n] = values[n] if order == 1: vd = _C.VHstoredata(self._hdf_inst._id, fieldName, buf, nrecs, data_type, vName, vClass) else: vd = _C.VHstoredatam(self._hdf_inst._id, fieldName, buf, nrecs, data_type, vName, vClass, order) _checkErr('storedata', vd, 'cannot create vdata') return vd
0.000895