Columns: text — string, length 78 to 104k characters; score — float64, range 0 to 0.18.
def _reorderForPreference(themeList, preferredThemeName):
    """
    Re-order the input themeList according to the preferred theme.

    Returns None.
    """
    for theme in themeList:
        if preferredThemeName == theme.themeName:
            themeList.remove(theme)
            themeList.insert(0, theme)
            return
0.003021
def write_page(buf, xfer_offset):
    """Writes a single page. This routine assumes that memory has already
    been erased.
    """
    xfer_base = 0x08000000

    # Set mem write address
    set_address(xfer_base + xfer_offset)

    # Send DNLOAD with fw data
    __dev.ctrl_transfer(0x21, __DFU_DNLOAD, 2, __DFU_INTERFACE, buf, __TIMEOUT)

    # Execute last command
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_BUSY:
        raise Exception("DFU: write memory failed")

    # Check command state
    if get_status() != __DFU_STATE_DFU_DOWNLOAD_IDLE:
        raise Exception("DFU: write memory failed")

    if __verbose:
        print("Write: 0x%x " % (xfer_base + xfer_offset))
0.00292
def upload(self, *args, **kwargs):
    """Runs command on every job in the run."""
    for job in self.jobs:
        job.upload(*args, **kwargs)
0.014085
def timeout_two_stage(
        retries: int,
        timeout1: int,
        timeout2: int,
) -> Iterable[int]:
    """
    Timeouts generator with a two stage strategy.

    Timeouts start spaced by `timeout1`; after `retries` timeouts they
    increase to `timeout2`, which is repeated indefinitely.
    """
    for _ in range(retries):
        yield timeout1
    while True:
        yield timeout2
0.002639
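A short usage sketch for a two-stage timeout generator like the one above, shown self-contained; the retry counts and delays are illustrative assumptions.

import itertools

def timeout_two_stage(retries, timeout1, timeout2):
    # First `retries` waits are short, all later waits are long.
    for _ in range(retries):
        yield timeout1
    while True:
        yield timeout2

# First three waits are 1s, then every later wait is 5s.
waits = list(itertools.islice(timeout_two_stage(3, 1, 5), 6))
assert waits == [1, 1, 1, 5, 5, 5]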
def com_daltonmaag_check_required_fields(ufo_font):
    """Check that required fields are present in the UFO fontinfo.

    ufo2ft requires these info fields to compile a font binary:
    unitsPerEm, ascender, descender, xHeight, capHeight and familyName.
    """
    recommended_fields = []

    for field in [
        "unitsPerEm",
        "ascender",
        "descender",
        "xHeight",
        "capHeight",
        "familyName",
    ]:
        if ufo_font.info.__dict__.get("_" + field) is None:
            recommended_fields.append(field)

    if recommended_fields:
        yield FAIL, f"Required field(s) missing: {recommended_fields}"
    else:
        yield PASS, "Required fields present."
0.010989
def _proxy(self):
    """
    Generate an instance context for the instance, the context is capable of
    performing various actions. All instance actions are proxied to the context

    :returns: CertificateContext for this CertificateInstance
    :rtype: twilio.rest.preview.deployed_devices.fleet.certificate.CertificateContext
    """
    if self._context is None:
        self._context = CertificateContext(
            self._version,
            fleet_sid=self._solution['fleet_sid'],
            sid=self._solution['sid'],
        )
    return self._context
0.008091
def register(type, parser, composer, **meta):
    """Registers a parser and composer of a format.

    You can use this method to overwrite existing formats.

    :param type: The unique name of the format
    :param parser: The method to parse data as the format
    :param composer: The method to compose data as the format
    :param meta: The extra information associated with the format
    """
    return default_bank.register(type, parser, composer, **meta)
0.002155
def set_type(self):
    """
    Set the node type
    """
    if self.device_info['type'] == 'Router':
        self.node['type'] = self.device_info['model'].upper()
    else:
        self.node['type'] = self.device_info['type']
0.007843
def sg_ctc(tensor, opt):
    r"""Computes the CTC (Connectionist Temporal Classification) Loss between `tensor` and `target`.

    Args:
      tensor: A 3-D `float Tensor`.
      opt:
        target: A `Tensor` with the same length in the first dimension as the
          `tensor`. Labels. ( Dense tensor )
        name: A `string`. A name to display in the tensor board web UI.

    Returns:
      A 1-D `Tensor` with the same length in the first dimension of the `tensor`.

    For example,

    ```
    tensor = [[[2., -1., 3.], [3., 1., -2.]], [[1., -1., 2.], [3., 1., -2.]]]
    target = [[2., 1.], [2., 3.]]
    tensor.sg_ctc(target=target) => [ 4.45940781  2.43091154]
    ```
    """
    assert opt.target is not None, 'target is mandatory.'

    # default sequence length
    shape = tf.shape(tensor)
    opt += tf.sg_opt(seq_len=tf.ones((shape[0],), dtype=tf.sg_intx) * shape[1], merge=True)

    # ctc loss
    out = tf.nn.ctc_loss(opt.target.sg_to_sparse(), tensor, opt.seq_len,
                         ctc_merge_repeated=opt.merge, time_major=False)
    out = tf.identity(out, 'ctc')

    # add summary
    tf.sg_summary_loss(out, name=opt.name)

    return out
0.004292
def find_faces(self, image, draw_box=False):
    """Uses a haarcascade to detect faces inside an image.

    Args:
        image: The image.
        draw_box: If True, the image will be marked with a rectangle.

    Return:
        The faces as returned by OpenCV's detectMultiScale method for
        cascades.
    """
    frame_gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    faces = self.cascade.detectMultiScale(
        frame_gray,
        scaleFactor=1.3,
        minNeighbors=5,
        minSize=(50, 50),
        flags=0)

    if draw_box:
        for x, y, w, h in faces:
            cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    return faces
0.002571
def _negotiate_value(response):
    """Extracts the gssapi authentication token from the appropriate header"""
    if hasattr(_negotiate_value, 'regex'):
        regex = _negotiate_value.regex
    else:
        # There's no need to re-compile this EVERY time it is called. Compile
        # it once and you won't have the performance hit of the compilation.
        # (Raw string avoids the invalid-escape-sequence warning for \s.)
        regex = re.compile(r'(?:.*,)*\s*Negotiate\s*([^,]*),?', re.I)
        _negotiate_value.regex = regex

    authreq = response.headers.get('www-authenticate', None)

    if authreq:
        match_obj = regex.search(authreq)
        if match_obj:
            return match_obj.group(1)

    return None
0.004525
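The compile-once trick above, caching the pattern as an attribute on the function object itself, generalizes beyond this snippet; a minimal self-contained sketch with a hypothetical helper:

import re

def first_word(text):
    # Compile once and memoize the pattern on the function object.
    if not hasattr(first_word, 'regex'):
        first_word.regex = re.compile(r'\w+')
    match = first_word.regex.search(text)
    return match.group(0) if match else None

assert first_word('hello world') == 'hello'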
def phon2dB(loudness=None):
    """
    Loudness in phons to Sound Pressure Level (SPL) in dB using the
    ISO/FDIS 226:2003 model.

    This function needs Scipy, as ``scipy.interpolate.UnivariateSpline``
    objects are used as interpolators.

    Parameters
    ----------
    loudness :
        The loudness value in phons to be converted, or None (default) to get
        the threshold of hearing.

    Returns
    -------
    A callable that returns the SPL dB value for each given frequency in
    hertz.

    Note
    ----
    See ``phon2dB.iso226.schema`` and ``phon2dB.iso226.table`` to know the
    original frequency used for the result. The result for any other value
    is an interpolation (spline). Don't trust on values nor lower nor higher
    than the frequency limits there (20Hz and 12.5kHz) as they're not part of
    ISO226 and no value was collected to estimate them (they're just a spline
    interpolation to reach 1000dB at -30Hz and 32kHz). Likewise, the trustful
    loudness input range is from 20 to 90 phon, as written on ISO226,
    although other values aren't found by a spline interpolation but by
    using the formula on section 4.1 of ISO226.

    Hint
    ----
    The ``phon2dB.iso226.table`` also have other useful information, such as
    the threshold values in SPL dB.
    """
    from scipy.interpolate import UnivariateSpline

    table = phon2dB.iso226.table
    schema = phon2dB.iso226.schema
    freqs = [row[schema.index("freq")] for row in table]

    if loudness is None:  # Threshold levels
        spl = [row[schema.index("threshold")] for row in table]

    else:  # Curve for a specific phon value
        def get_pressure_level(freq, alpha, loudness_base, threshold):
            return 10 / alpha * math.log10(
                4.47e-3 * (10 ** (.025 * loudness) - 1.14) +
                (.4 * 10 ** ((threshold + loudness_base) / 10 - 9)) ** alpha
            ) - loudness_base + 94

        spl = [get_pressure_level(**dict(xzip(schema, args)))
               for args in table]

    interpolator = UnivariateSpline(freqs, spl, s=0)
    interpolator_low = UnivariateSpline([-30] + freqs, [1e3] + spl, s=0)
    interpolator_high = UnivariateSpline(freqs + [32000], spl + [1e3], s=0)

    @elementwise("freq", 0)
    def freq2dB_spl(freq):
        if freq < 20:
            return interpolator_low(freq).tolist()
        if freq > 12500:
            return interpolator_high(freq).tolist()
        return interpolator(freq).tolist()

    return freq2dB_spl
0.008102
def ID_colored_tube(color):
    """Look up the inner diameter of Ismatec 3-stop tubing given its color code.

    :param color: Color of the 3-stop tubing
    :type color: string

    :returns: Inner diameter of the 3-stop tubing (mm)
    :rtype: float

    :Examples:

    >>> from aguaclara.research.peristaltic_pump import ID_colored_tube
    >>> from aguaclara.core.units import unit_registry as u
    >>> ID_colored_tube("yellow-blue")
    <Quantity(1.52, 'millimeter')>
    >>> ID_colored_tube("orange-yellow")
    <Quantity(0.51, 'millimeter')>
    >>> ID_colored_tube("purple-white")
    <Quantity(2.79, 'millimeter')>
    """
    tubing_data_path = os.path.join(os.path.dirname(__file__), "data",
                                    "3_stop_tubing.txt")
    df = pd.read_csv(tubing_data_path, delimiter='\t')
    idx = df["Color"] == color
    return df[idx]['Diameter (mm)'].values[0] * u.mm
0.003436
def resize(self, shape, format=None):
    """ Set the render-buffer size and format

    Parameters
    ----------
    shape : tuple of integers
        New shape in yx order. A render buffer is always 2D. For
        symmetry with the texture class, a 3-element tuple can also
        be given, in which case the last dimension is ignored.
    format : {None, 'color', 'depth', 'stencil'}
        The buffer format. If None, the current format is maintained.
        If that is also None, the format will be set upon attaching
        it to a framebuffer. One can also specify the explicit enum:
        GL_RGB565, GL_RGBA4, GL_RGB5_A1, GL_DEPTH_COMPONENT16, or
        GL_STENCIL_INDEX8
    """
    if not self._resizeable:
        raise RuntimeError("RenderBuffer is not resizeable")
    # Check shape
    if not (isinstance(shape, tuple) and len(shape) in (2, 3)):
        raise ValueError('RenderBuffer shape must be a 2/3 element tuple')
    # Check format
    if format is None:
        format = self._format  # Use current format (may be None)
    elif isinstance(format, int):
        pass  # Do not check, maybe user needs desktop GL formats
    elif isinstance(format, string_types):
        if format not in ('color', 'depth', 'stencil'):
            raise ValueError('RenderBuffer format must be "color", "depth"'
                             ' or "stencil", not %r' % format)
    else:
        raise ValueError('Invalid RenderBuffer format: %r' % format)
    # Store and send GLIR command
    self._shape = tuple(shape[:2])
    self._format = format
    if self._format is not None:
        self._glir.command('SIZE', self._id, self._shape, self._format)
0.002737
def datetime_str_to_timestamp(datetime_str):
    '''
    '2018-01-01 00:00:00'  (str) --> 1514736000

    :param str datetime_str: datetime string
    :return: unix timestamp (int) or None
    :rtype: int or None
    '''
    try:
        dtf = DTFormat()
        struct_time = time.strptime(datetime_str, dtf.datetime_format)
        return time.mktime(struct_time)
    except:
        return None
0.006742
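For reference, the same conversion written without the project-specific DTFormat helper above; the explicit format string here is an assumption matching the docstring's example, and the result depends on the local timezone.

import time

def datetime_str_to_timestamp(datetime_str, fmt="%Y-%m-%d %H:%M:%S"):
    """Parse a local datetime string and return a unix timestamp, or None."""
    try:
        return int(time.mktime(time.strptime(datetime_str, fmt)))
    except ValueError:
        return None

# 2018-01-01 00:00:00 in the local timezone of the machine running this.
print(datetime_str_to_timestamp("2018-01-01 00:00:00"))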
def add_prefix(self, ncname: str) -> None:
    """ Look up ncname and add it to the prefix map if necessary

    @param ncname: name to add
    """
    if ncname not in self.prefixmap:
        uri = cu.expand_uri(ncname + ':', self.curi_maps)
        if uri and '://' in uri:
            self.prefixmap[ncname] = uri
        else:
            print(f"Unrecognized prefix: {ncname}", file=sys.stderr)
            self.prefixmap[ncname] = f"http://example.org/unknown/{ncname}/"
0.005814
def var(self):
    """
    Compute the variance across images.
    """
    return self._constructor(self.values.var(axis=0, keepdims=True))
0.012903
def find_shadowed(self, extra=()):
    """Find all the shadowed names.  extra is an iterable of variables
    that may be defined with `add_special` which may occur scoped.
    """
    i = self.identifiers
    return (i.declared | i.outer_undeclared) & \
           (i.declared_locally | i.declared_parameter) | \
           set(x for x in extra if i.is_declared(x))
0.007595
def create_feature_array(text, n_pad=21):
    """
    Create feature array of character and surrounding characters
    """
    n = len(text)
    n_pad_2 = int((n_pad - 1) / 2)
    text_pad = [' '] * n_pad_2 + [t for t in text] + [' '] * n_pad_2
    x_char, x_type = [], []
    for i in range(n_pad_2, n_pad_2 + n):
        char_list = text_pad[i + 1: i + n_pad_2 + 1] + \
                    list(reversed(text_pad[i - n_pad_2: i])) + \
                    [text_pad[i]]
        char_map = [CHARS_MAP.get(c, 80) for c in char_list]
        char_type = [CHAR_TYPES_MAP.get(CHAR_TYPE_FLATTEN.get(c, 'o'), 4)
                     for c in char_list]
        x_char.append(char_map)
        x_type.append(char_type)
    x_char = np.array(x_char).astype(float)
    x_type = np.array(x_type).astype(float)
    return x_char, x_type
0.002427
def _lemmatise_roman_numerals(self, form, pos=False, get_lemma_object=False):
    """ Lemmatise a word form if it is a Roman numeral

    :param form: Word to lemmatise
    :param pos: Also retrieve the POS
    :param get_lemma_object: Retrieve Lemma object instead of string representation of lemma
    """
    if estRomain(form):
        _lemma = Lemme(
            cle=form, graphie_accentuee=form, graphie=form,
            parent=self, origin=0, pos="a", modele=self.modele("inv")
        )
        yield Lemmatiseur.format_result(
            form=form, lemma=_lemma, with_pos=pos, raw_obj=get_lemma_object
        )
        if form.upper() != form:
            yield from self._lemmatise_roman_numerals(
                form.upper(), pos=pos, get_lemma_object=get_lemma_object)
0.005821
def match(self, package):
    """Match ``package`` with the requirement.

    :param package: Package to test with the requirement.
    :type package: package expression string or :class:`Package`
    :returns: ``True`` if ``package`` satisfies the requirement.
    :rtype: bool
    """
    if isinstance(package, basestring):
        from .packages import Package
        package = Package.parse(package)
    if self.name != package.name:
        return False
    if self.version_constraints and \
            package.version not in self.version_constraints:
        return False
    if self.build_options:
        if package.build_options:
            # Satisfied only if the package provides every requested option.
            return not (self.build_options - package.build_options)
        return False
    return True
0.002119
def date(self, proxy, how='median', n=500):
    """Date a proxy record

    Parameters
    ----------
    proxy : ProxyRecord
    how : str
        How to perform the dating. 'median' returns the average of the MCMC
        ensemble. 'ensemble' returns 'n' randomly selected members of the
        MCMC ensemble. Default is 'median'.
    n : int
        If 'how' is 'ensemble', the function will randomly select 'n' MCMC
        ensemble members, with replacement.

    Returns
    -------
    DatedProxyRecord
    """
    assert how in ['median', 'ensemble']
    ens_members = self.mcmcfit.n_members()
    if how == 'ensemble':
        select_idx = np.random.choice(range(ens_members), size=n, replace=True)
    out = []
    for d in proxy.data.depth.values:
        age = self.agedepth(d)
        if how == 'median':
            age = np.median(age)
        elif how == 'ensemble':
            age = age[select_idx]
        out.append(age)
    return DatedProxyRecord(proxy.data.copy(), out)
0.00554
def build_grad_matrices(V, points):
    """Build the sparse m-by-n matrices that map a coefficient set for a
    function in V to the values of dx and dy at a number m of points.
    """
    # See <https://www.allanswered.com/post/lkbkm/#zxqgk>
    mesh = V.mesh()

    bbt = BoundingBoxTree()
    bbt.build(mesh)
    dofmap = V.dofmap()
    el = V.element()
    rows = []
    cols = []
    datax = []
    datay = []
    for i, xy in enumerate(points):
        cell_id = bbt.compute_first_entity_collision(Point(*xy))
        cell = Cell(mesh, cell_id)
        coordinate_dofs = cell.get_vertex_coordinates()

        rows.append([i, i, i])
        cols.append(dofmap.cell_dofs(cell_id))

        v = el.evaluate_basis_derivatives_all(1, xy, coordinate_dofs, cell_id)
        v = v.reshape(3, 2)
        datax.append(v[:, 0])
        datay.append(v[:, 1])

    rows = numpy.concatenate(rows)
    cols = numpy.concatenate(cols)
    datax = numpy.concatenate(datax)
    datay = numpy.concatenate(datay)

    m = len(points)
    n = V.dim()
    dx_matrix = sparse.csr_matrix((datax, (rows, cols)), shape=(m, n))
    dy_matrix = sparse.csr_matrix((datay, (rows, cols)), shape=(m, n))
    return dx_matrix, dy_matrix
0.001652
def pipupdate():
    """Update all currently installed pip packages"""
    # working_set yields Distribution objects; join needs the name strings.
    packages = [d.project_name for d in pkg_resources.working_set]
    # Pass an argument list so no shell is required.
    subprocess.call(['pip', 'install', '--upgrade'] + packages)
0.00495
def extend_access_token(self, app_id, app_secret):
    """
    Extends the expiration time of a valid OAuth access token. See
    <https://developers.facebook.com/roadmap/offline-access-removal/
    #extend_token>
    """
    args = {
        "client_id": app_id,
        "client_secret": app_secret,
        "grant_type": "fb_exchange_token",
        "fb_exchange_token": self.access_token,
    }
    response = urllib2.urlopen(
        "https://graph.facebook.com/oauth/access_token?" +
        urllib.parse.urlencode(args)).read().decode('utf-8')
    query_str = parse_qs(response)
    if "access_token" in query_str:
        result = {"accesstoken": query_str["access_token"][0]}
        if "expires" in query_str:
            result["expire"] = query_str["expires"][0]
        return result
    else:
        response = json.loads(response)
        raise GraphAPIError(response)
0.00293
def client_ident(self):
    """
    Return the client identifier as included in many command replies.
    """
    return irc.client.NickMask.from_params(
        self.nick, self.user, self.server.servername)
0.008368
def _setup_amplification(self, fle):
    """
    If amplification data is specified then reads into memory and updates
    the required rupture and site parameters
    """
    self.amplification = AmplificationTable(fle["Amplification"],
                                            self.m_w,
                                            self.distances)
    if self.amplification.element == "Sites":
        self.REQUIRES_SITES_PARAMETERS = set(
            [self.amplification.parameter])
    elif self.amplification.element == "Rupture":
        # set the site and rupture parameters on the instance
        self.REQUIRES_SITES_PARAMETERS = set()
        self.REQUIRES_RUPTURE_PARAMETERS = (
            self.REQUIRES_RUPTURE_PARAMETERS |
            {self.amplification.parameter})
0.002364
def pvariance(self):
    """Return the population variance of DataStruct.price"""
    res = self.price.groupby(level=1
                             ).apply(lambda x: statistics.pvariance(x))
    res.name = 'pvariance'
    return res
0.013216
def get_page_objects_by_type(context, object_type):
    """
    **Arguments**

    ``object_type``
        object type

    :return selected objects
    """
    try:
        objects = context['page']['content'][object_type]
    except KeyError:
        raise template.TemplateSyntaxError(
            'wrong content type: {0:>s}'.format(object_type))
    return objects
0.005618
def get_file_ids(object):
    """Get the exposure for a particular line in the measure table"""
    import MOPdbaccess
    mysql = MOPdbaccess.connect('cfeps', 'cfhls', dbSystem='MYSQL')
    cfeps = mysql.cursor()
    sql = "SELECT file_id FROM measure WHERE provisional LIKE %s"
    cfeps.execute(sql, (object, ))
    file_ids = cfeps.fetchall()
    return file_ids
0.019444
def rtt_read(self, buffer_index, num_bytes):
    """Reads data from the RTT buffer.

    This method will read at most num_bytes bytes from the specified
    RTT buffer. The data is automatically removed from the RTT buffer.
    If there are not num_bytes bytes waiting in the RTT buffer, the
    entire contents of the RTT buffer will be read.

    Args:
      self (JLink): the ``JLink`` instance
      buffer_index (int): the index of the RTT buffer to read from
      num_bytes (int): the maximum number of bytes to read

    Returns:
      A list of bytes read from RTT.

    Raises:
      JLinkRTTException: if the underlying JLINK_RTTERMINAL_Read call fails.
    """
    buf = (ctypes.c_ubyte * num_bytes)()
    bytes_read = self._dll.JLINK_RTTERMINAL_Read(buffer_index, buf, num_bytes)

    if bytes_read < 0:
        raise errors.JLinkRTTException(bytes_read)

    return list(buf)[:bytes_read]
0.003067
def get_template(self, template_name):
    """
    Trying to get a compiled template given a template name

    :param template_name: The template name.
    :raises: - TemplateDoesNotExist if no such template exists.
             - TemplateSyntaxError if we couldn't compile the
               template using Mako syntax.
    :return: Compiled Template.
    """
    try:
        return self.template_class(self.engine.get_template(template_name))
    except mako_exceptions.TemplateLookupException as exc:
        raise TemplateDoesNotExist(exc.args)
    except mako_exceptions.CompileException as exc:
        raise TemplateSyntaxError(exc.args)
0.002845
def yml_dump(data, stream, yml_fnc=yml_fnc, **options):
    """A wrapper of yaml.safe_dump and yaml.dump.

    :param data: Some data to dump
    :param stream: a file or file-like object to dump YAML data
    """
    _is_dict = anyconfig.utils.is_dict_like(data)

    if options.get("ac_safe", False):
        options = {}
    elif not options.get("Dumper", False) and _is_dict:
        # TODO: Any other way to get its constructor?
        maybe_container = options.get("ac_dict", type(data))
        options["Dumper"] = _customized_dumper(maybe_container)

    if _is_dict:
        # Type information and the order of items are lost on dump currently.
        data = anyconfig.dicts.convert_to(data, ac_dict=dict)
        options = common.filter_from_options("ac_dict", options)

    return yml_fnc("dump", data, stream, **options)
0.001198
def check_grammar(self, ok_start_symbols=set(), out=sys.stderr):
    '''
    Check grammar for:
    - unused left-hand side nonterminals that are neither start
      symbols or listed in ok_start_symbols
    - unused right-hand side nonterminals, i.e. not tokens
    - right-recursive rules. These can slow down parsing.
    '''
    warnings = 0
    (lhs, rhs, tokens, right_recursive, dup_rhs) = self.check_sets()
    if lhs - ok_start_symbols:
        warnings += 1
        out.write("LHS symbols not used on the RHS:\n")
        out.write("  " + (', '.join(sorted(lhs)) + "\n"))
    if rhs:
        warnings += 1
        out.write("RHS symbols not used on the LHS:\n")
        out.write((', '.join(sorted(rhs))) + "\n")
    if right_recursive:
        warnings += 1
        out.write("Right recursive rules:\n")
        for rule in sorted(right_recursive):
            out.write("  %s ::= %s\n" % (rule[0], ' '.join(rule[1])))
    if dup_rhs:
        warnings += 1
        out.write("Nonterminals with the same RHS\n")
        for rhs in sorted(dup_rhs.keys()):
            out.write("  RHS: %s\n" % ' '.join(rhs))
            out.write("  LHS: %s\n" % ', '.join(dup_rhs[rhs]))
            out.write("  ---\n")
    return warnings
0.00409
def recognise(self):
    """
    Recognise the PLL case of Cube.
    """
    result = ""
    for side in "LFRB":
        for square in self.cube.get_face(side)[0]:
            for _side in "LFRB":
                if square.colour == self.cube[_side].colour:
                    result += _side
                    break
    return result
0.005249
def _cursor_position_changed(self):
    """ Updates the tip based on user cursor movement.
    """
    cursor = self._text_edit.textCursor()
    if cursor.position() <= self._start_position:
        self.hide()
    else:
        position, commas = self._find_parenthesis(self._start_position + 1)
        if position != -1:
            self.hide()
0.005222
def model(self, *args, **kwargs):
    """
    Priority of arguments: arguments passed in `kwargs` have the highest
    priority, the 'params' key in `kwargs` has lower priority than `kwargs`,
    and dictionary arguments in `args` have the least priority. Other
    arguments are ignored.

    Argument list:
        model - set model type, default value 'default';
        dx - default value '1.0e-1';
        dt - default value '1.0e-3';
        t0 - default value '0.0';
        u0 - default value '1.0e-1';
        order - default value '5';
        pumping - default value ``;
        !original_params - default value `{}`;
        !dimless_params - default value `{}`;
    """
    if 'filename' in kwargs:
        return self.modelFromFile(kwargs['filename'])

    if 'params' in kwargs:
        params = kwargs.pop('params')

    kwargs['model'] = 'default' if 'model' not in kwargs else kwargs['model']
    kwargs['original_params'] = {} if 'original_params' not in kwargs else kwargs['original_params']

    if 'R' not in kwargs['original_params']:
        kwargs['original_params']['R'] = 0.0242057488654
    if 'gamma' not in kwargs['original_params']:
        kwargs['original_params']['gamma'] = 0.0242057488654
    if 'g' not in kwargs['original_params']:
        kwargs['original_params']['g'] = 0.00162178517398
    if 'tilde_g' not in kwargs['original_params']:
        kwargs['original_params']['tilde_g'] = 0.0169440242057
    if 'gamma_R' not in kwargs['original_params']:
        kwargs['original_params']['gamma_R'] = 0.242057488654

    if kwargs.get('model') in ('1d', 'default', str(Model1D)):
        return self.fabricateModel1D(*args, **kwargs)
    elif kwargs.get('model') in ('2d', str(Model2D)):
        return self.fabricateModel2D(*args, **kwargs)
    else:
        raise Exception('Unknown model passed!')
0.003063
def confusion_matrix(links_true, links_pred, total=None):
    """Compute the confusion matrix.

    The confusion matrix is of the following form:

    +----------------------+-----------------------+----------------------+
    |                      |  Predicted Positives  | Predicted Negatives  |
    +======================+=======================+======================+
    | **True Positives**   | True Positives (TP)   | False Negatives (FN) |
    +----------------------+-----------------------+----------------------+
    | **True Negatives**   | False Positives (FP)  | True Negatives (TN)  |
    +----------------------+-----------------------+----------------------+

    The confusion matrix is an informative way to analyse a prediction. The
    matrix can be used to compute measures like precision and recall. The
    count of true positives is [0,0], false negatives is [0,1], true
    negatives is [1,1] and false positives is [1,0].

    Parameters
    ----------
    links_true: pandas.MultiIndex, pandas.DataFrame, pandas.Series
        The true (or actual) links.
    links_pred: pandas.MultiIndex, pandas.DataFrame, pandas.Series
        The predicted links.
    total: int, pandas.MultiIndex
        The count of all record pairs (both links and non-links). When the
        argument is a pandas.MultiIndex, the length of the index is used. If
        the total is None, the number of True Negatives is not computed.
        Default None.

    Returns
    -------
    numpy.array
        The confusion matrix with TP, TN, FN, FP values.

    Note
    ----
    The number of True Negatives is computed based on the total argument.
    This argument is the number of record pairs of the entire matrix.
    """
    links_true = _get_multiindex(links_true)
    links_pred = _get_multiindex(links_pred)

    tp = true_positives(links_true, links_pred)
    fp = false_positives(links_true, links_pred)
    fn = false_negatives(links_true, links_pred)

    if total is None:
        tn = numpy.nan
    else:
        tn = true_negatives(links_true, links_pred, total)

    return numpy.array([[tp, fn], [fp, tn]])
0.000469
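A minimal, self-contained sketch of the same bookkeeping using plain MultiIndex set operations; the helper functions in the snippet above come from the surrounding library, so the set arithmetic here stands in for them with made-up data.

import numpy
import pandas

links_true = pandas.MultiIndex.from_tuples([(1, 2), (3, 4)])
links_pred = pandas.MultiIndex.from_tuples([(1, 2), (5, 6)])
total = 10  # all candidate record pairs

tp = len(links_true.intersection(links_pred))
fp = len(links_pred.difference(links_true))
fn = len(links_true.difference(links_pred))
tn = total - tp - fp - fn

print(numpy.array([[tp, fn], [fp, tn]]))  # [[1 1] [1 7]]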
def build_gtapp(appname, dry_run, **kwargs):
    """Build an object that can run ScienceTools application

    Parameters
    ----------
    appname : str
        Name of the application (e.g., gtbin)
    dry_run : bool
        Print command but do not run it
    kwargs : arguments used to invoke the application

    Returns `GtApp.GtApp` object that will run the application in question
    """
    pfiles_orig = _set_pfiles(dry_run, **kwargs)
    gtapp = GtApp.GtApp(appname)
    update_gtapp(gtapp, **kwargs)
    _reset_pfiles(pfiles_orig)
    return gtapp
0.001776
def _handle_status(self, key, value):
    """Parse a status code from the attached GnuPG process.

    :raises: :exc:`~exceptions.ValueError` if the status message is unknown.
    """
    if key in ('NO_SECKEY', 'BEGIN_DECRYPTION', 'DECRYPTION_FAILED',
               'END_DECRYPTION', 'GOOD_PASSPHRASE', 'BAD_PASSPHRASE',
               'KEY_CONSIDERED'):
        pass
    elif key == 'NODATA':
        self.status = nodata(value)
    elif key == 'ENC_TO':
        key, _, _ = value.split()
        if not self.key:
            self.key = key
        self.encrypted_to.append(key)
    elif key in ('NEED_PASSPHRASE', 'MISSING_PASSPHRASE'):
        self.need_passphrase = True
    elif key == 'NEED_PASSPHRASE_SYM':
        self.need_passphrase_sym = True
    elif key == 'USERID_HINT':
        self.userid_hint = value.strip().split()
    else:
        raise ValueError("Unknown status message: %r" % key)
0.003697
def _apply_mask(self):
    """Applies the passed-in mask to the convolution matrix.

    Returns:
      w: A copy of the convolution matrix that has had the mask applied.

    Raises:
      base.IncompatibleShapeError: If the mask shape has more dimensions
        than the weight matrix.
      base.IncompatibleShapeError: If the mask and the weight matrix don't
        match on shape.
    """
    w = self._w
    w_shape = w.get_shape()
    mask_shape = self._mask.get_shape()
    if mask_shape.ndims > w_shape.ndims:
        raise base.IncompatibleShapeError(
            "Invalid mask shape: {}. Max shape: {}".format(
                mask_shape.ndims, len(self._data_format)))
    if mask_shape != w_shape[:mask_shape.ndims]:
        raise base.IncompatibleShapeError(
            "Invalid mask shape: {}. Weight shape: {}".format(
                mask_shape, w_shape))
    # TF broadcasting is a bit fragile.
    # Expand the shape of self._mask by one dim at a time to the right
    # until the rank matches `weight_shape`.
    while self._mask.get_shape().ndims < w_shape.ndims:
        self._mask = tf.expand_dims(self._mask, -1)
    # tf.Variable & tf.ResourceVariable don't support *=.
    w = w * self._mask  # pylint: disable=g-no-augmented-assignment
    return w
0.00306
def list_dms(archive, compression, cmd, verbosity, interactive):
    """List a DMS archive."""
    check_archive_ext(archive)
    return [cmd, 'v', archive]
0.012739
def get_enterprise_customer_from_catalog_id(catalog_id):
    """
    Get the enterprise customer id given an enterprise customer catalog id.
    """
    try:
        return str(EnterpriseCustomerCatalog.objects.get(pk=catalog_id).enterprise_customer.uuid)
    except EnterpriseCustomerCatalog.DoesNotExist:
        return None
0.006135
def add_material(self, material):
    """Add a material to the mesh, IF it's not already present."""
    if self.has_material(material):
        return
    self.materials.append(material)
0.009804
def ca_exists(ca_name, cacert_path=None, ca_filename=None):
    '''
    Verify whether a Certificate Authority (CA) already exists

    ca_name
        name of the CA
    cacert_path
        absolute path to ca certificates root directory
    ca_filename
        alternative filename for the CA

        .. versionadded:: 2015.5.3

    CLI Example:

    .. code-block:: bash

        salt '*' tls.ca_exists test_ca /etc/certs
    '''
    set_ca_path(cacert_path)

    if not ca_filename:
        ca_filename = '{0}_ca_cert'.format(ca_name)

    certp = '{0}/{1}/{2}.crt'.format(
        cert_base_path(),
        ca_name,
        ca_filename)
    if os.path.exists(certp):
        maybe_fix_ssl_version(ca_name,
                              cacert_path=cacert_path,
                              ca_filename=ca_filename)
        return True
    return False
0.001151
def variables(self):
    """
    Returns :class:`Variables` instance.
    """
    return Variables([(k, self._unescape(k, v), sl)
                      for k, v, sl in self._nodes_to_values()])
0.016043
def subscribe(self, request, *args, **kwargs):
    """
    Performs the subscribe action.
    """
    self.object = self.get_object()
    self.object.subscribers.add(request.user)
    messages.success(self.request, self.success_message)
    return HttpResponseRedirect(self.get_success_url())
0.006579
def _createValueObjects(self, valueList, varList, mapTable, indexMap,
                        contaminant, replaceParamFile):
    """
    Populate GSSHAPY MTValue and MTIndex Objects Method
    """
    def assign_values_to_table(value_list, layer_id):
        for i, value in enumerate(value_list):
            value = vrp(value, replaceParamFile)
            # Create MTValue object and associate with MTIndex and MapTable
            mtValue = MTValue(variable=varList[i], value=float(value))
            mtValue.index = mtIndex
            mtValue.mapTable = mapTable
            mtValue.layer_id = layer_id

            # MTContaminant handler (associate MTValue with MTContaminant)
            if contaminant:
                mtValue.contaminant = contaminant

    for row in valueList:
        # Create GSSHAPY MTIndex object and associate with IndexMap
        mtIndex = MTIndex(index=row['index'],
                          description1=row['description1'],
                          description2=row['description2'])
        mtIndex.indexMap = indexMap
        if len(np.shape(row['values'])) == 2:
            # this is for ids with multiple layers
            for layer_id, values in enumerate(row['values']):
                assign_values_to_table(values, layer_id)
        else:
            assign_values_to_table(row['values'], 0)
0.002933
def create_custom_menu(self, menu_data, matchrule):
    """
    Create a personalized (conditional) menu::

        button = [
            {
                "type": "click",
                "name": "今日歌曲",
                "key": "V1001_TODAY_MUSIC"
            },
            {
                "name": "菜单",
                "sub_button": [
                    {
                        "type": "view",
                        "name": "搜索",
                        "url": "http://www.soso.com/"
                    },
                    {
                        "type": "view",
                        "name": "视频",
                        "url": "http://v.qq.com/"
                    },
                    {
                        "type": "click",
                        "name": "赞一下我们",
                        "key": "V1001_GOOD"
                    }]
            }]
        matchrule = {
            "group_id": "2",
            "sex": "1",
            "country": "中国",
            "province": "广东",
            "city": "广州",
            "client_platform_type": "2",
            "language": "zh_CN"
        }
        client.create_custom_menu(button, matchrule)

    :param menu_data: a Python dict as shown above
    :param matchrule: a match rule as shown above
    :return: the returned JSON payload
    """
    return self.post(
        url="https://api.weixin.qq.com/cgi-bin/menu/addconditional",
        data={
            "button": menu_data,
            "matchrule": matchrule
        }
    )
0.001297
def setPositionLinkedTo(self, widgets):
    """
    Sets the widget that this popup will be linked to for positional
    changes.

    :param      widgets | <QWidget> || [<QWidget>, ..]
    """
    if type(widgets) in (list, set, tuple):
        new_widgets = list(widgets)
    else:
        new_widgets = []
        widget = widgets
        while widget:
            widget.installEventFilter(self)
            new_widgets.append(widget)
            widget = widget.parent()

    self._positionLinkedTo = new_widgets
0.006557
def build_portfolio(self, cash, max_per_note=25, min_percent=0,
                    max_percent=20, filters=None, automatically_invest=False,
                    do_not_clear_staging=False):
    """
    Returns a list of loan notes that are diversified by your min/max
    percent request and filters. One way to invest in these loan notes is
    to start an order and use add_batch to add all the loan fragments to
    them. (see examples)

    Parameters
    ----------
    cash : int
        The total amount you want to invest across a portfolio of loans
        (at least $25).
    max_per_note : int, optional
        The maximum dollar amount you want to invest per note.
        Must be a multiple of 25.
    min_percent : int, optional
        THIS IS NOT PER NOTE, but the minimum average percent of return
        for the entire portfolio.
    max_percent : int, optional
        THIS IS NOT PER NOTE, but the maximum average percent of return
        for the entire portfolio.
    filters : lendingclub.filters.*, optional
        The filters to use to search for portfolios.
    automatically_invest : boolean, optional
        If you want the tool to create an order and automatically invest
        in the portfolio that matches your filter. (default False)
    do_not_clear_staging : boolean, optional
        Similar to automatically_invest, don't do this unless you know
        what you're doing. Setting this to True stops the method from
        clearing the loan staging area before returning.

    Returns
    -------
    dict
        A dict representing a new portfolio or False if nothing was found.
        If `automatically_invest` was set to `True`, the dict will contain
        an `order_id` key with the ID of the completed investment order.

    Notes
    -----
    **The min/max_percent parameters**

    When searching for portfolios, these parameters will match a portfolio
    of loan notes which have an **AVERAGE** percent return between these
    values. If there are multiple portfolio matches, the one closest to
    the max percent will be chosen.

    Examples
    --------
    Here we want to invest $400 in a portfolio with only B, C, D and E
    grade notes with an average overall return between 17% - 19%. This is
    similar to finding a portfolio in the 'Invest' section on
    lendingclub.com::

        >>> from lendingclub import LendingClub
        >>> from lendingclub.filters import Filter
        >>> lc = LendingClub()
        >>> lc.authenticate()
        Email:[email protected]
        Password:
        True
        >>> filters = Filter()                  # Set the search filters (only B, C, D and E grade notes)
        >>> filters['grades']['C'] = True
        >>> filters['grades']['D'] = True
        >>> filters['grades']['E'] = True
        >>> lc.get_cash_balance()               # See the cash you have available for investing
        463.80000000000001
        >>> portfolio = lc.build_portfolio(400, # Invest $400 in a portfolio...
                min_percent=17.0,               # Return percent average between 17 - 19%
                max_percent=19.0,
                max_per_note=50,                # As much as $50 per note
                filters=filters)                # Search using your filters
        >>> len(portfolio['loan_fractions'])    # See how many loans are in this portfolio
        16
        >>> loans_notes = portfolio['loan_fractions']
        >>> order = lc.start_order()            # Start a new order
        >>> order.add_batch(loans_notes)        # Add the loan notes to the order
        >>> order.execute()                     # Execute the order
        1861880

    Here we do a similar search, but automatically invest the found
    portfolio. **NOTE** This does not allow you to review the portfolio
    before you invest in it.

        >>> from lendingclub import LendingClub
        >>> from lendingclub.filters import Filter
        >>> lc = LendingClub()
        >>> lc.authenticate()
        Email:[email protected]
        Password:
        True
        >>> filters = Filter({'grades': {'B': True, 'C': True, 'D': True, 'E': True}})  # Filter shorthand
        >>> lc.get_cash_balance()               # See the cash you have available for investing
        463.80000000000001
        >>> portfolio = lc.build_portfolio(400,
                min_percent=17.0,
                max_percent=19.0,
                max_per_note=50,
                filters=filters,
                automatically_invest=True)      # Same settings, except invest immediately
        >>> portfolio['order_id']               # See order ID
        1861880
    """
    assert filters is None or isinstance(filters, Filter), 'filter is not a lendingclub.filters.Filter'
    assert max_per_note >= 25, 'max_per_note must be greater than or equal to 25'

    # Set filters
    if filters:
        filter_str = filters.search_string()
    else:
        filter_str = 'default'

    # Start a new order
    self.session.clear_session_order()

    # Make request
    payload = {
        'amount': cash,
        'max_per_note': max_per_note,
        'filter': filter_str
    }
    self.__log('POST VALUES -- amount: {0}, max_per_note: {1}, filter: ...'.format(cash, max_per_note))
    response = self.session.post('/portfolio/lendingMatchOptionsV2.action', data=payload)
    json_response = response.json()

    # Options were found
    if self.session.json_success(json_response) and 'lmOptions' in json_response:
        options = json_response['lmOptions']

        # Nothing found
        if type(options) is not list or json_response['numberTicks'] == 0:
            self.__log('No lending portfolios were returned with your search')
            return False

        # Choose an investment option based on the user's min/max values
        i = 0
        match_index = -1
        match_option = None
        for option in options:
            # A perfect match
            if option['percentage'] == max_percent:
                match_option = option
                match_index = i
                break

            # Over the max
            elif option['percentage'] > max_percent:
                break

            # Higher than the minimum percent and the current matched option
            elif option['percentage'] >= min_percent and (match_option is None or match_option['percentage'] < option['percentage']):
                match_option = option
                match_index = i

            i += 1

        # Nothing matched
        if match_option is None:
            self.__log('No portfolios matched your percentage requirements')
            return False

        # Mark this portfolio for investing (in order to get a list of all notes)
        payload = {
            'order_amount': cash,
            'lending_match_point': match_index,
            'lending_match_version': 'v2'
        }
        self.session.get('/portfolio/recommendPortfolio.action', query=payload)

        # Get all loan fractions
        payload = {
            'method': 'getPortfolio'
        }
        response = self.session.get('/data/portfolio', query=payload)
        json_response = response.json()

        # Extract fractions from response
        fractions = []
        if 'loanFractions' in json_response:
            fractions = json_response['loanFractions']

            # Normalize by converting loanFractionAmount to invest_amount
            for frac in fractions:
                frac['invest_amount'] = frac['loanFractionAmount']

                # Raise error if amount is greater than max_per_note
                if frac['invest_amount'] > max_per_note:
                    raise LendingClubError('ERROR: LendingClub tried to invest ${0} in a loan note. Your max per note is set to ${1}. Portfolio investment canceled.'.format(frac['invest_amount'], max_per_note))

        if len(fractions) == 0:
            self.__log('The selected portfolio didn\'t have any loans')
            return False
        match_option['loan_fractions'] = fractions

        # Validate that fractions do indeed match the filters
        if filters is not None:
            filters.validate(fractions)

        # Not investing -- reset portfolio search session and return
        if automatically_invest is not True:
            if do_not_clear_staging is not True:
                self.session.clear_session_order()

        # Invest in this portfolio
        elif automatically_invest is True:  # just to be sure
            order = self.start_order()

            # This should probably only be ever done here...ever.
            order._Order__already_staged = True
            order._Order__i_know_what_im_doing = True

            order.add_batch(match_option['loan_fractions'])
            order_id = order.execute()
            match_option['order_id'] = order_id

        return match_option
    else:
        raise LendingClubError('Could not find any portfolio options that match your filters', response)

    return False
0.004038
def remove_pipe(self, name):
    """Remove a component from the pipeline.

    name (unicode): Name of the component to remove.
    RETURNS (tuple): A `(name, component)` tuple of the removed component.

    DOCS: https://spacy.io/api/language#remove_pipe
    """
    if name not in self.pipe_names:
        raise ValueError(Errors.E001.format(name=name, opts=self.pipe_names))
    return self.pipeline.pop(self.pipe_names.index(name))
0.006424
def start(self, exit_on_stop=True, secondary_wait=0, reconnect=False):
    """
    If the DistributedEvaluator is in primary mode, starts the manager
    process and returns. In this case, the ``exit_on_stop`` argument will
    be ignored.

    If the DistributedEvaluator is in secondary mode, it connects to the
    manager and waits for tasks.

    If in secondary mode and ``exit_on_stop`` is True, sys.exit() will be
    called when the connection is lost.

    ``secondary_wait`` specifies the time (in seconds) to sleep before
    actually starting when in secondary mode.

    If 'reconnect' is True, the secondary nodes will try to reconnect when
    the connection is lost. In this case, sys.exit() will only be called
    when 'exit_on_stop' is True and the primary node sends a forced
    shutdown command.
    """
    if self.started:
        raise RuntimeError("DistributedEvaluator already started!")
    self.started = True
    if self.mode == MODE_PRIMARY:
        self._start_primary()
    elif self.mode == MODE_SECONDARY:
        time.sleep(secondary_wait)
        self._start_secondary()
        self._secondary_loop(reconnect=reconnect)
        if exit_on_stop:
            sys.exit(0)
    else:
        raise ValueError("Invalid mode {!r}!".format(self.mode))
0.0036
def mirror(self, handler, path_from, path_to, log_files=False):
    """Recursively mirror the contents of "path_from" into "path_to".

    "handler" should be self.mirror_to_local_no_recursion or
    self.mirror_to_remote_no_recursion to represent which way the files
    are moving.
    """
    q = deque([''])
    while q:
        path = q.popleft()
        full_from = ('%s/%s' % (path_from, path)) if path else path_from
        full_to = ('%s/%s' % (path_to, path)) if path else path_to
        subdirs = handler(full_from, full_to, log_files)
        for subdir in subdirs:
            q.append(('%s/%s' % (path, subdir)) if path else subdir)
0.008333
def parse(cls, prefix):
    """
    Extracts information from `prefix`.

    :param prefix: prefix with format
        ``<servername>|<nick>['!'<user>]['@'<host>]``.
    :type prefix: unicode
    :return: extracted information (nickname or host, mode, username, host).
    :rtype: tuple(str, str, str, str)
    """
    try:
        nick, rest = prefix.split(u'!')
    except ValueError:
        return prefix, None, None, None

    try:
        mode, rest = rest.split(u'=')
    except ValueError:
        mode, rest = None, rest

    try:
        user, host = rest.split(u'@')
    except ValueError:
        return nick, mode, rest, None

    return nick, mode, user, host
0.005376
def _check_api_limits(gh_session, api_required=250, sleep_time=15):
    """
    Simplified check for API limits

    If necessary, spin in place waiting for API to reset before returning.
    See: https://developer.github.com/v3/#rate-limiting
    """
    api_rates = gh_session.rate_limit()

    api_remaining = api_rates['rate']['remaining']
    api_reset = api_rates['rate']['reset']
    logger.debug('Rate Limit - %d requests remaining', api_remaining)

    if api_remaining > api_required:
        return

    now_time = time.time()
    time_to_reset = int(api_reset - now_time)
    logger.warn('Rate Limit Depleted - Sleeping for %d seconds', time_to_reset)

    while now_time < api_reset:
        time.sleep(10)
        now_time = time.time()

    return
0.001311
def create_asset_content(self, asset_content_form=None):
    """Creates new ``AssetContent`` for a given asset.

    arg:    asset_content_form (osid.repository.AssetContentForm):
            the form for this ``AssetContent``
    return: (osid.repository.AssetContent) - the new ``AssetContent``
    raise:  IllegalState - ``asset_content_form`` already used in a
            create transaction
    raise:  InvalidArgument - one or more of the form elements is
            invalid
    raise:  NullArgument - ``asset_content_form`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - ``asset_content_form`` did not originate
            from ``get_asset_content_form_for_create()``
    *compliance: mandatory -- This method must be implemented.*
    """
    if isinstance(asset_content_form, AssetContentForm):
        asset_content = self._provider_session.create_asset_content(
            asset_content_form._payload)
    else:
        asset_content = self._provider_session.create_asset_content(
            asset_content_form)
    try:
        if asset_content.has_url() and 'amazonaws.com' in asset_content.get_url():
            return AssetContent(asset_content, self._config_map)
    except TypeError:
        pass
    return asset_content
0.002042
def close(self):
    """Close subscription.
    """
    if self._S is not None:
        # after .close() self._event should never be called
        self._S.close()
        self._S = None
        self._Q.Signal(None)
        self._T.Wait()
0.007407
def erase_sector(self, address):
    """! @brief Erase one sector.

    @exception FlashEraseFailure
    """
    assert self._active_operation == self.Operation.ERASE

    # update core register to execute the erase_sector subroutine
    result = self._call_function_and_wait(self.flash_algo['pc_erase_sector'], address)

    # check the return code
    if result != 0:
        raise FlashEraseFailure('erase_sector(0x%x) error: %i' % (address, result),
                                address, result)
0.009579
def filer_has_permission(context, item, action):
    """Does the current user (taken from the request in the context) have
    permission to do the given action on the given item.
    """
    permission_method_name = 'has_{action}_permission'.format(action=action)
    permission_method = getattr(item, permission_method_name, None)
    request = context.get('request')

    if not permission_method or not request:
        return False

    # Call the permission method.
    # This amounts to calling `item.has_X_permission(request)`
    return permission_method(request)
0.001748
def publish(self, user_id, wifi_fingerprint, action='track', location_id='', port=1883):
    '''
    a method to publish wifi fingerprint data to a mosquitto server

    :param user_id: string with id of user
    :param wifi_fingerprint: list of dictionaries with wifi fields mac and rssi
    :param action: string with type of action to perform with data (track or learn)
    :param location_id: [optional] string with classifier to add to learning data
    :param port: [optional] integer with port to connect to
    :return: True
    '''
    title = '%s.publish' % self.__class__.__name__

    # validate inputs
    input_fields = {
        'user_id': user_id,
        'wifi_fingerprint': wifi_fingerprint,
        'action': action,
        'location_id': location_id,
        'port': port
    }
    for key, value in input_fields.items():
        object_title = '%s(%s=%s)' % (title, key, str(value))
        self.fields.validate(value, '.%s' % key, object_title)

    # compose message
    fingerprint_string = ''
    for signal in wifi_fingerprint:
        fingerprint_string += signal['mac'].replace(':', '')
        rssi_string = str(signal['rssi']).replace('-', '')
        if len(rssi_string) > 2:
            fingerprint_string += ' '
        fingerprint_string += rssi_string

    # compose channel
    topic_string = '%s/track/%s' % (self.group_name, user_id)
    if action == 'learn':
        topic_string = '%s/learn/%s/%s' % (self.group_name, user_id, location_id)

    # send a single message to server
    import paho.mqtt.publish as mqtt_publish
    mqtt_publish.single(
        topic=topic_string,
        payload=fingerprint_string,
        auth={'username': self.group_name, 'password': self.password},
        hostname=self.server_url,
        port=port
    )

    return True
0.008008
def sleep(self, unique_id, delay, configs=None):
    """
    Pauses the process for the specified delay and then resumes it

    :Parameter unique_id: the name of the process
    :Parameter delay: delay time in seconds
    """
    self.pause(unique_id, configs)
    time.sleep(delay)
    self.resume(unique_id, configs)
0.003175
def add_time_dependent_effects(self, ts):
    """
    Given a timeseries, apply a model to it.

    Parameters
    ----------
    ts:
        Time series of i.i.d. observations as a Numpy array

    Returns the time series with added time-dependent effects as a Numpy
    array.
    """
    destts = Vectors.dense([0] * len(ts))
    result = self._jmodel.addTimeDependentEffects(
        _py2java(self._ctx, Vectors.dense(ts)), _py2java(self._ctx, destts))
    return _java2py(self._ctx, result.toArray())
0.012658
def add_updates(self, messages: Iterable[CachedMessage],
                expunged: Iterable[int]) -> None:
    """Update the messages in the selected mailboxes.

    The ``messages`` should include non-expunged messages in the mailbox
    that should be checked for updates. The ``expunged`` argument is the
    set of UIDs that have been expunged from the mailbox.

    In an optimized implementation, ``messages`` only includes new
    messages or messages with metadata updates. This minimizes the
    comparison needed to determine what untagged responses are necessary.
    The :attr:`.mod_sequence` attribute may be used to support this
    optimization.

    If a backend implementation lacks the ability to determine the subset
    of messages that have been updated, it should instead use
    :meth:`.set_messages`.

    Args:
        messages: The cached message objects to add.
        expunged: The set of message UIDs that have been expunged.
    """
    self._messages._update(messages)
    self._messages._remove(expunged, self._hide_expunged)
    if not self._hide_expunged:
        self._session_flags.remove(expunged)
0.002459
def request_comments(self, id, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/core/requests#listing-comments"
    api_path = "/api/v2/requests/{id}/comments.json"
    api_path = api_path.format(id=id)
    return self.call(api_path, **kwargs)
0.011111
def generateFeatureGroups(fgiContainer, linkageGroups, matchArr, timeKey,
                          massKey, logMassKey, massScalingFactor):
    """ #TODO: docstring

    :param fgiContainer:
    :param linkageGroups:

    :returns: a list of ids of the newly generated :class:`Fgi`
    """
    # Generate feature groups from the linked features
    newFgiIds = list()
    for linkageGroup in viewvalues(linkageGroups):
        fgiId = fgiContainer._getNextFgiId()
        fgi = fiGroupFromLinkageGroup(matchArr, linkageGroup, fgiId,
                                      timeKey, massKey)
        fgiContainer.container[fgiId] = fgi
        fgi.metrics = clusterMetrics(matchArr[timeKey][linkageGroup],
                                     matchArr[logMassKey][linkageGroup],
                                     massScalingFactor=massScalingFactor)
        fgi.rt = fgi.metrics['meanTime']
        fgi.mz = fgi.metrics['meanMass']
        newFgiIds.append(fgiId)
    return newFgiIds
0.001885
def cli_decrypt(context, key):
    """
    Decrypts context.io_manager's stdin and sends that to
    context.io_manager's stdout.

    See :py:mod:`swiftly.cli.decrypt` for context usage information.

    See :py:class:`CLIDecrypt` for more information.
    """
    with context.io_manager.with_stdout() as stdout:
        with context.io_manager.with_stdin() as stdin:
            crypt_type = stdin.read(1)
            if crypt_type == AES256CBC:
                for chunk in aes_decrypt(key, stdin):
                    stdout.write(chunk)
                stdout.flush()
            else:
                raise ReturnCode(
                    'contents encrypted with unsupported type %r' % crypt_type)
0.001418
def signum(signame):
    """
    Determine the signal from its name.

    These forms are supported:
        signal object (needed for python >= 3.5)
        integer signal number
        text signal number
        SIGNAME (signal name in upper case)
        signame (signal name in lower case)
        NAME (name without 'SIG' in upper case)
        name (name without 'SIG' in lower case)

    In python 3.5 and above, "signal" is an object, so in addition to
    mapping signal numbers and stringified numbers, this also maps signal
    objects.
    """
    if signum.namemap is None:
        # First time through, map everything likely to its signal number
        signum.namemap = {}
        for num, nam in sigmap.items():
            signum.namemap[num] = num
            signum.namemap[int(num)] = num
            signum.namemap[str(num)] = num
            signum.namemap[str(int(num))] = num
            signum.namemap[nam.upper()] = num
            signum.namemap[nam.lower()] = num
            abbr = nam.replace('SIG', '', 1)
            if abbr != nam:
                signum.namemap[abbr.upper()] = num
                signum.namemap[abbr.lower()] = num
    return signum.namemap.get(signame)
0.000823
def default_link_tag(item):
    """
    Create an A-HREF tag that points to another page.
    """
    text = item["value"]
    target_url = item["href"]

    if not item["href"] or item["type"] in ("span", "current_page"):
        if item["attrs"]:
            text = make_html_tag("span", **item["attrs"]) + text + "</span>"
        return text

    return make_html_tag("a", text=text, href=target_url, **item["attrs"])
0.006508
def delete_backend(backend):
    '''delete a backend, and update the secrets file
    '''
    settings = read_client_secrets()
    if backend in settings:
        del settings[backend]

        # If the backend was the active client, remove too
        if 'SREGISTRY_CLIENT' in settings:
            if settings['SREGISTRY_CLIENT'] == backend:
                del settings['SREGISTRY_CLIENT']

        update_secrets(settings)
        print('[delete] %s' % backend)
    else:
        if backend is not None:
            print('%s is not a known client.' % backend)
        else:
            print('Please specify a backend to delete.')
0.004739
def readline(self):
    """Readline wrapper to force readline() to return str objects."""
    line = self.fd.__class__.readline(self.fd)
    if isinstance(line, bytes):
        line = line.decode()
    return line
0.008584
def sort(self, field, direction="asc"):
    """ Adds sort criteria. """
    if not isinstance(field, basestring):
        raise ValueError("Field should be a string")
    if direction not in ["asc", "desc"]:
        raise ValueError("Sort direction should be `asc` or `desc`")
    self.sorts.append({field: direction})
0.005571
def push_app(self, content, content_url=None):
    '''Push a notification to a Pushed application.

    Param:
        content -> content of Pushed notification message
        content_url (optional) -> enrich message with URL

    Returns Shipment ID as string
    '''
    parameters = {
        'app_key': self.app_key,
        'app_secret': self.app_secret
    }
    return self._push(content, 'app', parameters, content_url)
0.004329
def decorate_with_checker(func: CallableT) -> CallableT:
    """Decorate the function with a checker that verifies the preconditions and postconditions."""
    assert not hasattr(func, "__preconditions__"), \
        "Expected func to have no list of preconditions (there should be only a single contract checker per function)."

    assert not hasattr(func, "__postconditions__"), \
        "Expected func to have no list of postconditions (there should be only a single contract checker per function)."

    assert not hasattr(func, "__postcondition_snapshots__"), \
        "Expected func to have no list of postcondition snapshots (there should be only a single contract checker " \
        "per function)."

    sign = inspect.signature(func)
    param_names = list(sign.parameters.keys())

    # Determine the default argument values.
    kwdefaults = dict()  # type: Dict[str, Any]

    # Add to the defaults all the values that are needed by the contracts.
    for param in sign.parameters.values():
        if param.default != inspect.Parameter.empty:
            kwdefaults[param.name] = param.default

    def wrapper(*args, **kwargs):
        """Wrap func by checking the preconditions and postconditions."""
        preconditions = getattr(wrapper, "__preconditions__")  # type: List[List[Contract]]
        snapshots = getattr(wrapper, "__postcondition_snapshots__")  # type: List[Snapshot]
        postconditions = getattr(wrapper, "__postconditions__")  # type: List[Contract]

        resolved_kwargs = _kwargs_from_call(param_names=param_names, kwdefaults=kwdefaults,
                                            args=args, kwargs=kwargs)

        if postconditions:
            if 'result' in resolved_kwargs:
                raise TypeError("Unexpected argument 'result' in a function decorated with postconditions.")

            if 'OLD' in resolved_kwargs:
                raise TypeError("Unexpected argument 'OLD' in a function decorated with postconditions.")

        # Assert the preconditions in groups. This is necessary to implement "require else" logic when a class
        # weakens the preconditions of its base class.
        violation_err = None  # type: Optional[ViolationError]
        for group in preconditions:
            violation_err = None
            try:
                for contract in group:
                    _assert_precondition(contract=contract, resolved_kwargs=resolved_kwargs)
                break
            except ViolationError as err:
                violation_err = err

        if violation_err is not None:
            raise violation_err  # pylint: disable=raising-bad-type

        # Capture the snapshots
        if postconditions:
            old_as_mapping = dict()  # type: MutableMapping[str, Any]
            for snap in snapshots:
                # This assert is just a last defense.
                # Conflicting snapshot names should have been caught before, either during the decoration or
                # in the meta-class.
                assert snap.name not in old_as_mapping, "Snapshots with the conflicting name: {}"
                old_as_mapping[snap.name] = _capture_snapshot(a_snapshot=snap, resolved_kwargs=resolved_kwargs)

            resolved_kwargs['OLD'] = _Old(mapping=old_as_mapping)

        # Execute the wrapped function
        result = func(*args, **kwargs)

        if postconditions:
            resolved_kwargs['result'] = result

            # Assert the postconditions as a conjunction
            for contract in postconditions:
                _assert_postcondition(contract=contract, resolved_kwargs=resolved_kwargs)

        return result  # type: ignore

    # Copy __doc__ and other properties so that doctests can run
    functools.update_wrapper(wrapper=wrapper, wrapped=func)

    assert not hasattr(wrapper, "__preconditions__"), "Expected no preconditions set on a pristine contract checker."
    assert not hasattr(wrapper, "__postcondition_snapshots__"), \
        "Expected no postcondition snapshots set on a pristine contract checker."
    assert not hasattr(wrapper, "__postconditions__"), "Expected no postconditions set on a pristine contract checker."

    # Precondition is a list of condition groups (i.e. disjunctive normal form):
    # each group consists of AND'ed preconditions, while the groups are OR'ed.
    #
    # This is necessary in order to implement "require else" logic when a class weakens the preconditions of
    # its base class.
    setattr(wrapper, "__preconditions__", [])
    setattr(wrapper, "__postcondition_snapshots__", [])
    setattr(wrapper, "__postconditions__", [])

    return wrapper
0.004781
def get_status(self, batch_id):
    """Returns the status enum for a batch.

    Args:
        batch_id (str): The id of the batch to get the status for

    Returns:
        int: The status enum
    """
    with self._lock:
        if self._batch_committed(batch_id):
            return ClientBatchStatus.COMMITTED

        if batch_id in self._invalid:
            return ClientBatchStatus.INVALID

        if batch_id in self._pending:
            return ClientBatchStatus.PENDING

        return ClientBatchStatus.UNKNOWN
0.00346
def set_m_vol(self, vol=None, relative=False):
    '''
    Set the music volume. If @vol is not None, the volume will be changed
    to it (or by it, if @relative is True).
    '''
    if vol is not None:
        if relative:
            vol += self.m_vol
        self.m_vol = min(max(vol, 0), 1)
    pygame.mixer.music.set_volume(self.m_vol)
0.011019
def order_verification(self, institute, case, user, link, variant):
    """Create an event for a variant verification for a variant and an event
    for a variant verification for a case

    Arguments:
        institute (dict): A Institute object
        case (dict): Case object
        user (dict): A User object
        link (str): The url to be used in the event
        variant (dict): A variant object

    Returns:
        updated_variant(dict)
    """
    LOG.info("Creating event for ordering validation for variant"
             " {0}".format(variant['display_name']))

    updated_variant = self.variant_collection.find_one_and_update(
        {'_id': variant['_id']},
        {'$set': {'sanger_ordered': True}},
        return_document=pymongo.ReturnDocument.AFTER
    )

    self.create_event(
        institute=institute,
        case=case,
        user=user,
        link=link,
        category='variant',
        verb='sanger',
        variant=variant,
        subject=variant['display_name'],
    )

    LOG.info("Creating event for ordering sanger for case"
             " {0}".format(case['display_name']))

    self.create_event(
        institute=institute,
        case=case,
        user=user,
        link=link,
        category='case',
        verb='sanger',
        variant=variant,
        subject=variant['display_name'],
    )

    return updated_variant
0.003854
def tree_probe(self, **kwargs): """ Perform an os walk down a file system tree, starting from a **kwargs identified 'root', and return lists of files and directories found. kwargs: root = '/some/path' return { 'status': True, 'l_dir': l_dirs, 'l_files': l_files } """ str_topDir = "." l_dirs = [] l_files = [] b_status = False str_path = '' l_dirsHere = [] l_filesHere = [] for k, v in kwargs.items(): if k == 'root': str_topDir = v # for root, dirs, files in os.walk(str_topDir, followlinks = self.b_followLinks): for root, dirs, files in pftree.walklevel(str_topDir, self.maxdepth, followlinks = self.b_followLinks): b_status = True str_path = root.split(os.sep) if dirs: l_dirsHere = [root + '/' + x for x in dirs] l_dirs.append(l_dirsHere) self.dp.qprint('Appending dirs to search space:\n', level = 3) self.dp.qprint("\n" + self.pp.pformat(l_dirsHere), level = 3) if files: l_filesHere = [root + '/' + y for y in files] if len(self.str_inputFile): l_hit = [s for s in l_filesHere if self.str_inputFile in s] if l_hit: l_filesHere = l_hit else: l_filesHere = [] if l_filesHere: l_files.append(l_filesHere) self.dp.qprint('Appending files to search space:\n', level = 3) self.dp.qprint("\n" + self.pp.pformat(l_filesHere), level = 3) return { 'status': b_status, 'l_dir': l_dirs, 'l_files': l_files }
0.012352
def delete(data, id, medium, credentials):
    """Deletes the [medium] with the given id and data from the user's [medium]List.

    :param data: The data for the [medium] to delete.
    :param id: The id of the data to delete.
    :param medium: Anime or manga (tokens.Medium.ANIME or tokens.Medium.MANGA).
    :param credentials: The credentials to authenticate with.
    :raise ValueError: For bad arguments.
    """
    _op(data, id, medium, tokens.Operations.DELETE, credentials)
0.004728
def bytes2uuid(b):
    """ Return standard human-friendly UUID. """
    if b.strip(b'\x00') == b'':
        return None

    s = b.hex()
    return "%s-%s-%s-%s-%s" % (s[0:8], s[8:12], s[12:16], s[16:20], s[20:32])
0.004587
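On Python 3 the same formatting is available from the standard library, which also validates that the input is exactly 16 bytes; a small sketch:

import uuid

b = bytes.fromhex("12345678123456781234567812345678")
print(uuid.UUID(bytes=b))  # 12345678-1234-5678-1234-567812345678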
def _int2farray(ftype, num, length=None): """Convert a signed integer to an farray.""" if num < 0: req_length = clog2(abs(num)) + 1 objs = _uint2objs(ftype, 2**req_length + num) else: req_length = clog2(num + 1) + 1 objs = _uint2objs(ftype, num, req_length) if length: if length < req_length: fstr = "overflow: num = {} requires length >= {}, got length = {}" raise ValueError(fstr.format(num, req_length, length)) else: sign = objs[-1] objs += [sign] * (length - req_length) return farray(objs)
0.001631
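The negative branch above is plain two's complement: a value num < 0 stored in n bits becomes 2**n + num. A standalone sketch of the same encoding, using int.bit_length in place of pyeda's clog2 and 0/1 ints in place of farray objects (LSB first, sign bit last):

def int2bits(num):
    if num < 0:
        length = (abs(num) - 1).bit_length() + 1  # clog2(|num|) + 1
        value = (1 << length) + num               # two's complement
    else:
        length = num.bit_length() + 1             # room for the sign bit
        value = num
    return [(value >> i) & 1 for i in range(length)]

print(int2bits(-3))  # [1, 0, 1] -> 0b101 = 2**3 - 3, sign bit set
print(int2bits(3))   # [1, 1, 0] -> 0b011, sign bit clear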
def find_autorest_generated_folder(module_prefix="azure"): """Find all Autorest generated code in that module prefix. This actually looks for a "models" package only (not file). We could be smarter if necessary. """ _LOGGER.info(f"Looking for Autorest generated package in {module_prefix}") # Manually skip some namespaces for now if module_prefix in ["azure.cli", "azure.storage", "azure.servicemanagement", "azure.servicebus"]: _LOGGER.info(f"Skip {module_prefix}") return [] result = [] try: _LOGGER.debug(f"Try {module_prefix}") model_module = importlib.import_module(".models", module_prefix) # If not exception, we MIGHT have found it, but cannot be a file. # Keep continue to try to break it, file module have no __path__ model_module.__path__ _LOGGER.info(f"Found {module_prefix}") result.append(module_prefix) except (ModuleNotFoundError, AttributeError): # No model, might dig deeper prefix_module = importlib.import_module(module_prefix) for _, sub_package, ispkg in pkgutil.iter_modules(prefix_module.__path__, module_prefix+"."): if ispkg: result += find_autorest_generated_folder(sub_package) return result
0.00311
def check_token(self, renew=True):
    """
    Checks the exp attribute of the access_token and either refreshes
    the tokens by calling the renew_access_token method or does nothing

    :param renew: bool indicating whether to refresh on expiration
    :return: bool indicating whether access_token has expired
    """
    if not self.access_token:
        raise AttributeError('Access Token Required to Check Token')

    now = datetime.datetime.now()
    dec_access_token = jwt.get_unverified_claims(self.access_token)

    if now > datetime.datetime.fromtimestamp(dec_access_token['exp']):
        expired = True
        if renew:
            self.renew_access_token()
    else:
        expired = False

    return expired
0.002516
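The expiry check boils down to reading the exp claim out of the (unverified) JWT payload and comparing it with the clock. A dependency-free sketch of that step, with a throwaway token built inline:

import base64, json, time

def unverified_exp(token):
    payload = token.split('.')[1]
    payload += '=' * (-len(payload) % 4)  # restore the stripped base64 padding
    return json.loads(base64.urlsafe_b64decode(payload))['exp']

enc = lambda obj: base64.urlsafe_b64encode(json.dumps(obj).encode()).decode().rstrip('=')
token = '.'.join([enc({"alg": "none"}), enc({"exp": int(time.time()) + 60}), ''])
print(unverified_exp(token) > time.time())  # True: the token has not expired yet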
def create_remote_subnet(self, account_id, identifier, cidr): """Creates a remote subnet on the given account. :param string account_id: The account identifier. :param string identifier: The network identifier of the remote subnet. :param string cidr: The CIDR value of the remote subnet. :return dict: Mapping of properties for the new remote subnet. """ return self.remote_subnet.createObject({ 'accountId': account_id, 'cidr': cidr, 'networkIdentifier': identifier })
0.003503
def send_file(self, fakeid, fid, type):
    """
    Send a media file to a specific user.

    :param fakeid: user UID (i.e. fakeid)
    :param fid: file ID
    :param type: file type (2: image, 3: audio, 15: video)
    :raises NeedLoginError: the operation did not succeed and another login
        attempt is required; the exception carries the error data returned
        by the server
    :raises ValueError: bad arguments; print the exception for the cause
        (common messages: ``system error`` or ``can not send this type of
        msg``: file type mismatch, ``user not exist``: the fakeid does not
        exist, ``file not exist``: the fid does not exist; inspect any
        other message yourself)
    """
    if type == 4:  # kept for backwards compatibility; WeChat now uses 15 as the video type
        type = 15

    url = 'https://mp.weixin.qq.com/cgi-bin/singlesend?t=ajax-response&f=json&token={token}&lang=zh_CN'.format(
        token=self.__token,
    )
    payloads = {}
    if type == 2 or type == 3:  # the file is an image or an audio clip
        payloads = {
            'token': self.__token,
            'lang': 'zh_CN',
            'f': 'json',
            'ajax': 1,
            'random': random.random(),
            'type': type,
            'file_id': fid,
            'tofakeid': fakeid,
            'fileid': fid,
            'imgcode': '',
        }
    elif type == 15:  # the file is a video
        payloads = {
            'token': self.__token,
            'lang': 'zh_CN',
            'f': 'json',
            'ajax': 1,
            'random': random.random(),
            'type': type,
            'app_id': fid,
            'tofakeid': fakeid,
            'appmsgid': fid,
            'imgcode': '',
        }
    headers = {
        'referer': 'https://mp.weixin.qq.com/cgi-bin/singlesendpage?tofakeid={fakeid}&t=message/send&action=index&token={token}&lang=zh_CN'.format(
            fakeid=fakeid,
            token=self.__token,
        ),
        'cookie': self.__cookies,
        'x-requested-with': 'XMLHttpRequest',
    }
    r = requests.post(url, data=payloads, headers=headers)

    try:
        message = json.loads(r.text)
    except ValueError:
        raise NeedLoginError(r.text)
    try:
        if message['base_resp']['ret'] != 0:
            raise ValueError(message['base_resp']['err_msg'])
    except KeyError:
        raise NeedLoginError(r.text)
0.002186
def multireport(args): """ %prog multireport layoutfile Generate several Ks value distributions in the same figure. If the layout file is missing then a template file listing all ks files will be written. The layout file contains the Ks file, number of components, colors, and labels: # Ks file, ncomponents, label, color, marker LAP.sorghum.ks, 1, LAP-sorghum, r, o SES.sorghum.ks, 1, SES-sorghum, g, + MOL.sorghum.ks, 1, MOL-sorghum, m, ^ If color or marker is missing, then a random one will be assigned. """ p = OptionParser(multireport.__doc__) p.set_outfile(outfile="Ks_plot.pdf") add_plot_options(p) opts, args, iopts = p.set_image_options(args, figsize="5x5") if len(args) != 1: sys.exit(not p.print_help()) layoutfile, = args ks_min = opts.vmin ks_max = opts.vmax bins = opts.bins fill = opts.fill layout = Layout(layoutfile) print(layout, file=sys.stderr) fig = plt.figure(1, (iopts.w, iopts.h)) ax = fig.add_axes([.12, .1, .8, .8]) kp = KsPlot(ax, ks_max, bins, legendp=opts.legendp) for lo in layout: data = KsFile(lo.ksfile) data = [x.ng_ks for x in data] data = [x for x in data if ks_min <= x <= ks_max] kp.add_data(data, lo.components, label=lo.label, \ color=lo.color, marker=lo.marker, fill=fill, fitted=opts.fit) kp.draw(title=opts.title, filename=opts.outfile)
0.002027
def is_used(self, regs, i, top=None): """ Checks whether any of the given regs are required from the given point to the end or not. """ if i < 0: i = 0 if self.lock: return True regs = list(regs) # make a copy if top is None: top = len(self) else: top -= 1 for ii in range(i, top): for r in self.mem[ii].requires: if r in regs: return True for r in self.mem[ii].destroys: if r in regs: regs.remove(r) if not regs: return False self.lock = True result = self.goes_requires(regs) self.lock = False return result
0.003783
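The method above is a forward liveness scan: walk from instruction i, succeed as soon as a tracked register is required, and stop tracking a register once it is destroyed. A toy version over plain (requires, destroys) pairs (a hypothetical structure; the real method also recurses into successor blocks via goes_requires):

def used_after(code, regs, i):
    regs = set(regs)
    for requires, destroys in code[i:]:
        if regs & set(requires):  # a tracked register is read before being clobbered
            return True
        regs -= set(destroys)
        if not regs:              # everything clobbered before any read
            return False
    return True                   # conservative: assume used past the window

code = [([], ['a']), (['a'], []), ([], ['a'])]
print(used_after(code, ['a'], 0))  # False: 'a' is overwritten at 0 before any read
print(used_after(code, ['a'], 1))  # True: instruction 1 reads 'a'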
def follow(self, object): '''follow an object on the map''' state = self.state (px,py) = state.panel.pixmapper(object.latlon) ratio = 0.25 if (px > ratio*state.width and px < (1.0-ratio)*state.width and py > ratio*state.height and py < (1.0-ratio)*state.height): # we're in the mid part of the map already, don't move return if not state.follow: # the user has disabled following return (lat, lon) = object.latlon state.panel.re_center(state.width/2, state.height/2, lat, lon)
0.00639
def get_attribute_names(self): """Retrieves the names of all attributes. Returns: list[str]: attribute names. """ attribute_names = [] for attribute_name in iter(self.__dict__.keys()): # Not using startswith to improve performance. if attribute_name[0] == '_': continue attribute_names.append(attribute_name) return attribute_names
0.010309
def get_intent_name(handler_input): # type: (HandlerInput) -> AnyStr """Return the name of the intent request. The method retrieves the intent ``name`` from the input request, only if the input request is an :py:class:`ask_sdk_model.intent_request.IntentRequest`. If the input is not an IntentRequest, a :py:class:`TypeError` is raised. :param handler_input: The handler input instance that is generally passed in the sdk's request and exception components :type handler_input: ask_sdk_core.handler_input.HandlerInput :return: Name of the intent request :rtype: str :raises: TypeError """ request = handler_input.request_envelope.request if isinstance(request, IntentRequest): return request.intent.name raise TypeError("The provided request is not an IntentRequest")
0.001182
def _purge_expired(self): """ Remove all expired entries from the cache. """ time_horizon = time.time() - self._keep_time new_cache = {} for (k, v) in self._cache.items(): if v.timestamp > time_horizon: new_cache[k] = v self._cache = new_cache
0.006116
def get_field_values_as_list(self,field): ''' :param str field: The name of the field for which to pull in values. Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example:: >>> r.get_field_values_as_list('product_name_exact') ['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem'] ''' return [doc[field] for doc in self.docs if field in doc]
0.00641
def open_file(self, file):
    """
    Adds a file to the list (and moves it to the top of the list if the
    file already exists).

    :param file: file path to add to the list of recent files.
    """
    files = self.get_recent_files()
    try:
        files.remove(file)
    except ValueError:
        pass
    files.insert(0, file)
    # discard old files
    del files[self.max_recent_files:]
    self.set_value('list', files)
    self.updated.emit()
0.003891
def _expand_spatial_bounds_to_fit_axes(bounds, ax_width, ax_height):
    """
    Expand the spatial bounds so that their aspect ratio matches the axes.

    Parameters
    ----------
    bounds: dict
    ax_width: float
    ax_height: float

    Returns
    -------
    spatial_bounds: dict
    """
    b = bounds
    height_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_max'], b['lon_min'])
    width_meters = util.wgs84_distance(b['lat_min'], b['lon_min'], b['lat_min'], b['lon_max'])
    x_per_y_meters = width_meters / height_meters
    x_per_y_axes = ax_width / ax_height
    if x_per_y_axes > x_per_y_meters:
        # the x-axis has slack -> the spatial longitude bounds need to be extended
        width_meters_new = (height_meters * x_per_y_axes)
        d_lon_new = ((b['lon_max'] - b['lon_min']) / width_meters) * width_meters_new
        mean_lon = (b['lon_min'] + b['lon_max']) / 2.
        lon_min = mean_lon - d_lon_new / 2.
        lon_max = mean_lon + d_lon_new / 2.
        spatial_bounds = {
            "lon_min": lon_min,
            "lon_max": lon_max,
            "lat_min": b['lat_min'],
            "lat_max": b['lat_max']
        }
    else:
        # the y-axis has slack -> the spatial latitude bounds need to be extended
        height_meters_new = (width_meters / x_per_y_axes)
        d_lat_new = ((b['lat_max'] - b['lat_min']) / height_meters) * height_meters_new
        mean_lat = (b['lat_min'] + b['lat_max']) / 2.
        lat_min = mean_lat - d_lat_new / 2.
        lat_max = mean_lat + d_lat_new / 2.
        spatial_bounds = {
            "lon_min": b['lon_min'],
            "lon_max": b['lon_max'],
            "lat_min": lat_min,
            "lat_max": lat_max
        }
    return spatial_bounds
0.004159
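Stripped of the WGS84 distance corrections, the function is pure aspect-ratio arithmetic: whichever axis has slack gets its data bounds widened around the midpoint. A flat-plane sketch of that core:

def expand_bounds(xmin, xmax, ymin, ymax, ax_w, ax_h):
    data_ratio, axes_ratio = (xmax - xmin) / (ymax - ymin), ax_w / ax_h
    if axes_ratio > data_ratio:   # axes wider than the data: pad x
        new_w = (ymax - ymin) * axes_ratio
        cx = (xmin + xmax) / 2
        xmin, xmax = cx - new_w / 2, cx + new_w / 2
    else:                         # axes taller than the data: pad y
        new_h = (xmax - xmin) / axes_ratio
        cy = (ymin + ymax) / 2
        ymin, ymax = cy - new_h / 2, cy + new_h / 2
    return xmin, xmax, ymin, ymax

print(expand_bounds(0, 2, 0, 1, 4, 1))  # (-1.0, 3.0, 0, 1): x padded to a 4:1 box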
def from_json(self, json_text):
    """Deserialize a JSON object into this object.

    This method will check that the JSON object has the required
    keys and will set each of the keys in that JSON object as
    an instance attribute of this object.

    :param json_text: the JSON text or object to deserialize from
    :type json_text: dict or string
    :raises ValueError: if the JSON object lacks an expected key
    :rtype: None"""
    # Due to the architecture of response parsing, particularly
    # where the API returns lists, the JSON might have already been
    # parsed by the time it gets here.
    if isinstance(json_text, str):
        j = json.loads(json_text)
    else:
        j = json_text
    try:
        for p in self.properties:
            setattr(self, p, j[p])
    except KeyError as e:
        msg = 'Expected key %s in JSON object, found None' % str(e)
        raise ValueError(msg)
0.004995
def _request(self, method, url, query_or_data=None, **kwargs): """ Wrapper for the HTTP requests, rate limit backoff is handled here, responses are processed with ResourceBuilder. """ if query_or_data is None: query_or_data = {} request_method = getattr(self, '_http_{0}'.format(method)) response = retry_request(self)(request_method)(url, query_or_data, **kwargs) if self.raw_mode: return response if response.status_code >= 300: error = get_error(response) if self.raise_errors: raise error return error # Return response object on NoContent if response.status_code == 204 or not response.text: return response return ResourceBuilder( self, self.default_locale, response.json() ).build()
0.003222
def ensure_stacker_compat_config(config_filename): """Ensure config file can be loaded by Stacker.""" try: with open(config_filename, 'r') as stream: yaml.safe_load(stream) except yaml.constructor.ConstructorError as yaml_error: if yaml_error.problem.startswith( 'could not determine a constructor for the tag \'!'): LOGGER.error('"%s" appears to be a CloudFormation template, ' 'but is located in the top level of a module ' 'alongside the CloudFormation config files (i.e. ' 'the file or files indicating the stack names & ' 'parameters). Please move the template to a ' 'subdirectory.', config_filename) sys.exit(1)
0.001179
def read_option_value_from_nibble(nibble, pos, values):
    """
    Calculates the value used in the extended option fields.

    :param nibble: the 4-bit option header value.
    :param pos: the current read position in the extended option bytes.
    :param values: the raw bytes holding the extended option fields.
    :return: the value calculated from the nibble and the extended option
        value, together with the updated read position.
    """
    if nibble <= 12:
        return nibble, pos
    elif nibble == 13:
        tmp = values[pos] + 13
        pos += 1
        return tmp, pos
    elif nibble == 14:
        tmp = struct.unpack_from("!H", values, pos)[0] + 269
        pos += 2
        return tmp, pos
    else:
        raise AttributeError("Unsupported option nibble " + str(nibble))
0.003891
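Usage, assuming the corrected helper above is in scope; per RFC 7252, nibbles 0-12 are literal, 13 adds one extension byte plus 13, and 14 adds a two-byte big-endian extension plus 269:

import struct

data = struct.pack("!BH", 42, 1000)  # one 1-byte and one 2-byte extension field
print(read_option_value_from_nibble(12, 0, data))  # (12, 0): literal, nothing consumed
print(read_option_value_from_nibble(13, 0, data))  # (55, 1): 42 + 13
print(read_option_value_from_nibble(14, 1, data))  # (1269, 3): 1000 + 269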
def analyze_frames(cls, workdir): '''generate draft from recorded frames''' record = cls(None, workdir) obj = {} with open(os.path.join(workdir, 'frames', 'frames.json')) as f: obj = json.load(f) record.device_info = obj['device'] record.frames = obj['frames'] record.analyze_all() record.save()
0.005263
def _get_envelopes_centroid(envelopes):
    """
    Returns the centroid of an inputted geometry column. Not currently in use, as this is now handled by this
    library's CRS wrapper directly. Light wrapper over ``_get_envelopes_min_maxes``.

    Parameters
    ----------
    envelopes : GeoSeries
        The envelopes of the given geometries, as would be returned by e.g. ``data.geometry.envelope``.

    Returns
    -------
    (mean_x, mean_y) : tuple
        The data centroid.
    """
    xmin, xmax, ymin, ymax = _get_envelopes_min_maxes(envelopes)
    return np.mean([xmin, xmax]), np.mean([ymin, ymax])
0.006568