text (string, lengths 78 to 104k)
score (float64, range 0 to 0.18)
def cmd_gimbal_mode(self, args):
    '''control gimbal mode'''
    if len(args) != 1:
        print("usage: gimbal mode <GPS|MAVLink|RC>")
        return
    if args[0].upper() == 'GPS':
        mode = mavutil.mavlink.MAV_MOUNT_MODE_GPS_POINT
    elif args[0].upper() == 'MAVLINK':
        mode = mavutil.mavlink.MAV_MOUNT_MODE_MAVLINK_TARGETING
    elif args[0].upper() == 'RC':
        mode = mavutil.mavlink.MAV_MOUNT_MODE_RC_TARGETING
    else:
        print("Unsupported mode %s" % args[0])
        # bail out here; otherwise `mode` is unbound in the call below
        return
    self.master.mav.mount_configure_send(self.target_system,
                                         self.target_component,
                                         mode, 1, 1, 1)
0.002571
def revoke_token(self, token, orphan=False, accessor=False):
    """POST /auth/token/revoke

    POST /auth/token/revoke-orphan

    POST /auth/token/revoke-accessor

    :param token: the token (or accessor) to revoke
    :type token: str
    :param orphan: if True, revoke via the revoke-orphan endpoint
    :type orphan: bool
    :param accessor: if True, treat ``token`` as an accessor
    :type accessor: bool
    :return: None
    :rtype: None
    """
    if accessor and orphan:
        msg = "revoke_token does not support 'orphan' and 'accessor' flags together"
        raise exceptions.InvalidRequest(msg)
    elif accessor:
        params = {'accessor': token}
        self._adapter.post('/v1/auth/token/revoke-accessor', json=params)
    elif orphan:
        params = {'token': token}
        self._adapter.post('/v1/auth/token/revoke-orphan', json=params)
    else:
        params = {'token': token}
        self._adapter.post('/v1/auth/token/revoke', json=params)
0.003233
def _get_message(self, key, since=None):
    """Return the MdMessage object for the key.

    The object is either returned from the cache in the store or
    made, cached and then returned.

    If 'since' is passed in, the modification time of the file is
    checked and the message is only returned if the mtime is since
    the specified time.

    If the 'since' check fails, None is returned.

    'since' must be seconds since epoch.
    """
    stored = self.store[key]
    if isinstance(stored, dict):
        filename = stored["path"]
        folder = stored["folder"]
        if since and since > 0.0:
            st = stat(filename)
            if st.st_mtime < since:
                return None
        stored = MdMessage(
            key,
            filename=filename,
            folder=folder,
            filesystem=folder.filesystem
        )
        self.store[key] = stored
    else:
        if since and since > 0.0:
            st = stat(stored.filename)
            if st.st_mtime < since:
                return None
    return stored
0.009917
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
    """
    See :meth:`superclass method
    <.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
    for spec of input and result values.
    """
    # extract dictionaries of coefficients specific to required
    # intensity measure type and for PGA
    C = self.COEFFS[imt]
    # For inslab GMPEs the correction term is fixed at -0.3
    dc1 = -0.3
    C_PGA = self.COEFFS[PGA()]
    # compute median pga on rock (vs30=1000), needed for site response
    # term calculation
    pga1000 = np.exp(
        self._compute_pga_rock(C_PGA, dc1, sites, rup, dists))
    mean = (self._compute_magnitude_term(C, dc1, rup.mag) +
            self._compute_distance_term(C, rup.mag, dists) +
            self._compute_focal_depth_term(C, rup) +
            self._compute_forearc_backarc_term(C, sites, dists) +
            self._compute_site_response_term(C, sites, pga1000))
    stddevs = self._get_stddevs(C, stddev_types, len(sites.vs30))
    return mean, stddevs
0.001786
def GetSessionId(self):
    '''Retrieves the VMSessionID for the current session. Call this
    function after calling VMGuestLib_UpdateInfo. If
    VMGuestLib_UpdateInfo has never been called,
    VMGuestLib_GetSessionId returns VMGUESTLIB_ERROR_NO_INFO.'''
    sid = c_void_p()
    ret = vmGuestLib.VMGuestLib_GetSessionId(self.handle.value, byref(sid))
    if ret != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(ret)
    return sid
0.010616
def _execShowCountCmd(self, showcmd):
    """Execute 'show' command and return result dictionary.

    @param showcmd: Command string.
    @return:        Result dictionary.
    """
    result = None
    lines = self._execCmd("show", showcmd + " count")
    for line in lines:
        mobj = re.match(r'\s*(\d+)\s+total', line)
        if mobj:
            return int(mobj.group(1))
    return result
0.015184
def _get_id_and_model(self, id_or_model):
    """
    Get both the model and ID of an object that could be an ID or a model.

    :param id_or_model:
        The object that could be an ID string or a model object.
    """
    if isinstance(id_or_model, self.collection.model):
        model = id_or_model
    elif isinstance(id_or_model, str):
        # Assume we have an ID string
        model = self.collection.get(id_or_model)
    else:
        raise TypeError('Unexpected type {}, expected {} or {}'.format(
            type(id_or_model), str, self.collection.model))
    return model.id, model
0.002685
def deprecated(context):
    """Handle deprecated context input."""
    tar = context.get('tar', None)

    # at least 1 of tarExtract or tarArchive must exist in context
    tar_extract, tar_archive = context.keys_of_type_exist(
        ('tarExtract', list),
        ('tarArchive', list))
    found_at_least_one = (tar_extract.key_in_context
                          or tar_archive.key_in_context)

    if tar and not found_at_least_one:
        return
    elif found_at_least_one:
        tar = context['tar'] = {}

    if tar_extract.key_in_context and tar_extract.is_expected_type:
        tar['extract'] = context[tar_extract.key]

    if tar_archive.key_in_context and tar_archive.is_expected_type:
        tar['archive'] = context[tar_archive.key]

    if 'tarFormat' in context:
        tar['format'] = context['tarFormat']

    logger.warning("tarExtract and tarArchive are deprecated. They will "
                   "stop working upon the next major release. "
                   "Use the new context key env instead. It's a lot "
                   "better, promise! For the moment pypyr is creating the "
                   "new env key for you under the hood.")
0.00085
def pip_get_installed():
    """Code extracted from the middle of the pip freeze command.

    FIXME: does not list anything installed via -e
    """
    from pip._internal.utils.misc import dist_is_local

    return tuple(
        dist_to_req(dist)
        for dist in fresh_working_set()
        if dist_is_local(dist)
        if dist.key != 'python'  # See #220
    )
0.00271
def serialised(self):
    """Tuple containing the contents of the Block."""
    klass = self.__class__
    return ((klass.__module__, klass.__name__),
            tuple((name, field.serialise(self._field_data[name], parent=self))
                  for name, field in klass._fields.items()))
0.032374
def _starts_with_vowel(self, letter_group: str) -> bool:
    """Check if a string starts with a vowel."""
    if len(letter_group) == 0:
        return False
    return self._contains_vowels(letter_group[0])
0.008969
def log_error(self, callback, error=None):
    """
    Log the error that occurred when running the given callback.
    """
    print("Uncaught error during callback: {}".format(callback))
    print("Error: {}".format(error))
0.008734
def get_default_attribute_value(cls, object_class, property_name, attr_type=str):
    """ Gets the default value of a given property for a given object.

        These properties can be set in a config INI file looking like

        .. code-block:: ini

            [NUEntity]
            default_behavior = THIS
            speed = 1000

            [NUOtherEntity]
            attribute_name = a value

        This will be used when creating a :class:`bambou.NURESTObject` when
        no parameter or data is provided
    """
    if not cls._default_attribute_values_configuration_file_path:
        return None

    if not cls._config_parser:
        cls._read_config()

    class_name = object_class.__name__

    if not cls._config_parser.has_section(class_name):
        return None

    if not cls._config_parser.has_option(class_name, property_name):
        return None

    if sys.version_info < (3,):
        integer_types = (int, long,)
    else:
        integer_types = (int,)

    # attr_type is a type object (e.g. int), so membership rather than
    # isinstance is the correct check here
    if attr_type in integer_types:
        return cls._config_parser.getint(class_name, property_name)
    elif attr_type is bool:
        return cls._config_parser.getboolean(class_name, property_name)
    else:
        return cls._config_parser.get(class_name, property_name)
0.00288
def is_client_method_whitelisted(request: AxesHttpRequest) -> bool:
    """
    Check if the given request uses a whitelisted method.
    """
    if settings.AXES_NEVER_LOCKOUT_GET and request.method == 'GET':
        return True
    return False
0.004032
def profile_poly2o(data, mask):
    """Fit a 2D 2nd order polynomial to `data[mask]`"""
    # lmfit
    params = lmfit.Parameters()
    params.add(name="mx", value=0)
    params.add(name="my", value=0)
    params.add(name="mxy", value=0)
    params.add(name="ax", value=0)
    params.add(name="ay", value=0)
    params.add(name="off", value=np.average(data[mask]))

    fr = lmfit.minimize(poly2o_residual, params, args=(data, mask))
    bg = poly2o_model(fr.params, data.shape)
    return bg
0.002037
def spring_system(A, K, L):
    """
    Solving the equilibrium positions of the objects, linked by springs of
    length L, stiffness of K, and connectivity matrix A. Then solving:

    F_nodes = -A'KAx - A'KL = 0

    In the context of scaffolding, lengths (L) are inferred by mate inserts,
    stiffness (K) is inferred via the number of links, connectivity (A) is
    the contigs they connect. The mate pairs form the linkages between the
    contigs, and can be considered as "springs" of certain lengths. The
    "springs" are stretched or compressed if the distance deviates from the
    expected insert size.

    See derivation from Dayarian et al. 2010. SOPRA paper.

    o---------o--------------o
    x0        x1             x2
    |~~~~L1~~~|~~~~~~L2~~~~~~|
    |~~~~~~~~~~L3~~~~~~~~~~~~|

    >>> A = np.array([[1, -1, 0], [0, 1, -1], [1, 0, -1]])
    >>> K = np.eye(3, dtype=int)
    >>> L = np.array([1, 2, 3])
    >>> print(spring_system(A, K, L))
    [ 1.  3.]
    """
    # Linear equation is A'KAx = -A'KL
    C = np.dot(A.T, K)
    left = np.dot(C, A)
    right = -np.dot(C, L)

    left = left[1:, 1:]
    right = right[1:]
    x = np.linalg.solve(left, right)

    return x
0.001676
def remove_tx_rich(self):
    """
    Remove any `c:tx[c:rich]` child, or do nothing if not present.
    """
    matches = self.xpath('c:tx[c:rich]')
    if not matches:
        return
    tx = matches[0]
    self.remove(tx)
0.007813
def get_pixbeam(self, ra, dec):
    """
    Get the psf at the location specified in pixel coordinates.
    The psf is also in pixel coordinates.

    Parameters
    ----------
    ra, dec : float
        The sky position (degrees).

    Returns
    -------
    a, b, pa : float
        The psf semi-major axis (pixels), semi-minor axis (pixels),
        and rotation angle (degrees).
        If a psf is defined then it is the psf that is returned,
        otherwise the image restoring beam is returned.
    """
    # If there is no psf image then just use the fits header
    # (plus lat scaling) from the wcshelper
    if self.data is None:
        return self.wcshelper.get_pixbeam(ra, dec)
    # get the beam from the psf image data
    psf = self.get_psf_pix(ra, dec)
    if not np.all(np.isfinite(psf)):
        log.warn("PSF requested, returned Null")
        return None
    return Beam(psf[0], psf[1], psf[2])
0.005005
def check_id_idx_exclusivity(id, idx):
    """
    Makes sure user didn't provide both ids and idx values to subset by.

    Input:
        - id (list or None): if not None, a list of string id names
        - idx (list or None): if not None, a list of integer id indexes

    Output:
        - a tuple: first element is subset type, second is subset content
    """
    if (id is not None and idx is not None):
        msg = ("'id' and 'idx' fields can't both not be None,"
               + " please specify subset in only one of these fields")
        logger.error(msg)
        raise Exception("parse_gctx.check_id_idx_exclusivity: " + msg)
    elif id is not None:
        return ("id", id)
    elif idx is not None:
        return ("idx", idx)
    else:
        return (None, [])
0.001277
def merge(self, add_me):
    """Merge add_me into context and apply interpolation.

    Bottom-up merge where add_me merges into context. Applies string
    interpolation where the type is a string. Where a key exists in
    context already, add_me's value will overwrite what's in context
    already.

    Supports nested hierarchy. add_me can contain dicts/lists/enumerables
    that contain other enumerables etc. It doesn't restrict levels of
    nesting, so if you really want to go crazy with the levels you can, but
    you might blow your stack.

    If something from add_me exists in context already, but add_me's value
    is of a different type, add_me will overwrite context. Do note this.
    i.e if you had context['int_key'] == 1 and
    add_me['int_key'] == 'clearly not a number', the end result would be
    context['int_key'] == 'clearly not a number'

    If add_me contains lists/sets/tuples, this merges these additively,
    meaning it appends values from add_me to the existing sequence.

    Args:
        add_me: dict. Merge this dict into context.

    Returns:
        None. All operations mutate this instance of context.
    """
    def merge_recurse(current, add_me):
        """Walk the current context tree in recursive inner function.

        On 1st iteration, current = self (i.e root of context)
        On subsequent recursive iterations, current is wherever you're
        at in the nested context hierarchy.

        Args:
            current: dict. Destination of merge.
            add_me: dict. Merge this to current.
        """
        for k, v in add_me.items():
            # key supports interpolation
            k = self.get_formatted_string(k)

            # str not mergable, so it doesn't matter if it exists in dest
            if isinstance(v, str):
                # just overwrite dest - str adds/edits indiscriminately
                current[k] = self.get_formatted_string(v)
            elif isinstance(v, (bytes, bytearray)):
                # bytes aren't mergable or formattable
                # only here to prevent the elif on enumerables catching it
                current[k] = v
            # deal with things that are mergable - exists already in dest
            elif k in current:
                if types.are_all_this_type(Mapping, current[k], v):
                    # it's dict-y, thus recurse through it to merge since
                    # it exists in dest
                    merge_recurse(current[k], v)
                elif types.are_all_this_type(list, current[k], v):
                    # it's list-y. Extend mutates existing list since it
                    # exists in dest
                    current[k].extend(
                        self.get_formatted_iterable(v))
                elif types.are_all_this_type(tuple, current[k], v):
                    # concatenate tuples
                    current[k] = (
                        current[k] + self.get_formatted_iterable(v))
                elif types.are_all_this_type(Set, current[k], v):
                    # join sets
                    current[k] = (
                        current[k] | self.get_formatted_iterable(v))
                else:
                    # at this point it's not mergable nor a known iterable
                    current[k] = v
            else:
                # at this point it's not mergable, nor in context
                current[k] = self.get_formatted_iterable(v)

    # first iteration starts at context dict root
    merge_recurse(self, add_me)
0.000524
def content(self):
    """
    The whole response content.
    """
    if not self._content:
        content = self.httplib_response.read()
        if self.is_binary_string(content):
            self._content = content
        else:
            self._content = content.decode('utf-8')
    return self._content
0.005714
def volume_infos(pool=None, volume=None, **kwargs):
    '''
    Provide details on a storage volume. If no volume name is provided, the
    infos of all the volumes contained in the pool are provided. If no pool
    is provided, the infos of the volumes of all pools are output.

    :param pool: libvirt storage pool name (default: ``None``)
    :param volume: name of the volume to get infos from (default: ``None``)
    :param connection: libvirt connection URI, overriding defaults
    :param username: username to connect with, overriding defaults
    :param password: password to connect with, overriding defaults

    .. versionadded:: Neon

    CLI Example:

    .. code-block:: bash

        salt "*" virt.volume_infos <pool> <volume>
    '''
    result = {}
    conn = __get_conn(**kwargs)
    try:
        backing_stores = _get_all_volumes_paths(conn)
        domains = _get_domain(conn)
        domains_list = domains if isinstance(domains, list) else [domains]
        disks = {domain.name():
                 {node.get('file') for node
                  in ElementTree.fromstring(domain.XMLDesc(0)).findall('.//disk/source/[@file]')}
                 for domain in domains_list}

        def _volume_extract_infos(vol):
            '''
            Format the volume info dictionary

            :param vol: the libvirt storage volume object.
            '''
            types = ['file', 'block', 'dir', 'network', 'netdir', 'ploop']
            infos = vol.info()

            # If we have a path, check its use.
            used_by = []
            if vol.path():
                as_backing_store = {path for (path, all_paths) in backing_stores.items()
                                    if vol.path() in all_paths}
                used_by = [vm_name for (vm_name, vm_disks) in disks.items()
                           if vm_disks & as_backing_store or vol.path() in vm_disks]

            return {
                'type': types[infos[0]] if infos[0] < len(types) else 'unknown',
                'key': vol.key(),
                'path': vol.path(),
                'capacity': infos[1],
                'allocation': infos[2],
                'used_by': used_by,
            }

        pools = [obj for obj in conn.listAllStoragePools()
                 if pool is None or obj.name() == pool]
        vols = {pool_obj.name(): {vol.name(): _volume_extract_infos(vol)
                                  for vol in pool_obj.listAllVolumes()
                                  if (volume is None or vol.name() == volume)
                                  and _is_valid_volume(vol)}
                for pool_obj in pools}
        return {pool_name: volumes for (pool_name, volumes) in vols.items() if volumes}
    except libvirt.libvirtError as err:
        log.debug('Silenced libvirt error: %s', str(err))
    finally:
        conn.close()
    return result
0.003239
def tail(self, n=5):
    """Return Index with the last n values.

    Parameters
    ----------
    n : int
        Number of values.

    Returns
    -------
    Series
        Index containing the last n values.

    Examples
    --------
    >>> ind = bl.Index(np.arange(3, dtype=np.float64))
    >>> print(ind.tail(2).evaluate())
    [1. 2.]
    """
    if self.empty:
        return self
    else:
        if self._length is not None:
            length = self._length
        else:
            length = self._lazy_len().weld_expr

        # not computing slice here to use with __getitem__ because
        # we'd need to use len which is eager
        return Index(weld_tail(self.weld_expr, length, n),
                     self.dtype,
                     self.name)
0.003456
def add_to_file_links(self):
    # type: () -> None
    '''
    Increment the number of POSIX file links on this entry by one.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized')

    if self.dr_entries.px_record is None:
        if self.ce_entries.px_record is None:
            raise pycdlibexception.PyCdlibInvalidInput('No Rock Ridge file links')
        self.ce_entries.px_record.posix_file_links += 1
    else:
        self.dr_entries.px_record.posix_file_links += 1
0.007429
def _handle_incoming_data(self, conn):
    """
    Handle incoming data on socket.
    """
    connection = [c for c in self.connections if c.conn == conn][0]
    data = conn.recv(1024)
    if data:
        connection.feed(data)
    else:
        self.connections.remove(connection)
0.006289
def enable():
    """
    Enable all benchmarking.
    """
    Benchmark.enable = True
    ComparisonBenchmark.enable = True
    BenchmarkedFunction.enable = True
    BenchmarkedClass.enable = True
0.005076
def dedup_alignment_plot(self):
    """ Make the HighCharts HTML to plot the duplication rates """
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['not_removed'] = {'name': 'Not Removed'}
    keys['reverse_removed'] = {'name': 'Reverse Removed'}
    keys['forward_removed'] = {'name': 'Forward Removed'}
    keys['merged_removed'] = {'name': 'Merged Removed'}

    # Config for the plot
    config = {
        'id': 'dedup_rates',
        'title': 'DeDup: Deduplicated Reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads',
        'hide_zero_cats': False
    }

    return bargraph.plot(self.dedup_data, keys, config)
0.016774
def find_shift_dft(im0, im1, isccs=False, subpix=True):
    """Find the shift between two images using the DFT method

    Parameters
    ----------
    im0: 2d array
        First image
    im1: 2d array
        Second image
    isccs: Boolean, default false
        Set to True if the images are already DFT and in CCS representation
    subpix: boolean, default True
        Set to True (default) if you want subpixel precision

    Returns
    -------
    [y, x]: 2 numbers
        The offset

    Notes
    -----
    This algorithm detects a shift using the global phase difference of the
    DFTs. If the images are already DFT and in the CCS format, set isccs to
    true. In that case the images should have the same size.
    If subpix is True, a gaussian fit is used for subpix precision.
    """
    # sanitize input
    im0 = np.asarray(im0, dtype=np.float32)
    im1 = np.asarray(im1, dtype=np.float32)

    # check input
    if not isccs:
        im0, im1 = dft_optsize_same(im0, im1)
    else:
        # Work only if the shapes are the same
        assert(im0.shape == im1.shape)

    # f0*conj(f1)
    mulSpec = cv2.mulSpectrums(im0, im1, flags=0, conjB=True)
    # norm(f0)*norm(f1)
    normccs = cv2.sqrt(cv2.mulSpectrums(im0, im0, flags=0, conjB=True) *
                       cv2.mulSpectrums(im1, im1, flags=0, conjB=True))
    # compute the inverse DFT
    xc = cv2.dft(ccs_normalize(mulSpec, normccs),
                 flags=cv2.DFT_REAL_OUTPUT | cv2.DFT_INVERSE)

    # Blur xc to remove some noise and improve the subpixel detection
    # workaround as GaussianBlur doesn't work with BORDER_WRAP
    blurRadii = 2
    xc = cv2.copyMakeBorder(xc, blurRadii, blurRadii, blurRadii, blurRadii,
                            borderType=cv2.BORDER_WRAP)
    xc = cv2.GaussianBlur(xc, (2 * blurRadii + 1, 2 * blurRadii + 1), 1.5)
    xc = xc[blurRadii:-blurRadii, blurRadii:-blurRadii]

    # save shape
    shape = np.asarray(xc.shape)

    # find max
    idx = np.asarray(np.unravel_index(np.argmax(xc), shape))

    # disabled debugging plots kept from the original source
    """
    from matplotlib import pyplot as plt
    from numpy.fft import fftshift
    plt.figure()
    plt.imshow(np.log(np.abs(fftshift(im0))))
    plt.figure()
    plt.imshow(np.log(np.abs(fftshift(im1))))
    plt.figure()
    plt.imshow(fftshift(ccs_normalize(mulSpec, normccs)))
    plt.figure()
    extent = (-np.shape(xc)[1] / 2, np.shape(xc)[1] / 2,
              -np.shape(xc)[0] / 2, np.shape(xc)[0] / 2)
    plt.imshow(np.log(np.abs(fftshift(xc))), extent=extent)
    #"""
    # plt.imshow(fftshift(xc))
    # print(idx)
    # plt.figure()
    # if toremove:
    #     plt.figure(1)
    #     l = len(xc[:, 0])
    #     plt.plot(np.arange(l) / l, xc[:, 0])
    #     print(l, xc[-1, 0])
    #     plt.figure(2)
    # """

    if subpix:
        # update idx
        idx = np.asarray([get_peak_pos(xc[:, idx[1]], wrap=True),
                          get_peak_pos(xc[idx[0], :], wrap=True)])
    else:
        # restricts to reasonable values
        idx[idx > shape // 2] -= shape[idx > shape // 2]

    return idx
0.000995
def get_experiment(self, name, variants):
    """
    Retrieve an experiment by its name and variants (assuming it exists).

    :param name a unique string name for the experiment
    :param variants a list of strings, each with a unique variant name

    Returns a ``cleaver.experiment.Experiment`` or ``None``
    """
    try:
        return self.experiment_factory(model.Experiment.get_by(name=name))
    finally:
        self.Session.close()
0.004115
def new_body(name=None, pos=None, **kwargs):
    """
    Creates a body element with attributes specified by @**kwargs.

    Args:
        name (str): body name.
        pos: 3d position of the body frame.
    """
    if name is not None:
        kwargs["name"] = name
    if pos is not None:
        kwargs["pos"] = array_to_string(pos)
    element = ET.Element("body", attrib=kwargs)
    return element
0.002475
def description_for_number(numobj, lang, script=None, region=None):
    """Return a text description of a PhoneNumber object for the given language.

    The description might consist of the name of the country where the phone
    number is from and/or the name of the geographical area the phone number
    is from. This function explicitly checks the validity of the number
    passed in.

    Arguments:
    numobj -- The PhoneNumber object for which we want to get a text
          description.
    lang -- A 2-letter lowercase ISO 639-1 language code for the language in
          which the description should be returned (e.g. "en")
    script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
          ISO script code as defined in ISO 15924, separated by an
          underscore (e.g. "Hant")
    region -- The region code for a given user. This region will be omitted
          from the description if the phone number comes from this region.
          It should be a two-letter upper-case CLDR region code.

    Returns a text description in the given language code, for the given
    phone number, or an empty string if no description is available."""
    ntype = number_type(numobj)
    if ntype == PhoneNumberType.UNKNOWN:
        return ""
    elif not is_number_type_geographical(ntype, numobj.country_code):
        return country_name_for_number(numobj, lang, script, region)
    return description_for_valid_number(numobj, lang, script, region)
0.002628
def create_record_set(self, rs_dict):
    """Accept a record_set dict.

    Return a Troposphere record_set object.
    """
    record_set_md5 = get_record_set_md5(rs_dict["Name"], rs_dict["Type"])
    rs = route53.RecordSetType.from_dict(record_set_md5, rs_dict)
    rs = add_hosted_zone_id_if_missing(rs, self.hosted_zone_id)
    rs = self.add_hosted_zone_id_for_alias_target_if_missing(rs)
    return self.template.add_resource(rs)
0.004464
def _try_char(character, backup, encoding=sys.stdout.encoding):
    """
    Return `character` if it can be encoded using sys.stdout, else return the
    backup character.
    """
    if character.encode(encoding, 'replace') == b'?':
        return backup
    else:
        return character
0.003448
def skew(self):
    "skewness"
    n, a, b = self.n, self.a, self.b
    t1 = (a + b + 2 * n) * (b - a) / (a + b + 2)
    t2 = sqrt((1 + a + b) / (n * a * b * (n + a + b)))
    return t1 * t2
0.010638
def popValue(self, argList):
    """
    Take a flat arglist, and pop relevant values and return as a value
    or tuple.
    """
    # return self._Tuple(*[name for (name, typeObj) in self._types.items()])
    return self._Tuple(*[typeObj.popValue(argList)
                         for (name, typeObj) in self._types.items()])
0.016556
def gaussian_hmm(pi, P, means, sigmas):
    """ Initializes a 1D-Gaussian HMM

    Parameters
    ----------
    pi : ndarray(nstates, )
        Initial distribution.
    P : ndarray(nstates,nstates)
        Hidden transition matrix
    means : ndarray(nstates, )
        Means of Gaussian output distributions
    sigmas : ndarray(nstates, )
        Standard deviations of Gaussian output distributions
    stationary : bool, optional, default=True
        If True: initial distribution is equal to stationary distribution
        of transition matrix
    reversible : bool, optional, default=True
        If True: transition matrix will fulfill detailed balance
        constraints.

    """
    from bhmm.hmm.gaussian_hmm import GaussianHMM
    from bhmm.output_models.gaussian import GaussianOutputModel

    # count states
    nstates = _np.array(P).shape[0]

    # initialize output model
    output_model = GaussianOutputModel(nstates, means, sigmas)

    # initialize general HMM
    from bhmm.hmm.generic_hmm import HMM as _HMM
    ghmm = _HMM(pi, P, output_model)

    # turn it into a Gaussian HMM
    ghmm = GaussianHMM(ghmm)

    return ghmm
0.001765
def f_string_check(self, original, loc, tokens):
    """Handle Python 3.6 format strings."""
    return self.check_py("36", "format string", original, loc, tokens)
0.011696
def _map_xpath_flags_to_re(expr: str, xpath_flags: str) -> Tuple[int, str]:
    """ Map `5.6.2 Flags <https://www.w3.org/TR/xpath-functions-31/#flags>`_ to python

    :param expr: match pattern
    :param xpath_flags: xpath flags
    :returns: python flags / modified match pattern
    """
    python_flags: int = 0
    modified_expr = expr
    if xpath_flags is None:
        xpath_flags = ""
    if 's' in xpath_flags:
        python_flags |= re.DOTALL
    if 'm' in xpath_flags:
        python_flags |= re.MULTILINE
    if 'i' in xpath_flags:
        python_flags |= re.IGNORECASE
    if 'x' in xpath_flags:
        modified_expr = re.sub(r'[\t\n\r ]|\[[^\]]*\]', _char_class_escape,
                               modified_expr)
    if 'q' in xpath_flags:
        modified_expr = re.escape(modified_expr)
    return python_flags, modified_expr
0.003659
def make(self):
    """Instantiates an instance of the environment with appropriate kwargs"""
    if self._entry_point is None:
        raise error.Error('Attempting to make deprecated env {}. (HINT: is there a newer registered version of this env?)'.format(self.id))

    cls = load(self._entry_point)
    env = cls(**self._kwargs)

    # Make the environment aware of which spec it came from.
    env.spec = self
    env = env.build(extra_wrappers=self._wrappers)

    return env
0.007752
def salt_ssh(project, target, module, args=None, kwargs=None):
    """
    Execute a `salt-ssh` command
    """
    cmd = ['salt-ssh']
    cmd.extend(generate_salt_cmd(target, module, args, kwargs))
    cmd.append('--state-output=mixed')
    cmd.append('--roster-file=%s' % project.roster_path)
    cmd.append('--config-dir=%s' % project.salt_ssh_config_dir)
    cmd.append('--ignore-host-keys')
    cmd.append('--force-color')
    cmd = ' '.join(cmd)
    logger.debug('salt-ssh cmd: %s', cmd)
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if proc.returncode != 0 or err:
        raise Exception(err)
    return out + err
0.002829
def get_language_and_region(self):
    """
    Returns the combined language+region string or \\x00\\x00 for the
    default locale
    :return:
    """
    if self.locale != 0:
        _language = self._unpack_language_or_region(
            [self.locale & 0xff, (self.locale & 0xff00) >> 8, ], ord('a'))
        _region = self._unpack_language_or_region(
            [(self.locale & 0xff0000) >> 16, (self.locale & 0xff000000) >> 24, ], ord('0'))
        return (_language + "-r" + _region) if _region else _language
    return "\x00\x00"
0.009191
def kpl_off(self, address, group):
    """Turn off a KPL button."""
    addr = Address(address)
    device = self.plm.devices[addr.id]
    device.states[group].off()
0.010526
def get_prep_lookup(self):
    """
    Convert the Python value(s) used in the lookup to LDAP values.
    """
    field = self.lhs.output_field
    if self.rhs_is_iterable and not field.multi_valued_field:
        # self.rhs is an iterable, and the field expects single-valued options.
        return [field.get_prep_value(v) for v in self.rhs]
    else:
        # self.rhs is 'as multi-valued' as the field.
        return field.get_prep_value(self.rhs)
0.006048
def send_client_cmd(self, data, cmd=None, via_queue=None):
    """
    Send arbitrary cmd and data to client.

    If a queue name is passed via the "via_queue" parameter, that queue
    will be used instead of the user's private exchange.

    Args:
        data: dict
        cmd: string
        via_queue: queue name
    """
    mq_channel = self._connect_mq()
    if cmd:
        data['cmd'] = cmd

    if via_queue:
        mq_channel.basic_publish(exchange='',
                                 routing_key=via_queue,
                                 body=json.dumps(data))
    else:
        mq_channel.basic_publish(exchange=self.prv_exchange,
                                 routing_key='',
                                 body=json.dumps(data))
0.002448
def train_model(self, balance, *args, **kwargs):
    """
    Args:
        balance: A 1d arraylike that sums to 1, corresponding to the
            (possibly estimated) class balance.
    """
    self.balance = np.array(balance)
0.007937
def wait_stopped(self, timeout=None, force=False):
    """Wait for the thread to stop.

    You must have previously called signal_stop or this function will
    hang.

    Args:
        timeout (float): The maximum time to wait for the thread to stop
            before raising a TimeoutExpiredError.  If force is True,
            TimeoutExpiredError is not raised and the thread is just
            marked as a daemon thread so that it does not block cleanly
            exiting the process.
        force (bool): If true and the thread does not exit in timeout
            seconds no error is raised since the thread is marked as
            daemon and will be killed when the process exits.
    """
    self.join(timeout)

    if self.is_alive() and force is False:
        raise TimeoutExpiredError("Error waiting for background thread to exit",
                                  timeout=timeout)
0.005353
def _write_recordio(f, data):
    """Writes a single data point as a RecordIO record to the given file."""
    length = len(data)
    f.write(struct.pack('I', _kmagic))
    f.write(struct.pack('I', length))
    pad = (((length + 3) >> 2) << 2) - length
    f.write(data)
    f.write(padding[pad])
0.003378
def get_assign_groups(line, ops=ops):
    """
    Split a line into groups by assignment (including augmented assignment)
    """
    group = []
    for item in line:
        group.append(item)
        if item in ops:
            yield group
            group = []
    yield group
0.003521
def append_to_run_from_text(cls, r, text):
    """
    Create a "one-shot" ``_RunContentAppender`` instance and use it to
    append the run content elements corresponding to *text* to the
    ``<w:r>`` element *r*.
    """
    appender = cls(r)
    appender.add_text(text)
0.006645
def get_all_invoices(self, params=None):
    """
    Get all invoices.

    This will iterate over all pages until it gets all elements.
    So if the rate limit is exceeded it will throw an Exception and you
    will get nothing.

    :param params: search params
    :return: list
    """
    if not params:
        params = {}
    return self._iterate_through_pages(self.get_invoices_per_page,
                                       resource=INVOICES,
                                       **{'params': params})
0.008565
def save_webdriver_logs(self, test_name):
    """Get webdriver logs and write them to log files

    :param test_name: test that has generated these logs
    """
    try:
        log_types = self.driver_wrapper.driver.log_types
    except Exception:
        # geckodriver does not implement log_types, but it implements
        # get_log for client and server
        log_types = ['client', 'server']

    self.logger.debug("Reading logs from '%s' and writing them to log files",
                      ', '.join(log_types))
    for log_type in log_types:
        try:
            self.save_webdriver_logs_by_type(log_type, test_name)
        except Exception:
            # Capture exceptions to avoid errors in teardown method
            pass
0.005181
def get_capability_report(self, raw=True, cb=None):
    """
    This method retrieves the Firmata capability report

    :param raw: If True, it either stores or provides the callback
                with a report as list.
                If False, prints a formatted report to the console
    :param cb: Optional callback reference to receive a raw report
    :returns: capability report
    """
    task = asyncio.ensure_future(self.core.get_capability_report())
    report = self.loop.run_until_complete(task)
    if raw:
        if cb:
            cb(report)
        else:
            return report
    else:
        # noinspection PyProtectedMember
        self.core._format_capability_report(report)
0.002567
def tetrahedral_barycentric_coordinates(tetra, pt):
    '''
    tetrahedral_barycentric_coordinates(tetrahedron, point) yields a list of
      weights for each vertex in the given tetrahedron in the same order as
      the vertices given. If all weights are 0, then the point is not inside
      the tetrahedron.
    '''
    # I found a description of this algorithm here (Nov. 2017):
    # http://steve.hollasch.net/cgindex/geometry/ptintet.html
    tetra = np.asarray(tetra)
    if tetra.shape[0] != 4:
        if tetra.shape[1] == 4:
            if tetra.shape[0] == 3:
                tetra = np.transpose(tetra, (1,0) if len(tetra.shape) == 2 else (1,0,2))
            else:
                tetra = np.transpose(tetra, (1,2,0))
        elif tetra.shape[1] == 3:
            tetra = np.transpose(tetra, (2,1,0))
        else:
            tetra = np.transpose(tetra, (2,0,1))
    elif tetra.shape[1] != 3:
        tetra = np.transpose(tetra, (0,2,1))
    if pt.shape[0] != 3:
        pt = pt.T
    # Okay, calculate the determinants...
    d_ = det_4x3(tetra[0], tetra[1], tetra[2], tetra[3])
    d0 = det_4x3(pt, tetra[1], tetra[2], tetra[3])
    d1 = det_4x3(tetra[0], pt, tetra[2], tetra[3])
    d2 = det_4x3(tetra[0], tetra[1], pt, tetra[3])
    d3 = det_4x3(tetra[0], tetra[1], tetra[2], pt)
    s_ = np.sign(d_)
    z_ = np.logical_or(np.any([s_ * si == -1 for si in np.sign([d0,d1,d2,d3])], axis=0),
                       np.isclose(d_, 0))
    x_ = np.logical_not(z_)
    d_inv = x_ / (x_ * d_ + z_)
    return np.asarray([d_inv * dq for dq in (d0,d1,d2,d3)])
0.015248
def fix_config(self, options):
    """
    Fixes the options, if necessary. I.e., it adds all required elements
    to the dictionary.

    :param options: the options to fix
    :type options: dict
    :return: the (potentially) fixed options
    :rtype: dict
    """
    options = super(PRC, self).fix_config(options)

    opt = "class_index"
    if opt not in options:
        options[opt] = [0]
    if opt not in self.help:
        self.help[opt] = "The list of 0-based class-label indices to display (list)."

    opt = "key_loc"
    if opt not in options:
        options[opt] = "lower center"
    if opt not in self.help:
        self.help[opt] = "The location of the key in the plot (str)."

    opt = "title"
    if opt not in options:
        options[opt] = None
    if opt not in self.help:
        self.help[opt] = "The title for the plot (str)."

    opt = "outfile"
    if opt not in options:
        options[opt] = None
    if opt not in self.help:
        self.help[opt] = "The file to store the plot in (str)."

    opt = "wait"
    if opt not in options:
        options[opt] = True
    if opt not in self.help:
        self.help[opt] = "Whether to wait for user to close the plot window (bool)."

    return options
0.00366
def query_extensions(self, extension_query, account_token=None, account_token_header=None):
    """QueryExtensions.

    [Preview API]
    :param :class:`<ExtensionQuery> <azure.devops.v5_1.gallery.models.ExtensionQuery>` extension_query:
    :param str account_token:
    :param String account_token_header: Header to pass the account token
    :rtype: :class:`<ExtensionQueryResult> <azure.devops.v5_1.gallery.models.ExtensionQueryResult>`
    """
    query_parameters = {}
    if account_token is not None:
        query_parameters['accountToken'] = self._serialize.query('account_token', account_token, 'str')
    content = self._serialize.body(extension_query, 'ExtensionQuery')
    response = self._send(http_method='POST',
                          location_id='eb9d5ee1-6d43-456b-b80e-8a96fbc014b6',
                          version='5.1-preview.1',
                          query_parameters=query_parameters,
                          content=content)
    return self._deserialize('ExtensionQueryResult', response)
0.00641
def eigh_robust(a, b=None, eigvals=None, eigvals_only=False,
                overwrite_a=False, overwrite_b=False,
                turbo=True, check_finite=True):
    """Robustly solve the Hermitian generalized eigenvalue problem

    This function robustly solves the Hermitian generalized eigenvalue
    problem ``A v = lambda B v`` in the case that B is not strictly
    positive definite. When B is strictly positive-definite, the result
    is equivalent to scipy.linalg.eigh() within floating-point accuracy.

    Parameters
    ----------
    a : (M, M) array_like
        A complex Hermitian or real symmetric matrix whose eigenvalues
        and eigenvectors will be computed.
    b : (M, M) array_like, optional
        A complex Hermitian or real symmetric matrix.
        If omitted, identity matrix is assumed.
    eigvals : tuple (lo, hi), optional
        Indexes of the smallest and largest (in ascending order)
        eigenvalues and corresponding eigenvectors to be returned:
        0 <= lo <= hi <= M-1. If omitted, all eigenvalues and
        eigenvectors are returned.
    eigvals_only : bool, optional
        Whether to calculate only eigenvalues and no eigenvectors.
        (Default: both are calculated)
    turbo : bool, optional
        Use divide and conquer algorithm (faster but expensive in memory,
        only for generalized eigenvalue problem and if eigvals=None)
    overwrite_a : bool, optional
        Whether to overwrite data in `a` (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in `b` (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite
        numbers. Disabling may give a performance gain, but may result
        in problems (crashes, non-termination) if the inputs do contain
        infinities or NaNs.

    Returns
    -------
    w : (N,) float ndarray
        The N (1<=N<=M) selected eigenvalues, in ascending order, each
        repeated according to its multiplicity.
    v : (M, N) complex ndarray
        (if eigvals_only == False)
    """
    kwargs = dict(eigvals=eigvals, eigvals_only=eigvals_only,
                  turbo=turbo, check_finite=check_finite,
                  overwrite_a=overwrite_a, overwrite_b=overwrite_b)

    # Check for easy case first:
    if b is None:
        return linalg.eigh(a, **kwargs)

    # Compute eigendecomposition of b
    kwargs_b = dict(turbo=turbo, check_finite=check_finite,
                    overwrite_a=overwrite_b)  # b is a for this operation
    S, U = linalg.eigh(b, **kwargs_b)

    # Combine a and b on left hand side via decomposition of b
    S[S <= 0] = np.inf
    Sinv = 1. / np.sqrt(S)
    W = Sinv[:, None] * np.dot(U.T, np.dot(a, U)) * Sinv
    output = linalg.eigh(W, **kwargs)

    if eigvals_only:
        return output
    else:
        evals, evecs = output
        return evals, np.dot(U, Sinv[:, None] * evecs)
0.00034
def has_table(self, table):
    """
    Determine if the given table exists.

    :param table: The table
    :type table: str

    :rtype: bool
    """
    sql = self._grammar.compile_table_exists()
    table = self._connection.get_table_prefix() + table
    return len(self._connection.select(sql, [table])) > 0
0.005698
def send(self, tid, company_code, session, **kwargs):
    '''taobao.logistics.online.send -- ship an online order (supports cash on delivery)

    Calling this API performs online order shipment. There are two cases:

    - If no tracking number is supplied, the trade status does not change;
      taobao.logistics.online.confirm must be called to confirm shipment
      before the trade status changes to "seller has shipped".
    - If a tracking number is supplied, the order status changes directly
      to "seller has shipped".
    '''
    request = TOPRequest('taobao.logistics.online.send')
    request['tid'] = tid
    request['company_code'] = company_code
    for k, v in kwargs.items():
        if k not in ('out_sid', 'sender_id', 'cancel_id', 'feature') and v is None:
            continue
        request[k] = v
    self.create(self.execute(request, session),
                fields=['shipping', ],
                models={'shipping': Shipping})
    return self.shipping
0.014667
def clear_all(self):
    """Delete all Features."""
    logger.info("Clearing ALL Features and FeatureKeys.")
    self.session.query(Feature).delete(synchronize_session="fetch")
    self.session.query(FeatureKey).delete(synchronize_session="fetch")
0.007576
def get_next_action(self, request, application, label, roles):
    """ Django view method. """
    actions = self.get_actions(request, application, roles)

    if label == "approve" and 'approve' in actions:
        application_form = self.get_approve_form(
            request, application, roles)
        form = application_form(request.POST or None, instance=application)
        if request.method == 'POST':
            if 'back' in request.POST:
                url = base.get_url(request, application, roles)
                return HttpResponseRedirect(url)
            if 'approve' in request.POST and form.is_valid():
                form.save()
                return "approve"
        return render(
            template_name=self.template_approve,
            context={
                'application': application,
                'form': form,
                'authorised_text': self.authorised_text,
                'actions': self.get_actions(request, application, roles),
                'roles': roles
            },
            request=request)

    elif label == "cancel" and 'cancel' in actions:
        if request.method == 'POST':
            form = EmailForm(request.POST)
            if 'back' in request.POST:
                url = base.get_url(request, application, roles)
                return HttpResponseRedirect(url)
            if 'cancel' in request.POST and form.is_valid():
                to_email = application.applicant.email
                subject, body = form.get_data()
                emails.send_mail(
                    subject, body,
                    settings.ACCOUNTS_EMAIL, [to_email])
                return "cancel"
        else:
            link, is_secret = base.get_email_link(application)
            subject, body = emails.render_email(
                'common_declined',
                {'receiver': application.applicant,
                 'authorised_text': self.authorised_text,
                 'application': application,
                 'link': link,
                 'is_secret': is_secret})
            initial_data = {'body': body, 'subject': subject}
            form = EmailForm(initial=initial_data)
        return render(
            template_name=self.template_decline,
            context={
                'application': application,
                'form': form,
                'authorised_text': self.authorised_text,
                'actions': self.get_actions(request, application, roles),
                'roles': roles
            },
            request=request)

    elif request.method == "POST":
        for action in ['approve', 'cancel']:
            if action in request.POST:
                url = base.get_url(request, application, roles, action)
                return HttpResponseRedirect(url)

    # Note: the parent class method will do the same thing, however this
    # makes it explicit.
    if label is not None:
        return HttpResponseBadRequest("<h1>Bad Request</h1>")

    self.context = {
        'authorised_text': self.authorised_text,
    }
    return super(StateWaitingForApproval, self).get_next_action(
        request, application, label, roles)
0.000585
def wfdb_strptime(time_string):
    """
    Given a time string in an acceptable wfdb format, return
    a datetime.time object.

    Valid formats: SS, MM:SS, HH:MM:SS, all with and without microsec.
    """
    n_colons = time_string.count(':')

    if n_colons == 0:
        time_fmt = '%S'
    elif n_colons == 1:
        time_fmt = '%M:%S'
    elif n_colons == 2:
        time_fmt = '%H:%M:%S'

    if '.' in time_string:
        time_fmt += '.%f'

    return datetime.datetime.strptime(time_string, time_fmt).time()
0.001919
def read_block(self, block):
    """Read an 8-byte data block at address (block * 8).
    """
    if block < 0 or block > 255:
        raise ValueError("invalid block number")
    log.debug("read block {0}".format(block))
    cmd = "\x02" + chr(block) + 8 * chr(0) + self.uid
    return self.transceive(cmd)[1:9]
0.005882
def _full_axis_reduce(self, axis, func, alternate_index=None):
    """Applies a map that reduces the Manager to a series but requires
    knowledge of the full axis.

    Args:
        axis: axis to apply the function to.
        func: Function to reduce the Manager by. This function takes in a
            Manager.
        alternate_index: If the resulting series should have an index
            different from the current query_compiler's index or columns.

    Return:
        Pandas series containing the reduced data.
    """
    result = self.data.map_across_full_axis(axis, func)
    if axis == 0:
        columns = alternate_index if alternate_index is not None else self.columns
        return self.__constructor__(result, index=["__reduced__"], columns=columns)
    else:
        index = alternate_index if alternate_index is not None else self.index
        return self.__constructor__(result, index=index, columns=["__reduced__"])
0.00818
def _end_consent(self, context, internal_response):
    """
    Clear the state for consent and end the consent step

    :type context: satosa.context.Context
    :type internal_response: satosa.internal.InternalData
    :rtype: satosa.response.Response

    :param context: response context
    :param internal_response: the response
    :return: response
    """
    del context.state[STATE_KEY]
    return super().process(context, internal_response)
0.004024
def _tot_services_by_state(self, services, state):
    """Get the number of services in the specified state

    :param state: state to filter services by
    :type state: int
    :return: number of services with s.state_id == state, as a string
    :rtype: str
    """
    return str(sum(1 for s in self.services
                   if services[s].state_id == state))
0.005319
def document(self):
    """Render the error document"""
    resp = request.environ.get('pylons.original_response')
    content = literal(resp.body) or cgi.escape(request.GET.get('message'))
    page = error_document_template % \
        dict(prefix=request.environ.get('SCRIPT_NAME', ''),
             code=cgi.escape(request.GET.get('code', str(resp.status_int))),
             message=content)
    return page
0.006772
def add_tag(self, _tags):
    """Add tag(s) to a DayOneEntry"""
    if isinstance(_tags, list):
        for t in _tags:
            self.tags.append(t)
    else:
        self.tags.append(_tags)
0.009217
def unpack_qubit(qubit):
    """
    Get a qubit from an object.

    :param qubit: An int or Qubit.
    :return: A Qubit instance
    """
    if isinstance(qubit, integer_types):
        return Qubit(qubit)
    elif isinstance(qubit, Qubit):
        return qubit
    elif isinstance(qubit, QubitPlaceholder):
        return qubit
    else:
        raise TypeError("qubit should be an int or Qubit instance")
0.002451
def _init_logging(verbose):
    """Enable logging to stream."""
    hdlr = logging.StreamHandler()
    hdlr.setFormatter(logging.Formatter(
        '%(asctime)s [%(levelname)s] [%(module)s] %(message)s'))
    LOG.addHandler(hdlr)
    if verbose:
        LOG.setLevel(logging.DEBUG)
        LOG.debug('Verbose output enabled.')
    else:
        LOG.setLevel(logging.INFO)
0.002703
def setup_logfile(self, logfile, mode='w'):
    '''start logging to the given logfile, with timestamps'''
    self.logfile = open(logfile, mode=mode)
0.012739
def check_exptime(filelist):
    """
    Removes files with EXPTIME==0 from filelist.
    """
    toclose = False
    removed_files = []

    for f in filelist:
        if isinstance(f, str):
            f = fits.open(f)
            toclose = True
        try:
            exptime = f[0].header['EXPTIME']
        except KeyError:
            removed_files.append(f)
            print("Warning: There are files without keyword EXPTIME")
            continue

        if exptime <= 0:
            removed_files.append(f)
            print("Warning: There are files with zero exposure time: keyword EXPTIME = 0.0")

    if removed_files != []:
        print("Warning: Removing the following files from input list")
        for f in removed_files:
            print('\t', f.filename() or "")

    return removed_files
0.003686
def quick_confirm(prompt, default_value):
    """
    Function to display a quick confirmation for user input

    **Parameters:**

      - **prompt:** Text to display before confirm
      - **default_value:** Default value for no entry

    **Returns:** 'y', 'n', or Default value.
    """
    valid = False
    value = default_value.lower()
    while not valid:
        input_val = compat_input(prompt + "[{0}]: ".format(default_value))

        if input_val == "":
            value = default_value.lower()
            valid = True
        else:
            try:
                if input_val.lower() in ['y', 'n']:
                    value = input_val.lower()
                    valid = True
                else:
                    print("ERROR: enter 'Y' or 'N'.")
                    valid = False
            except ValueError:
                print("ERROR: enter 'Y' or 'N'.")
                valid = False

    return value
0.001923
def append(self, obj):
    """
    If it is a list it will append the obj; if it is a dictionary it will
    convert it to a list and append.

    :param obj: dict or list of the object to append
    :return: None
    """
    if isinstance(obj, dict) and self._col_names:
        obj = [obj.get(col, None) for col in self._col_names]
    assert isinstance(obj, list), \
        "obj appended to ReprListList needs to be a list or dict"
    self._original.append(obj)
0.003937
def handle(send, msg, args):
    """Implements several XKCD comics."""
    output = textutils.gen_xkcd_sub(msg, True)
    if output is None:
        return
    if args['type'] == 'action':
        send("correction: * %s %s" % (args['nick'], output))
    else:
        send("%s actually meant: %s" % (args['nick'], output))
0.003106
def comment(self, issue, body):
    """Comment on existing issue on Github.

    For JSON data returned by Github refer:
    https://developer.github.com/v3/issues/comments/#create-a-comment

    :param issue: object of existing issue
    :param body: body of the comment
    :returns: dict of JSON data returned by Github of the new comment
    :rtype: `dict`
    """
    url = issue.comments_url
    data = {'body': body}
    response = self.session.post(url, json.dumps(data))
    assert response.status_code == 201
    return json.loads(response.content)
0.003263
def controller(url_prefix_or_controller_cls: Union[str, Type[Controller]],
               controller_cls: Optional[Type[Controller]] = None,
               *,
               rules: Optional[Iterable[Union[Route, RouteGenerator]]] = None,
               ) -> RouteGenerator:
    """
    This function is used to register a controller class's routes.

    Example usage::

        routes = lambda: [
            controller(SiteController),
        ]

    Or with the optional prefix argument::

        routes = lambda: [
            controller('/products', ProductController),
        ]

    Specify ``rules`` to only include those routes from the controller::

        routes = lambda: [
            controller(SecurityController, rules=[
                rule('/login', SecurityController.login),
                rule('/logout', SecurityController.logout),
                rule('/sign-up', SecurityController.register),
            ]),
        ]

    :param url_prefix_or_controller_cls: The controller class, or a url
        prefix for all of the rules from the controller class passed as
        the second argument
    :param controller_cls: If a url prefix was given as the first argument,
        then the controller class must be passed as the second argument
    :param rules: An optional list of rules to limit/customize the routes
        included from the controller
    """
    url_prefix, controller_cls = _normalize_args(
        url_prefix_or_controller_cls, controller_cls, _is_controller_cls)

    url_prefix = url_prefix or controller_cls.Meta.url_prefix

    routes = []
    controller_routes = getattr(controller_cls, CONTROLLER_ROUTES_ATTR)
    if rules is None:
        routes = controller_routes.values()
    else:
        for route in _reduce_routes(rules):
            existing = controller_routes.get(route.method_name)
            if existing:
                routes.append(_inherit_route_options(route, existing[0]))
            else:
                routes.append(route)

    yield from _normalize_controller_routes(routes, controller_cls,
                                            url_prefix=url_prefix)
0.002695
def swap(self, a, b):
    """ Swaps mem positions a and b
    """
    self.mem[a], self.mem[b] = self.mem[b], self.mem[a]
    self.asm[a], self.asm[b] = self.asm[b], self.asm[a]
0.010363
def collectChildProperties(self, kwargs, properties, collector, **kw):
    """Collapse the child values into a dictionary. This is intended to be
    called by child classes to fix up the fullName->name conversions."""

    childProperties = {}
    for field in self.fields:  # pylint: disable=not-an-iterable
        yield collector.collectValidationErrors(field.fullName,
                                                field.updateFromKwargs,
                                                kwargs=kwargs,
                                                properties=childProperties,
                                                collector=collector,
                                                **kw)
    kwargs[self.fullName] = childProperties
0.002506
def superimpose(self, module):
    """
    Superimpose a task module on registered tasks.

    :param module: ape tasks module that is superimposed on available ape tasks
    :return: None
    """
    featuremonkey.compose(module, self._tasks)
    self._tasks.FEATURE_SELECTION.append(module.__name__)
0.009091
def update(self, client):
    """Execute update command on instance."""
    update_cmd = "{sudo} '{refresh};{update}'".format(
        sudo=self.get_sudo_exec_wrapper(),
        refresh=self.get_refresh_repo_cmd(),
        update=self.get_update_cmd()
    )

    out = ''
    try:
        out = ipa_utils.execute_ssh_command(
            client,
            update_cmd
        )
    except Exception as error:
        raise IpaDistroException(
            'An error occurred updating instance: %s' % error
        )

    return out
0.003344
def _startRecording(self, filename):
    """
    Start recording the session to a file for debug purposes.
    """
    self.setOption('_log_file_name', filename)
    self.setOption('_log_input_only', True)
    self.setOption('_log', True)
0.007634
def getDefaultParList(self):
    """ Return a par list just like ours, but with all default values. """
    # The code below (create a new set-to-dflts obj) is correct, but it
    # adds a tenth of a second to startup.  Clicking "Defaults" in the
    # GUI does not call this.  But this can be used to set the order seen.

    # But first check for rare case of no cfg file name
    if self.filename is None:
        # this is a .cfgspc-only kind of object so far
        self.filename = self.getDefaultSaveFilename(stub=True)
        return copy.deepcopy(self.__paramList)

    tmpObj = ConfigObjPars(self.filename, associatedPkg=self.__assocPkg,
                           setAllToDefaults=True, strict=False)
    return tmpObj.getParList()
0.002532
def autocovariance(self, num_autocov=16):
    """
    Compute the autocovariance function from the ARMA parameters
    over the integers range(num_autocov) using the spectral density
    and the inverse Fourier transform.

    Parameters
    ----------
    num_autocov : scalar(int), optional(default=16)
        The number of autocovariances to calculate
    """
    spect = self.spectral_density()[1]
    acov = np.fft.ifft(spect).real

    # num_autocov should be <= len(acov) / 2
    return acov[:num_autocov]
0.003534
def phenotypeAssociationSetsGenerator(self, request):
    """
    Returns a generator over the (phenotypeAssociationSet, nextPageToken)
    pairs defined by the specified request
    """
    dataset = self.getDataRepository().getDataset(request.dataset_id)
    return self._topLevelObjectGenerator(
        request, dataset.getNumPhenotypeAssociationSets(),
        dataset.getPhenotypeAssociationSetByIndex)
0.004545
def decode_field(self, field, value):
    """Decode the given JSON value.

    Args:
      field: a messages.Field for the field we're decoding.
      value: a python value we'd like to decode.

    Returns:
      A value suitable for assignment to field.
    """
    for decoder in _GetFieldCodecs(field, 'decoder'):
        result = decoder(field, value)
        value = result.value
        if result.complete:
            return value
    if isinstance(field, messages.MessageField):
        field_value = self.decode_message(
            field.message_type, json.dumps(value))
    elif isinstance(field, messages.EnumField):
        value = GetCustomJsonEnumMapping(
            field.type, json_name=value) or value
        try:
            field_value = super(
                _ProtoJsonApiTools, self).decode_field(field, value)
        except messages.DecodeError:
            if not isinstance(value, six.string_types):
                raise
            field_value = None
    else:
        field_value = super(
            _ProtoJsonApiTools, self).decode_field(field, value)
    return field_value
0.001633
def _handle_pong(self, ts, *args, **kwargs):
    """
    Handles pong messages; resets the self.ping_timer variable and logs
    info message.

    :param ts: timestamp, declares when data was received by the client
    :return:
    """
    log.info("BitfinexWSS.ping(): Ping received! (%ss)",
             ts - self.ping_timer)
    self.ping_timer = None
0.005128
def create_nonimplemented_method(op_name, klass):
    """
    Creates a new method that raises NotImplementedError.
    """
    def new_method(self, *args):
        raise NotImplementedError(
            'Special method %s has not been implemented for PyMC variables.' % op_name)
    new_method.__name__ = '__' + op_name + '__'
    setattr(klass,
            new_method.__name__,
            UnboundMethodType(new_method, None, klass))
0.002058
def atlasdb_sync_zonefiles(db, start_block, zonefile_dir, atlas_state,
                           validate=True, end_block=None, path=None, con=None):
    """
    Synchronize atlas DB with name db
    NOT THREAD SAFE
    """
    ret = None

    with AtlasDBOpen(con=con, path=path) as dbcon:
        ret = atlasdb_queue_zonefiles(dbcon, db, start_block, zonefile_dir,
                                      validate=validate, end_block=end_block)
        atlasdb_cache_zonefile_info(con=dbcon)

    if atlas_state:
        # it could have been the case that a zone file we already have was
        # re-announced.  if so, then inform any storage listeners in the
        # crawler thread that this has happened (such as the subdomain
        # system).
        crawler_thread = atlas_state['zonefile_crawler']
        for zfinfo in filter(lambda zfi: zfi['present'], ret):
            log.debug('Store re-discovered zonefile {} at {}'.format(
                zfinfo['zonefile_hash'], zfinfo['block_height']))
            crawler_thread.store_zonefile_cb(zfinfo['zonefile_hash'],
                                             zfinfo['block_height'])

    return ret
0.012621
def read_line(self, line):
    """
    Match a line of input according to the format specified and return
    a tuple of the resulting values
    """
    if not self._read_line_init:
        self.init_read_line()
    match = self._re.match(line)
    assert match is not None, f"Format mismatch (line = {line})"
    matched_values = []
    for i in range(self._re.groups):
        cvt_re = self._match_exps[i]
        cvt_div = self._divisors[i]
        cvt_fn = self._in_cvt_fns[i]
        match_str = match.group(i + 1)
        match0 = re.match(cvt_re, match_str)
        if match0 is not None:
            if cvt_fn == "float":
                if "." in match_str:
                    val = float(match_str)
                else:
                    val = int(match_str) / cvt_div
            elif cvt_fn == "int":
                val = int(match_str)
            else:
                sys.stderr.write(
                    f"Unrecognized conversion function: {cvt_fn}\n"
                )
        else:
            sys.stderr.write(
                f"Format conversion failed: {match_str}\n"
            )
        matched_values.append(val)
    return tuple(matched_values)
0.001521
def _filtered_data_zeroed(self):
    """
    A 2D `~numpy.ndarray` cutout from the input ``filtered_data``
    (or ``data`` if ``filtered_data`` is `None`) where any masked
    pixels (_segment_mask, _input_mask, or _data_mask) are set to
    zero.  Invalid values (e.g. NaNs or infs) are set to zero.
    Units are dropped on the input ``filtered_data`` (or ``data``).

    Negative data values are also set to zero because negative
    pixels (especially at large radii) can result in image moments
    that result in negative variances.
    """
    filt_data = self._filtered_data[self._slice]
    filt_data = np.where(self._total_mask, 0., filt_data)  # copy
    filt_data[filt_data < 0] = 0.
    return filt_data.astype(np.float64)
0.002519
def show(block=False):
    """Show current figures using vispy

    Parameters
    ----------
    block : bool
        If True, blocking mode will be used. If False, then non-blocking
        / interactive mode will be used.

    Returns
    -------
    canvases : list
        List of the vispy canvases that were created.
    """
    if not has_matplotlib():
        raise ImportError('Requires matplotlib version >= 1.2')
    cs = [_mpl_to_vispy(plt.figure(ii)) for ii in plt.get_fignums()]
    if block and len(cs) > 0:
        cs[0].app.run()
    return cs
0.001783
def get_proxy_session(self):
    """Gets a ``ProxySession`` which is responsible for acquiring
    authentication credentials on behalf of a service client.

    :return: a proxy session for this service
    :rtype: ``osid.proxy.ProxySession``
    :raise: ``OperationFailed`` -- unable to complete request
    :raise: ``Unimplemented`` -- ``supports_proxy()`` is ``false``

    *compliance: optional -- This method must be implemented if
    ``supports_proxy()`` is ``true``.*
    """
    if not self.supports_proxy():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise  # OperationFailed()
    try:
        session = sessions.ProxySession()
    except AttributeError:
        raise  # OperationFailed()
    return session
0.004739
def _combine_season_stats(self, table_rows, career_stats, all_stats_dict): """ Combine all stats for each season. Since all of the stats are spread across multiple tables, they should be combined into a single field which can be used to easily query stats at once. Parameters ---------- table_rows : generator A generator where each element is a row in a stats table. career_stats : generator A generator where each element is a row in the footer of a stats table. Career stats are kept in the footer, hence the usage. all_stats_dict : dictionary A dictionary of all stats separated by season where each key is the season ``string``, such as '2017', and the value is a ``dictionary`` with a ``string`` of 'data' and ``string`` containing all of the data. Returns ------- dictionary Returns an updated version of the passed all_stats_dict which includes more metrics from the provided table. """ most_recent_season = self._most_recent_season if not table_rows: table_rows = [] for row in table_rows: season = self._parse_season(row) try: all_stats_dict[season]['data'] += str(row) except KeyError: all_stats_dict[season] = {'data': str(row)} most_recent_season = season self._most_recent_season = most_recent_season if not career_stats: return all_stats_dict try: all_stats_dict['Career']['data'] += str(next(career_stats)) except KeyError: try: all_stats_dict['Career'] = {'data': str(next(career_stats))} # Occurs when the player doesn't have any career stats listed on # their page in error. except StopIteration: return all_stats_dict return all_stats_dict
0.000979
def update_dist_caches(dist_path, fix_zipimporter_caches):
    """
    Fix any globally cached `dist_path` related data

    `dist_path` should be a path of a newly installed egg distribution (zipped
    or unzipped).

    sys.path_importer_cache contains finder objects that have been cached when
    importing data from the original distribution. Any such finders need to be
    cleared since the replacement distribution might be packaged differently,
    e.g. a zipped egg distribution might get replaced with an unzipped egg
    folder or vice versa. Having the old finders cached may then cause Python
    to attempt loading modules from the replacement distribution using an
    incorrect loader.

    zipimport.zipimporter objects are Python loaders charged with importing
    data packaged inside zip archives. If stale loaders referencing the
    original distribution are left behind, they can fail to load modules from
    the replacement distribution. E.g. if an old zipimport.zipimporter
    instance is used to load data from a new zipped egg archive, it may cause
    the operation to attempt to locate the requested data in the wrong
    location - one indicated by the original distribution's zip archive
    directory information. Such an operation may then fail outright, e.g.
    report having read a 'bad local file header', or even worse, it may fail
    silently & return invalid data.

    zipimport._zip_directory_cache contains cached zip archive directory
    information for all existing zipimport.zipimporter instances and all such
    instances connected to the same archive share the same cached directory
    information.

    If asked, and the underlying Python implementation allows it, we can fix
    all existing zipimport.zipimporter instances instead of having to track
    them down and remove them one by one, by updating their shared cached zip
    archive directory information. This, of course, assumes that the
    replacement distribution is packaged as a zipped egg.

    If not asked to fix existing zipimport.zipimporter instances, we still do
    our best to clear any remaining zipimport.zipimporter related cached data
    that might somehow later get used when attempting to load data from the
    new distribution and thus cause such load operations to fail. Note that
    when tracking down such remaining stale data, we can not catch every
    conceivable usage from here, and we clear only those that we know of and
    have found to cause problems if left alive. Any remaining caches should be
    updated by whoever is in charge of maintaining them, i.e. they should be
    ready to handle us replacing their zip archives with new distributions at
    runtime.
    """
    # There are several other known sources of stale zipimport.zipimporter
    # instances that we do not clear here, but might if ever given a reason to
    # do so:
    # * Global setuptools pkg_resources.working_set (a.k.a. 'master working
    #   set') may contain distributions which may in turn contain their
    #   zipimport.zipimporter loaders.
    # * Several zipimport.zipimporter loaders held by local variables further
    #   up the function call stack when running the setuptools installation.
    # * Already loaded modules may have their __loader__ attribute set to the
    #   exact loader instance used when importing them. Python 3.4 docs state
    #   that this information is intended mostly for introspection and so is
    #   not expected to cause us problems.
normalized_path = normalize_path(dist_path) _uncache(normalized_path, sys.path_importer_cache) if fix_zipimporter_caches: _replace_zip_directory_cache_data(normalized_path) else: # Here, even though we do not want to fix existing and now stale # zipimporter cache information, we still want to remove it. Related to # Python's zip archive directory information cache, we clear each of # its stale entries in two phases: # 1. Clear the entry so attempting to access zip archive information # via any existing stale zipimport.zipimporter instances fails. # 2. Remove the entry from the cache so any newly constructed # zipimport.zipimporter instances do not end up using old stale # zip archive directory information. # This whole stale data removal step does not seem strictly necessary, # but has been left in because it was done before we started replacing # the zip archive directory information cache content if possible, and # there are no relevant unit tests that we can depend on to tell us if # this is really needed. _remove_and_clear_zip_directory_cache_data(normalized_path)
0.00021
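A minimal sketch of the first clearing phase described above, applied to sys.path_importer_cache; the egg path is hypothetical:

import sys
import importlib

egg_path = '/site-packages/example-1.0-py3.8.egg'  # hypothetical replaced egg
stale = [p for p in sys.path_importer_cache if p.startswith(egg_path)]
for p in stale:
    del sys.path_importer_cache[p]   # drop the stale finders
importlib.invalidate_caches()        # flush finder-internal caches as well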
def get_lastfunction_header(self, header, default_return_value=None):
    """Returns a specific header from the last API call
    This will return None if the header is not present

    :param header: (required) The name of the header you want to get
                   the value of
    :param default_return_value: value returned if the header is not present

    Most useful for the following header information:
        x-rate-limit-limit,
        x-rate-limit-remaining,
        x-rate-limit-class,
        x-rate-limit-reset
    """
    if self._last_call is None:
        raise TwythonError('This function must be called after an API call. '
                           'It delivers header information.')
    return self._last_call['headers'].get(header, default_return_value)
0.003942
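A usage sketch against the Twitter API via Twython; the credential constants are placeholders:

from twython import Twython

twitter = Twython(APP_KEY, APP_SECRET, OAUTH_TOKEN, OAUTH_TOKEN_SECRET)
twitter.verify_credentials()  # any API call populates the last-call headers
remaining = twitter.get_lastfunction_header('x-rate-limit-remaining')
limit = twitter.get_lastfunction_header('x-rate-limit-limit',
                                        default_return_value='unknown')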
def print_results(distributions, list_all_files):
    """
    Print the information from the installed distributions found.
    """
    results_printed = False
    for dist in distributions:
        results_printed = True
        logger.info("---")
        logger.info("Metadata-Version: %s" % dist.get('metadata-version'))
        logger.info("Name: %s" % dist['name'])
        logger.info("Version: %s" % dist['version'])
        logger.info("Summary: %s" % dist.get('summary'))
        logger.info("Home-page: %s" % dist.get('home-page'))
        logger.info("Author: %s" % dist.get('author'))
        logger.info("Author-email: %s" % dist.get('author-email'))
        logger.info("License: %s" % dist.get('license'))
        logger.info("Location: %s" % dist['location'])
        logger.info("Requires: %s" % ', '.join(dist['requires']))
        if list_all_files:
            logger.info("Files:")
            if dist['files'] is not None:
                for line in dist['files']:
                    logger.info("  %s" % line.strip())
            else:
                logger.info("Cannot locate installed-files.txt")
        if 'entry_points' in dist:
            logger.info("Entry-points:")
            for line in dist['entry_points']:
                logger.info("  %s" % line.strip())
    return results_printed
0.000756
def move_examples(root, lib_dir):
    """find examples not under lib dir, and move them into ``examples``"""
    all_pde = files_multi_pattern(root, INO_PATTERNS)
    lib_pde = files_multi_pattern(lib_dir, INO_PATTERNS)
    stray_pde = all_pde.difference(lib_pde)
    if stray_pde and not lib_pde:
        log.debug(
            'examples found outside lib dir, moving them: %s', stray_pde)
        examples = lib_dir / EXAMPLES
        examples.makedirs()
        for x in stray_pde:
            d = examples / x.namebase
            d.makedirs()
            x.move(d)
0.001742
def program_rtr_all_nwk_next_hop(self, tenant_id, rout_id, next_hop, excl_list): """Program the next hop for all networks of a tenant. """ namespace = self.find_rtr_namespace(rout_id) if namespace is None: LOG.error("Unable to find namespace for router %s", rout_id) return False net_list = self.get_network_by_tenant(tenant_id) for net in net_list: subnet_lst = self.get_subnets_for_net(net.get('id')) for subnet_elem in subnet_lst: subnet = subnet_elem.get('cidr').split('/')[0] subnet_and_mask = subnet_elem.get('cidr') if subnet not in excl_list: args = ['route', 'add', '-net', subnet_and_mask, 'gw', next_hop] ret = self.program_rtr(args, rout_id, namespace=namespace) if not ret: LOG.error("Program router returned error for %s", rout_id) return False return True
0.002674
def add_post(self, path: str, handler: _WebHandler, **kwargs: Any) -> AbstractRoute: """ Shortcut for add_route with method POST """ return self.add_route(hdrs.METH_POST, path, handler, **kwargs)
0.012295
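A hedged usage sketch in the aiohttp style this shortcut mirrors; the handler and payload are made up:

from aiohttp import web

async def submit(request):
    data = await request.post()             # parsed form body
    return web.json_response({'received': dict(data)})

app = web.Application()
app.router.add_post('/submit', submit)      # registers METH_POST for /submit
web.run_app(app)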
def _smb3kdf(self, ki, label, context): """ See SMB 3.x key derivation function https://blogs.msdn.microsoft.com/openspecification/2017/05/26/smb-2-and-smb-3-security-in-windows-10-the-anatomy-of-signing-and-cryptographic-keys/ :param ki: The session key is the KDK used as an input to the KDF :param label: The purpose of this derived key as bytes string :param context: The context information of this derived key as bytes string :return: Key derived by the KDF as specified by [SP800-108] 5.1 """ kdf = KBKDFHMAC( algorithm=hashes.SHA256(), mode=Mode.CounterMode, length=16, rlen=4, llen=4, location=CounterLocation.BeforeFixed, label=label, context=context, fixed=None, backend=default_backend() ) return kdf.derive(ki)
0.002128
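A standalone sketch of deriving an SMB 3.0 signing key with the method above; the label/context bytes follow the MS-SMB2 3.1.4.2 convention, while the session key and the conn instance are placeholders:

import os

session_key = os.urandom(16)  # placeholder; the real key comes from authentication
# conn is a hypothetical instance of the class that defines _smb3kdf
signing_key = conn._smb3kdf(session_key, b'SMB2AESCMAC\x00', b'SmbSign\x00')
assert len(signing_key) == 16  # the KDF is configured for 16-byte output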