text: string (lengths 78 to 104k)
score: float64 (range 0 to 0.18)
def _create_image_url(self, file_path, type_, target_size):
    """Get the closest available size for the specified image type.

    Arguments:
      file_path (:py:class:`str`): The image file path.
      type_ (:py:class:`str`): The type of image to create a URL for,
        (``'poster'`` or ``'profile'``).
      target_size (:py:class:`int`): The size of image to aim for (used
        as either width or height).

    """
    if self.image_config is None:
        logger.warning('no image configuration available')
        return
    return ''.join([
        self.image_config['secure_base_url'],
        self._image_size(self.image_config, type_, target_size),
        file_path,
    ])
0.002656
def round_linestring_coords(ls, precision):
    """
    Round the coordinates of a shapely LineString to some decimal precision.

    Parameters
    ----------
    ls : shapely LineString
        the LineString to round the coordinates of
    precision : int
        decimal precision to round coordinates to

    Returns
    -------
    LineString
    """
    return LineString([[round(x, precision) for x in c] for c in ls.coords])
0.004566
def clear(self):
    """
    Clears out all the items from this tab bar.
    """
    self.blockSignals(True)
    items = list(self.items())
    for item in items:
        item.close()
    self.blockSignals(False)

    self._currentIndex = -1
    self.currentIndexChanged.emit(self._currentIndex)
0.00597
def progress(rest):
    "Display the progress of something: start|end|percent"
    if rest:
        left, right, amount = [piece.strip() for piece in rest.split('|')]
        ticks = min(int(round(float(amount) / 10)), 10)
        bar = "=" * ticks
        return "%s [%-10s] %s" % (left, bar, right)
0.025926
def iter_links(self):
    # type: () -> Iterable[Link]
    """Yields all links in the page"""
    document = html5lib.parse(
        self.content,
        transport_encoding=_get_encoding_from_headers(self.headers),
        namespaceHTMLElements=False,
    )
    base_url = _determine_base_url(document, self.url)
    for anchor in document.findall(".//a"):
        if anchor.get("href"):
            href = anchor.get("href")
            url = _clean_link(urllib_parse.urljoin(base_url, href))
            pyrequire = anchor.get('data-requires-python')
            pyrequire = unescape(pyrequire) if pyrequire else None
            yield Link(url, self.url, requires_python=pyrequire)
0.004027
def get_updates(self, offset=None, limit=None, timeout=None):
    """
    Use this method to receive incoming updates using long polling (wiki).
    An Array of Update objects is returned.
    """
    payload = dict(offset=offset, limit=limit, timeout=timeout)
    return self._get('getUpdates', payload)
0.008929
def disconnect_layer_listener(self):
    """Destroy the signal/slot to listen for layers loaded in QGIS.

    .. seealso:: connect_layer_listener
    """
    project = QgsProject.instance()
    project.layersWillBeRemoved.disconnect(self.get_layers)
    project.layersAdded.disconnect(self.get_layers)
    project.layersRemoved.disconnect(self.get_layers)
    self.iface.mapCanvas().layersChanged.disconnect(self.get_layers)
    self.iface.currentLayerChanged.disconnect(self.layer_changed)
0.003802
def encrypt_assertion(self, statement, enc_key, template,
                      key_type='des-192', node_xpath=None, node_id=None):
    """
    Will encrypt an assertion

    :param statement: An XML document that contains the assertion to encrypt
    :param enc_key: File name of a file containing the encryption key
    :param template: A template for the encryption part to be added.
    :param key_type: The type of session key to use.
    :return: The encrypted text
    """
    if six.PY2:
        _str = unicode
    else:
        _str = str

    if isinstance(statement, SamlBase):
        statement = pre_encrypt_assertion(statement)

    _, fil = make_temp(
        _str(statement), decode=False, delete=self._xmlsec_delete_tmpfiles
    )
    _, tmpl = make_temp(_str(template), decode=False)

    if not node_xpath:
        node_xpath = ASSERT_XPATH

    com_list = [
        self.xmlsec,
        '--encrypt',
        '--pubkey-cert-pem', enc_key,
        '--session-key', key_type,
        '--xml-data', fil,
        '--node-xpath', node_xpath,
    ]
    if node_id:
        com_list.extend(['--node-id', node_id])

    try:
        (_stdout, _stderr, output) = self._run_xmlsec(com_list, [tmpl])
    except XmlsecError as e:
        six.raise_from(EncryptError(com_list), e)

    return output.decode('utf-8')
0.002085
def unset_state(self):
    """
    Resets the state required for this actor to the default state.

    Currently only disables the target of the texture of the material,
    it may still be bound.
    """
    glDisable(self.region.material.target)
    self.region.bone.unsetRotate(self.data)
0.012539
def concatechain(*generators: types.FrameGenerator, separator: str = ''):
    """Return a generator that in each iteration takes one value from each of
    the supplied generators, joins them together with the specified separator
    and yields the result. Stops as soon as any iterator raises StopIteration
    and returns the value contained in it.

    Primarily created for chaining string generators, hence the name.

    Args:
        generators: Any number of generators that yield types that can be
            joined together with the separator string.
        separator: A separator to insert between each value yielded by the
            different generators.

    Returns:
        A generator that yields strings that are the concatenation of one
        value from each of the generators, joined together with the
        separator string.
    """
    while True:
        try:
            next_ = [next(gen) for gen in generators]
            yield separator.join(next_)
        except StopIteration as exc:
            return exc.value
0.001938
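A minimal usage sketch for concatechain above, assuming ordinary string generators stand in for types.FrameGenerator (the sample generators are invented for illustration):

letters = (c for c in 'ab')
digits = (d for d in '12')
print(list(concatechain(letters, digits, separator='-')))
# ['a-1', 'b-2']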
def _update_limits_from_api(self):
    """
    Query DynamoDB's DescribeLimits API action, and update limits
    with the quotas returned. Updates ``self.limits``.
    """
    self.connect()
    logger.info("Querying DynamoDB DescribeLimits for limits")
    # no need to paginate
    lims = self.conn.describe_limits()
    self.limits['Account Max Read Capacity Units']._set_api_limit(
        lims['AccountMaxReadCapacityUnits']
    )
    self.limits['Account Max Write Capacity Units']._set_api_limit(
        lims['AccountMaxWriteCapacityUnits']
    )
    self.limits['Table Max Read Capacity Units']._set_api_limit(
        lims['TableMaxReadCapacityUnits']
    )
    self.limits['Table Max Write Capacity Units']._set_api_limit(
        lims['TableMaxWriteCapacityUnits']
    )
    logger.debug("Done setting limits from API")
0.002186
def authenticate_nova_user(self, keystone, user, password, tenant):
    """Authenticates a regular user with nova-api."""
    self.log.debug('Authenticating nova user ({})...'.format(user))
    ep = keystone.service_catalog.url_for(service_type='identity',
                                          interface='publicURL')
    if keystone.session:
        return nova_client.Client(NOVA_CLIENT_VERSION,
                                  session=keystone.session,
                                  auth_url=ep)
    elif novaclient.__version__[0] >= "7":
        return nova_client.Client(NOVA_CLIENT_VERSION,
                                  username=user, password=password,
                                  project_name=tenant, auth_url=ep)
    else:
        return nova_client.Client(NOVA_CLIENT_VERSION,
                                  username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)
0.001992
def fast_combine_pairs(files, force_single, full_name, separators):
    """
    assume files that need to be paired are within 10 entries of each other,
    once the list is sorted
    """
    files = sort_filenames(files)
    chunks = tz.sliding_window(10, files)
    pairs = [combine_pairs(chunk, force_single, full_name, separators)
             for chunk in chunks]
    pairs = [y for x in pairs for y in x]
    longest = defaultdict(list)
    # for each file, save the longest pair it is in
    for pair in pairs:
        for file in pair:
            if len(longest[file]) < len(pair):
                longest[file] = pair
    # keep only unique pairs
    longest = {tuple(sort_filenames(x)) for x in longest.values()}
    # ensure filenames are R1 followed by R2
    return [sort_filenames(list(x)) for x in longest]
0.003722
def init(*, threshold_lvl=1, quiet_stdout=False, log_file):
    """
    Initiate the log module

    :param threshold_lvl: messages under this level won't be issued/logged
    :param quiet_stdout: deactivate the stdout log stream if True
    :param log_file: path of the log file
    """
    global _logger, _log_lvl

    # translate lvl to those used by 'logging' module
    _log_lvl = _set_lvl(threshold_lvl)

    # logger creation
    _logger = logging.getLogger(PKG_NAME)
    _logger.setLevel(_log_lvl)

    # create syslog handler and set level to info
    log_h = logging.FileHandler(log_file)
    # base message format
    base_fmt = '%(asctime)s - %(name)s - [%(levelname)s] - %(message)s'
    # set formatter
    log_fmt = logging.Formatter(base_fmt)
    log_h.setFormatter(log_fmt)
    # add handler
    _logger.addHandler(log_h)

    # create stdout handler
    if not quiet_stdout:
        global _stdout
        _stdout = True
0.001135
def belu(x):
    """Bipolar ELU as in https://arxiv.org/abs/1709.04054."""
    x_shape = shape_list(x)
    x1, x2 = tf.split(tf.reshape(x, x_shape[:-1] + [-1, 2]), 2, axis=-1)
    y1 = tf.nn.elu(x1)
    y2 = -tf.nn.elu(-x2)
    return tf.reshape(tf.concat([y1, y2], axis=-1), x_shape)
0.025735
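For intuition about belu above: it reshapes the last axis into pairs and applies a regular ELU to the first element of each pair and a mirrored ELU to the second. A hedged NumPy re-implementation of the same computation (NumPy stands in for TensorFlow purely for illustration; the last axis is assumed to have even length):

import numpy as np

def belu_np(x):
    # even-indexed channels get elu(x); odd-indexed channels get -elu(-x)
    elu = lambda v: np.where(v > 0, v, np.expm1(v))
    y = x.astype(float).copy()
    y[..., 0::2] = elu(x[..., 0::2])
    y[..., 1::2] = -elu(-x[..., 1::2])
    return y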
def ExecuteQuery(self, query, args=None):
    """Get connection from pool and execute query."""

    def Action(connection):
        connection.cursor.execute(query, args)
        rowcount = connection.cursor.rowcount
        results = connection.cursor.fetchall()
        return results, rowcount

    return self._RetryWrapper(Action)
0.015244
def IntegerDifference(left: vertex_constructor_param_types,
                      right: vertex_constructor_param_types,
                      label: Optional[str] = None) -> Vertex:
    """
    Subtracts one vertex from another

    :param left: the vertex to be subtracted from
    :param right: the vertex to subtract
    """
    return Integer(context.jvm_view().IntegerDifferenceVertex, label,
                   cast_to_integer_vertex(left), cast_to_integer_vertex(right))
0.014388
def mime(self):
    '''
    Retrieves mime metadata if available

    :return:
    '''
    return self.metadata['ContentType'] if 'ContentType' in self.metadata \
        else BaseEngine.get_mimetype(self.buffer)
0.013575
async def get_counts(self):
    """
    see :class:`datasketch.MinHashLSH`.
    """
    fs = (hashtable.itemcounts() for hashtable in self.hashtables)
    return await asyncio.gather(*fs)
0.009662
def killJobs(self, jobsToKill):
    """
    Kills the given set of jobs and then sends them for processing
    """
    if len(jobsToKill) > 0:
        self.batchSystem.killBatchJobs(jobsToKill)
        for jobBatchSystemID in jobsToKill:
            self.processFinishedJob(jobBatchSystemID, 1)
0.006211
def partial_steps_data(self, start=0):
    """
    Iterates 5 steps from start position and provides tuple for packing
    into buffer. Yields (0, 0) if a step doesn't exist.

    :param start: Position to start from (typically 0 or 5)
    :yield: (setting, duration)
    """
    cnt = 0
    if len(self._prog_steps) >= start:
        # yields actual steps for encoding
        for step in self._prog_steps[start:start + 5]:
            yield step.raw_data
            cnt += 1
    while cnt < 5:
        yield (0, 0)
        cnt += 1
0.003311
def max_rigid_id(self):
    """Returns the maximum rigid body ID contained in the Compound.

    This is usually used by compound.root to determine the maximum
    rigid_id in the containment hierarchy.

    Returns
    -------
    int or None
        The maximum rigid body ID contained in the Compound. If no
        rigid body IDs are found, None is returned
    """
    try:
        return max([particle.rigid_id for particle in self.particles()
                    if particle.rigid_id is not None])
    except ValueError:
        return
0.003339
def generate_private_key(key_type):
    """
    Generate a random private key using sensible parameters.

    :param str key_type: The type of key to generate. One of: ``rsa``.
    """
    if key_type == u'rsa':
        return rsa.generate_private_key(
            public_exponent=65537, key_size=2048, backend=default_backend())
    raise ValueError(key_type)
0.002778
def errors(self):
    """
    Get the errors of the tag.

    If invalid then the list will consist of errors containing each a code
    and message explaining the error. Each error also refers to the
    respective (sub)tag(s).

    :return: list of errors of the tag. If the tag is valid, it returns
        an empty list.
    """
    errors = []
    data = self.data
    error = self.error

    # Check if the tag is grandfathered and if the grandfathered tag is
    # deprecated (e.g. no-nyn).
    if 'record' in data:
        if 'Deprecated' in data['record']:
            errors.append(error(self.ERR_DEPRECATED))
        # Only check every subtag if the tag is not explicitly listed as
        # grandfathered or redundant.
        return errors

    # Check that all subtag codes are meaningful.
    codes = data['tag'].split('-')
    for i, code in enumerate(codes):
        # Ignore anything after a singleton (break)
        if len(code) < 2:
            # Check that each private-use subtag is within the maximum
            # allowed length.
            for code in codes[i + 1:]:
                if len(code) > 8:
                    errors.append(error(self.ERR_TOO_LONG, code))
            break
        if code not in index:
            errors.append(error(self.ERR_UNKNOWN, code))
            # Continue to the next item.
            continue

    # Check that first tag is a language tag.
    subtags = self.subtags
    if not len(subtags):
        errors.append(error(self.ERR_NO_LANGUAGE))
        return errors
    elif subtags[0].type != 'language':
        errors.append(error(self.ERR_NO_LANGUAGE))
        return errors

    # Check for more than one of some types and for deprecation.
    found = dict(language=[], extlang=[], variant=[], script=[], region=[])
    for subtag in subtags:
        type = subtag.type
        if subtag.deprecated:
            errors.append(error(self.ERR_SUBTAG_DEPRECATED, subtag))
        if type in found:
            found[type].append(subtag)
        if 'language' == type:
            if len(found['language']) > 1:
                errors.append(error(self.ERR_EXTRA_LANGUAGE, subtag))
        elif 'region' == type:
            if len(found['region']) > 1:
                errors.append(error(self.ERR_EXTRA_REGION, subtag))
        elif 'extlang' == type:
            if len(found['extlang']) > 1:
                errors.append(error(self.ERR_EXTRA_EXTLANG, subtag))
        elif 'script' == type:
            if len(found['script']) > 1:
                errors.append(error(self.ERR_EXTRA_SCRIPT, subtag))
            # Check if script is same as language suppress-script.
            else:
                script = subtags[0].script
                if script:
                    if script.format == subtag.format:
                        errors.append(error(self.ERR_SUPPRESS_SCRIPT, subtag))
        elif 'variant' == type:
            if len(found['variant']) > 1:
                for variant in found['variant']:
                    if variant.format == subtag.format:
                        errors.append(error(self.ERR_DUPLICATE_VARIANT, subtag))
                        break

    # Check for correct order.
    if len(subtags) > 1:
        priority = dict(language=4, extlang=5, script=6, region=7, variant=8)
        for i, subtag in enumerate(subtags[0:len(subtags) - 1]):
            next = subtags[i + 1]
            if next:
                if priority[subtag.type] > priority[next.type]:
                    errors.append(error(self.ERR_WRONG_ORDER, [subtag, next]))

    return errors
0.002865
def set_client_params(
        self, start_unsubscribed=None, clear_on_exit=None,
        unsubscribe_on_reload=None, announce_interval=None):
    """Sets subscribers related params.

    :param bool start_unsubscribed: Configure subscriptions but do not send them.

        .. note:: Useful with master FIFO.

    :param bool clear_on_exit: Force clear instead of unsubscribe during shutdown.

    :param bool unsubscribe_on_reload: Force unsubscribe request even during
        graceful reload.

    :param int announce_interval: Send subscription announce at the specified
        interval. Default: 10 master cycles.
    """
    self._set('start-unsubscribed', start_unsubscribed, cast=bool)
    self._set('subscription-clear-on-shutdown', clear_on_exit, cast=bool)
    self._set('unsubscribe-on-graceful-reload', unsubscribe_on_reload, cast=bool)
    self._set('subscribe-freq', announce_interval)

    return self._section
0.008264
def Execute(self, message):
    """This function parses the RDFValue from the server.

    The Run method will be called with the specified RDFValue.

    Args:
      message: The GrrMessage that we are called to process.

    Returns:
      Upon return a callback will be called on the server to register
      the end of the function and pass back exceptions.

    Raises:
      RuntimeError: The arguments from the server do not match the
        expected rdf type.
    """
    self.message = message
    if message:
        self.require_fastpoll = message.require_fastpoll

    args = None
    try:
        if self.message.args_rdf_name:
            if not self.in_rdfvalue:
                raise RuntimeError("Did not expect arguments, got %s." %
                                   self.message.args_rdf_name)

            if self.in_rdfvalue.__name__ != self.message.args_rdf_name:
                raise RuntimeError(
                    "Unexpected arg type %s != %s." %
                    (self.message.args_rdf_name, self.in_rdfvalue.__name__))

            args = self.message.payload

        # Only allow authenticated messages in the client
        if self._authentication_required and (
                self.message.auth_state !=
                rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED):
            raise RuntimeError("Message for %s was not Authenticated." %
                               self.message.name)

        self.cpu_start = self.proc.cpu_times()
        self.cpu_limit = self.message.cpu_limit

        if getattr(flags.FLAGS, "debug_client_actions", False):
            pdb.set_trace()

        try:
            self.Run(args)
        # Ensure we always add CPU usage even if an exception occurred.
        finally:
            used = self.proc.cpu_times()
            self.cpu_used = (used.user - self.cpu_start.user,
                             used.system - self.cpu_start.system)

    except NetworkBytesExceededError as e:
        self.SetStatus(
            rdf_flows.GrrStatus.ReturnedStatus.NETWORK_LIMIT_EXCEEDED,
            "%r: %s" % (e, e), traceback.format_exc())

    # We want to report back all errors and map Python exceptions to
    # Grr Errors.
    except Exception as e:  # pylint: disable=broad-except
        self.SetStatus(rdf_flows.GrrStatus.ReturnedStatus.GENERIC_ERROR,
                       "%r: %s" % (e, e), traceback.format_exc())
        if flags.FLAGS.pdb_post_mortem:
            self.DisableNanny()
            pdb.post_mortem()

    if self.status.status != rdf_flows.GrrStatus.ReturnedStatus.OK:
        logging.info("Job Error (%s): %s", self.__class__.__name__,
                     self.status.error_message)
        if self.status.backtrace:
            logging.debug(self.status.backtrace)

    if self.cpu_used:
        self.status.cpu_time_used.user_cpu_time = self.cpu_used[0]
        self.status.cpu_time_used.system_cpu_time = self.cpu_used[1]

    # This returns the error status of the Actions to the flow.
    self.SendReply(self.status, message_type=rdf_flows.GrrMessage.Type.STATUS)

    self._RunGC()
0.007035
def instruction_BVS(self, opcode, ea):
    """
    Tests the state of the V (overflow) bit and causes a branch if it is
    set. That is, branch if the twos complement result was invalid. When
    used after an operation on twos complement binary values, this
    instruction will branch if there was an overflow.

    source code forms: BVS dd; LBVS DDDD

    CC bits "HNZVC": -----
    """
    if self.V == 1:
        # log.info("$%x BVS branch to $%x, because V==1 \t| %s" % (
        #     self.program_counter, ea, self.cfg.mem_info.get_shortest(ea)
        # ))
        self.program_counter.set(ea)
0.007657
def thumbnail(self):
    """
    Returns a thumbnail image in PIL.Image. When the file does not
    contain an embedded thumbnail image, returns None.
    """
    if 'THUMBNAIL_RESOURCE' in self.image_resources:
        return pil_io.convert_thumbnail_to_pil(
            self.image_resources.get_data('THUMBNAIL_RESOURCE')
        )
    elif 'THUMBNAIL_RESOURCE_PS4' in self.image_resources:
        return pil_io.convert_thumbnail_to_pil(
            self.image_resources.get_data('THUMBNAIL_RESOURCE_PS4'), 'BGR'
        )
    return None
0.003373
def MPTfileCSV(file_or_path):
    """Simple function to open MPT files as csv.DictReader objects

    Checks for the correct headings, skips any comments and returns a
    csv.DictReader object and a list of comments
    """
    if isinstance(file_or_path, str):
        mpt_file = open(file_or_path, 'r')
    else:
        mpt_file = file_or_path

    magic = next(mpt_file)
    if magic.rstrip() != 'EC-Lab ASCII FILE':
        raise ValueError("Bad first line for EC-Lab file: '%s'" % magic)

    nb_headers_match = re.match(r'Nb header lines : (\d+)\s*$', next(mpt_file))
    nb_headers = int(nb_headers_match.group(1))
    if nb_headers < 3:
        raise ValueError("Too few header lines: %d" % nb_headers)

    # The 'magic number' line, the 'Nb headers' line and the column headers
    # make three lines. Every additional line is a comment line.
    comments = [next(mpt_file) for i in range(nb_headers - 3)]

    mpt_csv = csv.DictReader(mpt_file, dialect='excel-tab')

    expected_fieldnames = (
        ["mode", "ox/red", "error", "control changes", "Ns changes",
         "counter inc.", "time/s", "control/V/mA", "Ewe/V", "dq/mA.h",
         "P/W", "<I>/mA", "(Q-Qo)/mA.h", "x"],
        ['mode', 'ox/red', 'error', 'control changes', 'Ns changes',
         'counter inc.', 'time/s', 'control/V', 'Ewe/V', 'dq/mA.h',
         '<I>/mA', '(Q-Qo)/mA.h', 'x'],
        ["mode", "ox/red", "error", "control changes", "Ns changes",
         "counter inc.", "time/s", "control/V", "Ewe/V", "I/mA",
         "dQ/mA.h", "P/W"],
        ["mode", "ox/red", "error", "control changes", "Ns changes",
         "counter inc.", "time/s", "control/V", "Ewe/V", "<I>/mA",
         "dQ/mA.h", "P/W"])
    if mpt_csv.fieldnames not in expected_fieldnames:
        raise ValueError("Unrecognised headers for MPT file format")

    return mpt_csv, comments
0.0027
def demultiplex_samples(fastq, out_dir, nedit, barcodes):
    '''
    Demultiplex a fastqtransformed FASTQ file into a FASTQ file for
    each sample.
    '''
    annotations = detect_fastq_annotations(fastq)
    re_string = construct_transformed_regex(annotations)
    parser_re = re.compile(re_string)

    if barcodes:
        barcodes = set(barcode.strip() for barcode in barcodes)
    else:
        barcodes = set()

    if nedit == 0:
        filter_bc = partial(exact_sample_filter, barcodes=barcodes)
    else:
        barcodehash = MutationHash(barcodes, nedit)
        filter_bc = partial(correcting_sample_filter, barcodehash=barcodehash)

    sample_set = set()
    batch = collections.defaultdict(list)
    parsed = 0
    safe_makedir(out_dir)
    for read in read_fastq(fastq):
        parsed += 1
        read = filter_bc(read)
        if not read:
            continue
        match = parser_re.search(read).groupdict()
        sample = match['SB']
        sample_set.add(sample)
        batch[sample].append(read)
        # write in batches to avoid opening up file handles repeatedly
        if not parsed % 10000000:
            for sample, reads in batch.items():
                out_file = os.path.join(out_dir, sample + ".fq")
                with open(out_file, "a") as out_handle:
                    for read in reads:
                        fixed = filter_bc(read)
                        if fixed:
                            out_handle.write(fixed)
            batch = collections.defaultdict(list)

    for sample, reads in batch.items():
        out_file = os.path.join(out_dir, sample + ".fq")
        with open(out_file, "a") as out_handle:
            for read in reads:
                fixed = filter_bc(read)
                if fixed:
                    out_handle.write(fixed)
0.000553
def _call(self, x):
    """Apply the functional to the given point."""
    return (self.functional(x) +
            self.quadratic_coeff * x.inner(x) +
            x.inner(self.linear_term) + self.constant)
0.009009
def calc_pvalue(self, study_count, study_n, pop_count, pop_n):
    """pvalues are calculated in derived classes."""
    fnc_call = "calc_pvalue({SCNT}, {STOT}, {PCNT}, {PTOT})".format(
        SCNT=study_count, STOT=study_n, PCNT=pop_count, PTOT=pop_n)
    raise Exception("NOT IMPLEMENTED: {FNC_CALL} using {FNC}.".format(
        FNC_CALL=fnc_call, FNC=self.pval_fnc))
0.005141
def infer(msg, mrar=False):
    """Estimate the most likely BDS code of a message.

    Args:
        msg (String): 28 bytes hexadecimal message string
        mrar (bool): Also infer MRAR (BDS 44) and MHR (BDS 45).
            Defaults to False.

    Returns:
        String or None: BDS version, or possible versions,
        or None if nothing matches.
    """
    df = common.df(msg)

    if common.allzeros(msg):
        return 'EMPTY'

    # For ADS-B / Mode-S extended squitter
    if df == 17:
        tc = common.typecode(msg)

        if 1 <= tc <= 4:
            return 'BDS08'  # identification and category
        if 5 <= tc <= 8:
            return 'BDS06'  # surface movement
        if 9 <= tc <= 18:
            return 'BDS05'  # airborne position, baro-alt
        if tc == 19:
            return 'BDS09'  # airborne velocity
        if 20 <= tc <= 22:
            return 'BDS05'  # airborne position, gnss-alt
        if tc == 28:
            return 'BDS61'  # aircraft status
        if tc == 29:
            return 'BDS62'  # target state and status
        if tc == 31:
            return 'BDS65'  # operational status

    # For Comm-B replies
    IS10 = bds10.is10(msg)
    IS17 = bds17.is17(msg)
    IS20 = bds20.is20(msg)
    IS30 = bds30.is30(msg)
    IS40 = bds40.is40(msg)
    IS50 = bds50.is50(msg)
    IS60 = bds60.is60(msg)
    IS44 = bds44.is44(msg)
    IS45 = bds45.is45(msg)

    if mrar:
        allbds = np.array(["BDS10", "BDS17", "BDS20", "BDS30", "BDS40",
                           "BDS44", "BDS45", "BDS50", "BDS60"])
        mask = [IS10, IS17, IS20, IS30, IS40, IS44, IS45, IS50, IS60]
    else:
        allbds = np.array(["BDS10", "BDS17", "BDS20", "BDS30", "BDS40",
                           "BDS50", "BDS60"])
        mask = [IS10, IS17, IS20, IS30, IS40, IS50, IS60]

    bds = ','.join(sorted(allbds[mask]))

    if len(bds) == 0:
        return None
    else:
        return bds
0.001568
def count_multiplicities(times, tmax=20):
    """Calculate an array of multiplicities and corresponding coincidence IDs

    Note that this algorithm does not take care about DOM IDs, so it has to
    be fed with DOM hits.

    Parameters
    ----------
    times: array[float], shape=(n,)
        Hit times for n hits
    tmax: int [default: 20]
        Time window of a coincidence

    Returns
    -------
    (array[int], array[int]), shape=(n,)

    """
    n = times.shape[0]
    mtp = np.ones(n, dtype='<i4')  # multiplicities
    cid = np.zeros(n, '<i4')  # coincidence id
    idx0 = 0
    _mtp = 1
    _cid = 0
    t0 = times[idx0]
    for i in range(1, n):
        dt = times[i] - t0
        if dt > tmax:
            mtp[idx0:i] = _mtp
            cid[idx0:i] = _cid
            _mtp = 0
            _cid += 1
            idx0 = i
            t0 = times[i]
        _mtp += 1
        if i == n - 1:
            mtp[idx0:] = _mtp
            cid[idx0:] = _cid
            break
    return mtp, cid
0.00099
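A small worked example for count_multiplicities, with hit times invented for illustration (the input is assumed to be time-sorted):

import numpy as np
times = np.array([0.0, 5.0, 12.0, 100.0, 105.0])
mtp, cid = count_multiplicities(times, tmax=20)
# mtp -> [3, 3, 3, 2, 2]: the first three hits share one 20-unit window
# cid -> [0, 0, 0, 1, 1]: two coincidences are identified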
def stop_if(expr, msg='', no_output=False):
    '''Abort the execution of the current step or loop and yield a
    warning message `msg` if `expr` is True'''
    if expr:
        raise StopInputGroup(msg=msg, keep_output=not no_output)
    return 0
0.003984
def segment_tripoints(self, ratio=0.33333):
    """
    Identify the trisection points of every line segment in the triangulation
    """
    segments = self.identify_segments()
    points = self.points

    mids1 = ratio * points[segments[:, 0]] + (1.0 - ratio) * points[segments[:, 1]]
    mids1 /= np.linalg.norm(mids1, axis=1).reshape(-1, 1)

    mids2 = (1.0 - ratio) * points[segments[:, 0]] + ratio * points[segments[:, 1]]
    mids2 /= np.linalg.norm(mids2, axis=1).reshape(-1, 1)

    mids = np.vstack((mids1, mids2))
    midlls = xyz2lonlat(mids[:, 0], mids[:, 1], mids[:, 2])

    return midlls
0.024922
def delete_project_actors(self, project_key, role_id, actor, actor_type=None):
    """
    Deletes actors (users or groups) from a project role.

    Delete a user from the role:
    /rest/api/2/project/{projectIdOrKey}/role/{roleId}?user={username}

    Delete a group from the role:
    /rest/api/2/project/{projectIdOrKey}/role/{roleId}?group={groupname}

    :param project_key:
    :param role_id:
    :param actor:
    :param actor_type: str : group or user string
    :return:
    """
    url = 'rest/api/2/project/{projectIdOrKey}/role/{roleId}'.format(
        projectIdOrKey=project_key, roleId=role_id)
    params = {}
    if actor_type is not None and actor_type in ['group', 'user']:
        params[actor_type] = actor
    return self.delete(url, params=params)
0.006764
def snapshots(self):
    """
    Get all Volumes of type Snapshot. Updates every time - no caching.

    :return: a `list` of all the `ScaleIO_Volume` that are of type Snapshot.
    :rtype: list
    """
    self.connection._check_login()
    response = self.connection._do_get("{}/{}".format(
        self.connection._api_url, "types/Volume/instances")).json()
    all_volumes_snapshot = []
    for volume in response:
        if volume['volumeType'] == 'Snapshot':
            all_volumes_snapshot.append(
                Volume.from_dict(volume)
            )
    return all_volumes_snapshot
0.007776
def realtimeBar(self, reqId, time, open, high, low, close, volume, wap, count):
    """realtimeBar(EWrapper self, TickerId reqId, long time, double open,
    double high, double low, double close, long volume, double wap,
    int count)"""
    return _swigibpy.EWrapper_realtimeBar(self, reqId, time, open, high,
                                          low, close, volume, wap, count)
0.011628
def format_trigger(self, stream):
    """Create a user understandable string like count(stream) >= X.

    Args:
        stream (DataStream): The stream to use to format ourselves.

    Returns:
        str: The formatted string
    """
    src = u'value'
    if self.use_count:
        src = u'count'

    return u"{}({}) {} {}".format(src, stream, self.comp_string, self.reference)
0.007075
def _wrapNavFrag(self, frag, useAthena):
    """
    Wrap the given L{INavigableFragment} in an appropriate
    L{_FragmentWrapperMixin} subclass.
    """
    username = self._privateApplication._getUsername()
    cf = getattr(frag, 'customizeFor', None)
    if cf is not None:
        frag = cf(username)
    if useAthena:
        pageClass = GenericNavigationAthenaPage
    else:
        pageClass = GenericNavigationPage
    return pageClass(self._privateApplication, frag,
                     self._privateApplication.getPageComponents(),
                     username)
0.003155
def dist_percentile_threshold(dist_matrix, perc_thr=0.05, k=1):
    """Thresholds a distance matrix and returns the result.

    Parameters
    ----------
    dist_matrix: array_like
        Input array or object that can be converted to an array.

    perc_thr: float in range of [0,100]
        Percentile to compute which must be between 0 and 100 inclusive.

    k: int, optional
        Diagonal above which to zero elements.
        k = 0 (the default) is the main diagonal,
        k < 0 is below it and k > 0 is above.

    Returns
    -------
    array_like
    """
    triu_idx = np.triu_indices(dist_matrix.shape[0], k=k)
    upper = np.zeros_like(dist_matrix)
    upper[triu_idx] = dist_matrix[triu_idx] < np.percentile(
        dist_matrix[triu_idx], perc_thr)
    return upper
0.003576
def _add_new_spawn_method(cls):
    """
    TODO
    """

    def new_spawn_method(self, dependency_mapping):
        # TODO/FIXME: Check that this does the right thing:
        #
        # (i) the spawned generator is independent of the original one
        #     (i.e. they can be reset independently without altering the
        #     other's behaviour)
        #
        # (ii) ensure that it also works if this custom generator's
        #      __init__ requires additional arguments
        #
        # new_instance = self.__class__()
        #
        # FIXME: It would be good to explicitly spawn the field generators
        # of `self` here because this would ensure that the internal random
        # generators of the spawned versions are in the same state as the
        # ones in `self`. This would guarantee that the spawned custom
        # generator produces the same elements as `self` even before
        # reset() is called explicitly.
        new_instance = cls()
        return new_instance

    cls.spawn = new_spawn_method
0.009091
def _other_to_dict(self, other):
    """When serializing models, this allows attached models (children,
    parents, etc.) to also be serialized.
    """
    if isinstance(other, ModelBase):
        return other.to_dict()
    elif isinstance(other, list):  # TODO: what if it's not a list?
        return [self._other_to_dict(i) for i in other]
    else:
        return other
0.016043
def SqueezeNet(include_top=True, weights='imagenet',
               input_tensor=None, input_shape=None,
               pooling=None, classes=1000):
    """Instantiates the SqueezeNet architecture."""
    if weights not in {'imagenet', None}:
        raise ValueError('The `weights` argument should be either '
                         '`None` (random initialization) or `imagenet` '
                         '(pre-training on ImageNet).')

    if weights == 'imagenet' and classes != 1000:
        raise ValueError('If using `weights` as imagenet with `include_top`'
                         ' as true, `classes` should be 1000')

    input_shape = _obtain_input_shape(input_shape,
                                      default_size=227,
                                      min_size=48,
                                      data_format=K.image_data_format(),
                                      require_flatten=include_top)

    if input_tensor is None:
        img_input = Input(shape=input_shape)
    else:
        if not K.is_keras_tensor(input_tensor):
            img_input = Input(tensor=input_tensor, shape=input_shape)
        else:
            img_input = input_tensor

    x = Convolution2D(64, (3, 3), strides=(2, 2), padding='valid', name='conv1')(img_input)
    x = Activation('relu', name='relu_conv1')(x)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool1')(x)

    x = fire_module(x, fire_id=2, squeeze=16, expand=64)
    x = fire_module(x, fire_id=3, squeeze=16, expand=64)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool3')(x)

    x = fire_module(x, fire_id=4, squeeze=32, expand=128)
    x = fire_module(x, fire_id=5, squeeze=32, expand=128)
    x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), name='pool5')(x)

    x = fire_module(x, fire_id=6, squeeze=48, expand=192)
    x = fire_module(x, fire_id=7, squeeze=48, expand=192)
    x = fire_module(x, fire_id=8, squeeze=64, expand=256)
    x = fire_module(x, fire_id=9, squeeze=64, expand=256)

    if include_top:
        # It's not obvious where to cut the network...
        # Could do the 8th or 9th layer... some work recommends cutting
        # earlier layers.
        x = Dropout(0.5, name='drop9')(x)

        x = Convolution2D(classes, (1, 1), padding='valid', name='conv10')(x)
        x = Activation('relu', name='relu_conv10')(x)
        x = GlobalAveragePooling2D()(x)
        x = Activation('softmax', name='loss')(x)
    else:
        if pooling == 'avg':
            x = GlobalAveragePooling2D()(x)
        elif pooling == 'max':
            x = GlobalMaxPooling2D()(x)
        elif pooling is None:
            pass
        else:
            raise ValueError("Unknown argument for 'pooling'=" + pooling)

    # Ensure that the model takes into account
    # any potential predecessors of `input_tensor`.
    if input_tensor is not None:
        inputs = get_source_inputs(input_tensor)
    else:
        inputs = img_input

    model = Model(inputs, x, name='squeezenet')

    # load weights
    if weights == 'imagenet':
        if include_top:
            weights_path = get_file(
                'squeezenet_weights_tf_dim_ordering_tf_kernels.h5',
                WEIGHTS_PATH, cache_subdir='models')
        else:
            weights_path = get_file(
                'squeezenet_weights_tf_dim_ordering_tf_kernels_notop.h5',
                WEIGHTS_PATH_NO_TOP, cache_subdir='models')

        model.load_weights(weights_path)

        if K.backend() == 'theano':
            layer_utils.convert_all_kernels_in_model(model)

        if K.image_data_format() == 'channels_first':
            if K.backend() == 'tensorflow':
                warnings.warn('You are using the TensorFlow backend, yet you '
                              'are using the Theano '
                              'image data format convention '
                              '(`image_data_format="channels_first"`). '
                              'For best performance, set '
                              '`image_data_format="channels_last"` in '
                              'your Keras config '
                              'at ~/.keras/keras.json.')
    return model
0.00349
def get_float(prompt=None):
    """
    Read a line of text from standard input and return the equivalent float
    as precisely as possible; if text does not represent a double, user is
    prompted to retry. If line can't be read, return None.
    """
    while True:
        s = get_string(prompt)
        if s is None:
            return None
        if len(s) > 0 and re.search(r"^[+-]?\d*(?:\.\d*)?$", s):
            try:
                return float(s)
            except ValueError:
                pass

        # Temporarily here for backwards compatibility
        if prompt is None:
            print("Retry: ", end="")
0.001582
def running_apps(self):
    """Return a list of running user applications."""
    ps = self.adb_shell(RUNNING_APPS_CMD)
    if ps:
        return [line.strip().rsplit(' ', 1)[-1]
                for line in ps.splitlines() if line.strip()]
    return []
0.011673
def case_report_content(store, institute_obj, case_obj):
    """Gather contents to be visualized in a case report

    Args:
        store(adapter.MongoAdapter)
        institute_obj(models.Institute)
        case_obj(models.Case)

    Returns:
        data(dict)
    """
    variant_types = {
        'causatives_detailed': 'causatives',
        'suspects_detailed': 'suspects',
        'classified_detailed': 'acmg_classification',
        'tagged_detailed': 'manual_rank',
        'dismissed_detailed': 'dismiss_variant',
        'commented_detailed': 'is_commented',
    }
    data = case_obj

    for individual in data['individuals']:
        try:
            sex = int(individual.get('sex', 0))
        except ValueError as err:
            sex = 0
        individual['sex_human'] = SEX_MAP[sex]
        individual['phenotype_human'] = PHENOTYPE_MAP.get(individual['phenotype'])

    # Add the case comments
    data['comments'] = store.events(institute_obj, case=case_obj, comments=True)

    data['manual_rank_options'] = MANUAL_RANK_OPTIONS
    data['dismissed_options'] = DISMISS_VARIANT_OPTIONS
    data['genetic_models'] = dict(GENETIC_MODELS)
    data['report_created_at'] = datetime.datetime.now().strftime("%Y-%m-%d %H:%M")

    evaluated_variants = {}
    for vt in variant_types:
        evaluated_variants[vt] = []
    # We collect all causatives and suspected variants.
    # These are handled separately since they are on case level.
    for var_type in ['causatives', 'suspects']:
        # These include references to variants
        vt = '_'.join([var_type, 'detailed'])
        for var_id in case_obj.get(var_type, []):
            variant_obj = store.variant(var_id)
            if not variant_obj:
                continue
            # If the variant exists we add it to the evaluated variants
            evaluated_variants[vt].append(variant_obj)

    # Get variants for this case that are either classified, commented,
    # tagged or dismissed.
    for var_obj in store.evaluated_variants(case_id=case_obj['_id']):
        # Check which category it belongs to
        for vt in variant_types:
            keyword = variant_types[vt]
            # When found we add it to the category.
            # Each variant can belong to multiple categories.
            if keyword in var_obj:
                evaluated_variants[vt].append(var_obj)

    for var_type in evaluated_variants:
        decorated_variants = []
        for var_obj in evaluated_variants[var_type]:
            # We decorate the variant with some extra information
            if var_obj['category'] == 'snv':
                decorated_info = variant_decorator(
                    store=store,
                    institute_obj=institute_obj,
                    case_obj=case_obj,
                    variant_id=None,
                    variant_obj=var_obj,
                    add_case=False,
                    add_other=False,
                    get_overlapping=False
                )
            else:
                decorated_info = sv_variant(
                    store=store,
                    institute_id=institute_obj['_id'],
                    case_name=case_obj['display_name'],
                    variant_obj=var_obj,
                    add_case=False,
                    get_overlapping=False
                )
            decorated_variants.append(decorated_info['variant'])
        # Add the decorated variants to the case
        data[var_type] = decorated_variants

    return data
0.002545
def get_rounded(self, digits):
    """
    Return a vector with the elements rounded to the given number of digits.
    """
    result = self.copy()
    result.round(digits)
    return result
0.015075
def request(input, representation, resolvers=None, get3d=False,
            tautomers=False, **kwargs):
    """Make a request to CIR and return the XML response.

    :param string input: Chemical identifier to resolve
    :param string representation: Desired output representation
    :param list(string) resolvers: (Optional) Ordered list of resolvers to use
    :param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
    :param bool tautomers: (Optional) Whether to return all tautomers
    :returns: XML response from CIR
    :rtype: Element
    :raises HTTPError: if CIR returns an error code
    :raises ParseError: if CIR response is uninterpretable
    """
    url = construct_api_url(input, representation, resolvers, get3d,
                            tautomers, **kwargs)
    log.debug('Making request: %s', url)
    response = urlopen(url)
    return etree.parse(response).getroot()
0.004535
def callproc(self, procname, parameters=()):
    """
    Call a stored procedure with the given name.

    :param procname: The name of the procedure to call
    :type procname: str
    :keyword parameters: The optional parameters for the procedure
    :type parameters: sequence
    """
    self._assert_open()
    return self._callproc(procname, parameters)
0.005063
def get_scenarios(network_id, **kwargs):
    """
    Get all the scenarios in a given network.
    """
    user_id = kwargs.get('user_id')
    try:
        net_i = db.DBSession.query(Network).filter(
            Network.id == network_id).one()
        net_i.check_read_permission(user_id=user_id)
    except NoResultFound:
        raise ResourceNotFoundError("Network %s not found" % (network_id))

    return net_i.scenarios
0.009662
def shrink(self):
    """
    Calculate the Constant-Correlation covariance matrix.

    :return: shrunk sample covariance matrix
    :rtype: np.ndarray
    """
    x = np.nan_to_num(self.X.values)

    # de-mean returns
    t, n = np.shape(x)
    meanx = x.mean(axis=0)
    x = x - np.tile(meanx, (t, 1))

    # compute sample covariance matrix
    sample = (1.0 / t) * np.dot(x.T, x)

    # compute prior
    var = np.diag(sample).reshape(-1, 1)
    sqrtvar = np.sqrt(var)
    _var = np.tile(var, (n,))
    _sqrtvar = np.tile(sqrtvar, (n,))
    r_bar = (np.sum(sample / (_sqrtvar * _sqrtvar.T)) - n) / (n * (n - 1))
    prior = r_bar * (_sqrtvar * _sqrtvar.T)
    prior[np.eye(n) == 1] = var.reshape(-1)

    # compute shrinkage parameters and constant
    if self.delta is None:
        # what we call pi-hat
        y = x ** 2.0
        phi_mat = np.dot(y.T, y) / t - 2 * np.dot(x.T, x) * sample / t + sample ** 2
        phi = np.sum(phi_mat)

        # what we call rho-hat
        term1 = np.dot((x ** 3).T, x) / t
        help_ = np.dot(x.T, x) / t
        help_diag = np.diag(help_)
        term2 = np.tile(help_diag, (n, 1)).T * sample
        term3 = help_ * _var
        term4 = _var * sample
        theta_mat = term1 - term2 - term3 + term4
        theta_mat[np.eye(n) == 1] = np.zeros(n)
        rho = sum(np.diag(phi_mat)) + r_bar * np.sum(
            np.dot((1.0 / sqrtvar), sqrtvar.T) * theta_mat
        )

        # what we call gamma-hat
        gamma = np.linalg.norm(sample - prior, "fro") ** 2

        # compute shrinkage constant
        kappa = (phi - rho) / gamma
        shrinkage = max(0.0, min(1.0, kappa / t))
        self.delta = shrinkage
    else:
        # use specified constant
        shrinkage = self.delta

    # compute the estimator
    sigma = shrinkage * prior + (1 - shrinkage) * sample
    return self.format_and_annualise(sigma)
0.001444
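For context on shrink above: this is the Ledoit-Wolf constant-correlation estimator, which returns the convex combination sigma = delta * F + (1 - delta) * S of the constant-correlation prior F and the sample covariance S, with the intensity delta estimated from the data when not supplied. A hedged usage sketch (the wrapper class name ConstantCorrelation is an assumption for illustration; the method itself only relies on self.X, self.delta and self.format_and_annualise):

import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
returns = pd.DataFrame(rng.normal(0.0, 0.01, size=(250, 4)))  # toy daily returns
cc = ConstantCorrelation(returns)  # hypothetical wrapper holding X and delta
sigma = cc.shrink()                # shrunk covariance; cc.delta holds the intensity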
def get_sortkey(table):
    """Get a field to sort by
    """
    # Just pick the first column in the table in alphabetical order.
    # Ideally we would get the primary key from bcdc api, but it doesn't
    # seem to be available
    wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0")
    return sorted(wfs.get_schema("pub:" + table)["properties"].keys())[0]
0.00271
def check_rollout(edits_service, package_name, days):
    """Check if package_name has a release on staged rollout for too long"""
    edit = edits_service.insert(body={}, packageName=package_name).execute()
    response = edits_service.tracks().get(
        editId=edit['id'], track='production',
        packageName=package_name).execute()
    releases = response['releases']
    for release in releases:
        if release['status'] == 'inProgress':
            url = 'https://archive.mozilla.org/pub/mobile/releases/{}/SHA512SUMS'.format(release['name'])
            resp = requests.head(url)
            if resp.status_code != 200:
                if resp.status_code != 404:  # 404 is expected for release candidates
                    logger.warning("Could not check %s: %s", url, resp.status_code)
                continue
            age = time.time() - calendar.timegm(
                eu.parsedate(resp.headers['Last-Modified']))
            if age >= days * DAY:
                yield release, age
0.006148
def get_color_mode(mode):
    """Convert PIL mode to ColorMode."""
    name = mode.upper()
    name = name.rstrip('A')  # Trim alpha.
    name = {'1': 'BITMAP', 'L': 'GRAYSCALE'}.get(name, name)
    return getattr(ColorMode, name)
0.004348
def word2vec(
    train,
    output,
    size=100,
    window=5,
    sample="1e-3",
    hs=0,
    negative=5,
    threads=12,
    iter_=5,
    min_count=5,
    alpha=0.025,
    debug=2,
    binary=1,
    cbow=1,
    save_vocab=None,
    read_vocab=None,
    verbose=False,
):
    """
    word2vec execution

    Parameters for training:
        train <file>
            Use text data from <file> to train the model
        output <file>
            Use <file> to save the resulting word vectors / word clusters
        size <int>
            Set size of word vectors; default is 100
        window <int>
            Set max skip length between words; default is 5
        sample <float>
            Set threshold for occurrence of words. Those that appear with
            higher frequency in the training data will be randomly
            down-sampled; default is 0 (off), useful value is 1e-5
        hs <int>
            Use Hierarchical Softmax; default is 1 (0 = not used)
        negative <int>
            Number of negative examples; default is 0, common values are
            5 - 10 (0 = not used)
        threads <int>
            Use <int> threads (default 1)
        min_count <int>
            This will discard words that appear less than <int> times;
            default is 5
        alpha <float>
            Set the starting learning rate; default is 0.025
        debug <int>
            Set the debug mode (default = 2 = more info during training)
        binary <int>
            Save the resulting vectors in binary mode; default is 0 (off)
        cbow <int>
            Use the continuous bag of words model; default is 1
            (use 0 for skip-gram model)
        save_vocab <file>
            The vocabulary will be saved to <file>
        read_vocab <file>
            The vocabulary will be read from <file>, not constructed from
            the training data
        verbose
            Print output from training
    """
    command = ["word2vec"]
    args = [
        "-train", "-output", "-size", "-window", "-sample", "-hs",
        "-negative", "-threads", "-iter", "-min-count", "-alpha",
        "-debug", "-binary", "-cbow",
    ]
    values = [
        train, output, size, window, sample, hs, negative, threads,
        iter_, min_count, alpha, debug, binary, cbow,
    ]
    for arg, value in zip(args, values):
        command.append(arg)
        command.append(str(value))
    if save_vocab is not None:
        command.append("-save-vocab")
        command.append(str(save_vocab))
    if read_vocab is not None:
        command.append("-read-vocab")
        command.append(str(read_vocab))

    run_cmd(command, verbose=verbose)
0.000341
def t_OPTION_AND_VALUE(self, t):
    r'[^ \n\r\t=#]+[ \t=]+[^\r\n#]+'
    # TODO(etingof) escape hash
    if t.value.endswith('\\'):
        t.lexer.multiline_newline_seen = False
        t.lexer.code_start = t.lexer.lexpos - len(t.value)
        t.lexer.begin('multiline')
        return

    lineno = len(re.findall(r'\r\n|\n|\r', t.value))

    option, value = self._parse_option_value(t.value)

    process, option, value = self._pre_parse_value(option, value)
    if not process:
        return

    if value.startswith('<<'):
        t.lexer.heredoc_anchor = value[2:].strip()
        t.lexer.heredoc_option = option
        t.lexer.code_start = t.lexer.lexpos
        t.lexer.begin('heredoc')
        return

    t.value = option, value
    t.lexer.lineno += lineno
    return t
0.002315
def retire_subjects(self, subjects, reason='other'):
    """
    Retires subjects in this workflow.

    - **subjects** can be a list of :py:class:`Subject` instances, a list
      of subject IDs, a single :py:class:`Subject` instance, or a single
      subject ID.
    - **reason** gives the reason the :py:class:`Subject` has been
      retired. Defaults to **other**.

    Examples::

        workflow.retire_subjects(1234)
        workflow.retire_subjects([1,2,3,4])
        workflow.retire_subjects(Subject(1234))
        workflow.retire_subjects([Subject(12), Subject(34)])
    """
    subjects = [s.id if isinstance(s, Subject) else s for s in subjects]

    return Workflow.http_post(
        '{}/retired_subjects'.format(self.id),
        json={
            'subject_ids': subjects,
            'retirement_reason': reason
        }
    )
0.004283
def _create_storage_folder(self):
    '''
    Creates a storage folder using the query name by replacing spaces
    in the query with '_' (underscore)
    '''
    try:
        print(colored('\nCreating Storage Folder...', 'yellow'))
        self._storageFolder = os.path.join(
            self._destinationFolder, self._imageQuery.replace(' ', '_'))
        os.makedirs(self._storageFolder)
        print(colored('Storage Folder - ' + self._storageFolder +
                      ' created.', 'green'))
    except FileExistsError:
        print(colored('Storage Folder - ' + self._storageFolder +
                      ' already exists.', 'yellow'))
    except Exception as exception:
        raise exception
0.002558
def prepare_params(modeline, fileconfig, options):
    """Prepare and merge params from modelines and configs.

    :return dict:
    """
    params = dict(skip=False, ignore=[], select=[], linters=[])

    if options:
        params['ignore'] = list(options.ignore)
        params['select'] = list(options.select)

    for config in filter(None, [modeline, fileconfig]):
        for key in ('ignore', 'select', 'linters'):
            params[key] += process_value(key, config.get(key, []))
        params['skip'] = bool(int(config.get('skip', False)))

    # TODO: skip what? This is causing erratic behavior for linters.
    params['skip'] = False

    params['ignore'] = set(params['ignore'])
    params['select'] = set(params['select'])

    return params
0.001316
@contextmanager  # from contextlib; needed for the 'with' semantics described below
def add_to_path(p):
    '''
    Adds a path to python paths and removes it after the 'with' block ends
    '''
    old_path = sys.path
    if p not in sys.path:
        sys.path = sys.path[:]
        sys.path.insert(0, p)
    try:
        yield
    finally:
        sys.path = old_path
0.00346
def delete_observation(observation_id: int, access_token: str) -> List[Dict[str, Any]]:
    """
    Delete an observation.

    :param observation_id:
    :param access_token:
    :return:
    """
    headers = _build_auth_header(access_token)
    headers['Content-type'] = 'application/json'
    response = requests.delete(
        url="{base_url}/observations/{id}.json".format(
            base_url=INAT_BASE_URL, id=observation_id),
        headers=headers)
    response.raise_for_status()
    # According to iNaturalist documentation, proper JSON should be returned.
    # It seems however that the response is currently empty (while the
    # requests succeed), so you may receive a JSONDecode exception.
    # TODO: report to iNaturalist team if the issue persists
    return response.json()
0.006873
def slugify(value, allow_unicode=False):
    """
    Convert to ASCII if 'allow_unicode' is False. Convert spaces to hyphens.
    Remove characters that aren't alphanumerics, underscores, or hyphens.
    Convert to lowercase. Also strip leading and trailing whitespace.

    Copyright: https://docs.djangoproject.com/en/1.9/_modules/django/utils/text/#slugify
    TODO: replace after stopping support for Django 1.8
    """
    value = force_text(value)
    if allow_unicode:
        value = unicodedata.normalize('NFKC', value)
        value = re.sub(r'[^\w\s-]', '', value, flags=re.U).strip().lower()
        return mark_safe(re.sub(r'[-\s]+', '-', value, flags=re.U))
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore').decode('ascii')
    value = re.sub(r'[^\w\s-]', '', value).strip().lower()
    return mark_safe(re.sub(r'[-\s]+', '-', value))
0.010369
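A quick usage sketch for slugify above (outputs shown as comments; this assumes the Django helpers force_text and mark_safe are importable as in the original module):

slugify("Héllo Wörld!")                      # 'hello-world'
slugify("Héllo Wörld!", allow_unicode=True)  # 'héllo-wörld'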
def _folder_item_remarks(self, analysis_brain, item):
    """Renders the Remarks field for the passed in analysis

    If the edition of the analysis is permitted, adds the field into the
    list of editable fields.

    :param analysis_brain: Brain that represents an analysis
    :param item: analysis' dictionary counterpart that represents a row
    """
    if self.analysis_remarks_enabled():
        item["Remarks"] = analysis_brain.getRemarks

    if self.is_analysis_edition_allowed(analysis_brain):
        item["allow_edit"].extend(["Remarks"])
0.003356
def getAddressBinding(self):
    """A convenience method to obtain the extension element used
    as the address binding for the port."""
    for item in self.extensions:
        if isinstance(item, SoapAddressBinding) or \
           isinstance(item, HttpAddressBinding):
            return item
    raise WSDLError(
        'No address binding found in port.'
    )
0.004878
def order_by(self, *colnames):
    """
    Orders the result set. Ordering can only use clustering columns.

    Default order is ascending, prepend a '-' to the column name for
    descending
    """
    if len(colnames) == 0:
        clone = copy.deepcopy(self)
        clone._order = []
        return clone

    conditions = []
    for colname in colnames:
        conditions.append('"{}" {}'.format(*self._get_ordering_condition(colname)))

    clone = copy.deepcopy(self)
    clone._order.extend(conditions)
    return clone
0.00678
def set_images(self, images):
    """
    Set the images in this album.

    :param images: A list of the images we want the album to contain.
        Can be Image objects, ids or a combination of the two. Images
        that you cannot set (non-existing or not owned by you) will not
        cause exceptions, but fail silently.
    """
    url = (self._imgur._base_url + "/3/album/"
           "{0}/".format(self._delete_or_id_hash))
    params = {'ids': images}
    return self._imgur._send_request(url, needs_auth=True, params=params,
                                     method="POST")
0.003077
def STEL(CASRN, AvailableMethods=False, Method=None):  # pragma: no cover
    '''This function handles the retrieval of Short-term Exposure Limit on
    worker exposure to dangerous chemicals.

    This API is considered experimental, and is expected to be removed in a
    future release in favor of a more complete object-oriented interface.

    >>> STEL('67-64-1')
    (750.0, 'ppm')
    >>> STEL('7664-38-2')
    (0.7489774978301237, 'ppm')
    >>> STEL('55720-99-5')
    (2.0, 'mg/m^3')
    >>> STEL('86290-81-5', AvailableMethods=True)
    ['Ontario Limits', 'None']
    '''
    def list_methods():
        methods = []
        if CASRN in _OntarioExposureLimits and (
                _OntarioExposureLimits[CASRN]["STEL (ppm)"]
                or _OntarioExposureLimits[CASRN]["STEL (mg/m^3)"]):
            methods.append(ONTARIO)
        methods.append(NONE)
        return methods
    if AvailableMethods:
        return list_methods()
    if not Method:
        Method = list_methods()[0]

    if Method == ONTARIO:
        if _OntarioExposureLimits[CASRN]["STEL (ppm)"]:
            _STEL = (_OntarioExposureLimits[CASRN]["STEL (ppm)"], 'ppm')
        elif _OntarioExposureLimits[CASRN]["STEL (mg/m^3)"]:
            _STEL = (_OntarioExposureLimits[CASRN]["STEL (mg/m^3)"], 'mg/m^3')
    elif Method == NONE:
        _STEL = None
    else:
        raise Exception('Failure in in function')
    return _STEL
0.001443
def format(self, message_format):
    """
    Set the message format

    :param message_format: The format to set
    :type message_format: str
    """
    if message_format not in self.formats:
        self._log.error('Invalid Message format specified: {format}'
                        .format(format=message_format))
        return

    self._log.debug('Setting message format to {format}'
                    .format(format=message_format))
    self._format = message_format
0.008457
def extract_gzip(archive, compression, cmd, verbosity, interactive, outdir):
    """Extract a GZIP archive with the gzip Python module."""
    targetname = util.get_single_outfile(outdir, archive)
    try:
        with gzip.GzipFile(archive) as gzipfile:
            with open(targetname, 'wb') as targetfile:
                data = gzipfile.read(READ_SIZE_BYTES)
                while data:
                    targetfile.write(data)
                    data = gzipfile.read(READ_SIZE_BYTES)
    except Exception as err:
        msg = "error extracting %s to %s: %s" % (archive, targetname, err)
        raise util.PatoolError(msg)
    return None
0.003082
def rectwidth(self, wavelengths=None):
    """Calculate :ref:`bandpass rectangular width <synphot-formula-rectw>`.

    Parameters
    ----------
    wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
        Wavelength values for sampling.
        If not a Quantity, assumed to be in Angstrom.
        If `None`, ``self.waveset`` is used.

    Returns
    -------
    rectw : `~astropy.units.quantity.Quantity`
        Bandpass rectangular width.
    """
    equvw = self.equivwidth(wavelengths=wavelengths)
    tpeak = self.tpeak(wavelengths=wavelengths)

    if tpeak.value == 0:  # pragma: no cover
        rectw = 0.0 * self._internal_wave_unit
    else:
        rectw = equvw / tpeak

    return rectw
0.002484
def snr_ratio(in1, in2):
    """
    The following function simply calculates the signal to noise ratio
    between two signals.

    INPUTS:
    in1 (no default):   Array containing values for signal 1.
    in2 (no default):   Array containing values for signal 2.

    OUTPUTS:
    out1                The ratio of the signal to noise ratios of
                        two signals.
    """
    out1 = 20 * (np.log10(np.linalg.norm(in1) / np.linalg.norm(in1 - in2)))

    return out1
0.006316
def delete(self):
    """Del or Backspace pressed. Delete selection"""
    with self._qpart:
        for cursor in self.cursors():
            if cursor.hasSelection():
                cursor.deleteChar()
0.008929
def get_code(self, *args, **kwargs):
    """
    get the python source code from callback
    """
    # FIXME: Honestly should allow multiple commands
    callback = self._commands[args[0]]
    # TODO: syntax color would be nice
    source = _inspect.getsourcelines(callback)[0]
    """
    source_len = len(source)
    source = PygmentsLexer(CythonLexer).lex_document(source)()
    """
    # FIXME: formatting sucks
    return "\n" + "".join(source)
0.002222
def get_mim_phenotypes(genemap_lines):
    """Get a dictionary with phenotypes

    Use the mim numbers for phenotypes as keys and phenotype information
    as values.

    Args:
        genemap_lines(iterable(str))

    Returns:
        phenotypes_found(dict): A dictionary with mim_numbers as keys and
        dictionaries with phenotype information as values.

        {
            'description': str,     # Description of the phenotype
            'hgnc_symbols': set(),  # Associated hgnc symbols
            'inheritance': set(),   # Associated inheritance patterns
            'mim_number': int,      # mim number of phenotype
        }
    """
    # Set with all omim numbers that are phenotypes
    # Parsed from mim2gene.txt
    phenotype_mims = set()

    phenotypes_found = {}

    # Genemap is a file with one entry per gene.
    # Each line holds a lot of information and in specific it
    # has information about the phenotypes that a gene is associated with.
    # From this source we collect inheritance patterns and what hgnc symbols
    # a phenotype is associated with.
    for entry in parse_genemap2(genemap_lines):
        hgnc_symbol = entry['hgnc_symbol']
        for phenotype in entry['phenotypes']:
            mim_nr = phenotype['mim_number']
            if mim_nr in phenotypes_found:
                phenotype_entry = phenotypes_found[mim_nr]
                phenotype_entry['inheritance'] = phenotype_entry['inheritance'].union(
                    phenotype['inheritance'])
                phenotype_entry['hgnc_symbols'].add(hgnc_symbol)
            else:
                phenotype['hgnc_symbols'] = set([hgnc_symbol])
                phenotypes_found[mim_nr] = phenotype

    return phenotypes_found
0.004673
def digit(m: Union[int, pd.Series], n: int) -> Union[int, pd.Series]:
    """Returns the nth digit of each number in m."""
    return (m // (10 ** n)) % 10
0.01227
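For example, digit indexes from the least-significant digit, with n = 0 as the ones place:

digit(1234, 0)  # 4
digit(1234, 2)  # 2
digit(pd.Series([15, 250]), 1)  # Series with values [1, 5]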
def get_model(name, dataset_name='wikitext-2', **kwargs):
    """Returns a pre-defined model by name.

    Parameters
    ----------
    name : str
        Name of the model.
    dataset_name : str or None, default 'wikitext-2'.
        The dataset name on which the pre-trained model is trained.
        For language model, options are 'wikitext-2'.
        For ELMo, options are 'gbw' and '5bw'.
        'gbw' represents 1 Billion Word Language Model Benchmark
        http://www.statmt.org/lm-benchmark/;
        '5bw' represents a dataset of 5.5B tokens consisting of
        Wikipedia (1.9B) and all of the monolingual news crawl data
        from WMT 2008-2012 (3.6B).
        If specified, then the returned vocabulary is extracted from
        the training set of the dataset.
        If None, then vocab is required, for specifying embedding weight
        size, and is directly returned.
    vocab : gluonnlp.Vocab or None, default None
        Vocabulary object to be used with the language model.
        Required when dataset_name is not specified.
        None Vocabulary object is required with the ELMo model.
    pretrained : bool, default False
        Whether to load the pre-trained weights for model.
    ctx : Context, default CPU
        The context in which to load the pre-trained weights.
    root : str, default '~/.mxnet/models'
        Location for keeping the model parameters.

    Returns
    -------
    gluon.Block, gluonnlp.Vocab, (optional) gluonnlp.Vocab
    """
    models = {'standard_lstm_lm_200': standard_lstm_lm_200,
              'standard_lstm_lm_650': standard_lstm_lm_650,
              'standard_lstm_lm_1500': standard_lstm_lm_1500,
              'awd_lstm_lm_1150': awd_lstm_lm_1150,
              'awd_lstm_lm_600': awd_lstm_lm_600,
              'big_rnn_lm_2048_512': big_rnn_lm_2048_512,
              'elmo_2x1024_128_2048cnn_1xhighway': elmo_2x1024_128_2048cnn_1xhighway,
              'elmo_2x2048_256_2048cnn_1xhighway': elmo_2x2048_256_2048cnn_1xhighway,
              'elmo_2x4096_512_2048cnn_2xhighway': elmo_2x4096_512_2048cnn_2xhighway,
              'transformer_en_de_512': transformer_en_de_512,
              'bert_12_768_12': bert_12_768_12,
              'bert_24_1024_16': bert_24_1024_16}
    name = name.lower()
    if name not in models:
        raise ValueError(
            'Model %s is not supported. Available options are\n\t%s' % (
                name, '\n\t'.join(sorted(models.keys()))))
    kwargs['dataset_name'] = dataset_name
    return models[name](**kwargs)
0.004315
def intercom_user_hash(data):
    """
    Return a SHA-256 HMAC `user_hash` as expected by Intercom, if configured.

    Return None if the `INTERCOM_HMAC_SECRET_KEY` setting is not configured.
    """
    if getattr(settings, 'INTERCOM_HMAC_SECRET_KEY', None):
        return hmac.new(
            key=_hashable_bytes(settings.INTERCOM_HMAC_SECRET_KEY),
            msg=_hashable_bytes(data),
            digestmod=hashlib.sha256,
        ).hexdigest()
    else:
        return None
0.00207
def check_grandparents(self, mother=None, father=None):
    """
    Check if there are any grand parents.

    Set the grandparents id:s

    Arguments:
        mother (Individual): An Individual object that represents the mother
        father (Individual): An Individual object that represents the father
    """
    if mother:
        if mother.mother != '0':
            self.grandparents[mother.mother] = ''
        elif mother.father != '0':
            self.grandparents[mother.father] = ''
    if father:
        if father.mother != '0':
            self.grandparents[father.mother] = ''
        elif father.father != '0':
            self.grandparents[father.father] = ''
    return
0.017435
def flag(name=None):
    """
    Creates the grammar for a Flag (F) field, accepting only 'Y', 'N' or 'U'.

    :param name: name for the field
    :return: grammar for the flag field
    """
    if name is None:
        name = 'Flag Field'

    # Basic field
    field = pp.Regex('[YNU]')

    # Name
    field.setName(name)

    field.leaveWhitespace()

    return field
0.002681
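# Usage sketch for flag(), assuming the pyparsing package (the `pp` used above)
# is available; anything other than 'Y', 'N' or 'U' raises a ParseException.
import pyparsing as pp

f = flag('Distribution Flag')   # 'Distribution Flag' is just an arbitrary label
print(f.parseString('Y')[0])    # 'Y'
try:
    f.parseString('X')
except pp.ParseException as err:
    print('rejected:', err)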
def initial_validation(request, prefix): """ Returns the related model instance and post data to use in the comment/rating views below. Both comments and ratings have a ``prefix_ACCOUNT_REQUIRED`` setting. If this is ``True`` and the user is unauthenticated, we store their post data in their session, and redirect to login with the view's url (also defined by the prefix arg) as the ``next`` param. We can then check the session data once they log in, and complete the action authenticated. On successful post, we pass the related object and post data back, which may have come from the session, for each of the comments and ratings view functions to deal with as needed. """ post_data = request.POST login_required_setting_name = prefix.upper() + "S_ACCOUNT_REQUIRED" posted_session_key = "unauthenticated_" + prefix redirect_url = "" if getattr(settings, login_required_setting_name, False): if not request.user.is_authenticated(): request.session[posted_session_key] = request.POST error(request, _("You must be logged in. Please log in or " "sign up to complete this action.")) redirect_url = "%s?next=%s" % (settings.LOGIN_URL, reverse(prefix)) elif posted_session_key in request.session: post_data = request.session.pop(posted_session_key) if not redirect_url: model_data = post_data.get("content_type", "").split(".", 1) if len(model_data) != 2: return HttpResponseBadRequest() try: model = apps.get_model(*model_data) obj = model.objects.get(id=post_data.get("object_pk", None)) except (TypeError, ObjectDoesNotExist, LookupError): redirect_url = "/" if redirect_url: if request.is_ajax(): return HttpResponse(dumps({"location": redirect_url})) else: return redirect(redirect_url) return obj, post_data
0.000498
def chunks(iterable, chunksize, cast=tuple):
    # type: (Iterable, int, Callable) -> Iterable
    """
    Yields items from an iterator in iterable chunks.
    """
    it = iter(iterable)
    while True:
        try:
            first = next(it)
        except StopIteration:
            # PEP 479: a bare next() leaking StopIteration out of a generator
            # becomes a RuntimeError on Python 3.7+, so return explicitly.
            return
        yield cast(itertools.chain([first], itertools.islice(it, chunksize - 1)))
0.003205
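# Usage sketch for chunks(): the last chunk may be shorter, and `cast`
# controls the container type of each chunk.
import itertools

print(list(chunks(range(7), 3)))             # [(0, 1, 2), (3, 4, 5), (6,)]
print(list(chunks('abcdef', 2, cast=list)))  # [['a', 'b'], ['c', 'd'], ['e', 'f']]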
def list_consumers(self, publisher_id=None): """ListConsumers. Get a list of available service hook consumer services. Optionally filter by consumers that support at least one event type from the specific publisher. :param str publisher_id: :rtype: [Consumer] """ query_parameters = {} if publisher_id is not None: query_parameters['publisherId'] = self._serialize.query('publisher_id', publisher_id, 'str') response = self._send(http_method='GET', location_id='4301c514-5f34-4f5d-a145-f0ea7b5b7d19', version='5.0', query_parameters=query_parameters) return self._deserialize('[Consumer]', self._unwrap_collection(response))
0.007519
def stop(self): """Stop the underlying :class:`SparkContext`. """ self._sc.stop() # We should clean the default session up. See SPARK-23228. self._jvm.SparkSession.clearDefaultSession() self._jvm.SparkSession.clearActiveSession() SparkSession._instantiatedSession = None SparkSession._activeSession = None
0.00542
def match(ctx, features, profile, gps_precision):
    """Mapbox Map Matching API lets you snap your GPS traces
    to the OpenStreetMap road and path network.

      $ mapbox mapmatching trace.geojson

    An access token is required, see `mapbox --help`.
    """
    access_token = (ctx.obj and ctx.obj.get('access_token')) or None

    features = list(features)
    if len(features) != 1:
        raise click.BadParameter(
            "Mapmatching requires a single LineString feature")

    service = mapbox.MapMatcher(access_token=access_token)
    try:
        res = service.match(
            features[0],
            profile=profile,
            gps_precision=gps_precision)
    except mapbox.errors.ValidationError as exc:
        raise click.BadParameter(str(exc))

    if res.status_code == 200:
        stdout = click.open_file('-', 'w')
        click.echo(res.text, file=stdout)
    else:
        raise MapboxCLIException(res.text.strip())
0.001056
def setValue( self, value ): """ Moves the line to the given value and rebuilds it :param value | <variant> """ scene = self.scene() point = scene.mapFromChart(value, None) self.setPos(point.x(), self.pos().y()) self.rebuild(scene.gridRect())
0.017699
def getPositionFromState(pState):
  """Return the position of a particle given its state dict.

  Parameters:
  --------------------------------------------------------------
  pState:      particle state dict; its 'varStates' entry maps each
               variable name to a state dict with a 'position' key

  retval:      dict() of particle position, keys are the variable names,
               values are their positions
  """
  result = dict()
  for (varName, value) in pState['varStates'].iteritems():
    result[varName] = value['position']

  return result
0.004454
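# Usage sketch (Python 2, matching the .iteritems() call above) with a made-up
# particle state; only the 'position' entries are read.
pState = {'varStates': {'alpha': {'position': 0.25, 'velocity': 0.1},
                        'beta':  {'position': 7,    'velocity': -2}}}
print getPositionFromState(pState)  # {'alpha': 0.25, 'beta': 7}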
def add_camera_make_model(self, make, model): ''' Add camera make and model.''' self._ef['0th'][piexif.ImageIFD.Make] = make self._ef['0th'][piexif.ImageIFD.Model] = model
0.010256
def _post_init(self): """Call the find devices method for the relevant platform.""" if WIN: self._find_devices_win() elif MAC: self._find_devices_mac() else: self._find_devices() self._update_all_devices() if NIX: self._find_leds()
0.006116
def _divide_heigths(self, cli, write_position):
        """
        Return the heights for all rows.
        Or None when there is not enough space.
        """
        if not self.children:
            return []

        # Calculate heights.
        given_dimensions = self.get_dimensions(cli) if self.get_dimensions else None

        def get_dimension_for_child(c, index):
            if given_dimensions and given_dimensions[index] is not None:
                return given_dimensions[index]
            else:
                return c.preferred_height(cli, write_position.width, write_position.extended_height)

        dimensions = [get_dimension_for_child(c, index) for index, c in enumerate(self.children)]

        # Sum dimensions
        sum_dimensions = sum_layout_dimensions(dimensions)

        # If there is not enough space for all children, don't do anything
        # (return None, per the docstring).
        if sum_dimensions.min > write_position.extended_height:
            return

        # Find optimal sizes. (Start with minimal size, increase until we cover
        # the whole height.)
        sizes = [d.min for d in dimensions]

        child_generator = take_using_weights(
            items=list(range(len(dimensions))),
            weights=[d.weight for d in dimensions])

        i = next(child_generator)

        while sum(sizes) < min(write_position.extended_height, sum_dimensions.preferred):
            # Increase until we meet at least the 'preferred' size.
            if sizes[i] < dimensions[i].preferred:
                sizes[i] += 1
            i = next(child_generator)

        if not any([cli.is_returning, cli.is_exiting, cli.is_aborting]):
            while sum(sizes) < min(write_position.height, sum_dimensions.max):
                # Increase until we use all the available space. (or until "max")
                if sizes[i] < dimensions[i].max:
                    sizes[i] += 1
                i = next(child_generator)

        return sizes
0.003579
def make_three_color(data, time, step, config, shape=(1280, 1280),
                     lower_val=(0, 0, 0), upper_val=(2.5, 2.5, 2.5)):
    """
    create a three color image according to the config file
    :param data: a dictionary of fetched data where keys correspond to products
    :param time: datetime of the target observation
    :param step: timedelta of the observing cadence; a channel whose end time is
        more than half a step from `time` invalidates the composite
    :param config: a config object
    :param shape: the size of a composite image
    :param lower_val: a tuple of lower values for RGB, any value below this is set to the low value
    :param upper_val: a tuple of upper values for RGB, any value above this is set to the high value
    :return: a (m,n,3) numpy array for a three color image where all values are between 0 and 1
    """
    order = {'red': 0, 'green': 1, 'blue': 2}
    three_color = np.zeros((shape[0], shape[1], 3))
    channel_colors = {color: config.default[color] for color in ['red', 'green', 'blue']}

    for color, channel in channel_colors.items():
        if data[channel][1] is None or \
                abs((time - date_parser.parse(data[channel][0]['date-end'])).total_seconds()) > step.total_seconds()/2.0:
            return np.zeros((shape[0], shape[1], 3))

        three_color[:, :, order[color]] = data[channel][1]

        # scale the image by the power
        three_color[:, :, order[color]] = np.power(three_color[:, :, order[color]],
                                                   config.default["{}_power".format(color)])

        # clamp this channel into [lower, upper], per channel, so that the
        # scaling below maps it into (0, 1)
        lower = lower_val[order[color]]
        upper = upper_val[order[color]]
        three_color[:, :, order[color]] = np.clip(three_color[:, :, order[color]], lower, upper)

    # image values must be between (0,1) so scale image
    for color, index in order.items():
        three_color[:, :, index] /= upper_val[index]
    return three_color
0.004878
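# Standalone sketch of the clamp-and-scale step at the heart of make_three_color,
# on random data (no FITS products or config object needed).
import numpy as np

img = np.random.rand(128, 128, 3) * 3.0   # fake composite with values in [0, 3)
img = np.clip(img, 0.0, 2.5) / 2.5        # per-channel clamp, then scale to [0, 1]
assert img.min() >= 0.0 and img.max() <= 1.0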
def reject(self, pn_condition=None): """See Link Reject, AMQP1.0 spec.""" self._pn_link.source.type = proton.Terminus.UNSPECIFIED super(SenderLink, self).reject(pn_condition)
0.010101
def expire_hit(self, hitid): ''' Expire HIT ''' if not self.connect_to_turk(): return False try: self.mtc.update_expiration_for_hit( HITId=hitid, ExpireAt=datetime.datetime.now() ) return True except Exception as e: print "Failed to expire HIT. Please check the ID and try again." print e return False
0.004454
def busmap_from_psql(network, session, scn_name):
    """ Retrieves busmap from `model_draft.ego_grid_pf_hv_busmap` on the
    OpenEnergyPlatform (www.openenergy-platform.org) for a given scenario
    name. If this busmap does not exist, it is created with default values.

    Parameters
    ----------
    network : pypsa.Network object
        Container for all network components.
    session : sqlalchemy.orm.session.Session object
        Establishes interactions with the database.
    scn_name : str
        Name of the scenario.

    Returns
    -------
    busmap : dict
        Maps old bus_ids to new bus_ids.
    """

    def fetch():

        query = session.query(EgoGridPfHvBusmap.bus0, EgoGridPfHvBusmap.bus1).\
            filter(EgoGridPfHvBusmap.scn_name == scn_name)

        return dict(query.all())

    busmap = fetch()

    # TODO: Or better try/except/finally
    if not busmap:
        print('Busmap does not exist and will be created.\n')

        cpu_cores = input('cpu_cores (default 4): ') or '4'

        busmap_by_shortest_path(network, session, scn_name,
                                fromlvl=[110], tolvl=[220, 380, 400, 450],
                                cpu_cores=int(cpu_cores))
        busmap = fetch()

    return busmap
0.000789
def draw_rect(self, pos, size, color, fillcolor=None):
        """
        Draw a rectangle with the given color on the screen and optionally fill
        it with fillcolor.

        :param pos: Top left corner of the rectangle
        :param size: Size of the rectangle
        :param color: Color for borders
        :param fillcolor: Color for infill
        :type pos: tuple
        :type size: tuple
        :type color: tuple
        :type fillcolor: tuple
        """

        # draw top and bottom line
        for x in range(size[0]):
            self.draw_dot((pos[0] + x, pos[1]), color)
            self.draw_dot((pos[0] + x, pos[1] + size[1] - 1), color)

        # draw left and right side
        for y in range(size[1]):
            self.draw_dot((pos[0], pos[1] + y), color)
            self.draw_dot((pos[0] + size[0] - 1, pos[1] + y), color)

        # draw filled rect
        if fillcolor and size[0] >= 3 and size[1] >= 3:
            for x in range(size[0] - 2):
                for y in range(size[1] - 2):
                    self.draw_dot((pos[0] + 1 + x, pos[1] + 1 + y), fillcolor)
0.00273
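# Sketch checking which dots draw_rect() emits, using a hypothetical recorder
# class that provides just the draw_dot() method the function relies on.
class DotRecorder(object):
    draw_rect = draw_rect  # reuse the function above as a method
    def __init__(self):
        self.dots = []
    def draw_dot(self, pos, color):
        self.dots.append((pos, color))

s = DotRecorder()
s.draw_rect((0, 0), (4, 3), color='B', fillcolor='F')
# 4x3 border: 2*4 + 2*3 = 14 dot calls (corners drawn twice), plus 2x1 fill dots
print(len(s.dots))  # 16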
def get_users_for_sis_course_id(self, sis_course_id, params={}): """ Returns a list of users for the given sis course id. """ return self.get_users_for_course( self._sis_id(sis_course_id, sis_field="course"), params)
0.007692
def transform(self, X, lenscale=None):
        """
        Apply the Fast Food RBF basis to X.

        Parameters
        ----------
        X: ndarray
            (N, d) array of observations where N is the number of samples, and
            d is the dimensionality of X.
        lenscale: scalar or ndarray, optional
            scalar or array of shape (d,) length scales (one for each dimension
            of X). If not given, the initial length scale is used.

        Returns
        -------
        ndarray:
            of shape (N, 2*nbases) where nbases is the number of random bases
            to use, given in the constructor (to nearest larger two power).
        """
        lenscale = self._check_dim(X.shape[1], lenscale)

        VX = self._makeVX(X / lenscale)
        Phi = np.hstack((np.cos(VX), np.sin(VX))) / np.sqrt(self.n)
        return Phi
0.002247
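# Standalone sketch of the random Fourier feature idea behind transform(); the
# dense Gaussian W below is a stand-in for the structured FastFood V matrix, so
# this shows the cos/sin featurisation, not the FastFood speed-up itself.
import numpy as np

rng = np.random.RandomState(0)
X = rng.randn(50, 3)                   # 50 samples, 3 dimensions
n = 8                                  # number of random bases
W = rng.randn(3, n)                    # stand-in for V (length scale folded in)
VX = X.dot(W)
Phi = np.hstack((np.cos(VX), np.sin(VX))) / np.sqrt(n)
print(Phi.shape)                       # (50, 16) == (N, 2 * nbases)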
def check_oversized_pickle(pickled, name, obj_type, worker): """Send a warning message if the pickled object is too large. Args: pickled: the pickled object. name: name of the pickled object. obj_type: type of the pickled object, can be 'function', 'remote function', 'actor', or 'object'. worker: the worker used to send warning message. """ length = len(pickled) if length <= ray_constants.PICKLE_OBJECT_WARNING_SIZE: return warning_message = ( "Warning: The {} {} has size {} when pickled. " "It will be stored in Redis, which could cause memory issues. " "This may mean that its definition uses a large array or other object." ).format(obj_type, name, length) push_error_to_driver( worker, ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR, warning_message, driver_id=worker.task_driver_id)
0.001073
def FailoverInstance(r, instance, iallocator=None, ignore_consistency=False, target_node=None): """Does a failover of an instance. @type instance: string @param instance: Instance name @type iallocator: string @param iallocator: Iallocator for deciding the target node for shared-storage instances @type ignore_consistency: bool @param ignore_consistency: Whether to ignore disk consistency @type target_node: string @param target_node: Target node for shared-storage instances @rtype: string @return: job id """ body = { "ignore_consistency": ignore_consistency, } if iallocator is not None: body["iallocator"] = iallocator if target_node is not None: body["target_node"] = target_node return r.request("put", "/2/instances/%s/failover" % instance, content=body)
0.002198