Dataset columns: code (string, lengths 75 to 104k characters) and docstring (string, lengths 1 to 46.9k characters).
def idxstats(in_bam, data):
    """Return BAM index stats for the given file, using samtools idxstats.
    """
    index(in_bam, data["config"], check_timestamp=False)
    AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"])
    samtools = config_utils.get_program("samtools", data["config"])
    idxstats_out = subprocess.check_output([samtools, "idxstats", in_bam]).decode()
    out = []
    for line in idxstats_out.split("\n"):
        if line.strip():
            contig, length, aligned, unaligned = line.split("\t")
            out.append(AlignInfo(contig, int(length), int(aligned), int(unaligned)))
    return out
Return BAM index stats for the given file, using samtools idxstats.
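The function above leans on bcbio's config_utils and a samtools binary. As an illustration of just the parsing step, here is a minimal, self-contained sketch that applies the same namedtuple parsing to a made-up idxstats text blob (the contig names and counts are hypothetical):

import collections

AlignInfo = collections.namedtuple("AlignInfo", ["contig", "length", "aligned", "unaligned"])

# Hypothetical samtools idxstats output: contig, length, aligned reads, unaligned reads.
sample_idxstats = "chr1\t248956422\t1024\t12\nchrM\t16569\t88\t0\n*\t0\t0\t37\n"

records = []
for line in sample_idxstats.split("\n"):
    if line.strip():
        contig, length, aligned, unaligned = line.split("\t")
        records.append(AlignInfo(contig, int(length), int(aligned), int(unaligned)))

total_aligned = sum(r.aligned for r in records)  # 1112 for the sample above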
def handle_jmespath_query(self, args):
    """ handles the jmespath query for injection or printing """
    continue_flag = False
    query_symbol = SELECT_SYMBOL['query']
    symbol_len = len(query_symbol)
    try:
        if len(args) == 1:
            # if arguments start with query_symbol, just print query result
            if args[0] == query_symbol:
                result = self.last.result
            elif args[0].startswith(query_symbol):
                result = jmespath.search(args[0][symbol_len:], self.last.result)
            print(json.dumps(result, sort_keys=True, indent=2), file=self.output)
        elif args[0].startswith(query_symbol):
            # print error message, user unsure of query shortcut usage
            print(("Usage Error: " + os.linesep +
                   "1. Use {0} stand-alone to display previous result with optional filtering "
                   "(Ex: {0}[jmespath query])" + os.linesep +
                   "OR:" + os.linesep +
                   "2. Use {0} to query the previous result for argument values "
                   "(Ex: group show --name {0}[jmespath query])").format(query_symbol),
                  file=self.output)
        else:
            # query, inject into cmd
            def jmespath_query(match):
                if match.group(0) == query_symbol:
                    return str(self.last.result)
                query_result = jmespath.search(match.group(0)[symbol_len:], self.last.result)
                return str(query_result)

            def sub_result(arg):
                escaped_symbol = re.escape(query_symbol)
                # regex captures query symbol and all characters following it in the argument
                return json.dumps(re.sub(r'%s.*' % escaped_symbol, jmespath_query, arg))

            cmd_base = ' '.join(map(sub_result, args))
            self.cli_execute(cmd_base)
        continue_flag = True
    except (jmespath.exceptions.ParseError, CLIError) as e:
        print("Invalid Query Input: " + str(e), file=self.output)
        continue_flag = True
    return continue_flag
handles the jmespath query for injection or printing
def _parse_engine(self):
    """
    Parse the storage engine in the config.

    Returns:
        str
    """
    if self._parser.has_option('storage', 'engine'):
        engine = str(self._parser.get('storage', 'engine'))
    else:
        engine = ENGINE_DROPBOX

    assert isinstance(engine, str)

    if engine not in [ENGINE_DROPBOX, ENGINE_GDRIVE, ENGINE_COPY, ENGINE_ICLOUD, ENGINE_BOX, ENGINE_FS]:
        raise ConfigError('Unknown storage engine: {}'.format(engine))

    return str(engine)
Parse the storage engine in the config. Returns: str
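The ENGINE_* constants and ConfigError come from the surrounding module. A self-contained approximation of the same option-with-default lookup, using stand-in constants, might look like this:

import configparser

# Hypothetical engine constants standing in for the module-level ones.
ENGINE_DROPBOX, ENGINE_FS = 'dropbox', 'filesystem'
KNOWN_ENGINES = [ENGINE_DROPBOX, ENGINE_FS]

parser = configparser.ConfigParser()
parser.read_string("[storage]\nengine = filesystem\n")

if parser.has_option('storage', 'engine'):
    engine = parser.get('storage', 'engine')
else:
    engine = ENGINE_DROPBOX          # fall back to the default engine

if engine not in KNOWN_ENGINES:
    raise ValueError('Unknown storage engine: {}'.format(engine))
print(engine)  # filesystem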
def do_a(self, line):
    """Send the Master an AnalogInput (group 32) value. Command syntax is: a index value"""
    index, value_string = self.index_and_value_from_line(line)
    if index and value_string:
        try:
            self.application.apply_update(opendnp3.Analog(float(value_string)), index)
        except ValueError:
            print('Please enter a floating-point value as the second argument.')
Send the Master an AnalogInput (group 32) value. Command syntax is: a index value
def p_var_decl(p):
    """ var_decl : DIM idlist typedef
    """
    for vardata in p[2]:
        SYMBOL_TABLE.declare_variable(vardata[0], vardata[1], p[3])

    p[0] = None
var_decl : DIM idlist typedef
def _check_args(logZ, f, x, samples, weights):
    """ Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`.

    Parameters
    ----------
    f, x, samples, weights:
        see arguments for :func:`fgivenx.drivers.compute_samples`
    """
    # convert to arrays
    if logZ is None:
        logZ = [0]
        f = [f]
        samples = [samples]
        weights = [weights]

    # logZ
    logZ = numpy.array(logZ, dtype='double')
    if len(logZ.shape) != 1:
        raise ValueError("logZ should be a 1D array")

    # x
    x = numpy.array(x, dtype='double')
    if len(x.shape) != 1:
        raise ValueError("x should be a 1D array")

    # f
    if len(logZ) != len(f):
        raise ValueError("len(logZ) = %i != len(f) = %i" % (len(logZ), len(f)))
    for func in f:
        if not callable(func):
            raise ValueError("first argument f must be a function "
                             "(or list of functions) of two variables")

    # samples
    if len(logZ) != len(samples):
        raise ValueError("len(logZ) = %i != len(samples) = %i"
                         % (len(logZ), len(samples)))
    samples = [numpy.array(s, dtype='double') for s in samples]
    for s in samples:
        if len(s.shape) != 2:
            raise ValueError("each set of samples should be a 2D array")

    # weights
    if len(logZ) != len(weights):
        raise ValueError("len(logZ) = %i != len(weights) = %i"
                         % (len(logZ), len(weights)))
    weights = [numpy.array(w, dtype='double') if w is not None
               else numpy.ones(len(s), dtype='double')
               for w, s in zip(weights, samples)]
    for w, s in zip(weights, samples):
        if len(w.shape) != 1:
            raise ValueError("each set of weights should be a 1D array")
        if len(w) != len(s):
            raise ValueError("len(w) = %i != len(s) = %i" % (len(w), len(s)))

    return logZ, f, x, samples, weights
Sanity-check the arguments for :func:`fgivenx.drivers.compute_samples`. Parameters ---------- f, x, samples, weights: see arguments for :func:`fgivenx.drivers.compute_samples`
def flag_to_list(flagval, flagtype):
    """Convert a string of comma-separated tf flags to a list of values."""
    if flagtype == 'int':
        return [int(_) for _ in flagval.split(',') if _]
    elif flagtype == 'float':
        return [float(_) for _ in flagval.split(',') if _]
    elif flagtype == 'str':
        return [_ for _ in flagval.split(',') if _]
    else:
        raise Exception("incorrect type")
Convert a string of comma-separated tf flags to a list of values.
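Assuming the flag_to_list function above is in scope, typical calls behave like this (empty fields are dropped by the `if _` filter):

# Assuming flag_to_list (above) is importable or defined in the same module.
assert flag_to_list('1,2,3', 'int') == [1, 2, 3]
assert flag_to_list('0.5,,1.5', 'float') == [0.5, 1.5]      # empty field skipped
assert flag_to_list('relu,tanh', 'str') == ['relu', 'tanh']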
def save(self):
    '''Save all changes on this item (if any) back to Redmine.'''
    self._check_custom_fields()
    if not self._changes:
        return None

    for tag in self._remap_to_id:
        self._remap_tag_to_tag_id(tag, self._changes)

    # Check for custom handlers for tags
    for tag, type in self._field_type.items():
        try:
            raw_data = self._changes[tag]
        except:
            continue

        # Convert datetime type to a datetime string that Redmine expects
        if type == 'datetime':
            try:
                self._changes[tag] = raw_data.strftime('%Y-%m-%dT%H:%M:%S%z')
            except AttributeError:
                continue

        # Convert date type to a date string that Redmine expects
        if type == 'date':
            try:
                self._changes[tag] = raw_data.strftime('%Y-%m-%d')
            except AttributeError:
                continue

    try:
        self._update(self._changes)
    except:
        raise
    else:
        # Successful save, woot!  Now clear the changes dict
        self._changes.clear()
Save all changes on this item (if any) back to Redmine.
def get_relation_cnt(self):
    """Return a Counter containing all relations contained in the Annotation Extensions."""
    ctr = cx.Counter()
    for ntgpad in self.associations:
        if ntgpad.Extension is not None:
            ctr += ntgpad.Extension.get_relations_cnt()
    return ctr
Return a Counter containing all relations contained in the Annotation Extensions.
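The aggregation is plain Counter addition; a self-contained sketch with made-up per-record relation counts:

from collections import Counter

# Stand-in per-record relation counts (hypothetical values).
per_record_counts = [
    Counter({'part_of': 2, 'occurs_in': 1}),
    Counter({'part_of': 1}),
    Counter(),
]

total = Counter()
for cnt in per_record_counts:
    total += cnt
print(total)  # Counter({'part_of': 3, 'occurs_in': 1})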
def _build_loss(self, lstm_outputs): """ Create: self.total_loss: total loss op for training self.softmax_W, softmax_b: the softmax variables self.next_token_id / _reverse: placeholders for gold input """ batch_size = self.options['batch_size'] unroll_steps = self.options['unroll_steps'] n_tokens_vocab = self.options['n_tokens_vocab'] # DEFINE next_token_id and *_reverse placeholders for the gold input def _get_next_token_placeholders(suffix): name = 'next_token_id' + suffix id_placeholder = tf.placeholder(DTYPE_INT, shape=(batch_size, unroll_steps), name=name) return id_placeholder # get the window and weight placeholders self.next_token_id = _get_next_token_placeholders('') if self.bidirectional: self.next_token_id_reverse = _get_next_token_placeholders( '_reverse') # DEFINE THE SOFTMAX VARIABLES # get the dimension of the softmax weights # softmax dimension is the size of the output projection_dim softmax_dim = self.options['lstm']['projection_dim'] # the output softmax variables -- they are shared if bidirectional if self.share_embedding_softmax: # softmax_W is just the embedding layer self.softmax_W = self.embedding_weights with tf.variable_scope('softmax'), tf.device('/cpu:0'): # Glorit init (std=(1.0 / sqrt(fan_in)) softmax_init = tf.random_normal_initializer(0.0, 1.0 / np.sqrt(softmax_dim)) if not self.share_embedding_softmax: self.softmax_W = tf.get_variable( 'W', [n_tokens_vocab, softmax_dim], dtype=DTYPE, initializer=softmax_init ) self.softmax_b = tf.get_variable( 'b', [n_tokens_vocab], dtype=DTYPE, initializer=tf.constant_initializer(0.0)) # now calculate losses # loss for each direction of the LSTM self.individual_train_losses = [] self.individual_eval_losses = [] if self.bidirectional: next_ids = [self.next_token_id, self.next_token_id_reverse] else: next_ids = [self.next_token_id] for id_placeholder, lstm_output_flat in zip(next_ids, lstm_outputs): # flatten the LSTM output and next token id gold to shape: # (batch_size * unroll_steps, softmax_dim) # Flatten and reshape the token_id placeholders next_token_id_flat = tf.reshape(id_placeholder, [-1, 1]) with tf.control_dependencies([lstm_output_flat]): sampled_losses = tf.nn.sampled_softmax_loss(self.softmax_W, self.softmax_b, next_token_id_flat, lstm_output_flat, self.options['n_negative_samples_batch'], self.options['n_tokens_vocab'], num_true=1) # get the full softmax loss output_scores = tf.matmul( lstm_output_flat, tf.transpose(self.softmax_W) ) + self.softmax_b # NOTE: tf.nn.sparse_softmax_cross_entropy_with_logits # expects unnormalized output since it performs the # softmax internally losses = tf.nn.sparse_softmax_cross_entropy_with_logits( logits=output_scores, labels=tf.squeeze(next_token_id_flat, squeeze_dims=[1]) ) sampled_losses = tf.reshape(sampled_losses, [self.options['batch_size'], -1]) losses = tf.reshape(losses, [self.options['batch_size'], -1]) self.individual_train_losses.append(tf.reduce_mean(sampled_losses, axis=1)) self.individual_eval_losses.append(tf.reduce_mean(losses, axis=1)) # now make the total loss -- it's the train of the individual losses if self.bidirectional: self.total_train_loss = 0.5 * (self.individual_train_losses[0] + self.individual_train_losses[1]) self.total_eval_loss = 0.5 * (self.individual_eval_losses[0] + self.individual_eval_losses[1]) else: self.total_train_loss = self.individual_train_losses[0] self.total_eval_loss = self.individual_eval_losses[0]
Create: self.total_loss: total loss op for training self.softmax_W, softmax_b: the softmax variables self.next_token_id / _reverse: placeholders for gold input
def get_extra_context(site, ctx):
    'Returns extra data useful to the templates.'
    # XXX: clean this up from obsolete stuff
    ctx['site'] = site
    ctx['feeds'] = feeds = site.active_feeds.order_by('name')

    def get_mod_chk(k):
        mod, chk = (
            (max(vals) if vals else None)
            for vals in (
                filter(None, it.imap(op.attrgetter(k), feeds))
                for k in ['last_modified', 'last_checked'] ) )
        chk = chk or datetime(1970, 1, 1, 0, 0, 0, 0, timezone.utc)
        ctx['last_modified'], ctx['last_checked'] = mod or chk, chk
        return ctx[k]
    for k in 'last_modified', 'last_checked':
        ctx[k] = lambda: get_mod_chk(k)

    # media_url is set here for historical reasons,
    # use static_url or STATIC_URL (from django context) in any new templates.
    ctx['media_url'] = ctx['static_url'] =\
        '{}feedjack/{}'.format(settings.STATIC_URL, site.template)
Returns extra data useful to the templates.
def copyFileToHdfs(localFilePath, hdfsFilePath, hdfsClient, override=True):
    '''Copy a local file to HDFS directory'''
    if not os.path.exists(localFilePath):
        raise Exception('Local file Path does not exist!')
    if os.path.isdir(localFilePath):
        raise Exception('localFile should not be a directory!')
    if hdfsClient.exists(hdfsFilePath):
        if override:
            hdfsClient.delete(hdfsFilePath)
        else:
            return False
    try:
        hdfsClient.copy_from_local(localFilePath, hdfsFilePath)
        return True
    except Exception as exception:
        nni_log(LogType.Error, 'Copy local file {0} to hdfs file {1} error: {2}'.format(localFilePath, hdfsFilePath, str(exception)))
        return False
Copy a local file to HDFS directory
def simple_response(self, status, msg=''):
    """Write a simple response back to the client."""
    status = str(status)
    proto_status = '%s %s\r\n' % (self.server.protocol, status)
    content_length = 'Content-Length: %s\r\n' % len(msg)
    content_type = 'Content-Type: text/plain\r\n'
    buf = [
        proto_status.encode('ISO-8859-1'),
        content_length.encode('ISO-8859-1'),
        content_type.encode('ISO-8859-1'),
    ]

    if status[:3] in ('413', '414'):
        # Request Entity Too Large / Request-URI Too Long
        self.close_connection = True
        if self.response_protocol == 'HTTP/1.1':
            # This will not be true for 414, since read_request_line
            # usually raises 414 before reading the whole line, and we
            # therefore cannot know the proper response_protocol.
            buf.append(b'Connection: close\r\n')
        else:
            # HTTP/1.0 had no 413/414 status nor Connection header.
            # Emit 400 instead and trust the message body is enough.
            status = '400 Bad Request'

    buf.append(CRLF)
    if msg:
        if isinstance(msg, six.text_type):
            msg = msg.encode('ISO-8859-1')
        buf.append(msg)

    try:
        self.conn.wfile.write(EMPTY.join(buf))
    except socket.error as ex:
        if ex.args[0] not in errors.socket_errors_to_ignore:
            raise
Write a simple response back to the client.
def redirectURL(self, realm, return_to=None, immediate=False): """Returns a URL with an encoded OpenID request. The resulting URL is the OpenID provider's endpoint URL with parameters appended as query arguments. You should redirect the user agent to this URL. OpenID 2.0 endpoints also accept POST requests, see C{L{shouldSendRedirect}} and C{L{formMarkup}}. @param realm: The URL (or URL pattern) that identifies your web site to the user when she is authorizing it. @type realm: str @param return_to: The URL that the OpenID provider will send the user back to after attempting to verify her identity. Not specifying a return_to URL means that the user will not be returned to the site issuing the request upon its completion. @type return_to: str @param immediate: If True, the OpenID provider is to send back a response immediately, useful for behind-the-scenes authentication attempts. Otherwise the OpenID provider may engage the user before providing a response. This is the default case, as the user may need to provide credentials or approve the request before a positive response can be sent. @type immediate: bool @returns: The URL to redirect the user agent to. @returntype: str """ message = self.getMessage(realm, return_to, immediate) return message.toURL(self.endpoint.server_url)
Returns a URL with an encoded OpenID request. The resulting URL is the OpenID provider's endpoint URL with parameters appended as query arguments. You should redirect the user agent to this URL. OpenID 2.0 endpoints also accept POST requests, see C{L{shouldSendRedirect}} and C{L{formMarkup}}. @param realm: The URL (or URL pattern) that identifies your web site to the user when she is authorizing it. @type realm: str @param return_to: The URL that the OpenID provider will send the user back to after attempting to verify her identity. Not specifying a return_to URL means that the user will not be returned to the site issuing the request upon its completion. @type return_to: str @param immediate: If True, the OpenID provider is to send back a response immediately, useful for behind-the-scenes authentication attempts. Otherwise the OpenID provider may engage the user before providing a response. This is the default case, as the user may need to provide credentials or approve the request before a positive response can be sent. @type immediate: bool @returns: The URL to redirect the user agent to. @returntype: str
def is_response(cls, response):
    '''Return whether the document is likely to be a Sitemap.'''
    if response.body:
        if cls.is_file(response.body):
            return True
Return whether the document is likely to be a Sitemap.
def check_update_J(self):
    """
    Checks if the full J should be updated.

    Right now, just updates after update_J_frequency loops
    """
    self._J_update_counter += 1
    update = self._J_update_counter >= self.update_J_frequency
    return update & (not self._fresh_JTJ)
Checks if the full J should be updated. Right now, just updates after update_J_frequency loops
def parse_json_date(value):
    """
    Parses an ISO8601 formatted datetime from a string value
    """
    if not value:
        return None

    return datetime.datetime.strptime(value, JSON_DATETIME_FORMAT).replace(tzinfo=pytz.UTC)
Parses an ISO8601 formatted datetime from a string value
def _initializer_for(self, raw_name: str, cooked_name: str, prefix: Optional[str]) -> List[str]:
    """ Create an initializer entry for the entry

    :param raw_name: name unadjusted for python compatibility.
    :param cooked_name: name that may or may not be python compatible
    :param prefix: owner of the element - used when objects passed as arguments
    :return: Initialization statements
    """
    mt_val = self._ebnf.mt_value(self._typ)
    rval = []

    if is_valid_python(raw_name):
        if prefix:
            # If a prefix exists, the input has already been processed - no if clause is necessary
            rval.append(f"self.{raw_name} = {prefix}.{raw_name}")
        else:
            cons = raw_name
            rval.append(f"self.{raw_name} = {cons}")
    elif is_valid_python(cooked_name):
        if prefix:
            rval.append(f"setattr(self, '{raw_name}', getattr({prefix}, '{raw_name}')")
        else:
            cons = f"{cooked_name} if {cooked_name} is not {mt_val} else _kwargs.get('{raw_name}', {mt_val})"
            rval.append(f"setattr(self, '{raw_name}', {cons})")
    else:
        getter = f"_kwargs.get('{raw_name}', {mt_val})"
        if prefix:
            rval.append(f"setattr(self, '{raw_name}', getattr({prefix}, '{getter}')")
        else:
            rval.append(f"setattr(self, '{raw_name}', {getter})")
    return rval
Create an initializer entry for the entry :param raw_name: name unadjusted for python compatibility. :param cooked_name: name that may or may not be python compatible :param prefix: owner of the element - used when objects passed as arguments :return: Initialization statements
def columns(self): """Provides metadata about columns for data visualization. :return: dict, with the fields name, type, is_date, is_dim and agg. """ if self.df.empty: return None columns = [] sample_size = min(INFER_COL_TYPES_SAMPLE_SIZE, len(self.df.index)) sample = self.df if sample_size: sample = self.df.sample(sample_size) for col in self.df.dtypes.keys(): db_type_str = ( self._type_dict.get(col) or self.db_type(self.df.dtypes[col]) ) column = { 'name': col, 'agg': self.agg_func(self.df.dtypes[col], col), 'type': db_type_str, 'is_date': self.is_date(self.df.dtypes[col], db_type_str), 'is_dim': self.is_dimension(self.df.dtypes[col], col), } if not db_type_str or db_type_str.upper() == 'OBJECT': v = sample[col].iloc[0] if not sample[col].empty else None if isinstance(v, str): column['type'] = 'STRING' elif isinstance(v, int): column['type'] = 'INT' elif isinstance(v, float): column['type'] = 'FLOAT' elif isinstance(v, (datetime, date)): column['type'] = 'DATETIME' column['is_date'] = True column['is_dim'] = False # check if encoded datetime if ( column['type'] == 'STRING' and self.datetime_conversion_rate(sample[col]) > INFER_COL_TYPES_THRESHOLD): column.update({ 'is_date': True, 'is_dim': False, 'agg': None, }) # 'agg' is optional attribute if not column['agg']: column.pop('agg', None) columns.append(column) return columns
Provides metadata about columns for data visualization. :return: dict, with the fields name, type, is_date, is_dim and agg.
def get_num_chunks(length, chunksize):
    r"""
    Returns the number of chunks that a list will be split into given a
    chunksize.

    Args:
        length (int):
        chunksize (int):

    Returns:
        int: n_chunks

    CommandLine:
        python -m utool.util_progress --exec-get_num_chunks:0

    Example0:
        >>> # ENABLE_DOCTEST
        >>> from utool.util_progress import *  # NOQA
        >>> length = 2000
        >>> chunksize = 256
        >>> n_chunks = get_num_chunks(length, chunksize)
        >>> result = ('n_chunks = %s' % (six.text_type(n_chunks),))
        >>> print(result)
        n_chunks = 8
    """
    n_chunks = int(math.ceil(length / chunksize))
    return n_chunks
r""" Returns the number of chunks that a list will be split into given a chunksize. Args: length (int): chunksize (int): Returns: int: n_chunks CommandLine: python -m utool.util_progress --exec-get_num_chunks:0 Example0: >>> # ENABLE_DOCTEST >>> from utool.util_progress import * # NOQA >>> length = 2000 >>> chunksize = 256 >>> n_chunks = get_num_chunks(length, chunksize) >>> result = ('n_chunks = %s' % (six.text_type(n_chunks),)) >>> print(result) n_chunks = 8
def create(
        cls, api_key=None, idempotency_key=None,
        stripe_account=None, **params
):
    """Return a deferred."""
    url = cls.class_url()
    headers = populate_headers(idempotency_key)
    return make_request(
        cls, 'post', url,
        stripe_account=stripe_account, headers=headers, params=params)
Return a deferred.
def register_func_list(self, func_and_handler):
    """ register a function to determine if the handler should be used for the type """
    for func, handler in func_and_handler:
        self._function_dispatch.register(func, handler)
    self.dispatch.cache_clear()
register a function to determine if the handler should be used for the type
def candidate(self, cand_func, args=None, kwargs=None, name='Candidate', context=None):
    '''
    Adds a candidate function to an experiment. Can be used multiple
    times for multiple candidates.

    :param callable cand_func: your candidate function
    :param iterable args: positional arguments to pass to your function
    :param dict kwargs: keyword arguments to pass to your function
    :param string name: a name for your observation
    :param dict context: observation-specific context
    '''
    self._candidates.append({
        'func': cand_func,
        'args': args or [],
        'kwargs': kwargs or {},
        'name': name,
        'context': context or {},
    })
Adds a candidate function to an experiment. Can be used multiple times for multiple candidates. :param callable cand_func: your candidate function :param iterable args: positional arguments to pass to your function :param dict kwargs: keyword arguments to pass to your function :param string name: a name for your observation :param dict context: observation-specific context
def safe_unicode(obj, *args):
    """ return the unicode representation of obj """
    try:
        return unicode(obj, *args)  # noqa for undefined-variable
    except UnicodeDecodeError:
        # obj is byte string
        ascii_text = str(obj).encode('string_escape')
        try:
            return unicode(ascii_text)  # noqa for undefined-variable
        except NameError:
            # This is Python 3, just return the obj as it's already unicode
            return obj
    except NameError:
        # This is Python 3, just return the obj as it's already unicode
        return obj
return the unicode representation of obj
def libvlc_media_new_path(p_instance, path):
    '''Create a media for a certain file path. See L{libvlc_media_release}.
    @param p_instance: the instance.
    @param path: local filesystem path.
    @return: the newly created media or NULL on error.
    '''
    f = _Cfunctions.get('libvlc_media_new_path', None) or \
        _Cfunction('libvlc_media_new_path', ((1,), (1,),), class_result(Media),
                   ctypes.c_void_p, Instance, ctypes.c_char_p)
    return f(p_instance, path)
Create a media for a certain file path. See L{libvlc_media_release}. @param p_instance: the instance. @param path: local filesystem path. @return: the newly created media or NULL on error.
def http_time(time):
    """Formats a datetime as an RFC 1123 compliant string."""
    return formatdate(timeval=mktime(time.timetuple()), localtime=False, usegmt=True)
Formats a datetime as an RFC 1123 compliant string.
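formatdate and mktime here presumably come from email.utils and time. A self-contained version of the same call (note that mktime treats the tuple as local time, so the resulting GMT string shifts by the local UTC offset):

from email.utils import formatdate
from time import mktime
from datetime import datetime

dt = datetime(2020, 1, 2, 3, 4, 5)
stamp = formatdate(timeval=mktime(dt.timetuple()), localtime=False, usegmt=True)
print(stamp)  # e.g. 'Thu, 02 Jan 2020 03:04:05 GMT' when the local timezone is UTC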
def get_cameras(self):
    """Retrieve a camera list for each onboarded network."""
    response = api.request_homescreen(self)
    try:
        all_cameras = {}
        for camera in response['cameras']:
            camera_network = str(camera['network_id'])
            camera_name = camera['name']
            camera_id = camera['id']
            camera_info = {'name': camera_name, 'id': camera_id}
            if camera_network not in all_cameras:
                all_cameras[camera_network] = []
            all_cameras[camera_network].append(camera_info)
        return all_cameras
    except KeyError:
        _LOGGER.error("Initialization failure. Could not retrieve cameras.")
        return {}
Retrieve a camera list for each onboarded network.
def sensoryCompute(self, activeMinicolumns, learn):
    """
    @param activeMinicolumns (numpy array)
    List of indices of minicolumns to activate.

    @param learn (bool)
    If True, the two layers should learn this association.

    @return (tuple of dicts)
    Data for logging/tracing.
    """
    inputParams = {
        "activeColumns": activeMinicolumns,
        "basalInput": self.getLocationRepresentation(),
        "basalGrowthCandidates": self.getLearnableLocationRepresentation(),
        "learn": learn
    }
    self.L4.compute(**inputParams)

    locationParams = {
        "anchorInput": self.L4.getActiveCells(),
        "anchorGrowthCandidates": self.L4.getWinnerCells(),
        "learn": learn,
    }
    for module in self.L6aModules:
        module.sensoryCompute(**locationParams)

    return (inputParams, locationParams)
@param activeMinicolumns (numpy array) List of indices of minicolumns to activate. @param learn (bool) If True, the two layers should learn this association. @return (tuple of dicts) Data for logging/tracing.
def start_background_task(self, target, *args, **kwargs):
    """Start a background task using the appropriate async model.

    This is a utility function that applications can use to start a
    background task using the method that is compatible with the
    selected async mode.

    :param target: the target function to execute.
    :param args: arguments to pass to the function.
    :param kwargs: keyword arguments to pass to the function.

    This function returns an object compatible with the `Thread` class in
    the Python standard library. The `start()` method on this object is
    already called by this function.
    """
    return self.server.start_background_task(target, *args, **kwargs)
Start a background task using the appropriate async model. This is a utility function that applications can use to start a background task using the method that is compatible with the selected async mode. :param target: the target function to execute. :param args: arguments to pass to the function. :param kwargs: keyword arguments to pass to the function. This function returns an object compatible with the `Thread` class in the Python standard library. The `start()` method on this object is already called by this function.
def _build_url(self, endpoint):
    """
    Builds the absolute URL using the target and desired endpoint.
    """
    try:
        path = self.endpoints[endpoint]
    except KeyError:
        msg = 'Unknown endpoint `{0}`'
        raise ValueError(msg.format(endpoint))

    absolute_url = urljoin(self.target, path)
    return absolute_url
Builds the absolute URL using the target and desired endpoint.
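A standalone sketch of the same pattern with a made-up endpoint table; urljoin is assumed to be urllib.parse.urljoin (the original module may import it differently):

from urllib.parse import urljoin

# Hypothetical endpoint table and target host.
endpoints = {'status': '/api/v1/status', 'jobs': '/api/v1/jobs'}
target = 'https://example.invalid/'

def build_url(endpoint):
    try:
        path = endpoints[endpoint]
    except KeyError:
        raise ValueError('Unknown endpoint `{0}`'.format(endpoint))
    return urljoin(target, path)

print(build_url('status'))  # https://example.invalid/api/v1/status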
def create_ebnf_parser(files): """Create EBNF files and EBNF-based parsers""" flag = False for belspec_fn in files: # Get EBNF Jinja template from Github if enabled if config["bel"]["lang"]["specification_github_repo"]: tmpl_fn = get_ebnf_template() # Check if EBNF file is more recent than belspec_fn ebnf_fn = belspec_fn.replace(".yaml", ".ebnf") if not os.path.exists(ebnf_fn) or os.path.getmtime(belspec_fn) > os.path.getmtime(ebnf_fn): with open(belspec_fn, "r") as f: belspec = yaml.load(f, Loader=yaml.SafeLoader) tmpl_dir = os.path.dirname(tmpl_fn) tmpl_basename = os.path.basename(tmpl_fn) bel_major_version = belspec["version"].split(".")[0] env = jinja2.Environment( loader=jinja2.FileSystemLoader(tmpl_dir) ) # create environment for template template = env.get_template(tmpl_basename) # get the template # replace template placeholders with appropriate variables relations_list = [ (relation, belspec["relations"]["info"][relation]["abbreviation"]) for relation in belspec["relations"]["info"] ] relations_list = sorted(list(itertools.chain(*relations_list)), key=len, reverse=True) functions_list = [ (function, belspec["functions"]["info"][function]["abbreviation"]) for function in belspec["functions"]["info"] if belspec["functions"]["info"][function]["type"] == "primary" ] functions_list = sorted(list(itertools.chain(*functions_list)), key=len, reverse=True) modifiers_list = [ (function, belspec["functions"]["info"][function]["abbreviation"]) for function in belspec["functions"]["info"] if belspec["functions"]["info"][function]["type"] == "modifier" ] modifiers_list = sorted(list(itertools.chain(*modifiers_list)), key=len, reverse=True) created_time = datetime.datetime.now().strftime("%B %d, %Y - %I:%M:%S%p") ebnf = template.render( functions=functions_list, m_functions=modifiers_list, relations=relations_list, bel_version=belspec["version"], bel_major_version=bel_major_version, created_time=created_time, ) with open(ebnf_fn, "w") as f: f.write(ebnf) parser_fn = ebnf_fn.replace(".ebnf", "_parser.py") parser = tatsu.to_python_sourcecode(ebnf, filename=parser_fn) flag = True with open(parser_fn, "wt") as f: f.write(parser) if flag: # In case we created new parser modules importlib.invalidate_caches()
Create EBNF files and EBNF-based parsers
def reply_count(self, url, mode=5, after=0):
    """
    Return comment count for main thread and all reply threads for one url.
    """
    sql = ['SELECT comments.parent,count(*)',
           'FROM comments INNER JOIN threads ON',
           '   threads.uri=? AND comments.tid=threads.id AND',
           '   (? | comments.mode = ?) AND',
           '   comments.created > ?',
           'GROUP BY comments.parent']

    return dict(self.db.execute(sql, [url, mode, mode, after]).fetchall())
Return comment count for main thread and all reply threads for one url.
def check_venv(self):
    """ Ensure we're inside a virtualenv. """
    if self.zappa:
        venv = self.zappa.get_current_venv()
    else:
        # Just for `init`, when we don't have settings yet.
        venv = Zappa.get_current_venv()
    if not venv:
        raise ClickException(
            click.style("Zappa", bold=True) + " requires an " +
            click.style("active virtual environment", bold=True, fg="red") + "!\n" +
            "Learn more about virtual environments here: " +
            click.style("http://docs.python-guide.org/en/latest/dev/virtualenvs/", bold=False, fg="cyan"))
Ensure we're inside a virtualenv.
def findExtNum(self, extname=None, extver=1):
    """Find the extension number of the given extname and extver."""
    extnum = None
    extname = extname.upper()

    if not self._isSimpleFits:
        for ext in self._image:
            if (hasattr(ext, '_extension') and
                    'IMAGE' in ext._extension and
                    (ext.extname == extname) and
                    (ext.extver == extver)):
                extnum = ext.extnum
    else:
        log.info("Image is simple fits")

    return extnum
Find the extension number of the given extname and extver.
def get(self, block=True, timeout=None):
    """Get item from underlying queue."""
    return self._queue.get(block, timeout)
Get item from underlying queue.
def add(self, user, password):
    """ Adds a user with password """
    if self.__contains__(user):
        raise UserExists
    self.new_users[user] = self._encrypt_password(password) + "\n"
Adds a user with password
def load_aead(self, public_id):
    """ Loads AEAD from the specified database. """
    connection = self.engine.connect()
    trans = connection.begin()
    try:
        s = sqlalchemy.select([self.aead_table]).where(
            (self.aead_table.c.public_id == public_id) &
            self.aead_table.c.keyhandle.in_([kh[1] for kh in self.key_handles]))
        result = connection.execute(s)
        for row in result:
            kh_int = row['keyhandle']
            aead = pyhsm.aead_cmd.YHSM_GeneratedAEAD(None, kh_int, '')
            aead.data = row['aead']
            aead.nonce = row['nonce']
            return aead
    except Exception as e:
        trans.rollback()
        raise Exception("No AEAD in DB for public_id %s (%s)" % (public_id, str(e)))
    finally:
        connection.close()
Loads AEAD from the specified database.
def evaluate(ref_time, ref_freqs, est_time, est_freqs, **kwargs): """Evaluate two multipitch (multi-f0) transcriptions, where the first is treated as the reference (ground truth) and the second as the estimate to be evaluated (prediction). Examples -------- >>> ref_time, ref_freq = mir_eval.io.load_ragged_time_series('ref.txt') >>> est_time, est_freq = mir_eval.io.load_ragged_time_series('est.txt') >>> scores = mir_eval.multipitch.evaluate(ref_time, ref_freq, ... est_time, est_freq) Parameters ---------- ref_time : np.ndarray Time of each reference frequency value ref_freqs : list of np.ndarray List of np.ndarrays of reference frequency values est_time : np.ndarray Time of each estimated frequency value est_freqs : list of np.ndarray List of np.ndarrays of estimate frequency values kwargs Additional keyword arguments which will be passed to the appropriate metric or preprocessing functions. Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved. """ scores = collections.OrderedDict() (scores['Precision'], scores['Recall'], scores['Accuracy'], scores['Substitution Error'], scores['Miss Error'], scores['False Alarm Error'], scores['Total Error'], scores['Chroma Precision'], scores['Chroma Recall'], scores['Chroma Accuracy'], scores['Chroma Substitution Error'], scores['Chroma Miss Error'], scores['Chroma False Alarm Error'], scores['Chroma Total Error']) = util.filter_kwargs( metrics, ref_time, ref_freqs, est_time, est_freqs, **kwargs) return scores
Evaluate two multipitch (multi-f0) transcriptions, where the first is treated as the reference (ground truth) and the second as the estimate to be evaluated (prediction). Examples -------- >>> ref_time, ref_freq = mir_eval.io.load_ragged_time_series('ref.txt') >>> est_time, est_freq = mir_eval.io.load_ragged_time_series('est.txt') >>> scores = mir_eval.multipitch.evaluate(ref_time, ref_freq, ... est_time, est_freq) Parameters ---------- ref_time : np.ndarray Time of each reference frequency value ref_freqs : list of np.ndarray List of np.ndarrays of reference frequency values est_time : np.ndarray Time of each estimated frequency value est_freqs : list of np.ndarray List of np.ndarrays of estimate frequency values kwargs Additional keyword arguments which will be passed to the appropriate metric or preprocessing functions. Returns ------- scores : dict Dictionary of scores, where the key is the metric name (str) and the value is the (float) score achieved.
def Enable(self, value):
    "enable or disable all top menus"
    for i in range(self.GetMenuCount()):
        self.EnableTop(i, value)
enable or disable all top menus
def map_reduce(self, map_function, data, function_kwargs=None, chunk_size=None, data_length=None):
    """
    This method contains the core functionality of the DistributorBaseClass class.

    It maps the map_function to each element of the data and reduces the results to return a flattened list.

    How the jobs are calculated, is determined by the classes
    :func:`tsfresh.utilities.distribution.DistributorBaseClass.distribute` method, which can distribute the jobs
    in multiple threads, across multiple processing units etc.

    To not transport each element of the data individually, the data is split into chunks, according to the chunk
    size (or an empirical guess if none is given). By this, worker processes not tiny but adequate sized parts of
    the data.

    :param map_function: a function to apply to each data item.
    :type map_function: callable
    :param data: the data to use in the calculation
    :type data: iterable
    :param function_kwargs: parameters for the map function
    :type function_kwargs: dict of string to parameter
    :param chunk_size: If given, chunk the data according to this size. If not given, use an empirical value.
    :type chunk_size: int
    :param data_length: If the data is a generator, you have to set the length here. If it is none, the length
        is deduced from the len of the data.
    :type data_length: int

    :return: the calculated results
    :rtype: list
    """
    if data_length is None:
        data_length = len(data)

    if not chunk_size:
        chunk_size = self.calculate_best_chunk_size(data_length)

    chunk_generator = self.partition(data, chunk_size=chunk_size)

    map_kwargs = {"map_function": map_function, "kwargs": function_kwargs}

    if hasattr(self, "progressbar_title"):
        total_number_of_expected_results = math.ceil(data_length / chunk_size)
        result = tqdm(self.distribute(_function_with_partly_reduce, chunk_generator, map_kwargs),
                      total=total_number_of_expected_results,
                      desc=self.progressbar_title, disable=self.disable_progressbar)
    else:
        result = self.distribute(_function_with_partly_reduce, chunk_generator, map_kwargs)

    result = list(itertools.chain.from_iterable(result))

    return result
This method contains the core functionality of the DistributorBaseClass class. It maps the map_function to each element of the data and reduces the results to return a flattened list. How the jobs are calculated, is determined by the classes :func:`tsfresh.utilities.distribution.DistributorBaseClass.distribute` method, which can distribute the jobs in multiple threads, across multiple processing units etc. To not transport each element of the data individually, the data is split into chunks, according to the chunk size (or an empirical guess if none is given). By this, worker processes not tiny but adequate sized parts of the data. :param map_function: a function to apply to each data item. :type map_function: callable :param data: the data to use in the calculation :type data: iterable :param function_kwargs: parameters for the map function :type function_kwargs: dict of string to parameter :param chunk_size: If given, chunk the data according to this size. If not given, use an empirical value. :type chunk_size: int :param data_length: If the data is a generator, you have to set the length here. If it is none, the length is deduced from the len of the data. :type data_length: int :return: the calculated results :rtype: list
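A self-contained sketch of the chunk-then-flatten idea, with illustrative helper names rather than tsfresh's internals:

import itertools

def partition(data, chunk_size):
    # Yield successive chunks of at most chunk_size items.
    iterable = iter(data)
    while True:
        chunk = list(itertools.islice(iterable, chunk_size))
        if not chunk:
            return
        yield chunk

def map_reduce(map_function, data, chunk_size=3):
    chunks = partition(data, chunk_size)
    # Map over each chunk, producing one result list per chunk...
    per_chunk_results = ([map_function(item) for item in chunk] for chunk in chunks)
    # ...then flatten the per-chunk lists into a single list.
    return list(itertools.chain.from_iterable(per_chunk_results))

print(map_reduce(lambda x: x * x, range(7)))  # [0, 1, 4, 9, 16, 25, 36]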
def dense(x, output_dim, reduced_dims=None, expert_dims=None, use_bias=True, activation=None, master_dtype=tf.float32, slice_dtype=tf.float32, variable_dtype=None, name=None): """Dense layer doing (kernel*x + bias) computation. Args: x: a mtf.Tensor of shape [..., reduced_dims]. output_dim: a mtf.Dimension reduced_dims: an optional list of mtf.Dimensions of x to be reduced. If omitted, we reduce the last dimension. expert_dims: an optional list of mtf.Dimension which represent different experts. Different experts get different weights. use_bias: a boolean, whether to add bias. activation: an optional function from mtf.Tensor to mtf.Tensor master_dtype: a tf.dtype (deprecated - use variable_dtype) slice_dtype: a tf.dtype (deprecated - use variable_dtype) variable_dtype: a mtf.VariableDType name: a string. variable scope. Returns: a mtf.Tensor of shape [..., output_dim]. """ if variable_dtype is None: variable_dtype = mtf.VariableDType(master_dtype, slice_dtype, x.dtype) if expert_dims is None: expert_dims = [] if reduced_dims is None: reduced_dims = x.shape.dims[-1:] w_shape = mtf.Shape(expert_dims + reduced_dims + [output_dim]) output_shape = mtf.Shape( [d for d in x.shape.dims if d not in reduced_dims] + [output_dim]) with tf.variable_scope(name, default_name="dense"): stddev = mtf.list_product(d.size for d in reduced_dims) ** -0.5 w = mtf.get_variable( x.mesh, "kernel", w_shape, initializer=tf.random_normal_initializer(stddev=stddev), dtype=variable_dtype) w = mtf.cast(w, x.dtype) y = mtf.einsum([x, w], output_shape) if use_bias: b = mtf.get_variable( x.mesh, "bias", mtf.Shape(expert_dims + [output_dim]), initializer=tf.zeros_initializer(), dtype=variable_dtype) y += b if activation is not None: y = activation(y) return y
Dense layer doing (kernel*x + bias) computation. Args: x: a mtf.Tensor of shape [..., reduced_dims]. output_dim: a mtf.Dimension reduced_dims: an optional list of mtf.Dimensions of x to be reduced. If omitted, we reduce the last dimension. expert_dims: an optional list of mtf.Dimension which represent different experts. Different experts get different weights. use_bias: a boolean, whether to add bias. activation: an optional function from mtf.Tensor to mtf.Tensor master_dtype: a tf.dtype (deprecated - use variable_dtype) slice_dtype: a tf.dtype (deprecated - use variable_dtype) variable_dtype: a mtf.VariableDType name: a string. variable scope. Returns: a mtf.Tensor of shape [..., output_dim].
def reset_course_favorites(self):
    """
    Reset course favorites.

    Reset the current user's course favorites to the default
    automatically generated list of enrolled courses
    """
    path = {}
    data = {}
    params = {}

    self.logger.debug("DELETE /api/v1/users/self/favorites/courses with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("DELETE", "/api/v1/users/self/favorites/courses".format(**path), data=data, params=params, no_data=True)
Reset course favorites. Reset the current user's course favorites to the default automatically generated list of enrolled courses
def changes(self):
    """
    Returns a mapping of items to their effective change objects which include
    the old values and the new. The mapping includes only items whose value or
    raw string value has changed in the context.
    """
    report = {}
    for k, k_changes in self._changes.items():
        if len(k_changes) == 1:
            report[k] = k_changes[0]
        else:
            first = k_changes[0]
            last = k_changes[-1]
            if first.old_value != last.new_value or first.old_raw_str_value != last.new_raw_str_value:
                report[k] = _Change(
                    first.old_value, last.new_value,
                    first.old_raw_str_value, last.new_raw_str_value,
                )
    return report
Returns a mapping of items to their effective change objects which include the old values and the new. The mapping includes only items whose value or raw string value has changed in the context.
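A self-contained sketch of the collapsing logic, with a namedtuple standing in for the module's _Change type (field names assumed from the code above):

from collections import namedtuple

Change = namedtuple("Change", "old_value new_value old_raw_str_value new_raw_str_value")

history = {
    "timeout": [Change(10, 20, "10", "20"), Change(20, 30, "20", "30")],
    "name": [Change("a", "b", "a", "b"), Change("b", "a", "b", "a")],  # net no-op
}

report = {}
for key, changes in history.items():
    first, last = changes[0], changes[-1]
    if first.old_value != last.new_value or first.old_raw_str_value != last.new_raw_str_value:
        report[key] = Change(first.old_value, last.new_value,
                             first.old_raw_str_value, last.new_raw_str_value)

print(report)  # only 'timeout' survives, collapsed to Change(old_value=10, new_value=30, ...)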
def get_single_score(self, point, centroids=None, sd=None):
    """
    Get a single score is a wrapper around the result of classifying a Point against a group of centroids. \

    Attributes:
        observation_score (dict): Original received point and normalised point.

            :Example:

            >>> {"original": [0.40369016, 0.65217912],
                 "normalised": [1.65915104, 3.03896181]}

        nearest_cluster (int): Index of the nearest cluster. If distances match, then lowest numbered cluster \
            wins.

        distances (list (float)): List of distances from the Point to each cluster centroid. E.g:

            >>> [2.38086238, 0.12382605, 2.0362993, 1.43195021]

        centroids (list (list (float))): A list of the current centroids when queried. E.g:

            >>> [[0.23944831, 1.12769265], [1.75621978, 3.11584191], [2.65884563, 1.26494783], \
                 [0.39421099, 2.36783733]]

    :param point: the point to classify
    :type point: pandas.DataFrame
    :param centroids: the centroids
    :type centroids: np.array
    :param sd: the standard deviation
    :type sd: np.array

    :return score: the score for a given observation
    :rtype score: int
    """
    normalised_point = array(point) / array(sd)

    observation_score = {
        'original': point,
        'normalised': normalised_point.tolist(),
    }

    distances = [
        euclidean(normalised_point, centroid)
        for centroid in centroids
    ]

    return int(distances.index(min(distances)))
Get a single score is a wrapper around the result of classifying a Point against a group of centroids. \ Attributes: observation_score (dict): Original received point and normalised point. :Example: >>> { "original": [0.40369016, 0.65217912], "normalised": [1.65915104, 3.03896181]} nearest_cluster (int): Index of the nearest cluster. If distances match, then lowest numbered cluster \ wins. distances (list (float)): List of distances from the Point to each cluster centroid. E.g: >>> [2.38086238, 0.12382605, 2.0362993, 1.43195021] centroids (list (list (float))): A list of the current centroids when queried. E.g: >>> [ [0.23944831, 1.12769265], [1.75621978, 3.11584191], [2.65884563, 1.26494783], \ [0.39421099, 2.36783733] ] :param point: the point to classify :type point: pandas.DataFrame :param centroids: the centroids :type centroids: np.array :param sd: the standard deviation :type sd: np.array :return score: the score for a given observation :rtype score: int
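A self-contained sketch of the same scoring logic using numpy only (np.linalg.norm in place of scipy's euclidean); the standard deviations are hypothetical but chosen so the numbers line up with the docstring example:

import numpy as np

point = np.array([0.40369016, 0.65217912])
sd = np.array([0.24331, 0.21460])                      # hypothetical per-feature std dev
centroids = np.array([[0.23944831, 1.12769265],
                      [1.75621978, 3.11584191],
                      [2.65884563, 1.26494783],
                      [0.39421099, 2.36783733]])

normalised = point / sd                                # roughly [1.659, 3.039]
distances = np.linalg.norm(centroids - normalised, axis=1)
score = int(np.argmin(distances))                      # index of the nearest centroid
print(score, distances.round(3))                       # 1 [2.381 0.124 2.036 1.432]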
def _graph_wrap(func, graph):
    """Constructs function encapsulated in the graph."""
    @wraps(func)
    def _wrapped(*args, **kwargs):
        with graph.as_default():
            return func(*args, **kwargs)
    return _wrapped
Constructs function encapsulated in the graph.
def path_components(path):
    """
    Return the individual components of a given file path
    string (for the local operating system).

    Taken from https://stackoverflow.com/q/21498939/438386
    """
    components = []
    # The loop guarantees that the returned components can be
    # os.path.joined with the path separator and point to the same
    # location:
    while True:
        (new_path, tail) = os.path.split(path)  # Works on any platform
        components.append(tail)
        if new_path == path:  # Root (including drive, on Windows) reached
            break
        path = new_path
    components.append(new_path)
    components.reverse()  # First component first
    return components
Return the individual components of a given file path string (for the local operating system). Taken from https://stackoverflow.com/q/21498939/438386
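Assuming path_components above is in scope, on a POSIX system the round trip looks like this (note the empty component produced when the split reaches the root):

import os

print(os.path.split('/usr/local/bin'))   # ('/usr/local', 'bin')

# path_components('/usr/local/bin') yields ['/', '', 'usr', 'local', 'bin'];
# the empty string comes from splitting '/', and os.path.join reassembles it:
print(os.path.join('/', '', 'usr', 'local', 'bin'))   # /usr/local/bin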
def make_forecasting_frame(x, kind, max_timeshift, rolling_direction):
    """
    Takes a singular time series x and constructs a DataFrame df and target vector y that can be used for a
    time series forecasting task.

    The returned df will contain, for every time stamp in x, the last max_timeshift data points as a new time
    series, such can be used to fit a time series forecasting model.

    See :ref:`forecasting-label` for a detailed description of the rolling process and how the feature matrix
    and target vector are derived.

    The returned time series container df, will contain the rolled time series as a flat data frame, the first
    format from :ref:`data-formats-label`.

    When x is a pandas.Series, the index will be used as id.

    :param x: the singular time series
    :type x: np.array or pd.Series
    :param kind: the kind of the time series
    :type kind: str
    :param rolling_direction: The sign decides, if to roll backwards (if sign is positive) or forwards in "time"
    :type rolling_direction: int
    :param max_timeshift: If not None, shift only up to max_timeshift. If None, shift as often as possible.
    :type max_timeshift: int

    :return: time series container df, target vector y
    :rtype: (pd.DataFrame, pd.Series)
    """
    n = len(x)

    if isinstance(x, pd.Series):
        t = x.index
    else:
        t = range(n)

    df = pd.DataFrame({"id": ["id"] * n,
                       "time": t,
                       "value": x,
                       "kind": kind})

    df_shift = roll_time_series(df,
                                column_id="id",
                                column_sort="time",
                                column_kind="kind",
                                rolling_direction=rolling_direction,
                                max_timeshift=max_timeshift)

    # drop the rows which should actually be predicted
    def mask_first(x):
        """
        this mask returns an array of 1s where the last entry is a 0
        """
        result = np.ones(len(x))
        result[-1] = 0
        return result

    mask = df_shift.groupby(['id'])['id'].transform(mask_first).astype(bool)
    df_shift = df_shift[mask]

    return df_shift, df["value"][1:]
Takes a singular time series x and constructs a DataFrame df and target vector y that can be used for a time series forecasting task. The returned df will contain, for every time stamp in x, the last max_timeshift data points as a new time series, such can be used to fit a time series forecasting model. See :ref:`forecasting-label` for a detailed description of the rolling process and how the feature matrix and target vector are derived. The returned time series container df, will contain the rolled time series as a flat data frame, the first format from :ref:`data-formats-label`. When x is a pandas.Series, the index will be used as id. :param x: the singular time series :type x: np.array or pd.Series :param kind: the kind of the time series :type kind: str :param rolling_direction: The sign decides, if to roll backwards (if sign is positive) or forwards in "time" :type rolling_direction: int :param max_timeshift: If not None, shift only up to max_timeshift. If None, shift as often as possible. :type max_timeshift: int :return: time series container df, target vector y :rtype: (pd.DataFrame, pd.Series)
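A self-contained illustration of the "drop the last row per id" masking step; the ids and values are made up and roll_time_series itself is not reproduced:

import numpy as np
import pandas as pd

df = pd.DataFrame({
    "id": ["id=1", "id=1", "id=2", "id=2", "id=2"],
    "value": [10, 11, 20, 21, 22],
})

def mask_first(x):
    result = np.ones(len(x))
    result[-1] = 0          # mark the last (to-be-predicted) row of each group
    return result

mask = df.groupby("id")["id"].transform(mask_first).astype(bool)
print(df[mask])             # keeps the first row of id=1 and the first two rows of id=2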
def refactor_use_function(self, offset):
    """Use the function at point wherever possible."""
    try:
        refactor = UseFunction(self.project, self.resource, offset)
    except RefactoringError as e:
        raise Fault(
            'Refactoring error: {}'.format(e),
            code=400
        )
    return self._get_changes(refactor)
Use the function at point wherever possible.
def addGenotypePhenotypeSearchOptions(parser):
    """
    Adds options to a g2p searches command line parser.
    """
    parser.add_argument(
        "--phenotype_association_set_id", "-s", default=None,
        help="Only return associations from this phenotype_association_set.")
    parser.add_argument(
        "--feature_ids", "-f", default=None,
        help="Only return associations for these features.")
    parser.add_argument(
        "--phenotype_ids", "-p", default=None,
        help="Only return associations for these phenotypes.")
    parser.add_argument(
        "--evidence", "-E", default=None,
        help="Only return associations to this evidence.")
Adds options to a g2p searches command line parser.
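For a quick standalone check of how one of these options parses (the helper itself needs the ga4gh parser wiring, so a single option is registered directly here):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--feature_ids", "-f", default=None,
                    help="Only return associations for these features.")
args = parser.parse_args(["--feature_ids", "feat1,feat2"])
print(args.feature_ids)  # feat1,feat2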
def execute_command_in_dir(command, directory, verbose=DEFAULTS['v'],
                           prefix="Output: ", env=None):
    """Execute a command in specific working directory"""
    if os.name == 'nt':
        directory = os.path.normpath(directory)

    print_comment("Executing: (%s) in directory: %s" % (command, directory), verbose)
    if env is not None:
        print_comment("Extra env variables %s" % (env), verbose)

    try:
        if os.name == 'nt':
            return_string = subprocess.check_output(command,
                                                    cwd=directory,
                                                    shell=True,
                                                    env=env,
                                                    close_fds=False)
        else:
            return_string = subprocess.check_output(command,
                                                    cwd=directory,
                                                    shell=True,
                                                    stderr=subprocess.STDOUT,
                                                    env=env,
                                                    close_fds=True)

        return_string = return_string.decode("utf-8")  # For Python 3

        print_comment('Command completed. Output: \n %s%s' %
                      (prefix, return_string.replace('\n', '\n ' + prefix)), verbose)
        return return_string
    except AttributeError:
        # For python 2.6...
        print_comment_v('Assuming Python 2.6...')
        return_string = subprocess.Popen(command,
                                         cwd=directory,
                                         shell=True,
                                         stdout=subprocess.PIPE).communicate()[0]
        return return_string
    except subprocess.CalledProcessError as e:
        print_comment_v('*** Problem running command: \n       %s' % e)
        print_comment_v('%s%s' % (prefix, e.output.decode().replace('\n', '\n' + prefix)))
        return None
    except Exception as e:
        print_comment_v('*** Unknown problem running command: %s' % e)
        return None

    print_comment("Finished execution", verbose)
Execute a command in specific working directory
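A minimal standalone illustration of running a command in a specific working directory and capturing its output (POSIX command shown; adjust for Windows):

import subprocess

output = subprocess.check_output("ls", cwd="/tmp", shell=True,
                                 stderr=subprocess.STDOUT)
print(output.decode("utf-8"))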
def as_nddata(self, nddata_class=None):
    "Return a version of ourself as an astropy.nddata.NDData object"
    if nddata_class is None:
        from astropy.nddata import NDData
        nddata_class = NDData

    # transfer header, preserving ordering
    ahdr = self.get_header()
    header = OrderedDict(ahdr.items())

    data = self.get_mddata()

    wcs = None
    if hasattr(self, 'wcs') and self.wcs is not None:
        # for now, assume self.wcs wraps an astropy wcs object
        wcs = self.wcs.wcs

    ndd = nddata_class(data, wcs=wcs, meta=header)
    return ndd
Return a version of ourself as an astropy.nddata.NDData object
def options(self, context, module_options):
    '''
    CONTYPE   Specifies the VNC connection type, choices are: reverse, bind (default: reverse).
    PORT      VNC Port (default: 5900)
    PASSWORD  Specifies the connection password.
    '''
    self.contype = 'reverse'
    self.port = 5900
    self.password = None

    if 'PASSWORD' not in module_options:
        context.log.error('PASSWORD option is required!')
        exit(1)

    if 'CONTYPE' in module_options:
        self.contype = module_options['CONTYPE']

    if 'PORT' in module_options:
        self.port = int(module_options['PORT'])

    self.password = module_options['PASSWORD']

    self.ps_script1 = obfs_ps_script('cme_powershell_scripts/Invoke-PSInject.ps1')
    self.ps_script2 = obfs_ps_script('invoke-vnc/Invoke-Vnc.ps1')
CONTYPE Specifies the VNC connection type, choices are: reverse, bind (default: reverse). PORT VNC Port (default: 5900) PASSWORD Specifies the connection password.
def CheckCStyleCast(filename, clean_lines, linenum, cast_type, pattern, error): """Checks for a C-style cast by looking for the pattern. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. cast_type: The string for the C++ cast to recommend. This is either reinterpret_cast, static_cast, or const_cast, depending. pattern: The regular expression used to find C-style casts. error: The function to call with any errors found. Returns: True if an error was emitted. False otherwise. """ line = clean_lines.elided[linenum] match = Search(pattern, line) if not match: return False # Exclude lines with keywords that tend to look like casts context = line[0:match.start(1) - 1] if Match(r'.*\b(?:sizeof|alignof|alignas|[_A-Z][_A-Z0-9]*)\s*$', context): return False # Try expanding current context to see if we one level of # parentheses inside a macro. if linenum > 0: for i in xrange(linenum - 1, max(0, linenum - 5), -1): context = clean_lines.elided[i] + context if Match(r'.*\b[_A-Z][_A-Z0-9]*\s*\((?:\([^()]*\)|[^()])*$', context): return False # operator++(int) and operator--(int) if context.endswith(' operator++') or context.endswith(' operator--'): return False # A single unnamed argument for a function tends to look like old # style cast. If we see those, don't issue warnings for deprecated # casts, instead issue warnings for unnamed arguments where # appropriate. # # These are things that we want warnings for, since the style guide # explicitly require all parameters to be named: # Function(int); # Function(int) { # ConstMember(int) const; # ConstMember(int) const { # ExceptionMember(int) throw (...); # ExceptionMember(int) throw (...) { # PureVirtual(int) = 0; # [](int) -> bool { # # These are functions of some sort, where the compiler would be fine # if they had named parameters, but people often omit those # identifiers to reduce clutter: # (FunctionPointer)(int); # (FunctionPointer)(int) = value; # Function((function_pointer_arg)(int)) # Function((function_pointer_arg)(int), int param) # <TemplateArgument(int)>; # <(FunctionPointerTemplateArgument)(int)>; remainder = line[match.end(0):] if Match(r'^\s*(?:;|const\b|throw\b|final\b|override\b|[=>{),]|->)', remainder): # Looks like an unnamed parameter. # Don't warn on any kind of template arguments. if Match(r'^\s*>', remainder): return False # Don't warn on assignments to function pointers, but keep warnings for # unnamed parameters to pure virtual functions. Note that this pattern # will also pass on assignments of "0" to function pointers, but the # preferred values for those would be "nullptr" or "NULL". matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder) if matched_zero and matched_zero.group(1) != '0': return False # Don't warn on function pointer declarations. For this we need # to check what came before the "(type)" string. if Match(r'.*\)\s*$', line[0:match.start(0)]): return False # Don't warn if the parameter is named with block comments, e.g.: # Function(int /*unused_param*/); raw_line = clean_lines.raw_lines[linenum] if '/*' in raw_line: return False # Passed all filters, issue warning here. error(filename, linenum, 'readability/function', 3, 'All parameters should be named in a function') return True # At this point, all that should be left is actual casts. error(filename, linenum, 'readability/casting', 4, 'Using C-style cast. Use %s<%s>(...) instead' % (cast_type, match.group(1))) return True
Checks for a C-style cast by looking for the pattern. Args: filename: The name of the current file. clean_lines: A CleansedLines instance containing the file. linenum: The number of the line to check. cast_type: The string for the C++ cast to recommend. This is either reinterpret_cast, static_cast, or const_cast, depending. pattern: The regular expression used to find C-style casts. error: The function to call with any errors found. Returns: True if an error was emitted. False otherwise.
def add_request_handler_chain(self, request_handler_chain):
    # type: (GenericRequestHandlerChain) -> None
    """Checks the type before adding it to the
    request_handler_chains instance variable.

    :param request_handler_chain: Request Handler Chain instance.
    :type request_handler_chain: RequestHandlerChain
    :raises: :py:class:`ask_sdk_runtime.exceptions.DispatchException`
        if a null input is provided or if the input is of invalid type
    """
    if request_handler_chain is None or not isinstance(
            request_handler_chain, GenericRequestHandlerChain):
        raise DispatchException(
            "Request Handler Chain is not a GenericRequestHandlerChain "
            "instance")

    self._request_handler_chains.append(request_handler_chain)
Checks the type before adding it to the request_handler_chains instance variable. :param request_handler_chain: Request Handler Chain instance. :type request_handler_chain: RequestHandlerChain :raises: :py:class:`ask_sdk_runtime.exceptions.DispatchException` if a null input is provided or if the input is of invalid type
def delete_servers(*servers, **options):
    '''
    Removes NTP servers configured on the device.

    :param servers: list of IP Addresses/Domain Names to be removed as NTP
        servers

    :param test (bool): discard loaded config. By default ``test`` is False
        (will not discard the changes)

    :param commit (bool): commit loaded config. By default ``commit`` is True
        (will commit the changes). Useful when the user does not want to commit
        after each change, but after a couple.

    By default this function will commit the config changes (if any). To load
    without committing, use the ``commit`` option. For dry run use the ``test``
    argument.

    CLI Example:

    .. code-block:: bash

        salt '*' ntp.delete_servers 8.8.8.8 time.apple.com
        salt '*' ntp.delete_servers 172.17.17.1 test=True  # only displays the diff
        salt '*' ntp.delete_servers 192.168.0.1 commit=False  # preserves the changes, but does not commit
    '''
    test = options.pop('test', False)
    commit = options.pop('commit', True)
    return __salt__['net.load_template']('delete_ntp_servers',
                                         servers=servers,
                                         test=test,
                                         commit=commit,
                                         inherit_napalm_device=napalm_device)
Removes NTP servers configured on the device.

:param servers: list of IP Addresses/Domain Names to be removed as NTP servers

:param test (bool): discard loaded config. By default ``test`` is False
    (will not discard the changes)

:param commit (bool): commit loaded config. By default ``commit`` is True
    (will commit the changes). Useful when the user does not want to commit
    after each change, but after a couple.

By default this function will commit the config changes (if any). To load
without committing, use the ``commit`` option. For a dry run, use the ``test``
argument.

CLI Example:

.. code-block:: bash

    salt '*' ntp.delete_servers 8.8.8.8 time.apple.com
    salt '*' ntp.delete_servers 172.17.17.1 test=True  # only displays the diff
    salt '*' ntp.delete_servers 192.168.0.1 commit=False  # preserves the changes, but does not commit
def makedirs(self, path, mode=0o777):
    "Super-mkdir: create a leaf directory and all intermediate ones."
    # 0o777 (octal) is the conventional permissive mode; the original default of
    # 0x777 (hex, i.e. decimal 1911) would have requested unintended permission bits.
    self.directory_create(path, mode, [library.DirectoryCreateFlag.parents])
Super-mkdir: create a leaf directory and all intermediate ones.
def is_provider_configured(opts, provider, required_keys=(), log_message=True, aliases=()): ''' Check and return the first matching and fully configured cloud provider configuration. ''' if ':' in provider: alias, driver = provider.split(':') if alias not in opts['providers']: return False if driver not in opts['providers'][alias]: return False for key in required_keys: if opts['providers'][alias][driver].get(key, None) is None: if log_message is True: # There's at least one require configuration key which is not # set. log.warning( "The required '%s' configuration setting is missing " "from the '%s' driver, which is configured under the " "'%s' alias.", key, provider, alias ) return False # If we reached this far, there's a properly configured provider. # Return it! return opts['providers'][alias][driver] for alias, drivers in six.iteritems(opts['providers']): for driver, provider_details in six.iteritems(drivers): if driver != provider and driver not in aliases: continue # If we reached this far, we have a matching provider, let's see if # all required configuration keys are present and not None. skip_provider = False for key in required_keys: if provider_details.get(key, None) is None: if log_message is True: # This provider does not include all necessary keys, # continue to next one. log.warning( "The required '%s' configuration setting is " "missing from the '%s' driver, which is configured " "under the '%s' alias.", key, provider, alias ) skip_provider = True break if skip_provider: continue # If we reached this far, the provider included all required keys return provider_details # If we reached this point, the provider is not configured. return False
Check and return the first matching and fully configured cloud provider configuration.
def addFeature(self, f, conflict="error", missing="other"):
    """
    Add a feature.

    Args:

    - f(Feature): feature to add.
    - conflict(str): if a property doesn't have compatible values/constraints, do:
       - ``"error"``: raise exception.
       - ``"ignore"``: go on.
       - ``"me"``: keep the old value.
       - ``"other"``: set the passed value.
    - missing(str): if a property has not been set yet, do:
       - ``"error"``: raise exception.
       - ``"ignore"``: do nothing.
       - ``"me"``: do nothing.
       - ``"other"``: set the passed value.
    """

    OPTIONS = ["error", "ignore", "me", "other"]
    assert missing in OPTIONS, "Invalid value in `missing`."
    assert conflict in OPTIONS, "Invalid value in `conflict`."

    if f.prop not in self.props and missing == "error":
        raise Exception("Property has not been set.")
    elif f.prop not in self.props and missing in ["ignore", "first"]:
        return

    if isinstance(f.value, int) or isinstance(f.value, float):
        if f.operator == "=":
            inter1 = (f, f)
        elif f.operator[0] == "<":
            inter1 = (None, f)
        elif f.operator[0] == ">":
            inter1 = (f, None)
        inter0 = self.props.get(f.prop, (None, None))
        try:
            self.props[f.prop] = Features._applyInter(inter0, inter1, conflict)
        except Exception as e:
            raise RADLParseException("%s. Involved features: %s" % (e, [str(f0) for f0 in inter0]),
                                     line=f.line)
    elif isinstance(f, SoftFeatures):
        self.props.setdefault(f.prop, []).append(f)
    elif f.operator == "contains":
        if f.prop in self.props and f.value.getValue("name") in self.props[f.prop]:
            feature = self.props[f.prop][f.value.getValue("name")].clone()
            for f0 in f.value.features:
                feature.value.addFeature(f0, conflict, missing)
            self.props[f.prop][f.value.getValue("name")] = feature
        else:
            self.props.setdefault(f.prop, {})[f.value.getValue("name")] = f
    else:
        value0 = self.props.get(f.prop, None)
        if not value0 or (conflict == "other"):
            self.props[f.prop] = f
        elif value0.value != f.value and conflict == "error":
            raise RADLParseException("Conflict adding `%s` because `%s` is already set and conflict is"
                                     " %s" % (f, value0, conflict), line=f.line)
Add a feature.

Args:

- f(Feature): feature to add.
- conflict(str): if a property doesn't have compatible values/constraints, do:
   - ``"error"``: raise exception.
   - ``"ignore"``: go on.
   - ``"me"``: keep the old value.
   - ``"other"``: set the passed value.
- missing(str): if a property has not been set yet, do:
   - ``"error"``: raise exception.
   - ``"ignore"``: do nothing.
   - ``"me"``: do nothing.
   - ``"other"``: set the passed value.
def _compute_include_paths(self, target): """Computes the set of paths that thrifty uses to lookup imports. The IDL files under these paths are not compiled, but they are required to compile downstream IDL files. :param target: the JavaThriftyLibrary target to compile. :return: an ordered set of directories to pass along to thrifty. """ paths = OrderedSet() paths.add(os.path.join(get_buildroot(), target.target_base)) def collect_paths(dep): if not dep.has_sources('.thrift'): return paths.add(os.path.join(get_buildroot(), dep.target_base)) collect_paths(target) target.walk(collect_paths) return paths
Computes the set of paths that thrifty uses to lookup imports. The IDL files under these paths are not compiled, but they are required to compile downstream IDL files. :param target: the JavaThriftyLibrary target to compile. :return: an ordered set of directories to pass along to thrifty.
def edit_team_push_restrictions(self, *teams):
    """
    :calls: `POST /repos/:owner/:repo/branches/:branch/protection/restrictions <https://developer.github.com/v3/repos/branches>`_
    :teams: list of strings
    """
    # every element must be a string (the original duplicated this isinstance check)
    assert all(isinstance(element, (str, unicode)) for element in teams), teams
    headers, data = self._requester.requestJsonAndCheck(
        "POST",
        self.protection_url + "/restrictions/teams",
        input=teams
    )
:calls: `POST /repos/:owner/:repo/branches/:branch/protection/restrictions <https://developer.github.com/v3/repos/branches>`_ :teams: list of strings
def grant_user_access(self, user, db_names, strict=True): """ Gives access to the databases listed in `db_names` to the user. You may pass in either a single db or a list of dbs. If any of the databases do not exist, a NoSuchDatabase exception will be raised, unless you specify `strict=False` in the call. """ user = utils.get_name(user) uri = "/%s/%s/databases" % (self.uri_base, user) db_names = self._get_db_names(db_names, strict=strict) dbs = [{"name": db_name} for db_name in db_names] body = {"databases": dbs} try: resp, resp_body = self.api.method_put(uri, body=body) except exc.NotFound as e: raise exc.NoSuchDatabaseUser("User '%s' does not exist." % user)
Gives access to the databases listed in `db_names` to the user. You may pass in either a single db or a list of dbs. If any of the databases do not exist, a NoSuchDatabase exception will be raised, unless you specify `strict=False` in the call.
def beta_diversity(self, metric="braycurtis", rank="auto"): """Calculate the diversity between two communities. Parameters ---------- metric : {'jaccard', 'braycurtis', 'cityblock'} The distance metric to calculate. rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. Returns ------- skbio.stats.distance.DistanceMatrix, a distance matrix. """ if metric not in ("jaccard", "braycurtis", "cityblock"): raise OneCodexException( "For beta diversity, metric must be one of: jaccard, braycurtis, cityblock" ) # needs read counts, not relative abundances if self._guess_normalized(): raise OneCodexException("Beta diversity requires unnormalized read counts.") df = self.to_df(rank=rank, normalize=False) counts = [] for c_id in df.index: counts.append(df.loc[c_id].tolist()) return skbio.diversity.beta_diversity(metric, counts, df.index.tolist())
Calculate the diversity between two communities. Parameters ---------- metric : {'jaccard', 'braycurtis', 'cityblock'} The distance metric to calculate. rank : {'auto', 'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional Analysis will be restricted to abundances of taxa at the specified level. Returns ------- skbio.stats.distance.DistanceMatrix, a distance matrix.
def read_json(self): """ read metadata from json and set all the found properties. :return: the read metadata :rtype: dict """ with reading_ancillary_files(self): metadata = super(ImpactLayerMetadata, self).read_json() if 'provenance' in metadata: for provenance_step in metadata['provenance']: try: title = provenance_step['title'] if 'IF Provenance' in title: self.append_if_provenance_step( provenance_step['title'], provenance_step['description'], provenance_step['time'], provenance_step['data'] ) else: self.append_provenance_step( provenance_step['title'], provenance_step['description'], provenance_step['time'], ) except KeyError: # we want to get as much as we can without raising # errors pass if 'summary_data' in metadata: self.summary_data = metadata['summary_data'] return metadata
read metadata from json and set all the found properties. :return: the read metadata :rtype: dict
def angleDiff(angle1, angle2, take_smaller=True): """ smallest difference between 2 angles code from http://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles """ a = np.arctan2(np.sin(angle1 - angle2), np.cos(angle1 - angle2)) if isinstance(a, np.ndarray) and take_smaller: a = np.abs(a) # take smaller of both possible angles: ab = np.abs(np.pi - a) with np.errstate(invalid='ignore'): i = a > ab a[i] = ab[i] return a
smallest difference between 2 angles code from http://stackoverflow.com/questions/1878907/the-smallest-difference-between-2-angles
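A minimal usage sketch for angleDiff, assuming the function above is importable in the current namespace (the commented import path is hypothetical). It shows the signed scalar result and the take_smaller behaviour for array inputs.

import numpy as np

# Hypothetical import; adjust to wherever angleDiff actually lives.
# from imgprocessor.angles import angleDiff

# Scalar inputs: the result is signed and wraps correctly across 0/2*pi.
print(np.degrees(angleDiff(np.radians(350.0), np.radians(10.0))))   # ~ -20.0

# Array inputs with take_smaller=True: the smaller of the angle and its
# supplement is returned, so two directions 160 deg apart report 20 deg.
a1 = np.array([np.radians(170.0)])
a2 = np.array([np.radians(10.0)])
print(np.degrees(angleDiff(a1, a2)))                                 # ~ [20.0]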
def get_default_jvm_opts(tmp_dir=None, parallel_gc=False): """Retrieve default JVM tuning options Avoids issues with multiple spun up Java processes running into out of memory errors. Parallel GC can use a lot of cores on big machines and primarily helps reduce task latency and responsiveness which are not needed for batch jobs. https://github.com/bcbio/bcbio-nextgen/issues/532#issuecomment-50989027 https://wiki.csiro.au/pages/viewpage.action?pageId=545034311 http://stackoverflow.com/questions/9738911/javas-serial-garbage-collector-performing-far-better-than-other-garbage-collect However, serial GC causes issues with Spark local runs so we use parallel for those cases: https://github.com/broadinstitute/gatk/issues/3605#issuecomment-332370070 """ opts = ["-XX:+UseSerialGC"] if not parallel_gc else [] if tmp_dir: opts.append("-Djava.io.tmpdir=%s" % tmp_dir) return opts
Retrieve default JVM tuning options Avoids issues with multiple spun up Java processes running into out of memory errors. Parallel GC can use a lot of cores on big machines and primarily helps reduce task latency and responsiveness which are not needed for batch jobs. https://github.com/bcbio/bcbio-nextgen/issues/532#issuecomment-50989027 https://wiki.csiro.au/pages/viewpage.action?pageId=545034311 http://stackoverflow.com/questions/9738911/javas-serial-garbage-collector-performing-far-better-than-other-garbage-collect However, serial GC causes issues with Spark local runs so we use parallel for those cases: https://github.com/broadinstitute/gatk/issues/3605#issuecomment-332370070
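A small usage sketch, assuming get_default_jvm_opts is importable; the temporary directory is made up for illustration.

# Serial GC is requested for ordinary batch jobs; parallel GC (the default
# collector) is kept for Spark-style local runs.
jvm_opts = get_default_jvm_opts(tmp_dir="/tmp/bcbiotx")   # hypothetical tmp dir
print(jvm_opts)    # ['-XX:+UseSerialGC', '-Djava.io.tmpdir=/tmp/bcbiotx']

spark_opts = get_default_jvm_opts(parallel_gc=True)
print(spark_opts)  # [] -- no serial GC flag, no tmpdir override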
def get_structure_by_id(self, cod_id, **kwargs): """ Queries the COD for a structure by id. Args: cod_id (int): COD id. kwargs: All kwargs supported by :func:`pymatgen.core.structure.Structure.from_str`. Returns: A Structure. """ r = requests.get("http://www.crystallography.net/cod/%s.cif" % cod_id) return Structure.from_str(r.text, fmt="cif", **kwargs)
Queries the COD for a structure by id. Args: cod_id (int): COD id. kwargs: All kwargs supported by :func:`pymatgen.core.structure.Structure.from_str`. Returns: A Structure.
def dec(self,*args,**kwargs): """ NAME: dec PURPOSE: return the declination INPUT: t - (optional) time at which to get dec obs=[X,Y,Z] - (optional) position of observer (in kpc) (default=Object-wide default) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= distance in kpc corresponding to R=1. (default=Object-wide default) OUTPUT: dec(t) HISTORY: 2011-02-23 - Written - Bovy (NYU) """ _check_roSet(self,kwargs,'dec') radec= self._radec(*args,**kwargs) return radec[:,1]
NAME: dec PURPOSE: return the declination INPUT: t - (optional) time at which to get dec obs=[X,Y,Z] - (optional) position of observer (in kpc) (default=Object-wide default) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= distance in kpc corresponding to R=1. (default=Object-wide default) OUTPUT: dec(t) HISTORY: 2011-02-23 - Written - Bovy (NYU)
def record_process(self, process, prg=''): """ log a process or program - log a physical program (.py, .bat, .exe) """ self._log(self.logFileProcess, force_to_string(process), prg)
log a process or program - log a physical program (.py, .bat, .exe)
def put_container(self, path): """ Creates a container at the specified path, creating any necessary intermediate containers. :param path: str or Path instance :raises ValueError: A component of path is a field name. """ path = make_path(path) container = self for segment in path: try: container = container._values[segment] if not isinstance(container, ValueTree): raise ValueError() except KeyError: valuetree = ValueTree() container._values[segment] = valuetree container = valuetree
Creates a container at the specified path, creating any necessary intermediate containers. :param path: str or Path instance :raises ValueError: A component of path is a field name.
def MAKE_WPARAM(wParam): """ Convert arguments to the WPARAM type. Used automatically by SendMessage, PostMessage, etc. You shouldn't need to call this function. """ wParam = ctypes.cast(wParam, LPVOID).value if wParam is None: wParam = 0 return wParam
Convert arguments to the WPARAM type. Used automatically by SendMessage, PostMessage, etc. You shouldn't need to call this function.
def _build_dependent_model_list(self, obj_schema): ''' Helper function to build the list of models the given object schema is referencing. ''' dep_models_list = [] if obj_schema: obj_schema['type'] = obj_schema.get('type', 'object') if obj_schema['type'] == 'array': dep_models_list.extend(self._build_dependent_model_list(obj_schema.get('items', {}))) else: ref = obj_schema.get('$ref') if ref: ref_obj_model = ref.split("/")[-1] ref_obj_schema = self._models().get(ref_obj_model) dep_models_list.extend(self._build_dependent_model_list(ref_obj_schema)) dep_models_list.extend([ref_obj_model]) else: # need to walk each property object properties = obj_schema.get('properties') if properties: for _, prop_obj_schema in six.iteritems(properties): dep_models_list.extend(self._build_dependent_model_list(prop_obj_schema)) return list(set(dep_models_list))
Helper function to build the list of models the given object schema is referencing.
def set_learning_rate(self, lr): """Sets a new learning rate of the optimizer. Parameters ---------- lr : float The new learning rate of the optimizer. """ if not isinstance(self._optimizer, opt.Optimizer): raise UserWarning("Optimizer has to be defined before its learning " "rate is mutated.") else: self._optimizer.set_learning_rate(lr)
Sets a new learning rate of the optimizer. Parameters ---------- lr : float The new learning rate of the optimizer.
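A short sketch of adjusting the learning rate mid-training, assuming MXNet Gluon is installed and that this method sits on a gluon.Trainer-like wrapper; the network and the new rate are illustrative.

from mxnet import gluon

net = gluon.nn.Dense(1)
net.initialize()
trainer = gluon.Trainer(net.collect_params(), "sgd", {"learning_rate": 0.1})

# e.g. decay the step size after a plateau in validation loss
trainer.set_learning_rate(0.01)
print(trainer.learning_rate)   # 0.01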
def unquote(s): """unquote('abc%20def') -> 'abc def'.""" res = s.split('%') # fastpath if len(res) == 1: return s s = res[0] for item in res[1:]: try: s += _hextochr[item[:2]] + item[2:] except KeyError: s += '%' + item except UnicodeDecodeError: s += chr(int(item[:2], 16)) + item[2:] return s
unquote('abc%20def') -> 'abc def'.
def print_multi_line(content, force_single_line, sort_key):
    """
    The 'sort_key' parameter is only used in 'dict' mode.
    """

    global last_output_lines
    global overflow_flag
    global is_atty

    if not is_atty:
        if isinstance(content, list):
            for line in content:
                print(line)
        elif isinstance(content, dict):
            for k, v in sorted(content.items(), key=sort_key):
                print("{}: {}".format(k, v))
        else:
            raise TypeError("Expecting types: list, dict. Got: {}".format(type(content)))
        return

    columns, rows = get_terminal_size()
    lines = lines_of_content(content, columns)
    if force_single_line is False and lines > rows:
        overflow_flag = True
    elif force_single_line is True and len(content) > rows:
        overflow_flag = True

    # make sure the cursor starts at the leftmost column
    print("\b" * columns, end="")

    if isinstance(content, list):
        for line in content:
            _line = preprocess(line)
            print_line(_line, columns, force_single_line)
    elif isinstance(content, dict):
        for k, v in sorted(content.items(), key=sort_key):
            _k, _v = map(preprocess, (k, v))
            print_line("{}: {}".format(_k, _v), columns, force_single_line)
    else:
        raise TypeError("Expecting types: list, dict. Got: {}".format(type(content)))

    # print extra blank space to wipe the remainder of the last output
    print(" " * columns * (last_output_lines - lines), end="")

    # move the cursor back to the original output position
    print(magic_char * (max(last_output_lines, lines) - 1), end="")
    sys.stdout.flush()
    last_output_lines = lines
The 'sort_key' parameter is only used in 'dict' mode.
def uploadFiles(self): """ Uploads all the files in 'filesToSync' """ for each_file in self.filesToSync: self.uploadFile(each_file["name"], each_file["ispickle"], each_file["at_home"])
Uploads all the files in 'filesToSync'
def _compute_closed_central_moments(self, central_from_raw_exprs, n_counter, k_counter): """ Computes parametric expressions (e.g. in terms of mean, variance, covariances) for all central moments up to max_order + 1 order. :param central_from_raw_exprs: :param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments :type n_counter: list[:class:`~means.core.descriptors.Moment`] :param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments :type k_counter: list[:class:`~means.core.descriptors.Moment`] :return: a vector of parametric expression for central moments """ n_species = len([None for pm in k_counter if pm.order == 1]) covariance_matrix = sp.Matrix(n_species, n_species, lambda x,y: self._get_covariance_symbol(n_counter,x,y)) positive_n_counter = [n for n in n_counter if n.order > 1] out_mat = [self._compute_one_closed_central_moment(n, covariance_matrix) for n in positive_n_counter ] return sp.Matrix(out_mat)
Computes parametric expressions (e.g. in terms of mean, variance, covariances) for all central moments up to max_order + 1 order. :param central_from_raw_exprs: :param n_counter: a list of :class:`~means.core.descriptors.Moment`\s representing central moments :type n_counter: list[:class:`~means.core.descriptors.Moment`] :param k_counter: a list of :class:`~means.core.descriptors.Moment`\s representing raw moments :type k_counter: list[:class:`~means.core.descriptors.Moment`] :return: a vector of parametric expression for central moments
def compute_lower_upper_errors(sample, num_sigma=1):
    """
    computes the upper and lower sigma from the median value.
    This function gives good error estimates for skewed pdf's
    :param sample: 1-D sample
    :return: median, lower_sigma, upper_sigma
    """
    if num_sigma > 3:
        raise ValueError("Number of sigma-constraints restricted to three. %s not valid" % num_sigma)
    num = len(sample)
    num_threshold1 = int(round((num-1)*0.833))
    num_threshold2 = int(round((num-1)*0.977249868))
    num_threshold3 = int(round((num-1)*0.998650102))

    median = np.median(sample)
    sorted_sample = np.sort(sample)
    if num_sigma > 0:
        upper_sigma1 = sorted_sample[num_threshold1-1]
        lower_sigma1 = sorted_sample[num-num_threshold1-1]
    else:
        return median, [[]]
    if num_sigma > 1:
        upper_sigma2 = sorted_sample[num_threshold2-1]
        lower_sigma2 = sorted_sample[num-num_threshold2-1]
    else:
        return median, [[median-lower_sigma1, upper_sigma1-median]]
    if num_sigma > 2:
        upper_sigma3 = sorted_sample[num_threshold3-1]
        lower_sigma3 = sorted_sample[num-num_threshold3-1]
        return median, [[median-lower_sigma1, upper_sigma1-median],
                        [median-lower_sigma2, upper_sigma2-median],
                        [median-lower_sigma3, upper_sigma3-median]]
    else:
        return median, [[median-lower_sigma1, upper_sigma1-median],
                        [median-lower_sigma2, upper_sigma2-median]]
computes the upper and lower sigma from the median value.
This function gives good error estimates for skewed pdf's
:param sample: 1-D sample
:return: median, lower_sigma, upper_sigma
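A usage sketch on a deliberately skewed sample, assuming the function above and numpy are importable; the lognormal parameters are arbitrary.

import numpy as np

rng = np.random.default_rng(0)
sample = rng.lognormal(mean=0.0, sigma=0.5, size=10000)   # skewed pdf

median, sigma_bounds = compute_lower_upper_errors(sample, num_sigma=2)
(lo1, up1), (lo2, up2) = sigma_bounds
# for a right-skewed distribution the upper error exceeds the lower error
print("median = %.3f (-%.3f / +%.3f at 1 sigma)" % (median, lo1, up1))
print("         (-%.3f / +%.3f at 2 sigma)" % (lo2, up2))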
def last(args, dbtype=None): """ %prog database.fasta query.fasta Run LAST by calling LASTDB and LASTAL. LAST program available: <http://last.cbrc.jp> Works with LAST-719. """ p = OptionParser(last.__doc__) p.add_option("--dbtype", default="nucl", choices=("nucl", "prot"), help="Molecule type of subject database") p.add_option("--path", help="Specify LAST path") p.add_option("--mask", default=False, action="store_true", help="Invoke -c in lastdb") p.add_option("--format", default="BlastTab", choices=("TAB", "MAF", "BlastTab", "BlastTab+"), help="Output format") p.add_option("--minlen", default=0, type="int", help="Filter alignments by how many bases match") p.add_option("--minid", default=0, type="int", help="Minimum sequence identity") p.set_cpus() p.set_params() opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) subject, query = args path = opts.path cpus = opts.cpus if not dbtype: dbtype = opts.dbtype getpath = lambda x: op.join(path, x) if path else x lastdb_bin = getpath("lastdb") lastal_bin = getpath("lastal") subjectdb = subject.rsplit(".", 1)[0] run_lastdb(infile=subject, outfile=subjectdb + ".prj", mask=opts.mask, \ lastdb_bin=lastdb_bin, dbtype=dbtype) u = 2 if opts.mask else 0 cmd = "{0} -u {1}".format(lastal_bin, u) cmd += " -P {0} -i3G".format(cpus) cmd += " -f {0}".format(opts.format) cmd += " {0} {1}".format(subjectdb, query) minlen = opts.minlen minid = opts.minid extra = opts.extra assert minid != 100, "Perfect match not yet supported" mm = minid / (100 - minid) if minlen: extra += " -e{0}".format(minlen) if minid: extra += " -r1 -q{0} -a{0} -b{0}".format(mm) if extra: cmd += " " + extra.strip() lastfile = get_outfile(subject, query, suffix="last") sh(cmd, outfile=lastfile)
%prog database.fasta query.fasta Run LAST by calling LASTDB and LASTAL. LAST program available: <http://last.cbrc.jp> Works with LAST-719.
def record(self):
    # type: () -> bytes
    '''
    A method to generate the bytes representing this UDF Long AD.

    Parameters:
     None.
    Returns:
     The bytes representing this UDF Long AD.
    '''
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('UDF Long AD not initialized')

    return struct.pack(self.FMT, self.extent_length, self.log_block_num,
                       self.part_ref_num, self.impl_use)
A method to generate the bytes representing this UDF Long AD.

Parameters:
 None.
Returns:
 The bytes representing this UDF Long AD.
def _getID(self):
    """Get the ID values as a tuple annotated by sqlPrimary"""
    id = []
    for key in self._sqlPrimary:
        value = self.__dict__[key]
        if isinstance(value, Forgetter):
            # It's another object, we store only the ID
            if value._new:
                # It's a new object too, it must be saved!
                value.save()
            try:
                (value,) = value._getID()
            except Exception:
                # Raising a bare string is invalid in Python 3; raise a real exception.
                raise ValueError(
                    "Unsupported: Part %s of %s primary key is a reference to %s, "
                    "with multiple-primary-key %s" %
                    (key, self.__class__, value.__class__, value))
        id.append(value)
    return id
Get the ID values as a tuple annotated by sqlPrimary
def getSystemVariable(self, remote, name): """Get single system variable from CCU / Homegear""" if self._server is not None: return self._server.getSystemVariable(remote, name)
Get single system variable from CCU / Homegear
def point_image_value(image, xy, scale=1): """Extract the output value from a calculation at a point""" return getinfo(ee.Image(image).reduceRegion( reducer=ee.Reducer.first(), geometry=ee.Geometry.Point(xy), scale=scale))
Extract the output value from a calculation at a point
def rename_categories(self, new_categories, inplace=False): """ Rename categories. Parameters ---------- new_categories : list-like, dict-like or callable * list-like: all items must be unique and the number of items in the new categories must match the existing number of categories. * dict-like: specifies a mapping from old categories to new. Categories not contained in the mapping are passed through and extra categories in the mapping are ignored. .. versionadded:: 0.21.0 * callable : a callable that is called on all items in the old categories and whose return values comprise the new categories. .. versionadded:: 0.23.0 .. warning:: Currently, Series are considered list like. In a future version of pandas they'll be considered dict-like. inplace : bool, default False Whether or not to rename the categories inplace or return a copy of this categorical with renamed categories. Returns ------- cat : Categorical or None With ``inplace=False``, the new categorical is returned. With ``inplace=True``, there is no return value. Raises ------ ValueError If new categories are list-like and do not have the same number of items than the current categories or do not validate as categories See Also -------- reorder_categories add_categories remove_categories remove_unused_categories set_categories Examples -------- >>> c = pd.Categorical(['a', 'a', 'b']) >>> c.rename_categories([0, 1]) [0, 0, 1] Categories (2, int64): [0, 1] For dict-like ``new_categories``, extra keys are ignored and categories not in the dictionary are passed through >>> c.rename_categories({'a': 'A', 'c': 'C'}) [A, A, b] Categories (2, object): [A, b] You may also provide a callable to create the new categories >>> c.rename_categories(lambda x: x.upper()) [A, A, B] Categories (2, object): [A, B] """ inplace = validate_bool_kwarg(inplace, 'inplace') cat = self if inplace else self.copy() if isinstance(new_categories, ABCSeries): msg = ("Treating Series 'new_categories' as a list-like and using " "the values. In a future version, 'rename_categories' will " "treat Series like a dictionary.\n" "For dict-like, use 'new_categories.to_dict()'\n" "For list-like, use 'new_categories.values'.") warn(msg, FutureWarning, stacklevel=2) new_categories = list(new_categories) if is_dict_like(new_categories): cat.categories = [new_categories.get(item, item) for item in cat.categories] elif callable(new_categories): cat.categories = [new_categories(item) for item in cat.categories] else: cat.categories = new_categories if not inplace: return cat
Rename categories. Parameters ---------- new_categories : list-like, dict-like or callable * list-like: all items must be unique and the number of items in the new categories must match the existing number of categories. * dict-like: specifies a mapping from old categories to new. Categories not contained in the mapping are passed through and extra categories in the mapping are ignored. .. versionadded:: 0.21.0 * callable : a callable that is called on all items in the old categories and whose return values comprise the new categories. .. versionadded:: 0.23.0 .. warning:: Currently, Series are considered list like. In a future version of pandas they'll be considered dict-like. inplace : bool, default False Whether or not to rename the categories inplace or return a copy of this categorical with renamed categories. Returns ------- cat : Categorical or None With ``inplace=False``, the new categorical is returned. With ``inplace=True``, there is no return value. Raises ------ ValueError If new categories are list-like and do not have the same number of items than the current categories or do not validate as categories See Also -------- reorder_categories add_categories remove_categories remove_unused_categories set_categories Examples -------- >>> c = pd.Categorical(['a', 'a', 'b']) >>> c.rename_categories([0, 1]) [0, 0, 1] Categories (2, int64): [0, 1] For dict-like ``new_categories``, extra keys are ignored and categories not in the dictionary are passed through >>> c.rename_categories({'a': 'A', 'c': 'C'}) [A, A, b] Categories (2, object): [A, b] You may also provide a callable to create the new categories >>> c.rename_categories(lambda x: x.upper()) [A, A, B] Categories (2, object): [A, B]
def create_expanded_design_for_mixing(design,
                                      draw_list,
                                      mixing_pos,
                                      rows_to_mixers):
    """
    Parameters
    ----------
    design : 2D ndarray.
        All elements should be ints, floats, or longs. Each row corresponds to
        an available alternative for a given individual. There should be one
        column per index coefficient being estimated.
    draw_list : list of 2D ndarrays.
        All numpy arrays should have the same number of columns (`num_draws`)
        and the same number of rows (`num_mixers`). All elements of the numpy
        arrays should be ints, floats, or longs. Should have as many elements
        as there are elements in `mixing_pos`.
    mixing_pos : list of ints.
        Each element should denote a column in design whose associated index
        coefficient is being treated as a random variable.
    rows_to_mixers : 2D scipy sparse array.
        All elements should be zeros and ones. Will map the rows of the design
        matrix to the particular units that the mixing is being performed over.
        Note that in the case of panel data, this matrix will be different from
        `rows_to_obs`.

    Returns
    -------
    design_3d : 3D numpy array.
        Each slice of the third dimension will contain a copy of the design
        matrix corresponding to a given draw of the random variables being
        mixed over.
    """
    if len(mixing_pos) != len(draw_list):
        msg = "mixing_pos == {}".format(mixing_pos)
        msg_2 = "len(draw_list) == {}".format(len(draw_list))
        raise ValueError(msg + "\n" + msg_2)

    # Determine the number of draws being used. Note the next line assumes an
    # equal number of draws from each random coefficient's mixing distribution.
    num_draws = draw_list[0].shape[1]
    orig_num_vars = design.shape[1]

    # Initialize the expanded design matrix that replicates the columns of the
    # variables that are being mixed over.
    arrays_for_mixing = design[:, mixing_pos]
    expanded_design = np.concatenate((design, arrays_for_mixing),
                                     axis=1).copy()
    design_3d = np.repeat(expanded_design[:, None, :],
                          repeats=num_draws,
                          axis=1)

    # Multiply the columns that are being mixed over by their appropriate
    # draws from the normal distribution
    for pos, idx in enumerate(mixing_pos):
        rel_draws = draw_list[pos]
        # Note that rel_long_draws will be a dense, 2D numpy array of shape
        # (num_rows, num_draws).
        rel_long_draws = rows_to_mixers.dot(rel_draws)

        # Create the actual column in design 3d that should be used.
        # It should be the multiplication of the draws random variable and the
        # independent variable associated with the param that is being mixed.
        # NOTE THE IMPLICIT ASSUMPTION THAT ONLY INDEX COEFFICIENTS ARE MIXED.
        # Also, the final axis is selected on because the final axis specifies
        # the particular variable being multiplied by the draws. We select with
        # orig_num_vars + pos since the variables being mixed over were added,
        # in order, so we simply need to start at the first position after all
        # the original variables (i.e. at orig_num_vars) and iterate.
        design_3d[:, :, orig_num_vars + pos] *= rel_long_draws

    return design_3d
Parameters
----------
design : 2D ndarray.
    All elements should be ints, floats, or longs. Each row corresponds to
    an available alternative for a given individual. There should be one
    column per index coefficient being estimated.
draw_list : list of 2D ndarrays.
    All numpy arrays should have the same number of columns (`num_draws`)
    and the same number of rows (`num_mixers`). All elements of the numpy
    arrays should be ints, floats, or longs. Should have as many elements
    as there are elements in `mixing_pos`.
mixing_pos : list of ints.
    Each element should denote a column in design whose associated index
    coefficient is being treated as a random variable.
rows_to_mixers : 2D scipy sparse array.
    All elements should be zeros and ones. Will map the rows of the design
    matrix to the particular units that the mixing is being performed over.
    Note that in the case of panel data, this matrix will be different from
    `rows_to_obs`.

Returns
-------
design_3d : 3D numpy array.
    Each slice of the third dimension will contain a copy of the design
    matrix corresponding to a given draw of the random variables being
    mixed over.
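A dimension-checking sketch with made-up data, assuming the function above, numpy, and scipy are importable: 4 design rows belonging to 2 mixing units, 3 index variables, the coefficient on column 1 mixed, and 5 draws per unit.

import numpy as np
from scipy.sparse import csr_matrix

design = np.arange(12, dtype=float).reshape(4, 3)           # (num_rows, num_vars)
rows_to_mixers = csr_matrix(np.array([[1, 0],
                                      [1, 0],
                                      [0, 1],
                                      [0, 1]], dtype=float))
draws = np.random.normal(size=(2, 5))                        # (num_mixers, num_draws)

design_3d = create_expanded_design_for_mixing(design,
                                              [draws],
                                              mixing_pos=[1],
                                              rows_to_mixers=rows_to_mixers)
print(design_3d.shape)   # (4, 5, 4): rows x draws x (original vars + 1 mixed copy)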
def tag(self, sbo): """Tag with color green if package already installed, color yellow for packages to upgrade and color red if not installed. """ # split sbo name with version and get name sbo_name = "-".join(sbo.split("-")[:-1]) find = GetFromInstalled(sbo_name).name() if find_package(sbo, self.meta.pkg_path): paint = self.meta.color["GREEN"] self.count_ins += 1 if "--rebuild" in self.flag: self.count_upg += 1 elif sbo_name == find: paint = self.meta.color["YELLOW"] self.count_upg += 1 else: paint = self.meta.color["RED"] self.count_uni += 1 return paint
Tag with color green if package already installed, color yellow for packages to upgrade and color red if not installed.
def register_variable(self, v, key, eternal=True):
    """
    Register a value with the variable tracking system

    :param v:       The BVS to register
    :param key:     A tuple to register the variable under
    :param eternal: Whether this is an eternal variable, default True. If False, an incrementing counter will
                    be appended to the key.
    """
    if type(key) is not tuple:
        raise TypeError("Variable tracking key must be a tuple")
    if eternal:
        self.eternal_tracked_variables[key] = v
    else:
        self.temporal_tracked_variables = dict(self.temporal_tracked_variables)
        ctrkey = key + (None,)
        ctrval = self.temporal_tracked_variables.get(ctrkey, 0) + 1
        self.temporal_tracked_variables[ctrkey] = ctrval
        tempkey = key + (ctrval,)
        self.temporal_tracked_variables[tempkey] = v
Register a value with the variable tracking system

:param v:       The BVS to register
:param key:     A tuple to register the variable under
:param eternal: Whether this is an eternal variable, default True. If False, an incrementing counter will
                be appended to the key.
def publish_extensions(self, handler): """Publish the Media RSS Feed elements as XML.""" if isinstance(self.media_content, list): [PyRSS2Gen._opt_element(handler, "media:content", mc_element) for mc_element in self.media_content] else: PyRSS2Gen._opt_element(handler, "media:content", self.media_content) if hasattr(self, 'media_title'): PyRSS2Gen._opt_element(handler, "media:title", self.media_title) if hasattr(self, 'media_text'): PyRSS2Gen._opt_element(handler, "media:text", self.media_text)
Publish the Media RSS Feed elements as XML.
def _count(self, X, Y): """Count and smooth feature occurrences.""" self.feature_count_ += safe_sparse_dot(Y.T, X) self.class_count_ += Y.sum(axis=0)
Count and smooth feature occurrences.
def get_commit_req(self): """Lazy commit request getter.""" if not self.commit_req: self.commit_req = datastore.CommitRequest() self.commit_req.transaction = self.tx return self.commit_req
Lazy commit request getter.
def _format_return_timestamps(self, return_timestamps=None): """ Format the passed in return timestamps value as a numpy array. If no value is passed, build up array of timestamps based upon model start and end times, and the 'saveper' value. """ if return_timestamps is None: # Build based upon model file Start, Stop times and Saveper # Vensim's standard is to expect that the data set includes the `final time`, # so we have to add an extra period to make sure we get that value in what # numpy's `arange` gives us. return_timestamps_array = np.arange( self.components.initial_time(), self.components.final_time() + self.components.saveper(), self.components.saveper(), dtype=np.float64 ) elif inspect.isclass(range) and isinstance(return_timestamps, range): return_timestamps_array = np.array(return_timestamps, ndmin=1) elif isinstance(return_timestamps, (list, int, float, np.ndarray)): return_timestamps_array = np.array(return_timestamps, ndmin=1) elif isinstance(return_timestamps, _pd.Series): return_timestamps_array = return_timestamps.as_matrix() else: raise TypeError('`return_timestamps` expects a list, array, pandas Series, ' 'or numeric value') return return_timestamps_array
Format the passed in return timestamps value as a numpy array. If no value is passed, build up array of timestamps based upon model start and end times, and the 'saveper' value.
def json_2_team(json_obj): """ transform JSON obj coming from Ariane to ariane_clip3 object :param json_obj: the JSON obj coming from Ariane :return: ariane_clip3 Team object """ LOGGER.debug("Team.json_2_team") return Team(teamid=json_obj['teamID'], name=json_obj['teamName'], description=json_obj['teamDescription'], color_code=json_obj['teamColorCode'], app_ids=json_obj['teamApplicationsID'], osi_ids=json_obj['teamOSInstancesID'])
transform JSON obj coming from Ariane to ariane_clip3 object :param json_obj: the JSON obj coming from Ariane :return: ariane_clip3 Team object
def canonic(self, file_name): """ returns canonical version of a file name. A canonical file name is an absolute, lowercase normalized path to a given file. """ if file_name == "<" + file_name[1:-1] + ">": return file_name c_file_name = self.file_name_cache.get(file_name) if not c_file_name: c_file_name = os.path.abspath(file_name) c_file_name = os.path.normcase(c_file_name) self.file_name_cache[file_name] = c_file_name return c_file_name
returns canonical version of a file name. A canonical file name is an absolute, lowercase normalized path to a given file.
def filter_dict_by_key(d, keys): """Filter the dict *d* to remove keys not in *keys*.""" return {k: v for k, v in d.items() if k in keys}
Filter the dict *d* to remove keys not in *keys*.
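A quick illustration with made-up keys, assuming the function above is importable:

record = {"host": "db1", "port": 5432, "password": "secret"}
print(filter_dict_by_key(record, {"host", "port"}))
# {'host': 'db1', 'port': 5432}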
def list(self, device_path, timeout_ms=None): """Yield filesync_service.DeviceFileStat objects for directory contents.""" return self.filesync_service.list( device_path, timeouts.PolledTimeout.from_millis(timeout_ms))
Yield filesync_service.DeviceFileStat objects for directory contents.
def scale(self, new_volume: float) -> "Lattice": """ Return a new Lattice with volume new_volume by performing a scaling of the lattice vectors so that length proportions and angles are preserved. Args: new_volume: New volume to scale to. Returns: New lattice with desired volume. """ versors = self.matrix / self.abc geo_factor = abs(dot(np.cross(versors[0], versors[1]), versors[2])) ratios = np.array(self.abc) / self.c new_c = (new_volume / (geo_factor * np.prod(ratios))) ** (1 / 3.0) return Lattice(versors * (new_c * ratios))
Return a new Lattice with volume new_volume by performing a scaling of the lattice vectors so that length proportions and angles are preserved. Args: new_volume: New volume to scale to. Returns: New lattice with desired volume.
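A hedged usage sketch, assuming this is pymatgen's Lattice.scale and that pymatgen is installed (the import path varies between pymatgen releases):

from pymatgen.core import Lattice   # older releases: from pymatgen import Lattice

lat = Lattice.cubic(4.0)            # a = b = c = 4 angstrom, volume = 64
doubled = lat.scale(128.0)

print(round(doubled.volume, 3))     # 128.0
print(doubled.abc)                  # all three lengths scaled by the same factor
print(doubled.angles)               # angles preserved: (90, 90, 90)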
def _validate_apns_certificate(self, certfile): """Validate the APNS certificate at startup.""" try: with open(certfile, "r") as f: content = f.read() check_apns_certificate(content) except Exception as e: raise ImproperlyConfigured( "The APNS certificate file at %r is not readable: %s" % (certfile, e) )
Validate the APNS certificate at startup.
async def inject_request_id(app, handler): """aiohttp middleware: ensures each request has a unique request ID. See: ``inject_request_id``. """ async def trace_request(request): request['x-request-id'] = \ request.headers.get('x-request-id') or str(uuid.uuid4()) return await handler(request) return trace_request
aiohttp middleware: ensures each request has a unique request ID. See: ``inject_request_id``.
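A registration sketch, assuming an aiohttp version that still accepts old-style (factory) middlewares like the one above; the handler and route are illustrative.

from aiohttp import web

async def handle(request):
    # every request now carries a request ID, either propagated or freshly generated
    return web.json_response({"request_id": request["x-request-id"]})

app = web.Application(middlewares=[inject_request_id])
app.router.add_get("/", handle)

# web.run_app(app)   # uncomment to serve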
def clear_cached_authc_info(self, identifier): """ When cached credentials are no longer needed, they can be manually cleared with this method. However, account credentials may be cached with a short expiration time (TTL), making the manual clearing of cached credentials an alternative use case. :param identifier: the identifier of a specific source, extracted from the SimpleIdentifierCollection (identifiers) """ msg = "Clearing cached authc_info for [{0}]".format(identifier) logger.debug(msg) self.cache_handler.delete('authentication:' + self.name, identifier)
When cached credentials are no longer needed, they can be manually cleared with this method. However, account credentials may be cached with a short expiration time (TTL), making the manual clearing of cached credentials an alternative use case. :param identifier: the identifier of a specific source, extracted from the SimpleIdentifierCollection (identifiers)
def is_propagating(self, images, augmenter, parents, default):
    """
    Returns whether an augmenter may call its children to augment an
    image. This is independent of the augmenter itself possibly changing
    the image, without calling its children. (Most (all?) augmenters with
    children currently don't perform any changes themselves.)

    Returns
    -------
    bool
        If True, the augmenter may propagate to its children. If False, it may not.

    """
    if self.propagator is None:
        return default
    else:
        return self.propagator(images, augmenter, parents, default)
Returns whether an augmenter may call its children to augment an
image. This is independent of the augmenter itself possibly changing
the image, without calling its children. (Most (all?) augmenters with
children currently don't perform any changes themselves.)

Returns
-------
bool
    If True, the augmenter may propagate to its children. If False, it may not.
def size_changed(self, settings, key, user_data): """If the gconf var window_height or window_width are changed, this method will be called and will call the resize function in guake. """ RectCalculator.set_final_window_rect(self.settings, self.guake.window)
If the gconf var window_height or window_width are changed, this method will be called and will call the resize function in guake.