Columns: text — string, lengths 78 to 104k; score — float64, range 0 to 0.18
def scatter(self, method, args={}, nowait=False, timeout=None, **kwargs):
    """Broadcast method to all agents.

    If nowait is False, returns a generator to iterate over the results.

    :keyword limit: Limit number of reads from the queue.
        Unlimited by default.
    :keyword timeout: the timeout (in float seconds) waiting for replies.
        Default is :attr:`default_timeout`.

    **Examples**

    ``scatter`` is a generator (if nowait is False)::

        >>> res = scatter()
        >>> res.next()  # one event consumed, or timed out.

        >>> res = scatter(limit=2)
        >>> for i in res:  # two events consumed or timeout
        >>>     pass

    See :meth:`call_or_cast` for a full list of supported arguments.

    """
    timeout = timeout if timeout is not None else self.default_timeout
    r = self.call_or_cast(method, args, type=ACTOR_TYPE.SCATTER,
                          nowait=nowait, timeout=timeout, **kwargs)
    if not nowait:
        return r.gather(timeout=timeout, **kwargs)
0.0018
def flash_firmware(self, redfish_inst, file_url): """Perform firmware flashing on a redfish system :param file_url: url to firmware bits. :param redfish_inst: redfish instance :raises: IloError, on an error from iLO. """ action_data = { 'ImageURI': file_url, } target_uri = self._get_firmware_update_element().target_uri try: self._conn.post(target_uri, data=action_data) except sushy.exceptions.SushyError as e: msg = (('The Redfish controller failed to update firmware ' 'with file %(file)s Error %(error)s') % {'file': file_url, 'error': str(e)}) LOG.debug(msg) # noqa raise exception.IloError(msg) self.wait_for_redfish_firmware_update_to_complete(redfish_inst) try: state, percent = self.get_firmware_update_progress() except sushy.exceptions.SushyError as e: msg = ('Failed to get firmware progress update ' 'Error %(error)s' % {'error': str(e)}) LOG.debug(msg) raise exception.IloError(msg) if state == "Error": msg = 'Unable to update firmware' LOG.debug(msg) # noqa raise exception.IloError(msg) elif state == "Unknown": msg = 'Status of firmware update not known' LOG.debug(msg) # noqa else: # "Complete" | "Idle" LOG.info('Flashing firmware file: %s ... done', file_url)
0.001286
def handle(self, pkt, raddress, rport): """Handle a packet we just received.""" if not self.context.tidport: self.context.tidport = rport log.debug("Set remote port for session to %s", rport) # If we're going to successfully transfer the file, then we should see # either an OACK for accepted options, or an ACK to ignore options. if isinstance(pkt, TftpPacketOACK): log.info("Received OACK from server") try: self.handleOACK(pkt) except TftpException: log.error("Failed to negotiate options") self.sendError(TftpErrors.FailedNegotiation) raise else: log.debug("Sending first DAT packet") self.context.pending_complete = self.sendDAT() log.debug("Changing state to TftpStateExpectACK") return TftpStateExpectACK(self.context) elif isinstance(pkt, TftpPacketACK): log.info("Received ACK from server") log.debug("Apparently the server ignored our options") # The block number should be zero. if pkt.blocknumber == 0: log.debug("Ack blocknumber is zero as expected") log.debug("Sending first DAT packet") self.context.pending_complete = self.sendDAT() log.debug("Changing state to TftpStateExpectACK") return TftpStateExpectACK(self.context) else: log.warning("Discarding ACK to block %s" % pkt.blocknumber) log.debug("Still waiting for valid response from server") return self elif isinstance(pkt, TftpPacketERR): self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received ERR from server: %s" % pkt) elif isinstance(pkt, TftpPacketRRQ): self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received RRQ from server while in upload") elif isinstance(pkt, TftpPacketDAT): self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received DAT from server while in upload") else: self.sendError(TftpErrors.IllegalTftpOp) raise TftpException("Received unknown packet type from server: %s" % pkt) # By default, no state change. return self
0.001227
def masked(name, runtime=False): ''' .. versionadded:: 2017.7.0 .. note:: This state is only available on minions which use systemd_. Ensures that the named service is masked (i.e. prevented from being started). name Name of the service to mask runtime : False By default, this state will manage an indefinite mask for the named service. Set this argument to ``True`` to runtime mask the service. .. note:: It is possible for a service to have both indefinite and runtime masks set for it. Therefore, this state will manage a runtime or indefinite mask independently of each other. This means that if the service is already indefinitely masked, running this state with ``runtime`` set to ``True`` will _not_ remove the indefinite mask before setting a runtime mask. In these cases, if it is desirable to ensure that the service is runtime masked and not indefinitely masked, pair this state with a :py:func:`service.unmasked <salt.states.service.unmasked>` state, like so: .. code-block:: yaml mask_runtime_foo: service.masked: - name: foo - runtime: True unmask_indefinite_foo: service.unmasked: - name: foo - runtime: False .. _systemd: https://freedesktop.org/wiki/Software/systemd/ ''' ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''} if 'service.masked' not in __salt__: ret['comment'] = 'Service masking not available on this minion' ret['result'] = False return ret mask_type = 'runtime masked' if runtime else 'masked' expected_changes = {mask_type: {'old': False, 'new': True}} try: if __salt__['service.masked'](name, runtime): ret['comment'] = 'Service {0} is already {1}'.format( name, mask_type, ) return ret if __opts__['test']: ret['result'] = None ret['changes'] = expected_changes ret['comment'] = 'Service {0} would be {1}'.format(name, mask_type) return ret __salt__['service.mask'](name, runtime) if __salt__['service.masked'](name, runtime): ret['changes'] = expected_changes ret['comment'] = 'Service {0} was {1}'.format(name, mask_type) else: ret['comment'] = 'Failed to mask service {0}'.format(name) return ret except CommandExecutionError as exc: ret['result'] = False ret['comment'] = exc.strerror return ret
0.000365
def _set_next_host_location(self, context):
    '''
    A function which sets the next host location on the request, if applicable.

    :param ~azure.storage.models.RetryContext context:
        The retry context containing the previous host location and the request
        to evaluate and possibly modify.
    '''
    if len(context.request.host_locations) > 1:
        # If there's more than one possible location, retry to the alternative
        if context.location_mode == LocationMode.PRIMARY:
            context.location_mode = LocationMode.SECONDARY
        else:
            context.location_mode = LocationMode.PRIMARY

        context.request.host = context.request.host_locations.get(context.location_mode)
0.011598
def caffe_to_tensorflow_session(caffe_def_path, caffemodel_path, inputs, graph_name='Graph', conversion_out_dir_path=None, use_padding_same=False): """Create a TensorFlow Session from a Caffe model.""" try: # noinspection PyUnresolvedReferences from caffeflow import convert except ImportError: raise Exception("caffeflow package needs to be installed to freeze Caffe models. Check out the README file.") with (dummy_context_mgr(conversion_out_dir_path) or util.TemporaryDirectory()) as dir_path: params_values_output_path = os.path.join(dir_path, 'params_values.npy') network_output_path = os.path.join(dir_path, 'network.py') convert.convert(caffe_def_path, caffemodel_path, params_values_output_path, network_output_path, False, use_padding_same=use_padding_same) network_module = imp.load_source('module.name', network_output_path) network_class = getattr(network_module, graph_name) network = network_class(inputs) sess = tf.Session() network.load(params_values_output_path, sess) return sess
0.005124
def _get_order_by(order, orderby, order_by_fields):
    """
    Return the order-by syntax for a model.
    Checks whether to use ascending or descending order, and maps the field names.
    """
    try:
        # Find the actual database fieldnames for the keyword.
        db_fieldnames = order_by_fields[orderby]
    except KeyError:
        raise ValueError("Invalid value for 'orderby': '{0}', supported values are: {1}".format(
            orderby, ', '.join(sorted(order_by_fields.keys()))))

    # Default to descending for some fields, otherwise ascending
    is_desc = (not order and orderby in ORDER_BY_DESC) \
              or (order or 'asc').lower() in ('desc', 'descending')

    if is_desc:
        return map(lambda name: '-' + name, db_fieldnames)
    else:
        return db_fieldnames
0.002551
def _handle_post(self, transaction): """ Handle POST requests :type transaction: Transaction :param transaction: the transaction that owns the request :rtype : Transaction :return: the edited transaction with the response to the request """ path = str("/" + transaction.request.uri_path) transaction.response = Response() transaction.response.destination = transaction.request.source transaction.response.token = transaction.request.token # Create request transaction = self._server.resourceLayer.create_resource(path, transaction) return transaction
0.004518
def take_file_lock(own_file, lock_file, own_content): """ Atomically "move" @own_file to @lock_file if the latter does not exists, else just remove @own_file. @own_file: filepath of the temporary file that contains our PID @lock_file: destination filepath @own_content: content of @own_file Return True if the lock has been successfully taken, else False. (Caller should also be prepared for OSError exceptions) """ try: try: os.link(own_file, lock_file) finally: os.unlink(own_file) except OSError, e: if e.errno == errno.EEXIST: log.warning("The lock file %r already exists - won't " "overwrite it. An other instance of ourself " "is probably running.", lock_file) return False else: raise content = file(lock_file).read(len(own_content) + 1) if content != own_content: log.warning( "I thought I successfully took the lock file %r but " "it does not contain what was expected. Somebody is " "playing with us.", lock_file) return False return True
0.002492
def get_languages(self, abbreviations=False):
    """
    Get practiced languages.

    :param abbreviations: Get language as abbreviation or not
    :type abbreviations: bool
    :return: List of languages
    :rtype: list of str
    """
    data = []
    for lang in self.user_data.languages:
        if lang['learning']:
            if abbreviations:
                data.append(lang['language'])
            else:
                data.append(lang['language_string'])
    return data
0.003656
def job_start(job_backend, trainer, keras_callback): """ Starts the training of a job. Needs job_prepare() first. :type job_backend: JobBackend :type trainer: Trainer :return: """ job_backend.set_status('STARTING') job_model = job_backend.get_job_model() model_provider = job_model.get_model_provider() job_backend.set_status('LOAD DATA') datasets = job_model.get_datasets(trainer) print('trainer.input_shape = %s\n' % (simplejson.dumps(trainer.input_shape, default=invalid_json_values),)) print('trainer.classes = %s\n' % (simplejson.dumps(trainer.classes, default=invalid_json_values),)) multiple_inputs = len(datasets) > 1 insights_x = [] if multiple_inputs else None for dataset_name in job_model.get_input_dataset_names(): dataset = datasets[dataset_name] if is_generator(dataset['X_train']): batch_x, batch_y = dataset['X_train'].next() if multiple_inputs: insights_x.append(batch_x[0]) else: insights_x = batch_x[0] else: if multiple_inputs: insights_x.append(dataset['X_train'][0]) else: insights_x = dataset['X_train'][0] keras_callback.insights_x = insights_x print('Insights sample shape', keras_callback.insights_x.shape) keras_callback.write("Possible data keys '%s'\n" % "','".join(list(datasets.keys()))) data_train = model_provider.get_training_data(trainer, datasets) data_validation = model_provider.get_validation_data(trainer, datasets) keras_callback.set_validation_data(data_validation, trainer.nb_val_samples) trainer.set_status('CONSTRUCT') model = model_provider.get_model(trainer) trainer.set_model(model) trainer.set_status('COMPILING') loss = model_provider.get_loss(trainer) optimizer = model_provider.get_optimizer(trainer) model_provider.compile(trainer, model, loss, optimizer) model.summary() trainer.callbacks.append(keras_callback) model_provider.train(trainer, model, data_train, data_validation)
0.001884
def is_child(self, node):
    """Check if a node is a child of the current node

    Parameters
    ----------
    node : instance of Node
        The potential child.

    Returns
    -------
    child : bool
        Whether or not the node is a child.
    """
    if node in self.children:
        return True
    for c in self.children:
        if c.is_child(node):
            return True
    return False
0.004228
def is_equal(self, other):
    """
    Two beliefstates are equal if all of their part names are equal and all
    of their cell's values return True for is_equal().

    Note: this only compares the items in the DictCell, not `pos`,
    `environment_variables` or `deferred_effects`.
    """
    return hash(self) == hash(other)
    # NOTE: the element-wise comparison below is unreachable because of the
    # hash-based short circuit above.
    for (this, that) in itertools.izip_longest(self, other):
        if that[0] is None or this[0] != that[0]:  # compare attribute names
            return False
        if not this[1].is_equal(that[1]):  # compare values
            return False
    return True
0.002963
def _readable(self, watcher, events):
    """Called by the pyev watcher (self.read_watcher) whenever the socket
    is readable. Calls recv and checks for errors. If there are no errors
    then read_cb is called with the newly arrived bytes. Otherwise closes
    the socket and calls close_cb with the error.
    """
    try:
        data = self.sock.recv(4096)
        if len(data) == 0:
            self._close(ConnectionClosed())
        else:
            self.read_cb(data)
    except IOError as e:
        self._close(e)
0.003401
def get_col(self, col_name, filter = lambda _ : True): """Return all values in the column corresponding to col_name that satisfies filter, which is a function that takes in a value of the column's type and returns True or False Parameters ---------- col_name : str Name of desired column filter : function, optional A function that takes in a value of the column's type and returns True or False Defaults to a function that always returns True Returns ------- list A list of values in the desired columns by order of their storage in the model Raises ------ ValueError If the desired column name is not found in the model """ if col_name not in self._headers: raise ValueError("{} not found! Model has headers: {}".format(col_name, self._headers)) col = [] for i in range(self.num_rows): row = self._table[i + 1] val = row[col_name] if filter(val): col.append(val) return col
0.00878
def cyl_to_rect(R, phi, Z):
    """
    NAME:
       cyl_to_rect
    PURPOSE:
       convert from cylindrical to rectangular coordinates
    INPUT:
       R, phi, Z - cylindrical coordinates
    OUTPUT:
       X, Y, Z
    HISTORY:
       2011-02-23 - Written - Bovy (NYU)
    """
    return (R * sc.cos(phi), R * sc.sin(phi), Z)
0.015244
def analyze(self, id): # pylint: disable=invalid-name,redefined-builtin """Get a list of tests that will be skipped for a package. :param id: Package ID as an int. :return: :class:`packages.Analysis <packages.Analysis>` object :rtype: packages.Analysis """ schema = AnalysisSchema() resp = self.service.post(self.base+str(id)+'/', params={'process': 'analyze'}) return self.service.decode(schema, resp)
0.008565
def is_legal_sequence(self, packet: DataPacket) -> bool: """ Check if the Sequence number of the DataPacket is legal. For more information see page 17 of http://tsp.esta.org/tsp/documents/docs/E1-31-2016.pdf. :param packet: the packet to check :return: true if the sequence is legal. False if the sequence number is bad """ # if the sequence of the packet is smaller than the last received sequence, return false # therefore calculate the difference between the two values: try: # try, because self.lastSequence might not been initialized diff = packet.sequence - self.lastSequence[packet.universe] # if diff is between ]-20,0], return False for a bad packet sequence if 0 >= diff > -20: return False except: pass # if the sequence is good, return True and refresh the list with the new value self.lastSequence[packet.universe] = packet.sequence return True
0.007797
def get_config_and_id_from_registry(image, registry, digest, insecure=False, dockercfg_path=None, version='v2'): """Return image config by digest :param image: ImageName, the remote image to inspect :param registry: str, URI for registry, if URI schema is not provided, https:// will be used :param digest: str, digest of the image manifest :param insecure: bool, when True registry's cert is not verified :param dockercfg_path: str, dirname of .dockercfg location :param version: str, which manifest schema versions to fetch digest :return: dict, versions mapped to their digest """ registry_session = RegistrySession(registry, insecure=insecure, dockercfg_path=dockercfg_path) response = query_registry( registry_session, image, digest=digest, version=version) response.raise_for_status() manifest_config = response.json() config_digest = manifest_config['config']['digest'] config_response = query_registry( registry_session, image, digest=config_digest, version=version, is_blob=True) config_response.raise_for_status() blob_config = config_response.json() context = '/'.join([x for x in [image.namespace, image.repo] if x]) tag = image.tag logger.debug('Image %s:%s has config:\n%s', context, tag, blob_config) return blob_config, config_digest
0.002116
def group_plugins_into_categories(plugins):
    """
    Return all plugins, grouped by category.
    The structure is a {"Categoryname": [list of plugin classes]}
    """
    if not plugins:
        return {}

    plugins = sorted(plugins, key=lambda p: p.verbose_name)
    categories = {}

    for plugin in plugins:
        title = str(plugin.category or u"")  # enforce resolving ugettext_lazy proxies.
        if title not in categories:
            categories[title] = []
        categories[title].append(plugin)

    return categories
0.003711
def transform_multidim_to_1d_decl(decl): """ Transform ast of multidimensional declaration to a single dimension declaration. In-place operation! Returns name and dimensions of array (to be used with transform_multidim_to_1d_ref()) """ dims = [] type_ = decl.type while type(type_) is c_ast.ArrayDecl: dims.append(type_.dim) type_ = type_.type if dims: # Multidimensional array decl.type.dim = reduce(lambda l, r: c_ast.BinaryOp('*', l, r), dims) decl.type.type = type_ return decl.name, dims
0.005208
def p_labelled_statement(self, p):
    """labelled_statement : identifier COLON statement"""
    p[0] = ast.Label(identifier=p[1], statement=p[3])
0.012987
def predict(self, X): """Predict classes on test vectors X. Parameters ---------- X : array-like, shape = [n_samples, n_features] Input vectors, where n_samples is the number of samples and n_features is the number of features. Returns ------- y : array, shape = [n_samples,] The predicted classes of the input samples. """ proba = self.predict_proba(X) return self.classes_.take(np.argmax(proba, axis=1), axis=0)
0.003759
def check_refresh(self, data, ret): ''' Check to see if the modules for this state instance need to be updated, only update if the state is a file or a package and if it changed something. If the file function is managed check to see if the file is a possible module type, e.g. a python, pyx, or .so. Always refresh if the function is recurse, since that can lay down anything. ''' _reload_modules = False if data.get('reload_grains', False): log.debug('Refreshing grains...') self.opts['grains'] = salt.loader.grains(self.opts) _reload_modules = True if data.get('reload_pillar', False): log.debug('Refreshing pillar...') self.opts['pillar'] = self._gather_pillar() _reload_modules = True if not ret['changes']: if data.get('force_reload_modules', False): self.module_refresh() return if data.get('reload_modules', False) or _reload_modules: # User explicitly requests a reload self.module_refresh() return if data['state'] == 'file': if data['fun'] == 'managed': if data['name'].endswith( ('.py', '.pyx', '.pyo', '.pyc', '.so')): self.module_refresh() elif data['fun'] == 'recurse': self.module_refresh() elif data['fun'] == 'symlink': if 'bin' in data['name']: self.module_refresh() elif data['state'] in ('pkg', 'ports'): self.module_refresh()
0.0024
def reset(self):
    """
    Resets solver data.
    """
    self.k = 0.
    self.x = np.zeros(0)
    self.lam = np.zeros(0)
    self.nu = np.zeros(0)
    self.mu = np.zeros(0)
    self.pi = np.zeros(0)
    self.status = self.STATUS_UNKNOWN
    self.error_msg = ''
    self.obj_sca = 1.
0.008746
def list(context, job_id, sort, limit, where, verbose): """list(context, sort, limit, where, verbose) List all files. >>> dcictl file-list job-id [OPTIONS] :param string sort: Field to apply sort :param integer limit: Max number of rows to return :param string where: An optional filter criteria :param boolean verbose: Display verbose output """ result = job.list_files(context, id=job_id, sort=sort, limit=limit, verbose=verbose, where=where) utils.format_output(result, context.format, verbose=verbose)
0.001736
def params_size(m: Union[nn.Module,Learner], size: tuple = (3, 64, 64))->Tuple[Sizes, Tensor, Hooks]: "Pass a dummy input through the model to get the various sizes. Returns (res,x,hooks) if `full`" if isinstance(m, Learner): if m.data.is_empty: raise Exception("This is an empty `Learner` and `Learner.summary` requires some data to pass through the model.") ds_type = DatasetType.Train if m.data.train_dl else (DatasetType.Valid if m.data.valid_dl else DatasetType.Test) x = m.data.one_batch(ds_type=ds_type, detach=False, denorm=False)[0] x = [o[:1] for o in x] if is_listy(x) else x[:1] m = m.model elif isinstance(m, nn.Module): x = next(m.parameters()).new(1, *size) else: raise TypeError('You should either pass in a Learner or nn.Module') with hook_outputs(flatten_model(m)) as hook_o: with hook_params(flatten_model(m))as hook_p: x = m.eval()(*x) if is_listy(x) else m.eval()(x) output_size = [((o.stored.shape[1:]) if o.stored is not None else None) for o in hook_o] params = [(o.stored if o.stored is not None else (None,None)) for o in hook_p] params, trainables = map(list,zip(*params)) return output_size, params, trainables
0.011085
def get_goobjs_altgo2goobj(go2obj):
    """Separate alt GO IDs and key GO IDs."""
    goobjs = set()
    altgo2goobj = {}
    for goid, goobj in go2obj.items():
        goobjs.add(goobj)
        if goid != goobj.id:
            altgo2goobj[goid] = goobj
    return goobjs, altgo2goobj
0.003521
def first_realm_successful_strategy(authc_attempt): """ The FirstRealmSuccessfulStrategy will iterate over the available realms and invoke Realm.authenticate_account(authc_token) on each one. The moment that a realm returns an Account without raising an Exception, that account is returned immediately and all subsequent realms ignored entirely (iteration 'short circuits'). If no realms return an Account: * If only one exception was thrown by any consulted Realm, that exception is thrown. * If more than one Realm threw an exception during consultation, those exceptions are bundled together as a MultiRealmAuthenticationException and that exception is thrown. * If no exceptions were thrown, None is returned, indicating to the calling Authenticator that no Account was found (for that token) :type authc_attempt: AuthenticationAttempt :returns: Account """ authc_token = authc_attempt.authentication_token realm_errors = [] account = None for realm in authc_attempt.realms: if (realm.supports(authc_token)): try: account = realm.authenticate_account(authc_token) except Exception as ex: realm_errors.append(ex) if (account): return account if (realm_errors): if (len(realm_errors) == 1): raise realm_errors[0] else: raise MultiRealmAuthenticationException(realm_errors) return None
0.001274
def get_queryset(self):
    '''
    Only serve site-specific languages
    '''
    request = self.request
    return (Languages.for_site(request.site)
            .languages.filter().order_by('pk'))
0.008658
def tseitin(self, auxvarname='aux'): """Convert the expression to Tseitin's encoding.""" if self.is_cnf(): return self _, constraints = _tseitin(self.to_nnf(), auxvarname) fst = constraints[-1][1] rst = [Equal(v, ex).to_cnf() for v, ex in constraints[:-1]] return And(fst, *rst)
0.0059
def duplicate(self, contributor=None, inherit_collections=False):
    """Duplicate (make a copy) ``Entity`` objects.

    :param contributor: Duplication user
    :param inherit_collections: If ``True`` then duplicated entities will
        be added to collections the original entity is part of. Duplicated
        entities' data objects will also be added to the collections, but
        only those which are in the collection

    :return: A list of duplicated entities
    """
    return [
        entity.duplicate(contributor, inherit_collections)
        for entity in self
    ]
0.003106
def calculate_edges(self, excludes): """Builds a vertex list adding barycentric coordinates to each vertex. Used to draw turtle borders efficiently, specialised to draw only the some edges. See below for references. http://stackoverflow.com/questions/18035719/drawing-a-border-on-a-2d-polygon-with-a-fragment-shader # NOQA http://codeflow.org/entries/2012/aug/02/easy-wireframe-display-with-barycentric-coordinates/ # NOQA http://strattonbrazil.blogspot.co.uk/2011/09/single-pass-wireframe-rendering_11.html # NOQA """ edges = [] MEW = 100.0 if excludes is None: excludes = [0] * len(self.indices) * 2 for i in range(0, len(self.indices), 3): # each triangle i0 = self.indices[i+0] * 4 i1 = self.indices[i+1] * 4 i2 = self.indices[i+2] * 4 e0 = excludes[i+0] e1 = excludes[i+1] e2 = excludes[i+2] p0 = self.vertices[i0:i0+4] p1 = self.vertices[i1:i1+4] p2 = self.vertices[i2:i2+4] v0 = self.vec2minus(p2, p1) v1 = self.vec2minus(p2, p0) v2 = self.vec2minus(p1, p0) area = fabs(v1[0]*v2[1] - v1[1] * v2[0]) c0 = (area/self.magnitude(v0), e1 * MEW, e2 * MEW) c1 = (e0 * MEW, area/self.magnitude(v1), e2 * MEW) c2 = (e0 * MEW, e1 * MEW, area/self.magnitude(v2)) edges.extend(p0) edges.extend(c0) edges.extend(p1) edges.extend(c1) edges.extend(p2) edges.extend(c2) return create_vertex_buffer(edges)
0.001195
def _item_to_metric(iterator, log_metric_pb): """Convert a metric protobuf to the native object. :type iterator: :class:`~google.api_core.page_iterator.Iterator` :param iterator: The iterator that is currently in use. :type log_metric_pb: :class:`.logging_metrics_pb2.LogMetric` :param log_metric_pb: Metric protobuf returned from the API. :rtype: :class:`~google.cloud.logging.metric.Metric` :returns: The next metric in the page. """ # NOTE: LogMetric message type does not have an ``Any`` field # so `MessageToDict`` can safely be used. resource = MessageToDict(log_metric_pb) return Metric.from_api_repr(resource, iterator.client)
0.001431
def stutter(args): """ %prog stutter a.vcf.gz Extract info from lobSTR vcf file. Generates a file that has the following fields: CHR, POS, MOTIF, RL, ALLREADS, Q """ p = OptionParser(stutter.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) vcf, = args pf = op.basename(vcf).split(".")[0] execid, sampleid = pf.split("_") C = "vcftools --remove-filtered-all --min-meanDP 10" C += " --gzvcf {} --out {}".format(vcf, pf) C += " --indv {}".format(sampleid) info = pf + ".INFO" if need_update(vcf, info): cmd = C + " --get-INFO MOTIF --get-INFO RL" sh(cmd) allreads = pf + ".ALLREADS.FORMAT" if need_update(vcf, allreads): cmd = C + " --extract-FORMAT-info ALLREADS" sh(cmd) q = pf + ".Q.FORMAT" if need_update(vcf, q): cmd = C + " --extract-FORMAT-info Q" sh(cmd) outfile = pf + ".STUTTER" if need_update((info, allreads, q), outfile): cmd = "cut -f1,2,5,6 {}".format(info) cmd += r" | sed -e 's/\t/_/g'" cmd += " | paste - {} {}".format(allreads, q) cmd += " | cut -f1,4,7" sh(cmd, outfile=outfile)
0.000812
def _add_task(self, task): '''Add an already existing task to the task group.''' if hasattr(task, '_task_group'): raise RuntimeError('task is already part of a group') if self._closed: raise RuntimeError('task group is closed') task._task_group = self if task.done(): self._done.append(task) else: self._pending.add(task) task.add_done_callback(self._on_done)
0.004283
def get_kde_scatter(self, xax="area_um", yax="deform", positions=None, kde_type="histogram", kde_kwargs={}, xscale="linear", yscale="linear"): """Evaluate the kernel density estimate for scatter plots Parameters ---------- xax: str Identifier for X axis (e.g. "area_um", "aspect", "deform") yax: str Identifier for Y axis positions: list of two 1d ndarrays or ndarray of shape (2, N) The positions where the KDE will be computed. Note that the KDE estimate is computed from the the points that are set in `self.filter.all`. kde_type: str The KDE method to use kde_kwargs: dict Additional keyword arguments to the KDE method xscale: str If set to "log", take the logarithm of the x-values before computing the KDE. This is useful when data are are displayed on a log-scale. Defaults to "linear". yscale: str See `xscale`. Returns ------- density : 1d ndarray The kernel density evaluated for the filtered data points. """ xax = xax.lower() yax = yax.lower() kde_type = kde_type.lower() if kde_type not in kde_methods.methods: raise ValueError("Not a valid kde type: {}!".format(kde_type)) # Get data x = self[xax][self.filter.all] y = self[yax][self.filter.all] # Apply scale (no change for linear scale) xs = self._apply_scale(x, xscale, xax) ys = self._apply_scale(y, yscale, yax) if positions is None: posx = None posy = None else: posx = self._apply_scale(positions[0], xscale, xax) posy = self._apply_scale(positions[1], yscale, yax) kde_fct = kde_methods.methods[kde_type] if len(x): density = kde_fct(events_x=xs, events_y=ys, xout=posx, yout=posy, **kde_kwargs) else: density = [] return density
0.001831
def checkout(branch, quiet=False, as_path=False): """Check out that branch Defaults to a quiet checkout, giving no stdout if stdout it wanted, call with quiet = False Defaults to checking out branches If as_path is true, then treat "branch" like a file, i.e. $ git checkout -- branch All errors will pass silently, just returning False except any messages about "you need to resolve your current index" These indicate that the repository is not in a normal state and action by a user is usually needed to resolve that So the exception is allowed to rise probably stopping the script """ try: if as_path: branch = '-- %s' % branch run('checkout %s %s' % (quiet and '-q' or '', branch)) return True except GitError as e: if 'need to resolve your current index' in e.output: raise return False
0.001045
def _get_history_daily_window(self, assets, end_dt, bar_count, field_to_use, data_frequency): """ Internal method that returns a dataframe containing history bars of daily frequency for the given sids. """ session = self.trading_calendar.minute_to_session_label(end_dt) days_for_window = self._get_days_for_window(session, bar_count) if len(assets) == 0: return pd.DataFrame(None, index=days_for_window, columns=None) data = self._get_history_daily_window_data( assets, days_for_window, end_dt, field_to_use, data_frequency ) return pd.DataFrame( data, index=days_for_window, columns=assets )
0.007172
def to_underscore(string):
    """Converts a given string from CamelCase to under_score.

    >>> to_underscore('FooBar')
    'foo_bar'
    """
    new_string = re.sub(r'([A-Z]+)([A-Z][a-z])', r'\1_\2', string)
    new_string = re.sub(r'([a-z\d])([A-Z])', r'\1_\2', new_string)
    return new_string.lower()
0.003257
def unflat_unique_rowid_map(func, unflat_rowids, **kwargs): """ performs only one call to the underlying func with unique rowids the func must be some lookup function TODO: move this to a better place. CommandLine: python -m utool.util_list --test-unflat_unique_rowid_map:0 python -m utool.util_list --test-unflat_unique_rowid_map:1 Example0: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> import utool as ut >>> kwargs = {} >>> unflat_rowids = [[1, 2, 3], [2, 5], [1], []] >>> num_calls0 = [0] >>> num_input0 = [0] >>> def func0(rowids, num_calls0=num_calls0, num_input0=num_input0): ... num_calls0[0] += 1 ... num_input0[0] += len(rowids) ... return [rowid + 10 for rowid in rowids] >>> func = func0 >>> unflat_vals = unflat_unique_rowid_map(func, unflat_rowids, **kwargs) >>> result = [arr.tolist() for arr in unflat_vals] >>> print(result) >>> ut.assert_eq(num_calls0[0], 1) >>> ut.assert_eq(num_input0[0], 4) [[11, 12, 13], [12, 15], [11], []] Example1: >>> # ENABLE_DOCTEST >>> from utool.util_list import * # NOQA >>> import utool as ut >>> import numpy as np >>> kwargs = {} >>> unflat_rowids = [[1, 2, 3], [2, 5], [1], []] >>> num_calls1 = [0] >>> num_input1 = [0] >>> def func1(rowids, num_calls1=num_calls1, num_input1=num_input1, np=np): ... num_calls1[0] += 1 ... num_input1[0] += len(rowids) ... return [np.array([rowid + 10, rowid, 3]) for rowid in rowids] >>> func = func1 >>> unflat_vals = unflat_unique_rowid_map(func, unflat_rowids, **kwargs) >>> result = [arr.tolist() for arr in unflat_vals] >>> print(result) >>> ut.assert_eq(num_calls1[0], 1) >>> ut.assert_eq(num_input1[0], 4) [[[11, 1, 3], [12, 2, 3], [13, 3, 3]], [[12, 2, 3], [15, 5, 3]], [[11, 1, 3]], []] """ import utool as ut # First flatten the list, and remember the original dimensions flat_rowids, reverse_list = ut.invertible_flatten2(unflat_rowids) # Then make the input unique flat_rowids_arr = np.array(flat_rowids) unique_flat_rowids, inverse_unique = np.unique(flat_rowids_arr, return_inverse=True) # Then preform the lookup / implicit mapping unique_flat_vals = func(unique_flat_rowids, **kwargs) # Then broadcast unique values back to original flat positions flat_vals_ = np.array(unique_flat_vals)[inverse_unique] #flat_vals_ = np.array(unique_flat_vals).take(inverse_unique, axis=0) output_shape = tuple(list(flat_rowids_arr.shape) + list(flat_vals_.shape[1:])) flat_vals = np.array(flat_vals_).reshape(output_shape) # Then _unflatten the results to the original input dimensions unflat_vals = ut.unflatten2(flat_vals, reverse_list) return unflat_vals
0.001332
def raw(self, clean=False):
    """Raw identifier.

    args:
        clean (bool): clean name
    returns:
        str
    """
    if clean:
        return ''.join(''.join(p) for p in self.parsed).replace('?', ' ')
    return '%'.join('%'.join(p) for p in self.parsed).strip().strip('%')
0.006192
def parseruninfo(self): """Extracts the flowcell ID, as well as the instrument name from RunInfo.xml. If this file is not provided, NA values are substituted""" # Check if the RunInfo.xml file is provided, otherwise, yield N/A try: runinfo = ElementTree.ElementTree(file=self.runinfo) # Get the run id from the for elem in runinfo.iter(): for run in elem: try: self.runid = run.attrib['Id'] self.runnumber = run.attrib['Number'] except KeyError: break # pull the text from flowcell and instrument values using the .iter(tag="X") function for elem in runinfo.iter(tag="Flowcell"): self.flowcell = elem.text for elem in runinfo.iter(tag="Instrument"): self.instrument = elem.text except IOError: pass # Extract run statistics from either GenerateRunStatistics.xml or indexingQC.txt self.parserunstats()
0.004545
def upload_file(self, fax_file, **kwargs): # noqa: E501 """upload a file # noqa: E501 Before sending a fax you need to upload your files using this API. In order to upload your fax file, you have to send a `multipart/form-data` request with your file. If the upload was successful you would receive a `file_path` which you can use to send your fax. # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async=True >>> thread = api.upload_file(fax_file, async=True) >>> result = thread.get() :param async bool :param file fax_file: (required) :param str format: can be 'pdf' or 'tiff' :return: File If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async'): return self.upload_file_with_http_info(fax_file, **kwargs) # noqa: E501 else: (data) = self.upload_file_with_http_info(fax_file, **kwargs) # noqa: E501 return data
0.001736
def import_util(imp):
    '''
    Lazily import a util (class, function, or variable) from a module,
    given its dotted path as a string.

    @param imp: dotted path to the object to import
    '''
    mod_name, obj_name = imp.rsplit('.', 1)
    mod = importlib.import_module(mod_name)
    return getattr(mod, obj_name)
0.00369
def is_first_root(self): """Return ``True`` if this page is the first root pages.""" if self.parent: return False if self._is_first_root is not None: return self._is_first_root first_root_id = cache.get('PAGE_FIRST_ROOT_ID') if first_root_id is not None: self._is_first_root = first_root_id == self.id return self._is_first_root try: first_root_id = Page.objects.root().values('id')[0]['id'] except IndexError: first_root_id = None if first_root_id is not None: cache.set('PAGE_FIRST_ROOT_ID', first_root_id) self._is_first_root = self.id == first_root_id return self._is_first_root
0.002681
def xflatten(iterable, transform, check=is_iterable):
    """Apply a transform to iterable before flattening at each level."""
    for value in transform(iterable):
        if check(value):
            for flat in xflatten(value, transform, check):
                yield flat
        else:
            yield value
0.003195
def release(): """ Release current version to pypi """ with settings(warn_only=True): r = local(clom.git['diff-files']('--quiet', '--ignore-submodules', '--')) if r.return_code != 0: abort('There are uncommitted changes, commit or stash them before releasing') version = open('VERSION.txt').read().strip() print('Releasing %s...' % version) local(clom.git.flow.release.start(version)) local(clom.git.flow.release.finish(version, m='Release-%s' % version)) local(clom.git.push('origin', 'master', 'develop', tags=True)) local(clom.python('setup.py', 'sdist', 'upload'))
0.004747
def path_without_suffix(self):
    """The relative path to asset without suffix.

    Example::

        >>> attrs = AssetAttributes(environment, 'js/app.js')
        >>> attrs.path_without_suffix
        'js/app'
    """
    if self.suffix:
        return self.path[:-len(''.join(self.suffix))]
    return self.path
0.005698
async def build(self):
    """Get the track object for each link in the partial tracks data.

    Returns
    -------
    tracks : List[Track]
        The tracks
    """
    data = await self.__func()
    return list(PlaylistTrack(self.__client, track) for track in data['items'])
0.009646
def _get_context(argspec, kwargs):
    """Prepare a context for the serialization.

    :param argspec: The argspec of the serialization function.
    :param kwargs: Dict with context
    :return: Keyword arguments that the function can accept.
    """
    if argspec.keywords is not None:
        return kwargs
    return dict((arg, kwargs[arg]) for arg in argspec.args if arg in kwargs)
0.002597
def merge_corpus(self, corpus): """ Merge the given corpus into this corpus. All assets (tracks, utterances, issuers, ...) are copied into this corpus. If any ids (utt-idx, track-idx, issuer-idx, subview-idx, ...) are occurring in both corpora, the ids from the merging corpus are suffixed by a number (starting from 1 until no other is matching). Args: corpus (CorpusView): The corpus to merge. """ # Create a copy, so objects aren't changed in the original merging corpus merging_corpus = Corpus.from_corpus(corpus) self.import_tracks(corpus.tracks.values()) self.import_issuers(corpus.issuers.values()) utterance_idx_mapping = self.import_utterances(corpus.utterances.values()) for subview_idx, subview in merging_corpus.subviews.items(): for filter in subview.filter_criteria: if isinstance(filter, subset.MatchingUtteranceIdxFilter): new_filtered_utt_ids = set() for utt_idx in filter.utterance_idxs: new_filtered_utt_ids.add(utterance_idx_mapping[utt_idx].idx) filter.utterance_idxs = new_filtered_utt_ids new_idx = naming.index_name_if_in_list(subview_idx, self.subviews.keys()) self.import_subview(new_idx, subview) for feat_container_idx, feat_container in merging_corpus.feature_containers.items(): self.new_feature_container(feat_container_idx, feat_container.path)
0.006468
def groups_set_read_only(self, room_id, read_only, **kwargs):
    """Sets whether the group is read only or not."""
    return self.__call_api_post('groups.setReadOnly',
                                roomId=room_id,
                                readOnly=bool(read_only),
                                kwargs=kwargs)
0.012821
def filelist_prune(self, at_data, *args, **kwargs): """ Given a list of files, possibly prune list by extension. """ b_status = True l_file = [] str_path = at_data[0] al_file = at_data[1] if len(self.str_extension): al_file = [x for x in al_file if self.str_extension in x] if len(al_file): al_file.sort() l_file = al_file b_status = True else: self.dp.qprint( "No valid files to analyze found in path %s!" % str_path, comms = 'error', level = 3) l_file = None b_status = False return { 'status': b_status, 'l_file': l_file }
0.022388
def add_to_current_action(self, controller):
    """Add a controller to the current action."""
    item = self.current_item
    self._history[self._index] = item + (controller,)
0.010582
def parse_pr_numbers(git_log_lines):
    """
    Parse PR numbers from commit messages. At GitHub those have the format
    `here is the message (#1234)`, where `1234` is the PR number.
    """
    prs = []
    for line in git_log_lines:
        pr_number = parse_pr_number(line)
        if pr_number:
            prs.append(pr_number)
    return prs
0.002801
def get_locations(self, locations, as_list=False):
    """
    For a list of locations return a Series or list of the values.

    :param locations: list of index locations
    :param as_list: True to return a list of values
    :return: Series or list
    """
    indexes = [self._index[x] for x in locations]
    return self.get(indexes, as_list)
0.005277
def _os_bootstrap(): """ Set up 'os' module replacement functions for use during import bootstrap. """ global _os_stat, _os_getcwd, _os_environ, _os_listdir global _os_path_join, _os_path_dirname, _os_path_basename global _os_sep names = sys.builtin_module_names join = dirname = environ = listdir = basename = None mindirlen = 0 # Only 'posix' and 'nt' os specific modules are supported. # 'dos', 'os2' and 'mac' (MacOS 9) are not supported. if 'posix' in names: from posix import stat, getcwd, environ, listdir sep = _os_sep = '/' mindirlen = 1 elif 'nt' in names: from nt import stat, getcwd, environ, listdir sep = _os_sep = '\\' mindirlen = 3 else: raise ImportError('no os specific module found') if join is None: def join(a, b, sep=sep): if a == '': return b lastchar = a[-1:] if lastchar == '/' or lastchar == sep: return a + b return a + sep + b if dirname is None: def dirname(a, sep=sep, mindirlen=mindirlen): for i in range(len(a) - 1, -1, -1): c = a[i] if c == '/' or c == sep: if i < mindirlen: return a[:i + 1] return a[:i] return '' if basename is None: def basename(p): i = p.rfind(sep) if i == -1: return p else: return p[i + len(sep):] def _listdir(dir, cache={}): # since this function is only used by caseOk, it's fine to cache the # results and avoid reading the whole contents of a directory each time # we just want to check the case of a filename. if not dir in cache: cache[dir] = listdir(dir) return cache[dir] _os_stat = stat _os_getcwd = getcwd _os_path_join = join _os_path_dirname = dirname _os_environ = environ _os_listdir = _listdir _os_path_basename = basename
0.000951
def _validate_tld_match(self, text, matched_tld, tld_pos): """ Validate TLD match - tells if at found position is really TLD. :param str text: text where we want to find URLs :param str matched_tld: matched TLD :param int tld_pos: position of matched TLD :return: True if match is valid, False otherwise :rtype: bool """ if tld_pos > len(text): return False right_tld_pos = tld_pos + len(matched_tld) if len(text) > right_tld_pos: if text[right_tld_pos] in self._after_tld_chars: if tld_pos > 0 and text[tld_pos - 1] \ not in self._stop_chars_left: return True else: if tld_pos > 0 and text[tld_pos - 1] not in self._stop_chars_left: return True return False
0.002281
def make_new_semver(current_semver, all_triggers, **overrides): """Defines how to increment semver based on which significant figure is triggered""" new_semver = {} bumped = False for sig_fig in SemVerSigFig: # iterate sig figs in order of significance value = getattr(current_semver, sig_fig) override = overrides.get(sig_fig) if override is not None: new_semver[sig_fig] = override if int(override) > int(value): bumped = True elif bumped: new_semver[sig_fig] = "0" elif sig_fig in all_triggers: new_semver[sig_fig] = str(int(value) + 1) bumped = True else: new_semver[sig_fig] = value return SemVer(**new_semver)
0.002591
def loglevel(level): ''' Set the debug level which limits the severity of log messages printed by ``pf(4)``. level: Log level. Should be one of the following: emerg, alert, crit, err, warning, notice, info or debug (OpenBSD); or none, urgent, misc, loud (FreeBSD). CLI example: .. code-block:: bash salt '*' pf.loglevel emerg ''' # There's no way to getting the previous loglevel so imply we've # always made a change. ret = {'changes': True} myos = __grains__['os'] if myos == 'FreeBSD': all_levels = ['none', 'urgent', 'misc', 'loud'] else: all_levels = ['emerg', 'alert', 'crit', 'err', 'warning', 'notice', 'info', 'debug'] if level not in all_levels: raise SaltInvocationError('Unknown loglevel: {0}'.format(level)) result = __salt__['cmd.run_all']('pfctl -x {0}'.format(level), output_loglevel='trace', python_shell=False) if result['retcode'] != 0: raise CommandExecutionError( 'Problem encountered setting loglevel', info={'errors': [result['stderr']], 'changes': False} ) return ret
0.003265
def apply_status_code(self, status_code): """ When a trace entity is generated under the http context, the status code will affect this entity's fault/error/throttle flags. Flip these flags based on status code. """ self._check_ended() if not status_code: return if status_code >= 500: self.add_fault_flag() elif status_code == 429: self.add_throttle_flag() self.add_error_flag() elif status_code >= 400: self.add_error_flag()
0.003527
def _spin(coordinates, theta, around): """Rotate a set of coordinates in place around an arbitrary vector. Parameters ---------- coordinates : np.ndarray, shape=(n,3), dtype=float The coordinates being spun. theta : float The angle by which to spin the coordinates, in radians. around : np.ndarray, shape=(3,), dtype=float The axis about which to spin the coordinates. """ around = np.asarray(around).reshape(3) if np.array_equal(around, np.zeros(3)): raise ValueError('Cannot spin around a zero vector') center_pos = np.mean(coordinates, axis=0) coordinates -= center_pos coordinates = _rotate(coordinates, theta, around) coordinates += center_pos return coordinates
0.001321
def follow( # type: ignore self, users, strategies, total_assets=10000, initial_assets=None, adjust_sell=False, track_interval=10, trade_cmd_expire_seconds=120, cmd_cache=True, slippage: float = 0.0): """跟踪 joinquant 对应的模拟交易,支持多用户多策略 :param users: 支持 easytrader 的用户对象,支持使用 [] 指定多个用户 :param strategies: 雪球组合名, 类似 ZH123450 :param total_assets: 雪球组合对应的总资产, 格式 [组合1对应资金, 组合2对应资金] 若 strategies=['ZH000001', 'ZH000002'], 设置 total_assets=[10000, 10000], 则表明每个组合对应的资产为 1w 元 假设组合 ZH000001 加仓 价格为 p 股票 A 10%, 则对应的交易指令为 买入 股票 A 价格 P 股数 1w * 10% / p 并按 100 取整 :param adjust_sell: 是否根据用户的实际持仓数调整卖出股票数量, 当卖出股票数大于实际持仓数时,调整为实际持仓数。目前仅在银河客户端测试通过。 当 users 为多个时,根据第一个 user 的持仓数决定 :type adjust_sell: bool :param initial_assets: 雪球组合对应的初始资产, 格式 [ 组合1对应资金, 组合2对应资金 ] 总资产由 初始资产 × 组合净值 算得, total_assets 会覆盖此参数 :param track_interval: 轮训模拟交易时间,单位为秒 :param trade_cmd_expire_seconds: 交易指令过期时间, 单位为秒 :param cmd_cache: 是否读取存储历史执行过的指令,防止重启时重复执行已经交易过的指令 :param slippage: 滑点,0.0 表示无滑点, 0.05 表示滑点为 5% """ super().follow(users=users, strategies=strategies, track_interval=track_interval, trade_cmd_expire_seconds=trade_cmd_expire_seconds, cmd_cache=cmd_cache, slippage=slippage) self._adjust_sell = adjust_sell self._users = self.warp_list(users) strategies = self.warp_list(strategies) total_assets = self.warp_list(total_assets) initial_assets = self.warp_list(initial_assets) if cmd_cache: self.load_expired_cmd_cache() self.start_trader_thread(self._users, trade_cmd_expire_seconds) for strategy_url, strategy_total_assets, strategy_initial_assets in zip( strategies, total_assets, initial_assets): assets = self.calculate_assets(strategy_url, strategy_total_assets, strategy_initial_assets) try: strategy_id = self.extract_strategy_id(strategy_url) strategy_name = self.extract_strategy_name(strategy_url) except: log.error('抽取交易id和策略名失败, 无效模拟交易url: %s', strategy_url) raise strategy_worker = Thread( target=self.track_strategy_worker, args=[strategy_id, strategy_name], kwargs={ 'interval': track_interval, 'assets': assets }) strategy_worker.start() log.info('开始跟踪策略: %s', strategy_name)
0.001387
def _create_dynamic_subplots(self, key, items, ranges, **init_kwargs): """ Handles the creation of new subplots when a DynamicMap returns a changing set of elements in an Overlay. """ length = self.style_grouping group_fn = lambda x: (x.type.__name__, x.last.group, x.last.label) for i, (k, obj) in enumerate(items): vmap = self.hmap.clone([(key, obj)]) self.map_lengths[group_fn(vmap)[:length]] += 1 subplot = self._create_subplot(k, vmap, [], ranges) if subplot is None: continue self.subplots[k] = subplot subplot.initialize_plot(ranges, **init_kwargs) subplot.update_frame(key, ranges, element=obj) self.dynamic_subplots.append(subplot)
0.003713
def windowed_divergence(pos, ac1, ac2, size=None, start=None, stop=None, step=None, windows=None, is_accessible=None, fill=np.nan): """Estimate nucleotide divergence between two populations in windows over a single chromosome/contig. Parameters ---------- pos : array_like, int, shape (n_items,) Variant positions, using 1-based coordinates, in ascending order. ac1 : array_like, int, shape (n_variants, n_alleles) Allele counts array for the first population. ac2 : array_like, int, shape (n_variants, n_alleles) Allele counts array for the second population. size : int, optional The window size (number of bases). start : int, optional The position at which to start (1-based). stop : int, optional The position at which to stop (1-based). step : int, optional The distance between start positions of windows. If not given, defaults to the window size, i.e., non-overlapping windows. windows : array_like, int, shape (n_windows, 2), optional Manually specify the windows to use as a sequence of (window_start, window_stop) positions, using 1-based coordinates. Overrides the size/start/stop/step parameters. is_accessible : array_like, bool, shape (len(contig),), optional Boolean array indicating accessibility status for all positions in the chromosome/contig. fill : object, optional The value to use where a window is completely inaccessible. Returns ------- Dxy : ndarray, float, shape (n_windows,) Nucleotide divergence in each window. windows : ndarray, int, shape (n_windows, 2) The windows used, as an array of (window_start, window_stop) positions, using 1-based coordinates. n_bases : ndarray, int, shape (n_windows,) Number of (accessible) bases in each window. counts : ndarray, int, shape (n_windows,) Number of variants in each window. Examples -------- Simplest case, two haplotypes in each population:: >>> import allel >>> h = allel.HaplotypeArray([[0, 0, 0, 0], ... [0, 0, 0, 1], ... [0, 0, 1, 1], ... [0, 1, 1, 1], ... [1, 1, 1, 1], ... [0, 0, 1, 2], ... [0, 1, 1, 2], ... [0, 1, -1, -1], ... [-1, -1, -1, -1]]) >>> ac1 = h.count_alleles(subpop=[0, 1]) >>> ac2 = h.count_alleles(subpop=[2, 3]) >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27] >>> dxy, windows, n_bases, counts = windowed_divergence( ... pos, ac1, ac2, size=10, start=1, stop=31 ... ) >>> dxy array([0.15 , 0.225, 0. ]) >>> windows array([[ 1, 10], [11, 20], [21, 31]]) >>> n_bases array([10, 10, 11]) >>> counts array([3, 4, 2]) """ # check inputs pos = SortedIndex(pos, copy=False) is_accessible = asarray_ndim(is_accessible, 1, allow_none=True) # calculate mean pairwise divergence mpd = mean_pairwise_difference_between(ac1, ac2, fill=0) # sum in windows mpd_sum, windows, counts = windowed_statistic( pos, values=mpd, statistic=np.sum, size=size, start=start, stop=stop, step=step, windows=windows, fill=0 ) # calculate value per base dxy, n_bases = per_base(mpd_sum, windows, is_accessible=is_accessible, fill=fill) return dxy, windows, n_bases, counts
0.000265
def labelclear(device, force=False): ''' .. versionadded:: 2018.3.0 Removes ZFS label information from the specified device device : string Device name; must not be part of an active pool configuration. force : boolean Treat exported or foreign devices as inactive CLI Example: .. code-block:: bash salt '*' zpool.labelclear /path/to/dev ''' ## clear label for all specified device res = __salt__['cmd.run_all']( __utils__['zfs.zpool_command']( command='labelclear', flags=['-f'] if force else None, target=device, ), python_shell=False, ) return __utils__['zfs.parse_command_result'](res, 'labelcleared')
0.002685
def unit_targeting(w, k): """Unit-level magnitude pruning.""" k = tf.to_int32(k) w_shape = shape_list(w) size = tf.to_int32(tf.reduce_prod(w_shape[:-1])) w = tf.reshape(w, [size, w_shape[-1]]) norm = tf.norm(w, axis=0) thres = tf.contrib.framework.sort(norm, axis=0)[k] mask = to_float(thres >= norm)[None, :] mask = tf.tile(mask, [size, 1]) return tf.reshape(mask, w_shape)
0.027778
def match(self, tag):
    """Match."""
    return CSSMatch(self.selectors, tag, self.namespaces, self.flags).match(tag)
0.023438
def from_tibiadata(cls, content): """Builds a guild object from a TibiaData character response. Parameters ---------- content: :class:`str` The json string from the TibiaData response. Returns ------- :class:`Guild` The guild contained in the description or ``None``. Raises ------ InvalidContent If content is not a JSON response of a guild's page. """ json_content = parse_json(content) guild = cls() try: guild_obj = json_content["guild"] if "error" in guild_obj: return None guild_data = guild_obj["data"] guild.name = guild_data["name"] guild.world = guild_data["world"] guild.logo_url = guild_data["guildlogo"] guild.description = guild_data["description"] guild.founded = parse_tibiadata_date(guild_data["founded"]) guild.open_applications = guild_data["application"] except KeyError: raise InvalidContent("content does not match a guild json from TibiaData.") guild.homepage = guild_data.get("homepage") guild.active = not guild_data.get("formation", False) if isinstance(guild_data["disbanded"], dict): guild.disband_date = parse_tibiadata_date(guild_data["disbanded"]["date"]) guild.disband_condition = disband_tibadata_regex.search(guild_data["disbanded"]["notification"]).group(1) for rank in guild_obj["members"]: rank_name = rank["rank_title"] for member in rank["characters"]: guild.members.append(GuildMember(member["name"], rank_name, member["nick"] or None, member["level"], member["vocation"], joined=parse_tibiadata_date(member["joined"]), online=member["status"] == "online")) for invited in guild_obj["invited"]: guild.invites.append(GuildInvite(invited["name"], parse_tibiadata_date(invited["invited"]))) if isinstance(guild_data["guildhall"], dict): gh = guild_data["guildhall"] guild.guildhall = GuildHouse(gh["name"], gh["world"], guild.members[0].name, parse_tibiadata_date(gh["paid"])) return guild
0.004462
def add_prop_descriptor_to_class(self, class_name, new_class_attrs, names_with_refs, container_names, dataspecs): ''' ``MetaHasProps`` calls this during class creation as it iterates over properties to add, to update its registry of new properties. The parameters passed in are mutable and this function is expected to update them accordingly. Args: class_name (str) : name of the class this descriptor is added to new_class_attrs(dict[str, PropertyDescriptor]) : mapping of attribute names to PropertyDescriptor that this function will update names_with_refs (set[str]) : set of all property names for properties that also have references, that this function will update container_names (set[str]) : set of all property names for properties that are container props, that this function will update dataspecs(dict[str, PropertyDescriptor]) : mapping of attribute names to PropertyDescriptor for DataSpec properties that this function will update Return: None ''' from .bases import ContainerProperty from .dataspec import DataSpec name = self.name if name in new_class_attrs: raise RuntimeError("Two property generators both created %s.%s" % (class_name, name)) new_class_attrs[name] = self if self.has_ref: names_with_refs.add(name) if isinstance(self, BasicPropertyDescriptor): if isinstance(self.property, ContainerProperty): container_names.add(name) if isinstance(self.property, DataSpec): dataspecs[name] = self
0.002179
def permission_required(function=None, permission=None, object_id=None, redirect_field_name=REDIRECT_FIELD_NAME, login_url=None): """ Decorator for views that checks that the user is logged in, redirecting to the log-in page if necessary. """ actual_decorator = request_passes_test( lambda r: has_permission(r.session.get('user_permissions'), permission, object_id), # noqa login_url=login_url, redirect_field_name=redirect_field_name ) if function: return actual_decorator(function) return actual_decorator
0.001613
def get_undecorated_callback(self):
    ''' Return the callback. If the callback is a decorated function, try to
        recover the original function. '''
    func = self.callback
    func = getattr(func, '__func__' if py3k else 'im_func', func)
    closure_attr = '__closure__' if py3k else 'func_closure'
    while hasattr(func, closure_attr) and getattr(func, closure_attr):
        func = getattr(func, closure_attr)[0].cell_contents
    return func
0.006173
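# Self-contained sketch of the closure-walking idea used above, outside of the
# framework. The decorator deliberately omits functools.wraps, so the original
# function is only reachable through the wrapper's closure cells.
def logged(func):
    def wrapper(*args, **kwargs):
        print('calling', func.__name__)
        return func(*args, **kwargs)
    return wrapper

@logged
def add(a, b):
    return a + b

def undecorate(func):
    # Follow the first closure cell until a function with no closure is found.
    while getattr(func, '__closure__', None):
        func = func.__closure__[0].cell_contents
    return func

print(undecorate(add).__name__)  # -> 'add'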
def pprint_diff(first, second, first_name='first', second_name='second'): """Compare the pprint representation of two objects and yield diff lines.""" return difflib.unified_diff( pprint.pformat(first).splitlines(), pprint.pformat(second).splitlines(), fromfile=first_name, tofile=second_name, lineterm='')
0.009146
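# Usage sketch for pprint_diff() above: it yields unified-diff lines, so join
# them to display. Assumes difflib and pprint are imported in the defining
# module, as the function body requires.
old = {'name': 'alpha', 'tags': ['a', 'b']}
new = {'name': 'alpha', 'tags': ['a', 'b', 'c']}
print('\n'.join(pprint_diff(old, new, first_name='old', second_name='new')))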
def receive_one_message(self):
        """Handle a single request.

        Polls the transport for a new message.

        After a new message has arrived :py:meth:`_spawn` is called with a handler
        function and arguments to handle the request.

        The handler function will try to decode the message using the supplied
        protocol; if that fails, an error response will be sent. After decoding
        the message, the dispatcher will be asked to handle the resulting
        request and the return value (either an error or a result) will be sent
        back to the client using the transport.
        """
        context, message = self.transport.receive_message()
        if callable(self.trace):
            self.trace('-->', context, message)

        # assuming protocol is threadsafe and dispatcher is threadsafe, as
        # long as it's immutable

        def handle_message(context, message):
            try:
                request = self.protocol.parse_request(message)
            except tinyrpc.exc.RPCError as e:
                response = e.error_respond()
            else:
                response = self.dispatcher.dispatch(request)

            # send reply
            if response is not None:
                result = response.serialize()
                if callable(self.trace):
                    self.trace('<--', context, result)
                self.transport.send_reply(context, result)

        self._spawn(handle_message, context, message)
0.002013
def run(data, samples, force, ipyclient):
    """
    Check all samples requested have been clustered (state=6), make output
    directory, then create the requested outfiles. Excluded samples are already
    removed from samples.
    """
    ## prepare dirs
    data.dirs.outfiles = os.path.join(data.dirs.project, data.name+"_outfiles")
    if not os.path.exists(data.dirs.outfiles):
        os.mkdir(data.dirs.outfiles)

    ## make the snps/filters data base, fills the dups and inds filters
    ## and fills the splits locations
    data.database = os.path.join(data.dirs.outfiles, data.name+".hdf5")
    init_arrays(data)

    ## Apply filters to supercatg and superhdf5 with selected samples
    ## and fill the filters and edge arrays.
    filter_all_clusters(data, samples, ipyclient)

    ## Everything needed is in the now filled h5 database. Filters were applied
    ## with 'samples' taken into account. Now we create the loci file (default)
    ## output and build a stats file.
    data.outfiles.loci = os.path.join(data.dirs.outfiles, data.name+".loci")
    data.outfiles.alleles = os.path.join(data.dirs.outfiles, data.name+".alleles.loci")
    make_loci_and_stats(data, samples, ipyclient)

    ## OPTIONAL OUTPUTS:
    output_formats = data.paramsdict["output_formats"]

    ## held separate from *output_formats because it's big and parallelized
    if any([x in output_formats for x in ["v", "V"]]):
        full = "V" in output_formats
        try:
            make_vcf(data, samples, ipyclient, full=full)
        except IPyradWarningExit as inst:
            ## Something went wrong with the vcf build. Sometimes this is simply
            ## a memory issue, so trap the exception and allow it to try building
            ## the other output formats.
            print("  Error building vcf. See ipyrad_log.txt for details.")
            LOGGER.error(inst)

    ## make other array-based formats, recalcs keeps and arrays
    make_outfiles(data, samples, output_formats, ipyclient)

    ## print friendly message
    shortpath = data.dirs.outfiles.replace(os.path.expanduser("~"), "~")
    print("{}Outfiles written to: {}\n".format(data._spacer, shortpath))
0.007856
def load_or_create_client_key(pem_path): """ Load the client key from a directory, creating it if it does not exist. .. note:: The client key that will be created will be a 2048-bit RSA key. :type pem_path: ``twisted.python.filepath.FilePath`` :param pem_path: The certificate directory to use, as with the endpoint. """ acme_key_file = pem_path.asTextMode().child(u'client.key') if acme_key_file.exists(): key = serialization.load_pem_private_key( acme_key_file.getContent(), password=None, backend=default_backend()) else: key = generate_private_key(u'rsa') acme_key_file.setContent( key.private_bytes( encoding=serialization.Encoding.PEM, format=serialization.PrivateFormat.TraditionalOpenSSL, encryption_algorithm=serialization.NoEncryption())) return JWKRSA(key=key)
0.001063
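# Hedged usage sketch for load_or_create_client_key() above. The certificate
# directory path is hypothetical; the helper's own imports (cryptography's
# serialization helpers, generate_private_key, JWKRSA) are assumed to exist in
# its defining module.
from twisted.python.filepath import FilePath

pem_dir = FilePath('/var/lib/acme')  # hypothetical certificate directory
client_key = load_or_create_client_key(pem_dir)  # josepy JWKRSA wrapping the RSA client key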
def proxy_uri(self):
        """
        Get the Proxy-Uri option of a request.

        :return: the Proxy-Uri value or None if not specified by the request
        :rtype: String
        """
        for option in self.options:
            if option.number == defines.OptionRegistry.PROXY_URI.number:
                return option.value
        return None
0.005571
def download_file_helper(url, input_path):
    """
    Manages the chunked downloading of a file given a URL
    """
    r = requests.get(url, stream=True)
    if r.status_code != 200:
        cli_log.error("Failed to download file: %s" % r.json()["message"])
        return  # bail out instead of writing an error payload to disk

    local_full_path = get_download_dest(input_path, r.url)
    original_filename = os.path.split(local_full_path)[-1]
    with open(local_full_path, "wb") as f:
        click.echo("Downloading {}".format(original_filename), err=True)
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
                f.flush()

    pprint("Successfully downloaded %s to %s" % (original_filename, local_full_path), True)
0.002649
def parse_datetime(value): """Parses a string and return a datetime.datetime. This function supports time zone offsets. When the input contains one, the output uses a timezone with a fixed offset from UTC. Raises ValueError if the input is well formatted but not a valid datetime. Returns None if the input isn't well formatted. """ match = datetime_re.match(value) if match: kw = match.groupdict() if kw['microsecond']: kw['microsecond'] = kw['microsecond'].ljust(6, '0') tzinfo = kw.pop('tzinfo') if tzinfo == 'Z': tzinfo = utc elif tzinfo is not None: offset_mins = int(tzinfo[-2:]) if len(tzinfo) > 3 else 0 offset = 60 * int(tzinfo[1:3]) + offset_mins if tzinfo[0] == '-': offset = -offset tzinfo = get_fixed_timezone(offset) kw = {k: int(v) for k, v in kw.items() if v is not None} kw['tzinfo'] = tzinfo return datetime.datetime(**kw)
0.000975
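# Usage sketch for the parser above. In Django it is exposed as
# django.utils.dateparse.parse_datetime; that import path is stated here as an
# assumption about where this snippet comes from.
from django.utils.dateparse import parse_datetime

dt = parse_datetime('2019-05-01T12:30:45.5+02:00')
print(dt)              # 2019-05-01 12:30:45.500000+02:00
print(dt.utcoffset())  # 2:00:00
print(parse_datetime('not a date'))  # None (malformed input is not an error)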
def __create_entry(self, entrytype, data, index, ttl=None):
        '''
        Create an entry of any type except HS_ADMIN.

        :param entrytype: The type of entry to create, e.g. 'URL' or 'checksum'
            or ... Note: For entries of type 'HS_ADMIN', please use
            __create_admin_entry(). For type '10320/LOC', please use
            'add_additional_URL()'
        :param data: The actual value for the entry. Can be a simple string,
            e.g. "example", or a dict {"format":"string", "value":"example"}.
        :param index: The integer to be used as index.
        :param ttl: Optional. If not set, the library's default is set. If
            there is no default, it is not set by this library, so Handle
            System sets it.
        :return: The entry as a dict.
        '''

        if entrytype == 'HS_ADMIN':
            op = 'creating HS_ADMIN entry'
            msg = 'This method can not create HS_ADMIN entries.'
            raise IllegalOperationException(operation=op, msg=msg)

        entry = {'index':index, 'type':entrytype, 'data':data}

        if ttl is not None:
            entry['ttl'] = ttl

        return entry
0.004266
def on_save(self, event): '''called on save button''' dlg = wx.FileDialog(None, self.settings.get_title(), '', "", '*.*', wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) if dlg.ShowModal() == wx.ID_OK: self.settings.save(dlg.GetPath())
0.006944
def connect(state, host, for_fact=None):
    '''
    Connect to a single host. Returns the SSH client if successful. Stateless
    by design so can be run in parallel.
    '''

    kwargs = _make_paramiko_kwargs(state, host)
    logger.debug('Connecting to: {0} ({1})'.format(host.name, kwargs))

    # Hostname can be provided via SSH config (alias), data, or the host's name
    hostname = kwargs.pop(
        'hostname',
        host.data.ssh_hostname or host.name,
    )

    try:
        # Create new client & connect to the host
        client = SSHClient()
        client.set_missing_host_key_policy(MissingHostKeyPolicy())
        client.connect(hostname, **kwargs)

        # Enable SSH forwarding
        session = client.get_transport().open_session()
        AgentRequestHandler(session)

        # Log
        log_message = '{0}{1}'.format(
            host.print_prefix,
            click.style('Connected', 'green'),
        )

        if for_fact:
            log_message = '{0}{1}'.format(
                log_message,
                ' (for {0} fact)'.format(for_fact),
            )

        logger.info(log_message)

        return client

    except AuthenticationException:
        auth_kwargs = {}

        for key, value in kwargs.items():
            if key in ('username', 'password'):
                auth_kwargs[key] = value
                continue

            if key == 'pkey' and value:
                auth_kwargs['key'] = host.data.ssh_key

        auth_args = ', '.join(
            '{0}={1}'.format(key, value)
            for key, value in auth_kwargs.items()
        )

        _log_connect_error(host, 'Authentication error', auth_args)

    except SSHException as e:
        _log_connect_error(host, 'SSH error', e)

    except gaierror:
        _log_connect_error(host, 'Could not resolve hostname', hostname)

    except socket_error as e:
        _log_connect_error(host, 'Could not connect', e)

    except EOFError as e:
        _log_connect_error(host, 'EOF error', e)
0.000497
def _from_dict(cls, _dict): """Initialize a BodyCells object from a json dictionary.""" args = {} if 'cell_id' in _dict: args['cell_id'] = _dict.get('cell_id') if 'location' in _dict: args['location'] = Location._from_dict(_dict.get('location')) if 'text' in _dict: args['text'] = _dict.get('text') if 'row_index_begin' in _dict: args['row_index_begin'] = _dict.get('row_index_begin') if 'row_index_end' in _dict: args['row_index_end'] = _dict.get('row_index_end') if 'column_index_begin' in _dict: args['column_index_begin'] = _dict.get('column_index_begin') if 'column_index_end' in _dict: args['column_index_end'] = _dict.get('column_index_end') if 'row_header_ids' in _dict: args['row_header_ids'] = [ RowHeaderIds._from_dict(x) for x in (_dict.get('row_header_ids')) ] if 'row_header_texts' in _dict: args['row_header_texts'] = [ RowHeaderTexts._from_dict(x) for x in (_dict.get('row_header_texts')) ] if 'row_header_texts_normalized' in _dict: args['row_header_texts_normalized'] = [ RowHeaderTextsNormalized._from_dict(x) for x in (_dict.get('row_header_texts_normalized')) ] if 'column_header_ids' in _dict: args['column_header_ids'] = [ ColumnHeaderIds._from_dict(x) for x in (_dict.get('column_header_ids')) ] if 'column_header_texts' in _dict: args['column_header_texts'] = [ ColumnHeaderTexts._from_dict(x) for x in (_dict.get('column_header_texts')) ] if 'column_header_texts_normalized' in _dict: args['column_header_texts_normalized'] = [ ColumnHeaderTextsNormalized._from_dict(x) for x in (_dict.get('column_header_texts_normalized')) ] if 'attributes' in _dict: args['attributes'] = [ Attribute._from_dict(x) for x in (_dict.get('attributes')) ] return cls(**args)
0.000878
def download( state, host, hostname, filename, local_filename=None, force=False, ssh_keyscan=False, ssh_user=None, ): ''' Download files from other servers using ``scp``. + hostname: hostname to upload to + filename: file to download + local_filename: where to download the file to (defaults to ``filename``) + force: always download the file, even if present locally + ssh_keyscan: execute ``ssh.keyscan`` before uploading the file + ssh_user: connect with this user ''' local_filename = local_filename or filename # Get local file info local_file_info = host.fact.file(local_filename) # Local file exists but isn't a file? if local_file_info is False: raise OperationError( 'Local destination {0} already exists and is not a file'.format( local_filename, ), ) # If the local file exists and we're not forcing a re-download, no-op if local_file_info and not force: return # Figure out where we're connecting (host or user@host) connection_target = hostname if ssh_user: connection_target = '@'.join((ssh_user, hostname)) if ssh_keyscan: yield keyscan(state, host, hostname) # Download the file with scp yield 'scp {0}:{1} {2}'.format(connection_target, filename, local_filename)
0.000732
def map(self, callback: Callable[[T], U]) -> 'Option[U]': """ Applies the ``callback`` with the contained value as its argument or returns :py:data:`NONE`. Args: callback: The callback to apply to the contained value. Returns: The ``callback`` result wrapped in an :class:`Option` if the contained value is ``Some``, otherwise :py:data:`NONE` Examples: >>> Some(10).map(lambda x: x * x) Some(100) >>> NONE.map(lambda x: x * x) NONE """ return self._type.Some(callback(self._val)) if self._is_some else cast('Option[U]', NONE)
0.004438
def render_category(category='', template=None): """ Render a category page. Arguments: category -- The category to render template -- The template to render it with """ # pylint:disable=too-many-return-statements # See if this is an aliased path redir = get_redirect() if redir: return redir # Forbidden template types if template and template.startswith('_'): raise http_error.Forbidden("Template is private") if template in ['entry', 'error']: raise http_error.BadRequest("Invalid view requested") if category: # See if there's any entries for the view... if not orm.select(e for e in model.Entry if e.category == category or e.category.startswith(category + '/')): raise http_error.NotFound("No such category") if not template: template = Category(category).get('Index-Template') or 'index' tmpl = map_template(category, template) if not tmpl: # this might actually be a malformed category URL test_path = '/'.join((category, template)) if category else template logger.debug("Checking for malformed category %s", test_path) record = orm.select( e for e in model.Entry if e.category == test_path).exists() if record: return redirect(url_for('category', category=test_path, **request.args)) # nope, we just don't know what this is raise http_error.NotFound("No such view") view_spec = view.parse_view_spec(request.args) view_spec['category'] = category view_obj = view.View(view_spec) rendered, etag = render_publ_template( tmpl, _url_root=request.url_root, category=Category(category), view=view_obj) if request.if_none_match.contains(etag): return 'Not modified', 304 return rendered, {'Content-Type': mime_type(tmpl), 'ETag': etag}
0.001017
def dict_given_run_array(samples, thread_min_max):
    """
    Converts an array of information about samples back into a nested sampling
    run dictionary (see data_processing module docstring for more details).

    N.B. the output dict only contains the following keys: 'logl',
    'thread_label', 'nlive_array', 'theta'. Any other keys giving additional
    information about the run output cannot be reproduced from the function
    arguments, and are therefore omitted.

    Parameters
    ----------
    samples: numpy array
        Numpy array containing columns
        [logl, thread label, change in nlive at sample, (thetas)]
        with each row representing a single sample.
    thread_min_max: numpy array, optional
        2d array with a row for each thread containing the likelihoods at
        which it begins and ends.
        Needed to calculate nlive_array (otherwise this is set to None).

    Returns
    -------
    ns_run: dict
        Nested sampling run dict (see data_processing module docstring for more
        details).
    """
    ns_run = {'logl': samples[:, 0],
              'thread_labels': samples[:, 1],
              'thread_min_max': thread_min_max,
              'theta': samples[:, 3:]}
    if np.all(~np.isnan(ns_run['thread_labels'])):
        ns_run['thread_labels'] = ns_run['thread_labels'].astype(int)
        assert np.array_equal(samples[:, 1], ns_run['thread_labels']), ((
            'Casting thread labels from samples array to int has changed '
            'their values!\nsamples[:, 1]={}\nthread_labels={}').format(
                samples[:, 1], ns_run['thread_labels']))
    nlive_0 = (thread_min_max[:, 0] <= ns_run['logl'].min()).sum()
    assert nlive_0 > 0, 'nlive_0={}'.format(nlive_0)
    nlive_array = np.zeros(samples.shape[0]) + nlive_0
    nlive_array[1:] += np.cumsum(samples[:-1, 2])
    # Check if there are multiple threads starting on the first logl point
    dup_th_starts = (thread_min_max[:, 0] == ns_run['logl'].min()).sum()
    if dup_th_starts > 1:
        # In this case we approximate the true nlive (which we don't really
        # know) by making sure the array's final point is 1 and setting all
        # points with logl = logl.min() to have the same nlive
        nlive_array += (1 - nlive_array[-1])
        n_logl_min = (ns_run['logl'] == ns_run['logl'].min()).sum()
        nlive_array[:n_logl_min] = nlive_0
        warnings.warn((
            'duplicate starting logls: {} threads start at logl.min()={}, '
            'and {} points have logl=logl.min(). nlive_array may only be '
            'approximately correct.').format(
                dup_th_starts, ns_run['logl'].min(), n_logl_min), UserWarning)
    assert nlive_array.min() > 0, ((
        'nlive contains 0s or negative values. nlive_0={}'
        '\nnlive_array = {}\nthread_min_max={}').format(
            nlive_0, nlive_array, thread_min_max))
    assert nlive_array[-1] == 1, (
        'final point in nlive_array != 1.\nnlive_array = ' + str(nlive_array))
    ns_run['nlive_array'] = nlive_array
    return ns_run
0.000325
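# Minimal worked example for dict_given_run_array() above: a single-thread run
# with three samples and two parameters. Column layout of `samples` is
# [logl, thread label, change in nlive at sample, theta...]; the numbers are
# made up purely to illustrate the expected shapes.
import numpy as np

samples = np.array([
    [-10.0, 0, 0, 0.1, 0.2],
    [-5.0, 0, 0, 0.3, 0.4],
    [-1.0, 0, -1, 0.5, 0.6],
])
# One thread, starting before the lowest logl and ending at the highest.
thread_min_max = np.array([[-np.inf, -1.0]])

run = dict_given_run_array(samples, thread_min_max)
print(run['nlive_array'])  # -> [1. 1. 1.]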
def read(self,file): """Read DX field from file. dx = OpenDX.field.read(dxfile) The classid is discarded and replaced with the one from the file. """ DXfield = self p = DXParser(file) p.parse(DXfield)
0.01145
def get_T_yX(fname, y, y_in_Y, x_in_X, F_y): ''' compute the cell type specificity, defined as: :: T_yX = K_yX / K_YX = F_y * k_yX / sum_y(F_y*k_yX) ''' def _get_k_yX_mul_F_y(y, y_index, X_index): # Load data from json dictionary f = open(fname, 'r') data = json.load(f) f.close() #init variables k_yX = 0. for l in [str(key) for key in data['data'][y]['syn_dict'].keys()]: for x in x_in_X[X_index]: p_yxL = data['data'][y]['syn_dict'][l][x] / 100. k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron'] k_yX += p_yxL * k_yL return k_yX * F_y[y_index] #container T_yX = np.zeros((len(y), len(x_in_X))) #iterate over postsynaptic cell types for i, y_value in enumerate(y): #iterate over presynapse population inds for j in range(len(x_in_X)): k_yX_mul_F_y = 0 for k, yy in enumerate(sum(y_in_Y, [])): if y_value in yy: for yy_value in yy: ii = np.where(np.array(y) == yy_value)[0][0] k_yX_mul_F_y += _get_k_yX_mul_F_y(yy_value, ii, j) if k_yX_mul_F_y != 0: T_yX[i, j] = _get_k_yX_mul_F_y(y_value, i, j) / k_yX_mul_F_y return T_yX
0.014218
def handlePortfolio(self, msg): """ handle portfolio updates """ # log handler msg self.log_msg("portfolio", msg) # contract identifier contract_tuple = self.contract_to_tuple(msg.contract) contractString = self.contractString(contract_tuple) # try creating the contract self.registerContract(msg.contract) # new account? if msg.accountName not in self._portfolios.keys(): self._portfolios[msg.accountName] = {} self._portfolios[msg.accountName][contractString] = { "symbol": contractString, "position": int(msg.position), "marketPrice": float(msg.marketPrice), "marketValue": float(msg.marketValue), "averageCost": float(msg.averageCost), "unrealizedPNL": float(msg.unrealizedPNL), "realizedPNL": float(msg.realizedPNL), "totalPNL": float(msg.realizedPNL) + float(msg.unrealizedPNL), "account": msg.accountName } # fire callback self.ibCallback(caller="handlePortfolio", msg=msg)
0.001741
def p_do_loop(p): """ statement : do_start program_co label_loop | do_start label_loop | DO label_loop """ if len(p) == 4: q = make_block(p[2], p[3]) else: q = p[2] if p[1] == 'DO': gl.LOOPS.append(('DO',)) if q is None: warning(p.lineno(1), 'Infinite empty loop') # An infinite loop and no warnings p[0] = make_sentence('DO_LOOP', q) gl.LOOPS.pop()
0.002193
def processInput(self, dataAveraging=False, windowSize=None):
        """Sort the input data by the independent variable and optionally
        smooth the dependent variable.

        :param dataAveraging: averaging type passed through to
            :func:`averagingData`; if False, no smoothing is applied.
        :param windowSize: size of the sliding window used by
            :func:`averagingData` when ``dataAveraging`` is set.
        """
        self.dependentVar = numpy.array(self.dependentVarInput,
                                        dtype=numpy.float64
                                        )
        self.independentVar = numpy.array(self.independentVarInput,
                                          dtype=numpy.float64
                                          )

        sortMask = self.independentVar.argsort()
        self.dependentVar = self.dependentVar[sortMask]
        self.independentVar = self.independentVar[sortMask]

        if dataAveraging:
            averagedData = averagingData(self.dependentVar,
                                         windowSize=windowSize,
                                         averagingType=dataAveraging
                                         )
            averagedData = numpy.array(averagedData, dtype=numpy.float64)

            # Cast the trim sizes to int so they can be used as slice indices.
            missingNumHigh = int(numpy.floor((self.independentVar.size
                                              - averagedData.size
                                              ) / 2
                                             ))
            missingNumLow = ((self.independentVar.size - averagedData.size)
                             - missingNumHigh
                             )

            self.dependentVar = averagedData
            # "or None" keeps the slice valid when no trimming is needed on
            # the high side (missingNumHigh == 0).
            self.independentVar = self.independentVar[missingNumLow:
                                                      -missingNumHigh or None]
0.001252
def copy(self): """ Returns a copy of the distribution. Returns ------- LinearGaussianCPD: copy of the distribution Examples -------- >>> from pgmpy.factors.continuous import LinearGaussianCPD >>> cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3']) >>> copy_cpd = cpd.copy() >>> copy_cpd.variable 'Y' >>> copy_cpd.evidence ['X1', 'X2', 'X3'] """ copy_cpd = LinearGaussianCPD(self.variable, self.beta, self.variance, list(self.evidence)) return copy_cpd
0.00463
def cancel_orders(self, order_ids: List[str]) -> List[str]:
        """Cancel multiple orders by a list of IDs."""
        orders_to_cancel = order_ids
        self.log.debug(f'Canceling orders on {self.name}: ids={orders_to_cancel}')
        cancelled_orders = []

        if self.dry_run:  # Don't cancel if dry run
            self.log.warning(f'DRY RUN: Orders cancelled on {self.name}: {orders_to_cancel}')
            return orders_to_cancel

        try:
            # Iterate and cancel orders
            if self.has_batch_cancel:
                self._cancel_orders(orders_to_cancel)
                cancelled_orders.extend(orders_to_cancel)
                orders_to_cancel.clear()
            else:
                # Iterate over a copy so removing cancelled IDs from
                # orders_to_cancel doesn't skip elements mid-iteration.
                for order_id in list(orders_to_cancel):
                    self._cancel_order(order_id)
                    cancelled_orders.append(order_id)
                    orders_to_cancel.remove(order_id)
        except Exception as e:
            msg = f'Failed to cancel {len(orders_to_cancel)} orders on {self.name}: ids={orders_to_cancel}'
            raise self.exception(OrderNotFound, msg, e) from e

        self.log.info(f'Orders cancelled on {self.name}: ids={cancelled_orders}')
        return cancelled_orders
0.00489
def write(self, writer): """ Writes an XML representation of this node (including descendants) to the specified file-like object. :param writer: An :class:`XmlWriter` instance to write this node to """ multiline = bool(self._children) newline_start = multiline and not bool(self.data) writer.start(self.tagname, self.attrs, newline=newline_start) if self.data: writer.data(self.data, newline=bool(self._children)) for c in self._children: c.write(writer) writer.end(self.tagname, indent=multiline)
0.004983
def main(self): """ Run the necessary methods """ logging.info('Preparing metadata') # If this script is run as part of a pipeline, the metadata objects will already exist if not self.metadata: self.filer() else: self.objectprep() # Use the number of metadata objects to calculate the number of cores to use per sample in multi-threaded # methods with sequence calls to multi-threaded applications try: self.threads = int(self.cpus / len(self.metadata)) if self.cpus / len( self.metadata) > 1 else 1 except (TypeError, ZeroDivisionError): self.threads = self.cpus logging.info('Reading and formatting primers') self.primers() logging.info('Baiting .fastq files against primers') self.bait() logging.info('Baiting .fastq files against previously baited .fastq files') self.doublebait() logging.info('Assembling contigs from double-baited .fastq files') self.assemble_amplicon_spades() logging.info('Creating BLAST database') self.make_blastdb() logging.info('Running BLAST analyses') self.blastnthreads() logging.info('Parsing BLAST results') self.parseblast() logging.info('Clearing amplicon files from previous iterations') self.ampliconclear() logging.info('Creating reports') self.reporter()
0.004027
def setencoding(): """Set the string encoding used by the Unicode implementation. The default is 'ascii', but if you're willing to experiment, you can change this.""" encoding = "ascii" # Default value set by _PyUnicode_Init() if 0: # Enable to support locale aware default string encodings. import locale loc = locale.getdefaultlocale() if loc[1]: encoding = loc[1] if 0: # Enable to switch off string to Unicode coercion and implicit # Unicode to string conversion. encoding = "undefined" if encoding != "ascii": # On Non-Unicode builds this will raise an AttributeError... sys.setdefaultencoding(encoding)
0.002774
def check_metadata(self):
        """Ensure all required meta-data are supplied.

        Specifically: name, version, URL, author or maintainer

        Warns if any are missing.

        If enforce-email option is true, author and/or maintainer must
        specify an email.
        """
        metadata = self.distribution.metadata

        missing = []
        for attr in ("name", "version", "url"):
            if not (hasattr(metadata, attr) and getattr(metadata, attr)):
                missing.append(attr)

        # https://www.python.org/dev/peps/pep-0345/
        # author or maintainer must be specified
        # author is preferred; if identical, specify only author
        if not metadata.author and not metadata.maintainer:
            missing.append("author")
            if self.enforce_email:
                missing.append("author_email")
        else:
            # one or both of author or maintainer specified
            if (
                metadata.author
                and self.enforce_email
                and not metadata.author_email
            ):
                missing.append("author_email")
            if (
                metadata.maintainer
                and self.enforce_email
                and not metadata.maintainer_email
            ):
                missing.append("maintainer_email")
            if (
                metadata.author
                and metadata.maintainer
                and metadata.author == metadata.maintainer
            ):
                self.warn(
                    "Maintainer should be omitted if identical to Author.\n"
                    "See https://www.python.org/dev/peps/pep-0345/"
                    "#maintainer-email-optional"
                )
            if (
                metadata.author_email
                and metadata.maintainer_email
                and metadata.author_email == metadata.maintainer_email
            ):
                self.warn(
                    "Maintainer Email should be omitted if "
                    "identical to Author's.\n"
                    "See https://www.python.org/dev/peps/pep-0345/"
                    "#maintainer-email-optional"
                )

        if missing:
            self.warn("missing required meta-data: %s" % ", ".join(missing))
0.000871
def xzhdr(self, header, msgid_range=None):
        """XZHDR command.

        Args:
            header: The name of the header to retrieve (e.g. "subject").
            msgid_range: A message-id as a string, or an article number as an
                integer, or a tuple specifying a range of article numbers in
                the form (first, [last]) - if last is omitted then all articles
                after first are included. A msgid_range of None (the default)
                uses the current article.
        """
        args = header
        if msgid_range is not None:
            args += " " + utils.unparse_msgid_range(msgid_range)

        code, message = self.command("XZHDR", args)
        if code != 221:
            raise NNTPReplyError(code, message)

        return self.info(code, message, compressed=True)
0.002635