| text | score |
|---|---|
| 
	def add_package(self, package_item):
        """
        Adds a package to the ship request.
        
        @type package_item: WSDL object, type of RequestedPackageLineItem 
            WSDL object.
        @keyword package_item: A RequestedPackageLineItem, created by
            calling create_wsdl_object_of_type('RequestedPackageLineItem') on
            this ShipmentRequest object. See examples/create_shipment.py for
            more details.
        """
        self.RequestedShipment.RequestedPackageLineItems.append(package_item)
        package_weight = package_item.Weight.Value
        self.RequestedShipment.TotalWeight.Value += package_weight
        self.RequestedShipment.PackageCount += 1 | 0.005634 | 
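A minimal usage sketch with hypothetical values; `shipment` is assumed to be an already-configured ShipmentRequest instance, as in examples/create_shipment.py:

    # hypothetical example: build one package and add it to the request
    package = shipment.create_wsdl_object_of_type('RequestedPackageLineItem')
    package.Weight = shipment.create_wsdl_object_of_type('Weight')
    package.Weight.Value = 2.0      # illustrative weight
    package.Weight.Units = 'LB'
    shipment.add_package(package)   # updates TotalWeight and PackageCount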
| 
	def get_for_update(self, connection_name='DEFAULT', **kwargs):
        """
        http://docs.sqlalchemy.org/en/latest/orm/query.html?highlight=update#sqlalchemy.orm.query.Query.with_for_update  # noqa
        """
        if not kwargs:
            raise InvalidQueryError(
                "Can not execute a query without parameters")
        obj = self.pool.connections[connection_name].session.query(
            self._model).with_for_update(
                nowait=True, of=self._model).filter_by(**kwargs).first()
        if not obj:
            raise NotFoundError('Object not found')
        return obj | 0.003268 | 
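A usage sketch under assumptions (the repository instance `repo` and the `name` filter column are hypothetical):

    # locks the matching row (FOR UPDATE NOWAIT) or raises NotFoundError
    user = repo.get_for_update(connection_name='DEFAULT', name='alice')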
| 
	def create(name, host):
    '''Create a new virtual folder.
    \b
    NAME: Name of a virtual folder.
    HOST: Name of a virtual folder host in which the virtual folder will be created.
    '''
    with Session() as session:
        try:
            result = session.VFolder.create(name, host)
            print('Virtual folder "{0}" is created.'.format(result['name']))
        except Exception as e:
            print_error(e)
            sys.exit(1) | 0.004396 | 
| 
	def is_possible_temp(temp: str) -> bool:
    """
    Returns True if all characters are digits or 'M' (for minus)
    """
    for char in temp:
        if not (char.isdigit() or char == 'M'):
            return False
    return True | 0.00431 | 
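A few illustrative calls (METAR-style values, where a leading 'M' marks a negative temperature):

    print(is_possible_temp('M05'))   # True  ("minus 5")
    print(is_possible_temp('12'))    # True
    print(is_possible_temp('1.5'))   # False ('.' is neither a digit nor 'M')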
| 
	def initialize_wind_turbine_cluster(example_farm, example_farm_2):
    r"""
    Initializes a :class:`~.wind_turbine_cluster.WindTurbineCluster` object.
    Function shows how to initialize a WindTurbineCluster object. In this case
    the cluster only contains two wind farms.
    Parameters
    ----------
    example_farm : WindFarm
        WindFarm object.
    example_farm_2 : WindFarm
        WindFarm object with constant wind farm efficiency and coordinates.
    Returns
    -------
    WindTurbineCluster
    """
    # specification of cluster data
    example_cluster_data = {
        'name': 'example_cluster',
        'wind_farms': [example_farm, example_farm_2]}
    # initialize WindTurbineCluster object
    example_cluster = WindTurbineCluster(**example_cluster_data)
    return example_cluster | 0.001232 | 
| 
	def find_plugin(self, name):
        """Find a plugin named name"""
        suffix = ".py"
        if not self.class_name:
            suffix = ""
        for i in self._get_paths():
            path = os.path.join(i, "%s%s" % (name, suffix))
            if os.path.exists(path):
                return path
        return None | 0.006116 | 
| 
	def listTasks(self, opts={}, queryOpts={}):
        """
        Get information about all Koji tasks.
        Calls "listTasks" XML-RPC.
        :param dict opts: Eg. {'state': [task_states.OPEN]}
        :param dict queryOpts: Eg. {'order' : 'priority,create_time'}
        :returns: deferred that when fired returns a list of Task objects.
        """
        opts = dict(opts, decode=True)  # decode xmlrpc data in "request"; copy to avoid mutating the shared default dict
        data = yield self.call('listTasks', opts, queryOpts)
        tasks = []
        for tdata in data:
            task = Task.fromDict(tdata)
            task.connection = self
            tasks.append(task)
        defer.returnValue(tasks) | 0.003003 | 
| 
	def fit_texture(layer):
    """Fits a layer into a texture by scaling each axis to (0, 1).
    Does not preserve aspect ratio (TODO: make this an option).
    Args:
        layer (layer): the layer to scale
    Returns:
        texture: A texture.
    """
    x, y = layer
    x = (x - np.nanmin(x)) / (np.nanmax(x) - np.nanmin(x))
    y = (y - np.nanmin(y)) / (np.nanmax(y) - np.nanmin(y))
    return x, y | 0.002445 | 
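A small worked example (illustrative layer values) showing each axis being rescaled to (0, 1) independently:

    import numpy as np
    layer = (np.array([0.0, 5.0, 10.0]), np.array([5.0, 5.0, 15.0]))
    x, y = fit_texture(layer)
    print(x)  # [0.  0.5 1. ]
    print(y)  # [0.  0.  1. ]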
| 
	def stats(self, **attrs):
        """ Method for `Data Stream Stats <https://m2x.att.com/developer/documentation/v2/device#Data-Stream-Stats>`_ endpoint.
        :param attrs: Query parameters passed as keyword arguments. View M2X API Docs for listing of available parameters.
        :return: The API response, see M2X API docs for details
        :rtype: dict
        :raises: :class:`~requests.exceptions.HTTPError` if an error occurs when sending the HTTP request
        """
        return self.api.get(self.subpath('/stats'), data=attrs) | 0.009158 | 
| 
	def _get_message(
            self, target_message, indices, pending, timeout, condition):
        """
        Gets the next desired message under the desired condition.
        Args:
            target_message (`object`):
                The target message for which we want to find another
                response that applies based on `condition`.
            indices (`dict`):
                This dictionary remembers the last ID chosen for the
                input `target_message`.
            pending (`dict`):
                This dictionary remembers {msg_id: Future} to be set
                once `condition` is met.
            timeout (`int`):
                The timeout (in seconds) override to use for this operation.
            condition (`callable`):
                The condition callable that checks if an incoming
                message is a valid response.
        """
        start_time = time.time()
        target_id = self._get_message_id(target_message)
        # If there is no last-chosen ID, make sure to pick one *after*
        # the input message, since we don't want responses back in time
        if target_id not in indices:
            for i, incoming in enumerate(self._incoming):
                if incoming.id > target_id:
                    indices[target_id] = i
                    break
            else:
                indices[target_id] = len(self._incoming)
        # We will always return a future from here, even if the result
        # can be set immediately. Otherwise, needing to await only
        # sometimes is an annoying edge case (i.e. we would return
        # a `Message` but `get_response()` always `await`'s).
        future = self._client.loop.create_future()
        # If there are enough responses saved return the next one
        last_idx = indices[target_id]
        if last_idx < len(self._incoming):
            incoming = self._incoming[last_idx]
            if condition(incoming, target_id):
                indices[target_id] += 1
                future.set_result(incoming)
                return future
        # Otherwise the next incoming response will be the one to use
        pending[target_id] = future
        return self._get_result(future, start_time, timeout) | 0.000885 | 
| 
	def parse_mpi(s):
    """See https://tools.ietf.org/html/rfc4880#section-3.2 for details."""
    bits = s.readfmt('>H')
    blob = bytearray(s.read(int((bits + 7) // 8)))
    return sum(v << (8 * i) for i, v in enumerate(reversed(blob))) | 0.004219 | 
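A worked example of the MPI layout (RFC 4880, section 3.2): a 2-byte big-endian bit count, followed by ceil(bits / 8) value bytes. The byte sequence 00 09 01 FF therefore encodes the integer 511; the summation below reproduces the reconstruction step without the stream reader:

    blob = bytearray(b'\x01\xff')          # the (9 + 7) // 8 = 2 value bytes
    value = sum(v << (8 * i) for i, v in enumerate(reversed(blob)))
    print(value)                           # 511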
| 
	def mean(name, add, match):
    '''
    Accept a numeric value from the matched events and store a running average
    of the values in the given register. If the specified value is not numeric
    it will be skipped
    USAGE:
    .. code-block:: yaml
        foo:
          reg.mean:
            - add: data_field
            - match: my/custom/event
    '''
    ret = {'name': name,
           'changes': {},
           'comment': '',
           'result': True}
    if name not in __reg__:
        __reg__[name] = {}
        __reg__[name]['val'] = 0
        __reg__[name]['total'] = 0
        __reg__[name]['count'] = 0
    for event in __events__:
        try:
            event_data = event['data']['data']
        except KeyError:
            event_data = event['data']
        if salt.utils.stringutils.expr_match(event['tag'], match):
            if add in event_data:
                try:
                    comp = int(event_data[add])
                except (TypeError, ValueError):
                    continue
                __reg__[name]['total'] += comp
                __reg__[name]['count'] += 1
                __reg__[name]['val'] = __reg__[name]['total'] / __reg__[name]['count']
    return ret | 0.001684 | 
| 
	def from_iso(cls, iso):
        """Retrieve the first datacenter id associated to an ISO."""
        result = cls.list({'sort_by': 'id ASC'})
        dc_isos = {}
        for dc in result:
            if dc['iso'] not in dc_isos:
                dc_isos[dc['iso']] = dc['id']
        return dc_isos.get(iso) | 0.006494 | 
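A hypothetical call, assuming the enclosing class is exposed as `Datacenter` and 'LU' is an illustrative ISO code:

    dc_id = Datacenter.from_iso('LU')   # id of the first datacenter with that ISO, or None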
| 
	def set_iam_policy(self, policy, client=None):
        """Update the IAM policy for the bucket.
        See
        https://cloud.google.com/storage/docs/json_api/v1/buckets/setIamPolicy
        If :attr:`user_project` is set, bills the API request to that project.
        :type policy: :class:`google.api_core.iam.Policy`
        :param policy: policy instance used to update bucket's IAM policy.
        :type client: :class:`~google.cloud.storage.client.Client` or
                      ``NoneType``
        :param client: Optional. The client to use.  If not passed, falls back
                       to the ``client`` stored on the current bucket.
        :rtype: :class:`google.api_core.iam.Policy`
        :returns: the policy instance, based on the resource returned from
                  the ``setIamPolicy`` API request.
        """
        client = self._require_client(client)
        query_params = {}
        if self.user_project is not None:
            query_params["userProject"] = self.user_project
        resource = policy.to_api_repr()
        resource["resourceId"] = self.path
        info = client._connection.api_request(
            method="PUT",
            path="%s/iam" % (self.path,),
            query_params=query_params,
            data=resource,
            _target_object=None,
        )
        return Policy.from_api_repr(info) | 0.001456 | 
| 
	def confd_state_snmp_version_v3(self, **kwargs):
        """Auto Generated Code
        """
        config = ET.Element("config")
        confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
        snmp = ET.SubElement(confd_state, "snmp")
        version = ET.SubElement(snmp, "version")
        v3 = ET.SubElement(version, "v3")
        callback = kwargs.pop('callback', self._callback)
        return callback(config) | 0.006397 | 
| 
	def create_proxy_model(self, model):
        """Create a sort filter proxy model for the given model
        :param model: the model to wrap in a proxy
        :type model: :class:`QtGui.QAbstractItemModel`
        :returns: a new proxy model that can be used for sorting and filtering
        :rtype: :class:`QtGui.QAbstractItemModel`
        :raises: None
        """
        proxy = ReftrackSortFilterModel(self)
        proxy.setSourceModel(model)
        model.rowsInserted.connect(self.sort_model)
        return proxy | 0.00381 | 
| 
	def transform(self, maps):
        """This function transforms from chirp distance to luminosity distance,
        given the chirp mass.
        Parameters
        ----------
        maps : a mapping object
        Examples
        --------
        Convert a dict of numpy.array:
        >>> import numpy as np
        >>> from pycbc import transforms
        >>> t = transforms.ChirpDistanceToDistance()
        >>> t.transform({'chirp_distance': np.array([40.]), 'mchirp': np.array([1.2])})
        {'mchirp': array([ 1.2]), 'chirp_distance': array([ 40.]), 'distance': array([ 39.48595679])}
        Returns
        -------
        out : dict
            A dict with key as parameter name and value as numpy.array or float
            of transformed values.
        """
        out = {}
        out[parameters.distance] = \
                conversions.distance_from_chirp_distance_mchirp(
                                                    maps[parameters.chirp_distance],
                                                    maps[parameters.mchirp],
                                                    ref_mass=self.ref_mass)
        return self.format_output(maps, out) | 0.005093 | 
| 
	def create(self, name, *args, **kwargs):
        """
        Need to wrap the default call to handle exceptions.
        """
        try:
            return super(ImageMemberManager, self).create(name, *args, **kwargs)
        except Exception as e:
            if e.http_status == 403:
                raise exc.UnsharableImage("You cannot share a public image.")
            else:
                raise | 0.007426 | 
| 
	def _expand_parameters(circuits, run_config):
    """Verifies that there is a single common set of parameters shared between
    all circuits and all parameter binds in the run_config. Returns an expanded
    list of circuits (if parameterized) with all parameters bound, and a copy of
    the run_config with parameter_binds cleared.
    If neither the circuits nor the run_config specify parameters, the two are
    returned unmodified.
    Raises:
        QiskitError: if run_config parameters are not compatible with circuit parameters
    Returns:
        Tuple(List[QuantumCircuit], RunConfig):
          - List of input circuits expanded and with parameters bound
          - RunConfig with parameter_binds removed
    """
    parameter_binds = run_config.parameter_binds
    if parameter_binds or \
       any(circuit.parameters for circuit in circuits):
        all_bind_parameters = [bind.keys()
                               for bind in parameter_binds]
        all_circuit_parameters = [circuit.parameters for circuit in circuits]
        # Collect set of all unique parameters across all circuits and binds
        unique_parameters = set(param
                                for param_list in all_bind_parameters + all_circuit_parameters
                                for param in param_list)
        # Check that all parameters are common to all circuits and binds
        if not all_bind_parameters \
           or not all_circuit_parameters \
           or any(unique_parameters != bind_params for bind_params in all_bind_parameters) \
           or any(unique_parameters != parameters for parameters in all_circuit_parameters):
            raise QiskitError(
                ('Mismatch between run_config.parameter_binds and all circuit parameters. ' +
                 'Parameter binds: {} ' +
                 'Circuit parameters: {}').format(all_bind_parameters, all_circuit_parameters))
        circuits = [circuit.bind_parameters(binds)
                    for circuit in circuits
                    for binds in parameter_binds]
        # All parameters have been expanded and bound, so remove from run_config
        run_config = copy.deepcopy(run_config)
        run_config.parameter_binds = []
    return circuits, run_config | 0.00397 | 
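The expansion is a simple cross product: every circuit is repeated once per parameter bind. A stand-alone sketch of that semantics, with plain strings standing in for QuantumCircuit objects:

    circuits = ['qc_a', 'qc_b']                        # stand-ins for circuits
    parameter_binds = [{'theta': 0.1}, {'theta': 0.2}]
    expanded = [(c, b) for c in circuits for b in parameter_binds]
    print(len(expanded))                               # 4 bound circuits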
| 
	def assign_reads_to_otus(original_fasta,
                         filtered_fasta,
                         output_filepath=None,
                         log_name="assign_reads_to_otus.log",
                         perc_id_blast=0.97,
                         global_alignment=True,
                         HALT_EXEC=False,
                         save_intermediate_files=False,
                         remove_usearch_logs=False,
                         working_dir=None):
    """ Uses original fasta file, blasts to assign reads to filtered fasta
    original_fasta = filepath to original query fasta
    filtered_fasta = filepath to enumerated, filtered fasta
    output_filepath = output path to clusters (uc) file
    log_name = string specifying output log name
    perc_id_blast = percent ID for blasting original seqs against filtered set
    usersort = Enable if input fasta not sorted by length purposefully, lest
     usearch will raise an error.  In post chimera checked sequences, the seqs
     are sorted by abundance, so this should be set to True.
    HALT_EXEC: Used for debugging app controller
    save_intermediate_files: Preserve all intermediate files created.
    """
    # Not sure if I feel comfortable using blast as a way to recapitulate
    # original read ids....
    if not output_filepath:
        _, output_filepath = mkstemp(prefix='assign_reads_to_otus',
                                     suffix='.uc')
    log_filepath = join(working_dir, log_name)
    params = {'--id': perc_id_blast,
              '--global': global_alignment}
    app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
    data = {'--query': original_fasta,
            '--db': filtered_fasta,
            '--uc': output_filepath
            }
    if not remove_usearch_logs:
        data['--log'] = log_filepath
    app_result = app(data)
    return app_result, output_filepath | 0.000523 | 
| 
	def gallery_section(images, title):
    """Create detail section with gallery.
    Args:
        images: stream of marv image files
        title (str): Title to be displayed for detail section.
    Returns:
        One detail section.
    """
    # pull all images
    imgs = []
    while True:
        img = yield marv.pull(images)
        if img is None:
            break
        imgs.append({'src': img.relpath})
    if not imgs:
        return
    # create gallery widget and section containing it
    widget = {'title': images.title, 'gallery': {'images': imgs}}
    section = {'title': title, 'widgets': [widget]}
    yield marv.push(section) | 0.001534 | 
| 
	def find_if(pred, iterable, default=None):
    """
    Returns a reference to the first element in the ``iterable`` range for
    which ``pred`` returns ``True``. If no such element is found, the
    function returns ``default``.
        >>> find_if(lambda x: x == 3, [1, 2, 3, 4])
        3
    :param pred: a predicate function to check a value from the iterable range
    :param iterable: an iterable range to check in
    :param default: a value that will be returned if no elements were found
    :returns: a reference to the first found element or default
    """
    return next((i for i in iterable if pred(i)), default) | 0.001587 | 
| 
	def solvers(config_file, profile, solver_def, list_solvers):
    """Get solver details.
    Unless solver name/id specified, fetch and display details for
    all online solvers available on the configured endpoint.
    """
    with Client.from_config(
            config_file=config_file, profile=profile, solver=solver_def) as client:
        try:
            solvers = client.get_solvers(**client.default_solver)
        except SolverNotFoundError:
            click.echo("Solver(s) {} not found.".format(solver_def))
            return 1
        if list_solvers:
            for solver in solvers:
                click.echo(solver.id)
            return
        # ~YAML output
        for solver in solvers:
            click.echo("Solver: {}".format(solver.id))
            click.echo("  Parameters:")
            for name, val in sorted(solver.parameters.items()):
                click.echo("    {}: {}".format(name, strtrunc(val) if val else '?'))
            solver.properties.pop('parameters', None)
            click.echo("  Properties:")
            for name, val in sorted(solver.properties.items()):
                click.echo("    {}: {}".format(name, strtrunc(val)))
            click.echo("  Derived properties:")
            for name in sorted(solver.derived_properties):
                click.echo("    {}: {}".format(name, strtrunc(getattr(solver, name))))
            click.echo() | 0.002843 | 
| 
	def solve_spectral(prob, *args, **kwargs):
    """Solve the spectral relaxation with lambda = 1.
    """
    # TODO: do this efficiently without SDP lifting
    # lifted variables and semidefinite constraint
    X = cvx.Semidef(prob.n + 1)
    W = prob.f0.homogeneous_form()
    rel_obj = cvx.Minimize(cvx.sum_entries(cvx.mul_elemwise(W, X)))
    W1 = sum([f.homogeneous_form() for f in prob.fs if f.relop == '<='])
    W2 = sum([f.homogeneous_form() for f in prob.fs if f.relop == '=='])
    rel_prob = cvx.Problem(
        rel_obj,
        [
            cvx.sum_entries(cvx.mul_elemwise(W1, X)) <= 0,
            cvx.sum_entries(cvx.mul_elemwise(W2, X)) == 0,
            X[-1, -1] == 1
        ]
    )
    rel_prob.solve(*args, **kwargs)
    if rel_prob.status not in [cvx.OPTIMAL, cvx.OPTIMAL_INACCURATE]:
        raise Exception("Relaxation problem status: %s" % rel_prob.status)
    (w, v) = LA.eig(X.value)
    return np.sqrt(np.max(w))*np.asarray(v[:-1, np.argmax(w)]).flatten(), rel_prob.value | 0.001982 | 
| 
	def create(self, resource, uri=None, timeout=-1, custom_headers=None, default_values={}):
        """
        Makes a POST request to create a resource when a request body is required.
        Args:
            resource:
                OneView resource dictionary.
            uri:
                Can be either the resource ID or the resource URI.
            timeout:
                Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
                in OneView; it just stops waiting for its completion.
            custom_headers:
                Allows setting specific HTTP headers.
            default_values:
                Dictionary with default values grouped by OneView API version. This dictionary will be merged with
                the resource dictionary only for keys that the resource dictionary does not already contain.
                This argument is optional and the default value is an empty dictionary.
                Ex.:
                    default_values = {
                        '200': {"type": "logical-switch-group"},
                        '300': {"type": "logical-switch-groupV300"}
                    }
        Returns:
            Created resource.
        """
        if not resource:
            logger.exception(RESOURCE_CLIENT_RESOURCE_WAS_NOT_PROVIDED)
            raise ValueError(RESOURCE_CLIENT_RESOURCE_WAS_NOT_PROVIDED)
        if not uri:
            uri = self._uri
        logger.debug('Create (uri = %s, resource = %s)' %
                     (uri, str(resource)))
        resource = self.merge_default_values(resource, default_values)
        return self.__do_post(uri, resource, timeout, custom_headers) | 0.00472 | 
| 
	def from_raw(self, rval: RawObject, jptr: JSONPointer = "") -> ObjectValue:
        """Override the superclass method."""
        if not isinstance(rval, dict):
            raise RawTypeError(jptr, "object")
        res = ObjectValue()
        for qn in rval:
            if qn.startswith("@"):
                if qn != "@":
                    tgt = qn[1:]
                    if tgt not in rval:
                        raise MissingAnnotationTarget(jptr, tgt)
                    jptr += '/' + tgt
                res[qn] = self._process_metadata(rval[qn], jptr)
            else:
                cn = self._iname2qname(qn)
                ch = self.get_data_child(*cn)
                npath = jptr + "/" + qn
                if ch is None:
                    raise RawMemberError(npath)
                res[ch.iname()] = ch.from_raw(rval[qn], npath)
        return res | 0.002291 | 
| 
	def meanFracdet(map_fracdet, lon_population, lat_population, radius_population):
    """
    Compute the mean fracdet within circular aperture (radius specified in decimal degrees)
    lon, lat, and radius are taken to be arrays of the same length
    """
    nside_fracdet = healpy.npix2nside(len(map_fracdet))
    map_fracdet_zero = np.where(map_fracdet >= 0., map_fracdet, 0.)
    fracdet_population = np.empty(len(lon_population))
    for ii in range(0, len(lon_population)):
        fracdet_population[ii] = np.mean(map_fracdet_zero[ugali.utils.healpix.ang2disc(nside_fracdet, 
                                                                                       lon_population[ii], 
                                                                                       lat_population[ii], 
                                                                                       radius_population if np.isscalar(radius_population) else radius_population[ii],
                                                                                       inclusive=True)])
    return fracdet_population | 0.009991 | 
| 
	def enforce_versioning(force=False):
    """Install versioning on the db."""
    connect_str, repo_url = get_version_data()
    LOG.warning("Your database uses an unversioned benchbuild schema.")
    if not force and not ui.ask(
            "Should I enforce version control on your schema?"):
        LOG.error("User declined schema versioning.")
        return None
    repo_version = migrate.version(repo_url, url=connect_str)
    migrate.version_control(connect_str, repo_url, version=repo_version)
    return repo_version | 0.001901 | 
| 
	def guess_mime_file_text(file_prog, filename):
    """Determine MIME type of filename with file(1)."""
    cmd = [file_prog, "--brief", filename]
    try:
        output = backtick(cmd).strip()
    except OSError:
        # ignore errors, as file(1) is only a fallback
        return None
    # match output against known strings
    for matcher, mime in FileText2Mime.items():
        if output.startswith(matcher) and mime in ArchiveMimetypes:
            return mime
    return None | 0.004115 | 
| 
	def act(self):
        """
        Power on action
        """
        g = get_root(self).globals
        g.clog.debug('Power on pressed')
        if execCommand(g, 'online'):
            g.clog.info('ESO server online')
            g.cpars['eso_server_online'] = True
            if not isPoweredOn(g):
                success = execCommand(g, 'pon')
                if not success:
                    g.clog.warn('Unable to power on CLDC')
                    return False
            # change other buttons
            self.disable()
            g.observe.start.enable()
            g.observe.stop.disable()
            g.setup.powerOff.enable()
            success = execCommand(g, 'seqStart')
            if not success:
                g.clog.warn('Failed to start sequencer after Power On.')
            try:
                g.info.run.configure(text='{0:03d}'.format(getRunNumber(g)))
            except Exception as err:
                g.clog.warn('Failed to determine run number at start of run')
                g.clog.warn(str(err))
                g.info.run.configure(text='UNDEF')
            return True
        else:
            g.clog.warn('Failed to bring server online')
            return False | 0.001637 | 
| 
	def confirmdir(self, target_directory):
        """Test that the target is actually a directory, raising OSError
        if not.
        Args:
            target_directory: Path to the target directory within the fake
                filesystem.
        Returns:
            The FakeDirectory object corresponding to target_directory.
        Raises:
            OSError: if the target is not a directory.
        """
        try:
            directory = self.resolve(target_directory)
        except IOError as exc:
            self.raise_os_error(exc.errno, target_directory)
        if not directory.st_mode & S_IFDIR:
            if self.is_windows_fs and IS_PY2:
                error_nr = errno.EINVAL
            else:
                error_nr = errno.ENOTDIR
            self.raise_os_error(error_nr, target_directory, 267)
        return directory | 0.002328 | 
| 
	def find(dag_id=None, run_id=None, execution_date=None,
             state=None, external_trigger=None, no_backfills=False,
             session=None):
        """
        Returns a set of dag runs for the given search criteria.
        :param dag_id: the dag_id to find dag runs for
        :type dag_id: int, list
        :param run_id: defines the run id for this dag run
        :type run_id: str
        :param execution_date: the execution date
        :type execution_date: datetime.datetime
        :param state: the state of the dag run
        :type state: airflow.utils.state.State
        :param external_trigger: whether this dag run is externally triggered
        :type external_trigger: bool
        :param no_backfills: return no backfills (True), return all (False).
            Defaults to False
        :type no_backfills: bool
        :param session: database session
        :type session: sqlalchemy.orm.session.Session
        """
        DR = DagRun
        qry = session.query(DR)
        if dag_id:
            qry = qry.filter(DR.dag_id == dag_id)
        if run_id:
            qry = qry.filter(DR.run_id == run_id)
        if execution_date:
            if isinstance(execution_date, list):
                qry = qry.filter(DR.execution_date.in_(execution_date))
            else:
                qry = qry.filter(DR.execution_date == execution_date)
        if state:
            qry = qry.filter(DR.state == state)
        if external_trigger is not None:
            qry = qry.filter(DR.external_trigger == external_trigger)
        if no_backfills:
            # in order to prevent a circular dependency
            from airflow.jobs import BackfillJob
            qry = qry.filter(DR.run_id.notlike(BackfillJob.ID_PREFIX + '%'))
        dr = qry.order_by(DR.execution_date).all()
        return dr | 0.002173 | 
| 
	def get_values(abf,key="freq",continuous=False):
    """returns Xs, Ys (the key), and sweep #s for every AP found."""
    Xs,Ys,Ss=[],[],[]
    for sweep in range(abf.sweeps):
        for AP in cm.matrixToDicts(abf.APs):
            if not AP["sweep"]==sweep:
                continue
            Ys.append(AP[key])
            Ss.append(AP["sweep"])
            if continuous:
                Xs.append(AP["expT"])
            else:
                Xs.append(AP["sweepT"])
    return np.array(Xs),np.array(Ys),np.array(Ss) | 0.020992 | 
| 
	def p_declare_list(p):
    '''declare_list : STRING EQUALS static_scalar
                    | declare_list COMMA STRING EQUALS static_scalar'''
    if len(p) == 4:
        p[0] = [ast.Directive(p[1], p[3], lineno=p.lineno(1))]
    else:
        p[0] = p[1] + [ast.Directive(p[3], p[5], lineno=p.lineno(2))] | 0.003257 | 
| 
	def _port_scan(self, port):
        """Scan the port structure (dict) and update the status key."""
        if int(port['port']) == 0:
            return self._port_scan_icmp(port)
        else:
            return self._port_scan_tcp(port) | 0.008368 | 
| 
	def click_window(self, window, button):
        """
        Send a click for a specific mouse button at the current mouse location.
        :param window:
            The window you want to send the event to or CURRENTWINDOW
        :param button:
            The mouse button. Generally, 1 is left, 2 is middle, 3 is
            right, 4 is wheel up, 5 is wheel down.
        """
        _libxdo.xdo_click_window(self._xdo, window, button) | 0.004535 | 
| 
	def flatten(arys, returns_shapes=True, hstack=np.hstack, ravel=np.ravel,
            shape=np.shape):
    """
    Flatten a potentially recursive list of multidimensional objects.
    .. note::
       Not to be confused with `np.ndarray.flatten()` (a more befitting name
       might be `chain` or `stack`, or maybe something else entirely, since
       this function is more than either `concatenate` or `np.flatten`
       itself). Rather, it is the composition of the former with the latter.
    Parameters
    ----------
    arys : list of objects
        One or more input arrays of possibly heterogeneous shapes and
        sizes.
    returns_shapes : bool, optional
        Default is `True`. If `True`, the tuple `(flattened, shapes)` is
        returned, otherwise only `flattened` is returned.
    hstack : callable, optional
        a function that implements horizontal stacking
    ravel : callable, optional
        a function that flattens the object
    shape : callable, optional
        a function that returns the shape of the object
    Returns
    -------
    flattened,[shapes] : {1dobject, list of tuples}
        Return the flat (1d) object resulting from the concatenation of
        flattened multidimensional objects. When `returns_shapes` is `True`,
        return a list of tuples containing also the shapes of each array as the
        second element.
    See Also
    --------
    revrand.utils.unflatten : its inverse
    Examples
    --------
    >>> a = 9
    >>> b = np.array([4, 7, 4, 5, 2])
    >>> c = np.array([[7, 3, 1],
    ...               [2, 6, 6]])
    >>> d = np.array([[[6, 5, 5],
    ...                [1, 6, 9]],
    ...               [[3, 9, 1],
    ...                [9, 4, 1]]])
    >>> flatten([a, b, c, d]) # doctest: +NORMALIZE_WHITESPACE
    (array([9, 4, 7, 4, 5, 2, 7, 3, 1, 2, 6, 6, 6, 5, 5, 1, 6, 9, 3, 9,
            1, 9, 4, 1]), [(), (5,), (2, 3), (2, 2, 3)])
    Note that scalars and 0-dimensional arrays are treated differently
    from 1-dimensional singleton arrays.
    >>> flatten([3.14, np.array(2.71), np.array([1.61])])
    ... # doctest: +NORMALIZE_WHITESPACE
    (array([ 3.14,  2.71,  1.61]), [(), (), (1,)])
    >>> flatten([a, b, c, d], returns_shapes=False)
    ... # doctest: +NORMALIZE_WHITESPACE
    array([9, 4, 7, 4, 5, 2, 7, 3, 1, 2, 6, 6, 6, 5, 5, 1, 6, 9, 3, 9,
           1, 9, 4, 1])
    >>> w, x, y, z = unflatten(*flatten([a, b, c, d]))
    >>> w == a
    True
    >>> np.array_equal(x, b)
    True
    >>> np.array_equal(y, c)
    True
    >>> np.array_equal(z, d)
    True
    >>> flatten([3.14, [np.array(2.71), np.array([1.61])]])
    ... # doctest: +NORMALIZE_WHITESPACE
    (array([ 3.14,  2.71,  1.61]), [(), [(), (1,)]])
    """
    if issequence(arys) and len(arys) > 0:
        flat = partial(flatten,
                       returns_shapes=True,
                       hstack=hstack,
                       ravel=ravel,
                       shape=shape
                       )
        flat_arys, shapes = zip(*map(flat, arys))
        flat_ary = hstack(flat_arys)
        shapes = list(shapes)
    else:
        flat_ary = ravel(arys)
        shapes = shape(arys)
    return (flat_ary, shapes) if returns_shapes else flat_ary | 0.000308 | 
| 
	def splitList(self, elements, chunksnum):
        """ Splits a list to a n lists with chunksnum number of elements
            each one.
            For a list [3,4,5,6,7,8,9] with chunksunum 4, the method
            will return the following list of groups:
            [[3,4,5,6],[7,8,9]]
        """
        if len(elements) < chunksnum:
            return [elements]
        # list() is needed on Python 3, where zip returns an iterator without len()
        groups = list(zip(*[elements[i::chunksnum] for i in range(chunksnum)]))
        if len(groups) * chunksnum < len(elements):
            groups.extend([elements[-(len(elements) - len(groups) * chunksnum):]])
        return groups | 0.004983 | 
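A hypothetical call reproducing the docstring case (note the zipped groups come back as tuples, while the remainder is a list slice):

    # obj is assumed to be an instance exposing splitList
    print(obj.splitList([3, 4, 5, 6, 7, 8, 9], 4))
    # [(3, 4, 5, 6), [7, 8, 9]]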
| 
	def path_is_remote(path, s3=True):
    """
    Determine whether file path is remote or local.
    Parameters
    ----------
    path : path to file
    s3 : bool, optional
        Whether "s3://" and "/vsis3/" prefixes also count as remote. Default: True.
    Returns
    -------
    is_remote : bool
    """
    prefixes = ("http://", "https://", "/vsicurl/")
    if s3:
        prefixes += ("s3://", "/vsis3/")
    return path.startswith(prefixes) | 0.002907 | 
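A few illustrative calls:

    print(path_is_remote("https://example.com/data.tif"))     # True
    print(path_is_remote("s3://bucket/data.tif"))             # True
    print(path_is_remote("s3://bucket/data.tif", s3=False))   # False
    print(path_is_remote("/tmp/data.tif"))                     # False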
| 
	def align_times(times, frames):
    """Aligns the times to the closest frame times (e.g. beats).
    Parameters
    ----------
    times: np.ndarray
        Times in seconds to be aligned.
    frames: np.ndarray
        Frame times in seconds.
    Returns
    -------
    aligned_times: np.ndarray
        Aligned times.
    """
    dist = np.minimum.outer(times, frames)
    bound_frames = np.argmax(np.maximum(0, dist), axis=1)
    aligned_times = np.unique(bound_frames)
    return aligned_times | 0.002 | 
| 
	def process_pc_pathsfromto(source_genes, target_genes, neighbor_limit=1,
                           database_filter=None):
    """Returns a BiopaxProcessor for a PathwayCommons paths-from-to query.
    The paths-from-to query finds the paths from a set of source genes to
    a set of target genes.
    http://www.pathwaycommons.org/pc2/#graph
    http://www.pathwaycommons.org/pc2/#graph_kind
    Parameters
    ----------
    source_genes : list
        A list of HGNC gene symbols that are the sources of paths being
        searched for.
        Examples: ['BRAF', 'RAF1', 'ARAF']
    target_genes : list
        A list of HGNC gene symbols that are the targets of paths being
        searched for.
        Examples: ['MAP2K1', 'MAP2K2']
    neighbor_limit : Optional[int]
        The number of steps to limit the length of the paths
        between the source genes and target genes being queried. Default: 1
    database_filter : Optional[list]
        A list of database identifiers to which the query is restricted.
        Examples: ['reactome'], ['biogrid', 'pid', 'psp']
        If not given, all databases are used in the query. For a full
        list of databases see http://www.pathwaycommons.org/pc2/datasources
    Returns
    -------
    bp : BiopaxProcessor
        A BiopaxProcessor containing the obtained BioPAX model in bp.model.
    """
    model = pcc.graph_query('pathsfromto', source_genes,
                             target_genes, neighbor_limit=neighbor_limit,
                             database_filter=database_filter)
    if model is not None:
        return process_model(model) | 0.001852 | 
| 
	def add_mutations_and_flush(self, table, muts):
        """
        Add mutations to a table without the need to create and manage a batch writer.
        """
        if not isinstance(muts, list) and not isinstance(muts, tuple):
            muts = [muts]
        cells = {}
        for mut in muts:
            cells.setdefault(mut.row, []).extend(mut.updates)
        self.client.updateAndFlush(self.login, table, cells) | 0.007109 | 
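A stand-alone sketch of the row grouping performed before updateAndFlush (the Mutation namedtuple here is only a stand-in for the real mutation objects):

    from collections import namedtuple
    Mutation = namedtuple('Mutation', ['row', 'updates'])
    muts = [Mutation('row1', ['u1']), Mutation('row1', ['u2']), Mutation('row2', ['u3'])]
    cells = {}
    for mut in muts:
        cells.setdefault(mut.row, []).extend(mut.updates)
    print(cells)   # {'row1': ['u1', 'u2'], 'row2': ['u3']}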
| 
	def transform(self, X, y=None):
        """
        Apply transforms, and transform with the final estimator
        This also works where final estimator is ``None``: all prior
        transformations are applied.
        Parameters
        ----------
        X : iterable
            Data to transform. Must fulfill input requirements of first step
            of the pipeline.
        y : array-like
            Target
        Returns
        -------
        Xt : array-like, shape = [n_samples, n_transformed_features]
            Transformed data
        yt : array-like, shape = [n_samples]
            Transformed target
        """
        Xt, yt, _ = self._transform(X, y)
        if isinstance(self._final_estimator, XyTransformerMixin):
            Xt, yt, _ = self._final_estimator.transform(Xt, yt)
        else:
            Xt = self._final_estimator.transform(Xt)
        return Xt, yt | 0.002212 | 
| 
	def _list_records_in_zone(self, zone, rdtype=None, name=None, content=None):
        """
        Iterates over all records of the zone and returns a list of records filtered
        by record type, name and content. The list is empty if no records found.
        """
        records = []
        rrsets = zone.iterate_rdatasets() if zone else []
        for rname, rdataset in rrsets:
            rtype = dns.rdatatype.to_text(rdataset.rdtype)
            if ((not rdtype or rdtype == rtype)
                    and (not name or name == rname.to_text())):
                for rdata in rdataset:
                    rdata = rdata.to_text()
                    if not content or self._convert_content(rtype, content) == rdata:
                        raw_rdata = self._clean_TXT_record({'type': rtype,
                                                            'content': rdata})['content']
                        data = {
                            'type': rtype,
                            'name': rname.to_text(True),
                            'ttl': int(rdataset.ttl),
                            'content': raw_rdata,
                            'id': Provider._create_identifier(rtype, rname.to_text(), raw_rdata)
                        }
                        records.append(data)
        return records | 0.005315 | 
| 
	def uni_to(self, target, *args, **kwargs):
        """Unified to."""
        logging.debug(_('target: %s, args: %s, kwargs: %s'),
                      target, args, kwargs)
        return getattr(self, self.func_dict[target])(*args, **kwargs) | 0.00823 | 
| 
	async def get_events(
        self, device_ids, group_ids=None, from_time=None, to_time=None, event_types=None
    ):
        """Get the local installed version."""
        if to_time is None:
            to_time = datetime.utcnow()
        if from_time is None:
            from_time = to_time - timedelta(seconds=EVENT_INTERVAL)
        if event_types is None:
            event_types = ["allEvents"]
        get_params = []
        get_params.extend([("deviceId", value) for value in device_ids])
        if group_ids is not None:
            get_params.extend([("groupId", value) for value in group_ids])
        get_params.extend([("from", from_time.isoformat() + "Z")])
        get_params.extend([("to", to_time.isoformat() + "Z")])
        get_params.extend([("type", value) for value in event_types])
        data = await self.api("reports/events", get_params)
        if self.connected and self.authenticated:
            self._events = data
        else:
            self._events = self._events
        return self._events | 0.003857 | 
| 
	def activate_components_ui(self):
        """
        Activates user selected Components.
        :return: Method success.
        :rtype: bool
        :note: May require user interaction.
        """
        selected_components = self.get_selected_components()
        self.__engine.start_processing("Activating Components ...", len(selected_components))
        activation_failed_components = []
        for component in selected_components:
            if not component.interface.activated:
                success = self.activate_component(component.name) or False
                if not success:
                    activation_failed_components.append(component)
            else:
                self.__engine.notifications_manager.warnify("{0} | '{1}' Component is already activated!".format(
                    self.__class__.__name__, component.name))
            self.__engine.step_processing()
        self.__engine.stop_processing()
        self.__store_deactivated_components()
        if not activation_failed_components:
            return True
        else:
            raise manager.exceptions.ComponentActivationError(
                "{0} | Exception(s) raised while activating '{1}' Component(s)!".format(self.__class__.__name__,
                                                                                        ", ".join((
                                                                                        activation_failed_component.name
                                                                                        for activation_failed_component
                                                                                        in
                                                                                        activation_failed_components)))) | 0.007739 | 
| 
	def ser_iuwt_recomposition(in1, scale_adjust, smoothed_array):
    """
    This function calls the a trous algorithm code to recompose the input into a single array. This is the
    implementation of the isotropic undecimated wavelet transform recomposition for a single CPU core.
    INPUTS:
    in1             (no default):   Array containing wavelet coefficients.
    scale_adjust    (no default):   Indicates the number of truncated array pages.
    smoothed_array  (default=None): For a complete inverse transform, this must be the smoothest approximation.
    OUTPUTS:
    recomposition                   Array containing the reconstructed image.
    """
    wavelet_filter = (1./16)*np.array([1,4,6,4,1])      # Filter-bank for use in the a trous algorithm.
    # Determines scale with adjustment and creates a zero array to store the output, unless smoothed_array is given.
    max_scale = in1.shape[0] + scale_adjust
    if smoothed_array is None:
        recomposition = np.zeros([in1.shape[1], in1.shape[2]])
    else:
        recomposition = smoothed_array
    # The following loops call the a trous algorithm code to recompose the input. The first loop assumes that there are
    # non-zero wavelet coefficients at scales above scale_adjust, while the second loop completes the recomposition
    # on the scales less than scale_adjust.
    for i in range(max_scale-1, scale_adjust-1, -1):
        recomposition = ser_a_trous(recomposition, wavelet_filter, i) + in1[i-scale_adjust,:,:]
    if scale_adjust>0:
        for i in range(scale_adjust-1, -1, -1):
            recomposition = ser_a_trous(recomposition, wavelet_filter, i)
    return recomposition | 0.010131 | 
| 
	def _func(self, volume, params):
        """
        Poirier-Tarantola equation from PRB 70, 224107
        """
        e0, b0, b1, v0 = tuple(params)
        eta = (volume / v0) ** (1. / 3.)
        squiggle = -3.*np.log(eta)
        return e0 + b0 * v0 * squiggle ** 2 / 6. * (3. + squiggle * (b1 - 2)) | 0.006579 | 
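A quick sanity check of the formula with illustrative parameters: at the equilibrium volume (volume == v0) eta is 1, squiggle is 0, and the energy reduces to e0:

    import numpy as np
    e0, b0, b1, v0 = -10.0, 0.5, 4.0, 20.0   # illustrative (e0, b0, b1, v0)
    eta = (v0 / v0) ** (1. / 3.)             # 1.0
    squiggle = -3. * np.log(eta)             # 0.0
    print(e0 + b0 * v0 * squiggle ** 2 / 6. * (3. + squiggle * (b1 - 2)))  # -10.0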
| 
	def download_tasks_number(self):
        """获取离线任务总数
        :return: int
        """
        ret = self.list_download_tasks().content
        foo = json.loads(ret)
        return foo['total'] | 0.010363 | 
| 
	def module_imports_on_top_of_file(
        logical_line, indent_level, checker_state, noqa):
    r"""Place imports at the top of the file.
    Always put imports at the top of the file, just after any module comments
    and docstrings, and before module globals and constants.
    Okay: import os
    Okay: # this is a comment\nimport os
    Okay: '''this is a module docstring'''\nimport os
    Okay: r'''this is a module docstring'''\nimport os
    Okay:
    try:\n\timport x\nexcept ImportError:\n\tpass\nelse:\n\tpass\nimport y
    Okay:
    try:\n\timport x\nexcept ImportError:\n\tpass\nfinally:\n\tpass\nimport y
    E402: a=1\nimport os
    E402: 'One string'\n"Two string"\nimport os
    E402: a=1\nfrom sys import x
    Okay: if x:\n    import os
    """
    def is_string_literal(line):
        if line[0] in 'uUbB':
            line = line[1:]
        if line and line[0] in 'rR':
            line = line[1:]
        return line and (line[0] == '"' or line[0] == "'")
    allowed_try_keywords = ('try', 'except', 'else', 'finally')
    if indent_level:  # Allow imports in conditional statements or functions
        return
    if not logical_line:  # Allow empty lines or comments
        return
    if noqa:
        return
    line = logical_line
    if line.startswith('import ') or line.startswith('from '):
        if checker_state.get('seen_non_imports', False):
            yield 0, "E402 module level import not at top of file"
    elif re.match(DUNDER_REGEX, line):
        return
    elif any(line.startswith(kw) for kw in allowed_try_keywords):
        # Allow try, except, else, finally keywords intermixed with imports in
        # order to support conditional importing
        return
    elif is_string_literal(line):
        # The first literal is a docstring, allow it. Otherwise, report error.
        if checker_state.get('seen_docstring', False):
            checker_state['seen_non_imports'] = True
        else:
            checker_state['seen_docstring'] = True
    else:
        checker_state['seen_non_imports'] = True | 0.000485 | 
| 
	def branch_exists(self, branch):
        """Returns true or false depending on if a branch exists"""
        try:
            git(self.gitdir, self.gitwd, "rev-parse", branch)
        except sh.ErrorReturnCode:
            return False
        return True | 0.007843 | 
| 
	def get_value(self, ColumnName, RunNo):
        """
        Retrieves the value of the column named ColumnName associated
        with a particular run number.
        Parameters
        ----------
        ColumnName : string
            The name of the desired org-mode table's column
        RunNo : int
            The run number for which to retrieve the value
        
        Returns
        -------
        Value : float
            The value for the column's name and associated run number
        """
        Value = float(self.ORGTableData[self.ORGTableData.RunNo == '{}'.format(
            RunNo)][ColumnName])
        
        return Value | 0.007508 | 
| 
	def _create_sequences(self, func, iterable, chunksize, collector=None):
        """
        Create the WorkUnit objects to process and pushes them on the
        work queue. Each work unit is meant to process a slice of
        iterable of size chunksize. If collector is specified, then
        the ApplyResult objects associated with the jobs will notify
        collector when their result becomes ready.
        \return the list of WorkUnit objects (basically: JobSequences)
        pushed onto the work queue
        """
        assert not self._closed  # No lock here. We assume it's atomic...
        sequences = []
        results = []
        it_ = iter(iterable)
        exit_loop = False
        while not exit_loop:
            seq = []
            for _ in range(chunksize or 1):
                try:
                    arg = next(it_)
                except StopIteration:
                    exit_loop = True
                    break
                apply_result = ApplyResult(collector)
                job = Job(func, (arg,), {}, apply_result)
                seq.append(job)
                results.append(apply_result)
            sequences.append(JobSequence(seq))
        for seq in sequences:
            self._workq.put(seq)
        return sequences | 0.001566 | 
| 
	def setup_users_page(self, ):
        """Create and set the model on the users page
        :returns: None
        :rtype: None
        :raises: None
        """
        self.users_tablev.horizontalHeader().setResizeMode(QtGui.QHeaderView.ResizeToContents)
        log.debug("Loading users for users page.")
        rootdata = treemodel.ListItemData(['Username', 'First', 'Last', 'Email'])
        rootitem = treemodel.TreeItem(rootdata)
        users = djadapter.users.all()
        for usr in users:
            usrdata = djitemdata.UserItemData(usr)
            treemodel.TreeItem(usrdata, rootitem)
        self.users_model = treemodel.TreeModel(rootitem)
        self.users_tablev.setModel(self.users_model) | 0.00561 | 
| 
	async def apply(self, sender: str, recipient: str, mailbox: str,
                    append_msg: AppendMessage) \
            -> Tuple[Optional[str], AppendMessage]:
        """Run the filter and return the mailbox where it should be appended,
        or None to discard, and the message to be appended, which is usually
        the same as ``append_msg``.
        Args:
            sender: The envelope sender of the message.
            recipient: The envelope recipient of the message.
            mailbox: The intended mailbox to append the message.
            append_msg: The message to be appended.
        raises:
            :exc:`~pymap.exceptions.AppendFailure`
        """
        ... | 0.004292 | 
| 
	def runserver(project_name):
	'''
	Runs a python cgi server in a subprocess.
	'''
	DIR = os.listdir(project_name)
	if 'settings.py' not in DIR:
		raise NotImplementedError('No file called: settings.py found in %s'%project_name)
	CGI_BIN_FOLDER = os.path.join(project_name, 'cgi', 'cgi-bin')
	CGI_FOLDER = os.path.join(project_name, 'cgi')
	if not os.path.exists(CGI_BIN_FOLDER):
		os.makedirs(CGI_BIN_FOLDER)
	os.chdir(CGI_FOLDER)
	subprocess.Popen("python -m http.server --cgi 8000") | 0.030801 | 
| 
	def insert_before(self, value: Union[RawValue, Value],
                      raw: bool = False) -> "ArrayEntry":
        """Insert a new entry before the receiver.
        Args:
            value: The value of the new entry.
            raw: Flag to be set if `value` is raw.
        Returns:
            An instance node of the new inserted entry.
        """
        return ArrayEntry(self.index, self.before, self.after.cons(self.value),
                          self._cook_value(value, raw), self.parinst,
                          self.schema_node, datetime.now()) | 0.005245 | 
| 
	def str_slice(arr, start=None, stop=None, step=None):
    """
    Slice substrings from each element in the Series or Index.
    Parameters
    ----------
    start : int, optional
        Start position for slice operation.
    stop : int, optional
        Stop position for slice operation.
    step : int, optional
        Step size for slice operation.
    Returns
    -------
    Series or Index of object
        Series or Index from sliced substring from original string object.
    See Also
    --------
    Series.str.slice_replace : Replace a slice with a string.
    Series.str.get : Return element at position.
        Equivalent to `Series.str.slice(start=i, stop=i+1)` with `i`
        being the position.
    Examples
    --------
    >>> s = pd.Series(["koala", "fox", "chameleon"])
    >>> s
    0        koala
    1          fox
    2    chameleon
    dtype: object
    >>> s.str.slice(start=1)
    0        oala
    1          ox
    2    hameleon
    dtype: object
    >>> s.str.slice(stop=2)
    0    ko
    1    fo
    2    ch
    dtype: object
    >>> s.str.slice(step=2)
    0      kaa
    1       fx
    2    caeen
    dtype: object
    >>> s.str.slice(start=0, stop=5, step=3)
    0    kl
    1     f
    2    cm
    dtype: object
    Equivalent behaviour to:
    >>> s.str[0:5:3]
    0    kl
    1     f
    2    cm
    dtype: object
    """
    obj = slice(start, stop, step)
    f = lambda x: x[obj]
    return _na_map(f, arr) | 0.001365 | 
| 
	def sudo(self, password=None):
        """
        Enter sudo mode
        """
        if self.username == 'root':
            raise ValueError('Already root user')
        password = self.validate_password(password)
        stdin, stdout, stderr = self.exec_command('sudo su')
        stdin.write("%s\n" % password)
        stdin.flush()
        errors = stderr.read()
        if errors:
            raise ValueError(errors) | 0.004706 | 
| 
	def _sanitize_entity(self, entity):
        """
        Make given entity 'sane' for further use.
        """
        aliases = {
            "current_state": "state",
            "is_flapping": "flapping",
            "scheduled_downtime_depth": "in_downtime",
            "has_been_checked": "checked",
            "should_be_scheduled": "scheduled",
            "active_checks_enabled": "active_checks",
            "passive_checks_enabled": "passive_checks",
        }
        sane = {}
        for akey in aliases.keys():
            sane[aliases[akey]] = None
        aliases_keys = aliases.keys()
        for key in entity.keys():
            if key not in aliases_keys:
                continue
            alias = aliases[key]
            try:
                sane[alias] = int(entity[key])
            except Exception:
                sane[alias] = None
        if sane["active_checks"] not in [0, 1]:
            sane["active_checks"] = 0
        elif sane["active_checks"] == 1:
            sane["passive_checks"] = 0
        if sane["passive_checks"] not in [0, 1]:
            sane["passive_checks"] = 0
        return sane | 0.00175 | 
| 
	def write_meta(self):
        """ucds, descriptions and units are written as attributes in the hdf5 file, instead of a seperate file as
         the default :func:`Dataset.write_meta`.
         """
        with h5py.File(self.filename, "r+") as h5file_output:
            h5table_root = h5file_output[self.h5table_root_name]
            if self.description is not None:
                h5table_root.attrs["description"] = self.description
            h5columns = h5table_root if self._version == 1 else h5table_root['columns']
            for column_name in self.columns.keys():
                h5dataset = None
                if column_name in h5columns:
                    h5dataset = h5columns[column_name]
                else:
                    for group in h5columns.values():
                        if 'type' in group.attrs:
                            if group.attrs['type'] in ['csr_matrix']: 
                                for name, column in group.items():
                                    if name == column_name:
                                        h5dataset = column
                if h5dataset is None:
                    raise ValueError('column {} not found'.format(column_name))
                for name, values in [("ucd", self.ucds), ("unit", self.units), ("description", self.descriptions)]:
                    if column_name in values:
                        value = ensure_string(values[column_name], cast=True)
                        h5dataset.attrs[name] = value
                    else:
                        if name in h5dataset.attrs:
                            del h5dataset.attrs[name]
| 
	def do_cli(ctx, template, semantic_version):
    """Publish the application based on command line inputs."""
    try:
        template_data = get_template_data(template)
    except ValueError as ex:
        click.secho("Publish Failed", fg='red')
        raise UserException(str(ex))
    # Override SemanticVersion in template metadata when provided in command input
    if semantic_version and SERVERLESS_REPO_APPLICATION in template_data.get(METADATA, {}):
        template_data.get(METADATA).get(SERVERLESS_REPO_APPLICATION)[SEMANTIC_VERSION] = semantic_version
    try:
        publish_output = publish_application(template_data)
        click.secho("Publish Succeeded", fg="green")
        click.secho(_gen_success_message(publish_output))
    except InvalidS3UriError:
        click.secho("Publish Failed", fg='red')
        raise UserException(
            "Your SAM template contains invalid S3 URIs. Please make sure that you have uploaded application "
            "artifacts to S3 by packaging the template. See more details in {}".format(SAM_PACKAGE_DOC))
    except ServerlessRepoError as ex:
        click.secho("Publish Failed", fg='red')
        LOG.debug("Failed to publish application to serverlessrepo", exc_info=True)
        error_msg = '{}\nPlease follow the instructions in {}'.format(str(ex), SAM_PUBLISH_DOC)
        raise UserException(error_msg)
    application_id = publish_output.get('application_id')
    _print_console_link(ctx.region, application_id) | 0.005387 | 
| 
	def kill(self):
        """
        Send a SIGKILL to all worker processes
        """
        for sock in self.workers:
            os.kill(sock.pid, signal.SIGKILL)
        return 'WorkerPool %s killed' % self.ctrl_url | 0.009091 | 
| 
	def save(self, *args, **kwargs):
        """
        **uid**: :code:`{division.uid}_{election_day.uid}_ballotmeasure:{number}`
        """
        self.uid = '{}_{}_ballotmeasure:{}'.format(
            self.division.uid,
            self.election_day.uid,
            self.number
        )
        super(BallotMeasure, self).save(*args, **kwargs) | 0.006098 | 
| 
	def read(self, entity=None, attrs=None, ignore=None, params=None):
        """Ignore ``organization`` field as it's never returned by the server
        and is only added to entity to be able to use organization path
        dependent helpers.
        """
        if ignore is None:
            ignore = set()
        ignore.add('organization')
        return super(Subscription, self).read(entity, attrs, ignore, params) | 0.004751 | 
| 
	def get_levels(dict_, n=0, levels=None):
    r"""
    DEPRECATED
    Args:
        dict_ (dict_):  a dictionary
        n (int): (default = 0)
        levels (None): (default = None)
    CommandLine:
        python -m utool.util_graph --test-get_levels --show
        python3 -m utool.util_graph --test-get_levels --show
    Example:
        >>> # DISABLE_DOCTEST
        >>> from utool.util_graph import *  # NOQA
        >>> import utool as ut
        >>> from_root = {
        >>>     'dummy_annot': {
        >>>         'chip': {
        >>>                 'keypoint': {
        >>>                             'fgweight': None,
        >>>                         },
        >>>             },
        >>>         'probchip': {
        >>>                 'fgweight': None,
        >>>             },
        >>>     },
        >>> }
        >>> dict_ = from_root
        >>> n = 0
        >>> levels = None
        >>> levels_ = get_levels(dict_, n, levels)
        >>> result = ut.repr2(levels_, nl=1)
        >>> print(result)
        [
            ['dummy_annot'],
            ['chip', 'probchip'],
            ['keypoint', 'fgweight'],
            ['fgweight'],
        ]
    """
    if levels is None:
        levels_ = [[] for _ in range(dict_depth(dict_))]
    else:
        levels_ = levels
    if dict_ is None:
        return []
    for key in dict_.keys():
        levels_[n].append(key)
    for val in dict_.values():
        get_levels(val, n + 1, levels_)
    return levels_ | 0.000668 | 
| 
	def leading_whitespace(self, line):  # type: (str) -> str
        """
        For preserving indents
        :param line:
        :return:
        """
        string = ""
        for char in line:
            if char in " \t":
                string += char
                continue
            else:
                return string
        return string | 0.005682 | 
| 
	def connection_made(self, transport):
        """Method run when the UDP broadcast server is started
        """
        #print('started')
        self.transport = transport
        sock = self.transport.get_extra_info("socket")
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
        self.loop.call_soon(self.discover) | 0.007407 | 
| 
	def thumbs_up_songs(self, *, library=True, store=True):
		"""Get a listing of 'Thumbs Up' store songs.
		Parameters:
			library (bool, Optional): Include 'Thumbs Up' songs from library.
				Default: True
			generated (bool, Optional): Include 'Thumbs Up' songs from store.
				Default: True
		Returns:
			list: Dicts of 'Thumbs Up' songs.
		"""
		thumbs_up_songs = []
		if library is True:
			thumbs_up_songs.extend(
				song
				for song in self.songs()
				if song.get('rating', '0') == '5'
			)
		if store is True:
			response = self._call(mc_calls.EphemeralTop)
			thumbs_up_songs.extend(response.body.get('data', {}).get('items', []))
		return thumbs_up_songs | 0.032836 | 
| 
	async def new_job(self, message: BackendNewJob):
        """
        Handles a new job: starts the grading container
        """
        self._logger.info("Received request for jobid %s", message.job_id)
        future_results = asyncio.Future()
        out = await self._loop.run_in_executor(None, lambda: self.__new_job_sync(message, future_results))
        self._create_safe_task(self.handle_running_container(**out, future_results=future_results))
        await self._timeout_watcher.register_container(out["container_id"], out["orig_time_limit"], out["orig_hard_time_limit"]) | 0.008606 | 
| 
	def create(self, request, *args, **kwargs):
        """Create a resource."""
        self.define_contributor(request)
        try:
            return super().create(request, *args, **kwargs)
        except IntegrityError as ex:
            return Response({'error': str(ex)}, status=status.HTTP_409_CONFLICT) | 0.009677 | 
| 
	def update_object(self, url, container, container_object, object_headers,
                      container_headers):
        """Update an existing object in a swift container.
        This method will place new headers on an existing object or container.
        :param url:
        :param container:
        :param container_object:
        """
        headers, container_uri = self._return_base_data(
            url=url,
            container=container,
            container_object=container_object,
            container_headers=container_headers,
            object_headers=object_headers,
        )
        return self._header_poster(
            uri=container_uri,
            headers=headers
        ) | 0.004208 | 
| 
	def store_config(config, suffix = None):
    '''
    Store configuration
    args:
        config (list[dict]): configurations for each project
    '''
    home = os.path.expanduser('~')
    if suffix is not None:
        config_path = os.path.join(home, '.transfer', suffix)
    else:
        config_path = os.path.join(home, '.transfer')
    os.makedirs(config_path, exist_ok = True)
    with open(os.path.join(config_path, 'config.yaml'), 'w') as fp:
        yaml.dump(config, fp) | 0.010309 | 
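
A minimal usage sketch (the config entries below are hypothetical; only the list-of-dicts shape is taken from the docstring):

```python
# Hypothetical per-project configurations.
example_config = [
    {'name': 'project-a', 'path': '/data/project-a'},
    {'name': 'project-b', 'path': '/data/project-b'},
]
store_config(example_config)              # writes ~/.transfer/config.yaml
store_config(example_config, 'staging')   # writes ~/.transfer/staging/config.yaml
```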
| 
	def set_ids(self, set_image_id, image_id, set_parent_id, parent_id):
        """Changes the UUID and parent UUID for a hard disk medium.
        in set_image_id of type bool
            Select whether a new image UUID is set or not.
        in image_id of type str
            New UUID for the image. If an empty string is passed, then a new
            UUID is automatically created, provided that @a setImageId is @c true.
            Specifying a zero UUID is not allowed.
        in set_parent_id of type bool
            Select whether a new parent UUID is set or not.
        in parent_id of type str
            New parent UUID for the image. If an empty string is passed, then a
            new UUID is automatically created, provided @a setParentId is
            @c true. A zero UUID is valid.
        raises :class:`OleErrorInvalidarg`
            Invalid parameter combination.
        
        raises :class:`VBoxErrorNotSupported`
            Medium is not a hard disk medium.
        
        """
        if not isinstance(set_image_id, bool):
            raise TypeError("set_image_id can only be an instance of type bool")
        if not isinstance(image_id, basestring):
            raise TypeError("image_id can only be an instance of type basestring")
        if not isinstance(set_parent_id, bool):
            raise TypeError("set_parent_id can only be an instance of type bool")
        if not isinstance(parent_id, basestring):
            raise TypeError("parent_id can only be an instance of type basestring")
        self._call("setIds",
                     in_p=[set_image_id, image_id, set_parent_id, parent_id]) | 0.006072 | 
| 
	def fill_triangular(x, upper=False, name=None):
  r"""Creates a (batch of) triangular matrix from a vector of inputs.
  Created matrix can be lower- or upper-triangular. (It is more efficient to
  create the matrix as upper or lower, rather than transpose.)
  Triangular matrix elements are filled in a clockwise spiral. See example,
  below.
  If `x.shape` is `[b1, b2, ..., bB, d]` then the output shape is
  `[b1, b2, ..., bB, n, n]` where `n` is such that `d = n(n+1)/2`, i.e.,
  `n = int(np.sqrt(0.25 + 2. * m) - 0.5)`.
  Example:
  ```python
  fill_triangular([1, 2, 3, 4, 5, 6])
  # ==> [[4, 0, 0],
  #      [6, 5, 0],
  #      [3, 2, 1]]
  fill_triangular([1, 2, 3, 4, 5, 6], upper=True)
  # ==> [[1, 2, 3],
  #      [0, 5, 6],
  #      [0, 0, 4]]
  ```
  The key trick is to create an upper triangular matrix by concatenating `x`
  and a tail of itself, then reshaping.
  Suppose that we are filling the upper triangle of an `n`-by-`n` matrix `M`
  from a vector `x`. The matrix `M` contains n**2 entries total. The vector `x`
  contains `n * (n+1) / 2` entries. For concreteness, we'll consider `n = 5`
  (so `x` has `15` entries and `M` has `25`). We'll concatenate `x` and `x` with
  the first (`n = 5`) elements removed and reversed:
  ```python
  x = np.arange(15) + 1
  xc = np.concatenate([x, x[5:][::-1]])
  # ==> array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 15, 14, 13,
  #            12, 11, 10, 9, 8, 7, 6])
  # (We add one to the arange result to disambiguate the zeros below the
  # diagonal of our upper-triangular matrix from the first entry in `x`.)
  # Now, reshape to lay this out as a matrix:
  y = np.reshape(xc, [5, 5])
  # ==> array([[ 1,  2,  3,  4,  5],
  #            [ 6,  7,  8,  9, 10],
  #            [11, 12, 13, 14, 15],
  #            [15, 14, 13, 12, 11],
  #            [10,  9,  8,  7,  6]])
  # Finally, zero the elements below the diagonal:
  y = np.triu(y, k=0)
  # ==> array([[ 1,  2,  3,  4,  5],
  #            [ 0,  7,  8,  9, 10],
  #            [ 0,  0, 13, 14, 15],
  #            [ 0,  0,  0, 12, 11],
  #            [ 0,  0,  0,  0,  6]])
  ```
  From this example we see that the resulting matrix is upper-triangular, and
  contains all the entries of x, as desired. The rest is details:
  - If `n` is even, `x` doesn't exactly fill an even number of rows (it fills
    `n / 2` rows and half of an additional row), but the whole scheme still
    works.
  - If we want a lower triangular matrix instead of an upper triangular,
    we remove the first `n` elements from `x` rather than from the reversed
    `x`.
  For additional comparisons, a pure numpy version of this function can be found
  in `distribution_util_test.py`, function `_fill_triangular`.
  Args:
    x: `Tensor` representing lower (or upper) triangular elements.
    upper: Python `bool` representing whether output matrix should be upper
      triangular (`True`) or lower triangular (`False`, default).
    name: Python `str`. The name to give this op.
  Returns:
    tril: `Tensor` with lower (or upper) triangular elements filled from `x`.
  Raises:
    ValueError: if `x` cannot be mapped to a triangular matrix.
  """
  with tf.name_scope(name or "fill_triangular"):
    x = tf.convert_to_tensor(value=x, name="x")
    m = tf.compat.dimension_value(
        tensorshape_util.with_rank_at_least(x.shape, 1)[-1])
    if m is not None:
      # Formula derived by solving for n: m = n(n+1)/2.
      m = np.int32(m)
      n = np.sqrt(0.25 + 2. * m) - 0.5
      if n != np.floor(n):
        raise ValueError("Input right-most shape ({}) does not "
                         "correspond to a triangular matrix.".format(m))
      n = np.int32(n)
      static_final_shape = x.shape[:-1].concatenate([n, n])
    else:
      m = tf.shape(input=x)[-1]
      # For derivation, see above. Casting automatically lops off the 0.5, so we
      # omit it.  We don't validate n is an integer because this has
      # graph-execution cost; an error will be thrown from the reshape, below.
      n = tf.cast(
          tf.sqrt(0.25 + tf.cast(2 * m, dtype=tf.float32)), dtype=tf.int32)
      static_final_shape = tensorshape_util.with_rank_at_least(
          x.shape, 1)[:-1].concatenate([None, None])
    # Try it out in numpy:
    #  n = 3
    #  x = np.arange(n * (n + 1) / 2)
    #  m = x.shape[0]
    #  n = np.int32(np.sqrt(.25 + 2 * m) - .5)
    #  x_tail = x[(m - (n**2 - m)):]
    #  np.concatenate([x_tail, x[::-1]], 0).reshape(n, n)  # lower
    #  # ==> array([[3, 4, 5],
    #               [5, 4, 3],
    #               [2, 1, 0]])
    #  np.concatenate([x, x_tail[::-1]], 0).reshape(n, n)  # upper
    #  # ==> array([[0, 1, 2],
    #               [3, 4, 5],
    #               [5, 4, 3]])
    #
    # Note that we can't simply do `x[..., -(n**2 - m):]` because this doesn't
    # correctly handle `m == n == 1`. Hence, we do nonnegative indexing.
    # Furthermore observe that:
    #   m - (n**2 - m)
    #   = n**2 / 2 + n / 2 - (n**2 - n**2 / 2 + n / 2)
    #   = 2 (n**2 / 2 + n / 2) - n**2
    #   = n**2 + n - n**2
    #   = n
    ndims = prefer_static_rank(x)
    if upper:
      x_list = [x, tf.reverse(x[..., n:], axis=[ndims - 1])]
    else:
      x_list = [x[..., n:], tf.reverse(x, axis=[ndims - 1])]
    new_shape = (
        tensorshape_util.as_list(static_final_shape)
        if tensorshape_util.is_fully_defined(static_final_shape) else tf.concat(
            [tf.shape(input=x)[:-1], [n, n]], axis=0))
    x = tf.reshape(tf.concat(x_list, axis=-1), new_shape)
    x = tf.linalg.band_part(
        x, num_lower=(0 if upper else -1), num_upper=(-1 if upper else 0))
    tensorshape_util.set_shape(x, static_final_shape)
    return x | 0.003679 | 
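
For reference, a minimal pure-NumPy sketch of the concatenate-and-reshape trick described above (an independent re-derivation, not the test helper mentioned in the docstring; it only handles 1-D input):

```python
import numpy as np

def fill_triangular_np(x, upper=False):
    """Fill a lower- (or upper-) triangular matrix from a vector, NumPy only."""
    x = np.asarray(x)
    m = x.shape[-1]
    n = int(np.sqrt(0.25 + 2. * m) - 0.5)
    if n * (n + 1) // 2 != m:
        raise ValueError("vector of length %d cannot fill a triangular matrix" % m)
    if upper:
        y = np.concatenate([x, x[n:][::-1]]).reshape(n, n)
        return np.triu(y)
    y = np.concatenate([x[n:], x[::-1]]).reshape(n, n)
    return np.tril(y)

fill_triangular_np([1, 2, 3, 4, 5, 6])
# array([[4, 0, 0],
#        [6, 5, 0],
#        [3, 2, 1]])
fill_triangular_np([1, 2, 3, 4, 5, 6], upper=True)
# array([[1, 2, 3],
#        [0, 5, 6],
#        [0, 0, 4]])
```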
| 
	def prepare_gold(ctx, annotations, gout):
    """Prepare bc-evaluate gold file from annotations supplied by CHEMDNER."""
    click.echo('chemdataextractor.chemdner.prepare_gold')
    for line in annotations:
        pmid, ta, start, end, text, category = line.strip().split('\t')
        gout.write('%s\t%s:%s:%s\n' % (pmid, ta, start, end)) | 0.002933 | 
| 
	def steam64_from_url(url, http_timeout=30):
    """
    Takes a Steam Community url and returns steam64 or None
    .. note::
        Each call makes a http request to ``steamcommunity.com``
    .. note::
        For a reliable resolving of vanity urls use ``ISteamUser.ResolveVanityURL`` web api
    :param url: steam community url
    :type url: :class:`str`
    :param http_timeout: how long to wait on the http request before returning ``None``
    :type http_timeout: :class:`int`
    :return: steam64, or ``None`` if ``steamcommunity.com`` is down
    :rtype: :class:`int` or :class:`None`
    Example URLs::
        https://steamcommunity.com/gid/[g:1:4]
        https://steamcommunity.com/gid/103582791429521412
        https://steamcommunity.com/groups/Valve
        https://steamcommunity.com/profiles/[U:1:12]
        https://steamcommunity.com/profiles/76561197960265740
        https://steamcommunity.com/id/johnc
    """
    match = re.match(r'^(?P<clean_url>https?://steamcommunity.com/'
                     r'(?P<type>profiles|id|gid|groups)/(?P<value>.*?))(?:/(?:.*)?)?$', url)
    if not match:
        return None
    web = make_requests_session()
    try:
        # user profiles
        if match.group('type') in ('id', 'profiles'):
            text = web.get(match.group('clean_url'), timeout=http_timeout).text
            data_match = re.search("g_rgProfileData = (?P<json>{.*?});[ \t\r]*\n", text)
            if data_match:
                data = json.loads(data_match.group('json'))
                return int(data['steamid'])
        # group profiles
        else:
            text = web.get(match.group('clean_url'), timeout=http_timeout).text
            data_match = re.search("'steam://friends/joinchat/(?P<steamid>\d+)'", text)
            if data_match:
                return int(data_match.group('steamid'))
    except requests.exceptions.RequestException:
        return None | 0.00365 | 
| 
	def agitate(self):
    """Agitate this particle so that it is likely to go to a new position.
    Every time agitate is called, the particle is jiggled an even greater
    amount.
    Parameters:
    --------------------------------------------------------------
    retval:               None
    """
    for (varName, var) in self.permuteVars.iteritems():
      var.agitate()
    self.newPosition() | 0.004975 | 
| 
	def changelist(self):
        """Which :class:`.Changelist` is this revision in"""
        if self._changelist:
            return self._changelist
        if self._p4dict['change'] == 'default':
            return Default(connection=self._connection)
        else:
            return Changelist(str(self._p4dict['change']), self._connection) | 0.005831 | 
| 
	def _RunActions(self, rule, client_id):
    """Run all the actions specified in the rule.
    Args:
      rule: Rule which actions are to be executed.
      client_id: Id of a client where rule's actions are to be executed.
    Returns:
      Number of actions started.
    """
    actions_count = 0
    for action in rule.actions:
      try:
        # Say this flow came from the foreman.
        token = self.token.Copy()
        token.username = "Foreman"
        if action.HasField("hunt_id"):
          if self._CheckIfHuntTaskWasAssigned(client_id, action.hunt_id):
            logging.info(
                "Foreman: ignoring hunt %s on client %s: was started "
                "here before", client_id, action.hunt_id)
          else:
            logging.info("Foreman: Starting hunt %s on client %s.",
                         action.hunt_id, client_id)
            flow_cls = registry.AFF4FlowRegistry.FlowClassByName(
                action.hunt_name)
            flow_cls.StartClients(action.hunt_id, [client_id])
            actions_count += 1
        else:
          flow.StartAFF4Flow(
              client_id=client_id,
              flow_name=action.flow_name,
              token=token,
              **action.argv.ToDict())
          actions_count += 1
      # There could be all kinds of errors we don't know about when starting the
      # flow/hunt so we catch everything here.
      except Exception as e:  # pylint: disable=broad-except
        logging.exception("Failure running foreman action on client %s: %s",
                          action.hunt_id, e)
    return actions_count | 0.0062 | 
| 
	def list_create(self, title):
        """
        Create a new list with the given `title`.
        
        Returns the `list dict`_ of the created list.
        """
        params = self.__generate_params(locals())
        return self.__api_request('POST', '/api/v1/lists', params) | 0.010601 | 
| 
	def pack_value(self, val):
        """Convert 8-byte string into 16-byte list"""
        if isinstance(val, bytes):
            val = list(iterbytes(val))
        slen = len(val)
        if self.pad:
            pad = b'\0\0' * (slen % 2)
        else:
            pad = b''
        return struct.pack('>' + 'H' * slen, *val) + pad, slen, None | 0.00578 | 
| 
	def exchange(_context, component, backend, base, name=''):
        """Handle exchange subdirectives."""
        _context.action(
            discriminator=('currency', 'exchange', component),
            callable=_register_exchange,
            args=(name, component, backend, base)
        ) | 0.006849 | 
| 
	def etag(self, etag):
        """
        Set the ETag of the resource.
        :param etag: the ETag
        """
        if not isinstance(etag, bytes):
            etag = bytes(etag, "utf-8")
        self._etag.append(etag) | 0.00885 | 
| 
	def bucket_policy_to_dict(policy):
    """Produce a dictionary of read, write permissions for an existing bucket policy document"""
    import json
    if not isinstance(policy, dict):
        policy = json.loads(policy)
    statements = {s['Sid']: s for s in policy['Statement']}
    d = {}
    for rw in ('Read', 'Write'):
        for prefix in TOP_LEVEL_DIRS:
            sid = rw.title() + prefix.title()
            if sid in statements:
                if isinstance(statements[sid]['Principal']['AWS'], list):
                    for principal in statements[sid]['Principal']['AWS']:
                        user_name = principal.split('/').pop()
                        d[(user_name, prefix)] = rw[0]
                else:
                    user_name = statements[sid]['Principal']['AWS'].split('/').pop()
                    d[(user_name, prefix)] = rw[0]
    return d | 0.003382 | 
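
An illustrative round-trip, assuming `TOP_LEVEL_DIRS` contains a `'public'` prefix (that constant and the ARN below are assumptions, not taken from the source):

```python
policy = {
    "Statement": [
        {
            "Sid": "ReadPublic",
            "Principal": {"AWS": "arn:aws:iam::123456789012:user/alice"},
        }
    ]
}
bucket_policy_to_dict(policy)
# {('alice', 'public'): 'R'}
```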
| 
	def _get_all_forecast_from_api(api_result: dict) -> OrderedDict:
    """Converts results fråm API to SmhiForeCast list"""
    # Total time in hours since last forecast
    total_hours_last_forecast = 1.0
    # Last forecast time
    last_time = None
    # Need the ordered dict to get
    # the days in order in next stage
    forecasts_ordered = OrderedDict()
    # Get the parameters
    for forecast in api_result['timeSeries']:
        valid_time = datetime.strptime(
            forecast['validTime'], "%Y-%m-%dT%H:%M:%SZ")
        for param in forecast['parameters']:
            if param['name'] == 't':
                temperature = float(param['values'][0])  # Celsius
            elif param['name'] == 'r':
                humidity = int(param['values'][0])  # Percent
            elif param['name'] == 'msl':
                pressure = int(param['values'][0])  # hPa
            elif param['name'] == 'tstm':
                thunder = int(param['values'][0])  # Percent
            elif param['name'] == 'tcc_mean':
                octa = int(param['values'][0])  # Cloudiness in octas
                if 0 <= octa <= 8:  # Between 0 -> 8
                    cloudiness = round(100*octa/8)  # Convert octas to percent
                else:
                    cloudiness = 100  # If not determined use 100%
            elif param['name'] == 'Wsymb2':
                symbol = int(param['values'][0])  # category
            elif param['name'] == 'pcat':
                precipitation = int(param['values'][0])  # precipitation category
            elif param['name'] == 'pmean':
                mean_precipitation = float(
                    param['values'][0])  # mean precipitation
            elif param['name'] == 'ws':
                wind_speed = float(param['values'][0])  # wind speed
            elif param['name'] == 'wd':
                wind_direction = int(param['values'][0])  # wind direction
            elif param['name'] == 'vis':
                horizontal_visibility = float(param['values'][0])  # Visibility
            elif param['name'] == 'gust':
                wind_gust = float(param['values'][0])  # wind gust speed
        roundedTemp = int(round(temperature))
        if last_time is not None:
            total_hours_last_forecast = (valid_time - last_time).seconds/60/60
        # Total precipitation, have to calculate with the nr of
        # hours since last forecast to get correct total value
        tp = round(mean_precipitation*total_hours_last_forecast, 2)
        forecast = \
            SmhiForecast(roundedTemp, roundedTemp, roundedTemp,
                         humidity, pressure, thunder, cloudiness,
                         precipitation, wind_direction, wind_speed,
                         horizontal_visibility, wind_gust,
                         round(mean_precipitation, 1), tp, symbol,
                         valid_time)
        if valid_time.day not in forecasts_ordered:
            # add a new list
            forecasts_ordered[valid_time.day] = []
        forecasts_ordered[valid_time.day].append(forecast)
        last_time = valid_time
    return forecasts_ordered | 0.000311 | 
| 
	def zmax(self, return_times=False, func=np.mean,
             interp_kwargs=None, minimize_kwargs=None,
             approximate=False):
        """
        Estimate the maximum ``z`` height of the orbit by identifying local
        maxima in the absolute value of the ``z`` position and interpolating
        between timesteps near the maxima.
        By default, this returns the mean of all local maxima. To get, e.g., the
        largest ``z`` excursion, pass in ``func=np.max``. To get all ``z``
        maxima, pass in ``func=None``.
        Parameters
        ----------
        func : func (optional)
            A function to evaluate on all of the identified z maximum times.
        return_times : bool (optional)
            Also return the times of maximum.
        interp_kwargs : dict (optional)
            Keyword arguments to be passed to
            :class:`scipy.interpolate.InterpolatedUnivariateSpline`.
        minimize_kwargs : dict (optional)
            Keyword arguments to be passed to :class:`scipy.optimize.minimize`.
        approximate : bool (optional)
            Compute approximate values by skipping interpolation.
        Returns
        -------
        zs : float, :class:`~numpy.ndarray`
            Either a single number or an array of maximum z heights.
        times : :class:`~numpy.ndarray` (optional, see ``return_times``)
            If ``return_times=True``, also returns an array of the apocenter
            times.
        """
        if return_times and func is not None:
            raise ValueError("Cannot return times if reducing "
                             "using an input function. Pass `func=None` if "
                             "you want to return all individual values "
                             "and times.")
        if func is None:
            reduce = False
            func = lambda x: x
        else:
            reduce = True
        # time must increase
        if self.t[-1] < self.t[0]:
            self = self[::-1]
        vals = []
        times = []
        for orbit in self.orbit_gen():
            v, t = orbit._max_helper(np.abs(orbit.cylindrical.z),
                                     interp_kwargs=interp_kwargs,
                                     minimize_kwargs=minimize_kwargs,
                                     approximate=approximate)
            vals.append(func(v))
            times.append(t)
        return self._max_return_helper(vals, times, return_times, reduce) | 0.002418 | 
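
A hedged usage sketch, assuming `orbit` is an orbit object from the surrounding library that exposes this method:

```python
# Mean of all local |z| maxima (the default reduction).
mean_zmax = orbit.zmax()

# All individual maxima and the times at which they occur.
all_zmax, zmax_times = orbit.zmax(func=None, return_times=True)
```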
| 
	def alignment(job, ids, input_args, sample):
    """
    Runs BWA and then Bamsort on the supplied fastqs for this sample
    Input1: Toil Job instance
    Input2: jobstore id dictionary
    Input3: Input arguments dictionary
    Input4: Sample tuple -- contains uuid and urls for the sample
    """
    uuid, urls = sample
    # ids['bam'] = job.fileStore.getEmptyFileStoreID()
    work_dir = job.fileStore.getLocalTempDir()
    output_dir = input_args['output_dir']
    key_path = input_args['ssec']
    cores = multiprocessing.cpu_count()
    # I/O
    return_input_paths(job, work_dir, ids, 'ref.fa', 'ref.fa.amb', 'ref.fa.ann',
                                                     'ref.fa.bwt', 'ref.fa.pac', 'ref.fa.sa', 'ref.fa.fai')
    # Get fastqs associated with this sample
    for url in urls:
        download_encrypted_file(work_dir, url, key_path, os.path.basename(url))
    # Parameters for BWA and Bamsort
    docker_cmd = ['docker', 'run', '--rm', '-v', '{}:/data'.format(work_dir)]
    bwa_command = ["jvivian/bwa",
                   "mem",
                   "-R", "@RG\tID:{0}\tPL:Illumina\tSM:{0}\tLB:KapaHyper".format(uuid),
                   "-T", str(0),
                   "-t", str(cores),
                   "/data/ref.fa"] + [os.path.join('/data/',  os.path.basename(x)) for x in urls]
    bamsort_command = ["jeltje/biobambam",
                       "/usr/local/bin/bamsort",
                       "inputformat=sam",
                       "level=1",
                       "inputthreads={}".format(cores),
                       "outputthreads={}".format(cores),
                       "calmdnm=1",
                       "calmdnmrecompindetonly=1",
                       "calmdnmreference=/data/ref.fa",
                       "I=/data/{}".format(uuid + '.sam')]
    # Piping the output to a file handle
    with open(os.path.join(work_dir, uuid + '.sam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bwa_command, stdout=f_out)
    with open(os.path.join(work_dir, uuid + '.bam'), 'w') as f_out:
        subprocess.check_call(docker_cmd + bamsort_command, stdout=f_out)
    # Save in JobStore
    # job.fileStore.updateGlobalFile(ids['bam'], os.path.join(work_dir, uuid + '.bam'))
    ids['bam'] = job.fileStore.writeGlobalFile(os.path.join(work_dir, uuid + '.bam'))
    # Copy file to S3
    if input_args['s3_dir']:
        job.addChildJobFn(upload_bam_to_s3, ids, input_args, sample, cores=32, memory='20 G', disk='30 G')
    # Move file in output_dir
    if input_args['output_dir']:
        move_to_output_dir(work_dir, output_dir, uuid=None, files=[uuid + '.bam']) | 0.003423 | 
| 
	def ip_addresses(self):
        """
        Access the ip_addresses
        :returns: twilio.rest.api.v2010.account.sip.ip_access_control_list.ip_address.IpAddressList
        :rtype: twilio.rest.api.v2010.account.sip.ip_access_control_list.ip_address.IpAddressList
        """
        if self._ip_addresses is None:
            self._ip_addresses = IpAddressList(
                self._version,
                account_sid=self._solution['account_sid'],
                ip_access_control_list_sid=self._solution['sid'],
            )
        return self._ip_addresses | 0.00703 | 
| 
	def convert_using_api(from_currency, to_currency):
	""" convert from from_currency to to_currency by requesting API """
	convert_str = from_currency + '_' + to_currency
	options = {'compact': 'ultra', 'q': convert_str}
	api_url = 'https://free.currencyconverterapi.com/api/v5/convert'
	result = requests.get(api_url, params=options).json()
	return result[convert_str] | 0.019074 | 
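
Example call (requires network access to the currencyconverterapi endpoint; the returned rate is whatever the service reports under the `'USD_EUR'` key):

```python
rate = convert_using_api('USD', 'EUR')
print(rate)  # e.g. 0.88
```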
| 
	def joint_prop(self, properties, pic_path, num_iid, session, id=None, position=None):
        '''taobao.item.joint.propimg Associate a property image with an item
        
        - Associates a property image with the item identified by num_iid
        - The item referenced by num_iid must belong to the user of the current session
        - The image property must be a color property, since it is linked to SKUs when displayed on the storefront
        - Image association is restricted by seller type and image source: the seller must be a B-type seller or subscribe to the multi-image service, and the image must come from the seller's own image space
        - The number of item images is limited: uploaded and associated images together may not exceed the quota of 24 (one per color property)'''
        request = TOPRequest('taobao.item.joint.prop')
        request['num_iid'] = num_iid
        request['pic_path'] = pic_path
        request['properties'] = properties
        if id!=None:
            request['id'] = id
        if position!=None:
            request['position'] = position
        self.create(self.execute(request, session)['prop_img'])
        return self | 0.015171 | 
| 
	def trim_docstring(docstring):
    """Taken from http://www.python.org/dev/peps/pep-0257/"""
    if not docstring:
        return ''
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = docstring.expandtabs().splitlines()
    # Determine minimum indentation (first line doesn't count):
    indent = maxsize
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            indent = min(indent, len(line) - len(stripped))
    # Remove indentation (first line is special):
    lines[0] = lines[0].strip()
    if indent < maxsize:
        index = 1
        for line in lines[1:]:
            lines[index] = line[indent:].rstrip()
            index += 1
    # Strip off trailing and leading blank lines:
    while lines and not lines[-1]:
        del lines[-1]
    while lines and not lines[0]:
        del lines[0]
    # Return a single string:
    return '\n'.join(lines) | 0.001038 | 
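
A small usage example showing how the common indentation is stripped while relative indentation is preserved:

```python
DOC = """Summary line.

    Detail line one.
        Deeper detail.
    """

print(trim_docstring(DOC))
# Summary line.
#
# Detail line one.
#     Deeper detail.
```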
| 
	def _handleEsc(self):
        """ Handler for CTRL+Z keypresses """
        if self._typingSms:
            self.serial.write(self.ESC_CHARACTER)
            self._typingSms = False
            self.inputBuffer = []
            self.cursorPos = 0 | 0.00813 | 
| 
	def coordinates(x0, y0, distance, angle):
    """ Returns the location of a point by rotating around origin (x0,y0).
    """
    return (x0 + cos(radians(angle)) * distance,
            y0 + sin(radians(angle)) * distance) | 0.004425 | 
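
Quick usage (assuming `cos`, `sin`, and `radians` are imported from `math` in the surrounding module):

```python
coordinates(0, 0, 10, 0)    # (10.0, 0.0)
coordinates(0, 0, 10, 90)   # (~0.0, 10.0), up to floating-point error
```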
| 
	def _get_cpu_info_from_sysinfo_v1():
	'''
	Returns the CPU info gathered from sysinfo.
	Returns {} if sysinfo is not found.
	'''
	try:
		# Just return {} if there is no sysinfo
		if not DataSource.has_sysinfo():
			return {}
		# If sysinfo fails return {}
		returncode, output = DataSource.sysinfo_cpu()
		if output == None or returncode != 0:
			return {}
		# Various fields
		vendor_id = '' #_get_field(False, output, None, None, 'CPU #0: ')
		processor_brand = output.split('CPU #0: "')[1].split('"\n')[0].strip()
		cache_size = '' #_get_field(False, output, None, None, 'machdep.cpu.cache.size')
		stepping = int(output.split(', stepping ')[1].split(',')[0].strip())
		model = int(output.split(', model ')[1].split(',')[0].strip())
		family = int(output.split(', family ')[1].split(',')[0].strip())
		# Flags
		flags = []
		for line in output.split('\n'):
			if line.startswith('\t\t'):
				for flag in line.strip().lower().split():
					flags.append(flag)
		flags.sort()
		# Convert from GHz/MHz string to Hz
		hz_advertised, scale = _parse_cpu_brand_string(processor_brand)
		hz_actual = hz_advertised
		info = {
		'vendor_id_raw' : vendor_id,
		'brand_raw' : processor_brand,
		'hz_advertised_friendly' : _hz_short_to_friendly(hz_advertised, scale),
		'hz_actual_friendly' : _hz_short_to_friendly(hz_actual, scale),
		'hz_advertised' : _hz_short_to_full(hz_advertised, scale),
		'hz_actual' : _hz_short_to_full(hz_actual, scale),
		'l2_cache_size' : _to_friendly_bytes(cache_size),
		'stepping' : stepping,
		'model' : model,
		'family' : family,
		'flags' : flags
		}
		info = {k: v for k, v in info.items() if v}
		return info
	except:
		#raise # NOTE: To have this throw on error, uncomment this line
		return {} | 0.045061 | 
| 
	def circles_pycairo(width, height, color):
    """ Implementation of circle border with PyCairo. """
    cairo_color = color / rgb(255, 255, 255)
    surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, width, height)
    ctx = cairo.Context(surface)
    # draw a circle in the center
    ctx.new_path()
    ctx.set_source_rgb(cairo_color.red, cairo_color.green, cairo_color.blue)
    ctx.arc(width / 2, height / 2, width / 2, 0, 2 * pi)
    ctx.fill()
    surface.write_to_png('circles.png') | 0.002028 | 
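
A hedged usage sketch; the `rgb()` helper is assumed from the surrounding module (the function divides by `rgb(255, 255, 255)`, so `color` must support element-wise division):

```python
# Draws a red disc filling a 256x256 canvas and writes it to circles.png.
circles_pycairo(256, 256, rgb(255, 0, 0))
```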
| 
	def flux_balance(model, reaction, tfba, solver):
    """Run flux balance analysis on the given model.
    Yields the reaction id and flux value for each reaction in the model.
    This is a convenience function for setting up and running the
    FluxBalanceProblem. If the FBA is solved for more than one parameter,
    it is recommended to set up and reuse the FluxBalanceProblem manually
    for a speed-up.
    This is an implementation of flux balance analysis (FBA) as described in
    [Orth10]_ and [Fell86]_.
    Args:
        model: MetabolicModel to solve.
        reaction: Reaction to maximize. If a dict is given, this instead
            represents the objective function weights on each reaction.
        tfba: If True enable thermodynamic constraints.
        solver: LP solver instance to use.
    Returns:
        Iterator over reaction ID and reaction flux pairs.
    """
    fba = _get_fba_problem(model, tfba, solver)
    fba.maximize(reaction)
    for reaction in model.reactions:
        yield reaction, fba.get_flux(reaction) | 0.00095 | 
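
A hedged usage sketch; the objective name and the way `model` and `solver` are obtained are illustrative assumptions, not taken from the source:

```python
# Maximize a hypothetical biomass objective and print all resulting fluxes.
for reaction_id, flux in flux_balance(model, 'Biomass', tfba=False, solver=solver):
    print(reaction_id, flux)
```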
| 
	def load(self, json_file):
        """
        Build a cart from a json file
        """
        cart_file = os.path.join(CART_LOCATION, json_file)
        try:
            cart_body = juicer.utils.read_json_document(cart_file)
        except IOError as e:
            juicer.utils.Log.log_error('an error occurred while accessing %s:' %
                    cart_file)
            raise JuicerError(e.message)
        self.cart_name = cart_body['_id']
        if cart_body['current_env'] == '':
            self.current_env = juicer.utils.get_login_info()[1]['start_in']
        else:
            self.current_env = cart_body['current_env']
        for repo, items in cart_body['repos_items'].iteritems():
            self.add_repo(repo, items) | 0.005333 | 
| 
	def _find_recursive_dependencies(sql, values, code, resolved_vars, resolving_vars=None):
    """ Recursive helper method for expanding variables including transitive dependencies.
    Placeholders in SQL are represented as $<name>. If '$' must appear within
    the SQL statement literally, then it can be escaped as '$$'.
    Args:
      sql: the raw SQL statement with named placeholders.
      values: the user-supplied dictionary of name/value pairs to use for placeholder values.
      code: an array of referenced UDFs found during expansion.
      resolved_vars: a ref parameter for the variable references completely resolved so far.
      resolving_vars: a ref parameter for the variable(s) we are currently resolving; if we see
          a dependency again that is in this set we know we have a circular reference.
    Returns:
      The formatted SQL statement with placeholders replaced with their values.
    Raises:
      Exception if a placeholder was found in the SQL statement, but did not
      have a corresponding argument value.
    """
    # Get the set of $var references in this SQL.
    dependencies = SqlStatement._get_dependencies(sql)
    for dependency in dependencies:
      # Now we check each dependency. If it is already resolved - i.e., we have an
      # expansion for it already - we just continue.
      if dependency in resolved_vars:
        continue
      # Look it up in our resolution namespace dictionary.
      dep = datalab.utils.get_item(values, dependency)
      # If it is a SQL module, get the main/last query from the module, so users can refer
      # to $module. Useful especially if final query in module has no DEFINE QUERY <name> part.
      if isinstance(dep, types.ModuleType):
        dep = _utils.get_default_query_from_module(dep)
      # If we can't resolve the $name, give up.
      if dep is None:
        raise Exception("Unsatisfied dependency $%s" % dependency)
      # If it is a SqlStatement, it may have its own $ references in turn; check to make
      # sure we don't have circular references, and if not, recursively expand it and add
      # it to the set of complete dependencies.
      if isinstance(dep, SqlStatement):
        if resolving_vars is None:
          resolving_vars = []
        elif dependency in resolving_vars:
          # Circular dependency
          raise Exception("Circular dependency in $%s" % dependency)
        resolving_vars.append(dependency)
        SqlStatement._find_recursive_dependencies(dep._sql, values, code, resolved_vars,
                                                  resolving_vars)
        resolving_vars.pop()
        resolved_vars[dependency] = SqlStatement(dep._sql)
      else:
        resolved_vars[dependency] = dep | 0.011322 | 
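
A toy illustration of the placeholder expansion this helper implements (a conceptual sketch only: it omits UDF tracking, module resolution, and the `$$` escape handled by the real code):

```python
import re

def expand(sql, values, resolving=None):
    """Expand $name placeholders, following nested references and
    raising on unsatisfied or circular dependencies."""
    resolving = resolving or set()

    def substitute(match):
        name = match.group(1)
        if name not in values:
            raise Exception("Unsatisfied dependency $%s" % name)
        if name in resolving:
            raise Exception("Circular dependency in $%s" % name)
        return expand(str(values[name]), values, resolving | {name})

    return re.sub(r'\$(\w+)', substitute, sql)

expand('SELECT * FROM $table WHERE $cond',
       {'table': 'logs', 'cond': 'ts > $start', 'start': '100'})
# 'SELECT * FROM logs WHERE ts > 100'
```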