| content (string, lengths 35-762k) | sha1 (string, length 40) | id (int64, 0-3.66M) |
|---|---|---|
	import os
def get_wharton_sessionid(public=False):
    """ Try to get a GSR session id. """
    sessionid = request.args.get("sessionid")
    cache_key = "studyspaces:gsr:sessionid"
    if sessionid:
        return sessionid
    if public:
        if db.exists(cache_key):
            return db.get(cache_key).decode("utf8")
        return os.environ.get("GSR_SESSIONID")
    return None 
 | 
	83cc911185a8849ca7c37bf74ea0ae652b596461 
 | 3,657,600 
							 | 
					
	def timing(name='', logger=None):
    """Simple context manager to measure the clock counts.

    >>> with timing():
    ...     whatever action is here
    at the exit it prints the clock counts

    >>> with timing() as c:
    ...     whatever action is here
    at the exit it prints the clock counts

    >>> print(c.delta)
    """
    return Timer(name, logger) 
 | 
	f22769f267df8472f8b11db64ab6817db6e24414 
 | 3,657,601 
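The Timer class used above is not included in the snippet; below is a minimal sketch of what such a context manager could look like, assuming it only needs to expose a delta attribute and report the elapsed time on exit (hypothetical, not the original implementation).

import time

class Timer:
    """Hypothetical stand-in for the Timer used by timing(), for illustration only."""
    def __init__(self, name='', logger=None):
        self.name = name
        self.logger = logger
        self.delta = None

    def __enter__(self):
        self.start = time.perf_counter()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.delta = time.perf_counter() - self.start
        message = "%s: %.6f s" % (self.name or "timing", self.delta)
        if self.logger:
            self.logger.info(message)   # route through the provided logger
        else:
            print(message)              # otherwise print at exit, as the docstring describes
        return False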
							 | 
					
	def abs(x):
    """
    complex-step safe version of numpy.abs function.
    Parameters
    ----------
    x : ndarray
        array value to be computed on
    Returns
    -------
    ndarray
    """
    if isinstance(x, np.ndarray):
        return x * np.sign(x)
    elif x.real < 0.0:
        return -x
    return x 
 | 
	71503b89e3a78e12a50f88ce2e0a17301f985ec7 
 | 3,657,602 
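A short usage sketch of why the scalar branch checks x.real: complex-step differentiation perturbs the input with a tiny imaginary part, which the ordinary magnitude-based abs would discard. This assumes the abs defined above is in scope (it shadows the builtin).

# complex-step derivative of |x| at x = -3.0; the expected slope is -1.0
h = 1e-30
x = -3.0 + 1j * h          # complex-step perturbation
y = abs(x)                  # scalar branch: x.real < 0, so -x is returned and the imaginary part survives
print(y.imag / h)           # -1.0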
							 | 
					
	import time
import re
from operator import sub
async def add_comm_post(request):
#	return json.dumps(current_id, title, link, proc_id)
	"""current_id это id ветки"""
	# ip = request.environ.get('REMOTE_ADDR')
	data = await request.post(); ip = None
	print('data->', data)
	# get the client's ip address
	peername = request.transport.get_extra_info('peername'); host=None
	if peername is not None:
		host, port = peername
		ip = host
		# print ('host, port->', host, port)
	user = get_current_user(request, True)
	if check_ban(request, host, user):
		return response_json(request, {"result":"fail", "error":"Ваш ip или аккаунт забанен на этом сайте, свяжитесь с администрацией"})
	else: title = data.get('title')
	if not user_has_permission(request, 'des:obj', 'add_com') and  not user_has_permission(request, 'des:obj', 'add_com_pre'):
		return response_json(request, {"result":"fail", "error":"no comment"})
	if not check_user_rate(request, user):
		return response_json(request, {"result":"fail", "error":"Вы не можете оставлять сообщения слишком часто, из-за отрицательной кармы"})
	doc_id = data.get('comm_id')
	id = data.get('id')
	if user_is_logged_in(request): title = get_current_user(request)
	# tle = get_doc(request, doc_id )
	# print( doc_id )
	# print( tle )
	# tle = get_doc(request, doc_id )['doc']['title']
	title_ = ct(request, title )
	title = no_script( title ) if title else 'Аноним'
	parent = data.get('parent', "_")
	descr = data.get( 'descr')
	descr = no_script( descr )
	descr = descr.replace('\n', '<br/>')
	# return if both permissions are missing; if only one is missing, it gets checked right below
	pre = 'true' if not user_has_permission(request, 'des:obj', 'add_com') else 'false'
	date = str( time.strftime("%Y-%m-%d %H:%M:%S") )
	user_ = get_current_user_name(request, title ) or title
	our = "true" if user_is_logged_in(request) else "false"
	body = re.sub(r'(https?://([a-z0-9-]+([.][a-z0-9-]+)+)+(/([0-9a-z._%?#]+)+)*/?)', r'<a href="\1">\1</a>', descr)
	# attach the child comment to its parent document
	request.db.doc.update({ "_id": parent }, { "$addToSet": { "child": doc_id } } )
	# store the comment in the comments catalogue
	doc_id_comm, updated = create_empty_row_(request, 'des:comments', parent, '', { "user":'user:'+title })
	data = {"id":doc_id_comm, "title":title_, "date":date, "body":body, "parent":parent, "owner":id, 'ip':ip, 'name':user_, "our":our, 'pre':pre }
	update_row_(request, 'des:comments', doc_id_comm, data, parent)
	if 'notify_user' in  dir(settings) and settings.notify_user:
	# if 'notify_user' in settings and settings.notify_user:
		# link = make_link('show_object', {'doc_id':doc_id }, True)+'#comm_'+ str( id )
		link = settings.domain+'/news/'+doc_id+'#comm_'+ str( id )
		subject = 'User {} add comment'.format( title )
		sub('user:'+title, link, subject)
	print('id1', id)
	id = get_doc(request, id)['_id']
	print('id2', id)
	invalidate_cache('single_page', id=id)
	# rev = get_doc(request, doc_id)['doc']['rev']
	# reset_cache(type="doc", doc_id=rev)
	# keep the comment count in a separate document
	request.db.doc.update({ "_id": doc_id }, { "$inc": { "count_branch":1 } } )
	# return json.dumps({"result":"ok", "content":data.update({"title":title}), "hash":""})
	return response_json(request, {"result":"ok", "content":data, "hash":""}) 
 | 
	1038edd1834786ba1325e7f28f77f505adc8fb4b 
 | 3,657,603 
							 | 
					
	def reachable_from_node(node, language=None, include_aliases=True):
    """Returns a tuple of strings containing html <ul> lists of the Nodes and
    pages that are children of "node" and any MetaPages associated with these
    items.  
    :param node: node to find reachables for
    :param language: if None, returns all items, if specified restricts list
        to just those with the given language, defaults to None
    :param include_aliases: False to skip calculation of aliases, returns
        None for second item in tuple
    :returns: (node_list, alias_list)
    """
    alias_list = None
    if include_aliases:
        # find all of the MetaPages that would be unreachable
        nodes = list(node.get_descendants())
        nodes.append(node)
        metapages = MetaPage.objects.filter(node__in=nodes)
        # find anything that aliases one of the targeted metapages
        alias_list = reachable_aliases(metapages, language)
    node_list = \
"""<ul>
%s
</ul>""" % _pages_subtree_as_list(node, node.site.default_language)
    return (node_list, alias_list) 
 | 
	dcc93486fcae168293f17ee2a7c067dbc1eef5fe 
 | 3,657,604 
							 | 
					
	def init_data():
    """
    setup all kinds of constants here, just to make it cleaner :)
    """
    if args.dataset=='imagenet32':
        mean = (0.4811, 0.4575, 0.4078)
        std = (0.2605 , 0.2533, 0.2683)
        num_classes = 1000
    else:
        raise NotImplementedError
    if args.whiten_image==0:
        mean = (0.5, 0.5, 0.5)
        std = (0.5, 0.5, 0.5)
        
    transform_train = transforms.Compose([
        transforms.RandomHorizontalFlip(),  # with p = 0.5
        transforms.RandomCrop(32, padding=4, padding_mode='reflect'),  # with p = 1
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    trainset = ImageNet32(root=args.data_root, train=True,transform=transform_train)
    testset = ImageNet32(root=args.data_root, train=False,transform=transform_test)
    return trainset, testset, transform_train, transform_test, num_classes 
 | 
	7d75af1f316a703041926d9a1875ae18f19c8342 
 | 3,657,605 
							 | 
					
	def make_status_craft():
    """ Cria alguns status de pedido de fabricação"""
    if Statusfabricacao.objects.count() == 0:
        status1 = Statusfabricacao(order=0, status='Pedido Criado')
        status2 = Statusfabricacao(order=1, status='Maturação')
        status3 = Statusfabricacao(order=2, status='Finalização')
        status4 = Statusfabricacao(order=3, status='Produção Encerrada')
        status1.save()
        status2.save()
        status3.save()
        status4.save()
        return True
    return False 
 | 
	01b9e1cbb48654f3baab7a4e55cd0f22a0bb60fe 
 | 3,657,606 
							 | 
					
	import requests
import json
def _call_rest_api(url, input_data, request_type):
    """Calls the other rest api's"""
    try:
        if request_type == 'post':
            req = requests.post(url, params=input_data, json=input_data, timeout=30)
        else:
            req = requests.get(url, params=input_data, timeout=30)
        response = req.text
        val = json.loads(response)
    except Exception as e:
        logger.error("Exception in _call_rest_api : " + str(e))
        raise ValueError("Filter is down!!!!")
    return val 
 | 
	8c67e79c6867d1e63a1487c747682c24da229e46 
 | 3,657,607 
							 | 
					
	def compute_tso_threshold(arr, min_td=0.1, max_td=0.5, perc=10, factor=15.0):
    """
    Computes the daily threshold value separating rest periods from active periods
    for the TSO detection algorithm.
    Parameters
    ----------
    arr : array
        Array of the absolute difference of the z-angle.
    min_td : float
        Minimum acceptable threshold value.
    max_td : float
        Maximum acceptable threshold value.
    perc : integer, optional
        Percentile to use for the threshold. Default is 10.
    factor : float, optional
        Factor to multiply the percentile value by. Default is 15.0.
    Returns
    -------
    td : float
    """
    td = min((max((percentile(arr, perc) * factor, min_td)), max_td))
    return td 
 | 
	4188d4a290e884210351f928d18d6f4bdd4e8a0b 
 | 3,657,608 
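A self-contained usage sketch, assuming the bare percentile() call in the snippet refers to numpy.percentile; the toy z-angle trace below is illustrative only.

import numpy as np
from numpy import percentile

rng = np.random.default_rng(0)
z_angle = np.cumsum(rng.normal(0.0, 0.05, 1000))   # toy z-angle trace
arr = np.abs(np.diff(z_angle))                      # absolute successive differences

# same expression as compute_tso_threshold(arr): clip percentile * factor into [min_td, max_td]
td = min(max(percentile(arr, 10) * 15.0, 0.1), 0.5)
print(td)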
							 | 
					
	def run_generator(conversation_name):
    """
    Input:
        conversation_name: name of conversation to analyze
    Output:
        username of next speaker, message for that speaker to send next
    """
    state = settings.DISCORD_CONVERSATION_STATES.get(conversation_name, {})
    (
        next_speaker_username,
        next_message,
        convo,
        index,
    ) = generate_next_speaker_and_message(state, conversation_name)
    if not next_speaker_username:
        return None, None
    bot = TwitterBot.objects.get(username=next_speaker_username)
    post = TwitterPost.objects.create(author=bot, content=next_message)
    convo.twitterconversationpost_set.create(index=index, author=bot, post=post)
    return next_speaker_username, next_message 
 | 
	22735cdd46469976d079f065ee60e3a886dfc654 
 | 3,657,609 
							 | 
					
	def count_uniques(row):
    """
    Count the unique values in row, minus 1 (because NaN counts as a unique value)
    """
    return len(np.unique(row)) - 1 
 | 
	af28e419aba44992ee27c57dacb271ff692fc535 
 | 3,657,610 
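A quick usage sketch, assuming count_uniques above is in scope. The -1 correction presumes the row contains a NaN, and recent NumPy versions collapse repeated NaNs into a single unique entry.

import numpy as np

row = np.array([3.0, 1.0, 3.0, np.nan])
print(np.unique(row))       # [ 1.  3. nan]
print(count_uniques(row))   # 2, the number of unique non-NaN values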
							 | 
					
	import numpy
def gmres_dot(X, surf_array, field_array, ind0, param, timing, kernel):
    """
    It computes the matrix-vector product in the GMRES.
    Arguments
    ----------
    X          : array, initial vector guess.
    surf_array : array, contains the surface classes of each region on the
                        surface.
    field_array: array, contains the Field classes of each region on the
                        surface.
    ind0       : class, it contains the indices related to the treecode
                        computation.
    param      : class, parameters related to the surface.
    timing     : class, it contains timing information for different parts of
                        the code.
    kernel     : pycuda source module.
    Returns
    --------
    MV         : array, resulting matrix-vector multiplication.
    """
    Nfield = len(field_array)
    Nsurf = len(surf_array)
    #   Check if there is a complex dielectric
    if any([numpy.iscomplexobj(f.E) for f in field_array]):
        complex_diel = True
    else:
        complex_diel = False
    #   Place weights on corresponding surfaces and allocate memory
    Naux = 0
    for i in range(Nsurf):
        N = len(surf_array[i].triangle)
        if surf_array[i].surf_type == 'dirichlet_surface':
            if complex_diel:
                surf_array[i].XinK = numpy.zeros(N, dtype=numpy.complex)
            else:
                surf_array[i].XinK = numpy.zeros(N)
            surf_array[i].XinV = X[Naux:Naux + N]
            Naux += N
        elif surf_array[i].surf_type == 'neumann_surface' or surf_array[
                i].surf_type == 'asc_surface':
            surf_array[i].XinK = X[Naux:Naux + N]
            if complex_diel:
                surf_array[i].XinV = numpy.zeros(N, dtype=numpy.complex)
            else:
                surf_array[i].XinV = numpy.zeros(N)
            Naux += N
        else:
            surf_array[i].XinK = X[Naux:Naux + N]
            surf_array[i].XinV = X[Naux + N:Naux + 2 * N]
            Naux += 2 * N
        if complex_diel:
            surf_array[i].Xout_int = numpy.zeros(N, dtype=numpy.complex)
            surf_array[i].Xout_ext = numpy.zeros(N, dtype=numpy.complex)
        else:
            surf_array[i].Xout_int = numpy.zeros(N)
            surf_array[i].Xout_ext = numpy.zeros(N)
#   Loop over fields
    for F in range(Nfield):
        parent_type = 'no_parent'
        if len(field_array[F].parent) > 0:
            parent_type = surf_array[field_array[F].parent[0]].surf_type
        if parent_type == 'asc_surface':
            #           ASC only for self-interaction so far
            LorY = field_array[F].LorY
            p = field_array[F].parent[0]
            v = selfASC(surf_array[p], p, p, LorY, param, ind0, timing, kernel)
            surf_array[p].Xout_int += v
        if parent_type != 'dirichlet_surface' and parent_type != 'neumann_surface' and parent_type != 'asc_surface':
            LorY = field_array[F].LorY
            param.kappa = field_array[F].kappa
            if len(field_array[F].parent) > 0:
                p = field_array[F].parent[0]
                v = selfInterior(surf_array[p], p, LorY, param, ind0, timing,
                                 kernel)
                surf_array[p].Xout_int += v
                #           if child surface -> self exterior operator + sibling interaction
                #           sibling interaction: non-self exterior saved on exterior vector
            if len(field_array[F].child) > 0:
                C = field_array[F].child
                for c1 in C:
                    v, t1, t2 = selfExterior(surf_array[c1], c1, LorY, param,
                                             ind0, timing, kernel)
                    surf_array[c1].Xout_ext += v
                    for c2 in C:
                        if c1 != c2:
                            v = nonselfExterior(surf_array, c2, c1, LorY,
                                                param, ind0, timing, kernel)
                            surf_array[c1].Xout_ext += v
#           if child and parent surface -> parent-child and child-parent interaction
#           parent->child: non-self interior saved on exterior vector
#           child->parent: non-self exterior saved on interior vector
            if len(field_array[F].child) > 0 and len(field_array[
                    F].parent) > 0:
                p = field_array[F].parent[0]
                C = field_array[F].child
                for c in C:
                    v = nonselfExterior(surf_array, c, p, LorY, param, ind0,
                                        timing, kernel)
                    surf_array[p].Xout_int += v
                    v = nonselfInterior(surf_array, p, c, LorY, param, ind0,
                                        timing, kernel)
                    surf_array[c].Xout_ext += v
    #   Gather results into the result vector
    if complex_diel:
        MV = numpy.zeros(len(X), dtype=numpy.complex)
    else:
        MV = numpy.zeros(len(X))
    Naux = 0
    for i in range(Nsurf):
        N = len(surf_array[i].triangle)
        if surf_array[i].surf_type == 'dirichlet_surface':
            MV[Naux:Naux + N] = surf_array[i].Xout_ext * surf_array[i].Precond[
                0, :]
            Naux += N
        elif surf_array[i].surf_type == 'neumann_surface':
            MV[Naux:Naux + N] = surf_array[i].Xout_ext * surf_array[i].Precond[
                0, :]
            Naux += N
        elif surf_array[i].surf_type == 'asc_surface':
            MV[Naux:Naux + N] = surf_array[i].Xout_int * surf_array[i].Precond[
                0, :]
            Naux += N
        else:
            MV[Naux:Naux + N] = surf_array[i].Xout_int * surf_array[i].Precond[
                0, :] + surf_array[i].Xout_ext * surf_array[i].Precond[1, :]
            MV[Naux + N:Naux + 2 * N] = surf_array[i].Xout_int * surf_array[
                i].Precond[2, :] + surf_array[i].Xout_ext * surf_array[
                    i].Precond[3, :]
            Naux += 2 * N
    return MV 
 | 
	89ab7b49ef8f55bdeddbd9676acdc6cbe0de321f 
 | 3,657,611 
							 | 
					
	import numpy as np
import torch
def update_pris(traj, td_loss, indices, alpha=0.6, epsilon=1e-6, update_epi_pris=False, seq_length=None, eta=0.9):
    """
    Update priorities specified in indices.
    Parameters
    ----------
    traj : Traj
    td_loss : torch.Tensor
    indices : torch.Tensor or List of int
    alpha : float
    epsilon : float
    update_epi_pris : bool
        If True, all priorities of an episode including indices[0] are updated.
    seq_length : int
        Length of batch.
    eta : float
    Returns
    -------
    traj : Traj
    """
    pris = (torch.abs(td_loss) + epsilon) ** alpha
    traj.data_map['pris'][indices] = pris.detach().to(traj.traj_device())
    if update_epi_pris:
        epi_start = -1
        epi_end = -1
        seq_start = indices[0]
        for i in range(1, len(traj._epis_index)):
            if seq_start < traj._epis_index[i]:
                epi_start = traj._epis_index[i-1]
                epi_end = traj._epis_index[i]
                break
        pris = traj.data_map['pris'][epi_start: epi_end]
        n_seq = len(pris) - seq_length + 1
        abs_pris = np.abs(pris.cpu().numpy())
        seq_pris = np.array([eta * np.max(abs_pris[i:i+seq_length]) + (1 - eta) *
                             np.mean(abs_pris[i:i+seq_length]) for i in range(n_seq)], dtype='float32')
        traj.data_map['seq_pris'][epi_start:epi_start +
                                  n_seq] = torch.tensor(seq_pris, dtype=torch.float, device=get_device())
    return traj 
 | 
	41648ae78f25618b2789d8dde41cffbe0445d16b 
 | 3,657,612 
							 | 
					
	from typing import Sequence
from pydantic import BaseModel  # noqa: E0611
import hashlib
def get_library_version(performer_prefix: str, schemas: Sequence[Schema]) -> str:
    """Generates the library's version string.
    The version string is of the form "{performer_prefix}_{latest_creation_date}_{library_hash}".
    Args:
        performer_prefix: Performer prefix for context.
        schemas: YAML schemas.
    Returns:
        Version string.
    """
    # New class is needed to properly convert entire library to JSON
    class YamlLibrary(BaseModel):
        __root__: Sequence[Schema]
    yaml_library = YamlLibrary(__root__=schemas)
    json_schemas = yaml_library.json(exclude_none=True, ensure_ascii=False)
    input_hash = hashlib.md5(json_schemas.encode()).hexdigest()[:7]
    latest_creation_date = max(schema.creation_date_formatted for schema in schemas)
    library_version = f"{performer_prefix}_{latest_creation_date}_{input_hash}"
    return library_version 
 | 
	90bf2c695eece054f20bc0636b8e9759983affef 
 | 3,657,613 
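The Schema type is not shown here; below is a hypothetical pydantic stand-in (v1-style, matching the __root__ usage above) that satisfies the calls made by get_library_version, with a usage sketch.

from pydantic import BaseModel

class Schema(BaseModel):              # hypothetical stand-in for the real Schema type
    name: str
    creation_date_formatted: str      # e.g. "2023-03-02"

schemas = [Schema(name="a", creation_date_formatted="2023-01-15"),
           Schema(name="b", creation_date_formatted="2023-03-02")]
print(get_library_version("PERF", schemas))   # e.g. "PERF_2023-03-02_<7-char hash>"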
							 | 
					
	def sizeFromString(sizeStr, relativeSize):
	"""
	Converts from a size string to a float size.
	sizeStr: The string representation of the size.
	relativeSize: The size to use in case of percentages.
	"""
	if not sizeStr:
		raise Exception("Size not specified")
	dpi = 96.0
	cm = 2.54
	if len(sizeStr) > 2 and sizeStr[-2:] == 'cm':
		return float(sizeStr[:-2])*dpi/cm
	elif len(sizeStr) > 2 and sizeStr[-2:] == 'mm':
		return float(sizeStr[:-2])*dpi/(cm*10.0)
	elif len(sizeStr) > 1 and sizeStr[-1:] == 'Q':
		return float(sizeStr[:-1])*dpi/(cm*40.0)
	elif len(sizeStr) > 2 and sizeStr[-2:] == 'in':
		return float(sizeStr[:-2])*dpi
	elif len(sizeStr) > 2 and sizeStr[-2:] == 'pc':
		return float(sizeStr[:-2])*dpi/6.0
	elif len(sizeStr) > 2 and sizeStr[-2:] == 'pt':
		return float(sizeStr[:-2])*dpi/72.0
	elif len(sizeStr) > 2 and sizeStr[-2:] == 'em':
		return float(sizeStr[:-2])*16.0
	elif len(sizeStr) > 2 and sizeStr[-2:] == 'px':
		return float(sizeStr[:-2])
	elif len(sizeStr) > 1 and sizeStr[-1:] == '%':
		return float(sizeStr[:-1])/100.0*relativeSize
	return float(sizeStr) 
 | 
	5f53d7d1ea86d4c54beb3aaebca228f7706e5a9b 
 | 3,657,614 
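A few illustrative conversions, assuming sizeFromString above is in scope; the expected values follow from its dpi = 96 and cm = 2.54 constants.

print(sizeFromString("2.54cm", 0))   # 96.0  (2.54 cm is one inch at 96 dpi)
print(sizeFromString("72pt", 0))     # 96.0  (72 points is one inch)
print(sizeFromString("50%", 200))    # 100.0 (percentage of relativeSize)
print(sizeFromString("12", 0))       # 12.0  (a bare number is taken as pixels)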
							 | 
					
	from typing import Union
from typing import List
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def plot_r2(
    model: mofa_model,
    x="Group",
    y="Factor",
    factors: Union[int, List[int], str, List[str]] = None,
    groups_df: pd.DataFrame = None,
    group_label: str = None,
    views=None,
    groups=None,
    cmap="Blues",
    vmin=None,
    vmax=None,
    **kwargs,
):
    """
    Plot R2 values for the model
    Parameters
    ----------
    model : mofa_model
        Factor model
    x : str
        Dimension along X axis: Group (default), View, or Factor
    y : str
        Dimension along Y axis: Group, View, or Factor (default)
    factors : optional
        Index of a factor (or indices of factors) to use (all factors by default)
    views : optional
        Make a plot for certain views (None by default to plot all views)
    groups : optional
        Make a plot for certain groups (None by default to plot all groups)
    group_label : optional
        Sample (cell) metadata column to be used as group assignment
    groups_df : optional pd.DataFrame
        Data frame with samples (cells) as index and first column as group assignment
    cmap : optional
        The colourmap for the heatmap (default is 'Blues' with darker colour for higher R2)
    vmin : optional
        Display all R2 values smaller than vmin as vmin (0 by default)
    vmax : optional
        Display all R2 values larger than vmax as vmax (derived from the data by default)
    """
    r2 = model.get_r2(
        factors=factors,
        groups=groups,
        views=views,
        group_label=group_label,
        groups_df=groups_df,
    )
    vmax = r2.R2.max() if vmax is None else vmax
    vmin = 0 if vmin is None else vmin
    split_by = [dim for dim in ["Group", "View", "Factor"] if dim not in [x, y]]
    assert (
        len(split_by) == 1
    ), "x and y values should be different and be one of Group, View, or Factor"
    split_by = split_by[0]
    split_by_items = r2[split_by].unique()
    fig, axes = plt.subplots(ncols=len(split_by_items), sharex=True, sharey=True)
    cbar_ax = fig.add_axes([0.91, 0.3, 0.03, 0.4])
    if len(split_by_items) == 1:
        axes = [axes]
    for i, item in enumerate(split_by_items):
        r2_sub = r2[r2[split_by] == item]
        r2_df = r2_sub.sort_values("R2").pivot(index=y, columns=x, values="R2")
        if y == "Factor":
            # Sort by factor index
            r2_df.index = r2_df.index.astype("category")
            r2_df.index = r2_df.index.reorder_categories(
                sorted(r2_df.index.categories, key=lambda x: int(x.split("Factor")[1]))
            )
            r2_df = r2_df.sort_values("Factor")
        if x == "Factor":
            # Re-order columns by factor index
            r2_df.columns = r2_df.columns.astype("category")
            r2_df.columns = r2_df.columns.reorder_categories(
                sorted(
                    r2_df.columns.categories, key=lambda x: int(x.split("Factor")[1])
                )
            )
            r2_df = r2_df[r2_df.columns.sort_values()]
        g = sns.heatmap(
            r2_df.sort_index(level=0, ascending=False),
            ax=axes[i],
            cmap=cmap,
            vmin=vmin,
            vmax=vmax,
            cbar=i == 0,
            cbar_ax=None if i else cbar_ax,
            **kwargs,
        )
        axes[i].set_title(item)
        axes[i].tick_params(axis="both", which="both", length=0)
        if i == 0:
            g.set_yticklabels(g.yaxis.get_ticklabels(), rotation=0)
        else:
            axes[i].set_ylabel("")
    plt.close()
    return fig 
 | 
	4898f56ca89ef55db775f0dc3b0106c36a2ced05 
 | 3,657,615 
							 | 
					
	from typing import Union
from typing import Optional
from typing import Tuple
from typing import List
def all(x: Union[ivy.Array, ivy.NativeArray],
        axis: Optional[Union[int, Tuple[int], List[int]]] = None,
        keepdims: bool = False)\
        -> ivy.Array:
    """
    Tests whether all input array elements evaluate to ``True`` along a specified axis.
    .. note::
       Positive infinity, negative infinity, and NaN must evaluate to ``True``.
    .. note::
       If ``x`` is an empty array or the size of the axis (dimension) along which to evaluate elements is zero, the test result must be ``True``.
    Parameters
    ----------
    x:
        input array.
    axis:
        axis or axes along which to perform a logical AND reduction. By default, a logical AND reduction must be performed over the entire array. If a tuple of integers, logical AND reductions must be performed over multiple axes. A valid ``axis`` must be an integer on the interval ``[-N, N)``, where ``N`` is the rank (number of dimensions) of ``x``. If an ``axis`` is specified as a negative integer, the function must determine the axis along which to perform a reduction by counting backward from the last dimension (where ``-1`` refers to the last dimension). If provided an invalid ``axis``, the function must raise an exception. Default: ``None``.
    keepdims:
        If ``True``, the reduced axes (dimensions) must be included in the result as singleton dimensions, and, accordingly, the result must be compatible with the input array (see :ref:`broadcasting`). Otherwise, if ``False``, the reduced axes (dimensions) must not be included in the result. Default: ``False``.
    Returns
    -------
    out:
        if a logical AND reduction was performed over the entire array, the returned array must be a zero-dimensional array containing the test result; otherwise, the returned array must be a non-zero-dimensional array containing the test results. The returned array must have a data type of ``bool``.
    """
    return _cur_framework(x).all(x, axis, keepdims) 
 | 
	3854912ea0d6fceb4dc51576dd4b11923da68876 
 | 3,657,616 
							 | 
					
	import re
def verify_time_format(time_str):
    """
    Verify that a time string is in the 'hour:minute' format, where each part may be one or two
    characters.
    Hour must be in [0, 24) and minute must be in [0, 60).

    :param time_str: time str
    :return: True if the format and value ranges are valid, otherwise False
    """
    if not isinstance(time_str, str):
        return False
    time_format = r'^(\d{1,2}):(\d{1,2})$'
    matched = re.match(time_format, time_str)
    if matched:
        if 0 <= int(matched.group(1)) < 24 and 0 <= int(matched.group(2)) < 60:
            return True
        else:
            print('Hour should be within [0, 24); Minute should be within [0, 60)')
            return False
    else:
        return False 
 | 
	fee469248d4d1d792c1ed858cf9043e5695c9f5d 
 | 3,657,617 
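A few illustrative calls, assuming verify_time_format above is in scope.

print(verify_time_format("9:5"))     # True  (single-digit hour and minute are accepted)
print(verify_time_format("23:59"))   # True
print(verify_time_format("24:00"))   # False (hour out of range; a hint is printed)
print(verify_time_format(930))       # False (non-string input)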
							 | 
					
	def extract_region_df(region_code="11"):
    """
    Extracts a dataframe describing regional-level vaccine data for a single region and performs some analysis on it.
    :rtype: Dataframe
    """
    df = RAW_DF
    df = df.loc[df['codice_regione_ISTAT'] == region_code]
    df = df.sort_values('data_somministrazione')
    df = df.reset_index()
    # Keep data from 2021-01-01 onward
    df = df[df['data_somministrazione'] >= '2021-01-01']
    # Doses per 100.000 inhabitants
    df['prima_dose_per_100000_ab'] = df.apply(lambda x: x['prima_dose'] / population_dict[x['codice_regione_ISTAT']] * 100000,
                                              axis=1)
    df['seconda_dose_per_100000_ab'] = df.apply(lambda x: x['seconda_dose'] / population_dict[x['codice_regione_ISTAT']]
                                                * 100000, axis=1)
    df['totale_su_pop'] = df.apply(lambda x: x['totale'] / population_dict[x['codice_regione_ISTAT']], axis=1)
    df['totale_per_100000_ab'] = df.apply(lambda x: x['totale_su_pop'] * 100000, axis=1)
    # Historical totals
    df['totale_storico'] = df['totale'].cumsum()
    df['totale_storico_su_pop'] = df.apply(lambda x: x['totale_storico'] / population_dict[x['codice_regione_ISTAT']], axis=1)
    df['prima_dose_totale_storico'] = df['prima_dose'].cumsum()
    df['prima_dose_totale_storico_su_pop'] = df.apply(lambda x: x['prima_dose_totale_storico'] /
                                                      population_dict[x['codice_regione_ISTAT']], axis=1)
    df['seconda_dose_totale_storico'] = df['seconda_dose'].cumsum()
    df['seconda_dose_totale_storico_su_pop'] = df.apply(lambda x: x['seconda_dose_totale_storico'] /
                                                        population_dict[x['codice_regione_ISTAT']], axis=1)
    return df 
 | 
	8c3e77c1548b8bf40d0be31ac52237c532a4c622 
 | 3,657,618 
							 | 
					
	from bs4 import BeautifulSoup
def get_title(offer_markup):
    """ Searches for offer title on offer page
    :param offer_markup: Class "offerbody" from offer page markup
    :type offer_markup: str
    :return: Title of offer
    :rtype: str, None
    """
    html_parser = BeautifulSoup(offer_markup, "html.parser")
    return html_parser.h1.text.strip() 
 | 
	72618da71ea63d1b3431ba76f5d8a9549af6fe76 
 | 3,657,619 
							 | 
					
	import os
import shutil
import subprocess as sp
import genomepy
def genome(request):
    """Create a test genome and location"""
    name = "ce10"  # Use fake name for blacklist test
    fafile = "tests/data/small_genome.fa.gz"
    genomes_dir = os.path.join(os.getcwd(), ".genomepy_plugin_tests")
    if os.path.exists(genomes_dir):
        shutil.rmtree(genomes_dir)
    genome_dir = os.path.join(genomes_dir, name)
    genomepy.utils.mkdir_p(genome_dir)
    fname = os.path.join(genome_dir, f"{name}.fa.gz")
    shutil.copyfile(fafile, fname)
    # unzip genome if required
    if request.param == "unzipped":
        sp.check_call(["gunzip", fname])
        # add annotation (for STAR and hisat2), but only once
        gtf_file = "tests/data/ce10.annotation.gtf.gz"
        aname = os.path.join(genome_dir, f"{name}.annotation.gtf.gz")
        shutil.copyfile(gtf_file, aname)
    return genomepy.Genome(name, genomes_dir=genomes_dir) 
 | 
	21f6b4c41fdfe5c8f934c358d78d9862a16e3324 
 | 3,657,620 
							 | 
					
	def get_twinboundary_shear_structure(twinboundary_relax_structure,
                                     shear_strain_ratio,
                                     previous_relax_structure=None,
                                     **additional_relax_structures,
                                     ):
    """
    If previous_relax_structure is None, use the s=0 structure as the original
    structure to be sheared. shear_strain_ratios must include zero.
    additional_relax_structures is an AttributeDict.
    """
    relax_wf = WorkflowFactory('vasp.relax')
    tb_relax_wf = WorkflowFactory('twinpy.twinboundary_relax')
    ratio = shear_strain_ratio.value
    tb_rlx_node = get_create_node(twinboundary_relax_structure.pk,
                                   tb_relax_wf)
    addi_rlx_pks = []
    for i in range(len(additional_relax_structures)):
        label = 'additional_structure_%02d' % (i+1)
        structure_pk_ = additional_relax_structures[label].pk
        rlx_pk = get_create_node(structure_pk_,
                                  relax_wf).pk
        addi_rlx_pks.append(rlx_pk)
    aiida_twinboundary_relax = \
            AiidaTwinBoudnaryRelaxWorkChain(tb_rlx_node)
    aiida_rlx = aiida_twinboundary_relax.get_aiida_relax(
                    additional_relax_pks=addi_rlx_pks)
    tb_analyzer = \
            aiida_twinboundary_relax.get_twinboundary_analyzer(
                additional_relax_pks=addi_rlx_pks)
    if addi_rlx_pks == []:
        kpt_info = aiida_rlx.get_kpoints_info()
    else:
        kpt_info = aiida_rlx.aiida_relaxes[0].get_kpoints_info()
    if previous_relax_structure is None:
        orig_cell = tb_analyzer.get_shear_cell(
                shear_strain_ratio=ratio,
                is_standardize=False)
        cell = tb_analyzer.get_shear_cell(
            shear_strain_ratio=ratio,
            is_standardize=True)
    else:
        prev_rlx_node = get_create_node(previous_relax_structure.pk, relax_wf)
        create_tb_shr_node = get_create_node(prev_rlx_node.inputs.structure.pk,
                                             CalcFunctionNode)
        prev_orig_structure = \
                create_tb_shr_node.outputs.twinboundary_shear_structure_orig
        prev_orig_cell = get_cell_from_aiida(prev_orig_structure)
        prev_aiida_rlx = AiidaRelaxWorkChain(prev_rlx_node)
        prev_rlx_analyzer = prev_aiida_rlx.get_relax_analyzer(
                original_cell=prev_orig_cell)
        atom_positions = \
                prev_rlx_analyzer.final_cell_in_original_frame[1]
        orig_cell = tb_analyzer.get_shear_cell(
                shear_strain_ratio=ratio,
                is_standardize=False,
                atom_positions=atom_positions)
        cell = tb_analyzer.get_shear_cell(
            shear_strain_ratio=ratio,
            is_standardize=True,
            atom_positions=atom_positions)
    orig_structure = get_aiida_structure(cell=orig_cell)
    structure = get_aiida_structure(cell=cell)
    # kpoints
    rlx_mesh = np.array(kpt_info['mesh'])
    rlx_offset = np.array(kpt_info['offset'])
    rlx_kpoints = (rlx_mesh, rlx_offset)
    std_base = StandardizeCell(tb_analyzer.relax_analyzer.original_cell)
    orig_kpoints = std_base.convert_kpoints(
            kpoints=rlx_kpoints,
            kpoints_type='primitive')['original']
    std = StandardizeCell(orig_cell)
    kpoints = std.convert_kpoints(kpoints=orig_kpoints,
                                  kpoints_type='original')['primitive']
    kpt_orig = KpointsData()
    kpt_orig.set_kpoints_mesh(orig_kpoints[0], offset=orig_kpoints[1])
    kpt = KpointsData()
    kpt.set_kpoints_mesh(kpoints[0], offset=kpoints[1])
    return_vals = {}
    return_vals['twinboundary_shear_structure_orig'] = orig_structure
    return_vals['twinboundary_shear_structure'] = structure
    return_vals['kpoints_orig'] = kpt_orig
    return_vals['kpoints'] = kpt
    return return_vals 
 | 
	c155db78f4d3d7f939e7c38e0c05955c3bd0f8c9 
 | 3,657,621 
							 | 
					
	def _map_spectrum_weight(map, spectrum=None):
    """Weight a map with a spectrum.
    This requires map to have an "energy" axis.
    The weights are normalised so that they sum to 1.
    The mean and unit of the output image are the same as those of the input cube.
    At the moment this is used to get a weighted exposure image.
    Parameters
    ----------
    map : `~gammapy.maps.Map`
        Input map with an "energy" axis.
    spectrum : `~gammapy.modeling.models.SpectralModel`
        Spectral model to compute the weights.
        Default is power-law with spectral index of 2.
    Returns
    -------
    map_weighted : `~gammapy.maps.Map`
        Weighted image
    """
    if spectrum is None:
        spectrum = PowerLawSpectralModel(index=2.0)
    # Compute weights vector
    energy_edges = map.geom.get_axis_by_name("energy").edges
    weights = spectrum.integral(
        emin=energy_edges[:-1], emax=energy_edges[1:], intervals=True
    )
    weights /= weights.sum()
    shape = np.ones(len(map.geom.data_shape))
    shape[0] = -1
    return map * weights.reshape(shape.astype(int)) 
 | 
	5a1d9b9e3a94854e8c53947ca494f7448d2af570 
 | 3,657,622 
							 | 
					
	def fetch_all_db_as_df(allow_cached=False):
    """Converts list of dicts returned by `fetch_all_db` to DataFrame with ID removed
    Actual job is done in `_worker`. When `allow_cached`, attempt to retrieve timed cached from
    `_fetch_all_db_as_df_cache`; ignore cache and call `_work` if cache expires or `allow_cached`
    is False.
    """
    def _work():
        ret_dict = fetch_all_db()
        if len(ret_dict) == 0:
            return None
        df_dict = {}
        for level, data in ret_dict.items():
            df = pd.DataFrame.from_records(data)
            df.drop('_id', axis=1, inplace=True)
            df.columns = map(str.lower, df.columns)
            df_dict[level] = df
        return df_dict
    if allow_cached:
        try:
            return _fetch_all_db_as_df_cache['cache']
        except KeyError:
            pass
    ret = _work()
    _fetch_all_db_as_df_cache['cache'] = ret
    return ret 
 | 
	c7f049590c8405a862890944cfaabfefebea1d58 
 | 3,657,623 
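The module-level _fetch_all_db_as_df_cache is not shown in the snippet; one plausible shape for it is sketched below with cachetools (an assumption, not necessarily what the original module uses).

import cachetools

# hypothetical definition: a single-slot cache whose 'cache' entry expires after 12 hours;
# an expired or missing entry raises KeyError, which the function above falls back on
_fetch_all_db_as_df_cache = cachetools.TTLCache(maxsize=1, ttl=12 * 3600)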
							 | 
					
	def tool_proxy_from_persistent_representation(persisted_tool, strict_cwl_validation=True, tool_directory=None):
    """Load a ToolProxy from a previously persisted representation."""
    ensure_cwltool_available()
    return ToolProxy.from_persistent_representation(
        persisted_tool, strict_cwl_validation=strict_cwl_validation, tool_directory=tool_directory
    ) 
 | 
	e1f96d66cb1634d4de82b3e31f0fb9dd81080262 
 | 3,657,624 
							 | 
					
	def has_space_element(source):
    """
    Check the elements of an object: return True if any element is None or an empty string,
    otherwise return False. Supports dict, list and tuple.
    :param:
        * source: (list, set, dict) object to be checked
    :return:
        * result: (bool) True if None, an empty string or a whitespace-only string is present, otherwise False
    Example::
        print('--- has_space_element demo---')
        print(has_space_element([1, 2, 'test_str']))
        print(has_space_element([0, 2]))
        print(has_space_element([1, 2, None]))
        print(has_space_element((1, [1, 2], 3, '')))
        print(has_space_element({'a': 1, 'b': 0}))
        print(has_space_element({'a': 1, 'b': []}))
        print('---')
    Output::
        --- has_space_element demo---
        False
        False
        True
        True
        False
        True
        ---
    """
    if isinstance(source, dict):
        check_list = list(source.values())
    elif isinstance(source, list) or isinstance(source, tuple):
        check_list = list(source)
    else:
        raise TypeError('source except list, tuple or dict, but got {}'.format(type(source)))
    for i in check_list:
        if i == 0:
            continue
        if not (i and str(i).strip()):
            return True
    return False 
 | 
	ab8a968fb807654af73d9017145c0af2259ae41e 
 | 3,657,625 
							 | 
					
	def return_latest_psm_is(df, id_col, file_col, instr_col, psm_col):
    """ Extracts info on PSM number, search ID and Instrument from the last row in DB
    """
    last_row = df.iloc[-1]
    search_id = last_row[id_col]
    instr = last_row[instr_col]
    psm = last_row[psm_col]
    psm_string = str(psm) + ' PSMs in file ' + str(last_row[file_col])
    print('String to put on the graph', psm_string)
    return (search_id, instr, psm, psm_string) 
 | 
	73c5acc945b9a6ef40aa1ce102351152b948a4b6 
 | 3,657,626 
							 | 
					
	def add_parser_arguments_misc(parser):
    """
    Adds the options that the command line parser will search for, some miscellaneous parameters, like use of gpu,
    timing, etc.
    :param parser: the argument parser
    :return: the same parser, but with the added options.
    """
    parser.add_argument('--use_gpu', action='store_true',
                        help='use GPU (CUDA). For loading data on Windows OS, if you get an Access Denied or Operation '
                             'Not Supported for cuda, you must set --loader_num_workers to 0 '
                             '(you can\'t share CUDA tensors among Windows processes).')
    parser.add_argument('--gpu_num', default="0", type=str)
    parser.add_argument('--map_gpu_beginning', action='store_true',
                        help='Will map all tensors (including FULL dataset) to GPU at the start of the instance, if '
                             '--use_gpu flag is supplied and CUDA is available. This option is NOT recommended if you '
                             'have low GPU memory or if your dataset is very large, since you may quickly run out of '
                             'memory.')
    parser.add_argument('--timing', action='store_true',
                        help='if specified, will display times for several parts of training')
    parser.add_argument('--load_args_from_json', type=str, default=None,
                        help='Path to json file containing args to pass. Should be an object containing the keys of '
                             'the attributes you want to change (keys that you don\'t supply will be left unchanged) '
                             'and their values according to their type (int, str, bool, list, etc.)')
    return parser 
 | 
	706ec64dfd6393fd1bd4741568e5e1af1d22a4d0 
 | 3,657,627 
							 | 
					
	from typing import Union
import torch
def colo_model_tensor_clone(t: Union[StatefulTensor, torch.Tensor], target_device: torch.device) -> torch.Tensor:
    """
    Clone a model data tensor
    Args:
        t (Union[StatefulTensor, torch.Tensor]): a model data tensor
        target_device (torch.device): the target device
    Returns:
        torch.Tensor: a cloned torch tensor
    """
    # TODO() rename this function
    colo_model_data_tensor_move_inline(t, target_device)
    t_payload = t.payload if isinstance(t, StatefulTensor) else t
    return t_payload 
 | 
	799d23e5f69ad73ecef040e94fecb64bb7b8c7d9 
 | 3,657,628 
							 | 
					
	def plugin_init(config):
    """Registers HTTP Listener handler to accept sensor readings
    Args:
        config: JSON configuration document for the South device configuration category
    Returns:
        handle: JSON object to be used in future calls to the plugin
    Raises:
    """
    handle = config
    return handle 
 | 
	a3e81bebdc806073b720e0a3174e62240ba81724 
 | 3,657,629 
							 | 
					
	import time
import json
def search(query,page):
    
    """Scrapes the search query page and returns the results in json format.
    
    
    Parameters 
    ------------
    query: The query you want to search for.
    page: The page number for which you want the results.
        Every page returns 11 results. 
          
          
    """
    driver.get(f'https://phys.libretexts.org/Special:Search?qid=&fpid=230&fpth=&query={query}&type=wiki')
    clicks = page
    while clicks>1:
        showMoreButton = driver.find_element_by_xpath('//*[@id="mt-search-spblls-component"]/div[2]/div/button')
        showMoreButton.click()
        clicks -= 1
        time.sleep(2)
    output = []
    start = (page-1)* 11
    stop = start + 12
    for i in range(start+1,stop):
        content = driver.find_element_by_xpath(f'//*[@id="search-results"]/li[{i}]/div/div[2]/div[2]/span[1]').text
    
        path = f'//*[@id="search-results"]/li[{i}]/div/div[1]/a'
        for a in driver.find_elements_by_xpath(path):
            title = a.get_attribute('title')
            link = a.get_attribute('href')
            result = {
            "title":title,
            "link":link,
            "content":content
            }
            output.append(result)
    output_json = {
        "results":output
    }
    driver.close()
    return json.dumps(output_json) 
 | 
	4bcc78aeb29715adaca7b99d98d94c28448e24f7 
 | 3,657,630 
							 | 
					
	import os
import json
def get_jobs(job_filename):
    """Reads jobs from a known job file location
    """
    jobs = list()
    if job_filename and os.path.isfile(job_filename):
        with open(job_filename, 'r') as input_fd:
            data = input_fd.read()
        job_dict = json.loads(data)
        del data
        for job in job_dict['jobs']:
            jobs.append(job)
        os.unlink(job_filename)
    return jobs 
 | 
	eaa091131a026c8a4c5f4e788406e185e1bbffde 
 | 3,657,631 
							 | 
					
	def quote_with_backticks_definer(definer):
    """Quote the given definer clause with backticks.
    This functions quotes the given definer clause with backticks, converting
    backticks (`) in the string with the correct escape sequence (``).
    definer[in]     definer clause to quote.
    Returns string with the definer quoted with backticks.
    """
    if not definer:
        return definer
    parts = definer.split('@')
    if len(parts) != 2:
        return definer
    return '@'.join([quote_with_backticks(parts[0]),
                     quote_with_backticks(parts[1])]) 
 | 
	ab87c8582d8081e324b494d7038916e984d5813a 
 | 3,657,632 
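The quote_with_backticks helper is not part of this snippet; a minimal sketch of what it presumably does is shown below, followed by a usage example (hypothetical, only to make the escaping behaviour concrete).

def quote_with_backticks(identifier):
    # hypothetical helper: double any embedded backticks and wrap the identifier in backticks
    return "`{0}`".format(identifier.replace("`", "``"))

# usage: both parts of a definer clause get quoted
print(quote_with_backticks_definer("app`user@localhost"))   # `app``user`@`localhost`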
							 | 
					
	import base64
import cv2
def cvimg_to_b64(img):
    """
    Image conversion helper: convert a binary (OpenCV) image into a base64-encoded string.
    """
    try:
        image = cv2.imencode('.jpg', img)[1]  # encode the image into an in-memory JPEG byte stream
        base64_data = str(base64.b64encode(image))[2:-1]  # encode the bytes as a base64 string
        return base64_data  # return the encoded result
    except Exception as e:
        return "error" 
 | 
	c9f4c99ff24578ac4f6216ddefade0602c60c697 
 | 3,657,633 
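For illustration, a sketch of the reverse direction, decoding the base64 string back into an OpenCV image; this helper is not part of the original snippet and assumes cv2 and numpy are available.

import base64

import cv2
import numpy as np

def b64_to_cvimg(base64_data):
    """Hypothetical inverse of cvimg_to_b64: decode a base64 JPEG string back into a BGR image array."""
    raw = base64.b64decode(base64_data)
    buf = np.frombuffer(raw, dtype=np.uint8)
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)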
							 | 
					
	import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
from scipy.misc import fromimage
from skimage.color import label2rgb
from skimage.transform import resize
from io import BytesIO
def draw_label(label, img, n_class, label_titles, bg_label=0):
    """Convert label to rgb with label titles.
    @param label_titles: label title for each label.
    @type label_titles: dict
    """
    colors = labelcolormap(n_class)
    label_viz = label2rgb(label, img, colors=colors[1:], bg_label=bg_label)
    # label 0 color: (0, 0, 0, 0) -> (0, 0, 0, 255)
    label_viz[label == 0] = 0
    # plot label titles on image using matplotlib
    plt.subplots_adjust(left=0, right=1, top=1, bottom=0,
                        wspace=0, hspace=0)
    plt.margins(0, 0)
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.axis('off')
    # plot image
    plt.imshow(label_viz)
    # plot legend
    plt_handlers = []
    plt_titles = []
    for label_value in np.unique(label):
        if label_value not in label_titles:
            continue
        fc = colors[label_value]
        p = plt.Rectangle((0, 0), 1, 1, fc=fc)
        plt_handlers.append(p)
        plt_titles.append(label_titles[label_value])
    plt.legend(plt_handlers, plt_titles, loc='lower right', framealpha=0.5)
    # convert plotted figure to np.ndarray
    f = BytesIO()  # binary buffer; savefig writes PNG bytes into it
    plt.savefig(f, bbox_inches='tight', pad_inches=0)
    f.seek(0)
    result_img_pil = Image.open(f)
    result_img = fromimage(result_img_pil, mode='RGB')
    result_img = resize(result_img, img.shape, preserve_range=True)
    result_img = result_img.astype(img.dtype)
    return result_img 
 | 
	11b8f1e9c774df3e6312aa3dd0f71e7f300b5547 
 | 3,657,634 
							 | 
					
	def inspect(template_dir, display_type=None):
    """Generates a some string representation of all undefined variables
    in templates.
    Args:
        template_dir (str): all files within are treated as templates
        display_type (str): tabulate.tabulate tablefmt or 'terse'.
    Examples:
        Yields an overview of config parameter placeholders for FireWorks
        config template directory `imteksimfw/fireworks/templates/fwconfig`:
        ╒══════════════════════════════╤══════════════╤══════════════════╤═════════════╤════════════╤════════════════════╤═══════════╤════════════════╤══════════════╤═══════════════════╤═════════╤═══════════════╕
        │                              │ FIREWORKS_DB │ FW_CONFIG_PREFIX │ WEBGUI_PORT │ LOGDIR_LOC │ MONGODB_PORT_LOCAL │ FW_PREFIX │ FIREWORKS_USER │ MONGODB_HOST │ FW_AUTH_FILE_NAME │ MACHINE │ FIREWORKS_PWD │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ FW_config.yaml               │              │ x                │ x           │            │                    │ x         │                │              │ x                 │ x       │               │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ bwcloud_noqueue_fworker.yaml │              │                  │             │            │                    │           │                │              │                   │         │               │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ fireworks_mongodb_auth.yaml  │ x            │                  │             │ x          │ x                  │           │ x              │ x            │                   │         │ x             │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ forhlr2_noqueue_worker.yaml  │              │                  │             │            │                    │           │                │              │                   │         │               │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ forhlr2_queue_worker.yaml    │              │                  │             │            │                    │           │                │              │                   │         │               │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ forhlr2_slurm_qadapter.yaml  │              │ x                │             │            │                    │           │                │              │ x                 │         │               │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ juwels_noqueue_worker.yaml   │              │                  │             │            │                    │           │                │              │                   │         │               │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ juwels_queue_worker.yaml     │              │                  │             │            │                    │           │                │              │                   │         │               │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ juwels_slurm_qadapter.yaml   │              │ x                │             │            │                    │           │                │              │ x                 │         │               │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ nemo_moab_qadapter.yaml      │              │ x                │             │            │                    │           │                │              │ x                 │         │               │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ nemo_noqueue_worker.yaml     │              │                  │             │            │                    │           │                │              │                   │         │               │
        ├──────────────────────────────┼──────────────┼──────────────────┼─────────────┼────────────┼────────────────────┼───────────┼────────────────┼──────────────┼───────────────────┼─────────┼───────────────┤
        │ nemo_queue_worker.yaml       │              │                  │             │            │                    │           │                │              │                   │         │               │
        ╘══════════════════════════════╧══════════════╧══════════════════╧═════════════╧════════════╧════════════════════╧═══════════╧════════════════╧══════════════╧═══════════════════╧═════════╧═══════════════╛
    """
    undefined = get_undefined(template_dir)
    return variable_overview(undefined, display_type) 
 | 
	1f557da1742ca3c2118bb5629f228079ee14e729 
 | 3,657,635 
							 | 
					
	def calc_fitness_all(chromosomes, video_list, video_data):
    """Calculates fitness for all chromosomes
    Parameters
    ----------
    chromosomes : np.ndarray
        List of chromosomes
    video_list : np.ndarray
        List of all video titles (in this case number identifiers)
    video_data : pd dataframe
        Dataframe of Emotion by Time w/ video as a column
    Returns
    -------
    list
        Determinant of the covariance matrix of all emotions by time
    """
    fitness = []
    for chromosome in chromosomes:
        fitness.append(calc_fitness_individual(chromosome, video_list,
                       video_data))
    return fitness 
 | 
	e0a28880a31fb5d1546c4f959cd1836e89822471 
 | 3,657,636 
							 | 
					
	from typing import List
from typing import Set
def grouping_is_valid(
    proposed_grouping: List[Set[str]],
    past_groups: List[Set[str]],
    max_intersection_size: int,
) -> bool:
    """Returns true if no group in the proposed grouping intersects with any
    past group with intersection size strictly greater than
    `max_intersection_size`.
    """
    for group in proposed_grouping:
        for past_group in past_groups:
            if len(group & past_group) > max_intersection_size:
                return False
    return True 
 | 
	caeb7568a2e8fddea9058ccc512dc9c06070ece9 
 | 3,657,637 
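A small usage example, assuming grouping_is_valid above is in scope.

proposed = [{"a", "b", "c"}, {"d", "e"}]
past = [{"a", "b", "x"}]
print(grouping_is_valid(proposed, past, max_intersection_size=1))  # False: {"a", "b"} is shared
print(grouping_is_valid(proposed, past, max_intersection_size=2))  # True: an overlap of 2 is allowed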
							 | 
					
	def next_wire_in_dimension(wire1, tile1, wire2, tile2, tiles, x_wires, y_wires,
                           wire_map, wires_in_node):
    """ next_wire_in_dimension returns true if tile1 and tile2 are in the same
  row and column, and must be adjcent.
  """
    tile1_info = tiles[tile1]
    tile2_info = tiles[tile2]
    tile1_x = tile1_info['grid_x']
    tile2_x = tile2_info['grid_x']
    tile1_y = tile1_info['grid_y']
    tile2_y = tile2_info['grid_y']
    # All wires are in the same row or column, or each wire lies in its own
    # row or column.
    if len(y_wires) == 1 or len(x_wires) == len(wires_in_node) or abs(
            tile1_y - tile2_y) == 0:
        ordered_wires = sorted(x_wires.keys())
        idx1 = ordered_wires.index(tile1_x)
        idx2 = ordered_wires.index(tile2_x)
        if len(x_wires[tile1_x]) == 1 and len(x_wires[tile2_x]) == 1:
            return abs(idx1 - idx2) == 1
    if len(x_wires) == 1 or len(y_wires) == len(wires_in_node) or abs(
            tile1_x - tile2_x) == 0:
        ordered_wires = sorted(y_wires.keys())
        idx1 = ordered_wires.index(tile1_y)
        idx2 = ordered_wires.index(tile2_y)
        if len(y_wires[tile1_y]) == 1 and len(y_wires[tile2_y]) == 1:
            return abs(idx1 - idx2) == 1
    return None 
 | 
	2c2b6a2cb4d117f2435568437d38f05311b7dd13 
 | 3,657,638 
							 | 
					
	from typing import Optional
def get(*, db_session, report_id: int) -> Optional[Report]:
    """
    Get a report by id.
    """
    return db_session.query(Report).filter(Report.id == report_id).one_or_none() 
 | 
	021a7d35e060a2c92c9443361beff03de9aaf048 
 | 3,657,639 
							 | 
					
	import urllib
def host_from_path(path):
    """returns the host of the path"""
    url = urllib.parse.urlparse(path)
    return url.netloc 
 | 
	95b362e8f20c514a77506356c3a4a0c1ef200490 
 | 3,657,640 
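Two illustrative calls, assuming host_from_path above is in scope.

print(host_from_path("https://docs.python.org/3/library/urllib.parse.html"))  # docs.python.org
print(host_from_path("/tmp/local/file.txt"))                                  # "" (a bare path has no host)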
							 | 
					
	def sampleM(a0, bk, njk, m_cap=20):
    """produces sample from distribution over M using normalized log probabilities parameterizing a
    categorical dist."""
    raise DeprecationWarning()
    wts = np.empty((m_cap,))
    sum = 0
    for m in range(m_cap):
        wts[m] = gammaln(a0*bk) - gammaln(a0*bk+njk) + log(stirling.get(njk, m)+1e-9) + m*(a0+bk)
        sum += wts[m]
    wts = np.array(wts) / sum
    print(wts, np.sum(wts))
    return rand.multinomial(1, wts) 
 | 
	76cc9e0bd6a0594bd8b6350053957073ccf9caf9 
 | 3,657,641 
							 | 
					
	def or_default(none_or_value, default):
    """
    inputs:
        none_or_value: variable to test
        default: value to return if none_or_value is None
    """
    return none_or_value if none_or_value is not None else default 
 | 
	43200fe3bd1308eed87de0ad905873fd3c629067 
 | 3,657,642 
							 | 
					
	def find_optimal_components_subset(contours, edges):
    """Find a crop which strikes a good balance of coverage/compactness.
    Returns an (x1, y1, x2, y2) tuple.
    """
    c_info = props_for_contours(contours, edges)
    c_info.sort(key=lambda x: -x['sum'])
    total = np.sum(edges) / 255
    area = edges.shape[0] * edges.shape[1]
    c = c_info[0]
    del c_info[0]
    this_crop = c['x1'], c['y1'], c['x2'], c['y2']
    crop = this_crop
    covered_sum = c['sum']
    while covered_sum < total:
        changed = False
        recall = 1.0 * covered_sum / total
        prec = 1 - 1.0 * crop_area(crop) / area
        f1 = 2 * (prec * recall / (prec + recall))
        #print '----'
        for i, c in enumerate(c_info):
            this_crop = c['x1'], c['y1'], c['x2'], c['y2']
            new_crop = union_crops(crop, this_crop)
            new_sum = covered_sum + c['sum']
            new_recall = 1.0 * new_sum / total
            new_prec = 1 - 1.0 * crop_area(new_crop) / area
            new_f1 = 2 * new_prec * new_recall / (new_prec + new_recall)
            # Add this crop if it improves f1 score,
            # _or_ it adds 25% of the remaining pixels for <15% crop expansion.
            # ^^^ very ad-hoc! make this smoother
            remaining_frac = c['sum'] / (total - covered_sum)
            new_area_frac = 1.0 * crop_area(new_crop) / crop_area(crop) - 1
            if new_f1 > f1 or (
                    remaining_frac > 0.25 and new_area_frac < 0.15):
                print('%d %s -> %s / %s (%s), %s -> %s / %s (%s), %s -> %s' % (
                    i, covered_sum, new_sum, total, remaining_frac,
                    crop_area(crop), crop_area(new_crop), area, new_area_frac,
                    f1, new_f1))
                crop = new_crop
                covered_sum = new_sum
                del c_info[i]
                changed = True
                break
        if not changed:
            break
    return crop 
 | 
	016815811b6fa80378142303e3dce8f7736c514c 
 | 3,657,643 
							 | 
					
	import re
from html import unescape
def scrape(html):
    """Extract book information using regular expressions."""
    books = []
    for partial_html in re.findall(r'<td class="left">Ma.*?</td>', html, re.DOTALL):
        # Extract the book's URL
        url = re.search(r'<a href="(.*?)">', partial_html).group(1)
        url = 'http://www.hanbit.co.kr' + url
        # Remove the tags to extract the book's title
        title = re.sub(r'<.*?>', '', partial_html)
        title = unescape(title)
        books.append({'url': url, 'title': title})
    return books 
 | 
	8703c48748607934491e92c3e0243e92cd7edf12 
 | 3,657,644 
							 | 
					
	def get_time_zone_offset(area_code):
    """ Returns an integer offset value if it finds a matching area code,
        otherwise returns None."""
    if not isinstance(area_code, str):
        area_code = str(area_code)
    if area_code in area_code_mapping:
        return area_code_mapping[area_code][1] 
 | 
	4697a07d53af25ef70facf30f4bbef2472494781 
 | 3,657,645 
							 | 
					
	import pandas as pd
def true_false_counts(series: pd.Series):
    """
    input: a boolean series
    returns: a two-element list [num_true, num_false]
    """
    return series.value_counts().sort_index(ascending=False).tolist() 
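# A hedged usage sketch: with value_counts() sorted descending on the boolean
# index, True comes first, so the result unpacks as (num_true, num_false).
_flags = pd.Series([True, False, True, True])
num_true, num_false = true_false_counts(_flags)
assert (num_true, num_false) == (3, 1)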
 | 
	7fc7d0beb1d11aa7a4e3ccb6dd00155194deac3d 
 | 3,657,646 
							 | 
					
	def phyutility(DIR,alignment,min_col_occup,seqtype,min_chr=10):
	"""
	remove columns with occupancy lower than MIN_COLUMN_OCCUPANCY
	remove seqs shorter than MIN_CHR after filter columns
	"""
	if DIR[-1] != "/": DIR += "/"
	cleaned = alignment+"-cln"
	if os.path.exists(DIR+cleaned): return cleaned
	assert alignment.endswith(".aln"),\
		"phyutility infile "+alignment+" not ends with .aln"
	assert os.stat(DIR+alignment).st_size > 0, DIR+alignment+"empty"
	assert seqtype == "aa" or seqtype == "dna","Input data type: dna or aa"
	if seqtype == "aa":
		cmd = ["phyutility","-aa","-clean",str(min_col_occup),"-in",\
			   DIR+alignment,"-out",DIR+alignment+"-pht"]
	else:
		cmd = ["phyutility","-clean",str(min_col_occup),"-in",\
			   DIR+alignment,"-out",DIR+alignment+"-pht"]
	print " ".join(cmd)
	os.system(" ".join(cmd))
	assert os.path.exists(DIR+alignment+"-pht"),"Error phyutility"
	
	#remove empty and very short seqs
	outfile = open(DIR+cleaned,"w")
	for s in read_fasta_file(DIR+alignment+"-pht"):
		if len(s.seq.replace("-","")) >= min_chr:
			outfile.write(s.get_fasta())
	outfile.close()
	os.remove(DIR+alignment+"-pht")
	return cleaned 
 | 
	42a14d2588e71af5834179f0364925da31d9ef34 
 | 3,657,647 
							 | 
					
	def configProject(projectName):
	""" read in config file"""
	if projectName==None:return
	filename=os.path.join(projectsfolder,unicode(projectName),u"project.cfg" ).encode("utf-8")
	if projectName not in projects: 
		print 'Content-type: text/plain\n\n',"error in projects:",type(projectName),"projectName:",[projectName]
		print projects
		return
		
	if os.path.exists(filename):
		try:
			config = ConfigObj(filename,encoding="UTF-8")
			#config.BOM=True
			if verbose : print "read", filename
						
		except Exception, e:
			if verbose : print "can't read config file:",filename,e
			return
	return readinContent(config,projectName) 
 | 
	e11c31be073b8699c2bd077815720467b9fd6e2e 
 | 3,657,648 
							 | 
					
	def bitwise_not(rasters, extent_type="FirstOf", cellsize_type="FirstOf", astype=None):
    """
    The BitwiseNot operation
    The arguments for this function are as follows:
    :param rasters: array of rasters. If a scalar is needed for the operation, the scalar can be a double or string
    :param extent_type: one of "FirstOf", "IntersectionOf", "UnionOf", "LastOf"
    :param cellsize_type: one of "FirstOf", "MinOf", "MaxOf, "MeanOf", "LastOf"
    :param astype: output pixel type
    :return: the output raster
    """
    return local(rasters, 13, extent_type=extent_type, cellsize_type=cellsize_type, astype=astype) 
 | 
	0edaeaf2b96a48520309dee4809c3251d47c98e8 
 | 3,657,649 
							 | 
					
	import re
def keyclean(key):
    """
    Default way to clean table headers so they make good
    dictionary keys.
    """
    clean = re.sub(r'\s+', '_', key.strip())
    clean = re.sub(r'[^\w]', '', clean)
    return clean 
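# A hedged usage sketch: runs of whitespace become single underscores, then
# every remaining non-word character is dropped.
assert keyclean("First Name") == "First_Name"
assert keyclean("  Total Sales ($) ") == "Total_Sales_"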
 | 
	0f28f0e92e2817a98a31396949690a46e7538ace 
 | 3,657,650 
							 | 
					
	import collections
def get_rfactors_for_each(lpin):
    """
  R-FACTORS FOR INTENSITIES OF DATA SET /isilon/users/target/target/Iwata/_proc_ox2r/150415-hirata/1010/06/DS/multi011_1-5/XDS_ASCII_fullres.HKL
 RESOLUTION   R-FACTOR   R-FACTOR   COMPARED
   LIMIT      observed   expected
     5.84        60.4%      50.1%       174
     4.13        58.1%      51.5%       310
     3.38        60.0%      54.6%       410
     2.92        90.3%      76.1%       483
     2.62       130.4%     100.3%       523
     2.39       241.1%     180.5%       612
     2.21       353.9%     277.9%       634
     2.07       541.1%     444.0%       673
     1.95       -99.9%     -99.9%       535
    total        84.5%      71.2%      4354
    """
    read_flag = False
    filename = None
    ret = collections.OrderedDict() # {filename: list of [dmin, Robs, Rexpt, Compared]}
    for l in open(lpin):
        if "R-FACTORS FOR INTENSITIES OF DATA SET" in l:
            filename = l.strip().split()[-1]
        elif "LIMIT      observed   expected" in l:
            read_flag = True
        elif read_flag:
            sp = l.strip().replace("%","").split()
            if len(sp) == 4:
                dmin, robs, rexp, compared = sp
                if dmin != "total": dmin = float(dmin)
                else: dmin, read_flag = None, False
                robs, rexp = map(float, (robs, rexp))
                compared = int(compared)
                ret.setdefault(filename, []).append([dmin, robs, rexp, compared])
    return ret 
 | 
	937ad8e2cf01fa6ab92838d235a385f9bbfb1b63 
 | 3,657,651 
							 | 
					
	def value_left(self, right):
    """
    Returns the value of the right type instance to use in an
    operator method, namely when the method's instance is on the
    left side of the expression.
    """
    return right.value if isinstance(right, self.__class__) else right 
 | 
	f28c2f0548d3e004e3dd37601dda6c1ea5ab36f6 
 | 3,657,652 
							 | 
					
	def correct_throughput(inspec, spFile='BT-Settl_Asplund2009.fits', quiet=False):
    """
    Main function
    Inputs:
        inspec - list of input spectra, each list item should
                 be a 3xN array of wavelenghts (in microns),
                 flux, and variance. One list item for each
                 order for orders 71-77
        spFile - (optional) path to fits file containing
                 BT-Setll grid, default: BT-Settl_Asplund2009.fits
        quiet  - set True to turn off all printed output
    
    Returns:
        wave - wavelength array of final combined spectrum
        flam - flux array
        fvar - variance array
    """
    ## Read in synthetic spectrum grid
    spgrid, spwave, spaxes = readGrid(spFile)
    ## Parse input spectrum
    waves, flams, fvars = parseSpec(inspec, spwave)
    ## Define cheby grid
    norder, npix = waves.shape
    chebx = np.linspace(-1,1,npix)
    ## Initial guesses
    ## Polynomial to correct for blaze function 
    nbpoly = 3
    bpolys = np.zeros((norder, nbpoly+1))
    ## Polynomial to correct wavelength
    nwpoly = 1
    wpolys = np.zeros((norder, nwpoly+1))
    wpolys[:,0] = 1.0
    for i in range(norder):
        bpolys[i] = chebfit(chebx, 1./flams[i], nbpoly)
        rv = getrv(waves[i], flams[i]*chebval(chebx,bpolys[i]), spwave, spgrid[:,9,2])
        wpolys[i,0] = (1.+rv/3e5)
    ## Model parameters
    teff = 3500
    mh   = 0.0
    ips = np.array([np.hstack((bpolys[i],wpolys[i])) for i in range(norder)])
    
    ## Loop over entire model grid and fit for each order
    chi2s = np.zeros([norder,spgrid.shape[1],spgrid.shape[2]])
    chi2s [:] = 9e9
    ps = np.tile(np.zeros_like(ips[0]), [norder,spgrid.shape[1],spgrid.shape[2],1])
    for k in range(0, spgrid.shape[1]):
        for l in range(spgrid.shape[2]):
            if not quiet:
                print('Teff = {0}, [M/H] = {1}'.format(spaxes[0][k],spaxes[1][l]))
            for i in range(norder):
                flam = flams[i]
                fvar = fvars[i]
                wave = waves[i]
                fit = minimize(fitFun, ips[i], args=(wave,flam,fvar,nbpoly,chebx,spwave,spgrid,k,l))
                chi2s[i,k,l] = fit['fun']
                ps[i,k,l] = fit['x']
                #if not quiet:
                #    print('    '+fit['message'])
                #    print('    '+str(fit['x']))
                #    print('    '+str(fit['fun']))
                #    print()
            if not quiet:
                print(np.mean(chi2s[:,k,l]))
    mink, minl = np.unravel_index(np.argmin(np.sum(chi2s,0)),[len(spaxes[0]),len(spaxes[1])])
    bpolys, wpolys = np.split(ps[:,mink,minl], [nbpoly+1], axis=1)
    teff  = spaxes[0][mink]
    mh    = spaxes[1][minl]
    
    ## Correct everything
    corrwaves = np.zeros_like(waves)
    corrflams = np.zeros_like(flams)
    corrfvars = np.zeros_like(fvars)
    for i in range(norder):
        corrwaves[i] = waves[i] * chebval(chebx, wpolys[i])
        corrflams[i] = flams[i] * chebval(chebx, bpolys[i])
        corrfvars[i] = (np.sqrt(fvars[i]) * chebval(chebx, bpolys[i]))**2.
    ## Flatten and sort
    wave = corrwaves.flatten()
    srt = np.argsort(wave)
    wave = wave[srt]
    flam = corrflams.flatten()[srt]
    fvar = corrfvars.flatten()[srt]
    return wave, flam, fvar 
 | 
	1c1eecf308f738cce891176ab8e527be97839493 
 | 3,657,653 
							 | 
					
	import numbers
import collections
def convert_list(
        items,
        ids,
        parent,
        attr_type,
):
    """Converts a list into an XML string."""
    LOG.info('Inside convert_list()')
    output = []
    addline = output.append
    if ids:
        this_id = get_unique_id(parent)
    for (i, item) in enumerate(items):
        LOG.info('Looping inside convert_list(): item="%s", type="%s"'
                 % (unicode_me(item), type(item).__name__))
        attr = ({} if not ids else {'id': '%s_%s' % (this_id, i + 1)})
        if isinstance(item, numbers.Number) or type(item) in (str,
                                                              unicode):
            addline(convert_kv('item', item, attr_type, attr))
        elif hasattr(item, 'isoformat'):
            # datetime
            addline(convert_kv('item', item.isoformat(), attr_type,
                               attr))
        elif type(item) == bool:
            addline(convert_bool('item', item, attr_type, attr))
        elif isinstance(item, dict):
            if not attr_type:
                addline('<item>%s</item>' % convert_dict(item, ids,
                                                         parent, attr_type))
            else:
                addline('<item type="dict">%s</item>'
                        % convert_dict(item, ids, parent, attr_type))
        elif isinstance(item, collections.Iterable):
            if not attr_type:
                addline('<item %s>%s</item>' % (make_attrstring(attr),
                                                convert_list(item,
                                                             ids,
                                                             'item',
                                                             attr_type)))
            else:
                addline('<item type="list"%s>%s</item>'
                        % (make_attrstring(attr), convert_list(item,
                                                               ids,
                                                               'item',
                                                               attr_type)))
        elif item is None:
            addline(convert_none('item', None, attr_type, attr))
        else:
            raise TypeError('Unsupported data '
                            'type: %s (%s)' % (item, type(item).__name__))
    return ''.join(output) 
 | 
	3e73fa756e5bd2685d529bb21170ab35dd6dedff 
 | 3,657,654 
							 | 
					
	def get_mid_surface(in_surfaces):
    """get_mid_surface gives the mid surface when dealing with the 7 different surfaces
    Args:
        (list of strings) in_surfaces : List of path to the 7 different surfaces generated by mris_expand
    Returns:
        (string) Path to the mid surface
    """
    return in_surfaces[3] 
 | 
	718ab8fa7a3b716241ae05a4e507f40ab6cb0efd 
 | 3,657,655 
							 | 
					
	def parse_type(msg_type):
    """
    Parse ROS message field type
    :param msg_type: ROS field type, ``str``
    :returns: base_type, is_array, array_length, ``(str, bool, int)``
    :raises: :exc:`ValueError` If *msg_type* cannot be parsed
    """
    if not msg_type:
        raise ValueError("Invalid empty type")
    if '[' in msg_type:
        var_length = msg_type.endswith('[]')
        splits = msg_type.split('[')
        if len(splits) > 2:
            raise ValueError("Currently only support 1-dimensional array types: %s"%msg_type)
        if var_length:
            return msg_type[:-2], True, None
        else:
            try:
                length = int(splits[1][:-1])
                return splits[0], True, length
            except ValueError:
                raise ValueError("Invalid array dimension: [%s]"%splits[1][:-1])
    else:
        return msg_type, False, None 
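# A hedged usage sketch showing the three return shapes: scalar type,
# variable-length array, and fixed-length array.
assert parse_type("int32") == ("int32", False, None)
assert parse_type("float64[]") == ("float64", True, None)
assert parse_type("uint8[16]") == ("uint8", True, 16)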
 | 
	1dfe4f3abb7b69bed17b60ee2666279081666dc6 
 | 3,657,656 
							 | 
					
	from typing import List
from typing import Optional
from functools import partial
from glob import glob
from multiprocessing import Pool, cpu_count
def preprocess(feature_modules: List, queries: List[Query],
               prefix: Optional[str] = None,
               process_count: Optional[int] = None):
    """
    Args:
        feature_modules: the feature modules used to generate features, each must implement the add_features function
        queries: all the query objects that have to be preprocessed
        prefix: prefix for the output files, ./preprocessed-data- by default
        process_count: how many subprocesses will I run simultaneously, by default takes all available cpu cores.
    """
    if process_count is None:
        process_count = cpu_count()
    if prefix is None:
        prefix = "preprocessed-data"
    pool_function = partial(_preprocess_one_query, prefix,
                            [m.__name__ for m in feature_modules])
    with Pool(process_count) as pool:
        pool.map(pool_function, queries)
    output_paths = glob(f"{prefix}-*.hdf5")
    return output_paths 
 | 
	2896482423d9306d01d225ef785e0680844a13a4 
 | 3,657,657 
							 | 
					
	def to_distance(maybe_distance_function):
    """
    Parameters
    ----------
    maybe_distance_function: either a Callable, which takes two arguments, or
    a DistanceFunction instance.
    Returns
    -------
    A DistanceFunction instance: NoDistance if the input is None, the input
    itself if it is already a DistanceFunction, otherwise a
    SimpleFunctionDistance wrapping the given callable.
    """
    if maybe_distance_function is None:
        return NoDistance()
    if isinstance(maybe_distance_function, DistanceFunction):
        return maybe_distance_function
    return SimpleFunctionDistance(maybe_distance_function) 
 | 
	4e801a948d86594efdb1d05f352eb449e8bbdd02 
 | 3,657,658 
							 | 
					
	def echo(text):
    """Return echo function."""
    return text 
 | 
	c128bc86bc63006a1ac5b209c10b21f787b7100a 
 | 3,657,659 
							 | 
					
	import os
def predict():
    """Renders the predict page and makes predictions if the method is POST."""
    if request.method == 'GET':
        return render_predict()
    # Get arguments
    checkpoint_name = request.form['checkpointName']
    if 'data' in request.files:
        # Upload data file with SMILES
        data = request.files['data']
        data_name = secure_filename(data.filename)
        data_path = os.path.join(app.config['TEMP_FOLDER'], data_name)
        data.save(data_path)
        # Check if header is smiles
        possible_smiles = get_header(data_path)[0]
        smiles = [possible_smiles] if Chem.MolFromSmiles(possible_smiles) is not None else []
        # Get remaining smiles
        smiles.extend(get_smiles(data_path))
    elif request.form['textSmiles'] != '':
        smiles = request.form['textSmiles'].split()
    else:
        smiles = [request.form['drawSmiles']]
    checkpoint_path = os.path.join(app.config['CHECKPOINT_FOLDER'], checkpoint_name)
    task_names = load_task_names(checkpoint_path)
    num_tasks = len(task_names)
    gpu = request.form.get('gpu')
    # Create and modify args
    parser = ArgumentParser()
    add_predict_args(parser)
    args = parser.parse_args([])
    preds_path = os.path.join(app.config['TEMP_FOLDER'], app.config['PREDICTIONS_FILENAME'])
    args.test_path = 'None'  # TODO: Remove this hack to avoid assert crashing in modify_predict_args
    args.preds_path = preds_path
    args.checkpoint_path = checkpoint_path
    if gpu is not None:
        if gpu == 'None':
            args.no_cuda = True
        else:
            args.gpu = int(gpu)
    modify_predict_args(args)
    # Run predictions
    preds = make_predictions(args, smiles=smiles)
    if all(p is None for p in preds):
        return render_predict(errors=['All SMILES are invalid'])
    # Replace invalid smiles with message
    invalid_smiles_warning = "Invalid SMILES String"
    preds = [pred if pred is not None else [invalid_smiles_warning] * num_tasks for pred in preds]
    return render_predict(predicted=True,
                          smiles=smiles,
                          num_smiles=min(10, len(smiles)),
                          show_more=max(0, len(smiles)-10),
                          task_names=task_names,
                          num_tasks=len(task_names),
                          preds=preds,
                          warnings=["List contains invalid SMILES strings"] if None in preds else None,
                          errors=["No SMILES strings given"] if len(preds) == 0 else None) 
 | 
	bd3fb9d7ca6c54946e6c65e281682e69f3550340 
 | 3,657,660 
							 | 
					
	def zernike_name(index, framework='Noll'):
    """
    Get the name of the Zernike with input index in input framework (Noll or WSS).
    :param index: int, Zernike index
    :param framework: str, 'Noll' or 'WSS' for Zernike ordering framework
    :return zern_name: str, name of the Zernike in the chosen framework
    """
    noll_names = {1: 'piston', 2: 'tip', 3: 'tilt', 4: 'defocus', 5: 'astig45', 6: 'astig0', 7: 'ycoma', 8: 'xcoma',
                  9: 'ytrefoil', 10: 'xtrefoil', 11: 'spherical'}
    wss_names = {1: 'piston', 2: 'tip', 3: 'tilt', 5: 'defocus', 4: 'astig45', 6: 'astig0', 8: 'ycoma', 7: 'xcoma',
                 10: 'ytrefoil', 11: 'xtrefoil', 9: 'spherical'}
    if framework == 'Noll':
        zern_name = noll_names[index]
    elif framework == 'WSS':
        zern_name = wss_names[index]
    else:
        raise ValueError('No known Zernike convention passed.')
    return zern_name 
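# A hedged usage sketch: the same index names different aberrations in the
# two orderings.
assert zernike_name(4, framework='Noll') == 'defocus'
assert zernike_name(4, framework='WSS') == 'astig45'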
 | 
	33e73739c11bc2340a47162e161ba7d87e26d279 
 | 3,657,661 
							 | 
					
	def discriminator_train_batch_mle(batches, discriminator, loss_fn, optimizer):
    """
    Summary
    1. watch discriminator trainable_variables
    2. extract encoder_output, labels, sample_weight, styles, captions from batch and make them tensors
    3. predictions = discriminator(encoder_output, captions, styles, training=True)
    4. loss = loss_fn(labels, predictions, sample_weight=sample_weight)
    5. gradients = tape.gradient(loss, discriminator.trainable_variables))
    6. optimizer.apply_gradients(zip(gradients, discriminator.trainable_variables))
    """
    with tf.GradientTape(watch_accessed_variables=False) as tape:
        tape.watch(discriminator.trainable_variables)
        encoder_output = tf.concat([b[0] for b in batches], axis=0)
        labels = tf.concat([b[2] for b in batches], axis=0)
        sample_weight = tf.concat([b[3] for b in batches], axis=0)
        styles = tf.concat([b[4] for b in batches], axis=0)
        captions = [b[1] for b in batches]
        max_caption_length = max([c.shape[1] for c in captions])
        captions = [tf.pad(c, paddings=tf.constant([[0, 0], [0, max_caption_length - c.shape[1]]])) for c in captions]
        captions = tf.concat(captions, axis=0)
        predictions = discriminator(encoder_output, captions, styles, training=True)
        loss = loss_fn(labels, predictions, sample_weight=sample_weight)
        gradients = tape.gradient(loss, discriminator.trainable_variables)
    optimizer.apply_gradients(zip(gradients, discriminator.trainable_variables))
    return loss 
 | 
	2bb4cd47ddeea5c2edb6f627e39843ba18593833 
 | 3,657,662 
							 | 
					
	def get_subs_dict(expression, mod):
    """
    Builds a substitution dictionary of an expression based of the
    values of these symbols in a model.
    Parameters
    ----------
    expression : sympy expression
    mod : PysMod
    Returns
    -------
    dict of str (symbol name): float
    """
    subs_dict = {}
    symbols = expression.atoms(Symbol)
    for symbol in symbols:
        attr = str(symbol)
        subs_dict[attr] = getattr(mod, attr)
    return subs_dict 
 | 
	075b406dfbdcb5a0049589880ad8b08fbd459159 
 | 3,657,663 
							 | 
					
	def save_index_summary(name, rates, dates, grid_dim):
    """
    Save index file
    Parameters
    ----------
    name : str
        Base name of the output file (INDEX_SUMMARY_EXT is appended).
    rates : production rate data used to build the KEYWORDS, WGNAMES, NUMS and UNITS sections.
    dates : sequence of dates; the first entry is written to the STARTDAT section.
    grid_dim : grid dimensions written to the DIMENS section.
    See Also
    --------
    DataStruct
    """
    with open(name + INDEX_SUMMARY_EXT, "w+b") as file_index:
        nlist = 0
        keywords_data, nums_data, nlist = get_keywords_section_data(rates)  # need to calc the NLIST field for DIMENS
        write_unrst_data_section(f=file_index, name=RESTART, stype=INDEX_META_BLOCK_SPEC[RESTART]['type'],
                                 data_array=np.array(
                                     [' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8, ' ' * 8]))
        dimen = INDEX_META_BLOCK_SPEC[DIMENS]
        dimen['struct']['nlist'].val = nlist
        write_unrst_section(file_index, DIMENS, dimen, grid_dim)
        write_unrst_data_section(f=file_index, name=KEYWORDS, stype=INDEX_SECTIONS_DATA[KEYWORDS].type,
                                 data_array=keywords_data)
        wgnames_date = get_wgnames_section_data(rates)
        write_unrst_data_section(f=file_index, name=WGNAMES, stype=INDEX_SECTIONS_DATA[WGNAMES].type,
                                 data_array=wgnames_date)
        write_unrst_data_section(f=file_index, name=NUMS, stype=INDEX_SECTIONS_DATA[NUMS].type,
                                 data_array=nums_data)
        units_data, nlist = get_units_section_data(rates)
        write_unrst_data_section(f=file_index, name=UNITS, stype=INDEX_SECTIONS_DATA[UNITS].type,
                                 data_array=units_data)
        write_unrst_data_section(f=file_index, name=STARTDAT, stype=INDEX_SECTIONS_DATA[STARTDAT].type,
                                 data_array=get_startdat_section_data(dates[0]))
    return nlist 
 | 
	ac807dac6a1c63eca7b20322dc2c4122dc0b7ec8 
 | 3,657,664 
							 | 
					
	def fluxes_SIF_predict_noSIF(model_NEE, label, EV1, EV2, NEE_max_abs):
    """
    Predict the flux partitioning from a trained NEE model.
    :param model_NEE: full model trained on NEE
    :type model_NEE: keras.Model
    :param label: input of the model part 1 (APAR)
    :type label: tf.Tensor
    :param EV1: input of the model part 2 (GPP_input)
    :type EV1: tf.Tensor
    :param EV2: input of the model part 3 (Reco_input)
    :type EV2: tf.Tensor
    :param NEE_max_abs: normalization factor of NEE
    :type NEE_max_abs: tf.Tensor | float
    :return: corresponding NEE, GPP and Reco value for the provided data
    :rtype: (tf.Tensor, tf.Tensor, tf.Tensor)
    """
    NEE_NN = (layer_output_noSIF(model_NEE, 'NEE', label, EV1, EV2) * NEE_max_abs)
    NEE_NN = tf.reshape(NEE_NN, (NEE_NN.shape[0],))
    GPP_NN = (layer_output_noSIF(model_NEE, 'GPP', label, EV1, EV2) * NEE_max_abs)
    GPP_NN = tf.reshape(GPP_NN, (NEE_NN.shape[0],))
    Reco_NN = (layer_output_noSIF(model_NEE, 'Reco', label, EV1, EV2) * NEE_max_abs)
    Reco_NN = tf.reshape(Reco_NN, (NEE_NN.shape[0],))
    return NEE_NN, GPP_NN, Reco_NN 
 | 
	3f5ecf95c27a4deb04894c84de903a5eb34858d0 
 | 3,657,665 
							 | 
					
	def xml_string(line, tag, namespace, default=None):
    """ Get string value from etree element """
    try:
        val = line.find(namespace + tag).text
    except AttributeError:
        # find() returned None (tag missing), so fall back to the default
        val = default
    return val 
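# A hedged usage sketch with xml.etree and an empty namespace prefix: missing
# tags fall back to the supplied default.
import xml.etree.ElementTree as ET
_row = ET.fromstring("<row><name>ada</name></row>")
assert xml_string(_row, "name", "") == "ada"
assert xml_string(_row, "missing", "", default="n/a") == "n/a"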
 | 
	77745d463cf6604ed787e220fdabf6ff998f770e 
 | 3,657,666 
							 | 
					
	import pdfkit
from datetime import datetime
def generate_header(salutation, name, surname, postSalutation, address, zip, city, phone, email):
    """
    This function generates the header pdf page
    """
    # first we take the html file and parse it as a string
    #print('generating header page', surname, name)
    with open('/home/danielg3/www/crowdlobbying.ch/python/pdf/header.html', 'r', encoding='utf-8') as myfile:
        data = myfile.read()
        to_write = data.format(salutation, name, (surname + ' ' + postSalutation), str(datetime.now())[0:10])
        pdfkit.from_string(to_write, '/tmp/header.pdf')
    
    return open('/tmp/header.pdf', 'rb') 
 | 
	c979c2985d730eee0ce5b442e55a050e7cc4a672 
 | 3,657,667 
							 | 
					
	def cli_cosmosdb_collection_exists(client, database_id, collection_id):
    """Returns a boolean indicating whether the collection exists """
    return len(list(client.QueryContainers(
        _get_database_link(database_id),
        {'query': 'SELECT * FROM root r WHERE r.id=@id',
         'parameters': [{'name': '@id', 'value': collection_id}]}))) > 0 
 | 
	99ada0b4c4176b02d4bbe00c07b991a579a917d0 
 | 3,657,668 
							 | 
					
	def probabilities (X) -> dict:
	""" This function maps the set of outcomes found in the sequence of events, 'X', to their respective probabilty of occuring in 'X'.
	The return value is a python dictionary where the keys are the set of outcomes and the values are their associated probabilities."""
	# The set of outcomes, denoted as 'C', and the total events, denoted as 'T'.
	C, T = set(X), len(X)
	return {c: X.count(c) / T for c in C} 
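# A hedged usage sketch: each outcome's count is divided by the total number
# of events.
assert probabilities("aab") == {"a": 2 / 3, "b": 1 / 3}
assert probabilities([1, 1, 2, 3]) == {1: 0.5, 2: 0.25, 3: 0.25}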
 | 
	c908a1186feea270be71bb1f03485c901bc82733 
 | 3,657,669 
							 | 
					
	import time
import requests
def get_recommend_news():
    """获取新闻推荐列表"""
    # 触电新闻主页推荐实际URL
    recommend_news_url = 'https://api.itouchtv.cn:8090/newsservice/v9/recommendNews?size=24&channelId=0'
    # 当前毫秒时间戳
    current_ms = int(time.time() * 1000)
    headers = get_headers(target_url=recommend_news_url, ts_ms=current_ms)
    resp = requests.get(url=recommend_news_url, headers=headers)
    if resp.ok:
        news_data = resp.json()
        return news_data.get('newsList', [])
    else:
        raise Exception('Request failed:\n==> target_url: %s\n==> headers: %s' % (recommend_news_url, headers))
 | 
	3bee0bb7c1fb977d9380a9be07aab4b802149d6a 
 | 3,657,670 
							 | 
					
	def put_profile_pic(url, profile):
    """
    Takes a url from filepicker and uploads
    it to our aws s3 account.
    """
    try:
        r = requests.get(url)
        size = r.headers.get('content-length')
        if int(size) > 10000000: # greater than 10 MB #patlsotw
            return False 
        filename, headers = urlretrieve(url + "/resize?w=600&h=600")
        resize_filename, headers = urlretrieve(url + "/resize?w=40&h=40") # store profile sized picture (40x40px)
        conn = S3Connection(settings.AWS["AWS_ACCESS_KEY_ID"], settings.AWS["AWS_SECRET_ACCESS_KEY"])
        b = conn.get_bucket(settings.AWS["BUCKET"])
        _set_key(b, profile.user.username, filename)
        k = _set_key(b, profile.user.username + "resize", resize_filename)
        
    except Exception as e:
        print(e)
        return False
    return "http://s3.amazonaws.com/%s/%s"% (settings.AWS["BUCKET"], k.key) 
 | 
	7bc201b754f33518a96a7e6a562e5a6ec601dfb5 
 | 3,657,671 
							 | 
					
	from typing import Dict, Tuple
from pathlib import Path
import numpy as np
def get_raw_data() -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """Loads serialized data from file.
    Returns:
        Tuple[np.ndarray, np.ndarray, np.ndarray]:  Tuple of
            features, labels and classes for the dataset.
    """
    data_file: str = Path().absolute().joinpath(RAW_DATA_FILE).__str__()
    data_dict: Dict[str, np.ndarray] = np.load(data_file, allow_pickle=True)
    x: np.ndarray = data_dict['X']
    y: np.ndarray = data_dict['Y']
    classes: np.ndarray = data_dict['classes']
    return x, y, classes 
 | 
	58e98b733c396fa8dca5f9dd442625283cae5f1e 
 | 3,657,672 
							 | 
					
	import requests
def cog_pixel_value(
    lon,
    lat,
    url,
    bidx=None,
    titiler_endpoint="https://titiler.xyz",
    verbose=True,
    **kwargs,
):
    """Get pixel value from COG.
    Args:
        lon (float): Longitude of the pixel.
        lat (float): Latitude of the pixel.
        url (str): HTTP URL to a COG, e.g., 'https://opendata.digitalglobe.com/events/california-fire-2020/pre-event/2018-02-16/pine-gulch-fire20/1030010076004E00.tif'
        bidx (str, optional): Dataset band indexes (e.g bidx=1, bidx=1&bidx=2&bidx=3). Defaults to None.
        titiler_endpoint (str, optional): Titiler endpoint, e.g., "https://titiler.xyz", "planetary-computer", "pc". Defaults to "https://titiler.xyz".
        verbose (bool, optional): Print status messages. Defaults to True.
    Returns:
        dict: A dictionary mapping band names to pixel values.
    """
    titiler_endpoint = check_titiler_endpoint(titiler_endpoint)
    kwargs["url"] = url
    if bidx is not None:
        kwargs["bidx"] = bidx
    r = requests.get(f"{titiler_endpoint}/cog/point/{lon},{lat}", params=kwargs).json()
    bands = cog_bands(url, titiler_endpoint)
    # if isinstance(titiler_endpoint, str):
    #     r = requests.get(f"{titiler_endpoint}/cog/point/{lon},{lat}", params=kwargs).json()
    # else:
    #     r = requests.get(
    #         titiler_endpoint.url_for_stac_pixel_value(lon, lat), params=kwargs
    #     ).json()
    if "detail" in r:
        if verbose:
            print(r["detail"])
        return None
    else:
        values = r["values"]
        result = dict(zip(bands, values))
        return result 
 | 
	40494f5ee491283b127409f52dd0e1d9029bce52 
 | 3,657,673 
							 | 
					
	def select_daily(ds, day_init=15, day_end=21):
    """
    Select lead time days.
    
    Args:
        ds: xarray dataset.
        day_init (int): first lead day selection. Defaults to 15.
        day_end (int): last lead day selection. Defaults to 21.
        
    Returns:
        xarray dataset subset based on time selection.
    
    ::Lead time indices for reference::
    Week 1:  1,  2,  3,  4,  5,  6,  7
    Week 2:  8,  9, 10, 11, 12, 13, 14
    Week 3: 15, 16, 17, 18, 19, 20, 21
    Week 4: 22, 23, 24, 25, 26, 27, 28
    Week 5: 29, 30, 31, 32, 33, 34, 35
    Week 6: 36, 37, 38, 39, 40, 41, 42
        
    """
    return ds.isel(lead=slice(day_init, day_end + 1)) 
 | 
	9948ecba5acc3c1ca2fe28526585d0bfa81fb862 
 | 3,657,674 
							 | 
					
	def project_polarcoord_lines(lines, img_w, img_h):
    """
    Project lines in polar coordinate space <lines> (e.g. from hough transform) onto a canvas of size
    <img_w> by <img_h>.
    """
    if img_w <= 0:
        raise ValueError('img_w must be > 0')
    if img_h <= 0:
        raise ValueError('img_h must be > 0')
    lines_ab = []
    for i, (rho, theta) in enumerate(lines):
        # calculate intersections with canvas dimension minima/maxima
        cos_theta = np.cos(theta)
        sin_theta = np.sin(theta)
        x_miny = rho / cos_theta if cos_theta != 0 else float("inf")  # x for a minimal y (y=0)
        y_minx = rho / sin_theta if sin_theta != 0 else float("inf")  # y for a minimal x (x=0)
        x_maxy = (rho - img_w * sin_theta) / cos_theta if cos_theta != 0 else float("inf")  # x for maximal y (y=img_h)
        y_maxx = (rho - img_h * cos_theta) / sin_theta if sin_theta != 0 else float("inf")  # y for maximal x (y=img_w)
        # because rounding errors happen, sometimes a point is counted as invalid because it
        # is slightly out of the bounding box
        # this is why we have to correct it like this
        def border_dist(v, border):
            return v if v <= 0 else v - border
        # set the possible points
        # some of them will be out of canvas
        possible_pts = [
            ([x_miny, 0], (border_dist(x_miny, img_w), 0)),
            ([0, y_minx], (border_dist(y_minx, img_h), 1)),
            ([x_maxy, img_h], (border_dist(x_maxy, img_w), 0)),
            ([img_w, y_maxx], (border_dist(y_maxx, img_h), 1)),
        ]
        # get the valid and the dismissed (out of canvas) points
        valid_pts = []
        dismissed_pts = []
        for p, dist in possible_pts:
            if 0 <= p[0] <= img_w and 0 <= p[1] <= img_h:
                valid_pts.append(p)
            else:
                dismissed_pts.append((p, dist))
        # from the dismissed points, get the needed ones that are closed to the canvas
        n_needed_pts = 2 - len(valid_pts)
        if n_needed_pts > 0:
            dismissed_pts_sorted = sorted(dismissed_pts, key=lambda x: abs(x[1][0]), reverse=True)
            for _ in range(n_needed_pts):
                p, (dist, coord_idx) = dismissed_pts_sorted.pop()
                p[coord_idx] -= dist  # correct
                valid_pts.append(p)
        p1 = pt(*valid_pts[0])
        p2 = pt(*valid_pts[1])
        lines_ab.append((p1, p2))
    return lines_ab 
 | 
	7a6a75daedadc6ddfd6f8f55a7a57ae80865605e 
 | 3,657,675 
							 | 
					
	def standardize_for_imshow(image):
  """
  A luminance standardization for pyplot's imshow
  This just allows me to specify a simple, transparent standard for what white
  and black correspond to in pyplot's imshow method. Likely could be
  accomplished by the colors.Normalize method, but I want to make this as
  explicit as possible. If the image is nonnegative, we divide by the scalar
  that makes the largest value 1.0. If the image is nonpositive, we
  divide by the scalar that makes the smallest value -1.0, and then add 1, so
  that this value is 0.0, pitch black. If the image has both positive and
  negative values, we divide and shift so that 0.0 in the original image gets
  mapped to 0.5 for imshow and the largest absolute value gets mapped to
  either 0.0 or 1.0 depending on whether it was positive or negative.
  Parameters
  ----------
  image : ndarray
      The image to be standardized, can be (h, w) or (h, w, c). All operations
      are scalar operations applied to every color channel. Note that this may
      change the hue of color images.
  Returns
  -------
  standardized_image : ndarray
      An RGB image in the range [0.0, 1.0], ready to be showed by imshow.
  raw_val_mapping : tuple(float, float, float)
      Indicates what raw values got mapped to 0.0, 0.5, and 1.0, respectively
  """
  max_val = np.max(image)
  min_val = np.min(image)
  if max_val == min_val:  # constant value
    standardized_image = 0.5 * np.ones(image.shape)
    if max_val > 0:
      raw_val_mapping = [0.0, max_val, 2*max_val]
    elif max_val < 0:
      raw_val_mapping = [2*max_val, max_val, 0.0]
    else:
      raw_val_mapping = [-1.0, 0.0, 1.0]
  else:
    if min_val >= 0:
      standardized_image = image / max_val
      raw_val_mapping = [0.0, 0.5*max_val, max_val]
    elif max_val <= 0:
      standardized_image = (image / -min_val) + 1.0
      raw_val_mapping = [min_val, 0.5*min_val, 0.0]
    else:
      # straddles 0.0. We want to map 0.0 to 0.5 in the displayed image
      skew_toward_max = np.argmax([abs(min_val), abs(max_val)])
      if skew_toward_max:
        normalizer = (2 * max_val)
        raw_val_mapping = [-max_val, 0.0, max_val]
      else:
        normalizer = (2 * np.abs(min_val))
        raw_val_mapping = [min_val, 0.0, -min_val]
      standardized_image = (image / normalizer) + 0.5
  return standardized_image, raw_val_mapping 
 | 
	8b89235623746019b53d3c44dd8cecc2d313ffbd 
 | 3,657,676 
							 | 
					
	def err_failure(error) :
  """ Check a error on failure """
  return not err_success(error) 
 | 
	17e9edbbe7bb5451d991fb94108148d2d0b1c644 
 | 3,657,677 
							 | 
					
	from math import atan, log
def rah_fixed_dt( u2m, roh_air, cp, dt, disp, z0m, z0h, tempk):
	"""
	Fixed temperature difference correction of aerodynamic roughness for heat transport.
	Takes air density, air specific heat, the difference of temperature between the surface skin and a height of about 2 m above it, and the aerodynamic resistance to heat transport.
	This version runs an iteration loop to stabilize the psychrometric data for the aerodynamic resistance to heat flux.
	"""
	PI = 3.14159265358979323846
	psim = 0.0  # stability correction for momentum, refined each iteration
	psih = 0.0  # stability correction for heat, refined each iteration
	ublend=u2m*(log(100-disp)-log(z0m))/(log(2-disp)-log(z0m))
	for i in range(10):
		ustar = 0.41*ublend/(log((100-disp)/z0m)-psim)
		rah   = (log((2-disp)/z0h)-psih)/(0.41*ustar)
		h_in  = roh_air * cp * dt / rah
		length= -roh_air*cp*pow(ustar,3)*tempk/(0.41*9.81*h_in)
		xm    = pow(1.0-16.0*((100-disp)/length),0.25)
		xh    = pow(1.0-16.0*((2-disp)/length),0.25)
		psim  = 2.0*log((1.0+xm)/2.0)+log((1+xm*xm)-2*atan(xm)+0.5*PI)
		psih  = 2.0*log((1.0+xh*xh)/2.0)
	return rah 
 | 
	bd48c62817f25964fa394ace35ab24357d455797 
 | 3,657,678 
							 | 
					
	def process_grid_subsets(output_file, start_subset_id=0, end_subset_id=-1):
    """"Execute analyses on the data of the complete grid and save the processed data to a netCDF file.
        By default all subsets are analyzed
    Args:
        output_file (str): Name of netCDF file to which the results are saved for the respective
                           subset. (including format {} placeholders)
        start_subset_id (int): Starting subset id to be analyzed
        end_subset_id (int): Last subset id to be analyzed
                             (set to -1 to process all subsets after start_subset_id)
    """
    ds, lons, lats, levels, hours, i_highest_level = read_raw_data(start_year, final_year)
    check_for_missing_data(hours)
    # Reading the data of all grid points from the NetCDF file all at once requires a lot of memory. On the other hand,
    # reading the data of all grid points one by one takes up a lot of CPU. Therefore, the dataset is analysed in
    # pieces: the subsets are read and processed consecutively.
    n_subsets = int(np.ceil(float(len(lats)) / read_n_lats_per_subset))
    # Define subset range to be processed in this run
    if end_subset_id == -1:
        subset_range = range(start_subset_id, n_subsets)
    else:
        subset_range = range(start_subset_id, end_subset_id+1)
    if subset_range[-1] > (n_subsets-1):
        raise ValueError("Requested subset ID ({}) is higher than maximal subset ID {}."
                         .format(subset_range[-1], (n_subsets-1)))
    # Loop over all specified subsets to write processed data to the output file.
    counter = 0
    total_iters = len(lats) * len(lons)*len(subset_range)/n_subsets
    start_time = timer()
    for i_subset in subset_range:
        # Find latitudes corresponding to the current i_subset
        i_lat0 = i_subset * read_n_lats_per_subset
        if i_lat0+read_n_lats_per_subset < len(lats):
            lat_ids_subset = range(i_lat0, i_lat0 + read_n_lats_per_subset)
        else:
            lat_ids_subset = range(i_lat0, len(lats))
        lats_subset = lats[lat_ids_subset]
        print("Subset {}, Latitude(s) analysed: {} to {}".format(i_subset, lats_subset[0], lats_subset[-1]))
        # Initialize result arrays for this subset
        res = initialize_result_dict(lats_subset, lons)
        print('    Result array configured, reading subset input now, time lapsed: {:.2f} hrs'
              .format(float(timer()-start_time)/3600))
        # Read data for the subset latitudes
        v_levels_east = ds.variables['u'][:, i_highest_level:, lat_ids_subset, :].values
        v_levels_north = ds.variables['v'][:, i_highest_level:, lat_ids_subset, :].values
        v_levels = (v_levels_east**2 + v_levels_north**2)**.5
        t_levels = ds.variables['t'][:, i_highest_level:, lat_ids_subset, :].values
        q_levels = ds.variables['q'][:, i_highest_level:, lat_ids_subset, :].values
        try:
            surface_pressure = ds.variables['sp'][:, lat_ids_subset, :].values
        except KeyError:
            surface_pressure = np.exp(ds.variables['lnsp'][:, lat_ids_subset, :].values)
        print('    Input read, performing statistical analysis now, time lapsed: {:.2f} hrs'
              .format(float(timer()-start_time)/3600))
        for i_lat_in_subset in range(len(lat_ids_subset)):  # Saves a file for each subset.
            for i_lon in range(len(lons)):
                if (i_lon % 20) == 0:  # Give processing info every 20 longitudes
                    print('        {} of {} longitudes analyzed, statistical analysis of longitude {}, time lapsed: '
                          '{:.2f} hrs'.format(i_lon, len(lons), lons[i_lon], float(timer()-start_time)/3600))
                counter += 1
                level_heights, density_levels = compute_level_heights(levels,
                                                                      surface_pressure[:, i_lat_in_subset, i_lon],
                                                                      t_levels[:, :, i_lat_in_subset, i_lon],
                                                                      q_levels[:, :, i_lat_in_subset, i_lon])
                # Determine wind at altitudes of interest by means of interpolating the raw wind data.
                v_req_alt = np.zeros((len(hours), len(heights_of_interest)))  # Interpolation results array.
                rho_req_alt = np.zeros((len(hours), len(heights_of_interest)))
                for i_hr in range(len(hours)):
                    if not np.all(level_heights[i_hr, 0] > heights_of_interest):
                        raise ValueError("Requested height ({:.2f} m) is higher than height of highest model level."
                                         .format(level_heights[i_hr, 0]))
                    v_req_alt[i_hr, :] = np.interp(heights_of_interest, level_heights[i_hr, ::-1],
                                                   v_levels[i_hr, ::-1, i_lat_in_subset, i_lon])
                    rho_req_alt[i_hr, :] = np.interp(heights_of_interest, level_heights[i_hr, ::-1],
                                                     density_levels[i_hr, ::-1])
                p_req_alt = calc_power(v_req_alt, rho_req_alt)
                # Determine wind statistics at fixed heights of interest.
                for i_out, fixed_height_id in enumerate(analyzed_heights_ids['fixed']):
                    v_mean, v_perc5, v_perc32, v_perc50 = get_statistics(v_req_alt[:, fixed_height_id])
                    res['fixed']['wind_speed']['mean'][i_out, i_lat_in_subset, i_lon] = v_mean
                    res['fixed']['wind_speed']['percentile'][5][i_out, i_lat_in_subset, i_lon] = v_perc5
                    res['fixed']['wind_speed']['percentile'][32][i_out, i_lat_in_subset, i_lon] = v_perc32
                    res['fixed']['wind_speed']['percentile'][50][i_out, i_lat_in_subset, i_lon] = v_perc50
                    v_ranks = get_percentile_ranks(v_req_alt[:, fixed_height_id], [4., 8., 14., 25.])
                    res['fixed']['wind_speed']['rank'][4][i_out, i_lat_in_subset, i_lon] = v_ranks[0]
                    res['fixed']['wind_speed']['rank'][8][i_out, i_lat_in_subset, i_lon] = v_ranks[1]
                    res['fixed']['wind_speed']['rank'][14][i_out, i_lat_in_subset, i_lon] = v_ranks[2]
                    res['fixed']['wind_speed']['rank'][25][i_out, i_lat_in_subset, i_lon] = v_ranks[3]
                    p_fixed_height = p_req_alt[:, fixed_height_id]
                    p_mean, p_perc5, p_perc32, p_perc50 = get_statistics(p_fixed_height)
                    res['fixed']['wind_power_density']['mean'][i_out, i_lat_in_subset, i_lon] = p_mean
                    res['fixed']['wind_power_density']['percentile'][5][i_out, i_lat_in_subset, i_lon] = p_perc5
                    res['fixed']['wind_power_density']['percentile'][32][i_out, i_lat_in_subset, i_lon] = p_perc32
                    res['fixed']['wind_power_density']['percentile'][50][i_out, i_lat_in_subset, i_lon] = p_perc50
                    p_ranks = get_percentile_ranks(p_fixed_height, [40., 300., 1600., 9000.])
                    res['fixed']['wind_power_density']['rank'][40][i_out, i_lat_in_subset, i_lon] = p_ranks[0]
                    res['fixed']['wind_power_density']['rank'][300][i_out, i_lat_in_subset, i_lon] = p_ranks[1]
                    res['fixed']['wind_power_density']['rank'][1600][i_out, i_lat_in_subset, i_lon] = p_ranks[2]
                    res['fixed']['wind_power_density']['rank'][9000][i_out, i_lat_in_subset, i_lon] = p_ranks[3]
                # Integrate power along the altitude.
                for range_id in integration_range_ids:
                    height_id_start = analyzed_heights_ids['integration_ranges'][range_id][1]
                    height_id_final = analyzed_heights_ids['integration_ranges'][range_id][0]
                    p_integral = []
                    x = heights_of_interest[height_id_start:height_id_final + 1]
                    for i_hr in range(len(hours)):
                        y = p_req_alt[i_hr, height_id_start:height_id_final+1]
                        p_integral.append(-np.trapz(y, x))
                    res['integration_ranges']['wind_power_density']['mean'][range_id, i_lat_in_subset, i_lon] = \
                        np.mean(p_integral)
                # Determine wind statistics for ceiling cases.
                for i_out, ceiling_id in enumerate(analyzed_heights_ids['ceilings']):
                    # Find the height maximizing the wind speed for each hour.
                    v_ceiling = np.amax(v_req_alt[:, ceiling_id:analyzed_heights_ids['floor'] + 1], axis=1)
                    v_ceiling_ids = np.argmax(v_req_alt[:, ceiling_id:analyzed_heights_ids['floor'] + 1], axis=1) + \
                        ceiling_id
                    # optimal_heights = [heights_of_interest[max_id] for max_id in v_ceiling_ids]
                    # rho_ceiling = get_density_at_altitude(optimal_heights + surf_elev)
                    rho_ceiling = rho_req_alt[np.arange(len(hours)), v_ceiling_ids]
                    p_ceiling = calc_power(v_ceiling, rho_ceiling)
                    v_mean, v_perc5, v_perc32, v_perc50 = get_statistics(v_ceiling)
                    res['ceilings']['wind_speed']['mean'][i_out, i_lat_in_subset, i_lon] = v_mean
                    res['ceilings']['wind_speed']['percentile'][5][i_out, i_lat_in_subset, i_lon] = v_perc5
                    res['ceilings']['wind_speed']['percentile'][32][i_out, i_lat_in_subset, i_lon] = v_perc32
                    res['ceilings']['wind_speed']['percentile'][50][i_out, i_lat_in_subset, i_lon] = v_perc50
                    v_ranks = get_percentile_ranks(v_ceiling, [4., 8., 14., 25.])
                    res['ceilings']['wind_speed']['rank'][4][i_out, i_lat_in_subset, i_lon] = v_ranks[0]
                    res['ceilings']['wind_speed']['rank'][8][i_out, i_lat_in_subset, i_lon] = v_ranks[1]
                    res['ceilings']['wind_speed']['rank'][14][i_out, i_lat_in_subset, i_lon] = v_ranks[2]
                    res['ceilings']['wind_speed']['rank'][25][i_out, i_lat_in_subset, i_lon] = v_ranks[3]
                    p_mean, p_perc5, p_perc32, p_perc50 = get_statistics(p_ceiling)
                    res['ceilings']['wind_power_density']['mean'][i_out, i_lat_in_subset, i_lon] = p_mean
                    res['ceilings']['wind_power_density']['percentile'][5][i_out, i_lat_in_subset, i_lon] = p_perc5
                    res['ceilings']['wind_power_density']['percentile'][32][i_out, i_lat_in_subset, i_lon] = p_perc32
                    res['ceilings']['wind_power_density']['percentile'][50][i_out, i_lat_in_subset, i_lon] = p_perc50
                    p_ranks = get_percentile_ranks(p_ceiling, [40., 300., 1600., 9000.])
                    res['ceilings']['wind_power_density']['rank'][40][i_out, i_lat_in_subset, i_lon] = p_ranks[0]
                    res['ceilings']['wind_power_density']['rank'][300][i_out, i_lat_in_subset, i_lon] = p_ranks[1]
                    res['ceilings']['wind_power_density']['rank'][1600][i_out, i_lat_in_subset, i_lon] = p_ranks[2]
                    res['ceilings']['wind_power_density']['rank'][9000][i_out, i_lat_in_subset, i_lon] = p_ranks[3]
        print('Locations analyzed: ({}/{:.0f}).'.format(counter, total_iters))
        # Flatten output, convert to xarray Dataset and write to output file.
        output_file_name_formatted = output_file.format(**{'start_year': start_year, 'final_year': final_year,
                                                           'lat_subset_id': i_subset, 'max_lat_subset_id': n_subsets-1})
        print('Writing output to file: {}'.format(output_file_name_formatted))
        flattened_subset_output = get_result_dict(lats_subset, lons, hours, res)
        nc_out = xr.Dataset.from_dict(flattened_subset_output)
        nc_out.to_netcdf(output_file_name_formatted)
        nc_out.close()
        time_lapsed = float(timer()-start_time)
        time_remaining = time_lapsed/counter*(total_iters-counter)
        print("Time lapsed: {:.2f} hrs, expected time remaining: {:.2f} hrs.".format(time_lapsed/3600,
                                                                                     time_remaining/3600))
    ds.close()  # Close the input NetCDF file.
    return n_subsets-1 
 | 
	4103cffd3b519f16205fbf5dfb38ae198f315258 
 | 3,657,679 
							 | 
					
	def bulk_lookup(license_dict, pkg_list):
    """Lookup package licenses"""
    pkg_licenses = {}
    for pkg in pkg_list:
        # Failsafe in case the bom file contains incorrect entries
        if not pkg.get("name") or not pkg.get("version"):
            continue
        pkg_key = pkg["name"] + "@" + pkg["version"]
        if pkg.get("vendor"):
            pkg_key = pkg.get("vendor") + ":" + pkg["name"] + "@" + pkg["version"]
        for lic in pkg.get("licenses"):
            if lic == "X11":
                lic = "MIT"
            elif "MIT" in lic:
                lic = "MIT"
            curr_list = pkg_licenses.get(pkg_key, [])
            match_lic = license_dict.get(lic)
            if match_lic:
                curr_list.append(match_lic)
            pkg_licenses[pkg_key] = curr_list
    return pkg_licenses 
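# A hedged usage sketch with a hypothetical license_dict and package list:
# the "X11" entry is normalized to "MIT" before the lookup, and the key is
# built as name@version (with an optional vendor prefix).
_license_dict = {"MIT": {"spdx-id": "MIT"}}
_pkgs = [{"name": "leftpad", "version": "1.0.0", "licenses": ["X11"]}]
assert bulk_lookup(_license_dict, _pkgs) == {"leftpad@1.0.0": [{"spdx-id": "MIT"}]}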
 | 
	aa06b02fdfaa079dbfc4e1210ccccc995393dc52 
 | 3,657,680 
							 | 
					
	def pack_bits(bools):
    """Pack sequence of bools into bits"""
    if len(bools) % 8 != 0:
        raise ValueError("list length must be multiple of 8")
    bytes_ = []
    b = 0
    for j, v in enumerate(reversed(bools)):
        b <<= 1
        b |= v
        if j % 8 == 7:
            bytes_.append(b)
            b = 0
    return bytes_ 
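# A hedged usage sketch for a single byte: iteration runs over the reversed
# list, so the first input element ends up in the least-significant bit and
# the last elements fill the high bits.
assert pack_bits([True] + [False] * 7) == [1]
assert pack_bits([False] * 6 + [True, True]) == [192]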
 | 
	fadfb5e6abdb80691473262fac57f22384827c50 
 | 3,657,681 
							 | 
					
	def init_ring_dihedral(species,instance,geom = []):
    """
    Calculates the required modifications to a structure's dihedrals to create a cyclic TS
    """
    
    if len(geom) == 0:
        geom = species.geom
    
    if len(instance) > 3:
        
        if len(instance) < 6:
            final_dihedral = 15.
        else:
            final_dihedral = 1.
        
        dihedrals = []   
        for i in range(len(instance)-3):
            dihedrals.append(calc_dihedral(geom[instance[i]], geom[instance[i+1]], geom[instance[i+2]], geom[instance[i+3]])[0]) 
        dihedral_diff = [final_dihedral - dihedrals[i] for i in range(len(dihedrals))]
        
        return dihedral_diff 
 | 
	7799ec63b4188d79104e4ab758fb42b497a64053 
 | 3,657,682 
							 | 
					
	from typing import List
from typing import Optional
def get_largest_contour(
    contours: List[NDArray], min_area: int = 30
) -> Optional[NDArray]:
    """
    Finds the largest contour with size greater than min_area.
    Args:
        contours: A list of contours found in an image.
        min_area: The smallest contour to consider (in number of pixels)
    Returns:
        The largest contour from the list, or None if no contour was larger
        than min_area.
    Example::
        # Extract the blue contours
        BLUE_HSV_MIN = (90, 50, 50)
        BLUE_HSV_MAX = (110, 255, 255)
        contours = rc_utils.find_contours(
            rc.camera.get_color_image(), BLUE_HSV_MIN, BLUE_HSV_MAX
        )
        # Find the largest contour
        largest_contour = rc_utils.get_largest_contour(contours)
    """
    # Check that the list contains at least one contour
    if len(contours) == 0:
        return None
    # Find and return the largest contour if it is larger than min_area
    greatest_contour = max(contours, key=cv.contourArea)
    if cv.contourArea(greatest_contour) < min_area:
        return None
    return greatest_contour 
 | 
	e505e9265540ae2f35e2de0f587aeaee067e5583 
 | 3,657,683 
							 | 
					
	def particle(
    engine,
    particle_id="",
    color: Tuple4 = (1, 0.4, 0.1, 1),
    random_color: bool = False,
    color_temp: bool = False,
    vx=None,
    vy=None,
    vz=None,
    speed_limit=None,
) -> Material:
    """ Particle material. """
    mat = bpy.data.materials.new(f"Particle{particle_id}")
    # FIXME(tpvasconcelos): Use different colors within a particle system
    # if color_temp == 'temperature':
    #     factor = _get_speed_factor(vx, vy, vz, speed_limit)
    if random_color:
        color = _get_randomcolor()
    if engine == "BLENDER_RENDER":
        return _render_particle(mat, color[:-1])
    return _cycles_particle(mat, color) 
 | 
	2bb120d4fd32c31bad7f9ee9765d1fc5808992a4 
 | 3,657,684 
							 | 
					
	import os
import pandas as pd
import scipy.interpolate
def _get_hardware_sharing_throughputs(
    outdirs,
    device,
    device_model,
    precs,
    filename,
    mode,
):
  """ The result is in the format of
  {
    'amp': pd.DataFrame,  # df contains max_B rows
    'fp32': pd.DataFrame, # df contains max_B rows
  }
  df format: (`B` is the index)
  B     {mode}:{prec}:0 {mode}:{prec}:1 ... {mode}:{prec}:avg {mode}:{prec}:min {mode}:{prec}:max
  1          float           float      ...       float             float             float
  2          float           float      ...       float             float             float
  3          float           float      ...       float             float             float
  ...
  max_B      float           float      ...       float             float             float
  """
  throughputs = {}
  for prec in precs:
    throughputs[prec] = {'B': []}
    for outdir_idx, outdir in enumerate(outdirs):
      Bs = []
      throughputs_of_Bs = []
      mode_outdir_path = os.path.join(outdir, device, device_model, prec, mode)
      for B_exp in os.listdir(mode_outdir_path):
        B = int(B_exp[1:])
        Bs.append(B)
        B_outdir_path = os.path.join(mode_outdir_path, B_exp)
        timing_dfs = None
        if mode == 'hfta':
          timing_dfs = [pd.read_csv(os.path.join(B_outdir_path, filename))]
        else:
          timing_dfs = [
              pd.read_csv(
                  os.path.join(B_outdir_path, 'idx{}'.format(idx), filename))
              for idx in range(B)
          ]
        throughputs_of_Bs.append(_calculate_throughputs(timing_dfs, device))
      max_B = max(Bs)
      linear_interpolator = scipy.interpolate.interp1d(Bs, throughputs_of_Bs)
      throughputs[prec]['{}:{}:{}'.format(mode, prec, outdir_idx)] = [
          linear_interpolator(B) for B in range(1, max_B + 1)
      ]
      throughputs[prec]['B'] = range(1, max_B + 1)
    throughputs[prec] = pd.DataFrame(throughputs[prec]).set_index('B')
    _aggregate_along_rows(throughputs[prec], mode, prec)
  return throughputs 
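# Illustrative call (placeholder paths, device names and file name are assumptions):
# it expects the outdir/device/device_model/prec/mode/B{n}/... layout documented in
# the docstring, plus the module's _calculate_throughputs and _aggregate_along_rows
# helpers, so it is guarded to run only when such a benchmark tree exists.
if os.path.isdir('runs/exp0'):
    demo_throughputs = _get_hardware_sharing_throughputs(
        outdirs=['runs/exp0', 'runs/exp1'],
        device='cuda',
        device_model='v100',
        precs=['fp32', 'amp'],
        filename='timing.csv',
        mode='hfta',
    )
    print(demo_throughputs['fp32'].head())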
 | 
	bd104c88144cc1635e4387c93aaf838f210b9703 
 | 3,657,685 
							 | 
					
	import numpy as np
import chainer
from chainercv import transforms
def mask_to_segm(mask, bbox, segm_size, index=None):
    """Crop and resize mask.
    This function requires cv2.
    Args:
        mask (~numpy.ndarray): See below.
        bbox (~numpy.ndarray): See below.
        segm_size (int): The size of segm :math:`S`.
        index (~numpy.ndarray): See below. :math:`R = N` when
            :obj:`index` is :obj:`None`.
    Returns:
        ~numpy.ndarray: See below.
    .. csv-table::
        :header: name, shape, dtype, format
        :obj:`mask`, ":math:`(N, H, W)`", :obj:`bool`, --
        :obj:`bbox`, ":math:`(R, 4)`", :obj:`float32`, \
        ":math:`(y_{min}, x_{min}, y_{max}, x_{max})`"
        :obj:`index` (optional), ":math:`(R,)`", :obj:`int32`, --
        :obj:`segms` (output), ":math:`(R, S, S)`", :obj:`float32`, \
        ":math:`[0, 1]`"
    """
    pad = 1
    _, H, W = mask.shape
    bbox = chainer.backends.cuda.to_cpu(bbox)
    # To work around an issue with cv2.resize (it seems to automatically
    # pad with repeated border values), we manually zero-pad the masks by 1
    # pixel prior to resizing back to the original image resolution.
    # This prevents "top hat" artifacts. We therefore need to expand
    # the reference boxes by an appropriate factor.
    padded_segm_size = segm_size + pad * 2
    expand_scale = padded_segm_size / segm_size
    bbox = _expand_bbox(bbox, expand_scale)
    resize_size = padded_segm_size
    bbox = _integerize_bbox(bbox)
    segm = []
    if index is None:
        index = np.arange(len(bbox))
    else:
        index = chainer.backends.cuda.to_cpu(index)
    for i, bb in zip(index, bbox):
        y_min = max(bb[0], 0)
        x_min = max(bb[1], 0)
        y_max = max(min(bb[2], H), 0)
        x_max = max(min(bb[3], W), 0)
        if y_max <= y_min or x_max <= x_min:
            segm.append(np.zeros((segm_size, segm_size), dtype=np.float32))
            continue
        bb_height = bb[2] - bb[0]
        bb_width = bb[3] - bb[1]
        cropped_m = np.zeros((bb_height, bb_width), dtype=bool)  # np.bool was removed in modern NumPy
        y_offset = y_min - bb[0]
        x_offset = x_min - bb[1]
        cropped_m[y_offset:y_offset + y_max - y_min,
                  x_offset:x_offset + x_max - x_min] =\
            chainer.backends.cuda.to_cpu(mask[i, y_min:y_max, x_min:x_max])
        with chainer.using_config('cv_resize_backend', 'cv2'):
            sgm = transforms.resize(
                cropped_m[None].astype(np.float32),
                (resize_size, resize_size))[0].astype(np.int32)
        segm.append(sgm[pad:-pad, pad:-pad])
    return np.array(segm, dtype=np.float32) 
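# Small usage sketch, assuming ChainerCV and OpenCV are installed. The two helpers
# below are hypothetical stand-ins for the module's private _expand_bbox and
# _integerize_bbox (not the originals), added only so the example is self-contained.
def _expand_bbox(bbox, scale):
    y_c = (bbox[:, 0] + bbox[:, 2]) / 2
    x_c = (bbox[:, 1] + bbox[:, 3]) / 2
    h = (bbox[:, 2] - bbox[:, 0]) * scale
    w = (bbox[:, 3] - bbox[:, 1]) * scale
    return np.stack([y_c - h / 2, x_c - w / 2, y_c + h / 2, x_c + w / 2], axis=1)
def _integerize_bbox(bbox):
    return np.round(bbox).astype(np.int32)
demo_mask = np.zeros((1, 32, 32), dtype=bool)
demo_mask[0, 8:24, 8:24] = True
demo_bbox = np.array([[6.0, 6.0, 26.0, 26.0]], dtype=np.float32)
print(mask_to_segm(demo_mask, demo_bbox, segm_size=14).shape)  # -> (1, 14, 14)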
 | 
	5fd4003595ce7b13bcf59ce8669bfdb37a545d5b 
 | 3,657,686 
							 | 
					
	def append_unique(func):
    """
    This decorator collects each result - regardless of type - into a list,
    removing duplicates by passing the results through a set.
    """
    def inner(*args, **kwargs):
        return list(
            set(
                _results(
                    args[0],
                    func.__name__,
                    *args,
                    **kwargs
                )
            )
        )
    return inner 
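# Hedged illustration: the decorator relies on a module-level _results helper that is
# not shown, so a stand-in is stubbed here purely to demonstrate the deduplication.
def _results(obj, name, *args, **kwargs):
    return [1, 2, 2, 3, 3, 3]
@append_unique
def gather(obj):
    pass  # body unused: inner() delegates to _results()
print(sorted(gather(object())))  # -> [1, 2, 3]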
 | 
	ed656c500f95b03e8036605e9af5cc739830ff7b 
 | 3,657,687 
							 | 
					
	def _get_unique_figs(tree):
    """
    Extract the unique figures from the tree.
    """
    return _find_unique_figures_wrap(list(map(_get_fig_values(tree),
                                              tree)), []) 
 | 
	ba8a40766981bca9ca23fd3ec681f1a8d52ad85b 
 | 3,657,688 
							 | 
					
	def read_fssp(fssp_handle):
    """Process a FSSP file and creates the classes containing its parts.
    Returns:
        :header: Contains the file header and its properties.
        :sum_dict: Contains the summary section.
        :align_dict: Contains the alignments.
    """
    header = FSSPHeader()
    sum_dict = FSSPSumDict()
    align_dict = FSSPAlignDict()
    curline = fssp_handle.readline()
    while not summary_title.match(curline):
        # Still in title
        header.fill_header(curline)
        curline = fssp_handle.readline()
    if not summary_title.match(curline):
        raise ValueError("Bad FSSP file: no summary record found")
    curline = fssp_handle.readline()  # Read the title line, discard
    curline = fssp_handle.readline()  # Read the next line
    # Process the summary records into a list
    while summary_rec.match(curline):
        cur_sum_rec = FSSPSumRec(curline)
        sum_dict[cur_sum_rec.nr] = cur_sum_rec
        curline = fssp_handle.readline()
    # Outer loop: process everything up to the EQUIVALENCES title record
    while not equiv_title.match(curline):
        while (not alignments_title.match(curline) and
               not equiv_title.match(curline)):
            curline = fssp_handle.readline()
        if not alignments_title.match(curline):
            if equiv_title.match(curline):
                # print("Reached equiv_title")
                break
            else:
                raise ValueError("Bad FSSP file: no alignments title record found")
        if equiv_title.match(curline):
            break
        # If we got to this point, this means that we have matched an
        # alignments title. Parse the alignment records in a loop.
        curline = fssp_handle.readline()  # Read the title line, discard
        curline = fssp_handle.readline()  # Read the next line
        while alignments_rec.match(curline):
            align_rec = FSSPAlignRec(fff_rec(curline))
            key = align_rec.chain_id + align_rec.res_name + str(align_rec.pdb_res_num)
            align_list = curline[fssp_rec.align.start_aa_list:].strip().split()
            if key not in align_dict:
                align_dict[key] = align_rec
            align_dict[key].add_align_list(align_list)
            curline = fssp_handle.readline()
            if not curline:
                print("EOFEOFEOF")
                raise EOFError
    for i in align_dict.values():
        i.pos_align_list2dict()
        del i.PosAlignList
    align_dict.build_resnum_list()
    return (header, sum_dict, align_dict) 
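# Hedged usage sketch: this parser follows the Biopython Bio.FSSP interface, so the
# classes and regexes referenced above (FSSPHeader, summary_title, ...) are assumed to
# come from that module; "1abc.fssp" is a placeholder file path, hence the guard.
import os
if os.path.exists("1abc.fssp"):
    with open("1abc.fssp") as handle:
        header, summaries, alignments = read_fssp(handle)
    print(len(summaries), "summary records,", len(alignments), "alignment records")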
 | 
	4ac2c61ed40f14102d0ae1a8a3b6fa8e69252f27 
 | 3,657,689 
							 | 
					
	import json
def LoadJSON(json_string):
  """Loads json object from string, or None.
  Args:
    json_string: A string to get object from.
  Returns:
    JSON object if the string represents a JSON object, None otherwise.
  """
  try:
    data = json.loads(json_string)
  except ValueError:
    data = None
  return data 
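# Quick check of the None fallback on malformed input.
print(LoadJSON('{"a": 1}'))   # -> {'a': 1}
print(LoadJSON('not json'))   # -> None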
 | 
	598c9b4d5e358a7a4672b25541c9db7743fcd587 
 | 3,657,690 
							 | 
					
	import inspect
import re
from itertools import product
def _dimensions_matrix(channels, n_cols=None, top_left_attribute=None):
    """
    time,x0 y0,x0   x1,x0   y1,x0
    x0,y0   time,y0 x1,y0   y1,y0
    x0,x1   y0,x1   time,x1 y1,x1
    x0,y1   y0,y1   x1,y1   time,y1
    """
    # Generate the dimensions matrix from the docstring.
    ds = inspect.getdoc(_dimensions_matrix).strip()
    x, y = channels[:2]
    def _get_dim(d):
        if d == 'time':
            return d
        assert re.match(r'[xy][01]', d)
        c = x if d[0] == 'x' else y
        f = int(d[1])
        return c, f
    dims = [[_.split(',') for _ in re.split(r' +', line.strip())]
            for line in ds.splitlines()]
    x_dim = {(i, j): _get_dim(dims[i][j][0])
             for i, j in product(range(4), range(4))}
    y_dim = {(i, j): _get_dim(dims[i][j][1])
             for i, j in product(range(4), range(4))}
    return x_dim, y_dim 
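# Quick usage sketch with two placeholder channel labels.
demo_x, demo_y = _dimensions_matrix(['ch0', 'ch1'])
print(demo_x[(0, 0)], demo_y[(0, 0)])  # -> time ('ch0', 0)
print(demo_x[(1, 2)])                  # -> ('ch0', 1), the 'x1' entry of row 1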
 | 
	2c119c74e7e37827d6813437d9e0d8bbd97cbbc7 
 | 3,657,691 
							 | 
					
	import numpy as np
def is_monotonic_increasing(x):
    """
    Helper function to determine if a list is monotonically increasing.
    """
    dx = np.diff(x)
    return np.all(dx >= 0) 
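# Two quick checks of the helper.
print(is_monotonic_increasing([1, 2, 2, 5]))  # -> True
print(is_monotonic_increasing([3, 1, 4]))     # -> False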
 | 
	6d0afe3a6a70d57ec4ae09e20164c34c0739855f 
 | 3,657,692 
							 | 
					
	import os.path as op
from copy import copy
import nibabel as nib
import numpy as np
from scipy.ndimage import label
def cluster_size_threshold(data, thresh=None, min_size=20, save=False):
    """ Removes clusters smaller than a prespecified number in a stat-file.
    Parameters
    ----------
    data : numpy-array or str
        3D Numpy-array with statistic-value or a string to a path pointing to
        a nifti-file with statistic values.
    thresh : int, float
        Initial threshold to binarize the image and extract clusters.
    min_size : int
        Minimum size (i.e. amount of voxels) of cluster. Any cluster with fewer
        voxels than this amount is set to zero ('removed').
    save : bool
        If data is a file-path, this parameter determines whether the cluster-
        corrected file is saved to disk again.
    """
    if isinstance(data, str):  # `unicode` only exists in Python 2
        fname = copy(data)
        data = nib.load(data)
        affine = data.affine
        data = data.get_fdata()
    if thresh is not None:
        data[data < thresh] = 0
    clustered, num_clust = label(data > 0)
    values, counts = np.unique(clustered.ravel(), return_counts=True)
    # Get number of clusters by finding the index of the first instance
    # when 'counts' is smaller than min_size
    first_clust = np.sort(counts)[::-1] < min_size
    if first_clust.sum() == 0:
        print('All clusters were larger than: %i, returning original data' %
              min_size)
        return data
    n_clust = np.argmax(first_clust)
    # Sort and trim
    cluster_nrs = values[counts.argsort()[::-1][:n_clust]]
    cluster_nrs = np.delete(cluster_nrs, 0)
    # Set small clusters to zero.
    data[np.invert(np.in1d(clustered, cluster_nrs)).reshape(data.shape)] = 0
    if save:
        img = nib.Nifti1Image(data, affine=affine)
        basename = op.basename(fname)
        nib.save(img, basename.split('.')[0] + '_thresholded.nii.gz')
    return data 
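# Hedged in-memory sketch on a synthetic array (no NIfTI file involved): one large
# 5x5x5 blob and one 2-voxel blob; note the function modifies its input in place.
demo = np.zeros((20, 20, 20))
demo[2:7, 2:7, 2:7] = 3.0
demo[15, 15, 15:17] = 3.0
cleaned = cluster_size_threshold(demo, thresh=2.0, min_size=20)
print((cleaned > 0).sum())  # -> 125: the 2-voxel cluster has been zeroed out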
 | 
	3b946a639e2dae1c47fb78ad30dded32b0dd5f06 
 | 3,657,693 
							 | 
					
	import pandas as pd
def convert_df(df):
    """Makes a Pandas DataFrame more memory-efficient through intelligent use of Pandas data types: 
    specifically, by storing columns with repetitive Python strings not with the object dtype for unique values 
    (entirely stored in memory) but as categoricals, which are represented by repeated integer values. This is a 
    net gain in memory when the reduced memory size of the category type outweighs the added memory cost of storing 
    one more thing. As such, this function checks the degree of redundancy for a given column before converting it."""
    
    converted_df = pd.DataFrame() # Initialize DF for memory-efficient storage of strings (object types)
    # TO DO: Infer dtypes of df
    df_obj = df.select_dtypes(include=['object']).copy() # Filter to only those columns of object data type
    for col in df.columns: 
        if col in df_obj: 
            num_unique_values = len(df_obj[col].unique())
            num_total_values = len(df_obj[col])
            if (num_unique_values / num_total_values) < 0.5: # Only convert data types if at least half of values are duplicates
                converted_df.loc[:,col] = df[col].astype('category') # Store these columns as dtype "category"
            else: 
                converted_df.loc[:,col] = df[col]
        else:    
            converted_df.loc[:,col] = df[col]
                      
    # Downcast numeric columns and keep the results (the original discarded them).
    float_cols = converted_df.select_dtypes(include=['float']).columns
    converted_df[float_cols] = converted_df[float_cols].apply(pd.to_numeric, downcast='float')
    int_cols = converted_df.select_dtypes(include=['int']).columns
    converted_df[int_cols] = converted_df[int_cols].apply(pd.to_numeric, downcast='signed')
    
    return converted_df 
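# Quick sketch comparing memory usage before and after conversion on a synthetic frame.
demo_df = pd.DataFrame({
    'city': ['Lisbon', 'Porto'] * 500,  # highly repetitive strings -> good category candidate
    'value': range(1000),
})
smaller_df = convert_df(demo_df)
print(demo_df.memory_usage(deep=True).sum(), smaller_df.memory_usage(deep=True).sum())
print(smaller_df.dtypes)  # 'city' should now be a category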
 | 
	7f6f2c20762963dceb0f52d36b7b724c5a89d8d4 
 | 3,657,694 
							 | 
					
	from django.contrib import messages
from django.shortcuts import redirect
from django.template.response import TemplateResponse
# `forms` and `PinnedFilters` are project-local imports omitted in the original snippet.
def run_add(request):
    """Add a run."""
    if request.method == "POST":
        form = forms.AddRunForm(request.POST, user=request.user)
        run = form.save_if_valid()
        if run is not None:
            messages.success(
                request, u"Run '{0}' added.".format(
                    run.name)
                )
            return redirect("manage_runs")
    else:
        pf = PinnedFilters(request.COOKIES)
        form = forms.AddRunForm(
            user=request.user,
            initial=pf.fill_form_querystring(request.GET).dict(),
            )
    return TemplateResponse(
        request,
        "manage/run/add_run.html",
        {
            "form": form
            }
        ) 
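# Hedged smoke-test sketch using Django's RequestFactory; it assumes a fully configured
# Django project (settings, templates) and the project's forms/PinnedFilters modules,
# none of which are shown here.
from django.test import RequestFactory
def _smoke_test_run_add(user):
    request = RequestFactory().get("/runs/add/")
    request.user = user
    request.COOKIES = {}
    return run_add(request)  # a TemplateResponse rendering the add-run form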
 | 
	c1eca5702e93e3a0751b2b22de18aa1aa4c88db7 
 | 3,657,695 
							 | 
					
	def map_aemo_facility_status(facility_status: str) -> str:
    """
        Maps an AEMO facility status to an Opennem facility status
    """
    unit_status = facility_status.lower().strip()
    if unit_status.startswith("in service"):
        return "operating"
    if unit_status.startswith("in commissioning"):
        return "commissioning"
    if unit_status.startswith("committed"):
        return "committed"
    if unit_status.startswith("maturing"):
        return "maturing"
    if unit_status.startswith("emerging"):
        return "emerging"
    raise Exception(
        "Could not find AEMO status for facility status: {}".format(
            unit_status
        )
    ) 
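# Example calls covering the normalisation and the failure path.
print(map_aemo_facility_status("In Service"))  # -> "operating"
try:
    map_aemo_facility_status("unknown status")
except Exception as exc:
    print(exc)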
 | 
	43e1d5e5ea984d36260604cf25f4c7b90d5e56f1 
 | 3,657,696 
							 | 
					
	def demand_monthly_ba(tfr_dfs):
    """A stub transform function."""
    return tfr_dfs 
 | 
	74bbb3d732b64a30f0529f76deedd646cc7d4171 
 | 3,657,697 
							 | 
					
	def render_page(page, title="My Page", context=None):
    """
    A simple helper to render the md_page.html template with [context] vars, and
    the additional contents of `page/[page].md` in the `md_page` variable.
    It automagically adds the global template vars defined above, too.
    It returns a string, usually the HTML contents to display.
    """
    if context is None:
        context = {}
    context['title'] = title
    context['md_page'] = ''
    with open(get_path('page/%s.md' % page)) as f:  # `file()` was removed in Python 3
        context['md_page'] = f.read()
    return tpl_engine.get_template('md_page.html.jinja2').render(
        {**tpl_global_vars, **context}  # dict views cannot be concatenated in Python 3
    )
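# Hedged usage sketch: tpl_engine, tpl_global_vars and get_path are module-level
# objects defined elsewhere in this module, and 'about' is a placeholder page name
# assumed to exist as page/about.md.
demo_html = render_page('about', title='About us', context={'user': 'guest'})
print(demo_html[:80])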
 | 
	fd2e427f096324b2e9a17587f498626a2ebfb47e 
 | 3,657,698 
							 | 
					
	def _SortableApprovalStatusValues(art, fd_list):
  """Return a list of approval statuses relevant to one UI table column."""
  sortable_value_list = []
  for fd in fd_list:
    for av in art.approval_values:
      if av.approval_id == fd.field_id:
        # Order approval statuses by life cycle.
        # NOT_SET == 8 but should be before all other statuses.
        sortable_value_list.append(
            0 if av.status.number == 8 else av.status.number)
  return sortable_value_list 
 | 
	15ce3c6191495957674ab38c2f990d34f10ecdf6 
 | 3,657,699 
							 | 
					