Dataset columns: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M).
import constants as c import cPickle def get_word_prob(): """Returns the probabilities of all the words in the mechanical turk video labels. """ data = cPickle.load(open(c.datafile)) # Read in the words from the labels wordcount = dict() totalcount = 0 for label in data: for word in label: totalcount += 1 if word in wordcount: wordcount[word] += 1 else: wordcount[word] = 1 wordprob = dict([(word, float(wc)/totalcount) for word, wc in wordcount.items()]) return wordprob
c9f137ad4e844ff3cea3c6f9b1d64e9422359b79
22,965
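The snippet above is Python 2 (cPickle, float division guard); a minimal Python 3 sketch of the same word-probability computation on toy labels, leaving out the project-specific constants/c.datafile:

from collections import Counter

def word_prob(labels):
    # Flatten all label word-lists and count occurrences of each word.
    counts = Counter(word for label in labels for word in label)
    total = sum(counts.values())
    return {word: count / total for word, count in counts.items()}

print(word_prob([["cat", "runs"], ["cat", "sleeps"]]))
# {'cat': 0.5, 'runs': 0.25, 'sleeps': 0.25}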
def angDistance(ra, dec, df, raCol='fieldRA', decCol='fieldDec'):
    """Return the row of `df` whose (raCol, decCol) position has the
    smallest angular separation from (ra, dec)."""
    df['dist'] = angSep(ra, dec, df[raCol], df[decCol])
    idx = df.dist.idxmin()
    rval = df.loc[idx]
    df.drop('dist', axis=1, inplace=True)
    return rval
9e88711ff33a7ac1a223608ea5441e1cfdbb7a01
22,966
def offer_in_influencing_offers(offerId, influencing_offers): """ Find if a passed offerId is in the influencing_offers list Parameters ---------- offerId: Offer Id from portfolio dataframe. influencing_offers : List of offers found for a customer Returns ------- 1 if offer is found 0 if not found """ if (offerId in influencing_offers): return 1 else: return 0
81c4a8bcb7432222a1fc5175449192681002539c
22,967
def identity_block(filters, stage, block): """The identity block is the block that has no conv layer at shortcut. # Arguments filters: integer, used for first and second conv layers, third conv layer double this value stage: integer, current stage label, used for generating layer names block: integer, current block label, used for generating layer names # Returns Output layer for the block. """ def layer(input_tensor): conv_params = get_conv_params() bn_params = get_bn_params() conv_name, bn_name, relu_name, sc_name = handle_block_names(stage, block) x = Conv2D(filters, (1, 1), name=conv_name + '1', **conv_params)(input_tensor) x = BatchNormalization(name=bn_name + '1', **bn_params)(x) x = Activation('relu', name=relu_name + '1')(x) x = ZeroPadding2D(padding=(1, 1))(x) x = GroupConv2D(filters, (3, 3), conv_params, conv_name + '2')(x) x = BatchNormalization(name=bn_name + '2', **bn_params)(x) x = Activation('relu', name=relu_name + '2')(x) x = Conv2D(filters * 2, (1, 1), name=conv_name + '3', **conv_params)(x) x = BatchNormalization(name=bn_name + '3', **bn_params)(x) x = Add()([x, input_tensor]) x = Activation('relu', name=relu_name)(x) return x return layer
43eb5d56a83d24db9bd60aabf1e7dbd601a093cb
22,969
import psutil def filebeat_service_running(): """ Checks if the filebeat service is currently running on the OS. :return: True if filebeat service detected and running, False otherwise. """ result = False try: filebeat_service = psutil.win_service_get('filebeat') filebeat_service_info = filebeat_service.as_dict() if filebeat_service_info['status'] == 'running': result = True except psutil.NoSuchProcess: return result return result
51f9bc865b4f7d2de760fcc6952755b5c7c9106a
22,970
def unhandled_request_message(request, cassette): """Generate exception for unhandled requests.""" return UNHANDLED_REQUEST_EXCEPTION.format( url=request.url, cassette_file_path=cassette.cassette_name, cassette_record_mode=cassette.record_mode, cassette_match_options=cassette.match_options )
dcbfec51a88d3ad62395f48c7c046400177c07fd
22,971
from django.contrib.auth import logout as auth_logout def logout(request): """ Logs out the user and displays 'You are logged out' message. """ if request.method == 'GET': return _new_api_403() auth_logout(request)
7e975fdd68295e893d8f14321f722e941833b872
22,972
def compara_dv(cpf, primeiro_dv, segundo_dv):
    """Checks whether the computed check digits match the ones in the CPF."""
    return "válido" if primeiro_dv == int(cpf[9]) and segundo_dv == int(cpf[10]) else "inválido"
4b1794f466ce8c00e91c8c5f281996ea262591f8
22,973
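compara_dv only compares precomputed check digits; a sketch of how primeiro_dv and segundo_dv are conventionally derived (the standard CPF modulo-11 rule; the helper name calcula_dv is ours, not from the source):

def calcula_dv(cpf):
    # First check digit: weights 10..2 over the first 9 digits, mod-11 rule.
    s1 = sum(int(d) * w for d, w in zip(cpf[:9], range(10, 1, -1)))
    dv1 = 0 if s1 % 11 < 2 else 11 - s1 % 11
    # Second check digit: weights 11..2 over the first 10 digits.
    s2 = sum(int(d) * w for d, w in zip(cpf[:10], range(11, 1, -1)))
    dv2 = 0 if s2 % 11 < 2 else 11 - s2 % 11
    return dv1, dv2

print(calcula_dv("52998224725"))  # (2, 5): matches digits 10 and 11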
def write_file(file_name, data, line_length): """ Writes the results to a text file using a name based on file_name input: string, list returns: int """ pos = file_name.rfind('.') fn_o = file_name[:pos] + '.OUT' + file_name[pos:] f = open(fn_o, "w") for fsn, sequence in data: f.write(fsn + '\n') l_length = len(sequence) if line_length == 0 else line_length for p in range(0, len(sequence), l_length): f.write(sequence[p:p+l_length] + '\n') f.close() return len(data)
0ad1b25106a6c9120289e8d55caafbebf475f9d5
22,974
import numpy as np

def handle_duplicates(df, cutoff=5, agg_source_col='multiple'):
    """Aggregates duplicate measurements in a DataFrame.

    Parameters
    ----------
    df : pandas DataFrame
        DataFrame with required columns: 'smiles', 'solvent', 'peakwavs_max'
    cutoff : int
        Wavelength cutoff in nm. Duplicate measurements of the same
        smiles-solvent pair with standard deviation less than cutoff are
        averaged. Those with standard deviation greater than cutoff are dropped.
    agg_source_col : str
        'multiple' or 'random'; how to aggregate the 'source' column.

    Returns
    -------
    df : pandas DataFrame
        An updated DataFrame with duplicates aggregated or removed
    """
    col_names = ['smiles', 'solvent'] + target_names + ['source']
    cols = [x for x in df.columns if x not in col_names]
    agg_dict = {}
    for property in target_names:
        agg_dict[property] = ['mean', 'std']
    if agg_source_col == 'multiple':
        # The original line ended in a stray comma, which turned the
        # aggregator into a 1-tuple; also return the scalar, not the Series.
        agg_dict['source'] = lambda x: 'multiple' if len(x) > 1 else x.iloc[0]
    elif agg_source_col == 'random':
        np.random.seed(0)
        agg_dict['source'] = np.random.choice
    for col in cols:
        agg_dict[col] = 'mean'
    # For all smiles+solvent pairs, find mean and std of target property/properties
    # If std > cutoff, drop; elif std <= cutoff, take mean
    df = df.groupby(['smiles', 'solvent']).agg(agg_dict).reset_index()
    for property in target_names:
        high_std_idx = df[df[property]['std'] > cutoff].index
        df.drop(index=high_std_idx, inplace=True)
    df.drop(columns='std', level=1, inplace=True)
    df.columns = df.columns.get_level_values(0)
    return df
0f37ba0256d3a73ebc86d628b65d054c049a7456
22,975
from sklearn.model_selection import train_test_split

def splitData(features, target, trainFraction=0.25):
    """
    Split the data into test and train data

    Inputs:
      > features: the model feature data (DataFrame)
      > target: the target data (Series)
      > trainFraction (0.25 by default): fraction of events to use for training

    Outputs:
      > Training feature data (DataFrame), Testing feature data (DataFrame),
        Training target data (Series), Testing target data (Series)
    """
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=1 - trainFraction, random_state=42)
    return X_train, X_test, y_train, y_test
b3dba6e5b1082062995c4272c7e42fe24c7c8712
22,976
def poisson_moment(k, n):
    """returns the moment of x**n with expectation value k

    CURRENTLY A SET OF HARD CODED EXPRESSIONS! VERY FRAGILE!
    --> would be *much* better if we could do this algorithmically
    """
    if n == 0:
        return 1
    elif n == 1:
        return k
    elif n == 2:
        return k**2 + k
    elif n == 3:
        return k**3 + 3*k**2 + k
    elif n == 4:
        return k**4 + 6*k**3 + 7*k**2 + k
    elif n == 5:
        return k**5 + 10*k**4 + 25*k**3 + 15*k**2 + k
    elif n == 6:
        return k**6 + 15*k**5 + 65*k**4 + 90*k**3 + 31*k**2 + k
    elif n == 7:
        return k**7 + 21*k**6 + 140*k**5 + 350*k**4 + 301*k**3 + 63*k**2 + k
    elif n == 8:
        # fixed typo: the original read 1050*k*85 instead of 1050*k**5
        return k**8 + 28*k**7 + 266*k**6 + 1050*k**5 + 1701*k**4 + 966*k**3 + 127*k**2 + k
    else:
        raise NotImplementedError('currently only support n<=8')
d2af07d550b0cf6ac9a410296b4ec12c78cc1505
22,977
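The docstring above wishes for an algorithmic version; a sketch of one (ours, not the author's) uses the Touchard-polynomial identity E[X**n] = sum_j S(n, j) * k**j, where S(n, j) are Stirling numbers of the second kind:

def poisson_moment_general(k, n):
    # Build S(i, j) with the standard recurrence
    # S(i, j) = j*S(i-1, j) + S(i-1, j-1), S(0, 0) = 1.
    S = [[0] * (n + 1) for _ in range(n + 1)]
    S[0][0] = 1
    for i in range(1, n + 1):
        for j in range(1, i + 1):
            S[i][j] = j * S[i - 1][j] + S[i - 1][j - 1]
    return sum(S[n][j] * k**j for j in range(n + 1))

# Agrees with the hard-coded n=4 expression:
assert poisson_moment_general(2.0, 4) == 2.0**4 + 6*2.0**3 + 7*2.0**2 + 2.0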
import numpy as np

def drug_encoder(input_smiles):
    """
    Drug Encoder

    Args:
        input_smiles: input drug sequence.
    Returns:
        v_d: padded drug sequence.
        temp_mask_d: masked drug sequence.
    """
    temp_d = drug_bpe.process_line(input_smiles).split()
    try:
        idx_d = np.asarray([drug_idx[i] for i in temp_d])
    except KeyError:  # unknown sub-word token: fall back to a single padding index
        idx_d = np.array([0])
    flag = len(idx_d)
    if flag < D_MAX:
        v_d = np.pad(idx_d, (0, D_MAX - flag), 'constant', constant_values=0)
        temp_mask_d = [1] * flag + [0] * (D_MAX - flag)
    else:
        v_d = idx_d[:D_MAX]
        temp_mask_d = [1] * D_MAX
    return v_d, np.asarray(temp_mask_d)
79f0e391e5cd72f981b9580d105ac41cc53d5f63
22,978
def optimise_acqu_func_mledr(acqu_func, bounds, X_ob, func_gradient=True, gridSize=10000, n_start=5): """ Optimise acquisition function built on GP- model with learning dr :param acqu_func: acquisition function :param bounds: input space bounds :param X_ob: observed input data :param func_gradient: whether to use the acquisition function gradient in optimisation :param gridSize: random grid size :param n_start: the top n_start points in the random grid search from which we do gradient-based local optimisation :return np.array([opt_location]): global optimum input :return f_opt: global optimum """ # Turn the acquisition function to be - acqu_func for minimisation target_func = lambda x: - acqu_func._compute_acq(x) # Define a new function combingin the acquisition function and its derivative def target_func_with_gradient(x): acqu_f, dacqu_f = acqu_func._compute_acq_withGradients(x) return -acqu_f, -dacqu_f # Define bounds for the local optimisers based on the optimal dr nchannel = acqu_func.model.nchannel d = acqu_func.model.opt_dr d_vector = int(acqu_func.model.opt_dr ** 2 * nchannel) bounds = np.vstack([[-1, 1]] * d_vector) # Project X_ob to optimal dr learnt h_d = int(X_ob.shape[1] / acqu_func.model.nchannel) X_ob_d_r = downsample_projection(acqu_func.model.dim_reduction, X_ob, int(d ** 2), h_d, nchannel=nchannel, align_corners=True) # Create grid for random search but split the grid into n_batches to avoid memory overflow good_results_list = [] random_starts_candidates_list = [] n_batch = 5 gridSize_sub = int(gridSize / n_batch) for x_grid_idx in range(n_batch): Xgrid_sub = np.tile(bounds[:, 0], (gridSize_sub, 1)) + np.tile((bounds[:, 1] - bounds[:, 0]), (gridSize_sub, 1)) * np.random.rand(gridSize_sub, d_vector) if x_grid_idx == 0: Xgrid_sub = np.vstack((Xgrid_sub, X_ob_d_r)) results = target_func(Xgrid_sub) top_candidates_sub = results.flatten().argsort()[:5] # give the smallest n_start values in the ascending order random_starts_candidates = Xgrid_sub[top_candidates_sub] good_results = results[top_candidates_sub] random_starts_candidates_list.append(random_starts_candidates) good_results_list.append(good_results) # Find the top n_start candidates from random grid search to perform local optimisation results = np.vstack(good_results_list) X_random_starts = np.vstack(random_starts_candidates_list) top_candidates_idx = results.flatten().argsort()[ :n_start] # give the smallest n_start values in the ascending order random_starts = X_random_starts[top_candidates_idx] f_min = results[top_candidates_idx[0]] opt_location = random_starts[0] # Perform multi-start gradient-based optimisation for random_start in random_starts: if func_gradient: x, f_at_x, info = fmin_l_bfgs_b(target_func_with_gradient, random_start, bounds=bounds, approx_grad=False, maxiter=5000) else: x, f_at_x, info = fmin_l_bfgs_b(target_func, random_start, bounds=bounds, approx_grad=True, maxiter=5000) if f_at_x < f_min: f_min = f_at_x opt_location = x f_opt = -f_min return np.array([opt_location]), f_opt
8a59f9f3c4b7b55a4ae56da93eed1c9820363ef6
22,980
def get_path_from_pc_name(pc_name): """Find out path of a template Parameters ---------- pc_name : string Name of template. Returns ------- tplPath : string Path of template """ tplPath = pc_name + '.json' # change path to template if in subdir for i in pcTplEnv.list_templates(filter_func=filter_func): if i.split('/')[-1] == tplPath: tplPath = i return tplPath
f2ee20f9f8728d672bb0658e80f9f04f2c9f0c11
22,981
def eq(*, alpha=None, omega):
    """Define dyadic comparison function equal to.

    Dyadic case:
        3 = 2 3 4
    0 1 0
    """
    return int(alpha == omega)
1f8d826711e9d24a3b05de5f42a99b36744f4f38
22,982
import calendar def generate_days(year): """Generates all tuples (YYYY, MM, DD) of days in a year """ cal = calendar.Calendar() days = [] for m in range(1,13): days.extend(list(cal.itermonthdays3(year, m))) days = [d for d in set(days) if d[0] == year] days.sort() return days
6d87910572957d21c9d5df668dfb5f2d02627817
22,983
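A quick usage check for generate_days:

days = generate_days(2024)
print(len(days))            # 366: 2024 is a leap year
print(days[0], days[-1])    # (2024, 1, 1) (2024, 12, 31)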
import requests
import json

def nounClassifier(word):
    """Classifies a noun as actor or object

    Parameters
    ----------
    word : str
        Lemmatized noun to be classified (case-insensitive).
    """
    word = word.lower()
    response_raw = requests.get(
        f"{API_URL}senses/search?lemma={word}&&&partOfSpeech=noun&&&&&&"
    )
    response = json.loads(response_raw.content)
    response = [
        item for item in response["content"]
        if item["lemma"]["word"].lower() == word
    ]
    if len(response) == 0:
        return None
    if any(
        item["domain"]["name"][item["domain"]["name"].rfind("_") + 1:]
        in ACTOR_DOMAINS
        for item in response
    ):
        return IGElement.ACTOR
    else:
        return IGElement.OBJECT
aef33226b956a0d7b9fcb8b1b751d5a11e9136c4
22,984
def svn_repos_post_lock_hook(*args): """svn_repos_post_lock_hook(svn_repos_t repos, apr_pool_t pool) -> char""" return _repos.svn_repos_post_lock_hook(*args)
56bdafc41fa76d2a4d2f5e6b213fa86e8ca9416b
22,985
def libdmtx_function(fname, restype, *args): """Returns a foreign function exported by `libdmtx`. Args: fname (:obj:`str`): Name of the exported function as string. restype (:obj:): Return type - one of the `ctypes` primitive C data types. *args: Arguments - a sequence of `ctypes` primitive C data types. Returns: cddl.CFunctionType: A wrapper around the function. """ prototype = CFUNCTYPE(restype, *args) return prototype((fname, load_libdmtx()))
ed5f39d435aae453a0aeb8855fc0a21e1db334b8
22,986
from typing import Any async def async_get_config_entry_diagnostics( hass: HomeAssistant, config_entry: ConfigEntry ) -> dict[str, Any]: """Return diagnostics for a config entry.""" control_unit: ControlUnit = hass.data[DOMAIN][config_entry.entry_id] diag: dict[str, Any] = { "config": async_redact_data(config_entry.as_dict(), REDACT_CONFIG) } platform_stats, device_types = control_unit.async_get_entity_stats() diag["platform_stats"] = platform_stats diag["devices"] = device_types return diag
ac4495e49745f9211a32cfaf3a15c03203282e50
22,987
async def instance_set_name_inurl(cluster_id: str, vm_uuid: str, new_name: str): """ Set Instance (VM/Template) Name """ try: try: session = create_session( _id=cluster_id, get_xen_clusters=Settings.get_xen_clusters() ) except KeyError as key_error: raise HTTPException( status_code=400, detail=f"{key_error} is not a valid path" ) _vm: VM = VM.get_by_uuid(session=session, uuid=vm_uuid) if _vm is not None: ret = dict(success=_vm.set_name(new_name)) else: ret = dict(success=False) session.xenapi.session.logout() return ret except Fault as xml_rpc_error: raise HTTPException( status_code=int(xml_rpc_error.faultCode), detail=xml_rpc_error.faultString, ) except RemoteDisconnected as rd_error: raise HTTPException(status_code=500, detail=rd_error.strerror)
3c942af4a57dad0beaef6f629d4b7900421eadbd
22,988
def key_released(key): """ Takes a key, that's either a keycode or a character, and says if it was released this frame. """ keycode = _to_keycode(key) return (keycode not in current_frame_held_buttons) and \ (keycode in last_frame_held_buttons)
4cb66ba924aae909f20db59eb2aef0ccc77d2860
22,989
def trim_snakes_old(lcs,ref,Nb,Ne,dat,Mb,Me): """Previously found matches can cause problems if they are not optimal. In such a case sticking with the matches as found prevents subsequent more advanced diff routines from recovering from an early sub-optimal choice. To counter this all snakes and pseudo-snakes are trimmed down such that they involve whole lines only. The process is: 1. Merge subsequent snakes to build a list in which each pair of snakes is separated by a non-empty section of mismatching tokens. 2. Trim each snake by increasing the starting point to the first token on the next line, and decreasing the end point to the last token on the previous line. If as a result the begin token exceeds the end token then eliminate the snake. The routine returns the revised snake list. """ # # Collapse the snake list by merging adjacent snakes. # nsnake = len(lcs) isnake = 0 if nsnake > 0: lcs_tmp = [] (xi1,yj1,xi2,yj2,itype) = lcs[isnake] isnake = isnake + 1 while (isnake < nsnake): (xi3,yj3,xi4,yj4,itype) = lcs[isnake] isnake = isnake + 1 if (xi2+1 == xi3 and yj2+1 == yj3): # # This snake continues from the previous one so merge the two. # xi2 = xi4 yj2 = yj4 # else: # # This snake is separated from the previous one so store the # previous one and restart the merge procedure. # lcs_tmp.append((xi1,yj1,xi2,yj2,itype)) xi1 = xi3 yj1 = yj3 xi2 = xi4 yj2 = yj4 # # Store the last snake. # lcs_tmp.append((xi1,yj1,xi2,yj2,itype)) lcs = lcs_tmp # # Trim the snakes to precisely matching lines. # nsnake = len(lcs) isnake = 0 lcs_tmp = [] txi = 0 tyj = 0 while (isnake < nsnake): (xi1,yj1,xi2,yj2,itype) = lcs[isnake] isnake = isnake + 1 # # Move the starting point to the first token on the next line unless # the token is the first token on the current line. # lxi1 = toldiff_tokens.tokenno2lineno(dat,xi1) txi1 = toldiff_tokens.lineno2tokenno(dat,lxi1) lyj1 = toldiff_tokens.tokenno2lineno(ref,yj1) tyj1 = toldiff_tokens.lineno2tokenno(ref,lyj1) if txi1 != xi1 or tyj1 != yj1: xi1 = toldiff_tokens.lineno2tokenno(dat,lxi1+1) yj1 = toldiff_tokens.lineno2tokenno(ref,lyj1+1) # # Move the end point to the last token on the previous line unless # the token is the last token on the current line. # lxi2 = toldiff_tokens.tokenno2lineno(dat,xi2) txi2 = toldiff_tokens.lineno2tokenno(dat,lxi2+1)-1 lyj2 = toldiff_tokens.tokenno2lineno(ref,yj2) tyj2 = toldiff_tokens.lineno2tokenno(ref,lyj2+1)-1 if txi2 != xi2 or tyj2 != yj2: xi2 = toldiff_tokens.lineno2tokenno(dat,lxi2)-1 yj2 = toldiff_tokens.lineno2tokenno(ref,lyj2)-1 if xi1-1 <= xi2 and yj1-1 <= yj2 and (xi1 > txi or yj1 > tyj): # # There is a non-empty snake remaining so store it. # lcs_tmp.append((xi1,yj1,xi2,yj2,itype)) txi = max(xi1,xi2) tyj = max(yj1,yj2) # lcs = lcs_tmp return lcs
0bf078ca2198bcf0c25e8a925bd0c096cafa4797
22,990
import asyncio

async def start(actual_coroutine):
    """
    Start the testing coroutine and wait 2 seconds for it to complete.

    :raises asyncio.CancelledError when the coroutine fails to finish
        its work in 2 seconds.
    :returns: the return value of the actual_coroutine.
    :rtype: Any
    """
    try:
        return await asyncio.wait_for(actual_coroutine, 2)
    except asyncio.CancelledError:
        pass
26e3737091ca798dbf8c0f6f2a18a1de4b0ec42b
22,991
def get_node(path): """Returns a :class:`Node` instance at ``path`` (relative to the current site) or ``None``.""" try: current_site = Site.objects.get_current() except Site.DoesNotExist: current_site = None trailing_slash = False if path[-1] == '/': trailing_slash = True try: node, subpath = Node.objects.get_with_path(path, root=getattr(current_site, 'root_node', None), absolute_result=False) except Node.DoesNotExist: return None if subpath is None: subpath = "" subpath = "/" + subpath if trailing_slash and subpath[-1] != "/": subpath += "/" node._path = path node._subpath = subpath return node
516460d05df4139ce5354f2c3ef5cf948d4b8213
22,992
from datetime import datetime def new_post(blog_id, username, password, post, publish): """ metaWeblog.newPost(blog_id, username, password, post, publish) => post_id """ user = authenticate(username, password, 'zinnia.add_entry') if post.get('dateCreated'): creation_date = datetime.strptime( post['dateCreated'].value[:18], '%Y-%m-%dT%H:%M:%S') if settings.USE_TZ: creation_date = timezone.make_aware( creation_date, timezone.utc) else: creation_date = timezone.now() entry_dict = {'title': post['title'], 'content': post['description'], 'excerpt': post.get('mt_excerpt', ''), 'publication_date': creation_date, 'creation_date': creation_date, 'last_update': creation_date, 'comment_enabled': post.get('mt_allow_comments', 1) == 1, 'pingback_enabled': post.get('mt_allow_pings', 1) == 1, 'trackback_enabled': post.get('mt_allow_pings', 1) == 1, 'featured': post.get('sticky', 0) == 1, 'tags': 'mt_keywords' in post and post['mt_keywords'] or '', 'slug': 'wp_slug' in post and post['wp_slug'] or slugify( post['title']), 'password': post.get('wp_password', '')} if user.has_perm('zinnia.can_change_status'): entry_dict['status'] = publish and PUBLISHED or DRAFT entry = Entry.objects.create(**entry_dict) author = user if 'wp_author_id' in post and user.has_perm('zinnia.can_change_author'): if int(post['wp_author_id']) != user.pk: author = Author.objects.get(pk=post['wp_author_id']) entry.authors.add(author) entry.sites.add(Site.objects.get_current()) if 'categories' in post: entry.categories.add(*[ Category.objects.get_or_create( title=cat, slug=slugify(cat))[0] for cat in post['categories']]) return entry.pk
4bdd8464458bef5797854776222a178e891d6346
22,993
def upload(userid, filedata): """ Creates a preview-size copy of an uploaded image file for a new avatar selection file. """ if filedata: media_item = media.make_resized_media_item(filedata, (600, 500), 'FileType') orm.UserMediaLink.make_or_replace_link(userid, 'avatar-source', media_item) else: orm.UserMediaLink.clear_link(userid, 'avatar') return bool(filedata)
3ffd3d5a26c35f20e5a3885ca597d8c0182ebc8a
22,994
def time_dconv_bn_nolinear(nb_filter, nb_row, nb_col, stride=(2, 2), activation="relu"):
    """
    Create a time-distributed deconvolution + batch-norm layer for decoders.

    Parameters:
    ---------
    nb_filter : int
        number of filters to use in the convolution layer.
    nb_row : int
        number of rows of the kernel
    nb_col : int
        number of columns of the kernel
    stride : tuple of int
        size of stride

    Returns:
    ---------
    dconv_bn
    """
    def _dconv_bn(x):
        x = TimeDistributed(UnPooling2D(size=stride))(x)
        x = TimeDistributed(ReflectionPadding2D(padding=(int(nb_row/2), int(nb_col/2))))(x)
        x = TimeDistributed(Conv2D(nb_filter, (nb_row, nb_col), padding='valid',
                                   kernel_regularizer=regularizers.l2(reg_weights)))(x)
        x = TimeDistributed(BatchNormalization())(x)
        x = TimeDistributed(Activation(activation))(x)
        return x
    return _dconv_bn
016082d3c09388a4ff9f8add5cb84a63a65775e8
22,996
import random def ___generate_random_row_major_GM___(i, j, s=None): """Make a random row major sparse matrix of shape (i,j) at sparsity=s. :param i: :param j: :param s: :return: """ if s is None: s = random.uniform(0, 0.1) if s < 0.02: s = 0 if rAnk == mAster_rank: random_list = random.sample(range(0, i), i) distribution = [i // sIze + (1 if x < i % sIze else 0) for x in range(sIze)] no_empty_rows = list() _ = 0 for r in range(sIze): no_empty_rows.append(random_list[_:_+distribution[r]]) _ += distribution[r] else: no_empty_rows = None no_empty_rows = cOmm.scatter(no_empty_rows, root=mAster_rank) _ = spspa.random(i, j, s, format='csr') A = spspa.lil_matrix((i,j)) A[no_empty_rows,:] = _[no_empty_rows,:] A = A.tocsr() A = GlobalMatrix(A) A.IS.regularly_distributed = 'row' A.___PRIVATE_self_regularity_checker___() return A
59eca064d240dc03fdbdc8d1807dbbbb996239d4
22,997
from typing import Tuple from typing import Any def parse_tuple(value: Tuple[Any, ...]) -> RGBA: """ Parse a tuple or list as a color. """ if len(value) == 3: r, g, b = [parse_color_value(v) for v in value] return RGBA(r, g, b, None) elif len(value) == 4: r, g, b = [parse_color_value(v) for v in value[:3]] return RGBA(r, g, b, parse_float_alpha(value[3])) else: raise ColorError(reason='tuples must have length 3 or 4')
0766bd7189c5e0cd383d94944dacecd5fbef1320
22,998
import pathlib from typing import Optional def find_path( start_path: pathlib.Path = pathlib.Path("."), ) -> Optional[pathlib.Path]: """Traverse the file system looking for the config file .craftier.ini. It will stop earlier at the user's home directory, if it encounters a Git or Mercurial directory, or if it traversed too deep. """ home = pathlib.Path.home() path = start_path.resolve() for path in [path, *path.parents][:_MAX_SEARCH_DEPTH]: config_file = path / CONFIG_FILENAME if config_file.is_file(): return config_file for stop_dir in _STOP_SEARCH_ON_DIRS: if (path / stop_dir).is_dir(): return None if path == home: return None return None
00fbfabc8e0c6dd3c23b190e6278f70af566b25f
22,999
def crop_point_data_to_base_raster(raster_name, raster_directory, csv_file, EPSG_code = 0): """ This function create a new csv file cropped to the base raster. It can lower the processing time if your point data is on a significantly larger area than the base raster. """ print("ok let me load your dataset and your hdr file") # Read the file df = bamboo_bears.read_csv(csv_file) # Read and sort the csv_info with open(raster_directory+raster_name+".hdr","r") as hdr_file: print("I got these") for line in hdr_file: if(line[0:8] == "map info"): info = line[12:-2] info = info.split(",") x_min = float(info[3]) y_max = float(info[4]) x_res = float(info[5]) y_res = float(info[6]) utm_zone = int(info[7]) utm_hemisphere = info[8] else: if(line[0:7] == "samples"): num_col = line.replace(" ","").split("=")[1] print("there are " + str(num_col) + " columns") num_col = int(num_col) else: if(line[0:5] == "lines"): num_lines = line.replace(" ","").split("=")[1] print("there are " + str(num_lines) + " lines") num_lines = int(num_lines) # Now I calculate the size of the dem x_max = x_min + x_res*num_col y_min = y_max - y_res*num_lines # Conversion UTM to lat/long inProj = Proj(init='epsg:'+str(EPSG_code)) outProj = Proj(init='epsg:4326') long_min,lat_min = transform(inProj,outProj,x_min,y_min) long_max,lat_max = transform(inProj,outProj,x_max,y_max) # data sorting df = df[df.longitude<long_max] df = df[df.latitude<lat_max] df = df[df.latitude>lat_min] df = df[df.longitude>long_min] df.to_csv(csv_file[:-4]+"_"+raster_name+"_filtered.csv", index = False) #return the name of the new csv file return csv_file[:-4]+"_"+raster_name+"_filtered.csv"
0392d9633381948ef338c3233ed2f4b81d520678
23,000
def generate_schedule_report_data(pools_info, pools_allocated_mem): """ Generate the schedule report data. :param pools_info: (dict) The information about the configuration and statistics of the pool participating in the scheduling. :param pools_allocated_mem: (dict) The allocated memory of the pool participating in the scheduling. :return: (DataFrame) A DataFrame object of report data. """ columns = [ReportColumn.RESOURCE_POOL, ReportColumn.MEM_BEFORE_SCHEDULE, ReportColumn.MEM_AFTER_SCHEDULE, ReportColumn.MEM_MOVED, ReportColumn.MEM_USED, ReportColumn.MEM_LACK, ReportColumn.QUERY_NUMBER, ReportColumn.WORK_TIME, ReportColumn.QUEUED_TIME, ReportColumn.WEIGHT, ReportColumn.MIN_MEM, ReportColumn.MAX_MEM] data = [[pool_info.pool_name, int(convert_mem_unit(pool_info.current_mem)), int(convert_mem_unit(pools_allocated_mem.get(pool_info.pool_name, pool_info.current_mem))), int(convert_mem_unit(pools_allocated_mem.get(pool_info.pool_name, pool_info.current_mem) - pool_info.current_mem)), int(convert_mem_unit(pool_info.pool_stat.used_mem_avg)) \ if int(convert_mem_unit(pool_info.pool_stat.wait_mem_avg)) == 0 \ else int(convert_mem_unit(pool_info.current_mem)), int(convert_mem_unit(pool_info.pool_stat.wait_mem_avg)), pool_info.pool_stat.query_total, int(pool_info.pool_stat.run_secs), int(pool_info.pool_stat.wait_secs), pool_info.weight, int(convert_mem_unit(pool_info.min_mem)), int(convert_mem_unit(pool_info.max_mem))] for pool_info in list(pools_info.values())] return pd.DataFrame(data, columns=columns)
e9fb9f517c1fe29d9f4c867b416969374f4acd36
23,001
def create_feature_rule_json(device, feature="foo", rule="json"): """Creates a Feature/Rule Mapping and Returns the rule.""" feature_obj, _ = ComplianceFeature.objects.get_or_create(slug=feature, name=feature) rule = ComplianceRule( feature=feature_obj, platform=device.platform, config_type=ComplianceRuleTypeChoice.TYPE_JSON, config_ordered=False, ) rule.save() return rule
985dfccab39c54478ba36f10020779dbd1b6b466
23,002
def default_sv2_sciencemask():
    """Returns default mask of bits for science targets in the SV2 survey.
    """
    sciencemask = 0
    sciencemask |= sv2_mask["LRG"].mask
    sciencemask |= sv2_mask["ELG"].mask
    sciencemask |= sv2_mask["QSO"].mask
    sciencemask |= sv2_mask["BGS_ANY"].mask
    sciencemask |= sv2_mask["MWS_ANY"].mask
    sciencemask |= sv2_mask["SCND_ANY"].mask
    return sciencemask
cf6b45d069ab8538350d35ce28d8fae4ed6525b2
23,003
import numpy as np

def solver_softmax(K, R):
    """
    K = the number of arms (domains)
    R = the sequence of past rewards
    """
    softmax = np.zeros(K, dtype=float)
    for i, r in R.items():
        softmax[i] = np.mean(r)
    softmax = np.exp(softmax) / np.exp(softmax).sum()
    si = np.random.choice(np.arange(0, K, 1), size=1, p=softmax)[0]
    index = {i: 0.0 for i in range(K)}
    index[si] = 1.0
    return index
3ac8984f70c8594f48b00df4d9b15e69dad416ba
23,004
def mapview(request): """Map view.""" context = basecontext(request, 'map') return render(request, 'map.html', context=context)
9c03377c3d047b1672c4ac1972e5552ecdc7488a
23,005
def adapt_coastdat_weather_to_pvlib(weather, loc): """ Adapt the coastdat weather data sets to the needs of the pvlib. Parameters ---------- weather : pandas.DataFrame Coastdat2 weather data set. loc : pvlib.location.Location The coordinates of the weather data point. Returns ------- pandas.DataFrame : Adapted weather data set. Examples -------- >>> cd_id=1132101 >>> cd_weather=fetch_coastdat_weather(2014, cd_id) >>> c=fetch_data_coordinates_by_id(cd_id) >>> location=pvlib.location.Location(**getattr(c, '_asdict')()) >>> pv_weather=adapt_coastdat_weather_to_pvlib(cd_weather, location) >>> 'ghi' in cd_weather.columns False >>> 'ghi' in pv_weather.columns True """ w = pd.DataFrame(weather.copy()) w["temp_air"] = w.temp_air - 273.15 w["ghi"] = w.dirhi + w.dhi clearskydni = loc.get_clearsky(w.index).dni w["dni"] = pvlib.irradiance.dni( w["ghi"], w["dhi"], pvlib.solarposition.get_solarposition( w.index, loc.latitude, loc.longitude ).zenith, clearsky_dni=clearskydni, ) return w
01a7c4340ed2542bb2fe624d6a02e4c82f3ff984
23,006
def bprop_distribute(arr, shp, out, dout): """Backpropagator for primitive `distribute`.""" return (array_reduce(scalar_add, dout, shape(arr)), zeros_like(shp))
5bfd9f1e6ec3b50e4fd13a3a26466ee57e7f084e
23,007
def ids_to_non_bilu_label_mapping(labelset: LabelSet) -> BiluMappings:
    """Mapping from ids to BILU and non-BILU labels.

    This is used to collapse BILU labels back to regular labels."""
    target_names = list(labelset["ids_to_label"].values())
    wo_bilu = [bilu_label.split("-")[-1] for bilu_label in target_names]
    non_bilu_mapping = bilu_to_non_bilu(wo_bilu)
    BiluMappings.non_bilu_label_to_bilu_ids = {}
    BiluMappings.non_bilu_label_to_id = {}
    for target_name, labels_list in non_bilu_mapping.items():
        # e.g. 'upper_bound': ([1, 2, 3, 4], 1)
        BiluMappings.non_bilu_label_to_bilu_ids[target_name] = labels_list, labels_list[0]
        # e.g. 'upper_bound': 1
        BiluMappings.non_bilu_label_to_id[target_name] = labels_list[0]
    return BiluMappings
ed6b42784661a7db693a1ea5ba65e9a1f830a46a
23,008
def generate_input_types(): """ Define the different input types that are used in the factory :return: list of items """ input_types = ["Angle_irons", "Tubes", "Channels", "Mig_wire", "Argon_gas", "Galvanised_sheets", "Budget_locks", "Welding_rods", "Body_filler", "Grinding_discs", "Drill_bits", "Primer", "Paints", "Thinner", "Sand_paper", "Masking_tapes", "Carpet", "Pop_rivets", "Electrical_wires", "Bulbs", "Switch", "Insulation_tapes", "Fasteners", "Adhesives", "Reflectors", "Accessories", "Rubbers", "Aluminum_mouldings", "Glasses", "Window_locks"] return input_types
d9e10624daaf5dae92f15512c9b19c47af002139
23,009
import numpy as np

def ase_tile(cell, tmat):
    """Create supercell from primitive cell and tiling matrix

    Args:
      cell (pyscf.Cell): cell object
      tmat (np.array): 3x3 tiling matrix e.g. 2*np.eye(3)
    Return:
      pyscf.Cell: supercell
    """
    try:
        # the import belongs inside the try block; in the original the
        # `try:` was left with no body, which is a syntax error
        from qharv.inspect.axes_elem_pos import ase_tile as atile
    except ImportError:
        msg = 'tiling with non-diagonal matrix require the "ase" package'
        raise RuntimeError(msg)
    # get crystal from cell object
    axes = cell.lattice_vectors()
    elem = [atom[0] for atom in cell._atom]
    pos = cell.atom_coords()
    axes1, elem1, pos1 = atile(axes, elem, pos, tmat)
    # re-make cell object
    cell1 = cell.copy()
    cell1.atom = list(zip(elem1, pos1))
    cell1.a = axes1
    # !!!! how to change mesh ????
    ncopy = np.diag(tmat)
    cell1.mesh = np.array([ncopy[0]*cell.mesh[0],
                           ncopy[1]*cell.mesh[1],
                           ncopy[2]*cell.mesh[2]])
    cell1.build(False, False, verbose=0)
    cell1.verbose = cell.verbose
    return cell1
d37d5b5d2cab42d10e7495724bd5cba4391c71e4
23,010
from lxml import etree  # assumed: the snippet relies on an lxml/ElementTree-style `etree`

def parse_chat_logs(input_path, user, self):
    """
    Get messages from a person, or between that person and yourself.
    "self" does not necessarily have to be your name.

    Args:
        input_path (str): Path to chat log HTML file
        user (str): Full name of person, as appears in Messenger app
        self (str): Your name, as appears in Messenger app
    Returns:
        list[str]: Each element is a message, i.e. what gets sent when the
            enter key is pressed
    """
    data = []
    current_user = None
    user_found = False
    skip_thread = False
    for element in etree.parse(input_path).iter():
        tag = element.tag
        content = element.text
        cls = element.get("class")
        if tag == "div" and cls == "thread":
            # Do not parse threads with more than two people
            skip_thread = content.count(",") > 1
            if user_found:
                user_found = False
        elif tag == "span" and cls == "user" and not skip_thread:
            current_user = content
            if current_user == user:
                user_found = True
        elif tag == "p" and not skip_thread:
            if (current_user == user) or (current_user == self and user_found):
                data.append(content)
    return data
b0d9a19d7f27589dac7757539c9d1595150ec0f4
23,012
def pressure_correction(pressure, rigidity): """ function to get pressure correction factors, given a pressure time series and rigidity value for the station :param pressure: time series of pressure values over the time of the data observations :param rigidity: cut-off rigidity of the station making the observations :return: series of correction factors """ p_0 = np.nanmean(pressure) pressure_diff = pressure - p_0 # g cm^-2. See Desilets & Zreda 2003 mass_attenuation_length = attenuation_length(p_0, rigidity) exponent = pressure_diff * mass_attenuation_length pressure_corr = np.exp(exponent) return pressure_corr
9a1baeacc7c954f8825dcd279518357534d84a06
23,013
def prepare_data(song: dict) -> dict:
    """
    Prepares song data for database insertion to cut down on duplicates
    :param song: Song data
    :return: The song data
    """
    song['artist'] = song['artist'].upper().strip()
    song['title'] = song['title'].upper().strip()
    return song
f8f8c9a3a0fe510cb3fb2e7d6d5bd361721337e7
23,016
def com(struct): """ Calculates center of mass of the system. """ geo_array = struct.get_geo_array() element_list = struct.geometry['element'] mass = np.array([atomic_masses_iupac2016[atomic_numbers[x]] for x in element_list]).reshape(-1) total = np.sum(mass) com = np.sum(geo_array*mass[:,None], axis=0) com = com / total return com
239ff2d153739c80f6a4f723fc8060d7418a4862
23,017
def distance_matrix(values, metric): """Generate a matrix of distances based on the `metric` calculation. :param values: list of sequences, e.g. list of strings, list of tuples :param metric: function (value, value) -> number between 0.0 and 1.0""" matrix = [] progress = ProgressTracker(len(values)) for lidx, left in enumerate(values): progress.tick(lidx) row = [] for right in values: row.append(metric(left, right)) matrix.append(row) return np.array(matrix)
339adc59d3b6198d9bc55d7c6504c5489e7770b2
23,018
import struct def _Pack(content, offset, format_string, values): """Pack values to the content at the offset. Args: content: String to be packed. offset: Offset from the beginning of the file. format_string: Format string of struct module. values: Values to struct.pack. Returns: Updated content. """ size = struct.calcsize(format_string) return ''.join([content[:offset], struct.pack(format_string, *values), content[offset + size:]])
c164298e1e8963b20cfabcd38f3d8e44722751ae
23,019
import math
import numpy as np

def get_rotation_matrix(orientation):
    """
    Get the rotation matrix for a rotation around the x axis of n radians

    Args:
        - (float) orientation in radian
    Return:
        - (np.array) rotation matrix for a rotation around the x axis
    """
    rotation_matrix = np.array(
        [[1, 0, 0],
         [0, math.cos(orientation), -math.sin(orientation)],
         [0, math.sin(orientation), math.cos(orientation)]])
    return rotation_matrix
0f795c974599382039106f28f20c4c48cdd77bb6
23,020
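A small sanity check: a rotation of +pi/2 about the x axis should carry the y axis onto the z axis.

import numpy as np

R = get_rotation_matrix(np.pi / 2)
print(R @ np.array([0.0, 1.0, 0.0]))  # ~[0., 0., 1.]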
def machinesize(humansize): """convert human-size string to machine-size""" if humansize == UNKNOWN_SIZE: return 0 try: size_str, size_unit = humansize.split(" ") except AttributeError: return float(humansize) unit_converter = { 'Byte': 0, 'Bytes': 0, 'kB': 1, 'MB': 2, 'GB': 3, 'TB': 4, 'PB': 5 } machinesize = float(size_str) * (1000 ** unit_converter[size_unit]) return machinesize
8694d6ac3b2aa1b6624d2fea7a8ce4544f713c36
23,021
import networkx import torch def generate_erdos_renyi_netx(p, N): """ Generate random Erdos Renyi graph """ g = networkx.erdos_renyi_graph(N, p) W = networkx.adjacency_matrix(g).todense() return g, torch.as_tensor(W, dtype=torch.float)
fbb8e293a1b35958301c2e376a03c30012b0c33b
23,022
def kmc_algorithm(process_list):
    """
    Chooses a process from process_list using the BKL kinetic Monte Carlo
    algorithm and associates a time with it. The rate for each process is
    taken from the process itself (get_rate_constant()), so the i-th rate
    corresponds to the i-th process. The algorithm uses 2 random numbers:
    one to choose the process and the other for the time; each is drawn in
    an independent function.

    :param process_list: List of elements dict(center, process, new molecule).
    :return: chosen_process: the chosen process and the new molecule affected
             time: the duration of the process
    """
    rates_list = [proc.get_rate_constant() for proc in process_list]
    process_index = select_process(rates_list)
    chosen_process = process_list[process_index]
    time = time_advance(rates_list)
    return chosen_process, time
5812498f83eede2f6de6f669bd87312705c13be3
23,023
import numpy as np

def __matlab_round(x: float = None) -> int:
    """Workaround to cope with the rounding differences between MATLAB and Python"""
    if x - np.floor(x) < 0.5:
        return int(np.floor(x))
    else:
        return int(np.ceil(x))
d24298c9c072fc83a531fcd498f81c715accf229
23,024
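A short demonstration of the difference being worked around: Python 3's round uses banker's rounding (ties to even), while MATLAB rounds halves away from zero, which the function above reproduces for positive x.

for x in (0.5, 1.5, 2.5):
    print(x, round(x), __matlab_round(x))
# 0.5 0 1
# 1.5 2 2
# 2.5 2 3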
import xarray as xr

def rayleightest(circ_data, dim='time'):
    """Returns the p-value for the Rayleigh test of uniformity

    This test is used to identify a non-uniform distribution, i.e. it is
    designed for detecting a unimodal deviation from uniformity. More
    precisely, it assumes the following hypotheses:
    - H0 (null hypothesis): The population is distributed uniformly around the circle.
    - H1 (alternative hypothesis): The population is not distributed uniformly around the circle.

    Parameters
    ----------
    circ_data : xarray DataArray
        circular data [radian]
    dim : str, optional
        name of the core dimension (the default is 'time')

    Returns
    -------
    xarray DataArray
        p-value
    """
    p_value = xr.apply_ufunc(_rayleightest, circ_data,
                             # kwargs={'weights': weights},  # weights support is disabled
                             input_core_dims=[[dim]],
                             dask='parallelized',
                             output_dtypes=[float])
    p_value.name = 'rayleigh_p'
    p_value.attrs.update(unit='', description='p-value for rayleigh test of uniformity')
    return p_value
74342adefe71f1e3193d52af6f716f20c538848f
23,025
from typing import Union from pathlib import Path import yaml def load_cfg(cfg_file: Union[str, Path]) -> dict: """Load the PCC algs config file in YAML format with custom tag !join. Parameters ---------- cfg_file : `Union[str, Path]` The YAML config file. Returns ------- `dict` A dictionary object loaded from the YAML config file. """ # [ref.] https://stackoverflow.com/a/23212524 ## define custom tag handler def join(loader, node): seq = loader.construct_sequence(node) return ''.join([str(i) for i in seq]) ## register the tag handler yaml.add_constructor('!join', join) with open(cfg_file, 'r') as f: cfg = yaml.load(f, Loader=yaml.FullLoader) return cfg
c9137c5052adf8fa62913c352df2bfe9e79fc7ce
23,026
def pdist_triu(x, f=None): """Pairwise distance. Arguments: x: A set of points. shape=(n,d) f (optional): A kernel function that computes the similarity or dissimilarity between two vectors. The function must accept two matrices with shape=(m,d). Returns: Upper triangular pairwise distances in "unrolled" form. """ n = x.shape[0] if f is None: # Use Euclidean distance. def f(x, y): return np.sqrt(np.sum((x - y)**2, axis=1)) # Determine indices of upper triangular matrix (not including # diagonal elements). idx_upper = np.triu_indices(n, 1) return f(x[idx_upper[0]], x[idx_upper[1]])
19d8acb0b38b8dcb6b5b99a1bf7691e055c2ef6d
23,027
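A usage sketch (numpy assumed, as in the snippet): for three points the unrolled upper triangle holds d(0,1), d(0,2), d(1,2).

import numpy as np

x = np.array([[0.0], [3.0], [7.0]])
print(pdist_triu(x))  # [3. 7. 4.]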
def get_model_defaults(cls): """ This function receives a model class and returns the default values for the class in the form of a dict. If the default value is a function, the function will be executed. This is meant for simple functions such as datetime and uuid. Args: cls: (obj) : A Model class. Returns: defaults: (dict) : A dictionary of the default values. """ tmp = {} for key in cls.__dict__.keys(): col = cls.__dict__[key] if hasattr(col, "expression"): if col.expression.default is not None: arg = col.expression.default.arg if callable(arg): tmp[key] = arg(cls.db) else: tmp[key] = arg return tmp
93c29af27446c558b165159cee4bb41bbb3cad4d
23,029
def add_response_headers(headers=None): """This decorator adds the headers passed in to the response""" headers = headers or {} def decorator(f): @wraps(f) def decorated_function(*args, **kwargs): resp = make_response(f(*args, **kwargs)) h = resp.headers for header, value in headers.items(): h[header] = value return resp return decorated_function return decorator
9f26048dcff6de65d9a25ede6002c955f1551ff5
23,030
def restore(request: Request, project_id: int) -> JSONResponse: # pylint: disable=invalid-name,redefined-builtin """Restore project. Args: project_id {int}: project id Returns: starlette.responses.JSONResponse """ log_request(request, { 'project_id': project_id }) project_manager = ProjectManager() project_manager.restore(project_id) project = project_manager.get_project(project_id) return JSONResponse(project, HTTPStatus.OK)
0316132e42331ec9fe3b5c4ce73c364cc4726e2b
23,031
import numpy as np

def _broadcast_arrays(x, y):
    """Broadcast arrays."""
    # Cast inputs as numpy arrays with nonzero dimension
    x = np.atleast_1d(x)
    y = np.atleast_1d(y)
    # Get shapes
    xshape = list(x.shape)
    yshape = list(y.shape)
    # Get singletons that mimic shapes
    xones = [1] * x.ndim
    yones = [1] * y.ndim
    # Broadcast
    x = np.tile(np.reshape(x, xshape + yones), xones + yshape)
    y = np.tile(np.reshape(y, xones + yshape), xshape + yones)
    # Return broadcast arrays
    return x, y
8272e17a05803e529295ded70253b4c80615d426
23,032
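A usage sketch: a length-2 and a length-3 vector are tiled out to a common (2, 3) shape.

import numpy as np

x, y = _broadcast_arrays(np.array([1, 2]), np.array([10, 20, 30]))
print(x.shape, y.shape)  # (2, 3) (2, 3)
print(x[0], y[0])        # [1 1 1] [10 20 30]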
def Output(primitive_spec):
    """Mark a typespec as output."""
    # NOTE: `M` here is the project's signal-metadata module; the original
    # snippet's `from re import M` was spurious (re.M is the MULTILINE flag
    # and has no SignalDir attribute), so it is dropped.
    typespec = BuildTypespec(primitive_spec)
    typespec.meta.sigdir = M.SignalDir.OUTPUT
    return typespec
737262c1414e7a33480a4512a9441d1b3eef45c8
23,033
def partitionFromMask(mask): """ Return the start and end address of the first substring without wildcards """ for i in range(len(mask)): if mask[i] == '*': continue for j in range(i+1, len(mask)): if mask[j] == '*': break else: if i+1 == len(mask): j = i+1 else: j += 1 break return i, (j-1)
1b77f68a223e36e8dc9ec4b70464924d6b1dbe4a
23,035
def mask_to_bias(mask: Array, dtype: jnp.dtype) -> Array: """Converts a mask to a bias-like Array suitable for adding to other biases. Arguments: mask: <bool> array of arbitrary shape dtype: jnp.dtype, desired dtype of the returned array Returns: bias: <bool> array of the same shape as the input, with 0 in place of truthy values and -1e10 in place of falsy values of mask """ return lax.select(mask, jnp.full(mask.shape, 0).astype(dtype), jnp.full(mask.shape, -1e10).astype(dtype))
0e74765bde98fba50e224382e57acf35b7e35e55
23,037
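A usage sketch (JAX assumed, as in the snippet): the returned bias zeroes out masked positions when added to attention logits before a softmax.

import jax.numpy as jnp

mask = jnp.array([True, False, True])
bias = mask_to_bias(mask, jnp.float32)   # [0., -1e10, 0.]
logits = jnp.zeros(3) + bias
print(jnp.exp(logits) / jnp.exp(logits).sum())  # ~[0.5, 0., 0.5]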
from typing import Union
from typing import Iterable
from typing import Optional
from typing import Dict

def optimize_clustering(
    data,
    algorithm_names: Union[Iterable, str] = variables_to_optimize.keys(),
    algorithm_parameters: Optional[Dict[str, dict]] = None,
    random_search: bool = True,
    random_search_fraction: float = 0.5,
    algorithm_param_weights: Optional[dict] = None,
    algorithm_clus_kwargs: Optional[dict] = None,
    evaluation_methods: Optional[list] = None,
    gold_standard: Optional[Iterable] = None,
    metric_kwargs: Optional[dict] = None,
) -> tuple:
    """
    Runs through many clusterers and parameters to get the best clustering labels.

    Args:
        data: Dataframe with elements to cluster as index and examples as columns.
        algorithm_names: Which clusterers to try. Default is in variables_to_optimize.
            Can also put 'slow', 'fast' or 'fastest' for a subset of clusterers.
            See hypercluster.constants.speeds.
        algorithm_parameters: Dictionary of str:dict, with parameters to optimize
            for each clusterer. Ex. structure::
            {'clusterer1': {'param1': ['opt1', 'opt2', 'opt3']}}.
        random_search: Whether to search a random selection of possible parameters
            or all possibilities. Default True.
        random_search_fraction: If random_search is True, what fraction of the
            possible parameters to search, applied to all clusterers. Default 0.5.
        algorithm_param_weights: Dictionary of str: dictionaries. Ex format -
            {'clusterer_name': {'parameter_name': {'param_option_1': 0.5,
            'param_option_2': 0.5}}}.
        algorithm_clus_kwargs: Dictionary of additional kwargs per clusterer.
        evaluation_methods: Str name of evaluation metric to use. For options see
            hypercluster.categories.evaluations. Default silhouette.
        gold_standard: If using an evaluation that needs ground truth, must provide
            ground truth labels. For options see hypercluster.constants.need_ground_truth.
        metric_kwargs: Additional evaluation metric kwargs.
Returns: Best labels, dictionary of clustering evaluations, dictionary of all clustering labels """ if algorithm_param_weights is None: algorithm_param_weights = {} if algorithm_clus_kwargs is None: algorithm_clus_kwargs = {} if algorithm_parameters is None: algorithm_parameters = {} if metric_kwargs is None: metric_kwargs = {} if evaluation_methods is None: evaluation_methods = inherent_metrics if algorithm_names in list(categories.keys()): algorithm_names = categories[algorithm_names] clustering_labels = {} clustering_labels_df = pd.DataFrame() for clusterer_name in algorithm_names: label_df = ( AutoClusterer( clusterer_name=clusterer_name, params_to_optimize=algorithm_parameters.get(clusterer_name, None), random_search=random_search, random_search_fraction=random_search_fraction, param_weights=algorithm_param_weights.get(clusterer_name, None), clus_kwargs=algorithm_clus_kwargs.get(clusterer_name, None), ) .fit(data) .labels_ ) label_df.index = pd.MultiIndex.from_tuples(label_df.index) clustering_labels[clusterer_name] = label_df # Put all parameter labels into 1 for a big df label_df = label_df.transpose() cols_for_labels = label_df.index.to_frame() inds = cols_for_labels.apply( lambda row: param_delim.join( [clusterer_name] + ["%s%s%s" % (k, val_delim, v) for k, v in row.to_dict().items()] ), axis=1, ) label_df.index = inds label_df = label_df.transpose() clustering_labels_df = pd.concat( [clustering_labels_df, label_df], join="outer", axis=1 ) evaluation_results_df = pd.DataFrame({"methods": evaluation_methods}) for col in clustering_labels_df.columns: evaluation_results_df[col] = evaluation_results_df.apply( lambda row: evaluate_results( clustering_labels_df[col], method=row["methods"], data=data, gold_standard=gold_standard, metric_kwargs=metric_kwargs.get(row["methods"], None), ), axis=1, ) return evaluation_results_df, clustering_labels_df, clustering_labels
7bf38c317a17c6803a12316eaa3960bb8198d701
23,039
def sql_fingerprint(query, hide_columns=True): """ Simplify a query, taking away exact values and fields selected. Imperfect but better than super explicit, value-dependent queries. """ parsed_query = parse(query)[0] sql_recursively_simplify(parsed_query, hide_columns=hide_columns) return str(parsed_query)
985f9a5afc9a9acddece29535954f25d29580b62
23,040
def get_account(): """Return one account and cache account key for future reuse if needed""" global _account_key if _account_key: return _account_key.get() acc = Account.query().get() _account_key = acc.key return acc
31f424c1c8e642f6c423f3c0e61896be4ad3b080
23,041
from typing import Sequence
from typing import Optional
from collections import abc  # abc.Sequence lives in collections.abc; a bare `import abc` has no Sequence

def is_seq_of(
    seq: Sequence, expected_type: type, seq_type: Optional[type] = None
) -> bool:
    """Check whether it is a sequence of some type.

    Args:
        seq (Sequence): Sequence to be checked.
        expected_type (type): Expected type of sequence items.
        seq_type (type, optional): Expected sequence type.
    """
    if seq_type is None:
        exp_seq_type = abc.Sequence
    else:
        if not isinstance(seq_type, type):
            raise TypeError(f"`seq_type` must be a valid type. But got: {seq_type}.")
        exp_seq_type = seq_type
    if not isinstance(seq, exp_seq_type):
        return False
    for item in seq:
        if not isinstance(item, expected_type):
            return False
    return True
4937a23a91a507a18519109c1b473add0e263ca7
23,042
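A few usage checks for is_seq_of:

print(is_seq_of([1, 2, 3], int))               # True
print(is_seq_of([1, "a"], int))                # False: mixed item types
print(is_seq_of((1, 2), int, seq_type=list))   # False: tuple, not list
print(is_seq_of("ab", str))                    # True: str is a Sequence of str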
def mutate_single_residue(atomgroup, new_residue_name): """ Mutates the residue into new_residue_name. The only atoms retained are the backbone and CB (unless the new residue is GLY). If the original resname == new_residue_name the residue is left untouched. """ resnames = atomgroup.resnames() if len(resnames) == 1: if resnames[0] == new_residue_name: edited_atomgroup = atomgroup else: if new_residue_name == 'GLY': edited_atomgroup = select_atoms_by_name(atomgroup, ["C", "CA", "N", "O"]) else: edited_atomgroup = select_atoms_by_name(atomgroup, ["C", "CA", "N", "O", "CB"]) for t in edited_atomgroup: t.resname = new_residue_name else: edited_atomgroup = atomgroup return edited_atomgroup
89ea175809fb518d778390867cb7f311343a06cc
23,043
from typing import Dict
from typing import OrderedDict
import warnings

def future_bi_end_f30_base(s: [Dict, OrderedDict]):
    """Futures 30-minute bi (stroke) ending signal"""
    v = Factors.Other.value
    for f_ in [Freq.F30.value, Freq.F5.value, Freq.F1.value]:
        if f_ not in s['级别列表']:
            warnings.warn(f"{f_} not in {s['级别列表']}, returning Other by default")
            return v

    # long-entry factors
    # --------------------------------------------------------------------------------------------------------------
    long_opens = {
        Factors.L2A0.value: [
            [f"{Freq.F30.value}_倒1表里关系#{Signals.BD0.value}"],
        ]
    }
    for name, factors in long_opens.items():
        for factor in factors:
            if match_factor(s, factor):
                v = name

    # long-exit factors
    # --------------------------------------------------------------------------------------------------------------
    long_exits = {
        Factors.S2A0.value: [
            [f"{Freq.F30.value}_倒1表里关系#{Signals.BU0.value}"],
        ]
    }
    for name, factors in long_exits.items():
        for factor in factors:
            if match_factor(s, factor):
                v = name
    return v
3a40cf658bf09ddea2347ce190c46f84c7cc1eb2
23,044
from typing import OrderedDict def convert_pre_to_021(cfg): """Convert config standard 0.20 into 0.21 Revision 0.20 is the original standard, which lacked a revision. Variables moved from top level to inside item 'variables'. Ocean Sites nomenclature moved to CF standard vocabulary: - TEMP -> sea_water_temperature - PSAL -> sea_water_salinity """ def label(v): """Convert Ocean Sites vocabulary to CF standard names """ if v == 'PRES': return 'sea_water_pressure' if v == 'TEMP': return 'sea_water_temperature' elif v == 'PSAL': return 'sea_water_salinity' else: return v keys = list(cfg.keys()) output = OrderedDict() output['revision'] = '0.21' if 'inherit' in keys: output['inherit'] = cfg['inherit'] keys.remove('inherit') if 'main' in cfg: output['common'] = cfg['main'] keys.remove('main') elif 'common' in cfg: output['common'] = cfg['common'] keys.remove('common') def fix_threshold(cfg): """Explicit threshold""" for t in cfg: if isinstance(cfg[t], (int, float)): cfg[t] = {"threshold": cfg[t]} return cfg def fix_regional_range(cfg): """Explicit regions """ if "regional_range" in cfg: cfg["regional_range"] = {"regions": cfg["regional_range"]} return cfg def fix_profile_envelop(cfg): """Explicit layers Note ---- Should I confirm that cfg['profile_envelop'] is a list? """ if "profile_envelop" in cfg: cfg["profile_envelop"] = {"layers": cfg["profile_envelop"]} return cfg output['variables'] = OrderedDict() for k in keys: cfg[k] = fix_threshold(cfg[k]) cfg[k] = fix_regional_range(cfg[k]) cfg[k] = fix_profile_envelop(cfg[k]) output['variables'][label(k)] = cfg[k] # output[k] = cfg[k] return output
874751b70481f50a1243791677a1c2ad0f354952
23,045
def get_alerts_alarms_object(): """ helper function to get alert alarms """ result = [] # Get query filters, query SystemEvents using event_filters event_filters, definition_filters = get_query_filters(request.args) if event_filters is None: # alerts_alarms alerts_alarms = db.session.query(SystemEvent).all() else: alerts_alarms = db.session.query(SystemEvent).filter_by(**event_filters) # Process alert_alarm json output based on definition filters if alerts_alarms is not None: result_json = get_alert_alarm_json(alerts_alarms, definition_filters) if result_json is None: result = [] else: result = result_json return result
7ab1c25cdaa30be0e70b110d47e7ea807713f404
23,046
import glob import pickle def data_cubes_combine_by_pixel(filepath, gal_name): """ Grabs datacubes and combines them by pixel using addition, finding the mean and the median. Parameters ---------- filepath : list of str the data cubes filepath strings to pass to glob.glob gal_name : str galaxy name/descriptor Returns ------- lamdas : :obj:'~numpy.ndarray' the wavelength vector for the cubes cube_added : :obj:'~numpy.ndarray' all cubes added cube_mean : :obj:'~numpy.ndarray' the mean of all the cubes cube_median : :obj:'~numpy.ndarray' the median of all the cubes header : FITS header object the header from the fits file """ #create list to append datas to all_data = [] all_var = [] all_lamdas = [] #iterate through the filenames #they should all be from fits files, so we can just use that loading function for file in glob.glob(filepath): fits_stuff = read_in_data_fits(file) if len(fits_stuff) > 3: lamdas, data, var, header = fits_stuff all_var.append(var) else: lamdas, data, header = fits_stuff #apply corrections to lambdas lamdas = air_to_vac(lamdas) lamdas = barycentric_corrections(lamdas, header) all_lamdas.append(lamdas) #apply Milky Way extinction correction data = milky_way_extinction_correction(lamdas, data) #append the data all_data.append(data) #check if var has the same number of cubes as the data, and if it doesn't, delete it if len(all_data) > len(all_var): del all_var #because the exposures are so close together, the difference in lamda between #the first to the last is only around 0.001A. There's a difference in the #total length of about 0.0003A between the longest and shortest wavelength #vectors after the corrections. So I'm taking the median across the whole #collection. This does introduce some error, making the line spread function #of the averaged spectra larger. lamdas = np.median(all_lamdas, axis=0) #adding the data cube_added = np.zeros_like(all_data[0]) for cube in all_data: cube_added += cube #finding the mean cube_mean = np.mean(all_data, axis=0) #finding the median cube_median = np.median(all_data, axis=0) #if all_var in locals(): #adding the variances #pickle the results with open(filepath.split('*')[0]+'_'+gal_name+'_combined_by_pixel_'+str(date.today()),'wb') as f: pickle.dump([lamdas, cube_added, cube_mean, cube_median], f) f.close() return lamdas, cube_added, cube_mean, cube_median, header
1ae269ade8ac00b269fc52d474e038c9e2ca8d92
23,047
def usdm_bypoint_service( fmt: SupportedFormats, ): """Replaced above.""" return Response(handler(fmt), media_type=MEDIATYPES[fmt])
8f957e8778aab81f94d52cbc07a78346f74ac0c2
23,048
import pandas as pd def read_geonames(filename): """ Parse geonames file to a pandas.DataFrame. File may be downloaded from http://download.geonames.org/export/dump/; it should be unzipped and in a "geonames table" format. """ return pd.read_csv(filename, **_GEONAMES_PANDAS_PARAMS)
638fe3c02d61467fa47ee19e20f4f0022c8b57c2
23,049
def create_property_map(cls, property_map=None): """ Helper function for creating property maps """ _property_map = None if property_map: if callable(property_map): _property_map = property_map(cls) else: _property_map = property_map.copy() else: _property_map = {} return _property_map
b67d0fdcd75c592f3443993f2948a2686e22322d
23,050
import Scientific import Scientific.IO import Scientific.IO.NetCDF def readNetCDF(filename, varName='intensity'): """ Reads a netCDF file and returns the varName variable. """ ncfile = Scientific.IO.NetCDF.NetCDFFile(filename,"r") var1 = ncfile.variables[varName] data = sp.array(var1.getValue(),dtype=float) ncfile.close() return data
887b88f6cef8767be56d4bf828f048a2b7e09606
23,051
import click def show(ctx, name_only, cmds, under, fields, format, **kwargs): """Show the parameters of a command""" cmds = cmds or sorted(config.parameters.readonly.keys()) if under: cmds = [cmd for cmd in cmds if cmd.startswith(under)] with TablePrinter(fields, format) as tp, Colorer(kwargs) as colorer: for cmd_name in cmds: if name_only: click.echo(cmd_name) else: cmd = get_command_safe(cmd_name) def get_line(profile_name): return ' '.join( [quote(p) for p in config.parameters.all_settings.get(profile_name, {}).get(cmd_name, [])]) if config.parameters.readprofile == 'settings-file': args = config.parameters.readonly.get(cmd_name, []) else: values = {profile.name: get_line(profile.name) for profile in config.all_enabled_profiles} args = colorer.colorize(values, config.parameters.readprofile) if args == ['']: # the command most likely has implicit settings and only # explicit values are asked for. Skip it continue if cmd is None: LOGGER.warning('You should know that the command {} does not exist'.format(cmd_name)) args = args or 'None' tp.echo(cmd_name, args)
6f4c959662cc75925cae82143913b7f2b7434a7b
23,052
def make_count_set(conds, r):
    """ Return the R session with a new count data set loaded as `cds`. """
    #r.assign('conds', vectors.StrVector.factor(vectors.StrVector(conds)))
    r.assign('conds', vectors.StrVector(conds))
    r('''
      require('DSS')
      cds = newSeqCountSet(count_matrix, conds)
    ''')
    return r
956ad076d1368cc5c0cf16365d6941157db1c664
23,053
def RunCommand(cmd, timeout_time=None, retry_count=3, return_output=True, stdin_input=None): """Spawn and retry a subprocess to run the given shell command. Args: cmd: shell command to run timeout_time: time in seconds to wait for command to run before aborting. retry_count: number of times to retry command return_output: if True return output of command as string. Otherwise, direct output of command to stdout. stdin_input: data to feed to stdin Returns: output of command """ result = None while True: try: result = RunOnce(cmd, timeout_time=timeout_time, return_output=return_output, stdin_input=stdin_input) except errors.WaitForResponseTimedOutError: if retry_count == 0: raise retry_count -= 1 logger.Log("No response for %s, retrying" % cmd) else: # Success return result
cc1b4421a3a390bfa296faa279df0338985ff851
23,054
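# Hedged usage sketch for RunCommand above. RunOnce, errors and logger
# come from the surrounding module and are not shown here, so this only
# demonstrates the retry-oriented calling convention.
output = RunCommand("ls /tmp", timeout_time=10, retry_count=2)
print(output)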
import h5py
import numpy as np
import illustris_python as il  # the il alias follows the illustris_python convention

def get_clusters_low_z(min_mass=10**4, basepath='/lustre/scratch/mqezlou/TNG300-1/output'):
    """Script to write the positions of the large-mass halos at z ~ 0 to file"""
    halos = il.groupcat.loadHalos(basepath, 98, fields=['GroupMass', 'GroupPos', 'Group_R_Crit200'])
    ind = np.where(halos['GroupMass'][:] > min_mass)
    with h5py.File('clusters_TNG300-1.hdf5', 'w') as f:
        f['Mass'] = halos['GroupMass'][ind]
        f['Group_R_Crit200'] = halos['Group_R_Crit200'][ind]
        f['x'], f['y'], f['z'] = halos['GroupPos'][ind[0], 0], halos['GroupPos'][ind[0], 1], halos['GroupPos'][ind[0], 2]
    return 0
5b868a8be11e109f126ad920b65d67984a7ffdca
23,055
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa

def _serialize_key(key: rsa.RSAPrivateKeyWithSerialization) -> bytes:
    """Return the PEM bytes from an RSA private key"""
    return key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption(),
    )
f9064c1d7a1143d04e757d5ad3b3d7620e67e233
23,057
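# Minimal runnable example for _serialize_key, assuming the cryptography
# package is installed; it generates a throwaway 2048-bit key and prints
# the first PEM line.
from cryptography.hazmat.primitives.asymmetric import rsa

key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
pem = _serialize_key(key)
print(pem.splitlines()[0])  # b'-----BEGIN RSA PRIVATE KEY-----'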
def ldns_key_algo_supported(*args):
    """Check whether the given signing algorithm is supported (wraps _ldns.ldns_key_algo_supported)."""
    return _ldns.ldns_key_algo_supported(*args)
e5184eb314aa315a852bb5bf0fe9b1ae01e4d9fe
23,058
def read_k_bytes(sock, remaining=0):
    """
    Read exactly `remaining` bytes from the socket, blocking until the
    required bytes are available, and return the data read as raw bytes.

    Arguments
    ---------
    sock : Socket to read from
    remaining : Number of bytes to read from the socket.
    """
    ret = b""  # Return byte buffer
    while remaining > 0:
        d = sock.recv(remaining)  # recv may return fewer bytes than requested
        if not d:  # peer closed the connection; avoid looping forever
            raise ConnectionError("socket closed with %d bytes outstanding" % remaining)
        ret += d
        remaining -= len(d)
    return ret
3d75eaa43b84ac99ac37b4b1a048f1a6615901b1
23,059
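# Self-contained sketch exercising read_k_bytes with a local socket pair;
# no external server is assumed ("hello world" is exactly 11 bytes).
import socket

a, b = socket.socketpair()
a.sendall(b"hello world")
print(read_k_bytes(b, remaining=11))  # b'hello world'
a.close()
b.close()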
def total_minutes(data):
    """
    Compute the total number of minutes based on the submitted talks.
    """
    soma = 0
    for item in data.keys():
        soma += (item * len(data[item]))
    return soma
c85f6ac0a1d58b67d1e53ae5ff87b8762e3d050c
23,060
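# Worked example for total_minutes: keys are talk lengths in minutes and
# values are the talks of that length, so 2*30 + 1*45 = 105.
talks = {30: ["talk A", "talk B"], 45: ["talk C"]}
print(total_minutes(talks))  # 105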
import numpy as np

def grid_to_vector(grid, categories):
    """Transform a grid of active classes into a vector of labels.

    In case several classes are active at time i, the label is set to 'overlap'.

    See :func:`ChildProject.metrics.segments_to_grid` for a description of grids.

    :param grid: a NumPy array of shape ``(n, len(categories))``
    :type grid: numpy.array
    :param categories: the list of categories
    :type categories: list
    :return: the vector of labels of length ``n`` (e.g. ``np.array([none FEM FEM FEM overlap overlap CHI])``)
    :rtype: numpy.array
    """
    return np.vectorize(lambda x: categories[x])(
        grid.shape[1] - np.argmax(grid[:, ::-1], axis=1) - 1
    )
849c481ecf1dc608d7457875fef9b6f241d53e91
23,061
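# Small numpy example for grid_to_vector, assuming the function above is
# in scope. The argmax picks the highest-index active column, so this
# sketch assumes segments_to_grid has already filled in the 'none' and
# 'overlap' columns upstream, making each row effectively one-hot.
import numpy as np

categories = ["FEM", "CHI", "none", "overlap"]
grid = np.array([
    [0, 0, 1, 0],  # 'none' column set upstream
    [1, 0, 0, 0],  # FEM
    [0, 0, 0, 1],  # 'overlap' column set upstream
])
print(grid_to_vector(grid, categories))  # ['none' 'FEM' 'overlap']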
import optparse

def standard_script_options(usage, description):
    """Create option parser pre-populated with standard observation script options.

    Parameters
    ----------
    usage, description : string
        Usage and description strings to be used for script help

    Returns
    -------
    parser : :class:`optparse.OptionParser` object
        Parser populated with standard script options
    """
    parser = optparse.OptionParser(usage=usage, description=description)
    parser.add_option('--sb-id-code', type='string',
                      help='Schedule block id code for observation, '
                           'required in order to allocate correct resources')
    parser.add_option('-u', '--experiment-id',
                      help='Experiment ID used to link various parts of '
                           'experiment together (use sb-id-code by default, or random UUID)')
    parser.add_option('-o', '--observer',
                      help='Name of person doing the observation (**required**)')
    parser.add_option('-d', '--description', default='No description.',
                      help="Description of observation (default='%default')")
    parser.add_option('-f', '--centre-freq', type='float', default=1822.0,
                      help='Centre frequency, in MHz (default=%default)')
    parser.add_option('-r', '--dump-rate', type='float', default=1.0,
                      help='Dump rate, in Hz (default=%default)')
    # This option used to be in observe1, but did not make it to the
    # common set of options of observe1 / observe2
    # parser.add_option('-w', '--discard-slews', dest='record_slews', action='store_false', default=True,
    #                   help='Do not record all the time, i.e. pause while antennas are slewing to the next target')
    parser.add_option('-n', '--nd-params', default='coupler,10,10,180',
                      help="Noise diode parameters as '<diode>,<on>,<off>,<period>', "
                           "in seconds or 'off' for no noise diode firing (default='%default')")
    parser.add_option('-p', '--projection', type='choice', choices=projections, default=default_proj,
                      help="Spherical projection in which to perform scans, "
                           "one of '%s' (default), '%s'" % (projections[0], "', '".join(projections[1:])))
    parser.add_option('-y', '--dry-run', action='store_true', default=False,
                      help="Do not actually observe, but display script "
                           "actions at predicted times (default=%default)")
    parser.add_option('--stow-when-done', action='store_true', default=False,
                      help="Stow the antennas when the capture session ends")
    parser.add_option('--mode',
                      help="DBE mode to use for experiment (keeps current mode by default)")
    parser.add_option('--dbe-centre-freq', type='float', default=None,
                      help="DBE centre frequency in MHz, used to select coarse band for "
                           "narrowband modes (unchanged by default)")
    parser.add_option('--horizon', type='float', default=5.0,
                      help="Session horizon (elevation limit) in degrees (default=%default)")
    parser.add_option('--no-mask', action='store_true', default=False,
                      help="Keep all correlation products by not applying baseline/antenna mask")
    return parser
9d16aeb0481f03e5d19955744c7d29b1c42375b3
23,062
def default_handler(request): """ The default handler gets invoked if no handler is set for a request """ return alexa.create_response(message=request.get_slot_map()["Text"])
ae4343c9de86141bb0b112123b9e420bbf1ac5c6
23,064
from django.db.models import Q
from django.http import JsonResponse

def ajax_available_variants_list(request):
    """Return variants filtered by request GET parameters.

    Response format is that of a Select2 JS widget.
    """
    # Skill and SkillVariant are the app's local models (import path not shown)
    available_skills = Skill.objects.published().prefetch_related(
        'category', 'skill_type__skill_attributes')
    queryset = SkillVariant.objects.filter(
        skill__in=available_skills).prefetch_related(
            'skill__category',
            'skill__skill_type__skill_attributes')

    search_query = request.GET.get('q', '')
    if search_query:
        queryset = queryset.filter(
            Q(sku__icontains=search_query) |
            Q(name__icontains=search_query) |
            Q(skill__name__icontains=search_query))

    variants = [
        {'id': variant.id, 'text': variant.get_ajax_label(request.discounts)}
        for variant in queryset]
    return JsonResponse({'results': variants})
7093352368d975e3d3e663dd2541fc81a89ede0c
23,065
from keras import backend as K  # assumption: K is the Keras backend alias used in this module

def jaccard2_coef(y_true, y_pred, smooth=SMOOTH):  # SMOOTH is a module-level constant defined elsewhere
    """Jaccard squared index coefficient

    :param y_true: true label
    :type y_true: int
    :param y_pred: predicted label
    :type y_pred: int or float
    :param smooth: smoothing parameter, defaults to SMOOTH
    :type smooth: float, optional
    :return: Jaccard coefficient
    :rtype: float
    """
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    union = K.sum(y_true_f * y_true_f) + K.sum(y_pred_f * y_pred_f) - intersection
    return (intersection + smooth) / (union + smooth)
dfd480814737a1d725874ec81287948dded3ba2e
23,066
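# Pure-numpy re-check of the squared-Jaccard arithmetic above (smooth = 0),
# avoiding a Keras dependency: for y_true = [1, 0, 1] and
# y_pred = [0.5, 0, 1] the intersection is 0.5 + 1 = 1.5 and the union is
# (1 + 1) + (0.25 + 1) - 1.5 = 1.75, giving 1.5 / 1.75 ~ 0.857.
import numpy as np

y_true = np.array([1.0, 0.0, 1.0])
y_pred = np.array([0.5, 0.0, 1.0])
intersection = np.sum(y_true * y_pred)
union = np.sum(y_true**2) + np.sum(y_pred**2) - intersection
print(intersection / union)  # 0.8571...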
import numpy as np

def marginal_density_from_linear_conditional_relationship(
        mean1, cov1, cov2g1, Amat, bvec):
    """
    Compute the marginal density of P(x2)

    Given p(x1) normal with mean and covariance m1, C1

    Given p(x2|x1) normal with mean and covariance m_2|1=A*x1+b, C_2|1

    P(x2) is normal with mean and covariance m2=A*m1+b, C2=C_2|1+A*C1*A.T

    Parameters
    ----------
    mean1 : np.ndarray (nvars1)
        The mean (m1) of the Gaussian distribution of x1

    cov1 : np.ndarray (nvars1,nvars1)
        The covariance (C1) of the Gaussian distribution of x1

    cov2g1 : np.ndarray (nvars2,nvars2)
        The covariance (C_2|1) of the Gaussian distribution of P(x2|x1)

    Amat : np.ndarray (nvars2,nvars1)
        The matrix (A) of the conditional distribution P(x2|x1)

    bvec : np.ndarray (nvars2)
        The vector (b) of the conditional distribution P(x2|x1)

    Returns
    -------
    mean2 : np.ndarray (nvars2)
        The mean (m2) of P(x2)

    cov2 : np.ndarray (nvars2,nvars2)
        The covariance (C_2) of P(x2)
    """
    AC1 = np.dot(Amat, cov1)
    mean2 = Amat.dot(mean1) + bvec
    cov2 = cov2g1 + AC1.dot(Amat.T)
    return mean2, cov2
3baa69910cd78a02bec5ba1517ca3a8ea189f845
23,067
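# Quick numerical check of the marginal-density identities above, assuming
# the function is in scope. With scalar x1 ~ N(1, 2), A = [[3]], b = [1]
# and C_2|1 = [[0.5]], the formulas give m2 = 3*1 + 1 = 4 and
# C2 = 0.5 + 3*2*3 = 18.5.
import numpy as np

mean1 = np.array([1.0])
cov1 = np.array([[2.0]])
cov2g1 = np.array([[0.5]])
Amat = np.array([[3.0]])
bvec = np.array([1.0])
mean2, cov2 = marginal_density_from_linear_conditional_relationship(
    mean1, cov1, cov2g1, Amat, bvec)
print(mean2, cov2)  # [4.] [[18.5]]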
def rowcount_fetcher(cursor): """ Return the rowcount returned by the cursor. """ return cursor.rowcount
21b30665391aa16d158083ccb37149bd6ec0f548
23,068
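# Tiny sqlite3 illustration of rowcount_fetcher; note that sqlite3 only
# reports a meaningful rowcount for DML statements such as UPDATE.
import sqlite3

conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE t (x INTEGER)")
cur.executemany("INSERT INTO t VALUES (?)", [(1,), (2,), (3,)])
cur.execute("UPDATE t SET x = x + 1")
print(rowcount_fetcher(cur))  # 3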
def view_hello_heartbeat(request):
    """Hello to TA2 with no logging. Used for testing."""
    # Call the TA2
    resp_info = ta2_hello()
    if not resp_info.success:
        return JsonResponse(get_json_error(resp_info.err_msg))

    json_str = resp_info.result_obj

    # Convert the JSON str to a python dict - no error catch here;
    # let it blow up for now, since the TA2 should always return JSON
    json_format_info = json_loads(json_str)
    if not json_format_info.success:
        return JsonResponse(get_json_error(json_format_info.err_msg))

    json_info = get_json_success('success!', data=json_format_info.result_obj)
    return JsonResponse(json_info)
736c5b4d9832f16b6bac36abf2c1a6aa3443b768
23,070
import functools import contextlib def with_environment(server_contexts_fn): """A decorator for running tests in an environment.""" def decorator_environment(fn): @functools.wraps(fn) def wrapper_environment(self): with contextlib.ExitStack() as stack: for server_context in server_contexts_fn(): stack.enter_context(server_context) fn(self) return wrapper_environment return decorator_environment
dbd4b435d920a08b97dd2921c534c14ce8d18acb
23,071
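# Hedged sketch of the decorator in use: server_contexts_fn returns a
# list of context managers that are all entered before the test body
# runs and unwound afterwards. The contexts here are stand-ins, not a
# real server fixture.
import contextlib

@contextlib.contextmanager
def fake_server(name):
    print("start", name)
    yield
    print("stop", name)

class MyTest:
    @with_environment(lambda: [fake_server("a"), fake_server("b")])
    def test_something(self):
        print("test body")

MyTest().test_something()  # start a, start b, test body, stop b, stop a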
import ast def get_number_of_unpacking_targets_in_for_loops(node: ast.For) -> int: """Get the number of unpacking targets in a `for` loop.""" return get_number_of_unpacking_targets(node.target)
9ce94f93d18e87cddbd2e2bbbfb05b026901c0da
23,072
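# Minimal sketch around the helper above. The underlying
# get_number_of_unpacking_targets is not shown in this snippet; the
# ast-based count below is an assumed stand-in with the same contract.
import ast

def get_number_of_unpacking_targets(target: ast.AST) -> int:
    # a tuple target such as `k, v` counts each element; a plain name counts as 1
    return len(target.elts) if isinstance(target, ast.Tuple) else 1

loop = ast.parse("for k, v in d.items():\n    pass").body[0]
print(get_number_of_unpacking_targets_in_for_loops(loop))  # 2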
def dmp_degree(f, u): """Returns leading degree of `f` in `x_0` in `K[X]`. """ if dmp_zero_p(f, u): return -1 else: return len(f) - 1
da2df32019d1121c40424893773928225201e584
23,073
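# Worked example for dmp_degree above, assuming dmp_zero_p from the same
# dense-polynomial module is in scope. In the dense representation the
# leading coefficient comes first, so a length-3 coefficient list has
# degree 2 in x_0, and the zero polynomial reports degree -1 by convention.
print(dmp_degree([[1], [], [2, 3]], 1))  # 2
print(dmp_degree([[]], 1))               # -1 (zero polynomial)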
import requests

def fetch_remote_content(url: str) -> requests.Response:
    """
    Executes a GET request to a URL.
    """
    response = requests.get(url)  # creates a fresh throwaway Session for each call
    return response
0b98315b8acf1f1a4ad7f177ef689af4c6a7ba63
23,074
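# One-line usage sketch for fetch_remote_content; the URL is a placeholder
# endpoint, not one the surrounding module actually queries.
resp = fetch_remote_content("https://httpbin.org/get")
print(resp.status_code, resp.headers.get("content-type"))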
import paddle.fluid as fluid

def optimization(loss,
                 warmup_steps,
                 num_train_steps,
                 learning_rate,
                 train_program,
                 startup_prog,
                 weight_decay,
                 scheduler='linear_warmup_decay',
                 decay_steps=[],
                 lr_decay_dict_file="",
                 lr_decay_ratio=0.1):
    """
    optimization implementation
    """
    if warmup_steps > 0:
        if scheduler == 'noam_decay':
            scheduled_lr = fluid.layers.learning_rate_scheduler \
                .noam_decay(1 / (warmup_steps * (learning_rate ** 2)),
                            warmup_steps)
        elif scheduler == 'linear_warmup_decay':
            scheduled_lr = linear_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps)
        elif scheduler == 'manual_warmup_decay':
            scheduled_lr = manual_warmup_decay(learning_rate, warmup_steps,
                                               num_train_steps, decay_steps, lr_decay_ratio)
        else:
            raise ValueError("Unknown learning rate scheduler, should be "
                             "'noam_decay' or 'linear_warmup_decay' or 'manual_warmup_decay'")
    else:
        scheduled_lr = fluid.layers.create_global_var(
            name=fluid.unique_name.generate("learning_rate"),
            shape=[1],
            value=learning_rate,
            dtype='float32',
            persistable=True)

    lr_decay_dict = {}
    if lr_decay_dict_file != "":
        with open(lr_decay_dict_file) as f:
            for line in f:
                param, decay_rate = line.strip().split('\t')
                lr_decay_dict[param] = float(decay_rate)

    for param in fluid.default_main_program().block(0).all_parameters():
        if param.name in lr_decay_dict:
            print(param.name, lr_decay_dict[param.name])
            param.optimize_attr['learning_rate'] = lr_decay_dict[param.name]

    optimizer = fluid.optimizer.Adam(learning_rate=scheduled_lr)
    optimizer._learning_rate_map[fluid.default_main_program()] = scheduled_lr

    fluid.clip.set_gradient_clip(
        clip=fluid.clip.GradientClipByGlobalNorm(clip_norm=1.0))

    def exclude_from_weight_decay(name):
        """
        Parameters that do not use weight decay: layer norms and biases
        """
        if name.find("layer_norm") > -1:
            return True
        bias_suffix = ["_bias", "_b", ".b_0"]
        for suffix in bias_suffix:
            if name.endswith(suffix):
                return True
        return False

    param_list = dict()

    for param in train_program.global_block().all_parameters():
        param_list[param.name] = param * 1.0
        param_list[param.name].stop_gradient = True

    _, param_grads = optimizer.minimize(loss)

    if weight_decay > 0:
        for param, grad in param_grads:
            if exclude_from_weight_decay(param.name):
                continue
            with param.block.program._optimized_guard(
                    [param, grad]), fluid.framework.name_scope("weight_decay"):
                updated_param = param - param_list[
                    param.name] * weight_decay * scheduled_lr * param.optimize_attr['learning_rate']
                fluid.layers.assign(output=param, input=updated_param)

    return scheduled_lr
f3b2e2311551d13d9e2930847afff38636ea2b27
23,075
def build_full_record_to(pathToFullRecordFile):
    """Build the full record.

    Structure of the full record:
    {commitID: {'build-time': time, files: {filename: {record}, filename: {record}}}}
    """
    full_record = {}

    # reading the cached record back in leads to being killed by the OS due to
    # tremendous memory consumption...
    #if os.path.isfile(pathToFullRecordFile):
    #    with open(pathToFullRecordFile, 'r') as fullRecordFile:
    #        print "loading full record from " + pathToFullRecordFile
    #        full_record = eval(fullRecordFile.read())
    #    print "read full record from " + pathToFullRecordFile
    #else:
    full_record = build_full_record()
#    f = open(pathToFullRecordFile, 'w')
#    try:
#        f.write(repr(full_record) + "\n")
#    except MemoryError as me:
#        print me
#        raise
#    finally:
#        print time.ctime()
#        f.close()
#    print "built full record, wrote to " + pathToFullRecordFile

    return full_record
8c9c070c14ffce848cb98a3e8a71b389418aadd0
23,076
def xsthrow_format(formula):
    """Format the string to follow the xstool_throw convention for toy vars."""
    return (formula.
            replace('accum_level[0]', 'accum_level[xstool_throw]').
            replace('selmu_mom[0]', 'selmu_mom[xstool_throw]').
            replace('selmu_theta[0]', 'selmu_theta[xstool_throw]'))
b36183df77e681b967ce48a9164fe37861ffd11c
23,077
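# Worked example of the toy-variable rewrite performed by xsthrow_format,
# assuming the function above is in scope; the cut string is hypothetical.
print(xsthrow_format("accum_level[0]>7 && selmu_mom[0]>250"))
# accum_level[xstool_throw]>7 && selmu_mom[xstool_throw]>250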