Columns:
content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (values 0 to 3.66M)
def scale_values(tbl, columns):
    """Scale values in a dataframe using MinMax scaling.

    :param tbl: Table
    :param columns: iterable with names of columns to be scaled
    :returns: Table with scaled columns
    """
    new_tbl = tbl.copy()
    for col in columns:
        name = new_tbl.labels[col]
        x_scaled = minmax_scale(new_tbl[name])
        new_tbl[name] = x_scaled
    return new_tbl
c2b6ff0414ab7930020844005e3bdf4783609589
33,059
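A hedged usage sketch for scale_values above; the datascience-style Table and sklearn's minmax_scale are assumptions inferred from the row, not confirmed by it:

# Illustrative only: assumes a datascience-style Table and sklearn's minmax_scale.
from datascience import Table
from sklearn.preprocessing import minmax_scale

tbl = Table().with_columns("height", [150, 160, 170], "weight", [50, 60, 70])
scaled = scale_values(tbl, columns=[0, 1])  # both columns rescaled into [0, 1]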
def get_params_out_of_range(
    params: list, lower_params: list, upper_params: list
) -> list:
    """
    Check if any parameter specified by the user is out of the range that was defined

    :param params: List of parameters read from the .inp file
    :param lower_params: List of lower bounds provided by the user in the .inp file
    :param upper_params: List of upper bounds provided by the user in the .inp file
    :return: List of indices of the parameters that fall outside the defined range
    """
    params_out = [
        i
        for i in range(len(lower_params))
        if params[i] < lower_params[i] or params[i] > upper_params[i]
    ]
    return params_out
67a8ca57a29da8b431ae26f863ff8ede58f41a34
33,062
import functools

import numpy as np


def _filter_work_values(
    works: np.ndarray,
    max_value: float = 1e4,
    max_n_devs: float = 100,
    min_sample_size: int = 10,
) -> np.ndarray:
    """Remove pairs of works when either is determined to be an outlier.

    Parameters
    ----------
    works : ndarray
        Array of records containing fields "forward" and "reverse"
    max_value : float
        Remove work values with magnitudes greater than this
    max_n_devs : float
        Remove work values farther than this number of standard deviations
        from the mean
    min_sample_size : int
        Only apply the `max_n_devs` criterion when sample size is larger
        than this

    Returns
    -------
    out : ndarray
        1-D array of filtered works.
        ``out.shape == (works.size, 1)``
    """
    mask_work_outliers = functools.partial(
        _mask_outliers,
        max_value=max_value,
        max_n_devs=max_n_devs,
        min_sample_size=min_sample_size,
    )
    f_good = mask_work_outliers(works["forward"])
    r_good = mask_work_outliers(works["reverse"])
    both_good = f_good & r_good
    return works[both_good]
93002df6f7bdaf0ffd639f37021a8e6844fee4bd
33,063
def pf_from_ssig(ssig, ncounts):
    """Estimate pulsed fraction for a sinusoid from a given Z or PDS power.

    See `a_from_ssig` and `pf_from_a` for more details

    Examples
    --------
    >>> round(a_from_pf(pf_from_ssig(150, 30000)), 1)
    0.1
    """
    a = a_from_ssig(ssig, ncounts)
    return pf_from_a(a)
235b473f60420f38dd8c0ad19c64366f85c8ac4c
33,064
from math import log


def get_aic(mse: float, n: int, p: int):
    """
    Calculate AIC score.

    Parameters
    ----------
    mse: float
        Mean-squared error.
    n: int
        Number of observations.
    p: int
        Number of parameters.

    Returns
    -------
    float
        AIC value.
    """
    return n * log(mse) + 2 * p
033cb5ea7e9d06a2f630d3eb2718630904e4209f
33,065
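A worked example of the AIC formula in get_aic above (the numbers are invented for illustration):

# AIC = n * ln(MSE) + 2p, so for n=100 observations, MSE=0.5 and p=3 parameters:
# 100 * ln(0.5) + 2 * 3 = -69.31 + 6 = -63.31 (lower AIC means a better fit)
print(round(get_aic(mse=0.5, n=100, p=3), 2))  # -63.31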
from functools import partial


def triangle(a, b):
    """
    Return triangle function:

        ^      .
        |     / \
        |____/___\____
             a   b
    """
    return partial(primitives.tri, a, b)
f28bbe0bacb260fb2fb30b9811b1d5d6e5b99750
33,066
import pickle
from datetime import date


def get_max_trans_date() -> date:
    """Return the date of the last transaction in the database"""
    return pickle.load(open(conf("etl_accts"), "rb"))[1]
61db89cfbbdc9f7e2b86930f50db75dcc213205c
33,067
from argparse import ArgumentParser


def build_argparser():
    """
    Parse command line arguments.

    :return: command line arguments
    """
    parser = ArgumentParser()
    parser.add_argument("-m", "--model", required=True, type=str,
                        help="Path to an xml file with a trained model.")
    parser.add_argument("-i", "--input", required=True, type=str,
                        help="Path to image or video file")
    parser.add_argument("-l", "--cpu_extension", required=False, type=str,
                        default=None,
                        help="MKLDNN (CPU)-targeted custom layers. "
                             "Absolute path to a shared library with the "
                             "kernels impl.")
    parser.add_argument("-d", "--device", type=str, default="CPU",
                        help="Specify the target device to infer on: "
                             "CPU, GPU, FPGA or MYRIAD is acceptable. Sample "
                             "will look for a suitable plugin for device "
                             "specified (CPU by default)")
    parser.add_argument("-pt", "--prob_threshold", type=float, default=0.5,
                        help="Probability threshold for detections filtering "
                             "(0.5 by default)")
    return parser
65c6e45e30b67ff879dbcf1a64cd8321192adfcf
33,068
from pathlib import Path


def read_file(in_file: str):
    """Read input file."""
    file_path = Path(in_file)
    data = []
    count = 0
    with open(file_path) as fp:
        for line in fp:
            data.append(line)
            count = count + 1
    return ''.join(data), count
4fbae8f1af7800cb5f89784a0230680a1d6b139a
33,069
def limit(value, limits):
    """
    :param <float> value: value to limit
    :param <list>/<tuple> limits: (min, max) limits to which restrict the value
    :return <float>: value from within limits; if the input value already fits
        within the limits it is left unchanged, and if it exceeds a boundary
        it is set to that boundary.
    """
    if value < limits[0]:
        value = limits[0]
    elif value > limits[1]:
        value = limits[1]
    return value
55fb603edb478a26b238d7c90084e9c17c3113b8
33,071
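A brief usage sketch of the clamp in limit above (values invented for illustration):

print(limit(11.5, (0.0, 10.0)))  # 10.0: capped at the upper bound
print(limit(-3.0, (0.0, 10.0)))  # 0.0: raised to the lower bound
print(limit(4.2, (0.0, 10.0)))   # 4.2: already within limits, unchanged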
def _cdp_no_split_worker(work_queue, counts_by_ref, seq_1, seq_2, nt):
    """
    Worker process - get refseq from work queue, aligns reads from seq_1 and
    seq_2, and adds as (x,y) coords to counts_by_ref if there are alignments.

    :param work_queue: joinable queue with refseq header and seq tuples
        (JoinableQueue(header,ref_seq))
    :param counts_by_ref: Manager dict for counts for each reference result
        (mgr.dict)
    :param seq_1: seq file set 1 (SRNASeq)
    :param seq_2: seq file set 2 (SRNASeq)
    :param nt: read length to align (int)
    :return: True
    """
    try:
        while not work_queue.empty():
            both_aligned = _cdp_no_split_single_ref_align(work_queue.get(),
                                                          seq_1, seq_2, nt)
            if both_aligned is not None:
                counts_by_ref[both_aligned[0]] = (both_aligned[1],
                                                  both_aligned[2])
    except Exception as e:
        print(e)
    return True
498e9da9c9f30adc1fa2564590cc7a99cbed9b94
33,072
def createIQSatelliteChannel(satellite_id):
    """Factory

    This method creates a satellite channel object that exchanges IQ data
    in between both ends.
    """
    return SatelliteChannel(satellite_id,
                            stellarstation_pb2.Framing.Value('IQ'))
e1fc08de59692ab308716abc8d137d0ab90336bc
33,073
def scaled_herding_forward_pass(weights, scales, input_data, n_steps):
    """
    Do a forward pass with scaled units.

    :param weights: A length:L list of weight matrices
    :param scales: A length:L+1 list of scale vectors
    :param input_data: An (n_samples, n_dims) array of input data
    :param n_steps: Number of steps to run for
    :return: A (n_samples, n_steps, n_outputs) integer array representing the
        output spike count in each time bin
    """
    assert all(w_in.shape[1] == w_out.shape[0]
               for w_in, w_out in zip(weights[:-1], weights[1:]))
    assert len(scales) == len(weights) + 1
    assert all(s.ndim == 1 for s in scales)
    assert all(w.ndim == 2 for w in weights)
    assert all(len(s_in) == w.shape[0] and len(s_out) == w.shape[1]
               for w, s_in, s_out in zip(weights, scales[:-1], scales[1:]))
    assert input_data.shape[1] == len(scales[0])
    # scaled_weights = [s_in[:, None]**-1 * w * s_out[None, :]
    #                   for s_in, w, s_out in zip(scales[:-1], weights, scales[1:])]
    spikes = sequential_quantize(input_data * scales[0], n_steps=n_steps)
    for s, w in zip(scales[1:], weights):
        spikes = sequential_quantize(spikes.dot(w) * s)
        # spikes = sequential_quantize(spikes.dot(w/s_pre[:, None])*s_post)
    return spikes / scales[-1]
e5af2c099b1296bdc1f584acaa6ce8f832fb29f6
33,074
from io import StringIO


def open_remote_factory(mocker):
    """Fixture providing open_remote function for ReferenceLoader construction."""
    return mocker.Mock(return_value=StringIO(REMOTE_CONTENT))
b3df151b021cfd3a07c5737b50d8856d3dc0a599
33,075
def get_displacement_bcs(domain, macro_strain):
    """Get the shift and fixed BCs.

    The shift BC has the right top and bottom points in x, y and z fixed or
    displaced. The fixed BC has the left top and bottom points in x, y and z
    fixed.

    Args:
      domain: an Sfepy domain
      macro_strain: the macro strain

    Returns:
      the Sfepy boundary conditions
    """
    return Conditions(
        [
            get_shift_or_fixed_bcs(
                domain,
                lambda min_, max_: {"u.0": macro_strain * (max_[0] - min_[0])},
                "shift",
                lambda min_, max_: (max_[0],),
            ),
            get_shift_or_fixed_bcs(
                domain,
                lambda min_, max_: merge(
                    {"u.0": 0.0, "u.1": 0.0},
                    {"u.2": 0.0} if len(min_) == 3 else dict()
                ),
                "fix",
                lambda min_, max_: (min_[0],),
            ),
        ]
    )
02090e4d64f75b671597c4b916faf086c5bfe096
33,076
def public_rest_url(path_url: str = "",
                    domain: str = CONSTANTS.DEFAULT_DOMAIN,
                    only_hostname: bool = False,
                    domain_api_version: str = None,
                    endpoint_api_version: str = None) -> str:
    """
    Creates a full URL for provided public REST endpoint

    :param path_url: a public REST endpoint
    :param domain: the CoinFLEX domain to connect to ("live" or "test").
        The default value is "live"
    :return: the full URL to the endpoint
    """
    local_domain_api_version = domain_api_version or CONSTANTS.PUBLIC_API_VERSION
    local_endpoint_api_version = endpoint_api_version or CONSTANTS.PUBLIC_API_VERSION
    subdomain_prefix = (f"{local_domain_api_version}stg"
                        if domain == "test"
                        else local_domain_api_version)
    endpoint = "" if not len(path_url) else f"/{path_url}"
    if only_hostname:
        return CONSTANTS.REST_URL.format(subdomain_prefix)
    return ("https://" + CONSTANTS.REST_URL.format(subdomain_prefix)
            + f"/{local_endpoint_api_version}{endpoint}")
3c99f5a388c33d5c7aa1e018d2d38c7cbe82b112
33,077
import torch


def get_dir_cos(dist_vec):
    """
    Calculates directional cosines from distance vectors.

    Calculate directional cosines with respect to the standard cartesian axes
    and avoid division by zero

    Args:
        dist_vec: distance vector between particles

    Returns:
        dir_cos, array of directional cosines of distances between particles
    """
    norm = torch.linalg.norm(dist_vec, axis=-1)
    # Replace 1/norm with 0 where the norm vanishes, then broadcast the
    # inverse norm over the three cartesian components.
    inv_norm = torch.where(
        torch.linalg.norm(dist_vec, axis=-1) == 0,
        torch.zeros(norm.shape, device=dist_vec.device),
        1 / norm)
    dir_cos = dist_vec * torch.repeat_interleave(
        torch.unsqueeze(inv_norm, axis=-1), 3, dim=-1)
    return dir_cos
f325ca5535eaf9083082b147ff90f727214031ec
33,078
def lambda_handler(event, context):
    """
    Entry point for the Get All Lambda function.
    """
    handler_request.log_event(event)

    # Get gk_user_id from requestContext
    player_id = handler_request.get_player_id(event)
    if player_id is None:
        return handler_response.return_response(401, 'Unauthorized.')

    # get bundle_name from path
    bundle_name = handler_request.get_path_param(event, 'bundle_name')
    if bundle_name is None:
        return handler_response.return_response(400, 'Invalid bundle name')
    if len(bundle_name) > user_game_play_constants.BUNDLE_NAME_MAX_LENGTH:
        return handler_response.return_response(414, 'Invalid bundle name')
    bundle_name = sanitizer.sanitize(bundle_name)

    # get bundle_item_key from path
    bundle_item_key = handler_request.get_path_param(event, 'bundle_item_key')
    if bundle_item_key is None:
        return handler_response.return_response(400, 'Invalid bundle item key')
    if len(bundle_item_key) > user_game_play_constants.BUNDLE_NAME_MAX_LENGTH:
        return handler_response.return_response(414, 'Invalid bundle item key')
    bundle_item_key = sanitizer.sanitize(bundle_item_key)

    # get payload from body (an items value)
    item_data = handler_request.get_body_as_json(event)
    if item_data is None:
        return handler_response.return_response(400, 'Missing payload')
    if "bundle_item_value" not in item_data:
        return handler_response.return_response(400, 'Invalid payload')
    item_key = sanitizer.sanitize(item_data["bundle_item_value"])
    if not item_key:
        return handler_response.return_response(400, 'Invalid payload')

    player_id_bundle = f'{player_id}_{bundle_name}'
    try:
        ddb_client.update_item(**_build_bundle_item_update_request(
            player_id_bundle, bundle_item_key, item_key))
    except ddb_client.exceptions.ConditionalCheckFailedException:
        return handler_response.return_response(
            404, 'Bundle and/or bundle item not found.')
    except botocore.exceptions.ClientError as err:
        logger.error(f'Error updating bundle item, please ensure bundle item '
                     f'exists. Error: {err}')
        raise err

    # Return operation result
    return handler_response.return_response(204, None)
2cc1aeb6a451feb41cbf7a121c67ddfbd06e686f
33,079
def reverse_complement_dna(seq):
    """
    Reverse complement of a DNA sequence

    Parameters
    ----------
    seq : str

    Returns
    -------
    str
    """
    return complement_dna(seq)[::-1]
680cf032c0a96fc254928bfa58eb25bee56e44dc
33,080
def Wizard():
    """
    Creates a wizard character

    :returns: fully initialised wizard
    :rtype: Character
    """
    character = (CharacterBuilder()
                 .with_hit_points(5)
                 .with_max_hp(5)
                 .with_spirit(20)
                 .with_max_spirit(20)
                 .with_speed(4)
                 .with_body(4)
                 .with_mind(8)
                 .with_attack(1)
                 .with_name('Wizard')
                 .build())
    return character
23019f41ba6bf51e049ffe16831a725dd3c20aa2
33,081
import numpy as np


def tournament_communication(comm, comm_fn=lambda x, y: None, comm_kw={}):
    """
    This is useful for the development of parallelized O(N) duplicate check
    functions. The problem with such functions is that the operation of
    checking if a set of parameters/descriptors has been observed previously
    requires there be a master set of observed values to compare against and
    add to. This means that it is not naively parallel. In order to achieve
    decent scaling for O(N) duplicate checks, this method has been
    implemented.

    This method will combine the results from ranks in a tournament-bracket
    style such that in the end, the master list will be on rank 0. This
    achieves better scaling because in the beginning, all ranks compare their
    lists to another rank, adding unique values to one of the ranks. This
    rank moves forward to another comparison with another rank that has
    completed its comparisons as well. This continues until all results are
    on the master rank.

    comm_fn API: comm_fn(comm, (rank1, rank2), **kwargs)
    """
    ### Step 1 is to build the tournament bracket
    size = comm.Get_size()
    rank = comm.Get_rank()
    rank_list = np.arange(0, size)

    tournament = []
    temp_tournament = []
    for idx, value in enumerate(rank_list[::2]):
        value2 = value + 1
        temp_tournament.append((value, value2))
    tournament.append(temp_tournament)

    if size <= 1:
        tournament = [(0,)]

    prev_tournament = tournament[0]
    while len(prev_tournament) != 1:
        temp_tournament = []
        for idx, entry in enumerate(prev_tournament[::2]):
            next_idx = idx * 2 + 1
            keep_rank1 = min(entry)
            if (next_idx + 1) > len(prev_tournament):
                temp_tournament.append((keep_rank1,))
            else:
                keep_rank2 = min(prev_tournament[next_idx])
                temp_tournament.append((keep_rank1, keep_rank2))
        tournament.append(temp_tournament)
        prev_tournament = tournament[-1]

    if len(tournament) > 1:
        tournament.append([(0,)])

    if tournament == [(0,)]:
        return

    idx = 0
    for braket in tournament:
        if rank == 0:
            print("Round {} of {}".format(idx, len(tournament)), flush=True)
        idx += 1
        # ### Rank loop is here to emulate parallel execution
        # for rank in rank_list:
        found = False
        for entry in braket:
            if rank in entry:
                found = True
                break
        if found:
            comm_fn(comm, entry, **comm_kw)
        # if found:
        #     print("{}: {}".format(rank, entry))

    return tournament
03827a02f3df099aa3eead3d8214f0f2f90e60b1
33,082
import numpy as np


def create_permutation_feature(number, rate_pert=1., name=None):
    """Create permutation for features."""
    n = np.random.randint(0, 100000)
    if name is None:
        name = f_stringer_pert_rate(rate_pert)
    lista_permuts = []
    for i in range(number):
        lista_permuts.append((name, PartialPermutationPerturbationGeneration,
                              {'seed': n + i, 'rate_pert': rate_pert},
                              f_pert_partialfeatures_instantiation))
    return lista_permuts
8c6931e2e2b1dcd9313fda5d8be63bfb0c549f5f
33,083
import random


def DiceRoll():
    """A function to simulate rolling of one or more dice."""
    def Roll():
        return random.randint(1, 6)

    print("\nRoll Dice: Simulates rolling of one or more dice.")
    num = 1
    try:
        num = int(input("\nEnter the number of dice you wish to roll: "))
    except ValueError:
        print("Input should be a number.")
    if num > 0:
        out = []  # list to store roll output
        i = 1
        while i <= num:
            out.append(str(Roll()))
            i += 1
        print("\nRoll Result(s)")
        print("============")
        print(", ".join(out))
90e9587473fb06541ec9daa2ec223759940a5ecb
33,084
def rook_move(self, game, src):
    """ Validates rook move """
    x = src[0]
    y = src[1]
    result = []
    loop_condition = (lambda i: i < 8) if self.color == 'white' else (lambda i: i >= 0)
    reverse_loop_condition = (lambda i: i < 8) if self.color == 'black' else (lambda i: i >= 0)
    counter_eval = +1 if self.color == 'white' else -1
    reverse_counter_eval = -counter_eval
    loops = [
        [loop_condition, counter_eval],
        [reverse_loop_condition, reverse_counter_eval]
    ]
    for loop in loops:
        i = x
        while loop[0](i):
            if i != x:
                if game.board[i][y] is not None:
                    if game.board[i][y].color != self.color:
                        result.append([i, y])
                    break
                result.append([i, y])
            i += loop[1]
    for loop in loops:
        i = y
        while loop[0](i):
            if i != y:
                if game.board[x][i] is not None:
                    if game.board[x][i].color != self.color:
                        result.append([x, i])
                    break
                result.append([x, i])
            i += loop[1]
    return result
76a782541c565d14a84c1845841338d99f23704d
33,085
def figure_5a():
    """
    This creates the plot for figure 5A in the Montague paper. Figure 5A is
    a 'plot of ∂(t) over time for three trials during training (1, 30, and 50).'
    """
    # Create Processing Components
    sample_mechanism = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             name=pnl.SAMPLE)

    action_selection = pnl.TransferMechanism(default_variable=np.zeros(60),
                                             function=pnl.Linear(slope=1.0,
                                                                 intercept=0.01),
                                             name='Action Selection')

    sample_to_action_selection = pnl.MappingProjection(sender=sample_mechanism,
                                                       receiver=action_selection,
                                                       matrix=np.zeros((60, 60)))

    # Create Composition
    composition_name = 'TD_Learning_Figure_5A'
    comp = pnl.Composition(name=composition_name)

    # Add Processing Components to the Composition
    pathway = [sample_mechanism, sample_to_action_selection, action_selection]

    # Add Learning Components to the Composition
    learning_related_components = comp.add_td_learning_pathway(
        pathway, learning_rate=0.3).learning_components

    # Unpack Relevant Learning Components
    prediction_error_mechanism = learning_related_components[pnl.OBJECTIVE_MECHANISM]
    target_mechanism = learning_related_components[pnl.TARGET_MECHANISM]

    # Create Log
    prediction_error_mechanism.log.set_log_conditions(pnl.VALUE)

    # Create Stimulus Dictionary
    no_reward_trials = {14, 29, 44, 59, 74, 89}
    inputs = build_stimulus_dictionary(sample_mechanism, target_mechanism,
                                       no_reward_trials)

    # Run Composition
    comp.learn(inputs=inputs)

    if args.enable_plot:
        # Get Delta Values from Log
        delta_vals = prediction_error_mechanism.log.nparray_dictionary()[
            composition_name][pnl.VALUE]

        # Plot Delta Values from trials 1, 30, and 50
        with plt.style.context('seaborn'):
            plt.plot(delta_vals[0][0], "-o", label="Trial 1")
            plt.plot(delta_vals[29][0], "-s", label="Trial 30")
            plt.plot(delta_vals[49][0], "-o", label="Trial 50")
            plt.title("Montague et al. (1996) -- Figure 5A")
            plt.xlabel("Timestep")
            plt.ylabel("∂")
            plt.legend()
            plt.xlim(xmin=35)
            plt.xticks()
            plt.show(block=not pnl._called_from_pytest)

    return comp
a8764f75cd9fc7cf0e0a9ddadc452ffdf05f099e
33,086
import json


def lambda_handler(event, context):
    """
    Transforms a binary payload by invoking "decode_{event.type}" function

    Parameters
    ----------
    DeviceId : str
        Device Id
    ApplicationId : int
        LoRaWAN Application Id / Port number
    PayloadData : str
        Base64 encoded input payload

    Returns
    -------
    This function returns a JSON object with the following keys:
        - status: 200 or 500
        - transformed_payload: result of calling
          "{PayloadDecoderName}.dict_from_payload" (only if status == 200)
        - lns_payload: a representation of payload as received from an LNS
        - error_type (only if status == 500)
        - error_message (only if status == 500)
        - stackTrace (only if status == 500)
    """
    logger.info("Received event: %s" % json.dumps(event))
    input_base64 = event.get("PayloadData")
    device_id = event.get("WirelessDeviceId")
    metadata = event.get("WirelessMetadata")["LoRaWAN"]

    try:
        # Invoke a payload conversion function
        decoded_payload = rfi_power_switch.dict_from_payload(
            event.get("PayloadData"))
        # Define the output of AWS Lambda function in case of successful decoding
        decoded_payload["status"] = 200
        result = decoded_payload
        logger.info(result)
        return result
    except Exception as exp:
        logger.error(f"Exception {exp} during binary decoding")
        raise exp
d645454656d85589652942b944e84863cb22a425
33,088
def _get_adi_snrs(psf, angle_list, fwhm, plsc, flux_dist_theta_all,
                  wavelengths=None, mode='median', ncomp=2):
    """ Get the mean S/N (at 3 equidistant positions) for a given flux and
    distance, on a median subtracted frame.
    """
    snrs = []
    theta = flux_dist_theta_all[2]
    flux = flux_dist_theta_all[0]
    dist = flux_dist_theta_all[1]

    # 3 equidistant azimuthal positions
    for ang in [theta, theta + 120, theta + 240]:
        cube_fc, posx, posy = create_synt_cube(GARRAY, psf, angle_list, plsc,
                                               flux=flux, dist=dist,
                                               theta=ang, verbose=False)
        fr_temp = _compute_residual_frame(cube_fc, angle_list, dist, fwhm,
                                          wavelengths, mode, ncomp,
                                          svd_mode='lapack', scaling=None,
                                          collapse='median', imlib='opencv',
                                          interpolation='lanczos4')
        res = frame_quick_report(fr_temp, fwhm, source_xy=(posx, posy),
                                 verbose=False)
        # mean S/N in circular aperture
        snrs.append(np.mean(res[-1]))

    # median of mean S/N at 3 equidistant positions
    median_snr = np.median(snrs)
    return flux, median_snr
3d00ccb6163962dbfdcedda7aa565dfc549e1f2b
33,089
def compute_nearest_neighbors(fit_embeddings_matrix, query_embeddings_matrix,
                              n_neighbors, metric='cosine'):
    """Compute nearest neighbors.

    Args:
        fit_embeddings_matrix: NxD matrix
    """
    fit_eq_query = False
    if ((fit_embeddings_matrix.shape == query_embeddings_matrix.shape)
            and np.allclose(fit_embeddings_matrix, query_embeddings_matrix)):
        fit_eq_query = True

    if metric == 'cosine':
        distances, indices, sort_indices = compute_nearest_neighbors_cosine(
            fit_embeddings_matrix, query_embeddings_matrix, n_neighbors,
            fit_eq_query)
    else:
        raise ValueError('Use cosine distance.')
    return distances, indices, sort_indices
1020827cbaab50d591b3741d301ebe88c4ac6d93
33,090
import re

import mariadb


def commodify_cdli_no(cdli_no):
    """
    Given a CDLI number, fetch the text of the corresponding artifact from
    the database and pass it to commodify_text
    """
    # Ensure that we have a valid artifact number:
    if re.match(r'P[0-9]{6}', cdli_no) is not None:
        art_no = int(cdli_no[1:])
    elif re.match(r'[0-9]{6}', cdli_no) is not None:
        art_no = int(cdli_no)
    else:
        raise Exception("%s: not a well-formed artifact id" % (cdli_no))

    # For the moment, only accept texts in Sumerian:
    LANG_ID_SUMERIAN = 5

    # Connect to DB:
    conn = mariadb.connect(
        user=config['db']['user'],
        password=config['db']['password'],
        host=config['db']['host'],
        port=config['db']['port'],
        database=config['db']['database']
    )
    cur = conn.cursor()

    # DB query to get text content and language:
    cur.execute(
        "SELECT transliteration, language_id FROM inscriptions "
        "INNER JOIN artifacts_languages "
        "ON inscriptions.artifact_id = artifacts_languages.artifact_id "
        "WHERE inscriptions.artifact_id=%s",
        (art_no,))

    text = None
    for atf, lang_id in cur:
        if lang_id == LANG_ID_SUMERIAN:
            text = [line.strip().split(" ") for line in atf.split("\n")]
            break

    cur.close()
    conn.close()

    if text is not None:
        return commodify_text(text, cdli_no)
    # If no text found with specified id
    # and correct language, raise exception
    raise Exception("%s: artifact not found or language not supported" % (cdli_no))
5c194f40cbde371329671712d648019ac2e43a90
33,091
import numpy as np


def zvalues(r, N=1):
    """
    Generate correlated random pairs of z-values from a normal distribution.

    The z-values are from the cumulative distribution function of the normal
    distribution.

    Args:
        r: correlation coefficient between the paired z-values
        N: number of pairs to generate

    Returns:
        pairs of random numbers
    """
    y1, y2 = box_muller(0, 1, N)
    z1 = (np.sqrt(1 + r) * y1 - np.sqrt(1 - r) * y2) / np.sqrt(2)
    z2 = (np.sqrt(1 + r) * y1 + np.sqrt(1 - r) * y2) / np.sqrt(2)
    return z1, z2
146d363d7fbb92a9152c6b05a8f38562d4cfc107
33,093
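A sanity check on the construction in zvalues above, assuming box_muller returns independent standard normals: Var(z1) = ((1+r) + (1-r)) / 2 = 1 and Cov(z1, z2) = ((1+r) - (1-r)) / 2 = r, so the pair is standard normal with correlation r. An empirical check with numpy standing in for box_muller:

import numpy as np

r, N = 0.7, 100_000
y1, y2 = np.random.standard_normal(N), np.random.standard_normal(N)
z1 = (np.sqrt(1 + r) * y1 - np.sqrt(1 - r) * y2) / np.sqrt(2)
z2 = (np.sqrt(1 + r) * y1 + np.sqrt(1 - r) * y2) / np.sqrt(2)
print(np.corrcoef(z1, z2)[0, 1])  # approximately 0.7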
def exp(fdatagrid):
    """Perform an element-wise exponential operation.

    Args:
        fdatagrid (FDataGrid): Object to whose elements the exponential
            operation is going to be applied.

    Returns:
        FDataGrid: Object whose elements are the result of exponentiating
            the elements of the original.
    """
    return fdatagrid.copy(data_matrix=np.exp(fdatagrid.data_matrix))
aef02937bf0fac701e0ae2bac75911a5a2a8ee9e
33,094
def ksvm(param, data):
    """ kernelized SVM """
    certif = np.linalg.eigvalsh(data['K'])[0]
    if certif < 0:
        data['K'] = data['K'] - 2 * certif * np.eye(data['K'].shape[0])
    optimal = {}
    if len(param['kappa']) > 1 or float('inf') not in param['kappa']:
        optimal.update(dist_rob_ksvm(param, data))
    if float('Inf') in param['kappa']:
        optimal.update(regularized_ksvm(param, data))
    return optimal
e549411b0ac12926753e2eefa968e978414829fa
33,095
import numpy as np


def _ordered_unique(arr):
    """
    Get the unique elements of an array while preserving order.
    """
    arr = np.asarray(arr)
    _, idx = np.unique(arr, return_index=True)
    return arr[np.sort(idx)]
c4e2578a41d7481b602c4251890276dc2a92dbe9
33,096
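A quick usage sketch of _ordered_unique above; unlike a plain np.unique, first-appearance order is preserved:

print(_ordered_unique([3, 1, 3, 2, 1]))  # [3 1 2], not the sorted [1 2 3]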
def is_byte_array(value, count):
    """Returns whether the given value is the Python equivalent of a byte
    array."""
    return (isinstance(value, tuple)
            and len(value) == count
            and all(map(lambda x: 0 <= x <= 255, value)))
16793415885ea637aecbeeefe24162d6efe9eb39
33,097
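A short usage sketch of is_byte_array above (values invented for illustration):

print(is_byte_array((0, 128, 255), 3))  # True
print(is_byte_array((0, 300), 2))       # False: 300 is outside the byte range
print(is_byte_array([0, 1], 2))         # False: only tuples qualify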
def _FilterSubstructureMatchByAtomMapNumbers(Mol, PatternMol, AtomIndices,
                                             AtomMapIndices):
    """Filter substructure match atom indices by atom map indices
    corresponding to atom map numbers.
    """
    if AtomMapIndices is None:
        return list(AtomIndices)
    return [AtomIndices[Index] for Index in AtomMapIndices]
3594a11452848c9ae11f770fa560fe29d68aa418
33,098
def questions_for_written_answer_tabled_in_range(start, end):
    """Returns a list of all Questions for Written Answer tabled in date
    range.
    """
    try:
        _start = start.isoformat()
    except AttributeError:
        return []
    try:
        _end = end.isoformat()
    except AttributeError:
        return []
    q_args = {'startDate': _start, 'endDate': _end}
    resp = _call_questions_service("GetQuestionsForWrittenAnswer_TabledInRange",
                                   **q_args)
    return _parse_list_response(resp, "QuestionsList", "Question")
5702005be754bb7485fb81e49ff9aab6fbc1d549
33,099
def get_headers_token(security_scopes: SecurityScopes,
                      encoded: str = Depends(get_reusable_oauth2())) -> Token:
    """
    This FastAPI dependency *will not* result in an argument added to the
    OpenAPI spec. This should generally be used for dependencies involving
    the access token.
    """
    return get_validated_token(security_scopes.scopes, encoded)
f7c9794fcd4b95f1e541b98203bfbe4fb1a6ec60
33,100
def worker_mode(self):
    """
    bool: Whether or not all MPI ranks are in worker mode, in which all
    worker ranks are listening for calls from the controller rank. If
    *True*, all workers are continuously listening for calls made with
    :meth:`~_make_call` until set to *False*. By default, this is *False*.

    Setting this value to *True* allows for easier use of *PRISM* in
    combination with serial/OpenMP codes (like MCMC methods).
    """
    return bool(self._worker_mode)
45d256e47bfeffe9e3878297c6009061801e5d8d
33,101
def _pcolor(text, color, indent=0):
    """ Colorized print to standard output """
    esc_dict = {
        'black': 30, 'red': 31, 'green': 32, 'yellow': 33,
        'blue': 34, 'magenta': 35, 'cyan': 36, 'white': 37, 'none': -1
    }
    if esc_dict[color] != -1:
        return (
            '\033[{color_code}m{indent}{text}\033[0m'.format(
                color_code=esc_dict[color],
                indent=' ' * indent,
                text=text
            )
        )
    return '{indent}{text}'.format(indent=' ' * indent, text=text)
b156f86b2c73c00b44b8b5fd968499549bc79389
33,102
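A small usage sketch of _pcolor above; the escape codes are standard ANSI SGR colors:

print(_pcolor("ok", "green", indent=2))    # '\x1b[32m  ok\x1b[0m', i.e. green '  ok'
print(_pcolor("plain", "none", indent=2))  # indentation only, no escape codes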
def naninterp(X, method='linear'):
    """
    ---------------------------------------------------------------------
    fill 'gaps' in data (marked by NaN) by interpolating
    ---------------------------------------------------------------------
    """
    inan = np.argwhere(np.isnan(X))
    if inan.size == 0:
        return X, inan
    elif len(inan) == 1:
        if inan > 0:
            X[inan] = X[inan - 1]
        else:
            X[inan] = X[inan + 1]
    else:
        try:
            if method != 'pchip':
                set_interp = interp1d(
                    np.transpose(np.argwhere(~np.isnan(X)))[0],
                    np.transpose(X[np.argwhere(~np.isnan(X))])[0],
                    kind=method)
                X[inan] = set_interp(inan)
            else:
                set_interp = PchipInterpolator(
                    np.transpose(np.argwhere(~np.isnan(X)))[0],
                    np.transpose(X[np.argwhere(~np.isnan(X))])[0],
                    extrapolate=False)
                X[inan] = set_interp(inan)
        except Exception:
            raise ValueError('linear interpolation with NaNs')
    return X, inan
33b95a879ead6ce93fa7cf4826c05c8086194c5f
33,103
def to_op_list(elements: list) -> OperatorList:
    """elements should be a properly written reverse polish notation
    expression to be made into OperatorLists"""
    if len(elements) == 0:
        raise InvalidExpressionError()
    new_elements = []
    for e in elements:
        if isinstance(e, Element):
            new_elements.append(e)
        elif e in operations:
            operand_2 = new_elements.pop()
            operand_1 = new_elements.pop()
            result = OperatorList(operand_1, operand_2, operation=e)
            if len(result.members) == 1:
                result = Element(repr(result.members[0]))
            new_elements.append(result)
    if len(new_elements) > 1:
        raise ValueError("was not able to process expression")
    if type(new_elements[0]) == OperatorList:
        return new_elements[0]
    return OperatorList(*new_elements, operation='+')
d849d840c4c391559dd85c5135909bc3381ed9cc
33,104
def future(fn):
    """Mark a test as expected to unconditionally fail.

    Takes no arguments, omit parens when using as a decorator.
    """
    fn_name = fn.__name__

    def decorated(*args, **kw):
        try:
            fn(*args, **kw)
        except Exception as ex:
            print("Future test '%s' failed as expected: %s " % (
                fn_name, str(ex)))
            return True
        else:
            raise AssertionError(
                "Unexpected success for future test '%s'" % fn_name)
    return _function_named(decorated, fn_name)
ae7b5fed9aea4546dcc07fa89a1d37dac89c0a94
33,105
import numpy as np


def prodXTXv(v):
    """
    Fast computation of the matrix-vector product X^T X v between a vector v,
    the matrix X and its transpose, where X is the lower-triangular matrix of
    ones (ones on and below the diagonal).

    :parameters:
        - v : (array-like) the vector v
    :return:
        - res : the result of the matrix-vector product
    """
    return np.cumsum(np.cumsum(v)[::-1])[::-1]
0e7526df922518278791c6c67c7233d2f53ca6c8
33,106
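A numeric check of the double-cumsum identity in prodXTXv above against an explicit matrix product:

import numpy as np

n = 5
X = np.tril(np.ones((n, n)))  # lower-triangular matrix of ones
v = np.random.standard_normal(n)
fast = np.cumsum(np.cumsum(v)[::-1])[::-1]
print(np.allclose(fast, X.T @ (X @ v)))  # True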
def input_fn(mode, batch_size, data_dir):
    """Input_fn using the contrib.data input pipeline for CIFAR-10 dataset.

    Args:
        mode: Standard names for model modes (tf.estimators.ModeKeys).
        batch_size: The number of samples per batch of input requested.
    """
    dataset = record_dataset(filenames(mode, data_dir))

    # For training repeat forever.
    if mode == tf.estimator.ModeKeys.TRAIN:
        dataset = dataset.repeat()

    dataset = dataset.map(dataset_parser, num_threads=1,
                          output_buffer_size=2 * batch_size)

    # For training, preprocess the image and shuffle.
    if mode == tf.estimator.ModeKeys.TRAIN:
        dataset = dataset.map(train_preprocess_fn, num_threads=1,
                              output_buffer_size=2 * batch_size)

        # Ensure that the capacity is sufficiently large to provide good
        # random shuffling.
        buffer_size = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * 0.4) + 3 * batch_size
        dataset = dataset.shuffle(buffer_size=buffer_size)

    # Subtract off the mean and divide by the variance of the pixels.
    dataset = dataset.map(
        lambda image, label: (tf.image.per_image_standardization(image), label),
        num_threads=1,
        output_buffer_size=2 * batch_size)

    # Batch results by up to batch_size, and then fetch the tuple from the
    # iterator.
    iterator = dataset.batch(batch_size).make_one_shot_iterator()
    images, labels = iterator.get_next()

    return {INPUT_TENSOR_NAME: images}, labels
4e59ed0e8a7ff1151a93a457b68cb99d3a47124b
33,107
from typing import Any
from typing import Set
import inspect
from unittest.mock import Mock


def _get_default_arguments(obj: Any) -> Set[str]:
    """Get the names of the default arguments on an object

    The default arguments are determined by comparing the object to one
    constructed from the object's class's initializer with Mocks for all
    positional arguments

    Arguments:
        obj: the object to find default arguments on

    Returns:
        the set of default arguments
    """
    cls = type(obj)
    obj_sig = inspect.signature(cls.__init__)
    try:
        mocked_obj = cls(
            **{
                param.name: Mock()
                for param in obj_sig.parameters.values()
                if param.default == param.empty
                and param.kind != param.VAR_KEYWORD
                and param.name != "self"
            }
        )
    except Exception:
        return set()
    return {
        key
        for key, value in obj.__dict__.items()
        if key in mocked_obj.__dict__ and mocked_obj.__dict__[key] == value
    }
483fe82dd79aadfe1da387fb0c602beb503f344b
33,108
from typing import Union
from typing import Optional
import logging

import mysql.connector


def execute_query(
    connection: mysql.connector.connection_cext.CMySQLConnection,
    sql_query: str,
    data: Union[dict, tuple],
    commit=True,
) -> Optional[int]:
    """
    Execute and commit MySQL query

    Parameters
    ----------
    connection : mysql.connector.connection_cext.CMySQLConnection
        mysql connection class
    sql_query : str
        SQL query
    data : Union[dict, tuple]
        Parameters to bind into the query
    commit : bool, optional
        Make database change persistent, by default True

    Returns
    -------
    Optional[int]
        Query id should be int
    """
    cursor = connection.cursor()
    try:
        cursor.execute(sql_query, data)
        if commit:
            connection.commit()
            logging.info("MySQL committed ...")
        id = cursor.lastrowid
        cursor.close()
        return id
    except mysql.connector.errors.Error as e:
        logging.error(f"{e}")
7c7fbeb7880d2b4efd758387af860dc6af05bbfd
33,109
def rpn_targets(anchors, bbox_gt, config):
    """Build the targets for training the RPN

    Arguments
    ---------
    anchors: [N, 4]
        All potential anchors in the image
    bbox_gt: [M, 4]
        Ground truth bounding boxes
    config: Config
        Instance of the Config class that stores the parameters

    Returns
    -------
    rpn_match: [N, 1]
        Array same length as anchors with 1=positive, 0=neutral, -1=negative
    rpn_bbox: [config.TRAIN_ANCHORS_PER_IMAGE, 4]
        Array that stores the bounding box shifts needed to adjust the
        positive anchors by
    """
    # Outputs
    rpn_match = np.zeros(anchors.shape[0], np.float32)
    rpn_bbox = np.zeros((config.TRAIN_ANCHORS_PER_IMAGE, 4), np.float32)

    # Find the iou between all anchors and all bboxes
    iou_mat = iou_matrix(anchors, bbox_gt)

    # Find best bbox index for each anchor
    best_bboxs = np.argmax(iou_mat, axis=1)
    # Find best anchor for every bbox
    best_anchors = np.argmax(iou_mat, axis=0)
    # Create the IOU matrix
    anchor_iou = iou_mat[np.arange(0, iou_mat.shape[0]), best_bboxs]

    # Set the ground truth values for RPN match
    rpn_match[anchor_iou < .3] = -1
    rpn_match[anchor_iou > .7] = 1
    # Assign a value to all bboxes - note there will be duplicates
    rpn_match[best_anchors] = 1

    # There can only be 1:1 ratio of positive anchors to negative anchors at max
    positive_anchors = np.where(rpn_match == 1)[0]
    if len(positive_anchors) > config.TRAIN_ANCHORS_PER_IMAGE // 2:
        set_to_zero = np.random.choice(
            positive_anchors,
            len(positive_anchors) - config.TRAIN_ANCHORS_PER_IMAGE // 2,
            replace=False)
        # Set extras to zero
        rpn_match[set_to_zero] = 0
        # Reset positive anchors
        positive_anchors = np.where(rpn_match == 1)[0]

    # Set negative anchors to the difference between allowed number of total
    # anchors and the positive anchors
    negative_anchors = np.where(rpn_match == -1)[0]
    set_to_zero = np.random.choice(
        negative_anchors,
        len(negative_anchors) - (config.TRAIN_ANCHORS_PER_IMAGE
                                 - len(positive_anchors)),
        replace=False)
    rpn_match[set_to_zero] = 0
    # Reset negative anchors
    negative_anchors = np.where(rpn_match == -1)[0]

    # Create the RPN bbox targets
    target_anchors = anchors[positive_anchors]
    # The anchor adjustments are assigned to the top half or less of rpn_bbox,
    # the rest are zeros.
    for idx in range(target_anchors.shape[0]):
        # Get the closest bbox and target corresponding anchor
        bbox = bbox_gt[best_bboxs[positive_anchors[idx]]]
        anchor = target_anchors[idx]

        # Bbox dimensions and centroids
        bbox_height = bbox[2] - bbox[0]
        bbox_width = bbox[3] - bbox[1]
        bbox_y = np.mean([bbox[2], bbox[0]])
        bbox_x = np.mean([bbox[3], bbox[1]])

        # Anchor dimensions and centroids
        anchor_height = anchor[2] - anchor[0]
        anchor_width = anchor[3] - anchor[1]
        anchor_y = np.mean([anchor[2], anchor[0]])
        anchor_x = np.mean([anchor[3], anchor[1]])

        # Adjustment in normalized coordinates
        adjustment = np.array([(bbox_y - anchor_y) / anchor_height,
                               (bbox_x - anchor_x) / anchor_width,
                               np.log(bbox_height / anchor_height),
                               np.log(bbox_width / anchor_width)])
        # Normalize further by dividing by std
        normalized_adjustment = adjustment / config.RPN_BBOX_STD_DEV
        # Set the ground truth rpn bbox
        rpn_bbox[idx] = normalized_adjustment

    return rpn_match, rpn_bbox
3775c2c3222911377a85ddaae802a625a181a9d9
33,110
from datetime import date


def add_years(d, years):
    """Return a date that's `years` years after the date (or datetime).

    Return the same calendar date (month and day) in the destination year,
    if it exists, otherwise fall back to February 28 of the destination year
    (the only date this can happen for is February 29).
    """
    try:
        return d.replace(year=d.year + years)
    except ValueError:
        return d + (date(d.year + years, 3, 1) - date(d.year, 3, 1))
325085694b92f39e3ed8d22f690b5b520cdcbe5f
33,111
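A quick usage sketch of add_years above, including the February 29 edge case:

from datetime import date

print(add_years(date(2020, 2, 29), 1))  # 2021-02-28: Feb 29 has no match in 2021
print(add_years(date(2020, 5, 17), 3))  # 2023-05-17: ordinary dates map directly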
def adj_to_knn(adj, n_neighbors):
    """convert the adjacency matrix of a nearest neighbor graph to the
    indices and weights for a knn graph.

    Arguments
    ---------
        adj: matrix (`.X`, dtype `float32`)
            Adjacency matrix (n x n) of the nearest neighbor graph.
        n_neighbors: 'int' (optional, default 15)
            The number of nearest neighbors of the kNN graph.

    Returns
    -------
        idx: :class:`~numpy.ndarray`
            The matrix (n x n_neighbors) that stores the indices for each
            node's n_neighbors nearest neighbors.
        wgt: :class:`~numpy.ndarray`
            The matrix (n x n_neighbors) that stores the weights on the edges
            for each node's n_neighbors nearest neighbors.
    """
    n_cells = adj.shape[0]
    idx = np.zeros((n_cells, n_neighbors), dtype=int)
    wgt = np.zeros((n_cells, n_neighbors), dtype=adj.dtype)

    for cur_cell in range(n_cells):
        # returns the coordinate tuple for non-zero items
        cur_neighbors = adj[cur_cell, :].nonzero()

        # set itself as the nearest neighbor
        idx[cur_cell, :] = cur_cell
        wgt[cur_cell, :] = 0

        # there could be more or less than n_neighbors because of an
        # approximate search
        cur_n_neighbors = len(cur_neighbors[1])
        if cur_n_neighbors > n_neighbors - 1:
            sorted_indices = np.argsort(
                adj[cur_cell][:, cur_neighbors[1]].A)[0][:(n_neighbors - 1)]
            idx[cur_cell, 1:] = cur_neighbors[1][sorted_indices]
            wgt[cur_cell, 1:] = adj[cur_cell][
                0, cur_neighbors[1][sorted_indices]].A
        else:
            idx_ = np.arange(1, (cur_n_neighbors + 1))
            idx[cur_cell, idx_] = cur_neighbors[1]
            wgt[cur_cell, idx_] = adj[cur_cell][:, cur_neighbors[1]].A

    return idx, wgt
baed2ea35131705bf99fe01f5ffd6924eb689f32
33,112
def typeck(banana_file):
    """
    Type-check the provided BananaFile instance.
    If it type-checks, it returns the associated TypeTable.

    :type banana_file: ast.BananaFile
    :param banana_file: The file to typecheck.
    :rtype: typetbl.TypeTable
    :return: Returns the TypeTable for this BananaFile
    """
    type_table = typetbl.TypeTable()
    statement_index = 0
    for stmt in banana_file.statements:
        lhs, rhs = stmt
        type_computed = typeck_rhs(rhs, type_table)
        type_table.set_type(lhs, type_computed, statement_index)
        statement_index += 1
    conn.typeck_connections(banana_file.connections, type_table)
    return type_table
71516db1fcb34666b35f2f62736e585d46766c5c
33,113
import aiohttp
import async_timeout


async def authenticate(
    session: aiohttp.ClientSession, username: str, password: str
) -> str:
    """Authenticate and return a token."""
    with async_timeout.timeout(10):
        resp = await session.request(
            "post",
            BASE_URL + "authenticate",
            data={"username": username, "password": password},
            raise_for_status=True,
        )
        data = await resp.json()
        return data["token"]
5728503c49e2173f872eda7fbd5152f67f50d6c2
33,115
async def create_individual_sensors(
    hass: HomeAssistantType, sensor_config: dict
) -> list[SensorEntity]:
    """Create entities (power, energy, utility_meters) which track the
    appliance."""

    source_entity = await create_source_entity(sensor_config[CONF_ENTITY_ID], hass)

    try:
        power_sensor = await create_power_sensor(hass, sensor_config, source_entity)
    except PowercalcSetupError:
        return []

    entities_to_add = [power_sensor]

    if sensor_config.get(CONF_CREATE_ENERGY_SENSOR):
        energy_sensor = await create_energy_sensor(
            hass, sensor_config, power_sensor, source_entity
        )
        entities_to_add.append(energy_sensor)

        if sensor_config.get(CONF_CREATE_UTILITY_METERS):
            meter_types = sensor_config.get(CONF_UTILITY_METER_TYPES)
            for meter_type in meter_types:
                entities_to_add.append(
                    create_utility_meter_sensor(energy_sensor, meter_type)
                )

    return entities_to_add
3112b85721fe4692ddec3f15c13ae9e5192dfb55
33,116
def collatz(number):
    """If number is even (number // 2) else (3 * number + 1)

    Args:
        number (int): number to collatz

    Returns:
        int: collatz number
    """
    if (number % 2) == 0:
        print(number // 2)
        return number // 2
    print(3 * number + 1)
    return 3 * number + 1
221fd238bd6d0c40c9cb80be2c58746bb206c17b
33,117
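A short driver sketch: iterating the step in collatz above from 6 walks the sequence 6 → 3 → 10 → 5 → 16 → 8 → 4 → 2 → 1:

n = 6
while n != 1:
    n = collatz(n)  # prints 3, 10, 5, 16, 8, 4, 2, 1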
import networkx as nx
import numpy as np


def maximum_clique(adjacency):
    """Maximum clique of an adjacency matrix.

    Parameters
    ----------
    adjacency : (M, M) array
        Adjacency matrix.

    Returns
    -------
    maximum_clique : list with length = size of maximum clique
        Row indices of maximum clique coordinates. Set to False if no
        maximum clique is found.
    """
    G = nx.Graph(adjacency)
    cliques = list(nx.find_cliques(G))
    if len(cliques) < 1:
        print('No maximal cliques found.')
        return False
    clique_sizes = [len(i) for i in cliques]
    maximum_clique = cliques[np.argmax(np.array(clique_sizes))]
    return maximum_clique
a8cf5cb67b74f334ea632263f68127b35f8e7816
33,118
def get_disk_at(board, position):
    """
    Return the disk at the given position on the given board.

    - None is returned if there is no disk at the given position.
    - The function also returns None if no disk can be obtained from the
      given board at the given position. This is for instance the case if a
      string, a number, ... is passed instead of a board or a position, if
      the given position is outside the boundaries of the given board, ...

    ASSUMPTIONS
    - None (same remark as for the function dimension)
    """
    if not isinstance(position, (tuple, list)) or not isinstance(board, dict):
        return None
    # dict.get returns None for positions outside the board, as the docstring
    # promises (a plain board[position] would raise KeyError there).
    return board.get(tuple(position))
4b793ce1947b2f71d666b1d1676ca894f43c3b58
33,119
def incr_key_store(key, amount=1):
    """
    increments value of key in store with amount

    :param key: key of the data
    :param amount: amount to add
    :return: new value
    """
    if get_use_redis():
        return rds.incr(str(key), amount)
    else:
        if exists_key_store(key):
            value = get_key_store(key)
            set_key_store(key, value + amount)
            return value + amount
        else:
            set_key_store(key, amount)
            return amount
b31d024952c133863f38a6504d4c39e92493f27a
33,120
def firstUniqChar(s):
    """
    :type s: str
    :rtype: int
    """
    if len(s) == 0:
        return -1
    if len(s) == 1:
        return 0
    hash_table = {}
    for i in s:
        if i not in hash_table:
            hash_table[i] = 1
        else:
            hash_table[i] += 1
    for i in s:
        if hash_table[i] <= 1:
            return s.find(i)
    return -1
25148de95099094991339bb0fe6815644e5b94cb
33,121
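A brief usage sketch of firstUniqChar above:

print(firstUniqChar("leetcode"))      # 0: 'l' never repeats
print(firstUniqChar("loveleetcode"))  # 2: 'v' is the first non-repeating char
print(firstUniqChar("aabb"))          # -1: every character repeats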
import scipy.sparse as sp


def sum_of_powers_of_transition_matrix(adj, pow):
    """Computes \sum_{r=1}^{pow} (D^{-1}A)^r.

    Parameters
    -----
    adj: sp.csr_matrix, shape [n_nodes, n_nodes]
        Adjacency matrix of the graph
    pow: int
        Power exponent

    Returns
    ----
    sp.csr_matrix
        Sum of powers of the transition matrix of a graph.
    """
    deg = adj.sum(1).A1
    deg[deg == 0] = 1
    transition_matrix = sp.diags(1 / deg).dot(adj)

    sum_of_powers = transition_matrix
    last = transition_matrix
    for i in range(1, pow):
        last = last.dot(transition_matrix)
        sum_of_powers += last

    return sum_of_powers
6b20b6cf8f9bba2d04672a04401f265f751cd5f0
33,122
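A hedged sanity check of sum_of_powers_of_transition_matrix above on a two-node graph: the row-normalized transition matrix of [[0,1],[1,0]] is itself, so the sum of its first two powers is the all-ones matrix:

import numpy as np
import scipy.sparse as sp

adj = sp.csr_matrix(np.array([[0., 1.], [1., 0.]]))
print(sum_of_powers_of_transition_matrix(adj, 2).toarray())
# [[1. 1.]
#  [1. 1.]]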
from typing import Optional
from typing import Union
from typing import Callable


def get_current_sites(
    brand_id: Optional[BrandID] = None, *, include_brands: bool = False
) -> set[Union[Site, SiteWithBrand]]:
    """Return all "current" (i.e. enabled and not archived) sites."""
    query = db.session.query(DbSite)

    if brand_id is not None:
        query = query.filter_by(brand_id=brand_id)

    if include_brands:
        query = query.options(db.joinedload(DbSite.brand))

    sites = query \
        .filter_by(enabled=True) \
        .filter_by(archived=False) \
        .all()

    transform: Callable[[DbSite], Union[Site, SiteWithBrand]]
    if include_brands:
        transform = _db_entity_to_site_with_brand
    else:
        transform = _db_entity_to_site

    return {transform(site) for site in sites}
7bddf17bc2d2b3b5854f6c2bf47089bbac930d0a
33,123
def get_text_editor_for_attr(traits_ui, attr):
    """ Grab the Qt QLineEdit for attr off the UI and return its text. """
    widget = get_widget_for_attr(traits_ui, attr)
    return widget.text()
6691fe0ddc13b379b1edcc5494e6378317df3bdc
33,124
def check_coords(lat, lng):
    """ Return 1 if the given lat/lng pair falls within the bounding box for
    the US, else 0.

    NB. POINTS INSIDE THE BOX ARE NOT NECESSARILY WITHIN THE US BORDERS!
    """
    if bottom <= lat <= top and left <= lng <= right:
        inside_box = 1
    else:
        inside_box = 0
    return inside_box
e9b1678b2d736dbae9c2524df08bc59a3fe1912f
33,125
async def confirm_email(body: ConfirmTokenModel, include_in_schema=False):
    """Mark a user's email as confirmed"""
    email = verify_access_token(body.token)
    if not email:
        logger.info("Error getting email")
        return JSONResponse(
            status_code=status.HTTP_401_UNAUTHORIZED,
            content={"error": "Token has expired, please sign up again."},
        )
    login = await Login.filter(email=email.lower()).first()
    if not login:
        return JSONResponse(
            status_code=status.HTTP_401_UNAUTHORIZED,
            content={"error": "Incorrect username or password"},
            headers={"WWW-Authenticate": "Bearer"},
        )
    await login.set_confirmed()
    return status.HTTP_200_OK
428ce5b9ed1e23bde91d57f16e5a18f9add3da83
33,126
import numpy as np


def create_config_hyperparameter_search(dataset_name):
    """
    Create the config file for the hyper-parameter tuning given a dataset.
    """
    hidden_layer_size = [16, 32, 64]
    n_hidden_layers = [2, 3, 4]
    learning_rate = [0.005, 0.01, 0.02, 0.05]
    lr_decay = [0.99, 0.995, 0.999, 1.0]
    pytorch_init_seeds = np.arange(10).tolist()

    sweep_config = {'program': 'workflow.py',
                    'method': 'grid',
                    'name': f'{dataset_name}_tuning'}
    metric = {
        'name': 'best_validation_loss',
        'goal': 'minimize'
    }
    parameters_dict = {
        'epochs': {'value': 3000},
        'epochs_initial_training': {'value': 1000},
        'hidden_layer_size': {'values': hidden_layer_size},
        'n_hidden_layers': {'values': n_hidden_layers},
        'learning_rate': {'values': learning_rate},
        'lr_decay': {'values': lr_decay},
        'dataset_name': {'value': dataset_name},
        'dataset_split_seed': {'value': 10},
        'pytorch_init_seed': {'values': pytorch_init_seeds},
        'threads': {'value': 1},
    }
    if '_PR' in dataset_name:
        parameters_dict['jacobian_regulariser'] = {'values': [0.001, 0.01, 0.1, 1.0]}
    else:
        parameters_dict['jacobian_regulariser'] = {'value': 0.0}
    if '_NI' in dataset_name or '_VI' in dataset_name:
        parameters_dict['resampling_region'] = {'values': ['marginal_tight',
                                                           'marginal_wide']}
    else:
        parameters_dict['resampling_region'] = {'value': None}

    sweep_config['parameters'] = parameters_dict
    sweep_config['metric'] = metric
    return sweep_config
d8635f42bf66e782c11dd9bc310b2c1ae30deff2
33,127
import sys
import hashlib
import traceback


def get_genome_set(gids=[], def_name=None):
    """Wrapper for Genome object creation, checks if cache (created through
    unique option set) exists first and returns that.

    returns dict: key = genome_id, value = Genome() object

    see: help(Genome)
    """
    if not gids:
        sys.stderr.write("No ids inputted\n")
        return
    cache_id = "_".join(sorted(gids))
    cache_md5 = hashlib.md5(cache_id.encode()).hexdigest()
    cache_obj = load_object(cache_md5)
    if cache_obj is not None:
        print("Loading Genome objects for selected genomes from cached object")
        return cache_obj
    else:
        # hack to get variable name
        if def_name is None:
            (filename, line_number,
             function_name, text) = traceback.extract_stack()[-2]
            def_name = text[:text.find('=')].strip()
        print("Loading Genome objects for selected genomes through API. "
              "Please wait, this may take several minutes ...")
        new_obj = dict([(x, Genome(genome_id=x,
                                   def_name="%s['%s']" % (def_name, x)))
                        for x in gids])
        save_object(new_obj, cache_md5)
        print("Done loading through API")
        return new_obj
ea3cc9f5094b5ac21c760c3f2dfa2f0072c3ead4
33,128
def sample_tag(user: User, name: str = "Main course") -> Tag:
    """Create and return a sample tag"""
    return Tag.objects.create(user=user, name=name)
d0181ae04479661bde1ed4d72c6e871de95a016a
33,129
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False):
    """Expresses to what extent the local structure is retained.

    The trustworthiness is within [0, 1]. It is defined as

        T(k) = 1 - 2 / (n k (2n - 3k - 1))
                   * sum_i sum_{j in N_i(k)} max(0, r(i, j) - k)

    where N_i(k) are the k nearest neighbors of sample i in the embedded
    space and r(i, j) is the rank of sample j among the input-space
    neighbors of sample i (this matches the computation below).

    Returns
    -------
    trustworthiness : float
        Trustworthiness of the low-dimensional embedding.
    """
    if precomputed:
        dist_X = X
    else:
        dist_X = cdist(X, X, p=2.)
    dist_X_embedded = cdist(X_embedded, X_embedded, p=2.)
    ind_X = np.argsort(dist_X, axis=1)
    ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1]

    n_samples = X.shape[0]
    t = 0.0
    ranks = np.zeros(n_neighbors)
    for i in range(n_samples):
        for j in range(n_neighbors):
            ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0]
        ranks -= n_neighbors
        t += np.sum(ranks[ranks > 0])
    t = 1.0 - t * (2.0 / (n_samples * n_neighbors
                          * (2.0 * n_samples - 3.0 * n_neighbors - 1.0)))
    return t
d5db0a3c8b3ecbdb3375171a9601320bd6628020
33,131
import numpy as np
from pandas import DataFrame
from scipy.stats import entropy


def compute_logomaker_df(adata, indices, fixed_length: int = None):
    """
    The sample names (adata.obs_names) must be strings made of amino acid
    characters. The list of allowed characters is stored in the variable
    aminoacids.
    """
    if fixed_length is None:
        pos_list = np.arange(max([len(adata.obs_names[idx]) for idx in indices]))
        total_size = len(indices)
    else:
        pos_list = np.arange(fixed_length)
        total_size = 0
        for idx in indices:
            if len(adata.obs_names[idx]) == fixed_length:
                total_size += 1
    if total_size == 0:
        raise ValueError("Cannot compute logo on an empty set.")
    probability_matrix = np.zeros((len(pos_list), len(aminoacids)))
    for position in pos_list:
        counts = {}
        for aa in aminoacids:
            counts[aa] = 0
        for idx in indices:
            if fixed_length is None:
                if position < len(adata.obs_names[idx]):
                    counts[adata.obs_names[idx][position]] += 1
            else:
                if len(adata.obs_names[idx]) == fixed_length:
                    counts[adata.obs_names[idx][position]] += 1
        for k in range(len(aminoacids)):
            probability_matrix[position, k] = counts[aminoacids[k]] / total_size
    # from probabilities to bits:
    max_entropy = -np.log2(1 / len(aminoacids))
    for position in pos_list:
        pos_entropy = max_entropy - entropy(
            1e-10 + probability_matrix[position, :], base=2
        )
        probability_matrix[position, :] *= pos_entropy
    dico = {"pos": pos_list}
    for k in range(len(aminoacids)):
        dico[aminoacids[k]] = probability_matrix[:, k]
    df_ = DataFrame(dico)
    df_ = df_.set_index("pos")
    return df_
760715143605bb9cc1b632978a10d8bd2d56bd66
33,132
from typing import Optional
from typing import Tuple
import re


def parse_test_stats_from_output(output: str,
                                 fail_type: Optional[str]) -> Tuple[int, int]:
    """Parse tasks output and determine test counts.

    Return tuple (number of tests, number of test failures). Default to the
    entire task representing a single test as a fallback.
    """
    # pytest
    m = re.search(r'^=+ (.*) in [0-9.]+ seconds =+\n\Z', output, re.MULTILINE)
    if m:
        counts = {}
        for part in m.group(1).split(', '):
            # e.g., '3 failed, 32 passed, 345 deselected'
            count, key = part.split()
            counts[key] = int(count)
        return (sum(c for k, c in counts.items() if k != 'deselected'),
                counts.get('failed', 0))

    # myunit
    m = re.search(r'^([0-9]+)/([0-9]+) test cases failed(, ([0-9]+) skipped)?.$',
                  output, re.MULTILINE)
    if m:
        return int(m.group(2)), int(m.group(1))
    m = re.search(r'^([0-9]+) test cases run(, ([0-9]+) skipped)?, all passed.$',
                  output, re.MULTILINE)
    if m:
        return int(m.group(1)), 0

    # Couldn't find test counts, so fall back to single test per tasks.
    if fail_type is not None:
        return 1, 1
    else:
        return 1, 0
8ec67d226c2280eb08de3589cba7b6aa0a09024c
33,133
from typing import List
from typing import Tuple
from typing import Dict


def _constraint_items_missing_from_collection(
    constraints: List[Tuple], collection: Dict[str, int]
) -> List[str]:
    """
    Determine the constrained items that are not specified in the collection.
    """
    constrained_items = set()
    for constraint in constraints:
        if len(constraint) > 1:
            constrained_items.add(constraint[1])
    return sorted(constrained_items - collection.keys())
918667f1e8b001637c9adf00ef5323b2e8587775
33,134
import scipy


def coldpool_edge_shear_direction_split(
    tv0100,
    ds_profile,
    l_smoothing=L_SMOOTHING_DEFUALT,
    l_edge=L_EDGE_DEFAULT,
    d_theta_v=COLDPOOL_THRESHOLD_DEFAULT,
    shear_calc_z_max=SHEAR_DIRECTION_Z_MAX_DEFAULT,
    profile_time_tolerance=60.0,
):
    """
    Computes a mask for the edge of coldpools in the upshear direction by
    comparing the direction of the coldpool edge to the mean shear (up to
    `shear_calc_z_max`)
    """
    ds_edge = coldpool_edge(
        tv0100=tv0100, l_smoothing=l_smoothing, l_edge=l_edge, d_theta_v=d_theta_v
    )

    ds = xr.Dataset(coords=ds_edge.coldpool.coords)
    ds["shear_calc_time_tolerance"] = ((), profile_time_tolerance, dict(units="s"))
    ds["shear_calc_z_max"] = ((), shear_calc_z_max, dict(units="m"))
    ds["l_edge"] = ((), l_edge, dict(units="m"))

    # make a stencil which will pick out only neighbouring cells
    dx = np.max(np.gradient(ds.xt))
    nx_disk = int(0.5 * l_edge / dx)
    m_neigh = skimage.morphology.disk(nx_disk)
    m_neigh[nx_disk, nx_disk] = 0

    # convolve with stencil to count how many coldpool cells are near a
    # particular edge cell
    n_coldpool = np.where(
        ds_edge.coldpool_edge,
        scipy.ndimage.convolve(
            # cast to int here so we have range that bool doesn't supply
            ds_edge.coldpool.astype(int),
            m_neigh,
            mode="wrap",
        ),
        np.nan,
    )
    ds["n_coldpool"] = (ds_edge.coldpool.dims, n_coldpool)

    def _find_mean_dir(ds, x_):
        l_ = np.where(
            # only compute for cells which actually are "near" coldpool,
            # will depend on m_neigh size, should make sure n_neigh is big enough
            ds.n_coldpool > 0,
            # use positions for cells inside inner-most region of coldpool,
            # at the edge we ignore the points outside the domain
            # (working out wrapping with the positions is too hard for now...)
            scipy.ndimage.convolve(
                np.where(ds_edge.m_inner, x_, 0), m_neigh, mode="constant", cval=0.0
            ),
            np.nan,
        )
        # from sum of mean of all neighbouring directions we subtract the
        # central position
        return l_ / ds.n_coldpool - np.where(ds.n_coldpool > 0, x_, np.nan)

    x, y = np.meshgrid(ds.xt, ds.yt)
    print("Finding x-component of direction for edge")
    lx = _find_mean_dir(ds=ds, x_=x)
    print("Finding y-component of direction for edge")
    ly = _find_mean_dir(ds=ds, x_=y)

    print("Defining edge direction vector for each point")
    dims = tuple(["component"] + list(ds_edge.coldpool.dims))
    # use raw values for significant speedup
    ds["edge_direction"] = (dims, [lx.values, ly.values])
    ds.edge_direction.values /= np.linalg.norm(ds.edge_direction.values, axis=0)

    print("Identifying mean shear direction")
    time = ds.time.values
    p_sel = ds_profile.sel(
        time=time, method="nearest", tolerance=profile_time_tolerance
    )
    u_wind, v_wind = p_sel.u.squeeze(), p_sel.v.squeeze()
    # have to select on u and v separately here because UCLALES (incorrectly)
    # labels v-wind as existing at the cell interface
    z_max = shear_calc_z_max
    dudz_mean = np.gradient(u_wind.sel(zt=slice(0, z_max))).mean()
    dvdz_mean = np.gradient(v_wind.sel(zm=slice(0, z_max))).mean()
    shear_dir = np.array([dudz_mean, dvdz_mean])
    shear_dir /= np.linalg.norm(shear_dir)

    # note that y-direction should be first argument to arctan
    # https://docs.scipy.org/doc/numpy-1.12.0/reference/generated/numpy.arctan2.html
    ds["mean_shear_direction"] = np.arctan2(shear_dir[1], shear_dir[0]) * 180.0 / pi
    ds.mean_shear_direction.attrs["units"] = "deg"

    # compute similarity in direction between shear and coldpool edge
    nx, ny = ds_edge.coldpool.shape
    co_dir = np.dot(
        shear_dir, ds.edge_direction.values.reshape((2, nx * ny))
    ).reshape((nx, ny))
    co_dir = np.where(ds_edge.coldpool_edge > 0, co_dir, np.nan)

    ds["coldpool_edge_upshear"] = (
        ds_edge.coldpool.dims,
        co_dir < 0.0,
        dict(longname="coldpool edge in upshear direction"),
    )
    ds["coldpool_edge_downshear"] = (
        ds_edge.coldpool.dims,
        co_dir > 0.0,
        dict(longname="coldpool edge in downshear direction"),
    )

    return ds
9a8f97005c1ed638828a9a9ae00ffbbf516606c3
33,135
import json


def store_audio_tracks():
    """
    Store the audio tracks of an event identified after probing the first
    HLS video segment.

    Body:

    .. code-block:: python

        {
            "Name": string,
            "Program": string,
            "AudioTracks": list
        }

    Returns:
        None

    Raises:
        500 - ChaliceViewError
    """
    try:
        event = json.loads(app.current_request.raw_body.decode())
        name = event["Name"]
        program = event["Program"]
        audio_tracks = event["AudioTracks"]

        print(f"Storing the audio tracks '{audio_tracks}' of event '{name}' "
              f"in program '{program}' in the DynamoDB table '{EVENT_TABLE_NAME}'")

        event_table = ddb_resource.Table(EVENT_TABLE_NAME)
        event_table.update_item(
            Key={
                "Name": name,
                "Program": program
            },
            UpdateExpression="SET #AudioTracks = :AudioTracks",
            ExpressionAttributeNames={
                "#AudioTracks": "AudioTracks"
            },
            ExpressionAttributeValues={
                ":AudioTracks": audio_tracks
            }
        )

    except Exception as e:
        print(f"Unable to store the audio tracks '{audio_tracks}' of event "
              f"'{name}' in program '{program}': {str(e)}")
        raise ChaliceViewError(
            f"Unable to store the audio tracks '{audio_tracks}' of event "
            f"'{name}' in program '{program}': {str(e)}")

    else:
        print(f"Successfully stored the audio tracks '{audio_tracks}' of "
              f"event '{name}' in program '{program}'")
        return {}
4310bd8bfa7e65b4be113bfac93db74957b4e822
33,136
import operator


def isqrt(n):
    """
    Return the integer part of the square root of the input.
    (math.isqrt from Python 3.8)
    """
    n = operator.index(n)
    if n < 0:
        raise ValueError("isqrt() argument must be nonnegative")
    if n == 0:
        return 0
    c = (n.bit_length() - 1) // 2
    a = 1
    d = 0
    for s in reversed(range(c.bit_length())):
        # Loop invariant: (a-1)**2 < (n >> 2*(c - d)) < (a+1)**2
        e = d
        d = c >> s
        a = (a << d - e - 1) + (n >> 2*c - e - d + 1) // a
    return a - (a*a > n)
b841bc3907c15677ddc97a5c5366b1a0312d12b6
33,137
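A quick check of isqrt above on exact squares, near-squares, and an input where floating-point sqrt would be off:

print(isqrt(49))                      # 7
print(isqrt(50))                      # 7: floor of ~7.07
print(isqrt((10**10 + 1)**2 - 1))     # 10000000000: one below the next perfect square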
import csv


def main(file_in, file_out):
    """
    Read in lines, flatten list of lines to list of words,
    sort and tally words, write out tallies.
    """
    with open(file_in, 'r') as f_in:
        word_lists = [line.split() for line in f_in]

    # Flatten the list
    # http://stackoverflow.com/questions/952914/\
    # making-a-flat-list-out-of-list-of-lists-in-python
    words = [word for word_list in word_lists for word in word_list]

    # Note: words.count is O(n) per word, so this counting pass is O(n^2);
    # collections.Counter is the usual linear-time alternative.
    counts = map(words.count, words)
    tallies = sorted(set(zip(words, counts)))

    with open(file_out, 'w') as f_out:
        writer = csv.writer(f_out, quoting=csv.QUOTE_NONNUMERIC)
        for (word, count) in tallies:
            writer.writerow([word, count])

    return None
225dd5d5b4c2bcb158ee61aa859f78e1e608a5fe
33,138
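# A small end-to-end sketch for main() above; 'in.txt' and 'out.csv' are
# hypothetical file names used only for this demonstration.
with open('in.txt', 'w') as f:
    f.write('b a\na b b\n')
main('in.txt', 'out.csv')
print(open('out.csv').read())
# "a",2
# "b",3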
# assumed framework: the Response(body=..., status_code=...) signature
# matches AWS Chalice, which other snippets in this collection also use
from chalice import Response


def check_api_key(current_request):
    """
    Check if an API Key for GitHub was provided and
    return a 403 if no x-api-key header was sent by the client.
    """
    x_api_key = current_request.headers.get('x-api-key', False)
    if not x_api_key:
        return Response(
            body='Missing x-api-key header',
            status_code=403,
        )
cb0b76e8b76135fe4498fdc66f34b1f5184441d1
33,139
def read_mm_stamp(fh, byteorder, dtype, count): """Read MM_STAMP tag from file and return as numpy.array.""" return numpy_fromfile(fh, byteorder+'8f8', 1)[0]
f575243ecfa67160bdbd90a476fd9fd3c6b0bed8
33,140
import numpy as np


def Qfromq(q):
    """ converts five-element set q of unique Q-tensor elements
    to the full 3x3 Q-tensor matrix """
    return np.array(
        [
            [q[0], q[1], q[2]],
            [q[1], q[3], q[4]],
            [q[2], q[4], -q[0] - q[3]]
        ]
    )
cebc58a8023588fffd0a504e8894cd2c075cde3a
33,141
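# A quick property check for Qfromq above: the reconstructed Q-tensor is
# symmetric and traceless by construction (Q[2, 2] = -q[0] - q[3]).
import numpy as np

Q = Qfromq([0.1, 0.2, -0.05, 0.3, 0.15])
assert np.allclose(Q, Q.T)            # symmetric
assert np.isclose(np.trace(Q), 0.0)   # traceless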
def load_atomic(val): """ Load a std::atomic<T>'s value. """ valty = val.type.template_argument(0) # XXX This assumes std::atomic<T> has the same layout as a raw T. return val.address.reinterpret_cast(valty.pointer()).dereference()
307bcc9d3eae2eede6a8e2275104280a1e7b4b94
33,142
import numpy as np
from scipy.stats import beta


def sample_trunc_beta(a, b, lower, upper):
    """
    Samples from a truncated beta distribution in log space

    Parameters
    ----------
    a, b: float
        Canonical parameters of the beta distribution
    lower, upper: float
        Lower and upper truncations of the beta distribution

    Returns
    -------
    s: float
        Sampled value from the truncated beta distribution in log space
    """
    # Check boundaries are correct
    if upper < lower:
        raise ValueError("upper truncation must not be below lower truncation")

    # If a=1 and b=1, then we're sampling truncated uniform distribution
    # (i.e. peak formula below is not valid, but also not needed)
    if a == 1 and b == 1:
        s = np.random.uniform(low=lower, high=upper)
        return s

    # Get location of peak of distribution to determine type of sampling
    # (assumes a + b != 2 here, which holds for any beta with an interior peak)
    peak = (a-1) / (a+b-2)

    # If peak of beta dist is outside truncation, use uniform rejection sampling
    if peak < lower or peak > upper:
        # Sample a proposal
        s = np.random.uniform(low=lower, high=upper)

        # Get components of rejection sampling
        log_f_s = beta.logpdf(s, a, b)
        log_g_s = -1*np.log(upper-lower)
        log_M = max(beta.logpdf(lower, a, b), beta.logpdf(upper, a, b))\
            + np.log(upper-lower)

        # Keep sampling until proposal is accepted
        while np.log(np.random.random()) > log_f_s - (log_M + log_g_s):
            s = np.random.uniform(low=lower, high=upper)
            log_f_s = beta.logpdf(s, a, b)

    # If peak of beta is inside truncation, sample from beta directly
    else:
        s = beta.rvs(a, b)

        # Keep sampling until proposal falls inside truncation boundaries
        while s < lower or s > upper:
            s = beta.rvs(a, b)

    return s
40380436b82c4f5f169e21443aabbe2d30ccf84a
33,143
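# A minimal usage sketch for sample_trunc_beta above: draw from Beta(2, 5)
# truncated to [0.2, 0.6] and verify the samples respect the bounds.
import numpy as np

np.random.seed(0)
samples = [sample_trunc_beta(2, 5, 0.2, 0.6) for _ in range(1000)]
assert all(0.2 <= s <= 0.6 for s in samples)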
import torch
import torch.distributed as dist


def get_world_size():
    # minimal stand-in for the helper defined alongside reduce_dict in
    # torchvision's reference utils; returns 1 when not running distributed
    if not dist.is_available():
        return 1
    if not dist.is_initialized():
        return 1
    return dist.get_world_size()


def reduce_dict(input_dict, average=True):
    # ref: https://github.com/pytorch/vision/blob/3711754a508e429d0049df3c4a410c4cde08e4e6/references/detection/utils.py#L118
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        return input_dict
    with torch.no_grad():
        names = []
        values = []
        # sort the keys so that they are consistent across processes
        for k in sorted(input_dict.keys()):
            names.append(k)
            values.append(input_dict[k])
        values = torch.stack(values, dim=0)
        dist.all_reduce(values)
        if average:
            values /= world_size
        reduced_dict = {k: v for k, v in zip(names, values)}
    return reduced_dict
877919878977df23fae0cecddb9042762394d083
33,144
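# A minimal sketch for reduce_dict above: with a single process
# (world_size < 2) the call is a no-op, so it is safe to use unconditionally
# in a training loop whether or not torch.distributed is initialized.
import torch

metrics = {"loss": torch.tensor(0.5), "acc": torch.tensor(0.9)}
print(reduce_dict(metrics))  # returned unchanged outside a distributed run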
def dataId_to_dict(dataId): """ Parse an LSST dataId to a dictionary. Args: dataId (dataId): The LSST dataId object. Returns: dict: The dictionary version of the dataId. """ return dataId.to_simple().dict()["dataId"]
77b7566492b80a8c6e2becacafff36737c8a7256
33,145
import numpy as np


def norm_max(tab):
    """
    Short Summary
    -------------
    Normalize an array or a list by the maximum.

    Parameters
    ----------
    `tab` : {numpy.array}, {list}
        input array or list.

    Returns
    -------
    `tab_norm` : {numpy.array}, {list}
        Normalized array.
    """
    tab_norm = tab/np.max(tab)
    return tab_norm
657f6ed358e81c6635e967d5b995663c05145c57
33,146
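# A one-line check for norm_max above: the maximum element maps to 1.0.
import numpy as np

print(norm_max(np.array([1.0, 2.0, 4.0])))  # [0.25 0.5  1.  ]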
def getHoliday(holidayName): """Returns a specific holiday. Args: holidayName (str): The name of the holiday to return. Case-sensitive. Returns: HolidayModel: The holiday, as a HolidayModel object, or None if not found. """ print(holidayName) return None
15b67fd6ac607d1ff12a216cc3c4baab61305be6
33,147
import matplotlib.pyplot as plt

# DrawOptions and the BBLike alias come from the surrounding easymunk
# matplotlib drawing module (not shown here).


def draw_object(obj, bb: "BBLike" = None, ax=None):
    """
    Draw easymunk object using matplotlib.
    """
    options = DrawOptions(ax or plt.gca(), bb=bb)
    options.draw_object(obj)
    return ax
3b7322d12290d22be6eb47ac74d30494a1c091de
33,148
import hashlib

import six


def hash_mod(text, divisor):
    """
    returns the modulo of the text's md5 hash by the given divisor
    """
    if isinstance(text, six.text_type):
        text = text.encode('utf8')
    md5 = hashlib.md5()
    md5.update(text)
    digest = md5.hexdigest()
    return int(digest, 16) % divisor
3f127837bb072df5ee609b3afa80dd04e4f7b794
33,149
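# A usage sketch for hash_mod above: stable bucket assignment, e.g. for
# sharding keys across 10 partitions; the same text always lands in the
# same bucket.
assert hash_mod('alice', 10) == hash_mod('alice', 10)
assert 0 <= hash_mod('bob', 10) < 10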
import re

# relies on normalize_mac_address, normalize_interface_name and the
# L2LoopDetection / NotFullMacAddressTableFound exceptions defined
# elsewhere in this module


def normalize_mac_address_table(input_string):
    """
    :param input_string: CLI output captured from a network device by a command like "show mac-address table"
    :return: {('mac_address', 'vlan'): 'l2_interface'}
    """
    res = {}
    same_local_mac_address_count: int = 0
    mac_address_number: int = 0
    # used when there is no line reporting the total mac address count
    mac_address_line_count: int = 0
    vlan_reg = re.compile(
        r'\s([0-9]{1,4}?)\/?-?\s|'
        r'^([0-9]{1,4}?)\/?-?\s|'
        r'All'
    )
    mac_reg = re.compile(
        r'(?:[0-9a-fA-F]{2}(?:[:-][0-9a-fA-F]{2}){5})|'
        r'(?:[0-9a-fA-F]{4}(?:\.[0-9a-fA-F]{4}){2})|'
        r'(?:[0-9a-fA-F]{4}(?:-[0-9a-fA-F]{4}){2})|'
        r'(?:[0-9a-fA-F]{6}(?:-[0-9a-fA-F]{6}))'
    )
    interface_reg = re.compile(r'Vlan\S+|'
                               r'Eth\S+|'
                               r'Fa\w+[0-9]{0,1}\/?[0-9]{0,2}\/?[0-9]{0,2}\/?[0-9]{0,2}|'
                               r'10GE\w+[0-9]{0,1}\/?[0-9]{1,2}\/?[0-9]{0,2}\/?[0-9]{0,2}|'
                               r'G\w+[0-9]{0,1}\/?[0-9]{1,2}\/?[0-9]{0,2}\/?[0-9]{0,2}|'
                               r'XGE\w+[0-9]{0,1}\/?[0-9]{1,2}\/?[0-9]{0,2}\/?[0-9]{0,2}|'
                               r'Ten-GigabitEthernet\w+[0-9]{0,1}\/?[0-9]{1,2}\/?[0-9]{0,2}\/?[0-9]{0,2}|'
                               r'Bridge-Aggregation[0-9]{0,1}\/?[0-9]{1,2}\/?[0-9]{0,2}\/?[0-9]{0,2}|'
                               r'BAGG[0-9]{0,1}\/?[0-9]{1,2}\/?[0-9]{0,2}\/?[0-9]{0,2}|'
                               r'Po\S+|'
                               r'CPU|'
                               r'Self|'
                               r'ether\d|'
                               r'sfp\d|'
                               r'bridge\d'
                               )
    procurve_mac_address_line_reg = re.compile(r'(?:[0-9a-fA-F]{6}(?:-[0-9a-fA-F]{6}))\s+?[0-9]{1,4} *([0-9]{1,4})?')
    mikrotik_mac_address_line_reg = re.compile(r'(?:[0-9A-F]{2}(?::[0-9A-F]{2}){5})')

    for line in re.split(r'\r\n|\n', input_string):
        # find line with total mac addresses number
        if 'Total Mac' in line or \
                'Total items: ' in line or \
                'mac address(es) found' in line or \
                'Total items displayed =' in line:
            mac_address_number_line = re.search(r'\d+', line)
            if mac_address_number_line:
                mac_address_number = int(mac_address_number_line.group())
        mac_address_line = mac_reg.search(line)
        if not mac_address_line:
            continue
        mac_address_line_count += 1
        l2_interface = ''
        vlan = ''  # default vlan
        procurve_mac_address_line = procurve_mac_address_line_reg.search(line)
        mikrotik_mac_address_line = mikrotik_mac_address_line_reg.search(line)
        l2_interface_line = interface_reg.search(line)
        vlan_line = vlan_reg.search(line)
        # for procurve mac address format
        if procurve_mac_address_line:
            procurve_line = procurve_mac_address_line.group()
            if len(procurve_line.split()) == 3:
                mac_address, l2_interface, vlan = procurve_line.split()
            else:
                mac_address, l2_interface = procurve_line.split()
            mac_address = normalize_mac_address(mac_address)
        # for mikrotik format
        if mikrotik_mac_address_line:
            mac_address_number, mac_address_type, mac_address, vlan, l2_interface = re.split(r'\s+', line.strip())[:5]
            mac_address_number = int(mac_address_number) + 1
            mac_address = normalize_mac_address(mac_address)
            if not vlan.isdigit():
                l2_interface = vlan
                vlan = ''
            if 'L' in mac_address_type:
                l2_interface = 'local'
        else:
            mac_address = normalize_mac_address(mac_address_line.group())
            if l2_interface_line:
                l2_interface = l2_interface_line.group()
            if vlan_line:
                vlan = vlan_line.group().split('/')[0]
        # to compare found mac address number with shown by cli
        if (mac_address, vlan) in res:
            if res[(mac_address, vlan)] == 'local':
                same_local_mac_address_count += 1
            else:
                raise L2LoopDetection(f'{normalize_interface_name(l2_interface)}, {vlan} : {mac_address} <--> '
                                      f'{res[(mac_address, vlan)]}, {vlan}: {mac_address}')
        res[(mac_address, vlan)] = normalize_interface_name(l2_interface)
    if not mac_address_number:
        mac_address_number = mac_address_line_count
    if len(res) != (mac_address_number - same_local_mac_address_count):
        raise NotFullMacAddressTableFound(f'normalized table {len(res)} != '
                                          f'{mac_address_number - same_local_mac_address_count} in cli table')
    return res
f4c010d440e6e3ea89dd35c967019096fb2269ef
33,150
def _get_exploration_memcache_key(exploration_id): """Returns a memcache key for an exploration.""" return 'exploration:%s' % exploration_id
1400607cc86f84c242201c9c9fe36a7a06cd2357
33,151
from typing import Optional, Tuple

import numpy as np


def corr_filter(
    corrs: np.ndarray,
    value_range: Optional[Tuple[float, float]] = None,
    k: Optional[int] = None,
) -> Tuple[np.ndarray, np.ndarray]:
    """
    Filter correlation values by k and value_range
    """

    assert (value_range is None) or (
        k is None
    ), "value_range and k cannot be present in both"

    sorted_idx = np.argsort(corrs)
    sorted_corrs = corrs[sorted_idx]

    if k is not None:
        # pylint: disable=invalid-unary-operand-type
        return sorted_idx[-k:], sorted_corrs[-k:]
        # pylint: enable=invalid-unary-operand-type

    if value_range is not None:
        start, end = value_range
        istart = np.searchsorted(sorted_corrs, start)
        iend = np.searchsorted(sorted_corrs, end, side="right")
        return sorted_idx[istart:iend], sorted_corrs[istart:iend]

    return sorted_idx, sorted_corrs
bbf84b19edb39ac2b787517ad1e9817bf2a042f5
33,152
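# A small sketch for corr_filter above on toy data: top-k by value, then a
# range query over the sorted correlations.
import numpy as np

corrs = np.array([0.9, -0.3, 0.5, 0.1])
print(corr_filter(corrs, k=2))                     # (array([2, 0]), array([0.5, 0.9]))
print(corr_filter(corrs, value_range=(0.0, 0.6)))  # (array([3, 2]), array([0.1, 0.5]))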
import re


def isValid(text):
    """
    Returns True if the input is related to jokes/humor.

    Arguments:
    text -- user-input, typically transcribed speech
    """
    return bool(re.search(r'WATCH|OUT', text, re.IGNORECASE))
7d94e30bf9e0da267bd4ccab1ce823aaa315eba2
33,153
def pairs_from_array(a): """ Given an array of strings, create a list of pairs of elements from the array Creates all possible combinations without symmetry (given pair [a,b], it does not create [b,a]) nor repetition (e.g. [a,a]) :param a: Array of strings :return: list of pairs of strings """ pairs = list() for i in range(len(a)): for j in range(len(a[i + 1 :])): pairs.append([a[i], a[i + 1 + j]]) return pairs
50c489d660a7e82c18baf4800e599b8a3cd083f0
33,155
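# pairs_from_array above matches itertools.combinations of size 2 (as lists
# rather than tuples); a quick equivalence check:
from itertools import combinations

a = ['x', 'y', 'z']
assert pairs_from_array(a) == [list(p) for p in combinations(a, 2)]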
def cleanse_comments(line): """Removes //-comments and single-line C-style /* */ comments. Args: line: A line of C++ source. Returns: The line with single-line comments removed. """ comment_position = line.find('//') if comment_position != -1 and not is_cpp_string(line[:comment_position]): line = line[:comment_position] # get rid of /* ... */ return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line)
d209b93070ab33d3f85f5a1d5c44ed47cde2fe91
33,156
from django.http import JsonResponse


def get_next_document_id(request, document_id):
    """Gets the id of the next document, by the current document's id

    The function is accessible for users with 'read_project' permission of
    the project and the owner of the project.

    Args:
        document_id: The id of the document.

    Returns:
        If there is no next document, returns the current document id, else
        returns the next id. The return value is converted to string. Or
        forbidden or not found.

        example:
            {"id": "1"}

    """
    # fetch_doc_check_perm is a project-level helper (permission check + lookup)
    doc = fetch_doc_check_perm(document_id, request.user, "stave_backend.read_project")
    docs = doc.project.documents
    next_doc = docs.filter(id__gt=document_id).first()
    if next_doc:
        next_id = next_doc.id
    else:
        next_id = document_id

    return JsonResponse({'id': str(next_id)}, safe=False)
c9a098077a4672e27f438dc58ce0fb3c93d528d2
33,157
def is_merged(request):
    """Makes tests try both merged and closed pull requests."""
    # meant to be registered as a parametrized pytest fixture, e.g. with
    # @pytest.fixture(params=[True, False]) in the test suite's conftest
    return request.param
d621f44b2ac3fe4a8639fd71d11e146fde9ca725
33,158
from typing import Dict, cast


def is_table(base_url: str, token: str) -> bool:
    """check if service layer is a table"""
    params: Dict[str, str] = init_params(token)
    type_layer = cast(Dict[str, str], request(base_url.rstrip('/'), params))['type']
    return type_layer.lower() == 'table'
23c67be574335689ac5e906c748e465d3d6ed0a0
33,159
def shared_options(option_list): """Define decorator for common options.""" def _shared_options(func): for option in reversed(option_list): func = option(func) return func return _shared_options
7ef551ea9879b708e6b449ce1155d47b662efd3d
33,160
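# A usage sketch for shared_options above with click (an assumption -- the
# decorator itself is framework-agnostic): define one option list and reuse
# it across several commands.
import click

COMMON_OPTIONS = [
    click.option('--verbose', is_flag=True, help='Enable verbose output.'),
    click.option('--limit', type=int, default=10, help='Max items to process.'),
]

@click.command()
@shared_options(COMMON_OPTIONS)
def sync(verbose, limit):
    click.echo(f'verbose={verbose} limit={limit}')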
from typing import Any


def complex_key(c: complex) -> Any:
    """Defines a sorting order for complex numbers."""
    return c.real != int(c.real), c.real, c.imag
55c17b0d4adf8bcfb39b50c66d5bd8133f5bb814
33,161
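# Sorting with complex_key above: numbers with integer real parts come first,
# then ordering is by real part, then by imaginary part.
vals = [2 + 1j, 1.5 + 0j, 1 + 2j, 1 + 1j]
print(sorted(vals, key=complex_key))  # [(1+1j), (1+2j), (2+1j), (1.5+0j)]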
import freud


def make_raw_query_nlist_test_set(box, points, query_points, mode, r_max,
                                  num_neighbors, exclude_ii):
    """Helper function to test multiple neighbor-finding data structures.

    Args:
        box (:class:`freud.box.Box`):
            Simulation box.
        points ((:math:`N_{points}`, 3) :class:`numpy.ndarray`):
            Reference points used to calculate the correlation function.
        query_points ((:math:`N_{query_points}`, 3) :class:`numpy.ndarray`, optional):
            query_points used to calculate the correlation function.
            Uses :code:`points` if not provided or :code:`None`.
            (Default value = :code:`None`).
        mode (str):
            String indicating query mode.
        r_max (float):
            Maximum cutoff distance.
        num_neighbors (int):
            Number of nearest neighbors to include.
        exclude_ii (bool):
            Whether to exclude self-neighbors.

    Returns:
        tuple:
            Contains points or :class:`freud.locality.NeighborQuery`,
            :class:`freud.locality.NeighborList` or :code:`None`,
            query_args :class:`dict` or :code:`None`.
    """  # noqa: E501
    test_set = []
    query_args = {'mode': mode, 'exclude_ii': exclude_ii}
    if mode == "ball":
        query_args['r_max'] = r_max

    if mode == 'nearest':
        query_args['num_neighbors'] = num_neighbors
        query_args['r_guess'] = r_max

    test_set.append(((box, points), query_args))
    test_set.append((freud.locality._RawPoints(box, points), query_args))
    test_set.append((freud.locality.AABBQuery(box, points), query_args))
    test_set.append(
        (freud.locality.LinkCell(box, points, r_max), query_args))

    aq = freud.locality.AABBQuery(box, points)
    if mode == "ball":
        nlist = aq.query(query_points,
                         dict(r_max=r_max,
                              exclude_ii=exclude_ii)
                         ).toNeighborList()
    if mode == "nearest":
        nlist = aq.query(query_points,
                         dict(num_neighbors=num_neighbors,
                              exclude_ii=exclude_ii,
                              r_guess=r_max)
                         ).toNeighborList()
    test_set.append(((box, points), nlist))
    return test_set
2f9b6bd9436f6f416d62e3056b9baf663ed3b209
33,162
def runs_new_in_exp(exp='xpptut15', procname='pixel_status', verb=0) : """Returns list of (4-char str) runs which are found in xtc directory and not yet listed in the log file, e.g. ['0059', '0060',...] """ runs_log = runs_in_log_file(exp, procname) runs_xtc = runs_in_xtc_dir(exp) runs_new = [s for s in runs_xtc if not(s in runs_log)] if verb & 2: for srun in runs_xtc : if srun in runs_new : print('%s - new' % srun) else : print('%s - processed %s' % (srun, dsname(exp, srun))) if verb : print('\nScan summary for exp=%s process="%s"' % (exp, procname)) print('%4d runs in xtc dir : %s' % (len(runs_xtc), xtc_dir(exp)),\ '\n%4d runs in log file : %s' % (len(runs_log), log_file(exp, procname)),\ '\n%4d runs NEW in xtc directory' % len(runs_new)) return runs_new
42faf8c27bdea9e979a8b2390a89fd6c3cad72f4
33,163
import numpy as np


def get_targets(initial_positions, trajectory_target_positions):
    """Returns the averaged particle mobilities from the sampled trajectories.

    Args:
        initial_positions: the initial positions of the particles with shape
            [n_particles, 3].
        trajectory_target_positions: the absolute positions of the particles
            at the target time for all sampled trajectories, each with shape
            [n_particles, 3].
    """
    targets = np.mean([np.linalg.norm(t - initial_positions, axis=-1)
                       for t in trajectory_target_positions], axis=0)
    return targets.astype(np.float32)
e9f5bde1f791fe2f0546ba7d5323745b90f1029b
33,165
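# A toy check for get_targets above: with two sampled end states offset by
# 1.0 and 3.0 along each axis, each particle's mobility is the mean of
# sqrt(3) and 3*sqrt(3), i.e. about 3.464.
import numpy as np

initial = np.zeros((3, 3))
trajectories = [initial + 1.0, initial + 3.0]
print(get_targets(initial, trajectories))  # [3.4641016 3.4641016 3.4641016]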
def get_campaign_goal(campaign, goal_identifier): """Returns goal from given campaign and Goal_identifier. Args: campaign (dict): The running campaign goal_identifier (string): Goal identifier Returns: dict: Goal corresponding to goal_identifer in respective campaign """ if not campaign or not goal_identifier: return None for goal in campaign.get("goals"): if goal.get("identifier") == goal_identifier: return goal return None
1a8738416ee8187ad2a6a977b36b68f66052bfe8
33,166
import nibabel as nib


def vox2mm(ijk, affine):
    """
    Convert matrix subscripts to coordinates.

    .. versionchanged:: 0.0.8

        * [ENH] This function was part of `nimare.transforms` in previous
          versions (0.0.3-0.0.7)

    Parameters
    ----------
    ijk : (X, 3) :obj:`numpy.ndarray`
        Matrix subscripts for coordinates being transformed.
        One row for each coordinate, with three columns: i, j, and k.
    affine : (4, 4) :obj:`numpy.ndarray`
        Affine matrix from image.

    Returns
    -------
    xyz : (X, 3) :obj:`numpy.ndarray`
        Coordinates in image-space.

    Notes
    -----
    From here:
    http://blog.chrisgorgolewski.org/2014/12/how-to-convert-between-voxel-and-mm.html
    """
    xyz = nib.affines.apply_affine(affine, ijk)
    return xyz
9800c53a447e2f7eab85e62ea802c91aaa23aea7
33,167
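# A minimal sketch for vox2mm above with an illustrative affine: 2 mm
# isotropic voxels plus an origin offset, so voxel (45, 63, 36) lands at
# the world-space origin.
import numpy as np

affine = np.array([[2., 0., 0.,  -90.],
                   [0., 2., 0., -126.],
                   [0., 0., 2.,  -72.],
                   [0., 0., 0.,    1.]])
print(vox2mm(np.array([[45, 63, 36]]), affine))  # [[0. 0. 0.]]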