content: string (length 35 to 762k) · sha1: string (length 40) · id: int64 (0 to 3.66M)
def softmax_logits_kld(ops, p_logits, q_logits, keepdims=False):
    """
    Compute the KL-divergence between two softmax categorical distributions
    via logits.  The last dimension of `p` and `q` is treated as the
    softmax dimension, and will be reduced when computing the KL-divergence.

    .. math::

        \\operatorname{D}_{KL}(p(y)\\|q(y)) =
            \\sum_y p(y) \\left(\\log p(y) - \\log q(y)\\right)

    Args:
        ops (npyops or tfops): The math operations module.
        p_logits: Logits of softmax categorical :math:`p(y)`.
        q_logits: Logits of softmax categorical :math:`q(y)`.
        keepdims (bool): Whether or not to keep the reduced dimension.
            (default :obj:`False`)

    Returns:
        The computed softmax categorical distributions KL-divergence.
    """
    p_logits = ops.convert_to_tensor(p_logits)
    q_logits = ops.convert_to_tensor(q_logits)
    with ops.name_scope('softmax_logits_kld', values=[p_logits, q_logits]):
        log_p = log_softmax(ops, p_logits)
        log_q = log_softmax(ops, q_logits)
        p = softmax(ops, p_logits)
        # TODO: can we reduce time consumption via ``p = ops.exp(log_p)``?
        return ops.reduce_sum(p * (log_p - log_q), axis=-1, keepdims=keepdims)
b8566c611e6dd01374d1239bcdab81e2c371fe11
14,500
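A minimal NumPy sketch of the same KL-divergence computation as the softmax_logits_kld record above; the snippet's `ops`, `log_softmax`, and `softmax` helpers are not shown in this dump, so this standalone version re-derives them (names here are hypothetical):

import numpy as np

def softmax_logits_kld_np(p_logits, q_logits):
    # numerically stable log-softmax along the last axis
    def log_softmax(z):
        z = z - z.max(axis=-1, keepdims=True)
        return z - np.log(np.exp(z).sum(axis=-1, keepdims=True))
    log_p = log_softmax(np.asarray(p_logits, dtype=float))
    log_q = log_softmax(np.asarray(q_logits, dtype=float))
    # D_KL(p || q) = sum_y p(y) * (log p(y) - log q(y))
    return (np.exp(log_p) * (log_p - log_q)).sum(axis=-1)

# identical logits give zero divergence
assert np.isclose(softmax_logits_kld_np([1., 2., 3.], [1., 2., 3.]), 0.0)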
def E_inductive_from_ElectricDipoleWholeSpace(
    XYZ, srcLoc, sig, f, current=1.0, length=1.0, orientation="X",
    kappa=1.0, epsr=1.0, t=0.0,
):
    """
    Computing the inductive portion of the electric fields from an
    electrical dipole in a wholespace.

    TODO: Add description of parameters
    """
    mu = mu_0 * (1 + kappa)
    epsilon = epsilon_0 * epsr
    sig_hat = sig + 1j * omega(f) * epsilon

    XYZ = utils.asArray_N_x_Dim(XYZ, 3)

    # Check that we are not mixing multiple field locations with multiple
    # frequencies (use "and": the bitwise "&" binds tighter than ">").
    if XYZ.shape[0] > 1 and f.shape[0] > 1:
        raise Exception(
            "I/O type error: For multiple field locations only a single "
            "frequency can be specified."
        )

    dx = XYZ[:, 0] - srcLoc[0]
    dy = XYZ[:, 1] - srcLoc[1]
    dz = XYZ[:, 2] - srcLoc[2]

    r = np.sqrt(dx ** 2.0 + dy ** 2.0 + dz ** 2.0)
    # k = np.sqrt(-1j * 2. * np.pi * f * mu * sig)
    k = np.sqrt(omega(f) ** 2.0 * mu * epsilon - 1j * omega(f) * mu * sig)

    front = current * length / (4.0 * np.pi * sig_hat * r ** 3) * np.exp(-1j * k * r)

    if orientation.upper() == "X":
        Ex_inductive = front * (k ** 2 * r ** 2)
        Ey_inductive = np.zeros_like(Ex_inductive)
        Ez_inductive = np.zeros_like(Ex_inductive)
        return Ex_inductive, Ey_inductive, Ez_inductive
    elif orientation.upper() == "Y":
        # x --> y, y --> z, z --> x
        Ey_inductive = front * (k ** 2 * r ** 2)
        Ez_inductive = np.zeros_like(Ey_inductive)
        Ex_inductive = np.zeros_like(Ey_inductive)
        return Ex_inductive, Ey_inductive, Ez_inductive
    elif orientation.upper() == "Z":
        # x --> z, y --> x, z --> y
        Ez_inductive = front * (k ** 2 * r ** 2)
        Ex_inductive = np.zeros_like(Ez_inductive)
        Ey_inductive = np.zeros_like(Ez_inductive)
        return Ex_inductive, Ey_inductive, Ez_inductive
bc98b693771a535f74f693ee097b682632f4fbf8
14,501
import pytz
from datetime import datetime
from dateutil import parser


def str2posix(timelist):
    """
    Take a list of strings containing a date along with a start and end
    time, and return a list of the corresponding POSIX times.

    Inputs
        timelist - A list of strings: a date followed by two times.  A date
            for the second time can also be given, in which case it is at
            index 2 and the second time is at index 3.
    Outputs
        dtts - A list of POSIX times from the original inputs.
    """
    if len(timelist) == 3:
        timelist.insert(2, timelist[0])

    (dt1, dt2) = (parser.parse(timelist[0] + ' ' + timelist[1]),
                  parser.parse(timelist[2] + ' ' + timelist[3]))
    dt1 = dt1.replace(tzinfo=pytz.utc)
    dt2 = dt2.replace(tzinfo=pytz.utc)
    dt1ts = (dt1 - datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc)).total_seconds()
    dt2ts = (dt2 - datetime(1970, 1, 1, 0, 0, 0, tzinfo=pytz.utc)).total_seconds()
    return [dt1ts, dt2ts]
476fc634b967419818ef41d9ff4b21f9e4f76ff1
14,502
def keys_verif(verif: bool = True):
    """
    Verify the existence of the private and/or public ElGamal keys.
    """
    print("\nChecking the presence of keys in the system....")

    if isFileHere("public_key.kpk", config.DIRECTORY_PROCESSING):
        # from cipher.asymmetric import elGamal as elG
        print("\nPublic key is already here.\n")

        if isFileHere("private_key.kpk", config.DIRECTORY_PROCESSING):
            print("Private key is here too.\n")

            if verif and not query_yn("Do you want to keep them? (default: No)", "no"):
                rmFile("public_key.kpk", config.DIRECTORY_PROCESSING)
                rmFile("private_key.kpk", config.DIRECTORY_PROCESSING)
                rmFile("encrypted.kat", config.DIRECTORY_PROCESSING)
                return True
        else:
            print("Private key is missing.\n")
            if query_yn("Do you want to add it now?\n"):
                while not isFileHere("private_key.kpk", config.DIRECTORY_PROCESSING):
                    input("Please put your 'private_key.kpk' file into the 'processing' folder.")
                print("Found it!")
                keys_verif()
            else:
                katsuAsymm()

    elif isFileHere("private_key.kpk", config.DIRECTORY_PROCESSING):
        print("\nPrivate key is already here, but the public one is not.\n")
        if query_yn("Do you want to add it now? (default: No)\n", "no"):
            while not isFileHere("public_key.kpk", config.DIRECTORY_PROCESSING):
                input("Please put your 'public_key.kpk' file into the 'processing' folder.")
            print("Found it!")
            keys_verif()
        else:
            return True
    else:
        return True

    return False
824b8c31ad9cc0fb1b2ef7eef585e47c2e338a8b
14,503
def r2(ground_truth, simulation, join='inner', fill_value=0):
    """
    R-squared value between ground truth and simulation.

    Inputs:
        ground_truth - ground truth measurement (data frame) with the
            measurement in the "value" column
        simulation - simulation measurement (data frame) with the
            measurement in the "value" column
        join - type of join to perform between ground truth and simulation
        fill_value - fill value for non-overlapping joins
    """
    if simulation is None or ground_truth is None:
        return None
    if len(simulation) == 0 or len(ground_truth) == 0:
        return None

    if type(ground_truth) is list:
        ground_truth = np.nan_to_num(ground_truth)
        simulation = np.nan_to_num(simulation)
        ground_truth = ground_truth[np.isfinite(ground_truth)]
        simulation = simulation[np.isfinite(simulation)]
        # NOTE: despite the function name, this branch returns the RMSE,
        # not R-squared.
        return np.sqrt(((np.asarray(ground_truth) - np.asarray(simulation)) ** 2).mean())

    ground_truth = ground_truth[np.isfinite(ground_truth.value)]
    simulation = simulation[np.isfinite(simulation.value)]
    df = join_dfs(ground_truth, simulation, join=join, fill_value=fill_value)
    if df.empty:
        return None
    else:
        return r2_score(df["value_gt"], df["value_sim"])
75d78e575bef0a59620cbdbf1992396a8edd0929
14,504
import os


def add_team_batting_stats(df, years, batter_metrics):
    """
    Merge aggregated team batting (saber) metrics onto `df` for both the
    home and away teams' previous games.
    """
    gids = list(set(df['gameId']))
    bat_saber_paths = [
        CONFIG.get('paths').get('batter_saber') + gid +
        "/batter_saber_team.parquet"
        for gid in os.listdir(CONFIG.get('paths').get('batter_saber'))
    ]
    curr_gids = list(set(
        list(df['homePrevGameId']) + list(df['awayPrevGameId'])
    ))
    bat_saber_paths = [
        x for x in bat_saber_paths if any(gid in x for gid in curr_gids)
    ]
    batter_saber = pd.concat(
        objs=[pd.read_parquet(path) for path in bat_saber_paths],
        axis=0
    )
    print(batter_saber.shape)
    print("batter saber shape above")

    # Keep the top batters per team, ranked by trailing wOBA
    batter_saber['game_id_team'] = (
        batter_saber['gameId'] + batter_saber['team']
    )
    batter_saber.sort_values(by=['game_id_team', 'woba_trail6'],
                             ascending=False, inplace=True)
    batter_saber['rank'] = batter_saber.groupby('game_id_team')\
        ['batterId'].cumcount()
    batter_saber = batter_saber.loc[batter_saber['rank'] <= 9, :]
    batter_saber.loc[batter_saber['rank'] < 5, 'batter_group'] = 'high'
    batter_saber.loc[batter_saber['rank'] >= 5, 'batter_group'] = 'low'

    # Aggregate
    batter_saber = batter_saber.groupby(
        by=['gameId', 'team', 'batter_group'],
        as_index=False
    ).agg({k: 'mean' for k in batter_metrics})
    batter_saber = batter_saber.pivot_table(
        index=['gameId', 'team'],
        columns=['batter_group'],
        values=[k for k in batter_metrics],
        aggfunc='mean'
    )
    batter_saber.reset_index(inplace=True)
    batter_saber.columns = [
        x[0] if x[1] == '' else x[0] + "_" + x[1]
        for x in batter_saber.columns
    ]
    batter_saber.to_csv(
        '/Users/peteraltamura/Desktop/batter_saber_wide.csv',
        index=False)

    # Merge Home
    df = pd.merge(
        df,
        batter_saber,
        how='left',
        left_on=['homePrevGameId', 'home_code'],
        right_on=['gameId', 'team'],
        validate='1:1',
        suffixes=['', '_HOME']
    )

    # Merge Away
    df = pd.merge(
        df,
        batter_saber,
        how='left',
        left_on=['awayPrevGameId', 'away_code'],
        right_on=['gameId', 'team'],
        validate='1:1',
        suffixes=['', '_AWAY']
    )
    return df
bdce73590dfe1a0ee7883ce4c65544b612298704
14,505
def depth_analysis_transform_1(rgb_tensor, depth_tensor, num_filters):
    """Builds the analysis transform."""
    with tf.variable_scope("analysis"):

        # --------------------------------------- rgb branch
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(
                num_filters, (9, 9), corr=True, strides_down=4,
                padding="same_zeros", use_bias=True,
                activation=tf.nn.relu)
            rgb_tensor = layer(rgb_tensor)

        # --------------------------------------- depth branch
        with tf.variable_scope("layer_d0"):
            layer = tfc.SignalConv2D(
                num_filters, (9, 9), corr=True, strides_down=4,
                padding="same_zeros", use_bias=True,
                activation=tf.nn.relu)
            depth_tensor = layer(depth_tensor)

        # --------------------------------------- fusion
        tf.summary.histogram('rgb_tensor', rgb_tensor)
        tf.summary.histogram('depth_tensor', depth_tensor)
        tensor = rgb_tensor + depth_tensor

        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2,
                padding="same_zeros", use_bias=True,
                activation=tf.nn.relu)
            tensor = layer(tensor)

        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2,
                padding="same_zeros", use_bias=False,
                activation=None)
            tensor = layer(tensor)

        return tensor
27637e35619f61e5da2b965392a39b38cdfb6a29
14,506
def boxPlot(med, quartiles, minmax, mean=None, outliers=None, name='boxplot',
            horiz=True, offset=0, legendGroup='boxplot', showleg=False,
            plot=False, col='blue', width=8):
    """
    Makes a very light plotly boxplot.  Unlike plotly's own, this can take
    externally calculated values rather than raw data, which makes it go
    much faster.

    :param med:
    :param quartiles:
    :param minmax:
    :param mean:
    :param name:
    :param horiz:
    :param offset:
    :param legendGroup:
    :param plot:
    :param col:
    :return:
    """
    show_indiv_leg = False  # set to True for debug mode
    if horiz:
        wideaxis = 'x'
        offsetaxis = 'y'
    else:
        wideaxis = 'y'
        offsetaxis = 'x'

    if mean:
        text = 'Median=%.3e <br> Mean=%.3e <br> [Q1,Q3]=[%.3e,%.3e] <br> [min, max]=[%.3e,%.3e]' % \
               (med, mean, *quartiles, *minmax)
    else:
        text = 'Median=%.3e <br> [Q1,Q3]=[%.3e,%.3e] <br> [min, max]=[%.2f,%.2f]' \
               % (med, *quartiles, *minmax)

    thickLine = [{wideaxis: quartiles, offsetaxis: [offset] * 2,
                  'name': name, 'showlegend': showleg, 'legendgroup': legendGroup,
                  'type': 'scatter', 'line': {'color': col, 'width': width},
                  'opacity': .4, 'hovertext': text, 'hoverinfo': 'name+text',
                  }]
    thinLine = [{wideaxis: minmax, offsetaxis: [offset] * 2,
                 'name': name, 'showlegend': show_indiv_leg, 'legendgroup': legendGroup,
                 'type': 'scatter', 'line': {'color': col, 'width': 2},
                 'opacity': .4, 'hovertext': text, 'hoverinfo': 'name+text'}]
    medPoint = [{wideaxis: [med], offsetaxis: [offset],
                 'hovertext': text, 'hoverinfo': 'name+text',
                 'name': name, 'showlegend': show_indiv_leg, 'legendgroup': legendGroup,
                 'mode': 'markers',
                 'marker': {'color': 'black', 'symbol': 'square', 'size': 8},
                 'opacity': 1}]
    boxPlots = thickLine + thinLine + medPoint

    if mean is not None:
        meanPoint = [{wideaxis: [mean], offsetaxis: [offset],
                      'hovertext': text, 'hoverinfo': 'name+text',
                      'name': name, 'showlegend': show_indiv_leg, 'legendgroup': legendGroup,
                      'mode': 'markers',
                      'marker': {'color': 'white', 'symbol': 'diamond', 'size': 8,
                                 'line': {'color': 'black', 'width': 1}},
                      'opacity': 1, 'line': {'color': 'black'}}]
        boxPlots += meanPoint

    if outliers is not None:
        outlierplot = [{wideaxis: outliers, offsetaxis: [offset] * len(outliers),
                        'name': name, 'legendgroup': legendGroup, 'mode': 'markers',
                        'marker': dict(size=2, color=col),
                        'hoverinfo': wideaxis + '+name'}]
        boxPlots += outlierplot

    # As boxPlot is used primarily as a subcomponent in other plots, its
    # output is the list of traces rather than simply plotOut(fig, plot).
    if plot:
        fig = go.Figure(data=boxPlots)
        plotfunc = pyo.iplot if in_notebook() else pyo.plot
        plotfunc(fig)
    else:
        return boxPlots
ba4b746bc5129cef758a01a26633d0fcf0ab3245
14,507
def hub_quantile_prediction_dict_validator(target_group_dict, prediction_dict):
    """
    Does hub prediction_dict validation as documented in
    `json_io_dict_from_quantile_csv_file()`.
    """
    error_messages = []  # return value, filled next
    valid_quantiles = target_group_dict['quantiles']
    prediction_quantiles = prediction_dict['prediction']['quantile']
    if set(valid_quantiles) != set(prediction_quantiles):
        error_messages.append(
            f"prediction_dict quantiles != valid_quantiles. "
            f"valid_quantiles={valid_quantiles}, "
            f"prediction_quantiles={prediction_quantiles}")
    return error_messages
ec13824557ef9533d7c4a777daadd07414752767
14,508
import numpy as np


def allclose_periodical(x, y, a, b, atol=1e-10):
    """
    Checks np.allclose(x, y), but assumes both x and y are periodical
    with respect to the interval (a, b).
    """
    assert len(x) == len(y)
    period = b - a
    x_p = np.remainder(x - a, period)  # now in [0, b - a)
    y_p = np.remainder(y - a, period)
    return all(np.isclose(x_p[i], y_p[i], atol=atol)
               or np.isclose(x_p[i], y_p[i] + period, atol=atol)
               or np.isclose(x_p[i], y_p[i] - period, atol=atol)
               for i in range(len(x_p)))
bd1c58a362a9c3926bffbcb0a27e355bfc982955
14,509
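A short usage sketch for the allclose_periodical record above: values shifted by a full period should compare equal.

import numpy as np

x = np.array([0.1, 6.3, 3.0])
y = x + 2 * np.pi  # shifted by exactly one period
print(allclose_periodical(x, y, 0.0, 2 * np.pi))       # expected: True
print(allclose_periodical(x, x + 0.5, 0.0, 2 * np.pi))  # expected: False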
import operator


def get_categories_to_rows_ratio(df):
    """
    Gets the ratio of unique categories to the number of rows for each
    categorical variable.

    :param df: pd.DataFrame
    :return: array of tuples
    """
    cat_columns = get_categorical_variable_names(df)
    ratios = {col: len(df[col].unique()) / df[col].count()
              for col in cat_columns}
    sorted_ratios = sorted(ratios.items(), key=operator.itemgetter(1),
                           reverse=True)
    return sorted_ratios
2734b898b6c6538b65d54709be617a6dd393c3da
14,510
def _width_left_set(size: int, lsize: int, value: list, fmt: str, meta: dict) -> dict:
    """Width setting of paragraph with left repositioning."""
    return Plain([RawInline(fmt,
                            '<p style="text-align:left !important;'
                            'text-indent:0 !important;'
                            'position:relative;width:{0};left:{1}">'.
                            format(size, lsize))] +
                 value +
                 [RawInline(fmt, '</p>')])
6042b0d255fe804d7423b5e49dd700bd7f0b9bdf
14,511
def GetMappingKeyName(run, user):
    """Returns a str used to uniquely identify a mapping."""
    return 'RunTesterMap_%s_%s' % (run.key().name(), str(user.user_id()))
b4eb80ca5f084ea956f6a458f92de1b85e722cda
14,512
def get_invitee_from_table(invite_code: str, table):
    """
    Get a dictionary of the stored information for this invite code.

    Args:
        invite_code: The invitation code to search for
        table: A DynamoDB table for querying

    Returns:
        A dictionary of information stored under the invite code

    Throws:
        UnknownInviteCodeError: If the invite code is not in the database
    """
    response = table.query(
        KeyConditionExpression=Key('invite_code').eq(invite_code)
    )
    items = response['Items']

    # If there were no matches to the code then throw an error
    if len(items) == 0:
        raise UnknownInviteCodeError()

    # The output will be a list, so we'll just use the first one since there
    # should not be duplicates
    items = items[0]

    # DynamoDB cannot store empty strings, so we use null instead and convert
    # between it as needed. At this point in time, we have no significance for
    # null so this works fine.
    items = {k: convert_null_to_empty_string(v) for k, v in items.items()}

    return items
1377e20a58174f69d8984e36aab3426c0eb392bd
14,513
import math def d_beta_dr(radius, beta, mass_r, epsilon, pressure, h_r): """ d_beta_dr """ return 2. * (1 - 2 * (mass_r/radius)) ** (-1.) * h_r * \ ( -2. * math.pi * (5*epsilon + 9*pressure + f(epsilon, pressure)) + (3/radius**2.) + 2*(1 - 2 * mass_r / radius)**(-1) * \ ((mass_r/radius) + 4 * math.pi*radius*pressure)**2 ) + (2 * beta/radius) *(1 - 2 * mass_r / radius)**(-1) * \ (-1 + mass_r/radius + 2 * math.pi * radius**2 * (epsilon - pressure))
0880439516b70e07c01be3164a3c030bb9deeaca
14,514
import json


def score(capstone, student_api):
    """
    Calculates the score of the students' API model.

    :param student_api: StudentApi object
    :return: score as a float
    """
    # Check which simulators have datapoints with outcomes
    simulator_ids = []
    for simulator in capstone.simulators.all():
        if simulator.datapoints.exclude(outcome="").count() > 0:
            simulator_ids.append(simulator.id)
    if len(simulator_ids) == 0:
        raise RuntimeError("No simulators with outcomes found.")

    qs = DueDatapoint.objects.filter(
        simulator_id__in=simulator_ids,
        student=student_api.student,
    )

    outcomes = []
    predictions = []
    sensitive_class_race = {}
    sensitive_class_sex = {}

    for ddp in qs:  # loop through each entry in DueDatapoint
        outcome = bool(json.loads(ddp.datapoint.outcome))
        data = json.loads(ddp.datapoint.data)

        if ddp.response_status != 200:  # Missing or bad response
            predictions.append(not outcome)
            outcomes.append(outcome)
        else:
            try:
                prediction = json.loads(ddp.response_content)["prediction"]
            except (json.JSONDecodeError, KeyError):
                predictions.append(not outcome)
                outcomes.append(outcome)
            else:
                sex = data["sex"].lower()
                if sex not in sensitive_class_sex:
                    sensitive_class_sex[sex] = {
                        "outcomes": [],
                        "predictions": [],
                    }
                sensitive_class_sex[sex]["outcomes"].append(outcome)
                sensitive_class_sex[sex]["predictions"].append(prediction)

                race = data["race"].lower()
                if race not in sensitive_class_race:
                    sensitive_class_race[race] = {
                        "outcomes": [],
                        "predictions": [],
                    }
                sensitive_class_race[race]["outcomes"].append(outcome)
                sensitive_class_race[race]["predictions"].append(prediction)

                if not isinstance(prediction, bool):
                    predictions.append(not outcome)
                else:
                    predictions.append(prediction)
                outcomes.append(outcome)

    logger.info(student_api.student)
    f1_score = metrics.f1_score(outcomes, predictions, pos_label=True)
    logger.info("f1_score %s" % f1_score)

    race_diff = fairness_score_precision(sensitive_class_race)
    sex_diff = fairness_score_precision(sensitive_class_sex)
    is_fair = race_diff < 0.2 and sex_diff < 0.2
    logger.info("race_diff %s" % race_diff)
    logger.info("sex_diff %s" % sex_diff)
    logger.info("is_fair %s" % is_fair)
    if not is_fair:
        f1_score -= 0.1
    return f1_score
bb4f545835f480c9fac97acc698daef08a7684f2
14,515
def clean_lhdf(df: pd.DataFrame):
    """
    Removes unnecessary columns from the location history data frame and
    computes new required columns.

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame to process

    Returns
    -------
    Copy of `df`, altered the following way:
        * Columns removed
            * `activity`
            * `altitude`
            * `heading`
        * Columns expected in `df`
            * `time`
            * `latitudeE7`
            * `longitudeE7`
        * Columns added
            * `date` (Format: `YYYY-MM-DD`)
            * `weekday` (Format: `0-6`; 0 = Sunday)
            * `daytime` (Format: HH:ii, 24h style)
            * `lat` (Format: dd.ddddd)
            * `lng` (Format: dd.ddddd)
    """
    df = df.copy()

    # Drop unnecessary cols
    df.drop(labels=["activity", "altitude", "heading"], axis=1, inplace=True)

    # compute time cols
    df.loc[:, "date"] = df.time.dt.strftime("%Y-%m-%d")
    df.loc[:, "weekday"] = df.time.dt.strftime("%w")  # was: %u
    df.loc[:, "daytime"] = df.time.dt.strftime("%H:%M")

    df.loc[:, "lat"] = pd.to_numeric(df.latitudeE7) / 1e7
    df.loc[:, "lng"] = pd.to_numeric(df.longitudeE7) / 1e7

    return df
86280a333082e964553030d4e586a267e93edfae
14,516
def year_from_operating_datetime(df):
    """Add a 'year' column based on the year in the operating_datetime.

    Args:
        df (pandas.DataFrame): A DataFrame containing EPA CEMS data.

    Returns:
        pandas.DataFrame: A DataFrame containing EPA CEMS data with a
        'year' column.
    """
    df['year'] = df.operating_datetime_utc.dt.year
    return df
1c7bbc6465d174465151e5e777671f319ee656b7
14,517
def is_thrift(target):
    """Returns True if the target has thrift IDL sources."""
    return isinstance(target, JavaThriftLibrary)
4a56cf5cec923933fec628173cb2ab1a122b0127
14,518
def get_instance(value, model):
    """Returns a model instance from value.

    If value is a string, gets by key name; if value is an integer, gets by
    id; and if value is an instance, returns the instance.
    """
    if not issubclass(model, db.Model):
        raise TypeError('Invalid type (model); expected subclass of Model.')

    if isinstance(value, basestring):
        return model.get_by_key_name(value)
    elif isinstance(value, (int, long)):
        return model.get_by_id(value)
    elif isinstance(value, model):
        return value
    else:
        raise TypeError('Invalid type (value); expected string, number or '
                        '%s.' % model.__name__)
85544b057e3e6c82730ba743a625610c55b48ff0
14,519
def clean_infix(token, INFIX):
    """
    Checks token for infixes. (ex. bumalik = balik)

    token: word to be stemmed for infixes
    returns STRING
    """
    if check_validation(token):
        return token

    for infix in INFIX_SET:
        if len(token) - len(infix) >= 3 and count_vowel(token[len(infix):]) >= 2:
            if token[0] == token[4] and token[1:4] == infix:
                INFIX.append(infix)
                return token[4:]
            elif token[2] == token[4] and token[1:3] == infix:
                INFIX.append(infix)
                return token[0] + token[3:]
            elif token[1:3] == infix and check_vowel(token[3]):
                INFIX.append(infix)
                return token[0] + token[3:]

    return token
fdd8e90bdea14ca2344dd465622bd2e79905e4fe
14,520
def seq_to_encoder(input_seq):
    """Convert a space-separated string of numeric ids into the encoder
    inputs, decoder inputs, and target weights used for prediction.
    """
    input_seq_array = [int(v) for v in input_seq.split()]
    encoder_input = [PAD_ID] * \
        (input_seq_len - len(input_seq_array)) + input_seq_array
    decoder_input = [GO_ID] + [PAD_ID] * (output_seq_len - 1)
    encoder_inputs = [np.array([v], dtype=np.int32) for v in encoder_input]
    decoder_inputs = [np.array([v], dtype=np.int32) for v in decoder_input]
    target_weights = [np.array([1.0], dtype=np.float32)] * output_seq_len
    return encoder_inputs, decoder_inputs, target_weights
9a9203aa9e3005acd7d55516fbe8c5710ea25ae3
14,521
def getMergers(tree, map_strain2species, options):
    """merge strains to species.

    returns the new tree with species merged and a dictionary of genes
    including the genes that have been merged.

    Currently, only binary merges are supported.
    """
    n = TreeTools.GetSize(tree) + 1
    all_strains = map_strain2species.keys()
    all_species = map_strain2species.values()

    genes = []
    for x in range(n):
        g = {}
        for s in all_strains:
            g[s] = set()
        genes.append(g)

    # build list of species pairs that can be joined.
    map_species2strain = IOTools.getInvertedDictionary(map_strain2species)
    pairs = []
    for species, strains in map_species2strain.items():
        for x in range(len(strains)):
            for y in range(0, x):
                pairs.append((strains[x], strains[y]))

    # map of genes to new genes
    # each entry in the list is a pair of genes of the same species
    # but different strains to be joined.
    map_genes2new_genes = []

    # dictionary of merged genes. This is to ensure that no gene
    # is merged twice
    merged_genes = {}

    def count_genes(node_id):
        """record number of genes per species for each node

        This is done separately for each strain. The counts are aggregated
        for each species over strains by taking the maximum gene count per
        strain. This ignores any finer tree structure below a species node.
        """
        node = tree.node(node_id)

        if node.succ:
            this_node_set = genes[node_id]
            # process non-leaf node
            for s in node.succ:
                # propagate: terminated nodes force upper nodes to terminate
                # (assigned to None).
                if not genes[s]:
                    this_node_set = None
                    break

                # check if node merges genes that are not part of the
                # positive set
                for strain in all_strains:
                    if strain in map_strain2species:
                        # merge genes from all children
                        this_node_set[strain] = this_node_set[
                            strain].union(genes[s][strain])

                        if len(this_node_set[strain]) > 1:
                            # more than two genes for a single species, so
                            # no join
                            this_node_set = None
                            break
                    elif strain not in map_strain2species and \
                            this_node_set[strain] > 0:
                        this_node_set = None
                        break

            if this_node_set is None:
                genes[node_id] = None
                return

            for strain_x, strain_y in pairs:
                if len(this_node_set[strain_x]) == 1 and len(this_node_set[strain_y]) == 1:
                    species = map_strain2species[strain_x]
                    gene_x, gene_y = tuple(this_node_set[strain_x])[0], tuple(
                        this_node_set[strain_y])[0]

                    # check if these two genes have already been merged or
                    # are merged with other partners already.
                    # The merged genes are assigned the same node_id, if
                    # they have been already merged.
                    key1 = strain_x + gene_x
                    key2 = strain_y + gene_y
                    if key1 > key2:
                        key1, key2 = key2, key1

                    merge = False
                    if key1 in merged_genes and key2 in merged_genes:
                        if merged_genes[key1] == merged_genes[key2]:
                            merge = True
                    elif key1 not in merged_genes and key2 not in merged_genes:
                        merge = True
                        merged_genes[key1] = node_id
                        merged_genes[key2] = node_id

                    if merge:
                        map_genes2new_genes.append(
                            (node_id, species, strain_x, gene_x, strain_y, gene_y))

                    # once two genes have been joined, they can not be
                    # remapped further
                    genes[node_id] = None
                    return
        else:
            # process leaf
            strain, t, g, q = parseIdentifier(node.data.taxon, options)

            if strain in map_strain2species:
                genes[node_id][strain].add(g)
            else:
                # do not process nodes that do not need to be mapped
                genes[node_id] = None

    tree.dfs(tree.root, post_function=count_genes)

    return map_genes2new_genes
48fb083027e00d93754ee4064edbc268ea4047a5
14,522
import numpy as np
from scipy import stats


def convolve_with_gaussian(
    data: np.ndarray, kernel_width: int = 21
) -> np.ndarray:
    """
    Convolves a 1D array with a gaussian kernel of given width.
    """
    # create kernel and normalize area under curve
    norm = stats.norm(0, kernel_width)
    X = np.linspace(norm.ppf(0.0001), norm.ppf(0.9999), kernel_width)
    _kernel = norm.pdf(X)
    kernel = _kernel / np.sum(_kernel)
    return np.convolve(data, kernel, mode="same")
63949d9c235a1a467858077de8dda8455c139551
14,523
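A hedged usage sketch for the convolve_with_gaussian record above, smoothing a noisy step signal:

import numpy as np

rng = np.random.default_rng(0)
signal = np.r_[np.zeros(50), np.ones(50)] + rng.normal(0, 0.1, 100)
smoothed = convolve_with_gaussian(signal, kernel_width=21)
print(signal.shape, smoothed.shape)  # shapes match because mode="same"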
def post_netspeed(event, context):
    """Speed test data ingestion handler."""
    return process_reading(event['query'], NETSPEED_SQL)
8414fe3608b7433a177f0c8c54cce61d01339b67
14,524
import json


def notify_host_disabled(token, host_name):
    """
    Notify OpenStack Nova that a host is disabled.
    """
    url = token.get_service_url(OPENSTACK_SERVICE.NOVA, strip_version=True)
    if url is None:
        raise ValueError("OpenStack Nova URL is invalid")

    # Get the service ID for the nova-compute service.
    compute_service_id = get_host_service_id(token, host_name, 'nova-compute')

    api_cmd = url + "/v2.1/%s/os-services/%s" % (token.get_tenant_id(),
                                                 compute_service_id)

    api_cmd_headers = dict()
    api_cmd_headers['Content-Type'] = "application/json"
    api_cmd_headers['X-OpenStack-Nova-API-Version'] = NOVA_API_VERSION

    api_cmd_payload = dict()
    api_cmd_payload['forced_down'] = True

    response = rest_api_request(token, "PUT", api_cmd, api_cmd_headers,
                                json.dumps(api_cmd_payload))
    return response
904c951a37b8b84df4aa48951c424686a123ff30
14,525
def compute_moments_weights_slow(mu, x2, neighbors, weights):
    """
    This version exhaustively iterates over all |E|^2 terms to compute the
    expected moments exactly.  Used to test the more optimized formulations
    that follow.
    """
    N = neighbors.shape[0]
    K = neighbors.shape[1]

    # Calculate E[G]
    EG = 0
    for i in range(N):
        for k in range(K):
            j = neighbors[i, k]
            wij = weights[i, k]
            EG += wij * mu[i] * mu[j]

    # Calculate E[G^2]
    EG2 = 0
    for i in range(N):
        EG2_i = 0
        for k in range(K):
            j = neighbors[i, k]
            wij = weights[i, k]

            for x in range(N):
                for z in range(K):
                    y = neighbors[x, z]
                    wxy = weights[x, z]

                    s = wij * wxy
                    if s == 0:
                        continue

                    if i == x:
                        if j == y:
                            t1 = x2[i] * x2[j]
                        else:
                            t1 = x2[i] * mu[j] * mu[y]
                    elif i == y:
                        if j == x:
                            t1 = x2[i] * x2[j]
                        else:
                            t1 = x2[i] * mu[j] * mu[x]
                    else:  # i is unique since i can't equal j
                        if j == x:
                            t1 = mu[i] * x2[j] * mu[y]
                        elif j == y:
                            t1 = mu[i] * x2[j] * mu[x]
                        else:  # i and j are unique, no shared nodes
                            t1 = mu[i] * mu[j] * mu[x] * mu[y]

                    EG2_i += s * t1

        EG2 += EG2_i

    return EG, EG2
5a2984174f366a34f16490bb7b9252ec4eaf08db
14,526
import functools

import jax.numpy as jnp


def sum_fn(fun, ndims=0):
    """Higher order helper for summing the result of fun."""

    @functools.wraps(fun)
    def wrapped(*args):
        batch_loglik = fun(*args)
        return jnp.sum(
            batch_loglik.reshape(
                (-1,) + batch_loglik.shape[-ndims + len(batch_loglik.shape):]),
            axis=0)

    return wrapped
521a4084fee84f16de5714010be9528296f8b231
14,527
from typing import Optional


def get_control_policy_attachments(language: Optional[str] = None,
                                   output_file: Optional[str] = None,
                                   policy_type: Optional[str] = None,
                                   target_id: Optional[str] = None,
                                   opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetControlPolicyAttachmentsResult:
    """
    This data source provides the Resource Manager Control Policy Attachments of the current Alibaba Cloud user.

    > **NOTE:** Available in v1.120.0+.

    ## Example Usage

    Basic Usage

    ```python
    import pulumi
    import pulumi_alicloud as alicloud

    example = alicloud.resourcemanager.get_control_policy_attachments(target_id="example_value")
    pulumi.export("firstResourceManagerControlPolicyAttachmentId", example.attachments[0].id)
    ```

    :param str language: The language. Valid values: `zh-CN`, `en`, and `ja`. Default value: `zh-CN`.
    :param str policy_type: The type of policy.
    :param str target_id: The Id of target.
    """
    __args__ = dict()
    __args__['language'] = language
    __args__['outputFile'] = output_file
    __args__['policyType'] = policy_type
    __args__['targetId'] = target_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('alicloud:resourcemanager/getControlPolicyAttachments:getControlPolicyAttachments', __args__, opts=opts, typ=GetControlPolicyAttachmentsResult).value

    return AwaitableGetControlPolicyAttachmentsResult(
        attachments=__ret__.attachments,
        id=__ret__.id,
        ids=__ret__.ids,
        language=__ret__.language,
        output_file=__ret__.output_file,
        policy_type=__ret__.policy_type,
        target_id=__ret__.target_id)
cfa39ca8926c281151b5ae06ef89ce865dfd0af4
14,528
def get_gdb(chip_name=None,
            gdb_path=None,
            log_level=None,
            log_stream_handler=None,
            log_file_handler=None,
            log_gdb_proc_file=None,
            remote_target=None,
            remote_address=None,
            remote_port=None,
            **kwargs):
    """
    Set a parameter to a value != None to redefine the get_gdb logic.

    Parameters
    ----------
    chip_name : Any(None, str)
    gdb_path : Any(None, str)
    log_level : Any(None, str)
    log_stream_handler : Any(None, str)
    log_file_handler : Any(None, str)
    log_gdb_proc_file : Any(None, str)
    remote_target : Any(None, str)
    remote_address : Any(None, str)
    remote_port : Any(None, str)

    Returns
    -------
    Gdb
    """
    _gdb = _str_to_class("Gdb" + get_good_name(chip_name))
    return _gdb(gdb_path=gdb_path,
                log_level=log_level,
                log_stream_handler=log_stream_handler,
                log_file_handler=log_file_handler,
                log_gdb_proc_file=log_gdb_proc_file,
                remote_target=remote_target,
                remote_address=remote_address,
                remote_port=remote_port,
                **kwargs)
15dc451b9cbf21c5f96279a17449e4169e0bae83
14,529
import math
from datetime import datetime


def parse_source_gpx_file(inp_path, source):
    """Parse a GPX file having the following structure:

    <gpx xmlns="http://www.topografix.com/GPX/1/1"
         xmlns:gpxtpx="http://www.garmin.com/xmlschemas/TrackPointExtension/v1"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         creator="Suunto app" version="1.1"
         xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd http://www.garmin.com/xmlschemas/TrackPointExtension/v1 http://www.garmin.com/xmlschemas/TrackPointExtensionv1.xsd">
      <metadata>
        <name></name>
        <desc/>
        <author>
          <name></name>
        </author>
      </metadata>
      <trk>
        <name></name>
        <trkseg>
          <trkpt lat="12.345678" lon="-23.456789">
            <ele>-3.4</ele>
            <time>2022-02-22T18:09:02Z</time>
            <extensions>
              <gpxtpx:TrackPointExtension>
                <gpxtpx:hr>95</gpxtpx:hr>
              </gpxtpx:TrackPointExtension>
            </extensions>
          </trkpt>
        </trkseg>
      </trk>
    </gpx>

    Parameters
    ----------
    inp_path : pathlib.Path()
        Path of the GPX file to parse
    source : dict
        The source configuration

    Returns
    -------
    gpx : dict
        Track, track segments, and track points
    vld_t
        Time from start of track [s]
    vld_lambda
        Geodetic longitude [rad]
    vld_varphi
        Geodetic latitude [rad]
    vld_h
        Elevation [m]

    See also: https://en.wikipedia.org/wiki/GPS_Exchange_Format
    """
    logger.info(f"Parsing {inp_path}")

    # Parse input file
    parser = etree.XMLParser(remove_blank_text=True)
    tree = etree.parse(inp_path, parser)

    # Pretty print input file locally
    out_path = inp_path.with_name(inp_path.name.replace(".gpx", "-pretty.gpx"))
    tree.write(out_path, pretty_print=True)

    # Collect tracks
    root = tree.getroot()
    gpx = {}
    gpx["metadata"] = {}
    gpx["trks"] = []
    for trk_element in root.iter("{http://www.topografix.com/GPX/1/1}trk"):

        # Collect track segments
        trk = {}
        trk["name"] = trk_element.find("{http://www.topografix.com/GPX/1/1}name").text
        trk["trksegs"] = []
        for trkseg_element in root.iter("{http://www.topografix.com/GPX/1/1}trkseg"):

            # Collect track points
            trkseg = {}
            trkseg["lat"] = []
            trkseg["lon"] = []
            trkseg["ele"] = []
            trkseg["time"] = []
            start_time = None
            for trkpt_element in trkseg_element.iter(
                "{http://www.topografix.com/GPX/1/1}trkpt"
            ):
                trkseg["lat"].append(
                    math.radians(float(trkpt_element.get("lat")))
                )  # [rad]
                trkseg["lon"].append(
                    math.radians(float(trkpt_element.get("lon")))
                )  # [rad]
                ele_element = trkpt_element.find(
                    "{http://www.topografix.com/GPX/1/1}ele"
                )
                if ele_element is not None:
                    trkseg["ele"].append(float(ele_element.text))  # [m]
                else:
                    trkseg["ele"].append(-R_OPLUS)
                cur_time = datetime.fromisoformat(
                    trkpt_element.find("{http://www.topografix.com/GPX/1/1}time").text[
                        :-1
                    ]
                )
                if start_time is None:
                    start_time = cur_time
                    trkseg["time"].append(0.0)
                else:
                    trkseg["time"].append(
                        (cur_time - start_time).total_seconds()
                    )  # [s]

            trk["trksegs"].append(trkseg)

        gpx["trks"].append(trk)

    # Assign longitude, latitude, elevation, and time from start of track
    # TODO: Check single track and track segment assumption
    _t = np.array(
        gpx["trks"][0]["trksegs"][0]["time"]
    )  # time from start of track [s]
    _lambda = np.array(
        gpx["trks"][0]["trksegs"][0]["lon"]
    )  # geodetic longitude [rad]
    _varphi = np.array(
        gpx["trks"][0]["trksegs"][0]["lat"]
    )  # geodetic latitude [rad]
    _h = np.array(gpx["trks"][0]["trksegs"][0]["ele"])  # elevation [m]

    # Ignore points at which the elevation was not recorded
    vld_idx = np.logical_and(
        np.logical_and(source["start_t"] < _t, _t < source["stop_t"]),
        _h != -R_OPLUS,
    )
    logger.info(
        f"Found {np.sum(vld_idx)} valid values out of all {_t.shape[0]} values"
    )
    vld_t = _t[vld_idx]
    vld_lambda = _lambda[vld_idx]
    vld_varphi = _varphi[vld_idx]
    vld_h = _h[vld_idx]

    return gpx, vld_t, vld_lambda, vld_varphi, vld_h
22a3c724f27a29afbb5a69fd36dc1ed618a6c8b3
14,530
from typing import Callable
from typing import Awaitable


def check(func: Callable[..., Awaitable[Callable[[CommandContext], Awaitable[bool]]]]) -> Check:
    """
    A decorator which creates a check from a function.
    """
    return Check(func)
2354eef311e1867333ade47996fb37cee07ce4cd
14,531
def service_c(request):
    """
    Renders the service chair page with service submissions.
    """
    events = ServiceEvent.objects.filter(semester=get_semester())
    submissions_pending = ServiceSubmission.objects.filter(
        semester=get_semester(), status='0').order_by("date")
    submissions_submitted = ServiceSubmission.objects.filter(
        semester=get_semester(), status='1').order_by("date")
    position = Position.objects.get(title=Position.PositionChoices.SERVICE_CHAIR)

    hours_pending = 0
    for submission in submissions_pending:
        hours_pending += submission.hours
    for submission in submissions_submitted:
        hours_pending += submission.hours

    hours_approved = 0
    submissions_approved = ServiceSubmission.objects.filter(
        semester=get_semester(), status='2')
    for submission in submissions_approved:
        hours_approved += submission.hours

    context = {
        'events': events,
        'hours_approved': hours_approved,
        'hours_pending': hours_pending,
        'submissions_pending': submissions_pending,
        'submissions_submitted': submissions_submitted,
        'position': position,
    }
    return render(request, 'service-chair/service-chair.html', context)
96d9b281c562a0ddb31d09723012cc9411c4ff09
14,532
def get_tank_history(request, tankid):
    """
    Returns a response listing the device history for each tank.
    """
    # Sanitize tankid
    tankid = int(tankid)

    # This query is too complex to be worth constructing in ORM,
    # so just use raw SQL.
    cursor = connection.cursor()
    cursor.execute("""\
SELECT t.time, t.device_id AS mac
FROM (SELECT d.time, d.device_id,
             LAG(d.device_id) OVER(ORDER BY d.time) AS prev_device_id
      FROM (SELECT time, tankid, device_id
            FROM devices_datum
            WHERE tankid = %s
           ) AS d
     ) AS t
WHERE t.device_id IS DISTINCT FROM t.prev_device_id;
""", [tankid])
    history = dictfetchall(cursor)

    history_serializer = TankHistorySerializer(history, many=True)
    return JsonResponse(history_serializer.data, safe=False)
39c21c9761ff0e40e1c1f8904cf9b9881faf13ed
14,533
import inspect


def get_absolute_module(obj):
    """
    Get the absolute path to the module for the given object.

    e.g. assert get_absolute_module(get_absolute_module) == 'artemis.general.should_be_builtins'

    :param obj: A python module, class, method, function, traceback, frame,
        or code object
    :return: A string representing the import path.
    """
    file_path = inspect.getfile(obj)
    return file_path_to_absolute_module(file_path)
ea2c85d9ba90414cddce00dcc5ed092b8c6777a2
14,534
def parse_c45(file_base, rootdir='.'):
    """
    Returns an ExampleSet from the C4.5 formatted data.
    """
    schema_name = file_base + NAMES_EXT
    data_name = file_base + DATA_EXT

    schema_file = find_file(schema_name, rootdir)
    if schema_file is None:
        raise ValueError('Schema file not found')
    data_file = find_file(data_name, rootdir)
    if data_file is None:
        raise ValueError('Data file not found')

    return _parse_c45(schema_file, data_file)
14d651a48c2fe65ad68c441722c4c39854efef2a
14,535
import torch


def to_numpy(tensor: torch.Tensor):
    """
    Convert a PyTorch Tensor to a Numpy Array.
    """
    if tensor is None:
        return tensor

    if tensor.is_quantized:
        tensor = tensor.dequantize()

    return tensor.cpu().detach().contiguous().numpy()
ed6bd50ef5db30b3df1304a0152998f2f27750c6
14,536
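A short usage sketch for the to_numpy record above:

import torch

t = torch.arange(6, dtype=torch.float32).reshape(2, 3)
a = to_numpy(t)
print(type(a), a.shape)  # <class 'numpy.ndarray'> (2, 3)
print(to_numpy(None))    # None passes through unchanged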
from flask_sqlalchemy import _wrap_with_default_query_class, SQLAlchemy


def initialize_flask_sqlathanor(db):
    """Initialize **SQLAthanor** contents on a `Flask-SQLAlchemy`_ instance.

    :param db: The :class:`flask_sqlalchemy.SQLAlchemy
      <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>` instance.
    :type db: :class:`flask_sqlalchemy.SQLAlchemy
      <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>`

    :returns: A mutated instance of ``db`` that replaces `SQLAlchemy`_
      components and their `Flask-SQLAlchemy`_ flavors with **SQLAthanor**
      analogs while maintaining `Flask-SQLAlchemy`_ and `SQLAlchemy`_
      functionality and interfaces.
    :rtype: :class:`flask_sqlalchemy.SQLAlchemy
      <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>`

    :raises ImportError: if called when `Flask-SQLAlchemy`_ is not installed
    :raises ValueError: if ``db`` is not an instance of
      :class:`flask_sqlalchemy.SQLAlchemy
      <flask_sqlalchemy:flask_sqlalchemy.SQLAlchemy>`
    """
    if not isinstance(db, SQLAlchemy):
        raise ValueError('db must be an instance of flask_sqlalchemy.SQLAlchemy')

    db.Column = Column
    db.relationship = _wrap_with_default_query_class(relationship, db.Query)

    return db
565c010a49e9d0ac2e82b40803b4f0871b526177
14,537
def add_vcf_header(vcf_reader):
    """
    Function to add new fields to the vcf header.

    Input: A vcf reader object
    Return: The vcf reader object with new headers added
    """
    # Metadata
    vcf_reader.metadata['SMuRFCmd'] = [get_command_line()]

    # Formats
    vcf_reader.formats['VAF'] = pyvcf.parser._Format('VAF', None, 'Float', 'Variant Allele Frequency calculated from the BAM file')
    vcf_reader.formats['CAD'] = pyvcf.parser._Format('CAD', None, 'Integer', 'Calculated Allelic Depth, used for VAF calculation')
    vcf_reader.formats['FT'] = pyvcf.parser._Format('FT', None, 'String', 'Sample filter')

    # Filters
    vcf_reader.filters['KnownVariant'] = pyvcf.parser._Filter('KnownVariant', 'Variant has already an ID, excluding COSMIC_IDs')
    vcf_reader.filters['BadMQ'] = pyvcf.parser._Filter('BadMQ', 'Variant with MQ <' + str(cfg['SMuRF']['mq']))
    vcf_reader.filters['BadQual'] = pyvcf.parser._Filter('BadQual', 'Variant with a QUAL <' + str(cfg['SMuRF']['qual']))
    vcf_reader.filters['MultiAllelic'] = pyvcf.parser._Filter('MultiAllelic', 'Variant has multiple alternative alleles')
    vcf_reader.filters['BlackList'] = pyvcf.parser._Filter('BlackList', 'Variant exists in a blacklist')
    vcf_reader.filters['Indel'] = pyvcf.parser._Filter('Indel', 'Variant is an indel')
    vcf_reader.filters['ControlEvidence'] = pyvcf.parser._Filter('ControlEvidence', 'Variant is also found in a control based on the GT')
    vcf_reader.filters['NoSampleEvidence'] = pyvcf.parser._Filter('NoSampleEvidence', 'Variant is not found in any of the samples based on the GT')
    vcf_reader.filters['AllSamplesFailedQC'] = pyvcf.parser._Filter('AllSamplesFailedQC', 'All samples failed the quality control')
    vcf_reader.filters['AllControlsFailedQC'] = pyvcf.parser._Filter('AllControlsFailedQC', 'All controls failed the quality control')
    vcf_reader.filters['ControlSubclonal'] = pyvcf.parser._Filter('ControlSubclonal', 'Variant is found as subclonal in a control based on the recalculated VAF')
    vcf_reader.filters['ControlClonal'] = pyvcf.parser._Filter('ControlClonal', 'Variant is found as clonal in a control based on the recalculated VAF')
    vcf_reader.filters['NoClonalSample'] = pyvcf.parser._Filter('NoClonalSample', 'Variant is not found as clonal in any of the samples based on the recalculated VAF')

    # Sample filters
    vcf_reader.filters['LowCov'] = pyvcf.parser._Filter('LowCov', 'Variant has a coverage <' + str(cfg['SMuRF']['coverage']) + ' in this sample/control')
    vcf_reader.filters['NoGenoType'] = pyvcf.parser._Filter('NoGenoType', 'Genotype is empty for this sample/control')
    vcf_reader.filters['isRef'] = pyvcf.parser._Filter('isRef', 'Genotype is a reference (i.e. reference 0/0)')
    vcf_reader.filters['isVariant'] = pyvcf.parser._Filter('isVariant', 'Genotype is a variant (i.e. not reference 0/0)')
    vcf_reader.filters['LowGQ'] = pyvcf.parser._Filter('LowGQ', 'Variant has a low genome quality for this sample/control')

    # Infos
    vcf_reader.infos['ABSENT_SAMPLES'] = pyvcf.parser._Info('ABSENT_SAMPLES', 1, 'Integer', 'Number of samples without the variant', None, None)
    vcf_reader.infos['SUBCLONAL_SAMPLES'] = pyvcf.parser._Info('SUBCLONAL_SAMPLES', 1, 'Integer', 'Number of samples with a subclonal variant', None, None)
    vcf_reader.infos['CLONAL_SAMPLES'] = pyvcf.parser._Info('CLONAL_SAMPLES', 1, 'Integer', 'Number of samples with a clonal variant', None, None)
    vcf_reader.infos['ABSENT_CONTROLS'] = pyvcf.parser._Info('ABSENT_CONTROLS', 1, 'Integer', 'Number of controls without the variant', None, None)
    vcf_reader.infos['SUBCLONAL_CONTROLS'] = pyvcf.parser._Info('SUBCLONAL_CONTROLS', 1, 'Integer', 'Number of controls with a subclonal variant', None, None)
    vcf_reader.infos['CLONAL_CONTROLS'] = pyvcf.parser._Info('CLONAL_CONTROLS', 1, 'Integer', 'Number of controls with a clonal variant', None, None)
    vcf_reader.infos['ABSENT_SAMPLE_NAMES'] = pyvcf.parser._Info('ABSENT_SAMPLE_NAMES', None, 'String', 'Samples without the variant', None, None)
    vcf_reader.infos['SUBCLONAL_SAMPLE_NAMES'] = pyvcf.parser._Info('SUBCLONAL_SAMPLE_NAMES', None, 'String', 'Samples with a subclonal variant', None, None)
    vcf_reader.infos['CLONAL_SAMPLE_NAMES'] = pyvcf.parser._Info('CLONAL_SAMPLE_NAMES', None, 'String', 'Samples with a clonal variant', None, None)
    vcf_reader.infos['ABSENT_CONTROL_NAMES'] = pyvcf.parser._Info('ABSENT_CONTROL_NAMES', None, 'String', 'Controls without the variant', None, None)
    vcf_reader.infos['SUBCLONAL_CONTROL_NAMES'] = pyvcf.parser._Info('SUBCLONAL_CONTROL_NAMES', None, 'String', 'Controls with a subclonal variant', None, None)
    vcf_reader.infos['CLONAL_CONTROL_NAMES'] = pyvcf.parser._Info('CLONAL_CONTROL_NAMES', None, 'String', 'Controls with a clonal variant', None, None)
    vcf_reader.infos['PASS_QC_SAMPLES'] = pyvcf.parser._Info('PASS_QC_SAMPLES', 1, 'Integer', 'Number of samples which pass all quality control filters', None, None)
    vcf_reader.infos['PASS_QC_CONTROLS'] = pyvcf.parser._Info('PASS_QC_CONTROLS', 1, 'Integer', 'Number of controls which pass all quality control filters', None, None)
    vcf_reader.infos['FAIL_QC_SAMPLES'] = pyvcf.parser._Info('FAIL_QC_SAMPLES', 1, 'Integer', 'Number of samples which failed one or multiple quality control filters', None, None)
    vcf_reader.infos['FAIL_QC_CONTROLS'] = pyvcf.parser._Info('FAIL_QC_CONTROLS', 1, 'Integer', 'Number of controls which failed one or multiple quality control filters', None, None)
    vcf_reader.infos['PASS_QC_SAMPLE_NAMES'] = pyvcf.parser._Info('PASS_QC_SAMPLE_NAMES', None, 'String', 'Samples which pass all quality control filters', None, None)
    vcf_reader.infos['PASS_QC_CONTROL_NAMES'] = pyvcf.parser._Info('PASS_QC_CONTROL_NAMES', None, 'String', 'Controls which pass all quality control filters', None, None)
    vcf_reader.infos['FAIL_QC_SAMPLE_NAMES'] = pyvcf.parser._Info('FAIL_QC_SAMPLE_NAMES', None, 'String', 'Samples which failed one or multiple quality control filters', None, None)
    vcf_reader.infos['FAIL_QC_CONTROL_NAMES'] = pyvcf.parser._Info('FAIL_QC_CONTROL_NAMES', None, 'String', 'Controls which failed one or multiple quality control filters', None, None)

    return vcf_reader
36e5819de6c09c7e60638b183bfe415fc19361db
14,538
def get_seats_percent(election_data):
    """
    This function takes a list of lists as an argument, with each list
    representing a party's election results, and returns a tuple with the
    percentage of Bundestag seats won by various political affiliations.

    Parameters:
        election_data (list): A list of lists, each representing a party's
            election results

    Returns:
        A tuple with the percentage of Bundestag seats won by various
        political affiliations
    """
    left_seats = 0
    right_seats = 0
    extreme_seats = 0
    center_seats = 0
    total_bundestag_seats = 0

    for party in election_data[1:]:
        seats = int(party[1])
        total_bundestag_seats += seats

        if 'far' in party[2]:
            extreme_seats += seats
        else:
            center_seats += seats

        if 'left' in party[2]:
            left_seats += seats
        else:
            right_seats += seats

    left_percent = round((left_seats / total_bundestag_seats * 100), 2)
    right_percent = round((right_seats / total_bundestag_seats * 100), 2)
    extreme_percent = round((extreme_seats / total_bundestag_seats * 100), 2)
    center_percent = round((center_seats / total_bundestag_seats * 100), 2)

    return left_percent, right_percent, extreme_percent, center_percent
a131d64747c5c0dde8511e9ec4da07252f96a6ec
14,539
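A worked example for the get_seats_percent record above, using hypothetical rows of the form [party, seats, affiliation]; the function skips row 0, treating it as a header:

election_data = [
    ["party", "seats", "affiliation"],
    ["A", 100, "center-left"],
    ["B", 150, "center-right"],
    ["C", 50, "far-right"],
]
left, right, extreme, center = get_seats_percent(election_data)
print(left, right, extreme, center)  # 33.33 66.67 16.67 83.33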
def get_player_gamelog(player_id, season, season_type='Regular Season', timeout=30):
    """
    Collects the game log history for a given player in a given season,
    for a specific season type (pre-season, regular season, or playoffs).

    Parameters
    ----------
    :param player_id: Identifier of the target player [type: int]
    :param season: Target season to analyse [type: str, example: "2020-21"]
    :param season_type: Specific season type accepted by the endpoint
        [type: str, default='Regular Season']
    :param timeout: Maximum time to wait for the request
        [type: int, default=30]

    Returns
    -------
    :return df_gamelog: Data set with specific, detailed information about
        the game log history extracted for the player.  Details about the
        contents of this return value can be found in the official
        documentation of the playergamelog endpoint. [type: pd.DataFrame]
    """
    # Fetch the player's game log
    player_gamelog = playergamelog.PlayerGameLog(
        player_id=player_id,
        season=season,
        season_type_all_star=season_type,
        timeout=timeout
    )

    # Convert the data to a DataFrame and add season information
    df_gamelog = player_gamelog.player_game_log.get_data_frame()
    df_gamelog['SEASON'] = season
    df_gamelog['SEASON_TYPE'] = season_type

    # Convert the date column
    df_gamelog['GAME_DATE'] = pd.to_datetime(df_gamelog['GAME_DATE'])
    df_gamelog.columns = [col.lower().strip() for col in df_gamelog.columns]

    return df_gamelog
d21132df8a4f72055f37ef7039427f42ce03610e
14,540
import re


def unbound_text_to_html5(text, language=None):
    """
    Converts the provided text to HTML5 custom data attributes.

    Usage:
        {{text|unbound_text_to_html5:"Greek"}}
    """
    # If the language is English, then don't bother doing anything
    if language is not None and language.lower() == "english":
        return text

    # Make the document that will contain the verse
    converted_doc = minidom.Document()

    # Make the verse node to attach the content to
    verse_node = converted_doc.createElement("span")
    verse_node.setAttribute("class", "verse")

    # Append the verse node
    converted_doc.appendChild(verse_node)

    # Split up the text and place the text segments in nodes
    segments = re.findall(r"[\s]+|[\[\],.:.;]|[^\s\[\],.:.;]+", text)

    for s in segments:
        # Don't wrap punctuation in a word node
        if s in [";", ",", ".", "[", "]", ":"] or len(s.strip()) == 0:
            txt_node = converted_doc.createTextNode(s)
            verse_node.appendChild(txt_node)
        else:
            word_node = converted_doc.createElement("span")
            word_node.setAttribute("class", "word")

            # Create the text node and append it
            if language is None or language.lower() == "greek":
                txt_node = converted_doc.createTextNode(s)
            else:
                txt_node = converted_doc.createTextNode(transform_text(s, language))
            word_node.appendChild(txt_node)

            # Append the node
            verse_node.appendChild(word_node)

    return converted_doc.toxml(encoding="utf-8")
1fe50c4844e126395c1aa1e8d5ba464217003557
14,541
import numpy as np


def sort_points(points):
    """Sorts points first by argument, then by modulus.

    Parameters
    ----------
    points : array_like (n_points, 3)
        The points to be sorted: (x, y, intensity)

    Returns
    -------
    points_sorted : :class:`numpy.ndarray` (n_points, 3)
        The sorted points.
    """
    positions = points[:, :2].astype(float)
    with np.errstate(invalid='ignore', divide='ignore'):
        tangents = np.nan_to_num(positions[:, 1] / positions[:, 0])
    arguments = np.arctan(tangents)
    moduli = np.sqrt(np.sum(np.square(positions), axis=1))
    inds = np.lexsort((moduli, arguments))
    points_sorted = points[inds]
    return points_sorted
3d5ae7cdfa33abba906cefaf3cd1ab0ab5899e32
14,542
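A short usage sketch for the sort_points record above; points sharing an argument are ordered by modulus:

import numpy as np

pts = np.array([[1.0, 0.0, 9.0],
                [0.0, 1.0, 5.0],
                [2.0, 0.0, 7.0]])
print(sort_points(pts))
# (1, 0) sorts before (2, 0) (same argument, smaller modulus),
# and both sort before (0, 1) (larger argument)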
def get_pairs(scores):
    """
    Returns pairs of indexes where the first value in the pair has a higher
    score than the second value in the pair.

    Parameters
    ----------
    scores : list of int
        Contains a list of numbers.

    Returns
    -------
    query_pair : list of pairs
        This contains a list of pairs of indexes in scores.
    """
    query_pair = []
    for query_scores in scores:
        temp = sorted(query_scores, reverse=True)
        pairs = []
        for i in range(len(temp)):
            for j in range(len(temp)):
                if temp[i] > temp[j]:
                    pairs.append((i, j))
        query_pair.append(pairs)
    return query_pair
1d4bf17dffb7ec8b934701254448e5a7dfe41cf9
14,543
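A short usage sketch for the get_pairs record above; note the returned indexes refer to positions in the descending-sorted copy of each score list:

scores = [[1, 3, 2]]
print(get_pairs(scores))  # [[(0, 1), (0, 2), (1, 2)]]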
def make():
    """Make a new migration.

    Returns:
        Response: json status message
    """
    response = None
    try:
        with capture_print(escape=True) as content:
            current_app.config.get('container').make('migrator').make(request.form['name'])
        response = {'message': content.get_text(), 'status': 'success'}
    except SystemExit:
        response = {'message': content.get_text(), 'status': 'error'}
    return jsonify(response)
24524cc1906f621e9e927d3e0b7265b65ab8ebe5
14,544
def find_files(args, client):
    """Get a list of all the objects to process."""
    objects = []
    continuation_token = 'UNSET'
    while continuation_token:
        if continuation_token == 'UNSET':
            object_list = client.list_objects_v2(Bucket=args['bucket'],
                                                 Prefix=args['prefix'])
        else:
            object_list = client.list_objects_v2(Bucket=args['bucket'],
                                                 Prefix=args['prefix'],
                                                 ContinuationToken=continuation_token)

        if args['debug']:
            log("Found %d items from bucket list_objects_v2(), includes dirs."
                % object_list['KeyCount'], level="DEBUG")

        # This means we have no more keys, or none found
        if object_list['KeyCount'] > 0:
            for item in object_list['Contents']:
                if not item['Key'].endswith('/'):  # ignore directories
                    objects.append(item['Key'])

        # And here we check to see if there's more results to recover
        if object_list['IsTruncated']:
            continuation_token = object_list['NextContinuationToken']
        else:
            continuation_token = False

    # What did we get?
    log("Found %d items" % len(objects))

    # If we have a tracking database argument we need to dedupe the list
    # against already processed files
    if args['track']:
        conn = initalise_connection(args['track'])
        for filepath in conn.execute('''SELECT filepath FROM files
                                        WHERE bucket=? AND filepath LIKE ?''',
                                     (args['bucket'], args['prefix'] + '%')):
            if filepath[0] in objects:
                objects.remove(filepath[0])
                if args['debug']:
                    log("Excluding already processed file %s" % filepath[0],
                        level="DEBUG")
        conn.close()

    return objects
784e5514539d794854efbe910f2a0039002c0af9
14,545
from typing import Union
import os
from typing import Optional
from typing import List
import filecmp


def are_dirs_equal(
    dir1: Union[str, os.PathLike],
    dir2: Union[str, os.PathLike],
    ignore: Optional[List[str]] = None,
) -> bool:
    """
    Compare the content of two directories, recursively.

    :param dir1: the left operand.
    :param dir2: the right operand.
    :param ignore: a list of names to ignore (see dircmp docs regarding
        'ignore').
    :return: True if the directories are equal, False otherwise.
    """
    ignore = ignore or None
    left_only, right_only, diff = dircmp_recursive(
        filecmp.dircmp(dir1, dir2, ignore=ignore)
    )
    return left_only == right_only == diff == set()
a6923f280d2b1d73f4b121388febb23027dc2d47
14,546
import codecs
import traceback


def writeCSV(info, resultFile):
    """
    Write an info line to the CSV.

    :param info:
    :return:
    """
    try:
        with codecs.open(resultFile, 'a', encoding='utf8') as fh_results:
            # Print every field from the field list to the output file
            for field_pretty in CSV_FIELD_ORDER:
                field = CSV_FIELDS[field_pretty]
                try:
                    field = info[field]
                except KeyError as e:
                    field = "False"
                try:
                    field = str(field).replace(r'"', r'\"').replace("\n", " ")
                except AttributeError as e:
                    if args.debug:
                        traceback.print_exc()
                fh_results.write("%s;" % field)
            # Append vendor scan results
            for vendor in VENDORS:
                if vendor in info['vendor_results']:
                    fh_results.write("%s;" % info['vendor_results'][vendor])
                else:
                    fh_results.write("-;")
            fh_results.write('\n')
    except:
        if args.debug:
            traceback.print_exc()
        return False
    return True
e1ec45c308b1f18c1633f3213166a8edaf7122b2
14,547
def generate_hmac(str_to_sign, secret):
    """Signs the specified string using the specified secret.

    Args:
        str_to_sign : string, the string to sign
        secret      : string, the secret used to sign

    Returns:
        signed_message : string, the signed str_to_sign
    """
    message = str_to_sign.encode('utf-8')
    secret = secret.encode('utf-8')

    cmd = ['echo -n "' + str(message) + '" | openssl dgst -sha256 -binary -hmac "' + str(secret) + '"']
    process, signed_message, error = linuxutil.popen_communicate(cmd, shell=True)
    if process.returncode != 0:
        raise Exception("Unable to generate signature. " + str(error))

    return signed_message
c773cb1f470f0f52934758e7f66fe01047419cbd
14,548
import numpy as np
from scipy import interpolate


def interpolate_scores(coords: np.ndarray, scores: np.ndarray,
                       coord_range: tuple, step: float = 0.001) -> np.ndarray:
    """
    Given a coord_range and values for specific coords, interpolate to the
    rest of the grid.

    Args:
        coords: array of lons and lats of points whose values are known
        scores: array of the coords' values
        coord_range: range of the desired grid
        step: resolution of sample

    Returns:
        z: np.ndarray - 2D array of the values over the entire grid of
            coord_range
    """
    min_lon, min_lat, max_lon, max_lat = coord_range
    x = np.arange(min_lon, max_lon, step=step)
    y = np.arange(min_lat, max_lat, step=step)
    grid_x, grid_y = np.meshgrid(x, y)
    z = interpolate.griddata(coords, scores, (x[None, :], y[:, None]),
                             method='linear')
    return z
2ed5660191f01018344e9b55af372f10133bc6a9
14,549
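A hedged usage sketch for the interpolate_scores record above, interpolating four corner values over a 10x10 grid:

import numpy as np

coords = np.array([[0.0, 0.0], [0.0, 0.1], [0.1, 0.0], [0.1, 0.1]])
scores = np.array([0.0, 1.0, 1.0, 2.0])
z = interpolate_scores(coords, scores, (0.0, 0.0, 0.1, 0.1), step=0.01)
print(z.shape)  # (10, 10) grid of linearly interpolated values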
def remove_blank_from_dict(data):
    """Optimise data from the default outputted dictionary."""
    if isinstance(data, dict):
        return dict(
            (key, remove_blank_from_dict(value))
            for key, value in data.items()
            if is_not_blank(value) and is_not_blank(remove_blank_from_dict(value))
        )
    if isinstance(data, list):
        return [
            remove_blank_from_dict(value)
            for value in data
            if is_not_blank(value) and is_not_blank(remove_blank_from_dict(value))
        ]
    return data
ae77d0b5b9a1cffdd1832df3a5513cc79e600138
14,550
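A usage sketch for the remove_blank_from_dict record above; `is_not_blank` is external to the snippet, so a plausible stand-in is defined here (hypothetical):

def is_not_blank(value):
    return value not in (None, "", [], {})

data = {"a": 1, "b": "", "c": {"d": None, "e": "x"}, "f": []}
print(remove_blank_from_dict(data))  # {'a': 1, 'c': {'e': 'x'}}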
from typing import List
from typing import Dict
import re
import json
import subprocess


def upload_to_database(
    database_secrets_manager_arn: str, user_mapping: List[Dict]
) -> str:
    """Uploads data from disk to an RDS postgres instance.

    Uses the provided user_mapping to replace the user subs of data in the
    local file with the user subs of the newly created users in cognito."""
    with open(SOURCE_DATABASE_DUMP_FILE, "r") as f:
        database_dump = f.read()
    for user in user_mapping:
        database_dump = database_dump.replace(user["source_sub"], user["target_sub"])

    [CURATOR_SUB] = [
        target_user["target_sub"]
        for target_user in user_mapping
        if target_user["email"] == "[email protected]"
    ]

    def replace_missing_sub_with_curator_sub(match):
        if match.group(0) in [
            target_user["target_sub"] for target_user in user_mapping
        ]:
            return match.group(0)
        return CURATOR_SUB

    database_dump = re.sub(
        r"[0-9a-f]{8}-(?:[0-9a-f]{4}-){3}[0-9a-f]{12}",
        replace_missing_sub_with_curator_sub,
        database_dump,
    )
    with open(SOURCE_DATABASE_DUMP_FILE, "w") as f:
        f.write(database_dump)

    secrets = json.loads(
        secretsmanager_client.get_secret_value(SecretId=database_secrets_manager_arn)[
            "SecretString"
        ]
    )
    dsn = f"postgres://{secrets['username']}:{secrets['password']}@{secrets['host']}/{secrets['dbname']}"
    subprocess.Popen(
        f"psql '{dsn}?options=--search_path%3dapt' -f {SOURCE_DATABASE_DUMP_FILE}",
        shell=True,
        stdout=subprocess.PIPE,
    ).stdout.read()
    return SOURCE_DATABASE_DUMP_FILE
061fe70844ab5ed6136d6c0b087ed91405aae280
14,551
import os
import ssl
import urllib


def new_client(
        dockerd_url=None,
        tls=False,
        tls_verify=False,
        cert_path=None,
        timeout=None,
):
    """
    Return a newly configured Docker client.
    """
    _dockerd_url = dockerd_url
    if not _dockerd_url:
        _dockerd_url = os.getenv('DOCKER_HOST', DOCKER_DEFAULT_DOCKERD_URL)

    _tls = tls
    tls_config = None
    if tls_verify or str(os.environ.get('DOCKER_TLS_VERIFY', '0')) == '1':
        _tls = True
        _cert_path = os.getenv('DOCKER_CERT_PATH', cert_path)
        if not _cert_path:
            raise BuildRunnerConfigurationError(
                "TLS connection specified but cannot determine cert path"
                " (from DOCKER_CERT_PATH env variable)"
            )
        ca_cert_path = os.path.join(_cert_path, 'ca.pem')
        client_cert = (
            os.path.join(_cert_path, 'cert.pem'),
            os.path.join(_cert_path, 'key.pem')
        )
        tls_config = docker.tls.TLSConfig(
            ssl_version=ssl.PROTOCOL_TLSv1,
            client_cert=client_cert,
            verify=ca_cert_path,
            assert_hostname=False,
        )

    if _tls:
        # make sure the scheme is https
        url_parts = urllib.parse.urlparse(_dockerd_url)
        if url_parts.scheme == 'tcp':
            _dockerd_url = urllib.parse.urlunparse(('https',) + url_parts[1:])

    args = {}
    if timeout is not None:
        if timeout == 0:
            args['timeout'] = MAX_TIMEOUT
        else:
            args['timeout'] = timeout

    return Client(
        base_url=_dockerd_url,
        version=DOCKER_API_VERSION,
        tls=tls_config,
        **args
    )
8a9bf9b09881fa6daac7bf4aedb2610c92d084f4
14,552
def merge_mosaic_images(mosaic_dict, mosaic_images, orig_images, Y_orig=None):
    """
    Merge the list of mosaic images with all original images.

    Args:
        mosaic_dict: Dictionary specifying how mosaic images were created, returned from make_mosaic
        mosaic_images: List of all mosaic images returned from make_mosaic
        orig_images: List of all images, some (or all, or none) of which were used to generate
                     the mosaic images
        Y_orig: If building mosaic images for training, the Y/expected images corresponding to orig_images

    Returns:
        3 lists - merged_images, merged_sizes, merged_Y (empty list if Y_orig was not provided).
        This list of images can then be resized, windowed, etc., and provided as input images
        for training or predictions.  To split the merged list back into the separate portions,
        use split_merged_mosaic.
    """
    orig_index = list(range(0, len(orig_images)))
    merged_images = []
    merged_sizes = []
    merged_Y = []

    # If Y/expected values are desired, construct the merged Y
    # images to correspond with the mosaic images.
    if Y_orig:
        for k, v in mosaic_dict.items():
            merged_Y.append(combine_images(Y_orig, v))

    # Mosaic images are output first
    for img in mosaic_images:
        merged_images.append(img)
        merged_sizes.append([img.shape[0], img.shape[1]])

    mosaic_all_ix = []
    for v in mosaic_dict.values():
        mosaic_all_ix.extend(v)
    leftovers = [x for x in orig_index if x not in mosaic_all_ix]

    # And then output all images that are not part of a larger mosaic image
    for ix in leftovers:
        leftover_img = orig_images[ix]
        merged_images.append(leftover_img)
        merged_sizes.append([leftover_img.shape[0], leftover_img.shape[1]])
        if Y_orig:
            merged_Y.append(Y_orig[ix])
    return (merged_images, merged_sizes, merged_Y)
d16875462c09b671db785ec101eb09028b1a7cbe
14,553
def show2D(dd, impixel=None, im=None, fig=101, verbose=1, dy=None, sigma=None, colorbar=False, title=None, midx=2, units=None):
    """ Show result of a 2D scan

    Args:
        dd (DataSet)
        impixel (array or None)
        im (array or None)

    Returns:
        (image extent, vstep, vsweep)
    """
    if dd is None:
        return None

    extent, g0, g1, vstep, vsweep, arrayname = dataset2Dmetadata(dd)
    tr = image_transform(dd, mode='pixel')
    array = getattr(dd, arrayname)

    if impixel is None and im is None:
        im = np.array(array)
        impixel = tr._transform(im)

    labels = [s.name for s in array.set_arrays]

    xx = tr.matplotlib_image_extent()
    ny = vstep.size
    nx = vsweep.size

    im = qtt.utilities.tools.diffImageSmooth(impixel, dy=dy, sigma=sigma)
    if verbose:
        print('show2D: nx %d, ny %d' % (nx, ny,))

    if verbose >= 2:
        print('extent: %s' % xx)
    if units is None:
        unitstr = ''
    else:
        unitstr = ' (%s)' % units
    if fig is not None:
        scanjob = dd.metadata.get('scanjob', dict())
        pgeometry.cfigure(fig)
        plt.clf()

        if impixel is None:
            if verbose >= 2:
                print('show2D: show raw image')
            plt.pcolormesh(vstep, vsweep, im)
        else:
            if verbose >= 2:
                print('show2D: show image')
            plt.imshow(impixel, extent=xx, interpolation='nearest')
        labelx = labels[1]
        if scanjob.get('sweepdata', None) is not None:
            labelx = sweepgate(scanjob)
            plt.xlabel('%s' % labelx + unitstr)

        if scanjob.get('stepdata', None) is not None:
            if units is None:
                plt.ylabel('%s' % stepgate(scanjob))
            else:
                plt.ylabel('%s (%s)' % (stepgate(scanjob), units))

        if title is not None:
            plt.title(title)
        if colorbar:
            plt.colorbar()
        if verbose >= 2:
            print('show2D: at show')
        try:
            plt.show(block=False)
        except Exception:
            # the ipython backend does not know about the block keyword...
            plt.show()

    return xx, vstep, vsweep
d9560e2dd54ed8daf8450e2db8e1f5d8357a601c
14,554
def get_job(api_key, jq_id): """ Fetch a job and its status :param api_key: user id of the client :param jq_id: job queue id :return: job queue id """ if Auth.verify_auth_key(api_key): if Auth.verify_job(api_key, jq_id): return trigger.get_job(jq_id) return abort(400)
f323386e530e354f52bcbcc6301c5fb1af4e4767
14,555
def contour_to_valid(cnt, image_shape):
    """Convert rect to xys, i.e., eight points

    The `image_shape` is used to make sure all points returned are valid,
    i.e., within the image area
    """
    # rect = cv2.minAreaRect(cnt)
    if len(cnt.shape) != 3:
        raise ValueError('expected a contour of shape (N, 1, 2)')
    rect = cnt.reshape([cnt.shape[0], cnt.shape[2]])
    h, w = image_shape[0:2]

    def get_valid_x(x):
        if x < 0:
            return 0
        if x >= w:
            return w - 1
        return x

    def get_valid_y(y):
        if y < 0:
            return 0
        if y >= h:
            return h - 1
        return y

    for i_xy, (x, y) in enumerate(rect):
        x = get_valid_x(x)
        y = get_valid_y(y)
        rect[i_xy, :] = [x, y]

    points = np.reshape(rect, -1)
    return points
a4f85d77c0805903b220d3670edc5db05ea001ed
14,556
def search(datafile, query, bool_operator):
    """
    Queries on a set of documents.

    :param datafile: an open file object containing one document per line
    :param query: the query text
    :param bool_operator: the operator. Must be one of [OR, AND]
    :return: the list of indexes (as strings) matching the search criteria
    """
    # Normalize the query string to a set of unique lowercase words
    query = _extract_words(query)

    # Read the documents, one per line
    data = datafile.readlines()

    # AND requires every query word in a document; OR requires at least one
    results = []
    for i, text in enumerate(data):
        words = _extract_words(text)
        if bool_operator == 'AND':
            matched = query.issubset(words)
        else:
            matched = bool(query.intersection(words))
        if matched:
            results.append(str(i))
    return results
6f8ec35063178e49a557de1849363568561638ed
14,557
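A usage sketch for search above, using an in-memory stream instead of a real file. The helper _extract_words is not part of the snippet, so a minimal version matching the docstring is assumed:

import io

def _extract_words(text):
    # Hypothetical helper: unique lowercase words.
    return set(text.lower().split())

datafile = io.StringIO("the quick brown fox\nlazy dog\nquick dog\n")
print(search(datafile, "quick dog", "AND"))  # ['2']
datafile.seek(0)  # rewind, since search() consumed the stream
print(search(datafile, "quick dog", "OR"))   # ['0', '1', '2']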
from typing import List
from typing import Optional


def recurse_structures(
    structure: Component,
    ignore_components_prefix: Optional[List[str]] = None,
    ignore_functions_prefix: Optional[List[str]] = None,
) -> DictConfig:
    """Recurse over structures"""
    ignore_functions_prefix = ignore_functions_prefix or []
    ignore_components_prefix = ignore_components_prefix or []

    if (
        hasattr(structure, "function_name")
        and structure.function_name in ignore_functions_prefix
    ):
        return DictConfig({})

    if hasattr(structure, "name") and any(
        structure.name.startswith(i) for i in ignore_components_prefix
    ):
        return DictConfig({})

    output = {structure.name: structure.info}

    for element in structure.references:
        if (
            isinstance(element, ComponentReference)
            and element.ref_cell.name not in output
        ):
            output.update(recurse_structures(element.ref_cell))

    return output
696e04a0184ebb7b8d2e0789e711a676c12ed89c
14,558
import dipy.denoise.noise_estimate as ne
import dipy.reconst.dti as dti
from dipy.core.gradients import gradient_table
from dipy.io import read_bvals_bvecs
from spinalcordtoolbox.image import Image


def compute_dti(fname_in, fname_bvals, fname_bvecs, prefix, method, evecs, file_mask):
    """
    Compute DTI.

    :param fname_in: input 4d file.
    :param fname_bvals: bvals txt file
    :param fname_bvecs: bvecs txt file
    :param prefix: output prefix. Example: "dti_"
    :param method: algo for computing dti ('standard' or 'restore')
    :param evecs: bool: output diffusion tensor eigenvectors
    :param file_mask: binary mask used to restrict the fit (empty string to disable)
    :return: True/False
    """
    # Open file.
    nii = Image(fname_in)
    data = nii.data
    sct.printv('data.shape (%d, %d, %d, %d)' % data.shape)

    # open bvecs/bvals
    bvals, bvecs = read_bvals_bvecs(fname_bvals, fname_bvecs)
    gtab = gradient_table(bvals, bvecs)

    # mask and crop the data. This is a quick way to avoid calculating Tensors on the background of the image.
    if not file_mask == '':
        sct.printv('Open mask file...', param.verbose)
        # open mask file
        nii_mask = Image(file_mask)
        mask = nii_mask.data

    # fit tensor model
    sct.printv('Computing tensor using "' + method + '" method...', param.verbose)
    if method == 'standard':
        tenmodel = dti.TensorModel(gtab)
        if file_mask == '':
            tenfit = tenmodel.fit(data)
        else:
            tenfit = tenmodel.fit(data, mask)
    elif method == 'restore':
        sigma = ne.estimate_sigma(data)
        dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
        if file_mask == '':
            tenfit = dti_restore.fit(data)
        else:
            tenfit = dti_restore.fit(data, mask)

    # Compute metrics
    sct.printv('Computing metrics...', param.verbose)
    # FA
    nii.data = tenfit.fa
    nii.save(prefix + 'FA.nii.gz', dtype='float32')
    # MD
    nii.data = tenfit.md
    nii.save(prefix + 'MD.nii.gz', dtype='float32')
    # RD
    nii.data = tenfit.rd
    nii.save(prefix + 'RD.nii.gz', dtype='float32')
    # AD
    nii.data = tenfit.ad
    nii.save(prefix + 'AD.nii.gz', dtype='float32')
    if evecs:
        data_evecs = tenfit.evecs
        # output 1st (V1), 2nd (V2) and 3rd (V3) eigenvectors as 4d data
        for idim in range(3):
            nii.data = data_evecs[:, :, :, :, idim]
            nii.save(prefix + 'V' + str(idim + 1) + '.nii.gz', dtype="float32")

    return True
149cfbc3f4fa2f3c1a33e4d4e6ee09983176e1b4
14,559
import time as ticker

import numpy as np
from tqdm import tqdm

import liionpack as lp


def solve(
    netlist=None,
    parameter_values=None,
    experiment=None,
    I_init=1.0,
    htc=None,
    initial_soc=0.5,
    nproc=12,
    output_variables=None,
):
    """
    Solves a pack simulation

    Parameters
    ----------
    netlist : pandas.DataFrame
        A netlist of circuit elements with format. desc, node1, node2, value.
        Produced by liionpack.read_netlist or liionpack.setup_circuit
    parameter_values : pybamm.ParameterValues class
        A dictionary of all the model parameters
    experiment : pybamm.Experiment class
        The experiment to be simulated. experiment.period is used to
        determine the length of each timestep.
    I_init : float, optional
        Initial guess for single battery current [A]. The default is 1.0.
    htc : float array, optional
        Heat transfer coefficient array of length Nspm. The default is None.
    initial_soc : float
        The initial state of charge for every battery. The default is 0.5
    nproc : int, optional
        Number of processes to start in parallel for mapping. The default is 12.
    output_variables : list, optional
        Variables to evaluate during solve. Must be a valid key in the
        model.variables

    Raises
    ------
    Exception
        If netlist, parameter_values, or experiment is not supplied.

    Returns
    -------
    output : ndarray shape [# variable, # steps, # batteries]
        simulation output array
    """
    if netlist is None or parameter_values is None or experiment is None:
        raise Exception("Please supply a netlist, parameter_values, and experiment")

    # Get netlist indices for resistors, voltage sources, current sources
    Ri_map = netlist["desc"].str.find("Ri") > -1
    V_map = netlist["desc"].str.find("V") > -1
    I_map = netlist["desc"].str.find("I") > -1
    Terminal_Node = np.array(netlist[I_map].node1)
    Nspm = np.sum(V_map)

    # Generate the protocol from the supplied experiment
    protocol = lp.generate_protocol_from_experiment(experiment)
    dt = experiment.period
    Nsteps = len(protocol)

    # Solve the circuit to initialise the electrochemical models
    V_node, I_batt = lp.solve_circuit(netlist)

    # Create battery simulation and update initial state of charge
    sim = lp.create_simulation(parameter_values, make_inputs=True)
    lp.update_init_conc(sim, SoC=initial_soc)

    # The simulation output variables calculated at each step for each battery
    # Must be a 0D variable i.e. battery wide volume average - or X-averaged
    # for 1D model
    variable_names = [
        "Terminal voltage [V]",
        "Measured battery open circuit voltage [V]",
    ]
    if output_variables is not None:
        for out in output_variables:
            if out not in variable_names:
                variable_names.append(out)
        # variable_names = variable_names + output_variables
    Nvar = len(variable_names)

    # Storage variables for simulation data
    shm_i_app = np.zeros([Nsteps, Nspm], dtype=float)
    shm_Ri = np.zeros([Nsteps, Nspm], dtype=float)
    output = np.zeros([Nvar, Nsteps, Nspm], dtype=float)

    # Initialize currents in battery models
    shm_i_app[0, :] = I_batt * -1

    # Set up integrator
    integrator, variables_fn, t_eval = _create_casadi_objects(
        I_init, htc[0], sim, dt, Nspm, nproc, variable_names
    )

    # Step forward in time
    time = 0
    end_time = dt * Nsteps
    step_solutions = [None] * Nspm
    V_terminal = []
    record_times = []

    v_cut_lower = parameter_values["Lower voltage cut-off [V]"]
    v_cut_higher = parameter_values["Upper voltage cut-off [V]"]

    sim_start_time = ticker.time()

    for step in tqdm(range(Nsteps), desc='Solving Pack'):
        # Step the individual battery models
        step_solutions, var_eval = _mapped_step(
            sim.built_model,
            step_solutions,
            lp.build_inputs_dict(shm_i_app[step, :], htc),
            integrator,
            variables_fn,
            t_eval,
        )
        output[:, step, :] = var_eval

        time += dt

        # Calculate internal resistance and update netlist
        temp_v = output[0, step, :]
        temp_ocv = output[1, step, :]
        # temp_Ri = output[2, step, :]
        # This could be used instead of Equivalent ECM resistance which has
        # been changing definition
        temp_Ri = (temp_ocv - temp_v) / shm_i_app[step, :]
        # Make Ri more stable
        current_cutoff = np.abs(shm_i_app[step, :]) < 1e-6
        temp_Ri[current_cutoff] = 1e-12
        # temp_Ri = 1e-12
        shm_Ri[step, :] = temp_Ri

        netlist.loc[V_map, ("value")] = temp_ocv
        netlist.loc[Ri_map, ("value")] = temp_Ri
        netlist.loc[I_map, ("value")] = protocol[step]

        # Stop if voltage limits are reached
        if np.any(temp_v < v_cut_lower):
            print("Low voltage limit reached")
            break
        if np.any(temp_v > v_cut_higher):
            print("High voltage limit reached")
            break

        if time <= end_time:
            record_times.append(time)
            V_node, I_batt = lp.solve_circuit(netlist)
            V_terminal.append(V_node[Terminal_Node][0])
        if time < end_time:
            shm_i_app[step + 1, :] = I_batt[:] * -1

    # Collect outputs
    all_output = {}
    all_output["Time [s]"] = np.asarray(record_times)
    all_output["Pack current [A]"] = np.asarray(protocol[: step + 1])
    all_output["Pack terminal voltage [V]"] = np.asarray(V_terminal)
    all_output["Cell current [A]"] = shm_i_app[: step + 1, :]
    for j in range(Nvar):
        all_output[variable_names[j]] = output[j, : step + 1, :]

    toc = ticker.time()
    lp.logger.notice(
        "Solve circuit time " + str(np.around(toc - sim_start_time, 3)) + "s"
    )
    return all_output
fa51e4e69a434e3a3c728b4675844c0bfa29d3fd
14,560
def global_node_entropy(data, dx=3, dy=1, taux=1, tauy=1, overlapping=True, connections="all", tie_precision=None): """ Calculates global node entropy\\ [#pessa2019]_\\ :sup:`,`\\ [#McCullough]_ for an ordinal network obtained from data. (Assumes directed and weighted edges). Parameters ---------- data : array, return of :func:`ordpy.ordinal_network` Array object in the format :math:`[x_{1}, x_{2}, x_{3}, \\ldots ,x_{n}]` or :math:`[[x_{11}, x_{12}, x_{13}, \\ldots, x_{1m}], \\ldots, [x_{n1}, x_{n2}, x_{n3}, \\ldots, x_{nm}]]` or an ordinal network returned by :func:`ordpy.ordinal_network`\\ [*]_. dx : int Embedding dimension (horizontal axis) (default: 3). dy : int Embedding dimension (vertical axis); it must be 1 for time series (default: 1). taux : int Embedding delay (horizontal axis) (default: 1). tauy : int Embedding delay (vertical axis) (default: 1). overlapping : boolean If `True`, **data** is partitioned into overlapping sliding windows (default: `True`). If `False`, adjacent partitions are non-overlapping. connections : str The ordinal network is constructed using `'all'` permutation successions in a symbolic sequence or only `'horizontal'` or `'vertical'` successions. Parameter only valid for image data (default: `'all'`). tie_precision : int If not `None`, **data** is rounded with `tie_precision` number of decimals (default: `None`). Returns ------- : float Value of global node entropy. Notes ----- .. [*] In case **data** is an ordinal network returned by :func:`ordpy.ordinal_network`, the parameters of :func:`ordpy.global_node_entropy` are infered from the network. Examples -------- >>> global_node_entropy([1,2,3,4,5,6,7,8,9], dx=2) 0.0 >>> >>> global_node_entropy(ordinal_network([1,2,3,4,5,6,7,8,9], dx=2)) 0.0 >>> >>> global_node_entropy(np.random.uniform(size=100000), dx=3) 1.4988332319747597 >>> >>> global_node_entropy(random_ordinal_network(dx=3)) 1.5 >>> >>> global_node_entropy([[1,2,1,4],[8,3,4,5],[6,7,5,6]], dx=2, dy=2, connections='horizontal') 0.25 >>> >>> global_node_entropy([[1,2,1,4],[8,3,4,5],[6,7,5,6]], dx=2, dy=2, connections='vertical') 0.0 """ if len(data)==3 and type(data[0][0])==np.str_: nodes, links, weights = data else: #assumes 'normalized==True' and 'directed==True'. nodes, links, weights = ordinal_network(data, dx, dy, taux, tauy, True, overlapping, True, connections, tie_precision=tie_precision) links_source = links.transpose()[0] links_target = links.transpose()[1] h_gn = 0 for node in nodes: args = np.argwhere(links_source==node).flatten() renorm_weights = weights[args]/np.sum(weights[args]) args_in = np.argwhere(links_target==node).flatten() p_in = np.sum(weights[args_in]) h_i = -np.sum(renorm_weights*np.log2(renorm_weights)) h_gn += p_in*h_i return h_gn
a14e7419bcd58d41400b888443df726836e6d04a
14,561
def get_cert(certificate): """ Return the data of the certificate :returns: the certificate file contents """ cert_file = "{}/certs/{}".format(snapdata_path, certificate) with open(cert_file) as fp: cert = fp.read() return cert
da2ac96bf16a74de9ac75a46d14f3b95b5f64264
14,562
def model(data, ix_to_char, char_to_ix, num_iterations = 35000, n_a = 50, dino_names = 7, vocab_size = 27):
    """
    Trains the model and generates dinosaur names.

    Arguments:
    data -- text corpus
    ix_to_char -- dictionary that maps the index to a character
    char_to_ix -- dictionary that maps a character to an index
    num_iterations -- number of iterations to train the model for
    n_a -- number of units of the RNN cell
    dino_names -- number of dinosaur names you want to sample at each iteration.
    vocab_size -- number of unique characters found in the text (size of the vocabulary)

    Returns:
    parameters -- learned parameters
    """

    # Retrieve n_x and n_y from vocab_size
    n_x, n_y = vocab_size, vocab_size

    # Initialize parameters
    parameters = initialize_parameters(n_a, n_x, n_y)

    # Initialize loss (this is required because we want to smooth our loss)
    loss = get_initial_loss(vocab_size, dino_names)

    # Build list of all dinosaur names (training examples).
    with open("dinos.txt") as f:
        examples = f.readlines()
    examples = [x.lower().strip() for x in examples]

    # Shuffle list of all dinosaur names
    np.random.seed(0)
    np.random.shuffle(examples)

    # Initialize the hidden state of your RNN
    a_prev = np.zeros((n_a, 1))

    # Optimization loop
    for j in range(num_iterations):

        # Set the index `idx` (see instructions above)
        idx = j % len(examples)

        # Set the input X (see instructions above)
        single_example = examples[idx]
        single_example_chars = [c for c in single_example]
        single_example_ix = [char_to_ix[c] for c in single_example_chars]
        X = [None] + single_example_ix

        # Set the labels Y (see instructions above)
        ix_newline = char_to_ix["\n"]
        Y = X[1:] + [ix_newline]

        # Perform one optimization step: Forward-prop -> Backward-prop -> Clip -> Update parameters
        # Choose a learning rate of 0.01
        curr_loss, gradients, a_prev = optimize(X, Y, a_prev, parameters, learning_rate = 0.01)

        # Keep a running (smoothed) average of the loss so training progress is easier to read.
        loss = smooth(loss, curr_loss)

        # Every 2000 iterations, generate "n" characters using sample() to check that the model is learning properly
        if j % 2000 == 0:

            print('Iteration: %d, Loss: %f' % (j, loss) + '\n')

            # The number of dinosaur names to print
            seed = 0
            for name in range(dino_names):

                # Sample indices and print them
                sampled_indices = sample(parameters, char_to_ix, seed)
                print_sample(sampled_indices, ix_to_char)

                seed += 1  # To get the same result (for grading purposes), increment the seed by one.

            print('\n')

    return parameters
b1fb202b2c697cae1473c91597b39914e6197dce
14,563
import random


def random_choice(gene):
    """
    Randomly select an object, such as a string, from a list. Gene must have
    a defined `choices` list.

    Args:
        gene (Gene): A gene with a set `choices` list.

    Returns:
        object: Selected choice.
    """
    if 'choices' not in gene.__dict__:
        raise KeyError("'choices' not defined in this gene, please include a list of values!")

    return random.choice(gene.choices)
8a01a2039a04262aa4fc076bdd87dbf760f45253
14,564
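A quick demo of random_choice above; the Gene class here is a minimal stand-in for whatever gene object the surrounding library defines, since only a `choices` attribute is needed:

import random

class Gene:
    # Hypothetical minimal gene: just carries the choices list.
    def __init__(self, choices):
        self.choices = choices

random.seed(0)  # optional, for reproducible picks
gene = Gene(["red", "green", "blue"])
print(random_choice(gene))  # one of the three colours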
def get_actor(payload: PayloadJSON, actor_id: int) -> ResourceJSON:
    """Return an actor by actor_id"""
    actor = ActorModel.find_by_id(actor_id)
    if actor is None:
        abort(404)
    return jsonify({"success": True, "actor": actor.json()})
0808cba237e47a45dd095f86d44153f97a947e66
14,565
import string def check_if_punctuations(word: str) -> bool: """Returns ``True`` if ``word`` is just a sequence of punctuations.""" for c in word: if c not in string.punctuation: return False return True
64ba5f9dc69c59490a2ea69e7c2d938151d71b37
14,566
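A few calls to check_if_punctuations above, including the empty-string edge case:

print(check_if_punctuations("?!..."))  # True
print(check_if_punctuations("hi!"))    # False
print(check_if_punctuations(""))       # True: no non-punctuation characters to fail on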
import re

from nltk.corpus import stopwords
from nltk.stem import PorterStemmer


def normalize_text(string, remove_stopwords=False, stem_words=False):
    """
    Remove punctuation, parentheses, question marks, etc., spell out digits,
    and optionally remove stopwords and stem words.
    """
    strip_special_chars = re.compile("[^A-Za-z0-9 ]+")
    string = string.lower()
    string = string.replace("<br />", " ")
    # str.replace() does not interpret regexes, so use re.sub() to drop
    # parenthesised text; remaining special characters are stripped below.
    string = re.sub(r"\(.*?\)", " ", string)
    string = string.replace('&', 'and')
    string = string.replace('@', 'at')
    string = string.replace('0', 'zero')
    string = string.replace('1', 'one')
    string = string.replace('2', 'two')
    string = string.replace('3', 'three')
    string = string.replace('4', 'four')
    string = string.replace('5', 'five')
    string = string.replace('6', 'six')
    string = string.replace('7', 'seven')
    string = string.replace('8', 'eight')
    string = string.replace('9', 'nine')

    string = string.split()
    if remove_stopwords:
        stop_words = stopwords.words('english')
        string = [w for w in string if w not in stop_words]
    if stem_words:
        ps = PorterStemmer()
        string = [ps.stem(w) for w in string]
    string = ' '.join(string)

    return re.sub(strip_special_chars, "", string)
0aff8864f526ffe194c661acc69ccb2cf91a6f24
14,567
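A usage sketch for normalize_text above; the stopword path assumes the nltk stopword corpus has been downloaded (nltk.download('stopwords')):

print(normalize_text("The <br /> 3 dogs & cats!! (not this part)"))
# 'the three dogs and cats'

print(normalize_text("The 3 dogs", remove_stopwords=True, stem_words=True))
# 'three dog'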
def get_new_codes():
    """Return new promo codes and refresh the DB"""
    db = dataset.connect(database_url)
    new_codes = get_code()
    table = db['promo']

    # Collect codes that are not yet in the DB
    new = {}
    for key, value in new_codes.items():
        if table.find_one(promo=key) is None:
            new[key] = [value[0], value[1]]

    # Add the new codes to the DB
    for key in new:
        table.insert(dict(promo=key, desc=new_codes[key][1], exp=new_codes[key][0]))
    return new
e3ece2e8b43fa43ac4c8d384dbf55957d8bc62c6
14,568
def process_bulk_add_ip(request, formdict):
    """
    Performs the bulk add of ips by parsing the request data. Batches
    some data into a cache object for performance by reducing large
    amounts of single database queries.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param formdict: The form representing the bulk uploaded data.
    :type formdict: dict
    :returns: :class:`django.http.HttpResponse`
    """
    ip_names = []
    cached_results = {}

    cleanedRowsData = convert_handsontable_to_rows(request)
    for rowData in cleanedRowsData:
        if rowData is not None and rowData.get(form_consts.IP.IP_ADDRESS) is not None:
            ip_names.append(rowData.get(form_consts.IP.IP_ADDRESS).lower())

    ip_results = IP.objects(ip__in=ip_names)

    for ip_result in ip_results:
        cached_results[ip_result.ip] = ip_result

    cache = {form_consts.IP.CACHED_RESULTS: cached_results, 'cleaned_rows_data': cleanedRowsData}

    response = parse_bulk_upload(request, parse_row_to_bound_ip_form, add_new_ip_via_bulk, formdict, cache)

    return response
d38bc7766f232b972637da9c92567ebde91ddf52
14,569
def gen_public_e(lambda_: int) -> int:
    """
    Generates candidates from a decreasing number of random bytes, converting
    each to an integer, until one falls below lambda_.

    Then restarts from half the number of bytes used and decrements the
    candidate by one until gcd(candidate, lambda_) == 1.
    """
    bytes_ = 1028 + 1
    candidate = crand(bytes_)
    while candidate > lambda_:  # Find a byte count yielding a small enough candidate
        candidate = crand(bytes_)
        bytes_ -= 1
    candidate = crand(bytes_ // 2)  # Generate a new candidate in the middle
    e = 2**16 + 1
    while candidate > e:  # Find a candidate coprime with lambda_
        if gcd(candidate, lambda_) == 1:
            break
        candidate -= 1
    return candidate
903df12b7af83be24d3ef377e2aa95a00a7df089
14,570
from typing import Callable, TypeVar, Union

T = TypeVar("T")


def lazy(maybe_callable: Union[T, Callable[[], T]]) -> T:
    """
    Call and return a value if callable else return it.

    >>> lazy(42)
    42
    >>> lazy(lambda: 42)
    42
    """
    if callable(maybe_callable):
        return maybe_callable()
    return maybe_callable
83522ae39b8ec19e86d5e30b2b0a9131a7c56a35
14,571
from typing import List import subprocess def _git_diff(staged_or_modified: bool, extension: str) -> List[str]: """ Args: extension: the extension of files considered, such as "py" or "ml" staged_or_modified (bool) Whether to consider staged files (True) or modified ones (False) Returns: A list of relevant versioned files that are staged or modified """ git_cmd = ["git", "diff"] if staged_or_modified: git_cmd += ["--cached"] git_cmd += ["--name-only", "--diff-filter=ACMR", "*." + extension] git_diff_result = subprocess.run(git_cmd, stdout=subprocess.PIPE, universal_newlines=True, check=True) # The comprehension filters empty lines return [x for x in git_diff_result.stdout.split("\n") if x]
520b3f59061a74b8df3dce22340a47ff413f3c02
14,572
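A usage sketch for _git_diff above; it shells out to git, so this only works when run inside a git repository with git installed:

staged = _git_diff(True, "py")     # staged .py files
modified = _git_diff(False, "py")  # modified-but-unstaged .py files
print("staged:", staged)
print("modified:", modified)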
from random import shuffle


def shuffle_list(*ls):
    """
    Shuffle multiple lists in unison, preserving their pairwise alignment.

    :param ls: the lists to shuffle
    :return: an iterator of shuffled tuples, one per input list
    """
    l = list(zip(*ls))
    shuffle(l)
    return zip(*l)
ec46e4a8da2c04cf62da2866d2d685fc796887e5
14,573
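A quick demo of shuffle_list above; note it returns tuples (via zip), not lists, and the i-th elements of the outputs still belong together:

xs = [1, 2, 3, 4]
ys = ['a', 'b', 'c', 'd']
xs2, ys2 = shuffle_list(xs, ys)
print(xs2)  # e.g. (3, 1, 4, 2)
print(ys2)  # the matching letters, e.g. ('c', 'a', 'd', 'b')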
def cancer_variants(institute_id, case_name): """Show cancer variants overview.""" data = controllers.cancer_variants(store, request.args, institute_id, case_name) return data
185282e9308f7a9f8a0d7faf4c5d3608dee556cc
14,574
def nodes(G): """Returns an iterator over the graph nodes.""" return G.nodes()
3a1a543f1af4d43c79fd0083eb77fedd696547ec
14,575
from typing import Union


def cyclePosition(image: np.ndarray, startPosition: position) -> Union[position, bool]:
    """
    Advance one cell through the image, wrapping to the start of the next row
    at the end of each row.

    :param image: numpy image array
    :param startPosition: the position (x, y) to advance from
    :return: the new position (x, y), or False if the new coords would fall out of bounds
    """
    if not imageWrapper.boundsChecker(image, startPosition):
        return False
    if startPosition.coords[0] == image.shape[1] - 1:
        if startPosition.coords[1] < image.shape[0] - 1:
            return position((0, startPosition.coords[1] + 1))
        return False
    return position((startPosition.coords[0] + 1, startPosition.coords[1]))
a4d3eaf1ddecc884f7391614ae04ff4b10029af3
14,576
import os

import numpy as np
from PIL import Image


def get_image(file_name):
    """Retrieve an image from a file and return it as an np array of pixels."""
    image_array = []
    file_name = os.path.abspath(file_name)
    img = Image.open(file_name)
    img = img.convert("RGB")
    # `image_size` is a module-level constant (the target width/height).
    img = img.resize((image_size, image_size))
    in_data = np.asarray(img)
    image_array.append(in_data)
    return np.array(image_array)
cec87f685ba9b613aee371c57c436b5a9cae43c3
14,577
def context_to_ingestion_params(context): """extract the ingestion task params from job/serving context""" featureset_uri = context.get_param("featureset") featureset = context.get_store_resource(featureset_uri) infer_options = context.get_param("infer_options", InferOptions.Null) source = context.get_param("source") if source: source = get_source_from_dict(source) elif featureset.spec.source.to_dict(): source = get_source_from_dict(featureset.spec.source.to_dict()) overwrite = context.get_param("overwrite", None) targets = context.get_param("targets", None) if not targets: targets = featureset.spec.targets targets = [get_target_driver(target, featureset) for target in targets] return featureset, source, targets, infer_options, overwrite
41925ce484bbc273caf9a8f0f33eba0e7163a7c8
14,578
def bining_for_calibration(pSigma_cal_ordered_, minL_sigma, maxL_sigma, Er_vect_cal_orderedSigma_, bins, coverage_percentile):
    """
    Bin the values of the standard deviations observed during inference and
    estimate a specified coverage percentile in the absolute error (observed
    during inference as well). Bins that have less than 50 samples are merged
    until they surpass this threshold.

    Parameters
    ----------
    pSigma_cal_ordered_ : numpy array
        Array of standard deviations ordered in ascending way.
    minL_sigma : float
        Minimum value of standard deviations included in pSigma_cal_ordered_ array.
    maxL_sigma : float
        Maximum value of standard deviations included in pSigma_cal_ordered_ array.
    Er_vect_cal_orderedSigma_ : numpy array
        Array of absolute values of errors corresponding to the array of ordered standard deviations.
    bins : int
        Number of bins to split the range of standard deviations included in pSigma_cal_ordered_ array.
    coverage_percentile : float
        Value to use for estimating coverage when evaluating the percentiles of the observed absolute value of errors.

    Returns
    -------
    mean_sigma : numpy array
        Array with the mean standard deviations computed per bin.
    min_sigma : numpy array
        Array with the minimum standard deviations computed per bin.
    max_sigma : numpy array
        Array with the maximum standard deviations computed per bin.
    error_thresholds : numpy array
        Thresholds of the errors computed to attain a certain error coverage per bin.
    err_err : numpy array
        Error bars in errors (one standard deviation for a binomial distribution estimated by bin vs. the other bins) for the calibration error.
    """
    # thresholds = np.logspace(np.log10(minL_sigma), np.log10(maxL_sigma), num=bins)
    thresholds = np.linspace(minL_sigma, maxL_sigma, num=bins)
    classes = np.digitize(pSigma_cal_ordered_, thresholds)
    Nbin = np.zeros(bins + 1)
    for i in range(bins + 1):
        indices = (classes == i)
        Nbin[i] = indices.sum()

    # Repair bins
    new_thresholds_l = []
    new_nbins_l = []
    sumN = 0
    for i in range(Nbin.shape[0]):
        sumN += Nbin[i]
        if sumN > 50:
            if i > (thresholds.shape[0] - 1):
                new_thresholds_l.append(thresholds[-1])
            else:
                new_thresholds_l.append(thresholds[i])
            new_nbins_l.append(sumN)
            sumN = 0
    new_thresholds = np.array(new_thresholds_l)
    new_nbins = np.array(new_nbins_l)
    new_thresholds[-1] = thresholds[-1]
    new_nbins[-1] += sumN
    #
    classes = np.digitize(pSigma_cal_ordered_, new_thresholds[:-1])
    error_thresholds = -1. * np.ones(new_nbins.shape[0])
    mean_sigma = -1. * np.ones(new_nbins.shape[0])
    min_sigma = -1. * np.ones(new_nbins.shape[0])
    max_sigma = -1. * np.ones(new_nbins.shape[0])
    err_err = -1. * np.ones(new_nbins.shape[0])
    Ncal = pSigma_cal_ordered_.shape[0]
    for i in range(error_thresholds.shape[0]):
        indices = (classes == i)
        n_aux = indices.sum()
        assert n_aux == new_nbins[i]
        print('Points in bin %d: %d' % (i, n_aux))
        mean_sigma[i] = np.mean(pSigma_cal_ordered_[indices])
        min_sigma[i] = np.min(pSigma_cal_ordered_[indices])
        max_sigma[i] = np.max(pSigma_cal_ordered_[indices])
        error_thresholds[i] = np.percentile(Er_vect_cal_orderedSigma_[indices], coverage_percentile)
        err_err[i] = np.sqrt(new_nbins[i] * (Ncal - new_nbins[i])) / Ncal * error_thresholds[i]

    return mean_sigma, min_sigma, max_sigma, error_thresholds, err_err
cb241f6292726a86a7505e950f32fd1c1fefd19f
14,579
def add_user(request):
    """Register a new user"""
    info = {}
    tpl_name = 'user/add_user.html'

    if request.method == 'POST':
        # Save the data submitted by the user
        nickname = request.POST.get('nickname')
        if User.objects.filter(nickname__exact=nickname).exists():
            # The nickname is already taken
            info = {'error': 'This nickname already exists'}
            # Show the registration page again
            return render(request, tpl_name, info)
        password = request.POST.get('password')
        password2 = request.POST.get('password2')
        if password != password2:
            # The two passwords do not match
            info = {'error': 'The two passwords do not match'}
            # Show the registration page again
            return render(request, tpl_name, info)
        age = request.POST.get('age')
        sex = request.POST.get('sex')
        f_in = request.FILES.get('icon')

        user = User(nickname=nickname, password=password, age=age, sex=sex)
        if f_in:
            user.icon.save(f_in.name, f_in, save=False)
        user.set_password(password)
        user.save()

        # Record the user info in the session
        request.session['uid'] = user.id
        request.session['nickname'] = user.nickname

        # Redirect to the user profile page
        url = '/user/read_user/?uid={}'.format(user.id)
        return redirect(url)
    else:
        # Show the registration page
        return render(request, tpl_name, info)
bbfd3bc8ed47f19f527352f39d5e5b2bdc80d450
14,580
def process_input(input_string, max_depth): """ Clean up the input, convert it to an array and compute the longest array, per feature type. """ # remove the quotes and extra spaces from the input string input_string = input_string.replace('"', '').replace(', ', ',').strip() # convert the string to an array and also track the longest array, so # we know how many levels for the feature type. tmp = [] if input_string: tmp = input_string.split(',') if max_depth < len(tmp): max_depth = len(tmp) # return the array and the depth return tmp, max_depth
ca0fddd0b3bf145c7fc0654212ae43f02799b466
14,581
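A quick demo of process_input above, showing both the quote/space cleanup and how max_depth is carried across calls:

values, depth = process_input('"cat", "dog", "fish"', 2)
print(values)  # ['cat', 'dog', 'fish']
print(depth)   # 3 -- the longest array seen so far

values, depth = process_input('', depth)
print(values, depth)  # [] 3 -- empty input leaves the depth untouched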
import random def generate_new_shape() -> tuple[int, list[int], list[int]]: """Generate new shape #0: hot_cell_y = [0,1,2,3] hot_cell_x = [5,5,5,5] X X X X #1: hot_cell_y = [0,0,0,0] hot_cell_x = [3,4,5,6] XXXX #2: hot_cell_y = [0,1,0,1] hot_cell_x = [4,4,5,5] XX XX #3. hot_cell_y = [0,0,1,1] hot_cell_x = [4,5,5,6] XX XX #4. hot_cell_y = [0,1,1,2] hot_cell_x = [4,4,5,5] X XX X #5. hot_cell_y = [0,1,2,2] hot_cell_x = [4,4,4,5] X X XX #6. hot_cell_y = [1,0,1,1] hot_cell_x = [4,4,5,6] X XXX """ shape_id = random.randint(1, 7) logger.info("generating shape id => " + str(shape_id)) shape_color = shape_id if(shape_id == 2): shape_y_pos_list = [0, 0, 0, 0] shape_x_pos_list = [3, 4, 5, 6] elif(shape_id == 3): shape_y_pos_list = [0, 1, 0, 1] shape_x_pos_list = [4, 4, 5, 5] elif(shape_id == 4): shape_y_pos_list = [0, 0, 1, 1] shape_x_pos_list = [4, 5, 5, 6] elif(shape_id == 5): shape_y_pos_list = [0, 1, 1, 2] shape_x_pos_list = [4, 4, 5, 5] elif(shape_id == 6): shape_y_pos_list = [0, 1, 2, 2] shape_x_pos_list = [4, 4, 4, 5] elif(shape_id == 7): shape_y_pos_list = [0, 1, 1, 1] shape_x_pos_list = [4, 4, 5, 6] else: shape_y_pos_list = [0, 1, 2, 3] shape_x_pos_list = [5, 5, 5, 5] return (shape_color, shape_x_pos_list, shape_y_pos_list)
d7c2c710f72ed5d21b7e63779815ee7bfb8421f4
14,582
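A usage sketch for generate_new_shape above. The snippet references a module-level logger, which is assumed and defined here for the demo; the board render just visualises the returned cells:

import logging
import random

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)  # the function above assumes this exists

random.seed(42)  # make the randint() pick reproducible
color, xs, ys = generate_new_shape()
cells = set(zip(xs, ys))
for y in range(4):
    print(''.join('X' if (x, y) in cells else '.' for x in range(10)))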
def get_process_list(process):
    """Analyse the process description and return the Actinia process chain
    and the name of the processing result

    :param process: The process description
    :return: (output_names, actinia_process_list)
    """
    input_names, process_list = analyse_process_graph(process)
    output_names = []

    # First analyse the data entry
    if "data_id" not in process:
        raise Exception("Process %s requires parameter <data_id>" % PROCESS_NAME)

    output_names.append(process["data_id"])

    pc = create_process_chain_entry(input_name=process["data_id"])
    process_list.append(pc)

    # Then add the input to the output
    for input_name in input_names:
        # Create the output name based on the input name and method
        output_name = input_name
        output_names.append(output_name)

    return output_names, process_list
f1e6689c50e00117379107fc840a09f4638c3912
14,583
def createAES(key, IV, implList=None): """Create a new AES object. :type key: str :param key: A 16, 24, or 32 byte string. :type IV: str :param IV: A 16 byte string :rtype: tlslite.utils.AES :returns: An AES object. """ if implList is None: implList = ["openssl", "pycrypto", "python"] for impl in implList: if impl == "openssl" and cryptomath.m2cryptoLoaded: return openssl_aes.new(key, 2, IV) elif impl == "pycrypto" and cryptomath.pycryptoLoaded: return pycrypto_aes.new(key, 2, IV) elif impl == "python": return python_aes.new(key, 2, IV) raise NotImplementedError()
c41a5d028028383630b0977522a9617334c94d03
14,584
from typing import OrderedDict import binascii import json import sys def command_info(opts): """Display general information from a .zs file's header. Usage: zs info [--metadata-only] [--] <zs_file> zs info --help Arguments: <zs_file> Path or URL pointing to a .zs file. An argument beginning with the four characters "http" will be treated as a URL. Options: -m, --metadata-only Output only the file's metadata, not any general information about it. Output will be valid JSON. """ with open_zs(opts, parallelism=0) as z: if opts["--metadata-only"]: info = z.metadata else: info = OrderedDict() info["root_index_offset"] = z.root_index_offset info["root_index_length"] = z.root_index_length info["total_file_length"] = z.total_file_length info["codec"] = z.codec info["data_sha256"] = (binascii.hexlify(z.data_sha256) .decode("ascii")) info["metadata"] = z.metadata info["statistics"] = OrderedDict() info["statistics"]["root_index_level"] = z.root_index_level json.dump(info, sys.stdout, indent=4) sys.stdout.write("\n") return 0
8917e69c78a04ff70df181de8851b9688a498988
14,585
import os def load_triple(cdict, label2words, extend=True): """ Loading triples of color modifiers Parameters ---------- cdict : dict Color dictionary maps a string to list of rgb tuples. label2words : dict Dictionary mapping color labels to color names. Returns ------- dict: Triples can be formed by colors in the cdict dictionary. """ bypass_quantifier = ["almost","cobalt"] file_comp = os.path.join(BASE_DIR, "comparatives.txt") quan_comp = os.path.join(BASE_DIR, "quantifiers.txt") to_compara = dict(line.strip().split(":") for line in open(file_comp, encoding="utf-8")) to_more_quanti = dict(line.strip().split(":") for line in open(quan_comp, encoding="utf-8")) triples = [] for label in cdict: words = label2words[label].split() if len(words) > 1: quantifier, base = words[0], "".join(words[1:]) if quantifier == "very": base = "".join(words[2:]) quantifier = words[1] if base in cdict: if words[1] in to_compara: triples.append((base, ("more", to_compara[quantifier]), tuple(label2words[base].split()), label)) else: if base in cdict: if quantifier in to_compara: # uni-gram('lighter',) triples.append((base, (to_compara[quantifier],), tuple(label2words[base].split()), label)) elif quantifier in to_more_quanti: # bigram('more','bluish') triples.append((base, ("more", to_more_quanti[quantifier]), tuple(label2words[base].split()), label)) else: if extend: # this adds more power, but not increase AUC if quantifier not in bypass_quantifier: triples.append((base, ("more", quantifier), tuple(label2words[base].split()), label)) return triples
457a2f11b74485736e8f530ba3dc159ed3096078
14,586
from urllib.error import HTTPError, URLError
from urllib.request import Request, urlopen


def update_status_issue(issue, status_id, notes):
    """Request to change the status of a problem in a redmine project.

    'issue': A hash of the issue bound to a redmine project.
    'status_id': Id of the status used by the redmine project.
    'notes': Comments about the update.

    Return value:
    0 - on success
    non zero - HTTP protocol errors are valid responses.
    """
    values = '''{
        "issue": {
            "status_id": "%s",
            "notes": "%s"
        }
    }''' % (status_id, notes)
    req = Request(
        '%s/issues/%s.json' % (_service_host_project(), issue),
        data=values.encode(), method='PUT')
    req.add_header('Content-Type', 'application/json')
    req.add_header('X-Redmine-API-Key', _service_access_key())
    try:
        with urlopen(req) as context:
            return 0 if context.code == 200 else context.code
    except HTTPError as err:
        print('The server couldn\'t fulfill the request.')
        print('Error code: ', err.code)
        return err.code
    except URLError as err:
        print('We failed to reach a server.')
        print('Reason: ', err.reason)
        return 1
6c0118f514083d228ac1d27271d297b3e593be52
14,587
from typing import Optional def _get_avgiver_epost(root: ET.Element, ns: dict) -> Optional[str]: """ Sought: the email of the submitter Can be found in a child element (<mets:note>) of an <mets:agent> with ROLE="OTHER", OTHERROLE="SUBMITTER", TYPE="INDIVIDUAL" """ try: agent = [ agent for agent in _get_agent_elements(root, ns) if ( agent.get("ROLE") == "OTHER" and agent.get("OTHERROLE") == "SUBMITTER" and agent.get("TYPE") == "INDIVIDUAL" ) ].pop() notes = agent.findall("mets:note", namespaces=ns) email = [ note.text for note in notes if "@" in note.text ].pop() return email except IndexError: return None
9be1f53fcf9799f3a559cd12b3bfa056aaac11a7
14,588
import numpy as np
import tensorflow as tf


def xavier_init(fan_in, fan_out, constant=1):
    """Xavier initialization of network weights."""
    low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
    high = constant * np.sqrt(6.0 / (fan_in + fan_out))
    # tf.random_uniform is the TensorFlow 1.x API
    return tf.random_uniform((fan_in, fan_out),
                             minval=low, maxval=high,
                             dtype=tf.float32,
                             seed=np.random.randint(0, 1e9))
35b6f7b75eb44f1828d82c6743ec4751db4ff234
14,589
def strToMat3(dbstr):
    """
    Convert a comma-separated string "e00, e01, e02, ..." of 16 values (a
    flattened 4x4 matrix) into a panda Mat3, using the upper-left 3x3 part.

    :param dbstr: the string
    :return: panda Mat3
    """
    exx = dbstr.split(',')
    exxdecimal = list(map(float, exx))
    assert len(exxdecimal) == 16
    return Mat3(exxdecimal[0], exxdecimal[1], exxdecimal[2],
                exxdecimal[4], exxdecimal[5], exxdecimal[6],
                exxdecimal[8], exxdecimal[9], exxdecimal[10])
8db33dc5e2fab613cd6cba00021486fe722c8d32
14,590
def map2(func, *matrix):
    """
    Maps a function onto the elements of a matrix.

    Also accepts multiple matrices. Thus matrix addition is
    map2(add, matrix1, matrix2)
    """
    matrix2 = []
    for i in range(len(matrix[0])):
        row2 = []
        matrix2.append(row2)
        for j in range(len(matrix[0][i])):
            args = [x[i][j] for x in matrix]
            row2.append(func(*args))
    return matrix2
9af6f311c80e70789ba6d623776fc2f80edbd905
14,591
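A quick demo of map2 above, showing both the single-matrix and multi-matrix forms:

from operator import add

m1 = [[1, 2], [3, 4]]
m2 = [[10, 20], [30, 40]]
print(map2(add, m1, m2))          # [[11, 22], [33, 44]] -- matrix addition
print(map2(lambda x: x * 2, m1))  # [[2, 4], [6, 8]]     -- elementwise scaling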
def register_libtype(cls):
    """Registry of library types we may come across when parsing XML.

    This allows us to define a few helper functions to dynamically convert
    the XML into objects. See buildItem() below for an example.
    """
    LIBRARY_TYPES[cls.TYPE] = cls
    return cls
bb94e9f73ec04be834fa6be7de0cebf7c10a57ec
14,592
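A sketch of how the register_libtype decorator above fills its registry; LIBRARY_TYPES is defined at module level in the original, so it is declared here for the demo:

LIBRARY_TYPES = {}  # the module-level registry the decorator fills in

@register_libtype
class Movie:
    TYPE = 'movie'

@register_libtype
class Show:
    TYPE = 'show'

print(LIBRARY_TYPES)            # {'movie': <class 'Movie'>, 'show': <class 'Show'>}
cls = LIBRARY_TYPES['movie']    # dispatch on an XML element's type attribute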
def construct_tablepath(fmdict, prefix=''): """ Construct a suitable pathname for a CASA table made from fmdict, starting with prefix. prefix can contain a /. If prefix is not given, it will be set to "ephem_JPL-Horizons_%s" % fmdict['NAME'] """ if not prefix: prefix = "ephem_JPL-Horizons_%s" % fmdict['NAME'] return prefix + "_%.0f-%.0f%s%s.tab" % (fmdict['earliest']['m0']['value'], fmdict['latest']['m0']['value'], fmdict['latest']['m0']['unit'], fmdict['latest']['refer'])
95041aab91ac9994ef2068d5e05f6cd63969d94e
14,593
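A usage sketch for construct_tablepath above; the fmdict shape is inferred from the fields the function actually reads, and the values are illustrative:

fmdict = {
    'NAME': 'Mars',
    'earliest': {'m0': {'value': 57000.0, 'unit': 'd'}},
    'latest': {'m0': {'value': 57400.0, 'unit': 'd'}, 'refer': 'UTC'},
}
print(construct_tablepath(fmdict))
# ephem_JPL-Horizons_Mars_57000-57400dUTC.tab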
def _grad_mulAux(kern, x, y, yerr, original_kernel):
    """ _grad_mulAux() is necessary when we are dealing with multiple terms of
    sums and multiplications, for example ES*ESS + ES*ESS*WN + RQ*ES*WN,
    without having everything break apart

        Parameters
    kern = kernel in use
    x = range of values of the independent variable (usually time)
    y = range of values of the dependent variable (the measurements)
    yerr = error in the measurements
    original_kernel = original kernel (original sum) being used

        Returns
    See _grad_mul(kernel, x, y, yerr) for more info
    """
    original_kernel = original_kernel
    cov_matrix = build_matrix(original_kernel, x, yerr)
    listof__kernels = [kern.__dict__["k2"]]  # to store each kernel separately
    kernel_k1 = kern.__dict__["k1"]

    while len(kernel_k1.__dict__) == 2:
        listof__kernels.insert(0, kernel_k1.__dict__["k2"])
        kernel_k1 = kernel_k1.__dict__["k1"]

    listof__kernels.insert(0, kernel_k1)  # each kernel is now separated

    kernelaux1 = []
    kernelaux2 = []
    for i, e in enumerate(listof__kernels):
        kernelaux1.append(listof__kernels[i])
        kernelaux2.append(_kernel_deriv(listof__kernels[i]))

    grad_result = []
    kernelaux11 = kernelaux1
    kernelaux22 = kernelaux2
    ii = 0
    while ii < len(listof__kernels):
        kernelaux11 = kernelaux1[:ii] + kernelaux1[ii + 1:]
        _kernels = _np.prod(_np.array(kernelaux11))
        for ij, e in enumerate(kernelaux22[ii]):
            result = _grad_lp(kernelaux2[ii][ij] * _kernels, x, y, yerr, cov_matrix)
            grad_result.insert(0, result)
        kernelaux11 = kernelaux1
        kernelaux22 = kernelaux2
        ii = ii + 1
    grad_result = grad_result[::-1]
    return grad_result
ce140d8a73a8304d0601077b5ed01f93cb17deab
14,594
def get_unbiased_p_hat(number_candidates, c1, c2, p): """Get the p_hat to unbias miracle. Args: number_candidates: The number of candidates to be sampled. c1: The factor that the conditional density of z given x is proportional to if the inner product between x and z is more than gamma. c2: The factor that the conditional density of z given x is proportional to if the inner product between x and z is less than gamma. p: The probability with which privunit samples an unit vector from the shaded spherical cap associated with input (see original privunit paper). Returns: p_hat: The probability with which unbiased miracle will sample an unit vector from the shaded spherical cap associated with input. """ # Compute the fraction of candidates that lie inside the cap. beta = np.array(range(number_candidates + 1)) / number_candidates pi_in = 1 / number_candidates * (c1 / (beta * c1 + (1 - beta) * c2)) p_hat = np.sum( stats.binom.pmf(range(number_candidates + 1), number_candidates, p / c1) * range(number_candidates + 1) * pi_in) return p_hat
5d45696557835f2bc655b601e015bb08356fe2dd
14,595
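A usage sketch for get_unbiased_p_hat above; the parameter values are illustrative (in privunit, c1 > c2 and p is the cap probability), not taken from any particular experiment:

import numpy as np
from scipy import stats

p_hat = get_unbiased_p_hat(number_candidates=100, c1=4.0, c2=0.5, p=0.9)
print(p_hat)  # the debiased cap-sampling probability for unbiased miracle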
def prox_gradf(xy, step):
    """Take one gradient step: xy - step * grad_f(xy)"""
    return xy - step * grad_f(xy)
7700850b9bfb5c5be5f0a63a678df93991673d81
14,596
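A small demo of prox_gradf above. The snippet does not define grad_f, so a hypothetical quadratic objective is assumed here; repeated steps converge to its minimiser:

import numpy as np

def grad_f(xy):
    # Hypothetical objective f(x, y) = x**2 + y**2, so the gradient is 2 * xy.
    return 2 * xy

xy = np.array([1.0, -2.0])
for _ in range(50):
    xy = prox_gradf(xy, step=0.1)  # each step scales xy by 0.8
print(xy)  # close to [0, 0], the minimiser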
import math

import numpy


def CBND(x, y, rho):
    """
    A function for computing bivariate normal probabilities. ::

           Alan Genz
           Department of Mathematics
           Washington State University
           Pullman, WA 99164-3113
           Email : [email protected]

    This function is based on the method described by ::

        Drezner, Z and G.O. Wesolowsky, (1990),
        On the computation of the bivariate normal integral,
        Journal of Statist. Comput. Simul. 35, pp. 101-107,

    with major modifications for double precision, and for ``|R|`` close to 1.
    This code was originally translated into VBA by Graeme West
    """
    W = numpy.zeros((11, 4))
    XX = numpy.zeros((11, 4))
    W[1][1] = 0.17132449237917
    XX[1][1] = -0.932469514203152
    W[2][1] = 0.360761573048138
    XX[2][1] = -0.661209386466265
    W[3][1] = 0.46791393457269
    XX[3][1] = -0.238619186083197

    W[1][2] = 4.71753363865118E-02
    XX[1][2] = -0.981560634246719
    W[2][2] = 0.106939325995318
    XX[2][2] = -0.904117256370475
    W[3][2] = 0.160078328543346
    XX[3][2] = -0.769902674194305
    W[4][2] = 0.203167426723066
    XX[4][2] = -0.587317954286617
    W[5][2] = 0.233492536538355
    XX[5][2] = -0.36783149899818
    W[6][2] = 0.249147045813403
    XX[6][2] = -0.125233408511469

    W[1][3] = 1.76140071391521E-02
    XX[1][3] = -0.993128599185095
    W[2][3] = 4.06014298003869E-02
    XX[2][3] = -0.963971927277914
    W[3][3] = 6.26720483341091E-02
    XX[3][3] = -0.912234428251326
    W[4][3] = 8.32767415767048E-02
    XX[4][3] = -0.839116971822219
    W[5][3] = 0.10193011981724
    XX[5][3] = -0.746331906460151
    W[6][3] = 0.118194531961518
    XX[6][3] = -0.636053680726515
    W[7][3] = 0.131688638449177
    XX[7][3] = -0.510867001950827
    W[8][3] = 0.142096109318382
    XX[8][3] = -0.37370608871542
    W[9][3] = 0.149172986472604
    XX[9][3] = -0.227785851141645
    W[10][3] = 0.152753387130726
    XX[10][3] = -7.65265211334973E-02

    if numpy.abs(rho) < 0.3:
        NG = 1
        LG = 3
    elif numpy.abs(rho) < 0.75:
        NG = 2
        LG = 6
    else:
        NG = 3
        LG = 10

    h = -x
    k = -y
    hk = h * k
    BVN = 0

    if numpy.abs(rho) < 0.925:
        if numpy.abs(rho) > 0:
            hs = (h * h + k * k) / 2.
            asr = math.asin(rho)
            for i in range(1, LG + 1):
                for ISs in [-1, 1]:
                    sn = math.sin(asr * (ISs * XX[i][NG] + 1) / 2)
                    BVN = BVN + W[i][NG] * numpy.exp((sn * hk - hs) / (1 - sn * sn))
            BVN = BVN * asr / (4. * numpy.pi)
        BVN = BVN + CND(-h) * CND(-k)
    else:
        if rho < 0:
            k = -k
            hk = -hk
        if numpy.abs(rho) < 1.:
            Ass = (1. - rho) * (1. + rho)
            A = numpy.sqrt(Ass)
            bs = (h - k) ** 2
            c = (4. - hk) / 8.
            d = (12. - hk) / 16.
            asr = -(bs / Ass + hk) / 2.
            if asr > -100:
                BVN = A * numpy.exp(asr) * (1 - c * (bs - Ass) * (1 - d * bs / 5.) / 3. + c * d * Ass * Ass / 5.)
            if -hk < 100:
                b = numpy.sqrt(bs)
                BVN = BVN - numpy.exp(-hk / 2.) * numpy.sqrt(2. * numpy.pi) * CND(-b / A) * b * (1. - c * bs * (1. - d * bs / 5.) / 3.)
            A = A / 2
            for i in range(1, LG + 1):
                for ISs in [-1, 1]:
                    xs = (A * (ISs * XX[i][NG] + 1)) ** 2
                    rs = numpy.sqrt(1 - xs)
                    asr = -(bs / xs + hk) / 2
                    if asr > -100:
                        BVN = BVN + A * W[i][NG] * numpy.exp(asr) * (numpy.exp(-hk * (1 - rs) / (2 * (1 + rs))) / rs - (1 + c * xs * (1 + d * xs)))
            BVN = -BVN / (2. * numpy.pi)
        if rho > 0.:
            BVN = BVN + CND(-max(h, k))
        else:
            BVN = -BVN
            if k > h:
                BVN = BVN + CND(k) - CND(h)
    CBND = BVN

    return CBND
3b418e50acec31482df7137f484d396b1673d476
14,597
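A quick check of CBND above. It relies on a univariate normal CDF named CND that the original module defines elsewhere, so a standard implementation is assumed here:

import math

def CND(x):
    # Standard normal CDF; assumed stand-in for the module's own CND.
    return 0.5 * (1.0 + math.erf(x / math.sqrt(2.0)))

print(CBND(0.0, 0.0, 0.0))  # 0.25 for independent standard normals
print(CBND(0.0, 0.0, 0.5))  # 1/4 + asin(0.5)/(2*pi) = 0.3333...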
def prune(root: Node, copy: bool = True) -> Node: """ Prune (or simplify) the given SPN to a minimal and equivalent SPN. :param root: The root of the SPN. :param copy: Whether to copy the SPN before pruning it. :return: A minimal and equivalent SPN. :raises ValueError: If the SPN structure is not a directed acyclic graph (DAG). :raises ValueError: If an unknown node type is found. """ # Copy the SPN before proceeding, if specified if copy: root = deepcopy(root) # Check the SPN check_spn(root, labeled=True, smooth=True, decomposable=True) nodes = topological_order(root) if nodes is None: raise ValueError("SPN structure is not a directed acyclic graph (DAG)") # Build a dictionary that maps each id of a node to the corresponding node object nodes_map = dict(map(lambda n: (n.id, n), nodes)) # Proceed by reversed topological order for node in reversed(nodes): # Skip leaves if isinstance(node, Leaf): continue # Retrieve the children nodes from the mapping children_nodes = list(map(lambda n: nodes_map[n.id], node.children)) if len(children_nodes) == 1: nodes_map[node.id] = children_nodes[0] elif isinstance(node, Product): # Subsequent product nodes, concatenate the children of them children = list() for child in children_nodes: if not isinstance(child, Product): children.append(child) continue product_children = map(lambda n: nodes_map[n.id], child.children) children.extend(product_children) nodes_map[node.id].children = children elif isinstance(node, Sum): # Subsequent sum nodes, concatenate the children of them and adjust the weights accordingly # Important! This implementation take care also of directed acyclic graphs (DAGs) children_weights = defaultdict(float) for i, child in enumerate(children_nodes): if not isinstance(child, Sum): children_weights[child] += node.weights[i] continue sum_children = map(lambda n: nodes_map[n.id], child.children) for j, sum_child in enumerate(sum_children): children_weights[sum_child] += node.weights[i] * child.weights[j] children, weights = zip(*children_weights.items()) nodes_map[node.id].weights = np.array(weights, dtype=node.weights.dtype) nodes_map[node.id].children = children else: raise ValueError("Unknown node type called {}".format(node.__class__.__name__)) return assign_ids(nodes_map[root.id])
e242156bca1d8a3be8ca673a6629dadf967ccb5b
14,598
def GetRevisionAndLogs(slave_location, build_num): """Get a revision number and log locations. Args: slave_location: A URL or a path to the build slave data. build_num: A build number. Returns: A pair of the revision number and a list of strings that contain locations of logs. (False, []) in case of error. """ if slave_location.startswith('http://'): location = slave_location + '/builds/' + str(build_num) else: location = os.path.join(slave_location, str(build_num)) revision = False logs = [] fp = None try: if location.startswith('http://'): fp = urllib2.urlopen(location) contents = fp.read() revisions = re.findall(r'<td class="left">got_revision</td>\s+' '<td>(\d+)</td>\s+<td>Source</td>', contents) if revisions: revision = revisions[0] logs = [location + link + '/text' for link in re.findall(r'(/steps/endure[^/]+/logs/stdio)', contents)] else: fp = open(location, 'rb') build = cPickle.load(fp) properties = build.getProperties() if properties.has_key('got_revision'): revision = build.getProperty('got_revision') candidates = os.listdir(slave_location) logs = [os.path.join(slave_location, filename) for filename in candidates if re.match(r'%d-log-endure[^/]+-stdio' % build_num, filename)] except urllib2.URLError, e: logging.exception('Error reading build URL "%s": %s', location, str(e)) return False, [] except (IOError, OSError), e: logging.exception('Error reading build file "%s": %s', location, str(e)) return False, [] finally: if fp: fp.close() return revision, logs
fb8b25a0f33194af288d411b2218edf904ab9f14
14,599