Dataset columns: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M)
def multi_class_bss(predictions: np.ndarray, targets: np.ndarray) -> float:
    """
    Brier Skill Score:
        bss = 1 - bs / bs_{ref}

    bs_{ref} will be computed for a model that makes predictions according to the
    prevalence of each class in the dataset

    :param predictions: probability score. Expected Shape [N, C]
    :param targets: target class (int) per sample. Expected Shape [N]
    """
    # BS
    bs = multi_class_bs(predictions, targets)

    # no skill BS
    no_skill_prediction = [(targets == target_cls).sum() / targets.shape[0]
                           for target_cls in range(predictions.shape[-1])]
    no_skill_predictions = np.tile(np.array(no_skill_prediction), (predictions.shape[0], 1))
    bs_ref = multi_class_bs(no_skill_predictions, targets)

    return 1.0 - bs / bs_ref
d932649e2eb1a1b91aa2cf3882b0f4b74531dea7
7,565
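The record above calls a multi_class_bs helper that is not part of the snippet. A minimal sketch of that helper plus a usage example, assuming the standard multi-class Brier score over one-hot targets (the helper's exact definition is not given in the source):

import numpy as np

def multi_class_bs(predictions: np.ndarray, targets: np.ndarray) -> float:
    # assumed definition: mean over samples of the squared error between
    # predicted probabilities and the one-hot encoded target class
    one_hot = np.eye(predictions.shape[-1])[targets]
    return float(np.mean(np.sum((predictions - one_hot) ** 2, axis=-1)))

# usage sketch
preds = np.array([[0.8, 0.2], [0.3, 0.7], [0.6, 0.4]])
targets = np.array([0, 1, 1])
print(multi_class_bss(preds, targets))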
def get_arxiv_id_or_ascl_id(result_record):
    """
    :param result_record:
    :return:
    """
    identifiers = result_record.get("identifier", [])
    for identifier in identifiers:
        if "arXiv:" in identifier:
            return identifier.replace("arXiv:", "")
        if "ascl:" in identifier:
            return identifier.replace("ascl:", "")
    return ""
4270fe7ad8f2136ad5d53272acb02aaf60970ea3
7,566
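A short usage sketch for the record above, with a hypothetical ADS-style result record:

record = {"identifier": ["2015ascl.soft10002M", "arXiv:1401.2910", "10.1086/111111"]}
print(get_arxiv_id_or_ascl_id(record))  # -> '1401.2910'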
from typing import Mapping
from typing import Tuple

import torch


def get_query_claim_similarities(
    sim: Mapping[Tuple[str, int], float],
    softmax: bool,
) -> Mapping[Tuple[str, int], float]:
    """
    Preprocess query claim similarities.

    :param sim:
        A mapping from (premise_id, claim_id) to the logits of the similarity model, shape: (2,).
    :param softmax:
        Whether to apply softmax or use raw logits.

    :return:
        A mapping from (premise_id, claim_id) to scalar similarity value.
    """
    # ensure consistent order
    pairs = sorted(sim.keys())

    # create tensor, shape: (num_pairs, 2)
    sim = torch.stack(
        tensors=[
            torch.as_tensor(data=sim[pair], dtype=torch.float32)
            for pair in pairs
        ],
        dim=0,
    )

    # apply softmax if requested
    if softmax:
        sim = sim.softmax(dim=-1)

    # take probability of "similar" class
    sim = sim[:, 1]

    # one row corresponds to one pair similarity
    return dict(zip(pairs, sim))
6f1eb9495c7b7243f544564315ca3ae09f31da92
7,567
import re


def regexp(options: dict):
    """
    Apply a regexp method to the dataset

    :param options: contains two values:
        - find: the string to be found
        - replace: the string that will replace the found string
    """

    def apply_regexp(dataset, tag):
        """
        Apply a regexp to the dataset
        """
        element = dataset.get(tag)
        if element is not None:
            element.value = re.sub(
                options["find"], options["replace"], str(element.value)
            )

    return apply_regexp
20cfaf4f9286ad582dc9f4fea4184cf1c7d0de34
7,568
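A usage sketch for the regexp factory above, with a minimal stand-in for the dataset elements (the real code appears to target pydicom-style datasets; the stand-in class below is purely illustrative):

class _Element:
    def __init__(self, value):
        self.value = value

dataset = {"PatientName": _Element("DOE^JOHN")}
anonymize = regexp({"find": r"DOE", "replace": "ANON"})
anonymize(dataset, "PatientName")
print(dataset["PatientName"].value)  # -> 'ANON^JOHN'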
import os.path as osp  # needed for osp.join below


def do_one_subject(sub_curr, params, verbose=False):
    """ launch sessions processing for sub_curr

    parameters:
    -----------
    sub_curr: dict
        contains subject base directory
        contains subject index
    params: dict
        parameters for layout, data and analysis
    """
    sub_idx, sub_dir = sub_curr['sub_idx'], sub_curr['sub_dir']
    nb_sess = params['data']['nb_sess']
    dlayo = params['layout']

    sess_idx = range(1, nb_sess + 1)
    sess_dirs = [osp.join(sub_dir, (dlayo['dir']['sess+']).format(idx)) for idx in sess_idx]

    sesss_info = {}
    sess_curr = {}
    for sess_idx, sess_dir in enumerate(sess_dirs, 1):  # start idx at 1
        sess_curr['sess_idx'] = sess_idx
        sess_curr['sess_dir'] = sess_dir
        sess_str = (dlayo['dir']['sess+']).format(sess_idx)
        if verbose:
            print('\n' + '---' * 11 + "\n" + sess_str)
        sesss_info[sess_str] = do_one_sess(sess_curr, sub_curr, params, verbose=verbose)

    return sesss_info
68ba212eeccde0197c587a0b929198b2a042328d
7,569
def comp_skin_effect(self, freq, T_op=20, T_ref=20, type_skin_effect=1): """Compute the skin effect factor for the conductor Parameters ---------- self : Conductor an Conductor object freq: float electrical frequency [Hz] T_op: float Conductor operational temperature [degC] T_ref: float Conductor reference temperature [degC] type_skin_effect: int Model type for skin effect calculation: - 1: analytical model (default) Returns ---------- Xkr_skinS : float skin effect coeff for resistance at freq Xke_skinS : float skin effect coeff for inductance at freq """ # initialization Xkr_skinS = 1 Xke_skinS = 1 if type_skin_effect == 1: # analytical calculations based on Pyrhonen sigmar = self.cond_mat.elec.get_conductivity(T_op=T_op, T_ref=T_ref) mu0 = 4 * pi * 1e-7 ws = 2 * pi * freq Slot = self.parent.parent.slot # nsw = len(ws) # case of preformed rectangular wire CondType11 if hasattr(self, "Wwire") and hasattr(self, "Hwire"): Hwire = self.Hwire Wwire = self.Wwire Nwppc_rad = self.Nwppc_rad Nwppc_tan = self.Nwppc_tan # case of round wire CondType12 - approximation based on rectangular wire formula elif hasattr(self, "Wwire") and not hasattr(self, "Hwire"): Hwire = self.Wwire Wwire = self.Wwire Nwppc_tan = self.Nwppc Nwppc_rad = self.Nwppc # case of bar conductor elif hasattr(self, "Hbar") and hasattr(self, "Wbar"): Hwire = self.Hbar Wwire = self.Wbar Nwppc_tan = 1 Nwppc_rad = 1 Alpha_wind = Slot.comp_angle_active_eq() R_wind = Slot.comp_radius_mid_active() W2s = 2 * R_wind * sin(Alpha_wind) # average resistance factor over the slot ksi = Hwire * sqrt((1 / 2) * ws * mu0 * sigmar * Nwppc_tan * Wwire / W2s) phi_skin = self.comp_phi_skin(ksi) psi_skin = self.comp_psi_skin(ksi) phip_skin = self.comp_phip_skin(ksi) psip_skin = self.comp_psip_skin(ksi) Xkr_skinS = phi_skin + ((Nwppc_rad ** 2 - 1) / 3) * psi_skin Xke_skinS = (1 / Nwppc_rad ** 2) * phip_skin + ( 1 - 1 / Nwppc_rad ** 2 ) * psip_skin return Xkr_skinS, Xke_skinS
b71f4385d600713f3fff559e0836d9c532b79b73
7,570
import glob


def find_paths(initial_path, extension):
    """
    From a path, return all the files of a given extension inside.

    :param initial_path: the initial directory of search
    :param extension: the extension of the files to be searched
    :return: list of paths inside the initial path
    """
    paths = glob.glob(initial_path + r'/**/*.' + extension, recursive=True)
    return paths
0220127050b765feaf423c195d020d65ece8d22e
7,572
def ridge_line(df_act, t_range='day', n=1000):
    """
    https://plotly.com/python/violin/

    for one day plot the activity distribution over the day
        - sample uniform from each interval
    """
    df = activities_dist(df_act.copy(), t_range, n)
    colors = n_colors('rgb(5, 200, 200)', 'rgb(200, 10, 10)', len(df.columns), colortype='rgb')
    data = df.values.T

    fig = go.Figure()
    i = 0
    for data_line, color in zip(data, colors):
        fig.add_trace(go.Violin(x=data_line, line_color=color, name=df.columns[i]))
        i += 1

    fig.update_traces(orientation='h', side='positive', width=3, points=False)
    fig.update_layout(xaxis_showgrid=False, xaxis_zeroline=False)
    return fig
7fa2e4946a8de5df6e5c7697236c939703133409
7,573
def op(name, value, display_name=None, description=None, collections=None):
    """Create a TensorFlow summary op to record data associated with the given value.

    Arguments:
      name: A name for this summary operation.
      value: A rank-0 string `Tensor`.
      display_name: If set, will be used as the display name in TensorBoard.
        Defaults to `name`.
      description: A longform readable description of the summary data.
        Markdown is supported.
      collections: Which TensorFlow graph collections to add the summary op to.
        Defaults to `['summaries']`. Can usually be ignored.
    """
    # The `name` argument is used to generate the summary op node name.
    # That node name will also involve the TensorFlow name scope.
    # By having the display_name default to the name argument, we make
    # the TensorBoard display clearer.
    if display_name is None:
        display_name = name

    # We could pass additional metadata other than the PLUGIN_NAME within the
    # plugin data by using the content parameter, but we don't need any metadata
    # for this simple example.
    summary_metadata = tf.SummaryMetadata(
        display_name=display_name,
        summary_description=description,
        plugin_data=tf.SummaryMetadata.PluginData(
            plugin_name=PLUGIN_NAME))

    # Return a summary op that is properly configured.
    return tf.summary.tensor_summary(
        name,
        value,
        summary_metadata=summary_metadata,
        collections=collections)
f2a6b65299c417e460f6ca2e41fc82e061b29f30
7,574
def select_only_top_n_common_types(dataset: pd.DataFrame, n: int = 10) -> pd.DataFrame:
    """
    First find the most popular 'n' types. Remove any uncommon types from the dataset

    :param dataset: The complete dataset
    :param n: The number of top types to select
    :return: The dataframe once all but the top 'n' types have been removed
    """
    len_before_filtering = len(dataset)
    print(f'*** Selecting only the most common "{n}" types from the dataset. Current length is {len_before_filtering}')

    top_types = dataset['type'].value_counts()[:n].to_dict()
    dataset = dataset[dataset['type'].apply(lambda x: x in top_types)]

    len_after_filtering = len(dataset)
    print(
        f'Removed {len_before_filtering - len_after_filtering} elements, the current length of the dataset is {len_after_filtering}\n')

    return dataset
b4d95682d1abbf062b4730213cefc6da71a5c605
7,575
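A small usage sketch for the filtering helper above:

import pandas as pd

df = pd.DataFrame({"type": ["a", "a", "b", "b", "b", "c"], "value": range(6)})
top2 = select_only_top_n_common_types(df, n=2)
print(top2["type"].unique())  # -> ['a' 'b'] ('c' is dropped as uncommon)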
def __one_both_closed(x, y, c=None, l=None):
    """convert coordinates to zero-based, both strand, open/closed coordinates.

    Parameters are from, to, is_positive_strand, length of contig.
    """
    return x - 1, y
ce4dfca3cc347de925f4c26460e486fb38a2d5e5
7,577
def get_corners(img, sigma=1, alpha=0.05, thresh=1000):
    """
    Returns the detected corners as a list of tuples
    """
    ret = []

    i_x = diff_x(img)
    i_y = diff_y(img)

    i_xx = ndimage.gaussian_filter(i_x ** 2, sigma=sigma)
    i_yy = ndimage.gaussian_filter(i_y ** 2, sigma=sigma)
    i_xy = ndimage.gaussian_filter(i_x * i_y, sigma=sigma)

    height, width = img.shape[:2]
    det = i_xx * i_yy - i_xy ** 2
    trace = i_xx + i_yy
    r_val = det - alpha * trace ** 2

    for i in range(2, height - 3):
        for j in range(2, width - 3):
            if r_val[i, j] > thresh and r_val[i, j] == np.amax(r_val[i - 1:i + 2, j - 1:j + 2]):
                ret.append((i, j))

    return ret
d581df8daff7f20e2f15b5eb5af9ea686c0520e4
7,578
import numpy


def add_param_starts(this_starts, params_req, global_conf, run_period_len,
                     start_values_min, start_values_max):
    """Process the param starts information taken from the generator, and add it
    to the array being constructed.

    Inputs:
        this_starts: a tuple with (starts_min, starts_max), the output from a
            generator's get_param_starts() function.
        params_req: integer, the number of parameters this generator requires
        global_conf: a dict including 'min_param_val' and 'max_param_val'
        run_period_len: the number of periods to run for
        start_values_min: the array to append the min start values to
        start_values_max: the array to append the max start values to

    Outputs:
        start_values_min, start_values_max, updated versions (not necessarily in-place)
    """
    (starts_min, starts_max) = this_starts
    starts_min = numpy.array(starts_min)
    starts_max = numpy.array(starts_max)
    if starts_min.size == 0:
        start_values_min = numpy.hstack((start_values_min, (
            (numpy.ones((run_period_len, params_req)) *
             global_conf['min_param_val']).tolist())))
    else:
        start_values_min = numpy.hstack((start_values_min, starts_min))
    if starts_max.size == 0:
        start_values_max = numpy.hstack((start_values_max, (
            (numpy.ones((run_period_len, params_req)) *
             global_conf['max_param_val']).tolist())))
    else:
        start_values_max = numpy.hstack((start_values_max, starts_max))
    return start_values_min, start_values_max
b50f538b9d5096fe6061b4b990ccb9ad6ba05ef6
7,579
import numpy as np


def pareto(data, name=None, exp=None, minval=None, maxval=None, **kwargs):
    """the pareto distribution:

        val ~ val**exp | minval <= val < maxval
    """
    assert (exp is not None) and (minval is not None) and (maxval is not None), \
        'must supply exp, minval, and maxval!'
    ### done to make command-line arguments easier in add-prior-weights
    if name is not None:
        data = data[name]
    ans = exp * np.log(data)
    # mask out samples outside [minval, maxval); the original indexed an
    # undefined name `val` here -- `data` is what is intended
    ans[np.logical_not((minval <= data) * (data < maxval))] = -np.inf
    return ans
8607bf6783ba5e8be95d2b4319a42e8723b71da0
7,580
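A usage sketch for the pareto log-density above:

import numpy as np

data = np.array([1.5, 2.0, 5.0, 20.0])
print(pareto(data, exp=-2.0, minval=1.0, maxval=10.0))
# the last entry falls outside [1, 10) and is therefore set to -inf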
def codegen_reload_data(): """Parameters to codegen used to generate the fn_ansible_tower package""" reload_params = {"package": u"fn_ansible_tower", "incident_fields": [], "action_fields": [u"ansible_tower_arguments", u"ansible_tower_credential", u"ansible_tower_hosts", u"ansible_tower_inventory", u"ansible_tower_job_name", u"ansible_tower_module", u"ansible_tower_module_arguments", u"ansible_tower_run_tags", u"ansible_tower_skip_tags", u"job_status", u"last_updated", u"tower_project", u"tower_save_as", u"tower_template_pattern"], "function_params": [u"incident_id", u"tower_arguments", u"tower_credential", u"tower_hosts", u"tower_inventory", u"tower_job_id", u"tower_job_status", u"tower_last_updated", u"tower_module", u"tower_project", u"tower_run_tags", u"tower_save_as", u"tower_skip_tags", u"tower_template_id", u"tower_template_name", u"tower_template_pattern"], "datatables": [u"ansible_tower_job_templates", u"ansible_tower_launched_jobs"], "message_destinations": [u"fn_ansible_tower"], "functions": [u"ansible_tower_get_ad_hoc_command_results", u"ansible_tower_get_job_results", u"ansible_tower_launch_job_template", u"ansible_tower_list_job_templates", u"ansible_tower_list_jobs", u"ansible_tower_run_an_ad_hoc_command"], "phases": [], "automatic_tasks": [], "scripts": [], "workflows": [u"ansible_tower_get_ad_hoc_command_results", u"ansible_tower_get_job_results", u"ansible_tower_launch_job_template", u"ansible_tower_list_job_templates", u"ansible_tower_list_jobs", u"ansible_tower_run_an_ad_hoc_command", u"ansible_tower_run_job__artifact", u"ansible_tower_run_job__incident"], "actions": [u"Ansible Tower Get Ad Hoc Command Results", u"Ansible Tower Get Job Results", u"Ansible Tower List Job Templates", u"Ansible Tower List Jobs", u"Ansible Tower Run an Ad Hoc Command", u"Ansible Tower Run Job", u"Ansible Tower Run Job - Artifact", u"Ansible Tower Run Job - Incident"], "incident_artifact_types": [] } return reload_params
49dee7d9a1dc297ff31f51e4583740c353831cd9
7,581
def get_text_item(text):
    """Converts a text into a tokenized text item

    :param text:
    :return:
    """
    if config['data']['lowercased']:
        text = text.lower()
    question_tokens = [Token(t) for t in word_tokenize(text)]
    question_sentence = Sentence(' '.join([t.text for t in question_tokens]), question_tokens)

    return TextItem(question_sentence.text, [question_sentence])
79fdec4cdcb419751d49a564eff7c3b624c80a22
7,583
def Ltotal(scatter: bool):
    """
    Graph for computing 'Ltotal'.
    """
    graph = beamline(scatter=scatter)
    if not scatter:
        return graph
    del graph['two_theta']
    return graph
d38b7947b4c6397157e1bfec33b275a814dc1ec0
7,584
def is_valid_page_to_edit(prev_pg_to_edit, pg_to_edit):
    """Check if the page is valid to edit or not

    Args:
        prev_pg_to_edit (obj): page to edit object of previous page
        pg_to_edit (obj): page to edit object of current page

    Returns:
        boolean: true if valid else false
    """
    try:
        prev_pg_ref_end = int(prev_pg_to_edit.ref_end_page_no)
        cur_pg_ref_start = int(pg_to_edit.ref_start_page_no)
        cur_pg_ref_end = int(pg_to_edit.ref_end_page_no)
    except Exception:
        return False
    if prev_pg_to_edit == pg_to_edit:
        if cur_pg_ref_end >= cur_pg_ref_start:
            return True
        else:
            return False
    elif prev_pg_to_edit.vol != pg_to_edit.vol and cur_pg_ref_start <= cur_pg_ref_end:
        return True
    elif cur_pg_ref_start <= cur_pg_ref_end and prev_pg_ref_end <= cur_pg_ref_start:
        return True
    else:
        return False
ce594804f105b749062f79d63fc3021296631c1b
7,586
def get_diffs(backups, backup_id, partner_backups, bound=10):
    """
    Given a list `backups`, a `backup_id`, and `bound`,
    compute a dict containing diffs/stats surrounding the `backup_id`:
    diff_dict = {
        "stats": diff_stats_list,
        "files": files_list,
        "partners": partner_files_list,
        "prev_backup_id": prev_backup_id,
        "backup_id": backup_id,
        "next_backup_id": next_backup_id
    }
    return {} if `backup_id` not found
    """
    backup_dict = _get_backup_range(backups, backup_id, bound)
    if not backup_dict:
        return {}
    backups = backup_dict["backups"]
    backup_id = backup_dict["backup_id"]  # relevant backup_id might be different
    prev_backup_id = backup_dict["prev_backup_id"]
    next_backup_id = backup_dict["next_backup_id"]

    get_recent_backup = _recent_backup_finder(partner_backups)
    assign_files = backups[0].assignment.files
    files_list, diff_stats_list, partner_files_list = [], [], []

    for i, backup in enumerate(backups):
        if not i:  # first unique backup => no diff
            continue

        prev = backups[i - 1].files()
        curr = backup.files()
        files = highlight.diff_files(prev, curr, "short")
        files_list.append(files)

        backup_stats = {
            'submitter': backup.submitter.email,
            'backup_id': backup.hashid,
            'bid': backup.id,
            'partner_backup_id': None,
            'partner_bid': None,
            'question': None,
            'time': None,
            'passed': None,
            'failed': None
        }

        analytics = backup and backup.analytics()
        grading = backup and backup.grading()
        partner_backup_files = None

        if analytics:
            backup_stats['time'] = analytics.get('time')
            partner_backup = get_recent_backup(analytics)
            if partner_backup:
                backup_stats["partner_backup_id"] = partner_backup.hashid
                backup_stats["partner_bid"] = partner_backup.id
                partner_backup_files = highlight.diff_files(partner_backup.files(), curr, "short")

        if grading:
            questions = list(grading.keys())
            question = None
            passed, failed = 0, 0
            for question in questions:
                passed += grading.get(question).get('passed')
                # the original added the 'failed' count to `passed` as well, so
                # failures were never recorded; accumulate them in `failed`
                failed += grading.get(question).get('failed')
            if len(questions) > 1:
                question = questions
            backup_stats['question'] = question
            backup_stats['passed'] = passed
            backup_stats['failed'] = failed
        else:
            unlock = backup.unlocking()
            backup_stats['question'] = "[Unlocking] " + unlock.split(">")[0]

        diff_stats_list.append(backup_stats)
        partner_files_list.append(partner_backup_files)

    diff_dict = {
        "stats": diff_stats_list,
        "files": files_list,
        "partners": partner_files_list,
        "prev_backup_id": prev_backup_id,
        "backup_id": backup_id,
        "next_backup_id": next_backup_id
    }
    return diff_dict
fd896dc22270090eb88b41b3ab3fae2872d2ad06
7,587
from typing import List


def admits_voc_list(cid: CID) -> List[str]:
    """
    Return list of nodes in cid with positive value of control.
    """
    return [x for x in list(cid.nodes) if admits_voc(cid, x)]
a2db0dbb062a205ebb75f5db93ed14b11b25ccc1
7,588
def contour(data2d, levels, container=None, **kwargs):
    """HIDE"""
    if container is None:
        _checkContainer()
        container = current.container
    current.object = kaplot.objects.Contour(container, data2d, levels, **kwargs)
    return current.object
a9f56a8bcd54cbc38687682f78e684c03315f85b
7,589
def FilterSuboptimal(old_predictions, new_predictions, removed_predictions, min_relative_coverage=0.0, min_relative_score=0.0, min_relative_pide=0.0): """remove suboptimal alignments. """ best_predictions = {} for p in old_predictions: if not best_predictions.has_key(p.mQueryToken): best_predictions[p.mQueryToken] = MyBestEntry() x = best_predictions[p.mQueryToken] x.mQueryCoverage = max(x.mQueryCoverage, p.mQueryCoverage) x.score = max(x.score, p.score) x.mPercentIdentity = max(x.mPercentIdentity, p.mPercentIdentity) nnew = 0 for p in old_predictions: x = best_predictions[p.mQueryToken] if p.mQueryCoverage / x.mQueryCoverage < min_relative_coverage: if param_loglevel >= 2: print "# PRUNING: reason: coverage below best: removing %s" % str(p) if param_benchmarks: CheckBenchmark(p) removed_predictions.append(p) continue if p.score / x.score < min_relative_score: if param_loglevel >= 2: print "# PRUNING: reason: score below best: removing %s" % str(p) if param_benchmarks: CheckBenchmark(p) removed_predictions.append(p) continue if p.mPercentIdentity / x.mPercentIdentity < min_relative_pide: if param_loglevel >= 2: print "# PRUNING: reason: percent identity below best: removing %s" % str(p) if param_benchmarks: CheckBenchmark(p) removed_predictions.append(p) continue new_predictions.append(p) nnew += 1 return nnew
570399a0310f836261d5d65455cfee54e697a23c
7,590
def process_pair(librispeech_md_file, librispeech_dir, wham_md_file, wham_dir,
                 n_src, pair):
    """Process a pair of sources to mix."""
    utt_pair, noise = pair  # Indices of the utterances and the noise

    # Read the utterance files and get some metadata
    source_info, source_list = read_utterances(
        librispeech_md_file, utt_pair, librispeech_dir)

    # Add the noise
    source_info, source_list = add_noise(
        wham_md_file, wham_dir, noise, source_list, source_info)

    # Compute initial loudness, randomize loudness and normalize sources
    loudness, _, source_list_norm = set_loudness(source_list)

    # Randomly place the speech clips in the mixture
    source_info, source_list_pad = randomly_pad(source_list_norm, source_info, n_src)

    # Do the mixture
    mixture = mix(source_list_pad)

    # Check the mixture for clipping and renormalize if necessary
    # (we pass source_list_norm here because we don't want the zero padding
    # to influence the loudness)
    renormalize_loudness, did_clip = check_for_clipping(mixture, source_list_norm)

    # Compute gain
    gain_list = compute_gain(loudness, renormalize_loudness)

    return source_info, gain_list, did_clip
3dea4b1dc93b0bc54ad199e09db7612e6dad18d5
7,591
def getMultiDriverSDKs(driven, sourceDriverFilter=None):
    """get the sdk nodes that are added through a blendweighted node

    Args:
        driven (string): name of the driven node
        sourceDriverFilter (list, pynode): Driver transforms to filter by,
            if the connected SDK is not driven by this node it will not be returned.

    Returns:
        list: of sdk nodes
    """
    sdkDrivers = []
    for sdkUtility in SDK_UTILITY_TYPE:
        blend_NodePair = pm.listConnections(driven,
                                            source=True,
                                            type=sdkUtility,
                                            exactType=True,
                                            plugs=True,
                                            connections=True,
                                            sourceFirst=True,
                                            scn=True) or []

        if not blend_NodePair:
            continue
        for pairs in blend_NodePair:
            sdkPairs = getConnectedSDKs(pairs[0].nodeName(),
                                        sourceDriverFilter=sourceDriverFilter)
            for sPair in sdkPairs:
                sdkDrivers.append([sPair[0], pairs[1]])

    return sdkDrivers
4f7fe2d959619d3eaca40ba6366a5d4d62e047ff
7,592
def resnet_model_fn(features, labels, mode, model_class, resnet_size, weight_decay, learning_rate_fn, momentum, data_format, version, loss_filter_fn=None, multi_gpu=False): """Shared functionality for different resnet model_fns. Initializes the ResnetModel representing the model layers and uses that model to build the necessary EstimatorSpecs for the `mode` in question. For training, this means building losses, the optimizer, and the train op that get passed into the EstimatorSpec. For evaluation and prediction, the EstimatorSpec is returned without a train op, but with the necessary parameters for the given mode. Args: features: tensor representing input images labels: tensor representing class labels for all input images mode: current estimator mode; should be one of `tf.estimator.ModeKeys.TRAIN`, `EVALUATE`, `PREDICT` model_class: a class representing a TensorFlow model that has a __call__ function. We assume here that this is a subclass of ResnetModel. resnet_size: A single integer for the size of the ResNet model. weight_decay: weight decay loss rate used to regularize learned variables. learning_rate_fn: function that returns the current learning rate given the current global_step momentum: momentum term used for optimization data_format: Input format ('channels_last', 'channels_first', or None). If set to None, the format is dependent on whether a GPU is available. version: Integer representing which version of the ResNet network to use. See README for details. Valid values: [1, 2] loss_filter_fn: function that takes a string variable name and returns True if the var should be included in loss calculation, and False otherwise. If None, batch_normalization variables will be excluded from the loss. multi_gpu: If True, wrap the optimizer in a TowerOptimizer suitable for data-parallel distribution across multiple GPUs. Returns: EstimatorSpec parameterized according to the input params and the current mode. """ # Generate a summary node for the images tf.summary.image('images', features, max_outputs=6) model = model_class(resnet_size, data_format, version=version) logits = model(features, mode == tf.estimator.ModeKeys.TRAIN) predictions = { 'classes': tf.argmax(logits, axis=1), 'probabilities': tf.nn.softmax(logits, name='softmax_tensor') } if mode == tf.estimator.ModeKeys.PREDICT: return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions) # Calculate loss, which includes softmax cross entropy and L2 regularization. cross_entropy = tf.losses.softmax_cross_entropy( logits=logits, onehot_labels=labels) # Create a tensor named cross_entropy for logging purposes. tf.identity(cross_entropy, name='cross_entropy') tf.summary.scalar('cross_entropy', cross_entropy) # If no loss_filter_fn is passed, assume we want the default behavior, # which is that batch_normalization variables are excluded from loss. if not loss_filter_fn: def loss_filter_fn(name): return 'batch_normalization' not in name # Add weight decay to the loss. loss = cross_entropy + weight_decay * tf.add_n( [tf.nn.l2_loss(v) for v in tf.trainable_variables() if loss_filter_fn(v.name)]) # Create a tensor named cross_entropy for logging purposes. 
tf.identity(loss, name='train_loss') tf.summary.scalar('train_loss', loss) if mode == tf.estimator.ModeKeys.TRAIN: global_step = tf.train.get_or_create_global_step() learning_rate = learning_rate_fn(global_step) # Create a tensor named learning_rate for logging purposes tf.identity(learning_rate, name='learning_rate') tf.summary.scalar('learning_rate', learning_rate) optimizer = tf.train.MomentumOptimizer( learning_rate=learning_rate, momentum=momentum) # If we are running multi-GPU, we need to wrap the optimizer. if multi_gpu: optimizer = tf.contrib.estimator.TowerOptimizer(optimizer) update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) train_op = tf.group(optimizer.minimize(loss, global_step), update_ops) else: train_op = None accuracy = tf.metrics.accuracy( tf.argmax(labels, axis=1), predictions['classes']) metrics = {'acc': accuracy} # Create a tensor named train_accuracy for logging purposes tf.identity(accuracy[1], name='train_accuracy') tf.summary.scalar('train_acc', accuracy[1]) return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=metrics)
4adc5fc3ca461d4eb4a051861e8c82d2c1aab5dd
7,593
def dataframe_from_stomate(filepattern,largefile=True,multifile=True, dgvmadj=False,spamask=None, veget_npindex=np.s_[:],areaind=np.s_[:], out_timestep='annual',version=1, replace_nan=False): """ Parameters: ----------- filepattern: could be a single filename, or a file pattern out_timestep: the timestep of output file, used to provide information to properly scale the variable values, could be 'annual' or 'daily'. when 'annual', flux_scale_factor = 365 will be used. dgvmadj: use DGVM adjustment, in this case tBIOMASS rathern than TOTAL_M is used. veget_npindex: passed to the function of get_pftsum: 1. could be used to restrict for example the PFT weighted average only among natural PFTs by setting veget_npindex=np.s_[:,0:11,:,:]. It will be used to slice VEGET_MAX variable. 2. could also be used to slice only for some subgrid of the whole grid, eg., veget_npindex=np.s_[...,140:300,140:290]. Notes: ------ 1. This function could handle automatically the case of a single-point file or a regional file. When a single-point file (pattern) is given, PFT-weighted carbon density will be used rather than the total C over the spatial area. """ gnc_sto = gnc.Ncdata(filepattern,largefile=largefile,multifile=multifile, replace_nan=replace_nan) if version == 1: # list all pools and fluxes list_flux_pft = ['GPP','NPP','HET_RESP','CO2_FIRE','CO2FLUX','CO2_TAKEN'] list_flux_pftsum = ['CONVFLUX','CFLUX_PROD10','CFLUX_PROD100','HARVEST_ABOVE'] list_flux = list_flux_pft+list_flux_pftsum list_pool = ['TOTAL_M','TOTAL_SOIL_CARB'] list_all = list_flux_pft+list_flux_pftsum+list_pool nlist_var = [list_flux_pft, list_flux_pftsum, list_pool] for varlist in nlist_var: gnc_sto.retrieve_variables(varlist) gnc_sto.get_pftsum(print_info=False,veget_npindex=veget_npindex) gnc_sto.remove_variables(varlist) #handle adjustment of different variables if dgvmadj: gnc_sto.retrieve_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE']) gnc_sto.pftsum.__dict__['NPP'] = gnc_sto.d1.tGPP - gnc_sto.d1.tRESP_MAINT - gnc_sto.d1.tRESP_GROWTH gnc_sto.pftsum.__dict__['HET_RESP'] = gnc_sto.d1.tRESP_HETERO gnc_sto.pftsum.__dict__['CO2_FIRE'] = gnc_sto.d1.tCO2_FIRE gnc_sto.remove_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE']) gnc_sto.retrieve_variables(['tBIOMASS','tLITTER','tSOILC']) gnc_sto.pftsum.__dict__['TOTAL_M'] = gnc_sto.d1.tBIOMASS gnc_sto.pftsum.__dict__['TOTAL_SOIL_CARB'] = gnc_sto.d1.tLITTER + gnc_sto.d1.tSOILC gnc_sto.remove_variables(['tBIOMASS','tLITTER','tSOILC']) # we have to treat product pool independently try: gnc_sto.retrieve_variables(['PROD10','PROD100']) gnc_sto.pftsum.PROD10 = gnc_sto.d1.PROD10.sum(axis=1) gnc_sto.pftsum.PROD100 = gnc_sto.d1.PROD100.sum(axis=1) gnc_sto.remove_variables(['PROD10','PROD100']) except KeyError: gnc_sto.pftsum.PROD10 = gnc_sto.pftsum.NPP * 0. gnc_sto.pftsum.PROD100 = gnc_sto.pftsum.NPP * 0. # get the spatial operation and pass them into dataframe if not gnc_sto._SinglePoint: gnc_sto.get_spa() dft = pa.DataFrame(gnc_sto.spasum.__dict__) else: dft = pa.DataFrame(gnc_sto.pftsum.__dict__) # treat the output time step if out_timestep == 'annual': flux_scale_factor = 365. dft['CO2FLUX'] = dft['CO2FLUX']/30. 
#CO2FLUX is monthly output elif out_timestep == 'daily': flux_scale_factor = 1 dft[list_flux] = dft[list_flux]*flux_scale_factor # get total carbon pool dft['PROD'] = dft['PROD10'] + dft['PROD100'] dft['CarbonPool'] = dft['TOTAL_M'] + dft['TOTAL_SOIL_CARB'] + dft['PROD'] # calcate NBP dft['NBP_npp'] = dft['NPP']+dft['CO2_TAKEN']-dft['CONVFLUX']-dft['CFLUX_PROD10']-dft['CFLUX_PROD100']-dft['CO2_FIRE']-dft['HARVEST_ABOVE']-dft['HET_RESP'] dft['NBP_co2flux'] = -1*(dft['CO2FLUX']+dft['HARVEST_ABOVE']+dft['CONVFLUX']+dft['CFLUX_PROD10']+dft['CFLUX_PROD100']) elif version == 2: # list all pools and fluxes list_flux_pft = ['GPP','NPP','HET_RESP','CO2_FIRE','CO2FLUX','CO2_TAKEN','METHANE','RANIMAL'] list_flux_pftsum = ['CONVFLUX_LCC','CONVFLUX_HAR','CFLUX_PROD10_LCC','CFLUX_PROD10_HAR','CFLUX_PROD100_LCC','CFLUX_PROD100_HAR','HARVEST_ABOVE'] list_flux = list_flux_pft+list_flux_pftsum list_pool = ['TOTAL_M','TOTAL_SOIL_CARB','LEAF_M','SAP_M_AB','SAP_M_BE', 'HEART_M_AB','HEART_M_BE','ROOT_M','FRUIT_M','RESERVE_M', 'LITTER_STR_AB','LITTER_STR_BE','LITTER_MET_AB','LITTER_MET_BE'] list_all = list_flux_pft+list_flux_pftsum+list_pool nlist_var = [list_flux_pft, list_flux_pftsum, list_pool] for varlist in nlist_var: gnc_sto.retrieve_variables(varlist,mask=spamask) gnc_sto.get_pftsum(print_info=False,veget_npindex=veget_npindex) gnc_sto.remove_variables(varlist) #handle adjustment of different variables if dgvmadj: if veget_npindex != np.s_[:]: raise ValueError("dgvmadj is not handled when veget_npindex does not include all") else: gnc_sto.retrieve_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE'],mask=spamask) gnc_sto.pftsum.__dict__['NPP'] = gnc_sto.d1.tGPP - gnc_sto.d1.tRESP_MAINT - gnc_sto.d1.tRESP_GROWTH gnc_sto.pftsum.__dict__['HET_RESP'] = gnc_sto.d1.tRESP_HETERO gnc_sto.pftsum.__dict__['CO2_FIRE'] = gnc_sto.d1.tCO2_FIRE gnc_sto.remove_variables(['tGPP','tRESP_GROWTH','tRESP_MAINT','tRESP_HETERO','tCO2_FIRE']) gnc_sto.retrieve_variables(['tBIOMASS','tLITTER','tSOILC'],mask=spamask) gnc_sto.pftsum.__dict__['TOTAL_M'] = gnc_sto.d1.tBIOMASS gnc_sto.pftsum.__dict__['TOTAL_SOIL_CARB'] = gnc_sto.d1.tLITTER + gnc_sto.d1.tSOILC gnc_sto.remove_variables(['tBIOMASS','tLITTER','tSOILC']) # we have to treat product pool independently list_prod = ['PROD10_LCC','PROD10_HAR','PROD100_LCC','PROD100_HAR'] gnc_sto.retrieve_variables(list_prod,mask=spamask) for var in list_prod: gnc_sto.pftsum.__dict__[var] = gnc_sto.d1.__dict__[var][veget_npindex].sum(axis=1) print gnc_sto.d1.__dict__['PROD10_LCC'][veget_npindex].shape print gnc_sto.d1.__dict__['PROD10_LCC'].shape print gnc_sto.pftsum.__dict__['PROD10_LCC'].shape gnc_sto.remove_variables(list_prod) # get the spatial operation and pass them into dataframe if not gnc_sto._SinglePoint: gnc_sto.get_spa(areaind=areaind) dft = pa.DataFrame(gnc_sto.spasum.__dict__) else: dft = pa.DataFrame(gnc_sto.pftsum.__dict__) # 2016-03-30: the shape of gnc_sto.d1.ContAreas could be # (nlat,nlon) when there is no "CONTFRAC" or "NONBIOFRAC" in # the history file, but could be (ntime,nlat,nlon) when they're # present. # # [++temporary++] treat CO2_TAKEN # # In case of shifting cultivation is simulated, the CO2_TAKEN # # could be big at the last day. However the veget_max is kept # # the same as the old one over the year, so we have to use # # last-year CO2_TAKEN multiply with the next-year veget_max. 
# gnc_sto.retrieve_variables(['CO2_TAKEN']) # co2taken_pftsum = np.ma.sum(gnc_sto.d1.CO2_TAKEN[:-1] * gnc_sto.d1.VEGET_MAX[1:],axis=1) # if not gnc_sto._SinglePoint: # dt = np.sum(co2taken_pftsum*gnc_sto.d1.ContAreas,axis=(1,2)) # else: # dt = co2taken_pftsum # dft['CO2_TAKEN'].iloc[:-1] = dt # treat the output time step if out_timestep == 'annual': flux_scale_factor = 365. dft['CO2FLUX'] = dft['CO2FLUX']/30. #CO2FLUX is monthly output elif out_timestep == 'daily': flux_scale_factor = 1 dft[list_flux] = dft[list_flux]*flux_scale_factor # get total carbon pool dft['PROD'] = dft['PROD10_LCC'] + dft['PROD10_HAR'] + dft['PROD100_LCC'] + dft['PROD100_HAR'] dft['CarbonPool'] = dft['TOTAL_M'] + dft['TOTAL_SOIL_CARB'] + dft['PROD'] dft['LITTER_AB'] = dft['LITTER_STR_AB'] + dft['LITTER_MET_AB'] dft['LITTER_BE'] = dft['LITTER_MET_BE'] + dft['LITTER_STR_BE'] dft['LITTER'] = dft['LITTER_BE'] + dft['LITTER_AB'] dft['BIOMASS_AB'] = dft.SAP_M_AB + dft.HEART_M_AB + dft.LEAF_M + dft.FRUIT_M + dft.RESERVE_M dft['BIOMASS_BE'] = dft.SAP_M_BE + dft.HEART_M_BE + dft.ROOT_M # treat GM dft['RANIMAL'] = dft['RANIMAL']*1000 dft['METHANE'] = dft['METHANE']*1000 dft['GMsource'] = dft['RANIMAL'] + dft['METHANE'] # treat LUC dft['CONVFLUX'] = dft['CONVFLUX_LCC'] + dft['CONVFLUX_HAR'] dft['CFLUX_PROD10'] = dft['CFLUX_PROD10_LCC'] + dft['CFLUX_PROD10_HAR'] dft['CFLUX_PROD100'] = dft['CFLUX_PROD100_LCC'] + dft['CFLUX_PROD100_HAR'] dft['LUCsource'] = dft['CONVFLUX'] + dft['CFLUX_PROD10'] + dft['CFLUX_PROD100'] # calcate NBP dft['NBP_npp'] = dft['NPP']+dft['CO2_TAKEN']-dft['CONVFLUX']-dft['CFLUX_PROD10']-dft['CFLUX_PROD100']-dft['CO2_FIRE'] \ -dft['HARVEST_ABOVE']-dft['HET_RESP']-dft['RANIMAL']-dft['METHANE'] dft['NBP_co2flux'] = -1*(dft['CO2FLUX']+dft['HARVEST_ABOVE']+dft['CONVFLUX']+dft['CFLUX_PROD10']+dft['CFLUX_PROD100']) # litter dft['LITTER'] = dft[['LITTER_STR_AB','LITTER_STR_BE','LITTER_MET_AB','LITTER_MET_BE']].sum(axis=1) dft['LITTER_AB'] = dft[['LITTER_STR_AB','LITTER_MET_AB']].sum(axis=1) dft['LITTER_BE'] = dft[['LITTER_STR_BE','LITTER_MET_BE']].sum(axis=1) dft['SOILC'] = dft['TOTAL_SOIL_CARB'] - dft['LITTER'] else: raise ValueError("Unknown version!") gnc_sto.close() return dft
ba448d020ea8b41b75bd91d4b48ffca2d527b230
7,594
from applications.models import Application  # circular import


def random_application(request, event, prev_application):
    """
    Get a new random application for a particular event,
    that hasn't been scored by the request user.
    """
    return Application.objects.filter(
        form__event=event
    ).exclude(
        pk=prev_application.id
    ).exclude(
        scores__user=request.user
    ).order_by('?').first()
1d1b781b61328af67d7cc75c0fe9ec6f404b1b82
7,595
def flutter_velocity(pressures, speeds_of_sound,
                     root_chord, tip_chord, semi_span, thickness,
                     shear_modulus=2.62e9):
    """Calculate flutter velocities for a given fin design.

    Fin dimensions are given via the root_chord, tip_chord, semi_span and
    thickness arguments. All dimensions are in centimetres.

    Use shear_modulus to specify the shear modulus of the fin material in
    Pascals.

    >>> import numpy as np
    >>> zs = np.linspace(0, 30000, 100)
    >>> ps, _, ss = model_atmosphere(zs)
    >>> vels = flutter_velocity(ps, ss, 20, 10, 10, 0.2)
    >>> assert vels.shape == ps.shape

    Args:
        pressures (np.array): 1-d array of atmospheric pressures in Pascals
        speeds_of_sound (np.array): 1-d array of speeds of sound in m/s
        root_chord: fin root chord (cm)
        tip_chord: fin tip chord (cm)
        semi_span: fin semi-span (cm)
        thickness: fin thickness (cm)
        shear_modulus: fin material shear modulus (Pascals)

    Returns:
        A 1-d array containing corresponding flutter velocities in m/s.
    """
    # Ensure input is a 1-d array of floating point values
    # (np.float was removed from recent NumPy releases; the builtin float is equivalent here)
    pressures = np.atleast_1d(pressures).astype(float)

    # Compute derived dimensions from fin specification.
    S = 0.5 * (root_chord + tip_chord) * semi_span  # Area
    Ra = (semi_span * semi_span) / S  # Aspect ratio
    k = tip_chord / root_chord  # Taper ratio

    Vf = np.zeros_like(pressures)

    A = 1.337 * Ra**3 * pressures * (k + 1)
    B = 2 * (Ra + 2) * (thickness / root_chord)**3
    Vf = speeds_of_sound * np.sqrt(shear_modulus * B / A)

    return Vf
6a6fcbc2fffe541ef85f824f282924bb38199f46
7,596
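A standalone usage sketch for flutter_velocity at roughly sea-level conditions (the numbers are illustrative only):

import numpy as np

v = flutter_velocity(np.array([101325.0]), np.array([340.0]),
                     root_chord=20, tip_chord=10, semi_span=10, thickness=0.3)
print(v)  # flutter velocity in m/s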
import re


def replace_within(begin_re, end_re, source, data):
    """Replace text in source between two delimiters with specified data."""
    pattern = r'(?s)(' + begin_re + r')(?:.*?)(' + end_re + r')'
    source = re.sub(pattern, r'\1@@REPL@@\2', source)
    if '@@REPL@@' in source:
        source = source.replace('@@REPL@@', data)
    else:
        log.log('')
        log.log('ERROR: Cannot match {!r} and {!r}'.format(begin_re, end_re))
        log.log('')
    return source
23320d11a8bf0d6387f4687555d1fa472ad4c4d0
7,597
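A usage sketch for replace_within (the error branch relies on an external log module; the happy path below does not touch it):

page = "<!-- BEGIN -->old text<!-- END -->"
print(replace_within(r"<!-- BEGIN -->", r"<!-- END -->", page, "new text"))
# -> '<!-- BEGIN -->new text<!-- END -->'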
import itertools
from math import ceil  # `ceil` is used below but was not imported in the original snippet


def random_outputs_for_tier(rng, input_amount, scale, offset, max_count,
                            allow_extra_change=False):
    """ Make up to `max_count` random output values, chosen using exponential
    distribution function. All parameters should be positive `int`s.

    None can be returned for expected types of failures, which will often occur
    when the input_amount is too small or too large, since it becomes uncommon
    to find a random assortment of values that satisfy the desired constraints.

    On success, this returns a list of length 1 to max_count, of non-negative
    integer values that sum up to exactly input_amount.

    The returned values will always exactly sum up to input_amount. This is done
    by renormalizing them, which means the actual effective `scale` will vary
    depending on random conditions.

    If `allow_extra_change` is passed (this is abnormal!) then this may return
    max_count+1 outputs; the last output will be the leftover change if all
    max_counts outputs were exhausted.
    """
    if input_amount < offset:
        return None

    lambd = 1. / scale

    remaining = input_amount
    values = []  # list of fractional random values without offset
    for _ in range(max_count + 1):
        val = rng.expovariate(lambd)
        # A ceil here makes sure rounding errors won't sometimes put us over the top.
        # Provided that scale is much larger than 1, the impact is negligible.
        remaining -= ceil(val) + offset
        if remaining < 0:
            break
        values.append(val)
    else:
        if allow_extra_change:
            result = [(round(v) + offset) for v in values[:-1]]
            result.append(input_amount - sum(result))
            return result
        # Fail because we would need too many outputs
        # (most likely, scale was too small)
        return None
    assert len(values) <= max_count

    if not values:
        # Our first try put us over the limit, so we have nothing to work with.
        # (most likely, scale was too large)
        return None

    desired_random_sum = input_amount - len(values) * offset
    assert desired_random_sum >= 0

    # Now we need to rescale and round the values so they fill up the desired
    # input amount exactly. We perform rounding in cumulative space so that the
    # sum is exact, and the rounding is distributed fairly.
    cumsum = list(itertools.accumulate(values))
    rescale = desired_random_sum / cumsum[-1]
    normed_cumsum = [round(rescale * v) for v in cumsum]
    assert normed_cumsum[-1] == desired_random_sum

    differences = ((a - b) for a, b in zip(normed_cumsum, itertools.chain((0,), normed_cumsum)))
    result = [(offset + d) for d in differences]
    assert sum(result) == input_amount
    return result
eb3b7d813740e9aa9457fe62c4e0aaf86fad7bce
7,599
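A usage sketch for random_outputs_for_tier with a seeded RNG (illustrative values; the function may legitimately return None when the constraints cannot be met):

import random

rng = random.Random(0)
outputs = random_outputs_for_tier(rng, input_amount=100000, scale=10000, offset=1000, max_count=8)
print(outputs)
if outputs:
    assert sum(outputs) == 100000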
def create_connection(host, username, password):
    """ create a database connection to the MySQL database
        'guenette_neutrinos' on the given host
    :return: Connection object or None
    """
    try:
        conn = mysql.connect(host=host,  # your host, usually db-guenette_neutrinos.rc.fas.harvard.edu
                             user=username,  # your username
                             passwd=password,  # your password
                             db='guenette_neutrinos')  # name of the data base
        # autocommit=False)  # Prevent automatic commits
        return conn
    except mysql.Error as e:
        print(e)
        return None
09c540115ce788d1f5fd09d789327ac6951cb9a2
7,600
def Dadjust(profile_ref, profile_sim, diffsys, ph, pp=True, deltaD=None, r=0.02):
    """
    Adjust diffusion coefficient fitting function by comparing simulated
    profile against reference profile. The purpose is to let the simulated
    diffusion profile be similar to the reference profile.

    Parameters
    ----------
    profile_ref : DiffProfile
        Reference diffusion profile
    profile_sim : DiffProfile
        Simulated diffusion profile
    diffsys : DiffSystem
        Diffusion system
    ph : int
        Phase # to be adjusted, 0 <= ph <= diffsys.Np-1
    Xp : 1d-array
        Reference composition to adjust their corresponding diffusivities.
        If provided, spline function Dfunc must be determined by [Xp, Dp]
        alone, where Dp = exp(Dfunc(Xp)).
    pp : bool, optional
        Point Mode (True) or Phase Mode (False). Point Mode adjusts each Dp
        at Xp by itself. In Phase Mode, all Dp are adjusted by the same
        rate, i.e. the diffusivity curve shape won't change.
    deltaD: float, optional
        Only useful at Phase Mode. deltaD gives the rate to change diffusion
        coefficients DC. DC = DC * 10^deltaD
    r : float, optional
        Only useful at Phase Mode, default = 0.02, 0 < r < 1. r gives the
        range to calculate the concentration gradient around X, [X-r, X+r].
    """
    dref, Xref, Ifref = profile_ref.dis, profile_ref.X, profile_ref.If
    dsim, Xsim, Ifsim = profile_sim.dis, profile_sim.X, profile_sim.If
    if ph >= diffsys.Np:
        # the original formatted this as '%i' % diffsys.Np-1, which subtracts
        # 1 from a string and raises TypeError; the parentheses are required
        raise ValueError('Incorrect phase #, 0 <= ph <= %i' % (diffsys.Np - 1))
    if pp and 'Xspl' not in dir(diffsys):
        raise ValueError('diffsys must have Xspl properties in per-point mode')
    Dfunc, Xr, Np = diffsys.Dfunc[ph], diffsys.Xr[ph], diffsys.Np
    rate = 1

    # If there is phase consumed, increase adjustment rate
    if len(Ifref) != len(Ifsim):
        print('Phase consumed found, increase adjustment rate')
        rate = 2

    if Xr[1] > Xr[0]:
        idref = np.where((Xref >= Xr[0]) & (Xref <= Xr[1]))[0]
        idsim = np.where((Xsim >= Xr[0]) & (Xsim <= Xr[1]))[0]
    else:
        idref = np.where((Xref <= Xr[0]) & (Xref >= Xr[1]))[0]
        idsim = np.where((Xsim <= Xr[0]) & (Xsim >= Xr[1]))[0]

    if 'Xspl' in dir(diffsys):
        Xp = diffsys.Xspl[ph]
    else:
        Xp = np.linspace(Xr[0], Xr[1], 30)
    Dp = np.exp(splev(Xp, Dfunc))

    # If this is consumed phase, increase DC by 2 or 10^deltaD
    if len(idsim) == 0:
        Dp = np.exp(splev(Xp, Dfunc))
        if deltaD is None:
            return Dfunc_spl(Xp, Dp * 2)
        else:
            return Dfunc_spl(Xp, Dp * 10**deltaD)

    dref, Xref = dref[idref], Xref[idref]
    dsim, Xsim = dsim[idsim], Xsim[idsim]

    # Per phase adjustment
    if not pp:
        if deltaD is not None:
            return Dfunc_spl(Xp, Dp * 10**deltaD)

        # Calculate deltaD by phase width
        # When it comes to first or last phase, data closed to end limits are not considered
        fdis_ref = disfunc(dref, Xref)
        fdis_sim = disfunc(dsim, Xsim)
        X1, X2 = Xr[0], Xr[1]
        if ph == 0:
            X1 = Xr[0] * 0.9 + Xr[1] * 0.1
        if ph == Np - 1:
            X2 = Xr[0] * 0.1 + Xr[1] * 0.9
        ref = splev([X1, X2], fdis_ref)
        sim = splev([X1, X2], fdis_sim)
        wref = ref[1] - ref[0]
        wsim = sim[1] - sim[0]
        Dp *= np.sqrt(wref / wsim)
        return Dfunc_spl(Xp, Dp)

    # Point Mode adjustment
    for i in range(len(Xp)):
        # X1, X2 is the lower, upper bound to collect profile data
        # X1, X2 cannot exceed phase bound Xr
        if Xr[0] < Xr[1]:
            X1, X2 = max(Xp[i] - r, Xr[0]), min(Xp[i] + r, Xr[1])
        else:
            X1, X2 = max(Xp[i] - r, Xr[1]), min(Xp[i] + r, Xr[0])

        # Calculate the gradient inside [X1, X2] by linear fitting
        fdis_ref = disfunc(dref, Xref)
        fdis_sim = disfunc(dsim, Xsim)
        Xf = np.linspace(X1, X2, 10)
        pref = np.polyfit(splev(Xf, fdis_ref), Xf, 1)[0]
        psim = np.polyfit(splev(Xf, fdis_sim), Xf, 1)[0]

        # Adjust DC by gradient difference
        Dp[i] *= (psim / pref)**rate

    return Dfunc_spl(Xp, Dp)
d8b13e8d785a31219197936a9bd7b5d275f23351
7,601
def setup_test():
    """setup test"""

    def create_test_tables(db):
        """create test tables"""
        db("""
        create table if not exists person (
            id integer PRIMARY KEY AUTOINCREMENT,
            name      varchar(100),
            age       integer,
            kids      integer,
            salary    decimal(10,2),
            birthdate date
            )
        """)

    def delete_test_tables(db):
        """drop test tables"""
        db('drop table if exists person')

    db = zoom.database.database('sqlite3', ':memory:')
    delete_test_tables(db)
    create_test_tables(db)
    return db
539ca396ba3098e79ec5064ccde7245d91106ef2
7,602
def mapdict_values(function, dic):
    """
    Apply a function to a dictionary's values, creating a new dictionary with
    the same keys and new values created by applying the function to the old
    ones.

    :param function: A function that takes the dictionary value as argument
    :param dic:      A dictionary
    :return:         A new dictionary with the same keys and changed values

    Example:

    >>> dic1 = { 'a' : 10, 'b' : 20, 'c' : 30 }
    >>> mapdict_values(lambda x: x*2, dic1)
    {'a': 20, 'b': 40, 'c': 60}
    >>> dic1
    {'a': 10, 'b': 20, 'c': 30}
    """
    return dict(map(lambda x: (x[0], function(x[1])), dic.items()))
03abbe7d7ec32d70ad0d4729913037f2199e977c
7,604
from typing import Optional async def callback( request: Request, code: str = None, error: Optional[str] = Query(None), db: AsyncSession = Depends(get_db), ): """ Complete the OAuth2 login flow """ client = get_discord_client() with start_span(op="oauth"): with start_span(op="oauth.authorization_token"): # Get the authorization token if code: token = await client.authorize_access_token(request) else: return RedirectResponse(URL("/login").include_query_params(error=error)) with start_span(op="oauth.user_info"): # Get the user's info client.token = token user_info = await client.userinfo(token=token) user_id = int(user_info.get("id")) with start_span(op="permissions"): with start_span(op="permissions.access"): # Get the user's role ids roles = list(map(lambda r: r.id, await get_user_roles(user_id))) # Determine if the user has panel access if (await CONFIG.panel_access_role()) not in roles: return RedirectResponse("/login?error=unauthorized") with start_span(op="permissions.admin"): # Get all the user's guilds async with ClientSession() as session: async with session.get( "https://discord.com/api/v8/users/@me/guilds", headers={"Authorization": f"Bearer {token['access_token']}"}, ) as response: guilds = await response.json() # Determine if the user has admin access is_owner = any( map( lambda g: g.get("id") == str(SETTINGS.discord_guild_id) and g.get("owner"), guilds, ) ) is_admin = (await CONFIG.management_role()) in roles or is_owner # Save the user's info to the database user = User( id=user_id, username=user_info["username"], avatar=user_info["picture"], is_admin=is_admin, ) # Insert and ignore failures try: db.add(user) await db.commit() except IntegrityError: pass # Store the info in the session request.session["logged_in"] = True request.session["user"] = dict(user_info) request.session["is_admin"] = is_admin request.session["expiration"] = dict(token).get("expires_at") return RedirectResponse("/login/complete")
f7d76c385360f6d2113cd7fb470344c1e7c96027
7,605
def align_centroids(config, ref):
    """Align centroids"""
    diff_centroids = np.round(ref.mean(axis=0) - config.mean(axis=0))
    # diff_centroids = np.round(diff_centroids).astype(int)
    config = config + diff_centroids
    return config
cd579a911cb4ae59aa274836de156620305e592a
7,606
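A small usage sketch for align_centroids:

import numpy as np

config = np.array([[0.0, 0.0], [2.0, 0.0]])
ref = np.array([[10.0, 5.0], [12.0, 5.0]])
print(align_centroids(config, ref))  # -> [[10. 5.] [12. 5.]]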
def _make_headers_df(headers_response):
    """
    Parses the headers portion of the watson response and creates the header dataframe.

    :param headers_response: the ``row_header`` or ``column_header`` array as returned
        from the Watson response
    :return: the completed header dataframe
    """
    headers_df = util.make_dataframe(headers_response)
    headers_df = headers_df[
        ["text", "column_index_begin", "column_index_end", "row_index_begin",
         "row_index_end", "cell_id", "text_normalized"]]
    return headers_df
621d46da0de2056ac98747a51f2ac2cbfdd52e5e
7,607
def getMemInfo() -> CmdOutput:
    """Returns the RAM size in bytes.

    Returns:
        CmdOutput: The output of the command, as a `CmdOutput` instance
        containing `stdout` and `stderr` as attributes.
    """
    return runCommand(exe_args=ExeArgs("wmic", ["memorychip", "get", "capacity"]))
c57312d83182349e0847d0eb49606c401a3a0d27
7,608
def svn_swig_py_make_editor(*args):
    """svn_swig_py_make_editor(PyObject * py_editor, apr_pool_t pool)"""
    return _delta.svn_swig_py_make_editor(*args)
2041342a1bef3ea0addb004e1bd4539c58445c66
7,609
def register_confirm(request, activation_key):
    """finish confirmation and activate the account

    Args:
        request: the http request
        activation_key: the activation key

    Returns:
        Http redirect to successful page
    """
    user_safety = get_object_or_404(UserSafety, activation_key=activation_key)
    if user_safety.user.is_confirmed:
        return HttpResponseRedirect('/home/project')
    if user_safety.key_expires < timezone.now():
        return render_to_response('accounts/confirmExpires.html')
    user = user_safety.user
    user.is_confirmed = True
    user.save()
    return render_to_response('accounts/confirmed.html')
c677f246ff3088d58912bc136f1d2461f58ba10b
7,610
def get_best_z_index(classifications):
    """Get optimal z index based on quality classifications

    Ties are broken using the index nearest to the center of the sequence
    of all possible z indexes
    """
    nz = len(classifications)
    best_score = np.min(classifications)
    top_z = np.argwhere(np.array(classifications) == best_score).ravel()
    return top_z[np.argmin(np.abs(top_z - (nz // 2)))]
90b10dda47c071a3989a9de87061694270e67d69
7,611
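A usage sketch for get_best_z_index showing the tie-breaking behaviour:

scores = [0.2, 0.5, 0.8, 0.2, 0.9]  # lower is better; indices 0 and 3 tie
print(get_best_z_index(scores))  # -> 3 (the tied index closer to the centre of the stack)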
import glob


def mean_z_available():
    """docstring for mean_z_available"""
    if glob.glob("annual_mean_z.nc"):
        return True
    return False
d53f8dc6fe540e8f74fd00760d1c810e510e53b8
7,612
import time


def wait_for_url(monitor_url, status_code=None, timeout=None):
    """Blocks until the URL is available"""
    if not timeout:
        timeout = URL_TIMEOUT
    end_time = time.time() + timeout
    while (end_time - time.time()) > 0:
        if is_url(monitor_url, status_code):
            return True
        time.sleep(1)
    LOG.error('URL %s could not be reached after %s seconds', monitor_url, timeout)
    return False
7d7ca1fd51d4415c58ab3928bd163401fb548b9a
7,613
import requests
import io
import tarfile


def sources_from_arxiv(eprint):
    """
    Download sources on arXiv for a given preprint.

    :param eprint: The arXiv id (e.g. ``1401.2910`` or ``1401.2910v1``).
    :returns: A ``TarFile`` object of the sources of the arXiv preprint.
    """
    r = requests.get("http://arxiv.org/e-print/%s" % (eprint,))
    file_object = io.BytesIO(r.content)
    return tarfile.open(fileobj=file_object)
b26c46009b23c5a107d6303b567ab97492f91ad9
7,614
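A usage sketch for sources_from_arxiv (performs a live HTTP request to arxiv.org):

tar = sources_from_arxiv("1401.2910")
print(tar.getnames())  # list the source files of the preprint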
def render():
    """
    This method renders the HTML website including the online status and the
    last 30 database entries.

    :return:
    """
    online = isonline()
    return render_template("index.html", news=News.query.order_by(News.id.desc()).limit(30), online=online)
4b0584d33fb84f05afbbcfe016d7428c4f75a4d3
7,616
import aiohttp


async def execute_request(url):
    """Method to execute a http request asynchronously"""
    async with aiohttp.ClientSession() as session:
        json = await fetch(session, url)
    return json
1845fed4acce963a0bc1bb780cdea16ba9dec394
7,617
from typing import List


def game_over(remaining_words: List[str]) -> bool:
    """Return True iff remaining_words is empty.

    >>> game_over(['dan', 'paul'])
    False
    >>> game_over([])
    True
    """
    return remaining_words == []
8d29ef06bd5d60082646cef00f77bbabfbac32eb
7,618
import csv


def read_manifest(instream):
    """Read manifest file into a dictionary

    Parameters
    ----------
    instream : readable file like object
    """
    reader = csv.reader(instream, delimiter="\t")
    header = None
    metadata = {}
    for row in reader:
        if header is None:
            header = row
        else:
            metadata[row[0]] = row[1]
    return metadata
afa6c2bb0a9d81267b1d930026a229be924a1994
7,619
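A usage sketch for read_manifest; note that the first row is consumed as the header and only subsequent rows land in the returned dict:

import io

manifest = io.StringIO("key\tvalue\nsample\tS1\nflowcell\tFC42\n")
print(read_manifest(manifest))  # -> {'sample': 'S1', 'flowcell': 'FC42'}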
def get_backbone_from_model(model: Model, key_chain: list) -> nn.Cell:
    """Obtain the backbone from a wrapped mindspore Model using the key chain provided.

    Args:
        model(Model): A Model instance with wrapped network and loss.
        key_chain(list[str]): the keys, in the right order, according to which
            we can get the backbone.

    Returns:
        The desired backbone(nn.Cell)."""
    network = model.train_network
    # if network is a WithLossCell
    if getattr(model, '_loss_fn') is None:
        assert hasattr(network, '_net')
        network = getattr(network, '_net')
    for key in key_chain:
        assert hasattr(network, key), f'network has no attr named {key}'
        network = getattr(network, key)
    return network
0ddabf30c50e9d58ce18b0010107d92f8518b9bc
7,620
def dv_upper_lower_bound(f):
    """
    Donsker-Varadhan lower bound, but upper bounded by using log outside.
    Similar to MINE, but did not involve the term for moving averages.
    """
    first_term = f.diag().mean()
    second_term = logmeanexp_nodiag(f)

    return first_term - second_term
a7f9a3910a934f836204c5c47d9139be31860ec1
7,621
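The record above relies on a logmeanexp_nodiag helper that is not included; a minimal sketch of an off-diagonal log-mean-exp (the name and behaviour are inferred from common mutual-information estimation code, not confirmed by the source):

import torch

def logmeanexp_nodiag(f: torch.Tensor) -> torch.Tensor:
    # log of the mean of exp(f) taken over the off-diagonal entries of a square score matrix
    n = f.size(0)
    off_diag = f[~torch.eye(n, dtype=torch.bool, device=f.device)]
    return torch.logsumexp(off_diag, dim=0) - torch.log(torch.tensor(float(n * (n - 1))))

# usage sketch
scores = torch.randn(4, 4)
print(dv_upper_lower_bound(scores))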
def create_training_files_for_document(
        file_name,
        key_field_names,
        ground_truth_df,
        ocr_data,
        pass_number):
    """
    Create the ocr.json file and the label file for a document

    :param file_name: just the document name.ext
    :param key_field_names: names of the key fields to extract
    :param ground_truth_df: Ground Truth values for the document
    :param ocr_data: Previously OCR form
    :param pass_number: Are we processing word level or both word and line level
    """
    extraction_file_name = file_name[:-4] + '.ocr.json'

    # Now we go and reverse search the form for the Ground Truth values
    key_field_data = find_anchor_keys_in_form(
        df_gt=ground_truth_df,
        filename=extraction_file_name,
        data=ocr_data,
        anchor_keys=key_field_names,
        pass_number=pass_number)

    print(f"key_field_data {len(key_field_data)} {key_field_data} {file_name}")

    label_file, unique_fields_extracted = create_label_file(
        file_name,
        key_field_names,
        key_field_data[extraction_file_name]
    )

    return ocr_data, label_file, unique_fields_extracted
4832e28904f2c950ceb5526eaa8ab61568c55a8c
7,622
def incoming(ui, repo, source="default", **opts): """show new changesets found in source Show new changesets found in the specified path/URL or the default pull location. These are the changesets that would have been pulled if a pull at the time you issued this command. See pull for valid source format details. .. container:: verbose With -B/--bookmarks, the result of bookmark comparison between local and remote repositories is displayed. With -v/--verbose, status is also displayed for each bookmark like below:: BM1 01234567890a added BM2 1234567890ab advanced BM3 234567890abc diverged BM4 34567890abcd changed The action taken locally when pulling depends on the status of each bookmark: :``added``: pull will create it :``advanced``: pull will update it :``diverged``: pull will create a divergent bookmark :``changed``: result depends on remote changesets From the point of view of pulling behavior, bookmark existing only in the remote repository are treated as ``added``, even if it is in fact locally deleted. .. container:: verbose For remote repository, using --bundle avoids downloading the changesets twice if the incoming is followed by a pull. Examples: - show incoming changes with patches and full description:: hg incoming -vp - show incoming changes excluding merges, store a bundle:: hg in -vpM --bundle incoming.hg hg pull incoming.hg - briefly list changes inside a bundle:: hg in changes.hg -T "{desc|firstline}\\n" Returns 0 if there are incoming changes, 1 otherwise. """ if opts.get('graph'): cmdutil.checkunsupportedgraphflags([], opts) def display(other, chlist, displayer): revdag = cmdutil.graphrevs(other, chlist, opts) showparents = [ctx.node() for ctx in repo[None].parents()] cmdutil.displaygraph(ui, revdag, displayer, showparents, graphmod.asciiedges) hg._incoming(display, lambda: 1, ui, repo, source, opts, buffered=True) return 0 if opts.get('bundle') and opts.get('subrepos'): raise util.Abort(_('cannot combine --bundle and --subrepos')) if opts.get('bookmarks'): source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch')) other = hg.peer(repo, opts, source) if 'bookmarks' not in other.listkeys('namespaces'): ui.warn(_("remote doesn't support bookmarks\n")) return 0 ui.status(_('comparing with %s\n') % util.hidepassword(source)) return bookmarks.incoming(ui, repo, other) repo._subtoppath = ui.expandpath(source) try: return hg.incoming(ui, repo, source, opts) finally: del repo._subtoppath
9bf41cdc4de5c82634fae038940951ad738fd636
7,623
import time

from decorator import decorator  # assumed source of the `decorator` helper used below


def timeout(limit=5):
    """
    Timeout

    This decorator is used to raise a timeout error when the given function
    exceeds the given timeout limit.
    """
    @decorator
    def _timeout(func, *args, **kwargs):
        start = time.time()
        result = func(*args, **kwargs)
        duration = time.time() - start
        if duration > limit:
            msg = f"Function {func.__name__} exceeded timeout limit ({limit} seconds)"
            raise TimeoutError(msg)
        return result

    return _timeout
c68fee9530512ce1603ec7976f4f1278205b1f92
7,624
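A usage sketch for the timeout decorator (note that it measures the call after it finishes rather than interrupting it):

import time

@timeout(limit=1)
def slow_task():
    time.sleep(2)

try:
    slow_task()
except TimeoutError as err:
    print(err)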
from typing import Union


def OIII4363_flux_limit(combine_flux_file: str, verbose: bool = False,
                        log: Logger = log_stdout()) -> Union[None, np.ndarray]:
    """
    Determine 3-sigma limit on [OIII]4363 based on H-gamma measurements

    :param combine_flux_file: Filename of ASCII file containing emission-line
           flux measurements
    :param verbose: Write verbose message to stdout. Default: file only
    :param log: logging.Logger object

    :return: Array containing 3-sigma flux limit
    """
    log_verbose(log, "starting ...", verbose=verbose)

    try:
        combine_fits = asc.read(combine_flux_file)
    except FileNotFoundError:
        log.warning(f"File not found! {combine_flux_file}")
        return

    Hgamma = combine_fits['HGAMMA_Flux_Gaussian'].data
    Hgamma_SN = combine_fits['HGAMMA_S/N'].data

    flux_limit = (Hgamma / Hgamma_SN) * 3

    log_verbose(log, "finished.", verbose=verbose)
    return flux_limit
109f887693df16661d7766840b0026f7e9bca82d
7,625
import numpy def convert_units_co2(ds,old_data,old_units,new_units): """ Purpose: General purpose routine to convert from one set of CO2 concentration units to another. Conversions supported are: umol/m2/s to gC/m2 (per time step) gC/m2 (per time step) to umol/m2/s mg/m3 to umol/mol mgCO2/m3 to umol/mol umol/mol to mg/m3 mg/m2/s to umol/m2/s mgCO2/m2/s to umol/m2/s Usage: new_data = qcutils.convert_units_co2(ds,old_data,old_units,new_units) where ds is a data structure old_data (numpy array) is the data to be converted old_units (string) is the old units new_units (string) is the new units Author: PRI Date: January 2016 """ ts = int(ds.globalattributes["time_step"]) if old_units=="umol/m2/s" and new_units=="gC/m2": new_data = old_data*12.01*ts*60/1E6 elif old_units=="gC/m2" and new_units=="umol/m2/s": new_data = old_data*1E6/(12.01*ts*60) elif old_units in ["mg/m3","mgCO2/m3"] and new_units=="umol/mol": Ta,f,a = GetSeriesasMA(ds,"Ta") ps,f,a = GetSeriesasMA(ds,"ps") new_data = mf.co2_ppmfrommgpm3(old_data,Ta,ps) elif old_units=="umol/mol" and new_units in ["mg/m3","mgCO2/m3"]: Ta,f,a = GetSeriesasMA(ds,"Ta") ps,f,a = GetSeriesasMA(ds,"ps") new_data = mf.co2_mgpm3fromppm(old_data,Ta,ps) elif old_units in ["mg/m2/s","mgCO2/m2/s"] and new_units=="umol/m2/s": new_data = mf.Fc_umolpm2psfrommgpm2ps(old_data) else: msg = " Unrecognised conversion from "+old_units+" to "+new_units log.error(msg) new_data = numpy.ma.array(old_data,copy=True,mask=True) return new_data
38ce2987bfa4c5505fe64779ce752617862138fd
7,626
def query_urlhaus(session, provided_ioc, ioc_type): """ """ uri_dir = ioc_type if ioc_type in ["md5_hash", "sha256_hash"]: uri_dir = "payload" api = "https://urlhaus-api.abuse.ch/v1/{}/" resp = session.post(api.format(uri_dir), timeout=180, data={ioc_type: provided_ioc}) ioc_dicts = [] if resp.status_code == 200 and resp.text != "": resp_content = resp.json() if ioc_type == "host": if "urls" not in resp_content.keys() or len(resp_content["urls"]) == 0: ioc_dicts.append({"no data": provided_ioc}) return ioc_dicts for url in resp_content["urls"]: ioc_dict = { "provided_ioc": provided_ioc, "host": resp_content.get("host", None), "firstseen (host)": resp_content.get("firstseen", None), "urlhaus_reference (host)": resp_content.get("urlhaus_reference", None), "url": url.get("url", None), "url_status": url.get("url_status", None), "date_added (url)": url.get("date_added", None), "urlhaus_reference (url)": url.get("urlhaus_reference", None) } if url["tags"] != None: ioc_dict.update({ "tags (url)": ",".join(url.get("tags", None)) }) ioc_dicts.append(ioc_dict) elif ioc_type == "url": if "payloads" not in resp_content.keys() or len(resp_content["payloads"]) == 0: ioc_dicts.append({"invalid": provided_ioc}) return ioc_dicts for payload in resp_content["payloads"]: ioc_dict = { "provided_ioc": provided_ioc, "host": resp_content.get("host", None), "url": resp_content.get("url", None), "url_status": resp_content.get("url_status", None), "date_added (url)": resp_content.get("date_added", None), "urlhaus_reference (url)": resp_content.get("urlhaus_reference", None), "filename (payload)": payload.get("filename", None), "content_type (payload)": payload.get("content_type", None), "response_size (payload)": payload.get("response_size", None), "md5_hash (payload)": payload.get("response_md5", None), "sha256_hash (payload)": payload.get("response_sha256", None), "firstseen (payload)": payload.get("firstseen", None), "signature (payload)": payload.get("signature", None) } if resp_content["tags"] != None: ioc_dict.update({ "tags (url)": ",".join(resp_content.get("tags", None)) }) if payload["virustotal"] != None: ioc_dict.update({ "vt_result (payload)": payload["virustotal"].get("result", None), "vt_link (payload)": payload["virustotal"].get("link", None) }) ioc_dicts.append(ioc_dict) elif ioc_type in ["md5_hash", "sha256_hash"]: if len(resp_content["urls"]) == 0: ioc_dicts.append({"invalid": provided_ioc}) return ioc_dicts for url in resp_content["urls"]: ioc_dict = { "provided_ioc": provided_ioc, "content_type (payload)": resp_content.get("content_type", None), "file_size (payload)": resp_content.get("file_size", None), "md5_hash (payload)": resp_content.get("md5_hash", None), "sha256_hash (payload)": resp_content.get("sha256_hash", None), "firstseen (payload)": resp_content.get("firstseen", None), "lastseen (payload)": resp_content.get("lastseen", None), "signature (payload)": resp_content.get("signature", None), "url": url.get("url", None), "url_status": url.get("url_status", None), "filename (url)": url.get("filename", None), "firstseen (url)": url.get("firstseen", None), "lastseen (url)": url.get("lastseen", None), "urlhaus_reference (url)": url.get("urlhaus_reference", None) } if resp_content["virustotal"] != None: ioc_dict.update({ "vt_result (payload)": resp_content["virustotal"].get("result", None), "vt_link (payload)": resp_content["virustotal"].get("link", None) }) ioc_dicts.append(ioc_dict) return ioc_dicts return [{"invalid": provided_ioc}]
171bff1e9b1bfdf8ac6b91a4bbbd7226f80c8c4c
7,627
def arrow_to_json(data): """ Convert an arrow FileBuffer into a row-wise json format. Go via pandas (To be revisited!!) """ reader = pa.ipc.open_file(data) try: frame = reader.read_pandas() return frame.to_json(orient='records') except: raise DataStoreException("Unable to convert to JSON")
d49ee49b7071d0b857feeb878c99ce65e82460e9
7,628
import pathlib def get_wmc_pathname(subject_id, bundle_string): """Generate a valid pathname of a WMC file given subject_id and bundle_string (to resolve ACT vs noACT). The WMC file contrains the bundle-labels for each streamline of the corresponding tractogram. """ global datadir ACT_string = 'ACT' if bundle_string in noACT_list: ACT_string = 'noACT' try: pathname = next(pathlib.Path(f'{datadir}/sub-{subject_id}/').glob(f'dt-neuro-wmc.tag-{ACT_string}.id-*/classification.mat')) return pathname except StopIteration: print('WMC file not available!') raise FileNotFoundError
fcc570e3e59b99b94de95dc4f15c1fee2fe0f0f2
7,629
def _union_polygons(polygons, precision = 1e-4, max_points = 4000): """ Performs the union of all polygons within a PolygonSet or list of polygons. Parameters ---------- polygons : PolygonSet or list of polygons A set containing the input polygons. precision : float Desired precision for rounding vertex coordinates. max_points : int The maximum number of vertices within the resulting polygon. Returns ------- unioned : polygon The result of the union of all the polygons within the input PolygonSet. """ polygons = _merge_floating_point_errors(polygons, tol = precision/1000) unioned = gdspy.boolean(polygons, [], operation = 'or', precision = precision, max_points = max_points) return unioned
f6951a67a2ed4099b5321b98517810de43024036
7,630
from typing import Callable, Optional, TypeVar

T = TypeVar("T")


def parse_or_none(
    field: str,
    field_name: str,
    none_value: str,
    fn: Callable[[str, str], T],
) -> Optional[T]:
    """
    If the value is the same as the none value, will return None.
    Otherwise will attempt to run fn with the field and field name as the
    first and second arguments.
    """
    if field == none_value:
        return None
    try:
        val = fn(field, field_name)
    except LineParseError as e:
        msg = e.message + (
            f"\nThe value may also be '{none_value}', which will be "
            "interpreted as None."
        )
        raise LineParseError(msg)
    return val
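A usage sketch; `parse_int` below is a hypothetical caller-supplied parser, not part of the original module:

def parse_int(field: str, field_name: str) -> int:
    # illustrative parser used only for this example
    return int(field)

print(parse_or_none("42", "age", "NA", parse_int))  # 42
print(parse_or_none("NA", "age", "NA", parse_int))  # None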
4a0c2d8ec819fe6b8a9a24a60f54c62cb83e68ac
7,631
def get_lattice_parameter(elements, concentrations, default_title):
    """Finds the lattice parameter for the provided atomic species using Vegard's law.

    :arg elements: The names of the elements present in the system.
    :arg concentrations: The concentrations of each element.
    :arg default_title: The default system title.
    """
    if elements is None:
        lat_param = 1.0
        title = default_title
    else:
        if len(elements) != len(concentrations):
            raise ValueError("You have provided {} element names when {} elements are present "
                             "in the system. Please provide the correct number of elements."
                             .format(len(elements), len(concentrations)))
        else:
            title = ""
            lat_param = 0
            for i in range(len(elements)):
                lat_param += concentrations[i] * all_elements[elements[i]]
                if concentrations[i] > 0:
                    title += " {} ".format(elements[i])
            lat_param = float(lat_param) / sum(concentrations)
            title = "{0} {1}\n".format(default_title.strip(), title)
    return lat_param, title
34e914e38b8c4d25d9ed5fd09f435d7358f99a99
7,632
import string def tokenize(text): """ Tokenizes,normalizes and lemmatizes a given text. Input: text: text string Output: - array of lemmatized and normalized tokens """ def is_noun(tag): return tag in ['NN', 'NNS', 'NNP', 'NNPS'] def is_verb(tag): return tag in ['VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ'] def is_adverb(tag): return tag in ['RB', 'RBR', 'RBS'] def is_adjective(tag): return tag in ['JJ', 'JJR', 'JJS'] def penn_to_wn(tag): if is_adjective(tag): return wn.ADJ elif is_noun(tag): return wn.NOUN elif is_adverb(tag): return wn.ADV elif is_verb(tag): return wn.VERB return wn.NOUN tokens = word_tokenize(text.lower()) #split words into tokens and turn thwm into lower case tokens = [w for w in tokens if (w not in stopwords.words("english") and w not in string.punctuation)] # remove stopwords and punctuation tagged_words = pos_tag(tokens) #tag the tokens lemmed = [WordNetLemmatizer().lemmatize(w.lower(), pos=penn_to_wn(tag)) for (w,tag) in tagged_words] #lemmatize the tagged words if len(lemmed) == 0: #no lemmatized word should have zero length return ["error"] return lemmed
672af73d594c7a134226f4ae9a265f19b14ced34
7,633
import numpy as np
from scipy.signal import butter


def bandpass_filterbank(bands, fs=1.0, order=8, output="sos"):
    """
    Create a bank of Butterworth bandpass filters

    Parameters
    ----------
    bands: array_like, shape == (n, 2)
        The list of bands ``[[flo1, fup1], [flo2, fup2], ...]``
    fs: float, optional
        Sampling frequency (default 1.)
    order: int, optional
        The order of the IIR filters (default: 8)
    output: {'ba', 'zpk', 'sos'}
        Type of output: numerator/denominator ('ba'), pole-zero ('zpk'), or
        second-order sections ('sos'). Default is 'sos'.

    Returns
    -------
    A list with one filter per band, each in the requested representation:

    b, a : ndarray, ndarray
        Numerator (b) and denominator (a) polynomials of the IIR filter.
        Only returned if output='ba'.
    z, p, k : ndarray, ndarray, float
        Zeros, poles, and system gain of the IIR filter transfer function.
        Only returned if output='zpk'.
    sos : ndarray
        Second-order sections representation of the IIR filter.
        Only returned if output='sos'.
    """
    filters = []
    nyquist = fs / 2.0

    for band in bands:
        # reject bands starting at or above the Nyquist frequency
        if band[0] >= nyquist:
            raise ValueError("Bands should be below Nyquist frequency")

        # Truncate the highest band to the Nyquist frequency
        norm_band = np.minimum(0.99, np.array(band) / nyquist)

        # Compute coefficients; a bandpass of order N is obtained from butter(N // 2, ...)
        coeffs = butter(order // 2, norm_band, "bandpass", output=output)
        filters.append(coeffs)

    return filters
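A usage sketch, assuming scipy is available: build three bandpass sections at a 16 kHz sampling rate and filter white noise through the first band.

import numpy as np
from scipy.signal import sosfilt

bands = [[100, 300], [300, 1000], [1000, 3000]]
sos_bank = bandpass_filterbank(bands, fs=16000, order=8, output="sos")

noise = np.random.randn(16000)
band_0 = sosfilt(sos_bank[0], noise)  # noise restricted to roughly 100-300 Hz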
4cbe3acb30a0f08d39e28b46db520fdac420010d
7,634
def get_couch_client(https: bool = False, host: str = 'localhost', port: int = 5984, request_adapter: BaseHttpClient = HttpxCouchClient, **kwargs) -> CouchClient: """ Initialize CouchClient Parameters ---------- https: bool = False Schema type. Use https if value is True host: str = 'localhost' CouchDB host port: int = 5984 CouchDB port request_adapter: BaseHttpClient = HttpxCouchClient Http client adapter Returns ------- CouchClient CouchDB API realisation """ schema = 'http' if https: schema += 's' url = f'{schema}://{host}' if port: url += f':{port}' http_client = request_adapter.get_client(url, **kwargs) return CouchClient(http_client=http_client)
db242556c11debc9dff57929182d3e6932ef13d1
7,635
def compute_rmse(loss_mse): """ Computes the root mean squared error. Args: loss_mse: numeric value of the mean squared error loss Returns: loss_rmse: numeric value of the root mean squared error loss """ return np.sqrt(2 * loss_mse)
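Two quick checks of the convention used here (RMSE = sqrt(2 * MSE), matching a loss of the form one-half mean squared error); assumes numpy is imported as np in the enclosing module:

print(compute_rmse(0.5))  # 1.0
print(compute_rmse(2.0))  # 2.0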
a81024cd402c00b0d6f3bfaccc089695fb5f4e0a
7,636
def __detect_geometric_decomposition(pet: PETGraphX, root: CUNode) -> bool: """Detects geometric decomposition pattern :param pet: PET graph :param root: root node :return: true if GD pattern was discovered """ for child in pet.subtree_of_type(root, NodeType.LOOP): if not (child.reduction or child.do_all): return False for child in pet.direct_children_of_type(root, NodeType.FUNC): for child2 in pet.direct_children_of_type(child, NodeType.LOOP): if not (child2.reduction or child2.do_all): return False return True
27d90b6ced48a0db081d9881e39600d641855343
7,637
def add_two_frags_together(fragList, atm_list, frag1_id, frag2_id): """Combine two fragments in fragList.""" new_id = min(frag1_id, frag2_id) other_id = max(frag1_id, frag2_id) new_fragList = fragList[:new_id] # copy up to the combined one new_frag = { # combined frag 'ids': fragList[frag1_id]['ids'] + fragList[frag2_id]['ids'], 'syms': fragList[frag1_id]['syms'] + fragList[frag2_id]['syms'], 'grp': new_id, 'chrg': fragList[frag1_id]['chrg'] + fragList[frag2_id]['chrg'], 'mult': fragList[frag1_id]['mult'] + fragList[frag2_id]['mult'] - 1, 'name': fragList[new_id]['name'], } new_frag = add_centroids([new_frag], atm_list) new_fragList.extend(new_frag) # add new frag # add up to removed frag new_fragList.extend(fragList[new_id+1:other_id]) # change rest of values for i in range(other_id+1,len(fragList)): fragList[i]['grp'] = i-1 fragList[i]['name'] = f"frag{i-1}" new_fragList.append(fragList[i]) for i in range(len(new_fragList)): if i != new_fragList[i]["grp"]: print(i, "does not") return new_fragList, new_id
9c226883d6c021e151c51889017f56ea6a4cba3a
7,638
def concatenate(arrays, axis=0): """ Joins a sequence of tensors along an existing axis. Args: arrays: Union[Tensor, tuple(Tensor), list(Tensor)], a tensor or a list of tensors to be concatenated. axis (int, optional): The axis along which the tensors will be joined, if axis is None, tensors are flattened before use. Default is 0. Returns: Tensor, a tensor concatenated from a tensor or a list of tensors. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> import mindspore.numpy as np >>> x1 = np.ones((1,2,3)) >>> x2 = np.ones((1,2,1)) >>> x = np.concatenate((x1, x2), axis=-1) >>> print(x.shape) (1, 2, 4) """ array_type = F.typeof(arrays) if _check_is_tensor(array_type): # if the input is a single tensor # if only one tensor is provided, it is treated as a tuple along the # first dimension. For example, a tensor of shape (3,4,5) will be treated # as: tuple(tensor_1(4,5), tensor_2(4,5), tensor_3(4,5)) if axis is None: return ravel(arrays) arr_shape = F.shape(arrays) _check_axes_range((axis,), len(arr_shape)) # move axis 0 to the disiganated position, while keep other axes' relative # positions unchanged new_axes, new_shape = _move_axes_for_concatenate(arr_shape, axis) arrays = transpose(arrays, new_axes) arrays = reshape(arrays, new_shape) return arrays flattened_arrays = () if axis is None: for arr in arrays: flattened_arrays += (ravel(arr),) axis = -1 return P.Concat(axis)(flattened_arrays) arr_shape = F.shape(arrays[0]) _check_axes_range((axis,), len(arr_shape)) # if only one tensor in the tuple/list, return the tensor itself if len(arrays) == 1: return arrays[0] return P.Concat(axis)(arrays)
a85db3673d3a50d76374b809b583a8ca5325d4c3
7,640
def withCHID(fcn): """decorator to ensure that first argument to a function is a Channel ID, ``chid``. The test performed is very weak, as any ctypes long or python int will pass, but it is useful enough to catch most accidental errors before they would cause a crash of the CA library. """ # It may be worth making a chid class (which could hold connection # data of _cache) that could be tested here. For now, that # seems slightly 'not low-level' for this module. def wrapper(*args, **kwds): "withCHID wrapper" if len(args)>0: chid = args[0] args = list(args) if isinstance(chid, int): args[0] = chid = dbr.chid_t(args[0]) if not isinstance(chid, dbr.chid_t): msg = "%s: not a valid chid %s %s args %s kwargs %s!" % ( (fcn.__name__, chid, type(chid), args, kwds)) raise ChannelAccessException(msg) return fcn(*args, **kwds) wrapper.__doc__ = fcn.__doc__ wrapper.__name__ = fcn.__name__ wrapper.__dict__.update(fcn.__dict__) return wrapper
98ac8fdc812a8e9b7706e1932db662819e830597
7,644
import numpy as np


def asin(a: Dual) -> Dual:
    """Inverse sine (arcsine) of the dual number a, evaluated with np.arcsin."""
    if abs(a.value) >= 1:
        raise ValueError('Arcsin cannot be evaluated at {}.'.format(a.value))
    value = np.arcsin(a.value)
    ders = dict()
    for k, v in a.ders.items():
        ders[k] = 1 / (np.sqrt(1 - a.value ** 2)) * v
    return Dual(value, ders)
6b15e737ae5beb69f8963aa752d7fba761dce56f
7,646
import numpy as np


def hydrotopeQ(cover, hydrotopemap):
    """Get mean values of the cover map for the hydrotopes"""
    grass.message(('Get mean hydrotope values for %s' % cover))
    tbl = grass.read_command('r.univar', map=cover, zones=hydrotopemap,
                             flags='gt').split('\n')[:-1]  # [:-1] because the last line has a trailing line break
    tbl = [tuple(l.split('|')) for l in tbl]
    tbl = np.array(tbl[1:], dtype=list(zip(tbl[0], ['S250'] * len(tbl[0]))))
    tbl = np.array(list(zip(tbl['zone'], tbl['mean'])),
                   dtype=[('cat', np.int64), ('mean', np.float64)])
    return tbl[np.isfinite(tbl['mean'])]
371dc496a4bb2e33fc382dddaea66e83aa613abc
7,647
import re


def convert_to_seconds(duration_str):
    """ return duration in seconds """
    seconds = 0
    if re.match(r"[0-9]+$", duration_str):
        seconds = int(duration_str)
    elif re.match(r"[0-9]+s$", duration_str):
        seconds = int(duration_str[:-1])
    elif re.match(r"[0-9]+m$", duration_str):
        seconds = 60 * int(duration_str[:-1])
    elif re.match(r"[0-9]+h$", duration_str):
        seconds = 3600 * int(duration_str[:-1])
    elif re.match(r"[0-9]+d$", duration_str):
        # 24 * 3600 = 86400 seconds per day
        seconds = 86400 * int(duration_str[:-1])
    return seconds
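A few illustrative calls (a bare number is already interpreted as seconds):

print(convert_to_seconds("90"))   # 90
print(convert_to_seconds("15m"))  # 900
print(convert_to_seconds("2h"))   # 7200
print(convert_to_seconds("1d"))   # 86400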
222905e6089510c6f204c6ea710572a5b2132d28
7,648
import warnings
from typing import Union

Num = Union[int, float]  # numeric alias used in the signature below


def get_chunk_n_rows(row_bytes: int, working_memory: Num,
                     max_n_rows: int = None) -> int:
    """Calculates how many rows can be processed within working_memory

    Parameters
    ----------
    row_bytes : int
        The expected number of bytes of memory that will be consumed
        during the processing of each row.
    working_memory : int or float
        The memory budget in MiB; the number of rows that fit inside this
        budget is returned.
    max_n_rows : int, optional
        The maximum return value.

    Returns
    -------
    int
        The number of rows to process per chunk (at least 1, and at most
        ``max_n_rows`` if given).

    Warns
    -----
    Issues a UserWarning if ``row_bytes`` exceeds ``working_memory`` MiB.
    """
    chunk_n_rows = int(working_memory * (2 ** 20) // row_bytes)
    if max_n_rows is not None:
        chunk_n_rows = min(chunk_n_rows, max_n_rows)
    if chunk_n_rows < 1:
        # Could not adhere to working_memory config: emit the warning the
        # docstring documents and fall back to one row per chunk.
        warnings.warn('Could not adhere to working_memory config: '
                      '%.0f MiB available, %.0f bytes required per row.'
                      % (working_memory, row_bytes))
        chunk_n_rows = 1
    return chunk_n_rows
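A quick check of the arithmetic: a 64 MiB budget divided by 8 kB rows gives 8388 rows per chunk, capped by max_n_rows when provided.

print(get_chunk_n_rows(row_bytes=8000, working_memory=64))                   # 8388
print(get_chunk_n_rows(row_bytes=8000, working_memory=64, max_n_rows=1000))  # 1000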
b7c2ab10c59edb6c2541e31264b28e06266d2fc3
7,649
import numpy as np


def find_signal_analysis(prior, sparsity, sigma_data):
    """
    Generates a signal using an analytic prior.
    Works only with square and overcomplete full-rank priors.
    """
    N, L = prior.shape
    k = np.sum(np.random.random(L) > (1 - sparsity))
    V = np.zeros(shape=(L, L - k))
    while np.linalg.matrix_rank(V) != L - k:
        s = np.random.permutation(N)
        V = prior[s[:L - k]]
    x = np.random.normal(scale=sigma_data, size=(L))
    x /= np.linalg.norm(x)  # normalise to unit length
    x -= np.linalg.pinv(V) @ V @ x
    return x
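A small usage sketch with a random square prior (the rank-check loop usually exits after one draw):

rng_prior = np.random.randn(16, 16)  # square, full rank with probability 1
x = find_signal_analysis(rng_prior, sparsity=0.3, sigma_data=1.0)
print(x.shape)  # (16,)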
49a7c26b6bc934d3588ae25c99eb62e0b544616f
7,651
from typing import List import asyncio import requests def download_images(sorted_urls) -> List: """Download images and convert to list of PIL images Once in an array of PIL.images we can easily convert this to a PDF. :param sorted_urls: List of sorted URLs for split financial disclosure :return: image_list """ async def main(urls): image_list = [] loop = asyncio.get_event_loop() futures = [loop.run_in_executor(None, requests.get, url) for url in urls] for response in await asyncio.gather(*futures): image_list.append(response.content) return image_list loop = asyncio.get_event_loop() image_list = loop.run_until_complete(main(sorted_urls)) return image_list
3efde31975c7912e16ab2d990417c2aa753ca5bf
7,652
def get_molecules(struct, bonds_kw={"mult":1.20, "skin":0.0, "update":False}, ret="idx"): """ Returns the index of atoms belonging to each molecule in the Structure. """ bonds = struct.get_bonds(**bonds_kw) ## Build connectivity matrix graph = np.zeros((struct.geometry.shape[0],struct.geometry.shape[0])) for atom_idx,bonded_idx_list in enumerate(bonds): for bond_idx in bonded_idx_list: graph[atom_idx][bonded_idx_list] = 1 graph = csr_matrix(graph) n_components, component_list = connected_components(graph) molecule_idx_list = [np.where(component_list == x)[0] for x in range(n_components)] if ret == "idx": return molecule_idx_list elif ret == "struct": ## Returns list of structures geo = struct.get_geo_array() ele = struct.geometry["element"] molecule_struct_list = [] for idx,entry in enumerate(molecule_idx_list): mol_geo = geo[entry] mol_ele = ele[entry] mol = Structure.from_geo(mol_geo,mol_ele) mol.struct_id = "{}_molecule_{}".format(struct.struct_id, idx) molecule_struct_list.append(mol) return molecule_struct_list else: ## Returns list of structures geo = struct.get_geo_array() ele = struct.geometry["element"] molecule_struct_dict = {} for idx,entry in enumerate(molecule_idx_list): mol_geo = geo[entry] mol_ele = ele[entry] mol = Structure.from_geo(mol_geo,mol_ele) mol.struct_id = "{}_molecule_{}".format(struct.struct_id, idx) molecule_struct_dict[mol.struct_id] = mol return molecule_struct_dict
99b67f95114ddd6c712c8fe63a0713a914b8888f
7,653
def cdivs(a,b,c,d,e,f,al1,al2,al3,x11,x21,x22,x23,x31,x32,x33): """Finds the c divides conditions for the symmetry preserving HNFs. Args: a (int): a from the HNF. b (int): b from the HNF. c (int): c from the HNF. d (int): d from the HNF. e (int): e from the HNF. f (int): f from the HNF. al1 (numpy.array): array of alpha1 values from write up. al2 (numpy.array): array of alpha2 values from write up. al3 (numpy.array): array of alpha3 values from write up. x11 (numpy.array): array of pg values for x(1,1) spot. x21 (numpy.array): array of pg values for x(2,1) spot. x22 (numpy.array): array of pg values for x(2,2) spot. x23 (numpy.array): array of pg values for x(2,3) spot. x31 (numpy.array): array of pg values for x(3,1) spot. x32 (numpy.array): array of pg values for x(3,2) spot. x33 (numpy.array): array of pg values for x(3,3) spot. Returns: HNFs (list of lists): The symmetry preserving HNFs. """ HNFs = [] if np.allclose(x23,0): if b == None: # find the b values, d and e still unkown if not np.allclose(al3, 0): N=0 at = al3[np.nonzero(al3)] val = np.unique(N*c/at) while any(abs(val) <c): for v in val: if v < c and v >= 0 and np.allclose(v%1==0): b = v c1 = a*x21 + b*(x22-al1-x11) c2 =(-b*al2) if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 =c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N += 1 val = np.unique(N*c/at) elif not np.allclose(al2,0): N=0 at = al2[np.nonzero(al2)] val = np.unique(N*c/at) while any(abs(val) <c): for v in val: if v < c and v>=0 and np.allclose(v%1,0): b = v c1 = a*x21 + b*(x22-al1-x11) c3 =(-b*al3) if np.allclose(c1%c,0) and np.allclose(c3%c,0): be1 = c1/c be2 =-b*al2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N += 1 val = np.unique(N*c/at) else: if not np.allclose((x22-x11-al1),0): N=0 xt = (x22-x11-al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(N*c-a*x21,1/xt),len(x21)*len(xt))) while any(abs(val) <c): for v in val: if v < c and v>=0 and np.allclose(v%1,0): b = v c2 = -b*al2 c3 =(-b*al3) if np.allclose(c2%c,0) and np.allclose(c3%c,0): be1 = (a*x21+b*(x22-x11-al1))/c be2 =-b*al2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in HNFs: HNFs.append(t) N += 1 xt = (x22-x11-al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(N*c-a*x21,1/xt),len(x21)*len(xt))) else: c1 = a*x21 c2 = 0 c3 = 0 if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0): tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in HNFs: HNFs.append(t) else: c1 = a*x21 + b*(x22-al1-x11) c2 = (-b*al2) c3 = (-b*a13) if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0): tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in HNFs: HNFs.append(t) else: if np.allclose(al3,0): if np.allclose((f*x23)%c,0): if b == None and e == None and d == None: if np.allclose(al3,0) and np.allclose(al2,0) and np.allclose(al3,0): N = 0 xt = x23[np.nonzero(x23)] val = np.unique(N*c/xt) while any(abs(val)<f): for v in val: if v <f and v>=0 and np.allclose(v%1,0): e = v for b in range(c): N2 =0 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer((N2*c-a*x21-b*(x22-x11)),1/xt),len(x22)*len(xt))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2>=0 and np.allclose(v2%1,0): d = v2 be1 = (a*x21+b*(x22-x11)+d*x23)/c be2 = e*x23/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.appned(t) N2 += 1 xt = x23[np.nonzero(x23)] val2 = 
np.unique(np.reshape(np.outer((N2*c-a*x21-b*(x22-x11)),1/xt),len(x22)*len(xt))) N += 1 val = np.unique(N*c/xt) elif not np.allclose(al3,0): N = max(np.round(f*x23/c)) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(al3))) while any(abs(val) < c): for v in val: if v < c and v>=0 and np.allclose(v%1,0): b = v N2 = min(np.round(-b*al2/c)) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2>=0 and np.allclose(v2%1,0): e = v2 N3 = min(np.round((a*x21+b*(x22-x11-al1))/c)) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22))) while any(abs(val2)<f): for v3 in val3: if v3 <f and v3>=0 and np.allclose(v3%1,0): d = v3 be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c be2 = (e*x32-b*al2)/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(x22)*len(xt))) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) else: for b in range(c): N2 = min(np.round(-b*al2/c)) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2 >= 0 and np.allclose(v2%1,0): e = v2 N3 = min(np.round((a*x21+b*(x22-x11-al1))/c)) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) while any(abs(val2)<f): for v3 in val3: if v3 <f and v3 >= 0 and np.allclose(v3%1,0): d = v3 be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c be2 = (e*x32-b*al2)/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(xt)*len(x22))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(al2)*len(xt))) elif b == None: if not np.allclose(al3,0): N = max(np.round(f*x23/c)) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) while any(abs(val) < c): for v in val: if v < c and v>= 0 and np.allclose(v%1,0): b = v c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) elif not np.allclose(al2,0): N = max(np.round(e*x23/c)) at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer(-N*c+e*x23,1/at),len(x23)*len(at))) while any(abs(val) < c): for v in val: if v < c and v>= 0 and np.allclose(v%1,0): b = v c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N -= 1 at = al2[np.nonzero(al2)] val = np.unique(np.reshape(np.outer(-N*c+e*x23,1/at),len(x23)*len(at))) else: if not np.allclose((x22-x11-al1),0): N = min(np.round((a*x21-d*x23)/c)) xt = (x22-x11-al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(N*c-a*x21sd*x23,1/xt),len(x23)*len(xt))) while any(abs(val) < 
c): for v in val: if v < c and v>=0 and np.allclose(v%1,0): b = v c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N += 1 xt = (x22-x11-al1) xt = xt[np.nonzero(xt)] val = np.unique(np.reshape(np.outer(N*c-a*x21sd*x23,1/xt),len(x23)*len(xt))) else: c1 = a*x21+d*x23 c2 = e*x23 c3 = f*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0): tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) elif d == None and e == None: N2 = min(np.round(-b*al2/c)) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2>=0 and np.allclose(v2%1,0): e = v2 N3 = min(np.round((a*x21+b*(x22-x11-al1))/c)) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) while any(abs(val3)<f): for v3 in val3: if v3 <f and v3>=0 and np.allclose(v3%1,0): d = v3 be1 = (a*x21+b*(x22-x11-al1)+d*x23)/c be2 = (e*x32-b*al2)/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) else: c1 = a*x21+b*(x22-al1-x11)+d*x23 c2 = -b*al2+e*x23 c3 = -b*al3+f*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0) and np.allclose(c3%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) # else: # print("f: ",f) # print("c: ",c) # print("x32: ",x32) # print("failed f*x32/c") else: if b==None and d==None and e==None: N = max(np.round(f*x23/c)) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) while any(abs(val) < c): for v in val: if v < c and v>= 0 and np.allclose(v%1,0): b = v N2 = min(np.round(-b*al2/c)) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2>=0 and np.allclose(v2%1,0): e = v2 N3 = min(np.round((a*x21+b*(x22-x11-al1))/c)) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) while any(abs(val3)<f): for v3 in val3: if v3 <f and v3>=0 and np.allclose(v3%1,0): d = v3 c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) elif b==None: N = max(np.round(f*x23/c)) at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) while any(abs(val) < c): for v in val: if v < c and v>= 0 and np.allclose(v%1,0): b = v c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in 
tHNFs: HNFs.append(t) N -= 1 at = al3[np.nonzero(al3)] val = np.unique(np.reshape(np.outer(-N*c+f*x23,1/at),len(x23)*len(at))) elif d==None and e==None: N2 = min(np.round(-b*al2/c)) xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) while any(abs(val2)<f): for v2 in val2: if v2 <f and v2>=0 and np.allclose(v2%1,0): e = v2 N3 = min(np.round((a*x21+b*(x22-x11-al1))/c)) xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) while any(abs(val3)<f): for v3 in val3: if v3 <f and v3>=0 and np.allclose(v3%1,0): d = v3 c1 = a*x21+b*(x22-x11-al1)+d*x23 c2 = -b*al2+e*x23 if np.allclose(c1%c,0) and np.allclose(c2%c,0): be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) N3 += 1 xt = x23[np.nonzero(x23)] val3 = np.unique(np.reshape(np.outer(N3*c-a*x21-b*(x22-x11-al1),1/xt),len(x22)*len(xt))) N2 += 1 xt = x23[np.nonzero(x23)] val2 = np.unique(np.reshape(np.outer(N2*c+b*al2,1/xt),len(xt)*len(al2))) else: be1 = c1/c be2 = c2/c tHNFs = fdivs(a,b,c,d,e,f,al1,al2,be1,be2,x11,x22,x31,x32,x33) for t in tHNFs: HNFs.append(t) return HNFs
20a0044050964c5705f3bce2297f2724d6f12f71
7,654
def numeric_field_list(model_class): """Return a list of field names for every numeric field in the class.""" def is_numeric(type): return type in [BigIntegerField, DecimalField, FloatField, IntegerField, PositiveIntegerField, PositiveSmallIntegerField, SmallIntegerField] fields = [] for (field, type) in field_list(model_class): if is_numeric(type): fields += [field] return fields
a501c2a7bc87f7cdea8945a946937f72cc0576a9
7,655
import tokenize def _get_lambda_source_code(lambda_fn, src): """Attempt to find the source code of the ``lambda_fn`` within the string ``src``.""" def gen_lambdas(): def gen(): yield src + "\n" g = gen() step = 0 tokens = [] for tok in tokenize.generate_tokens(getattr(g, "next", getattr(g, "__next__", None))): if step == 0: if tok[0] == tokenize.NAME and tok[1] == "lambda": step = 1 tokens = [tok] level = 0 elif step == 1: if tok[0] == tokenize.NAME: tokens.append(tok) step = 2 else: step = 0 elif step == 2: if tok[0] == tokenize.OP and tok[1] == ":": tokens.append(tok) step = 3 else: step = 0 elif step == 3: if level == 0 and (tok[0] == tokenize.OP and tok[1] in ",)" or tok[0] == tokenize.ENDMARKER): yield tokenize.untokenize(tokens).strip() step = 0 else: tokens.append(tok) if tok[0] == tokenize.OP: if tok[1] in "[({": level += 1 if tok[1] in "])}": level -= 1 assert not tokens actual_code = lambda_fn.__code__.co_code for lambda_src in gen_lambdas(): try: fn = eval(lambda_src, globals(), locals()) if fn.__code__.co_code == actual_code: return lambda_src.split(":", 1)[1].strip() except Exception: pass return "<lambda>"
5192a299bf88c9fdc070fae28e585cda3a09aadc
7,656
import requests import json def retrieve_keycloak_public_key_and_algorithm(token_kid: str, oidc_server_url: str) -> (str, str): """ Retrieve the public key for the token from keycloak :param token_kid: The user token :param oidc_server_url: Url of the server to authorize with :return: keycloak public key and algorithm """ handle = f'{oidc_server_url}/protocol/openid-connect/certs' logger.info(f'Getting public key for the kid={token_kid} from the keycloak...') r = requests.get(handle) if r.status_code != 200: error = "Could not get certificates from the keycloak. " \ "Reason: [{}]: {}".format(r.status_code, r.text) logger.error(error) raise ValueError(error) try: json_response = r.json() except Exception: error = "Could not retrieve the public key. " \ "Got unexpected response: '{}'".format(r.text) logger.error(error) raise ValueError(error) try: matching_key = next((item for item in json_response.get('keys') if item['kid'] == token_kid), None) matching_key_json = json.dumps(matching_key) public_key = RSAAlgorithm.from_jwk(matching_key_json) except Exception as e: error = f'Invalid public key!. Reason: {e}' logger.error(error) raise ValueError(error) logger.info(f'The public key for the kid={token_kid} has been fetched.') return matching_key.get('alg'), public_key
87e706b56c63b991e1524b5d6ffcec86d6a9bc67
7,657
def read_conformations(filename, version="default", sep="\t", comment="#", encoding=None, mode="rb", **kw_args): """ Extract conformation information. Parameters ---------- filename: str Relative or absolute path to file that contains the RegulonDB information. Returns ------- """ kw_args["mode"] = mode kw_args["encoding"] = encoding conformations = list() with open_file(filename, **kw_args) as (file_h, ext): iter_rowset = FILE_PARSERS.get(ext, iter_rowset_flat_file) for row in iter_rowset(file_h): tf_id = row["transcription_factor_id"] try: t_factor = elem.TranscriptionFactor[tf_id, version] except KeyError: LOGGER.warn("unknown transcription factor %s", tf_id) LOGGER.warn("Please parse transcription factor information before"\ " parsing conformations.") continue conf = elem.Conformation( unique_id=row["conformation_id"], name_space=version, tf=t_factor, state=row["final_state"], interaction=row["interaction_type"], conformation_type=row.get("conformation_type", None), # version dependent apo_holo=row.get("apo_holo_conformation", None) # version dependent ) t_factor.conformations.add(conf) conformations.append(conf) return conformations
3588ee68a8a498dbfb1f85d65a8eff65b5ff5ed1
7,658
def maskRipple(inRpl, outFile, mask):
    """maskRipple(inRpl, outFile, mask)
    Sets the individual data items to zero based on the specified mask. If mask.getRGB(c,r)>0
    then copy the contents at (c,r) of inRpl to outFile.rpl. Otherwise the contents of outFile
    are set to all zeros."""
    outRpl = "%s.rpl" % outFile
    outRaw = "%s.raw" % outFile
    depth = inRpl.getDepth()
    ty = inRpl.getDataType()
    res = ept.RippleFile(inRpl.getColumns(), inRpl.getRows(), inRpl.getDepth(), inRpl.getDataType(),
                         inRpl.getDataSize(), ept.RippleFile.DONT_CARE_ENDIAN, outRpl, outRaw)
    zero = [0] * depth
    for c in xrange(0, inRpl.getColumns()):
        for r in xrange(0, inRpl.getRows()):
            inRpl.setPosition(c, r)
            res.setPosition(c, r)
            if mask.getRGB(c, r) > 0:
                if ty == inRpl.FLOAT:
                    res.write(inRpl.readDouble(depth))
                else:
                    res.write(inRpl.readInt(depth))
            else:
                # masked-out positions are filled with zeros, as the docstring states
                res.write(zero)
    return res
65d5464e9de469cf45b47991ed838a79c587d965
7,659
def GetCurrentScene() -> Scene: """ Returns current scene. Raises SpykeException if current scene is not set. """ if not _currentScene: raise SpykeException("No scene is set current.") return _currentScene
82a065e4cbd0aa4b326d53b3360aac52a99ac682
7,660
def timeago(seconds=0, accuracy=4, format=0, lang="en", short_name=False): """Translate seconds into human-readable. :param seconds: seconds (float/int). :param accuracy: 4 by default (units[:accuracy]), determine the length of elements. :param format: index of [led, literal, dict]. :param lang: en or cn. :param units: day, hour, minute, second, ms. >>> timeago(93245732.0032424, 5) '1079 days, 05:35:32,003' >>> timeago(93245732.0032424, 4, 1) '1079 days 5 hours 35 minutes 32 seconds' >>> timeago(-389, 4, 1) '-6 minutes 29 seconds 0 ms' """ assert format in [0, 1, 2], ValueError("format arg should be one of 0, 1, 2") negative = "-" if seconds < 0 else "" is_en = lang == "en" seconds = abs(seconds) if is_en: if short_name: units = ("day", "hr", "min", "sec", "ms") else: units = ("day", "hour", "minute", "second", "ms") elif lang == "cn": if short_name: units = (u"日", u"时", u"分", u"秒", u"毫秒") else: units = (u"天", u"小时", u"分钟", u"秒", u"毫秒") times = split_seconds(seconds) if format == 2: return dict(zip(units, times)) day, hour, minute, second, ms = times if format == 0: day_str = ("%d %s%s, " % (day, units[0], "s" if day > 1 and is_en else "") if day else "") mid_str = ":".join(("%02d" % i for i in (hour, minute, second))) if accuracy > 4: mid_str += ",%03d" % ms return negative + day_str + mid_str elif format == 1: if seconds: # find longest valid fields index (non-zero for head and tail) for index, item in enumerate(times): if item != 0: head_index = index break for index, item in enumerate(reversed(times)): if item != 0: tail_index = len(times) - index break result_str = [ "%d %s%s" % (num, unit, "s" if is_en and num > 1 and unit != "ms" else "") for num, unit in zip(times, units) ][head_index:tail_index][:accuracy] result_str = " ".join(result_str) else: result_str = "0 %s" % units[-1] return negative + result_str
b6a5858c3f5c5291b03654d076eb3f1e835f78c0
7,662
def generate_headline(ids=None): """Generate and return an awesome headline. Args: ids: Iterable of five IDs (intro, adjective, prefix, suffix, action). Optional. If this is ``None``, random values are fetched from the database. Returns: Tuple of parts and permalink (intro, adjective, prefix, suffix, action, permalink) """ print('[schlagzeilengenerator] Generating a headline...') # Correct endings adjective_endings = { 'm': 'r', 'f': '', 's': 's', 'p': '', } # Get random database entries if ids is not None: d_intro = get_by_id('intro', ids[0]) d_adjective = get_by_id('adjective', ids[1]) d_prefix = get_by_id('prefix', ids[2]) d_suffix = get_by_id('suffix', ids[3]) d_action = get_by_id('action', ids[4]) else: d_intro = get_random('intro') d_adjective = get_random('adjective') d_prefix = get_random('prefix') d_suffix = get_random('suffix') d_action = get_random('action') ids = (d_intro['id'], d_adjective['id'], d_prefix['id'], d_suffix['id'], d_action['id']) # Get data from dictionaries case = d_suffix['case'] intro = d_intro['text'] adjective = d_adjective['text'] + adjective_endings[case] prefix = d_prefix['text'] suffix = d_suffix['text'] if case == 'p': action = '%s %s' % (d_action['action_p'], d_action['text']) else: action = '%s %s' % (d_action['action_s'], d_action['text']) # Build permalink permalink = b64encode(b','.join(str(i).encode('ascii') for i in ids)) return intro, adjective, prefix, suffix, action.strip(), permalink
09fda0075b036ea51972b2f124733de9f34671fc
7,663
import webbrowser def open_in_browser(path): """ Open directory in web browser. """ return webbrowser.open(path)
41328b2b478f0bd69695da1868c412188e494d08
7,664
def lstm_cell_forward(xt, a_prev, c_prev, parameters): """ Implement a single forward step of the LSTM-cell as described in Figure (4) Arguments: xt -- your input data at timestep "t", numpy array of shape (n_x, m). a_prev -- Hidden state at timestep "t-1", numpy array of shape (n_a, m) c_prev -- Memory state at timestep "t-1", numpy array of shape (n_a, m) parameters -- python dictionary containing: Wf -- Weight matrix of the forget gate, numpy array of shape (n_a, n_a + n_x) bf -- Bias of the forget gate, numpy array of shape (n_a, 1) Wi -- Weight matrix of the update gate, numpy array of shape (n_a, n_a + n_x) bi -- Bias of the update gate, numpy array of shape (n_a, 1) Wc -- Weight matrix of the first "tanh", numpy array of shape (n_a, n_a + n_x) bc -- Bias of the first "tanh", numpy array of shape (n_a, 1) Wo -- Weight matrix of the output gate, numpy array of shape (n_a, n_a + n_x) bo -- Bias of the output gate, numpy array of shape (n_a, 1) Wy -- Weight matrix relating the hidden-state to the output, numpy array of shape (n_y, n_a) by -- Bias relating the hidden-state to the output, numpy array of shape (n_y, 1) Returns: a_next -- next hidden state, of shape (n_a, m) c_next -- next memory state, of shape (n_a, m) yt_pred -- prediction at timestep "t", numpy array of shape (n_y, m) cache -- tuple of values needed for the backward pass, contains (a_next, c_next, a_prev, c_prev, xt, parameters) Note: ft/it/ot stand for the forget/update/output gates, cct stands for the candidate value (c tilde), c stands for the cell state (memory) """ # Retrieve parameters from "parameters" Wf = parameters["Wf"] # forget gate weight bf = parameters["bf"] Wi = parameters["Wi"] # update gate weight (notice the variable name) bi = parameters["bi"] # (notice the variable name) Wc = parameters["Wc"] # candidate value weight bc = parameters["bc"] Wo = parameters["Wo"] # output gate weight bo = parameters["bo"] Wy = parameters["Wy"] # prediction weight by = parameters["by"] # Retrieve dimensions from shapes of xt and Wy n_x, m = xt.shape n_y, n_a = Wy.shape ### START CODE HERE ### # Concatenate a_prev and xt (≈1 line) concat = np.concatenate((a_prev,xt),axis=0) # Compute values for ft (forget gate), it (update gate), # cct (candidate value), c_next (cell state), # ot (output gate), a_next (hidden state) (≈6 lines) ft = sigmoid(np.dot(Wf,concat)+bf) # forget gate it = sigmoid(np.dot(Wi,concat)+bi) # update gate cct = np.tanh(np.dot(Wc,concat)+bc) # candidate value c_next = ft*c_prev+it*cct # cell state ot = sigmoid(np.dot(Wo,concat)+bo) # output gate a_next = ot*np.tanh(c_next) # hidden state # Compute prediction of the LSTM cell (≈1 line) yt_pred = softmax(np.dot(Wy,a_next)+by) ### END CODE HERE ### # store values needed for backward propagation in cache cache = (a_next, c_next, a_prev, c_prev, ft, it, cct, ot, xt, parameters) return a_next, c_next, yt_pred, cache
9d1ae3ea6da9de6827b5ecd9f8871ee8aae26d30
7,665
def encode_letter(letter): """ This will encode a tetromino letter as a small integer """ value = None if letter == 'i': value = 0 elif letter == 'j': value = 1 elif letter == 'l': value = 2 elif letter == 'o': value = 3 elif letter == 's': value = 4 elif letter == 't': value = 5 elif letter == 'z': value = 6 return value
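For example:

print(encode_letter('i'))  # 0
print(encode_letter('t'))  # 5
print(encode_letter('x'))  # None for letters outside the tetromino set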
6c72c4c9e44c93d045296ab1f49c7783f2b4fc59
7,666
async def register_log_event( registration: LogEventRegistration, db: Session = Depends(get_db) ): """ Log event registration handler. :param db: :param registration: Registration object :return: None """ reg_id = str(uuid4()) # Generate message for registration topic msg = LogEventRegistrationMessage( to_address=registration.address, keyword=registration.keyword, position=registration.position, ) # Produce message for registration topic producer.produce( topic=settings.REGISTRATIONS_TOPIC, key=string_serializer(reg_id, key_context), value=json_serializer(msg.dict(), value_context), callback=acked, ) retry_count = 0 while True: if retry_count >= settings.MAX_CONFIRM_WAIT: raise HTTPException( 500, "Registration not confirmed. Try again. (NOINSERT)" ) try: # Query the DB to check if insert was done correctly row = crud.get_event_registration_by_id_no_404(db, reg_id) if row: break else: retry_count += 1 sleep(1) except: retry_count += 1 sleep(1) # Check if query returned correct result if ( not row.to_address == registration.address and not row.keyword == registration.keyword and not row.position == registration.position ): raise HTTPException(500, "Registration not confirmed. Try again. (NOMATCH)") return {"reg_id": reg_id, "status": "registered"}
62b84b9efa88512634d9c7a050e7c61ff06ba71a
7,667
def cvAbsDiffS(*args): """cvAbsDiffS(CvArr src, CvArr dst, CvScalar value)""" return _cv.cvAbsDiffS(*args)
b888683d1522c46c9dc7738a18b80f56efe975d3
7,668
from . import views # this must be placed here, after the app is created def create_template_app(**kwargs): """Create a template Flask app""" app = create_app(**kwargs) app.register_blueprints() return app
fbbb0018cd4da6897842f658ba3baf207e5614cc
7,669
def mse(predict, actual): """ Examples(rounded for precision): >>> actual = [1,2,3];predict = [1,4,3] >>> np.around(mse(predict,actual),decimals = 2) 1.33 >>> actual = [1,1,1];predict = [1,1,1] >>> mse(predict,actual) 0.0 """ predict = np.array(predict) actual = np.array(actual) difference = predict - actual square_diff = np.square(difference) score = square_diff.mean() return score
c42ee6d5531d40f727c41463f938c9c8f4ec6e84
7,670
import random def make_demo_measurements(num_measurements, extra_tags=frozenset()): """Make a measurement object.""" return [ make_flexural_test_measurement( my_id=__random_my_id(), deflection=random.random(), extra_tags=extra_tags ) for _ in range(num_measurements) ]
10c452936e889a8553afd1a9a570e34abae73470
7,671
from re import S def _nrc_coron_rescale(self, res, coord_vals, coord_frame, siaf_ap=None, sp=None): """ Function for better scaling of NIRCam coronagraphic output for sources that overlap the image masks. """ if coord_vals is None: return res nfield = np.size(coord_vals[0]) psf_sum = _nrc_coron_psf_sums(self, coord_vals, coord_frame, siaf_ap=siaf_ap) if psf_sum is None: return res # Scale by countrate of observed spectrum if (sp is not None) and (not isinstance(sp, list)): nspec = 1 obs = S.Observation(sp, self.bandpass, binset=self.bandpass.wave) sp_counts = obs.countrate() elif (sp is not None) and (isinstance(sp, list)): nspec = len(sp) if nspec==1: obs = S.Observation(sp[0], self.bandpass, binset=self.bandpass.wave) sp_counts = obs.countrate() else: sp_counts = [] for i, sp_norm in enumerate(sp): obs = S.Observation(sp_norm, self.bandpass, binset=self.bandpass.wave) sp_counts.append(obs.countrate()) sp_counts = np.array(sp_counts) else: nspec = 0 sp_counts = 1 if nspec>1 and nspec!=nfield: _log.warn("Number of spectra should be 1 or equal number of field points") # Scale by count rate psf_sum *= sp_counts # Re-scale PSF by total sums if isinstance(res, fits.HDUList): for i, hdu in enumerate(res): hdu.data *= (psf_sum[i] / hdu.data.sum()) elif nfield==1: res *= (psf_sum[0] / res.sum()) else: for i, data in enumerate(res): data *= (psf_sum[i] / data.sum()) return res
3b4e8596177e126955c7665333dd1305603f4e66
7,672
def csv_to_blob_ref(csv_str, # type: str blob_service, # type: BlockBlobService blob_container, # type: str blob_name, # type: str blob_path_prefix=None, # type: str charset=None # type: str ): # type: (...) -> AzmlBlobTable """ Uploads the provided CSV to the selected Blob Storage service, and returns a reference to the created blob in case of success. :param csv_str: :param blob_service: the BlockBlobService to use, defining the connection string :param blob_container: the name of the blob storage container to use. This is the "root folder" in azure blob storage wording. :param blob_name: the "file name" of the blob, ending with .csv or not (in which case the .csv suffix will be appended) :param blob_path_prefix: an optional folder prefix that will be used to store your blob inside the container. For example "path/to/my/" :param charset: :return: """ # setup the charset used for file encoding if charset is None: charset = 'utf-8' elif charset != 'utf-8': print("Warning: blobs can be written in any charset but currently only utf-8 blobs may be read back into " "DataFrames. We recommend setting charset to None or utf-8 ") # validate inputs (the only one that is not validated below) validate('csv_str', csv_str, instance_of=str) # 1- first create the references in order to check all params are ok blob_reference, blob_full_name = create_blob_ref(blob_service=blob_service, blob_container=blob_container, blob_path_prefix=blob_path_prefix, blob_name=blob_name) # -- push blob blob_stream = BytesIO(csv_str.encode(encoding=charset)) # noinspection PyTypeChecker blob_service.create_blob_from_stream(blob_container, blob_full_name, blob_stream, content_settings=ContentSettings(content_type='text.csv', content_encoding=charset)) # (For old method with temporary files: see git history) return blob_reference
c0df47839e963a5401204bcd422c7f78a94efc87
7,675
def col_rev_reduce(matrix, col, return_ops=False): """ Reduces a column into reduced echelon form by transforming all numbers above the pivot position into 0's :param matrix: list of lists of equal length containing numbers :param col: index of column :param return_ops: performed operations are returned :return: list of lists of equal length containing numbers """ ops = [] pivot_row = 0 # Defaults to top row # Find pivot row of the column for row in range(len(matrix)-1, -1, -1): if matrix[row][col] != 0: pivot_row = row break # Transform all numbers above the pivot to 0 if matrix[pivot_row][col] != 0 and matrix[pivot_row][col] != 1: factor = 1 / matrix[pivot_row][col] matrix = row_multiply(matrix, pivot_row, factor) ops.append(['multiplication', pivot_row, factor]) if pivot_row != 0: for row in range(pivot_row): if matrix[row][col] != 0: factor = matrix[row][col] / matrix[pivot_row][col] matrix = row_subtract(matrix, pivot_row, row, factor) ops.append(['subtract', pivot_row, row, factor]) if return_ops: return matrix, ops else: return matrix
ab97078f0c92537532673d3dba3cb399d932342e
7,676
def query_category_members(category, language='en', limit=100): """ action=query,prop=categories Returns all the members of a category up to the specified limit """ url = api_url % (language) query_args = { 'action': 'query', 'list': 'categorymembers', 'cmtitle': category, 'format': 'json', 'cmlimit': min(limit, 500) } members = [] while True: json = _run_query(query_args, language) for member in json['query']['categorymembers']: members.append(member['title']) if 'query-continue' in json and len(members) <= limit: continue_item = json['query-continue']['categorymembers']['cmcontinue'] query_args['cmcontinue'] = continue_item else: break return members[0:limit]
4a09d73cce237152405031004e967192ad3f8929
7,680
from typing import List def _tokenize_text(text: str, language: str) -> List[str]: """Splits text into individual words using the correct method for the given language. Args: text: Text to be split. language: The configured language code. Returns: The text tokenized into a list of words. """ if language == constants.LANGUAGE_CODE_JA: return _split_words_in_japanese(text) else: return text.split()
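An illustrative call for the non-Japanese path, assuming it runs inside its module (where `constants` and `_split_words_in_japanese` are defined); for other languages the function reduces to whitespace splitting:

print(_tokenize_text("hello generative world", "en"))
# ['hello', 'generative', 'world']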
284f1a7625de149b7f97ce51dcf88110ebae02b0
7,681
def ml64_sort_order(c): """ Sort function for measure contents. Items are sorted by time and then, for equal times, in this order: * Patch Change * Tempo * Notes and rests """ if isinstance(c, chirp.Note): return (c.start_time, 10) elif isinstance(c, Rest): return (c.start_time, 10) elif isinstance(c, MeasureMarker): return (c.start_time, 1) elif isinstance(c, TempoEvent): return (c.start_time, 3) elif isinstance(c, ProgramEvent): return (c.start_time, 2) else: return (c.start_time, 5)
752a68796a12835661cfce5b2cfe5cba3ad5d7ef
7,682
def electron_mass_MeVc2(): """The rest mass of the electron in MeV/c**2 https://en.wikipedia.org/wiki/Electron """ return 0.5109989461
4496ddcc35a0aa6528cc19e47233f5a81626fefe
7,684