Columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def is_compiled_with_npu():
    """
    Whether paddle was built with WITH_ASCEND_CL=ON to support Ascend NPU.

    Returns (bool): `True` if NPU is supported, otherwise `False`.

    Examples:
        .. code-block:: python

            import paddle
            support_npu = paddle.device.is_compiled_with_npu()
    """
    return core.is_compiled_with_npu()
54bf625843a098bfee93d8c1ac5b79bd562602fe
19,900
def odd_occurrence_parity_set(arr):
    """
    A similar implementation to the XOR idea above, but more naive.
    As we iterate over the passed list, a working set keeps track of the
    numbers that have occurred an odd number of times. At the end, the set
    will only contain one number.

    Though the worst-case time complexity is the same as the hashmap method
    implemented below, this will probably be significantly faster as
    dictionaries have much longer lookup times than sets.

    Space complexity: $O(n)$; Time complexity: $O(n)$.

    Parameters
    ----------
    arr : list of int

    Returns
    -------
    integer
    """
    seen_odd_times = set()
    for num in arr:
        if num in seen_odd_times:
            seen_odd_times.remove(num)
        else:
            seen_odd_times.add(num)
    return list(seen_odd_times)[0]
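For illustration, a minimal hypothetical call of the function above; the only assumption is an input list in which exactly one value occurs an odd number of times:

# Illustrative usage: 7 is the only value occurring an odd number of times.
assert odd_occurrence_parity_set([2, 3, 7, 2, 3]) == 7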
57f9362e05786724a1061bef07e49635b1b2b142
19,901
import time def larmor_step_search(step_search_center=cfg.LARMOR_FREQ, steps=200, step_bw_MHz=5e-3, plot=False, shim_x=cfg.SHIM_X, shim_y=cfg.SHIM_Y, shim_z=cfg.SHIM_Z, delay_s=1, gui_test=False): """ Run a stepped search through a range of frequencies to find the highest signal response Used to find a starting point, not for precision Args: step_search_center (float): [MHz] Center for search, defaults to config LARMOR_FREQ steps (int): Number of search steps step_bw_MHz (float): [MHz] Distance in MHz between each step plot (bool): Default False, plot final data shim_x, shim_y, shim_z (float): Shim value, defaults to config SHIM_ values, must be less than 1 magnitude delay_s (float): Delay between readings in seconds gui_test (bool): Default False, takes dummy data instead of actual data for GUI testing away from scanner Returns: float: Estimated larmor frequency in MHz dict: Dictionary of data """ # Pick out the frequencies to run through swept_freqs = np.linspace(step_search_center - ((steps-1)/2 * step_bw_MHz), step_search_center + ((steps-1)/2 * step_bw_MHz), num=steps) larmor_freq = swept_freqs[0] # Set the sequence file for a single spin echo seq_file = cfg.MGH_PATH + 'cal_seq_files/se_1.seq' # Run the experiment once to prep array rxd, rx_t = scr.run_pulseq(seq_file, rf_center=larmor_freq, tx_t=1, grad_t=10, tx_warmup=100, shim_x=shim_x, shim_y=shim_y, shim_z=shim_z, grad_cal=False, save_np=False, save_mat=False, save_msgs=False, gui_test=gui_test) # Create array for storing data rx_arr = np.zeros((rxd.shape[0], steps), dtype=np.cdouble) rx_arr[:,0] = rxd # Pause for spin recovery time.sleep(delay_s) # Repeat for each frequency after the first for i in range(1, steps): print(f'{swept_freqs[i]:.4f} MHz') rx_arr[:,i], _ = scr.run_pulseq(seq_file, rf_center=swept_freqs[i], tx_t=1, grad_t=10, tx_warmup=100, shim_x=shim_x, shim_y=shim_y, shim_z=shim_z, grad_cal=False, save_np=False, save_mat=False, save_msgs=False, gui_test=gui_test) time.sleep(delay_s) # Find the frequency data with the largest maximum absolute value max_ind = np.argmax(np.max(np.abs(rx_arr), axis=0, keepdims=False)) max_freq = swept_freqs[max_ind] print(f'Max frequency: {max_freq:.4f} MHz') # Plot figure if plot: fig, axs = plt.subplots(2, 1, constrained_layout=True) fig.suptitle(f'{steps}-step search around {step_search_center:.4f} MHz') axs[0].plot(np.real(rx_arr)) axs[0].legend([f'{freq:.4f} MHz' for freq in swept_freqs]) axs[0].set_title('Concatenated signal -- Real') axs[1].plot(np.abs(rx_arr)) axs[1].set_title('Concatenated signal -- Magnitude') plt.show() # Output of useful data for visualization data_dict = {'rx_arr': rx_arr, 'rx_t': rx_t, 'larmor_freq': larmor_freq } # Return the frequency that worked the best return max_freq, data_dict
647d67a491cf787dbc092a621c9ba5ad8097b21e
19,902
from typing import Optional


def get_role_tempalte(context: Optional[str] = None,
                      name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRoleTempalteResult:
    """
    Use this data source to access information about an existing resource.
    """
    pulumi.log.warn("""get_role_tempalte is deprecated: rancher2.getRoleTempalte has been deprecated in favor of rancher2.getRoleTemplate""")
    __args__ = dict()
    __args__['context'] = context
    __args__['name'] = name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('rancher2:index/getRoleTempalte:getRoleTempalte', __args__, opts=opts, typ=GetRoleTempalteResult).value

    return AwaitableGetRoleTempalteResult(
        administrative=__ret__.administrative,
        annotations=__ret__.annotations,
        builtin=__ret__.builtin,
        context=__ret__.context,
        default_role=__ret__.default_role,
        description=__ret__.description,
        external=__ret__.external,
        hidden=__ret__.hidden,
        id=__ret__.id,
        labels=__ret__.labels,
        locked=__ret__.locked,
        name=__ret__.name,
        role_template_ids=__ret__.role_template_ids,
        rules=__ret__.rules)
edc2bdaba9f287995f6c4323a4acb45935be02e4
19,903
from sklearn.metrics import roc_curve, roc_auc_score def threshold_xr_via_auc(ds, df, res_factor=3, if_nodata='any'): """ Takes a xarray dataset/array of gdv likelihood values and thresholds them according to a pandas dataframe (df) of field occurrence points. Scipy roc curve and auc is generated to perform thresholding. Pandas dataframe must include absences along with presences or the roc curve cannot be performed. Parameters ---------- ds : xarray dataset/array A dataset with x, y and time dims with likelihood values. df : pandas dataframe A dataframe of field occurrences with x, y values and presence, absence column. res_factors : int Controls the tolerance of occurence points intersection with nearest pixels. In other words, number of pixels that a occurrence point can be 'out'. if_nodata : str Whether to exclude a point from the auc threshold method if any or all values are nan. Default is any. Returns ---------- ds_thresh : xarray dataset or array. """ # imports check try: except: raise ImportError('Could not import sklearn.') # notify print('Thresholding dataset via occurrence records and AUC.') # check xr type, dims, num time if not isinstance(ds, (xr.Dataset, xr.DataArray)): raise TypeError('Dataset not an xarray type.') elif 'x' not in list(ds.dims) or 'y' not in list(ds.dims): raise ValueError('No x or y dimensions in dataset.') # we need a dataset, try and convert from array was_da = False if isinstance(ds, xr.DataArray): try: was_da = True ds = ds.to_dataset(dim='variable') except: raise TypeError('Failed to convert xarray DataArray to Dataset.') # check if pandas type, columns, actual field if not isinstance(df, pd.DataFrame): raise TypeError('Occurrence records is not a pandas type.') elif 'x' not in df or 'y' not in df: raise ValueError('No x, y fields in occurrence records.') elif 'actual' not in df: raise ValueError('No actual field in occurrence records.') # check if nodatavals is in dataset if not hasattr(ds, 'nodatavals') or ds.nodatavals == 'unknown': raise AttributeError('Dataset does not have a nodatavalue attribute.') # check if res factor and if_nodata valid if not isinstance(res_factor, int) and res_factor < 1: raise TypeError('Resolution factor must be an integer of 1 or greater.') elif if_nodata not in ['any', 'all']: raise TypeError('If nodata policy must be either any or all.') # split ds into arrays depending on dims da_list = [ds] if 'time' in ds.dims: da_list = [ds.sel(time=dt) for dt in ds['time']] # loop each slice, threshold to auc thresh_list = [] for da in da_list: # take a copy da = da.copy(deep=True) # intersect points with current da df_data = df[['x', 'y', 'actual']].copy() df_data = tools.intersect_records_with_xr(ds=da, df_records=df_data, extract=True, res_factor=res_factor, if_nodata=if_nodata) # remove no data df_data = tools.remove_nodata_records(df_data, nodata_value=ds.nodatavals) # check if dataframe has 1s and 0s only unq = df_data['actual'].unique() if not np.any(unq == 1) or not np.any(unq == 0): raise ValueError('Occurrence records do not contain 1s and/or 0s.') elif len(unq) != 2: raise ValueError('Occurrence records contain more than just 1s and/or 0s.') # rename column, add column of actuals (1s) df_data = df_data.rename(columns={'like': 'predicted'}) # get fpr, tpr, thresh, auc and optimal threshold fpr, tpr, thresholds = roc_curve(df_data['actual'], df_data['predicted']) auc = roc_auc_score(df_data['actual'], df_data['predicted']) cut_off = thresholds[np.argmax(tpr - fpr)] # threshold da to cutoff and append da = da.where(da > 
cut_off) thresh_list.append(da) # notify if 'time' in ds.dims: print('AUC: {0} for time: {1}.'.format(round(auc, 3), da['time'].values)) else: print('AUC: {0} for whole dataset.'.format(round(auc, 3))) for e in fpr: print(e) print('\n') for e in tpr: print(e) print('\n') print(auc) print('\n') print(cut_off) # show print('- ' * 30) plt.show() print('- ' * 30) print('') # concat array back together if len(thresh_list) > 1: ds_thresh = xr.concat(thresh_list, dim='time').sortby('time') else: ds_thresh = thresh_list[0] if was_da: ds_thresh = ds_thresh.to_array() # notify and return print('Thresholded dataset successfully.') return ds_thresh
6415b7aa7298c7d2bf6488d5c9f0834facbd4300
19,904
def kd_or_scan(func=None, array=None, extra_data=None):
    """Decorator to allow functions to be called with a scan number or kd object"""
    if func is None:
        return partial(kd_or_scan, array=array, extra_data=extra_data)

    @wraps(func)
    def wrapper(scan, *args, **kwargs):
        # If scan number given, read the scan into the object and pass it to function.
        # np.integer covers np.int64 and friends; np.int was removed in recent NumPy.
        if isinstance(scan, (int, np.integer)):
            scan = read_scan(scan, array=array, extra_data=extra_data)
        return func(scan, *args, **kwargs)

    return wrapper
8eda0c54717293f57cd817f20a6f008abae6b825
19,905
def matching_intervals(original: DomainNode, approx: DomainNode, conf: float) -> bool:
    """Checks whether two intervals match with respect to a confidence interval."""
    # out_of_bounds = (not matching_bounds(original.domains[v], approx.domains[v], conf) for v in original.variables)
    # return not any(out_of_bounds)
    vars_in_bounds = (matching_bounds(original.domains[var], approx.domains[var], conf)
                      for var in original.variables)
    return all(vars_in_bounds)
bb2540872d0406b88551ec5b3a9ef28fbc39d366
19,906
def _make_label_sigmoid_cross_entropy_loss(logits, present_labels, split):
    """
    Helper function to create label loss

    Parameters
    ----------
    logits: tensor of shape [batch_size, num_verts, num_labels]
    present_labels: tensor of shape [batch_size, num_verts, num_labels]; labels of labelled verts
    split: tensor of shape [batch_size, num_verts], 0 if censored, 1 if not censored

    Returns
    -------
    The cross-entropy loss corresponding to the label.
    """
    if len(logits.shape) == 3:
        batch_size = tf.cast(tf.shape(input=logits)[0], dtype=tf.float32)
    else:
        batch_size = 1

    label_pred_losses = tf.compat.v1.losses.sigmoid_cross_entropy(
        present_labels, logits=logits, weights=tf.expand_dims(split, -1),
        reduction=tf.compat.v1.losses.Reduction.NONE)

    # sum rather than (tf default of) mean because ¯\_(ツ)_/¯
    label_pred_loss = tf.reduce_sum(input_tensor=label_pred_losses)

    return label_pred_loss / batch_size
290364255222f20ef864636ef2ac8df51599a587
19,907
import copy


def _merge_meta(base, child):
    """Merge the base and the child meta attributes.

    List entries, such as ``indexes`` are concatenated.
    ``abstract`` value is set to ``True`` only if defined as such
    in the child class.

    Args:
        base (dict):
            ``meta`` attribute from the base class.
        child (dict):
            ``meta`` attribute from the child class.

    Returns:
        dict:
            Merged metadata.
    """
    base = copy.deepcopy(base)
    child.setdefault('abstract', False)
    for key, value in child.items():
        if isinstance(value, list):
            base.setdefault(key, []).extend(value)
        else:
            base[key] = value

    return base
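A brief usage sketch of the merge rules described in the docstring above; the dictionary values are made up for illustration:

base = {'indexes': ['id'], 'abstract': True}
child = {'indexes': ['name'], 'table': 'users'}
merged = _merge_meta(base, child)
# Lists are concatenated, scalars are overridden, and 'abstract'
# defaults to False unless the child sets it explicitly.
assert merged == {'indexes': ['id', 'name'], 'table': 'users', 'abstract': False}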
ba219b8091244a60658bee826fbef5003d3f7883
19,908
from typing import Dict
from typing import Any


def _parse_quotes(quotes_dict: Dict[str, Dict[str, Dict[str, Any]]]) -> "RegionalQuotes":
    """
    Parse quote data for a :class:`~.DetailedProduct`.

    :param quotes_dict:
    """
    quotes: RegionalQuotes = RegionalQuotes()
    for gsp, payment_methods in quotes_dict.items():
        quotes[gsp] = {}
        for method, fuels in payment_methods.items():
            quotes[gsp][method] = {}
            for fuel, quote in fuels.items():
                quotes[gsp][method][fuel] = Quote(**quote)
    return quotes
82ea906391b5e3d23a40619eefc19eaa353e18bc
19,909
def train_validation(train_df, valid_df, epochs=100, batch_size=512, plot=False, nn_args={}): """ Wrapper for training on the complete training data and evaluating the performance on the hold-out set. Parameter: ------------------- train_df: df, train df with features and valid_df: df, validation df with features Returns: ------------------- res_df: metrics nnmodel: neural network model """ #format the dtaframe for ML X_train_full, Seqs_train_full, y_train_full = process_df(train_df) X_valid_full, Seqs_valid_full, y_valid_full = process_df(valid_df) # encode class values as integers encoder = LabelEncoder() encoder.fit(y_train_full) #output dims depending on the number of fractions output_dims = len(np.unique(train_df.Fraction)) input_dims = X_train_full.shape[1] nnmodel = models.SAX_Model(output_dim=output_dims, input_dim=input_dims, **nn_args) print (nnmodel.summary()) history = nnmodel.fit(np.array(X_train_full), np_utils.to_categorical(encoder.transform(y_train_full)), epochs=epochs, batch_size=batch_size) #fit the model to the complete training data yhat_train_prob = nnmodel.predict(np.array(X_train_full)) yhat_train_disc = yhat_train_prob.argmax(axis=1) + 1 yhat_val_prob = nnmodel.predict(np.array(X_valid_full)) yhat_val_disc = yhat_val_prob.argmax(axis=1) + 1 #evaluate res_train = pd.DataFrame(eval_predictions_complex(y_train_full, yhat_train_disc, "keras_Train")) res_valid = pd.DataFrame(eval_predictions_complex(y_valid_full, yhat_val_disc, "keras_Valid")) res_df = pd.concat([res_train.transpose(), res_valid.transpose()]) res_df.columns = eval_predictions_complex(None, None, None, True) if plot: x = np.arange(-4, 30, 1) ax1 = sns.jointplot(x=y_valid_full, y=yhat_val_disc, kind="kde", xlim=(-4, 30 ), ylim=(-4, 30 )) ax1.set_axis_labels(xlabel="True Fraction", ylabel="Prediction") ax1.ax_joint.plot(x, x, '-k') print ("Results on the validation data:") print (res_df) return(res_df, nnmodel, history)
7dffa50d427c0e74fe4f6e6a8ff1e0198304de2a
19,910
import subprocess


def retrieve_email() -> str:
    """
    Uses the Git command to retrieve the current configured user email address.

    :return: The global configured user email.
    """
    return subprocess.run(
        ["git", "config", "--get", "user.email"],
        capture_output=True,
        text=True,
    ).stdout.strip("\n")
4d2308f3b9376b9b7406f9594c52b8a8ebba04f5
19,911
import warnings import inspect def bootstrap_compute( hind, verif, hist=None, alignment="same_verifs", metric="pearson_r", comparison="m2e", dim="init", reference=["uninitialized", "persistence"], resample_dim="member", sig=95, iterations=500, pers_sig=None, compute=compute_hindcast, resample_uninit=bootstrap_uninitialized_ensemble, reference_compute=compute_persistence, **metric_kwargs, ): """Bootstrap compute with replacement. Args: hind (xr.Dataset): prediction ensemble. verif (xr.Dataset): Verification data. hist (xr.Dataset): historical/uninitialized simulation. metric (str): `metric`. Defaults to 'pearson_r'. comparison (str): `comparison`. Defaults to 'm2e'. dim (str or list): dimension(s) to apply metric over. default: 'init'. reference (str, list of str): Type of reference forecasts with which to verify. One or more of ['persistence', 'uninitialized']. If None or empty, returns no p value. resample_dim (str): dimension to resample from. default: 'member':: - 'member': select a different set of members from hind - 'init': select a different set of initializations from hind sig (int): Significance level for uninitialized and initialized skill. Defaults to 95. pers_sig (int): Significance level for persistence skill confidence levels. Defaults to sig. iterations (int): number of resampling iterations (bootstrap with replacement). Defaults to 500. compute (func): function to compute skill. Choose from [:py:func:`climpred.prediction.compute_perfect_model`, :py:func:`climpred.prediction.compute_hindcast`]. resample_uninit (func): function to create an uninitialized ensemble from a control simulation or uninitialized large ensemble. Choose from: [:py:func:`bootstrap_uninitialized_ensemble`, :py:func:`bootstrap_uninit_pm_ensemble_from_control`]. reference_compute (func): function to compute a reference forecast skill with. Default: :py:func:`climpred.prediction.compute_persistence`. ** metric_kwargs (dict): additional keywords to be passed to metric (see the arguments required for a given metric in :ref:`Metrics`). Returns: results: (xr.Dataset): bootstrapped results for the three different skills: - `initialized` for the initialized hindcast `hind` and describes skill due to initialization and external forcing - `uninitialized` for the uninitialized/historical and approximates skill from external forcing - `persistence` for the persistence forecast computed by `compute_persistence` the different results: - `verify skill`: skill values - `p`: p value - `low_ci` and `high_ci`: high and low ends of confidence intervals based on significance threshold `sig` Reference: * Goddard, L., A. Kumar, A. Solomon, D. Smith, G. Boer, P. Gonzalez, V. Kharin, et al. “A Verification Framework for Interannual-to-Decadal Predictions Experiments.” Climate Dynamics 40, no. 1–2 (January 1, 2013): 245–72. https://doi.org/10/f4jjvf. 
See also: * climpred.bootstrap.bootstrap_hindcast * climpred.bootstrap.bootstrap_perfect_model """ warn_if_chunking_would_increase_performance(hind, crit_size_in_MB=5) if pers_sig is None: pers_sig = sig if isinstance(dim, str): dim = [dim] if isinstance(reference, str): reference = [reference] if reference is None: reference = [] p = (100 - sig) / 100 ci_low = p / 2 ci_high = 1 - p / 2 p_pers = (100 - pers_sig) / 100 ci_low_pers = p_pers / 2 ci_high_pers = 1 - p_pers / 2 # get metric/comparison function name, not the alias metric = METRIC_ALIASES.get(metric, metric) comparison = COMPARISON_ALIASES.get(comparison, comparison) # get class Metric(metric) metric = get_metric_class(metric, ALL_METRICS) # get comparison function comparison = get_comparison_class(comparison, ALL_COMPARISONS) # Perfect Model requires `same_inits` setup isHindcast = True if comparison.name in HINDCAST_COMPARISONS else False reference_alignment = alignment if isHindcast else "same_inits" chunking_dims = [d for d in hind.dims if d not in CLIMPRED_DIMS] # carry alignment for compute_reference separately metric_kwargs_reference = metric_kwargs.copy() metric_kwargs_reference["alignment"] = reference_alignment # carry alignment in metric_kwargs if isHindcast: metric_kwargs["alignment"] = alignment if hist is None: # PM path, use verif = control hist = verif # slower path for hindcast and resample_dim init if resample_dim == "init" and isHindcast: warnings.warn("resample_dim=`init` will be slower than resample_dim=`member`.") ( bootstrapped_init_skill, bootstrapped_uninit_skill, bootstrapped_pers_skill, ) = _bootstrap_hindcast_over_init_dim( hind, hist, verif, dim, reference, resample_dim, iterations, metric, comparison, compute, reference_compute, resample_uninit, **metric_kwargs, ) else: # faster: first _resample_iterations_idx, then compute skill resample_func = _get_resample_func(hind) if not isHindcast: if "uninitialized" in reference: # create more members than needed in PM to make the uninitialized # distribution more robust members_to_sample_from = 50 repeat = members_to_sample_from // hind.member.size + 1 uninit_hind = xr.concat( [resample_uninit(hind, hist) for i in range(repeat)], dim="member", **CONCAT_KWARGS, ) uninit_hind["member"] = np.arange(1, 1 + uninit_hind.member.size) if dask.is_dask_collection(uninit_hind): # too minimize tasks: ensure uninit_hind get pre-computed # alternativly .chunk({'member':-1}) uninit_hind = uninit_hind.compute().chunk() # resample uninit always over member and select only hind.member.size bootstrapped_uninit = resample_func( uninit_hind, iterations, "member", replace=False, dim_max=hind["member"].size, ) bootstrapped_uninit["lead"] = hind["lead"] # effectively only when _resample_iteration_idx which doesnt use dim_max bootstrapped_uninit = bootstrapped_uninit.isel( member=slice(None, hind.member.size) ) if dask.is_dask_collection(bootstrapped_uninit): bootstrapped_uninit = bootstrapped_uninit.chunk({"member": -1}) bootstrapped_uninit = _maybe_auto_chunk( bootstrapped_uninit, ["iteration"] + chunking_dims ) else: # hindcast if "uninitialized" in reference: uninit_hind = resample_uninit(hind, hist) if dask.is_dask_collection(uninit_hind): # too minimize tasks: ensure uninit_hind get pre-computed # maybe not needed uninit_hind = uninit_hind.compute().chunk() bootstrapped_uninit = resample_func( uninit_hind, iterations, resample_dim ) bootstrapped_uninit = bootstrapped_uninit.isel( member=slice(None, hind.member.size) ) bootstrapped_uninit["lead"] = hind["lead"] if 
dask.is_dask_collection(bootstrapped_uninit): bootstrapped_uninit = _maybe_auto_chunk( bootstrapped_uninit.chunk({"lead": 1}), ["iteration"] + chunking_dims, ) if "uninitialized" in reference: bootstrapped_uninit_skill = compute( bootstrapped_uninit, verif, metric=metric, comparison="m2o" if isHindcast else comparison, dim=dim, add_attrs=False, **metric_kwargs, ) # take mean if 'm2o' comparison forced before if isHindcast and comparison != __m2o: bootstrapped_uninit_skill = bootstrapped_uninit_skill.mean("member") bootstrapped_hind = resample_func(hind, iterations, resample_dim) if dask.is_dask_collection(bootstrapped_hind): bootstrapped_hind = bootstrapped_hind.chunk({"member": -1}) bootstrapped_init_skill = compute( bootstrapped_hind, verif, metric=metric, comparison=comparison, add_attrs=False, dim=dim, **metric_kwargs, ) if "persistence" in reference: if not metric.probabilistic: pers_skill = reference_compute( hind, verif, metric=metric, dim=dim, **metric_kwargs_reference, ) # bootstrap pers if resample_dim == "init": bootstrapped_pers_skill = reference_compute( bootstrapped_hind, verif, metric=metric, **metric_kwargs_reference, ) else: # member _, bootstrapped_pers_skill = xr.broadcast( bootstrapped_init_skill, pers_skill, exclude=CLIMPRED_DIMS ) else: bootstrapped_pers_skill = bootstrapped_init_skill.isnull() # calc mean skill without any resampling init_skill = compute( hind, verif, metric=metric, comparison=comparison, dim=dim, **metric_kwargs, ) if "uninitialized" in reference: # uninit skill as mean resampled uninit skill uninit_skill = bootstrapped_uninit_skill.mean("iteration") if "persistence" in reference: if not metric.probabilistic: pers_skill = reference_compute( hind, verif, metric=metric, dim=dim, **metric_kwargs_reference ) else: pers_skill = init_skill.isnull() # align to prepare for concat if set(bootstrapped_pers_skill.coords) != set(bootstrapped_init_skill.coords): if ( "time" in bootstrapped_pers_skill.dims and "init" in bootstrapped_init_skill.dims ): bootstrapped_pers_skill = bootstrapped_pers_skill.rename( {"time": "init"} ) # allow member to be broadcasted bootstrapped_init_skill, bootstrapped_pers_skill = xr.broadcast( bootstrapped_init_skill, bootstrapped_pers_skill, exclude=("init", "lead", "time"), ) # get confidence intervals CI init_ci = _distribution_to_ci(bootstrapped_init_skill, ci_low, ci_high) if "uninitialized" in reference: uninit_ci = _distribution_to_ci(bootstrapped_uninit_skill, ci_low, ci_high) # probabilistic metrics wont have persistence forecast # therefore only get CI if persistence was computed if "persistence" in reference: if "iteration" in bootstrapped_pers_skill.dims: pers_ci = _distribution_to_ci( bootstrapped_pers_skill, ci_low_pers, ci_high_pers ) else: # otherwise set all persistence outputs to false pers_ci = init_ci == -999 # pvalue whether uninit or pers better than init forecast if "uninitialized" in reference: p_uninit_over_init = _pvalue_from_distributions( bootstrapped_uninit_skill, bootstrapped_init_skill, metric=metric ) if "persistence" in reference: p_pers_over_init = _pvalue_from_distributions( bootstrapped_pers_skill, bootstrapped_init_skill, metric=metric ) # wrap results together in one xr object if reference == []: results = xr.concat( [ init_skill, init_ci.isel(quantile=0, drop=True), init_ci.isel(quantile=1, drop=True), ], dim="results", ) results["results"] = ["verify skill", "low_ci", "high_ci"] results["skill"] = ["initialized"] results = results.squeeze() elif reference == ["persistence"]: skill = 
xr.concat([init_skill, pers_skill], dim="skill", **CONCAT_KWARGS) skill["skill"] = ["initialized", "persistence"] # ci for each skill ci = xr.concat([init_ci, pers_ci], "skill", coords="minimal").rename( {"quantile": "results"} ) ci["skill"] = ["initialized", "persistence"] results = xr.concat([skill, p_pers_over_init], dim="results", **CONCAT_KWARGS) results["results"] = ["verify skill", "p"] if set(results.coords) != set(ci.coords): res_drop = [c for c in results.coords if c not in ci.coords] ci_drop = [c for c in ci.coords if c not in results.coords] results = results.drop_vars(res_drop) ci = ci.drop_vars(ci_drop) results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS) results["results"] = ["verify skill", "p", "low_ci", "high_ci"] elif reference == ["uninitialized"]: skill = xr.concat([init_skill, uninit_skill], dim="skill", **CONCAT_KWARGS) skill["skill"] = ["initialized", "uninitialized"] # ci for each skill ci = xr.concat([init_ci, uninit_ci], "skill", coords="minimal").rename( {"quantile": "results"} ) ci["skill"] = ["initialized", "uninitialized"] results = xr.concat([skill, p_uninit_over_init], dim="results", **CONCAT_KWARGS) results["results"] = ["verify skill", "p"] if set(results.coords) != set(ci.coords): res_drop = [c for c in results.coords if c not in ci.coords] ci_drop = [c for c in ci.coords if c not in results.coords] results = results.drop_vars(res_drop) ci = ci.drop_vars(ci_drop) results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS) results["results"] = ["verify skill", "p", "low_ci", "high_ci"] elif set(reference) == set(["uninitialized", "persistence"]): skill = xr.concat( [init_skill, uninit_skill, pers_skill], dim="skill", **CONCAT_KWARGS ) skill["skill"] = ["initialized", "uninitialized", "persistence"] # probability that i beats init p = xr.concat( [p_uninit_over_init, p_pers_over_init], dim="skill", **CONCAT_KWARGS ) p["skill"] = ["uninitialized", "persistence"] # ci for each skill ci = xr.concat([init_ci, uninit_ci, pers_ci], "skill", coords="minimal").rename( {"quantile": "results"} ) ci["skill"] = ["initialized", "uninitialized", "persistence"] results = xr.concat([skill, p], dim="results", **CONCAT_KWARGS) results["results"] = ["verify skill", "p"] if set(results.coords) != set(ci.coords): res_drop = [c for c in results.coords if c not in ci.coords] ci_drop = [c for c in ci.coords if c not in results.coords] results = results.drop_vars(res_drop) ci = ci.drop_vars(ci_drop) results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS) results["results"] = ["verify skill", "p", "low_ci", "high_ci"] else: raise ValueError("results not created") # Attach climpred compute information to skill metadata_dict = { "confidence_interval_levels": f"{ci_high}-{ci_low}", "bootstrap_iterations": iterations, "reference": reference, } if reference is not None: metadata_dict[ "p" ] = "probability that reference performs better than initialized" metadata_dict.update(metric_kwargs) results = assign_attrs( results, hind, alignment=alignment, metric=metric, comparison=comparison, dim=dim, function_name=inspect.stack()[0][3], # take function.__name__ metadata_dict=metadata_dict, ) # Ensure that the lead units get carried along for the calculation. The attribute # tends to get dropped along the way due to ``xarray`` functionality. results["lead"] = hind["lead"] if "units" in hind["lead"].attrs and "units" not in results["lead"].attrs: results["lead"].attrs["units"] = hind["lead"].attrs["units"] return results
1a419d129419d15276f21f6f09bbd613a8e662da
19,912
def description_for_number(numobj, lang, script=None, region=None):
    """Return a text description of a PhoneNumber object for the given language.

    The description might consist of the name of the country where the phone
    number is from and/or the name of the geographical area the phone number
    is from. This function explicitly checks the validity of the number passed in.

    Arguments:
    numobj -- The PhoneNumber object for which we want to get a text description.
    lang -- A 2-letter lowercase ISO 639-1 language code for the language in
            which the description should be returned (e.g. "en")
    script -- A 4-letter titlecase (first letter uppercase, rest lowercase)
            ISO script code as defined in ISO 15924, separated by an
            underscore (e.g. "Hant")
    region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB")

    Returns a text description in the given language code, for the given phone
    number, or an empty string if no description is available."""
    ntype = number_type(numobj)
    if ntype == PhoneNumberType.UNKNOWN:
        return ""
    elif not is_number_type_geographical(ntype, numobj.country_code):
        return country_name_for_number(numobj, lang, script, region)
    return description_for_valid_number(numobj, lang, script, region)
d67d53528c99c8b3ce6323c7e4eb5170603660c1
19,913
def _construct_new_particles(samples, old_particles):
    """Construct new array of particles given the drawing results over the old particles.

    Args:
        + *samples* (np.ndarray): NxM array that contains the drawing results,
          where N is number of observations and M number of particles.
        + *old_particles* (np.ndarray): 3xNxM array that stores old particles.

    Returns:
        + new particles (np.ndarray): 3xNxM array of newly assembled particles
          (for each observation, there will be repeated particles).
    """
    N, M = samples.shape
    ret_arr = 5 * np.ones((3, N, M))
    m_outer = np.zeros(N)
    while 0 < np.amax(samples):
        indices = np.nonzero(samples)
        last_n = -1
        for i, n in enumerate(indices[0]):
            if last_n < n:
                if last_n >= 0:
                    m_outer[last_n] += m_inner
                m_inner = 0
            ret_arr[:, n, int(m_outer[n] + m_inner)] = old_particles[:, n, indices[1][i]]
            m_inner += 1
            last_n = n
        m_outer[last_n] += m_inner
        samples[indices] -= 1
    return ret_arr
ec511554074f637466d47d24c449eda8a263100e
19,914
def trunc(x, y, w, h):
    """Truncates x and y coordinates to live in the (0, 0) to (w, h) box.

    Args:
        x: the x-coordinate of a point
        y: the y-coordinate of a point
        w: the width of the truncation box
        h: the height of the truncation box.
    """
    return min(max(x, 0), w - 1), min(max(y, 0), h - 1)
3edecdfbd9baf24f8b4f3f71b9e35a222c6be1ea
19,915
import os
import sys


def testInputLog(log_file):
    """ Test the user input for issues in the DNS query logs """
    # if the path is a file
    if os.path.isfile(log_file):
        pass
    else:
        print("WARNING: Bad Input - Use a DNS (text) log file which has one domain per row without any other data or punctuation.")
        print("Exiting...")
        sys.exit(0)

    # Return NULL
    return None
c50900dbef8d978e3f7b8349a7ae072c2bab3415
19,916
def exact_match(true_labels, predicts):
    """ exact_match

    This is the most strict metric for the multi label setting. It's defined
    as the percentage of samples that have all their labels correctly classified.

    Parameters
    ----------
    true_labels: numpy.ndarray of shape (n_samples, n_target_tasks)
        A matrix with the true labels for all the classification tasks and for
        n_samples.

    predicts: numpy.ndarray of shape (n_samples, n_target_tasks)
        A matrix with the predictions for all the classification tasks and for
        n_samples.

    Returns
    -------
    float
        The exact match percentage between the given sets.

    Examples
    --------
    >>> from skmultiflow.evaluation.metrics.metrics import exact_match
    >>> true_labels = [[0,1,0,1],[0,0,0,1],[1,1,0,1],[1,1,1,1]]
    >>> predictions = [[0,1,0,1],[0,1,1,0],[0,1,0,1],[1,1,1,1]]
    >>> exact_match(true_labels, predictions)
    0.5
    """
    if not hasattr(true_labels, 'shape'):
        true_labels = np.asarray(true_labels)
    if not hasattr(predicts, 'shape'):
        predicts = np.asarray(predicts)
    N, L = true_labels.shape
    return np.sum(np.sum((true_labels == predicts) * 1, axis=1) == L) * 1. / N
ebcc1d6ce96ff8b5933e16ce69f5e143e371bf28
19,917
def dimred3(dat):
    """Convenience function to dimensionally reduce input data, each row being
    an element in some vector space, to dimension 3 using PCA calculated by the SVD."""
    return dimred(dat, 3)
5151ee8bb0e8bcfe6dbb1633d95e9b355714ae35
19,918
def render_orchestrator_registrations( driver: Driver = None, collab_id: str = None, project_id: str = None ): """ Renders out retrieved registration metadata in a custom form Args: driver (Driver): A connected Synergos driver to communicate with the selected orchestrator. collab_id (str): ID of selected collaboration to be rendered project_id (str): ID of selected project to be rendered """ # Type 1 view: Orchestrator's Perspective if driver and collab_id and project_id: registry_data = driver.registrations.read_all( collab_id=collab_id, project_id=project_id ).get('data', []) participant_ids = [reg['key']['participant_id'] for reg in registry_data] # Type 2 view: Insufficiant keys -> Render nothing else: registry_data = [] participant_ids = [] selected_participant_id = st.selectbox( label="Participant ID:", options=participant_ids, help="""Select an participant to view.""" ) if registry_data: selected_registry = [ reg for reg in registry_data if reg['key']['participant_id'] == selected_participant_id ].pop() else: selected_registry = {} with st.beta_container(): render_participant( driver=driver, participant_id=selected_participant_id ) with st.beta_expander("Registration Details"): reg_renderer.display(selected_registry) with st.beta_expander("Tag Details"): tags = selected_registry.get('relations', {}).get('Tag', []) tag_details = tags.pop() if tags else {} tag_renderer.display(tag_details) with st.beta_expander("Alignment Details"): alignments = selected_registry.get('relations', {}).get('Alignment', []) alignment_details = alignments.pop() if alignments else {} align_renderer.display(alignment_details) return selected_participant_id
14a84029ff20a09d2c8c6e41007827f96fb35f60
19,919
def check_nan(data, new_data):
    """checks if nan values are conserved """
    old = np.isnan(data)
    new = np.isnan(new_data)
    if np.all(new == old):
        return True
    else:
        return False
d1dafaadd6e37848aa147b6714cce74f6097e074
19,920
def Var(poly, dist=None, **kws):
    """
    Element by element 2nd order statistics.

    Args:
        poly (chaospy.poly.ndpoly, Dist):
            Input to take variance on.
        dist (Dist):
            Defines the space the variance is taken on. It is ignored if
            ``poly`` is a distribution.

    Returns:
        (numpy.ndarray):
            Element for element variance along ``poly``, where
            ``variation.shape == poly.shape``.

    Examples:
        >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
        >>> chaospy.Var(dist)
        array([1., 4.])
        >>> x, y = chaospy.variable(2)
        >>> poly = chaospy.polynomial([1, x, y, 10*x*y])
        >>> chaospy.Var(poly, dist)
        array([  0.,   1.,   4., 800.])
    """
    if dist is None:
        dist, poly = poly, polynomials.variable(len(poly))
    poly = polynomials.setdim(poly, len(dist))
    if not poly.isconstant:
        return poly.tonumpy()**2
    poly = poly - E(poly, dist, **kws)
    poly = polynomials.square(poly)
    return E(poly, dist, **kws)
6ec5e867ed7287f90584e0c134d29b8daf4f9b9c
19,921
def op_par_loop_parse(text): """Parsing for op_par_loop calls""" loop_args = [] search = "op_par_loop" i = text.find(search) while i > -1: arg_string = text[text.find('(', i) + 1:text.find(';', i + 11)] # parse arguments in par loop temp_args = [] num_args = 0 # parse each op_arg_dat search2 = "op_arg_dat" search3 = "op_arg_gbl" search4 = "op_opt_arg_dat" j = arg_string.find(search2) k = arg_string.find(search3) l = arg_string.find(search4) while j > -1 or k > -1 or l > -1: index = min(j if (j > -1) else sys.maxint,k if (k > -1) else sys.maxint,l if (l > -1) else sys.maxint ) if index == j: temp_dat = get_arg_dat(arg_string, j) # append this struct to a temporary list/array temp_args.append(temp_dat) num_args = num_args + 1 j = arg_string.find(search2, j + 11) elif index == k: temp_gbl = get_arg_gbl(arg_string, k) # append this struct to a temporary list/array temp_args.append(temp_gbl) num_args = num_args + 1 k = arg_string.find(search3, k + 11) elif index == l: temp_dat = get_opt_arg_dat(arg_string, l) # append this struct to a temporary list/array temp_args.append(temp_dat) num_args = num_args + 1 l = arg_string.find(search4, l + 15) temp = {'loc': i, 'name1': arg_string.split(',')[0].strip(), 'name2': arg_string.split(',')[1].strip(), 'set': arg_string.split(',')[2].strip(), 'args': temp_args, 'nargs': num_args} loop_args.append(temp) i = text.find(search, i + 10) print '\n\n' return (loop_args)
826bb5cd58e4b34846419fc47977caa73fd5573c
19,922
def bin_to_hex(bin_str: str) -> str:
    """Convert a binary string to a hex string.

    The returned hex string will contain the prefix '0x' only if given a
    binary string with the prefix '0b'.

    Args:
        bin_str (str): Binary string (e.g. '0b1001')

    Returns:
        str: Hexadecimal string zero-padded to len(bin_str) // 4

    Example:
        >>> bin_str = '0b1010101111001101'
        >>> bin_to_hex(bin_str)
        '0xabcd'
        >>> bin_to_hex(bin_str[2:])  # remove '0b'
        'abcd'
    """
    if not isinstance(bin_str, str):
        raise TypeError(f'Expecting type str. given {bin_str.__class__.__name__}.')
    # Keep the '0x' literal only when the input carries a '0b' prefix.
    literal = '0x' if bin_str[:2].lower() == '0b' else ''
    num_nibbles = len(bin_str) // BITS_PER_NIBBLE
    bin_str = bin_str[:num_nibbles * BITS_PER_NIBBLE]  # truncate to whole number of nibbles
    return literal + hex(int(bin_str, 2))[2:].zfill(num_nibbles)
0f44311a600a7b5eac52d3716db4b116302c97ac
19,923
def exp_value_interpolate_bp(prod_inst, util_opti, b_ssv_sd, k_ssv_sd, epsilon_ssv_sd, b_ssv, k_ssv, epsilon_ssv, b_ssv_zr, k_ssv_zr, epsilon_ssv_zr, states_vfi_dim, shocks_vfi_dim): """interpolate value function and expected value function. Need three matrix here: 1. state matrix x shock matrix where optimal choices were solved at - previously, shock for this = 0, but now shock vector might not be zero 2. state matrix x shock matrix where shocks are drawn monte carlo way to allow for averaging, integrating over shocks for each x row 3. state matrix alone, shock = 0, each of the x row in matrix x """ 'A Get States to Integrate over' k_alpha_ae_sd, b_ssv_sd, \ k_alpha_ae, b_ssv, \ k_alpha_ae_zr, b_ssv_zr = \ inter_states_bp(prod_inst, util_opti, b_ssv_sd, k_ssv_sd, epsilon_ssv_sd, b_ssv, k_ssv, epsilon_ssv, b_ssv_zr, k_ssv_zr, epsilon_ssv_zr, states_vfi_dim, shocks_vfi_dim) 'B. invoke' util_emax = \ exp_value_interpolate_main(u1=util_opti, x1=k_alpha_ae_sd, y1=b_ssv_sd, x2=k_alpha_ae, y2=b_ssv, x2_noshk=k_alpha_ae_zr, y2_noshk=b_ssv_zr, states_dim=states_vfi_dim, shocks_dim=shocks_vfi_dim, return_uxy=False) 'C. collect' interpolant_exp_v = {'evu': util_emax, 'kae': k_alpha_ae_zr, 'b': b_ssv_zr} return interpolant_exp_v
e8d698834186efa779bbd81b042e9cf4caa1276a
19,924
from typing import Union
from typing import List


def remove_non_protein(
    molecule: oechem.OEGraphMol,
    exceptions: Union[None, List[str]] = None,
    remove_water: bool = False,
) -> oechem.OEGraphMol:
    """
    Remove non-protein atoms from an OpenEye molecule.

    Parameters
    ----------
    molecule: oechem.OEGraphMol
        An OpenEye molecule holding a molecular structure.
    exceptions: None or list of str
        Exceptions that should not be removed.
    remove_water: bool
        If water should be removed.

    Returns
    -------
    selection: oechem.OEGraphMol
        An OpenEye molecule holding the filtered structure.
    """
    if exceptions is None:
        exceptions = []
    if remove_water is False:
        exceptions.append("HOH")

    # do not change input mol
    selection = molecule.CreateCopy()

    for atom in selection.GetAtoms():
        residue = oechem.OEAtomGetResidue(atom)
        if residue.IsHetAtom():
            if residue.GetName() not in exceptions:
                selection.DeleteAtom(atom)

    return selection
6afa4df25cbcf504b2ac06325a3e89291e9a0e4f
19,925
import json


def configure_connection(instance, name='eventstreams', credentials=None):
    """Configures IBM Streams for a certain connection.

    Creates an application configuration object containing the required
    properties with connection information.

    Example for creating a configuration for a Streams instance with
    connection details::

        from icpd_core import icpd_util
        from streamsx.rest_primitives import Instance
        import streamsx.eventstreams as es

        cfg = icpd_util.get_service_instance_details(name='your-streams-instance')
        cfg[streamsx.topology.context.ConfigParams.SSL_VERIFY] = False
        instance = Instance.of_service(cfg)
        app_cfg = es.configure_connection(instance, credentials='my_crdentials_json')

    Args:
        instance(streamsx.rest_primitives.Instance): IBM Streams instance object.
        name(str): Name of the application configuration, default name is 'eventstreams'.
        credentials(str|dict): The service credentials for Eventstreams.

    Returns:
        Name of the application configuration.

    .. warning:: The function can be used only in IBM Cloud Pak for Data.

    .. versionadded:: 1.1
    """
    description = 'Eventstreams credentials'
    properties = {}
    if credentials is None:
        raise TypeError(credentials)

    if isinstance(credentials, dict):
        properties['eventstreams.creds'] = json.dumps(credentials)
    else:
        properties['eventstreams.creds'] = credentials

    # check if application configuration exists
    app_config = instance.get_application_configurations(name=name)
    if app_config:
        print('update application configuration: ' + name)
        app_config[0].update(properties)
    else:
        print('create application configuration: ' + name)
        instance.create_application_configuration(name, properties, description)
    return name
5f263af94590e7237e27dc90f2e502b952d010fc
19,926
def setup(app):
    """
    Any time a python class is referenced, make it a pretty link that doesn't
    include the full package path. This makes the base classes much prettier.
    """
    app.add_role_to_domain("py", "class", truncate_class_role)
    return {"parallel_read_safe": True}
69660fd86216dfe0a5642b0885dbdb0704ce8ffc
19,927
def transects_to_gdf(transects):
    """
    Saves the shore-normal transects as a gpd.GeoDataFrame

    KV WRL 2018

    Arguments:
    -----------
    transects: dict
        contains the coordinates of the transects

    Returns:
    -----------
    gdf_all: gpd.GeoDataFrame
    """
    # loop through the mapped shorelines
    for i, key in enumerate(list(transects.keys())):
        # save the geometry + attributes
        geom = geometry.LineString(transects[key])
        gdf = gpd.GeoDataFrame(geometry=gpd.GeoSeries(geom))
        gdf.index = [i]
        gdf.loc[i, 'name'] = key
        # store into geodataframe
        if i == 0:
            gdf_all = gdf
        else:
            gdf_all = gdf_all.append(gdf)

    return gdf_all
a2e1c517a7d4d86618a08da07459686fa947d597
19,928
def deduce_final_configuration(fetched_config):
    """
    Fills some variables in configuration based on those already extracted.

    Args:
        fetched_config (dict): Configuration variables extracted from a living environment.

    Returns:
        dict: Final configuration from live environment.
    """
    final_config = fetched_config.copy()
    final_config[THRIFT_SERVER_URL] = _get_thrift_server_url(final_config)
    final_config[HIVE_SERVER_URL] = _get_hive_server_url(final_config)
    return final_config
30d21a8eb0bd1d282dbd127551e55fc7061e82ed
19,929
def total_benchmark_return_nb(benchmark_value: tp.Array2d) -> tp.Array1d:
    """Get total market return per column/group."""
    out = np.empty(benchmark_value.shape[1], dtype=np.float_)
    for col in range(benchmark_value.shape[1]):
        out[col] = returns_nb.get_return_nb(benchmark_value[0, col], benchmark_value[-1, col])
    return out
74d2924031dc4f0251b346555bc473d8d225453d
19,930
def young_modulus(data):
    """
    Given a stress-strain dataset, returns Young's Modulus.
    """
    yielding = yield_stress(data)[0]

    """Finds the yield index"""
    yield_index = 0
    for index, point in enumerate(data):
        if (point == yielding).all():
            yield_index = index
            break

    """Finds data in elastic region"""
    elastic = data[:yield_index + 1]

    """
    Finds the upper yield point (lower yield point is the *yielding* variable).
    We're taking the first element ([0]) because it returns the first element
    that meets the criteria in parentheses. It's a two-dimensional array so we
    have to do this twice.
    """
    upperyieldpoint_index = np.where(elastic == max(elastic[:, 1]))[0][0]
    upperyieldpoint = elastic[upperyieldpoint_index]

    """We estimate the region until the first upper yield point with a linear model"""
    lin_elastic_region = elastic[:upperyieldpoint_index + 1]

    """The slope of this region is Young's Modulus"""
    return (lin_elastic_region[-1, 1] - lin_elastic_region[0, 1]) / (lin_elastic_region[-1, 0] - lin_elastic_region[0, 0])
f41d4c358ae58760055d72e0364a3f79b7258512
19,931
def generateODTableDf(database: pd.DataFrame, save: bool = True) -> pd.DataFrame:
    """Generate the dataset for the per-section OD (origin-destination) table.

    Args:
        database (pd.DataFrame): The initialized raw dataset.
        save (bool, optional): Whether to additionally save the result as a csv file. Defaults to True.

    Returns:
        pd.DataFrame: Dataset for the per-section OD table.
    """
    table4OD: np.ndarray = fetchTable4OD(database, originStations)
    df4OD: pd.DataFrame = pd.DataFrame(
        table4OD, columns=originStations, index=originStations
    )
    if save:
        df4OD.to_csv(SEPERATOR.join([".", "result", "raw", "OD表.csv"]))
    return df4OD
9660d1c604e0f3514bb9a168ef91b3f29d7ba8b9
19,932
import typing


def check_datatype(many: bool):
    """Checks if data/filter to be inserted is a dictionary"""
    def wrapper(func):
        def inner_wrapper(self, _filter={}, _data=None, **kwargs):
            if _data is None:
                # statements without two args - find, insert etc
                if many:
                    # statements that expect a list of dictionaries: insert_many
                    if isinstance(_filter, typing.Sequence):
                        return func(self, _filter, **kwargs)
                    else:
                        raise TypeError("Unexpected Datatype.")
                if isinstance(_filter, dict):
                    return func(self, _filter, **kwargs)
                else:
                    raise TypeError("Unexpected Datatype.")
            else:
                # update statements
                if isinstance(_filter, dict) and isinstance(_data, dict):
                    return func(self, _filter, _data, **kwargs)
                else:
                    raise TypeError("Unexpected Datatype.")
        return inner_wrapper
    return wrapper
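A hedged usage sketch of the decorator above; FakeCollection and its methods are hypothetical stand-ins, not part of the original code:

# Hypothetical usage: the decorator wraps collection-style methods so that a
# plain dict (or a sequence of dicts when many=True) is required.
class FakeCollection:
    @check_datatype(many=False)
    def find(self, _filter={}, **kwargs):
        return _filter

    @check_datatype(many=True)
    def insert_many(self, _filter, **kwargs):
        return len(_filter)

coll = FakeCollection()
coll.find({"name": "alice"})            # OK: dict passes the check
coll.insert_many([{"a": 1}, {"a": 2}])  # OK: list of dicts
# coll.find(["not", "a", "dict"])       # would raise TypeError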
c5300507936db04b2ae5e4190421cc354f6ac2d4
19,933
def login():
    """Login Page"""
    if request.cookies.get('user_id') and request.cookies.get('username'):
        session['user_id'] = request.cookies.get('user_id')
        session['username'] = request.cookies.get('username')
        update_last_login(session['user_id'])
        return render_template('main/index.html', username=session['username'])

    login_form = LoginForm()
    if login_form.validate_on_submit():
        username = request.form['username']
        password = (request.form['password'])
        user_id = check_user_exist(username, password)
        if user_id:
            response = login_user(user_id, username)
            return response
        else:
            flash('Username/Password Incorrect!')
    return render_template('auth/login.html', form=login_form)
e8f02d520c5913e8d8d2c99d1a98b9c546a9a220
19,934
def _get_index_train_test_path(split_num, train=True):
    """
    Method to generate the path containing the training/test split for the given
    split number (generally from 1 to 20).

    @param split_num      Split number for which the data has to be generated
    @param train          Is true if the data is training data. Else false.
    @return path          Path of the file containing the required data
    """
    if train:
        return _DATA_DIRECTORY_PATH + "index_train_" + str(split_num) + ".txt"
    else:
        return _DATA_DIRECTORY_PATH + "index_test_" + str(split_num) + ".txt"
201ac816085211b1f6500e2b84d5e9b293dd8c2e
19,935
def mock_socket() -> MagicMock:
    """A mock websocket."""
    return MagicMock(spec=WebSocket)
a3b8e53d2c929566e2bc9419cfb1d56ca3f25032
19,936
import os def extract_annotations_objtrk(out_path, in_image, project_id, track_prefix, **kwargs): """ out_path: str in_image: BiaflowsCytomineInput project_id: int track_prefix: str kwargs: dict """ image = in_image.object path = os.path.join(out_path, in_image.filename) data, dim_order, _ = imread(path, return_order=True) ndim = get_dimensionality(dim_order) if ndim < 3: raise ValueError("Object tracking should be at least 3D (only {} spatial dimension(s) found)".format(ndim)) tracks = TrackCollection() annotations = AnnotationCollection() if ndim == 3: slices = mask_to_objects_3d(data, time=True, assume_unique_labels=True) time_to_image = get_depth_to_slice(image) for slice_group in slices: curr_tracks, curr_annots = create_tracking_from_slice_group( image, slice_group, slice2point=lambda _slice: _slice.polygon.centroid, depth2slice=time_to_image, id_project=project_id, upload_object=True, upload_group_id=True, track_prefix=track_prefix + "-object" ) tracks.extend(curr_tracks) annotations.extend(curr_annots) elif ndim == 4: objects = mask_to_objects_3dt(mask=data) depths_to_image = get_depth_to_slice(image, depth=("time", "depth")) # TODO add tracking lines one way or another for time_steps in objects: label = time_steps[0][0].label track = Track(name="{}-{}".format(track_prefix, label), id_image=image.id, color=DEFAULT_COLOR).save() Property(track, key="label", value=label).save() annotations.extend([ Annotation( location=change_referential(p=slice.polygon, height=image.height).wkt, id_image=image.id, id_project=project_id, id_tracks=[track.id], slice=depths_to_image[(slice.time, slice.depth)].id ) for slices in time_steps for slice in slices ]) tracks.append(track) else: raise ValueError("Annotation extraction for object tracking does not support masks with more than 4 dims...") return tracks, annotations
0fb1ec074ae01d536fcf9a8e63536e6d8f02bdf8
19,937
def gen_device(dtype, ip, mac, desc, cloud): """Convenience function that generates devices based on they type.""" devices = { # sp1: [0], sp2: [ 0x2711, # SP2 0x2719, 0x7919, 0x271A, 0x791A, # Honeywell SP2 0x2720, # SPMini 0x753E, # SP3 0x7D00, # OEM branded SP3 0x947A, 0x9479, # SP3S 0x2728, # SPMini2 0x2733, 0x273E, # OEM branded SPMini 0x7530, 0x7546, 0x7918, # OEM branded SPMini2 0x7D0D, # TMall OEM SPMini3 0x2736, # SPMiniPlus ], rm: [ 0x2712, # RM2 0x2737, # RM Mini 0x273D, # RM Pro Phicomm 0x2783, # RM2 Home Plus 0x277C, # RM2 Home Plus GDT 0x278F, # RM Mini Shate 0x27C2, # RM Mini 3 0x27D1, # new RM Mini3 0x27DE, # RM Mini 3 (C) ], rm4: [ 0x51DA, # RM4 Mini 0x5F36, # RM Mini 3 0x6070, # RM4c Mini 0x610E, # RM4 Mini 0x610F, # RM4c 0x62BC, # RM4 Mini 0x62BE, # RM4c 0x6364, # RM4S 0x648D, # RM4 mini 0x6539, # RM4c Mini 0x653A, # RM4 mini ], rmp: [ 0x272A, # RM2 Pro Plus 0x2787, # RM2 Pro Plus2 0x279D, # RM2 Pro Plus3 0x27A9, # RM2 Pro Plus_300 0x278B, # RM2 Pro Plus BL 0x2797, # RM2 Pro Plus HYC 0x27A1, # RM2 Pro Plus R1 0x27A6, # RM2 Pro PP ], rm4p: [ 0x6026, # RM4 Pro 0x61A2, # RM4 pro 0x649B, # RM4 pro 0x653C, # RM4 pro ], a1: [0x2714], # A1 mp1: [ 0x4EB5, # MP1 0x4EF7, # Honyar oem mp1 0x4F1B, # MP1-1K3S2U 0x4F65, # MP1-1K3S2U ], # hysen: [0x4EAD], # Hysen controller # S1C: [0x2722], # S1 (SmartOne Alarm Kit) # dooya: [0x4E4D] # Dooya DT360E (DOOYA_CURTAIN_V2) } # Look for the class associated to devtype in devices [device_class] = [dev for dev in devices if dtype in devices[dev]] or [None] if device_class is None: print("Unknow device type 0x%x" % dtype) return BroadlinkDevice(dtype, name=desc, cloud=cloud) return device_class(ip=ip, mac=mac, devtype=dtype, name=desc, cloud=cloud)
07c9ff4ee594bf0c94aa95efc05f63306811e996
19,938
import subprocess


def berks(berks_bin, path, action='update'):
    """
    Execute various berks commands

    :rtype : tuple
    :param berks_bin: path to berks bin
    :param path: path to change directory to before running berks commands
        (berks is a dir context aware tool)
    :param action: berks action to run, e.g. berks install
    :return: tpl. output, errors, returncode
    """
    cmd = 'cd {0} && {1} {2}'.format(path, berks_bin, action)
    p = subprocess.Popen(cmd,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=True,
                         shell=True)
    output, errors = p.communicate()
    return output, errors, p.returncode
c0f20cccc3a9be747f45f6253a61b455bef69f2c
19,939
def container_clone(request, pk):
    """
    Make a clone of the container.
    Todo: show params on OPTIONS call.
    Todo: permissions

    :param pk: pk of the container that needs to be cloned
    :param name
    :param description
    """
    params = {}
    data = request.data

    if not data.get('name'):
        return Response({"error": "please provide name for the clone: {\"name\" : \"some name \"}"})
    params['name'] = data.get('name')

    if data.get('description'):
        params['description'] = data.get('description')

    origin = get_container(pk)

    # validate permissions
    validate_object_permission(ContainerDetailPermission, request, origin)

    if origin:
        clone = origin.clone(**params)
        clone.save()
        serializer = ContainerSerializer(clone)
        return Response(serializer.data, status=status.HTTP_201_CREATED)
    else:
        return Response({"error": "Container not found!", "data": data})
c546d2810dd62b88b737477df3ad836c4aabb20b
19,940
def parse_multi_id_graph(graph, ids):
    """
    Parse a graph with 1 to 3 ids and return individual graphs
    with their own braced IDs.
    """
    new_graphs = ''
    LEVEL_STATE.next_token = ids[0]
    pid1 = LEVEL_STATE.next_id()
    split1 = graph.partition('({})'.format(ids[1]))
    text1 = combine_bolds(split1[0])
    pid2_marker = split1[1]
    remainder = bold_first_italics(split1[2])
    new_graphs += "\n{" + pid1 + "}\n"
    new_graphs += text1 + '\n'
    LEVEL_STATE.next_token = ids[1]
    pid2 = LEVEL_STATE.next_id()
    new_graphs += "\n{" + pid2 + "}\n"
    if len(ids) == 2:
        text2 = combine_bolds(" ".join([pid2_marker, remainder]))
        new_graphs += text2 + '\n'
        return new_graphs
    else:
        split2 = remainder.partition('({})'.format(ids[2]))
        pid3_marker = split2[1]
        remainder2 = bold_first_italics(split2[2])
        text2 = combine_bolds(" ".join([pid2_marker, split2[0]]))
        new_graphs += text2 + '\n'
        LEVEL_STATE.next_token = ids[2]
        pid3 = LEVEL_STATE.next_id()
        new_graphs += "\n{" + pid3 + "}\n"
        text3 = combine_bolds(" ".join([pid3_marker, remainder2]))
        new_graphs += text3 + '\n'
        return new_graphs
3dc693e359573ec1a2e71400856e0383653a5533
19,941
import os


def filter_multimappers(align_file, data):
    """
    It does not seem like bowtie2 has a corollary to the -m 1 flag in bowtie,
    there are some options that are close but don't do the same thing. Bowtie2
    sets the XS flag for reads mapping in more than one place, so we can just
    filter on that. This will not work for other aligners.
    """
    config = dd.get_config(data)
    type_flag = "" if bam.is_bam(align_file) else "S"
    base, ext = os.path.splitext(align_file)
    out_file = base + ".unique" + ext
    bed_file = dd.get_variant_regions(data)
    bed_cmd = '-L {0}'.format(bed_file) if bed_file else " "
    if utils.file_exists(out_file):
        return out_file
    base_filter = '-F "[XS] == null and not unmapped {paired_filter} and not duplicate" '
    if bam.is_paired(align_file):
        paired_filter = "and paired and proper_pair"
    else:
        paired_filter = ""
    filter_string = base_filter.format(paired_filter=paired_filter)
    sambamba = config_utils.get_program("sambamba", config)
    num_cores = dd.get_num_cores(data)
    with file_transaction(out_file) as tx_out_file:
        cmd = ('{sambamba} view -h{type_flag} '
               '--nthreads {num_cores} '
               '-f bam {bed_cmd} '
               '{filter_string} '
               '{align_file} '
               '> {tx_out_file}')
        message = "Removing multimapped reads from %s." % align_file
        do.run(cmd.format(**locals()), message)
    bam.index(out_file, config)
    return out_file
76e55ca8f29eee13d50bb74249cc25ae91f56d22
19,942
import types
import inspect


def try_run(obj, names):
    """Given a list of possible method names, try to run them with the
    provided object. Keep going until something works. Used to run
    setup/teardown methods for module, package, and function tests.
    """
    for name in names:
        func = getattr(obj, name, None)
        if func is not None:
            if type(obj) == types.ModuleType:
                # py.test compatibility
                try:
                    args, varargs, varkw, defaults = inspect.getargspec(func)
                except TypeError:
                    # Not a function. If it's callable, call it anyway
                    if hasattr(func, '__call__'):
                        func = func.__call__
                    try:
                        args, varargs, varkw, defaults = \
                            inspect.getargspec(func)
                        args.pop(0)  # pop the self off
                    except TypeError:
                        raise TypeError("Attribute %s of %r is not a python "
                                        "function. Only functions or callables"
                                        " may be used as fixtures." %
                                        (name, obj))
                if len(args):
                    log.debug("call fixture %s.%s(%s)", obj, name, obj)
                    return func(obj)
            log.debug("call fixture %s.%s", obj, name)
            return func()
d35377eecd31ce70ae8e4588ac04320d8b48845f
19,943
import os
import requests


def download_file(url, path=None, clobber=False):
    """
    thanks to: https://stackoverflow.com/questions/16694907/how-to-download-large-file-in-python-with-requests-py

    path : str
        local path to download to.
    """
    if path is None:
        local_filename = os.path.join(directory, url.split("/")[-1])
    else:
        local_filename = path

    if os.path.exists(local_filename) and not clobber:
        getLogger().info("{} exists; not downloading.".format(local_filename))
        return local_filename

    # NOTE the stream=True parameter
    r = requests.get(url, stream=True)
    with open(local_filename, "wb") as f:
        for chunk in r.iter_content(chunk_size=1024):
            if chunk:  # filter out keep-alive new chunks
                f.write(chunk)
                # f.flush() commented by recommendation from J.F.Sebastian
    return local_filename
54076659f79fb35305eb5a5ea3d0dd1eba402bc7
19,944
def param_to_secopt(param):
    """Convert a parameter name to INI section and option.
    Split on the first dot. If not dot exists, return name
    as option, and None for section."""
    sep = '.'
    sep_loc = param.find(sep)
    if sep_loc == -1:
        # no dot in name, skip it
        section = None
        option = param
    else:
        section = param[0:sep_loc]
        option = param[sep_loc+1:]
    return (section, option)
7d7e2b03cb67ed26d184f85f0328236674fa6497
19,945
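A minimal usage sketch for the `param_to_secopt` record above; the parameter names are made up for illustration and assume the function is in scope as defined.

assert param_to_secopt("database.host") == ("database", "host")
assert param_to_secopt("timeout") == (None, "timeout")
assert param_to_secopt("a.b.c") == ("a", "b.c")  # splits on the first dot only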
from typing import List
from typing import Dict
import json


def load_contracts(
    web3: web3.Web3, contracts_file: str, contracts_names: List[str]
) -> Dict[str, web3.contract.Contract]:
    """
    Given a list of contract names, returns a dict of contract names and contracts.
    """
    res = {}
    with open(contracts_file) as infile:
        source_json = json.load(infile)
    for contract_name in contracts_names:
        try:
            res[contract_name] = web3.eth.contract(
                address=source_json[contract_name]["address"],
                abi=source_json[contract_name]["abi"],
            )
        except (KeyError, InvalidAddress) as ex:
            raise ex
    return res
6f6c47c5742de0c61eddacfd9358b6d86eefb525
19,946
import logging


def removecandidate(_id=''):
    """ Remove a candidate from the candidate list
        Use with the lexcion's identifiers
        /removecandidate?identifier=katt..nn.1
    """
    lexicon = request.args.get('lexicon', C.config['default'])
    lexconf = lexconfig.get_lexiconconf(lexicon)
    try:
        identifier = request.args.get('identifier', '')
        # ask karp for the identifier
        q = 'extended||and|%s.search|equals|%s' % ('identifier', identifier)
        res = helpers.karp_query('query', query={'q': q},
                                 mode=lexconf['candidateMode'],
                                 resource=lexconf['candidatelexiconName'])
        _id = helpers.es_first_id(res)
    except Exception as e1:
        logging.error(e1)
        raise e.MflException("Could not find candidate %s" % identifier,
                             code="unknown_candidate")
    # delete it
    ans = helpers.karp_delete(_id, lexconf['candidatelexiconName'])
    return jsonify({"deleted": ans})
55d9cfede364a35e44cf44b597653de598867d55
19,947
def outlier_dates_correction(series, coef=2.0):
    """Corrects the dates that are outliers.

    It receives all the dates in which samples were collected, for example
    for a patient and tries to (i) identify outliers and (ii) correct them
    with the best possible date.

    .. note: Using mean/std for outliers...
    .. note: Should I use days which is more interpretable?

    .. warning: Remember to include always the raw value just in case
                that was the best! Should I check only values that are
                outside range?

    Parameters
    ----------
    series: series with datetime64[ns]
    coeff:

    Returns
    -------
    datetime64[ns] series with corrected dates.
    """
    # Check datetime series or str series (errors='raise)
    # Copy series too!

    # Find outliers
    outliers = np.abs(series - series.mean()) > coef * series.std()

    """
    print(outliers)
    print(np.abs(series - series.mean()))
    print(coef * series.std())
    print(series.quantile([0.05, 0.95]))

    from scipy.spatial.distance import pdist, cdist
    from itertools import product

    #e = np.abs(series - series.mean())
    e = (series - series.mean()).abs().dt.days
    p = np.array(list(product(e, e)))
    #p = np.array([series, series])
    print(p)
    a = pd.DataFrame(p)
    a = a.apply(lambda x: np.abs(x[0]-x[1]), axis=1)
    print(a)
    print(cdist(p))

    #e = series.astype(int)
    #print(e) # / np.timedelta64(-1, 'D')
    print(e)
    import sys
    sys.exit()
    a = list(product(e, e))
    #print(a)
    print(pdist(np.array(a)))
    #print(cdist(series.values, series.values))
    import sys
    sys.exit()
    """

    """
    if len(series) < 3:
        return series
    """

    """
    print("\n\n\nFinding outliers...")
    print("Consecutive distances:")
    print(ddiff)
    print("\nThe mean")
    print(mean)
    print("\nThe difference")
    print(dff)
    print("\nOutliers")
    print(outliers)
    """

    if len(series) < 3:
        return series

    ddiff = series.diff().dt.days.abs()
    mean = series[ddiff <= 3].mean()
    dff = (series - mean).abs()
    outliers = dff.dt.days > 10

    # Do corrections
    if outliers.any():
        # Compute min and max
        mn, mx, mean = series[~outliers].min(), \
                       series[~outliers].max(), \
                       series[~outliers].mean()

        # Compute various corrections
        r = series[outliers] \
            .transform([lambda x: x, swap_day_month, one_year_more, one_year_less])

        # Find the closest
        days = (r - mean).abs()
        idx = (r - mean).abs().idxmin(axis=1)

        #print(series)
        #print(r[idx].squeeze()) # When two outliers it breaks!

        # Replace
        series[outliers] = r[idx].squeeze()
        #print("U")

        # Return
        return series

    # Return
    return series
fa371b682f943801affa47d3f1a13f5daac9323a
19,948
def svn_log_entry_dup(*args):
    """svn_log_entry_dup(svn_log_entry_t log_entry, apr_pool_t pool) -> svn_log_entry_t"""
    return _core.svn_log_entry_dup(*args)
223e7aa1dbf890eae3c5aa86a08cac287ce796c8
19,949
from typing import IO
from io import StringIO


def input_stream() -> IO:
    """Input stream fixture."""
    return StringIO(
        """mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0"""
    )
6e9e65754478b0ff69d220f683140675d3f6efdc
19,950
def get_lr_scheduler(optimizer: Optimizer, cfg: CfgNode, start_epoch: int = 0):
    """Returns LR scheduler module"""
    # Get mode
    if cfg.TRAIN.LOSS.TYPE in ["categorical_crossentropy", "focal_loss"]:
        mode = "min"
    else:
        raise NotImplementedError

    if cfg.TRAIN.SCHEDULER.TYPE == "ReduceLROnPlateau":
        scheduler = ReduceLROnPlateau(
            optimizer,
            mode,
            factor=cfg.TRAIN.SCHEDULER.FACTOR,
            patience=cfg.TRAIN.SCHEDULER.PATIENCE,
            verbose=True,
        )
    elif cfg.TRAIN.SCHEDULER.TYPE == "StepLR":
        scheduler = StepLR(
            optimizer,
            step_size=cfg.TRAIN.SCHEDULER.PATIENCE,
            gamma=cfg.TRAIN.SCHEDULER.FACTOR,
            last_epoch=start_epoch - 1,
        )
    elif cfg.TRAIN.SCHEDULER.TYPE == "None":
        scheduler = None
    else:
        raise NotImplementedError

    logger.info(f"Used scheduler: {scheduler}")
    return scheduler
59bbb672ac74fcc0331e5cba5bd722ce41049a5d
19,951
async def create_and_open_pool(pool_name, pool_genesis_txn_file):
    """
    Creates a new local pool ledger configuration.
    Then open that pool and return the pool handle that can be used later
    to connect pool nodes.

    :param pool_name: Name of the pool ledger configuration.
    :param pool_genesis_txn_file: Pool configuration json. if NULL, then
                                  default config will be used.
    :return: The pool handle was created.
    """
    utils.print_header("\nCreate Ledger\n")
    await create_pool_ledger_config(pool_name, pool_genesis_txn_file)

    utils.print_header("\nOpen pool ledger\n")
    pool_handle = await pool.open_pool_ledger(pool_name, None)

    return pool_handle
eb05893870ff1b8928391c0e748d21b9eb8aef66
19,952
def rotate_char(c, n):
    """Rotate a single character n places in the alphabet

    n is an integer
    """
    # alpha_number and new_alpha_number will represent the
    # place in the alphabet (as distinct from the ASCII code)
    # So alpha_number('a')==0
    # alpha_base is the ASCII code for the first letter of the
    # alphabet (different for upper and lower case)
    if c.islower():
        alpha_base = ord('a')
    elif c.isupper():
        alpha_base = ord('A')
    else:
        # Don't rotate character if it's not a letter
        return c
    # Position in alphabet, starting with a=0
    alpha_number = ord(c) - alpha_base
    # New position in alphabet after shifting
    # The % 26 at the end is for modulo 26, so if we shift it
    # past z (or a to the left) it'll wrap around
    new_alpha_number = (alpha_number + n) % 26
    # Add the new position in the alphabet to the base ASCII code for
    # 'a' or 'A' to get the new ASCII code, and use chr() to convert
    # that code back to a letter
    return chr(alpha_base + new_alpha_number)
b1259722c7fb2a60bd943e86d87163866432539f
19,953
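A short illustrative sketch built on `rotate_char` above; `rotate_string` is a hypothetical helper added here only to show a Caesar-style shift over a whole string.

def rotate_string(s, n):
    """Rotate every character in s by n places, leaving non-letters alone."""
    return "".join(rotate_char(c, n) for c in s)

assert rotate_string("Hello, World!", 13) == "Uryyb, Jbeyq!"
assert rotate_char("z", 1) == "a"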
def subset_language(vocabulary, vectors, wordlist, N=32768):
    """
    Subset the vocabulary/vectors to those in a wordlist.
    The wordlist is a list arranged in order of 'preference'.
    Note: we hope the vocabulary is contained in the wordlist, but it might not be.
    N is the number of words we require. If the wordlist contains fewer than N
    words, (but the vocabulary has >= N), we supplement the result from the
    vocabulary randomly.
    Also, we want to make sure the order of vocabulary is random (because some
    structure could negatively influence the optimisation procedure later).
    """
    keep_indices = []       # indices of vocabulary/vectors to keep
    added = 0
    if type(wordlist) == str:
        # load from path
        print 'Loading wordlist from', wordlist
        wordlist = np.loadtxt(wordlist, dtype=str)
    else:
        assert type(wordlist) == list or type(wordlist) == np.ndarray
    print 'Subsetting vocabulary.'
    for word in wordlist:
        print word
        if added == N:
            break
        try:
            word_index = vocabulary.index(word)
            keep_indices.append(word_index)
            added += 1
        except ValueError:
            continue
    print 'Acquired', len(keep_indices), 'words.'
    miss = N - len(keep_indices)
    if miss > 0:
        print 'Supplementing with', miss, 'random words.'
        for i in xrange(miss):
            random_index = np.random.choice(len(vocabulary), 1)
            while random_index in keep_indices:
                random_index = np.random.choice(len(vocabulary), 1)
            keep_indices.append(random_index)
    print 'Shuffling.'
    # shuffle
    np.random.shuffle(keep_indices)
    # populate new arrays
    print 'Populating subsetted arrays.'
    vectors_subset = np.array([vectors[i] for i in keep_indices])
    vocabulary_subset = [vocabulary[i] for i in keep_indices]
    return vocabulary_subset, vectors_subset
17c5718134f25f1ef7b6ed8fb0086fc1f45d058b
19,954
def compile_subject(*, subject_id, date_of_birth, sex):
    """Compiles the NWB Subject object."""
    return Subject(subject_id=subject_id, date_of_birth=date_of_birth, sex=sex)
86fc69318cfac98f44b11fa4f4c2a47423da317d
19,955
from typing import Any


def circulation(**kwargs: Any) -> str:
    """Url to get :class:`~pymultimatic.model.component.Circulation` details."""
    return _CIRCULATION.format(**kwargs)
021d28b92cfac9a69723796d2ee29f53dc16039d
19,956
def split_parentheses(info):
    """
    make all strings inside parentheses a list

    :param s: a list of strings (called info)
    :return: info list without parentheses
    """
    # if we see the "(" sign, then we start adding stuff to a temp list
    # in case of ")" sign, we append the temp list to the new_info list
    # otherwise, just add the string to the new_info list
    new_info = []
    make_list = False
    current_list = []
    for idx in range(len(info)):
        if info[idx] == "(":
            make_list = True
        elif info[idx] == ")":
            make_list = False
            new_info.append(current_list)
            current_list = []
        else:
            if make_list:
                current_list.append(info[idx])
            else:
                new_info.append(info[idx])
    return new_info
37006936d52abe31e6d5e5d264440ab4950d874b
19,957
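A minimal usage sketch for `split_parentheses` above; the token list is invented for illustration.

tokens = ["a", "(", "b", "c", ")", "d"]
assert split_parentheses(tokens) == ["a", ["b", "c"], "d"]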
from typing import Optional
from typing import Callable
import inspect


def event(
    name: Optional[str] = None, *, handler: bool = False
) -> Callable[[EventCallable], EventCallable]:
    """Create a new event using the signature of a decorated function.

    Events must be defined before handlers can be registered using
    before_event, on_event, after_event, or event_handler.

    :param handler: When True, the decorated function implementation is
        registered as an on event handler.
    """
    def decorator(fn: EventCallable) -> EventCallable:
        event_name = name if name else fn.__name__
        module = inspect.currentframe().f_back.f_locals.get("__module__", None)
        if handler:
            # If the method body is a handler, pass the signature directly into
            # `create_event` as we are going to pass the method body into `on_event`
            signature = inspect.Signature.from_callable(fn)
            create_event(event_name, signature, module=module)
        else:
            create_event(event_name, fn, module=module)

        if handler:
            decorator = on_event(event_name)
            return decorator(fn)
        else:
            return fn

    return decorator
ce7821bbe67c3c776f8dfa4b69a4bed25ab814e3
19,958
import sys


def getcallargs(func, *positional, **named):
    """Get the mapping of arguments to values.

    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'."""
    args, varargs, varkw, defaults = getargspec(func)
    f_name = func.__name__
    arg2value = {}

    # The following closures are basically because of tuple parameter unpacking.
    assigned_tuple_params = []
    def assign(arg, value):
        if isinstance(arg, str):
            arg2value[arg] = value
        else:
            assigned_tuple_params.append(arg)
            value = iter(value)
            for i, subarg in enumerate(arg):
                try:
                    subvalue = next(value)
                except StopIteration:
                    raise ValueError('need more than %d %s to unpack' %
                                     (i, 'values' if i > 1 else 'value'))
                assign(subarg,subvalue)
            try:
                next(value)
            except StopIteration:
                pass
            else:
                raise ValueError('too many values to unpack')
    def is_assigned(arg):
        if isinstance(arg,str):
            return arg in arg2value
        return arg in assigned_tuple_params
    if ismethod(func) and func.im_self is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.im_self,) + positional
    num_pos = len(positional)
    num_total = num_pos + len(named)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0
    for arg, value in zip(args, positional):
        assign(arg, value)
    if varargs:
        if num_pos > num_args:
            assign(varargs, positional[-(num_pos-num_args):])
        else:
            assign(varargs, ())
    elif 0 < num_args < num_pos:
        raise TypeError('%s() takes %s %d %s (%d given)' % (
            f_name, 'at most' if defaults else 'exactly', num_args,
            'arguments' if num_args > 1 else 'argument', num_total))
    elif num_args == 0 and num_total:
        if varkw:
            if num_pos:
                # XXX: We should use num_pos, but Python also uses num_total:
                raise TypeError('%s() takes exactly 0 arguments '
                                '(%d given)' % (f_name, num_total))
        else:
            raise TypeError('%s() takes no arguments (%d given)' %
                            (f_name, num_total))
    for arg in args:
        if isinstance(arg, str) and arg in named:
            if is_assigned(arg):
                raise TypeError("%s() got multiple values for keyword "
                                "argument '%s'" % (f_name, arg))
            else:
                assign(arg, named.pop(arg))
    if defaults:    # fill in any missing values with the defaults
        for arg, value in zip(args[-num_defaults:], defaults):
            if not is_assigned(arg):
                assign(arg, value)
    if varkw:
        assign(varkw, named)
    elif named:
        unexpected = next(iter(named))
        if isinstance(unexpected, unicode):
            unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')
        raise TypeError("%s() got an unexpected keyword argument '%s'" %
                        (f_name, unexpected))
    unassigned = num_args - len([arg for arg in args if is_assigned(arg)])
    if unassigned:
        num_required = num_args - num_defaults
        raise TypeError('%s() takes %s %d %s (%d given)' % (
            f_name, 'at least' if defaults else 'exactly', num_required,
            'arguments' if num_required > 1 else 'argument', num_total))
    return arg2value
a693bed0b7c5f6d6ab8caf2b669f4368b56c8ea8
19,959
import re


def add_target_to_anchors(string_to_fix, target="_blank"):
    """Given arbitrary string, find <a> tags and add target attributes"""
    pattern = re.compile("<a(?P<attributes>.*?)>")

    def repl_func(matchobj):
        pattern = re.compile("target=['\"].+?['\"]")
        attributes = matchobj.group("attributes")
        if pattern.search(attributes):
            return "<a%s>" % re.sub(pattern, "target='%s'" % target, attributes)
        else:
            return "<a%s target='%s'>" % (attributes, target)

    return re.sub(pattern, repl_func, string_to_fix)
4650dcf933e9b6e153646c6b7f3535881e4db1f8
19,960
def calcInvariants(S, R, gradT, with_tensor_basis=False, reduced=True):
    """
    This function calculates the invariant basis at one point.

    Arguments:
    S -- symmetric part of local velocity gradient (numpy array shape (3,3))
    R -- anti-symmetric part of local velocity gradient (numpy array shape (3,3))
    gradT -- array with local temperature gradient (numpy array shape (3,))
    with_tensor_basis -- optional, a flag that determines whether to also
                         calculate tensor basis. By default, it is false
                         (so only invariants are returned)
    reduced -- optional argument, a boolean flag that determines whether the
               features that depend on a vector (lambda 7 thru lambda 13)
               should be calculated. If reduced==True, extra features are NOT
               calculated. Default value is True.

    Returns:
    invariants -- array of shape (n_features-2,) that contains the invariant
                  basis from the gradient tensors that are used by the ML
                  model to make a prediction at the current point.
    tensor_basis -- array of shape (n_basis,3,3) that contains the form
                    invariant tensor basis that are used by the TBNN to
                    construct the tensorial diffusivity at the current point.

    # Taken from the paper of Zheng, 1994, "Theory of representations for
    # tensor functions - A unified invariant approach to constitutive equations"
    """
    # For speed, pre-calculate these
    S2 = np.linalg.multi_dot([S, S])
    R2 = np.linalg.multi_dot([R, R])
    S_R2 = np.linalg.multi_dot([S, R2])

    ### Fill basis 0-12
    if reduced:
        num_features = constants.NUM_FEATURES_F2 - 2
    else:
        num_features = constants.NUM_FEATURES_F1 - 2
    invariants = np.empty(num_features)

    # Velocity gradient only (0-5)
    invariants[0] = np.trace(S2)
    invariants[1] = np.trace(np.linalg.multi_dot([S2, S]))
    invariants[2] = np.trace(R2)
    invariants[3] = np.trace(S_R2)
    invariants[4] = np.trace(np.linalg.multi_dot([S2, R2]))
    invariants[5] = np.trace(np.linalg.multi_dot([S2, R2, S, R]))

    # Velocity + temperature gradients (6-12)
    if not reduced:
        invariants[6] = np.linalg.multi_dot([gradT, gradT])
        invariants[7] = np.linalg.multi_dot([gradT, S, gradT])
        invariants[8] = np.linalg.multi_dot([gradT, S2, gradT])
        invariants[9] = np.linalg.multi_dot([gradT, R2, gradT])
        invariants[10] = np.linalg.multi_dot([gradT, S, R, gradT])
        invariants[11] = np.linalg.multi_dot([gradT, S2, R, gradT])
        invariants[12] = np.linalg.multi_dot([gradT, R, S_R2, gradT])

    # Also calculate the tensor basis
    if with_tensor_basis:
        tensor_basis = np.empty((constants.N_BASIS, 3, 3))
        tensor_basis[0,:,:] = np.eye(3)
        tensor_basis[1,:,:] = S
        tensor_basis[2,:,:] = R
        tensor_basis[3,:,:] = S2
        tensor_basis[4,:,:] = R2
        tensor_basis[5,:,:] = np.linalg.multi_dot([S, R]) + np.linalg.multi_dot([R, S])
        return invariants, tensor_basis

    return invariants
2ce8407843947c4f7c9779d061971822707f147e
19,961
def with_uproot(histo_path: str) -> bh.Histogram:
    """Reads a histogram with uproot and returns it.

    Args:
        histo_path (str): path to histogram, use a colon to distinguish
            between path to file and path to histogram within file
            (example: ``file.root:h1``)

    Returns:
        bh.Histogram: histogram containing data
    """
    hist = uproot.open(histo_path).to_boost()
    return hist
c03e7c7054a550769c23c904892c0c327b2bcafa
19,962
def slide5x5(xss):
    """Slide five artists at a time."""
    return slidingwindow(5, 5, xss)
56374e53384d2012d2e6352efcd0e972ff3d04bf
19,963
def compute_consensus_rule(
    profile,
    committeesize,
    algorithm="fastest",
    resolute=True,
    max_num_of_committees=MAX_NUM_OF_COMMITTEES_DEFAULT,
):
    """
    Compute winning committees with the Consensus rule.

    Based on Perpetual Consensus from
    Martin Lackner. Perpetual Voting: Fairness in Long-Term Decision Making.
    In Proceedings of the 34th AAAI Conference on Artificial Intelligence (AAAI 2020)

    Parameters
    ----------
    profile : abcvoting.preferences.Profile
        A profile.

    committeesize : int
        The desired committee size.

    algorithm : str, optional
        The algorithm to be used.

        The following algorithms are available for the Consensus rule:

        .. doctest::

            >>> Rule("consensus-rule").algorithms
            ('float-fractions', 'gmpy2-fractions', 'standard-fractions')

    resolute : bool, optional
        Return only one winning committee.

        If `resolute=False`, all winning committees are computed
        (subject to `max_num_of_committees`).

    max_num_of_committees : int, optional
        At most `max_num_of_committees` winning committees are computed.

        If `max_num_of_committees=None`, the number of winning committees is
        not restricted. The default value of `max_num_of_committees` can be
        modified via the constant `MAX_NUM_OF_COMMITTEES_DEFAULT`.

    Returns
    -------
    list of CandidateSet
        A list of winning committees.
    """
    rule_id = "consensus-rule"
    rule = Rule(rule_id)
    if algorithm == "fastest":
        algorithm = rule.fastest_available_algorithm()
    rule.verify_compute_parameters(
        profile=profile,
        committeesize=committeesize,
        algorithm=algorithm,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
    )

    committees, detailed_info = _consensus_rule_algorithm(
        profile=profile,
        committeesize=committeesize,
        algorithm=algorithm,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
    )

    # optional output
    output.info(header(rule.longname), wrap=False)
    if not resolute:
        output.info("Computing all possible winning committees for any tiebreaking order")
        output.info(" (aka parallel universes tiebreaking) (resolute=False)\n")
    output.details(f"Algorithm: {ALGORITHM_NAMES[algorithm]}\n")
    output.info(
        str_committees_with_header(committees, cand_names=profile.cand_names, winning=True)
    )
    # end of optional output

    return committees
0dd12aa8faab485a62cdeccfaf87385df85b0b7f
19,964
def addcron():
    """
    {
        "uid": "张三",
        "mission_name": "定时服务名字",
        "pid": "c3009c8e62544a23ba894fe5519a6b64",
        "EnvId": "9d289cf07b244c91b81ce6bb54f2d627",
        "SuiteIdList": ["75cc456d9c4d41f6980e02f46d611a5c"],
        "runDate": 1239863854,
        "interval": 60,
        "alwaysSendMail": true,
        "alarmMailGroupList": "['4dc0e648e61846a4aca01421aa1202e2', '2222222222222']",
        "triggerType": "interval"
    }
    """
    try:
        require_items = get_post_items(request, CronJob.REQUIRE_ITEMS, throwable=True)
        option_items = get_post_items(request, CronJob.OPTIONAL_ITEMS)
        require_items.update(option_items)
        require_items.update({"uid": g.user_object_id})
        mission_name = get_models_filter(CronJob,
                                         CronJob.mission_name == require_items["mission_name"])
        if mission_name != []:
            return jsonify({'status': 'failed', 'data': '名字已存在'})
        temp = require_items.get("alarmMailGroupList")
        require_items["alarmMailGroupList"] = str(temp)
        times = Run_Times(**require_items)
        if times == True:
            _model = create_model(CronJob, **require_items)
            cron_manager.add_cron(
                **{
                    "mission_name": require_items.get("mission_name"),
                    "mode": require_items.get("triggerType"),
                    "seconds": require_items.get("interval"),
                    "run_Date": require_items.get("runDate"),
                    "task_Job": require_items,
                    "object_id": _model.object_id,
                })
            return jsonify({'status': 'ok', 'object_id': _model.object_id})
        else:
            return jsonify(times)
    except BaseException as e:
        return jsonify({'status': 'failed', 'data': '新建失败 %s' % e})
87aca95b6486bbbd9abd9277aa3e2eb39b7bbdad
19,965
def dict_expand(d, prefix=None):
    """
    Recursively expand subdictionaries returning dictionary

    dict_expand({1:{2:3}, 4:5}) = {(1,2):3, 4:5}
    """
    result = {}
    for k, v in d.items():
        if isinstance(v, dict):
            result.update(dict_expand(v, prefix=k))
        else:
            result[k] = v

    if prefix is not None:
        result = {make_tuple(prefix) + make_tuple(k): v
                  for k, v in result.items()}

    return result
842503eaffca7574f127b731216b5f5b10ddf86f
19,966
def parse_config_list(config_list):
    """
    Parse a list of configuration properties separated by '='
    """
    if config_list is None:
        return {}
    else:
        mapping = {}
        for pair in config_list:
            if (constants.CONFIG_SEPARATOR not in pair) or (pair.count(constants.CONFIG_SEPARATOR) != 1):
                raise ValueError("configs must be passed as two strings separted by a %s",
                                 constants.CONFIG_SEPARATOR)
            (config, value) = pair.split(constants.CONFIG_SEPARATOR)
            mapping[config] = value
        return mapping
12ab7dc51420196a60ef027ea606a837da3b1b59
19,967
def collaspe_fclusters(data=None, t=None, row_labels=None, col_labels=None,
                       linkage='average', pdist='euclidean',
                       standardize=3, log=False):
    """a function to collaspe flat clusters by averaging the vectors within
    each flat clusters achieved from hierarchical clustering"""
    ## preprocess data
    if log:
        data = np.log2(data + 1.0)
    if standardize == 1:
        # Standardize along the columns of data
        data = zscore(data, axis=0)
    elif standardize == 2:
        # Standardize along the rows of data
        data = zscore(data, axis=1)

    if row_labels is not None and col_labels is None:
        ## only get fclusters for rows
        d = dist.pdist(data, metric=pdist)
        axis = 1  ##!!! haven't checked whether this is correct yet
    elif row_labels is None and col_labels is not None:
        ## only get fclusters for cols
        d = dist.pdist(data.T, metric=pdist)
        axis = 0
    D = dist.squareform(d)
    Y = sch.linkage(D, method=linkage, metric=pdist)
    fclusters = sch.fcluster(Y, t, 'distance')
    fcluster_set = set(fclusters)
    data_cf = []
    for fc in fcluster_set:
        mask = np.where(fclusters == fc)
        data_t = data.T
        vector_avg = np.average(data_t[mask], axis=axis)
        data_cf.append(vector_avg)
    data_cf = np.array(data_cf).T
    return data_cf
879ba2e9469831b096f716dbbb38047580d76844
19,968
from typing import Union


def iapproximate_add_fourier_state(self, lhs: Union[int, QuantumRegister],
                                   rhs: QRegisterPhaseLE,
                                   qcirc: QuantumCircuit,
                                   approximation: int = None) -> ApproximateAddFourierStateGate:
    """Substract two registers with rhs in quantum fourier state."""
    if isinstance(lhs, QuantumRegister):
        self._check_qreg(lhs)
        self._check_dups([lhs, rhs])
    self._check_qreg(rhs)
    return self._attach(ApproximateAddFourierStateGate(lhs, rhs, qcirc,
                                                       approximation).inverse())
3e1ccb2576e8babdb589c60aec51f585001bdd9a
19,969
import itertools


def _get_indices(A):
    """Gets the index for each element in the array."""
    dim_ranges = [range(size) for size in A.shape]
    if len(dim_ranges) == 1:
        return dim_ranges[0]
    return itertools.product(*dim_ranges)
dc2e77c010a6cfd7dbc7b7169f4bd0d8da62b891
19,970
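A quick sketch of both branches of `_get_indices` above; the arrays are hypothetical.

import numpy as np

A = np.zeros((2, 3))
assert list(_get_indices(A)) == [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]

v = np.zeros(4)  # the 1-D case returns a plain range
assert list(_get_indices(v)) == [0, 1, 2, 3]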
def calculateOriginalVega(f, k, r, t, v, cp):
    """Compute the original (raw) vega value."""
    price1 = calculatePrice(f, k, r, t, v*STEP_UP, cp)
    price2 = calculatePrice(f, k, r, t, v*STEP_DOWN, cp)
    vega = (price1 - price2) / (v * STEP_DIFF)
    return vega
7b90662003231b50c4d758c3a7beb122b90c05e7
19,971
from typing import Optional
from typing import Dict
import sys
import os
import subprocess


def run_ansible_lint(
    *argv: str,
    cwd: Optional[str] = None,
    executable: Optional[str] = None,
    env: Optional[Dict[str, str]] = None
) -> CompletedProcess:
    """Run ansible-lint on a given path and returns its output."""
    if not executable:
        executable = sys.executable
        args = [sys.executable, "-m", "ansiblelint", *argv]
    else:
        args = [executable, *argv]

    # It is not safe to pass entire env for testing as other tests would
    # pollute the env, causing weird behaviors, so we pass only a safe list of
    # vars.
    safe_list = [
        "HOME",
        "LANG",
        "LC_ALL",
        "LC_CTYPE",
        "NO_COLOR",
        "PATH",
        "PYTHONIOENCODING",
        "PYTHONPATH",
        "TERM",
    ]

    if env is None:
        _env = {}
    else:
        _env = env
    for v in safe_list:
        if v in os.environ and v not in _env:
            _env[v] = os.environ[v]

    return subprocess.run(
        args,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        shell=False,  # needed when command is a list
        check=False,
        cwd=cwd,
        env=_env,
        universal_newlines=True,
    )
9d4911f903727a7fab33eced48d3856ad592e8e9
19,972
def to_numpy(tensor):
    """ Converts a PyTorch Tensor to a Numpy array"""
    if isinstance(tensor, np.ndarray):
        return tensor
    if hasattr(tensor, 'is_cuda'):
        if tensor.is_cuda:
            return tensor.cpu().detach().numpy()
    if hasattr(tensor, 'detach'):
        return tensor.detach().numpy()
    if hasattr(tensor, 'numpy'):
        return tensor.numpy()

    return np.array(tensor)
c5186918fe7a07054607df500d61b32ae1b0037f
19,973
import glob
import os


def show_tables():
    """
    Load all available data files that have the right format.

    All files will be assumed to have the same 8 fields in the header.
    This demonstrates pulling a specific file name as well as wildcard file
    search in the uploads/ directory. And as an example, filtering the data
    to only show relevant (IC50 and EC50) rows in the app.
    """
    header_format = ['target_name', 'uniprot_id', 'smiles', 'bindingdb_id',
                     'affinity_type', 'affinity_value', 'source', 'price']
    list_of_data = []

    data_path = 'bindingDB_purchase_target_subset.tsv'
    data = pd.read_csv(data_path, sep='\t', names=header_format, header=0)  # for tsv file
    # data = pd.read_excel(data_path)  # for Excel file
    data.set_index(['bindingdb_id'], inplace=True)
    list_of_data.append(data)

    uploaded_files = glob.glob(os.path.join(app.config['UPLOAD_FOLDER'], "*.csv"))
    app.logger.warning("Loading files:")
    app.logger.warning(os.path.join(app.config['UPLOAD_FOLDER'], "*.csv"))
    app.logger.warning(uploaded_files)
    for upload_file in uploaded_files:
        app.logger.warning("Loading uploaded file %s" % upload_file)
        data = pd.read_csv(upload_file, names=header_format, header=0)  # for csv file
        data.set_index(['bindingdb_id'], inplace=True)
        list_of_data.append(data)

    df = pd.concat(list_of_data)
    df.index.name = None
    ic50_data = df.loc[df['affinity_type'].str.contains("IC50")]
    ec50_data = df.loc[df['affinity_type'].str.contains("EC50")]
    return render_template('view.html',
                           tables=[ic50_data.to_html(classes='IC50'),
                                   ec50_data.to_html(classes='EC50')],
                           titles=['na', 'IC50 data', 'EC50 data'])
f815eb657a97c76b2c872ebda3774080de7cbd33
19,974
import os
import sys


def _redirect_io(inp, out, f):
    """Calls the function `f` with ``sys.stdin`` changed to `inp`
    and ``sys.stdout`` changed to `out`.  They are restored when `f`
    returns.  This function returns whatever `f` returns.
    """
    oldin, sys.stdin = sys.stdin, inp
    oldout, sys.stdout = sys.stdout, out
    try:
        x = f()
    finally:
        sys.stdin = oldin
        sys.stdout = oldout
    if os.environ.get('PYPNG_TEST_TMP') and hasattr(out, 'getvalue'):
        name = mycallersname()
        if name:
            w = open(name + '.png', 'wb')
            w.write(out.getvalue())
            w.close()
    return x
30c00128826789ca008e2ca4dcc8fe5f754fe4f6
19,975
def _maybe_to_dense(obj):
    """ try to convert to dense """
    if hasattr(obj, 'to_dense'):
        return obj.to_dense()
    return obj
a2f18aec19bd0bad58a35e772180b94d649262e1
19,976
def update_visit_counter(visit_counter_matrix, observation, action):
    """Update the visit counter

    Counting how many times a state-action pair has been
    visited. This information can be used during the update.
    @param visit_counter_matrix a matrix initialised with zeros
    @param observation the state observed
    @param action the action taken
    """
    x = observation[0]
    y = observation[1]
    z = observation[2]
    visit_counter_matrix[x, y, z, action] += 1.0
    return visit_counter_matrix
418097d34f194c81e38e3d6b122ae743c7b73452
19,977
from datetime import datetime


def pandas_time_safe(series):
    """Pandas check time safe"""
    return (series.map(dt_seconds)
            if isinstance(series.iloc[0], datetime.time)
            else series)
f802d7ad4cd9c9dbf426b2c1436c41402b24da0b
19,978
def binary_cross_entropy_loss(predicted_y, true_y):
    """Compute the binary cross entropy loss between a vector of labels of
    size N and a vector of probabilities of same size

    Parameters
    ----------
    predicted_y : numpy array of shape (N, 1)
        The predicted probabilities
    true_y : numpy array of shape (N, )
        The true labels

    Returns
    -------
    binary_cross_entropy_loss a numpy array of shape (N, )
    """
    return (-np.log(np.squeeze(predicted_y)) * true_y
            - np.log(1 - np.squeeze(predicted_y)) * (1 - true_y))
ba72db9051976a9d07355a1b246a22faea43b2b1
19,979
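A small made-up example for `binary_cross_entropy_loss` above; probabilities must lie strictly in (0, 1) to keep the logs finite.

import numpy as np

predicted = np.array([[0.9], [0.2], [0.7]])   # shape (N, 1)
labels = np.array([1, 0, 1])                  # shape (N,)
losses = binary_cross_entropy_loss(predicted, labels)
print(losses)         # per-sample losses: -log(0.9), -log(0.8), -log(0.7)
print(losses.mean())  # averaged loss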
def money_flow_index(high, low, close, volume, n=14, fillna=False):
    """Money Flow Index (MFI)

    Uses both price and volume to measure buying and selling pressure. It is
    positive when the typical price rises (buying pressure) and negative when
    the typical price declines (selling pressure). A ratio of positive and
    negative money flow is then plugged into an RSI formula to create an
    oscillator that moves between zero and one hundred.

    http://stockcharts.com/school/doku.php?id=chart_school:technical_indicators:money_flow_index_mfi

    Args:
        high(pandas.Series): dataset 'High' column.
        low(pandas.Series): dataset 'Low' column.
        close(pandas.Series): dataset 'Close' column.
        volume(pandas.Series): dataset 'Volume' column.
        n(int): n period.
        fillna(bool): if True, fill nan values.

    Returns:
        pandas.Series: New feature generated.
    """
    # 0 Prepare dataframe to work
    df = pd.DataFrame([high, low, close, volume]).T
    df.columns = ['High', 'Low', 'Close', 'Volume']
    df['Up_or_Down'] = 0
    df.loc[(df['Close'] > df['Close'].shift(1)), 'Up_or_Down'] = 1
    df.loc[(df['Close'] < df['Close'].shift(1)), 'Up_or_Down'] = 2

    # 1 typical price
    tp = (df['High'] + df['Low'] + df['Close']) / 3.0

    # 2 money flow
    mf = tp * df['Volume']

    # 3 positive and negative money flow with n periods
    df['1p_Positive_Money_Flow'] = 0.0
    df.loc[df['Up_or_Down'] == 1, '1p_Positive_Money_Flow'] = mf
    n_positive_mf = df['1p_Positive_Money_Flow'].rolling(n).sum()

    df['1p_Negative_Money_Flow'] = 0.0
    df.loc[df['Up_or_Down'] == 2, '1p_Negative_Money_Flow'] = mf
    n_negative_mf = df['1p_Negative_Money_Flow'].rolling(n).sum()

    # 4 money flow index
    mr = n_positive_mf / n_negative_mf
    mr = (100 - (100 / (1 + mr)))

    if fillna:
        mr = mr.fillna(50)

    return pd.Series(mr, name='mfi_' + str(n))
bd2cbb7b18c7be8d5c0ec0a984c4f3cadf295eec
19,980
def parse_args(args=[], doc=False):
    """
    Handle parsing of arguments and flags. Generates docs using help from `ArgParser`

    Args:
        args (list): argv passed to the binary
        doc (bool): If the function should generate and return manpage

    Returns:
        Processed args and a copy of the `ArgParser` object if not `doc`
        else a `string` containing the generated manpage
    """
    parser = ArgParser(prog=__COMMAND__, description=f"{__COMMAND__} - {__DESCRIPTION__}")
    parser.add_argument("file")
    parser.add_argument("--version", action="store_true", help=f"print program version")
    args = parser.parse_args(args)

    arg_helps_with_dups = parser._actions
    arg_helps = []
    [arg_helps.append(x) for x in arg_helps_with_dups if x not in arg_helps]

    NAME = f"**NAME*/\n\t{__COMMAND__} - {__DESCRIPTION__}"
    SYNOPSIS = f"**SYNOPSIS*/\n\t{__COMMAND__} [OPTION]... "
    DESCRIPTION = f"**DESCRIPTION*/\n\t{__DESCRIPTION_LONG__}\n\n"

    for item in arg_helps:
        # Its a positional argument
        if len(item.option_strings) == 0:
            # If the argument is optional:
            if item.nargs == "?":
                SYNOPSIS += f"[{item.dest.upper()}] "
            elif item.nargs == "+":
                SYNOPSIS += f"[{item.dest.upper()}]... "
            else:
                SYNOPSIS += f"{item.dest.upper()} "
        else:
            # Boolean flag
            if item.nargs == 0:
                if len(item.option_strings) == 1:
                    DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\t{item.help}\n\n"
                else:
                    DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/\n\t\t{item.help}\n\n"
            elif item.nargs == "+":
                DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/=[{item.dest.upper()}]...\n\t\t{item.help}\n\n"
            else:
                DESCRIPTION += f"\t**{' '.join(item.option_strings)}*/={item.dest.upper()}\n\t\t{item.help}\n\n"

    if doc:
        return f"{NAME}\n\n{SYNOPSIS}\n\n{DESCRIPTION}\n\n"
    else:
        return args, parser
355d8e8722171ab64ebdc02d1fe39db7521a497b
19,981
def gaussian_kernel_dx_i_dx_j(x, y, sigma=1.):
    """ Matrix of \frac{\partial k}{\partial x_i \partial x_j}"""
    assert(len(x.shape) == 1)
    assert(len(y.shape) == 1)
    d = x.size

    pairwise_dist = np.outer(y-x, y-x)

    x_2d = x[np.newaxis, :]
    y_2d = y[np.newaxis, :]
    k = gaussian_kernel(x_2d, y_2d, sigma)

    term1 = k*pairwise_dist * (2.0/sigma)**2
    term2 = k*np.eye(d) * (2.0/sigma)
    return term1 - term2
626f38a5a5e1e7c7dd98c92636a424c74fc7146b
19,982
from pathlib import Path
import tarfile


def clone_compressed_repository(base_path, name):
    """Decompress and clone a repository."""
    compressed_repo_path = Path(__file__).parent / "tests" / "fixtures" / f"{name}.tar.gz"
    working_dir = base_path / name

    bare_base_path = working_dir / "bare"

    with tarfile.open(compressed_repo_path, "r") as fixture:
        fixture.extractall(str(bare_base_path))

    bare_path = bare_base_path / name
    repository_path = working_dir / "repository"
    repository = Repo(bare_path, search_parent_directories=True).clone(repository_path)
    return repository
bbd733b079ebedb91687597180b0f98825f6ed6c
19,983
def slices(series, length):
    """
    Given a string of digits, output all the contiguous substrings of length n
    in that string in the order that they appear.

    :param series string - string of digits.
    :param length int - the length of the series to find.
    :return list - List of substrings of specified length from series.
    """
    if len(series) < length:
        raise ValueError("Length requested is shorter than series.")
    if length < 1:
        raise ValueError("Length requested is less than 1.")
    substrings = []
    for index, number in enumerate(series):
        sub = series[index:index + length]
        if len(sub) == length:
            substrings.append(sub)
    return substrings
ea2d1caf26a3fc2e2a57858a7364b4ebe67297d6
19,984
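A usage sketch for `slices` above, assuming the function is defined as shown.

assert slices("49142", 3) == ["491", "914", "142"]
assert slices("49142", 5) == ["49142"]
# slices("123", 4) raises ValueError: the series is shorter than the requested length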
from where.models.delay import gnss_range  # Local import to avoid cyclical import


def get_flight_time(dset):
    """Get flight time of GNSS signal between satellite and receiver

    Args:
        dset(Dataset):   Model data

    Return:
        numpy.ndarray:   Flight time of GNSS signal between satellite and receiver in [s]
    """
    # Get geometric range between satellite and receiver position
    geometric_range = gnss_range.gnss_range(dset)

    return geometric_range / constant.c
503bfb55fc10bef9f610291aa0f35e0530c8b0f2
19,985
def textToTuple(text, defaultTuple):
    """This will convert the text representation of a tuple into a real
    tuple.  No checking for type or number of elements is done.  See
    textToTypeTuple for that.
    """
    # first make sure that the text starts and ends with brackets
    text = text.strip()
    if text[0] != '(':
        text = '(%s' % (text,)
    if text[-1] != ')':
        text = '%s)' % (text,)
    try:
        returnTuple = eval('tuple(%s)' % (text,))
    except Exception:
        returnTuple = defaultTuple
    return returnTuple
89fed32bff39ad9e69513d7e743eb05a3bf7141a
19,986
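Illustrative calls for `textToTuple` above with invented inputs; the default is returned whenever the eval fails.

assert textToTuple("1, 2, 3", ()) == (1, 2, 3)
assert textToTuple("(4, 5)", ()) == (4, 5)
assert textToTuple("not a tuple", (0, 0)) == (0, 0)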
def m2m_bi2uni(m2m_list):
    """
    Splits a bigram word model into a unique unigram word model

    i=11, j=3        i=10, j=3        i=9,10,11,12, j=3,4,5,6
    ###leilatem###   ###leilatem###   ###leilatem###
    ###temum###      ###temum###      ###temum###
              ^                ^         ^^^^
    m: mismatch      m                 m MMMm
    M: match
    """
    q = Queue(maxsize=2)
    phonemes_list = []
    while len(m2m_list):  # NOTE can be optmised removing this while
        while not q.full():
            bigram = m2m_list.pop(0)
            q.put(PADD + bigram + PADD)
        curr_word = q.get()
        next_word = q.get()
        i = len(curr_word) - 1 - len(PADD)  # to decrease backwards
        j = len(PADD)                       # to increase forward
        unmatch_count = 0
        match = False
        #print(curr_word, '***********************************')
        #print(next_word, '***********************************')
        while not match:
            # scan the first section: mismatch (m)
            while curr_word[i] != next_word[j]:
                #print('%-6s %-6s %02d %02d <- bi2uni' % (curr_word[i],
                #      next_word[j], i, j))
                i -= 1
                unmatch_count += 1
            #print('%-6s %-6s' % (curr_word[i], next_word[j]))
            # gambiarra master to avoid mismatches like in 's e j s'
            if unmatch_count == 0 and not is_vowel(curr_word[i][0]):
                i -= 1
                unmatch_count += 1
                continue
            #print('possible match')
            for k in range(unmatch_count + len(PADD)):
                # scan the second section: a match (M)
                if curr_word[i + k] == next_word[j + k]:
                    continue
                else:
                    # found third section: right mismatch with PADD (m)
                    if curr_word[i + k] == '#':  # check immediate mismatch
                        match = True
                        #print('match! ->', end=' ')
                        #print(curr_word[len(PADD):i])
                    else:
                        #print('houston we have a problem: (%s, %s)' %
                        #      (curr_word[i + k], next_word[j + k]))
                        i -= 1
                        unmatch_count += 1
                    break
        phonemes_list.append(curr_word[len(PADD):i])
        q.put(next_word)
    phonemes_list.append(next_word[len(PADD):j + k])
    phonemes_list.append(next_word[j + k:-len(PADD)])
    return phonemes_list
37f1644dc16bc0e4dd47acd7a69f1c2d6fbfc6d5
19,987
import time


def time_func(func):
    """Times how long a function takes to run.

    It doesn't do anything clever to avoid the various pitfalls of timing a
    function's runtime. (Interestingly, the timeit module doesn't supply a
    straightforward interface to run a particular function.)
    """
    def timed(*args, **kwargs):
        start = time.time()
        func(*args, **kwargs)
        end = time.time()
        return end - start
    return timed
3506ad28c424434402f3223a43daff4eb51b7763
19,988
def GetPhiPsiChainsAndResiduesInfo(MoleculeName, Categorize = True):
    """Get phi and psi torsion angle information for residues across chains
    in a molecule containing amino acids.

    The phi and psi angles are optionally categorized into the following
    groups corresponding to four types of Ramachandran plots:

    General: All residues except glycine, proline, or pre-proline
    Glycine: Only glycine residues
    Proline: Only proline residues
    Pre-Proline: Only residues before proline not including glycine or proline

    Arguments:
        MoleculeName (str): Name of a PyMOL molecule object.

    Returns:
        dict: A dictionary containing sorted list of residue numbers for each
            chain and dictionaries of residue names, phi and psi angles for
            each residue number.

    Examples:

        PhiPsiInfoMap = GetPhiPsiChainsAndResiduesInfo(MolName)
        for ChainID in PhiPsiInfoMap["ChainIDs"]:
            for ResNum in PhiPsiInfoMap["ResNums"][ChainID]:
                ResName = PhiPsiInfoMap["ResName"][ChainID][ResNum]
                Phi = PhiPsiInfoMap["Phi"][ChainID][ResNum]
                Psi = PhiPsiInfoMap["Psi"][ChainID][ResNum]
                Category = PhiPsiInfoMap["Category"][ChainID][ResNum]
                MiscUtil.PrintInfo("ChainID: %s; ResNum: %s; ResName: %s; Phi: %8.2f; Psi: %8.2f; Category: %s" % (ChainID, ResNum, ResName, Phi, Psi, Category))
    """
    if not len(MoleculeName):
        return None

    SelectionCmd = "%s" % (MoleculeName)
    PhiPsiResiduesInfoMap = _GetSelectionPhiPsiChainsAndResiduesInfo(SelectionCmd, Categorize)

    return PhiPsiResiduesInfoMap
07295d99f3f2150e4a9e0782bf376ac1aa22a499
19,989
def generate_data(n_samples=30):
    """Generate synthetic dataset. Returns `data_train`, `data_test`,
    `target_train`."""
    x_min, x_max = -3, 3
    x = rng.uniform(x_min, x_max, size=n_samples)
    noise = 4.0 * rng.randn(n_samples)
    y = x ** 3 - 0.5 * (x + 1) ** 2 + noise
    y /= y.std()

    data_train = pd.DataFrame(x, columns=["Feature"])
    data_test = pd.DataFrame(
        np.linspace(x_max, x_min, num=300), columns=["Feature"])
    target_train = pd.Series(y, name="Target")

    return data_train, data_test, target_train
f7d2f5637327119d5f08fe2ccbfe2d4f41a34c5c
19,990
def get_element_event(element_key):
    """
    Get object's event.
    """
    model = apps.get_model(settings.WORLD_DATA_APP, "event_data")
    return model.objects.filter(trigger_obj=element_key)
bd177573035209e97110a2213cbe98b3b2eadafb
19,991
import operator


def get_seller_price(sellers, seller_id, core_request):
    """
    sellers is a list of list where each list contains follwing item in order
        1. Seller Name
        2. Number of available cores
        3. Price of each core
        4. List of lists where length of main list is equal to number of
           cores. Length of minor list will be zero.
    seller_id is the seller index whose price to be determined. You can
    access this seller by seller[seller_id]
    core_request is the number of core requested

    return the total price of this deal using second price auction
    if seller_id is with largest ask then return its own price
    """
    new_list = list(sellers)
    new_list.sort(key=operator.itemgetter(2))
    i = 0
    for x in new_list:
        if x == sellers[seller_id]:
            break
        i += 1
    #print i
    if i == len(sellers) - 1:
        return new_list[i][2]*core_request
    else:
        price = 0
        core = core_request
        price = 0
        while core > 0:
            i += 1
            if i == len(sellers) - 1:
                price += core*new_list[i][2]
                core = 0
            else:
                if core > new_list[i][1]:
                    core = core - new_list[i][1]
                    price += new_list[i][1]*new_list[i][2]
                else:
                    price += core*new_list[i][2]
                    core = 0
        return price
a1103b05409cdab20dd1982f5839a712939c3c3f
19,992
def create_affiliation_ttl(noun_uri: str, noun_text: str,
                           affiliated_text: str, affiliated_type: str) -> list:
    """
    Creates the Turtle for an Affiliation.

    @param noun_uri: String holding the entity/URI to be affiliated
    @param noun_text: String holding the sentence text for the entity
    @param affiliated_text: String specifying the entity (organization, group,
                            etc.) to which the noun is affiliated
    @param affiliated_type: String specifying the class type of the entity
    @return: An array of strings holding the Turtle representation of the Affiliation
    """
    affiliated_uri = f':{affiliated_text.replace(" ", "_")}'
    affiliation_uri = f'{noun_uri}{affiliated_text.replace(" ", "_")}Affiliation'
    noun_str = f"'{noun_text}'"
    ttl = [f'{affiliation_uri} a :Affiliation ; :affiliated_with {affiliated_uri} ; :affiliated_agent {noun_uri} .',
           f'{affiliation_uri} rdfs:label "Relationship based on the text, {noun_str}" .',
           f'{affiliated_uri} a {affiliated_type} ; rdfs:label "{affiliated_text}" .']
    wikidata_desc = get_wikipedia_description(affiliated_text)
    if wikidata_desc:
        ttl.append(f'{affiliated_uri} :definition "{wikidata_desc}" .')
    return ttl
d641a5aa77860dad48c605b3486bc83c0250d551
19,993
from typing import Tuple


def get_subpixel_indices(col_num: int) -> Tuple[int, int, int]:
    """Return a 3-tuple of 1-indexed column indices representing subpixels of a single pixel."""
    offset = (col_num - 1) * 2
    red_index = col_num + offset
    green_index = col_num + offset + 1
    blue_index = col_num + offset + 2
    return red_index, blue_index, green_index
cb4a1b9a4d27c3a1dad0760267e6732fe2d0a0da
19,994
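A quick check of `get_subpixel_indices` above with made-up column numbers; note that the return order is (red, blue, green), not (red, green, blue).

assert get_subpixel_indices(1) == (1, 3, 2)
assert get_subpixel_indices(2) == (4, 6, 5)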
def sigmoid(x):
    """ This function computes the sigmoid of x for NeuralNetwork"""
    return NN.sigmoid(x)
534391dc7b39aede21e6a66692bc1ca2ea1ce8b6
19,995
def extractTranslatingSloth(item):
    """
    'Translating Sloth'
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol or frag) or 'preview' in item['title'].lower():
        return None

    tagmap = [
        ('娘子我才是娃的爹', 'Wife, I Am the Baby\'s Father', 'translated'),
        ('Wife, I Am the Baby\'s Father', 'Wife, I Am the Baby\'s Father', 'translated'),
        ('I want to eat meat Wife', 'I want to eat meat Wife', 'translated'),
        ('My Lord is a Stone', 'My Lord is a Stone', 'translated'),
    ]

    for tagname, name, tl_type in tagmap:
        if tagname in item['tags']:
            return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                               postfix=postfix, tl_type=tl_type)

    return False
0ed9f5d4ae4c69fae2dc46e0260e29d1c97225af
19,996
def human(number: int, suffix='B') -> str:
    """Return a human readable memory size in a string.

    Initially written by Fred Cirera, modified and shared by Sridhar
    Ratnakumar (https://stackoverflow.com/a/1094933/6167478), edited by
    Victor Domingos.
    """
    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(number) < 1024.0:
            return f"{number:3.1f} {unit}{suffix}"
        number = number / 1024.0

    return f"{number:.1f}{'Yi'}{suffix}"
b41e9014ee7afbacb40115f85223ae89b08094a8
19,997
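Illustrative calls for `human` above; the byte counts are chosen arbitrarily.

print(human(0))            # '0.0 B'
print(human(2048))         # '2.0 KB'
print(human(5 * 1024**3))  # '5.0 GB'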
def _get_field_names(field: str, aliases: dict):
    """
    Override this method to customize how

    :param field:
    :param aliases:
    :return:
    """
    trimmed = field.lstrip("-")
    alias = aliases.get(trimmed, trimmed)
    return alias.split(",")
cb732c07018c33a546bf42ab1bf3516d2bd6c824
19,998
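A hypothetical field/alias pair showing the leading-"-" strip and the comma split performed by `_get_field_names` above.

aliases = {"name": "first_name,last_name"}
assert _get_field_names("-name", aliases) == ["first_name", "last_name"]
assert _get_field_names("email", aliases) == ["email"]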
def get_answer(question_with_context):
    """ Get answer for question and context. """
    # Create pipeline
    question_answering_pipeline = pipeline('question-answering')
    # Get answer
    answer = question_answering_pipeline(question_with_context)
    # Return answer
    return answer
ba560ecf5aa07a59b697465e0c34c8b32ddf64e6
19,999