Dataset columns: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M)
from operator import add


def set(isamAppliance, name, properties, attributes=None, description=None,
        type="JavaScript", new_name=None, check_mode=False, force=False):
    """
    Creating or Modifying a JavaScript PIP
    """
    ret_obj = search(isamAppliance, name=name)
    id = ret_obj['data']

    if id == {}:
        # If no id was found, Force the add
        return add(isamAppliance, name=name, properties=properties, attributes=attributes,
                   description=description, type=type, check_mode=check_mode, force=force)
    else:
        # Update PIP
        return update(isamAppliance, name=name, properties=properties, attributes=attributes,
                      description=description, type=type, new_name=new_name,
                      check_mode=check_mode, force=force)
6c7f097eb22a1f3033dce2cdf3264bff5f7c9acb
29,708
import logging


def init_model(model, opt, argv):
    """select the network initialization method"""
    if hasattr(opt, 'weight_init') and opt.weight_init == 'xavier':
        network_weight_xavier_init(model)
    elif hasattr(opt, 'weight_init') and opt.weight_init == 'MSRAPrelu':
        network_weight_MSRAPrelu_init(model)
    elif hasattr(opt, 'weight_init') and opt.weight_init == 'stupid':
        network_weight_stupid_init(model)
    elif hasattr(opt, 'weight_init') and opt.weight_init == 'zero':
        network_weight_zero_init(model)
    elif hasattr(opt, 'weight_init') and opt.weight_init == '01':
        network_weight_01_init(model)
    elif hasattr(opt, 'weight_init') and opt.weight_init == 'custom':
        assert hasattr(model, 'init_parameters')
        model.init_parameters()
    elif hasattr(opt, 'weight_init') and opt.weight_init == 'None':
        logging.info('Warning!!! model loaded without initialization !')
    else:
        raise ValueError('Unknown weight_init')

    if hasattr(opt, 'bn_momentum') and opt.bn_momentum is not None:
        for layer in model.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.momentum = opt.bn_momentum
    if hasattr(opt, 'bn_eps') and opt.bn_eps is not None:
        for layer in model.modules():
            if isinstance(layer, nn.BatchNorm2d):
                layer.eps = opt.bn_eps
    return model
5c53efd15af6403a6323d25dc877dc652e4a49b1
29,709
import math


def shoulders_up(x, y, max_angle=10):
    """
    1:"Neck", 2:"RShoulder", 5:"LShoulder".
    Looks at the line from the left shoulder to the neck and the line from the
    right shoulder to the neck. Returns the two angles and a flag:
    0.0 if both lines are flat (angles within max_angle and shoulder slope close
    to 0), 1.0 if either is not.
    """
    left_degrees = math.degrees(math.atan2(y[5] - y[1], x[5] - x[1]))
    right_degrees = math.degrees(math.atan2(y[1] - y[2], x[1] - x[2]))
    slope_shoulder = (y[5] - y[2]) / (x[5] - x[2])
    if (left_degrees <= max_angle and right_degrees <= max_angle) \
            and slope_shoulder <= 0.25:
        return left_degrees, right_degrees, 0.0
    else:
        return left_degrees, right_degrees, 1.0
2a6adce5dad431c91cac77bd79e4011964f76341
29,711
def str(x: i32) -> str: """ Return the string representation of an integer `x`. """ if x == 0: return '0' result: str result = '' if x < 0: result += '-' x = -x rev_result: str rev_result = '' rev_result_len: i32 rev_result_len = 0 pos_to_str: list[str] pos_to_str = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9'] while x > 0: rev_result += pos_to_str[x - _lpython_floordiv(x, 10)*10] rev_result_len += 1 x = _lpython_floordiv(x, 10) pos: i32 for pos in range(rev_result_len - 1, -1, -1): result += rev_result[pos] return result
6af580da343eb6adab58021eb489cf837b50571c
29,712
def grad_spsp_mult_entries_x_reverse(b_out, entries_a, indices_a, entries_x, indices_x, N): """ Now we wish to do the gradient with respect to the X matrix in AX=B Instead of doing it all out again, we just use the previous grad function on the transpose equation X^T A^T = B^T """ # get the transposes of the original problem entries_b, indices_b = b_out indices_aT = transpose_indices(indices_a) indices_xT = transpose_indices(indices_x) indices_bT = transpose_indices(indices_b) b_T_out = entries_b, indices_bT # call the vjp maker for AX=B using the substitution A=>X^T, X=>A^T, B=>B^T vjp_XT_AT = grad_spsp_mult_entries_a_reverse(b_T_out, entries_x, indices_xT, entries_a, indices_aT, N) # return the function of the transpose vjp maker being called on the backprop vector return lambda v: vjp_XT_AT(v)
64e5fbf5ca12fb9516c7aab2a357823c201d3d5a
29,713
def execute_until_false(method, interval_s):
    """Executes a method forever until the method returns a false value.

    Args:
        method: The callable to execute.
        interval_s: The number of seconds to start the execution after each
            method finishes.

    Returns:
        An Interval object.
    """
    interval = Interval(method, stop_if_false=True)
    interval.start(interval_s)
    return interval
eee730604ea98080e669d02f18a6ca55c4a4fe97
29,715
def vecnorm(a):
    """Return a/|a|"""
    return a / mdamath.norm(a)
425de4c8ddae138e1528ada448f86781b4c5130e
29,716
def count_qubits(operator): """Compute the minimum number of qubits on which operator acts. Args: operator: FermionOperator, QubitOperator, DiagonalCoulombHamiltonian, or PolynomialTensor. Returns: num_qubits (int): The minimum number of qubits on which operator acts. Raises: TypeError: Operator of invalid type. """ # Handle FermionOperator. if isinstance(operator, FermionOperator): num_qubits = 0 for term in operator.terms: for ladder_operator in term: if ladder_operator[0] + 1 > num_qubits: num_qubits = ladder_operator[0] + 1 return num_qubits # Handle QubitOperator. elif isinstance(operator, QubitOperator): num_qubits = 0 for term in operator.terms: if term: if term[-1][0] + 1 > num_qubits: num_qubits = term[-1][0] + 1 return num_qubits # Handle DiagonalCoulombHamiltonian elif isinstance(operator, DiagonalCoulombHamiltonian): return operator.one_body.shape[0] # Handle PolynomialTensor elif isinstance(operator, PolynomialTensor): return operator.n_qubits # Raise for other classes. else: raise TypeError('Operator of invalid type.')
9963c7b8202a825725ca3906b95f16d0243f81ed
29,717
def datenum_to_date(date_num):
    """Transform date_num to datetime object. Returns pd.NaT on invalid input"""
    try:
        total_seconds = round(dt.timedelta(days=date_num - 366).total_seconds())
        return dt.datetime(1, 1, 1) + dt.timedelta(seconds=total_seconds) - dt.timedelta(days=1)
    except OverflowError:
        return pd.NaT
f2a523f0e1c1af15835ae042fdeac8ebcf0a5717
29,718
def save_ground_truth_part(name, tuple_path, mean, sem, std, sestd):
    """Saves a ground truth part to strings.

    This is meant to be called with outputs of
    `nest.flatten_with_tuple_paths(ground_truth_mean)`.

    Args:
        name: Python `str`. Name of the sample transformation.
        tuple_path: Tuple path of the part of the ground truth we're saving. See
            `nest.flatten_with_tuple_paths`.
        mean: Ground truth mean, or `None` if it is absent.
        sem: Ground truth standard error of the mean, or `None` if it is absent.
        std: Ground truth standard deviation, or `None` if it is absent.
        sestd: Ground truth standard error of the standard deviation, or `None`
            if it is absent.

    Returns:
        array_strs: Python list of strings, representing the encoded arrays (that
            were present). Typically these would be joined with a newline and
            written out to a module, which can then be passed to
            `load_ground_truth_part`.
    """
    array_strs = []
    mean_name, sem_name, std_name, sestd_name = _get_global_variable_names(
        name, tuple_path)
    if mean is not None:
        array_strs.append(array_to_source.array_to_source(mean_name, mean))
    if sem is not None:
        array_strs.append(array_to_source.array_to_source(sem_name, sem))
    if std is not None:
        array_strs.append(array_to_source.array_to_source(std_name, std))
    if sestd is not None:
        array_strs.append(array_to_source.array_to_source(sestd_name, sestd))
    return array_strs
a4b769383dd0b250375205efeed363161b28ed0a
29,719
def parse_cpe(cpe):
    """
    Split the given CPE name into its components.

    :param cpe: CPE name.
    :type cpe: str

    :returns: CPE components.
    :rtype: list(str)
    """
    ver = get_cpe_version(cpe)
    if ver == "2.2":
        parsed = [cpe22_unquote(x.strip()) for x in cpe[5:].split(":")]
        if len(parsed) < 11:
            parsed.extend("*" * (11 - len(parsed)))
    elif ver == "2.3":
        parsed = [x.strip() for x in _cpe23_split.split(cpe[8:])]
        if len(parsed) != 11:
            raise ValueError("Not a valid CPE 2.3 name: %s" % cpe)
    else:
        raise ValueError("Not a valid CPE 2.2 or 2.3 name: %s" % cpe)
    return parsed
4dfbac57d3719a1c6ed00b5884be1321800827f5
29,720
def draw_points(xs, ys, covs, M): """ Resample a set of points M times, adding noise according to their covariance matrices. Returns ------- x_samples, y_samples : np.array Every column j, is x[j] redrawn M times. Has M rows, and every row is a realization of xs or ys. """ # store the samples as follows # col0 = all resamplings of x0 # -> each row is a different realization of our 75 sightlines # rescale data to avoid numerical problems factor_x = 1 / np.std(xs) factor_y = 1 / np.std(ys) xyr, covr = rescale.rescale_data( np.column_stack((xs, ys)), covs, factor_x, factor_y ) N = len(xyr) x_samples = np.zeros((M, N)) y_samples = np.zeros((M, N)) for j in range(N): samples = RNG.multivariate_normal(mean=xyr[j], cov=covr[j], size=M) x_samples[:, j] = samples[:, 0] y_samples[:, j] = samples[:, 1] # unscale the data again before returning return x_samples / factor_x, y_samples / factor_y
5609efcf6ba42fc2a059d308e8391b6057de3a16
29,721
def k_folds_split(raw_indexes, n_splits, labels=default_pars.validation_pars_labels, shuffle=default_pars.validation_pars_shuffle, random_state=default_pars.random_state, return_original_indexes=default_pars.validation_pars_return_original_indexes): """Splits a raw set of indexes into k train and k dev subsets using k-folding. There are k (given by 'n_splits') folds. Each of the folds uses the entire raw set of indexes (either for train or for dev). The k dev sets do not overlap, and together they cover the entire raw set. For each fold, the train set is made by all examples that are not in the dev set. Hence all train sets of different folds do overlap. Parameters ---------- raw_indexes : array_like Indexes of data (e.g. data.index, assuming data is a pandas dataframe). n_splits : int Number of folds. labels : list or None If not None, the k-folding is stratified; if None, labels are ignored. shuffle : bool True to shuffle indexes before splitting; False to keep original order. random_state : int or None Random state for shuffling; Ignored if 'shuffle' is False (in which case, 'random_state' can be set to None). return_original_indexes : bool True to return original indexes (as given by 'raw_indexes'); False to return new integer indexes (that go from 0 to the number of elements in raw_indexes). Returns ------- parts : list K different parts (folds). Each part contains a tuple with: (array of indexes in train set for this part, array of indexes in dev set for this part) """ raw_indexes_array = np.array(raw_indexes) # To avoid warnings, impose random_state None if there is no shuffling. if not shuffle: random_state = None # Split a data set into n parts without overlap, and optionally stratified. if labels is None: split_method = KFold else: split_method = StratifiedKFold parts = list(split_method(n_splits=n_splits, random_state=random_state, shuffle=shuffle). split(raw_indexes_array, labels)) if return_original_indexes: parts = [(raw_indexes_array[part[0]], raw_indexes_array[part[1]]) for part in parts] return parts
77394ca5d345d97423abaa0fe421e50cb0017762
29,722
def element_wise_entropy(px): """ Returns a numpy array with element wise entropy calculated as -p_i*log_2(p_i). Params ------ px (np.array) Array of individual probabilities, i.e. a probability vector or distribution. Returns ------- entropy (np.array) Array of element-wise entropies. """ if isinstance(px, list): px = np.array(px) # Make a copy of input array entropy = px.copy() # Get indices of nonzero probability values nz = np.nonzero(entropy) # Compute -pi*log_2(p_i) element-wise entropy[nz] *= - np.log2(entropy[nz]) return entropy
7637cac96e51ce6b89a395c306a6104e77bcef0d
29,723
def score_page(preds, truth): """ Scores a single page. Args: preds: prediction string of labels and center points. truth: ground truth string of labels and bounding boxes. Returns: True/false positive and false negative counts for the page """ tp = 0 fp = 0 fn = 0 truth_indices = { 'label': 0, 'X': 1, 'Y': 2, 'Width': 3, 'Height': 4 } preds_indices = { 'label': 0, 'X': 1, 'Y': 2 } if pd.isna(truth) and pd.isna(preds): return np.array([]), {'tp': tp, 'fp': fp, 'fn': fn} if pd.isna(truth): fp += len(preds.split(' ')) // len(preds_indices) return np.array([]), {'tp': tp, 'fp': fp, 'fn': fn} if pd.isna(preds): fn += len(truth.split(' ')) // len(truth_indices) return np.array([]), {'tp': tp, 'fp': fp, 'fn': fn} truth = truth.split(' ') if len(truth) % len(truth_indices) != 0: raise ValueError('Malformed solution string') truth_label = np.array(truth[truth_indices['label']::len(truth_indices)]) truth_xmin = np.array(truth[truth_indices['X']::len(truth_indices)]).astype(float) truth_ymin = np.array(truth[truth_indices['Y']::len(truth_indices)]).astype(float) truth_xmax = truth_xmin + np.array(truth[truth_indices['Width']::len(truth_indices)]).astype(float) truth_ymax = truth_ymin + np.array(truth[truth_indices['Height']::len(truth_indices)]).astype(float) preds = preds.split(' ') if len(preds) % len(preds_indices) != 0: raise ValueError('Malformed prediction string') preds_label = np.array(preds[preds_indices['label']::len(preds_indices)]) preds_x = np.array(preds[preds_indices['X']::len(preds_indices)]).astype(float) preds_y = np.array(preds[preds_indices['Y']::len(preds_indices)]).astype(float) preds_unused = np.ones(len(preds_label)).astype(bool) ok_array = [] for xmin, xmax, ymin, ymax, label in zip(truth_xmin, truth_xmax, truth_ymin, truth_ymax, truth_label): # Matching = point inside box & character same & prediction not already used matching = (xmin < preds_x) & (xmax > preds_x) & (ymin < preds_y) & (ymax > preds_y) & (preds_label == label) & preds_unused if matching.sum() == 0: fn += 1 else: tp += 1 preds_unused[np.argmax(matching)] = False fp += preds_unused.sum() return preds_unused, {'tp': tp, 'fp': fp, 'fn': fn}
25b3d4280db734ded586eb627fe98d417164601d
29,724
import re


def _validate_eida_token(token):
    """
    Just a basic check if the string contains something that looks like a PGP
    message
    """
    if re.search(pattern='BEGIN PGP MESSAGE', string=token, flags=re.IGNORECASE):
        return True
    return False
746fbd011b38abab43be983a1a054505526dcf78
29,725
def dn_histogram_mode_5(y, y_min, y_max):
    """
    Mode of z-scored distribution (5-bin histogram)
    """
    return histogram_mode(y, y_min, y_max, 5)
0dc10f46136e60e2523f4ca79449f18aaedd4854
29,726
def _make_element(spec, parent, attributes=None):
    """Helper function to generate the right kind of Element given a spec."""
    if (spec.name == constants.WORLDBODY
            or (spec.name == constants.SITE
                and (parent.tag == constants.BODY
                     or parent.tag == constants.WORLDBODY))):
        return _AttachableElement(spec, parent, attributes)
    elif isinstance(parent, _AttachmentFrame):
        return _AttachmentFrameChild(spec, parent, attributes)
    elif spec.name == constants.DEFAULT:
        return _DefaultElement(spec, parent, attributes)
    elif spec.name == constants.ACTUATOR:
        return _ActuatorElement(spec, parent, attributes)
    else:
        return _ElementImpl(spec, parent, attributes)
7635e8380e8238541544495f250e78a5da3ad441
29,727
def nlf_css(parser, token):
    """Newsletter friendly CSS"""
    args = token.split_contents()
    css = {}
    css_order = []
    for item in args[1:]:
        tag, value = item.split("=")
        tag, value = tag.strip('"'), value.strip('"')
        css[tag] = value
        css_order.append(tag)
    nodelist = parser.parse(('end_nlf_css',))
    token = parser.next_token()
    return NewsletterFriendlyCssNode(nodelist, css, css_order)
eadf74cdd6e58fb4e1551f1b951ad74226e175c2
29,730
def data_context_connectivity_context_connectivity_serviceuuid_requested_capacity_bandwidth_profile_peak_information_rate_put(uuid, tapi_common_capacity_value=None): # noqa: E501 """data_context_connectivity_context_connectivity_serviceuuid_requested_capacity_bandwidth_profile_peak_information_rate_put creates or updates tapi.common.CapacityValue # noqa: E501 :param uuid: Id of connectivity-service :type uuid: str :param tapi_common_capacity_value: tapi.common.CapacityValue to be added or updated :type tapi_common_capacity_value: dict | bytes :rtype: None """ if connexion.request.is_json: tapi_common_capacity_value = TapiCommonCapacityValue.from_dict(connexion.request.get_json()) # noqa: E501 return 'do some magic!'
c9ee5c8751e8424732f066598525a44d1c66d1c5
29,731
import logging def _get_default_dataset_statistics( statistics: statistics_pb2.DatasetFeatureStatisticsList ) -> statistics_pb2.DatasetFeatureStatistics: """Gets the DatasetFeatureStatistics to use for validation. If there is a single DatasetFeatureStatistics, this function returns that. If there are multiple DatasetFeatureStatistics, this function attempts to find the one that corresponds to the default slice. If found, this function returns that. If not found, this function raises an error. Args: statistics: A DatasetFeatureStatisticsList protocol buffer. Returns: A DatasetFeatureStatistics protocol buffer to use for validation. Raises: ValueError: If the input statistics proto contains multiple datasets, none of which corresponds to the default slice. """ if len(statistics.datasets) == 1: return statistics.datasets[0] # If there are multiple datasets, attempt to find the dataset for the # default slice (i.e., slice for all examples) from among the datasets. for dataset in statistics.datasets: if dataset.name == constants.DEFAULT_SLICE_KEY: logging.warning('Multiple datasets found in statistics. Using the ' 'default slice dataset.') return dataset # If there are multiple datasets, but the default slice is not found, raise an # error. raise ValueError('Only statistics proto with one dataset or the default ' 'slice (i.e., "All Examples" slice) is currently supported.')
8466e67c9ea5d795ea9b6c16c637b9a55048056c
29,732
from typing import Callable, TypeVar

# A and B are the fold's element and accumulator type variables.
A = TypeVar('A')
B = TypeVar('B')


def foldr(_folder: Callable[[A, B], B], _init: B, _linked_list: LinkedList[A]) -> B:
    """ foldr """
    if _linked_list.content is None:
        return _init
    else:
        head = _linked_list.content[0]
        tail = _linked_list.content[1]
        return _folder(head, foldr(_folder, _init, tail))
93aa3749442d8028dd115619e358e8fcf40ada46
29,733
def get_per_lane_sample_dist_plot(sample_data: pd.DataFrame) -> dict:
    """
    A function for returning sample distribution plots

    :params sample_data: A Pandas DataFrame containing sample data
    :returns: A dictionary containing sample distribution plots
    """
    try:
        lane_plots = dict()
        for lane_id, l_data in sample_data.groupby('Lane'):
            lane_samples = l_data['Sample_ID'].values.tolist()
            datasets = list()
            for project_id, p_data in l_data.groupby('Sample_Project'):
                pf_counts = \
                    p_data.\
                    set_index('Sample_ID')['PF Clusters'].\
                    reindex(lane_samples).\
                    fillna(0).\
                    values.tolist()
                datasets.append({
                    "label": project_id,
                    "data": pf_counts})
            data = {
                "labels": lane_samples,
                "datasets": datasets}
            lane_plots.update({lane_id: data})
        return lane_plots
    except Exception as e:
        raise ValueError("Failed to get sample distribution data, error: {0}".format(e))
143094fc909ac9044c5986c958cff79e9e218414
29,734
def mark_volatile(obj):
    """DEPRECATED(Jiayuan Mao): mark_volatile has been deprecated and will be
    removed by 10/23/2018; please use torch.no_grad instead."""
    return stmap(_mark_volatile, obj)
2e299889fe7982069fb5987f6af0e56ae365a9ee
29,735
def list_keys(bucket):
    """
    Lists all the keys in a bucket.

    :param bucket: (string) A bucket name.
    :return: (string list) Keys in the bucket.
    """
    _check_bucket(bucket)
    bucket_path = _bucket_path(bucket)
    keys = []
    for key_path in bucket_path.iterdir():
        keys.append(key_path.name)
    return keys
0a84c72165cf76970221974196911853d0db027a
29,736
def h6(content, accesskey:str ="", class_: str ="", contenteditable: str ="", data_key: str="", data_value: str="", dir_: str="", draggable: str="", hidden: str="", id_: str="", lang: str="", spellcheck: str="", style: str="", tabindex: str="", title: str="", translate: str=""): """ Returns a heading.\n `content`: The text of the heading.\n """ g_args = global_args(accesskey, class_, contenteditable, data_key, data_value, dir_, draggable, hidden, id_, lang, spellcheck, style, tabindex, title, translate) return f"<h6 {g_args}>{content}</h6>\n"
c68a5ba229643571ce49e535b32e99580443e56c
29,737
def extractUniversesWithMeaning(item):
    """
    """
    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or 'preview' in item['title'].lower():
        return None
    if 'Angel of Death' in item['title']:
        return buildReleaseMessageWithType(item, 'Angel of Death', vol, chp,
                                           frag=frag, postfix=postfix, tl_type='oel')
    if 'In The Name Of God' in item['title']:
        return buildReleaseMessageWithType(item, 'In The Name Of God', vol, chp,
                                           frag=frag, postfix=postfix, tl_type='oel')
    return False
0a00cc344bdb1d5a2498252f529536b2c58f2825
29,738
def get_provenance(message): """Given a message with results, find the source of the edges""" prov = defaultdict(lambda: defaultdict(int)) # {qedge->{source->count}} results = message['message']['results'] kg = message['message']['knowledge_graph']['edges'] edge_bindings = [ r['edge_bindings'] for r in results ] for bindings in edge_bindings: for qg_e, kg_l in bindings.items(): for kg_e in kg_l: for att in kg[kg_e['id']]['attributes']: if att['attribute_type_id'] == 'MetaInformation:Provenance': source = att['value'] prov[qg_e][source]+=1 qg_edges = [] sources = [] counts = [] for qg_e in prov: for source in prov[qg_e]: qg_edges.append(qg_e) sources.append(source) counts.append(prov[qg_e][source]) prov_table = pd.DataFrame({"QG Edge":qg_edges, "Source":sources, "Count":counts}) return prov_table
1f6dfe9e48da49b72cb55f3f949146db3db164f2
29,739
def bricklinkColorToLEGO(colornum):
    """
    Get the LEGO equivalent to a bricklink color number if it exists
    :param colornum:
    :return:
    """
    default = {"name": "Unknown", "Lego": colornum, "BrickLink": colornum}
    if colornum < 0:
        return default
    for color in color_data:
        if color_data[color]["BrickLink"] == colornum:
            return color_data[color]
    return default
9ed77c271168025c5864a28624470c26ae3a0a9e
29,740
def bounds(gdf):
    """Calculates the bounding coordinates (left, bottom, right, top) in the
    given GeoDataFrame.

    Args:
        gdf: A GeoDataFrame containing the input points.

    Returns:
        An array [minx, miny, maxx, maxy] denoting the spatial extent.
    """
    bounds = gdf.total_bounds
    return bounds
48242e870edd1db9b1191518c4b9ba7433420610
29,741
def powerport_get_row_boot_strap(port_id, raw=False):
    """
    Get boot strapping setting
    """
    result = powerport_get_port_status(port_id + 24, "pdu", raw)
    state = result.pop("Port State", None)
    if state:
        result["Boot Strap"] = "Normal" if (state == "Off") else "Network"
    return result
00d41e0a505c6d88ce44c4a701865556ccff0f89
29,743
def inv_ltri(ltri, det):
    """Lower triangular inverse"""
    inv_ltri = np.array((ltri[2], -ltri[1], ltri[0]), dtype=ltri.dtype) / det
    return inv_ltri
539bbcec02286f991d5cc426ebe631d1cbe0b890
29,744
def create_dummy_review(user, title='Review 1'):
    """Simple function for creating reviews of a user"""
    review = Review.objects.create(
        reviewer=user,
        title=title,
        rating=5,
        summary='This is my first review!!!',
        ip='190.190.190.1',
        company='Test Company',
    )
    return review
ffa9ad526072fa48eff513f9b6d10628e700a266
29,745
def check_user(update):
    """verify that a user has signed up"""
    user = update.message.from_user
    if db.tel_get_user(user.id) is not None:
        return True
    else:
        return False
6654ba29d15158b63f78cea16d796f26f9199b07
29,747
def acmg():
    """Calculate an ACMG classification from submitted criteria."""
    criteria = request.args.getlist("criterion")
    classification = get_acmg(criteria)
    return jsonify(dict(classification=classification))
83e70afcd986ca8b7fe0e442130615cff2e84a1b
29,748
def import_bin(filename, **kwargs): """ Read a .bin file generated by the IRIS Instruments Syscal Pro System and return a curated dataframe for further processing. This dataframe contains only information currently deemed important. Use the function reda.importers.iris_syscal_pro_binary._import_bin to extract ALL information from a given .bin file. Parameters ---------- filename : string path to input filename x0 : float, optional position of first electrode. If not given, then use the smallest x-position in the data as the first electrode. spacing : float electrode spacing. This is important if not all electrodes are used in a given measurement setup. If not given, then the smallest distance between electrodes is assumed to be the electrode spacing. Naturally, this requires measurements (or injections) with subsequent electrodes. reciprocals : int, optional if provided, then assume that this is a reciprocal measurements where only the electrode cables were switched. The provided number N is treated as the maximum electrode number, and denotations are renamed according to the equation :math:`X_n = N - (X_a - 1)` check_meas_nums : bool if True, then check that the measurement numbers are consecutive. Don't return data after a jump to smaller measurement numbers (this usually indicates that more data points were downloaded than are part of a specific measurement. Default: True skip_rows : int Ignore this number of rows at the beginning, e.g., because they were inadvertently imported from an earlier measurement. Default: 0 Returns ------- data : :py:class:`pandas.DataFrame` Contains the measurement data electrodes : :py:class:`pandas.DataFrame` Contains electrode positions (None at the moment) topography : None No topography information is contained in the text files, so we always return None """ metadata, data_raw = _import_bin(filename) skip_rows = kwargs.get('skip_rows', 0) if skip_rows > 0: data_raw.drop(data_raw.index[range(0, skip_rows)], inplace=True) data_raw = data_raw.reset_index() if kwargs.get('check_meas_nums', True): # check that first number is 0 if data_raw['measurement_num'].iloc[0] != 0: print('WARNING: Measurement numbers do not start with 0 ' + '(did you download ALL data?)') # check that all measurement numbers increase by one if not np.all(np.diff(data_raw['measurement_num'])) == 1: logger.warning(' '.join(( 'WARNING', 'Measurement numbers are not consecutive.', 'Perhaps the first measurement belongs to another' 'measurement?', 'Use the skip_rows parameter to skip those measurements' ))) # import IPython # IPython.embed() # now check if there is a jump in measurement numbers somewhere # ignore first entry as this will always be nan diff = data_raw['measurement_num'].diff()[1:] jump = np.where(diff != 1)[0] if len(jump) > 0 and not np.all(data_raw['measurement_num'] == 0): logger.warning( 'WARNING: One or more jumps in measurement numbers detected') logger.warning('The jump indices are:') for jump_nr in jump: logger.warning(jump_nr) logger.info('Removing data points subsequent to the first jump') data_raw = data_raw.iloc[0:jump[0] + 1, :] if data_raw.shape[0] == 0: # no data present, return a bare DataFrame return pd.DataFrame(columns=['a', 'b', 'm', 'n', 'r']), None, None data = _convert_coords_to_abmn_X( data_raw[['x_a', 'x_b', 'x_m', 'x_n']], **kwargs ) # [mV] / [mA] data['r'] = data_raw['vp'] / data_raw['Iab'] data['Vmn'] = data_raw['vp'] data['vab'] = data_raw['vab'] data['Iab'] = data_raw['Iab'] data['mdelay'] = data_raw['mdelay'] data['Tm'] = data_raw['Tm'] 
data['Mx'] = data_raw['Mx'] data['chargeability'] = data_raw['m'] data['q'] = data_raw['q'] # rename electrode denotations rec_max = kwargs.get('reciprocals', None) if rec_max is not None: logger.info('renumbering electrode numbers') data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']] # print(data) return data, None, None
2ec4d3febad7eae08db4b2df5b4552459b627dd9
29,749
def get_messages(receive_address, send_address, offset, count): """ return most recent messages from offset to count from both communicators """ conn = connect_db() cur = conn.cursor() cur.execute( "(SELECT * FROM message WHERE (recvAddress=%s AND sendAddress=%s) ORDER by id DESC LIMIT %s OFFSET %s) " "UNION " "(SELECT * FROM message WHERE (recvAddress=%s AND sendAddress=%s) ORDER by id DESC LIMIT %s OFFSET %s) " "ORDER by id;", (receive_address, send_address, count, offset, send_address, receive_address, count, offset)) res = [] for row in cur: res.append(row) cur.close() conn.close() return res
13e46a46a4bc15931de77374249ae701f5b65c1a
29,750
def get_test_node(context, **kw): """Return a Node object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ db_node = db_utils.get_test_node(**kw) # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del db_node['id'] node = objects.Node(context) for key in db_node: setattr(node, key, db_node[key]) return node
6ce089c0d6643f2c58633ce846a4a46d2c9e5732
29,751
from typing import OrderedDict def GlobalAttributes(ds, var): """ Creates the global attributes for the netcdf file that is being written these attributes come from : https://www.unidata.ucar.edu/software/thredds/current/netcdf-java/metadata/DataDiscoveryAttConvention.html args: runinfo Table containing all the details of the individual runs ensinfo Custom class object containing all the infomation about the ensemble being saved returns: attributes Ordered Dictionary cantaining the attribute infomation """ # ========== Create the ordered dictionary ========== attr = OrderedDict() # fetch the references for my publications # pubs = puplications() # ========== Fill the Dictionary ========== # ++++++++++ Highly recomended ++++++++++ attr["title"] = "Trend in Climate Variable" attr["summary"] = "Annual and season trends in %s" % var attr["Conventions"] = "CF-1.7" # ++++++++++ Data Provinance ++++++++++ attr["history"] = "%s: Netcdf file created using %s (%s):%s by %s" % ( str(pd.Timestamp.now()), __title__, __file__, __version__, __author__) attr["history"] += ds.history attr["creator_name"] = __author__ attr["creator_url"] = "ardenburrell.com" attr["creator_email"] = __email__ attr["institution"] = "University of Leicester" attr["date_created"] = str(pd.Timestamp.now()) # ++++++++++ Netcdf Summary infomation ++++++++++ attr["time_coverage_start"] = str(dt.datetime(ds['time.year'].min(), 1, 1)) attr["time_coverage_end"] = str(dt.datetime(ds['time.year'].max() , 12, 31)) # Note. Maybe ad some geographich infomation here # Add publication references # attr["references"] = "Results are described in: %s \n TSS-RESTREND method is described in: %s" % ( # pubs["p%d" % ensinfo.paper], pubs["p1"]) # ++++++++++ Infomation unique to TSS-RESREND ensembles ++++++++++ # attr["package_version"] = ",".join(runinfo.iloc[ensinfo.run]["TSS.version"].unique().tolist()) # attr["package_url"] = "https://cran.r-project.org/web/packages/TSS.RESTREND/index.html" # attr["Vegetation"] = ",".join(runinfo.iloc[ensinfo.run]["VI.type"].unique().tolist()) # attr["Precipitation"] = ",".join(runinfo.iloc[ensinfo.run]["rf.type"].unique().tolist()) # ===== Check and see if temperature is included ===== # if not all(pd.isnull(runinfo.iloc[ensinfo.run].Temperature.tolist())): # # +++++ contains temperature +++++ # # List of temperature infomation # temp = ([fnm.split("/")[3].split(".")[1] # for fnm in runinfo.iloc[ensinfo.run].Temperature.unique().tolist()]) # # join list to attr # attr["Temperature"] = ",".join(temp) # # ===== add infomation about CO2 fertilisation ===== # if (ensinfo.paper == 3) or (ensinfo.paper>=5): # attr["CO2_method"] = "Franks et al., (2013) CO2 correction" # attr["CO2_data"] = "CMIP5 rcp8.5 forcing data" return attr
b5fcf1627f8c23964a1ec2fe4fa657a60cb8efa3
29,752
from typing import List
from typing import Tuple


def extend_path(path: List[Tuple[MerkleTree, hash.HashTriple]],
                tree: MerkleTree,
                ) -> List[List[Tuple[MerkleTree, hash.HashTriple]]]:
    """Extend path if possible."""
    paths = []
    for t in (tree.left, tree.right):
        if t is not None:
            path_ = path + [(t, get_hash_triple(t))]
            paths.append(path_)
    return paths
294ea5b1e2b844065a2fd962768f1b6501b4513f
29,753
def load_dictionary(loc):
    """
    Load a dictionary
    """
    with open(loc, 'r') as f:
        worddict = pkl.load(f)
    return worddict
f659ebb94410e02bbeab51b04b60e727cc749641
29,756
import json


def get_specific_post(post_id):
    """Get specific post"""
    value = posts.get(post_id)
    if not value:
        return json.dumps({"error": "Post Not Found"}), 404
    return json.dumps(value), 200
86ca2ee5847c3e7043dbd52f1a713477ea6191c5
29,757
def first_half(dayinput):
    """ first half solver: """
    houses = {(0, 0): 1}
    coords = [0, 0]
    for h in dayinput:
        coords = change_coords(h, coords)
        home = (coords[0], coords[1])
        if houses.get(home, None):
            houses[home] += 1
        else:
            houses[home] = 1
    return len(houses)
c0ed5c66c3f259257e14c2c6bc4daec406028135
29,758
def compute_cluster_metrics(neighbor_mat, max_k=10, included_fovs=None, fov_col='SampleID'): """Produce k-means clustering metrics to help identify optimal number of clusters Args: neighbor_mat (pandas.DataFrame): a neighborhood matrix, created from create_neighborhood_matrix the matrix should have the label col droppped max_k (int): the maximum k we want to generate cluster statistics for, must be at least 2 included_fovs (list): patient labels to include in analysis. If argument is none, default is all labels used. fov_col (str): the name of the column in neighbor_mat determining the fov Returns: xarray.DataArray: an xarray with dimensions (num_k_values) where num_k_values is the range of integers from 2 to max_k included, contains the metric scores for each value in num_k_values """ # set included_fovs to everything if not set if included_fovs is None: included_fovs = neighbor_mat[fov_col].unique() # make sure the user specifies a positive k if max_k < 2: raise ValueError("Invalid k provided for clustering") # check if included fovs found in fov_col misc_utils.verify_in_list(fov_names=included_fovs, unique_fovs=neighbor_mat[fov_col].unique()) # subset neighbor_mat accordingly, and drop the columns we don't need neighbor_mat_data = neighbor_mat[neighbor_mat[fov_col].isin(included_fovs)] neighbor_mat_data = neighbor_mat_data.drop(fov_col, axis=1) # generate the cluster score information neighbor_cluster_stats = spatial_analysis_utils.compute_kmeans_cluster_metric( neighbor_mat_data=neighbor_mat_data, max_k=max_k ) return neighbor_cluster_stats
bc13b2eb1b4bd9824e4134395ed5d31a9cfa7cb0
29,759
def map_flat_line(x, y, data, linestyles='--', colors='k', ax=None, **kwargs): """Plot a flat line across every axis in a FacetGrid. For use with seaborn's map_dataframe, this will plot a horizontal or vertical line across all axes in a FacetGrid. Parameters ---------- x, y : str, float, or list of floats One of these must be a float (or list of floats), one a str. The str must correspond to a column in the mapped dataframe, and we plot the line from the minimum to maximum value from that column. If the axes x/ylim looks very different than these values (and thus we assume this was a seaborn categorical plot), we instead map from 0 to data[x/y].nunique()-1 The float corresponds to the x/y value of the line; if a list, then we plot multiple lines. data : pd.DataFrame The mapped dataframe linestyles, colors : str, optional The linestyles and colors to use for the plotted lines. ax : axis or None, optional The axis to plot on. If None, we grab current axis. kwargs : Passed to plt.hlines / plt.vlines. Returns ------- lines : matplotlib.collections.LineCollection Artists for the plotted lines """ if ax is None: ax = plt.gca() # we set color with the colors kwarg, don't want to confuse it. kwargs.pop('color') if isinstance(x, str) and not isinstance(y, str): try: xmin, xmax = data[x].min(), data[x].max() except KeyError: # it looks like the above works with catplot / related functions # (i.e., when seaborn thought the data was categorical), but not # when it's relplot / related functions (i.e., when seaborn thought # data was numeric). in that case, the columns have been renamed to # 'x', 'y', etc. xmin, xmax = data['x'].min(), data['x'].max() # then this looks like a categorical plot if (ax.get_xlim()[-1] - xmax) / xmax > 5: xmin = 0 xmax = data[x].nunique()-1 lines = ax.hlines(y, xmin, xmax, linestyles=linestyles, colors=colors, **kwargs) elif isinstance(y, str) and not isinstance(x, str): try: ymin, ymax = data[y].min(), data[y].max() except KeyError: # it looks like the above works with catplot / related functions # (i.e., when seaborn thought the data was categorical), but not # when it's relplot / related functions (i.e., when seaborn thought # data was numeric). in that case, the columns have been renamed to # 'x', 'y', etc. ymin, ymax = data['y'].min(), data['y'].max() # then this looks like a categorical plot if (ax.get_ylim()[-1] - ymax) / ymax > 5: ymin = 0 ymax = data[y].nunique()-1 lines = ax.vlines(x, ymin, ymax, linestyles=linestyles, colors=colors, **kwargs) else: raise Exception("Exactly one of x or y must be a string!") return lines
dd46189458ee0da79785d2c9119a41969dc1357e
29,760
def build_dictionary(sentences, size): """ Create dictionary containing most frequent words in the sentences :param sentences: sequence of sentence that contains words Caution: the sequence might be exhausted after calling this function! :param size: size of dictionary you want :return: dictionary that maps word to index (starting from 1) """ dictionary = defaultdict(int) for sentence in sentences: for token in sentence: dictionary[token] += 1 frequent_pairs = nlargest(size, dictionary.items(), itemgetter(1)) words, frequencies = zip(*frequent_pairs) result = {word: index + 1 for index, word in enumerate(words)} return result
24a998d9df539b44c2dad1ffabb2737a189e7a3e
29,762
import re


def simplestr(text):
    """convert a string into a scrubbed lower snakecase.

    Intended use is converting human typed field names deterministically into
    a string that can be used for a key lookup.

    :param text: type str text to be converted
    """
    text = text.strip()
    text = text.replace(' ', '_')
    text = text.lower()
    return re.sub(r'\W+', '', text)
b030c50cd300dd97d69a9d2b8421892bb1f0c23a
29,763
def lti_launch(request):
    """
    This method is here to build the LTI_LAUNCH dictionary containing all the
    LTI parameters and place it into the session. This is necessary as we need
    to access these parameters throughout the application and they are only
    available the first time the application loads.
    """
    if request.user.is_authenticated():
        if validaterequiredltiparams(request):
            return redirect('sl:main')
        else:
            return render(request, 'student_locations/error.html',
                          {'message': 'Error: The LTI parameter lis_course_offering_sourcedid '
                                      'is required by this LTI tool.'})
    else:
        return render(request, 'student_locations/error.html',
                      {'message': 'Error: user is not authenticated!'})
a65d4395095e36f4e27ae802ae7c2635f211521e
29,764
def cartesian_to_altaz(x):
    """
    Converts local Cartesian coordinates to Alt-az, inverting altaz_to_cartesian
    """
    x, y, z = x
    return np.arcsin(z), np.arctan2(-y, x)
e585fb57a9509fab277e15f08e76e1367c8334d4
29,766
def discount_opex(opex, global_parameters, country_parameters): """ Discount opex based on return period. Parameters ---------- cost : float Financial cost. global_parameters : dict All global model parameters. country_parameters : dict All country specific parameters. Returns ------- discounted_cost : float The discounted cost over the desired time period. """ return_period = global_parameters['return_period'] discount_rate = global_parameters['discount_rate'] / 100 wacc = country_parameters['financials']['wacc'] costs_over_time_period = [] for i in range(0, return_period): costs_over_time_period.append( opex / (1 + discount_rate)**i ) discounted_cost = round(sum(costs_over_time_period)) #add wacc discounted_cost = discounted_cost * (1 + (wacc/100)) return discounted_cost
6e079ffc9accc7679bada3b31d07748c93d4b18c
29,768
import torch


def permute(x, perm):
    """Permutes the last three dimensions of the input Tensor or Array.

    Args:
        x (Tensor or Array): Input to be permuted.
        perm (tuple or list): Permutation.

    Note:
        If the input has fewer than three dimensions a copy is returned.
    """
    if is_tensor(x):
        if x.dim() < 3:
            return x.data.clone()
        else:
            s = tuple(range(0, x.dim()))
            permutation = s[:-3] + tuple(s[-3:][i] for i in perm)
            return x.permute(*permutation).contiguous()
    elif is_array(x):
        if x.ndim < 3:
            return x.copy()
        else:
            s = tuple(range(0, x.ndim))
            permutation = s[:-3] + tuple(s[-3:][i] for i in perm)
            # Copying to get rid of negative strides
            return np.transpose(x, permutation).copy()
    else:
        raise TypeError(f'Unknown type {torch.typename(x)} encountered.')
513164bfc6d25a82f2bfeba05427bd69c8df181c
29,770
def is_hr_between(time: int, time_range: tuple) -> bool:
    """
    Calculate if hour is within a range of hours
    Example: is_hr_between(4, (24, 5)) will match hours from 24:00:00 to 04:59:59
    """
    if time_range[1] < time_range[0]:
        return time >= time_range[0] or time <= time_range[1]
    return time_range[0] <= time <= time_range[1]
70d874f0a5dee344d7638559101fc6be2bcca875
29,771
def calc_Topt(sur,obs,Tvals,objfunc='nse'): """ Function to calibrate the T parameter using a brute-force method Args: sur (pandas.Series): pandas series of the surface soil moisture obs (pandas.Series): pandas series of the soil moisture at layer x to calibrate Tvals (list,tuple,set,np.array): sequence of values to test for optimal value Kwargs: objfuc (string): objective function used to search for optimal value; options: "nse","rmse","bias",and "r"; default: "nse" Returns: out (dict): dictionary with the optimal T value keyed at 'T' and the objective function value keyed at 'objval' """ objOpts = dict(nse=nse,rmse=rmse,bias=bias,r=r,ubrmse=ubRmse) objectiveFunc = objOpts[objfunc] df = pd.concat([sur,obs],axis=1) df.columns = ('surface','depth') df.dropna(inplace = True) # new_df = new_df[~new_df.index.duplicated(keep='first')] results = [] for T in Tvals: Ttest = expFilter(df['surface'],T=T) tempDf = pd.concat([Ttest,df['depth']],axis=1) tempDf.columns = ('simulation','observation') N = objectiveFunc(tempDf) results.append(N) # check to either find the min or max depending on objectivFunc if objfunc in ('nse','r'): best = np.array(results).argmax() objVal = np.nanmax(results) else: best = np.array(results).argmin() objVal = np.nanmin(results) out = dict(T=Tvals[best],objval=objVal) return out
39df23619ea364cbf477f02665a86e72cef86e11
29,772
def share_file(service, file_id):
    """Make files public

    For a given file-id, sets role 'reader' to 'anyone'.
    Returns public link to file.

    :param: file_id (string)
    :return: (string) url to shared file
    """
    permission = {
        'type': "anyone",
        'role': "reader",
        'withLink': True
    }
    try:
        service.permissions().insert(
            fileId=file_id, body=permission).execute()
    except errors.HttpError as error:
        print('An error occurred while sharing: %s' % file_id)
    try:
        file = service.files().get(fileId=file_id).execute()
    except errors.HttpError as error:
        print('An error occurred while fetching the public link for file: %s' % file_id)
    print("Uploaded to %s" % file['webContentLink'])
    return file['webContentLink']
70a1132667663fa85cf7f00fd3ced2fb81b03cc4
29,773
import data


def convert_image_for_visualization(image_data, mean_subtracted=True):
    """ Convert image data from tensorflow to displayable format """
    image = image_data
    if mean_subtracted:
        image = image + np.asarray(data.IMAGE_BGR_MEAN, np.float32)
    if FLAGS.image_channel_order == 'BGR':
        image = image[:, :, ::-1]  # BGR => RGB
    image = np.floor(image).astype(np.uint8)
    return image
9fe138051f7886d3c3e8e70e75f23b636a1ccd02
29,775
def mld(returns_array, scale=252): """ Maximum Loss Duration Maximum number of time steps when the returns were below 0 :param returns_array: array of investment returns :param scale: number of days required for normalization. By default in a year there are 252 trading days. :return: MLD """ max_loss = 0 curr = 0 for i in range(returns_array.shape[0]): # If first returns is negative, add this occurrence to max loss counter # If it's positive, continue if i == 0 and returns_array[0] < 0: curr += 1 max_loss = curr # If the equity continues dropping elif (i > 0) and (returns_array[i-1] < 0) and (returns_array[i] < 0): curr += 1 if max_loss < curr: max_loss = curr # If the equity stops dropping elif (i > 0) and (returns_array[i-1] < 0) and (returns_array[i] > 0): curr = 0 # Normalize over the number of trading days in a year return max_loss / scale
2d78d76c1456ebb4df606a9450f45e47b5e49808
29,776
from typing import Union from typing import List from typing import Dict from typing import Any def json2geodf( content: Union[List[Dict[str, Any]], Dict[str, Any]], in_crs: str = DEF_CRS, crs: str = DEF_CRS, ) -> gpd.GeoDataFrame: """Create GeoDataFrame from (Geo)JSON. Parameters ---------- content : dict or list of dict A (Geo)JSON dictionary e.g., r.json() or a list of them. in_crs : str CRS of the content, defaults to ``epsg:4326``. crs : str, optional The target CRS of the output GeoDataFrame, defaults to ``epsg:4326``. Returns ------- geopandas.GeoDataFrame Generated geo-data frame from a GeoJSON """ if not isinstance(content, (list, dict)): raise InvalidInputType("content", "list or list of dict ((geo)json)") content = content if isinstance(content, list) else [content] try: geodf = gpd.GeoDataFrame.from_features(content[0], crs=in_crs) except TypeError: content = [arcgis2geojson(c) for c in content] geodf = gpd.GeoDataFrame.from_features(content[0], crs=in_crs) if len(content) > 1: geodf = geodf.append([gpd.GeoDataFrame.from_features(c, crs=in_crs) for c in content[1:]]) if in_crs != crs: geodf = geodf.to_crs(crs) return geodf
de9e956a79821611d6f209e9b206d3c15e24708e
29,777
from typing import Optional
from typing import List
from typing import Tuple
from typing import Any
import logging


def build_block_specs(
        block_specs: Optional[List[Tuple[Any, ...]]] = None) -> List[BlockSpec]:
    """Builds the list of BlockSpec objects for SpineNet."""
    if not block_specs:
        block_specs = SPINENET_BLOCK_SPECS
    logging.info('Building SpineNet block specs: %s', block_specs)
    return [BlockSpec(*b) for b in block_specs]
6046b2ee33d15254ff6db989b63be4276d0bad19
29,778
def est_agb(dbh_mat, sp_list): """ Estimate above ground biomass using the allometric equation in Ishihara et al. 2015. """ # wood density wd_list = [ dict_sp[sp]["wood_density"] if sp in dict_sp and dict_sp[sp]["wood_density"] else None for sp in sp_list ] # functional type (生活形) # NOTE: 樹種不明の場合は最も頻度の高い生活形にする ft_categ = {1: "EG", 4: "DA", 5: "EA"} ft_list = [ ft_categ[dict_sp[sp]["categ2"]] if sp in dict_sp and dict_sp[sp]["categ2"] else "NA" for sp in sp_list ] ft_u, ft_c = np.unique([i for i in ft_list], return_counts=True) ft_list = [ft_u[np.argmax(ft_c)] if i == "NA" else i for i in ft_list] w_mat = np.array( [ np.vectorize(biomass)(dbh, wd=wd, ft=ft) for dbh, wd, ft in zip(dbh_mat, wd_list, ft_list) ] ).astype("float64") w_mat = w_mat / 1000 # kg to Mg return w_mat
ab0d80408bc9d216e387565cde3d6e758f738252
29,779
from typing import Optional


def parse_json_and_match_key(line: str) -> Optional[LogType]:
    """Parse a line as a JSON string and check if it is a valid log."""
    log = parse_json(line)
    if log:
        key = "logbook_type"
        if key not in log or log[key] != "config":
            log = None
    return log
c4efa103730ac63d2ee6c28bba2de99a450f6b8d
29,780
def nearDistance(img, centers):
    """
    Get the blob nearest to the image center, which is probably the blob for
    cells, using Euclidean distance.

    Parameters: img, image with labels defined;
                centers, list of labels' centers of mass.
    Returns: nearestLabel, label nearest to the image center.
    """
    N, M = img.shape
    imgCenter = [N // 2, M // 2]
    distances = []
    for center in centers:
        distances.append(np.linalg.norm(np.array(imgCenter) - np.array(center)))
    nearestLabel = np.argmin(distances)
    return nearestLabel
6414d30cb1d591b5bf6f8101c084823f92605f1e
29,781
from flask_swagger import swagger import json def create_app(config_object=ProdConfig): """This function is an application factory. As explained here: http://flask.pocoo.org/docs/patterns/appfactories/. :param config_object: The configuration object to use. """ app = Flask(__name__.split('.')[0]) app.config.from_object(config_object) register_blueprints(app) register_extensions(app) register_errorhandlers(app) register_shellcontext(app) register_commands(app) register_admin(app) @app.context_processor def inject_fb_app_ID(): return dict( fb_app_id=app.config['FB_APP_ID'], stripe_pub_key=app.config['STRIPE_PUBLISHABLE_KEY'], embed_entity=json.dumps(g.embed_entity), current_user_json=json.dumps(g.current_user_json), mixpanel_enabled=app.config['MIXPANEL_ENABLED'], mixpanel_id=app.config['MIXPANEL_ID'], ) @app.before_request def embed_entity(): """Embed the entity based on the request.""" # NOTE: this relies pretty heavily on the fact that the before_request # runs before the context_processor. If that's ever False, we'll have # to change how this works. assign_requested_entity() @app.before_request def set_sentry_user_context(): """Add the user to the sentry context.""" sentry.user_context({'id': getattr(current_user, 'id', None)}) @app.route("/swagger/spec") def spec(): swag = swagger(app) swag['info']['version'] = "1.0" swag['info']['title'] = "Ceraon API" return jsonify(swag) @app.route("/swagger/docs") def api_doct(): return render_template('swagger-index.html') return app
b9214ee822c8012a5273cf72daa04f33b9b99539
29,782
def get_f1_dist1(y_true, y_pred, smooth=default_smooth):
    """Helper to turn the F1 score into a loss"""
    return 1 - get_f1_score1(y_true, y_pred, smooth)
c200bbe75f7013a75b2801fbb7de4cbf588bb873
29,783
def get_date_info_for_pids_tables(project_id, client): """ Loop through tables within all datasets and determine if the table has an end_date date or a date field. Filtering out the person table and keeping only tables with PID and an upload or start/end date associated. :param project_id: bq name of project_id :param client: bq client object :return: filtered dataframe which includes the following columns for each table in each dataset with a person_id 'project_id', 'dataset_id', 'table', 'date_column', 'start_date_column', 'end_date_column' """ # Create empty df to append to for final output date_fields_info_df = pd.DataFrame() # Loop through datasets LOGGER.info( "Looping through datasets to filter and create dataframe with correct date field to determine retraction" ) dataset_obj = client.list_datasets(project_id) datasets = [d.dataset_id for d in dataset_obj] # Remove synthetic data, vocabulary, curation sandbox and previous naming convention datasets prefixes = ('SR', 'vocabulary', 'curation', 'combined', '2018', 'R2018', 'rdr') datasets = [x for x in datasets if not x.startswith(prefixes)] for dataset in datasets: LOGGER.info(f'Starting to iterate through dataset: {dataset}') # Get table info for tables with pids pids_tables_df = get_pids_table_info(project_id, dataset, client) # Check to see if dataset is empty, if empty break out of loop if pids_tables_df.empty: LOGGER.info( f'No tables in dataset:{dataset}, skipping over dataset') continue # Keep only records with datatype of 'DATE' date_fields_df = pids_tables_df[pids_tables_df['data_type'] == 'DATE'] # Create empty df to append to, keeping only one record per table df_to_append = pd.DataFrame(columns=[ 'project_id', 'dataset_id', 'table', 'date_column', 'start_date_column', 'end_date_column' ]) df_to_append['project_id'] = date_fields_df['table_catalog'] df_to_append['dataset_id'] = date_fields_df['table_schema'] df_to_append['table'] = date_fields_df['table_name'] df_to_append = df_to_append.drop_duplicates() # Create new df to loop through date time fields df_to_iterate = pd.DataFrame( columns=['project_id', 'dataset_id', 'table', 'column']) df_to_iterate['project_id'] = date_fields_df['table_catalog'] df_to_iterate['dataset_id'] = date_fields_df['table_schema'] df_to_iterate['table'] = date_fields_df['table_name'] df_to_iterate['column'] = date_fields_df['column_name'] # Remove person table df_to_append = df_to_append[~df_to_append.table.str.contains('person')] df_to_iterate = df_to_iterate[~df_to_iterate.table.str.contains('person' )] # Filter through date columns and append to the appropriate column for _, row in df_to_iterate.iterrows(): column = getattr(row, 'column') table = getattr(row, 'table') if 'start_date' in column: df_to_append.loc[df_to_append.table == table, 'start_date_column'] = column elif 'end_date' in column: df_to_append.loc[df_to_append.table == table, 'end_date_column'] = column else: df_to_append.loc[df_to_append.table == table, 'date_column'] = column date_fields_info_df = date_fields_info_df.append(df_to_append) LOGGER.info(f'Iteration complete through dataset: {dataset}') return date_fields_info_df
e75dd8176583673e8ee6e89a8c75312fe06216bc
29,784
def preprocess_sent(sentence): """input: a string containing multiple sentences; output: a list of tokenized sentences""" sentence = fill_whitespace_in_quote(sentence) output = tokenizer0(sentence) # tokens = [token.text for token in tokenizer.tokenize(sentence)] tokens = list(map(lambda x: x.text, output)) ret_sentences = [] st = 0 # fix for ',' new_tokens = [] for i, token in enumerate(tokens): if token.endswith(','): new_tokens += [token.rstrip(','), ','] else: new_tokens += [token] tokens = new_tokens for i, token in enumerate(tokens): if token.endswith('.'): ret_sentences.append(tokens[st: i] + [token.strip('.')]) st = i + 1 return ret_sentences
eddaa08e3b0a8ad43f410541793f873449101904
29,785
def average_slope_intercept(image, lines): """ This function combines line segments into one or two lane lines """ left_fit = [] # contains the coordinate of on the line in the left right_fit = [] if lines is None: return None # now loop through very line we did previously for line in lines: for x1, y1, x2, y2 in line: fit = np.polyfit((x1, x2), (y1, y2), 1) slope = fit[0] intercept = fit[1] if slope < 0: # y is reversed in image left_fit.append((slope, intercept)) else: right_fit.append((slope, intercept)) """now What polyfit will do for us is it will fit a first degree polynomial, which would simply be a linear function of Y= mx + b it's going to fit this polynomial to our X and Y points and return a vector of coefficients which describe the slope and Y intercept. """ # add more weight to longer lines left_fit_average = np.average(left_fit, axis=0) right_fit_average = np.average(right_fit, axis=0) left_line = make_coordinates(image, left_fit_average) right_line = make_coordinates(image, right_fit_average) averaged_lines = [left_line, right_line] return averaged_lines
b6dbd56555d5c9d71905b808db20e4f8be6de15f
29,787
def build_upper_limb_roll_jnts(main_net, roll_jnt_count=3): """Add roll jnts, count must be at least 1""" increment = 1.0/float(roll_jnt_count) def create_joints(jnt_a, jnt_b, net, lower_limb=False, up_axis='-Z'): """ :param jnt_a: Start Joint :param jnt_b: End Joint :param net: Limb Network node :param lower_limb: This uses an aim constraint method for end driven rotation, the default uses a ikSc Solver for upper arm and upper leg rotation that is driven by the parent jnt. :param up_axis: For placement of the up locator, must be specified as positive or negative. '+Z', '-X', '+Y' :return: """ driver_a, driver_b = create_driver_rig(jnt_a, jnt_b, net, reverse=lower_limb, up_axis=up_axis) info = naming_utils.ItemInfo(jnt_a) # Create Roll Joint for roll_idx in range(roll_jnt_count): weight_b = increment * roll_idx weight_a = 1 - weight_b name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Roll', consts.INDEX[roll_idx]]) type = naming_utils.concatenate([info.base_name, info.joint_name, 'Roll', consts.INDEX[roll_idx]]) dup_jnt = pymel.duplicate(jnt_a, name=name)[0] dup_jnt.radius.set(8) dup_jnt.setAttr('otherType', type) naming_utils.add_tags(dup_jnt, {'_skin': 'True'}) pymel.delete(dup_jnt.getChildren()) # Parent Roll joint to Jnt A dup_jnt.setParent(jnt_a) naming_utils.add_tags(dup_jnt, {'Network': net.name(), 'Utility': 'Roll'}) point_con = pymel.pointConstraint([jnt_a, jnt_b, dup_jnt]) # Weighting toward child point_con.w0.set(weight_a) point_con.w1.set(weight_b) # Multi Node name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Multi', consts.INDEX[roll_idx]]) multi_utility = pymel.shadingNode('multiplyDivide', name=name, asUtility=True) naming_utils.add_tags(multi_utility, {'Network': net.name(), 'Utility': 'Roll'}) # Using jnt_a for Shoulder and Upper Leg driver_a.rotateX.connect(multi_utility.input1X) multi_utility.input2X.set(weight_a) multi_utility.outputX.connect(dup_jnt.rotateX) def create_driver_rig(jnt_a, jnt_b, net, reverse=False, up_axis=None): info = naming_utils.ItemInfo(jnt_a) # Driver Group grp_name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Roll', 'GRP']) grp = virtual_classes.TransformNode(name=grp_name) naming_utils.add_tags(grp, {'Network': net.name()}) pymel.parentConstraint([jnt_a, grp]) grp.setParent(grp.limb_grp) # Driver A new_name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Driver', 'A']) driver_a = pymel.duplicate(jnt_a, name=new_name)[0] pymel.delete(driver_a.getChildren()) driver_a.setTranslation(jnt_b.getTranslation(worldSpace=True), worldSpace=True) driver_a.setParent(grp) # Driver B new_name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Driver', 'B']) driver_b = pymel.duplicate(jnt_a, name=new_name)[0] pymel.delete(driver_b.getChildren()) driver_b.setParent(driver_a) ikhandle_name = naming_utils.concatenate([info.side, info.base_name, info.joint_name, 'Roll', 'IK']) ikhandle = pymel.ikHandle(startJoint=driver_a, endEffector=driver_b, name=ikhandle_name, solver='ikSCsolver')[0] ikhandle.setParent(grp) pymel.parentConstraint([jnt_a.getParent(), ikhandle], maintainOffset=True) return driver_a, driver_b # Add Upper Arm Roll # idx[0] LEFT Side, idx[1] Right Side for idx in range(2): # UpperArm create_joints(main_net.arms[idx].jnts[0], main_net.arms[idx].jnts[1], main_net.arms[idx]) # UpperLeg create_joints(main_net.legs[idx].jnts[0], main_net.legs[idx].jnts[1], main_net.legs[idx])
0985a33a9fac3b218042cac4d1213df13f606464
29,788
def create_one(**kwargs): """ Create a Prediction object with the given fields. Args: Named arguments. date: Date object. Date of the predicted changes. profitable_change: Decimal. Predicted profitable change in pips. instrument: Instrument object. score: Float. The cross validation score of this prediction. predictor: Predictor object. Returns: Predicton object with the given fields. """ return Prediction(**kwargs)
d71d14445c5e53c032b529652f3ed5fb12df6544
29,790
import math


def get_distance_wgs84(lon1, lat1, lon2, lat2):
    """
    Based on the WGS-84 algorithm from https://github.com/googollee/eviltransform.

    :param lon1: longitude of point 1
    :param lat1: latitude of point 1
    :param lon2: longitude of point 2
    :param lat2: latitude of point 2
    :return: distance in meters
    """
    earthR = 6378137.0
    pi180 = math.pi / 180
    arcLatA = lat1 * pi180
    arcLatB = lat2 * pi180
    x = (math.cos(arcLatA) * math.cos(arcLatB) * math.cos((lon1 - lon2) * pi180))
    y = math.sin(arcLatA) * math.sin(arcLatB)
    s = x + y
    if s > 1:
        s = 1
    if s < -1:
        s = -1
    alpha = math.acos(s)
    distance = alpha * earthR
    return distance
8da67a3a690ff0cb548dc31fb65f3b2133fa3e3f
29,792
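A quick usage sketch; the coordinates below are arbitrary illustrative points (roughly Beijing and Shanghai), not taken from the original snippet.

# Distance between two illustrative points, in meters.
d = get_distance_wgs84(116.3975, 39.9087, 121.4737, 31.2304)
print(round(d / 1000))  # roughly 1070 km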
from uuid import uuid4


def make_uuid(ftype, size=6):
    """
    Unique id for a type, e.g. 'run-3f2a1b'.
    """
    uuid = str(uuid4())[:size]
    return f'{ftype}-{uuid}'
3cb779431e5cb452f63f8bab639e9a437d7aa0f9
29,793
import cv2
import numpy as np


def region_of_interest(img, vertices):
    """
    Applies an image mask.

    Only keeps the region of the image defined by the polygon formed from
    `vertices`. The rest of the image is set to black. `vertices` should be
    a numpy array of integer points.

    Args:
        img (image): Mask this image.
        vertices (numpy array of integers): A polygon to use for the mask.

    Returns:
        image: The masked image.
    """
    # Define a blank mask to start with
    mask = np.zeros_like(img)

    # Define a 3 channel or 1 channel color to fill the mask with depending
    # on the input image
    if len(img.shape) > 2:
        channel_count = img.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255

    # Fill pixels inside the polygon defined by "vertices" with the fill color
    cv2.fillPoly(mask, vertices, ignore_mask_color)

    # Return the image only where mask pixels are nonzero
    masked_image = cv2.bitwise_and(img, mask)
    return masked_image
bd22cd8b6642dd820e786f88d54c63f4811c5221
29,794
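A minimal usage sketch, assuming a grayscale or BGR image `img` loaded elsewhere; the triangle below is an arbitrary example region.

# Keep only a triangular region near the bottom of the image.
height, width = img.shape[:2]
vertices = np.array([[(0, height), (width // 2, height // 2), (width, height)]], dtype=np.int32)
masked = region_of_interest(img, vertices)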
import pytz def _adjust_utc_datetime_to_phone_datetime(value, phone_tz): """ adjust a UTC datetime so that it's comparable with a phone datetime (like timeEnd, modified_on, etc.) returns a timezone-aware date """ phone_tz = _soft_assert_tz_not_string(phone_tz) assert value.tzinfo is None if phone_timezones_have_been_processed(): return value.replace(tzinfo=pytz.utc) else: return _adjust_utc_datetime_to_timezone(value, phone_tz)
48a875af1082d1538c6bf09905afacc411e1408c
29,795
def cache_function(length=CACHE_TIMEOUT): """ A variant of the snippet posted by Jeff Wheeler at http://www.djangosnippets.org/snippets/109/ Caches a function, using the function and its arguments as the key, and the return value as the value saved. It passes all arguments on to the function, as it should. The decorator itself takes a length argument, which is the number of seconds the cache will keep the result around. It will put a temp value in the cache while the function is processing. This should not matter in most cases, but if the app is using threads, you won't be able to get the previous value, and will need to wait until the function finishes. If this is not desired behavior, you can remove the first two lines after the ``else``. """ def decorator(func): def inner_func(*args, **kwargs): if not cache_enabled(): value = func(*args, **kwargs) else: try: value = cache_get('func', func.__name__, func.__module__, args, kwargs) except NotCachedError as e: # This will set a temporary value while ``func`` is being # processed. When using threads, this is vital, as otherwise # the function can be called several times before it finishes # and is put into the cache. funcwrapper = CacheWrapper(".".join([func.__module__, func.__name__]), inprocess=True) cache_set(e.key, value=funcwrapper, length=length, skiplog=True) value = func(*args, **kwargs) cache_set(e.key, value=value, length=length) except MethodNotFinishedError as e: value = func(*args, **kwargs) return value return inner_func return decorator
81c2574621e81c485712e456f9b34c638f47cdf8
29,796
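A hedged usage sketch of the decorator above; it assumes the project's cache helpers (cache_enabled, cache_get, cache_set, CacheWrapper, and the exception types) are configured, and `expensive_lookup` is a hypothetical function.

@cache_function(length=600)
def expensive_lookup(product_id):
    # Imagine a slow database or API call here.
    return {"id": product_id}

value = expensive_lookup(42)   # first call runs the function and stores the result
value = expensive_lookup(42)   # second call within 600 seconds is served from the cache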
import matplotlib.pyplot as plt


def plot_contour_1d(X_grid, Y_grid, data,
                    xlabel, ylabel, xticks, yticks,
                    metric_bars, fillblack=True):
    """Create contour plots and return the figure and the axes."""
    n = len(metric_bars)
    assert data.shape == (*X_grid.shape, n), (
        "data shape must be (X, Y, M), where (X, Y) is the shape of both X_grid and Y_grid, "
        "and M is the number of metrics"
    )
    gs_kw = dict(height_ratios=[1, 0.25])
    fig, axes = plt.subplots(ncols=n, nrows=2, gridspec_kw=gs_kw)
    for i in range(n):
        metric_bar = metric_bars[i]
        ax = axes[0, i]
        plt.sca(ax)
        style_plot_limits(xticks, yticks)
        yticklabels = i == 0
        xticklabels = True
        if fillblack:
            fill_plot()
        cp = plt.contourf(X_grid, Y_grid, data[:, :, i],
                          levels=metric_bar.levels,
                          cmap=metric_bar.cmap)
        style_axis(ax, xticks, yticks, xticklabels, yticklabels)
        cbar_ax = axes[1, i]
        plt.sca(cbar_ax)
        metric_bar.colorbar(fig, cbar_ax, cp, shrink=0.8, orientation='horizontal')
        plt.axis('off')
    set_axes_labels(axes[:-1], xlabel, ylabel)
    plt.subplots_adjust(hspace=0.1, wspace=0.1)
    return fig, axes
2eb534ebe841a96386976fa477ec357b5ea07ea7
29,798
def dataset_to_rasa(dataset: JsonDict) -> JsonDict: """Convert dataset to RASA format, ignoring entities See: "https://rasa.com/docs/nlu/dataformat/" """ return { "rasa_nlu_data": { "common_examples": [ {"text": text, "intent": intent, "entities": []} for intent, text in _dataset_intent_iterator(dataset) ] } }
64a9b6291b0a2d2af297e3fb0ca214b8d65792f4
29,799
import math

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
from tqdm import tqdm


def SurfacePlot(Fields, save_plot, as_video=False, Freq=None, W=None, L=None, h=None, Er=None):
    """Plots 3D surface plot over given theta/phi range in Fields by calculating cartesian coordinate equivalent of spherical form."""
    print("Processing SurfacePlot...")

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # ax = Axes3D(fig)

    phiSize = Fields.shape[0]  # Finds the phi & theta range
    thetaSize = Fields.shape[1]

    X = np.ones((phiSize, thetaSize))  # Prepare arrays to hold the cartesian coordinate data.
    Y = np.ones((phiSize, thetaSize))
    Z = np.ones((phiSize, thetaSize))

    for phi in range(phiSize):  # Iterate over all phi/theta range
        for theta in range(thetaSize):
            e = Fields[phi][theta]

            xe, ye, ze = sph2cart1(e, math.radians(theta), math.radians(phi))  # Calculate cartesian coordinates

            X[phi, theta] = xe  # Store cartesian coordinates
            Y[phi, theta] = ye
            Z[phi, theta] = ze

    def init():
        ax.plot_surface(X, Y, Z, color='b')  # Plot surface
        plt.ylabel('Y')
        plt.xlabel('X')  # Plot formatting
        if W is not None:
            plt.title("Patch: \nW=" + str(W) + " \nL=" + str(L) + "\nEr=" + str(Er) + " h=" + str(h) + " \n@" + str(Freq) + "Hz")
        return fig,

    def animate(i):
        ax.view_init(elev=10., azim=i)
        return fig,

    if save_plot is not None:
        # Animate
        init()
        if as_video:
            plt.show()
            print("Recording Radiation Video ...")
            anim = animation.FuncAnimation(fig, animate, init_func=init,
                                           frames=tqdm(range(360)), interval=20, blit=True)
            # Save
            anim.save(save_plot, fps=30, extra_args=['-vcodec', 'libx264'])
        else:
            ax.view_init(elev=10., azim=45)
            plt.tight_layout()
            plt.savefig(save_plot)
            plt.show()
    else:
        init()
        plt.show()
23b0c3f8f569d480f3818ed970ea41adce51dacc
29,800
def resource_method_wrapper(method): """ Wrap a 0-ary resource method as a generic renderer backend. >>> @resource_method_wrapper ... def func(resource): ... print repr(resource) >>> action = "abc" >>> resource = "def" >>> func(action, resource) 'def' """ def generic_renderer_backend(action, resource): return method(resource) return generic_renderer_backend
e07bd139586a7b80d48c246ea831b39c3183224e
29,801
import numpy as np


def rotation_matrix(axis_vector, angle, degrees=True):
    """
    Return the rotation matrix corresponding to a rotation axis and angle

    For more information, see:
    https://en.wikipedia.org/wiki/Rotation_matrix#Rotation_matrix_from_axis_and_angle

    Parameters
    ----------
    axis_vector : 3 x 1 numpy array
        A unit vector of the axis of rotation
    angle : float
        Angle of rotation in degrees unless otherwise specified
    degrees : bool (optional)
        Choose between units of degrees or radians. Default True, so degrees

    Returns
    -------
    rot_mat : 3 x 3 numpy array
        Rotation matrix
    """
    ang = angle
    if degrees:
        ang = np.radians(ang)

    # the matrix is the sum of 3 terms
    cos_id = np.cos(ang) * np.identity(3)
    sin_cross = np.sin(ang) * cross_product_matrix(axis_vector)
    cos_tens = (1 - np.cos(ang)) * np.outer(axis_vector, axis_vector)

    # total
    rot_mat = cos_id + sin_cross + cos_tens
    return rot_mat
c48711d2b4d2bb8ca5ac01ee2c51778f4a9525fd
29,802
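A small worked example, assuming the `cross_product_matrix` helper used above is available elsewhere in the module: rotating the x unit vector by 90 degrees about the z axis should give (approximately) the y unit vector.

z_axis = np.array([0.0, 0.0, 1.0])
R = rotation_matrix(z_axis, 90)
print(R @ np.array([1.0, 0.0, 0.0]))  # approximately [0., 1., 0.]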
def select_int(sql, *args):
    """
    Execute a SQL statement and return a single scalar value.

    :param sql: SQL statement to execute
    :param args: parameters for the SQL statement
    :return: the value of the single selected column
    """
    d = _select(sql, True, *args)
    if d is None:
        raise StandardError('Result is None')
    if len(d) != 1:
        raise MultiColumnsError('Expect only one column.')
    return d.values()[0]
72826727a45cfa902e710371c8bf4bc9fcd07528
29,803
import logging import torch def compute_nas_score(any_plain_net, random_structure_str, gpu, args): """ compute net score :param any_plain_net: model class :param random_structure_str (str): model string :param gpu (int): gpu index :param args (list): sys.argv :return score """ # compute network zero-shot proxy score the_model = any_plain_net(num_classes=args.num_classes, plainnet_struct=random_structure_str, no_create=False, no_reslink=True) the_model = the_model.cuda(gpu) try: if args.zero_shot_score == 'Zen': the_nas_core_info = compute_zen_score.compute_nas_score(model=the_model, gpu=gpu, resolution=args.input_image_size, mixup_gamma=args.gamma, batch_size=args.batch_size, repeat=1) the_nas_core = the_nas_core_info['avg_nas_score'] elif args.zero_shot_score == 'TE-NAS': the_nas_core = compute_te_nas_score.compute_NTK_score(model=the_model, gpu=gpu, resolution=args.input_image_size, batch_size=args.batch_size) elif args.zero_shot_score == 'Syncflow': the_nas_core = compute_syncflow_score.do_compute_nas_score(model=the_model, gpu=gpu, resolution=args.input_image_size, batch_size=args.batch_size) elif args.zero_shot_score == 'GradNorm': the_nas_core = compute_gradnorm_score.compute_nas_score(model=the_model, gpu=gpu, resolution=args.input_image_size, batch_size=args.batch_size) elif args.zero_shot_score == 'Flops': the_nas_core = the_model.get_FLOPs(args.input_image_size) elif args.zero_shot_score == 'Params': the_nas_core = the_model.get_model_size() elif args.zero_shot_score == 'Random': the_nas_core = np.random.randn() elif args.zero_shot_score == 'NASWOT': the_nas_core = compute_NASWOT_score.compute_nas_score(gpu=gpu, model=the_model, resolution=args.input_image_size, batch_size=args.batch_size) except Exception as err: # pylint: disable=broad-except logging.info(str(err)) logging.info('--- Failed structure: ') logging.info(str(the_model)) # raise err the_nas_core = -9999 _ = the_model.cpu() del the_model torch.cuda.empty_cache() return the_nas_core
6188c88151ca501c22a85cccd65bbc01d99c721f
29,805
from testtools.matchers import AfterPreprocessing, MatchesAll


def _dict_values_match(*args, **kwargs):
    """
    Matcher that matches a dict where each of the keys match the matcher
    passed in. Similar to ``MatchesStructure``, but for dictionaries rather
    than python objects.
    """
    matchers = dict(*args, **kwargs)

    def extract_val(key):
        def extract_val_for_key(d):
            return d.get(key)
        return extract_val_for_key

    return MatchesAll(*list(AfterPreprocessing(extract_val(key), value)
                            for key, value in matchers.iteritems()))
b463eb1f24117fb1c37793656632931af05f3a7c
29,806
def fib_lista(n):
    """
    Return a list containing the Fibonacci sequence numbers up to n.
    """
    lista = []
    i, j = 0, 1
    while i < n:
        lista.append(i)
        i, j = j, i + j
    return lista
ec307ce80ae70e5fba81d2e26b140f1b86c95619
29,807
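A quick check of the function above:

print(fib_lista(20))  # [0, 1, 1, 2, 3, 5, 8, 13]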
def make_item_accessor(idx): """ Returns a property that mirrors access to the idx-th value of an object. """ @property def attr(self): return self[idx] @attr.setter def attr(self, value): self[idx] = value return attr
7cd1248b3f9402fc9be10d277dee849dc47840c0
29,810
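A minimal sketch showing how the accessor factory can expose list items as named attributes; the Point class here is a hypothetical example, not part of the original snippet.

class Point(list):
    x = make_item_accessor(0)
    y = make_item_accessor(1)

p = Point([3, 4])
print(p.x, p.y)   # 3 4
p.x = 10
print(p)          # [10, 4]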
def calc_correlation(data, data2):
    """
    Calculate the pairwise column correlations between two DataFrames.

    Parameters:
        - data: The first dataframe.
        - data2: The second dataframe.

    Returns:
        A Series() object.
    """
    return (
        data.corrwith(data2).
        loc[lambda x: x.notnull()]
    )
7f47592a4525efa9db2fba317d095448d5288399
29,811
def boschloo_swap(c1r1: int, c2r1: int, c1r2: int, c2r2: int) -> (int, int, int, int): """ Four contingency tables always give the same pvalue: ['abcd', 'badc', 'cdab', 'dcba'] Compute and save only one version. """ if c1r1 + c1r2 > c2r1 + c2r2: # left > right c1r1, c1r2, c2r1, c2r2 = c2r1, c2r2, c1r1, c1r2 if c1r1 + c2r1 > c1r2 + c2r2: # left > right c1r1, c2r1, c1r2, c2r2 = c1r2, c2r2, c1r1, c2r1 return c1r1, c2r1, c1r2, c2r2
4da7cccd892dcf03412509c4df79132f8ebd5ad1
29,812
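A small check that two of the equivalent table arrangements collapse to the same canonical form (the values are arbitrary):

print(boschloo_swap(5, 2, 3, 4))  # (2, 5, 4, 3)
print(boschloo_swap(2, 5, 4, 3))  # (2, 5, 4, 3)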
def get_unit_scale(scale60, val30=1): """ Returns a function to be used in the UNIT_SCALE of a descriptor that will change the scale depending on if the 60fps flag is set or not. """ assert 0 not in (val30, scale60), ("60fps scale and default 30fps " + "value must both be non-zero.") val60 = val30*scale60 key = (val30, val60) if key in _func_unit_scales: return _func_unit_scales[key] def unit_scale(*args, **kwargs): w = kwargs.get('f_widget') if kwargs.get('get_scales'): # used for getting both the 30 and 60 fps scales return (val30, val60) try: if w.tag_window.save_as_60: return val60 except AttributeError: return val30 _func_unit_scales[key] = unit_scale unit_scale.fps_60_scale = True return unit_scale
36463d8e7d0cf46bce527cf9cefccdcf730b4414
29,813
def sample_ingredient(user, name='cinnamon'):
    """Create and return a sample ingredient."""
    return Ingredient.objects.create(user=user, name=name)
3ccf096e68ed25dc4c35cf2abf68e9139c34b82c
29,814
from functools import wraps

import six


def before(action):
    """Decorator to execute the given action function *before* the responder.

    Args:
        action: A function with a similar signature to a resource responder
            method, taking (req, resp, params), where params includes values
            for URI template field names, if any. Hooks may also add
            pseudo-params of their own. For example:

                def do_something(req, resp, params):
                    try:
                        params['id'] = int(params['id'])
                    except ValueError:
                        raise falcon.HTTPBadRequest('Invalid ID', 'ID was not valid.')

                    params['answer'] = 42

    """

    def _before(responder_or_resource):
        if isinstance(responder_or_resource, six.class_types):
            resource = responder_or_resource

            for method in HTTP_METHODS:
                responder_name = 'on_' + method.lower()

                try:
                    responder = getattr(resource, responder_name)
                except AttributeError:
                    # resource does not implement this method
                    pass
                else:
                    # Usually expect a method, but any callable will do
                    if hasattr(responder, '__call__'):
                        # This pattern is necessary to capture the current
                        # value of responder in the do_before_all closure;
                        # otherwise, they will capture the same responder
                        # variable that is shared between iterations of the
                        # for loop, above.
                        def let(responder=responder):
                            @wraps(responder)
                            def do_before_all(self, req, resp, **kwargs):
                                action(req, resp, kwargs)
                                responder(self, req, resp, **kwargs)

                            api_helpers._propagate_argspec(
                                do_before_all, responder)

                            setattr(resource, responder_name, do_before_all)

                        let()

            return resource

        else:
            responder = responder_or_resource

            @wraps(responder)
            def do_before_one(self, req, resp, **kwargs):
                action(req, resp, kwargs)
                responder(self, req, resp, **kwargs)

            api_helpers._propagate_argspec(do_before_one, responder)

            return do_before_one

    return _before
d385f6b9ab45546cd2c07612528ad487ae5363d9
29,815
def commandLine(Argv):
    """
    Convert a list of arguments/parameters into a command-line string
    (for example, to include in the invocation of a program).
    list --> str
    """
    assert type(Argv) is list, "The argument of this method is the list of arguments to convert into command-line format."
    commandLine = ''
    for i in Argv[1::]:
        commandLine += i + " "
    return commandLine
4b27e73fd43ec914f75c22f2482271aafd0848ac
29,816
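A quick usage sketch (note that the result skips the first element, mirroring sys.argv conventions, and keeps a trailing space):

print(commandLine(['prog.py', '--input', 'data.txt', '-v']))  # '--input data.txt -v '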
def prob(n: int, p: float) -> float:
    """
    Parameters:
        - n (int): number of trials performed
        - p (float): probability that a single trial succeeds

    Returns:
        - float: the geometric probability (first success on trial n)
    """
    pr = p * (1 - p) ** (n - 1)
    return pr
fca3fab45ec852c8910619889ac19b0753f5b498
29,818
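A quick worked example: with p = 0.5, the probability that the first success happens on the third trial is 0.5 * 0.5**2 = 0.125.

print(prob(3, 0.5))  # 0.125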
from typing import Mapping

import pandas
from pandas import DataFrame
import numpy


def to_overall_gpu_process_df(gpu_stats: Mapping) -> DataFrame:
    """Collect per-process GPU stats from all machines into one DataFrame sorted by used GPU memory."""
    resulta = []
    columns = []
    for k2, v2 in gpu_stats.items():
        device_info = v2["devices"]
        for device_i in device_info:
            processes = device_i["processes"]
            if len(processes) > 0:
                columns = list(processes[0].keys())
                df = pandas.DataFrame(data=processes)
                df["machine"] = [k2] * len(processes)
                resulta.append(df)

    out_df = pandas.concat(resulta, sort=False)
    out_df.sort_values(by="used_gpu_mem", axis=0, ascending=False, inplace=True)

    if len(out_df) == 0:
        return pandas.DataFrame()

    k = columns
    idx = ["machine", *k]
    out_df = out_df[idx]

    out_df.create_time = out_df.create_time.map(timestamp_to_datetime)

    for c in INT_COLUMNS:
        out_df[c] = out_df[c].astype(int)

    for c in PERCENT_COLUMNS:
        out_df[c] = numpy.round(out_df[c], 2)

    for c in MB_COLUMNS:
        out_df[c] = numpy.round(out_df[c] // MB_DIVISOR, 2)

    out_cols = [c for c in out_df.columns if c not in DROP_COLUMNS]
    out_df = out_df[out_cols]

    return out_df
7fd260b1a0d232f42958d3c073300ad6e7098c2c
29,819
from teospy import liq5_f03 def genliq5(): """Generate liq5_f03 Testers. """ funs = liq5_f03.liq_g args1 = (300.,1e5) fargs = [(der+args1) for der in _DERS2] refs = [-5265.05056073,-393.062597709,0.100345554745e-2,-13.9354762020, 0.275754520492e-6,-0.452067557155e-12] fnames = 'liq_g' argfmt = '({0:3d},{1:3d})' header = 'Feistel 2003 g derivatives' testliq5_1 = Tester(funs,fargs,refs,fnames,argfmt,header=header) funs = [liq5_f03.cp,liq5_f03.density,liq5_f03.expansion,liq5_f03.kappa_t, liq5_f03.soundspeed] fargs = args1 refs = [4180.64286060,996.556352243,0.274804919056e-3,0.450510795725e-9, 1501.52808421] fnames = ['cp','density','expansion','kappa_t','soundspeed'] argfmt = '({0:3g},{1:3g})' header = 'Feistel 2003 functions' testliq5_2 = Tester(funs,fargs,refs,fnames,argfmt,header=header) return (testliq5_1,testliq5_2)
4051aaa831bcfb74a5e871e1619b982ad06cc859
29,820
import numpy as np


def tridiagonalize_by_lanczos(P, m, k):
    """
    Tridiagonalize matrix by lanczos method

    Parameters
    ----------
    P : numpy array
        Target matrix
    m : numpy array
        Initial vector
    k : int
        Size of the tridiagonal matrix

    Returns
    -------
    T : numpy array
        tridiagonal matrix
    """
    # Initialize variables
    T = np.zeros((k, k))
    r0 = m
    beta0 = 1
    q0 = np.zeros(m.shape)

    for i in range(k):
        q1 = r0 / beta0
        C = np.dot(P, q1)
        alpha1 = np.dot(q1, C)
        r1 = C - alpha1 * q1 - beta0 * q0
        beta1 = np.linalg.norm(r1)

        T[i, i] = alpha1
        if i + 1 < k:
            T[i, i + 1] = beta1
            T[i + 1, i] = beta1

        q0 = q1
        beta0 = beta1
        r0 = r1

    return T
0becf3801e7e486fd0a59fac95223e4d9ca68957
29,822
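A minimal sketch on a small symmetric matrix; the starting vector is chosen arbitrarily and, under the convention assumed here, should have unit norm.

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
A = (A + A.T) / 2               # make the matrix symmetric
v0 = np.zeros(5)
v0[0] = 1.0                     # unit-norm starting vector
T = tridiagonalize_by_lanczos(A, v0, 3)
print(T.shape)                  # (3, 3)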
from lxml.etree import Element


def Polygon(xpoints, ypoints, name="", visible=True, strfmt="{:.5f}"):
    """
    Polygon defined by point vertices.

    Returns
    --------
    :class:`lxml.etree.Element`
    """
    polygon = Element("Polygon", name=str(name), visible=str(visible).lower())
    polygon.extend(
        [
            Element("Point", x=strfmt.format(x), y=strfmt.format(y))
            for x, y in zip(xpoints, ypoints)
        ]
    )
    return polygon
f2cd966a0dd536b8134ccaab4a85607e8511c60c
29,823
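A short usage sketch, serializing the resulting element with lxml (assuming lxml is installed):

from lxml import etree

poly = Polygon([0, 1, 1], [0, 0, 1], name="triangle")
print(etree.tostring(poly, pretty_print=True).decode())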
def fake_index_by_name(name, pattern, timezone='+08:00'):
    """
    generate a fake index name for index template matching
    ATTENTION:
     - rollover postfix is not supported in index template pattern
     - timezone is not supported cause tz is not well supported in python 2.x
    """
    if pattern == 'YYYY.MM.dd':
        return '%s-%s-000001' % (name, now.strftime('%Y.%m.%d'))
    elif pattern == 'YYYY.MM':
        return '%s-%s-000001' % (name, now.strftime('%Y.%m'))
    elif pattern == 'YYYY':
        return '%s-%s-000001' % (name, now.strftime('%Y'))
    else:
        return '%s-000001' % name
b8754ce0086409c75edba8d6bc14b7ca313d56ae
29,824
def map_code(func):
    """
    Map v to an Ontology code, passing None through unchanged.
    """
    def mapper(v):
        if v is None:
            return v
        else:
            return func(str(v))
    return mapper
76eb3c6756c983fd73c180b57c1c998a348d32eb
29,825
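A small sketch of how the wrapper preserves None while mapping other values; str.upper stands in for a real code-mapping function.

to_code = map_code(str.upper)
print(to_code("abc"))   # 'ABC'
print(to_code(None))    # None
print(to_code(123))     # '123', converted with str() before mapping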
from colorama import Fore, Style


def install_dvwa(instance, verbose: bool=True):
    """
    Install and configure the DVWA web server.

    instance (object): This argument defines the lxc instance.
    verbose (bool, optional): This argument defines whether the function prints
        information during its execution. Defaults to True.
    """
    if update(instance, verbose=False):
        return 1
    if install(instance, {"module":"nmap"}, verbose=False):
        return 1
    if install(instance, {"module":"apache2"}, verbose=False):
        return 1
    if install(instance, {"module":"mysql-server"}, verbose=False):
        return 1
    if install(instance, {"module":"php"}, verbose=False):
        return 1
    if install(instance, {"module":"php7.2-mysql"}, verbose=False):
        return 1
    if install(instance, {"module":"php-gd"}, verbose=False):
        return 1
    if install(instance, {"module":"libapache2-mod-php"}, verbose=False):
        return 1
    if install(instance, {"module":"git"}, verbose=False):
        return 1
    if delete_file(instance, {"instance_path":"/var/www/html/index.html"}, verbose=False):
        return 1
    git_clone(instance, {"branch":"","repository":"https://github.com/ethicalhack3r/DVWA","instance_path":"/var/www/html/"}, verbose=False)
    result = execute_command(instance, {"command":["cp", "/var/www/html/config/config.inc.php.dist", "/var/www/html/config/config.inc.php"], "expected_exit_code":"0"}, verbose=False)
    if result.exit_code == 0:
        execute_command(instance, {"command":["mysql", "-e", "create database dvwa;"], "expected_exit_code":"0"}, verbose=False)
        execute_command(instance, {"command":["mysql", "-e", "create user dvwa@localhost identified by 'p@ssw0rd';"], "expected_exit_code":"0"}, verbose=False)
        execute_command(instance, {"command":["mysql", "-e", "grant all on dvwa.* to dvwa@localhost;"], "expected_exit_code":"0"}, verbose=False)
        execute_command(instance, {"command":["mysql", "-e", "flush privileges;"], "expected_exit_code":"0"}, verbose=False)
        if result.exit_code == 0:
            result = execute_command(instance, {"command":["chmod", "a+w", "/var/www/html/hackable/uploads/"], "expected_exit_code":"0"}, verbose=False)
            if restart_service(instance, {"service":"apache2"}, verbose=False):
                return 1
            if restart_service(instance, {"service":"mysql"}, verbose=False):
                return 1
            if result.exit_code == 0:
                if verbose:
                    print(Fore.GREEN + "   Config file for dvwa is up" + Style.RESET_ALL)
                return 0
            print(Fore.RED + "   Error while changing folder rights in dvwa "+" ["+result.stderr+"]" + Style.RESET_ALL)
            return 1
        print(Fore.RED + "   Error during configuration of SQL in dvwa "+" ["+result.stderr+"]" + Style.RESET_ALL)
        return 1
    print(Fore.RED + "   Error while copying config file of dvwa "+" ["+result.stderr+"]" + Style.RESET_ALL)
    return 1
299bddddc06364abfe34c482fa12932f893a224c
29,826
import boto3


def get(role_arn, principal_arn, assertion, duration):
    """Use the assertion to get an AWS STS token using Assume Role with SAML"""

    # We must use a session with a govcloud region for govcloud accounts
    if role_arn.split(':')[1] == 'aws-us-gov':
        session = boto3.session.Session(region_name='us-gov-west-1')
        client = session.client('sts')
    else:
        client = boto3.client('sts')

    token = client.assume_role_with_saml(
        RoleArn=role_arn,
        PrincipalArn=principal_arn,
        DurationSeconds=duration,
        SAMLAssertion=assertion)
    return token
f1012c71eff41bffdad6390b9353745b0e07ab0c
29,828
def find_nth(s, x, n):
    """
    Find the nth occurrence of a substring in a string.

    Takes the string to search, the substring, and the occurrence number n;
    returns the index of the nth occurrence, or -1 if there are fewer than n.
    """
    # Start so that the first search begins at index 0 even for multi-character substrings.
    i = -len(x)
    for _ in range(n):
        i = s.find(x, i + len(x))
        if i == -1:
            break
    return i
b54998db817272ec534e022a9f04ec8d350b08fb
29,830
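A quick usage sketch:

print(find_nth("abcabcabc", "abc", 2))   # 3
print(find_nth("abcabcabc", "abc", 9))   # -1 (fewer than 9 occurrences)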
from typing import Union from typing import Dict from typing import Any def parse_buffer(value: Union[Dict[str, Any], str]) -> str: """Parse value from a buffer data type.""" if isinstance(value, dict): return parse_buffer_from_dict(value) if is_json_string(value): return parse_buffer_from_json(value) return value
e98cad3020fffdaef5ad71d1f59b89db83e05d03
29,831