Dataset schema:
    content : string (length 35 to 762k)
    sha1    : string (length 40)
    id      : int64 (0 to 3.66M)
from datetime import datetime


def get_time(sec_scale):
    """Time since epoch, in seconds or milliseconds depending on sec_scale."""
    if sec_scale == 'sec':
        scale = 0
    elif sec_scale == 'msec':
        scale = 3
    else:
        # The original had a bare `raise` here, which is invalid outside an
        # except block; raise an explicit error instead.
        raise ValueError("sec_scale must be 'sec' or 'msec'")
    secs = (datetime.utcnow() - datetime.utcfromtimestamp(0)).total_seconds()
    return int(secs * pow(10, scale))
c233133d61c6347a27186ef3baf0ae2bc79cf8f2
15,500
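A minimal usage sketch for the get_time row above (editor's example, not part of the dataset):

    get_time('sec')    # e.g. 1700000000 (seconds since epoch)
    get_time('msec')   # e.g. 1700000000000 (milliseconds since epoch)
    get_time('usec')   # raises ValueError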
import urllib.request
import json


def get_json(url):
    """Retrieve and parse a JSON document from a given url.

    :return: the decoded JSON object
    """
    with urllib.request.urlopen(url) as response:
        # The original called response.readall(), which HTTPResponse does not
        # provide; read() is the correct method.
        data = response.read().decode('utf-8')
        data = json.loads(data)
    return data
3164bb7d1adc40e3dcd07e82ec734807f3a17abc
15,501
def _defaultChangeProvider(variables, wf):
    """By default we just forward the message to the change provider."""
    return variables
5087dc06e0da1f3270b28e9ab1bd2241ed4b4de4
15,502
import numpy as np


def evaluate_surface_derivatives(surface, params, order=1):
    """Evaluate derivatives of a (rational) B-spline surface at the given
    (u, v) parameter pairs, up to the given derivative order."""
    if surface.rational:
        control_points = np.array(surface.weighted_control_points)
    else:
        control_points = np.array(surface.control_points)
    degree_u, degree_v = surface.degree
    knot_vector_u, knot_vector_v = surface.knot_vector
    count_u, count_v = surface.count
    params_u = [p[0] for p in params]
    params_v = [p[1] for p in params]
    spans_u = find_spans(knot_vector_u, count_u, params_u)
    bases_u = basis_functions_derivatives(degree_u, knot_vector_u, spans_u, params_u, order)
    spans_v = find_spans(knot_vector_v, count_v, params_v)
    bases_v = basis_functions_derivatives(degree_v, knot_vector_v, spans_v, params_v, order)
    dv = min(degree_v, order)
    derivatives = []
    for span_u, basis_u, span_v, basis_v in zip(spans_u, bases_u, spans_v, bases_v):
        b = control_points[span_u - degree_u:span_u + 1, span_v - degree_v:span_v + 1]
        temp = np.dot(b.T, np.array(basis_u).T).T
        dd = min(order, dv)
        SKL = np.dot(np.array(basis_v[:dd + 1]), temp[:degree_v + 1]).transpose(1, 0, 2)
        derivatives.append(SKL)
    if not surface.rational:
        return np.array(derivatives)
    else:
        # Project homogeneous derivatives for the rational case.
        # TODO: numpify this!
        D = []
        for SKLw in derivatives:
            dimension = 4
            SKL = [[[0.0 for _ in range(dimension)]
                    for _ in range(order + 1)]
                   for _ in range(order + 1)]
            for k in range(0, order + 1):
                for l in range(0, order + 1):  # noqa: E741
                    v = list(SKLw[k, l])[:]
                    for j in range(1, l + 1):
                        v[:] = [tmp - (binomial_coefficient(l, j) * SKLw[0][j][-1] * drv)
                                for tmp, drv in zip(v, SKL[k][l - j])]
                    for i in range(1, k + 1):
                        v[:] = [tmp - (binomial_coefficient(k, i) * SKLw[i][0][-1] * drv)
                                for tmp, drv in zip(v, SKL[k - i][l])]
                        v2 = [0.0 for _ in range(dimension - 1)]
                        for j in range(1, l + 1):
                            v2[:] = [tmp + (binomial_coefficient(l, j) * SKLw[i][j][-1] * drv)
                                     for tmp, drv in zip(v2, SKL[k - i][l - j])]
                        v[:] = [tmp - (binomial_coefficient(k, i) * tmp2)
                                for tmp, tmp2 in zip(v, v2)]
                    SKL[k][l][:] = [tmp / SKLw[0][0][-1] for tmp in v[0:(dimension - 1)]]
            D.append(SKL)
        return np.array(D)
669a20bf23f96720f2a2eae917652723c0aa3fe5
15,503
import numpy as np
from scipy import linalg


def GPPrediction(y_train, X_train, T_train, eqid_train,
                 sid_train=None, lid_train=None,
                 X_new=None, T_new=None, eqid_new=None, sid_new=None, lid_new=None,
                 dc_0=0., Tid_list=None, Hyp_list=None,
                 phi_0=None, tau_0=None, sigma_s=None, sigma_e=None):
    """Make ground motion predictions at new locations conditioned on the
    training data.

    Parameters
    ----------
    y_train : np.array(n_train_pt)
        Array with ground-motion observations associated with training data.
    X_train : np.array(n_train_pt, n_dim)
        Design matrix for training data.
    T_train : np.array(n_train_pt, 2 x n_coor)
        Coordinates matrix for training data.
    eqid_train : np.array(n_train_pt)
        Earthquake IDs for training data.
    sid_train : np.array(n_train_pt), optional
        Station IDs for training data. The default is None.
    lid_train : np.array(n_train_pt), optional
        Source IDs for training data. The default is None.
    X_new : np.array(n_new_pt, n_dim), optional
        Design matrix for predictions. The default is None.
    T_new : np.array(n_new_pt, 2 x n_coor), optional
        Coordinate matrix for predictions. The default is None.
    eqid_new : np.array(n_new_pt), optional
        Earthquake IDs for predictions. The default is None.
    sid_new : np.array(n_new_pt), optional
        Station IDs for predictions. The default is None.
    lid_new : np.array(n_new_pt), optional
        Source IDs for predictions. The default is None.
    dc_0 : float, optional
        Mean offset. The default is zero.
    Tid_list : n_dim list
        List to specify the coordinate pair for each dimension.
    Hyp_list : list, optional
        List of hyper-parameters for each dimension of the covariance function.
    phi_0 : double
        Within-event standard deviation.
    tau_0 : double
        Between-event standard deviation.
    sigma_s : double, optional
        Standard deviation for zero correlation site-to-site term.
        The default is None.
    sigma_e : double, optional
        Standard deviation for zero correlation source-to-source term.
        The default is None.

    Returns
    -------
    np.array(n_new_pt)
        Median estimate of new predictions.
    np.array(n_new_pt)
        Epistemic uncertainty (standard deviation) of new predictions.
    np.array(n_new_pt, n_new_pt)
        Epistemic covariance of new predictions.
    """
    # remove mean offset from conditioning data
    y_train = y_train - dc_0

    # number of grid nodes
    n_pt_train = X_train.shape[0]
    n_pt_new = X_new.shape[0]

    # initialize covariance matrices
    cov_data = np.zeros([n_pt_train, n_pt_train])
    cov_star = np.zeros([n_pt_new, n_pt_train])
    cov_star2 = np.zeros([n_pt_new, n_pt_new])

    # create covariance matrices
    for k, (hyp, tid) in enumerate(zip(Hyp_list, Tid_list)):
        # covariance between train data
        cov_data += CreateCovMaternDimX(X_train[:, k], X_train[:, k],
                                        T_train[tid], T_train[tid],
                                        hyp_ell=hyp[0], hyp_omega=hyp[1],
                                        hyp_pi=hyp[2], delta=1e-6)
        # covariance between train data and predictions
        cov_star += CreateCovMaternDimX(X_new[:, k], X_train[:, k],
                                        T_new[tid], T_train[tid],
                                        hyp_ell=hyp[0], hyp_omega=hyp[1],
                                        hyp_pi=hyp[2], delta=0)
        # covariance between prediction data
        cov_star2 += CreateCovMaternDimX(X_new[:, k], X_new[:, k],
                                         T_new[tid], T_new[tid],
                                         hyp_ell=hyp[0], hyp_omega=hyp[1],
                                         hyp_pi=hyp[2], delta=1e-6)

    # add site-to-site systematic effects if sigma_s is specified
    if sigma_s is not None:
        assert sid_train is not None, 'Error: site id for training data not specified'
        cov_data += CreateCovS2S(sid_train, sid_train, sigma_s, delta=1e-6)
    # add source-to-source systematic effects if sigma_e is specified
    if sigma_e is not None:
        assert lid_train is not None, 'Error: location id for training data not specified'
        cov_data += CreateCovL2L(lid_train, lid_train, sigma_e, delta=1e-6)
    # add between- and within-event covariance matrices
    cov_data += CreateCovWe(eqid_train, eqid_train, phi_0)
    cov_data += CreateCovBe(eqid_train, eqid_train, tau_0, delta=1e-6)

    # consider site-to-site systematic effects in predictions if sigma_s is specified
    if not ((sigma_s is None) or (sid_new is None)):
        cov_star2 += CreateCovS2S(sid_new, sid_new, sigma_s, delta=1e-6)
        cov_star += CreateCovS2S(sid_new, sid_train, sigma_s)
    # consider source-to-source systematic effects in predictions if sigma_e is specified
    if not ((sigma_e is None) or (lid_new is None)):
        cov_star2 += CreateCovL2L(lid_new, lid_new, sigma_e, delta=1e-6)
        cov_star += CreateCovL2L(lid_new, lid_train, sigma_e)
    # consider earthquake aleatory terms if eqid_new is specified
    if eqid_new is not None:
        cov_star2 += CreateCovBe(eqid_new, eqid_new, tau_0, delta=1e-6)
        cov_star += CreateCovBe(eqid_new, eqid_train, tau_0)

    # posterior mean and variance at new locations
    y_new_mu = cov_star.dot(linalg.solve(cov_data, y_train))
    # add mean offset to new predictions
    y_new_mu = y_new_mu + dc_0
    y_new_cov = cov_star2 - cov_star.dot(linalg.solve(cov_data, cov_star.transpose()))
    # posterior standard dev. at new locations
    y_new_sig = np.sqrt(np.diag(y_new_cov))

    return y_new_mu.flatten(), y_new_sig.flatten(), y_new_cov
28bdf5575f16d1ee3a719aaadc59eefda642171d
15,504
def zdotu(x, y):
    """
    This function computes the complex scalar product \\M{x^T y} for the
    vectors x and y, returning the result.
    """
    return _gslwrap.gsl_blas_zdotu(x, y, 1j)
135b5196568454dc0c721ab42cdd13d4bed63c5c
15,505
from music21 import roman


def music21_to_chord_duration(p, key):
    """
    Takes in a Music21 score, and outputs three lists:
    a list of chords (as comma-joined pitch-name strings),
    a list of chord functions (roman numeral figures from
    roman.romanNumeralFromChord), and a list of durations.
    """
    p_chords = p.chordify()
    p_chords_o = p_chords.flat.getElementsByClass('Chord')
    chord_list = []
    chord_function_list = []
    duration_list = []
    for ch in p_chords_o:
        duration_list.append(ch.duration.quarterLength)
        ch.closedPosition(forceOctave=4, inPlace=True)
        rn = roman.romanNumeralFromChord(ch, key)
        rp = rn.pitches
        rp_names = ",".join([pi.name + pi.unicodeNameWithOctave[-1] for pi in rp])
        chord_list.append(rp_names)
        chord_function_list.append(rn.figure)
    return chord_list, chord_function_list, duration_list
142a0ef06c5c9542097cc7db0631a1f19e2f8f72
15,506
def city_country(city, country, population=''):
    """Generate a neatly formatted city/country name."""
    full_name = city + ', ' + country
    if population:
        return full_name.title() + ' - population ' + str(population)
    else:
        return full_name.title()
23be8d5b39380fd177240e479cf77ac7eb6c7459
15,507
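A short usage sketch for city_country above (editor's example, not part of the dataset):

    city_country('santiago', 'chile')
    # -> 'Santiago, Chile'
    city_country('santiago', 'chile', population=5000000)
    # -> 'Santiago, Chile - population 5000000'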
def generate_headermap(line, startswith="Chr", sep="\t"):
    """
    >>> line = "Chr\\tStart\\tEnd\\tRef\\tAlt\\tFunc.refGene\\tGene.refGene\\tGeneDetail.refGene\\tExonicFunc.refGene\\tAAChange.refGene\\tsnp138\\tsnp138NonFlagged\\tesp6500siv2_ea\\tcosmic70\\tclinvar_20150629\\tOtherinfo"
    >>> generate_headermap(line)
    {'Chr': 0, 'Start': 1, 'End': 2, 'Ref': 3, 'Alt': 4, 'Func.refGene': 5, 'Gene.refGene': 6, 'GeneDetail.refGene': 7, 'ExonicFunc.refGene': 8, 'AAChange.refGene': 9, 'snp138': 10, 'snp138NonFlagged': 11, 'esp6500siv2_ea': 12, 'cosmic70': 13, 'clinvar_20150629': 14, 'Otherinfo': 15}
    """
    if not line.startswith(startswith):
        raise Exception("Header line should start with \"{0}\"".format(startswith))
    else:
        if line.startswith("#"):
            line = line[1:]
        return dict([(v, i) for i, v in enumerate(line.rstrip().split(sep))])
16bbbc07fa13ff9bc8ec7af1aafc4ed65b20ec4c
15,508
import numpy as np


def max(q):  # noqa: A001 -- intentionally shadows the builtin to support quantities
    """Return the maximum of an array or maximum along an axis.

    Parameters
    ----------
    q : array_like
        Input data

    Returns
    -------
    array_like
        Maximum of an array or maximum along an axis
    """
    if isphysicalquantity(q):
        return q.__class__(np.max(q.value), q.unit)
    else:
        return np.max(q)
0a3cfae6fb9d1d26913817fcc11765214baa8dff
15,509
from collections import OrderedDict  # the original imported this from typing


def make_failure_log(conclusion_pred, premise_preds, conclusion, premises,
                     coq_output_lines=None):
    """
    Produces a dictionary with the following structure:
    {"unproved sub-goal" : "sub-goal_predicate",
     "matching premises" : ["premise1", "premise2", ...],
     "raw sub-goal" : "conclusion",
     "raw premises" : ["raw premise1", "raw premise2", ...]}
    Raw sub-goal and raw premises are the coq lines with the premise
    internal name and its predicates. E.g. H : premise (Acc x1)
    Note that this function is not capable of returning all unproved
    sub-goals in coq's stack. We only return the top unproved sub-goal.
    """
    failure_log = OrderedDict()
    conclusion_base = denormalize_token(conclusion_pred)
    # failure_log["unproved sub-goal"] = conclusion_base
    premises_base = [denormalize_token(p) for p in premise_preds]
    # failure_log["matching premises"] = premises_base
    # failure_log["raw sub-goal"] = conclusion
    # failure_log["raw premises"] = premises
    premise_preds = []
    for p in premises:
        try:
            pred = p.split()[2]
        except IndexError:  # the original used a bare except here
            continue
        if pred.startswith('_'):
            premise_preds.append(denormalize_token(pred))
    failure_log["all_premises"] = premise_preds
    failure_log["other_sub-goals"] = get_subgoals_from_coq_output(
        coq_output_lines, premises)
    failure_log["other_sub-goals"].insert(0, {
        'subgoal': conclusion_base,
        'index': 1,
        'raw_subgoal': conclusion,
        'matching_premises': premises_base,
        'matching_raw_premises': premises_base})
    failure_log["type_error"] = has_type_error(coq_output_lines)
    failure_log["open_formula"] = has_open_formula(coq_output_lines)
    return failure_log
17f7cb8b6867849e034d72f05e9e48622bd35b7d
15,510
import logging

import requests

logger = logging.getLogger(__name__)  # assumed module-level logger, as in the original


def request(url=None, json=None, parser=lambda x: x, encoding=None, **kwargs):
    """
    :param url:
    :param json:
    :param parser: when None, return the raw response r; otherwise return parser(r.json())
    :param kwargs:
    :return:
    """
    method = 'post' if json is not None else 'get'  # barring special cases
    logger.info(f"Request Method: {method}")
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE '
    }
    r = requests.request(method, url, json=json, headers=headers)
    r.encoding = encoding if encoding else r.apparent_encoding
    if parser is None:
        return r
    return parser(r.json())
c222cc2a5c1e2acb457600d223c9ca6ab588aa5e
15,511
import math


def log_density_igaussian(z, z_var):
    """Calculate log density of zero-mean isotropic gaussian distribution
    given z and z_var."""
    assert z.ndimension() == 2
    assert z_var > 0
    z_dim = z.size(1)
    return -(z_dim / 2) * math.log(2 * math.pi * z_var) + z.pow(2).sum(1).div(-2 * z_var)
a412b9e25aecfc2baed2d783a2d7cd281fadc9fb
15,512
def denom(r, E, J, model):
    """Evaluate the denominator 2(E - U(r)) r^2 - J^2."""
    ur = model.potcurve(r)  # model.potcurve[(abs(r - model.rcurve)).argmin()]
    return 2.0 * (E - ur) * r * r - J * J
19dc7c5cd283b66f834ba9a0d84fb396ca2c2c89
15,513
import numpy as np


def approximate_gaussians(confidence_array, mean_array, variance_array):
    """Approximate gaussians with given parameters with one gaussian.

    Approximation is performed via minimization of Kullback-Leibler
    divergence KL(sum_{j} w_j N_{mu_j, sigma_j} || N_{mu, sigma}).

    Parameters
    ----------
    confidence_array : ndarray(num_gaussians)
        confidence values for gaussians.
    mean_array : ndarray(num_gaussians, 3)
        (z,y,x) mean values for input gaussians.
    variance_array : ndarray(num_gaussians)
        (z,y,x) variances for input gaussians.

    Returns
    -------
    tuple(ndarray(3), ndarray(3))
        mean and sigma for covering gaussian.
    """
    delimiter = np.sum(confidence_array)
    mu = np.sum(mean_array.T * confidence_array, axis=1) / delimiter
    sigma = np.sqrt(np.sum((variance_array + (mean_array - mu) ** 2).T
                           * confidence_array, axis=1) / delimiter)
    return mu, sigma
7c722f0153e46631b3c4731d8a307e0b219be02b
15,514
from typing import Callable

import jax
import jax.numpy as jnp

import types  # project-local module defining Transition, not the stdlib `types`


def return_loss(apply_fn: Callable[[jnp.ndarray, jnp.ndarray], jnp.ndarray],
                steps: types.Transition):
    """Loss wrapper for ReturnMapper.

    Args:
      apply_fn: applies a transition model (o_t, a_t) -> (o_t+1, r), expects
        the leading axis to index the batch and the second axis to index the
        transition triplet (t-1, t, t+1).
      steps: RLDS dictionary of transition triplets as prepared by
        `rlds_loader.episode_to_timestep_batch`.

    Returns:
      A scalar loss value as jnp.ndarray.
    """
    observation_t = jax.tree_map(lambda obs: obs[:, dataset.CURRENT, ...],
                                 steps.observation)
    action_t = steps.action[:, dataset.CURRENT, ...]
    n_step_return_t = steps.extras[dataset.N_STEP_RETURN][:, dataset.CURRENT, ...]
    predicted_n_step_return_t = apply_fn(observation_t, action_t)
    return mse(predicted_n_step_return_t, n_step_return_t)
970cb6623436982ef1359b1328edcb828012f1f7
15,515
def part_two(data):
    """Part two"""
    array = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h',
             'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p']
    commands = data.split(',')
    # The dance repeats with a period of 30 for this input, so only
    # 1000000000 % 30 rounds actually need to be performed.
    for _ in range(1000000000 % 30):
        dance(array, commands)
    return ''.join(map(str, array))
75e847cd5a598aa67ca54133c15a0c2c3fc67433
15,516
import os
import json

import requests


def ts_declare():
    """Makes an f5-telemetry-streaming declaration from the supplied metadata"""
    if is_rest_worker('/mgmt/shared/telemetry/declare') and os.path.isfile(
            TS_DECLARATION_FILE):
        with open(TS_DECLARATION_FILE, 'r') as tsdf:
            declaration = tsdf.read()
        json.loads(declaration)
        d_url = 'http://localhost:8100/mgmt/shared/telemetry/declare'
        LOG.debug('POST f5-telemetry-streaming declaration')
        # initial request
        response = requests.post(d_url, auth=('admin', ''), data=declaration)
        if response.status_code < 400:
            return True
        LOG.error('f5-telemetry-streaming declaration failed %s - %s',
                  response.status_code, response.text)
        return False
    LOG.error(
        'f5-telemetry-streaming worker not installed or declaration missing')
    return False
ea52512201450ed35ba7a902edf15bbe12b748de
15,517
def readCSVPremadeGroups(filename, studentProperties=None):
    """studentProperties is a list of student properties in the order they
    appear in the CSV. For example, if a CSV row (each group is a row) is:
    "Rowan Wilson, [email protected], 1579348, Bob Tilano, [email protected], 57387294"
    Then the format is: fullname, email, huid, fullname, email, huid, ...
    Thus, studentProperties = ['fullname', 'email', 'huid']
    """
    csv = _readCSV(filename)

    # Create studentProperties if needed
    if studentProperties is None:
        studentProperties = []
        firstHeader = None
        for header in csv['headers']:
            header = _keepOnlyLetters(header).lower()
            if firstHeader == header:
                # Found beginning of repeating sequence
                break
            if firstHeader is None:
                firstHeader = header
            studentProperties.append(header)

    # Pull groups from CSV data
    groups = []
    for row in csv['data']:
        students = []
        currStudent = None
        for i in range(len(row)):
            if len(row[i].strip()) == 0:
                break
            propIndex = i % len(studentProperties)
            if propIndex == 0:
                # Just starting a new student
                currStudent = {}
            # Store the property value. The original only assigned in the
            # else branch, which silently dropped every student's first
            # property (e.g. fullname).
            currStudent[studentProperties[propIndex]] = row[i]
            if propIndex == len(studentProperties) - 1:
                # Just finished adding properties to a student
                students.append(currStudent)
        if len(students) > 0:
            groups.append(students)
    return groups
f12de287b4a9f19e2e29302338f7233e34d54f0c
15,518
import tensorflow as tf


def random_contrast(video, lower, upper, seed=None):
    """Adjust the contrast of an image or images by a random factor.

    Equivalent to `adjust_contrast()` but uses a `contrast_factor` randomly
    picked in the interval `[lower, upper)`.

    For producing deterministic results given a `seed` value, use
    `tf.image.stateless_random_contrast`. Unlike using the `seed` param
    with `tf.image.random_*` ops, `tf.image.stateless_random_*` ops
    guarantee the same results given the same seed independent of how many
    times the function is called, and independent of global seed settings
    (e.g. tf.random.set_seed).

    Args:
      video: An image tensor with 3 or more dimensions.
      lower: float.  Lower bound for the random contrast factor.
      upper: float.  Upper bound for the random contrast factor.
      seed: A Python integer. Used to create a random seed. See
        `tf.compat.v1.set_random_seed` for behavior.

    Usage Example:

    >>> x = [[[1.0, 2.0, 3.0],
    ...       [4.0, 5.0, 6.0]],
    ...      [[7.0, 8.0, 9.0],
    ...       [10.0, 11.0, 12.0]]]
    >>> tf.image.random_contrast(x, 0.2, 0.5)
    <tf.Tensor: shape=(2, 2, 3), dtype=float32, numpy=...>

    Returns:
      The contrast-adjusted image(s).

    Raises:
      ValueError: if `upper <= lower` or if `lower < 0`.
    """
    if upper <= lower:
        raise ValueError("upper must be > lower.")
    if lower < 0:
        raise ValueError("lower must be non-negative.")
    # The original called tf.random.random_uniform, which does not exist;
    # tf.random.uniform is the TF2 API.
    contrast_factor = tf.random.uniform([], lower, upper, seed=seed)
    return adjust_contrast(video, contrast_factor)
6ea6a72100ad468d7692c0bb8c2837cba5eaa3e0
15,519
from typing import Dict

from pyspark.sql import DataFrame  # assumed: Spark DataFrame, per the df.write.save API


def load(df: DataFrame, config: Dict, logger) -> bool:
    """Write data in final destination

    :param df: DataFrame to save.
    :type df: DataFrame
    :param config: job configuration
    :type config: Dict
    :param logger: Py4j Logger
    :type logger: Py4j.Logger
    :return: True
    :rtype: bool
    """
    df.write.save(path=config['output_path'], mode='overwrite')
    return True
70f962cc24f23264f23dce458c233466dc06d276
15,520
import numpy as np


def phase_correct_zero(spec, phi):
    """Correct the phases of a spectrum by phi radians

    Parameters
    ----------
    spec : float array of complex dtype
        The spectrum to be corrected.
    phi : float

    Returns
    -------
    spec : float array
        The phase corrected spectrum

    Notes
    -----
    [Keeler2005] Keeler, J (2005). Understanding NMR Spectroscopy, 2nd
    edition. Wiley. Page 88.
    """
    c_factor = np.exp(-1j * phi)
    # If it's an array, we need to reshape it and broadcast across the
    # frequency bands in the spectrum. Otherwise, we assume it's a scalar and
    # apply it to all the dimensions of the spec array:
    if hasattr(phi, 'shape'):
        c_factor = c_factor.reshape(c_factor.shape + (1,))
    return spec * c_factor
1647e8f99e10ba5f715e4c268907cbf995e99335
15,521
import numpy as np


def upsample(s, n, phase=0):
    """Increase sampling rate by integer factor n with included offset phase."""
    # The Kronecker product inserts n-1 zeros after each sample;
    # np.roll then applies the phase offset.
    return np.roll(np.kron(s, np.r_[1, np.zeros(n - 1)]), phase)
997f48be57816efb11b77c258e945d3161b748be
15,522
def parse_idx_inp(idx_str):
    """Parse an index string such as '5' or '2-7' into a list of ints."""
    idx_str = idx_str.strip()
    if idx_str.isdigit():
        idxs = [int(idx_str)]
    elif '-' in idx_str:
        idx_begin, idx_end = idx_str.split('-')
        idxs = list(range(int(idx_begin), int(idx_end) + 1))
    else:
        # The original left idxs unbound on unrecognized input; fail loudly.
        raise ValueError('Cannot parse index string: {}'.format(idx_str))
    return idxs
2dc1282169f7534455f2a0297af6c3079192cb66
15,523
import requests


def toggl_request_get(url: str, params: dict = None) -> requests.Response:
    """Send a GET request to specified url using toggl headers and configured auth"""
    headers = {"Content-Type": "application/json"}
    auth = (CONFIG["toggl"]["api_token"], "api_token")
    # Default for params changed from False to None, the value requests expects.
    response = requests.get(url, headers=headers, auth=auth, params=params)
    return response
bd4714bb0d92dcfb1e2ef27bb3fa3c67179b8b40
15,524
import numpy as np
from scipy.spatial import cKDTree


def sample_point_cloud(source, target, sample_indices=[2]):
    """
    Resamples a source point cloud at the coordinates of target points.
    Uses the nearest point in the source point cloud (by x, y) for each
    target point.

    Parameters
    ----------
    source: array
        Input point cloud
    target: array
        Target point cloud for sample locations
    sample_indices: list
        List of indices to sample from source. Defaults to 2 (z or height
        dimension)

    Returns
    -------
    An array of sampled points
    """
    sample_indices = np.array(sample_indices)
    tree = cKDTree(source[:, 0:2])
    dist, idx = tree.query(target, n_jobs=-1)
    output = np.hstack(
        [
            target,
            source[idx[:, None], sample_indices].reshape(
                (len(idx), len(sample_indices))
            ),
        ]
    )
    return output
b490e598e68ef175a5cf80c052f2f82fc70ac4ba
15,525
import pandas as pd
import plotly.graph_objects as go


def make_satellite_gsp_pv_map(batch: Batch, example_index: int,
                              satellite_channel_index: int):
    """Make an animation of the satellite, gsp and the pv data"""
    trace_times = []
    times = batch.satellite.time[example_index]
    pv = batch.pv
    for time in times:
        trace_times.append(
            make_satellite_gsp_pv_map_one_time_value(
                batch=batch,
                example_index=example_index,
                satellite_channel_index=satellite_channel_index,
                time_value=time,
            )
        )

    frames = []
    for i, traces in enumerate(trace_times):
        frames.append(go.Frame(data=traces, name=f"frame{i+1}"))

    # make slider
    labels = [pd.to_datetime(time.data) for time in times]
    sliders = make_slider(labels=labels)

    x = pv.x_coords[example_index][pv.x_coords[example_index] != 0].mean()
    y = pv.y_coords[example_index][pv.y_coords[example_index] != 0].mean()
    lat, lon = osgb_to_lat_lon(x=x, y=y)

    fig = go.Figure(
        data=trace_times[0],
        layout=go.Layout(
            title="Start Title",
        ),
        frames=frames,
    )
    fig.update_layout(updatemenus=[make_buttons()])
    fig.update_layout(
        mapbox_style="carto-positron", mapbox_zoom=8,
        mapbox_center={"lat": lat, "lon": lon}
    )
    fig.update_layout(sliders=sliders)

    return fig
fd0e0a7543da212f368849c3686277a7c8c42a95
15,526
import numpy as np
from astropy.table import QTable


def raw_to_engineering_product(product, idbm):
    """Apply parameter raw to engineering conversion for the entire product.

    Parameters
    ----------
    product : `BaseProduct`
        The TM product as level 0
    idbm : IDB manager
        Used to look up the IDB version for each time range.

    Returns
    -------
    `int`
        How many columns were calibrated.
    """
    col_n = 0
    idb_ranges = QTable(rows=[(version, range.start.as_float(), range.end.as_float())
                              for version, range in product.idb_versions.items()],
                        names=["version", "obt_start", "obt_end"])
    idb_ranges.sort("obt_start")
    idb_ranges['obt_start'][0] = SCETime.min_time().as_float()
    for i in range(0, len(idb_ranges) - 1):
        idb_ranges['obt_end'][i] = idb_ranges['obt_start'][i + 1]
    idb_ranges['obt_end'][-1] = SCETime.max_time().as_float()

    for col in product.data.colnames:
        if (not (hasattr(product.data[col], "meta")
                 and "PCF_CURTX" in product.data[col].meta
                 and product.data[col].meta["PCF_CURTX"] is not None
                 and product.data[col].meta["NIXS"] is not None
                 and hasattr(product, "idb"))):
            continue
        col_n += 1
        c = 0

        # clone the current column into a new column as the content might be
        # replaced chunk wise
        product.data[CCN] = product.data[col]

        for idbversion, starttime, endtime in idb_ranges.iterrows():
            idb = idbm.get_idb(idbversion)
            idb_time_period = np.where((starttime <= product.data['time'])
                                       & (product.data['time'] < endtime))[0]
            if len(idb_time_period) < 1:
                continue
            c += len(idb_time_period)

            calib_param = idb.get_params_for_calibration(
                product.service_type,
                product.service_subtype,
                (product.ssid if hasattr(product, "ssid") else None),
                product.data[col].meta["NIXS"],
                product.data[col].meta["PCF_CURTX"])[0]

            raw = Parameter(product.data[col].meta["NIXS"],
                            product.data[idb_time_period][col], None)
            eng = apply_raw_to_engineering(raw, (calib_param, idb))

            # cast the type of the column if needed
            if product.data[CCN].dtype != eng.engineering.dtype:
                product.data[CCN] = product.data[CCN].astype(eng.engineering.dtype)
            # set the unit if needed
            if hasattr(eng.engineering, "unit") and \
                    product.data[CCN].unit != eng.engineering.unit:
                meta = product.data[col].meta
                product.data[CCN].unit = eng.engineering.unit
                # restore the meta info
                setattr(product.data[CCN], "meta", meta)

            # override the data into the new column
            product.data[CCN][idb_time_period] = eng.engineering

        # replace the old column with the converted
        product.data[col] = product.data[CCN]
        product.data[col].meta = product.data[CCN].meta
        # delete the generic column for conversion
        del product.data[CCN]
        # delete the calibration key from meta as it is now processed
        del product.data[col].meta["PCF_CURTX"]

        if c != len(product.data):
            logger.warning("Not all time bins got converted to engineering "
                           "values due to bad idb periods."
                           f"\nConverted bins: {c}\ntotal bins {len(product.data)}")
    return col_n
aaf5a92c53bfc41a5230593b96c0de7b8ad1ba4a
15,527
from django.core.paginator import EmptyPage, InvalidPage, Paginator


def paginate(objects, page_num, per_page, max_paging_links):
    """
    Return a paginated page for the given objects, giving it a custom
    ``visible_page_range`` attribute calculated from ``max_paging_links``.
    """
    paginator = Paginator(objects, per_page)
    try:
        page_num = int(page_num)
    except ValueError:
        page_num = 1
    try:
        objects = paginator.page(page_num)
    except (EmptyPage, InvalidPage):
        objects = paginator.page(paginator.num_pages)
    page_range = objects.paginator.page_range
    if len(page_range) > max_paging_links:
        # Use integer division so the slice indices stay ints on Python 3.
        start = min(objects.paginator.num_pages - max_paging_links,
                    max(0, objects.number - (max_paging_links // 2) - 1))
        page_range = page_range[start:start + max_paging_links]
    objects.visible_page_range = page_range
    return objects
cd8a7ef046a48c580ad12cfa44f1862312bb1aba
15,528
def _get_crop_frame(image, max_wiggle, tx, ty):
    """
    Based on the max_wiggle, determines a cropping frame.
    """
    pic_width, pic_height = image.size
    wiggle_room_x = max_wiggle * .5 * pic_width
    wiggle_room_y = max_wiggle * .5 * pic_height
    cropped_width = pic_width - wiggle_room_x
    cropped_height = pic_height - wiggle_room_y

    left = int(tx * wiggle_room_x)
    top = int(ty * wiggle_room_y)
    right = left + cropped_width
    bottom = top + cropped_height

    return left, top, right, bottom
18442a97544d6c4bc4116dc43811c9fcd0d203c6
15,529
# The original imported U from the re module, which is a regex flag and has
# no .index method; from the `% 26` arithmetic, U is clearly meant to be the
# uppercase alphabet.
U = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def __vigenere(s, key='virink', de=0):
    """Vigenere cipher (encrypt by default, decrypt when de=1)."""
    s = str(s).replace(" ", "").upper()
    key = str(key).replace(" ", "").upper()
    res = ''
    i = 0
    while i < len(s):
        j = i % len(key)
        k = U.index(key[j])
        m = U.index(s[i])
        if de:
            if m < k:
                m += 26
            res += U[m - k]
        else:
            res += U[(m + k) % 26]
        i += 1
    return res
4deadfc9fdd1cb002c2f31a1de7763b0c49dd757
15,530
import tensorflow as tf


def mask_unit_group(unit_group: tf.Tensor, unit_group_length: tf.Tensor,
                    mask_value=0) -> tf.Tensor:
    """Masks unit groups according to their length.

    Args:
        unit_group: A tensor of rank 3 with a sequence of unit feature vectors.
        unit_group_length: The length of the unit group (assumes all unit
            feature vectors upfront).
        mask_value: The mask value.

    Returns:
        A tensor of rank 3 where indices beyond unit_group_length are
        zero-masked.
    """
    if unit_group_length is not None:
        # get rid of last dimensions with size 1
        if unit_group.shape.rank - unit_group_length.shape.rank < 2:
            unit_group_length = tf.squeeze(unit_group_length, axis=-1)  # B

        # mask with mask_value
        unit_group_mask = tf.sequence_mask(
            tf.cast(unit_group_length, tf.int32),
            maxlen=unit_group.shape[1],
            dtype=unit_group.dtype)  # B x T
        unit_group_mask = tf.expand_dims(unit_group_mask, axis=-1)
        unit_group *= unit_group_mask
        if mask_value != 0:
            mask_value = tf.convert_to_tensor(mask_value)
            unit_group = tf.cast(unit_group, mask_value.dtype)
            unit_group_mask = tf.cast(unit_group_mask, mask_value.dtype)
            unit_group += (1 - unit_group_mask) * mask_value
    return unit_group
758028075f793bad1165d0ca8992c78cb4a1318e
15,531
def fill_session_team(team_id, session_id, dbsession=DBSESSION):
    """
    Use the FPL API to get list of players in an FPL squad with id=team_id,
    then fill the session team with these players.
    """
    # first reset the team
    reset_session_team(session_id, dbsession)
    # now query the API
    players = fetcher.get_fpl_team_data(get_last_finished_gameweek(), team_id)
    player_ids = [p["element"] for p in players]
    for pid in player_ids:
        add_session_player(pid, session_id, dbsession)
    team_history = fetcher.get_fpl_team_history_data()["current"]
    index = (
        get_last_finished_gameweek() - 1
    )  # as gameweek starts counting from 1 but list index starts at 0
    budget = team_history[index]["value"]
    set_session_budget(budget, session_id)
    return player_ids
28118a527c009d90401b368d628725ee29e838ef
15,532
import folium


def create_map(users_info):
    """
    This function builds an HTML map with locations of user's friends
    on Twitter.
    """
    my_map = folium.Map(
        location=[49.818396058511645, 24.02258071000576], zoom_start=10)
    folium.TileLayer('cartodbdark_matter').add_to(my_map)
    folium.TileLayer('stamentoner').add_to(my_map)
    folium.TileLayer('openstreetmap').add_to(my_map)
    fg_friends = folium.FeatureGroup(name='Twitter Friends')
    for user in users_info:
        nickname = user[0]
        user_coord = user[1]
        fg_friends.add_child(folium.Marker(location=user_coord,
                                           popup=nickname,
                                           icon=folium.Icon(color='darkred',
                                                            icon='heart')))
    my_map.add_child(fg_friends)
    my_map.add_child(folium.LayerControl())
    return my_map.get_root().render()
cfe9649101906aa295ffc9984bbed15a99c7ed46
15,533
def l2sq(x):
    """Sum the matrix elements squared"""
    return (x ** 2).sum()
c02ea548128dde02e4c3e70f9280f1ded539cee9
15,534
import numpy as np


def normalize(arr, axis=None):
    """
    Normalize a vector between 0 and 1.

    Parameters
    ----------
    arr : numpy.ndarray
        Input array
    axis : integer
        Axis along which normalization is computed

    Returns
    -------
    arr : numpy.ndarray
        Normalized version of the input array
    """
    if not isinstance(arr, np.ndarray):
        arr = np.asarray(arr)
    arr = arr - np.min(arr, axis)
    M = np.max(arr, axis)
    if np.sum(np.abs(M)) > 0:
        arr = arr / M
    return arr
2c9689ee829e66bfd02db3c1c92c749ca068bd73
15,535
import numpy as np


def successive_substitution(m, T, P, max_iter, M, Pc, Tc, omega, delta,
                            Aij, Bij, delta_groups, calc_delta, K, steps=0):
    """Find K-factors by successive substitution

    Iterate to find a converged set of K-factors defining the gas/liquid
    partitioning of a mixture using successive substitution.  We follow the
    algorithms in McCain (1990) and Michelsen and Mollerup (2007).

    Parameters
    ----------
    m : ndarray, size (nc)
        masses of each component present in the whole mixture (gas plus
        liquid, kg)
    T : float
        temperature (K)
    P : float
        pressure (Pa)
    max_iter : int
        maximum number of iterations to perform.  Set max_iter to np.inf if
        you want the algorithm to guarantee to iterate to convergence, but
        beware that you may create an infinite loop.
    M : ndarray, size (nc)
        Molecular weights (kg/mol)
    Pc : ndarray, size (nc)
        Critical pressures (Pa)
    Tc : ndarray, size (nc)
        Critical temperatures (K)
    omega : ndarray, size (nc)
        Acentric factors (--)
    delta : ndarray, size (nc, nc)
        Binary interaction coefficients for the Peng-Robinson equation of
        state.
    Aij : ndarray, (15, 15)
        Coefficients in matrix A_ij for the group contribution method for
        delta_ij following Privat and Jaubert (2012)
    Bij : ndarray, (15, 15)
        Coefficients in matrix B_ij for the group contribution method for
        delta_ij following Privat and Jaubert (2012)
    delta_groups : ndarray, (nc, 15)
        Specification of the fractional groups for each component of the
        mixture for the group contribution method of Privat and Jaubert
        (2012) for delta_ij
    calc_delta : int
        Flag specifying whether or not to compute delta_ij (1: True,
        -1: False) using the group contribution method
    K : ndarray, size (nc)
        Initial guess for the partition coefficients.  If K = None, this
        function will use initial estimates from Wilson (see Michelsen and
        Mollerup, 2007, page 259, equation 26)
    steps : int (default = 0)
        Number of previous iteration steps

    Returns
    -------
    K : ndarray, size (nc)
        Final value of the K-factors
    beta : float
        Fraction of gas or liquid (--)
    xi : ndarray, size (2, nc)
        Mole fraction of each component in the mixture.  Row 1 gives the
        values for the gas phase and Row 2 gives the values for the liquid
        phase (--)
    exit_flag : int
        Flag indicating how the solution finished:  1: converged in the
        allowable number of iterations, 0: did not converge and did not
        find any indication that it might be single phase, and -1: did not
        converge, but it looks like it might be single phase.
    steps : int
        Total number of iteration steps so far

    Notes
    -----
    The max_iter parameter controls how many steps of successive iteration
    are performed.  If set to np.inf, the iteration will continue until the
    tolerance criteria are reached.
    """
    # Update the value of K using successive substitution
    def update_K(K):
        """Evaluate the update function for finding K-factor

        Evaluates the new guess for K-factor following McCain (1990) p. 426,
        equation (15-23) as explained on p. 430 in connection with equation
        (15-31).  This is the update equation for the successive
        substitution method.

        Parameters
        ----------
        T, P, m, M, Pc, Tc, omega, delta = constant and inherited from above
        K : ndarray
            The current guess for the K-factor (--)

        Returns
        -------
        K_new : ndarray
            New guess for K-factor
        """
        # Get the mixture composition for the current K-factor
        xi, beta = gas_liq_eq(m, M, K)

        # Get the gas and liquid fugacities for the current composition
        f_gas = dbm_f.fugacity(T, P, xi[0, :] * M, M, Pc, Tc, omega, delta,
                               Aij, Bij, delta_groups, calc_delta)[0, :]
        f_liq = dbm_f.fugacity(T, P, xi[1, :] * M, M, Pc, Tc, omega, delta,
                               Aij, Bij, delta_groups, calc_delta)[1, :]

        # Update K using K = (phi_liq / phi_gas)
        K_new = (f_liq / (xi[1, :] * P)) / (f_gas / (xi[0, :] * P))

        # If the mass of any component in the mixture is zero, make sure the
        # K-factor is also zero.
        K_new[np.isnan(K_new)] = 0.

        # Follow what is said by Michelsen & Mollerup, at page 259, just
        # above equation 27:
        if steps == 0.:
            moles = m / M
            zi = moles / np.sum(moles)
            if np.sum(zi * K_new) - 1. <= 0.:
                # Condition 4 page 252
                xi[0, :] = K_new * zi / np.sum(K_new * zi)
                xi[1, :] = zi
                # Recompute the gas and liquid fugacities for the current
                # composition
                f_gas = dbm_f.fugacity(T, P, xi[0, :] * M, M, Pc, Tc, omega,
                                       delta, Aij, Bij, delta_groups,
                                       calc_delta)[0, :]
                f_liq = dbm_f.fugacity(T, P, xi[1, :] * M, M, Pc, Tc, omega,
                                       delta, Aij, Bij, delta_groups,
                                       calc_delta)[1, :]
                # Update K using K = (phi_liq / phi_gas)
                K_new = (f_liq / (xi[1, :] * P)) / (f_gas / (xi[0, :] * P))
                K_new[np.isnan(K_new)] = 0.
            elif (1. - np.sum(zi / K_new)) >= 0.:
                # Condition 5 page 252
                xi[0, :] = zi
                xi[1, :] = (zi / K_new) / np.sum(zi / K_new)
                # Recompute the gas and liquid fugacities for the current
                # composition
                f_gas = dbm_f.fugacity(T, P, xi[0, :] * M, M, Pc, Tc, omega,
                                       delta, Aij, Bij, delta_groups,
                                       calc_delta)[0, :]
                f_liq = dbm_f.fugacity(T, P, xi[1, :] * M, M, Pc, Tc, omega,
                                       delta, Aij, Bij, delta_groups,
                                       calc_delta)[1, :]
                # Update K using K = (phi_liq / phi_gas)
                K_new = (f_liq / (xi[1, :] * P)) / (f_gas / (xi[0, :] * P))
                K_new[np.isnan(K_new)] = 0.

        # Return an updated value for the K factors
        return (K_new, beta)

    # Set up the iteration parameters
    tol = 1.49012e-8  # Suggested by McCain (1990)
    err = 1.

    # Iterate to find the final value of K factor using successive
    # substitution
    stop = False
    while err > tol and steps < max_iter and not stop:
        # Save the current value of K factor
        K_old = K
        # Update the estimate of K factor using the present fugacities
        K, beta = update_K(K)
        steps += 1
        if steps > 3 and (beta == 0. or beta == 1.):
            stop = True
        # Compute the current error based on the squared relative error
        # suggested by McCain (1990) and update the iteration counter
        err = np.nansum((K - K_old) ** 2 / (K * K_old))

    # Determine the exit condition
    if stop:
        # Successive substitution thinks this is single-phase
        flag = -1
    elif steps < max_iter:
        # This solution is converged
        flag = 1
    else:
        # No decision has been reached
        flag = 0

    # Update the equilibrium and return the last value of K-factor
    xi, beta = gas_liq_eq(m, M, K)

    return (K, beta, xi, flag, steps)
1ad40642f940be0d1e967bf97a62e3b754312ae9
15,536
import re


def Match(context, pattern, arg=None):
    """Do a regular expression match against the argument"""
    if not arg:
        arg = context.node
    arg = Conversions.StringValue(arg)
    return re.match(pattern, arg) and boolean.true or boolean.false
62007fcd4617b0dfebb1bc8857f89fa2e6075f41
15,537
def rr_rectangle(rbins, a, b):
    """RR_rect(r; a, b)"""
    return Frr_rectangle(rbins[1:], a, b) - Frr_rectangle(rbins[:-1], a, b)
98e30791e114ce3e2f6529db92c0103d1477cd76
15,538
def update_type(title, title_new=None, description=None, col_titles_new={}):
    """Method updates data type

    Args:
        title (str): current type title
        title_new (str): new type title
        description (str): type description
        col_titles_new (dict): new column values (key - col id, value - col value)

    Returns:
        bool
    """
    try:
        db = DBO(_get_dsn())._dbo_driver
        cnt = db.execute(
            'SELECT count(*) FROM data_type WHERE title = \'{0}\''.format(title)).fetchone()[0]
        if cnt == 0:
            raise Error('Type {0} does not exist'.format(title))

        query = 'UPDATE data_type SET '
        if title_new is not None:
            query += 'title = \'{0}\', '.format(title_new)
        if description is not None:
            query += 'description = \'{0}\', '.format(description)
        for key, value in col_titles_new.items():
            query += 'col{0}_title = \'{1}\', '.format(key, value)
        query = query[:-2]
        query += ' WHERE title = \'{0}\''.format(title)

        db.execute(query)
        db.commit()
        return True
    except Error as ex:
        print(ex)
        db.rollback()
        return False
92e663d3bfd798de0367a44c4909a330ac9e4254
15,539
import pyglottolog


def api(repos_path):
    """Glottolog instance from shared directory for read-only tests."""
    return pyglottolog.Glottolog(str(repos_path))
a941a907050300bc89f6db8b4bd33cf9725cf832
15,540
import requests
from rest_framework import status  # assumed: DRF status codes, per the usage below

from presqt.targets.osf.utilities.utils.async_functions import run_urls_async


def get_all_paginated_data(url, token):
    """
    Get all data for the requesting user.

    Parameters
    ----------
    url : str
        URL to the current data to get
    token: str
        User's OSF token

    Returns
    -------
    Data dictionary of the data points gathered up until now.
    """
    headers = {'Authorization': 'Bearer {}'.format(token)}
    # Get initial data
    response = requests.get(url, headers=headers)
    if response.status_code == 200:
        response_json = response.json()
    elif response.status_code == 410:
        raise PresQTResponseException("The requested resource is no longer available.",
                                      status.HTTP_410_GONE)
    elif response.status_code == 404:
        raise OSFNotFoundError("Resource not found.", status.HTTP_404_NOT_FOUND)
    elif response.status_code == 403:
        raise OSFForbiddenError(
            "User does not have access to this resource with the token provided.",
            status.HTTP_403_FORBIDDEN)

    data = response_json['data']
    meta = response_json['links']['meta']

    # Calculate pagination pages
    if '?filter' in url or '?page' in url:
        # We already have all the data we need for this request
        return data
    else:
        page_total = get_page_total(meta['total'], meta['per_page'])
        url_list = ['{}?page={}'.format(url, number)
                    for number in range(2, page_total + 1)]

    # Call all pagination pages asynchronously
    children_data = run_urls_async(url_list, headers)
    [data.extend(child['data']) for child in children_data]
    return data
cca997d479c63415b519de9cbd8ac2681abc42ed
15,541
import numpy as np


def alaw_decode(x_a, quantization_channels, input_int=True, A=87.6):
    """alaw_decode(x_a, quantization_channels, input_int=True)

    input
    -----
      x_a: np.array, a-law encoded waveform
      quantization_channels: int, Number of channels
      input_int: Bool
        True: convert x_a (int) from int to float, before a-law decode
        False: directly decode x_a (float)
      A: float, parameter for a-law, default 87.6

    output
    ------
      x: np.array, waveform
    """
    num = quantization_channels - 1.0
    if input_int:
        x = x_a / num * 2 - 1.0
    else:
        x = x_a
    sign = np.sign(x)
    x_a_abs = np.abs(x)
    x = x_a_abs * (1 + np.log(A))
    flag = x >= 1
    x[flag] = np.exp(x[flag] - 1)
    x = sign * x / A
    return x
a2e10eb590d5b7731227b96233c6b615c11d4af6
15,542
import os


def incoming(ui, repo, source="default", **opts):
    """show new changesets found in source

    Show new changesets found in the specified path/URL or the default
    pull location. These are the changesets that would have been pulled
    if a pull at the time you issued this command.

    For remote repository, using --bundle avoids downloading the
    changesets twice if the incoming is followed by a pull.

    See pull for valid source format details.
    """
    limit = cmdutil.loglimit(opts)
    source, branches = hg.parseurl(ui.expandpath(source), opts.get('branch'))
    other = hg.repository(cmdutil.remoteui(repo, opts), source)
    ui.status(_('comparing with %s\n') % url.hidepassword(source))
    revs, checkout = hg.addbranchrevs(repo, other, branches, opts.get('rev'))
    if revs:
        revs = [other.lookup(rev) for rev in revs]
    common, incoming, rheads = repo.findcommonincoming(other, heads=revs,
                                                       force=opts["force"])
    if not incoming:
        try:
            os.unlink(opts["bundle"])
        except:
            pass
        ui.status(_("no changes found\n"))
        return 1

    cleanup = None
    try:
        fname = opts["bundle"]
        if fname or not other.local():
            # create a bundle (uncompressed if other repo is not local)
            if revs is None and other.capable('changegroupsubset'):
                revs = rheads
            if revs is None:
                cg = other.changegroup(incoming, "incoming")
            else:
                cg = other.changegroupsubset(incoming, revs, 'incoming')
            bundletype = other.local() and "HG10BZ" or "HG10UN"
            fname = cleanup = changegroup.writebundle(cg, fname, bundletype)
            # keep written bundle?
            if opts["bundle"]:
                cleanup = None
            if not other.local():
                # use the created uncompressed bundlerepo
                other = bundlerepo.bundlerepository(ui, repo.root, fname)

        o = other.changelog.nodesbetween(incoming, revs)[0]
        if opts.get('newest_first'):
            o.reverse()
        displayer = cmdutil.show_changeset(ui, other, opts)
        count = 0
        for n in o:
            if limit is not None and count >= limit:
                break
            parents = [p for p in other.changelog.parents(n) if p != nullid]
            if opts.get('no_merges') and len(parents) == 2:
                continue
            count += 1
            displayer.show(other[n])
        displayer.close()
    finally:
        if hasattr(other, 'close'):
            other.close()
        if cleanup:
            os.unlink(cleanup)
2bc3d3d8b97d54b4f33dd70ae4ad54fb0c3e1792
15,543
from skimage.measure import label  # assumed: scikit-image's label, per the signature


def _ec_2d(X):
    """Function for computing the empirical Euler characteristic of a given
    thresholded data array.

    Input arguments:
    ================
    X : ndarray of floats
        The thresholded image. Ones correspond to activated regions.

    Output arguments:
    =================
    ec : float
        The empirical Euler characteristic.
    """
    # TODO: check for holes in the activated regions.
    _, ec = label(X, neighbors=None, background=0, return_num=True,
                  connectivity=2)
    return ec
ac71056a73131a8fa7ee7bc372d72c1678f028bc
15,544
import dateutil.parser


def swap_year_for_time(df, inplace):
    """Internal implementation to swap 'year' domain to 'time' (as datetime)"""
    if not df.time_col == "year":
        raise ValueError("Time domain must be 'year' to use this method")

    ret = df.copy() if not inplace else df
    index = ret._data.index
    order = [v if v != "year" else "time" for v in index.names]

    if "subannual" in df.extra_cols:
        # The original wrote `order = order.remove(...)`, which assigns the
        # None return value of list.remove and destroys `order`.
        order.remove("subannual")
        time_values = zip(*[index.get_level_values(c) for c in ["year", "subannual"]])
        time = list(map(dateutil.parser.parse, [f"{y}-{s}" for y, s in time_values]))
        index = index.droplevel(["year", "subannual"])
        ret.extra_cols.remove("subannual")
    else:
        time = index.get_level_values("year")
        index = index.droplevel(["year"])

    # add new index column, assign data and other attributes
    index = append_index_col(index, time, "time", order=order)
    ret._data.index = index
    ret.time_col = "time"
    ret._set_attributes()
    delattr(ret, "year")

    if not inplace:
        return ret
790e8d24e6c4d87413dfc5d6205cbb04f7acefd6
15,545
from typing import List, Optional


def get_orders(
    db: Session,
    skip: int = 0,
    limit: int = 50,
    moderator: str = None,
    owner: str = None,
    desc: bool = True,
) -> Optional[List[entities.Order]]:
    """
    Get the registered orders using filters.

    Args:
        - db: the database session.
        - skip: the number of filtered entities to skip.
        - limit: the number of entities to limit the query.
        - moderator: the moderator name that created the order.
        - owner: the owner name that receives the order.
        - desc: order by requested_at datetime.

    Returns:
        - the list of orders or `None` if there are no orders to return
          using the filter specified.
    """
    order_by = (
        entities.Order.requested_at.desc()
        if desc
        else entities.Order.requested_at.asc()
    )
    query = db.query(entities.Order).order_by(order_by)
    if moderator:
        query = query.filter_by(mod_display_name=moderator)
    if owner:
        query = query.filter_by(owner_display_name=owner)
    return query.offset(skip).limit(limit).all()
a5c86ccaad8573bc641f531751370c264baa60f8
15,546
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import transforms


def create_data_loader(img_dir, info_csv_path, batch_size):
    """Returns a data loader for the model."""
    img_transform = transforms.Compose([
        transforms.Resize((120, 120), interpolation=Image.BICUBIC),
        transforms.ToTensor(),
    ])
    img_dataset = FashionDataset(img_dir, img_transform, info_csv_path)
    data_loader = DataLoader(img_dataset, batch_size=batch_size, shuffle=True,
                             num_workers=12, pin_memory=True)
    return data_loader
1a7df2b691c66ef5957113d5113353f34bf8c855
15,547
def comp_number_phase_eq(self):
    """Compute the equivalent number of phases

    Parameters
    ----------
    self : LamSquirrelCage
        A LamSquirrelCage object

    Returns
    -------
    qb : float
        Zs/p
    """
    return self.slot.Zs / float(self.winding.p)
f4679cf92dffff138a5a96787244a984a11896f9
15,548
import sympy


def exprOps(expr):
    """This operation estimation is not handling some simple optimizations
    that should be done (i.e. y-x is treated as -1*x+y) and it is
    overestimating multiplications in situations such as divisions. This is
    as a result of the simple method of implementing this function given the
    rudimentary form of the expression tree. It should only be overestimating
    the number of operations though, so it is a viable way to see how much
    optimization is improving the computational load of the generated Kalman
    filters."""
    ops = OperationCounts()
    if isinstance(expr, sympy.Symbol) or isinstance(expr, sympy.Number):
        # print('--> {}'.format(expr))
        ops.reads += 1
    else:
        func = expr.func
        num = len(expr.args) - 1
        # print('--> ({}, {})'.format(func, expr.args))
        process = True
        if func == sympy.Add:
            ops.addsubs += num
        elif func == sympy.Mul:
            ops.mults += num
        elif func == sympy.Pow:
            if expr.args[1] == -1:
                ops.divs += 1
                process = False
            elif expr.args[1] > 0:
                ops.mults += int(expr.args[1].evalf() - 1)
                process = False
            else:
                print('Error: Unknown how to map expression {} to operation counts'.format(expr))
        else:
            print('Unknown function {}'.format(func))
        if process:
            for arg in expr.args:
                o = exprOps(arg)
                ops += o
    return ops
b6255707ef7475c893d9325358e7666b95c0e7c8
15,549
def ellipsis_reformat(source: str) -> str:
    """
    Move ellipses (``...``) for type stubs onto the end of the stub definition.

    Before:

    .. code-block:: python

        def foo(value: str) -> int:
            ...

    After:

    .. code-block:: python

        def foo(value: str) -> int: ...

    :param source: The source to reformat.

    :return: The reformatted source.
    """
    if "..." not in source:
        return source
    return EllipsisRewriter(source).rewrite()
162d9d863f7316bee87a04857366a7f78f68d75b
15,550
def build_wtk_filepath(region, year, resolution=None):
    """
    A utility for building WIND Toolkit filepaths.

    Args:
        region (str): region in which the lat/lon point is located
            (see `get_regions`)
        year (int): year to be accessed (see `get_regions`)
        resolution (:obj:`str`, optional): data resolution (see `get_regions`)

    Returns:
        str: The filepath for the requested resource.
    """
    wtk = _load_wtk()
    base_url = '/nrel/wtk/'

    assert region in wtk, 'region not found: %s' % region
    year_range = wtk[region]['year_range']
    year_range = range(year_range[0], year_range[1] + 1)

    assert isinstance(year, int), '"year" must be an integer'
    msg = 'year %s not available for region: %s' % (year, region)
    assert year in year_range, msg

    if resolution:
        msg = 'resolution "%s" not available for region: %s' % (
            resolution, region)
        assert resolution in wtk[region]['resolutions'], msg

    base = wtk[region].get('base')

    if resolution == '5min':
        url_region = '%s-%s/' % (region, resolution)
    else:
        url_region = region + '/'

    if base:
        file = '%s_%s.h5' % (base, year)
    else:
        file = 'wtk_%s_%s.h5' % (region, year)

    return base_url + url_region + file
93da894523a6517faaf4fa4976ba986a3719494c
15,551
import logging


def init_doc(args: dict) -> dict:
    """
    Initialize documentation variable

    :param args: A dictionary containing relevant documentation fields
    :return: the documentation dictionary
    """
    doc = {}
    try:
        doc[ENDPOINT_PORT_KEY] = args[ENDPOINT_PORT_KEY]
    except KeyError:
        logging.warning("No port for documentation specified, default one will be used: "
                        + str(DEFAULT_REST_PORT))
        doc[ENDPOINT_PORT_KEY] = DEFAULT_REST_PORT
    try:
        doc[ENDPOINT_URL_KEY] = args[ENDPOINT_URL_KEY]
    except KeyError:
        logging.warning("No URL for documentation specified, default one will be used: "
                        + DEFAULT_URL)
        doc[ENDPOINT_URL_KEY] = DEFAULT_URL
    try:
        doc[MODULE_NAME_KEY] = args[MODULE_NAME_KEY]
    except KeyError:
        logging.warning("No module name for documentation specified, default one will be used: "
                        + DEFAULT_MODULE_NAME)
        doc[MODULE_NAME_KEY] = DEFAULT_MODULE_NAME
    return doc
afa20f89595eac45e924ecdb32f9ef169fc72726
15,552
# Python 2 code: uses unichr/basestring; RE_UNICODE and name2codepoint are
# assumed module-level, as in the original.
def decode_entities(string):
    """Decodes HTML entities in the given string ("&lt;" => "<")."""
    # http://snippets.dzone.com/posts/show/4569
    def replace_entity(match):
        hash, hex, name = match.group(1), match.group(2), match.group(3)
        if hash == "#" or name.isdigit():
            if hex == "":
                return unichr(int(name))              # "&#38;" => "&"
            if hex.lower() == "x":
                return unichr(int("0x" + name, 16))   # "&#x0026;" => "&"
        else:
            cp = name2codepoint.get(name)             # "&amp;" => "&"
            return unichr(cp) if cp else match.group()  # "&foo;" => "&foo;"
    if isinstance(string, basestring):
        return RE_UNICODE.subn(replace_entity, string)[0]
    return string
480a7ed8a37b05bc65d10e513e021b00fcb718c4
15,553
import numpy as np


def convert2board(chrom, rows, cols):
    """
    Converts the chromosome represented in a list into a 2D numpy array.

    :param rows: number of rows associated with the board.
    :param cols: number of columns associated with the board.
    :param chrom: chromosome to be converted.
    :return: 2D numpy array.
    """
    # Initialise the variables to be used
    idx = int(0)  # Chromosome index
    board = np.zeros((rows, cols), 'uint8')
    board.fill(CELL_UNASSIGNED)

    # Now loop through the board adding the shapes and checking validity.
    # Start at top left corner, processing each row in turn.
    for row in range(rows):
        for col in range(cols):
            # Retrieve the next shape
            shape = chrom[idx]

            # Skip the cell if it is already occupied.
            if board[row][col] != CELL_UNASSIGNED:
                continue

            # Have we run out of shapes...
            if shape == CELL_UNASSIGNED:
                idx = idx + 1
                if idx >= len(chrom):
                    return board
                continue

            # Attempt to place the shape on the board.
            if shape == CELL_SPACE:
                # Place the hole if valid.
                if not ((col > 0 and board[row][col - 1] == CELL_SPACE) or
                        (row > 0 and board[row - 1][col] == CELL_SPACE)):
                    board[row][col] = CELL_SPACE
            elif shape == CELL_HDOMINO:
                # Are we ok to have a horizontal domino?
                if col < cols - 1 and board[row][col + 1] == CELL_UNASSIGNED:
                    board[row][col] = CELL_HDOMINO
                    board[row][col + 1] = CELL_HDOMINO
            else:  # shape == CELL_VDOMINO:
                # Are we ok to have a vertical domino?
                if row < rows - 1:
                    board[row][col] = CELL_VDOMINO
                    board[row + 1][col] = CELL_VDOMINO

            # Move on to the next shape
            idx = idx + 1
            if idx >= len(chrom):
                return board
    return board
9897965550793f54e55ce2c66c95a7584a987a4e
15,554
import numpy as np


def filter_halo_pnum(data, Ncut=1000):
    """Returns indices of halos with more than Ncut particles"""
    npart = np.array(data['np'][0])
    ind = np.where(npart > Ncut)[0]
    print("# of halos:", len(ind))
    return ind
3c89eb263399ef022c1b5492190aff282e4410e8
15,555
def _preprocess_sgm(line, is_sgm):
    """Preprocessing to strip tags in SGM files."""
    if not is_sgm:
        return line
    # In SGM files, remove <srcset ...>, <p>, <doc ...> lines.
    if line.startswith("<srcset") or line.startswith("</srcset"):
        return ""
    if line.startswith("<refset") or line.startswith("</refset"):
        return ""
    if line.startswith("<doc") or line.startswith("</doc"):
        return ""
    if line.startswith("<p>") or line.startswith("</p>"):
        return ""
    # Strip <seg> tags.
    line = line.strip()
    if line.startswith("<seg") and line.endswith("</seg>"):
        i = line.index(">")
        return line[i + 1:-6]
    # Any other SGM line falls through and is dropped (the original
    # implicitly returned None here).
    return None
0a482c5ccf2c001dfd9b52458044a1feaf62e5b9
15,556
def discover(using, index="*"):
    """
    :param using: Elasticsearch client
    :param index: Comma-separated list or wildcard expression of index names
        used to limit the request.
    """
    indices = Indices()
    for index_name, index_detail in using.indices.get(index=index).items():
        indices[index_name] = Index(
            client=using,
            name=index_name,
            mappings=index_detail["mappings"],
            settings=index_detail["settings"],
            aliases=index_detail["aliases"],
        )
    return indices
32a53f15b0db3ba2b2c092e8dbd4ffdf57f133c8
15,557
from datetime import datetime


def quote_sql_value(cursor: Cursor, value: SQLType) -> str:
    """
    Use the SQL `quote()` function to return the quoted version of `value`.

    :returns: the quoted value
    """
    if isinstance(value, (int, float, datetime)):
        return str(value)
    if value is None:
        return "NULL"
    if isinstance(value, (str, bytes)):
        cursor.execute("SELECT quote(?);", (value,))
        result = cursor.fetchall()[0][0]
        assert isinstance(result, str)
        return result
    raise ValueError(f"Do not know how to quote value of type {type(value)}")
17887be2440563a1321708f797310eb8f1731687
15,558
def create_admin_nova_client(context):
    """
    Creates client that uses trove admin credentials
    :return: a client for nova for the trove admin
    """
    client = create_nova_client(context, password=CONF.nova_proxy_admin_pass)
    return client
3fdd56ae419b5228b209a9e00fb8828c17a0d847
15,559
def page_cache(timeout=1800):
    """page cache

    param:
        timeout: the lifetime of the cache entry; default is 1800
    """
    def _func(func):
        def wrap(request, *a, **kw):
            key = request.get_full_path()
            # make sure the key is a plain byte string (handles Chinese paths);
            # the original referenced an undefined `mkey` here
            try:
                key = key.encode("utf-8")
            except Exception:
                key = str(key)
            data = None
            try:
                data = mclient.get(key)
                if not data:
                    data = func(request, *a, **kw)
                    if data:
                        mclient.set(key, data, timeout)
                return HttpResponse(data,
                                    content_type=request.META.get("CONTENT_TYPE", "text/plain"))
            except Exception as e:
                if data:
                    # the original was missing this return
                    return HttpResponse(data,
                                        content_type=request.META.get("CONTENT_TYPE", "text/plain"))
                else:
                    return HttpResponse("<objects><error>%s</error></objects>" % e,
                                        content_type=request.META.get("CONTENT_TYPE", "text/plain"))
        return wrap
    return _func  # the original returned the undefined name _funcs
ca5be8d6ad1c1d0e627e2e22dbe44532d20af5cd
15,560
def get_available_games():
    """Get a list of games that are available to join."""
    games = Game.objects.filter(started=False)  # pylint: disable=no-member
    if len(games) == 0:
        options = [('', '- None -')]
    else:
        options = [('', '- Select -')]
    for game in games:
        options.append((game.name, game.name))
    return options
245d85ce623ffe3ed9eb718aafaf7889c67dada6
15,561
import tensorflow as tf


def _add_rays_single_cam(
    camera_data: TensorDict,
    *,
    scene_from_frame: tf_geometry.Isometry,
) -> TensorDict:
    """Returns the camera, eventually with the rays added."""
    if _has_precomputed_rays(camera_data):
        return camera_data
    else:
        # Logic below for generating camera rays only applies to perspective
        # cameras. It will produce incorrect camera rays for other types of
        # cameras (e.g. those with distortions).
        camera_type = camera_data['intrinsics']['type']
        tf.debugging.assert_equal(camera_type, 'PERSPECTIVE')

        # Pinhole camera model below does not know how to handle lens
        # distortion. Ensure that no distortion exists here.
        radial_distortion = camera_data['intrinsics']['distortion']['radial']
        tf.debugging.assert_near(radial_distortion,
                                 tf.zeros_like(radial_distortion))
        tangential_distortion = (
            camera_data['intrinsics']['distortion']['tangential'])
        tf.debugging.assert_near(tangential_distortion,
                                 tf.zeros_like(tangential_distortion))

        h, w, _ = camera_data['color_image'].shape

        # Compute camera pose w.r.t scene (camera to scene transform).
        camera_from_frame = tf_geometry.Isometry(**camera_data['extrinsics'])
        scene_from_camera = scene_from_frame * camera_from_frame.inverse()

        # Get rays w.r.t scene passing through every pixel center of the camera.
        camera_intrinsics = camera_data['intrinsics']
        ray_origins, ray_directions = tf_geometry.rays_from_image_grid(
            camera=tf_geometry.PinholeCamera(
                K=camera_intrinsics['K'],
                # Use static shape if available
                image_width=w or camera_intrinsics['image_width'],
                image_height=h or camera_intrinsics['image_height']),
            world_from_camera=scene_from_camera,
        )
        camera_data['ray_origins'] = ray_origins
        camera_data['ray_directions'] = ray_directions
        return camera_data
dc9836d3ebee9ccc3ab940dc3bfe0981f9362741
15,562
def process_ps_stdout(stdout): """ Process the stdout of the ps command """ return [i.split()[0] for i in filter(lambda x: x, stdout.decode("utf-8").split("\n")[1:])]
c086cc88c51484abe4308b3ac450faaba978656e
15,563
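A minimal sketch of how process_ps_stdout might be fed: run `ps` via subprocess and pull the first column (the PIDs) out of its raw output:

import subprocess

stdout = subprocess.run(["ps", "-e"], capture_output=True).stdout
pids = process_ps_stdout(stdout)
print(pids[:5])  # first few PIDs as strings, e.g. ['1', '2', ...]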
import shlex
import pexpect

def chpasswd(path, oldpassword, newpassword):
    """Change password of a private key.
    """
    if len(newpassword) != 0 and not len(newpassword) > 4:
        return False
    cmd = shlex.split('ssh-keygen -p')
    child = pexpect.spawn(cmd[0], cmd[1:])
    i = child.expect(['Enter file in which the key is', pexpect.EOF])
    if i == 1:
        if child.isalive():
            child.wait()
        return False
    child.sendline(path)
    i = child.expect(['Enter old passphrase', 'Enter new passphrase', pexpect.EOF])
    if i == 0:
        child.sendline(oldpassword)
        i = child.expect(['Enter new passphrase', 'Bad passphrase', pexpect.EOF])
        if i != 0:
            if child.isalive():
                child.wait()
            return False
    elif i == 2:
        if child.isalive():
            child.wait()
        return False
    child.sendline(newpassword)
    i = child.expect(['Enter same passphrase again', pexpect.EOF])
    if i == 1:
        if child.isalive():
            child.wait()
        return False
    child.sendline(newpassword)
    child.expect(pexpect.EOF)
    if child.isalive():
        return child.wait() == 0
    return True
ee84bdccee24ea591db6d9c82bfce8374d1a420d
15,564
def get_display_limits(VarInst, data=None):
    """Get limits to resize the display of Variables.

    Function takes as argument a `VariableInstance` from a `Section` or
    `Planform` and an optional :obj:`data` argument, which specifies how to
    determine the limits to return.

    Parameters
    ----------
    VarInst : :obj:`~deltametrics.section.BaseSectionVariable` subclass
        The `Variable` instance to visualize. May be any subclass of
        :obj:`~deltametrics.section.BaseSectionVariable` or
        :obj:`~deltametrics.plan.BasePlanformVariable`.

    data : :obj:`str`, optional
        The type of data to compute limits for. Typically this will be the
        same value used with either :obj:`get_display_arrays` or
        :obj:`get_display_lines`. Supported options are `'spacetime'`,
        `'preserved'`, and `'stratigraphy'`.

    Returns
    -------
    xmin, xmax, ymin, ymax : :obj:`float`
        Values to use as limits on a plot. Use with, for example,
        ``ax.set_xlim((xmin, xmax))``.
    """
    # # # SectionVariables # # #
    if issubclass(type(VarInst), section.BaseSectionVariable):
        # # DataSection # #
        if isinstance(VarInst, section.DataSectionVariable):
            data = data or VarInst._default_data
            if data in VarInst._spacetime_names:
                return np.min(VarInst._S), np.max(VarInst._S), \
                    np.min(VarInst._Z), np.max(VarInst._Z)
            elif data in VarInst._preserved_names:
                VarInst._check_knows_stratigraphy()  # need to check explicitly
                return np.min(VarInst._S), np.max(VarInst._S), \
                    np.min(VarInst._Z), np.max(VarInst._Z)
            elif data in VarInst._stratigraphy_names:
                VarInst._check_knows_stratigraphy()  # need to check explicitly
                _strata = np.copy(VarInst.strat_attr['strata'])
                return np.min(VarInst._S), np.max(VarInst._S), \
                    np.min(_strata), np.max(_strata) * 1.5
            else:
                raise ValueError('Bad data argument: %s' % str(data))
        # # StratigraphySection # #
        elif isinstance(VarInst, section.StratigraphySectionVariable):
            data = data or VarInst._default_data
            if data in VarInst._spacetime_names:
                VarInst._check_knows_spacetime()  # always False
            elif data in VarInst._preserved_names:
                VarInst._check_knows_spacetime()  # always False
            elif data in VarInst._stratigraphy_names:
                return np.min(VarInst._S), np.max(VarInst._S), \
                    np.min(VarInst._Z), np.max(VarInst._Z) * 1.5
            else:
                raise ValueError('Bad data argument: %s' % str(data))
        else:
            raise TypeError
    # # # PlanformVariables # # #
    elif False:  # issubclass(type(VarInst), plan.BasePlanformVariable):
        raise NotImplementedError
    else:
        raise TypeError('Invalid "VarInst" type: %s' % type(VarInst))
d4864fccd8c282033d99fdc817e077d3f6d5b434
15,565
def plot_layer_consistency_example(eigval_col, eigvec_col, layernames, layeridx=[0,1,-1], titstr="GAN", figdir="", savelabel="", use_cuda=False):
    """Plot pairwise consistency of Hessian eigen-decompositions across layers.

    Note: for the scatter panels the aspect ratio is fixed to one.

    :param eigval_col: collection of eigenvalue arrays, one entry per layer
    :param eigvec_col: collection of eigenvector matrices, one entry per layer
    :param layernames: names of all layers, used to label the axes
    :param layeridx: indices of the layers to compare
    :param titstr: model name used in the figure title, e.g. "GAN"
    :param figdir: directory to save the figure into
    :param savelabel: label appended to the saved file name
    :return: the matplotlib figure
    """
    nsamp = len(layeridx)
    # Hnums = len(eigval_col)
    # eiglist = sorted(np.random.choice(Hnums, nsamp, replace=False))  # range(5)
    print("Plot hessian of layers : ", [layernames[idx] for idx in layeridx])
    fig = plt.figure(figsize=[10, 10], constrained_layout=False)
    spec = fig.add_gridspec(ncols=nsamp, nrows=nsamp, left=0.075, right=0.975, top=0.9, bottom=0.05)
    for axi, Li in enumerate(layeridx):
        eigval_i, eigvect_i = eigval_col[Li], eigvec_col[Li]
        for axj, Lj in enumerate(layeridx):
            eigval_j, eigvect_j = eigval_col[Lj], eigvec_col[Lj]
            inpr = eigvect_i.T @ eigvect_j
            vHv_ij = np.diag((inpr @ np.diag(eigval_j)) @ inpr.T)
            ax = fig.add_subplot(spec[axi, axj])
            if axi == axj:
                ax.hist(np.log10(eigval_j), 20)
            else:
                ax.scatter(np.log10(eigval_j), np.log10(vHv_ij), s=15, alpha=0.6)
                ax.set_aspect(1, adjustable='datalim')
            if axi == nsamp-1:
                ax.set_xlabel("eigvals @ %s" % layernames[Lj])
            if axj == 0:
                ax.set_ylabel("vHv eigvec @ %s" % layernames[Li])
    ST = plt.suptitle("Consistency of %s Hessian Across Layers\n"
                      "Cross scatter of EigenValues and vHv values for Hessian at %d Layers"%(titstr, nsamp),
                      fontsize=18)
    # plt.subplots_adjust(left=0.175, right=0.95 )
    RND = np.random.randint(1000)
    plt.savefig(join(figdir, "Hess_layer_consistency_example_%s_rnd%03d.jpg" % (savelabel, RND)), bbox_extra_artists=[ST])
    # plt.savefig(join(figdir, "Hess_layer_consistency_example_%s_rnd%03d.pdf" % (savelabel, RND)), bbox_extra_artists=[ST])
    return fig
38191663fcf1c9f05aa39127179a3cbf5f29b219
15,566
def min_vertex_cover(left_v, right_v):
    """
    Use the Hopcroft-Karp algorithm to find a maximum
    matching or maximum independent set of a bipartite graph.
    Next, find a minimum vertex cover by finding the
    complement of a maximum independent set.

    The function takes as input two dictionaries, one for the
    left vertices and one for the right vertices. Each key in
    the left dictionary is a left vertex with a value equal to
    a list of the right vertices that are connected to the key
    by an edge. The right dictionary is structured similarly.

    The output is a dictionary with keys equal to the vertices
    in a minimum vertex cover and values equal to lists of the
    vertices connected to the key by an edge.

    For example, using the following simple bipartite graph:

        1000 2000
        1001 2000

    where vertices 1000 and 1001 each have one edge and 2000
    has two edges, the input would be:

        left = {1000: [2000], 1001: [2000]}
        right = {2000: [1000, 1001]}

    and the output or minimum vertex cover would be:

        {2000: [1000, 1001]}

    with vertex 2000 being the minimum vertex cover.
    """
    data_hk = bipartiteMatch(left_v)
    left_mis = data_hk[1]
    right_mis = data_hk[2]
    mvc = left_v.copy()
    mvc.update(right_v)  # merge the two dictionaries into one

    for v in left_mis:
        try:
            del (mvc[v])
        except KeyError:
            pass
    for v in right_mis:
        try:
            del (mvc[v])
        except KeyError:
            pass
    return mvc
a94aaf6dd07b98e7f5a77b01ab6548bc401e8b03
15,567
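The docstring's worked example, runnable assuming the Hopcroft-Karp helper `bipartiteMatch` from the same module is available:

left = {1000: [2000], 1001: [2000]}
right = {2000: [1000, 1001]}
print(min_vertex_cover(left, right))  # expected: {2000: [1000, 1001]}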
def neighbor_dist(x1, y1, x2, y2): """Return distance of nearest neighbor to x1, y1 in x2, y2""" m1, m2, d12 = match_xy(x2, y2, x1, y1, neighbors=1) return d12
91b67e571d2812a9bc2e05b25a74fbca292daec7
15,568
import requests def add_artist_subscription(auth, userid, artist_mbid): """ Add an artist to the list of subscribed artists. :param tuple auth: authentication data (username, password) :param str userid: user ID (must match auth data) :param str artist_mbid: musicbrainz ID of the artist to add :return: True on success :raises: HTTPError """ url = '%s/artists/%s/%s' % (API_BASE_URL, userid, artist_mbid) response = requests.put(url, auth=auth) response.raise_for_status() return True
770be84ec9edb272c8c3d8cb1959f419f8867e1d
15,569
from pathlib import Path
import pickle

def get_built_vocab(dataset: str) -> Vocab:
    """Load the vocab file for `dataset` to get a Vocab based on the selected
    client and the data in the current directory.

    Args:
        dataset (str): name of the dataset whose vocab should be loaded

    Returns:
        the built Vocab, or None if no vocab file has been built for `dataset` yet
    """
    vocab_file_path = Path(__file__).parent.resolve() / f'{dataset}_vocab.pickle'
    if not vocab_file_path.exists():
        print('There is no built vocab file for the {} dataset; please run `main` or `build_vocab.sh` to build it first.'
              .format(dataset))
        return None
    with open(vocab_file_path, 'rb') as vocab_file:
        vocab = pickle.load(vocab_file)  # get vocab based on sample data
    return vocab
b03daba815ccddb7ff3aee2e2eac39de22ff6cff
15,570
from typing import Optional
from typing import Iterable

def binidx(num: int, width: Optional[int] = None) -> Iterable[int]:
    """ Returns the indices of bits with the value `1`.

    Parameters
    ----------
    num : int
        The number representing the binary state.
    width : int, optional
        Minimum number of digits used. The default is ``None``, meaning no
        zero-padding (padding does not change the returned indices).

    Returns
    -------
    binidx : list
    """
    fill = width or 0
    return list(sorted(i for i, char in enumerate(f"{num:0{fill}b}"[::-1]) if char == "1"))
70d1895cf0141950d8e2f5efe6bfbf7bd8dbc30b
15,571
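Two quick checks of binidx: 0b1011 has ones at bit positions 0, 1 and 3, and padding with `width` leaves the indices unchanged:

assert binidx(0b1011) == [0, 1, 3]
assert binidx(8, width=8) == [3]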
import sys
import socket
from hashlib import sha256

def hashed_class_mix_score256(cycle_hash: bytes, identifier: bytes, ip: str, ip_bytes: bytearray) -> int:
    """
    Nyzo score computation from a hash of the IP start plus the end of the last IP byte,
    to effectively reorder the various c-classes and their gaps.
    The score is then completed with the first half of the last IP byte.
    That last IP half-byte is shuffled through a permutation map built from the cycle hash,
    so that start-of-block and end-of-block IPs do not get more odds.
    Should be similar to first picking a single random class (c-class + end of IP)
    from the different classes, then picking a single block prefix from these classes.
    """
    score = sys.maxsize
    if ip == '':
        return score
    ip_bytes = bytearray(socket.inet_aton(ip))
    # seed = cycle_hash + ip_bytes[:3] + (ip_bytes[3] & 15).to_bytes(1, byteorder='big')  # one c-class + last 4 bits = one seed
    ip_bytes[3] = ip_bytes[3] & 15  # Mask first 4 bits of last byte.
    seed = cycle_hash + ip_bytes
    hashed_c = sha256(seed).digest()
    score = 0
    for i in range(32):
        # Do we need all 32 bytes? more information than the ip entropy we fed (3.5 bytes). Way faster with only 16? => 30% gain
        score += abs(cycle_hash[i] - hashed_c[i])
    # score = sum(abs(r - h) for r, h in zip(cycle_hash, hashed_c))  # Slightly slower
    score *= 16  # Up until there, score is the same for all ips of the same class
    # SHUFFLE_MAP is assumed to be a module-level permutation table built from the cycle hash.
    score += abs(SHUFFLE_MAP[ip_bytes[3]//16] - cycle_hash[0]//16)  # shuffle map so lower and highest ip parts do not get more odds
    return score
172663529b625c73ab91147864ce17cd2a4d4108
15,572
import math

def distance_vinchey(f, a, start, end):
    """
    Uses Vincenty formula for distance between two Latitude/Longitude points

    (latitude, longitude) tuples, in numeric degrees. f, a are ellipsoidal parameters.

    Returns the distance (m) between two geographic points on the ellipsoid
    and the forward and reverse azimuths between these points.
    Returns ( s, alpha12, alpha21 ) as a tuple
    """
    # Convert into notation from the original paper
    # http://www.anzlic.org.au/icsm/gdatum/chapter4.html
    #
    # Vincenty's Inverse formulae
    # Given: latitude and longitude of two points (phi1, lembda1 and phi2, lembda2)
    phi1 = math.radians(start[0])
    lembda1 = math.radians(start[1])
    phi2 = math.radians(end[0])
    lembda2 = math.radians(end[1])

    if (abs( phi2 - phi1 ) < 1e-8) and ( abs( lembda2 - lembda1) < 1e-8 ):
        return 0.0, 0.0, 0.0

    two_pi = 2.0*math.pi
    b = a * (1.0 - f)

    TanU1 = (1-f) * math.tan( phi1 )
    TanU2 = (1-f) * math.tan( phi2 )

    U1 = math.atan(TanU1)
    U2 = math.atan(TanU2)

    lembda = lembda2 - lembda1
    last_lembda = -4000000.0        # an impossible value
    omega = lembda

    # Iterate the following equations, until there is no significant change in lembda
    while ( last_lembda < -3000000.0 or lembda != 0 and abs( (last_lembda - lembda)/lembda) > 1.0e-9 ) :
        sqr_sin_sigma = pow( math.cos(U2) * math.sin(lembda), 2) + \
            pow( (math.cos(U1) * math.sin(U2) - \
            math.sin(U1) * math.cos(U2) * math.cos(lembda) ), 2 )
        Sin_sigma = math.sqrt( sqr_sin_sigma )
        Cos_sigma = math.sin(U1) * math.sin(U2) + math.cos(U1) * math.cos(U2) * math.cos(lembda)
        sigma = math.atan2( Sin_sigma, Cos_sigma )

        Sin_alpha = math.cos(U1) * math.cos(U2) * math.sin(lembda) / math.sin(sigma)
        alpha = math.asin( Sin_alpha )

        Cos2sigma_m = math.cos(sigma) - (2 * math.sin(U1) * math.sin(U2) / pow(math.cos(alpha), 2) )

        C = (f/16) * pow(math.cos(alpha), 2) * (4 + f * (4 - 3 * pow(math.cos(alpha), 2)))

        last_lembda = lembda

        lembda = omega + (1-C) * f * math.sin(alpha) * (sigma + C * math.sin(sigma) * \
            (Cos2sigma_m + C * math.cos(sigma) * (-1 + 2 * pow(Cos2sigma_m, 2) )))

    u2 = pow(math.cos(alpha),2) * (a*a-b*b) / (b*b)

    A = 1 + (u2/16384) * (4096 + u2 * (-768 + u2 * (320 - 175 * u2)))

    B = (u2/1024) * (256 + u2 * (-128+ u2 * (74 - 47 * u2)))

    delta_sigma = B * Sin_sigma * (Cos2sigma_m + (B/4) * \
        (Cos_sigma * (-1 + 2 * pow(Cos2sigma_m, 2) ) - \
        (B/6) * Cos2sigma_m * (-3 + 4 * sqr_sin_sigma) * \
        (-3 + 4 * pow(Cos2sigma_m,2 ) )))

    s = b * A * (sigma - delta_sigma)

    alpha12 = math.atan2( (math.cos(U2) * math.sin(lembda)), \
        (math.cos(U1) * math.sin(U2) - math.sin(U1) * math.cos(U2) * math.cos(lembda)))
    alpha21 = math.atan2( (math.cos(U1) * math.sin(lembda)), \
        (-math.sin(U1) * math.cos(U2) + math.cos(U1) * math.sin(U2) * math.cos(lembda)))

    if ( alpha12 < 0.0 ) :
        alpha12 = alpha12 + two_pi
    if ( alpha12 > two_pi ) :
        alpha12 = alpha12 - two_pi

    alpha21 = alpha21 + two_pi / 2.0
    if ( alpha21 < 0.0 ) :
        alpha21 = alpha21 + two_pi
    if ( alpha21 > two_pi ) :
        alpha21 = alpha21 - two_pi

    return s, alpha12, alpha21
df5ae92a12af6ab656af65a12145436089202cf2
15,573
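A sketch using the WGS-84 ellipsoid parameters (a in metres, f dimensionless); Paris to London should come out at roughly 344 km:

a = 6378137.0
f = 1 / 298.257223563
s, alpha12, alpha21 = distance_vinchey(f, a, (48.8566, 2.3522), (51.5074, -0.1278))
print(round(s / 1000, 1), "km")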
import numpy as np

def py_cpu_nms(dets, thresh):
    """Pure Python NMS baseline."""
    # unpack x1, y1, x2, y2 and the score for every detection
    dets = np.array(dets)
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    scores = dets[:, 4]

    # area of every detection box
    areas = (x2 - x1 + 1) * (y2 - y1 + 1)
    # sort by score in descending order
    order = scores.argsort()[::-1]

    keep = []  # indices of the boxes that survive suppression
    while order.size > 0:
        i = order[0]
        keep.append(i)  # keep the highest-scoring box among the remaining ones
        # intersection rectangle: top-left and bottom-right corners
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])

        # intersection area; zero when the boxes do not overlap
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        # IoU = intersection / (area1 + area2 - intersection)
        ovr = inter / (areas[i] + areas[order[1:]] - inter)

        # keep only the boxes whose IoU is at or below the threshold
        inds = np.where(ovr <= thresh)[0]
        # ovr is one element shorter than order, so shift the indices by one
        order = order[inds + 1]

    return keep
d85822e8076bf1695c6f9e7b7271b21572ebf7d6
15,574
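A small check: two heavily overlapping boxes and one distant box; with an IoU threshold of 0.5, NMS keeps the higher-scoring box of the overlapping pair plus the distant one:

boxes = [[10, 10, 50, 50, 0.9],
         [12, 12, 52, 52, 0.8],
         [100, 100, 140, 140, 0.7]]
print(py_cpu_nms(boxes, thresh=0.5))  # -> [0, 2]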
def _romanize(word: str) -> str: """ :param str word: Thai word to be romanized, should have already been tokenized. :return: Spells out how the Thai word should be pronounced. """ if not isinstance(word, str) or not word: return "" word = _replace_vowels(_normalize(word)) res = _RE_CONSONANT.findall(word) # 2-character word, all consonants if len(word) == 2 and len(res) == 2: word = list(word) word.insert(1, "o") word = "".join(word) word = _replace_consonants(word, res) return word
5e464faa1011893eb63f1f9afedd42768a8527c8
15,575
def lookup_beatmap(beatmaps: list, **lookup):
    """ Finds and returns the first beatmap with the lookup specified.

    Beatmaps is a list of beatmap dicts and could be used with beatmap_lookup().
    Lookup is any key stored in a beatmap from beatmap_lookup().
    """
    if not beatmaps:
        return None

    for beatmap in beatmaps:
        match = True
        for key, value in lookup.items():
            # lower the key once and use it consistently for both the
            # membership test and the access (the original lowered it
            # only for the test, so a capitalised lookup key raised KeyError)
            key = key.lower()
            if key not in beatmap:
                raise KeyError(f"The list of beatmaps does not have key: {key}")

            if not beatmap[key].lower() == value.lower():
                match = False
        if match:
            return beatmap
    return None
fa5f126502b5398934882139f01af8f4f80e1ea5
15,576
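A minimal sketch with hand-built beatmap dicts; keys and string values are matched case-insensitively:

beatmaps = [{"artist": "Camellia", "version": "Insane"},
            {"artist": "Nanahira", "version": "Easy"}]
print(lookup_beatmap(beatmaps, artist="NANAHIRA"))  # -> the second dict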
def scott(
    x: BinaryFeatureVector, y: BinaryFeatureVector, mask: BinaryFeatureVector = None
) -> float:
    """Scott similarity

    Scott, W. A. (1955).
    Reliability of content analysis: The case of nominal scale coding.
    Public Opinion Quarterly, 321-325.

    Args:
        x (BinaryFeatureVector): binary feature vector
        y (BinaryFeatureVector): binary feature vector

    Returns:
        float: similarity of given vectors
    """
    a, b, c, d = operational_taxonomic_units(x, y, mask)

    # Scott's pi: (4ad - (b + c)^2) / ((2a + b + c)(2d + b + c))
    return (4 * a * d - (b + c) ** 2) / ((2 * a + b + c) * (2 * d + b + c))
6b950cb2b716d2e93638b169682cdd99230cbb89
15,577
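A worked numeric check of Scott's formula, computed inline since operational_taxonomic_units is defined elsewhere; with a = 4, b = 1, c = 1, d = 4 the similarity is (64 - 4) / (10 * 10) = 0.6:

a, b, c, d = 4, 1, 1, 4
print((4 * a * d - (b + c) ** 2) / ((2 * a + b + c) * (2 * d + b + c)))  # 0.6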
import xmlrunner

def get_test_runner():
    """
    Returns a test runner instance for unittest.main. This object captures
    the test output and saves it as an xml file.
    """
    try:
        path = get_test_dir()
        runner = xmlrunner.XMLTestRunner(output=path)
        return runner
    except Exception as e:
        print("get_test_runner error: %s" % e)
        return None
6b2db3207c278a7f07ed6fd7922042beea1bfee7
15,578
def _construct_capsule(geom, pos, rot):
    """Converts a capsule geometry to a collider."""
    radius = float(geom.get('radius'))
    length = float(geom.get('length'))
    # the collider length includes the two hemispherical end caps
    length = length + 2 * radius
    return config_pb2.Collider(
        capsule=config_pb2.Collider.Capsule(radius=radius, length=length),
        rotation=_vec(euler.quat2euler(rot, 'rxyz'), scale=180 / np.pi),
        position=_vec(pos))
a4bddb7c64468515d3a36ebaac22402eeb4f16b0
15,579
def file_root_dir(tmpdir_factory): """Prepares the testing dirs for file tests""" root_dir = tmpdir_factory.mktemp('complex_file_dir') for file_path in ['file1.yml', 'arg/name/file2', 'defaults/arg/name/file.yml', 'defaults/arg/name/file2', 'vars/arg/name/file1.yml', 'vars/arg/name/file3.yml', 'vars/arg/name/nested/file4.yml']: root_dir.join(file_path).ensure() return root_dir
834e0d850e7a7dd59d792e98ed25b909d5a20567
15,580
from typing import Iterable def path_nucleotide_length(g: BifrostDiGraph, path: Iterable[Kmer]) -> int: """Compute the length of a path in nucleotides.""" if not path: return 0 node_iter = iter(path) start = next(node_iter) k = g.graph['k'] length = g.nodes[start]['length'] + k - 1 prev = start for n in node_iter: if (prev, n) not in g.edges: raise ValueError(f"Invalid path specified, ({prev}, {n}) is not an edge.") length += g.nodes[n]['length'] prev = n return length
612cff39bcf859a995d90c22e2dacb54e9c0b4c9
15,581
from bs4 import BeautifulSoup as bs

def extract_static_links(page_content):
    """Deliver the static asset links from a page source."""
    soup = bs(page_content, "html.parser")
    static_js = [
        link.get("src")
        for link in soup.findAll("script")
        if link.get("src") and "static" in link.get("src")
    ]
    static_images = [
        image.get("src")
        for image in soup.findAll("img")
        if image.get("src") and "static" in image.get("src")
    ]
    static_css = [
        link.get("href")
        for link in soup.findAll("link")
        if link.get("href") and "static" in link.get("href")
    ]
    return static_js + static_images + static_css
8ea99171d55db182fe4265042c84deca36176d84
15,582
def zero_inflated_nb(n, p, phi=0, size=None):
    """Models a zero-inflated negative binomial

    Draws samples from a negative binomial distribution in which the
    probability of a zero is additionally inflated. This basically just
    wraps the numpy negative binomial generator, where the probability
    of a zero is additionally inflated by some probability, phi...

    Parameters
    ----------
    n : int
        Parameter, > 0.
    p : float
        Parameter, 0 <= p <= 1.
    phi : float, optional
        The probability of obtaining an excess zero in the model, where
        0 <= phi <= 1. When `phi = 0`, the distribution collapses to a
        negative binomial model.
    size : int or tuple of ints, optional
        Output shape. If the given shape is, e.g., ``(m, n, k)``, then
        ``m * n * k`` samples are drawn. Default is None, in which case
        a single value is returned.

    Returns
    -------
    int or ndarray of ints
        Drawn samples

    See Also
    --------
    np.random.negative_binomial

    References
    ----------
    ..[1] Kurtz, Z.D. et al. (2015) "Sparse and Compositionally Robust
        Inference of Microbial Ecological Networks." PLoS Computational
        Biology. 11: e1004226
        http://journals.plos.org/ploscompbiol/article?id=10.1371/journal.pcbi.1004226
    """
    zeros = (np.random.binomial(1, phi, size) == 1)
    nb_ = np.random.negative_binomial(n, p, size=size)
    nb_[zeros] = 0
    return nb_
c20f28b33e070e035979093e6ebb9ed10611c5dd
15,583
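A quick sketch: with phi = 0.3, roughly 30% of draws are forced to zero on top of the zeros the negative binomial itself produces:

import numpy as np

np.random.seed(0)
samples = zero_inflated_nb(n=5, p=0.5, phi=0.3, size=10000)
print((samples == 0).mean())  # well above the zero rate of a plain NB draw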
async def get_bot_queue(
    request: Request,
    state: enums.BotState = enums.BotState.pending,
    verifier: int = None,
    worker_session = Depends(worker_session)
):
    """Admin API to get the bot queue"""
    db = worker_session.postgres
    # only fall back to the unfiltered query when no verifier is given
    # (the original overwrote the verifier-filtered result unconditionally)
    if verifier:
        bots = await db.fetch("SELECT bot_id, prefix, description FROM bots WHERE state = $1 AND verifier = $2 ORDER BY created_at ASC", state, verifier)
    else:
        bots = await db.fetch("SELECT bot_id, prefix, description FROM bots WHERE state = $1 ORDER BY created_at ASC", state)
    return {"bots": [{"user": await get_bot(bot["bot_id"]), "prefix": bot["prefix"], "invite": await invite_bot(bot["bot_id"], api = True), "description": bot["description"]} for bot in bots]}
bfbd51933b140bfd60cc7ec2a401d02048ffdeae
15,584
def ask_for_rating(): """Ask the user for a rating""" heading = '{} {}'.format(common.get_local_string(30019), common.get_local_string(30022)) try: return int(xbmcgui.Dialog().numeric(heading=heading, type=0, defaultt='')) except ValueError: return None
a7a854e02b11ac1313d69f508851d162a7748006
15,585
def isthai(text, check_all=False):
    """
    Check whether the characters in a text are Thai.

    isthai(text, check_all=False)
    text      -- a string or list of characters
    check_all -- if True, also return a per-character True/False check

    Returns {'thai': percentage of Thai characters,
             'check_all': tuple of (character, True or False) pairs}
    """
    listext = list(text)
    i = 0
    num_isthai = 0
    if check_all == True:
        listthai = []
    while i < len(listext):
        cVal = ord(listext[i])
        if cVal >= 3584 and cVal <= 3711:  # the Thai Unicode block
            num_isthai += 1
            if check_all == True:
                listthai.append(True)
        else:
            if check_all == True:
                listthai.append(False)
        i += 1
    thai = (num_isthai / len(listext)) * 100
    if check_all == True:
        dictthai = tuple(zip(listext, listthai))
        data = {'thai': thai, 'check_all': dictthai}
    else:
        data = {'thai': thai}
    return data
6a6bff64ba3b3939414e9f3aa83d169cd026e1c3
15,586
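A minimal usage sketch on a mixed Thai/Latin string (6 of the 9 characters are Thai):

result = isthai("สวัสดีabc", check_all=True)
print(result["thai"])       # -> 66.66... (percentage of Thai characters)
print(result["check_all"])  # per-character (char, True/False) pairs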
from typing import List
from typing import Optional

def _convert_object_array(
    content: List[Scalar], dtype: Optional[DtypeObj] = None
) -> List[Scalar]:
    """
    Internal function to convert object array.

    Parameters
    ----------
    content: list of processed data records
    dtype: np.dtype, default is None

    Returns
    -------
    arrays: casted content if not object dtype, otherwise return as is in list.
    """
    # provide soft conversion of object dtypes
    def convert(arr):
        if dtype != np.dtype("O"):
            arr = lib.maybe_convert_objects(arr)
            arr = maybe_cast_to_datetime(arr, dtype)
        return arr

    arrays = [convert(arr) for arr in content]
    return arrays
7b093057b05afa93ced881289d22b1eda91018f0
15,587
def update_room_time(conn, room_name: str, req_time: int) -> int:
    """Take a lock on the room and update its timestamp.

    Note that if a query is issued after the transaction starts but before
    this function is called, ordinary SELECT queries inside that transaction
    will return results as they were before the lock was acquired
    (keywords: MVCC, repeatable read).
    """
    cur = conn.cursor()

    # See page 13 and 17 in https://www.slideshare.net/ichirin2501/insert-51938787
    cur.execute("INSERT INTO room_time(room_name, time) VALUES (%s, 0) ON DUPLICATE KEY UPDATE time = time",
                (room_name,))
    cur.execute("SELECT time FROM room_time WHERE room_name = %s FOR UPDATE", (room_name,))
    room_time = cur.fetchone()[0]
    current_time = get_current_time(conn)
    if room_time > current_time:
        raise RuntimeError(f"room_time is future: room_time={room_time}, req_time={req_time}")
    if req_time and req_time < current_time:
        raise RuntimeError(f"req_time is past: req_time={req_time}, current_time={current_time}")

    cur.execute("UPDATE room_time SET time = %s WHERE room_name = %s", (current_time, room_name))
    return current_time
78066e9666ee28217f790fb8c26d2ade8c2ace7c
15,588
def get_layer_coverage(cat, store, store_obj): """Get correct layer coverage from a store.""" coverages = cat.mosaic_coverages(store_obj) # Find the correct coverage coverage = None for cov in coverages["coverages"]["coverage"]: if store == cov['name']: coverage = cov break if coverage is None: logger.warning("Layer '%s' not found", store) return coverage
498c4a8db1a82dafd8569314e4faf13517e75aba
15,589
import logging import time def retarget(songs, duration, music_labels=None, out_labels=None, out_penalty=None, volume=None, volume_breakpoints=None, springs=None, constraints=None, min_beats=None, max_beats=None, fade_in_len=3.0, fade_out_len=5.0, **kwargs): """Retarget a song to a duration given input and output labels on the music. Suppose you like one section of a song, say, the guitar solo, and you want to create a three minute long version of the solo. Suppose the guitar solo occurs from the 150 second mark to the 200 second mark in the original song. You can set the label the guitar solo with 'solo' and the rest of the song with 'other' by crafting the ``music_labels`` input function. And you can set the ``out_labels`` function to give you nothing but solo:: def labels(t): if 150 < t < 200: return 'solo' return 'other' def target(t): return 'solo' song = Song("sweet-rock-song.wav") composition, info = retarget(song, 180, music_labels=labels, out_labels=target) composition.export(filename="super-long-solo") You can achieve much more complicated retargetings by adjusting the ``music_labels``, `out_labels` and ``out_penalty`` functions, but this should give you a basic sense of how to use the ``retarget`` function. :param song: Song to retarget :type song: :py:class:`radiotool.composer.Song` :param duration: Duration of retargeted song (in seconds) :type duration: float :param music_labels: A function that takes a time (in seconds) and returns the label (str) of the input music at that time :type music_labels: function :param out_labels: A function that takes a time (in seconds) and returns the desired label (str) of the output music at that time :type out_labels: function :param out_penalty: A function that takes a time (in seconds) and returns the penalty for not matching the correct output label at that time (default is 1.0) :type out_penalty: function :returns: Composition of retargeted song, and dictionary of information about the retargeting :rtype: (:py:class:`radiotool.composer.Composition`, dict) """ # get song analysis if isinstance(songs, Track): songs = [songs] multi_songs = len(songs) > 1 analyses = [s.analysis for s in songs] # generate labels for every beat in the input and output beat_lengths = [a[BEAT_DUR_KEY] for a in analyses] beats = [a["beats"] for a in analyses] beat_length = np.mean(beat_lengths) logging.info("Beat lengths of songs: {} (mean: {})". format(beat_lengths, beat_length)) if out_labels is not None: target = [out_labels(i) for i in np.arange(0, duration, beat_length)] else: target = ["" for i in np.arange(0, duration, beat_length)] if music_labels is not None: if not multi_songs: music_labels = [music_labels] music_labels = [item for sublist in music_labels for item in sublist] if len(music_labels) != len(songs): raise ArgumentException("Did not specify {} sets of music labels". format(len(songs))) start = [[music_labels[i](j) for j in b] for i, b in enumerate(beats)] else: start = [["" for i in b] for b in beats] if out_penalty is not None: pen = np.array([out_penalty(i) for i in np.arange( 0, duration, beat_length)]) else: pen = np.array([1 for i in np.arange(0, duration, beat_length)]) # we're using a valence/arousal constraint, so we need these in_vas = kwargs.pop('music_va', None) if in_vas is not None: if not multi_songs: in_vas = [in_vas] in_vas = [item for sublist in in_vas for item in sublist] if len(in_vas) != len(songs): raise ArgumentException("Did not specify {} sets of v/a labels". 
                format(len(songs)))
        for i, in_va in enumerate(in_vas):
            if callable(in_va):
                in_va = np.array([in_va(j) for j in beats[i]])
                in_vas[i] = in_va

    target_va = kwargs.pop('out_va', None)
    if callable(target_va):
        target_va = np.array(
            [target_va(i) for i in np.arange(0, duration, beat_length)])

    # set constraints
    if constraints is None:
        min_pause_len = 20.
        max_pause_len = 35.
        min_pause_beats = int(np.ceil(min_pause_len / beat_length))
        max_pause_beats = int(np.floor(max_pause_len / beat_length))

        constraints = [(
            rt_constraints.PauseConstraint(
                min_pause_beats, max_pause_beats,
                to_penalty=1.4, between_penalty=.05, unit="beats"),
            rt_constraints.PauseEntryVAChangeConstraint(target_va, .005),
            rt_constraints.PauseExitVAChangeConstraint(target_va, .005),
            rt_constraints.TimbrePitchConstraint(
                context=0, timbre_weight=1.5, chroma_weight=1.5),
            rt_constraints.EnergyConstraint(penalty=0.5),
            rt_constraints.MinimumLoopConstraint(8),
            rt_constraints.ValenceArousalConstraint(
                in_va, target_va, pen * .125),
            rt_constraints.NoveltyVAConstraint(in_va, target_va, pen),
        ) for in_va in in_vas]
    else:
        max_pause_beats = 0

    if len(constraints) > 0:
        if isinstance(constraints[0], rt_constraints.Constraint):
            constraints = [constraints]

    pipelines = [rt_constraints.ConstraintPipeline(constraints=c_set)
                 for c_set in constraints]

    trans_costs = []
    penalties = []
    all_beat_names = []
    for i, song in enumerate(songs):
        (trans_cost, penalty, bn) = pipelines[i].apply(song, len(target))
        trans_costs.append(trans_cost)
        penalties.append(penalty)
        all_beat_names.append(bn)

    logging.info("Combining tables")
    total_music_beats = int(np.sum([len(b) for b in beats]))
    total_beats = total_music_beats + max_pause_beats

    # combine transition cost tables
    trans_cost = np.ones((total_beats, total_beats)) * np.inf
    sizes = [len(b) for b in beats]
    idx = 0
    for i, size in enumerate(sizes):
        trans_cost[idx:idx + size, idx:idx + size] =\
            trans_costs[i][:size, :size]
        idx += size

    trans_cost[:total_music_beats, total_music_beats:] =\
        np.vstack([tc[:len(beats[i]), len(beats[i]):]
                   for i, tc in enumerate(trans_costs)])
    trans_cost[total_music_beats:, :total_music_beats] =\
        np.hstack([tc[len(beats[i]):, :len(beats[i])]
                   for i, tc in enumerate(trans_costs)])
    trans_cost[total_music_beats:, total_music_beats:] =\
        trans_costs[0][len(beats[0]):, len(beats[0]):]

    # combine penalty tables
    penalty = np.empty((total_beats, penalties[0].shape[1]))
    penalty[:total_music_beats, :] =\
        np.vstack([p[:len(beats[i]), :]
                   for i, p in enumerate(penalties)])
    penalty[total_music_beats:, :] = penalties[0][len(beats[0]):, :]

    logging.info("Building cost table")

    # compute the dynamic programming table (prev python method)
    # cost, prev_node = _build_table(analysis, duration, start, target, pen)

    # first_pause = 0
    # if max_pause_beats > 0:
    first_pause = total_music_beats

    if min_beats is None:
        min_beats = 0
    elif min_beats == 'default':
        min_beats = int(20. / beat_length)

    if max_beats is None:
        max_beats = -1
    elif max_beats == 'default':
        max_beats = int(90. / beat_length)
        max_beats = min(max_beats, penalty.shape[1])

    tc2 = np.nan_to_num(trans_cost)
    pen2 = np.nan_to_num(penalty)

    beat_names = []
    for i, bn in enumerate(all_beat_names):
        for b in bn:
            if not str(b).startswith('p'):
                beat_names.append((i, float(b)))
    beat_names.extend([('p', i) for i in range(max_pause_beats)])

    result_labels = []

    logging.info("Running optimization (full backtrace, memory efficient)")
    logging.info("\twith min_beats(%d) and max_beats(%d) and first_pause(%d)" %
                 (min_beats, max_beats, first_pause))

    song_starts = [0]
    for song in songs:
        song_starts.append(song_starts[-1] + len(song.analysis["beats"]))
    song_ends = np.array(song_starts[1:], dtype=np.int32)
    song_starts = np.array(song_starts[:-1], dtype=np.int32)

    t1 = time.perf_counter()
    path_i, path_cost = build_table_full_backtrace(
        tc2, pen2, song_starts, song_ends,
        first_pause=first_pause, max_beats=max_beats, min_beats=min_beats)
    t2 = time.perf_counter()
    logging.info("Built table (full backtrace) in {} seconds"
                 .format(t2 - t1))

    path = []
    if max_beats == -1:
        max_beats = min_beats + 1
    first_pause_full = max_beats * first_pause
    n_beats = first_pause
    for i in path_i:
        if i >= first_pause_full:
            path.append(('p', i - first_pause_full))
            result_labels.append(None)
            # path.append('p' + str(i - first_pause_full))
        else:
            path.append(beat_names[i % n_beats])
            song_i = path[-1][0]
            beat_name = path[-1][1]
            result_labels.append(
                start[song_i][np.where(np.array(beats[song_i]) ==
                                       beat_name)[0][0]])
            # path.append(float(beat_names[i % n_beats]))

    # else:
    #     print("Running optimization (fast, full table)")
    #     # this won't work right now- needs to be updated
    #     # with the multi-song approach
    #     # fortran method
    #     t1 = time.clock()
    #     cost, prev_node = build_table(tc2, pen2)
    #     t2 = time.clock()
    #     print("Built table (fortran) in {} seconds".format(t2 - t1))
    #     res = cost[:, -1]
    #     best_idx = N.argmin(res)
    #     if N.isfinite(res[best_idx]):
    #         path, path_cost, path_i = _reconstruct_path(
    #             prev_node, cost, beat_names, best_idx, N.shape(cost)[1] - 1)
    #         # path_i = [beat_names.index(x) for x in path]
    #     else:
    #         # throw an exception here?
    #         return None

    #     path = []
    #     result_labels = []
    #     if max_pause_beats == 0:
    #         n_beats = total_music_beats
    #         first_pause = n_beats
    #     else:
    #         n_beats = first_pause
    #     for i in path_i:
    #         if i >= first_pause:
    #             path.append(('p', i - first_pause))
    #             result_labels.append(None)
    #         else:
    #             path.append(beat_names[i % n_beats])
    #             song_i = path[-1][0]
    #             beat_name = path[-1][1]
    #             result_labels.append(
    #                 start[song_i][N.where(N.array(beats[song_i]) ==
    #                                       beat_name)[0][0]])

    # return a radiotool Composition
    logging.info("Generating audio")
    (comp, cf_locations, result_full_labels,
     cost_labels, contracted, result_volume) =\
        _generate_audio(
            songs, beats, path, path_cost, start,
            volume=volume,
            volume_breakpoints=volume_breakpoints,
            springs=springs,
            fade_in_len=fade_in_len, fade_out_len=fade_out_len)

    info = {
        "beat_length": beat_length,
        "contracted": contracted,
        "cost": np.sum(path_cost) / len(path),
        "path": path,
        "path_i": path_i,
        "target_labels": target,
        "result_labels": result_labels,
        "result_full_labels": result_full_labels,
        "result_volume": result_volume,
        "transitions": [Label("crossfade", loc) for loc in cf_locations],
        "path_cost": cost_labels
    }

    return comp, info
8ed317392e74545916d1ef33e282bce5c6846009
15,590
def st_get_ipfs_cache_path(user_did): """ Get the root dir of the IPFS cache files. :param user_did: The user DID :return: Path: the path of the cache root. """ return _st_get_vault_path(user_did) / 'ipfs_cache'
4217b178025c395619d9def035d11cc96f2b139a
15,591
def create_img_caption_int_data(filepath): """ function to load captions from text file and convert them to integer format :return: dictionary with image ids and associated captions in int format """ print("\nLoading caption data : started") # load caption data img_caption_dict = load_img_caption_data(filepath) # merge caption text data text_data = " ".join([" ".join(txt) for txt in img_caption_dict.values()]) # create word to int mappings (word_to_int_map, int_to_word_map) = create_word_mappings(text_data) # convert caption data to int img_caption_int_dict = {} for key, value in img_caption_dict.items(): img_caption_int_dict[key] = [convert_text_to_int(txt, word_to_int_map) for txt in value] print("\nLoading caption data : completed") return img_caption_int_dict
6d1a449c1b5be7759c65740440865c72546514ef
15,592
def eigenvector_2d_symmetric(a, b, d, eig, eps=1e-8): """Returns normalized eigenvector corresponding to the provided eigenvalue. Note that this a special case of a 2x2 symmetric matrix where every element of the matrix is passed as an image. This allows the evaluation of eigenvalues to be vectorized over the entire image. This is much more efficient than calling the numpy function for computing the eigenvectors for each pixel of the image. This function solves: | a-lambda b | | b d-lambda | [x, y] = 0 Which means that: bx = (lambda - d) y or y = (lambda - a)/b x This solution is invalid for b == 0. Here we expect orthogonal vectors [1 0] and [0 1]. ax + by = l x bx + dy = l y so x = 1 iff b = 0 and l = a and y = 1 iff b = 0 and l = d """ ex = np.zeros(a.shape) ey = np.zeros(a.shape) ex[np.abs(a - eig) < eps] = 1 ey[np.abs(d - eig) < eps] = 1 mask = np.abs(b) > eps tx = b[mask] ty = eig[mask] - a[mask] length = np.sqrt(tx * tx + ty * ty) tx = tx / length ty = ty / length ex[mask] = tx ey[mask] = ty return ex, ey
88a97af77e3f3097b6742db340f8e9559fb8164a
15,593
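A sketch on a one-pixel "image": the symmetric matrix [[2, 1], [1, 2]] has eigenvalue 3 with normalized eigenvector [1, 1]/sqrt(2), so both components come out as about 0.7071:

import numpy as np

a = np.array([[2.0]])
b = np.array([[1.0]])
d = np.array([[2.0]])
eig = np.array([[3.0]])
ex, ey = eigenvector_2d_symmetric(a, b, d, eig)
print(ex, ey)  # -> [[0.70710678]] [[0.70710678]]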
from typing import Optional def get_protection_path_name(protection: Optional[RouteProtection]) -> str: """Get the protection's path name.""" if protection is None: return DEFAULT_PROTECTION_NAME return protection
f3abaf21c9ba3cfe6c0ae793afaf018fce20dec9
15,594
def _get_object_description(target): """Return a string describing the *target*""" if isinstance(target, list): data = "<list, length {}>".format(len(target)) elif isinstance(target, dict): data = "<dict, length {}>".format(len(target)) else: data = target return data
57ad3803a702a1199639b8fe950ef14b8278bec1
15,595
def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. I use a map to keep the PyTorch model as identical to the original PyTorch model as possible. """ tf_to_pt_map = {} if hasattr(model, "transformer"): if hasattr(model, "lm_loss"): # We will load also the output bias tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights: # We will load also the sequence summary tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias if ( hasattr(model, "logits_proj") and config.finetuning_task is not None and "model/regression_{}/logit/kernel".format(config.finetuning_task) in tf_weights ): tf_to_pt_map["model/regression_{}/logit/kernel".format(config.finetuning_task)] = model.logits_proj.weight tf_to_pt_map["model/regression_{}/logit/bias".format(config.finetuning_task)] = model.logits_proj.bias # Now load the rest of the transformer model = model.transformer # Embeddings and output tf_to_pt_map.update( { "model/transformer/word_embedding/lookup_table": model.word_embedding.weight, "model/transformer/mask_emb/mask_emb": model.mask_emb, } ) # Transformer blocks for i, b in enumerate(model.layer): layer_str = "model/transformer/layer_%d/" % i tf_to_pt_map.update( { layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight, layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias, layer_str + "rel_attn/o/kernel": b.rel_attn.o, layer_str + "rel_attn/q/kernel": b.rel_attn.q, layer_str + "rel_attn/k/kernel": b.rel_attn.k, layer_str + "rel_attn/r/kernel": b.rel_attn.r, layer_str + "rel_attn/v/kernel": b.rel_attn.v, layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight, layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias, layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight, layer_str + "ff/layer_1/bias": b.ff.layer_1.bias, layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight, layer_str + "ff/layer_2/bias": b.ff.layer_2.bias, } ) # Relative positioning biases if config.untie_r: r_r_list = [] r_w_list = [] r_s_list = [] seg_embed_list = [] for b in model.layer: r_r_list.append(b.rel_attn.r_r_bias) r_w_list.append(b.rel_attn.r_w_bias) r_s_list.append(b.rel_attn.r_s_bias) seg_embed_list.append(b.rel_attn.seg_embed) else: r_r_list = [model.r_r_bias] r_w_list = [model.r_w_bias] r_s_list = [model.r_s_bias] seg_embed_list = [model.seg_embed] tf_to_pt_map.update( { "model/transformer/r_r_bias": r_r_list, "model/transformer/r_w_bias": r_w_list, "model/transformer/r_s_bias": r_s_list, "model/transformer/seg_embed": seg_embed_list, } ) return tf_to_pt_map
b822a5f5effcf1d925dec0e6c6b166ecb89b6627
15,596
def dynamicviewset(viewset): """ The activate route only makes sense if user activation is required, remove the route if activation is turned off """ if not settings['REQUIRE_ACTIVATION'] and hasattr(viewset, 'activate'): delattr(viewset, 'activate') return viewset
f31a191c7c4d51163f588fa4e728f92fb7d43816
15,597
import random def generate_arabic_place_name(min_length=0): """Return a randomly generated, potentially multi-word fake Arabic place name""" make_name = lambda n_words: ' '.join(random.sample(place_names, n_words)) n_words = 3 name = make_name(n_words) while len(name) < min_length: n_words += 1 name = make_name(n_words) return name
7efc760b8dcf5f8807e2d203542fa637908cbad2
15,598
def find_cutoffs(x,y,crdist,deltas): """function for identifying locations of cutoffs along a centerline and the indices of the segments that will become part of the oxbows from MeanderPy x,y - coordinates of centerline crdist - critical cutoff distance deltas - distance between neighboring points along the centerline""" diag_blank_width = int((crdist+20*deltas)/deltas) # distance matrix for centerline points: dist = distance.cdist(np.array([x,y]).T,np.array([x,y]).T) dist[dist>crdist] = np.NaN # set all values that are larger than the cutoff threshold to NaN # set matrix to NaN along the diagonal zone: for k in range(-diag_blank_width,diag_blank_width+1): rows, cols = kth_diag_indices(dist,k) dist[rows,cols] = np.NaN i1, i2 = np.where(~np.isnan(dist)) ind1 = i1[np.where(i1<i2)[0]] # get rid of unnecessary indices ind2 = i2[np.where(i1<i2)[0]] # get rid of unnecessary indices return ind1, ind2 # return indices of cutoff points and cutoff coordinates
82de02759c70ab746d2adcfafd04313cbb0a8c4e
15,599