content : string (lengths 35 – 762k)
sha1 : string (lengths 40 – 40)
id : int64 (0 – 3.66M)
def compare(isamAppliance1, isamAppliance2):
    """
    Compare Policy Sets between two appliances
    """
    ret_obj1 = get_all(isamAppliance1)
    ret_obj2 = get_all(isamAppliance2)

    for obj in ret_obj1['data']:
        del obj['id']
        del obj['userlastmodified']
        del obj['lastmodified']
        del obj['datecreated']
        obj['policies'] = _convert_policy_id_to_name(isamAppliance1, obj['policies'])
    for obj in ret_obj2['data']:
        del obj['id']
        del obj['userlastmodified']
        del obj['lastmodified']
        del obj['datecreated']
        obj['policies'] = _convert_policy_id_to_name(isamAppliance2, obj['policies'])

    return tools.json_compare(ret_obj1, ret_obj2,
                              deleted_keys=['id', 'userlastmodified', 'lastmodified', 'datecreated'])
a727eac5efb4e117413d70191e7a74921e3ac284
7,803
def capacity(quantity, channel, gamma, dim, basis, eps, **kwargs):
    """
    Runs the Blahut-Arimoto algorithm to compute the capacity given by
    'quantity' (which can be 'h', 'tc', 'coh' or 'qmi'), taking the channel,
    gamma, dim, basis and tolerance (eps) as inputs.

    With the optional keyword argument 'plot' (boolean), it outputs a plot
    showing how the calculated value changes with the number of iterations.
    With the optional keyword argument 'latexplot' (boolean), the plot uses
    LaTeX in the labels.
    """
    # To store the calculated values
    itern = []
    value = []
    # Initialization: maximally mixed state
    rhoa = DensityMatrix(np.diag((1 / dim) * np.ones((1, dim))[0]))
    # Blahut-Arimoto algorithm iteration
    for iterator in range(int(gamma * np.log2(dim) / eps)):
        itern.append(iterator)
        sigmab = rhoa
        rhoa = linalg.expm(np.log(2) * (linalg.logm(sigmab.mat) / np.log(2)
                                        + (1 / gamma) * F(quantity, sigmab, basis, channel).mat))
        rhoa = DensityMatrix(rhoa / np.trace(rhoa))
        value.append(J(quantity, rhoa, rhoa, gamma, basis, channel))
    # Plotting (kwargs.get avoids a KeyError when the option is not passed)
    if kwargs.get('plot') is True:
        if kwargs.get('latexplot') is True:
            plt.rc('text', usetex=True)
            plt.rc('font', family='serif')
        fig, ax = plt.subplots()
        plt.plot(itern, value, marker='.', markersize='7',
                 label=r'Capacity value vs iteration')
        plt.xlabel(r'Number of iterations', fontsize='14')
        plt.ylabel(r'Value of capacity', fontsize='14')
        plt.xticks(fontsize='8')
        plt.yticks(fontsize='8')
        plt.grid(True)
        plt.show()
    return J(quantity, rhoa, rhoa, gamma, basis, channel)
6624d045fb953d536b082f183a6c1536dcd9ca50
7,804
def get_Teq_from_L(L: ArrayLike, d: ArrayLike, A: ArrayLike) -> np.ndarray:
    """Calculates the equilibrium temperature of a planet given the stellar
    luminosity L, planetary semi-major axis d and surface albedo A.

    Args:
        L (ArrayLike): Stellar luminosity in erg/s.
        d (ArrayLike): Planetary semi-major axis in cm.
        A (ArrayLike): Planetary albedo.

    Returns:
        np.ndarray: The planetary equilibrium temperature in K.
    """
    return ((L * (1 - A)) / (16 * sigma_b * np.pi * d ** 2)) ** 0.25
9f140c554059074d9569e48ae2f971bc430e2fba
7,805
from typing import Type


def lookup_container_plugin_by_type(container: IContainer,
                                    plugin_type: Type[ContainerResolutionPlugin]):
    """
    Given a container, finds the first plugin that is an instance of the
    specified type.

    :param container: The container to perform the lookup on.
    :param plugin_type: The type of the plugin to find.
    :return: The first instance of ``plugin_type`` in ``container.plugins``.
    """
    return next(
        plugin
        for plugin in container.plugins
        if isinstance(plugin, plugin_type)
    )
b41cfc2e1e1328a8f54e938b7944d3f16924d3cf
7,806
from scipy.ndimage import shift


def shift_map_longitude(mapdata, lonshift, spline_order=1):
    """
    Simple shift of the map by wrapping it around the edges.

    Internally uses scipy's ndimage.shift with the requested spline
    interpolation order.

    Parameters
    ----------
    mapdata : 2D Numpy array
        A map with the second dimension the longitude, stretched fully
        along the map
    lonshift : float
        A simple float representing the longitude shift of the array
    spline_order : int [1, 5]

    Returns
    -------
    A shifted map
    """
    # Constant
    degrees = 360.0

    # Check the map and compute the relative shift
    assert len(mapdata.shape) == 2, "Only for 2D maps"
    assert mapdata.shape[1] > 1, "Map has only one longitudinal coordinate"
    n = (mapdata.shape[1] - 1)
    # Convert the shift in degrees to a shift in pixels (n pixels span 360 degrees)
    x = n * lonshift / degrees

    # Use scipy for the rest
    mapdata_shift = shift(mapdata, [0, x], mode='wrap', order=spline_order)

    return mapdata_shift
72373800f3a53785989cc2e2da4dab08d0976b30
7,807
def aalogoheights(aahistObj, N=20):
    """For an objhist of AA frequencies, compute the heights of each AA for a logo plot"""
    aahistObj = deepcopy(aahistObj)
    keys = list(aahistObj.keys())
    for aa in BADAA:
        if aa in keys:
            dummy = aahistObj.pop(aa)
    keys = [aa for aa in aahistObj.sortedKeys(reverse=False)]
    freq = aahistObj.freq()
    p = np.array([freq[k] for k in keys])
    # err = (1/np.log(2))*((N-1) / (2*aahistObj.sum()))
    # totEntropy = np.log2(N)-((-p*np.log2(p)).sum() + err)
    totEntropy = np.log2(N) - ((-p * np.log2(p)).sum())
    heights = p * totEntropy
    return keys, heights
8020605d6c2a9a618e5faed57ba7af5e1315dfec
7,808
def cmdline_opts( request ):
    """PyMTL options parsed from pytest commandline options."""
    opts = _parse_opts_from_request( request )

    # If a fixture is used by a test class, this seems to be the only
    # way to retrieve the fixture value.
    # https://stackoverflow.com/a/37761165/13190001
    if request.cls is not None:
        request.cls.cmdline_opts = opts

    return opts
8b3af4ab15a1a5a11a633fa322e4484f2d8257bc
7,809
def replace(index, ndim, axes, rindices):
    """Replace indexing for specified dimensions

    Args:
        index(index): object used in slicing
        ndim(num): number of dimensions
        axes(list): dimensions to be replaced
        rindices(list): new indexing for these dimensions

    Returns:
        index
    """
    index2 = list(expand(index, ndim))
    for axis, rindex in zip(axes, rindices):
        axis = axisindex(index2, axis, ndim)
        index2[axis] = rindex
    return tuple(index2)
3a8c9ac8b9bf12a5d416e422ddfb0f4458cf9417
7,810
import select


def _closed(sock):
    """Return True if we know socket has been closed, False otherwise."""
    try:
        rd, _, _ = select.select([sock], [], [], 0)
    # Any exception here is equally bad (select.error, ValueError, etc.).
    except:
        return True
    return len(rd) > 0
4de2aee7743cac8e660ab01f2920935faf0ee3e9
7,811
def get_forest_connection(device_name: str, seed=None):
    """Get a connection to a forest backend

    Args:
        device_name: the device to connect to
        seed: optional random seed for the wavefunction simulator

    Returns:
        A connection to either a pyquil simulator or a QPU
    """
    if device_name == "wavefunction-simulator":
        return WavefunctionSimulator(random_seed=seed)
    else:
        return get_qc(device_name)
291c92508b097908fa86fb957b42d73066d65ebd
7,812
def add_suffix(path, suffix=""):
    """Adds a suffix to a filename *path*"""
    return join(dirname(path), basename(path, ext=False) + suffix + extname(path))
dd95548e06e29c91f0a35c5dd0979889ab945076
7,814
def MdAE_np(preds, labels):
    """
    Median Absolute Error

    :param preds: predicted values
    :param labels: ground-truth values
    :return: median of the absolute errors
    """
    preds = np.reshape(preds, [-1])
    labels = np.reshape(labels, [-1])
    return np.median(np.abs(preds - labels))
4a725eb35e5f7bd1f77b8433b7ea7393bbdae92e
7,815
from botocore.exceptions import ClientError, BotoCoreError


async def s3_fetch_object(url, s3, range=None, **kw):
    """Returns object with

    On success:
        .url = url
        .data = bytes
        .last_modified -- last modified timestamp
        .range = None | (in, out)
        .error = None

    On failure:
        .url = url
        .data = None
        .last_modified = None
        .range = None | (in, out)
        .error = str | botocore.Exception class
    """
    def result(data=None, last_modified=None, error=None):
        return SimpleNamespace(url=url, data=data, error=error,
                               last_modified=last_modified, range=range)

    bucket, key = s3_url_parse(url)
    extra_args = dict(**kw)

    if range is not None:
        try:
            extra_args['Range'] = s3_fmt_range(range)
        except Exception:
            return result(error='Bad range passed in: ' + str(range))

    try:
        obj = await s3.get_object(Bucket=bucket, Key=key, **extra_args)
        stream = obj.get('Body', None)
        if stream is None:
            return result(error='Missing Body in response')
        async with stream:
            data = await stream.read()
    except (ClientError, BotoCoreError) as e:
        return result(error=e)
    except Exception as e:
        return result(error="Some Error: " + str(e))

    last_modified = obj.get('LastModified', None)
    return result(data=data, last_modified=last_modified)
0da8fadb248abe8c4c23e75367b3ddc884df71d3
7,816
import platform

from . import darwin
from . import linux
from . import windows


def mss(**kwargs):
    # type: (Any) -> MSSMixin
    """Factory returning a proper MSS class instance.

    It detects the platform we are running on and chooses the most adapted
    mss_class to take screenshots.

    It then proxies its arguments to the class for instantiation.
    """
    # pylint: disable=import-outside-toplevel

    os_ = platform.system().lower()

    if os_ == "darwin":
        return darwin.MSS(**kwargs)

    if os_ == "linux":
        return linux.MSS(**kwargs)

    if os_ == "windows":
        return windows.MSS(**kwargs)

    raise ScreenShotError("System {!r} not (yet?) implemented.".format(os_))
057916bf6b13bd6089ccb6f46b1a2ceb583d5bf8
7,817
def reshape_fps(X):
    """Reshape 4D fingerprint data to 2D

    If X is already 2D, do nothing.

    Returns: reshaped X
    """
    if len(X.shape) == 4:
        num_factors = X.shape[3]
        num_fps = np.prod(X.shape[:3])
        X.shape = (num_fps, num_factors)
    else:
        num_factors = X.shape[1]
        num_fps = X.shape[0]
    return X
ab2cd286194dd6d35fb27a540378a132d25db575
7,818
def df_fc_overlap_2():
    """Scenario case with 2 fragments overlapping, bound to a common fragment."""
    mol = Chem.MolFromSmiles('NC1CC(CCC1O)C1CCC1')
    return DataFrame([
        ['mol_fc_overlap_2', 'XXX', 'O1', 0, 'O1:0', 'O2', 0, 'O2:0', 'ffo', 'fusion',
         'false_positive', 'overlap', (7, 6, 5, 4, 3, 2, 1), (0, 1, 2, 3, 4, 5, 6), 12,
         mol, mol_o1, mol_o2, 'O1:0@1,2,3,4,5,6[ffo]O2:0@1,2,3,4,5,6'],
        ['mol_fc_overlap_2', 'XXX', 'O1', 0, 'O1:0', 'O3', 0, 'O3:0', 'cm', 'connection',
         'monopodal', '', (7, 6, 5, 4, 3, 2, 1), (8, 9, 10, 11), 12,
         mol, mol_o1, mol_o3, 'O1:0@4[cm]O3:0@0'],
        ['mol_fc_overlap_2', 'XXX', 'O2', 0, 'O2:0', 'O3', 0, 'O3:0', 'cm', 'connection',
         'monopodal', '', (0, 1, 2, 3, 4, 5, 6), (8, 9, 10, 11), 12,
         mol, mol_o2, mol_o3, 'O2:0@3[cm]O3:0@0'],
    ], columns=['idm', 'inchikey', 'idf1', 'idxf1', 'fid1', 'idf2', 'idxf2', 'fid2',
                'fcc', 'category', 'type', 'subtype', '_aidxf1', '_aidxf2', 'hac',
                'mol', 'mol_frag_1', 'mol_frag_2', 'fc'])
7ee533aa1c7bb821ae6a73a27d39d6a7e796087f
7,819
from datetime import datetime

from dateutil.tz import tzlocal  # assumed source of tzlocal (python-dateutil)


def strfnow(fmt=HUMAN_DATETIME):
    """
    Returns a string representation of the current timestamp
    """
    return datetime.now(tzlocal()).strftime(fmt)
50fe38d37dfe8581f6cfa07aaaeb588a2e6e72a9
7,821
def tag_to_dict(node):
    """Assume tag has one layer of children, each of which is text, e.g.

    <medalline>
      <rank>1</rank>
      <organization>USA</organization>
      <gold>13</gold>
      <silver>10</silver>
      <bronze>9</bronze>
      <total>32</total>
    </medalline>
    """
    d = {}
    for child in node:
        d[child.tag] = child.text
    return d
e2131e070dce8620630e994cc25578a9a8438c64
7,822
from typing import Any


def compute_contact_centroid(molecular_complex: Any, cutoff: float = 4.5) -> np.ndarray:
    """Computes the (x,y,z) centroid of the contact regions of this molecular complex.

    For a molecular complex, it's necessary for various featurizations that
    compute voxel grids to find a reasonable center for the voxelization.
    This function computes the centroid of all the contact atoms, defined as
    an atom that's within `cutoff` Angstroms of an atom from a different
    molecule.

    Parameters
    ----------
    molecular_complex: Object
        A representation of a molecular complex, produced by
        `rdkit_util.load_complex`.
    cutoff: float, optional
        The distance in Angstroms considered for computing contacts.
    """
    fragments = reduce_molecular_complex_to_contacts(molecular_complex, cutoff)
    coords = [frag[0] for frag in fragments]
    contact_coords = merge_molecules_xyz(coords)
    centroid = np.mean(contact_coords, axis=0)
    return centroid
53b83e6814f6f59645d84c36de458952918123fc
7,823
def general_operator_gamma_norm(matrix, gamma, max_j, max_q):
    """
    Returns the gamma operator norm of matrix, summing up to max_j and
    considering the sup up to max_q. It is assumed that matrix is a function
    accepting two arguments i, j (and not an array), for efficiency.
    """
    max_j_sum = -1
    q = 1
    while q < max_q:
        temp_j_sum = nsum(lambda j: fprod([power(q, gamma), power(j, -gamma),
                                           fabs(matrix(q, j))]), [1, max_j])
        max_j_sum = temp_j_sum if temp_j_sum > max_j_sum else max_j_sum
        q += 1
    return max_j_sum
56993d0f406af3cead83e662aecb19d9082878fa
7,824
def crop_image_single(img, device):
    """
    Implementation of the MTCNN network to crop a single image to only show
    the face, as shown in the facenet_pytorch doc:
    https://github.com/timesler/facenet-pytorch/blob/master/examples/infer.ipynb

    :param device: pytorch device
    :param img: single image to be cropped
    :return: cropped image
    """
    model = MTCNN(image_size=160, margin=0, min_face_size=20,
                  thresholds=[0.6, 0.7, 0.7], factor=0.709,
                  post_process=False, device=device)
    x_aligned = model(img)
    return x_aligned
b2755a1bf464dca74cbfc32a5bf5c61f106758ae
7,825
def tf2zpk(b, a):
    """Return zero, pole, gain (z, p, k) representation from a numerator,
    denominator representation of a linear filter.

    Parameters
    ----------
    b : ndarray
        Numerator polynomial.
    a : ndarray
        Denominator polynomial.

    Returns
    -------
    z : ndarray
        Zeros of the transfer function.
    p : ndarray
        Poles of the transfer function.
    k : float
        System gain.

    Notes
    -----
    If some values of ``b`` are too close to 0, they are removed. In that
    case, a BadCoefficients warning is emitted.
    """
    b, a = normalize(b, a)
    b = (b + 0.0) / a[0]
    a = (a + 0.0) / a[0]
    k = b[0]
    b /= b[0]
    z = roots(b)
    p = roots(a)
    return z, p, k
3d83d4053a89be19c3738650a11216d38845d6a6
7,826
def gpiod_line_is_free(line: gpiod_line) -> bool:
    """
    @brief Check if the calling user has neither requested ownership of this
           line nor configured any event notifications.

    @param line: GPIO line object.

    @return True if given line is free, false otherwise.
    """
    return line.state == _LINE_FREE
939320d0737406789bbb81bc9c73023dff71fb51
7,827
from jax import random  # jax.random provides split(); the stdlib random does not


def train_step(optimizer, inputs, learning_rate_fn, dropout_rng=None):
    """Perform a single training step."""
    weights = jnp.where(inputs > 0, 1, 0)

    # We handle PRNG splitting inside the top pmap, rather
    # than handling it outside in the training loop - doing the
    # latter can add some stalls to the devices.
    dropout_rng, new_dropout_rng = random.split(dropout_rng)

    def loss_fn(model):
        """Loss function used for training."""
        with nn.stochastic(dropout_rng):
            logits = model(inputs, train=True)
        loss, weight_sum = compute_weighted_cross_entropy(logits, inputs, weights)
        mean_loss = loss / weight_sum
        return mean_loss, logits

    step = optimizer.state.step
    lr = learning_rate_fn(step)
    grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
    (_, logits), grad = grad_fn(optimizer.target)
    grad = jax.lax.pmean(grad, 'batch')
    new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
    metrics = compute_metrics(logits, inputs, weights)
    metrics['learning_rate'] = lr

    return new_optimizer, metrics, new_dropout_rng
2dae63fcfb9fc5bc059b084bf748886d3d257c4c
7,828
def doublet_line_polar_u(rcp, zcp, dmz_dz, bSelfInd=False):
    r"""
    Velocity field induced by a semi-infinite doublet line (on the z axis)
    of intensity `dmz_dz`.
    Control points defined by polar coordinates `rcp` and `zcp`.

    \int 1/(r^2 + (z-x)^2 )^(3/2) dx
    \int 1/(r^2 + (z-x)^2 )^(5/2) dx
    """
    if np.any(rcp < 0):
        raise Exception('Script meant for positive r')
    r = np.asarray(rcp)
    z = np.asarray(zcp)

    # Vectorial "if" statements to isolate singular regions of the domain
    bZ0 = np.abs(z) < 1e-8
    bR0 = np.abs(r) < 1e-8
    bZ0R0 = np.logical_and(bZ0, bR0)
    bZ0Rp = np.logical_and(bZ0, np.abs(r) > 1e-8)
    bR0Zp = np.logical_and(bR0, z > 1e-8)
    bR0Zm = np.logical_and(bR0, z < -1e-8)
    bOK = np.logical_and(~bZ0, ~bR0)

    uz = np.zeros(r.shape)
    ur = np.zeros(r.shape)

    norm2 = r**2 + z**2
    uz[bOK] = dmz_dz/(4*np.pi) * 1/r[bOK]**2 * (z[bOK]**3/(norm2[bOK])**(3/2) - z[bOK]/(norm2[bOK])**(1/2))
    uz[bZ0Rp] = 0
    uz[bR0Zm] = dmz_dz/(4*np.pi) * 1/norm2[bR0Zm]
    # uz[bR0Zp] = dmz_dz/(4*np.pi) * 1/norm2[bR0Zp]  # <<< No singularity there, but we force it to 0
    ur[bOK] = -dmz_dz/(4*np.pi) * r[bOK] * 1/(norm2[bOK])**(3/2)
    ur[bZ0Rp] = -dmz_dz/(4*np.pi) * r[bZ0Rp] * 1/(norm2[bZ0Rp])**(3/2)
    ur[bR0Zm] = 0
    ur[bR0Zp] = 0
    ur[bZ0R0] = 0
    uz[bZ0R0] = 0
    return ur, uz
9dcee49192273a6482b269120af01683a11916b6
7,829
def paginate(text: str):
    """Simple generator that paginates text."""
    last = 0
    pages = []
    appd_index = 0
    for curr in range(0, len(text)):
        if curr % 1980 == 0:
            pages.append(text[last:curr])
            last = curr
            appd_index = curr
    if appd_index != len(text) - 1:
        # Append the remainder (text[last:curr] would drop the final character)
        pages.append(text[last:])
    return list(filter(lambda a: a != '', pages))
f40b97e0f221b4c6afffcc4dd707daf48685d04a
7,830
def get_batch_copy(vocab_size, batch_size, seq_len):
    """Generates random data for copying."""
    batch = np.random.choice(
        vocab_size - 1, size=[batch_size, seq_len // 2 - 1]) + 1
    batch = np.concatenate([np.zeros([batch_size, 1], dtype=int), batch], axis=1)
    batch = np.concatenate([batch] * 2, axis=1)
    batch_mask = np.concatenate([
        np.zeros([batch_size, seq_len // 2], dtype=bool),
        np.ones([batch_size, seq_len // 2], dtype=bool)
    ], axis=1)
    return batch, batch_mask
f1b6092393a0b8e7cb6c2a2ae191c29d28d2d89f
7,831
import pandas


def buildCompareDFs(strTodayFileName):
    """Read in and return today's CSV as a DF, determine the appropriate old
    CSV as a DF, and the old file name for use later."""
    # Get today's file
    dfTodaysCards = pandas.read_csv(
        DATA_DIR_NAME + strTodayFileName, dtype={'Card Number': object})
    dfTodaysCards = cleanCardDataFrame(dfTodaysCards)

    # Getting the older file is a bit trickier: check the run log, find the
    # most recent run, find the old file used, and get the next recent old
    # file to compare with.
    dictRunLog = readRunLog()
    strOldFileName = determineCompareFile(dictRunLog)
    print("ToCompareAgainst: " + strOldFileName)
    dfOldCards = pandas.read_csv(
        DATA_DIR_NAME + strOldFileName, dtype={'Card Number': object})
    dfOldCards = cleanCardDataFrame(dfOldCards)
    dfOldCards = dfOldCards.rename(
        index=str, columns={"Count": "OldCount", "Price": "OldPrice"})
    return dfTodaysCards, dfOldCards, strOldFileName
488afcd0704b9c73a83cab3bd898703d290ac557
7,832
def __cvx_eda(y, delta, tau0=2., tau1=0.7, delta_knot=10., alpha=8e-4, gamma=1e-2,
              solver=None, options={'reltol': 1e-9, 'show_progress': False}):
    """
    CVXEDA Convex optimization approach to electrodermal activity processing
    This function implements the cvxEDA algorithm described in "cvxEDA: a
    Convex Optimization Approach to Electrodermal Activity Processing"
    (http://dx.doi.org/10.1109/TBME.2015.2474131, also available from the
    authors' homepages).

    Arguments:
        y: observed EDA signal (we recommend normalizing it: y = zscore(y))
        delta: sampling interval (in seconds) of y
        tau0: slow time constant of the Bateman function
        tau1: fast time constant of the Bateman function
        delta_knot: time between knots of the tonic spline function
        alpha: penalization for the sparse SMNA driver
        gamma: penalization for the tonic spline coefficients
        solver: sparse QP solver to be used, see cvxopt.solvers.qp
        options: solver options, see:
            http://cvxopt.org/userguide/coneprog.html#algorithm-parameters

    Returns (see paper for details):
        r: phasic component
        p: sparse SMNA driver of phasic component
        t: tonic component
        l: coefficients of tonic spline
        d: offset and slope of the linear drift term
        e: model residuals
        obj: value of objective function being minimized (eq 15 of paper)
    """
    n = len(y)
    y = cvx.matrix(y)

    # bateman ARMA model
    a1 = 1. / min(tau1, tau0)  # a1 > a0
    a0 = 1. / max(tau1, tau0)
    ar = np.array([(a1 * delta + 2.) * (a0 * delta + 2.),
                   2. * a1 * a0 * delta ** 2 - 8.,
                   (a1 * delta - 2.) * (a0 * delta - 2.)]) / ((a1 - a0) * delta ** 2)
    ma = np.array([1., 2., 1.])

    # matrices for ARMA model
    i = np.arange(2, n)
    A = cvx.spmatrix(np.tile(ar, (n - 2, 1)), np.c_[i, i, i],
                     np.c_[i, i - 1, i - 2], (n, n))
    M = cvx.spmatrix(np.tile(ma, (n - 2, 1)), np.c_[i, i, i],
                     np.c_[i, i - 1, i - 2], (n, n))

    # spline
    delta_knot_s = int(round(delta_knot / delta))
    spl = np.r_[np.arange(1., delta_knot_s), np.arange(delta_knot_s, 0., -1.)]  # order 1
    spl = np.convolve(spl, spl, 'full')
    spl /= max(spl)
    # matrix of spline regressors
    i = np.c_[np.arange(-(len(spl) // 2), (len(spl) + 1) // 2)] + np.r_[np.arange(0, n, delta_knot_s)]
    nB = i.shape[1]
    j = np.tile(np.arange(nB), (len(spl), 1))
    p = np.tile(spl, (nB, 1)).T
    valid = (i >= 0) & (i < n)
    B = cvx.spmatrix(p[valid], i[valid], j[valid])

    # trend
    C = cvx.matrix(np.c_[np.ones(n), np.arange(1., n + 1.) / n])
    nC = C.size[1]

    # Solve the problem:
    # .5*(M*q + B*l + C*d - y)^2 + alpha*sum(A,1)*p + .5*gamma*l'*l
    # s.t. A*q >= 0

    # old_options = cvx.solvers.options.copy()
    cvx.solvers.options.clear()
    cvx.solvers.options.update(options)
    if solver == 'conelp':
        # Use conelp
        z = lambda m, n: cvx.spmatrix([], [], [], (m, n))
        G = cvx.sparse([[-A, z(2, n), M, z(nB + 2, n)],
                        [z(n + 2, nC), C, z(nB + 2, nC)],
                        [z(n, 1), -1, 1, z(n + nB + 2, 1)],
                        [z(2 * n + 2, 1), -1, 1, z(nB, 1)],
                        [z(n + 2, nB), B, z(2, nB),
                         cvx.spmatrix(1.0, range(nB), range(nB))]])
        h = cvx.matrix([z(n, 1), .5, .5, y, .5, .5, z(nB, 1)])
        c = cvx.matrix([(cvx.matrix(alpha, (1, n)) * A).T, z(nC, 1), 1, gamma, z(nB, 1)])
        res = cvx.solvers.conelp(c, G, h, dims={'l': n, 'q': [n + 2, nB + 2], 's': []})
        obj = res['primal objective']
    else:
        # Use qp
        Mt, Ct, Bt = M.T, C.T, B.T
        H = cvx.sparse([[Mt * M, Ct * M, Bt * M],
                        [Mt * C, Ct * C, Bt * C],
                        [Mt * B, Ct * B, Bt * B + gamma * cvx.spmatrix(1.0, range(nB), range(nB))]])
        f = cvx.matrix([(cvx.matrix(alpha, (1, n)) * A).T - Mt * y,
                        -(Ct * y), -(Bt * y)])
        res = cvx.solvers.qp(H, f, cvx.spmatrix(-A.V, A.I, A.J, (n, len(f))),
                             cvx.matrix(0., (n, 1)), solver=solver)
        obj = res['primal objective'] + .5 * (y.T * y)
    # cvx.solvers.options.clear()
    # cvx.solvers.options.update(old_options)

    l = res['x'][-nB:]
    d = res['x'][n:n + nC]
    t = B * l + C * d
    q = res['x'][:n]
    p = A * q
    r = M * q
    e = y - r - t

    return r, t  # return r, p, t, l, d, e, obj
5d4b333c7ab99a339d20adc379ea48792e2b43aa
7,834
import torch


def pulsar_from_opencv_projection(
    R: torch.Tensor,
    tvec: torch.Tensor,
    camera_matrix: torch.Tensor,
    image_size: torch.Tensor,
    znear: float = 0.1,
) -> torch.Tensor:
    """
    Convert OpenCV style camera parameters to Pulsar style camera parameters.

    Note:
        * Pulsar does NOT support different focal lengths for x and y.
          For conversion, we use the average of fx and fy.
        * The Pulsar renderer MUST use a left-handed coordinate system for
          this mapping to work.
        * The resulting image will be vertically flipped - which has to be
          addressed AFTER rendering by the user.
        * The parameters `R, tvec, camera_matrix` correspond to the outputs
          of `cv2.decomposeProjectionMatrix`.

    Args:
        R: A batch of rotation matrices of shape `(N, 3, 3)`.
        tvec: A batch of translation vectors of shape `(N, 3)`.
        camera_matrix: A batch of camera calibration matrices of shape `(N, 3, 3)`.
        image_size: A tensor of shape `(N, 2)` containing the sizes of the images
            (height, width) attached to each camera.
        znear (float): The near clipping value to use for Pulsar.

    Returns:
        cameras_pulsar: A batch of `N` Pulsar camera vectors in the Pulsar
            convention `(N, 13)` (3 translation, 6 rotation, focal_length,
            sensor_width, c_x, c_y).
    """
    return _pulsar_from_opencv_projection(R, tvec, camera_matrix, image_size, znear)
854349a4442e5e57554995439e62de74000c9b3d
7,835
def identity(gender: str = None) -> dict:
    """
    Generates a pseudo-random identity.

    Optional args
        gender: 'm' for traditionally male, 'f' for traditionally female.

    Returns a dict with the following keys:
        name -> full name
        given -> given name / first name
        family -> family name / last name
        address -> well formed address (fake of course)
        city -> city of residence
        state -> state of residence
        zip_code -> zip code of residence (matches the city and state)
        phone -> a phone number with an area code from the state of residence.
        email -> a valid email address (fake of course)
    """
    if gender and gender.lower() not in ["m", "f"]:
        raise ValueError("'gender' must be 'm' or 'f'")
    if gender and gender.lower() == "m":
        given = _pluck(MGIVEN)
    elif gender and gender.lower() == "f":
        given = _pluck(FGIVEN)
    else:
        given = _pluck(MGIVEN + FGIVEN)
    family = _pluck(FAMILY)
    email = _make_email(given, family)
    zip_code, city, state_code = _pluck(AREA)
    phone = _make_phone(state_code)
    address = _make_address()
    return dict(name=f"{given} {family}".title(),
                given=given.title(),
                family=family.title(),
                address=address,
                city=city.title(),
                state=state_code.upper(),
                zip_code=zip_code,
                phone=phone,
                email=email)
1917b6723a5bfe2c0b7477dca18f450c8a0e07c3
7,836
def matthews_corrcoef(y_true, y_pred):
    """Returns matthew's correlation coefficient for binary classes

    The Matthews correlation coefficient is used in machine learning as a
    measure of the quality of binary (two-class) classifications. It takes
    into account true and false positives and negatives and is generally
    regarded as a balanced measure which can be used even if the classes
    are of very different sizes. The MCC is in essence a correlation
    coefficient value between -1 and +1. A coefficient of +1 represents a
    perfect prediction, 0 an average random prediction and -1 an inverse
    prediction. The statistic is also known as the phi coefficient.
    [source: Wikipedia]

    Only in the binary case does this relate to information about true and
    false positives and negatives. See references below.

    Parameters
    ----------
    y_true : array, shape = [n_samples]
        true targets
    y_pred : array, shape = [n_samples]
        estimated targets

    Returns
    -------
    mcc : float
        matthew's correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
        prediction).

    References
    ----------
    http://en.wikipedia.org/wiki/Matthews_correlation_coefficient
    http://dx.doi.org/10.1093/bioinformatics/16.5.412
    """
    mcc = np.corrcoef(y_true, y_pred)[0, 1]
    if np.isnan(mcc):
        return 0.
    else:
        return mcc
b4af31bac942a99fabb6f20c29ed59aa7b55e32d
7,837
from typing import Any, Dict

import yaml


def yaml_dump(dict_to_dump: Dict[str, Any]) -> str:
    """Dump the dictionary as a YAML document."""
    return yaml.safe_dump(dict_to_dump, default_flow_style=False)
4635514ba8ff901656b8a4b5869a6ae101528fa8
7,839
def completeness(importance_matrix):
    """Compute completeness of the representation."""
    per_factor = completeness_per_code(importance_matrix)
    if importance_matrix.sum() == 0.:
        importance_matrix = np.ones_like(importance_matrix)
    factor_importance = importance_matrix.sum(axis=0) / importance_matrix.sum()
    return np.sum(per_factor * factor_importance)
39407833b501e974f2ddb421ecad59055153260e
7,840
def image_stat(image_id):
    """
    Return the statistics of an image as a pd dataframe

    :param image_id:
    :return:
    """
    counts, total_area, mean_area, std_area = {}, {}, {}, {}
    img_area = get_image_area(image_id)
    for cl in CLASSES:
        polygon_list = get_polygon_list(image_id, cl)
        counts[cl] = len(polygon_list)
        if len(polygon_list) > 0:
            total_area[cl] = np.sum([poly.area for poly in polygon_list]) \
                / img_area * 100.
            mean_area[cl] = np.mean([poly.area for poly in polygon_list]) \
                / img_area * 100.
            std_area[cl] = np.std([poly.area for poly in polygon_list]) \
                / img_area * 100.
    return pd.DataFrame({'Class': CLASSES, 'Counts': counts, 'TotalArea': total_area,
                         'MeanArea': mean_area, 'STDArea': std_area})
a20c1d702f983c576ab506061ab19181b90c8684
7,841
def delete_original():
    """
    Decorator that deletes the original Discord message upon command execution.

    :return: a ``commands.check`` whose predicate always passes
    """
    async def predicate(ctx):
        if ctx.invoked_with != "help":  # Don't try to delete if help command
            if isinstance(ctx.message.channel, discord.TextChannel):
                try:
                    await ctx.message.delete()
                except discord.errors.NotFound as e:
                    log.fatal(f"Unable to delete message.\n\t{e}")
        return True
    return commands.check(predicate)
08f71c271b679fb6389754c21a896e11ae6f05c0
7,842
def get_H_OS():
    """Temperature difference coefficient of a roof or ceiling (-)

    Args:

    Returns:
        float: temperature difference coefficient of a roof or ceiling (-)
    """
    adjacent_type = '外気'  # adjacent space type: outdoor air
    return get_H(adjacent_type)
cb3b68063d2c734b03d96b261e5cba23b79c3bc7
7,844
def forward_softmax(x):
    """
    Compute softmax function for a single example.
    The shape of the input is of size # num classes.

    Important Note: You must be careful to avoid overflow for this function.
    Functions like softmax have a tendency to overflow when very large
    numbers like e^10000 are computed. You will know that your function is
    overflow resistant when it can handle input like
    np.array([[10000, 10010, 10]]) without issues.

    x: A 1d numpy float array of shape number_of_classes

    Returns:
        A 1d numpy float array containing the softmax results of shape
        number_of_classes
    """
    # Subtract the max for numerical stability before exponentiating
    x = x - np.max(x, axis=0)
    exp = np.exp(x)
    s = exp / np.sum(exp, axis=0)
    return s
8c0f54294c2dc5b385466398726b67a3acd674b0
7,845
import numpy


def applySpectralClusters(kmeansObj, img, imgNullVal):
    """
    Use the given KMeans object to predict spectral clusters on
    a whole image array.

    The kmeansObj is an instance of sklearn.cluster.KMeans, as returned by
    fitSpectralClusters().

    The img array is a numpy array of the image to predict on,
    of shape (nBands, nRows, nCols).

    Any pixels in img which have value imgNullVal will be set to SEGNULLVAL
    (i.e. zero) in the output cluster image.

    Return value is a numpy array of shape (nRows, nCols), with each
    element being the segment ID value for that pixel.
    """
    # Predict on the whole image. In principle we could omit the nulls,
    # but it makes little difference to run time, and just adds complexity.
    (nBands, nRows, nCols) = img.shape

    # Re-organise the image data so it matches what sklearn expects.
    xFull = numpy.transpose(img, axes=(1, 2, 0))
    xFull = xFull.reshape((nRows * nCols, nBands))

    clustersFull = kmeansObj.predict(xFull)
    del xFull
    clustersImg = clustersFull.reshape((nRows, nCols))

    # Make the cluster ID numbers start from 1, and use SEGNULLVAL
    # (i.e. zero) in null pixels
    clustersImg += 1
    if imgNullVal is not None:
        nullmask = (img == imgNullVal).any(axis=0)
        clustersImg[nullmask] = SEGNULLVAL

    return clustersImg
2b2fb5616c20c4e5d9278bf8555c888bfab80cb8
7,846
def get_config() -> ConfigParser:
    """
    Parse the config file.

    :return: config
    """
    cfg = ConfigParser()
    cfg.read(CONFIG_PATH)
    return cfg
14f9ce4719bf665d62f1a2d06c980f4e85d2b8a5
7,848
from calendra.registry import registry


def iso_register(iso_code):
    """
    Registers Calendar class as country or region in IsoRegistry.

    Registered country must set the class variable ``iso`` using this decorator.

    >>> from calendra.core import Calendar
    >>> from calendra.registry import registry
    >>> from calendra.registry_tools import iso_register
    >>> @iso_register('MC-MR')
    ... class MyRegion(Calendar):
    ...     'My Region'

    Region calendar is then retrievable from registry:

    >>> calendar = registry.get('MC-MR')
    """
    def wrapper(cls):
        registry.register(iso_code, cls)
        return cls
    return wrapper
7fcb55a37f5af948ff6be8baf797d00328f241a8
7,849
def dict_check_defaults(dd, **defaults):
    """Check that a dictionary has some default values

    Parameters
    ----------
    dd: dict
        Dictionary to check
    **defaults: dict
        Dictionary of default values

    Example
    -------
    .. ipython:: python

        @suppress
        from xoa.misc import dict_check_defaults

        dd = dict(color='blue')
        dict_check_defaults(dd, color='red', size=10)
    """
    if defaults is None:
        defaults = {}
    for item in defaults.items():
        dd.setdefault(*item)
    return dd
8edc3fdb351f7ec2d4ec3b1e788e6aa5cc0f8787
7,850
def get_invested_and_worth(account):
    """Gets the money invested and the actual worth of an account"""
    data = query_indexa(f"accounts/{account}/performance")
    invested = data["return"]["investment"]
    worth = data["return"]["total_amount"]
    return {"invested": round(invested, 2), "worth": round(worth, 2)}
fc1542f54c8954622aff86d59d7d6fb82e63832b
7,852
def make_album(singer, name, number=''):
    """Return a dict describing a singer's album"""
    album = {'singer': singer, 'name': name}
    if number:
        album['number'] = number
    return album
1f1bfaaeb501be0aa6fefd358177922246488b31
7,853
from typing import Dict, Generator, List


def fit_ctmp_meas_mitigator(cal_data: Dict[int, Dict[int, int]],
                            num_qubits: int,
                            generators: List[Generator] = None) -> CTMPExpvalMeasMitigator:
    """Return FullMeasureErrorMitigator from result data.

    Args:
        cal_data: calibration dataset.
        num_qubits: the number of qubits for the calibration dataset.
        generators: Optional, input generator set.

    Returns:
        Measurement error mitigator object.

    Raises:
        QiskitError: if input arguments are invalid.
    """
    if not isinstance(num_qubits, int):
        raise QiskitError('Number of qubits must be an int')
    if generators is None:
        generators = standard_generator_set(num_qubits)

    gen_mat_dict = {}
    for gen in generators + _supplementary_generators(generators):
        if len(gen[2]) > 1:
            mat = _local_g_matrix(gen, cal_data, num_qubits)
            gen_mat_dict[gen] = mat

    # Compute rates for generators
    rates = [_get_ctmp_error_rate(gen, gen_mat_dict, num_qubits) for gen in generators]
    return CTMPExpvalMeasMitigator(generators, rates)
2dab6ca0da19acb174b6d0e8b96c7833d5de74e8
7,854
def discounted_item(data):
    """
    DOCSTRING: Classifies item purchases as 'Promoted' or 'Not Promoted'
    based on the 'Item Discount' column. The 'COD Collectibles' column also
    gets restructured by eliminating undesired default values, like 'Online'.

    INPUT:
        > data: Only accepts a Pandas DataFrame or TextParser that has been
          pre-processed earlier.

    OUTPUT:
        Pandas DataFrame or TextParser with 1 additional column,
        i.e. 'On Promotion'.
    """
    data["On Promotion"] = np.nan
    data["Phone num"] = np.nan
    data["COD Collectible"] = np.nan  # Later again gets renamed within this func.

    for i, v in data["Item Discount"].iteritems():
        if v != 0:
            data.loc[i, "On Promotion"] = "Promoted"
        else:
            data.loc[i, "On Promotion"] = "Not Promoted"

    # Also taking care of COD Collectible:
    for i, v in data["COD Collectibles"].iteritems():
        if v == "Online":
            data.loc[i, "COD Collectible"] = 0
        else:
            data.loc[i, "COD Collectible"] = v

    # Also taking care of 'Phone No.' column:
    for i, v in data["Phone No."].iteritems():
        if v == "Online":
            data.loc[i, "Phone num"] = "Unavailable"
        else:
            data.loc[i, "Phone num"] = v

    data.drop(["COD Collectibles"], axis=1, inplace=True)
    data.drop(["Phone No."], axis=1, inplace=True)
    data.rename(columns={"COD Collectible": "COD Collectibles"}, inplace=True)
    data.rename(columns={"Phone num": "Phone No."}, inplace=True)
    return data
178c5e7d8e9c2e3bdd91d4606ea52c34c7cf099c
7,855
def NamespacedKubernetesSyncer(namespace, use_rsync=False):
    """Wrapper to return a ``KubernetesSyncer`` for a Kubernetes namespace.

    Args:
        namespace (str): Kubernetes namespace.
        use_rsync (bool): Use ``rsync`` if True or ``kubectl cp`` if False.
            If True, ``rsync`` will need to be installed in the Kubernetes
            pods for this to work. If False, ``tar`` will need to be
            installed instead.

    Returns:
        A ``KubernetesSyncer`` class to be passed to ``tune.run()``.

    Example:

    .. code-block:: python

        from ray.tune.integration.kubernetes import NamespacedKubernetesSyncer
        tune.run(train, sync_to_driver=NamespacedKubernetesSyncer("ray"))

    """
    class _NamespacedKubernetesSyncer(KubernetesSyncer):
        _namespace = namespace
        _use_rsync = use_rsync

    return _NamespacedKubernetesSyncer
9da5a049a12a248623040c1ace79c2ebedd3400c
7,856
def _cons8_88(m8, L88, d_gap, k, Cp, h_gap):
    """dz constraint for edge gap sc touching 2 edge gap sc"""
    term1 = 2 * h_gap * L88 / m8 / Cp       # conv to inner/outer ducts
    term2 = 2 * k * d_gap / m8 / Cp / L88   # cond to adj bypass edge
    return 1 / (term1 + term2)
7c48c4999ce2dd3dbdec799edd7ad441a6f66e7b
7,857
import hashlib


def cache_key(path):
    """Return cache key for `path`."""
    return 'folder-{}'.format(hashlib.md5(path.encode('utf-8')).hexdigest())
6b9afe1267e0cc0c7168bf3b0d5c7536e2b3c768
7,858
def ref_731(n):
    """Reference number calculator.

    Returns reference number calculated using the 7-3-1 algorithm
    used in Estonian banks.

    :param string n: base number (client id, etc)
    :rtype: string
    """
    # Weight digits from the right with the repeating pattern 7, 3, 1,
    # then append the resulting check digit (range() replaces Python 2's xrange()).
    check = (10 - sum(int(n[-i]) * (7, 3, 1)[(i - 1) % 3]
                      for i in range(1, len(n) + 1))) % 10
    return "%s%d" % (n, check)
b1958511947d9f369db2547cde15222603dc0773
7,859
import traceback

from aiohttp import web  # assumed: `web.json_response` matches aiohttp's API


async def exception_as_response(e: Exception):
    """
    Wraps an exception into a JSON response.
    """
    data = {
        "message": str(e),
        "traceback": "".join(traceback.TracebackException.from_exception(e).format())
    }
    return web.json_response(data, status=500)
60f226cb7cd4c3aba3026d44d28e774928e6bbf7
7,860
def canvas_merge_union(layers, full=True, blend=canvas_compose_over):
    """Blend multiple `layers` into a single, large enough image"""
    if not layers:
        raise ValueError("can not blend zero layers")
    elif len(layers) == 1:
        return layers[0]

    min_x, min_y, max_x, max_y = None, None, None, None
    for image, offset in layers:
        x, y = offset
        w, h = image.shape[:2]
        if min_x is None:
            min_x, min_y = x, y
            max_x, max_y = x + w, y + h
        else:
            min_x, min_y = min(min_x, x), min(min_y, y)
            max_x, max_y = max(max_x, x + w), max(max_y, y + h)
    width, height = max_x - min_x, max_y - min_y

    if full:
        output = None
        for image, offset in layers:
            x, y = offset
            w, h = image.shape[:2]
            ox, oy = x - min_x, y - min_y
            image_full = np.zeros((width, height, 4), dtype=FLOAT)
            image_full[ox: ox + w, oy: oy + h] = image
            if output is None:
                output = image_full
            else:
                output = blend(output, image_full)
    else:
        # this is optimization for method `over` blending
        output = np.zeros((max_x - min_x, max_y - min_y, 4), dtype=FLOAT)
        for index, (image, offset) in enumerate(layers):
            x, y = offset
            w, h = image.shape[:2]
            ox, oy = x - min_x, y - min_y
            effected = output[ox: ox + w, oy: oy + h]
            if index == 0:
                effected[...] = image
            else:
                effected[...] = blend(effected, image)

    return output, (min_x, min_y)
ffbb3b78e908ed1e131a1f0827f2d3097415edc9
7,861
import json


def exception_response(request, code=400, exception=None):
    """
    Create a response for an exception

    :param request: request instance
    :param code: exception code
    :param exception: exception instance
    :return: exception formatted response
    """
    code = code if code in [400, 403, 404, 500] else 400
    exception_repr = get_error_msg(exception)
    log.error(usr=request.user, msg=f'{code} - {exception_repr}')
    context = dict(
        message=f"Error {code}",
        request_path=request.path,
        exception=exception_repr
    )
    if is_browser(request):
        template = loader.get_template(f'error/{code}.html')
        rtn = dict(
            content=template.render(context, request),
            content_type='text/html'
        )
    else:
        rtn = dict(
            content=json.dumps(context),
            content_type='application/json'
        )
    return rtn
1ef145ea4b07557fbc31a9d5c52621e79c2b99ff
7,862
def entropy(series):
    """Normalized Shannon Index"""
    # a series in which all the entries are equal should result in normalized entropy of 1.0

    # eliminate 0s
    series1 = series[series != 0]

    # if len(series) < 2 (i.e., 0 or 1) then return 0
    if len(series1) > 1:
        # calculate the maximum possible entropy for the given length of the input series
        max_s = -np.log(1.0 / len(series))

        total = float(sum(series1))
        p = series1.astype('float') / float(total)
        return sum(-p * np.log(p)) / max_s
    else:
        return 0.0
30f8f3cc6fed73d8cfa0b3705008891a60af028a
7,864
def spatially_whiten(X: np.ndarray, *args, **kwargs):
    """spatially whiten the nd-array X

    Args:
        X (np.ndarray): the data to be whitened, with channels/space in the *last* axis

    Returns:
        X (np.ndarray): the whitened X
        W (np.ndarray): the whitening matrix used to whiten X
    """
    Cxx = updateCxx(None, X, None)
    W, _ = robust_whitener(Cxx, *args, **kwargs)
    X = X @ W  # np.einsum("...d,dw->...w", X, W)
    return (X, W)
a0c9ae88e8f451378503754e4768ee554e50ed3e
7,865
from pathlib import Path

import yaml


def get_settings(basename: str = "settings.yml",
                 path: Path = PROJECT_ROOT / "conf") -> dict:
    """
    Loads settings file

    Args:
        basename (str, optional): Basename of settings file. Defaults to "settings.yml".
        path (Path, optional): Path of settings file. Defaults to PROJECT_ROOT/"conf".

    Raises:
        exc: Yaml load exception

    Returns:
        dict: settings
    """
    with open(str(path / basename), 'r') as stream:
        try:
            settings = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            raise exc
    return settings
2317f9fbd125a16a7c34086d35b02973f1be5d8f
7,866
import torch


def quaternion2rotationPT(q):
    """
    Convert unit quaternion to rotation matrix

    Args:
        q (torch.tensor): unit quaternion (N, 4)

    Returns:
        torch.tensor: rotation matrix (N, 3, 3)
    """
    r11 = (q[:, 0]**2 + q[:, 1]**2 - q[:, 2]**2 - q[:, 3]**2).unsqueeze(0).T
    r12 = (2.0 * (q[:, 1] * q[:, 2] - q[:, 0] * q[:, 3])).unsqueeze(0).T
    r13 = (2.0 * (q[:, 1] * q[:, 3] + q[:, 0] * q[:, 2])).unsqueeze(0).T

    r21 = (2.0 * (q[:, 1] * q[:, 2] + q[:, 0] * q[:, 3])).unsqueeze(0).T
    r22 = (q[:, 0]**2 + q[:, 2]**2 - q[:, 1]**2 - q[:, 3]**2).unsqueeze(0).T
    r23 = (2.0 * (q[:, 2] * q[:, 3] - q[:, 0] * q[:, 1])).unsqueeze(0).T

    r31 = (2.0 * (q[:, 1] * q[:, 3] - q[:, 0] * q[:, 2])).unsqueeze(0).T
    r32 = (2.0 * (q[:, 2] * q[:, 3] + q[:, 0] * q[:, 1])).unsqueeze(0).T
    r33 = (q[:, 0]**2 + q[:, 3]**2 - q[:, 1]**2 - q[:, 2]**2).unsqueeze(0).T

    r = torch.cat((r11, r12, r13,
                   r21, r22, r23,
                   r31, r32, r33), 1)
    r = torch.reshape(r, (q.shape[0], 3, 3))
    return r
feeed764ee179b31674790f9d2afc7b606a02aef
7,867
def _expand_and_tile(tensor, multiple, dim=0, name=None):
    """Slice `tensor` shape in 2, then tile along the sliced dimension.

    A new dimension is inserted in shape of `tensor` before `dim`, then values
    are tiled `multiple` times along the new dimension.

    Args:
        tensor: Input `Tensor` or `SparseTensor`.
        multiple: Integer, number of times to tile.
        dim: Integer, dimension along which to tile.
        name: Name of operation.

    Returns:
        `Tensor` result of expanding and tiling `tensor`.

    Raises:
        ValueError: if `multiple` is less than 1, or `dim` is not in
            `[-rank(tensor), rank(tensor)]`.
    """
    if multiple < 1:
        raise ValueError(f'Invalid argument multiple={multiple} for '
                         'expand_and_tile call. `multiple` must be an integer > 0')
    with ops.name_scope(name, 'expand_and_tile', (tensor, multiple, dim)) as scope:
        # Sparse.
        tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor)
        if isinstance(tensor, sparse_tensor.SparseTensor):
            if dim < 0:
                expand_dims = array_ops.reshape(
                    array_ops.size(tensor.dense_shape) + dim, [1])
            else:
                expand_dims = [dim]
            expanded_shape = array_ops.concat(
                (array_ops.slice(tensor.dense_shape, [0], expand_dims), [1],
                 array_ops.slice(tensor.dense_shape, expand_dims, [-1])),
                0,
                name='expanded_shape')
            expanded = sparse_ops.sparse_reshape(
                tensor, shape=expanded_shape, name='expand')
            if multiple == 1:
                return expanded
            return sparse_ops.sparse_concat(
                dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope)

        # Dense.
        expanded = array_ops.expand_dims(
            tensor, dim if (dim >= 0) else (dim - 1), name='expand')
        if multiple == 1:
            return expanded
        ones = array_ops.ones_like(array_ops.shape(tensor))
        tile_multiples = array_ops.concat(
            (ones[:dim], (multiple,), ones[dim:]), 0, name='multiples')
        return array_ops.tile(expanded, tile_multiples, name=scope)
aa9840fdaee56fee19937c8f632c72628fbd3995
7,868
import random


def eval_model(opt, print_parser=None):
    """Evaluates a model.

    :param opt: tells the evaluation function how to run
    :param bool print_parser: if provided, prints the options that are set
        within the model after loading the model
    :return: the final result of calling report()
    """
    random.seed(42)

    # load model and possibly print opt
    agent = create_agent(opt, requireModelExists=True)
    if print_parser:
        # show args after loading model
        print_parser.opt = agent.opt
        print_parser.print_args()

    tasks = opt['task'].split(',')
    reports = []
    for task in tasks:
        task_report = _eval_single_world(opt, agent, task)
        reports.append(task_report)

    report = aggregate_task_reports(
        reports, tasks, micro=opt.get('aggregate_micro', True)
    )

    # print announcements and report
    print_announcements(opt)
    print(
        '[ Finished evaluating tasks {} using datatype {} ]'.format(
            tasks, opt.get('datatype', 'N/A')
        )
    )
    print(report)
    return report
153dbead7ebd37ba2f61d745bc499f9eddfa0d03
7,869
def login(request):
    """
    Login with Dummy Test Account.
    """
    if 'user' in request.environ['beaker.session']:
        return app.redirect('index')
    users.store_to_session(request, users.create())
    return app.redirect('index')
0f2d06e7a6ac2fed0daee73e4c2e216012452e08
7,870
def safe_plus(x, y):
    """
    Handle "x + y" where x and y could be some combination of ints and strs.
    """
    # Handle Excel Cell objects. Grrr.
    if excel.is_cell_dict(x):
        x = x["value"]
    if excel.is_cell_dict(y):
        y = y["value"]

    # Handle NULLs.
    if (x == "NULL"):
        x = 0
    if (y == "NULL"):
        y = 0

    # Easy case first.
    if ((isinstance(x, int) or isinstance(x, float)) and
            (isinstance(y, int) or isinstance(y, float))):
        return x + y

    # Fix data types.
    if (isinstance(y, str)):
        # NULL string in VB.
        if (x == 0):
            x = ""
        # String concat.
        return str(x) + y
    if (isinstance(x, str)):
        # NULL string in VB.
        if (y == 0):
            y = ""
        # String concat.
        return x + str(y)

    # Punt. We are not doing pure numeric addition and
    # we have already handled string concatenation. Just
    # convert things to strings and hope for the best.
    return str(x) + str(y)
e3f5e43ee3e083669d0b744c7fb46a4ae62b4eec
7,871
import types


def full_like(a, fill_value, dtype=types.float32, split=None, device=None, comm=None, order="C"):
    """
    Return a full array with the same shape and type as a given array.

    Parameters
    ----------
    a : object
        The shape and data-type of 'a' define these same attributes of the returned array.
    fill_value : scalar
        Fill value.
    dtype : ht.dtype, optional
        Overrides the data type of the result.
    split : int, optional
        The axis along which the array is split and distributed, defaults to None (no distribution).
    device : str, ht.Device or None, optional
        Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device).
    comm : Communication, optional
        Handle to the nodes holding distributed parts or copies of this tensor.

    Returns
    -------
    out : ht.DNDarray
        Array of fill_value with the same shape and type as a.

    Examples
    --------
    >>> x = ht.zeros((2, 3,))
    >>> x
    tensor([[0., 0., 0.],
            [0., 0., 0.]])
    >>> ht.full_like(x, 1.0)
    tensor([[1., 1., 1.],
            [1., 1., 1.]])
    """
    return __factory_like(a, dtype, split, full, device, comm, fill_value=fill_value, order=order)
4a615e493ae20d925eeda7c0bd6ca9508c338bc2
7,872
from glob import glob


def gather_pulled_downloads(input_dir, output_dir):
    """
    Gather MPEG stream files from input_dir into a single MP4 file in output_dir
    """
    dash_globstr = f"{input_dir.absolute() / '*.dash'}"
    dash_glob = glob(dash_globstr)
    if len(dash_glob) < 1:
        raise ValueError(f"No dash file found in {input_dir}")
    elif len(dash_glob) > 1:
        raise ValueError(f"Multiple dash files found in {input_dir}")
    else:
        dash_file = dash_glob[0]
    m4s_globstr = f"{input_dir.absolute() / '*.m4s'}"
    m4s_files = sorted(glob(m4s_globstr))
    output_mp4 = output_dir.absolute() / "output.mp4"
    gather_m4s_to_mp4(dash_file, m4s_files, output_mp4)
    return output_mp4
a1b9c334ab717292db006666bdcdd0749b2620d7
7,873
import functools
from multiprocessing import Pool, cpu_count  # assumed source of Pool/cpu_count

import numpy as np


def Parallelize(ListIn, f, procs=-1, **kwargs):
    """This function packages the "starmap" function in multiprocessing,
    to allow multiple iterable inputs for the parallelized function.

    Parameters
    ----------
    ListIn : list
        each item in the list is a tuple of non-keyworded arguments for f.
    f : func
        function to be parallelized. Signature must not contain any other
        non-keyworded arguments other than those passed as iterables.

    Example:

    .. highlight:: python
    .. code-block:: python

        def multiply(x, y, factor=1.0):
            return factor * x * y

        X = np.linspace(0, 1, 1000)
        Y = np.linspace(1, 2, 1000)
        XY = [(x, Y[i]) for i, x in enumerate(X)]  # List of tuples
        Z = Parallelize(XY, multiply, factor=3.0, procs=8)

    Create as many positional arguments as required, but all must be
    packed into a list of tuples.
    """
    if type(ListIn[0]) != tuple:
        ListIn = [(ListIn[i],) for i in range(len(ListIn))]

    reduced_argfunc = functools.partial(f, **kwargs)

    if procs == -1:
        opt_procs = int(np.interp(len(ListIn),
                                  [1, 100, 500, 1000, 3000, 5000, 10000],
                                  [1, 2, 4, 8, 12, 36, 48]))
        procs = min(opt_procs, cpu_count())

    if procs == 1:
        OutList = [reduced_argfunc(*ListIn[iS]) for iS in range(len(ListIn))]
    else:
        p = Pool(processes=procs)
        OutList = p.starmap(reduced_argfunc, ListIn)
        p.close()
        p.join()

    return OutList
84fc1509c96c7bf765246e46983f2fa01745f4b2
7,874
def method_not_found(e):
    """
    Custom response for methods not allowed for the requested URLs

    :param e: Exception
    :return:
    """
    return response('failed', 'The method is not allowed for the requested URL', 405)
18a48d53d602c1a90017e3f00adc75c4c33479b5
7,875
def get_total_trainsets(df_anual_data, segments):
    """
    Fill the training_sets dict

    :param df_anual_data:
    :return:
    """
    rows_per_day = int(((60 / 15) * 24))
    training_sets = {'ID_SEGMENT': [], 'MES': [], 'COD_LABORALIDAD': [], 'TRAINING_SET': []}
    for seg_id in segments:
        # 1) Partition the annual data by segment
        df_seg = df_anual_data.loc[df_anual_data.ID_SEGMENT == seg_id]
        for month_i in df_seg.FECHA.dt.month.unique():
            # 2) Split the monthly data into 12 datasets
            df_month_seg = df_seg.loc[df_seg.FECHA.dt.month == month_i]
            for code_i in df_month_seg.COD_LABORALIDAD.unique():
                # 3) Partition by days with the same workday code
                df_month_seg_code = df_month_seg.loc[df_month_seg.COD_LABORALIDAD == code_i]

                # Fill the training_sets dictionary
                training_sets['ID_SEGMENT'].append(seg_id)
                training_sets['MES'].append(month_i)
                training_sets['COD_LABORALIDAD'].append(code_i)
                training_sets['TRAINING_SET'].append(df_month_seg_code)
    return training_sets
968c3af1fdba5eb759eb93618ed48e3ca3ce5223
7,876
def uni2diff(u):
    """Convert speed and angular rate to wheel speeds."""
    v = u[0]
    omega = u[1]
    v_L = v - ELL / 2 * omega
    v_R = v + ELL / 2 * omega
    return np.array([v_L, v_R])
83b743758aa7a549eda9067843a03eb57efde523
7,877
import re


def extract_service_and_module(repo_url):
    """Extract service and module from repository url.

    :param str repo_url: repository url
    :return: (service, module)
    :rtype: (str, str)
    """
    m = re.match(r'.+[/@]([^\.]+\.[^\.]+)[:/]([^/]+/[^/]+)\.git/?$', repo_url)
    if not m:
        m = re.match(r'.+[/@]([^\.]+\.[^\.]+)[:/]([^/]+/[^/]+)/?$', repo_url)
    if not m:
        raise Exception(
            'cannot detect service and module from {}'.format(repo_url))
    service = m.group(1)
    module = m.group(2)
    if service not in _pull_request_url.keys():
        raise Exception(
            'service not supported: {}'.format(service))
    return (service, module)
eafe6cf39fc2fe4153830c491147633fb07f95dd
7,878
def format_hexa(value: str) -> ColorBytes:
    """
    Examples:
        "bda"      => (187, 221, 170, 255)
        "4fcd"     => (68, 255, 204, 221)
        "60B0C4"   => (96, 176, 196, 255)
        "2BEA40D0" => (43, 234, 64, 208)
    """
    if len(value) in {3, 4}:
        expanded_color = ''.join(s * 2 for s in value)
    else:
        expanded_color = value
    length = len(expanded_color)
    if length in {6, 8}:
        hex_parts = [expanded_color[i:(i + 2)] for i in range(0, length, 2)]
        return format_color_bytes([int(v, 16) for v in hex_parts])
    else:
        raise ValueError(value)
4865e1498ed87c933160e5666adcc41b45162fdd
7,880
def normalize_community_features(features):
    """
    This performs TF-IDF-like normalization of community embedding features.

    Introduced in: Tang, L., Wang, X., Liu, H., & Wang, L. (2010, July).
    A multi-resolution approach to learning with overlapping communities.
    In Proceedings of the First Workshop on Social Media Analytics (pp. 14-22). ACM.

    Input:
        - X in R^(nxC_n): The community indicator matrix.

    Output:
        - X_norm in R^(nxC_n): The tf-idf + row normalized community indicator matrix.
    """
    # Calculate inverse document frequency.
    features = normalize_columns(features)

    # Normalize each row of term frequencies to 1
    features = normalize_rows(features)

    return features
9f5018ad3e20810d2bb66443bac4c2f7f6359d0f
7,881
def __extend_prefixed(pu):
    """
    Expand a prefixed name into a URIRef using the known prefixes.

    :param pu: a prefixed name (e.g. ``prefix:local``)
    :return: a ``URIRef`` for known prefixes, otherwise a ``BNode``
    """
    parts = pu.split(':')
    if len(parts) == 1:
        parts = ('', parts[0])
    try:
        return URIRef(_prefixes[parts[0]] + parts[1])
    except KeyError:
        return BNode(pu)
d6e5c25c94e8b3252d8b0925e8d37747caceebdd
7,882
def angle(u: Vec, v: Vec) -> float:
    """
    Returns the cosine (angle) between two vectors u and v

    :param u: (Vec) vector u
    :param v: (Vec) vector v
    :return: The scaled dot product, the cosine of the angle between u and v
    """
    if u.is_zero or v.is_zero:
        raise ValueError("Angle with lower dimensional 0 vector cannot be determined")
    l_u = u.length
    l_v = v.length
    return u.dot(v) / (l_u * l_v)
6c79390af1ed38fc1a99006165684234dabb0b4a
7,883
def find_most_similar(top_k, probs, cache_dict, num=10):
    """Return the filenames of the `num` most similar images. If similar
    images are found, return a list of matching tuples; otherwise return
    an empty list.

    top_k      : list containing the indices of the best classes
    probs      : probabilities corresponding to the best class indices
    cache_dict : cached indices and probabilities
    num        : number of nearest matches to return
    """
    similar = []
    for filename in cache_dict:
        score = 0
        count = 0
        other_top_k, other_probs = cache_dict[filename]
        for i, t in enumerate(top_k):
            if t in other_top_k:
                prob = probs[i]
                other_prob = other_probs[other_top_k.tolist().index(t)]
                score += abs(prob - other_prob)
                count += 1
        if count > 0:
            score = score / count
            similar.append((filename, score))
    if similar:
        similar.sort(key=lambda item: item[1])  # sort by score in ascending order
        return similar[:num]
    return similar
471083e1ed2b0fadb98cafad64d314ba779aa9e6
7,884
import time

import torch


def evaluate_full_batch(model, minibatch, mode='val'):
    """
    Full batch evaluation: for validation and test sets only.
    When calculating the F1 score, we will mask the relevant root nodes.
    """
    time_s = time.time()
    loss, preds, labels = model.eval_step(*minibatch.one_batch(mode=mode))
    torch.cuda.synchronize()
    time_e = time.time()

    node_val_test = minibatch.node_val if mode == 'val' else minibatch.node_test
    f1_scores = calc_f1(to_numpy(labels[node_val_test]),
                        to_numpy(preds[node_val_test]), model.sigmoid_loss)
    # node_test = minibatch.node_test
    # f1_test = calc_f1(to_numpy(labels[node_test]), to_numpy(preds[node_test]), model.sigmoid_loss)
    # printf(' ******TEST: loss = {:.4f}\tmic = {:.4f}\tmac = {:.4f}'.format(loss, f1_test[0], f1_test[1]), style='yellow')
    del labels
    del preds
    return loss, f1_scores[0], f1_scores[1], time_e - time_s
b1d183118b304edf6f076caefa2b1160316ad92c
7,886
from operator import inv, mul


def berlekamp_massey(s):
    """Given a sequence of LFSR outputs, find the coefficients of the LFSR."""
    C, B, L, m, b = [1], [1], 0, 1, 1
    for n in range(len(s)):
        d = s[n]
        for i in range(1, L + 1):
            d ^= mul(C[i], s[n - i])
        if d == 0:
            m += 1
        else:
            T = list(C)
            while len(C) <= len(B) + m:
                C += [0]
            t = mul(d, inv(b))
            for i in range(len(B)):
                C[i + m] ^= mul(t, B[i])
            if 2 * L <= n:
                L, B, b, m = n + 1 - L, T, d, 1
            else:
                m += 1
    return C[0:L + 1]
351f52dce7e4a95b986cc169f380347f317f851a
7,887
from typing import Optional


def sample(image: type_alias.TensorLike,
           warp: type_alias.TensorLike,
           resampling_type: ResamplingType = ResamplingType.BILINEAR,
           border_type: BorderType = BorderType.ZERO,
           pixel_type: PixelType = PixelType.HALF_INTEGER,
           name: Optional[str] = None) -> tf.Tensor:
    """Samples an image at user defined coordinates.

    Note:
        The warp maps target to source. In the following, A1 to An are
        optional batch dimensions.

    Args:
        image: A tensor of shape `[B, H_i, W_i, C]`, where `B` is the batch
            size, `H_i` the height of the image, `W_i` the width of the image,
            and `C` the number of channels of the image.
        warp: A tensor of shape `[B, A_1, ..., A_n, 2]` containing the x and y
            coordinates at which sampling will be performed. The last dimension
            must be 2, representing the (x, y) coordinate where x is the index
            for width and y is the index for height.
        resampling_type: Resampling mode. Supported values are
            `ResamplingType.NEAREST` and `ResamplingType.BILINEAR`.
        border_type: Border mode. Supported values are `BorderType.ZERO` and
            `BorderType.DUPLICATE`.
        pixel_type: Pixel mode. Supported values are `PixelType.INTEGER` and
            `PixelType.HALF_INTEGER`.
        name: A name for this op. Defaults to "sample".

    Returns:
        Tensor of sampled values from `image`. The output tensor shape is
        `[B, A_1, ..., A_n, C]`.

    Raises:
        ValueError: If `image` has rank != 4. If `warp` has rank < 2 or its
            last dimension is not 2. If `image` and `warp` batch dimension
            does not match.
    """
    with tf.name_scope(name or "sample"):
        image = tf.convert_to_tensor(image, name="image")
        warp = tf.convert_to_tensor(warp, name="warp")

        shape.check_static(image, tensor_name="image", has_rank=4)
        shape.check_static(
            warp,
            tensor_name="warp",
            has_rank_greater_than=1,
            has_dim_equals=(-1, 2))
        shape.compare_batch_dimensions(
            tensors=(image, warp), last_axes=0, broadcast_compatible=False)

        if pixel_type == PixelType.HALF_INTEGER:
            warp -= 0.5

        if resampling_type == ResamplingType.NEAREST:
            warp = tf.math.round(warp)

        if border_type == BorderType.DUPLICATE:
            image_size = tf.cast(tf.shape(image)[1:3], dtype=warp.dtype)
            height, width = tf.unstack(image_size, axis=-1)
            warp_x, warp_y = tf.unstack(warp, axis=-1)
            warp_x = tf.clip_by_value(warp_x, 0.0, width - 1.0)
            warp_y = tf.clip_by_value(warp_y, 0.0, height - 1.0)
            warp = tf.stack((warp_x, warp_y), axis=-1)

        return tfa_image.resampler(image, warp)
c9a202a6415d13bddac38cfa75e280ad45f1bda6
7,889
def normalize_parameter(kv):
    """
    Translate a parameter into standard form.
    """
    (k, v) = kv
    if k[0] == 'requiressl' and v in ('1', True):
        k[0] = 'sslmode'
        v = 'require'
    elif k[0] == 'dbname':
        k[0] = 'database'
    elif k[0] == 'sslmode':
        v = v.lower()
    return (tuple(k), v)
933ea71f452a16c1d4ae2630d6b58a92da1cbec0
7,890
def pulse_broadening(DM, f_ctr):
    """
    pulse_broadening(DM, f_ctr):
        Return the approximate pulse broadening (tau) in ms due to scattering
        based on the rough relation in Cordes' 'Pulsar Observations I' paper.
        'f_ctr' should be in MHz. The approximate error is 0.65 in log(tau).
    """
    logDM = Num.log10(DM)
    return 10.0**(-3.59 + 0.129*logDM + 1.02*logDM**2.0 -
                  4.4*Num.log10(f_ctr/1000.0))/1000.0
48830e02774247e551605e5e8ad693ece68634ad
7,891
def logged_in_student(browser, override_allowed_hosts, base_test_data): """ Fixture for a logged-in student user Returns: User: User object """ return LoginPage(browser).log_in_via_admin(base_test_data.student_user, DEFAULT_PASSWORD)
4513e8c8356cd8fbd643e9018e1019c5ec403bcd
7,893
import numpy as np
from torch.utils.data import DataLoader  # assumed source of DataLoader
from tqdm import tqdm  # a bare `import tqdm` would make tqdm(...) a module call


def test_model(data_set=None,
               langider=None,
               lang_to_idx=None) -> np.ndarray:
    """
    Tests a given langid.py model on the given data set.

    :param data_set: data set to test on
    :param langider: model to test
    :param lang_to_idx: mapping of languages to ids
    """
    langs = data_set.get_tag_set()
    pred_prob = np.zeros((len(data_set), len(langs) + 1))
    dataloader = DataLoader(data_set)
    for i, elem in enumerate(tqdm(dataloader)):
        text = elem['text'][0]
        label = elem['label'][0]
        ranking = langider.rank(text)
        for lang, prob in ranking:
            pred_prob[i, lang_to_idx[lang]] = prob
        # Store the true label's index in the final column.
        pred_prob[i, len(langs)] = lang_to_idx[label]
    return pred_prob
2b955b637a289d0596c0584ac0761d8014e27e86
7,894
from urllib.request import urlopen

from bs4 import BeautifulSoup

# Module-level accumulators used by the original script; `scan_video` is
# defined elsewhere in the same module.
video = []
urls = []


def search(querry, lim=5):
    """Search the querry in youtube and return lim number of results.

    Querry is the keyword, i.e. name of the song
    lim is the number of songs that will be added to video array and returned
    """
    # Replace all the spaces with +
    querry = querry.replace(' ', '+')
    url = "https://www.youtube.com/results?search_query={}".format(querry)
    response = urlopen(url)
    html = response.read()
    soup = BeautifulSoup(html, "html.parser")
    count = 0
    for vid in soup.findAll(attrs={'class': 'yt-uix-tile-link'}):
        if lim == count:
            break
        url = vid['href']
        data = scan_video(url)
        if not data:
            break
        video.append(data)
        urls.append(url)
        count += 1
    return (video, urls)
92771e6aaa88ea65034981ff0c0c3b203addacec
7,895
import numpy as np

# `atomic_weights` and `calculate_molecular_mass` are expected to be supplied
# by the enclosing module.


def calculate_center_of_mass(symbols, coordinates):
    """Calculate the center of mass of a molecule.

    The center of mass is weighted by each atom's weight.

    Parameters
    ----------
    symbols : list
        A list of elements for the molecule
    coordinates : np.ndarray
        The coordinates of the molecule.

    Returns
    -------
    center_of_mass: np.ndarray
        The center of mass of the molecule.

    Notes
    -----
    The center of mass is calculated with the formula

    .. math:: \\vec{R}=\\frac{1}{M} \\sum_{i=1}^{n} m_{i}\\vec{r_{}i}

    """
    total_mass = calculate_molecular_mass(symbols)
    center_of_mass = np.array([0.0, 0.0, 0.0])
    for atom_number in range(len(symbols)):
        atom_type = symbols[atom_number]
        mass_of_atom = atomic_weights[atom_type]
        atom_position = coordinates[atom_number]
        center_of_mass += mass_of_atom * atom_position
    center_of_mass = center_of_mass / total_mass
    return center_of_mass
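A worked sketch for water; the `atomic_weights` table and `calculate_molecular_mass` below are minimal stand-ins for the module's own versions.

atomic_weights = {'O': 15.999, 'H': 1.008}

def calculate_molecular_mass(symbols):
    return sum(atomic_weights[s] for s in symbols)

water_symbols = ['O', 'H', 'H']
water_coords = np.array([[0.0, 0.0, 0.0],
                         [0.76, 0.59, 0.0],
                         [-0.76, 0.59, 0.0]])
calculate_center_of_mass(water_symbols, water_coords)
# -> roughly [0.0, 0.066, 0.0]; the center of mass sits close to the heavy oxygen.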
34a32e86c42875db59ad9d1bd1a6801d6fd51eb1
7,896
def __iadd__(self, other): """Pythonic use of concat Example: xs += ys Returns self.concat(self, other)""" return self.concat(self, other)
713980aed9713c2882a19ae9837315a431611bbc
7,897
from gensim.utils import simple_preprocess


def remove_stopwords(texts, stop_words):
    """
    Remove stopwords from tokenized documents.
    :param texts: Processed texts from main module
    :param stop_words: Collection of stopwords to filter out
    :return: Texts with stopwords removed
    """
    return [[word for word in simple_preprocess(str(doc)) if word not in stop_words]
            for doc in texts]
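Example usage; any collection of words works as `stop_words`:

docs = ["The model is a topic model", "A quick test"]
remove_stopwords(docs, {'the', 'is', 'a'})
# -> [['model', 'topic', 'model'], ['quick', 'test']]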
9e2f4bcf87886a35c3877de34d6746942af19065
7,898
import json


def dumps(obj):
    """Serialize `obj` to pretty-printed JSON (4-space indent, sorted keys),
    delegating non-standard types to the module's CustomEncoder."""
    return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder)
a9ad97c589a8f610d3186723566420604d99f4de
7,899
import boto3


def find_asg_using_amis(ami_ids):
    """
    Take a list of AMI ids and return a dictionary mapping each AMI id to
    the ARNs of the launch configurations that use it:
    {ami_id: [lc_arn, ...]}
    """
    ami_ids = listify(ami_ids)  # listify: module helper that wraps a scalar in a list
    result = {ami_id: [] for ami_id in ami_ids}

    client_asg = boto3.client('autoscaling')
    lc = client_asg.describe_launch_configurations()
    for a_lc in lc['LaunchConfigurations']:
        if a_lc['ImageId'] in ami_ids:
            result[a_lc['ImageId']].append(a_lc['LaunchConfigurationARN'])
    return result
951d20144be55a699911a690ee35405d3e4fa08b
7,900
def tokenize_nmt(text, num_examples=None): """Tokenize the English-French dataset.""" source, target = [], [] for i, line in enumerate(text.split('\n')): if num_examples and i > num_examples: break parts = line.split('\t') if len(parts) == 2: source.append(parts[0].split(' ')) target.append(parts[1].split(' ')) return source, target
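Example with two tab-separated English-French pairs:

raw_text = 'go .\tva !\nhello .\tsalut .'
source, target = tokenize_nmt(raw_text)
# source -> [['go', '.'], ['hello', '.']]
# target -> [['va', '!'], ['salut', '.']]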
f77def3cdd2eee6a9ecc3bb7fa007eccfcd9e8a8
7,901
def setup_snicar(input_file):
    """Builds impurity array and instances of all classes according to config in yaml file.

    Args:
        input_file: path to the yaml configuration file

    Returns:
        ice: instance of Ice class
        illumination: instance of Illumination class
        rt_config: instance of RTConfig class
        model_config: instance of ModelConfig class
        plot_config: instance of PlotConfig class
        impurities: array of impurity instances

    """

    impurities = build_impurities_array(input_file)
    (
        ice,
        illumination,
        rt_config,
        model_config,
        plot_config,
    ) = build_classes(input_file)

    return (
        ice,
        illumination,
        rt_config,
        model_config,
        plot_config,
        impurities,
    )
14fc40bb6c555cf94f8f8c7ddf2133e208b5ee02
7,902
async def list_model_config(model_name: str): """ Lists all the model's configuration. :param model_name: Model name :return: List of model's configuration """ try: return ApiResponse(data=dl_service.get_config(model_name)) except ApplicationError as e: raise e except Exception: raise ApplicationError('unexpected server error')
8ce01caee8fe7dacc381ea4ed23a5d885ab0da01
7,903
def signup_post(request):
    """Get the user's details from the form and store them in the database
    after validation; on success the user is allowed to log in, followed by
    additional identification."""
    mess = "login please"
    data = get_post(request)
    mail = data.POST['mail']
    name = data.POST['name']
    captcha = data.POST['captcha']
    hash = data.POST['hash']
    password = data.POST['password']
    address = data.POST['addres']
    phone = data.POST['phone']
    # date = time.strftime("%H:%M:%S %d.%m.%Y")
    # date = time.time()
    passw = getmd5(password)

    if not check_captcha(hash, captcha):
        hash, raw = get_captcha(hash)
        mess = "The verification code was entered incorrectly"
        return templ('app.auth:signup', request, dict(
            name=name, mail=mail, address=address, phone=phone,
            hash=hash, mess=mess))

    # Register the user only if the login is not already taken.
    if not request.db.doc.find_one({'_id': 'user:' + name}):
        doc = {'_id': 'user:' + name, 'name': name, 'password': passw, 'mail': mail,
               "type": "table_row", "rate": 0, "doc_type": "des:users",
               "doc": {"user": "user:" + name, "name": {'ru': name, 'en': name},
                       "old": "33", "phone": phone, "address": address,
                       'date': create_date(), "home": "false"}}
        request.db.doc.save(doc)
        request.db.doc.update({'_id': 'role:simple_users'},
                              {'$set': {'users.user:' + name: 'true'}})
        mess = "Congratulations, you can now log in."
    else:
        mess = "That login is already taken, please choose another one"
    return templ('libs.auth:login', request, dict(mess=mess))
22e4a8508849f9c4879c65e3347a0fabf6bffeb8
7,904
import numpy as np


def make_onehot(label, num_classes, axis=-1):
    """
    Create one hot tensor based on the input tensor
    Args:
        label: input tensor, the value must be positive integer and less than num_class
        num_classes: the number of class in one hot tensor
        axis: The axis to fill (default: -1, a new inner-most axis).
    Returns:
        :onehot tensor
    Examples:
    >>> make_onehot(np.array([[1, 2], [1, 3]]), 4, axis=-1)
    array([[[0., 1., 0., 0.],
            [0., 0., 1., 0.]],
    <BLANKLINE>
           [[0., 1., 0., 0.],
            [0., 0., 0., 1.]]])

    """
    shp = label.shape
    flatten_label = label.reshape(-1)
    result = np.eye(num_classes)[flatten_label.astype(np.int64)]
    # Restore the original shape with the new one-hot axis appended.
    result = result.reshape(*shp, num_classes)
    if axis != -1 and axis != result.ndim - 1:
        # Move the one-hot axis from the end to the requested position.
        result = np.moveaxis(result, -1, axis)
    return result
3329f5f135b1fea383b389012aeeb37ea923cb46
7,905
def kohn_sham_iteration(
        state,
        num_electrons,
        xc_energy_density_fn,
        interaction_fn,
        enforce_reflection_symmetry):
    """One iteration of Kohn-Sham calculation.

    Note xc_energy_density_fn must be wrapped by jax.tree_util.Partial so this
    function can take a callable. When the arguments of this callable change,
    e.g. the parameters of the neural network, kohn_sham_iteration() will not
    be recompiled.

    Args:
        state: KohnShamState.
        num_electrons: Integer, the number of electrons in the system. The first
            num_electrons states are occupied.
        xc_energy_density_fn: function takes density (num_grids,) and returns
            the energy density (num_grids,).
        interaction_fn: function takes displacements and returns float numpy
            array with the same shape of displacements.
        enforce_reflection_symmetry: Boolean, whether to enforce reflection
            symmetry. If True, the system is symmetric about the center.

    Returns:
        KohnShamState, the next state of Kohn-Sham iteration.
    """
    if enforce_reflection_symmetry:
        xc_energy_density_fn = _flip_and_average_fn(
            xc_energy_density_fn, locations=state.locations, grids=state.grids)

    hartree_potential = get_hartree_potential(
        density=state.density,
        grids=state.grids,
        interaction_fn=interaction_fn)
    xc_potential = get_xc_potential(
        density=state.density,
        xc_energy_density_fn=xc_energy_density_fn,
        grids=state.grids)
    ks_potential = hartree_potential + xc_potential + state.external_potential
    xc_energy_density = xc_energy_density_fn(state.density)

    # Solve Kohn-Sham equation.
    density, total_eigen_energies, gap = solve_noninteracting_system(
        external_potential=ks_potential,
        num_electrons=num_electrons,
        grids=state.grids)

    total_energy = (
        # kinetic energy = total_eigen_energies - external_potential_energy
        total_eigen_energies - get_external_potential_energy(
            external_potential=ks_potential,
            density=density,
            grids=state.grids)
        # Hartree energy
        + get_hartree_energy(
            density=density,
            grids=state.grids,
            interaction_fn=interaction_fn)
        # xc energy
        + get_xc_energy(
            density=density,
            xc_energy_density_fn=xc_energy_density_fn,
            grids=state.grids)
        # external energy
        + get_external_potential_energy(
            external_potential=state.external_potential,
            density=density,
            grids=state.grids)
    )

    if enforce_reflection_symmetry:
        density = utils.flip_and_average(
            locations=state.locations, grids=state.grids, array=density)

    return state._replace(
        density=density,
        total_energy=total_energy,
        hartree_potential=hartree_potential,
        xc_potential=xc_potential,
        xc_energy_density=xc_energy_density,
        gap=gap)
f016e1d8c6ab9072065183d3fa1d37cfa12eb8ee
7,906
def calculate_reliability(data): """ Calculates the reliability rating of the smartcab during testing. """ success_ratio = data['success'].sum() * 1.0 / len(data) if success_ratio == 1: # Always meets deadline return ("A+", "green") else: if success_ratio >= 0.90: return ("A", "green") elif success_ratio >= 0.80: return ("B", "green") elif success_ratio >= 0.70: return ("C", "#EEC700") elif success_ratio >= 0.60: return ("D", "#EEC700") else: return ("F", "red")
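A small sketch of the grading boundaries: 17 successes out of 20 trials is a 0.85 ratio, which falls in the "B" band.

import pandas as pd

data = pd.DataFrame({'success': [1] * 17 + [0] * 3})
calculate_reliability(data)  # -> ('B', 'green')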
d1c9ad7bba220beeae06c568cfd269aaaebfb994
7,907
def cache_mat_calc(dra, ddc, dra_err, ddc_err, ra_rad, dc_rad,
                   ra_dc_cor=None, l_max=1, fit_type="full", num_iter=None):
    """Calculate cache matrix for future use

    Parameters
    ----------
    dra/ddc : array of float
        R.A.(*cos(Dec.))/Dec. differences
    dra_err/ddc_err : array of float
        formal uncertainty of dra(*cos(dc_rad))/ddc
    ra_rad/dc_rad : array of float
        Right ascension/Declination in radian
    ra_dc_cor : array of float
        correlation coefficient between dra and ddc, default is None
    l_max : int
        maximum degree of the expansion
    fit_type : string
        flag to determine which parameters to be fitted
        'full' for T- and S-vectors both
        'T' for T-vectors only
        'S' for S-vectors only
    num_iter : int
        number of sources processed per batch, default is 100

    Returns
    ----------
    suffix_array : list of string
        suffixes identifying the cached normal matrices written by
        nor_mat_calc_for_cache, one per batch
    """
    # Maximum number of sources processed per batch.
    # According to my test, 100 should be a good choice.
    if num_iter is None:
        num_iter = 100

    div = dra.size // num_iter
    rem = dra.size % num_iter
    suffix_array = []

    if rem:
        suffix_array.append("{:05d}".format(0))

        if ra_dc_cor is not None:
            nor_mat_calc_for_cache(dra_err[: rem], ddc_err[: rem],
                                   ra_rad[: rem], dc_rad[: rem],
                                   ra_dc_cor=ra_dc_cor[: rem], l_max=l_max,
                                   fit_type=fit_type, suffix=suffix_array[0])
        else:
            nor_mat_calc_for_cache(dra_err[: rem], ddc_err[: rem],
                                   ra_rad[: rem], dc_rad[: rem],
                                   l_max=l_max, fit_type=fit_type,
                                   suffix=suffix_array[0])

    for i in range(div):
        sta = rem + i * num_iter
        end = sta + num_iter
        suffix_array.append("{:05d}".format(i+1))

        if ra_dc_cor is not None:
            nor_mat_calc_for_cache(dra_err[sta: end], ddc_err[sta: end],
                                   ra_rad[sta: end], dc_rad[sta: end],
                                   ra_dc_cor=ra_dc_cor[sta: end], l_max=l_max,
                                   fit_type=fit_type, suffix=suffix_array[-1])
        else:
            nor_mat_calc_for_cache(dra_err[sta: end], ddc_err[sta: end],
                                   ra_rad[sta: end], dc_rad[sta: end],
                                   l_max=l_max, fit_type=fit_type,
                                   suffix=suffix_array[-1])

    return suffix_array
805ecc94165ae1162341abdc7c4dd3557af5f8c4
7,908
def get_android_replacements(): """Gets a dictionary of all android-specific replacements to be made.""" replacements = {} compileSdk = 'compileSdkVersion {}'.format(COMPILE_SDK_VERSION) targetSdk = 'targetSdkVersion {}'.format(TARGET_SDK_VERSION) buildToolsVersion = 'buildToolsVersion \'{}\''.format(BUILD_TOOLS_VERSION) replacements[COMPILE_SDK_RE] = compileSdk replacements[TARGET_SDK_RE] = targetSdk replacements[BUILD_TOOLS_RE] = buildToolsVersion return replacements
e3b78d7ccd897d79db66740d46aa05410dd2a83f
7,909
from collections import defaultdict
from typing import Dict, List, Set

from django.utils.timezone import now as timezone_now

# The Slack-import type aliases (ZerverFieldsT, SlackToZulipUserIDT, ...) and
# helpers (get_messages_iterator, get_message_sending_user) come from the
# surrounding Zulip import module.


def process_long_term_idle_users(
    slack_data_dir: str,
    users: List[ZerverFieldsT],
    slack_user_id_to_zulip_user_id: SlackToZulipUserIDT,
    added_channels: AddedChannelsT,
    added_mpims: AddedMPIMsT,
    dm_members: DMMembersT,
    zerver_userprofile: List[ZerverFieldsT],
) -> Set[int]:
    """Algorithmically, we treat users who have sent at least 10 messages or
    have sent a message within the last 60 days as active.  Everyone else is
    treated as long-term idle, which means they will have a slightly slower
    first page load when coming back to Zulip.
    """
    all_messages = get_messages_iterator(slack_data_dir, added_channels, added_mpims, dm_members)

    sender_counts: Dict[str, int] = defaultdict(int)
    recent_senders: Set[str] = set()
    NOW = float(timezone_now().timestamp())
    for message in all_messages:
        timestamp = float(message["ts"])
        slack_user_id = get_message_sending_user(message)
        if not slack_user_id:
            continue

        if slack_user_id in recent_senders:
            continue

        if NOW - timestamp < 60 * 24 * 60 * 60:  # 60 days in seconds, per the docstring
            recent_senders.add(slack_user_id)

        sender_counts[slack_user_id] += 1

    for (slack_sender_id, count) in sender_counts.items():
        if count > 10:
            recent_senders.add(slack_sender_id)

    long_term_idle = set()

    for slack_user in users:
        if slack_user["id"] in recent_senders:
            continue
        zulip_user_id = slack_user_id_to_zulip_user_id[slack_user["id"]]
        long_term_idle.add(zulip_user_id)

    for user_profile_row in zerver_userprofile:
        if user_profile_row["id"] in long_term_idle:
            user_profile_row["long_term_idle"] = True
            # Setting last_active_message_id to 1 means the user, if
            # imported, will get the full message history for the
            # streams they were on.
            user_profile_row["last_active_message_id"] = 1

    return long_term_idle
4a372837ed5497117227e18534c91c6f0ce840bf
7,910
def clear_cache(): """ Clears internal cache. Returns something that can be given back to restore_cache. """ global FS_CACHE old = FS_CACHE FS_CACHE = {} return old
492513177a70cd663671616e034c6f3b287ceb75
7,911
import numpy as np
import pandas as pd
from tqdm import tqdm  # a bare `import tqdm` would make tqdm(...) a module call


def extend_gdf(gdf_disjoint, id_col):
    """
    Add duplicates of intersecting geometries to be able to add the constants.

    This function adds rows with duplicate geometries and creates the new `id`
    column for each of the new rows. This function is called by another
    function `complete_disjoint_geoms`.
    """
    tqdm_max = len(gdf_disjoint)
    ext = pd.DataFrame(columns=list(gdf_disjoint.columns) + [id_col + "_set"])
    for _, row in tqdm(gdf_disjoint.iterrows(), total=tqdm_max):
        num = len(row[id_col])
        data = np.array([list(row[id_col]), [row["geometry"]] * num]).T
        ext_new = pd.DataFrame(data, columns=gdf_disjoint.columns)
        ext_new[id_col + "_set"] = [row[id_col]] * num
        # DataFrame.append was removed in pandas 2.0; concat is the equivalent.
        ext = pd.concat([ext, ext_new], ignore_index=True)
    return ext
5d6667b67c47125f8668bb6127b4a295fb2d61c9
7,912
def schedule_news_updates(update_interval:int, update_name:str)->dict: """ Functionality: --------------- Schedules a new news data update Parameters: --------------- update_interval: int The time until the scheduler should update the news data update_name: str The name of the update that has caused the scheduling of the news data update Returns: --------------- a key-value pair: dict Returns a dictionary with the key being the update name and the value being news scheduler object """ return({update_name:news_scheduler.enter(update_interval, 1, update_news, ())})
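A context sketch: `news_scheduler` and `update_news` are assumed module-level names; with Python's standard sched module they could look like this.

import sched
import time

news_scheduler = sched.scheduler(time.time, time.sleep)

def update_news():
    print("news data refreshed")

pending = schedule_news_updates(60, 'morning-update')
news_scheduler.run()  # blocks, then calls update_news() after 60 seconds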
1e2bf07aae3e2468f1ee0ff119b492820d866460
7,913
def get_digits(text): """ Returns all numeric characters (digits) in string *text* in a new (concatenated) **string**. Example: >>> get_digits('Test123') '123' >>> int(get_digits('The answer is 42')) 42 :param text: The string to search. :type text: str, unicode :rtype: str, unicode """ _vld.pass_if(_vld.is_text(text), TypeError, "'text' attribute must be a string (got {!r})".format(text)) return EMPTY_STR.join(s for s in text if s.isdigit())
78bcd49b74dbdfd7d9f40af3806c2b984adca780
7,914