Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def get_crypto_quote(symbol, info=None): """Gets information about a crypto including low price, high price, and open price :param symbol: The crypto ticker. :type symbol: str :param info: Will filter the results to have a list of the values that correspond to key that matches info. :type info: Optional[str] :returns: [dict] If info parameter is left as None then the list will contain a dictionary of key/value pairs for each ticker. \ Otherwise, it will be a list of strings where the strings are the values of the key that corresponds to info. :Dictionary Keys: * asset_currency * display_only * id * max_order_size * min_order_size * min_order_price_increment * min_order_quantity_increment * name * quote_currency * symbol * tradability """ id = get_crypto_info(symbol, info='id') url = urls.crypto_quote(id) data = helper.request_get(url) return(helper.filter(data, info))
db10e7493ef6e98ad0876d30357c6b5be8269776
7,800
from .tools.forms import digest as digest_forms from .multitool import convert from .protonation import add_missing_hydrogens from .model_loops import add_loop def fix(item, missing_atoms=True, missing_residues=True, nonstandard_residues=True, missing_terminals=True, missing_loops=False, missing_hydrogens=True, pH=7.4, to_form=None, engine_fix='PDBFixer', engine_hydrogens='PDBFixer', engine_loops='Modeller', verbose=False): """fix_pdb_structure(item, missing_atoms=True, missing_residues=True, nonstandard_residues=True, missing_terminals=True, missing_loops=False, missing_hydrogens=True, pH=7.4, to_form=None, engine_fix='PDBFixer', engine_hydrogens='PDBFixer', engine_loops='Modeller', verbose=False): Fixing missing atoms, residues, terminals or loops in the molecular model coming from a pdb file. This method fixes the possible missing atoms, residues, loops or terminals in a molecular model. The result is a new molecular model, in the desired supported form, with those elements fixed. Parameters ---------- item: molecular model Molecular model in any supported form by MolSysMT. arg2: type, default='value' Paragraph with explanation. Returns ------- object: type Paragraph with explanation. Examples -------- See Also -------- :func:`molsysmt.load` Notes ----- Todo ---- Warning ------- The method has being tested with the following input forms: pdbid, pdbfile, pdbfixer.PDBFixer and openmm.Modeller. """ form_in, form_out = digest_forms(item, to_form) engine_fix = digest_engines(engine_fix) engine_hydrogens = digest_engines(engine_hydrogens) engine_loops = digest_engines(engine_loops) tmp_item = None if engine_fix=='PDBFixer': tmp_item = convert(item, to_form='pdbfixer.PDBFixer') if missing_residues: tmp_item.findMissingResidues() if missing_atoms: tmp_item.findMissingAtoms() if nonstandard_residues: tmp_item.findNonstandardResidues() if verbose: print('Missing residues:', tmp_item.missingResidues) print('Non standard residues:', tmp_item.nonstandardResidues) print('Missing atoms', tmp_item.missingAtoms) print('Missing terminals:', tmp_item.missingTerminals) tmp_item.addMissingAtoms() if verbose: print('Missing residues or atoms reported fixed.') if missing_hydrogens: tmp_item = add_missing_hydrogens(tmp_item, pH=pH, engine=engine_hydrogens, verbose=verbose) if missing_loops: tmp_item = add_loop(tmp_item, engine=engine_loops) tmp_item = convert(tmp_item, to_form=form_out) return tmp_item
10bc29a4006e8bc1b18fba126dde7152672b7a24
7,801
def data_split(*args, **kwargs): """A function to split a dataset into train, test, and optionally validation datasets. **Arguments** - ***args** : arbitrary _numpy.ndarray_ datasets - An arbitrary number of datasets, each required to have the same number of elements, as numpy arrays. - **train** : {_int_, _float_} - If a float, the fraction of elements to include in the training set. If an integer, the number of elements to include in the training set. The value `-1` is special and means include the remaining part of the dataset in the training dataset after the test and (optionally) val parts have been removed - **val** : {_int_, _float_} - If a float, the fraction of elements to include in the validation set. If an integer, the number of elements to include in the validation set. The value `0` is special and means do not form a validation set. - **test** : {_int_, _float_} - If a float, the fraction of elements to include in the test set. If an integer, the number of elements to include in the test set. - **shuffle** : _bool_ - A flag to control whether the dataset is shuffled prior to being split into parts. **Returns** - _list_ - A list of the split datasets in train, [val], test order. If datasets `X`, `Y`, and `Z` were given as `args` (and assuming a non-zero `val`), then [`X_train`, `X_val`, `X_test`, `Y_train`, `Y_val`, `Y_test`, `Z_train`, `Z_val`, `Z_test`] will be returned. """ # handle valid kwargs train, val, test = kwargs.pop('train', -1), kwargs.pop('val', 0.0), kwargs.pop('test', 0.1) shuffle = kwargs.pop('shuffle', True) if len(kwargs): raise TypeError('following kwargs are invalid: {}'.format(kwargs)) # validity checks if len(args) == 0: raise RuntimeError('Need to pass at least one argument to data_split') # check for consistent length n_samples = len(args[0]) for arg in args[1:]: assert len(arg) == n_samples, 'args to data_split have different length' # determine numbers num_val = int(n_samples*val) if val<=1 else val num_test = int(n_samples*test) if test <=1 else test num_train = n_samples - num_val - num_test if train==-1 else (int(n_samples*train) if train<=1 else train) assert num_train >= 0, 'bad parameters: negative num_train' assert num_train + num_val + num_test <= n_samples, 'too few samples for requested data split' # calculate masks perm = np.random.permutation(n_samples) if shuffle else np.arange(n_samples) train_mask = perm[:num_train] val_mask = perm[-num_val:] test_mask = perm[num_train:num_train+num_test] # apply masks masks = [train_mask, val_mask, test_mask] if num_val > 0 else [train_mask, test_mask] # return list of new datasets return [arg[mask] for arg in args for mask in masks]
e2ad6e52d2868020cc0bcb960533348a1f93ed31
7,802
def compare(isamAppliance1, isamAppliance2): """ Compare Policy Sets between two appliances """ ret_obj1 = get_all(isamAppliance1) ret_obj2 = get_all(isamAppliance2) for obj in ret_obj1['data']: del obj['id'] del obj['userlastmodified'] del obj['lastmodified'] del obj['datecreated'] obj['policies'] = _convert_policy_id_to_name(isamAppliance1, obj['policies']) for obj in ret_obj2['data']: del obj['id'] del obj['userlastmodified'] del obj['lastmodified'] del obj['datecreated'] obj['policies'] = _convert_policy_id_to_name(isamAppliance2, obj['policies']) return tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['id', 'userlastmodified', 'lastmodified', 'datecreated'])
a727eac5efb4e117413d70191e7a74921e3ac284
7,803
def capacity(quantity, channel, gamma, dim, basis, eps, **kwargs): """ Runs the Blahut-Arimoto algorithm to compute the capacity given by 'quantity' (which can be 'h', 'tc', 'coh' or 'qmi' taking the channel, gamma, dim, basis and tolerance (eps) as inputs). With the optional keyword arguments 'plot' (Boolean), it outputs a plot showing how the calculated value changes with the number of iterations. With the optional keyword arguments 'latexplot' (Boolean), the plot uses latex in the labels """ #to store the calculated values itern = [] value = [] #initialization rhoa = DensityMatrix(np.diag((1/dim)*np.ones((1,dim))[0])) #Blahut-Arimoto algorithm iteration for iterator in range(int(gamma*np.log2(dim)/eps)): # for iterator in range(1): itern.append(iterator) sigmab = rhoa rhoa = linalg.expm(np.log(2)*(linalg.logm(sigmab.mat)/np.log(2) + (1/gamma)*(F(quantity, sigmab, basis, channel).mat))) rhoa = DensityMatrix(rhoa/np.trace(rhoa)) value.append(J(quantity, rhoa, rhoa, gamma, basis, channel)) #Plotting if kwargs['plot'] is True: # if kwargs['latexplot'] is True: # plt.rc('text', usetex=True) # plt.rc('font', family='serif') fig, ax = plt.subplots() plt.plot(itern, value, marker = '.', markersize='7', label = r'Capacity value vs iteration' ) plt.xlabel(r'Number of iterations', fontsize = '14') plt.ylabel(r'Value of capacity', fontsize = '14') plt.xticks(fontsize = '8') plt.yticks(fontsize = '8') plt.grid(True) plt.show() return J(quantity, rhoa, rhoa, gamma, basis, channel)
6624d045fb953d536b082f183a6c1536dcd9ca50
7,804
def get_Teq_from_L(L: ArrayLike, d: ArrayLike, A: ArrayLike) -> np.ndarray: """Calculates the equilibrium temperature of a planet given the stellar luminosity L, planetary semi-major axis d and surface albedo A: Args: L (ArrayLike): Stellar luminosity in erg/s. d (ArrayLike): Planetary semi-major axis in cm. A (ArrayLike): Planetary albedo. Returns: np.ndarray: The planetary equilibrium temperature in K. """ return ((L * (1 - A)) / (16 * sigma_b * np.pi * d ** 2)) ** 0.25
9f140c554059074d9569e48ae2f971bc430e2fba
7,805
from typing import Type def lookup_container_plugin_by_type(container: IContainer, plugin_type: Type[ContainerResolutionPlugin]): """ Given a container, finds the first plugin that is an instance of the specified type. :param container: The container to perform the lookup on. :param plugin_type: The type of the plugin to find. :return: The first instance of ``plugin_type`` in ``container.plugins``. """ return next( plugin for plugin in container.plugins if isinstance(plugin, plugin_type) )
b41cfc2e1e1328a8f54e938b7944d3f16924d3cf
7,806
from scipy.ndimage import shift def shift_map_longitude(mapdata, lonshift, spline_order=1): """ Simple shift of the map by wrapping it around the edges Internally uses scipy's ndimage.shift with spline interpolation order as requested for interpolation Parameters ---------- mapdata : 2D Numpy array A map with the second dimension the longutide stretched fully along the map lonshift : float A simple float representing the longitude shift of the array spline_order: int [1, 5] Returns ------- A shifted map """ # Constant degrees = 360.0 # Check the map and compute the relative shift assert len(mapdata.shape) == 2, "Only for 2D maps" assert mapdata.shape[1] > 1, "Map has only one longitudinal coordinate" n = (mapdata.shape[1] - 1) x = degrees * lonshift / n # The number of pixels to shift # Use scipy for the rest mapdata_shift = shift(mapdata, [0, x], mode='wrap', order=spline_order) return mapdata_shift
72373800f3a53785989cc2e2da4dab08d0976b30
7,807
def aalogoheights(aahistObj, N=20): """For a objhist of AA frequencies, compute the heights of each AA for a logo plot""" aahistObj = deepcopy(aahistObj) keys = list(aahistObj.keys()) for aa in BADAA: if aa in keys: dummy = aahistObj.pop(aa) keys = [aa for aa in aahistObj.sortedKeys(reverse=False)] freq = aahistObj.freq() p = np.array([freq[k] for k in keys]) #err = (1/np.log(2))*((N-1) / (2*aahistObj.sum())) #totEntropy = np.log2(N)-((-p*np.log2(p)).sum() + err) totEntropy = np.log2(N)-((-p*np.log2(p)).sum()) heights = p * totEntropy return keys, heights
8020605d6c2a9a618e5faed57ba7af5e1315dfec
7,808
def cmdline_opts( request ): """PyMTL options parsed from pytest commandline options.""" opts = _parse_opts_from_request( request ) # If a fixture is used by a test class, this seems to be the only # way to retrieve the fixture value. # https://stackoverflow.com/a/37761165/13190001 if request.cls is not None: request.cls.cmdline_opts = opts return opts
8b3af4ab15a1a5a11a633fa322e4484f2d8257bc
7,809
def replace(index, ndim, axes, rindices): """Replace indexing for specified dimensions Args: index(index): object used in slicing ndim(num): number of dimensions axes(list): dimensions to be replaced rindices(list): new indexing for these dimensions Returns: index """ index2 = list(expand(index, ndim)) for axis, rindex in zip(axes, rindices): axis = axisindex(index2, axis, ndim) index2[axis] = rindex return tuple(index2)
3a8c9ac8b9bf12a5d416e422ddfb0f4458cf9417
7,810
import select def _closed(sock): """Return True if we know socket has been closed, False otherwise. """ try: rd, _, _ = select.select([sock], [], [], 0) # Any exception here is equally bad (select.error, ValueError, etc.). except: return True return len(rd) > 0
4de2aee7743cac8e660ab01f2920935faf0ee3e9
7,811
def get_forest_connection(device_name: str, seed=None): """Get a connection to a forest backend Args: device_name: the device to connect to Returns: A connection to either a pyquil simulator or a QPU """ if device_name == "wavefunction-simulator": return WavefunctionSimulator(random_seed=seed) else: return get_qc(device_name)
291c92508b097908fa86fb957b42d73066d65ebd
7,812
def get_slack_colour(level): """Return Slack colour value based on log level.""" level = level.upper() colours = { "CRITICAL": "ff0000", "ERROR": "ff9933", "WARNING": "ffcc00", "INFO": "33ccff", "DEBUG": "good" } return colours.get(level, "good")
5a75bc103361b3724921d2b9c03c32f63b9b8115
7,813
def add_suffix(path, suffix=""): """Adds a suffix to a filename *path*""" return join(dirname(path), basename(path, ext=False) + suffix + extname(path))
dd95548e06e29c91f0a35c5dd0979889ab945076
7,814
def MdAE_np(preds, labels): """ Median Absolute Error :param preds: :param labels: :return: """ preds = np.reshape(preds, [-1]) labels = np.reshape(labels, [-1]) return np.median(np.abs(preds - labels))
4a725eb35e5f7bd1f77b8433b7ea7393bbdae92e
7,815
from botocore.exceptions import ClientError, BotoCoreError async def s3_fetch_object(url, s3, range=None, **kw): """ returns object with On success: .url = url .data = bytes .last_modified -- last modified timestamp .range = None | (in,out) .error = None On failure: .url = url .data = None .last_modified = None .range = None | (in, out) .error = str| botocore.Exception class """ def result(data=None, last_modified=None, error=None): return SimpleNamespace(url=url, data=data, error=error, last_modified=last_modified, range=range) bucket, key = s3_url_parse(url) extra_args = dict(**kw) if range is not None: try: extra_args['Range'] = s3_fmt_range(range) except Exception: return result(error='Bad range passed in: ' + str(range)) try: obj = await s3.get_object(Bucket=bucket, Key=key, **extra_args) stream = obj.get('Body', None) if stream is None: return result(error='Missing Body in response') async with stream: data = await stream.read() except (ClientError, BotoCoreError) as e: return result(error=e) except Exception as e: return result(error="Some Error: " + str(e)) last_modified = obj.get('LastModified', None) return result(data=data, last_modified=last_modified)
0da8fadb248abe8c4c23e75367b3ddc884df71d3
7,816
from . import darwin from . import linux from . import windows import platform def mss(**kwargs): # type: (Any) -> MSSMixin """ Factory returning a proper MSS class instance. It detects the platform we are running on and chooses the most adapted mss_class to take screenshots. It then proxies its arguments to the class for instantiation. """ # pylint: disable=import-outside-toplevel os_ = platform.system().lower() if os_ == "darwin": return darwin.MSS(**kwargs) if os_ == "linux": return linux.MSS(**kwargs) if os_ == "windows": return windows.MSS(**kwargs) raise ScreenShotError("System {!r} not (yet?) implemented.".format(os_))
057916bf6b13bd6089ccb6f46b1a2ceb583d5bf8
7,817
def reshape_fps(X): """Reshape 4D fingerprint data to 2D If X is already 2D, do nothing. Returns: reshaped X """ if len(X.shape) == 4: num_factors = X.shape[3] num_fps = np.prod(X.shape[:3]) X.shape = (num_fps,num_factors) else: num_factors = X.shape[1] num_fps = X.shape[0] return X
ab2cd286194dd6d35fb27a540378a132d25db575
7,818
def df_fc_overlap_2(): """Scenario case with 2 fragments overlapping, bound to a common fragment.""" mol = Chem.MolFromSmiles('NC1CC(CCC1O)C1CCC1') return DataFrame([ ['mol_fc_overlap_2', 'XXX', 'O1', 0, 'O1:0', 'O2', 0, 'O2:0', 'ffo', 'fusion', 'false_positive', 'overlap', (7, 6, 5, 4, 3, 2, 1), (0, 1, 2, 3, 4, 5, 6), 12, mol, mol_o1, mol_o2, 'O1:0@1,2,3,4,5,6[ffo]O2:0@1,2,3,4,5,6'], ['mol_fc_overlap_2', 'XXX', 'O1', 0, 'O1:0', 'O3', 0, 'O3:0', 'cm', 'connection', 'monopodal', '', (7, 6, 5, 4, 3, 2, 1), (8, 9, 10, 11), 12, mol, mol_o1, mol_o3, 'O1:0@4[cm]O3:0@0'], ['mol_fc_overlap_2', 'XXX', 'O2', 0, 'O2:0', 'O3', 0, 'O3:0', 'cm', 'connection', 'monopodal', '', (0, 1, 2, 3, 4, 5, 6), (8, 9, 10, 11), 12, mol, mol_o2, mol_o3, 'O2:0@3[cm]O3:0@0'], ], columns=['idm', 'inchikey', 'idf1', 'idxf1', 'fid1', 'idf2', 'idxf2', 'fid2', 'fcc', 'category', 'type', 'subtype', '_aidxf1', '_aidxf2', 'hac', 'mol', 'mol_frag_1', 'mol_frag_2', 'fc'])
7ee533aa1c7bb821ae6a73a27d39d6a7e796087f
7,819
def show_toolbar(request): """Determines whether debug toolbar should be shown for the request. Requires settings.DEBUG=True, 'debug_toolbar' GET param present, and request ip in settings.INTERNAL_IPS. Args: request: HttpRequest object. Returns: Boolean. """ if ('debug_toolbar' not in request.GET and '/__debug__/' not in request.path): return False return middleware.show_toolbar(request)
7a3fec348f5d37adcafd29d5cdd757e5e8de6cad
7,820
from datetime import datetime def strfnow(fmt=HUMAN_DATETIME): """ Returns a string representation of the current timestamp """ return datetime.now(tzlocal()).strftime(fmt)
50fe38d37dfe8581f6cfa07aaaeb588a2e6e72a9
7,821
def tag_to_dict(node): """Assume tag has one layer of children, each of which is text, e.g. <medalline> <rank>1</rank> <organization>USA</organization> <gold>13</gold> <silver>10</silver> <bronze>9</bronze> <total>32</total> </medalline> """ d = {} for child in node: d[child.tag] = child.text return d
e2131e070dce8620630e994cc25578a9a8438c64
7,822
from typing import Any def compute_contact_centroid(molecular_complex: Any, cutoff: float = 4.5) -> np.ndarray: """Computes the (x,y,z) centroid of the contact regions of this molecular complex. For a molecular complex, it's necessary for various featurizations that compute voxel grids to find a reasonable center for the voxelization. This function computes the centroid of all the contact atoms, defined as an atom that's within `cutoff` Angstroms of an atom from a different molecule. Parameters ---------- molecular_complex: Object A representation of a molecular complex, produced by `rdkit_util.load_complex`. cutoff: float, optional The distance in Angstroms considered for computing contacts. """ fragments = reduce_molecular_complex_to_contacts(molecular_complex, cutoff) coords = [frag[0] for frag in fragments] contact_coords = merge_molecules_xyz(coords) centroid = np.mean(contact_coords, axis=0) return (centroid)
53b83e6814f6f59645d84c36de458952918123fc
7,823
def general_operator_gamma_norm(matrix, gamma, max_j, max_q): """ Returns the gamma operator norm of matrix, summing up to max_j and considering the sup up to max_q. Assumed that matrix is a function accepting two arguments i,j and not an array () for efficiency. """ max_j_sum = -1 q = 1 while(q < max_q): temp_j_sum = nsum(lambda j: fprod([power(q, gamma), power(j, -gamma), fabs(matrix(q, j))]), [1, max_j]) max_j_sum = temp_j_sum if temp_j_sum > max_j_sum else max_j_sum q += 1 return max_j_sum
56993d0f406af3cead83e662aecb19d9082878fa
7,824
def crop_image_single(img, device): """ Implementation of the MTCNN network to crop single image to only show the face as shown in the facenet_pytorch doc: https://github.com/timesler/facenet-pytorch/blob/master/examples/infer.ipynb :param device: pytorch device :param img: single image to be cropped :return: cropped image """ model = MTCNN(image_size=160, margin=0, min_face_size=20, thresholds=[0.6, 0.7, 0.7], factor=0.709, post_process=False, device=device) x_aligned = model(img) return x_aligned
b2755a1bf464dca74cbfc32a5bf5c61f106758ae
7,825
def tf2zpk(b, a): """Return zero, pole, gain (z,p,k) representation from a numerator, denominator representation of a linear filter. Parameters ---------- b : ndarray Numerator polynomial. a : ndarray Denominator polynomial. Returns ------- z : ndarray Zeros of the transfer function. p : ndarray Poles of the transfer function. k : float System gain. Notes ----- If some values of ``b`` are too close to 0, they are removed. In that case, a BadCoefficients warning is emitted. """ b, a = normalize(b, a) b = (b + 0.0) / a[0] a = (a + 0.0) / a[0] k = b[0] b /= b[0] z = roots(b) p = roots(a) return z, p, k
3d83d4053a89be19c3738650a11216d38845d6a6
7,826
def gpiod_line_is_free(line: gpiod_line) -> bool: """ @brief Check if the calling user has neither requested ownership of this line nor configured any event notifications. @param line: GPIO line object. @return True if given line is free, false otherwise. """ return line.state == _LINE_FREE
939320d0737406789bbb81bc9c73023dff71fb51
7,827
from jax import random def train_step(optimizer, inputs, learning_rate_fn, dropout_rng=None): """Perform a single training step.""" weights = jnp.where(inputs > 0, 1, 0) # We handle PRNG splitting inside the top pmap, rather # than handling it outside in the training loop - doing the # latter can add some stalls to the devices. dropout_rng, new_dropout_rng = random.split(dropout_rng) def loss_fn(model): """Loss function used for training.""" with nn.stochastic(dropout_rng): logits = model(inputs, train=True) loss, weight_sum = compute_weighted_cross_entropy(logits, inputs, weights) mean_loss = loss / weight_sum return mean_loss, logits step = optimizer.state.step lr = learning_rate_fn(step) grad_fn = jax.value_and_grad(loss_fn, has_aux=True) (_, logits), grad = grad_fn(optimizer.target) grad = jax.lax.pmean(grad, 'batch') new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr) metrics = compute_metrics(logits, inputs, weights) metrics['learning_rate'] = lr return new_optimizer, metrics, new_dropout_rng
2dae63fcfb9fc5bc059b084bf748886d3d257c4c
7,828
def doublet_line_polar_u(rcp,zcp,dmz_dz, bSelfInd=False): """ Velocity field induced by a semi-infinite doublet line (on the z axis) of intensity `dmz_dz` Control points defined by polar coordinates `rcp` and `zcp`. \int 1/(r^2 + (z-x)^2 )^(3/2) dx \int 1/(r^2 + (z-x)^2 )^(5/2) dx """ if np.any(rcp<0): raise Exception('Script meant for positive r') r=np.asarray(rcp) z=np.asarray(zcp) # Vectorial "if" statements to isolate singular regions of the domain bZ0 = np.abs(z)<1e-8 bR0 = np.abs(r)<1e-8 bZ0R0 = np.logical_and(bZ0,bR0) bZ0Rp = np.logical_and(bZ0, np.abs(r)>1e-8) bR0Zp = np.logical_and(bR0, z>1e-8) bR0Zm = np.logical_and(bR0, z<-1e-8) bOK = np.logical_and(~bZ0,~bR0) uz=np.zeros(r.shape) ur=np.zeros(r.shape) norm2 = r**2+z**2 uz[bOK] = dmz_dz/(4*np.pi) * 1/r[bOK]**2 * ( z[bOK]**3/(norm2[bOK])**(3/2) - z[bOK]/(norm2[bOK])**(1/2) ) uz[bZ0Rp] = 0 uz[bR0Zm] = dmz_dz/(4*np.pi) * 1/norm2[bR0Zm] #uz[bR0Zp] = dmz_dz/(4*np.pi) * 1/norm2[bR0Zp] #<<< No singularity there, but we force it to 0 ur[bOK] =-dmz_dz/(4*np.pi) * r[bOK] * 1/(norm2[bOK] )**(3/2) ur[bZ0Rp] =-dmz_dz/(4*np.pi) * r[bZ0Rp] * 1/(norm2[bZ0Rp])**(3/2) ur[bR0Zm] = 0 ur[bR0Zp] = 0 ur[bZ0R0] = 0 uz[bZ0R0] = 0 return ur, uz
9dcee49192273a6482b269120af01683a11916b6
7,829
def paginate(text: str): """Simple generator that paginates text.""" last = 0 pages = [] for curr in range(0, len(text)): if curr % 1980 == 0: pages.append(text[last:curr]) last = curr appd_index = curr if appd_index != len(text) - 1: pages.append(text[last:curr]) return list(filter(lambda a: a != '', pages))
f40b97e0f221b4c6afffcc4dd707daf48685d04a
7,830
def get_batch_copy(vocab_size, batch_size, seq_len): """Generates random data for copying.""" batch = np.random.choice( vocab_size - 1, size=[batch_size, seq_len // 2 - 1]) + 1 batch = np.concatenate([np.zeros([batch_size, 1], dtype=int), batch], axis=1) batch = np.concatenate([batch] * 2, axis=1) batch_mask = np.concatenate([ np.zeros([batch_size, seq_len // 2], dtype=bool), np.ones([batch_size, seq_len // 2], dtype=bool) ], axis=1) return batch, batch_mask
f1b6092393a0b8e7cb6c2a2ae191c29d28d2d89f
7,831
import pandas def buildCompareDFs(strTodayFileName): """read in and return today's CSV as DF, determine appropriate old CSV as DF, and the old file name for use later""" # get today's file dfTodaysCards = pandas.read_csv( DATA_DIR_NAME + strTodayFileName, dtype={'Card Number': object}) dfTodaysCards = cleanCardDataFrame(dfTodaysCards) # getting older file is a bit trickier, check the run log, find the most recent run, find the old file used, get the next recent old file to compare with dictRunLog = readRunLog() strOldFileName = determineCompareFile(dictRunLog) print("ToCompareAgainst: " + strOldFileName) dfOldCards = pandas.read_csv( DATA_DIR_NAME + strOldFileName, dtype={'Card Number': object}) dfOldCards = cleanCardDataFrame(dfOldCards) dfOldCards = dfOldCards.rename( index=str, columns={"Count": "OldCount", "Price": "OldPrice"}) return dfTodaysCards, dfOldCards, strOldFileName
488afcd0704b9c73a83cab3bd898703d290ac557
7,832
def _vj_stat(v = None, j = None, freq_type = 'vj_occur_freq', ts = None): """ Return estimate of a single v-gene, j-gene, or vj-gene-pairings frequency specified < v > and <j> argumens , given a tcrsamper instance < ts > Parameters ---------- v : str j : str e.g., freq_type : str 'vj_occur_freq', 'vj_freq', 'v_occur_freq', 'v_freq', 'j_occur_freq', 'j_freq' df : pd.DataFrame DataFrame containing v and j gene names ts : tcrsampler.sampler.TCRsampler sampler instance Example ------- >>> import pandas as pd >>> import os >>> from tcrsampler.sampler import TCRsampler >>> from tcrregex.vj_diff import * >>> t = TCRsampler() >>> fn = os.path.join("tcrregex", "test_files", 'britanova_chord_blood_sample_5000.csv' ) >>> t.ref_df = pd.read_csv(fn) >>> t.build_background() >>> _vj_stat(v = 'TRBV20-1*01' , j ='TRBJ2-1*01', ts = t, freq_type = 'vj_occur_freq') 0.014802960592118424 >>> _vj_stat(v = 'TRBV20-1*01' , ts = t, freq_type = 'v_occur_freq') 0.060012002400480095 >>> _vj_stat(j = 'TRBJ2-1*01', ts = t, freq_type = 'j_occur_freq') 0.272254450890178 """ if ts is None: raise ValueError("._vj_stat requires < ts > be a TCRsampler instance") if v is None and j is None: raise ValueError("Niether a v- nor j-gene was supplied to ._vj_stat ; atleast one must be provided") if v is None: tp = j assert freq_type in ['j_freq', 'j_occur_freq'] elif j is None: tp = v assert freq_type in ['v_freq', 'v_occur_freq'] else: tp = (v,j) assert freq_type in ['vj_freq', 'vj_occur_freq'] return ts.__dict__[freq_type][tp]
99228d17714c5ba403071ad0251f8642bc3148e6
7,833
def __cvx_eda(y, delta, tau0=2., tau1=0.7, delta_knot=10., alpha=8e-4, gamma=1e-2, solver=None, options={'reltol': 1e-9, 'show_progress': False}): """ CVXEDA Convex optimization approach to electrodermal activity processing This function implements the cvxEDA algorithm described in "cvxEDA: a Convex Optimization Approach to Electrodermal Activity Processing" (http://dx.doi.org/10.1109/TBME.2015.2474131, also available from the authors' homepages). Arguments: y: observed EDA signal (we recommend normalizing it: y = zscore(y)) delta: sampling interval (in seconds) of y tau0: slow time constant of the Bateman function tau1: fast time constant of the Bateman function delta_knot: time between knots of the tonic spline function alpha: penalization for the sparse SMNA driver gamma: penalization for the tonic spline coefficients solver: sparse QP solver to be used, see cvxopt.solvers.qp options: solver options, see: http://cvxopt.org/userguide/coneprog.html#algorithm-parameters Returns (see paper for details): r: phasic component p: sparse SMNA driver of phasic component t: tonic component l: coefficients of tonic spline d: offset and slope of the linear drift term e: model residuals obj: value of objective function being minimized (eq 15 of paper) """ n = len(y) y = cvx.matrix(y) # bateman ARMA model a1 = 1. / min(tau1, tau0) # a1 > a0 a0 = 1. / max(tau1, tau0) ar = np.array([(a1 * delta + 2.) * (a0 * delta + 2.), 2. * a1 * a0 * delta ** 2 - 8., (a1 * delta - 2.) * (a0 * delta - 2.)]) / ((a1 - a0) * delta ** 2) ma = np.array([1., 2., 1.]) # matrices for ARMA model i = np.arange(2, n) A = cvx.spmatrix(np.tile(ar, (n - 2, 1)), np.c_[i, i, i], np.c_[i, i - 1, i - 2], (n, n)) M = cvx.spmatrix(np.tile(ma, (n - 2, 1)), np.c_[i, i, i], np.c_[i, i - 1, i - 2], (n, n)) # spline delta_knot_s = int(round(delta_knot / delta)) spl = np.r_[np.arange(1., delta_knot_s), np.arange(delta_knot_s, 0., -1.)] # order 1 spl = np.convolve(spl, spl, 'full') spl /= max(spl) # matrix of spline regressors i = np.c_[np.arange(-(len(spl) // 2), (len(spl) + 1) // 2)] + np.r_[np.arange(0, n, delta_knot_s)] nB = i.shape[1] j = np.tile(np.arange(nB), (len(spl), 1)) p = np.tile(spl, (nB, 1)).T valid = (i >= 0) & (i < n) B = cvx.spmatrix(p[valid], i[valid], j[valid]) # trend C = cvx.matrix(np.c_[np.ones(n), np.arange(1., n + 1.) / n]) nC = C.size[1] # Solve the problem: # .5*(M*q + B*l + C*d - y)^2 + alpha*sum(A,1)*p + .5*gamma*l'*l # s.t. A*q >= 0 # old_options = cvx.solvers.options.copy() cvx.solvers.options.clear() cvx.solvers.options.update(options) if solver == 'conelp': # Use conelp z = lambda m, n: cvx.spmatrix([], [], [], (m, n)) G = cvx.sparse([[-A, z(2, n), M, z(nB + 2, n)], [z(n + 2, nC), C, z(nB + 2, nC)], [z(n, 1), -1, 1, z(n + nB + 2, 1)], [z(2 * n + 2, 1), -1, 1, z(nB, 1)], [z(n + 2, nB), B, z(2, nB), cvx.spmatrix(1.0, range(nB), range(nB))]]) h = cvx.matrix([z(n, 1), .5, .5, y, .5, .5, z(nB, 1)]) c = cvx.matrix([(cvx.matrix(alpha, (1, n)) * A).T, z(nC, 1), 1, gamma, z(nB, 1)]) res = cvx.solvers.conelp(c, G, h, dims={'l': n, 'q': [n + 2, nB + 2], 's': []}) obj = res['primal objective'] else: # Use qp Mt, Ct, Bt = M.T, C.T, B.T H = cvx.sparse([[Mt * M, Ct * M, Bt * M], [Mt * C, Ct * C, Bt * C], [Mt * B, Ct * B, Bt * B + gamma * cvx.spmatrix(1.0, range(nB), range(nB))]]) f = cvx.matrix([(cvx.matrix(alpha, (1, n)) * A).T - Mt * y, -(Ct * y), -(Bt * y)]) res = cvx.solvers.qp(H, f, cvx.spmatrix(-A.V, A.I, A.J, (n, len(f))), cvx.matrix(0., (n, 1)), solver=solver) obj = res['primal objective'] + .5 * (y.T * y) # cvx.solvers.options.clear() # cvx.solvers.options.update(old_options) l = res['x'][-nB:] d = res['x'][n:n + nC] t = B * l + C * d q = res['x'][:n] p = A * q r = M * q e = y - r - t return r, t # return r, p, t, l, d, e, obj
5d4b333c7ab99a339d20adc379ea48792e2b43aa
7,834
import torch def pulsar_from_opencv_projection( R: torch.Tensor, tvec: torch.Tensor, camera_matrix: torch.Tensor, image_size: torch.Tensor, znear: float = 0.1, ) -> torch.Tensor: """ Convert OpenCV style camera parameters to Pulsar style camera parameters. Note: * Pulsar does NOT support different focal lengths for x and y. For conversion, we use the average of fx and fy. * The Pulsar renderer MUST use a left-handed coordinate system for this mapping to work. * The resulting image will be vertically flipped - which has to be addressed AFTER rendering by the user. * The parameters `R, tvec, camera_matrix` correspond to the outputs of `cv2.decomposeProjectionMatrix`. Args: R: A batch of rotation matrices of shape `(N, 3, 3)`. tvec: A batch of translation vectors of shape `(N, 3)`. camera_matrix: A batch of camera calibration matrices of shape `(N, 3, 3)`. image_size: A tensor of shape `(N, 2)` containing the sizes of the images (height, width) attached to each camera. znear (float): The near clipping value to use for Pulsar. Returns: cameras_pulsar: A batch of `N` Pulsar camera vectors in the Pulsar convention `(N, 13)` (3 translation, 6 rotation, focal_length, sensor_width, c_x, c_y). """ return _pulsar_from_opencv_projection(R, tvec, camera_matrix, image_size, znear)
854349a4442e5e57554995439e62de74000c9b3d
7,835
def identity(gender:str = None) -> dict: """ Generates a pseudo-random identity. Optional args gender: 'm' for traditionally male, 'f' for traditionally female. Returns a dict with the following keys: name -> full name given -> given name / first name family -> family name / last name address -> well formed address (fake of course) city -> city of residence state -> state of residence zip_code -> zip code of residence (matches the city and state) phone - > a phone number with an area code from the state of residence. email -> a valid email address (fake of course) """ if gender and gender.lower() not in ["m", "f"]: raise ValueError("'gender' must be 'm' or 'f'") if gender and gender.lower() == "m": given = _pluck(MGIVEN) elif gender and gender.lower() == "f": given = _pluck(FGIVEN) else: given = _pluck(MGIVEN + FGIVEN) family = _pluck(FAMILY) email = _make_email(given, family) zip_code, city, state_code = _pluck(AREA) phone = _make_phone(state_code) address = _make_address() return dict(name=f"{given} {family}".title(), given=given.title(), family=family.title(), address=address, city=city.title(), state=state_code.upper(), zip_code=zip_code, phone=phone, email=email)
1917b6723a5bfe2c0b7477dca18f450c8a0e07c3
7,836
def matthews_corrcoef(y_true, y_pred): """Returns matthew's correlation coefficient for binary classes The Matthews correlation coefficient is used in machine learning as a measure of the quality of binary (two-class) classifications. It takes into account true and false positives and negatives and is generally regarded as a balanced measure which can be used even if the classes are of very different sizes. The MCC is in essence a correlation coefficient value between -1 and +1. A coefficient of +1 represents a perfect prediction, 0 an average random prediction and -1 an inverse prediction. The statistic is also known as the phi coefficient. [source: Wikipedia] Only in the binary case does this relate to information about true and false positives and negatives. See references below. Parameters ---------- y_true : array, shape = [n_samples] true targets y_pred : array, shape = [n_samples] estimated targets Returns ------- mcc : float matthew's correlation coefficient (+1 represents a perfect prediction, 0 an average random prediction and -1 and inverse prediction). References ---------- http://en.wikipedia.org/wiki/Matthews_correlation_coefficient http://dx.doi.org/10.1093/bioinformatics/16.5.412 """ mcc = np.corrcoef(y_true, y_pred)[0, 1] if np.isnan(mcc): return 0. else: return mcc
b4af31bac942a99fabb6f20c29ed59aa7b55e32d
7,837
import os def convert_image_link(image): """Convert an image link specification into a Markdown image link Args: image (Match): A Match object corresponding to an image link Returns: str: Markdown formatted link to the image """ image_name = str(image.group(1)) file_ext = 'jpg' if '|' in image_name: image_name, file_ext = image_name.split('|') image_link = f"![{image_name}]({os.path.join(config['media'], create_valid_filename(image_name))}.{file_ext})" return image_link
7ef746cc9d062107b4d8f6fa79d544e75ee00e9b
7,838
from typing import Dict from typing import Any import yaml def yaml_dump(dict_to_dump: Dict[str, Any]) -> str: """Dump the dictionary as a YAML document.""" return yaml.safe_dump(dict_to_dump, default_flow_style=False)
4635514ba8ff901656b8a4b5869a6ae101528fa8
7,839
def completeness(importance_matrix): """"Compute completeness of the representation.""" per_factor = completeness_per_code(importance_matrix) if importance_matrix.sum() == 0.: importance_matrix = np.ones_like(importance_matrix) factor_importance = importance_matrix.sum(axis=0) / importance_matrix.sum() return np.sum(per_factor*factor_importance)
39407833b501e974f2ddb421ecad59055153260e
7,840
def image_stat(image_id): """ Return the statistics of an image as a pd dataframe :param image_id: :return: """ counts, total_area, mean_area, std_area = {}, {}, {}, {} img_area = get_image_area(image_id) for cl in CLASSES: polygon_list = get_polygon_list(image_id, cl) counts[cl] = len(polygon_list) if len(polygon_list) > 0: total_area[cl] = np.sum([poly.area for poly in polygon_list])\ / img_area * 100. mean_area[cl] = np.mean([poly.area for poly in polygon_list])\ / img_area * 100. std_area[cl] = np.std([poly.area for poly in polygon_list])\ / img_area * 100. return pd.DataFrame({'Class': CLASSES, 'Counts': counts, 'TotalArea': total_area, 'MeanArea': mean_area, 'STDArea': std_area})
a20c1d702f983c576ab506061ab19181b90c8684
7,841
def delete_original(): """ Decorator that deletes the original Discord message upon command execution. :return: boolean """ async def predicate(ctx): if ctx.invoked_with != "help": # Don't try to delete if help command if isinstance(ctx.message.channel, discord.TextChannel): try: await ctx.message.delete() except discord.errors.NotFound as e: log.fatal(f"Unable to delete message.\n\t{e}") return True return commands.check(predicate)
08f71c271b679fb6389754c21a896e11ae6f05c0
7,842
import os def list_template_dirs(): """List names of directories containing parallel programming templates.""" dirs = [] for templates_dir in settings.TEMPLATE_DIRS: for template_dir in os.listdir(templates_dir): path = os.path.join(templates_dir,template_dir) if os.path.isdir(path): dirs.append(template_dir) return dirs
2af89460be234e3c649b1a0c67db150062299f14
7,843
def get_H_OS(): """屋根又は天井の温度差係数 (-) Args: Returns: float: 屋根又は天井の温度差係数 (-) """ adjacent_type = '外気' return get_H(adjacent_type)
cb3b68063d2c734b03d96b261e5cba23b79c3bc7
7,844
def forward_softmax(x): """ Compute softmax function for a single example. The shape of the input is of size # num classes. Important Note: You must be careful to avoid overflow for this function. Functions like softmax have a tendency to overflow when very large numbers like e^10000 are computed. You will know that your function is overflow resistent when it can handle input like: np.array([[10000, 10010, 10]]) without issues. x: A 1d numpy float array of shape number_of_classes Returns: A 1d numpy float array containing the softmax results of shape number_of_classes """ x = x - np.max(x,axis=0) exp = np.exp(x) s = exp / np.sum(exp,axis=0) return s
8c0f54294c2dc5b385466398726b67a3acd674b0
7,845
import numpy def applySpectralClusters(kmeansObj, img, imgNullVal): """ Use the given KMeans object to predict spectral clusters on a whole image array. The kmeansObj is an instance of sklearn.cluster.KMeans, as returned by fitSpectralClusters(). The img array is a numpy array of the image to predict on, of shape (nBands, nRows, nCols). Any pixels in img which have value imgNullVal will be set to SEGNULLVAL (i.e. zero) in the output cluster image. Return value is a numpy array of shape (nRows, nCols), with each element being the segment ID value for that pixel. """ # Predict on the whole image. In principle we could omit the nulls, # but it makes little difference to run time, and just adds complexity. (nBands, nRows, nCols) = img.shape # Re-organise the image data so it matches what sklearn # expects. xFull = numpy.transpose(img, axes=(1, 2, 0)) xFull = xFull.reshape((nRows*nCols, nBands)) clustersFull = kmeansObj.predict(xFull) del xFull clustersImg = clustersFull.reshape((nRows, nCols)) # Make the cluster ID numbers start from 1, and use SEGNULLVAL # (i.e. zero) in null pixels clustersImg += 1 if imgNullVal is not None: nullmask = (img == imgNullVal).any(axis=0) clustersImg[nullmask] = SEGNULLVAL return clustersImg
2b2fb5616c20c4e5d9278bf8555c888bfab80cb8
7,846
from os import path def get_config_and_project_dir(config_file: str): """Guess config file name and project dir""" if config_file is not None: config_file = path.abspath(config_file) project_dir = path.dirname(config_file) else: project_dir = find_project_dir() config_file = '{}/stakkr.yml'.format(project_dir) return config_file, project_dir
2b3e7e219c97ef504e76468512a90926c351b949
7,847
def get_config() -> ConfigParser: """ Parse the config file. :return: config """ cfg = ConfigParser() cfg.read(CONFIG_PATH) return cfg
14f9ce4719bf665d62f1a2d06c980f4e85d2b8a5
7,848
from calendra.registry import registry def iso_register(iso_code): """ Registers Calendar class as country or region in IsoRegistry. Registered country must set class variables ``iso`` using this decorator. >>> from calendra.core import Calendar >>> from calendra.registry import registry >>> from calendra.registry_tools import iso_register >>> @iso_register('MC-MR') ... class MyRegion(Calendar): ... 'My Region' Region calendar is then retrievable from registry: >>> calendar = registry.get('MC-MR') """ def wrapper(cls): registry.register(iso_code, cls) return cls return wrapper
7fcb55a37f5af948ff6be8baf797d00328f241a8
7,849
def dict_check_defaults(dd, **defaults): """Check that a dictionary has some default values Parameters ---------- dd: dict Dictionary to check **defaults: dict Dictionary of default values Example ------- .. ipython:: python @suppress from xoa.misc import dict_check_defaults dd = dict(color='blue') dict_check_defaults(dd, color='red', size=10) """ if defaults is None: defaults = {} for item in defaults.items(): dd.setdefault(*item) return dd
8edc3fdb351f7ec2d4ec3b1e788e6aa5cc0f8787
7,850
import os def construct_search_params(): """Iterates through user-defined Entrez Search settings to assemble the search parameters. Envars hold the most recent user-defined Entrez settings, such as rettype, retmax, database, etc. These settings are iterated through, and their values are returned and appended to the query. """ params = {} for setting in ev.settings_eSearch: if os.environ.get(setting[1]) != 'None': params.update({setting[0].lower(): os.environ.get(setting[1])}) return params
0c6c2fd566246d86cf9225ca2ba2404451b79d2c
7,851
def get_invested_and_worth(account): """Gets the money invested and the actual worth of an account""" data = query_indexa(f"accounts/{account}/performance") invested = data["return"]["investment"] worth = data["return"]["total_amount"] return {"invested": round(invested, 2), "worth": round(worth, 2)}
fc1542f54c8954622aff86d59d7d6fb82e63832b
7,852
def make_album(singer, name, number = ''): """Return singers' names and album""" album = {'singer': singer, 'name': name} if number: album['number'] = number return album
1f1bfaaeb501be0aa6fefd358177922246488b31
7,853
from typing import Dict from typing import List from typing import Generator def fit_ctmp_meas_mitigator(cal_data: Dict[int, Dict[int, int]], num_qubits: int, generators: List[Generator] = None) -> CTMPExpvalMeasMitigator: """Return FullMeasureErrorMitigator from result data. Args: cal_data: calibration dataset. num_qubits: the number of qubits for the calibation dataset. generators: Optional, input generator set. Returns: Measurement error mitigator object. Raises: QiskitError: if input arguments are invalid. """ if not isinstance(num_qubits, int): raise QiskitError('Number of qubits must be an int') if generators is None: generators = standard_generator_set(num_qubits) gen_mat_dict = {} for gen in generators + _supplementary_generators(generators): if len(gen[2]) > 1: mat = _local_g_matrix(gen, cal_data, num_qubits) gen_mat_dict[gen] = mat # Compute rates for generators rates = [_get_ctmp_error_rate(gen, gen_mat_dict, num_qubits) for gen in generators] return CTMPExpvalMeasMitigator(generators, rates)
2dab6ca0da19acb174b6d0e8b96c7833d5de74e8
7,854
def discounted_item(data): """ DOCSTRING: Classifies item purchases as 'Promoted' or 'Not Promoted' based on 'Item Discount' column. Also 'COD Collectibles' column gets restructured by eliminating undesired default values, like 'Online'. INPUT: > data : Only accepts Pandas DataFrame or TextParser, that has been pre-processed earlier. OUTPUT: Pandas DataFrame or TextParser with 1 additional column, i.e. 'On Promotion'. """ data["On Promotion"] = np.nan data["Phone num"] = np.nan data["COD Collectible"] = np.nan # Later again gets renamed within this func. for i,v in data["Item Discount"].iteritems(): if v != 0: data.loc[i, "On Promotion"] = "Promoted" else: data.loc[i, "On Promotion"] = "Not Promoted" # Also taking care of COD Collectible: for i,v in data["COD Collectibles"].iteritems(): if v == "Online": data.loc[i, "COD Collectible"] = 0 else: data.loc[i, "COD Collectible"] = v # Also taking care of 'Phone No.' column: for i,v in data["Phone No."].iteritems(): if v == "Online": data.loc[i, "Phone num"] = "Unavailable" else: data.loc[i, "Phone num"] = v data.drop(["COD Collectibles"], axis=1, inplace=True) data.drop(["Phone No."], axis=1, inplace=True) data.rename(columns={"COD Collectible": "COD Collectibles"}, inplace=True) data.rename(columns={"Phone num": "Phone No."}, inplace=True) return data
178c5e7d8e9c2e3bdd91d4606ea52c34c7cf099c
7,855
def NamespacedKubernetesSyncer(namespace, use_rsync=False): """Wrapper to return a ``KubernetesSyncer`` for a Kubernetes namespace. Args: namespace (str): Kubernetes namespace. use_rsync (bool): Use ``rsync`` if True or ``kubectl cp`` if False. If True, ``rsync`` will need to be installed in the Kubernetes pods for this to work. If False, ``tar`` will need to be installed instead. Returns: A ``KubernetesSyncer`` class to be passed to ``tune.run()``. Example: .. code-block:: python from ray.tune.integration.kubernetes import NamespacedKubernetesSyncer tune.run(train, sync_to_driver=NamespacedKubernetesSyncer("ray")) """ class _NamespacedKubernetesSyncer(KubernetesSyncer): _namespace = namespace _use_rsync = use_rsync return _NamespacedKubernetesSyncer
9da5a049a12a248623040c1ace79c2ebedd3400c
7,856
def _cons8_88(m8, L88, d_gap, k, Cp, h_gap): """dz constraint for edge gap sc touching 2 edge gap sc""" term1 = 2 * h_gap * L88 / m8 / Cp # conv to inner/outer ducts term2 = 2 * k * d_gap / m8 / Cp / L88 # cond to adj bypass edge return 1 / (term1 + term2)
7c48c4999ce2dd3dbdec799edd7ad441a6f66e7b
7,857
import hashlib def cache_key(path): """Return cache key for `path`.""" return 'folder-{}'.format(hashlib.md5(path.encode('utf-8')).hexdigest())
6b9afe1267e0cc0c7168bf3b0d5c7536e2b3c768
7,858
def ref_731(n): """Reference number calculator. Returns reference number calculated using 7-3-1 algorithm used in Estonian banks. :param string n: base number (client id, etc) :rtype: string """ return "%s%d" % (n,((10 - (sum(map(\ lambda l: int(n[-l])*(7,3,1)[(l-1) % 3], \ xrange(1, len(n)+1))))) % 10))
b1958511947d9f369db2547cde15222603dc0773
7,859
import traceback async def exception_as_response(e: Exception): """ Wraps an exception into a JSON Response. """ data = { "message": str(e), "traceback": "".join(traceback.TracebackException.from_exception(e).format()) } return web.json_response(data, status=500)
60f226cb7cd4c3aba3026d44d28e774928e6bbf7
7,860
def canvas_merge_union(layers, full=True, blend=canvas_compose_over): """Blend multiple `layers` into single large enough image""" if not layers: raise ValueError("can not blend zero layers") elif len(layers) == 1: return layers[0] min_x, min_y, max_x, max_y = None, None, None, None for image, offset in layers: x, y = offset w, h = image.shape[:2] if min_x is None: min_x, min_y = x, y max_x, max_y = x + w, y + h else: min_x, min_y = min(min_x, x), min(min_y, y) max_x, max_y = max(max_x, x + w), max(max_y, y + h) width, height = max_x - min_x, max_y - min_y if full: output = None for image, offset in layers: x, y = offset w, h = image.shape[:2] ox, oy = x - min_x, y - min_y image_full = np.zeros((width, height, 4), dtype=FLOAT) image_full[ox : ox + w, oy : oy + h] = image if output is None: output = image_full else: output = blend(output, image_full) else: # this is optimization for method `over` blending output = np.zeros((max_x - min_x, max_y - min_y, 4), dtype=FLOAT) for index, (image, offset) in enumerate(layers): x, y = offset w, h = image.shape[:2] ox, oy = x - min_x, y - min_y effected = output[ox : ox + w, oy : oy + h] if index == 0: effected[...] = image else: effected[...] = blend(effected, image) return output, (min_x, min_y)
ffbb3b78e908ed1e131a1f0827f2d3097415edc9
7,861
import json def exception_response(request, code=400, exception=None): """ Create a response for an exception :param request: request instance :param code: exception code :param exception: exception instance :return: exception formatted response """ code = code if code in [400, 403, 404, 500] else 400 exception_repr = get_error_msg(exception) log.error(usr=request.user, msg=f'{code} - {exception_repr}') context = dict( message=f"Error {code}", request_path=request.path, exception=exception_repr ) if is_browser(request): template = loader.get_template(f'error/{code}.html') rtn = dict( content=template.render(context, request), content_type='text/html' ) else: rtn = dict( content=json.dumps(context), content_type='application/json' ) return rtn
1ef145ea4b07557fbc31a9d5c52621e79c2b99ff
7,862
import os def get_jira_issues(jira, exclude_stories, epics_only, all_status, filename, user): """ Query Jira and then creates a status update file (either temporary or named) containing all information found from the JQL query. """ issue_types = ["Epic"] if not epics_only: issue_types.append("Initiative") if not exclude_stories: issue_types.append("Story") issue_type = "issuetype in (%s)" % ", ".join(issue_types) status = "status in (\"In Progress\")" if all_status: status = "status not in (Resolved, Closed)" if user is None: user = "currentUser()" else: user = "\"%s\"" % add_domain(user) jql = "%s AND assignee = %s AND %s" % (issue_type, user, status) vprint(jql) my_issues = jira.search_issues(jql) msg = message_header + email_to_name(os.environ['JIRA_USERNAME']) + "\n\n" f = open_file(filename) filename = f.name f.write(msg) vprint("Found issue:") for issue in my_issues: vprint("%s : %s" % (issue, issue.fields.summary)) f.write("[%s]\n" % issue) f.write("# Header: %s\n" % issue.fields.summary) f.write("# Type: %s\n" % issue.fields.issuetype) f.write("# Status: %s\n" % issue.fields.status) f.write("No updates since last week.\n\n") f.close() return filename
c495326f0790f3bec8c95a2fae1c645409100d27
7,863
def entropy(series): """Normalized Shannon Index""" # a series in which all the entries are equal should result in normalized entropy of 1.0 # eliminate 0s series1 = series[series!=0] # if len(series) < 2 (i.e., 0 or 1) then return 0 if len(series1) > 1: # calculate the maximum possible entropy for given length of input series max_s = -np.log(1.0/len(series)) total = float(sum(series1)) p = series1.astype('float')/float(total) return sum(-p*np.log(p))/max_s else: return 0.0
30f8f3cc6fed73d8cfa0b3705008891a60af028a
7,864
def spatially_whiten(X:np.ndarray, *args, **kwargs): """spatially whiten the nd-array X Args: X (np.ndarray): the data to be whitened, with channels/space in the *last* axis Returns: X (np.ndarray): the whitened X W (np.ndarray): the whitening matrix used to whiten X """ Cxx = updateCxx(None,X,None) W,_ = robust_whitener(Cxx, *args, **kwargs) X = X @ W #np.einsum("...d,dw->...w",X,W) return (X,W)
a0c9ae88e8f451378503754e4768ee554e50ed3e
7,865
from pathlib import Path import yaml def get_settings(basename: str="settings.yml", path: Path=PROJECT_ROOT / "conf") -> dict: """ Loads settings file Args: basename (str, optional): Basename of settings file. Defaults to "settings.yml". path (Path, optional): Path of settings file. Defaults to PROJECT_ROOT/"conf". Raises: exc: Yaml load exception Returns: dict: settings """ with open(str(path / basename), 'r') as stream: try: settings = yaml.safe_load(stream) except yaml.YAMLError as exc: raise exc return settings
2317f9fbd125a16a7c34086d35b02973f1be5d8f
7,866
import torch def quaternion2rotationPT( q ): """ Convert unit quaternion to rotation matrix Args: q(torch.tensor): unit quaternion (N,4) Returns: torch.tensor: rotation matrix (N,3,3) """ r11 = (q[:,0]**2+q[:,1]**2-q[:,2]**2-q[:,3]**2).unsqueeze(0).T r12 = (2.0*(q[:,1]*q[:,2]-q[:,0]*q[:,3])).unsqueeze(0).T r13 = (2.0*(q[:,1]*q[:,3]+q[:,0]*q[:,2])).unsqueeze(0).T r21 = (2.0*(q[:,1]*q[:,2]+q[:,0]*q[:,3])).unsqueeze(0).T r22 = (q[:,0]**2+q[:,2]**2-q[:,1]**2-q[:,3]**2).unsqueeze(0).T r23 = (2.0*(q[:,2]*q[:,3]-q[:,0]*q[:,1])).unsqueeze(0).T r31 = (2.0*(q[:,1]*q[:,3]-q[:,0]*q[:,2])).unsqueeze(0).T r32 = (2.0*(q[:,2]*q[:,3]+q[:,0]*q[:,1])).unsqueeze(0).T r33 = (q[:,0]**2+q[:,3]**2-q[:,1]**2-q[:,2]**2).unsqueeze(0).T r = torch.cat( (r11,r12,r13, r21,r22,r23, r31,r32,r33), 1 ) r = torch.reshape( r, (q.shape[0],3,3)) return r
feeed764ee179b31674790f9d2afc7b606a02aef
7,867
def _expand_and_tile(tensor, multiple, dim=0, name=None): """Slice `tensor` shape in 2, then tile along the sliced dimension. A new dimension is inserted in shape of `tensor` before `dim`, then values are tiled `multiple` times along the new dimension. Args: tensor: Input `Tensor` or `SparseTensor`. multiple: Integer, number of times to tile. dim: Integer, dimension along which to tile. name: Name of operation. Returns: `Tensor` result of expanding and tiling `tensor`. Raises: ValueError: if `multiple` is less than 1, or `dim` is not in `[-rank(tensor), rank(tensor)]`. """ if multiple < 1: raise ValueError(f'Invalid argument multiple={multiple} for ' 'expand_and_tile call. `multiple` must be an integer > 0') with ops.name_scope(name, 'expand_and_tile', (tensor, multiple, dim)) as scope: # Sparse. tensor = sparse_tensor.convert_to_tensor_or_sparse_tensor(tensor) if isinstance(tensor, sparse_tensor.SparseTensor): if dim < 0: expand_dims = array_ops.reshape( array_ops.size(tensor.dense_shape) + dim, [1]) else: expand_dims = [dim] expanded_shape = array_ops.concat( (array_ops.slice(tensor.dense_shape, [0], expand_dims), [1], array_ops.slice(tensor.dense_shape, expand_dims, [-1])), 0, name='expanded_shape') expanded = sparse_ops.sparse_reshape( tensor, shape=expanded_shape, name='expand') if multiple == 1: return expanded return sparse_ops.sparse_concat( dim - 1 if dim < 0 else dim, [expanded] * multiple, name=scope) # Dense. expanded = array_ops.expand_dims( tensor, dim if (dim >= 0) else (dim - 1), name='expand') if multiple == 1: return expanded ones = array_ops.ones_like(array_ops.shape(tensor)) tile_multiples = array_ops.concat( (ones[:dim], (multiple,), ones[dim:]), 0, name='multiples') return array_ops.tile(expanded, tile_multiples, name=scope)
aa9840fdaee56fee19937c8f632c72628fbd3995
7,868
import random def eval_model(opt, print_parser=None): """Evaluates a model. :param opt: tells the evaluation function how to run :param bool print_parser: if provided, prints the options that are set within the model after loading the model :return: the final result of calling report() """ random.seed(42) # load model and possibly print opt agent = create_agent(opt, requireModelExists=True) if print_parser: # show args after loading model print_parser.opt = agent.opt print_parser.print_args() tasks = opt['task'].split(',') reports = [] for task in tasks: task_report = _eval_single_world(opt, agent, task) reports.append(task_report) report = aggregate_task_reports( reports, tasks, micro=opt.get('aggregate_micro', True) ) # print announcements and report print_announcements(opt) print( '[ Finished evaluating tasks {} using datatype {} ]'.format( tasks, opt.get('datatype', 'N/A') ) ) print(report) return report
153dbead7ebd37ba2f61d745bc499f9eddfa0d03
7,869
def login(request): """ Login with Dummy Test Account. """ if 'user' in request.environ['beaker.session']: return app.redirect('index') users.store_to_session(request, users.create()) return app.redirect('index')
0f2d06e7a6ac2fed0daee73e4c2e216012452e08
7,870
def safe_plus(x,y): """ Handle "x + y" where x and y could be some combination of ints and strs. """ # Handle Excel Cell objects. Grrr. if excel.is_cell_dict(x): x = x["value"] if excel.is_cell_dict(y): y = y["value"] # Handle NULLs. if (x == "NULL"): x = 0 if (y == "NULL"): y = 0 # Easy case first. if ((isinstance(x, int) or isinstance(x, float)) and (isinstance(y, int) or isinstance(y, float))): return x + y # Fix data types. if (isinstance(y, str)): # NULL string in VB. if (x == 0): x = "" # String concat. return str(x) + y if (isinstance(x, str)): # NULL string in VB. if (y == 0): y = "" # String concat. return x + str(y) # Punt. We are not doing pure numeric addition and # we have already handled string concatenation. Just # convert things to strings and hope for the best. return str(x) + str(y)
e3f5e43ee3e083669d0b744c7fb46a4ae62b4eec
7,871
import types def full_like(a, fill_value, dtype=types.float32, split=None, device=None, comm=None, order="C"): """ Return a full array with the same shape and type as a given array. Parameters ---------- a : object The shape and data-type of 'a' define these same attributes of the returned array. fill_value : scalar Fill value. dtype : ht.dtype, optional Overrides the data type of the result. split: int, optional The axis along which the array is split and distributed, defaults to None (no distribution). device : str, ht.Device or None, optional Specifies the device the tensor shall be allocated on, defaults to None (i.e. globally set default device). comm: Communication, optional Handle to the nodes holding distributed parts or copies of this tensor. Returns ------- out : ht.DNDarray Array of fill_value with the same shape and type as a. Examples -------- >>> x = ht.zeros((2, 3,)) >>> x tensor([[0., 0., 0.], [0., 0., 0.]]) >>> ht.full_like(a, 1.0) tensor([[1., 1., 1.], [1., 1., 1.]]) """ return __factory_like(a, dtype, split, full, device, comm, fill_value=fill_value, order=order)
4a615e493ae20d925eeda7c0bd6ca9508c338bc2
7,872
from glob import glob


def gather_pulled_downloads(input_dir, output_dir):
    """
    Gather MPEG stream files from input_dir into a single MP4 file in output_dir
    """
    dash_globstr = f"{input_dir.absolute() / '*.dash'}"
    dash_glob = glob(dash_globstr)
    if len(dash_glob) < 1:
        raise ValueError(f"No dash file found in {input_dir}")
    elif len(dash_glob) > 1:
        raise ValueError(f"Multiple dash files found in {input_dir}")
    else:
        dash_file = dash_glob[0]
    m4s_globstr = f"{input_dir.absolute() / '*.m4s'}"
    m4s_files = sorted(glob(m4s_globstr))
    output_mp4 = output_dir.absolute() / "output.mp4"
    gather_m4s_to_mp4(dash_file, m4s_files, output_mp4)
    return output_mp4
a1b9c334ab717292db006666bdcdd0749b2620d7
7,873
import functools
from multiprocessing import Pool, cpu_count

import numpy as np


def Parallelize(ListIn, f, procs=-1, **kwargs):
    """This function packages the "starmap" function in multiprocessing, to allow multiple iterable inputs for the parallelized function.

    Parameters
    ----------
    ListIn: list
        each item in the list is a tuple of non-keyworded arguments for f.
    f : func
        function to be parallelized. Signature must not contain any other non-keyworded arguments other than those passed as iterables.

    Example:

    .. highlight:: python
    .. code-block:: python

        def multiply(x, y, factor = 1.0):
            return factor*x*y

        X = np.linspace(0,1,1000)
        Y = np.linspace(1,2,1000)
        XY = [ (x, Y[i]) for i, x in enumerate(X)] # List of tuples
        Z = Parallelize(XY, multiply, factor = 3.0, procs = 8)

    Create as many positional arguments as required, but all must be packed into a list of tuples.
    """
    if type(ListIn[0]) != tuple:
        ListIn = [(ListIn[i],) for i in range(len(ListIn))]

    reduced_argfunc = functools.partial(f, **kwargs)

    if procs == -1:
        opt_procs = int(np.interp(len(ListIn),
                                  [1, 100, 500, 1000, 3000, 5000, 10000],
                                  [1, 2, 4, 8, 12, 36, 48]))
        procs = min(opt_procs, cpu_count())

    if procs == 1:
        OutList = [reduced_argfunc(*ListIn[iS]) for iS in range(len(ListIn))]
    else:
        p = Pool(processes=procs)
        OutList = p.starmap(reduced_argfunc, ListIn)
        p.close()
        p.join()

    return OutList
84fc1509c96c7bf765246e46983f2fa01745f4b2
7,874
def method_not_found(e): """ Custom response for methods not allowed for the requested URLs :param e: Exception :return: """ return response('failed', 'The method is not allowed for the requested URL', 405)
18a48d53d602c1a90017e3f00adc75c4c33479b5
7,875
def get_total_trainsets(df_anual_data, segments):
    """
    Fill the training_sets dict with one entry per (segment, month, working-day code).

    :param df_anual_data: annual dataframe with ID_SEGMENT, FECHA and COD_LABORALIDAD columns
    :param segments: iterable of segment ids to process
    :return: dict with keys 'ID_SEGMENT', 'MES', 'COD_LABORALIDAD' and 'TRAINING_SET'
    """
    rows_per_day = int(((60 / 15) * 24))
    training_sets = {'ID_SEGMENT': [], 'MES': [], 'COD_LABORALIDAD': [], 'TRAINING_SET': []}

    for seg_id in segments:  # 1) Partition anual_data by segment
        df_seg = df_anual_data.loc[df_anual_data.ID_SEGMENT == seg_id]

        for month_i in df_seg.FECHA.dt.month.unique():  # 2) Split the monthly data into 12 datasets
            df_month_seg = df_seg.loc[df_seg.FECHA.dt.month == month_i]

            for code_i in df_month_seg.COD_LABORALIDAD.unique():  # 3) Partition by days with the same working-day code
                df_month_seg_code = df_month_seg.loc[df_month_seg.COD_LABORALIDAD == code_i]

                # Fill training_sets dictionary
                training_sets['ID_SEGMENT'].append(seg_id)
                training_sets['MES'].append(month_i)
                training_sets['COD_LABORALIDAD'].append(code_i)
                training_sets['TRAINING_SET'].append(df_month_seg_code)

    return training_sets
968c3af1fdba5eb759eb93618ed48e3ca3ce5223
7,876
import numpy as np


def uni2diff(u):
    """Convert speed and angular rate to wheel speeds."""
    v = u[0]
    omega = u[1]
    # ELL is expected to be a module-level constant (the wheel separation).
    v_L = v - ELL / 2 * omega
    v_R = v + ELL / 2 * omega
    return np.array([v_L, v_R])
83b743758aa7a549eda9067843a03eb57efde523
7,877
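For context, a minimal usage sketch of the conversion above; ELL is a module-level constant in the original, so the 0.5 m wheel separation used here is purely illustrative.

import numpy as np

ELL = 0.5  # hypothetical wheel separation in metres (module constant in the original)

u = np.array([1.0, 0.4])        # forward speed 1.0 m/s, yaw rate 0.4 rad/s
v_L = u[0] - ELL / 2 * u[1]     # 1.0 - 0.25 * 0.4 = 0.9
v_R = u[0] + ELL / 2 * u[1]     # 1.0 + 0.25 * 0.4 = 1.1
print(np.array([v_L, v_R]))     # [0.9 1.1]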
import re


def extract_service_and_module(repo_url):
    """Extract service and module from repository url.

    :param str repo_url: repository url
    :return (service, module)
    :rtype (str, str)
    """
    m = re.match(r'.+[/@]([^\.]+\.[^\.]+)[:/]([^/]+/[^/]+)\.git/?$', repo_url)
    if not m:
        m = re.match(r'.+[/@]([^\.]+\.[^\.]+)[:/]([^/]+/[^/]+)/?$', repo_url)
        if not m:
            raise Exception(
                'cannot detect service and module from {}'.format(repo_url))
    service = m.group(1)
    module = m.group(2)
    if service not in _pull_request_url.keys():
        raise Exception(
            'service not supported: {}'.format(service))
    return (service, module)
eafe6cf39fc2fe4153830c491147633fb07f95dd
7,878
import numpy


def triangular(left, mode, right, size=None):
    """Triangular distribution.

    Draw samples from the triangular distribution over the interval [left, right].

    For full documentation refer to :obj:`numpy.random.triangular`.

    Limitations
    -----------
    Parameter ``left``, ``mode`` and ``right`` are supported as scalar.
    Otherwise, :obj:`numpy.random.triangular(left, mode, right, size)` samples are drawn.
    Output array data type is :obj:`dpnp.float64`.

    Examples
    --------
    Draw samples from the distribution:

    >>> s = dpnp.random.triangular(-3, 0, 8, 1000000)
    """
    if not use_origin_backend(left):
        # TODO:
        # array_like of floats for `left`, `mode`, `right`.
        if not dpnp.isscalar(left):
            pass
        elif not dpnp.isscalar(mode):
            pass
        elif not dpnp.isscalar(right):
            pass
        elif left > mode:
            pass
        elif mode > right:
            pass
        elif left == right:
            pass
        else:
            return dpnp_rng_triangular(left, mode, right, size).get_pyobj()

    return call_origin(numpy.random.triangular, left, mode, right, size)
c92d1461fe9052648e0b141e8b962f350e1d23b4
7,879
def format_hexa(value: str) -> ColorBytes: """ Examples: "bda" => (187, 221, 170, 255) "4fcd" => (68, 255, 204, 221) "60B0C4" => (96, 176, 196, 255) "2BEA40D0" => (43, 234, 64, 208) """ if len(value) in {3, 4}: expanded_color = ''.join(s * 2 for s in value) else: expanded_color = value length = len(expanded_color) if length in {6, 8}: hex_parts = [expanded_color[i:(i + 2)] for i in range(0, length, 2)] return format_color_bytes([int(v, 16) for v in hex_parts]) else: raise ValueError(value)
4865e1498ed87c933160e5666adcc41b45162fdd
7,880
def normalize_community_features(features): """ This performs TF-IDF-like normalization of community embedding features. Introduced in: Tang, L., Wang, X., Liu, H., & Wang, L. (2010, July). A multi-resolution approach to learning with overlapping communities. In Proceedings of the First Workshop on Social Media Analytics (pp. 14-22). ACM. Input: - X in R^(nxC_n): The community indicator matrix. Output: - X_norm in R^(nxC_n): The tf-idf + row normalized community indicator matrix. """ # Calculate inverse document frequency. features = normalize_columns(features) # Normalize each row of term frequencies to 1 features = normalize_rows(features) return features
9f5018ad3e20810d2bb66443bac4c2f7f6359d0f
7,881
from rdflib import BNode, URIRef


def __extend_prefixed(pu):
    """Expand a prefixed name into a full URI.

    :param pu: a prefixed name such as 'prefix:local'
    :return: the expanded :class:`URIRef`, or a :class:`BNode` if the prefix is unknown
    """
    parts = pu.split(':')
    if len(parts) == 1:
        parts = ('', parts[0])
    try:
        return URIRef(_prefixes[parts[0]] + parts[1])
    except KeyError:
        return BNode(pu)
d6e5c25c94e8b3252d8b0925e8d37747caceebdd
7,882
def angle(u: Vec, v: Vec) -> float:
    """
    Returns the cosine of the angle between two vectors u and v

    :param u: (Vec) vector u
    :param v: (Vec) vector v
    :return: The dot product scaled by both lengths, i.e. the cosine of the angle between u and v
    """
    if u.is_zero or v.is_zero:
        raise ValueError("The angle involving a zero vector is undefined")
    l_u = u.length
    l_v = v.length
    return u.dot(v) / (l_u * l_v)
6c79390af1ed38fc1a99006165684234dabb0b4a
7,883
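To illustrate the interface `angle` relies on, here is a minimal, hypothetical `Vec` exposing the `is_zero`, `length`, and `dot` members the function expects; with `angle` defined as above in the same module, the calls below print 0.0 and 1.0.

import math
from dataclasses import dataclass

@dataclass
class Vec:
    x: float
    y: float

    @property
    def is_zero(self) -> bool:
        return self.x == 0 and self.y == 0

    @property
    def length(self) -> float:
        return math.hypot(self.x, self.y)

    def dot(self, other: "Vec") -> float:
        return self.x * other.x + self.y * other.y

print(angle(Vec(1, 0), Vec(0, 1)))  # perpendicular vectors -> 0.0
print(angle(Vec(2, 0), Vec(5, 0)))  # parallel vectors -> 1.0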
def find_most_similar(top_k, probs, cache_dict, num=10):
    """Return the file names of the `num` most similar images.

    If similar images are found, return a list of matching (filename, score)
    tuples; otherwise return an empty list.

    top_k     : list of indices of the best classes
    probs     : probabilities corresponding to the best class indices
    cache_dict: cached indices and probabilities, keyed by file name
    num       : number of closest matches to return
    """
    similar = []
    for filename in cache_dict:
        score = 0
        count = 0
        other_top_k, other_probs = cache_dict[filename]
        for i, t in enumerate(top_k):
            if t in other_top_k:
                prob = probs[i]
                other_prob = other_probs[other_top_k.tolist().index(t)]
                score += abs(prob - other_prob)
                count += 1
        if count > 0:
            score = score / count
            similar.append((filename, score))
    if similar:
        similar.sort(key=lambda item: item[1])  # sort by score in ascending order
        return similar[:num]
    return similar
471083e1ed2b0fadb98cafad64d314ba779aa9e6
7,884
def load(url_or_handle, allow_unsafe_formats=False, cache=None, **kwargs):
    """Load a file.

    File format is inferred from url. File retrieval strategy is inferred from
    URL. Returned object type is inferred from url extension.

    Args:
      url_or_handle: a (reachable) URL, or an already open file handle
      allow_unsafe_formats: set to True to allow loading unsafe formats (e.g. pickles)
      cache: whether to attempt caching the resource. Defaults to True only if
        the given URL specifies a remote resource.

    Raises:
      RuntimeError: If file extension or URL is not supported.
    """
    # handle lists of URLs in a performant manner
    if isinstance(url_or_handle, (list, tuple)):
        return _load_urls(url_or_handle, cache=cache, **kwargs)

    ext, decompressor_ext = _get_extension(url_or_handle)
    try:
        ext = ext.lower()
        if ext in loaders:
            loader = loaders[ext]
        elif ext in unsafe_loaders:
            if not allow_unsafe_formats:
                raise ValueError(f"{ext} is considered unsafe, you must explicitly allow its use by passing allow_unsafe_formats=True")
            loader = unsafe_loaders[ext]
        else:
            raise KeyError(f'no loader found for {ext}')
        decompressor = decompressors[decompressor_ext] if decompressor_ext is not None else nullcontext
        message = "Using inferred loader '%s' due to passed file extension '%s'."
        log.debug(message, loader.__name__[6:], ext)
        return load_using_loader(url_or_handle, decompressor, loader, cache, **kwargs)

    except KeyError:
        log.warning("Unknown extension '%s', attempting to load as image.", ext)
        try:
            with read_handle(url_or_handle, cache=cache) as handle:
                result = _load_img(handle)
        except Exception as e:
            message = "Could not load resource %s as image. Supported extensions: %s"
            log.error(message, url_or_handle, list(loaders))
            raise RuntimeError(message % (url_or_handle, list(loaders)))
        else:
            log.info("Unknown extension '%s' successfully loaded as image.", ext)
            return result
89fec9684dda24b6b16b63e6b478bc4f67d00f07
7,885
import time

import torch


def evaluate_full_batch(model, minibatch, mode='val'):
    """
    Full batch evaluation: for validation and test sets only.
    When calculating the F1 score, we will mask the relevant root nodes.
    """
    time_s = time.time()
    loss, preds, labels = model.eval_step(*minibatch.one_batch(mode=mode))
    torch.cuda.synchronize()
    time_e = time.time()
    node_val_test = minibatch.node_val if mode == 'val' else minibatch.node_test
    f1_scores = calc_f1(to_numpy(labels[node_val_test]), to_numpy(preds[node_val_test]), model.sigmoid_loss)
    # node_test=minibatch.node_test
    # f1_test=calc_f1(to_numpy(labels[node_test]),to_numpy(preds[node_test]),model.sigmoid_loss)
    # printf(' ******TEST: loss = {:.4f}\tmic = {:.4f}\tmac = {:.4f}'.format(loss,f1_test[0],f1_test[1]),style='yellow')
    del labels
    del preds
    return loss, f1_scores[0], f1_scores[1], time_e - time_s
b1d183118b304edf6f076caefa2b1160316ad92c
7,886
from operator import mul
from operator import inv


def berlekamp_massey(s):
    """Given a sequence of LFSR outputs, find the coefficients of the LFSR."""
    C, B, L, m, b = [1], [1], 0, 1, 1
    for n in range(len(s)):
        d = s[n]
        for i in range(1, L + 1):
            d ^= mul(C[i], s[n - i])
        if d == 0:
            m += 1
        else:
            T = list(C)
            while len(C) <= len(B) + m:
                C += [0]
            t = mul(d, inv(b))
            for i in range(len(B)):
                C[i + m] ^= mul(t, B[i])
            if 2 * L <= n:
                L, B, b, m = n + 1 - L, T, d, 1
            else:
                m += 1
    return C[0:L + 1]
351f52dce7e4a95b986cc169f380347f317f851a
7,887
import json
import sys


def vocab_from_file(vocabfile):
    """ Generates vocabulary from a vocabulary file in JSON
        Outputs vocabulary and inverted vocabulary
    """
    with smart_open(vocabfile, 'r') as f:
        inv_vocab = json.loads(f.read())
    vocabulary = {}
    for no, word in enumerate(inv_vocab):
        vocabulary[word] = no
    print('Vocabulary size = %d' % len(vocabulary), file=sys.stderr)
    return vocabulary, inv_vocab
a75fab1e94f5091aa691ad2ab188f0ada406f6ea
7,888
from typing import Optional

import tensorflow as tf


def sample(image: type_alias.TensorLike,
           warp: type_alias.TensorLike,
           resampling_type: ResamplingType = ResamplingType.BILINEAR,
           border_type: BorderType = BorderType.ZERO,
           pixel_type: PixelType = PixelType.HALF_INTEGER,
           name: Optional[str] = None) -> tf.Tensor:
    """Samples an image at user defined coordinates.

    Note:
      The warp maps target to source. In the following, A1 to An are optional
      batch dimensions.

    Args:
      image: A tensor of shape `[B, H_i, W_i, C]`, where `B` is the batch size,
        `H_i` the height of the image, `W_i` the width of the image, and `C` the
        number of channels of the image.
      warp: A tensor of shape `[B, A_1, ..., A_n, 2]` containing the x and y
        coordinates at which sampling will be performed. The last dimension must
        be 2, representing the (x, y) coordinate where x is the index for width
        and y is the index for height.
      resampling_type: Resampling mode. Supported values are
        `ResamplingType.NEAREST` and `ResamplingType.BILINEAR`.
      border_type: Border mode. Supported values are `BorderType.ZERO` and
        `BorderType.DUPLICATE`.
      pixel_type: Pixel mode. Supported values are `PixelType.INTEGER` and
        `PixelType.HALF_INTEGER`.
      name: A name for this op. Defaults to "sample".

    Returns:
      Tensor of sampled values from `image`. The output tensor shape is
      `[B, A_1, ..., A_n, C]`.

    Raises:
      ValueError: If `image` has rank != 4. If `warp` has rank < 2 or its last
      dimension is not 2. If `image` and `warp` batch dimension does not match.
    """
    with tf.name_scope(name or "sample"):
        image = tf.convert_to_tensor(image, name="image")
        warp = tf.convert_to_tensor(warp, name="warp")
        shape.check_static(image, tensor_name="image", has_rank=4)
        shape.check_static(
            warp,
            tensor_name="warp",
            has_rank_greater_than=1,
            has_dim_equals=(-1, 2))
        shape.compare_batch_dimensions(
            tensors=(image, warp), last_axes=0, broadcast_compatible=False)

        if pixel_type == PixelType.HALF_INTEGER:
            warp -= 0.5

        if resampling_type == ResamplingType.NEAREST:
            warp = tf.math.round(warp)

        if border_type == BorderType.DUPLICATE:
            image_size = tf.cast(tf.shape(image)[1:3], dtype=warp.dtype)
            height, width = tf.unstack(image_size, axis=-1)
            warp_x, warp_y = tf.unstack(warp, axis=-1)
            warp_x = tf.clip_by_value(warp_x, 0.0, width - 1.0)
            warp_y = tf.clip_by_value(warp_y, 0.0, height - 1.0)
            warp = tf.stack((warp_x, warp_y), axis=-1)

        return tfa_image.resampler(image, warp)
c9a202a6415d13bddac38cfa75e280ad45f1bda6
7,889
def normalize_parameter(kv): """ Translate a parameter into standard form. """ (k, v) = kv if k[0] == 'requiressl' and v in ('1', True): k[0] = 'sslmode' v = 'require' elif k[0] == 'dbname': k[0] = 'database' elif k[0] == 'sslmode': v = v.lower() return (tuple(k),v)
933ea71f452a16c1d4ae2630d6b58a92da1cbec0
7,890
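A few illustrative calls, assuming `normalize_parameter` is in scope; note that the keyword path `k` must be a mutable single-element list, since the function rewrites `k[0]` in place.

print(normalize_parameter((['requiressl'], '1')))     # (('sslmode',), 'require')
print(normalize_parameter((['dbname'], 'mydb')))      # (('database',), 'mydb')
print(normalize_parameter((['sslmode'], 'REQUIRE')))  # (('sslmode',), 'require')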
import numpy as Num


def pulse_broadening(DM, f_ctr):
    """
    pulse_broadening(DM, f_ctr):
        Return the approximate pulse broadening (tau) in ms due to scattering
        based on the rough relation in Cordes' 'Pulsar Observations I' paper.
        'f_ctr' should be in MHz.  The approximate error is 0.65 in log(tau).
    """
    logDM = Num.log10(DM)
    return 10.0**(-3.59 + 0.129*logDM + 1.02*logDM**2.0 -
                  4.4*Num.log10(f_ctr/1000.0))/1000.0
48830e02774247e551605e5e8ad693ece68634ad
7,891
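A quick worked call, assuming `pulse_broadening` is importable; for DM = 100 pc cm^-3 at 1400 MHz the relation evaluates to roughly 1.3e-03, keeping in mind the quoted ~0.65 dex uncertainty in log(tau).

tau = pulse_broadening(100.0, 1400.0)
print(f"tau = {tau:.3g}")  # roughly 1.3e-03 for these inputs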
from pathlib import Path

from amset.tools.wavefunction import wave
from click.testing import CliRunner


def generate_wavefunction_coefficients(dir_name: str):
    """
    Generate wavefunction.h5 file using amset.

    Parameters
    ----------
    dir_name : str
        Directory containing WAVECAR and vasprun.xml files (can be gzipped).

    Returns
    -------
    dict
        A dictionary with the keys:

        - "dir_name" (str): containing the directory where the wavefunction.h5
          file was generated.
        - "log" (str): The output log from ``amset wave``.
        - "ibands" (Tuple[List[int], ...]): The bands included in the
          wavefunction.h5 file. Given as a tuple of one or two lists (one for
          each spin channel). The bands indices are zero indexed.
    """
    dir_name = strip_hostname(dir_name)  # TODO: Handle hostnames properly.

    fc = FileClient()
    files = fc.listdir(dir_name)
    vasprun_file = Path(dir_name) / get_zfile(files, "vasprun.xml")
    wavecar_file = Path(dir_name) / get_zfile(files, "WAVECAR")

    # wavecar can't be gzipped, so copy it to current directory and unzip it
    fc.copy(wavecar_file, wavecar_file.name)
    fc.gunzip(wavecar_file.name)

    args = ["--wavecar=WAVECAR", f"--vasprun={vasprun_file}"]
    runner = CliRunner()
    result = runner.invoke(wave, args, catch_exceptions=False)

    ibands = _extract_ibands(result.output)

    # remove WAVECAR from current directory
    fc.remove("WAVECAR")

    return {"dir_name": str(Path.cwd()), "log": result.output, "ibands": ibands}
0119d18436373753d9e2a1202f185c983fa83963
7,892
def logged_in_student(browser, override_allowed_hosts, base_test_data): """ Fixture for a logged-in student user Returns: User: User object """ return LoginPage(browser).log_in_via_admin(base_test_data.student_user, DEFAULT_PASSWORD)
4513e8c8356cd8fbd643e9018e1019c5ec403bcd
7,893
import numpy as np
from torch.utils.data import DataLoader  # assuming PyTorch's DataLoader, matching the per-batch dict access below
from tqdm import tqdm


def test_model(data_set=None,
               langider=None,
               lang_to_idx=None,
               ) -> np.ndarray:
    """
    Tests a given langid.py model on the given data set.

    :param data_set: data set to test on
    :param langider: model to test
    :param lang_to_idx: mapping of languages to ids
    """
    langs = data_set.get_tag_set()
    pred_prob = np.zeros((len(data_set), len(langs) + 1))
    dataloader = DataLoader(data_set)
    for i, elem in enumerate(tqdm(dataloader)):
        text = elem['text'][0]
        label = elem['label'][0]
        ranking = langider.rank(text)
        for lang, prob in ranking:
            pred_prob[i, lang_to_idx[lang]] = prob
        pred_prob[i, len(langs)] = lang_to_idx[label]
    return pred_prob
2b955b637a289d0596c0584ac0761d8014e27e86
7,894
from urllib.request import urlopen

from bs4 import BeautifulSoup


def search(querry, lim=5):
    """Search the querry in youtube and return lim number of results.

    querry is the keyword, i.e. the name of the song.
    lim is the number of songs that will be added to the video array and returned.
    """
    # `video` and `urls` are module-level result lists, and `scan_video` is a
    # module-level helper that extracts metadata for a single video URL.

    # Replace all the spaces with +
    querry = querry.replace(' ', '+')

    url = "https://www.youtube.com/results?search_query={}".format(querry)

    response = urlopen(url)
    html = response.read()

    soup = BeautifulSoup(html, "html.parser")

    count = 0
    for vid in soup.findAll(attrs={'class': 'yt-uix-tile-link'}):
        if lim == count:
            break

        url = vid['href']
        data = scan_video(url)

        if not data:
            break

        video.append(data)
        urls.append(url)
        count += 1

    return (video, urls)
92771e6aaa88ea65034981ff0c0c3b203addacec
7,895
import numpy as np


def calculate_center_of_mass(symbols, coordinates):
    """Calculate the center of mass of a molecule.

    The center of mass is weighted by each atom's weight.

    Parameters
    ----------
    symbols : list
        A list of elements for the molecule
    coordinates : np.ndarray
        The coordinates of the molecule.

    Returns
    -------
    center_of_mass: np.ndarray
        The center of mass of the molecule.

    Notes
    -----
    The center of mass is calculated with the formula

    .. math:: \\vec{R}=\\frac{1}{M} \\sum_{i=1}^{n} m_{i}\\vec{r_{i}}
    """
    total_mass = calculate_molecular_mass(symbols)

    center_of_mass = np.array([0.0, 0.0, 0.0])

    for atom_number in range(len(symbols)):
        atom_type = symbols[atom_number]
        mass_of_atom = atomic_weights[atom_type]
        atom_position = coordinates[atom_number]
        center_of_mass += mass_of_atom * atom_position

    center_of_mass = center_of_mass / total_mass

    return center_of_mass
34a32e86c42875db59ad9d1bd1a6801d6fd51eb1
7,896
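A small self-contained sketch of a call; `atomic_weights` and `calculate_molecular_mass` are simplified stand-ins here for the module's own versions, and `calculate_center_of_mass` is assumed to be defined as above.

import numpy as np

atomic_weights = {'O': 15.999, 'H': 1.008}   # minimal stand-in mass table

def calculate_molecular_mass(symbols):
    return sum(atomic_weights[s] for s in symbols)

symbols = ['O', 'H', 'H']                     # water
coordinates = np.array([[0.0, 0.0, 0.0],
                        [0.76, 0.59, 0.0],
                        [-0.76, 0.59, 0.0]])
print(calculate_center_of_mass(symbols, coordinates))  # ~[0. 0.066 0.], close to the oxygen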
def __iadd__(self, other): """Pythonic use of concat Example: xs += ys Returns self.concat(self, other)""" return self.concat(self, other)
713980aed9713c2882a19ae9837315a431611bbc
7,897
from gensim.utils import simple_preprocess


def remove_stopwords(texts, stop_words):
    """
    Remove stopwords from a collection of documents.

    :param texts: processed texts from the main module
    :param stop_words: collection of stopwords to drop
    :return: tokenized texts with the stopwords removed
    """
    return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts]
9e2f4bcf87886a35c3877de34d6746942af19065
7,898
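A short usage sketch, assuming `remove_stopwords` is in scope and gensim is installed; `simple_preprocess` lowercases and tokenizes each document before the stopword filter is applied.

stop_words = {'the', 'of', 'and'}
texts = ["The rise of deep learning", "Graphs and networks"]
print(remove_stopwords(texts, stop_words))
# [['rise', 'deep', 'learning'], ['graphs', 'networks']]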
import json


def dumps(obj):
    """Output json with formatting edits + object handling."""
    return json.dumps(obj, indent=4, sort_keys=True, cls=CustomEncoder)
a9ad97c589a8f610d3186723566420604d99f4de
7,899