Dataset schema: content (string, 35 to 762k chars), sha1 (string, 40 chars), id (int64, 0 to 3.66M)
import json


def load_config(config_file):
    """Load a JSON configuration file.

    :param config_file: path to the configuration file
    :return: the parsed configuration
    """
    with open(config_file, encoding='UTF-8') as f:
        return json.load(f)
85bab8a60e3abb8af56b0ae7483f2afe992d84b4
26,334
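A minimal usage sketch for the load_config snippet above; the temp-file path and config keys are hypothetical:

import json
import os
import tempfile

# Write a throwaway config file (hypothetical content), then load it back.
path = os.path.join(tempfile.mkdtemp(), "settings.json")
with open(path, "w", encoding="UTF-8") as f:
    json.dump({"host": "localhost", "port": 8080}, f)

config = load_config(path)
print(config["port"])  # -> 8080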
def decode(var, encoding):
    """
    If not already unicode, decode it.
    """
    if PY2:
        if isinstance(var, unicode):
            ret = var
        elif isinstance(var, str):
            if encoding:
                ret = var.decode(encoding)
            else:
                ret = unicode(var)
        else:
            ret = unicode(var)
    else:
        ret = str(var)
    return ret
da59232e9e7715c5c1e87fde99f19997c8e1e890
26,335
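A small sketch of the decode helper above on the Python 3 path, assuming the module-level PY2 flag is False; note the bytes quirk called out in the comments:

PY2 = False  # module-level flag assumed by decode()

print(decode(3.14, None))   # non-text input goes through str(): '3.14'
print(decode("abc", None))  # already-text input is returned as str: 'abc'
# Caveat: on Python 3 the PY2 branch is skipped, so bytes are not actually
# decoded; decode(b'hi', 'utf-8') returns "b'hi'" (the repr), which is worth
# knowing before reusing this helper.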
def vehicle_emoji(veh):
    """Maps a vehicle type id to an emoji

    :param veh: vehicle type id
    :return: vehicle type emoji
    """
    if veh == 2:
        return u"\U0001F68B"  # tram car
    elif veh == 6:
        return u"\U0001f687"  # metro
    elif veh == 7:
        return u"\U000026F4"  # ferry
    elif veh == 12:
        return u"\U0001F686"  # train
    else:
        return u"\U0001F68C"  # bus
8068ce68e0cdf7f220c37247ba2d03c6505a00fe
26,337
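A quick spot-check of the vehicle_emoji mapping above, including the bus fallback for unknown ids:

for vid in (2, 6, 7, 12, 99):
    print(vid, vehicle_emoji(vid))
# 2 -> 🚋 (tram car), 6 -> 🚇 (metro), 7 -> ⛴ (ferry), 12 -> 🚆 (train),
# anything else -> 🚌 (bus)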
import functools

import numpy as np
import tensorflow as tf
from tensorflow.python.util import tf_decorator


def np_function(func=None, output_dtypes=None):
  """Decorator that allows a numpy function to be used in Eager and Graph modes.

  Similar to `tf.py_func` and `tf.py_function` but it doesn't require defining
  the inputs or the dtypes of the outputs a priori.

  In Eager mode it would convert the tf.Tensors to np.arrays before passing to
  `func` and then convert back the outputs from np.arrays to tf.Tensors.

  In Graph mode it would create different tf.py_function for each combination
  of dtype of the inputs and cache them for reuse.

  NOTE: In Graph mode: if `output_dtypes` is not provided then `func` would
  be called with `np.ones()` to infer the output dtypes, and therefore `func`
  should be stateless.

  ```python
  Instead of doing:

  def sum(x):
    return np.sum(x)
  inputs = tf.constant([3, 4])
  outputs = tf.py_function(sum, inputs, Tout=[tf.int64])

  inputs = tf.constant([3., 4.])
  outputs = tf.py_function(sum, inputs, Tout=[tf.float32])

  Do:

  @eager_utils.np_function
  def sum(x):
    return np.sum(x)

  inputs = tf.constant([3, 4])
  outputs = sum(inputs)  # Infers that Tout is tf.int64

  inputs = tf.constant([3., 4.])
  outputs = sum(inputs)  # Infers that Tout is tf.float32

  # Output dtype is always float32 for valid input dtypes.
  @eager_utils.np_function(output_dtypes=np.float32)
  def mean(x):
    return np.mean(x)

  # Output dtype depends on the input dtype.
  @eager_utils.np_function(output_dtypes=lambda x: (x, x))
  def repeat(x):
    return x, x

  with context.graph_mode():
    outputs = sum(tf.constant([3, 4]))
    outputs2 = sum(tf.constant([3., 4.]))
    sess.run(outputs)   # np.array(7)
    sess.run(outputs2)  # np.array(7.)

  with context.eager_mode():
    inputs = tf.constant([3, 4])
    outputs = sum(tf.constant([3, 4]))    # tf.Tensor([7])
    outputs = sum(tf.constant([3., 4.]))  # tf.Tensor([7.])
  ```

  Args:
    func: A numpy function, that takes numpy arrays as inputs and returns
      numpy arrays as outputs.
    output_dtypes: Optional list of dtypes or a function that maps input
      dtypes to output dtypes. Examples: output_dtypes=[tf.float32],
      output_dtypes=lambda x: x (outputs have the same dtype as inputs).
      If it is not provided in Graph mode the `func` would be called to infer
      the output dtypes.

  Returns:
    A wrapped function that can be used with TF code.
  """
  def decorated(func):
    """Decorated func."""
    dtype_map = {}

    def wrapper(*args, **kwargs):
      """Wrapper to add nested input and outputs support."""
      func_with_kwargs = functools.partial(func, **kwargs)

      def func_flat_outputs(*args):
        return tf.nest.flatten(func_with_kwargs(*args))

      def compute_output_dtypes(*args):
        """Calls the func to compute output dtypes."""
        result = func(*args, **kwargs)
        return tf.nest.map_structure(lambda x: x.dtype, result)

      if tf.executing_eagerly():
        result = func_with_kwargs(
            *tf.nest.map_structure(lambda x: x.numpy(), args))
        convert = lambda x: x if x is None else tf.convert_to_tensor(value=x)
        return tf.nest.map_structure(convert, result)
      else:
        input_dtypes = tuple([x.dtype for x in tf.nest.flatten(args)])
        if input_dtypes not in dtype_map:
          if output_dtypes is None:
            dummy_args = tf.nest.map_structure(
                lambda x: np.ones(x.shape, x.dtype.as_numpy_dtype), args)
            dtype_map[input_dtypes] = compute_output_dtypes(*dummy_args)
          elif isinstance(output_dtypes, (list, tuple)):
            # output_dtypes define the output dtypes.
            dtype_map[input_dtypes] = output_dtypes
          else:
            try:
              # See if output_dtypes define the output dtype directly.
              tf.as_dtype(output_dtypes)
              dtype_map[input_dtypes] = output_dtypes
            except TypeError:
              if callable(output_dtypes):
                # output_dtypes is mapping from input_dtypes to output_dtypes.
                dtype_map[input_dtypes] = output_dtypes(*input_dtypes)
              else:
                raise ValueError(
                    'output_dtypes not a list of dtypes or a callable.')
        flat_output_dtypes = tf.nest.flatten(dtype_map[input_dtypes])
        flat_outputs = tf.py_function(func_flat_outputs,
                                      inp=args,
                                      Tout=flat_output_dtypes)
        return tf.nest.pack_sequence_as(dtype_map[input_dtypes], flat_outputs)

    return tf_decorator.make_decorator(func, wrapper)

  # This code path is for the `foo = np_function(foo, ...)` use case.
  if func is not None:
    return decorated(func)

  # This code path is for the decorator
  # @np_function(...)
  # def foo(...):
  return decorated
5ed18b1575ec88fe96c27e7de38b00c5a734ee91
26,338
from typing import List
from typing import Dict


def constituency_parse(doc: List[str]) -> List[Dict]:
    """
    parameter: List[str] for each doc
    return: List[Dict] for each doc
    """
    predictor = get_con_predictor()
    results = []
    for sent in doc:
        result = predictor.predict(sentence=sent)
        results.append(result)
    assert len(results) == len(doc)
    return results
30dd8eca61412083f1f11db6dc8aeb27bc171de9
26,339
import h5py
import numpy as np


def extract_results(filename):
    """
    Extract intensity data from a FLIMfit results file.
    Converts any fraction data (e.g. beta, gamma) to contributions

    Required arguments:
    filename - the name of the file to load
    """
    # sorted_nicely() and param() are helpers assumed to be defined elsewhere
    # in the same module.
    file = h5py.File(filename, 'r')
    results = file['results']
    keys = sorted_nicely(results.keys())
    params = sorted_nicely(results['image 1'].keys())

    groups = []
    g = 1
    while param(g, 'I_0') in params:
        group = [param(g, 'I_0')]
        name_search = [param(g, 'gamma'), param(g, 'beta')]
        for name in name_search:
            if len(group) == 1:
                group = group + [x for x in params if x.startswith(name)]
        groups.append(group)
        g = g + 1
    print(groups)

    X = []
    mask = []
    for k in keys:
        A = []
        m = np.array([False])
        for group in groups:
            I_0 = results[k][group[0]]
            m = m | ~np.isfinite(I_0)
            if len(group) == 1:
                A.append(I_0)
            else:
                for i in range(1, len(group)):
                    A.append(results[k][group[i]][()] * I_0)
        A = np.stack(A, axis=-1)
        A[np.isnan(A)] = 0
        X.append(A)
        mask.append(m)

    X = np.stack(X)
    mask = np.stack(mask)
    return X, groups, mask
c4a9f4f66a53050ea55cb1bd266edfa285000717
26,341
async def bundle_status(args: Namespace) -> ExitCode:
    """Query the status of a Bundle in the LTA DB."""
    response = await args.di["lta_rc"].request("GET", f"/Bundles/{args.uuid}")
    if args.json:
        print_dict_as_pretty_json(response)
    else:
        # display information about the core fields
        print(f"Bundle {args.uuid}")
        print(f"    Priority: {display_time(response['work_priority_timestamp'])}")
        print(f"    Status: {response['status']} ({display_time(response['update_timestamp'])})")
        if response['status'] == "quarantined":
            print(f"    Reason: {response['reason']}")
        print(f"    Claimed: {response['claimed']}")
        if response['claimed']:
            print(f"    Claimant: {response['claimant']} ({display_time(response['claim_timestamp'])})")
        print(f"    TransferRequest: {response['request']}")
        print(f"    Source: {response['source']} -> Dest: {response['dest']}")
        print(f"    Path: {response['path']}")
        if 'files' in response:
            print(f"    Files: {len(response['files'])}")
        else:
            print("    Files: Not Listed")
        # display additional information if available
        if 'bundle_path' in response:
            print(f"    Bundle File: {response['bundle_path']}")
        if 'size' in response:
            print(f"    Size: {response['size']}")
        if 'checksum' in response:
            print("    Checksum")
            print(f"        adler32: {response['checksum']['adler32']}")
            print(f"        sha512: {response['checksum']['sha512']}")
    # display the contents of the bundle, if requested
    if args.contents:
        print("    Contents: Not Listed")
    return EXIT_OK
2efb12b4bba3d9e920c199ad1e7262a24220d603
26,342
import numpy as np


def determine_step_size(mode, i, threshold=20):
    """
    A helper function that determines the next action to take based on the
    designated mode.

    Parameters
    ----------
    mode (int)
        Determines which option to choose.
    i (int)
        the current step number.
    threshold (float)
        The upper end of our control.

    Returns
    -------
    decision (float)
        The value to push/pull the cart by, positive values push to the right.
    """
    if mode == 1:
        return 0
    if mode == 2:
        return np.random.uniform(low=-threshold, high=threshold)
    if mode == 3:
        side = -1 if i % 2 == 0 else 1
        return threshold * side
    if mode == 4:
        inp_str = "Enter a float value from -{} to {}:\n".format(threshold, threshold)
        return float(input(inp_str))
    # Unrecognized modes fall through and implicitly return None.
9b59ebe5eeac13f06662e715328d2d9a3ea0e9a2
26,343
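A short sketch exercising the deterministic modes of determine_step_size above; note the implicit None for unrecognized modes:

# Mode 3 alternates full-strength pushes based on step parity.
print([determine_step_size(3, i, threshold=20) for i in range(4)])  # [-20, 20, -20, 20]
print(determine_step_size(1, 0))   # 0
print(determine_step_size(99, 0))  # None (unrecognized mode)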
def scroll_down(driver):
    """
    This function will simulate the scroll down of the webpage
    :param driver: webdriver
    :type driver: webdriver
    :return: webdriver
    """
    # Selenium supports executing JavaScript commands in the current window/frame

    # get scroll height
    last_height = driver.execute_script("return document.body.scrollHeight")

    # scroll to the end of the page
    driver.execute_script("window.scrollTo(0, {});".format(last_height))

    return driver
7d68201f3a49950e509a7e389394915475ed8c94
26,344
from datetime import datetime

# render_template is assumed to come from Flask, which this view-style
# function appears to target.
from flask import render_template


def processing():
    """Renders the khan projects page."""
    return render_template(
        'stem/tech/processing/gettingStarted.html',
        title="Processing - Getting Started",
        year=datetime.now().year,
    )
53f6c69692591601dcb41c7efccad60bbfaf4cf7
26,345
def conv_unit(input_tensor, nb_filters, mp=False, dropout=0.1):
    """
    one conv-relu-bn unit
    """
    # ZeroPadding2D, Conv2D, BatchNormalization, MaxPooling2D and Dropout are
    # Keras layers; relu() is assumed to be a module-level alias for a ReLU
    # activation layer (e.g. keras.layers.ReLU).
    x = ZeroPadding2D()(input_tensor)
    x = Conv2D(nb_filters, (3, 3))(x)
    x = relu()(x)
    x = BatchNormalization(axis=3, momentum=0.66)(x)
    if mp:
        x = MaxPooling2D(pool_size=(3, 3), strides=(2, 2), padding='same')(x)
    x = Dropout(dropout)(x)
    return x
7c24dae045c38c073431e4fab20687439601b141
26,346
import torch


def combine_vectors(x, y):
    """
    Function for combining two vectors with shapes (n_samples, ?) and (n_samples, ?).

    Parameters:
        x: (n_samples, ?) the first vector.
            In this assignment, this will be the noise vector of shape
            (n_samples, z_dim), but you shouldn't need to know the second
            dimension's size.
        y: (n_samples, ?) the second vector.
            Once again, in this assignment this will be the one-hot class
            vector with the shape (n_samples, n_classes), but you shouldn't
            assume this in your code.
    """
    combined = torch.cat((x, y), dim=1)
    return combined
700ea418c6244dc745bf6add89ad786c4444d2fe
26,347
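A usage sketch for combine_vectors above, with hypothetical z_dim and class counts:

import torch

noise = torch.randn(8, 64)                            # (n_samples, z_dim)
one_hot = torch.eye(10)[torch.randint(0, 10, (8,))]   # (n_samples, n_classes)
combined = combine_vectors(noise, one_hot)
print(combined.shape)  # torch.Size([8, 74])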
def expandMacros(context, template, outputFile, outputEncoding="utf-8"):
    """This function can be used to expand a template which contains METAL
    macros, while leaving in place all the TAL and METAL commands.

    Doing this makes editing a template which uses METAL macros easier,
    because the results of the macro can be seen immediately.

    The macros referred to by the passed in template must be present in the
    context so that their contents can be referenced.

    The outputEncoding determines the encoding of the returned string, which
    will contain the expanded macro.
    """
    interp = MacroExpansionInterpreter()
    interp.initialise(context, outputFile)
    return template.expand(context, outputFile, outputEncoding, interpreter=interp)
04aad464f975c5ee216e17f93167be51eea8f6e6
26,348
from collections import defaultdict
from itertools import combinations, groupby
from operator import itemgetter


def graph_papers(path="papers.csv"):
    """
    Spit out the connections between people by papers
    """
    # read_csv() and dotify() are project helpers assumed to be defined
    # elsewhere in the module.
    data = defaultdict(dict)
    jkey = u'Paper'
    for gkey, group in groupby(read_csv(path, key=jkey), itemgetter(jkey)):
        for pair in combinations(group, 2):
            for idx, row in enumerate(pair):
                uid = dotify([row[u'Name']])
                if uid not in data:
                    data[uid] = {
                        'name': uid,
                        'imports': [],
                    }
                cpart = pair[0] if idx == 1 else pair[1]
                data[uid]['imports'].append(dotify([cpart[u'Name']]))
    return data
43cae08f303707b75da2b225112fa0bc448306d9
26,349
def tariterator1(fileobj, check_sorted=False, keys=base_plus_ext, decode=True):
    """Alternative (new) implementation of tariterator."""
    content = tardata(fileobj)
    samples = group_by_keys(keys=keys)(content)
    decoded = decoder(decode=decode)(samples)
    return decoded
8ea80d266dfe9c63336664aaf0fcac520e620382
26,350
def juego_nuevo():
    """Ask the player for the number of rows/columns, the number of words,
    and the words themselves."""
    show_title("Crear sopa de NxN letras")
    nxn = pedir_entero("Ingrese un numero entero de la cantidad de\nfilas y columnas que desea (Entre 10 y 20):\n", 10, 20)
    n_palabras = pedir_entero("Ingrese un numero entero de la cantidad de\npalabas que deasea agregar (Entre 0 y %d):\n" % (nxn / 2), 0, (nxn / 2))
    palabras = []
    palabra_min_caracteres = 3
    palabra_repetida = False
    while len(palabras) < n_palabras:
        if palabra_repetida:
            show_msg("Ingreso una palabra repetida")
            palabra_repetida = False
        # Ask for a word that satisfies the constraints
        palabra = pedir_palabra("[%d|%d]Ingrese una palabra entre %d y %d caracteres: " % (len(palabras) + 1, n_palabras, palabra_min_caracteres, (nxn / 2)), palabra_min_caracteres, (nxn / 2))
        if palabra in palabras:
            palabra_repetida = True
        else:
            palabras.append(palabra)
    matrix = crear_matrix(nxn)
    matrix, posiciones, salteadas = procesar_palabras(matrix, nxn, palabras)
    matrix = completar_matrix(matrix, nxn)
    return procesar_juego(matrix, nxn, n_palabras, salteadas, posiciones)
ec42615c3934fd98ca5975f99d215f597f353842
26,351
def mk_sd_graph(pvalmat, thresh=0.05):
    """
    Make a graph with edges as significant differences between treatments.
    """
    digraph = DiGraph()
    for idx in range(len(pvalmat)):
        digraph.add_node(idx)
    for idx_a, idx_b, b_bigger, p_val in iter_all_pairs_cmp(pvalmat):
        if p_val > thresh:
            continue
        if b_bigger:
            digraph.add_edge(idx_a, idx_b)
        else:
            digraph.add_edge(idx_b, idx_a)
    return digraph
f219d964ec90d58162db5e72d272ec8138f8991e
26,352
import numpy as np
from numpy import cos, sin


def body2hor(body_coords, theta, phi, psi):
    """Transforms the vector coordinates in body frame of reference to local
    horizon frame of reference.

    Parameters
    ----------
    theta : float
        Pitch (or elevation) angle (rad).
    phi : float
        Bank angle (rad).
    psi : float
        Yaw (or azimuth) angle (rad)
    body_coords : array_like
        3 dimensional vector with (x,y,z) coordinates in body axes.

    Returns
    -------
    hor_coords : array_like
        3 dimensional vector with (x,y,z) coordinates in local horizon axes.

    Raises
    ------
    ValueError
        If the values of the euler angles are outside the proper ranges.

    See Also
    --------
    `hor2body` function.

    Notes
    -----
    See [1] or [2] for frame of reference definition.
    Note that in order to avoid ambiguities ranges in angles are limited to:

    * -pi/2 <= theta <= pi/2
    * -pi <= phi <= pi
    * 0 <= psi <= 2*pi

    References
    ----------
    .. [1] B. Etkin, "Dynamics of Atmospheric Flight," Courier Corporation,
       pp. 104-120, 2012.
    .. [2] Gómez Tierno, M.A. et al, "Mecánica del Vuelo," Garceta,
       pp. 1-12, 2012
    """
    # check_theta_phi_psi_range(theta, phi, psi)

    # Transformation matrix from body to local horizon
    Lhb = np.array([
        [cos(theta) * cos(psi),
         sin(phi) * sin(theta) * cos(psi) - cos(phi) * sin(psi),
         cos(phi) * sin(theta) * cos(psi) + sin(phi) * sin(psi)],
        [cos(theta) * sin(psi),
         sin(phi) * sin(theta) * sin(psi) + cos(phi) * cos(psi),
         cos(phi) * sin(theta) * sin(psi) - sin(phi) * cos(psi)],
        [-sin(theta),
         sin(phi) * cos(theta),
         cos(phi) * cos(theta)]
    ])

    hor_coords = np.squeeze(Lhb).dot(body_coords)
    return hor_coords
2e0e8f6bf3432a944a350fb7df5bdfa067074448
26,353
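A sanity check for body2hor above: a pure 90-degree yaw should map the body x-axis onto the horizon y-axis:

import numpy as np

hor = body2hor([1.0, 0.0, 0.0], theta=0.0, phi=0.0, psi=np.pi / 2)
print(np.round(hor, 6))  # [0. 1. 0.]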
import numpy as np


def negloglikelihoodZTNB(args, x):
    """Negative log likelihood for zero truncated negative binomial."""
    # NegBinom and negloglikelihoodNB are assumed to be defined in the
    # same module.
    a, m = args
    denom = 1 - NegBinom(a, m).pmf(0)
    return len(x) * np.log(denom) + negloglikelihoodNB(args, x)
8458cbc02a00fd2bc37d661a7e34a61afccb6124
26,354
import numpy as np


def combine(m1, m2):
    """
    Returns transform that combines two other transforms.
    """
    return np.dot(m1, m2)
083de20237f484806c356c0b29c42ff28aa801f6
26,355
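A sketch showing combine above composing two 2-D rotation matrices; rotation angles add under matrix multiplication:

import numpy as np

def rot(a):
    """Hypothetical helper: 2-D rotation matrix for angle a (radians)."""
    return np.array([[np.cos(a), -np.sin(a)], [np.sin(a), np.cos(a)]])

# Two 45-degree rotations combine into a single 90-degree rotation.
print(np.allclose(combine(rot(np.pi / 4), rot(np.pi / 4)), rot(np.pi / 2)))  # True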
import torch
from torch.distributions import Normal, Uniform


def _acg_bound(nsim, k1, k2, lam, mtop=1000):
    # John T Kent, Asaad M Ganeiber, and Kanti V Mardia.
    # A new unified approach for the simulation of a wide class of directional
    # distributions. Journal of Computational and Graphical Statistics,
    # 27(2):291-301, 2018.
    """
    Sampling approach used in Kent et al. (2018)

    Samples the cartesian coordinates from bivariate ACG: x, y
    Acceptance criterion:
    - Sample values v, from uniform between 0 and 1
    - If v < fg, accept x, y
    Convert x, y to angles phi using atan2, we have now simulated the
    bessel density.
    """
    # bfind() and log_im() are helpers assumed to be defined in the same module.
    ntry = 0
    nleft = nsim
    mloop = 0
    eig = torch.tensor([0., 0.5 * (k1 - lam**2 / k2)])
    eigmin = 0
    if eig[1] < 0:
        eigmin = eig[1]
        eig = eig - eigmin
    q = 2
    b0 = bfind(eig)
    phi = 1 + 2 * eig / b0
    den = log_im(0, k2)
    values = torch.empty(nsim, 2)
    accepted = 0
    while nleft > 0 and mloop < mtop:
        x = Normal(0., 1.).sample((nleft * q,)).reshape(nleft, q) \
            * torch.ones(nleft, 1) * (1 / phi).sqrt().reshape(1, q)
        r = (x * x).sum(-1).sqrt()
        # Dividing a vector by its norm gives the unit vector,
        # so the ACG samples unit vectors.
        x = x / (r.reshape(nleft, 1) * torch.ones(1, q))
        u = ((x * x) * torch.ones(nleft, 1) * eig.reshape(1, q)).sum(-1)
        v = Uniform(0, 1).sample((nleft,))
        # eq 7.3 + eq 4.2
        logf = (k1 * (x[:, 0] - 1) + eigmin) \
            + (log_im(0, torch.sqrt(k2**2 + lam**2 * x[:, 1]**2)) - den)
        # eq 3.4
        loggi = 0.5 * (q - b0) + q / 2 * ((1 + 2 * u / b0).log() + (b0 / q).log())
        logfg = logf + loggi
        ind = (v < logfg.exp())
        nacc = ind.sum()
        nleft = nleft - nacc
        mloop = mloop + 1
        ntry = ntry + nleft
        if nacc > 0:
            start = accepted
            accepted += x[ind].shape[0]
            values[start:accepted, :] = x[ind, :]
    print("Sampling efficiency:", (nsim - nleft.item()) / ntry.item())
    return torch.atan2(values[:, 1], values[:, 0])
45d96fee1b61d5c020e355df76d77c78483a3a0b
26,356
import numpy as np


def melspecgrams_to_specgrams(logmelmag2=None, mel_p=None, mel_downscale=1):
    """Converts melspecgrams to specgrams.

    Args:
        logmelmag2: Tensor of log magnitudes, shape [freq, time], mel scaling
            of frequencies.
        mel_p: Tensor of instantaneous frequencies, shape [freq, time], mel
            scaling of frequencies.
        mel_downscale: Factor by which the mel spectrogram was downscaled.

    Returns:
        logmag, p: Tensors of log magnitudes and instantaneous frequencies,
            shape [freq, time].
    """
    # _mel_to_linear_matrix() and instantaneous_frequency() are assumed to be
    # defined elsewhere in the module.
    mel2l = _mel_to_linear_matrix(mel_downscale)
    logmag = None
    p = None
    if logmelmag2 is not None:
        logmelmag2 = logmelmag2.T
        logmelmag2 = np.array([logmelmag2])
        mag2 = np.tensordot(np.exp(logmelmag2), mel2l, 1)
        logmag = 0.5 * np.log(mag2 + 1e-6)
        logmag = logmag[0].T
    if mel_p is not None:
        mel_p = mel_p.T
        mel_p = np.array([mel_p])
        mel_phase_angle = np.cumsum(mel_p * np.pi, axis=1)
        phase_angle = np.tensordot(mel_phase_angle, mel2l, 1)
        p = instantaneous_frequency(phase_angle, time_axis=1)
        p = p[0].T
    return logmag, p
34090358eff2bf803af9b56c210d5e093b1f2900
26,358
from scipy.stats.mstats import gmean
import numpy as np


def ligandScore(ligand, genes):
    """calculate ligand score for given ligand and gene set"""

    if ligand.ligand_type == "peptide" and isinstance(ligand.preprogene, str):
        # check if multiple genes need to be accounted for
        if isinstance(eval(ligand.preprogene), list):
            ligand_genes = list()
            for gene in eval(ligand.preprogene):
                try:
                    ligand_genes.append(genes[gene])
                except KeyError:
                    # print(f"{gene} not found")
                    ligand_genes.append(0.0)
            # use max, as there might be many ortholog genes for one original
            # gene and not all have to be expressed
            try:
                ligand_score = max(ligand_genes)
            except ValueError:
                print(f"something is wrong with the list {ligand_genes}")
                ligand_score = 0.0
            return ligand_score

    elif ligand.ligand_type == "molecule":
        synthesis = ligand.synthesis
        transport = ligand.transport
        reuptake = ligand.reuptake
        excluded = ligand.excluded

        # get geometric mean of synthesis genes (all need to be present)
        if not isinstance(synthesis, str):
            # If no genes are needed, synthesis is set to nan
            synthesis = np.nan
        else:
            synthesis_expression = list()
            for gene in eval(synthesis):
                try:
                    synthesis_expression.append(genes[gene])
                except KeyError:
                    # If gene was not found append 0
                    # print(f"{gene} not found")
                    synthesis_expression.append(0.0)
            synthesis = gmean(synthesis_expression)

        # get maximum of vesicle transporters (only one is needed for
        # molecule transport)
        if not isinstance(transport, str):
            # If no specific genes are needed, set transport to nan
            transport = np.nan
        else:
            transport_expression = list()
            for gene in eval(transport):
                try:
                    transport_expression.append(genes[gene])
                except KeyError:
                    # If gene was not found append 0
                    # print(f"{gene} not found")
                    transport_expression.append(0.0)
            transport = max(transport_expression)

        # Get maximum of reuptake genes (only one is needed)
        if not isinstance(reuptake, str):
            # If no specific genes are needed, set reuptake to nan
            reuptake = np.nan
        else:
            reuptake_expression = list()
            for gene in eval(reuptake):
                try:
                    reuptake_expression.append(genes[gene])
                except KeyError:
                    # If gene was not found append 0
                    # print(f"{gene} not found")
                    reuptake_expression.append(0.0)
            reuptake = max(reuptake_expression)

        # get maximum among excluding genes where any gene expression diverts
        # to other ligands
        if not isinstance(excluded, str):
            # If no specific genes are needed, set excluded to 0
            excluded = 0
        else:
            excluded_expression = list()
            for gene in eval(excluded):
                try:
                    excluded_expression.append(genes[gene])
                except KeyError:
                    # If gene was not found append 0
                    # print(f"{gene} not found")
                    excluded_expression.append(0.0)
            excluded = max(excluded_expression)

        # return geometric mean of synthesis, transport and reuptake,
        # multiplied by exclusion
        # genes driving ligand production, remove nan values
        promoting_factor = gmean([x for x in [synthesis, transport, reuptake] if str(x) != "nan"])

        if str(promoting_factor) == "nan":
            # capture cases where no promoting genes were present
            print(f"no promoting genes detected for {ligand.ligand}")
            return 0.0  # exit before running exclusion calculation

        # correct ligand expression based on the exclusion factor
        ligand_score = promoting_factor - excluded
        if ligand_score < 0:
            # ligand score should be 0 or positive
            ligand_score = 0.0
        return ligand_score

    # If genes are missing from ligand gene list
    else:
        print("Big error! ligand type is not defined!")
        return 0.0
68141e9a837619b087cf132c6ba593ba5b1ef43d
26,359
def eval(x):
    """Evaluates the value of a variable.

    # Arguments
        x: A variable.

    # Returns
        A Numpy array.

    # Examples
    ```python
        >>> from keras import backend as K
        >>> kvar = K.variable(np.array([[1, 2], [3, 4]]), dtype='float32')
        >>> K.eval(kvar)
        array([[ 1.,  2.],
               [ 3.,  4.]], dtype=float32)
    ```
    """
    if isinstance(x, KerasSymbol):
        if x.tensor is not None:
            if x.name in x.get_bind_values() and _MODEL is not None:
                _MODEL._sync_weights()
            ret = x.eval().asnumpy()
        else:
            ret = _forward_pass(x)[0].asnumpy()
        # If the Tensor shape is (1, ) and does not have attribute
        # "_is_vector", then it is considered to be scalar. Return the value.
        if ret.shape == (1,) and not hasattr(x, '_is_vector'):
            ret = ret[0]
        return ret
    elif isinstance(x, mx.nd.NDArray):
        return x.asnumpy()
    else:
        return x
a9b5473cc71cd999d6e85fd760018d454c194c04
26,360
from typing import Optional


def triple_in_shape(expr: ShExJ.shapeExpr, label: ShExJ.tripleExprLabel, cntxt: Context) \
        -> Optional[ShExJ.tripleExpr]:
    """Search for the label in a shape expression."""
    te = None
    if isinstance(expr, (ShExJ.ShapeOr, ShExJ.ShapeAnd)):
        for expr2 in expr.shapeExprs:
            te = triple_in_shape(expr2, label, cntxt)
            if te is not None:
                break
    elif isinstance(expr, ShExJ.ShapeNot):
        te = triple_in_shape(expr.shapeExpr, label, cntxt)
    elif isinstance(expr, ShExJ.shapeExprLabel):
        se = reference_of(expr, cntxt)
        if se is not None:
            te = triple_in_shape(se, label, cntxt)
    return te
a1e9ba9e7c282475c775c17f52b51a78c3dcfd71
26,361
def poly_learning_rate(base_lr, curr_iter, max_iter, power=0.9):
    """poly learning rate policy"""
    lr = base_lr * (1 - float(curr_iter) / max_iter) ** power
    return lr
fdb2b6ed3784deb3fbf55f6b23f6bd32dac6a988
26,362
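A worked sketch of the poly decay above with base_lr=0.01 over 10,000 iterations:

for it in (0, 5000, 9999):
    print(it, poly_learning_rate(0.01, it, 10000))
# 0    -> 0.01 (full rate at the start)
# 5000 -> ~0.005359 (0.01 * 0.5**0.9)
# 9999 -> ~2.51e-06 (rate decays toward 0 as curr_iter -> max_iter)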
def parent_path(xpath):
    """
    Removes the last element in an xpath, effectively yielding the xpath
    to the parent element

    :param xpath: An xpath with at least one '/'
    """
    return xpath[:xpath.rfind('/')]
b435375b9d5e57c6668536ab819f40ae7e169b8e
26,363
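Two quick examples for parent_path above; note a top-level element's parent comes back as the empty string:

print(parent_path("/bookstore/book/title"))  # -> /bookstore/book
print(parent_path("/bookstore"))             # -> '' (empty string)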
from datetime import datetime


def change_project_description(project_id):
    """For backwards compatibility: Change the description of a project."""
    description = read_request()
    assert isinstance(description, str)
    orig = get_project(project_id)
    orig.description = description
    # `from datetime import datetime` is in scope, so the original
    # datetime.datetime.utcnow() would raise AttributeError.
    orig.lastUpdated = datetime.utcnow()
    orig.save()
    return JsonResponse(orig)
c6b59cfbbffb353943a0a7ba4160ffb0e2382a51
26,364
import unittest


def run_all(examples_main_path):
    """
    Helper function to run all the test cases
    :arg: examples_main_path: the path to main examples directory
    """
    # test cases to run
    test_cases = [TestExample1, TestExample2, TestExample3, TestExample4,
                  TestExample5s1, TestExample5s2, TestExample6, TestExample7,
                  TestExample8, TestExample9]

    # load all the specified test cases
    test_loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for test_case in test_cases:
        test_names = test_loader.getTestCaseNames(test_case)
        for test_name in test_names:
            suite.addTest(test_case(methodName=test_name,
                                    examples_path=examples_main_path))

    # run the test suite
    result = unittest.TextTestRunner().run(suite)
    return int(not result.wasSuccessful())
9a5176bff4e2c82561e3b0dbee467cd1dec0e63e
26,365
def get_queue(queue):
    """
    :param queue: Queue Name or Queue ID or Queue Redis Key or Queue Instance
    :return: Queue instance
    """
    if isinstance(queue, Queue):
        return queue

    if isinstance(queue, str):
        if queue.startswith(Queue.redis_queue_namespace_prefix):
            return Queue.from_queue_key(queue)
        else:
            return Queue.from_queue_key(Queue.redis_queue_namespace_prefix + queue)

    raise TypeError('{0} is not of class {1} or {2}'.format(queue, str, Queue))
159860f2efa5c7a2643d4ed8b316e8abca85e67f
26,366
def ptttl_to_samples(ptttl_data, amplitude=0.5, wavetype=SINE_WAVE):
    """
    Convert a PTTTLData object to a list of audio samples.

    :param PTTTLData ptttl_data: PTTTL/RTTTL source text
    :param float amplitude: Output signal amplitude, between 0.0 and 1.0.
    :param int wavetype: Waveform type for output signal. Must be one of \
        tones.SINE_WAVE, tones.SQUARE_WAVE, tones.TRIANGLE_WAVE, or \
        tones.SAWTOOTH_WAVE.
    :return: list of audio samples
    :rtype: tones.tone.Samples
    """
    parser = PTTTLParser()
    data = parser.parse(ptttl_data)
    return _generate_samples(data, amplitude, wavetype)
f4be93a315ff177cbdf69249f7efece55561b431
26,367
from typing import Union
from pathlib import Path
from typing import Tuple

import numpy
import pandas


def read_output_ascii(
    path: Union[Path, str]
) -> Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray]:
    """Read an output file (raw ASCII format)

    Args:
        path (str): path to the file

    Return:
        list: The contents of the output file as a :py:class:`list`
            containing four :py:class:`numpy.ndarray` instances. The first
            one contains all simulation times. The other entries contain the
            norm, energy and maximum SPF overlap of the wave function at all
            times.
    """
    dataFrame = pandas.read_csv(
        str(path), sep=r"\s+", names=["time", "norm", "energy", "overlap"]
    )
    return (
        dataFrame["time"].values,
        dataFrame["norm"].values,
        dataFrame["energy"].values,
        dataFrame["overlap"].values,
    )
ef3008f6cf988f7bd42ccb75bfd6cfd1a58e28ae
26,368
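A usage sketch for read_output_ascii above against a throwaway whitespace-separated file (the values are hypothetical):

import os
import tempfile

# Four whitespace-separated columns per row: time, norm, energy, overlap.
path = os.path.join(tempfile.mkdtemp(), "output")
with open(path, "w") as f:
    f.write("0.0 1.0 -0.5 0.99\n1.0 1.0 -0.5 0.98\n")

times, norms, energies, overlaps = read_output_ascii(path)
print(times)  # [0. 1.]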
def AliasPrefix(funcname):
    """Return the prefix of the function the named function is an alias of."""
    alias = __aliases[funcname][0]
    return alias.prefix
771c0f665ddad2427759a5592608e5467005c26d
26,369
from typing import Callable, Optional, Tuple


def connect(
    sender: QWidget,
    signal: str,
    receiver: QObject,
    slot: str,
    caller: Optional[FormDBWidget] = None,
) -> Optional[Tuple[pyqtSignal, Callable]]:
    """Connect signal to slot for QSA."""
    # Parameters example:
    # caller: <clientes.FormInternalObj object at 0x7f78b5c230f0>
    # sender: <pineboolib.qt3_widgets.qpushbutton.QPushButton object at 0x7f78b4de1af0>
    # signal: 'clicked()'
    # receiver: <clientes.FormInternalObj object at 0x7f78b5c230f0>
    # slot: 'iface.buscarContacto()'
    if caller is not None:
        logger.trace("* * * Connect:: %s %s %s %s %s", caller, sender, signal, receiver, slot)
    else:
        logger.trace("? ? ? Connect:: %s %s %s %s", sender, signal, receiver, slot)

    signal_slot = solve_connection(sender, signal, receiver, slot)
    if not signal_slot:
        return None

    # http://pyqt.sourceforge.net/Docs/PyQt4/qt.html#ConnectionType-enum
    conntype = Qt.QueuedConnection | Qt.UniqueConnection
    new_signal, new_slot = signal_slot

    # if caller:
    #     for sl in caller._formconnections:
    #         if sl[0].signal == signal_slot[0].signal and sl[1].__name__ == signal_slot[1].__name__:
    #             return False

    try:
        slot_done_fn: Callable = slot_done(new_slot, new_signal, sender, caller)
        # MyPy/PyQt5-Stubs misses connect(type=param)
        new_signal.connect(slot_done_fn, type=conntype)  # type: ignore
    except Exception:
        logger.warning("ERROR Connecting: %s %s %s %s", sender, signal, receiver, slot)
        return None

    signal_slot = new_signal, slot_done_fn
    return signal_slot
2ebeca355e721c5fad5ec6aac24a59587e4e86bd
26,371
import torch


def get_detection_input(batch_size=1):
    """
    Sample input for detection models, usable for tracing or testing
    """
    return (
        torch.rand(batch_size, 3, 224, 224),
        torch.full((batch_size,), 0).long(),
        torch.Tensor([1, 1, 200, 200]).repeat((batch_size, 1)),
        torch.full((batch_size,), 1).long(),
    )
710a5ed2f89610555d347af568647a8768f1ddb4
26,372
def build_tables(ch_groups, buffer_size, init_obj=None):
    """ build tables and associated I/O info for the channel groups.

    Parameters
    ----------
    ch_groups : dict
    buffer_size : int
    init_obj : object with initialize_lh5_table() function

    Returns
    -------
    ch_to_tbls : dict or Table
        A channel-indexed dictionary of tables for quick look-up, or if passed
        a dummy group (no group name), return the one table made.
    """
    ch_to_tbls = {}

    # set up a table for each group
    for group_name, group_info in ch_groups.items():

        tbl = lh5.Table(buffer_size)

        if init_obj is not None:
            channel = None  # for dummy ch_group
            # Note: all ch in ch_list will be written to the same table, so it
            # should suffice to initialize for the first channel in the list
            if 'ch_list' in group_info:
                channel = group_info['ch_list'][0]
            init_obj.initialize_lh5_table(tbl, channel)

        group_info['table'] = tbl

        if group_name == '':
            if len(ch_groups) != 1:
                print("Error: got dummy group (no name) in non-dummy ch_groups")
                return None
            return tbl

        # cache the table to a ch-indexed dict for quick look-up
        for ch in group_info['ch_list']:
            ch_to_tbls[ch] = tbl

    return ch_to_tbls
964bc6a817688eb8426976cec2b0053f43c6ed79
26,373
def segmentspan(revlog, revs):
    """Get the byte span of a segment of revisions

    revs is a sorted array of revision numbers

    >>> revlog = _testrevlog([
    ...  5,  #0
    ...  10, #1
    ...  12, #2
    ...  12, #3 (empty)
    ...  17, #4
    ... ])

    >>> segmentspan(revlog, [0, 1, 2, 3, 4])
    17
    >>> segmentspan(revlog, [0, 4])
    17
    >>> segmentspan(revlog, [3, 4])
    5
    >>> segmentspan(revlog, [1, 2, 3,])
    7
    >>> segmentspan(revlog, [1, 3])
    7
    """
    if not revs:
        return 0
    end = revlog.end(revs[-1])
    return end - revlog.start(revs[0])
51624b3eac7bba128a2e702c3387bbaab4974143
26,374
def is_stateful(change, stateful_resources):
    """ Boolean check if current change references a stateful resource """
    return change['ResourceType'] in stateful_resources
055465870f9118945a9e5f2ff39be08cdcf35d31
26,375
def get_session_from_webdriver(driver: WebDriver, registry: Registry) -> RedisSession:
    """Extract session cookie from a Selenium driver and fetch matching
    pyramid_redis_session data.

    Example::

        def test_newsletter_referral(dbsession, web_server, browser, init):
            '''Referral is tracked for the newsletter subscription.'''

            b = browser
            b.visit(web_server + "/newsletter")

            with transaction.manager:
                r = ReferralProgram()
                r.name = "Foobar program"
                dbsession.add(r)
                dbsession.flush()
                ref_id, slug = r.id, r.slug

            # Inject referral data to the active session. We do this because
            # it is very hard to spoof external links pointing to localhost
            # test web server.
            session = get_session_from_webdriver(b.driver, init.config.registry)
            session["referral"] = {
                "ref": slug,
                "referrer": "http://example.com"
            }
            session.to_redis()

            b.fill("email", "[email protected]")
            b.find_by_name("subscribe").click()

            # Displayed as a message after successful form subscription
            assert b.is_text_present("Thank you!")

            # Check we get an entry
            with transaction.manager:
                assert dbsession.query(NewsletterSubscriber).count() == 1
                subscription = dbsession.query(NewsletterSubscriber).first()
                assert subscription.email == "[email protected]"
                assert subscription.ip == "127.0.0.1"
                assert subscription.referral_program_id == ref_id
                assert subscription.referrer == "http://example.com"

    :param driver: The active WebDriver (usually ``browser.driver``)

    :param registry: The Pyramid registry (usually ``init.config.registry``)
    """
    # Decode the session our test browser is associated with by reading the
    # raw session cookie value and fetching the session object from Redis.
    secret = registry.settings["redis.sessions.secret"]

    session_cookie = driver.get_cookie("session")["value"]
    session_id = signed_deserialize(session_cookie, secret)

    class MockRequest:
        def __init__(self, registry):
            self.registry = registry

    # Use pyramid_redis_session to get a connection to the Redis database
    redis = get_default_connection(MockRequest(registry))

    session = RedisSession(redis, session_id, new=False, new_session=None)
    return session
0faaa394c065344117cec67ec824ec5186252ee2
26,377
from typing import Tuple

import matplotlib.pyplot as plt
import seaborn as sns


def paper() -> Tuple[str]:
    """
    Use my paper figure style.

    Returns
    -------
    Tuple[str]
        Colors in the color palette.
    """
    sns.set_context("paper")
    style = {
        "axes.spines.bottom": True,
        "axes.spines.left": True,
        "axes.spines.right": False,
        "axes.spines.top": False,
        "axes.edgecolor": "0",
        "xtick.bottom": True,
        "ytick.left": True,
    }
    plt.rcParams["legend.frameon"] = False
    palette = sns.color_palette("deep")
    sns.set_palette(palette)
    sns.set_style("ticks", rc=style)
    return palette
6e53247c666db62be1d5bf5ad5d77288af277d2d
26,379
import difflib


def _get_diff_text(old, new):
    """
    Returns the diff of two text blobs.
    """
    diff = difflib.unified_diff(old.splitlines(1), new.splitlines(1))
    return "".join([x.replace("\r", "") for x in diff])
bd8a3d49ccf7b6c18e6cd617e6ad2ad8324de1cc
26,380
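A sketch of _get_diff_text above on two three-line blobs:

print(_get_diff_text("a\nb\nc\n", "a\nB\nc\n"))
# ---
# +++
# @@ -1,3 +1,3 @@
#  a
# -b
# +B
#  c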
def GetStatus(operation):
    """Returns string status for given operation.

    Args:
        operation: A messages.Operation instance.

    Returns:
        The status of the operation in string form.
    """
    if not operation.done:
        return Status.PENDING.name
    elif operation.error:
        return Status.ERROR.name
    else:
        return Status.COMPLETED.name
c9630528dd9b2e331a9d387cac0798bf07646603
26,382
def ft2m(ft):
    """
    Converts feet to meters.
    """
    if ft is None:
        return None
    return ft * 0.3048
ca2b4649b136c9128b5b3ae57dd00c6cedd0f383
26,384
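Quick checks for ft2m above, including the None pass-through for missing values:

print(ft2m(10))    # 3.048
print(ft2m(None))  # None (missing input propagates)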
def show_colors(*, nhues=17, minsat=10, unknown='User', include=None, ignore=None):
    """
    Generate tables of the registered color names. Adapted from `this example
    <https://matplotlib.org/examples/color/named_colors.html>`__.

    Parameters
    ----------
    nhues : int, optional
        The number of breaks between hues for grouping "like colors" in the
        color table.
    minsat : float, optional
        The threshold saturation, between ``0`` and ``100``, for designating
        "gray colors" in the color table.
    unknown : str, default: 'User'
        Category name for color names that are unknown to proplot. Set this
        to ``False`` to hide unknown color names.
    include : str or sequence of str, default: None
        Category names to be shown in the table. Use this to limit the table
        to a subset of categories. Valid categories are %(demos.colors)s.
    ignore : str or sequence of str, default: 'CSS4'
        Used only if `include` was not passed. Category names to be removed
        from the colormap table.

    Returns
    -------
    proplot.figure.Figure
        The figure.
    proplot.gridspec.SubplotGrid
        The subplot grid.
    """
    # Tables of known colors to be plotted
    colordict = {}
    if ignore is None:
        ignore = 'css4'
    if isinstance(include, str):
        include = (include.lower(),)
    if isinstance(ignore, str):
        ignore = (ignore.lower(),)
    if include is None:
        include = COLORS_TABLE.keys()
        include -= set(map(str.lower, ignore))
    for cat in sorted(include):
        if cat not in COLORS_TABLE:
            raise ValueError(
                f'Invalid categories {include!r}. Options are: '
                + ', '.join(map(repr, COLORS_TABLE)) + '.'
            )
        colordict[cat] = list(COLORS_TABLE[cat])  # copy the names

    # Add "unknown" colors
    if unknown:
        unknown_colors = [
            color for color in map(repr, pcolors._color_database)
            if 'xkcd:' not in color and 'tableau:' not in color
            and not any(color in list_ for list_ in COLORS_TABLE)
        ]
        if unknown_colors:
            colordict[unknown] = unknown_colors

    # Divide colors into columns and rows
    # For base and open colors, tables are already organized into like
    # colors, so just reshape them into grids. For other colors, we group
    # them by hue in descending order of luminance.
    namess = {}
    for cat in sorted(include):
        if cat == 'base':
            names = np.asarray(colordict[cat])
            ncols, nrows = len(names), 1
        elif cat == 'opencolor':
            names = np.asarray(colordict[cat])
            ncols, nrows = 7, 20
        else:
            hclpairs = [(name, to_xyz(name, 'hcl')) for name in colordict[cat]]
            hclpairs = [
                sorted(
                    [
                        pair for pair in hclpairs
                        if _filter_colors(pair[1], ihue, nhues, minsat)
                    ],
                    key=lambda x: x[1][2]  # sort by luminance
                )
                for ihue in range(nhues)
            ]
            names = np.array([name for ipairs in hclpairs for name, _ in ipairs])
            ncols, nrows = 4, len(names) // 4 + 1
        names.resize((ncols, nrows))  # fill empty slots with empty string
        namess[cat] = names

    # Draw figures for different groups of colors
    # NOTE: Aspect ratios should be number of columns divided by number
    # of rows, times the aspect ratio of the slot for each swatch-name
    # pair, which we set to 5.
    shape = tuple(namess.values())[0].shape  # sample *first* group
    figwidth = 6.5
    refaspect = (figwidth * 72) / (10 * shape[1])  # points
    maxcols = max(names.shape[0] for names in namess.values())
    hratios = tuple(names.shape[1] for names in namess.values())
    fig, axs = ui.subplots(
        figwidth=figwidth,
        refaspect=refaspect,
        nrows=len(include),
        hratios=hratios,
    )
    title_dict = {
        'css4': 'CSS4 colors',
        'base': 'Base colors',
        'opencolor': 'Open color',
        'xkcd': 'XKCD colors',
    }
    for ax, (cat, names) in zip(axs, namess.items()):
        # Format axes
        ax.format(
            title=title_dict.get(cat, cat),
            titleweight='bold',
            xlim=(0, maxcols - 1),
            ylim=(0, names.shape[1]),
            grid=False, yloc='neither', xloc='neither', alpha=0,
        )

        # Draw swatches as lines
        lw = 8  # best to just use trial and error
        swatch = 0.45  # percent of column reserved for swatch
        ncols, nrows = names.shape
        for col, inames in enumerate(names):
            for row, name in enumerate(inames):
                if not name:
                    continue
                y = nrows - row - 1  # start at top
                x1 = col * (maxcols - 1) / ncols  # e.g. idx 3 --> idx 7
                x2 = x1 + swatch  # portion of column
                xtext = x1 + 1.1 * swatch
                ax.text(
                    xtext, y, name, ha='left', va='center',
                    transform='data', clip_on=False,
                )
                ax.plot(
                    [x1, x2], [y, y],
                    color=name, lw=lw,
                    solid_capstyle='butt',  # do not stick out
                    clip_on=False,
                )

    return fig, axs
34b45185af96f3ce6111989f83d584006ebceb49
26,385
def get_all_lights(scene, include_light_filters=True):
    """Return a list of all lights in the scene, including mesh lights

    Args:
        scene (bpy.types.Scene) - scene file to look for lights
        include_light_filters (bool) - whether or not light filters should be
            included in the list

    Returns:
        (list) - list of all lights
    """
    lights = list()
    for ob in scene.objects:
        if ob.type == 'LIGHT':
            if hasattr(ob.data, 'renderman'):
                if include_light_filters:
                    lights.append(ob)
                elif ob.data.renderman.renderman_light_role == 'RMAN_LIGHT':
                    lights.append(ob)
        else:
            mat = getattr(ob, 'active_material', None)
            if not mat:
                continue
            output = shadergraph_utils.is_renderman_nodetree(mat)
            if not output:
                continue
            if len(output.inputs) > 1:
                socket = output.inputs[1]
                if socket.is_linked:
                    node = socket.links[0].from_node
                    if node.bl_label == 'PxrMeshLight':
                        lights.append(ob)
    return lights
4570f36bdfbef287f38a250cddcdc7f8c8d8665d
26,386
# `load` is assumed to be json.load, given the file is opened in text mode
# and fed straight into a DataFrame.
from json import load

import pandas as pd


def get_df(path):
    """Load raw dataframe from JSON data."""
    with open(path) as reader:
        df = pd.DataFrame(load(reader))
    df['rate'] = 1e3 / df['ms_per_record']
    return df
0e94506fcaa4bd64388eb2def4f9a66c19bd9b32
26,387
def _format_distribution_details(details, color=False):
    """Format distribution details for printing later."""
    def _y_v(value):
        """Print value in distribution details."""
        if color:
            return colored.yellow(value)
        else:
            return value

    # Maps keys in configuration to a pretty-printable name.
    distro_pretty_print_map = {
        "distro": lambda v: """Distribution Name: """ + _y_v(v),
        "release": lambda v: """Release: """ + _y_v(v),
        "arch": lambda v: """Architecture: """ + _y_v(Alias.universal(v)),
        "pkgsys": lambda v: """Package System: """ + _y_v(v.__name__),
    }

    return "\n".join([
        " - " + distro_pretty_print_map[key](value)
        for key, value in details.items()
        if key in distro_pretty_print_map
    ]) + "\n"
ccfa7d9b35b17ba9889f5012d1ae5aa1612d33b1
26,388
async def async_get_relation_id(application_name, remote_application_name,
                                model_name=None, remote_interface_name=None):
    """
    Get relation id of relation from model.

    :param model_name: Name of model to operate on
    :type model_name: str
    :param application_name: Name of application on this side of relation
    :type application_name: str
    :param remote_application_name: Name of application on other side of relation
    :type remote_application_name: str
    :param remote_interface_name: Name of interface on remote end of relation
    :type remote_interface_name: Optional(str)
    :returns: Relation id of relation if found or None
    :rtype: any
    """
    async with run_in_model(model_name) as model:
        for rel in model.applications[application_name].relations:
            spec = '{}'.format(remote_application_name)
            if remote_interface_name is not None:
                spec += ':{}'.format(remote_interface_name)
            if rel.matches(spec):
                return rel.id
2447c08c57d2ed4548db547fb4c347987f0ac88b
26,389
# or_, asc and desc are SQLAlchemy constructs; the original
# `from operator import or_` would fail here, since operator.or_ takes
# exactly two arguments.
from sqlalchemy import asc, desc, or_


def get_timeseries_references(session_id, search_value, length, offset, column, order):
    """
    Gets a filtered list of timeseries references.

    This function will generate a filtered list of timeseries references
    belonging to a session given a search value. The length, offset, and
    order of the list can also be specified.
    """
    Session = app.get_persistent_store_database(
        "hydroshare_timeseries_manager",
        as_sessionmaker=True
    )
    session = Session()

    sortable_columns = [
        "status", "site_name", "site_code", "latitude", "longitude",
        "variable_name", "variable_code", "sample_medium", "begin_date",
        "end_date", "value_count", "method_link", "method_description",
        "network_name", "url", "service_type", "ref_type", "return_type"
    ]

    full_query = session.\
        query(
            TimeSeriesCatalog.status,
            TimeSeriesCatalog.status,
            TimeSeriesCatalog.site_name,
            TimeSeriesCatalog.site_code,
            TimeSeriesCatalog.latitude,
            TimeSeriesCatalog.longitude,
            TimeSeriesCatalog.variable_name,
            TimeSeriesCatalog.variable_code,
            TimeSeriesCatalog.sample_medium,
            TimeSeriesCatalog.begin_date,
            TimeSeriesCatalog.end_date,
            TimeSeriesCatalog.value_count,
            TimeSeriesCatalog.method_link,
            TimeSeriesCatalog.method_description,
            TimeSeriesCatalog.network_name,
            TimeSeriesCatalog.url,
            TimeSeriesCatalog.service_type,
            TimeSeriesCatalog.ref_type,
            TimeSeriesCatalog.return_type,
            TimeSeriesCatalog.timeseries_id,
            TimeSeriesCatalog.selected
        ).filter(
            TimeSeriesCatalog.session_id == session_id
        )

    if search_value != "":
        filtered_query = full_query.filter(
            or_(
                TimeSeriesCatalog.status.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.site_name.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.site_code.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.variable_name.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.variable_code.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.sample_medium.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.network_name.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.service_type.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.ref_type.ilike(f"%{search_value}%"),
                TimeSeriesCatalog.return_type.ilike(f"%{search_value}%"),
            )
        )
    else:
        filtered_query = full_query

    if order == "asc":
        ordered_query = filtered_query.order_by(
            asc(getattr(TimeSeriesCatalog, sortable_columns[int(column)]))
        )
    elif order == "desc":
        ordered_query = filtered_query.order_by(
            desc(getattr(TimeSeriesCatalog, sortable_columns[int(column)]))
        )
    else:
        ordered_query = filtered_query.order_by(
            asc(TimeSeriesCatalog.timeseries_id)
        )

    paginated_query = ordered_query.offset(offset).limit(length)

    selected_query = full_query.filter(
        TimeSeriesCatalog.selected == True
    )

    full_query_count = full_query.count()
    filtered_query_count = filtered_query.count()
    selected_query_count = selected_query.count()
    query_results = paginated_query.all()

    engine = session.get_bind()
    session.close()
    engine.dispose()

    return full_query_count, filtered_query_count, selected_query_count, query_results
67011d7d1956259c383cd2722ae4035c28e6a5f3
26,390
def mxprv_from_bip39_mnemonic(
    mnemonic: Mnemonic, passphrase: str = "", network: str = "mainnet"
) -> bytes:
    """Return BIP32 root master extended private key from BIP39 mnemonic."""
    seed = bip39.seed_from_mnemonic(mnemonic, passphrase)
    version = NETWORKS[network].bip32_prv
    return rootxprv_from_seed(seed, version)
ceb5f5e853f7964015a2a69ea2fdb26680acf2b3
26,392
def translate_text(
    text: str, source_language: str, target_language: str
) -> str:
    """Translates text into the target language.

    This method uses ISO 639-1 compliant language codes to specify languages.
    To learn more about ISO 639-1, see:
    https://www.w3schools.com/tags/ref_language_codes.asp

    Args:
        text: str. The text to be translated. If text contains html tags,
            Cloud Translate only translates content between tags, leaving the
            tags themselves untouched.
        source_language: str. An allowlisted language code.
        target_language: str. An allowlisted language code.

    Raises:
        ValueError. Invalid source language code.
        ValueError. Invalid target language code.

    Returns:
        str. The translated text.
    """
    if source_language not in LANGUAGE_CODE_ALLOWLIST:
        raise ValueError('Invalid source language code: %s' % source_language)
    if target_language not in LANGUAGE_CODE_ALLOWLIST:
        raise ValueError('Invalid target language code: %s' % target_language)
    if source_language == target_language:
        return text

    result = (
        CLIENT.translate(
            text,
            target_language=target_language,
            source_language=source_language))

    # Letting mypy know that result is a dict.
    assert isinstance(result, dict)
    translated_text = result['translatedText']
    return translated_text
ed82dbb2fd89398340ed6ff39132f95758bfab97
26,394
def evt_cache_staged_t(ticket):
    """Create an EvtCacheStaged event from `ticket`."""
    fc_keys = ['bfid']
    ev = _get_proto(ticket, fc_keys=fc_keys)
    ev['cache']['en'] = _set_cache_en(ticket)
    return EvtCacheStaged(ev)
86543ca98257cab28e4bfccef229c8d8e5b6893b
26,395
from typing import Dict


def _get_setup_keywords(pkg_data: dict, keywords: dict) -> Dict:
    """Gather all setuptools.setup() keyword args."""
    options_keywords = dict(
        packages=list(pkg_data),
        package_data={pkg: list(files) for pkg, files in pkg_data.items()},
    )
    keywords['options'].update(options_keywords)
    return keywords
34f2d52c484fc4e49ccaca574639929756cfa4dc
26,396
import six


def flatten(x):
    """flatten(sequence) -> list

    Returns a single, flat list which contains all elements retrieved
    from the sequence and all recursively contained sub-sequences
    (iterables).

    Examples:
    >>> flatten([1, 2, [3, 4], (5, 6)])
    [1, 2, 3, 4, 5, 6]
    >>> flatten([[[1, 2, 3], (42, None)], [4, 5], [6], 7, (8, 9, 10)])
    [1, 2, 3, 42, None, 4, 5, 6, 7, 8, 9, 10]"""
    result = []
    for el in x:
        # if isinstance(el, (list, tuple)):
        # Strings are iterable but must be treated as atoms, not recursed into.
        if hasattr(el, "__iter__") and not isinstance(el, six.string_types):
            result.extend(flatten(el))
        else:
            result.append(el)
    return list(result)
041807c1622f644c062a5adb0404d14589cc543b
26,397
from clawpack.visclaw import colormaps, geoplot
from numpy import linspace
from clawpack.visclaw.data import ClawPlotData
from clawpack.visclaw import gaugetools
import pylab
from numpy import ma
from pylab import plot, xticks, floor, xlabel


def setplot(plotdata=None):
    # --------------------------
    """
    Specify what is to be plotted at each frame.
    Input:  plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
    Output: a modified version of plotdata.
    """

    if plotdata is None:
        plotdata = ClawPlotData()

    plotdata.clearfigures()  # clear any old figures,axes,items data
    plotdata.format = 'binary'

    # To plot gauge locations on pcolor or contour plot, use this as
    # an afteraxis function:
    def addgauges(current_data):
        gaugetools.plot_gauge_locations(current_data.plotdata,
                                        gaugenos='all', format_string='ko',
                                        add_labels=True)

    # -----------------------------------------
    # Figure for surface
    # -----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Surface', figno=0)
    plotfigure.kwargs = {'figsize': (8, 5)}

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes('pcolor')
    plotaxes.title = 'Surface'
    plotaxes.scaled = True

    def fixup(current_data):
        addgauges(current_data)
        t = current_data.t
        t = t / 3600.  # hours
        pylab.title('Surface at %4.2f hours' % t, fontsize=20)
        pylab.grid(True)
        # pylab.xticks(fontsize=15)
        # pylab.yticks(fontsize=15)
    plotaxes.afteraxes = fixup

    # Water
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    # plotitem.plot_var = geoplot.surface
    plotitem.plot_var = geoplot.surface_or_depth
    plotitem.pcolor_cmap = colormaps.red_white_blue  # geoplot.tsunami_colormap
    plotitem.pcolor_cmin = sea_level - 0.1
    plotitem.pcolor_cmax = sea_level + 0.1
    plotitem.add_colorbar = False
    # plotitem.colorbar_shrink = 0.5
    plotitem.colorbar_shrink = 1.0
    plotitem.amr_celledges_show = [0, 0, 0]
    plotitem.patchedges_show = 0

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    plotitem.plot_var = geoplot.land
    plotitem.pcolor_cmap = geoplot.land_colors
    plotitem.pcolor_cmin = 0.0
    plotitem.pcolor_cmax = 100.0
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0, 0, 0]
    plotitem.patchedges_show = 0
    plotaxes.xlimits = [-230, -115]
    plotaxes.ylimits = [0, 65]

    # add contour lines of bathy if desired:
    plotitem = plotaxes.new_plotitem(plot_type='2d_contour')
    plotitem.show = False
    plotitem.plot_var = geoplot.topo
    plotitem.contour_levels = linspace(-3000, -3000, 1)
    plotitem.amr_contour_colors = ['y']  # color on each level
    plotitem.kwargs = {'linestyles': 'solid', 'linewidths': 2}
    plotitem.amr_contour_show = [1, 0, 0]
    plotitem.celledges_show = 0
    plotitem.patchedges_show = 0

    # -----------------------------------------
    # Figure for adjoint
    # -----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Adjoint ', figno=20)
    plotfigure.kwargs = {'figsize': (8, 5)}

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes('adjoint')
    plotaxes.scaled = True
    plotaxes.title = 'Adjoint flag'

    def fixup(current_data):
        addgauges(current_data)
        t = current_data.t
        t = t / 3600.  # hours
        pylab.title('Adjoint flag at %4.2f hours' % t, fontsize=20)
        pylab.grid(True)
    plotaxes.afteraxes = fixup

    adj_flag_tol = 0.000001

    def masked_inner_product(current_data):
        q = current_data.q
        soln = ma.masked_where(q[4, :, :] < adj_flag_tol, q[4, :, :])
        return soln

    def masked_regions(current_data):
        q = current_data.q
        soln = ma.masked_where(q[4, :, :] < 1e9, q[4, :, :])
        return soln

    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    plotitem.plot_var = 4  # masked_inner_product
    plotitem.pcolor_cmap = colormaps.white_red
    plotitem.pcolor_cmin = 0.5 * adj_flag_tol
    plotitem.pcolor_cmax = 6 * adj_flag_tol
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0]
    plotitem.amr_data_show = [1, 1, 0, 0, 0, 0, 0]
    plotitem.patchedges_show = 0

    # plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    # plotitem.plot_var = masked_regions
    # plotitem.pcolor_cmap = colormaps.white_blue
    # plotitem.pcolor_cmin = 9e9
    # plotitem.pcolor_cmax = 1.1e10
    # plotitem.add_colorbar = False
    # plotitem.amr_celledges_show = [0]
    # plotitem.amr_data_show = [1, 1, 0, 0]
    # plotitem.patchedges_show = 0

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    plotitem.plot_var = geoplot.land
    plotitem.pcolor_cmap = geoplot.land_colors
    plotitem.pcolor_cmin = 0.0
    plotitem.pcolor_cmax = 100.0
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0, 0, 0]
    plotitem.patchedges_show = 0
    plotaxes.xlimits = [-230, -115]
    plotaxes.ylimits = [0, 65]

    # -----------------------------------------
    # Figure for levels
    # -----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Grid patches', figno=10)
    plotfigure.kwargs = {'figsize': (8, 5)}

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.title = 'Grid patches'
    plotaxes.scaled = True

    def aa_patches(current_data):
        pylab.ticklabel_format(format='plain', useOffset=False)
        pylab.xticks([180, 200, 220, 240], rotation=20, fontsize=28)
        pylab.yticks(fontsize=28)
        t = current_data.t
        t = t / 3600.  # hours
        pylab.title('Grids patches at %4.2f hours' % t, fontsize=20)
        a = pylab.gca()
        a.set_aspect(1. / pylab.cos(41.75 * pylab.pi / 180.))
        pylab.grid(True)

    def fixup(current_data):
        addgauges(current_data)
        t = current_data.t
        t = t / 3600.  # hours
        pylab.title('Grids patches at %4.2f hours' % t, fontsize=20)
        pylab.grid(True)

    # Water
    plotitem = plotaxes.new_plotitem(plot_type='2d_patch')
    plotitem.amr_patch_bgcolor = [[1, 1, 1], [0.8, 0.8, 0.8], [0.8, 1, 0.8],
                                  [1, .7, .7], [0.6, 0.6, 1]]
    plotitem.amr_patchedges_color = ['k', 'k', 'g', 'r', 'b']
    plotitem.amr_celledges_show = [0]
    plotitem.amr_patchedges_show = [0, 1, 1, 1, 1]

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    plotitem.plot_var = geoplot.land
    plotitem.pcolor_cmap = geoplot.land_colors
    plotitem.pcolor_cmin = 0.0
    plotitem.pcolor_cmax = 100.0
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0]
    plotitem.amr_patchedges_show = [0]
    plotaxes.afteraxes = fixup
    plotaxes.xlimits = [-230, -115]
    plotaxes.ylimits = [0, 65]

    # -----------------------------------------
    # Zoom
    # -----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Crescent City', figno=1)

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes('pcolor')
    plotaxes.title = 'Surface'
    plotaxes.scaled = True
    plotaxes.afteraxes = fixup

    # Water
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    # plotitem.plot_var = geoplot.surface
    plotitem.plot_var = geoplot.surface_or_depth
    plotitem.pcolor_cmap = geoplot.tsunami_colormap
    plotitem.pcolor_cmin = sea_level - 0.1
    plotitem.pcolor_cmax = sea_level + 0.1
    plotitem.add_colorbar = True
    # plotitem.colorbar_shrink = 0.5
    plotitem.colorbar_shrink = 1.0
    plotitem.amr_celledges_show = [0, 0, 0]
    plotitem.patchedges_show = 0

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    plotitem.plot_var = geoplot.land
    plotitem.pcolor_cmap = geoplot.land_colors
    plotitem.pcolor_cmin = 0.0
    plotitem.pcolor_cmax = 100.0
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0, 0, 0]
    plotitem.patchedges_show = 0

    ######## Limits below encompass Crescent City
    plotaxes.xlimits = [-127, -123.5]
    plotaxes.ylimits = [40.5, 44.5]

    # -----------------------------------------
    # Zoom2
    # -----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Crescent City Zoomed', figno=2)

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes('pcolor')
    plotaxes.title = 'Surface'
    plotaxes.scaled = True
    plotaxes.afteraxes = fixup

    # Water
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    # plotitem.plot_var = geoplot.surface
    plotitem.plot_var = geoplot.surface_or_depth
    plotitem.pcolor_cmap = geoplot.tsunami_colormap
    plotitem.pcolor_cmin = sea_level - 0.1
    plotitem.pcolor_cmax = sea_level + 0.1
    plotitem.add_colorbar = True
    # plotitem.colorbar_shrink = 0.5
    plotitem.colorbar_shrink = 1.0
    plotitem.amr_celledges_show = [0, 0, 0]
    plotitem.patchedges_show = 0

    # Land
    plotitem = plotaxes.new_plotitem(plot_type='2d_pcolor')
    plotitem.plot_var = geoplot.land
    plotitem.pcolor_cmap = geoplot.land_colors
    plotitem.pcolor_cmin = 0.0
    plotitem.pcolor_cmax = 100.0
    plotitem.add_colorbar = False
    plotitem.amr_celledges_show = [0, 0, 0]
    plotitem.patchedges_show = 0

    ######## Limits below encompass Crescent City zoomed area
    plotaxes.xlimits = [-124.235, -124.143]
    plotaxes.ylimits = [41.716, 41.783]

    # -----------------------------------------
    # Figures for gauges
    # -----------------------------------------
    plotfigure = plotdata.new_plotfigure(name='Surface at gauges', figno=300,
                                         type='each_gauge')
    plotfigure.clf_each_gauge = True

    # Set up for axes in this figure:
    plotaxes = plotfigure.new_plotaxes()
    plotaxes.xlimits = [9.5 * 3600, 15 * 3600]
    plotaxes.ylimits = [-3, 3]
    plotaxes.title = 'Surface'

    # Plot surface as blue curve:
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
    plotitem.plot_var = 3
    plotitem.plotstyle = 'b-'

    # Plot topo as green curve:
    plotitem = plotaxes.new_plotitem(plot_type='1d_plot')
    plotitem.show = False

    def gaugetopo(current_data):
        q = current_data.q
        h = q[0, :]
        eta = q[3, :]
        topo = eta - h
        return topo
    plotitem.plot_var = gaugetopo
    plotitem.plotstyle = 'g-'

    def add_zeroline(current_data):
        t = current_data.t
        gaugeno = current_data.gaugeno
        plot(t, 0 * t, 'k')
        n = int(floor(t.max() / 3600.) + 2)
        # xticks([3600*i for i in range(n)], ['%i' % i for i in range(n)])
        xticks([3600 * i for i in range(9, n)], ['%i' % i for i in range(9, n)])
        xlabel('time (hours)')
    plotaxes.afteraxes = add_zeroline

    # -----------------------------------------
    # Parameters used only when creating html and/or latex hardcopy
    # e.g., via pyclaw.plotters.frametools.printframes:

    plotdata.printfigs = True                   # print figures
    plotdata.print_format = 'png'               # file format
    plotdata.print_framenos = 'all'             # list of frames to print
    plotdata.print_gaugenos = 'all'             # list of gauges to print
    plotdata.print_fignos = 'all'               # list of figures to print
    plotdata.html = True                        # create html files of plots?
    plotdata.html_homelink = '../README.html'   # pointer for top of index
    plotdata.latex = True                       # create latex file of plots?
    plotdata.latex_figsperline = 2              # layout of plots
    plotdata.latex_framesperline = 1            # layout of plots
    plotdata.latex_makepdf = False              # also run pdflatex?
    plotdata.parallel = True                    # make multiple frame png's at once

    return plotdata
a777686f5b8fafe8c2a109e486242a16d25a463b
26,398
from typing import Dict
import json


def load_spider_tables(filenames: str) -> Dict[str, Schema]:
    """Loads database schemas from a comma-separated list of filenames."""
    examples = {}
    for filename in filenames.split(","):
        with open(filename) as training_file:
            examples.update(process_dbs(json.load(training_file)))
    return examples
1575d0afd4efbe5f53d12be1c7dd3537e54fc46c
26,399
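A minimal usage sketch for load_spider_tables; the file paths are placeholders, and process_dbs and Schema are assumed to come from the surrounding module.

# Hypothetical paths; process_dbs/Schema must exist in the same module.
schemas = load_spider_tables("spider/tables.json,spider/dev_tables.json")
print(len(schemas), "database schemas loaded")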
from typing import Dict
from array import array  # the array *class*, needed for array('f') below


def extract_float_arrays(blockids: str, data: bytes) -> Dict[str, array]:
    """Extracts float arrays from raw scope, background trace, and recorder
    zoom binary data (block ids a, A, b, B, x, y, Y in the DLC pro
    'Scope, Lock, and Recorder Binary Data' format).

    Args:
        blockids: String of requested block id letters. Block ids not
            available in the input data or in the above list are ignored.
        data: Input byte sequence.

    Returns:
        Dictionary with found block ids as keys and arrays of floats
        (typecode 'f') as values.

    Raises:
        DataFormatError: If the contents of `data` do not conform to the
            'Scope, Lock, and Recorder Binary Data' format.
    """
    retval = {}
    for block in _binary_data_blocks(data):
        if block.id in blockids and block.id in 'aAbBxyY':
            values = array('f')  # float (IEEE 754 single precision)
            try:
                values.frombytes(block.payload)
            except ValueError as exc:
                raise DataFormatError(
                    "Invalid payload length in block '{}'".format(block.id)) from exc
            retval[block.id] = _letoh(values)
    return retval
8789f73d175b9d0f244b33b61d0fe1effa702ded
26,400
def rotate_image(path):
    """Rotate the image from path according to its EXIF orientation tag
    and return a wx.Image."""
    img = Image.open(path)
    try:
        exif = img._getexif()
        if exif[ORIENTATION_TAG] == 3:
            img = img.rotate(180, expand=True)
        elif exif[ORIENTATION_TAG] == 6:
            img = img.rotate(270, expand=True)
        elif exif[ORIENTATION_TAG] == 8:
            img = img.rotate(90, expand=True)
    except (AttributeError, KeyError, TypeError):
        # No EXIF data or no orientation tag; leave the image as-is.
        pass
    return pil_image_to_wx_image(img)
b4f450a3f6cb01a4d9c8e6c384edeabd0366ac63
26,401
def predict():
    """Predict endpoint.

    Chooses a model for prediction and predicts the bitcoin price for the
    given time period.

    @author: Andrii Koval, Yulia Khlyaka, Pavlo Mospan
    """
    data = request.json
    if data:
        predict = bool(data["predict"])
        if predict:
            if predictor.pred_dict["model"] == 0:
                # ARIMA
                arima_forecast = predictor.get_prediction_arima()
                plots.arima_df = arima_forecast
            elif predictor.pred_dict["model"] == 1:
                # Prophet
                prophet_forecast = predictor.get_prediction_prophet()
                plots.prophet_df = prophet_forecast
            elif predictor.pred_dict["model"] == 2:
                # LSTM
                lstm_forecast = predictor.get_prediction_bidirectlstm()
                plots.lstm_df = lstm_forecast
            else:
                pass
    return 'Non tam praeclarum est scire latine, quam turpe nescire'
90fbc72e3a57ad7dae3617bec20268c87a0c158a
26,402
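A hedged client-side sketch for exercising a view like the one above; the host, port, and /predict route are assumptions about how the Flask app is mounted.

import requests

# Assumed local dev server and route name; adjust to the actual app.
resp = requests.post("http://localhost:5000/predict", json={"predict": True})
print(resp.status_code, resp.text)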
from PIL import Image


def splice_imgs(img_list, vis_path):
    """Splice pictures horizontally and save the result to vis_path."""
    IMAGE_WIDTH, IMAGE_HEIGHT = img_list[0].size
    padding_width = 20
    img_num = len(img_list)
    # Create a new picture wide enough for all images plus padding
    to_image = Image.new(
        'RGB',
        (img_num * IMAGE_WIDTH + (img_num - 1) * padding_width, IMAGE_HEIGHT))
    padding = Image.new('RGB', (padding_width, IMAGE_HEIGHT), (255, 255, 255))

    # Loop through, pasting each picture into its position in order
    for i, from_image in enumerate(img_list):
        to_image.paste(from_image, (i * (IMAGE_WIDTH + padding_width), 0))
        if i < img_num - 1:
            to_image.paste(padding,
                           (i * (IMAGE_WIDTH + padding_width) + IMAGE_WIDTH, 0))
    return to_image.save(vis_path)
97d4ab32f1a734fbd04e7c558062867c2e6bd3b4
26,403
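A minimal usage sketch for splice_imgs; the tile sizes and colors are arbitrary.

from PIL import Image

# Two 64x64 solid-color tiles spliced side by side with 20px white padding.
imgs = [Image.new('RGB', (64, 64), c) for c in ((255, 0, 0), (0, 0, 255))]
splice_imgs(imgs, "spliced.png")  # writes a 148x64 image (64 + 20 + 64 wide)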
def create_segmented_colormap(cmap, values, increment):
    """Create a colormap with discretized colors.

    This was created mainly to plot a colorbar that has discretized values.

    Args:
        cmap: matplotlib colormap
        values: A list of the quantities being plotted
        increment: The increment used to bin the values

    Returns:
        A tuple with the cmap, the norm, and the colors.
    """
    bmin = values[0] - increment / 2
    bmax = values[-1] + 3 * increment / 2
    boundaries = np.arange(bmin, bmax, increment)
    norm = mpl.colors.BoundaryNorm(boundaries, len(values) + 1)
    norm2 = mpl.colors.Normalize(vmin=0, vmax=len(values))
    norm3 = mpl.colors.BoundaryNorm(
        np.arange(-0.5, len(values) + 0.5, 1), len(values) + 1
    )
    colors = cmap(norm2(norm(values + [values[-1] + increment])))
    cmap = mpl.colors.ListedColormap(colors, "hate")
    return cmap, norm3, colors
9ab8a0a95896e1ae0f8777b705f920196afc6627
26,404
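A sketch of drawing the discretized colorbar with the returned cmap and norm; the base colormap and value list are arbitrary choices.

import matplotlib as mpl
import matplotlib.pyplot as plt

values = [0, 1, 2, 3]
cmap, norm, colors = create_segmented_colormap(mpl.cm.viridis, values,
                                               increment=1)
# One colored block per value, thanks to the BoundaryNorm returned above.
fig, ax = plt.subplots(figsize=(4, 0.5))
fig.colorbar(mpl.cm.ScalarMappable(norm=norm, cmap=cmap),
             cax=ax, orientation="horizontal")
plt.show()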
from io import StringIO


def division_series_logs():
    """Pull Retrosheet Division Series Game Logs."""
    s = get_text_file(gamelog_url.format('DV'))
    data = pd.read_csv(StringIO(s), header=None, sep=',', quotechar='"')
    data.columns = gamelog_columns
    return data
414de8e0409bba9651bef81bbdc811e105d1d11f
26,405
def release_date(json):
    """Return the 'updated' date from the json content in argument."""
    return json['updated']
635efd7140860c8f0897e90433a539c8bd585945
26,406
def init_embedding_from_graph(
    _raw_data, graph, n_components, random_state, metric, _metric_kwds,
    init="spectral"
):
    """Initialize embedding using graph. This is for direct embeddings.

    Parameters
    ----------
    init : str, optional
        Type of initialization to use. Either random, or spectral, by
        default "spectral"

    Returns
    -------
    embedding : np.array
        the initialized embedding
    """
    if random_state is None:
        random_state = check_random_state(None)

    if isinstance(init, str) and init == "random":
        embedding = random_state.uniform(
            low=-10.0, high=10.0, size=(graph.shape[0], n_components)
        ).astype(np.float32)
    elif isinstance(init, str) and init == "spectral":
        # We add a little noise to avoid local minima for optimization to come
        initialisation = spectral_layout(
            _raw_data,
            graph,
            n_components,
            random_state,
            metric=metric,
            metric_kwds=_metric_kwds,
        )
        expansion = 10.0 / np.abs(initialisation).max()
        embedding = (initialisation * expansion).astype(
            np.float32
        ) + random_state.normal(
            scale=0.0001, size=[graph.shape[0], n_components]
        ).astype(
            np.float32
        )
    else:
        init_data = np.array(init)
        if len(init_data.shape) == 2:
            if np.unique(init_data, axis=0).shape[0] < init_data.shape[0]:
                tree = KDTree(init_data)
                dist, ind = tree.query(init_data, k=2)
                nndist = np.mean(dist[:, 1])
                embedding = init_data + random_state.normal(
                    scale=0.001 * nndist, size=init_data.shape
                ).astype(np.float32)
            else:
                embedding = init_data

    return embedding
22c2d939a47932b625491a1e685b055214753010
26,407
def basic_hash_table():
    """Return a non-empty hash table."""
    return HashTable(1)
b06e59c2a6767309e5394df6e51bdaf12c58d073
26,408
def scan_by_key(key, a, dim=0, op=BINARYOP.ADD, inclusive_scan=True):
    """
    Generalized scan by key of an array.

    Parameters
    ----------
    key : af.Array
        key array.
    a : af.Array
        Multi dimensional arrayfire array.
    dim : optional: int. default: 0
        Dimension along which the scan is performed.
    op : optional: af.BINARYOP. default: af.BINARYOP.ADD.
        Binary operation the scan algorithm uses. Can be one of:
        - af.BINARYOP.ADD
        - af.BINARYOP.MUL
        - af.BINARYOP.MIN
        - af.BINARYOP.MAX
    inclusive_scan : optional: bool. default: True
        Specifies if the scan is inclusive.

    Returns
    -------
    out : af.Array
        will contain the scan of the input.
    """
    out = Array()
    safe_call(backend.get().af_scan_by_key(
        c_pointer(out.arr), key.arr, a.arr, dim, op.value, inclusive_scan))
    return out
ea9556b3e2a87a08cca62d03da41edf5985d4156
26,409
def local_luminance_subtraction(image, filter_sigma, return_subtractor=False):
    """
    Computes an estimate of the local luminance and removes it from an image.

    Parameters
    ----------
    image : ndarray(float32 or uint8, size=(h, w, c))
        An image of height h and width w, with c color channels
    filter_sigma : float
        The standard deviation of the isotropic gaussian kernel that we use
        to compute a local estimate of the luminance
    return_subtractor : bool, optional
        If true, return the array used to do the luminance subtraction --
        this can be used to reverse the transform. Default False.

    Returns
    -------
    filtered_image : ndarray(float32, size=(h, w, c))
    subtractor : ndarray(float32, size=(h, w, c))
    """
    gaussian_kernel = get_gaussian_filter_2d(
        filter_sigma, (4*filter_sigma+1, 4*filter_sigma+1))
    local_luminance = filter_sd(image, gaussian_kernel)
    if return_subtractor:
        return image - local_luminance, local_luminance
    else:
        return image - local_luminance
d9cc46a205f495c0107211d7222be27f40d6896b
26,410
def createBank():
    """Create the bank.

    Returns:
        Bank: The bank.
    """
    return Bank(123456, 'My Piggy Bank', 'Tunja Downtown')
676f0d6cf330e7832064393b543a5ffd1f1068d1
26,411
def str_to_bool(param):
    """
    Convert a string value to a boolean.

    Attributes:
        param -- input query parameter
    """
    # Handle None before calling .upper(); the original membership test
    # ['FALSE', None] could never match None after upper-casing.
    if param is None:
        return False
    if param.upper() == 'TRUE':
        return True
    elif param.upper() == 'FALSE':
        return False
    else:
        raise InputValidationError(
            'Invalid query parameter. Param is {} and param type is {}'.format(
                param, type(param)))
ef860e2b2e623d98576c04ef1e397604576d8d48
26,412
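A quick behavioral check for str_to_bool; InputValidationError is assumed importable from the surrounding module.

assert str_to_bool("true") is True
assert str_to_bool("False") is False
assert str_to_bool(None) is False
try:
    str_to_bool("maybe")
except InputValidationError:
    print("rejected as expected")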
from typing import Dict, List, Optional
from datetime import datetime


def fetch_log_messages(attempt_id: Optional[int] = None,
                       task_id: Optional[int] = None,
                       min_severity: Optional[int] = None):
    """
    Fetch log messages from the database.

    :param attempt_id:
        Fetch only log messages associated with one particular task execution.
    :param task_id:
        Fetch only log messages associated with one particular task.
    :param min_severity:
        Fetch only log messages with a minimum severity level.
    :return:
        List[Dict]
    """
    output: List[Dict] = []

    # Open connection to the database
    task_db = task_database.TaskDatabaseConnection()

    # Build an SQL query for all matching log messages
    constraints = ["1"]
    if attempt_id is not None:
        constraints.append("l.generatedByTaskExecution = {:d}".format(attempt_id))
    if task_id is not None:
        constraints.append("et.taskId = {:d}".format(task_id))
    if min_severity is not None:
        constraints.append("l.severity >= {:d}".format(min_severity))

    # Search for all matching log messages
    task_db.conn.execute("""
SELECT l.timestamp, l.generatedByTaskExecution, l.severity, l.message
FROM eas_log_messages l
LEFT JOIN eas_scheduling_attempt esa on l.generatedByTaskExecution = esa.schedulingAttemptId
LEFT JOIN eas_task et on esa.taskId = et.taskId
WHERE {constraint}
ORDER BY generatedByTaskExecution, timestamp;
""".format(constraint=" AND ".join(constraints)))
    log_list = task_db.conn.fetchall()

    # Convert log events into dictionaries
    for item in log_list:
        message_class = 'info'
        if item['severity'] >= 30:
            message_class = 'warning'
        if item['severity'] >= 40:
            message_class = 'error'
        output.append({
            'attempt_id': item['generatedByTaskExecution'],
            'time': datetime.utcfromtimestamp(item['timestamp']).strftime('%Y-%m-%d %H:%M:%S'),
            'class': message_class,
            'message': item['message'].strip()
        })

    # Commit database
    task_db.commit()
    task_db.close_db()

    # Return results
    return output
10e78d0ee14647cf7a390287208a28aa957551a4
26,415
def part_1_solution_2(lines):
    """Shorter, but not very readable. A good example of "clever programming"
    that saves a few lines of code, while making it unbearably ugly.

    Counts the number of times a depth measurement increases."""
    return len([i for i in range(1, len(lines)) if lines[i] > lines[i - 1]])
d393f0385a1afbea4c2f3b4d3f51d8e7d0ade204
26,417
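A worked example using the sample depth list from Advent of Code 2021, day 1, which this function solves.

depths = [199, 200, 208, 210, 200, 207, 240, 269, 260, 263]
print(part_1_solution_2(depths))  # 7 increases, matching the puzzle's sample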
def get_lifecycle_configuration(bucket_name):
    """
    Get the lifecycle configuration of the specified bucket.

    Usage is shown in usage_demo at the end of this module.

    :param bucket_name: The name of the bucket to retrieve.
    :return: The lifecycle rules of the specified bucket.
    """
    s3 = get_s3()
    try:
        config = s3.Bucket(bucket_name).LifecycleConfiguration()
        logger.info("Got lifecycle rules %s for bucket '%s'.",
                    config.rules, bucket_name)
    except Exception:
        logger.exception("Couldn't get lifecycle configuration for bucket '%s'.",
                         bucket_name)
        raise
    else:
        return config.rules
380c884afddbf72db6474e60480bf75ebe67309e
26,418
def compute_reward(ori, new, target_ids):
    """Compute the reward for each target item."""
    reward = {}
    PE_dict = {}
    ori_RI, ori_ERI, ori_Revenue = ori
    new_RI, new_ERI, new_Revenue = new
    max_PE, min_PE, total_PE = 0, 0, 0
    for item in target_ids:
        PE = new_Revenue[item] - ori_Revenue[item]  # Eq. (3) in paper
        PE = min_PE if PE <= min_PE else PE
        max_PE = PE if PE >= max_PE else max_PE
        total_PE += PE
        PE_dict[item] = PE
    avg_PE = total_PE / len(target_ids)
    PE_interval = max_PE - min_PE

    # Eq. (9) in paper
    for item, PE in PE_dict.items():
        reward[item] = expit((PE - avg_PE) / PE_interval)
    return reward
a64a1c47089924d373c7435c5018c63fd4edbe74
26,419
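Worked numbers for the reward normalization above, as a sketch: the revenue values are made up, and the RI/ERI slots of the tuples are left empty because this function does not read them.

ori = ({}, {}, {'a': 10.0, 'b': 10.0})
new = ({}, {}, {'a': 14.0, 'b': 10.0})
print(compute_reward(ori, new, ['a', 'b']))
# PEs are 4 and 0, avg 2, interval 4, so the rewards are
# expit(+0.5) ~= 0.62 for 'a' and expit(-0.5) ~= 0.38 for 'b'.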
from typing import Any

import torch
from torch.utils.data import Sampler
from tqdm import tqdm  # the callable progress bar, not the bare module


def _compute_aspect_ratios_slow(dataset: Any, indices: Any = None) -> Any:
    """Compute the aspect ratios."""
    print(
        "Your dataset doesn't support the fast path for "
        "computing the aspect ratios, so will iterate over "
        "the full dataset and load every image instead. "
        "This might take some time..."
    )
    if indices is None:
        indices = range(len(dataset))

    class SubsetSampler(Sampler):  # type: ignore
        """Subset sampler."""

        def __init__(self, indices: Any) -> None:
            self.indices = indices

        def __iter__(self) -> Any:
            return iter(self.indices)

        def __len__(self) -> int:
            return len(self.indices)

    sampler = SubsetSampler(indices)

    data_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=1,
        sampler=sampler,
        num_workers=14,  # you might want to increase it for faster processing
        collate_fn=lambda x: x[0],
    )
    aspect_ratios = []
    with tqdm(total=len(dataset)) as pbar:
        for _i, (img, _) in enumerate(data_loader):
            pbar.update(1)
            height, width = img.shape[-2:]
            aspect_ratio = float(width) / float(height)
            aspect_ratios.append(aspect_ratio)
    return aspect_ratios
8fd183a5c353503067666526bdf6722bb53a4317
26,420
import getopt


def parse_args(input_args):
    """
    Parse the supplied command-line arguments and return the input file glob
    and metric spec strings.

    :param input_args: Command line arguments.
    :return: A triplet, the first element of which is the input file glob,
        the second element is the output file name (may be empty), the third
        element is a list of metric spec strings.
    """
    file_glob = ""
    output_file_name = ""
    try:
        opts, args = getopt.getopt(input_args, "hi:o:")
    except getopt.GetoptError as err:
        print(str(err))
        usage_and_die()

    for o, a in opts:
        if o == "-h":
            usage_and_die()
        elif o == "-i":
            file_glob = a
        elif o == "-o":
            output_file_name = a
        else:
            usage_and_die()

    if not file_glob:
        usage_and_die()

    return file_glob, output_file_name, args
a15327a3aa2aae86ef8ad14ebcae46b6f3593503
26,421
import jax
import jax.numpy as jnp
import numpy as np


def _diffuse(field: jnp.ndarray, diffusion_coeff: float,
             delta_t: float) -> jnp.ndarray:
    """
    Average each value in a vector field closer to its neighbors to simulate
    diffusion and viscosity.

    Parameters
    ----------
    field
        The vector field to diffuse. *Shape: [y, x, any].*
    diffusion_coeff
        A coefficient determining the amount of diffusion at each frame.
        Must be static during JIT tracing.
    delta_t
        The time elapsed in each timestep. Must be static during JIT tracing.

    Returns
    -------
    jnp.ndarray
        `field`, with diffusion applied for this frame.
    """
    # Compile-time: precompute neighbor averaging kernel.
    # The four neighbors get weight/4 each and the center keeps the rest,
    # so the kernel sums to 1 and conserves the field's total.
    neighbor_weight = diffusion_coeff * delta_t
    neighbor_kernel = np.array(
        [
            [0, neighbor_weight / 4, 0],
            [neighbor_weight / 4, 1 - neighbor_weight, neighbor_weight / 4],
            [0, neighbor_weight / 4, 0],
        ]
    )
    neighbor_kernel = jax.device_put(neighbor_kernel)

    return jax.scipy.signal.convolve2d(field, neighbor_kernel, mode="same")
e05763df93164bd7b4baa778720dbecc32fea228
26,422
def dAdzmm_ron_s0(u0, M, n2, lamda, tsh, dt, hf, w_tiled):
    """
    Calculates the nonlinear operator for a given field u0.

    use: dA = dAdzmm(u0)
    """
    print(u0.real.flags)
    print(u0.imag.flags)
    M3 = uabs(np.ascontiguousarray(u0.real), np.ascontiguousarray(u0.imag))
    temp = fftshift(ifft(fft(M3) * hf))
    # for i in (M, u0, M3, dt, temp):
    #     print(i.dtype)
    N = nonlin_ram(M, u0, M3, dt, temp)
    N *= -1j * n2 * 2 * pi / lamda
    return N
57c958bca07b77eaa406cb6b95bf5719af34075b
26,423
def create_support_bag_of_embeddings_reader(reference_data, **options):
    """
    A reader that creates sequence representations of the input reading
    instance, and then models each question and candidate as the sum of the
    embeddings of their tokens.

    :param reference_data: the reference training set that determines the
        vocabulary.
    :param options: repr_dim, candidate_split (used for tokenizing
        candidates), question_split
    :return: a MultipleChoiceReader.
    """
    tensorizer = SequenceTensorizer(reference_data)

    candidate_dim = options['repr_dim']
    support_dim = options['support_dim']

    # question embeddings: for each symbol a [support_dim, candidate_dim] matrix
    question_embeddings = tf.Variable(
        tf.random_normal((tensorizer.num_symbols, support_dim, candidate_dim),
                         dtype=_FLOAT_TYPE),
        dtype=_FLOAT_TYPE)
    # [batch_size, max_question_length, support_dim, candidate_dim]
    question_encoding_raw = tf.gather(question_embeddings, tensorizer.questions)
    # question encoding should have shape:
    # [batch_size, 1, support_dim, candidate_dim], so reduce and keep dims
    question_encoding = tf.reduce_sum(question_encoding_raw, 1, keep_dims=True)

    # candidate embeddings: for each symbol a [candidate_dim] vector
    candidate_embeddings = tf.Variable(
        tf.random_normal((tensorizer.num_symbols, candidate_dim),
                         dtype=_FLOAT_TYPE),
        dtype=_FLOAT_TYPE)
    # [batch_size, num_candidates, max_candidate_length, candidate_dim]
    candidate_encoding_raw = tf.gather(candidate_embeddings, tensorizer.candidates)
    # candidate embeddings should have shape:
    # [batch_size, num_candidates, 1, candidate_dim]
    candidate_encoding = tf.reduce_sum(candidate_encoding_raw, 2, keep_dims=True)

    # each symbol has a [support_dim] vector
    support_embeddings = tf.Variable(
        tf.random_normal((tensorizer.num_symbols, support_dim),
                         dtype=_FLOAT_TYPE),
        dtype=_FLOAT_TYPE)
    # [batch_size, max_support_num, max_support_length, support_dim]
    support_encoding_raw = tf.gather(support_embeddings, tensorizer.support)
    # support encoding should have shape: [batch_size, 1, support_dim, 1]
    support_encoding = tf.expand_dims(
        tf.expand_dims(tf.reduce_sum(support_encoding_raw, (1, 2)), 1), 3)

    # scoring with a dot product
    # [batch_size, num_candidates, support_dim, candidate_dim]
    combined = question_encoding * candidate_encoding * support_encoding
    scores = tf.reduce_sum(combined, (2, 3))

    loss = create_softmax_loss(scores, tensorizer.target_values)
    return MultipleChoiceReader(tensorizer, scores, loss)
0b09423e9d62d1e5c7c99bd1ebede22ca490797a
26,424
def get_hosts_cpu_frequency(ceilo, hosts):
    """Get the CPU frequency for each host in hosts.

    :param ceilo: A Ceilometer client.
    :type ceilo: *
    :param hosts: A set of hosts
    :type hosts: list(str)
    :return: A dictionary of (host, cpu_frequency)
    :rtype: dict(str: *)
    """
    hosts_cpu_total = dict()  # dict of (host, cpu_max_frequency)
    for host in hosts:
        host_id = "_".join([host, host])
        cpu_frequency_list = ceilo.samples.list(
            meter_name='compute.node.cpu.frequency',
            limit=1,
            q=[{'field': 'resource_id', 'op': 'eq', 'value': host_id}])
        if cpu_frequency_list:
            hosts_cpu_total[host] = cpu_frequency_list[0].counter_volume
    return hosts_cpu_total
aa6049b9d011d187e1a246a413835aafdbb5c6dc
26,425
def _cloture(exc):
    """
    Return a function which will accept any arguments
    but raise the exception when called.

    Parameters
    ----------
    exc : Exception
        Will be raised later

    Returns
    -------
    failed : function
        When called will raise `exc`
    """
    # scoping will save the exception
    def failed(*args, **kwargs):
        raise exc
    return failed
b2e22f5b4bd267d1945b7f759f5ddfb1ee8c44e5
26,426
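A minimal usage sketch for _cloture: the returned callable swallows any arguments, then raises the stored exception.

failed = _cloture(ValueError("deferred failure"))
try:
    failed(1, 2, key="word")   # accepts anything, then raises
except ValueError as err:
    print(err)                 # -> deferred failure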
from typing import Dict
import json
from shlex import quote  # used below; missing from the original snippet


def _with_environment_variables(cmd: str,
                                environment_variables: Dict[str, object]):
    """Prepend environment variables to a shell command.

    Args:
        cmd (str): The base command.
        environment_variables (Dict[str, object]): The set of environment
            variables. If an environment variable value is a dict, it will
            automatically be converted to a one line yaml string.
    """
    as_strings = []
    for key, val in environment_variables.items():
        val = json.dumps(val, separators=(",", ":"))
        s = "export {}={};".format(key, quote(val))
        as_strings.append(s)
    all_vars = "".join(as_strings)
    return all_vars + cmd
ae27d9e7a62f49e836f1c1b116205f318d9d0dd3
26,427
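A quick sketch of the output shape; note that string values pick up an extra layer of JSON quotes before shell-quoting.

print(_with_environment_variables("python train.py",
                                  {"RAY_ADDRESS": "auto", "N": 3}))
# -> export RAY_ADDRESS='"auto"';export N=3;python train.py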
def update_spam_assets(db: 'DBHandler') -> int:
    """
    Update the list of ignored assets using query_token_spam_list, avoiding
    the addition of duplicates. Returns the number of assets that were added
    to the ignore list.
    """
    spam_tokens = query_token_spam_list(db)
    # Order matters here. Make sure ignored_assets are queried after spam
    # token creation, since it's possible for a token to exist in ignored
    # assets but not in the global DB. In that case query_token_spam_list
    # adds it to the global DB.
    ignored_assets = {asset.identifier for asset in db.get_ignored_assets()}
    assets_added = 0
    for token in spam_tokens:
        if token.identifier in ignored_assets:
            continue
        db.add_to_ignored_assets(token)
        assets_added += 1
    return assets_added
4e7f4e5ae8a6b92ebd5a60a34d9330330690b663
26,428
import png


def png_info(path):
    """Return a dict with info about the png."""
    r = png.Reader(filename=path)
    x, y, frames, info = r.read()
    return info
97b9df7dd7800f350695e8d678d25154c7a4b2b8
26,430
def _(data: ndarray, outliers: ndarray, show_report: bool = True) -> ndarray:
    """Process ndarrays."""
    if type(data) != type(outliers):
        raise TypeError("`data` and `outliers` must be same type")

    # convert to DataFrame or Series
    data = DataFrame(data).squeeze()
    outliers = DataFrame(outliers).squeeze()

    # dispatch to relevant function and convert back to ndarray
    return trim(data, outliers).to_numpy()
1c478af8a6fffcf6240b2547782ee3fc256fdd0c
26,431
import six


def bool_from_string(subject, strict=False, default=False):
    """
    Convert a string to a boolean value.

    :param subject: the object to convert
    :type subject: str
    :param strict: whether to accept only values from the known lists
    :type strict: bool
    :param default: the value returned when conversion fails
    :type default: bool
    :returns: the conversion result
    :rtype: bool
    """
    TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
    FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

    if isinstance(subject, bool):
        return subject
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    lowered = subject.strip().lower()

    if lowered in TRUE_STRINGS:
        return True
    elif lowered in FALSE_STRINGS:
        return False
    elif strict:
        acceptable = ', '.join(
            "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
        msg = ("Unrecognized value '%(val)s', acceptable values are:"
               " %(acceptable)s" % {'val': subject, 'acceptable': acceptable})
        raise ValueError(msg)
    else:
        return default
3c6efa416471da391e60b82aec3753d823ee2878
26,432
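A short behavioral sketch of the truthy/falsy string handling above.

print(bool_from_string("YES"))          # True  ('yes' after lowering)
print(bool_from_string("off"))          # False
print(bool_from_string("bogus"))        # False (falls back to default)
bool_from_string("bogus", strict=True)  # raises ValueError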
def prot_to_vector(seq: str) -> np.ndarray:
    """Concatenate the amino acid features for each position of the sequence.

    Args:
        seq: A string representing an amino acid sequence.

    Returns:
        A numpy array of features, shape (len(seq), features)
    """
    # convert to uppercase
    seq = seq.upper()
    try:
        chain = [aa_feats.loc[pos].values for pos in seq]
    except KeyError as e:
        print(e)
        raise ValueError("Invalid string character encountered in prot_to_vector")
    return np.concatenate(chain, axis=0).reshape(len(seq), -1)
ac5293ee67698243e4910c87944133f9697d8646
26,433
def set_partition(num, par):
    """
    Return questions about partitions of a generated set.

    :param num: number of questions.
    :param par: type of items in the set, as described in the documentation.
    :return: questions in JSON format.
    """
    output = question_list_maker(num, par, 'set-partition')
    return jsonify(output)
71540d753020e5333558098b7edf96c4318fb316
26,435
def meanS_heteroscedastic_metric(nout):
    """This function computes the mean log of the variance (log S) for the
    heteroscedastic model. The mean log is computed over the standard
    deviation prediction; the mean prediction is not taken into account.

    Parameters
    ----------
    nout : int
        Number of outputs without uq augmentation
    """
    def metric(y_true, y_pred):
        """
        Parameters
        ----------
        y_true : Keras tensor
            Keras tensor including the ground truth
        y_pred : Keras tensor
            Keras tensor including the predictions of a heteroscedastic
            model. The predictions follow the order:
            (mean_0, S_0, mean_1, S_1, ...) with S_i the log of the
            variance for the ith output.
        """
        if nout > 1:
            log_sig2 = y_pred[:, 1::nout]
        else:
            log_sig2 = y_pred[:, 1]

        return K.mean(log_sig2)

    metric.__name__ = 'meanS_heteroscedastic'
    return metric
75bc5ddb482cc0e99bb4f5f9b0d557321b57cf06
26,436
def ec2_connect(module):
    """Return an ec2 connection."""
    region, ec2_url, boto_params = get_aws_connection_info(module)

    # If we have a region specified, connect to its endpoint.
    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **boto_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    # Otherwise, no region so we fallback to the old connection method
    elif ec2_url:
        try:
            ec2 = boto.connect_ec2_endpoint(ec2_url, **boto_params)
        except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="Either region or ec2_url must be specified")

    return ec2
d94b5a1359a31657aa2dffd1f0c11d9cd06493dd
26,437
import glob


def read_lris(raw_file, det=None, TRIM=False):
    """
    Read a raw LRIS data frame (one or more detectors)
    Packed in a multi-extension HDU
    Based on readmhdufits.pro

    Parameters
    ----------
    raw_file : str
        Filename
    det : int, optional
        Detector number; Default = both
    TRIM : bool, optional
        Trim the image?  This doesn't work....

    Returns
    -------
    array : ndarray
        Combined image
    header : FITS header
    sections : list
        List of datasec, oscansec, ampsec sections
        datasec, oscansec needs to be for an *unbinned* image as per
        standard convention
    """
    # Check for file; allow for extra .gz, etc. suffix
    fil = glob.glob(raw_file + '*')
    if len(fil) != 1:
        # Pass both arguments the message format expects
        msgs.error("Found {:d} files matching {:s}".format(len(fil), raw_file))

    # Read
    msgs.info("Reading LRIS file: {:s}".format(fil[0]))
    hdu = fits.open(fil[0])
    head0 = hdu[0].header

    # Get post, pre-pix values
    precol = head0['PRECOL']
    postpix = head0['POSTPIX']
    preline = head0['PRELINE']
    postline = head0['POSTLINE']

    # Setup for datasec, oscansec
    dsec = []
    osec = []

    # get the x and y binning factors...
    binning = head0['BINNING']
    xbin, ybin = [int(ibin) for ibin in binning.split(',')]

    # First read over the header info to determine the size of the output array...
    n_ext = len(hdu) - 1  # Number of extensions (usually 4)
    xcol = []
    xmax = 0
    ymax = 0
    xmin = 10000
    ymin = 10000
    for i in np.arange(1, n_ext + 1):
        theader = hdu[i].header
        detsec = theader['DETSEC']
        if detsec != '0':
            # parse the DETSEC keyword to determine the size of the array.
            x1, x2, y1, y2 = np.array(parse.load_sections(detsec, fmt_iraf=False)).flatten()

            # find the range of detector space occupied by the data
            # [xmin:xmax,ymin:ymax]
            xt = max(x2, x1)
            xmax = max(xt, xmax)
            yt = max(y2, y1)
            ymax = max(yt, ymax)

            # find the min size of the array
            xt = min(x1, x2)
            xmin = min(xmin, xt)
            yt = min(y1, y2)
            ymin = min(ymin, yt)
            # Save
            xcol.append(xt)

    # determine the output array size...
    nx = xmax - xmin + 1
    ny = ymax - ymin + 1

    # change size for binning...
    nx = nx // xbin
    ny = ny // ybin

    # Update PRECOL and POSTPIX
    precol = precol // xbin
    postpix = postpix // xbin

    # Deal with detectors
    if det in [1, 2]:
        nx = nx // 2
        n_ext = n_ext // 2
        det_idx = np.arange(n_ext, dtype=int) + (det - 1) * n_ext
        ndet = 1
    elif det is None:
        ndet = 2
        det_idx = np.arange(n_ext).astype(int)
    else:
        raise ValueError('Bad value for det')

    # change size for pre/postscan...
    if not TRIM:
        nx += n_ext * (precol + postpix)
        ny += preline + postline

    # allocate output array...
    array = np.zeros((nx, ny))
    order = np.argsort(np.array(xcol))

    # insert extensions into master image...
    for kk, i in enumerate(order[det_idx]):

        # grab complete extension...
        data, predata, postdata, x1, y1 = lris_read_amp(hdu, i + 1)
        # , linebias=linebias, nobias=nobias, $
        # x1=x1, x2=x2, y1=y1, y2=y2, gaindata=gaindata)

        # insert components into output array...
        if not TRIM:
            # insert predata...
            buf = predata.shape
            nxpre = buf[0]
            xs = kk * precol
            xe = xs + nxpre
            '''
            if keyword_set(VERBOSITY) then begin
                section = '['+stringify(xs)+':'+stringify(xe)+',*]'
                message, 'inserting extension '+stringify(i)+ $
                         ' predata in '+section, /info
            endif
            '''
            array[xs:xe, :] = predata

            # insert data...
            buf = data.shape
            nxdata = buf[0]
            nydata = buf[1]
            xs = n_ext * precol + kk * nxdata  # (x1-xmin)/xbin
            xe = xs + nxdata
            # section = '[{:d}:{:d},{:d}:{:d}]'.format(preline, nydata - postline, xs, xe)  # Eliminate lines
            section = '[{:d}:{:d},{:d}:{:d}]'.format(
                preline * ybin, (nydata - postline) * ybin,
                xs * xbin, xe * xbin)  # Eliminate lines
            dsec.append(section)
            # print('data', xs, xe)
            array[xs:xe, :] = data  # Include postlines

            # insert postdata...
            buf = postdata.shape
            nxpost = buf[0]
            xs = nx - n_ext * postpix + kk * postpix
            xe = xs + nxpost
            # section = '[:,{:d}:{:d}]'.format(xs * xbin, xe * xbin)
            section = '[{:d}:{:d},{:d}:{:d}]'.format(
                preline * ybin, (nydata - postline) * ybin,
                xs * xbin, xe * xbin)
            osec.append(section)
            '''
            if keyword_set(VERBOSITY) then begin
                section = '['+stringify(xs)+':'+stringify(xe)+',*]'
                message, 'inserting extension '+stringify(i)+ $
                         ' postdata in '+section, /info
            endif
            '''
            array[xs:xe, :] = postdata
        else:
            buf = data.shape
            nxdata = buf[0]
            nydata = buf[1]

            xs = (x1 - xmin) // xbin
            xe = xs + nxdata
            ys = (y1 - ymin) // ybin
            ye = ys + nydata - postline

            yin1 = preline
            yin2 = nydata - postline

            '''
            if keyword_set(VERBOSITY) then begin
                section = '['+stringify(xs)+':'+stringify(xe)+ $
                          ','+stringify(ys)+':'+stringify(ye)+']'
                message, 'inserting extension '+stringify(i)+ $
                         ' data in '+section, /info
            endif
            '''
            array[xs:xe, ys:ye] = data[:, yin1:yin2]

    # make sure BZERO is a valid integer for IRAF
    obzero = head0['BZERO']
    head0['O_BZERO'] = obzero
    head0['BZERO'] = 32768 - obzero

    # Return, transposing array back to goofy Python indexing
    return array.T, head0, (dsec, osec)
98351f63a78a37ac8cbb3282e71903ee6dd6bbb1
26,438
def mu(n: int) -> int:
    """Return the value of the Moebius function on n.

    Examples:
        >>> mu(3*5*2)
        -1
        >>> mu(3*5*2*17)
        1
        >>> mu(3*3*5*2)
        0
        >>> mu(1)
        1
        >>> mu(5)
        -1
        >>> mu(2**10-1)
        -1
    """
    if n == 1:
        return 1
    else:
        facts = factor(n)
        len_ = len(facts)
        if len(set(facts)) < len_:
            return 0
        return (-1)**len_
b8347480041de2dc9dfc469096293e3815eebbcd
26,439
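A quick sanity check of mu against the classical identity that the Moebius function sums to zero over the divisors of any n > 1; it assumes the module's factor() helper is available, and the brute-force divisors() helper is defined here just for the test.

def divisors(n):
    return [d for d in range(1, n + 1) if n % d == 0]

for n in range(2, 50):
    assert sum(mu(d) for d in divisors(n)) == 0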
def find_last(arr, val, mask=None, compare="eq"):
    """
    Return the index of the last occurrence of *val* in *arr* (or, if
    *compare* is not 'eq', of the last position where *arr* *compare* *val*
    holds). Returns -1 if there is no such occurrence.

    Parameters
    ----------
    arr : device array
    val : scalar
    mask : mask of the array
    compare : str ('gt', 'lt', or 'eq' (default))
    """
    found_col = find_index_of_val(arr, val, mask=mask, compare=compare)
    found_col = found_col.find_and_replace([arr.size], [None], True)

    max_index = found_col.max()
    return -1 if max_index is None or np.isnan(max_index) else max_index
376a21174bc26ca332768aadf96b84b06e7f55f5
26,440
def find_orphans(input_fits, header_ihdus_keys):
    """Return a list of (ihdu, key) tuples for header cards present in
    'input_fits' but missing from 'header_ihdus_keys'.

    Parameters
    ----------
    input_fits : astropy.io.fits.HDUList instance
        FITS file where to find orphan header cards
    header_ihdus_keys : list
        a list of tuples=(ihdu, key) for the reference header cards

    Returns
    -------
    orphans : list
        list of orphan header keys
    """
    ihdus, keys = zip(*header_ihdus_keys)
    orphans = []
    for ihdu, lvm_hdu in enumerate(input_fits):
        hdu_mask = np.array(ihdus) == ihdu
        orphan_keys = OrderedSet(lvm_hdu.header.keys()) - OrderedSet(np.array(keys)[hdu_mask])
        orphans.extend(zip([ihdu] * len(orphan_keys), orphan_keys))
    return orphans
bcc98722ba43450ff68367f776a84c0e193447d9
26,441
def Rotation_multiplyByBodyXYZ_NInv_P(cosxy, sinxy, qdot):
    """
    Rotation_multiplyByBodyXYZ_NInv_P(Vec2 cosxy, Vec2 sinxy, Vec3 qdot) -> Vec3

    Parameters
    ----------
    cosxy: SimTK::Vec2 const &
    sinxy: SimTK::Vec2 const &
    qdot: SimTK::Vec3 const &
    """
    return _simbody.Rotation_multiplyByBodyXYZ_NInv_P(cosxy, sinxy, qdot)
93303a83224b29d9dddf09a64f36d7004ae2ace0
26,443
import numpy as np


def sqrt(x: float):
    """
    Take the square root of a positive number.

    Arguments:
        x (float): the number to square-root

    Returns:
        (float): √x

    Raises:
        (ValueError): If the number is negative
    """
    if x < 0:
        raise ValueError('Cannot square-root a negative number with this '
                         'function!')
    return np.sqrt(x)
ab43573010044ffa3861f6a13a58135be52c02b4
26,444
def get_tree_type(tree):
    """Return the (sub)tree type: 'root', 'nucleus', 'satellite', 'text'
    or 'leaf'.

    Parameters
    ----------
    tree : nltk.tree.ParentedTree
        a tree representing a rhetorical structure (or a part of it)
    """
    if is_leaf_node(tree):
        return SubtreeType.leaf

    tree_type = tree.label().lower().split(':')[0]
    assert tree_type in SUBTREE_TYPES
    return tree_type
15d292ab1f756594add92a6999c7874f6d7fc45b
26,445
def intersection(ls1, ls2):
    """
    Return the intersection of two lists without repetition.

    This function uses the built-in Python set() to remove repeated values,
    so inputs must be cast to list first.

    Parameters
    ----------
    ls1 : Python list
        The first list. Cannot be array.
    ls2 : Python list
        The second list. Cannot be array.

    Returns
    -------
    ls3 : Python list
        The list of overlapping values between ls1 and ls2
    """
    temp = set(ls1)
    ls3 = [value for value in ls2 if value in temp]
    return ls3
fb3bda67d8040da5f4f570e8ff10a8503e153f36
26,446
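A short usage sketch; note that order (and any duplicates) follow ls2, since ls1 only provides the lookup set.

print(intersection([1, 2, 3, 4], [3, 4, 5]))  # [3, 4]
print(intersection([1, 2], [2, 2, 1]))        # [2, 2, 1]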
def params_count(model):
    """
    Computes the number of parameters.

    Args:
        model (model): model to count the number of parameters.
    """
    return np.sum([p.numel() for p in model.parameters()]).item()
12bb8463f6eb722a5cb7e7adfdf869764be67944
26,448
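A minimal usage sketch with a toy torch module; numpy is assumed imported as np, as the function above requires.

import torch

model = torch.nn.Linear(10, 5)  # 10*5 weights + 5 biases
print(params_count(model))      # 55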
from pathlib import Path
import platform
import shutil


def open_cmd_in_path(file_path: Path) -> int:
    """Open a terminal in the selected folder."""
    if platform.system() == "Linux":
        return execute_cmd(
            ["x-terminal-emulator", "-e", "cd", f"{str(file_path)}", "bash"],
            True)
    elif platform.system() == "Windows":
        cmd_path = shutil.which("cmd")
        if cmd_path:
            return execute_app(Path(cmd_path), True, f"/k cd {str(file_path)}")
    return 0
899424cd8ab2d76a5ca47d7219a7057d29bb5abe
26,449
def get_f(user_id, ftype):
    """Get one's followers/following.

    :param str user_id: target's user id
    :param str ftype: follower or following
    :return: a mapping from follower/following id to screen name
    :rtype: Dict
    """
    p = dict(user_id=user_id, count=200, stringify_ids=True,
             include_user_entities=True, cursor=-1)
    f = []
    if ftype == 'follower':
        resource_uri = 'https://api.twitter.com/1.1/followers/list.json'
    elif ftype == 'following':
        resource_uri = 'https://api.twitter.com/1.1/friends/list.json'
    else:
        raise Exception('Unknown type: ' + ftype)
    while True:
        # Retry the request until it goes through
        while True:
            try:
                j = twitter().get(resource_uri, params=p).json()
                break
            except ConnectionError:
                pass
        if 'errors' in j:
            raise Exception(j['errors'])
        if 'error' in j:
            raise Exception(j['error'])
        f.extend([(str(u['id']), u['screen_name']) for u in j['users']])
        if j['next_cursor'] != 0:
            p['cursor'] = j['next_cursor']
        else:
            break
    return dict(f)
31371d823509c8051660ca0869556253af6b99cc
26,450