content (string, lengths 35–762k) · sha1 (string, length 40) · id (int64, 0–3.66M)
def cancel_job(request):  # pylint: disable=unused-argument
    """Handler for `cancel_job/` request."""
    if not job_ids:
        print('No jobs are running, nothing to cancel!')
    else:
        job_id = job_ids.popleft()
        print('CANCELING JOB:', job_id)
        long_job.cancel(job_id)
    return django.http.HttpResponse()
365b305b88329cf394f3b7d36c6cd3e02121b5a1
29,832
def LO_solver_multiprocessing(H, N, dis, args, pipe):
    """
    Solves the Hamiltonian using several CPUs.

    Parameters
    ----------
    H: arr
        Discretized Lutchyn-Oreg Hamiltonian built with Lutchyn_builder.
    N: int or arr
        Number of sites. If it is an array, each element is the number of
        sites in each direction.
    dis: int or arr
        Distance (in nm) between sites. If it is an array, each element is
        the distance between sites in each direction.
    args: dictionary
        Dictionary with the keyword arguments of Lutchyn_solver.
    pipe: pipe
        Pipe to the corresponding process.
    """
    # Send work to a given process:
    E, U = LO_solver(H, N, dis, 1, n_CPU=1,
                     mu=args['mu'], B=args['B'], aR=args['aR'], d=args['d'],
                     BdG=args['BdG'], space=args['space'], k_vec=args['k_vec'],
                     m_eff=args['m_eff'], sparse=args['sparse'],
                     n_eig=args['n_eig'], near=args['near'],
                     section=args['section'], method=args['method'],
                     Nxp=args['Nxp'], n_orb=args['n_orb'])

    # Recover output:
    pipe.send((E, U))

    # Close process:
    pipe.close()

    return True
b4738ababdd47a9dc633760dbc3ee7f29744e1e3
29,833
def _clip_grad(clip_type, clip_value, grad):
    """
    Clip gradients.

    Inputs:
        clip_type (int): The way to clip, 0 for 'value', 1 for 'norm'.
        clip_value (float): Specifies how much to clip.
        grad (tuple[Tensor]): Gradients.

    Outputs:
        tuple[Tensor], clipped gradients.
    """
    if clip_type not in (0, 1):
        return grad
    dt = ops.dtype(grad)
    if clip_type == 0:
        new_grad = ops.clip_by_value(
            grad,
            ops.cast(ops.tuple_to_array((-clip_value,)), dt),
            ops.cast(ops.tuple_to_array((clip_value,)), dt))
    else:
        new_grad = nn.ClipByNorm()(grad, ops.cast(ops.tuple_to_array((clip_value,)), dt))
    return new_grad
998003fa6ef24e917af55cdd831034cafceeed74
29,834
def lightness_correlate(A, A_w, c, z):
    """
    Returns the *Lightness* correlate :math:`J`.

    Parameters
    ----------
    A : numeric or array_like
        Achromatic response :math:`A` for the stimulus.
    A_w : numeric or array_like
        Achromatic response :math:`A_w` for the whitepoint.
    c : numeric or array_like
        Surround exponential non linearity :math:`c`.
    z : numeric or array_like
        Base exponential non linearity :math:`z`.

    Returns
    -------
    numeric or ndarray
        *Lightness* correlate :math:`J`.

    Examples
    --------
    >>> A = 23.9394809667
    >>> A_w = 46.1882087914
    >>> c = 0.69
    >>> z = 1.9272135955
    >>> lightness_correlate(A, A_w, c, z)  # doctest: +ELLIPSIS
    41.7310911...
    """
    A = as_float_array(A)
    A_w = as_float_array(A_w)
    c = as_float_array(c)
    z = as_float_array(z)

    J = 100 * spow(A / A_w, c * z)
    return J
2692225728d9621ac427cedafcb18c9fe014d4ac
29,835
def fetch_file_from_guest(module, content, vm, username, password, src, dest):
    """ Use VMWare's filemanager api to fetch a file over http """

    result = {'failed': False}

    tools_status = vm.guest.toolsStatus
    if tools_status == 'toolsNotInstalled' or tools_status == 'toolsNotRunning':
        result['failed'] = True
        result['msg'] = "VMwareTools is not installed or is not running in the guest"
        return result

    # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/NamePasswordAuthentication.rst
    creds = vim.vm.guest.NamePasswordAuthentication(
        username=username, password=password
    )

    # https://github.com/vmware/pyvmomi/blob/master/docs/vim/vm/guest/FileManager/FileTransferInformation.rst
    fti = content.guestOperationsManager.fileManager. \
        InitiateFileTransferFromGuest(vm, creds, src)

    result['size'] = fti.size
    result['url'] = fti.url

    # Use module_utils to fetch the remote url returned from the api
    rsp, info = fetch_url(module, fti.url, use_proxy=False,
                          force=True, last_mod_time=None,
                          timeout=10, headers=None)

    # save all of the transfer data
    for k, v in iteritems(info):
        result[k] = v

    # exit early if xfer failed
    if info['status'] != 200:
        result['failed'] = True
        return result

    # attempt to read the content and write it
    try:
        with open(dest, 'wb') as f:
            f.write(rsp.read())
    except Exception as e:
        result['failed'] = True
        result['msg'] = str(e)

    return result
6906573831b9889f82a7015c3a6ba9e82ca1cdea
29,837
def ReadFile(filename):
    """
    description: Read a program from file.
    param {*} filename
    return {*} list of the file's lines, without trailing newlines
    """
    input_file = open(filename, "r")
    result = []
    while True:
        line = input_file.readline()
        if not line:
            break
        result.append(line)
    for line_index in range(len(result)):
        result[line_index] = result[line_index][:-1]  # delete the '\n' of every line
    input_file.close()
    return result
fd7d7faab401f335579719f6e015bf7b9d82c2e2
29,839
def item_url(item):
    """Return a Markdown URL for the WCAG item."""
    fragment = item["id"].split(":")[1]
    return url(item["handle"], f"https://www.w3.org/TR/WCAG21/#{fragment}")
d670da65ef794116ae5ccd650f3618e7c6a5dc45
29,840
def gen_nested_prop_getter(val_name, throws, klass):
    """
    generates a nested property getter, it actually returns an _Internal object
    """
    def _internal(self):
        try:
            getattr(self, val_name)
        except AttributeError:
            setattr(self, val_name, klass())
        return getattr(self, val_name)
    return _internal
54f766ae1dfcbc0e491355a4c741ccbadff6d26f
29,841
import numpy
import scipy


def quad_genz_keister(order, dist, rule=24):
    """
    Genz-Keister quadrature rule.

    Examples:
        >>> abscissas, weights = quad_genz_keister(
        ...     order=1, dist=chaospy.Iid(chaospy.Uniform(0, 1), 2))
        >>> abscissas.round(2)
        array([[0.04, 0.04, 0.04, 0.5 , 0.5 , 0.5 , 0.96, 0.96, 0.96],
               [0.04, 0.5 , 0.96, 0.04, 0.5 , 0.96, 0.04, 0.5 , 0.96]])
        >>> weights.round(2)
        array([0.03, 0.11, 0.03, 0.11, 0.44, 0.11, 0.03, 0.11, 0.03])
    """
    assert isinstance(rule, int)

    if len(dist) > 1:
        if isinstance(order, int):
            values = [quad_genz_keister(order, d, rule) for d in dist]
        else:
            values = [quad_genz_keister(order[i], dist[i], rule)
                      for i in range(len(dist))]

        abscissas = [_[0][0] for _ in values]
        abscissas = combine(abscissas).T
        weights = [_[1] for _ in values]
        weights = numpy.prod(combine(weights), -1)

        return abscissas, weights

    foo = GENS_KEISTER_FUNCTIONS[rule]
    abscissas, weights = foo(order)
    abscissas = dist.inv(scipy.special.ndtr(abscissas))
    abscissas = abscissas.reshape(1, abscissas.size)

    return abscissas, weights
f4d4590f2910ea82e5e824a47be196f82bdd5da3
29,842
def get_maf(variant):
    """Gets the MAF (minor allele frequency) tag from the info field for the variant.

    Args:
        variant (cyvcf2.Variant)

    Returns:
        maf (float): Minor allele frequency
    """
    return variant.INFO.get("MAF")
1d25f577a3cec14b8d05095d320fad6584484718
29,843
from glob import glob

import cv2


def check_channels(file_path_address: str, image_type: str):
    """Manual verifier to determine which images to further clean or remove.

    This checks to see if there is a consistent third dimension in each of
    the images.

    Parameters
    ----------
    file_path_address: str
        Address of where all jpgs are located.
    image_type: str
        Image type, as in .png or .jpg.

    Return
    ------
    Array of names of jpgs to address.
    """
    imgs = glob(f'{file_path_address}*.{image_type}')
    arr_other = []
    for i, j in enumerate(imgs):
        print(f"Starting {i} for filename: {j}")
        im = cv2.imread(j)
        try:
            if im.shape[2] != 3:
                arr_other.append(j)
        except Exception as e:
            arr_other.append(j)
            print(e)
    return arr_other
6d7307dbc103fd74a21e6fbb5193c4aaebc1fd35
29,844
import heapq


def break_large_contigs(contigs, break_t, verbose=False):
    """Break large contigs in half until all contigs are under the size threshold."""

    # initialize a heapq of contigs and lengths
    contig_heapq = []
    for ctg in contigs:
        ctg_len = ctg.end - ctg.start
        heapq.heappush(contig_heapq, (-ctg_len, ctg))

    ctg_len = break_t + 1
    while ctg_len > break_t:
        # pop largest contig
        ctg_nlen, ctg = heapq.heappop(contig_heapq)
        ctg_len = -ctg_nlen

        # if too large
        if ctg_len > break_t:
            if verbose:
                print('Breaking %s:%d-%d (%d nt)' % (ctg.chr, ctg.start, ctg.end, ctg_len))

            # break in two
            ctg_mid = ctg.start + ctg_len // 2

            try:
                ctg_left = Contig(ctg.genome, ctg.chr, ctg.start, ctg_mid)
                ctg_right = Contig(ctg.genome, ctg.chr, ctg_mid, ctg.end)
            except AttributeError:
                ctg_left = Contig(ctg.chr, ctg.start, ctg_mid)
                ctg_right = Contig(ctg.chr, ctg_mid, ctg.end)

            # add left
            ctg_left_len = ctg_left.end - ctg_left.start
            heapq.heappush(contig_heapq, (-ctg_left_len, ctg_left))

            # add right
            ctg_right_len = ctg_right.end - ctg_right.start
            heapq.heappush(contig_heapq, (-ctg_right_len, ctg_right))

    # return to list
    contigs = [len_ctg[1] for len_ctg in contig_heapq]

    return contigs
82b039abd675303def8360acf9814426af50e503
29,845
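A minimal usage sketch for break_large_contigs above, assuming a simple Contig namedtuple with chr/start/end fields (the genome-less variant that the function's except branch handles):

from collections import namedtuple

Contig = namedtuple('Contig', ['chr', 'start', 'end'])

contigs = [Contig('chr1', 0, 1000), Contig('chr2', 0, 120)]
# Repeatedly halve anything longer than 300 nt.
small = break_large_contigs(contigs, break_t=300)
assert all(c.end - c.start <= 300 for c in small)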
def create_content(address, owner, content):
    """
    Create a new page with some content.

    Args:
        address (str): the new page's absolute address.
        owner (Account): the owner of the page to be created.
        content (str): the Markdown content of the first revision.

    Returns:
        page (Page): the newly-created page.
    """
    if address.startswith("/"):
        address = address[1:]
    return Page.objects.create_content(address, owner, content)
229df67cc230d39d7b6d0a129ee91d9a2f0246dd
29,846
def update_op_dims_mapping_by_default_dist_impl(op_dist_attr):
    """Each operator has a default distributed operator, only allowed to be sharded in batch dimension."""
    changed = False
    op_desc = op_dist_attr.get_owner_op().desc
    # The following statement will be replaced by a more elegant way
    if op_desc.type() == "shape" or op_desc.type() == "slice":
        return False
    output_names = op_desc.output_names()
    xshape_arg_names = []
    if "XShape" in output_names:
        xshape_arg_names = op_desc.output("XShape")
    batch_dim_mappings = []
    for arg_name in op_desc.input_arg_names():
        if op_dist_attr.is_parameter(arg_name):
            continue
        dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
        if len(dims_mapping) > 1:
            for idx, mapping in enumerate(dims_mapping[1:]):
                assert mapping == -1, \
                    "{} only the batch dimension (0-dim) can be sharded, but the dimension {} is sharded by {} part."\
                    .format(op_desc.type(), idx, mapping)
        batch_dim_mappings.append(dims_mapping[0])
    for arg_name in op_desc.output_arg_names():
        if op_dist_attr.is_parameter(arg_name):
            continue
        dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
        if arg_name not in xshape_arg_names:
            if len(dims_mapping) > 1:
                for idx, mapping in enumerate(dims_mapping[1:]):
                    assert mapping == -1, \
                        "{} only the batch dimension (0-dim) can be sharded, but the dimension {} is sharded by {} part."\
                        .format(op_desc.type(), idx, mapping)
            batch_dim_mappings.append(dims_mapping[0])
        else:
            assert dims_mapping[0] == -1, \
                "{} only the batch dimension (1-dim) of XShape can be sharded, but the dimension 0 is sharded by {} part."\
                .format(op_desc.type(), dims_mapping[0])
            if len(dims_mapping) > 2:
                for idx, mapping in enumerate(dims_mapping[2:]):
                    assert mapping == -1, \
                        "{} only the batch dimension (1-dim) of XShape can be sharded, but the dimension {} is sharded by {} part."\
                        .format(op_desc.type(), idx, mapping)
            batch_dim_mappings.append(dims_mapping[1])

    compatible_dim_mapping = compute_compatible_dim_mapping(batch_dim_mappings)
    assert compatible_dim_mapping is not None, "There is no compatible dim mapping."
    for arg_name in op_desc.input_arg_names():
        if op_dist_attr.is_parameter(arg_name):
            continue
        dims_mapping = op_dist_attr.get_input_dims_mapping(arg_name)
        if compatible_dim_mapping != dims_mapping[0]:
            dims_mapping[0] = compatible_dim_mapping
            changed = True
    for arg_name in op_desc.output_arg_names():
        if op_dist_attr.is_parameter(arg_name):
            continue
        dims_mapping = op_dist_attr.get_output_dims_mapping(arg_name)
        if arg_name not in xshape_arg_names:
            if compatible_dim_mapping != dims_mapping[0]:
                dims_mapping[0] = compatible_dim_mapping
                changed = True
        else:
            if compatible_dim_mapping != dims_mapping[1]:
                dims_mapping[1] = compatible_dim_mapping
                changed = True

    return changed
75f226ff4902cd935abadd60b16874929d35883c
29,847
import random

import torch


def perturb_box(box, min_iou=0.5, sigma_factor=0.1):
    """ Perturb the input box by adding gaussian noise to the co-ordinates

    args:
        box - input box
        min_iou - minimum IoU overlap between input box and the perturbed box
        sigma_factor - amount of perturbation, relative to the box size. Can be either a single element, or a list
                       of sigma_factors, in which case one of them will be uniformly sampled. Further, each of the
                       sigma_factor element can be either a float, or a tensor of shape (4,) specifying the
                       sigma_factor per co-ordinate

    returns:
        torch.Tensor - the perturbed box
    """

    if isinstance(sigma_factor, list):
        # If list, sample one sigma_factor as current sigma factor
        c_sigma_factor = random.choice(sigma_factor)
    else:
        c_sigma_factor = sigma_factor

    if not isinstance(c_sigma_factor, torch.Tensor):
        c_sigma_factor = c_sigma_factor * torch.ones(4)

    perturb_factor = torch.sqrt(box[2] * box[3]) * c_sigma_factor

    # multiple tries to ensure that the perturbed box has iou > min_iou with the input box
    for i_ in range(100):
        c_x = box[0] + 0.5 * box[2]
        c_y = box[1] + 0.5 * box[3]
        c_x_per = random.gauss(c_x, perturb_factor[0])
        c_y_per = random.gauss(c_y, perturb_factor[1])

        w_per = random.gauss(box[2], perturb_factor[2])
        h_per = random.gauss(box[3], perturb_factor[3])

        if w_per <= 1:
            w_per = box[2] * rand_uniform(0.15, 0.5)

        if h_per <= 1:
            h_per = box[3] * rand_uniform(0.15, 0.5)

        box_per = torch.Tensor([c_x_per - 0.5 * w_per, c_y_per - 0.5 * h_per, w_per, h_per]).round()

        if box_per[2] <= 1:
            box_per[2] = box[2] * rand_uniform(0.15, 0.5)

        if box_per[3] <= 1:
            box_per[3] = box[3] * rand_uniform(0.15, 0.5)

        box_iou = iou(box.view(1, 4), box_per.view(1, 4))

        # if there is sufficient overlap, return
        if box_iou > min_iou:
            return box_per, box_iou

        # else reduce the perturb factor
        perturb_factor *= 0.9

    return box_per, box_iou
1b1e7cb831d52be0b96b69d68817c678865447d2
29,848
import statistics


def coverageCalc(coverageList, minCov):
    """Function parsing coverageList for coverage statistics.

    :param coverageList: List of pacbam coverage information
    :param minCov: Int of minimum passing coverage
    :return: covCount: Int of bases with coverage
             minCovCount: Int of bases with minimum coverage
             meanDepth: Int mean coverage stat
    """
    covCount = 0
    minCovCount = 0
    meanDepth = statistics.mean(coverageList)
    for i in coverageList:
        if i != 0:
            covCount += 1
            if i >= minCov:
                minCovCount += 1
    return (covCount, minCovCount, round(meanDepth, 2))
e20dc1e1f0b6f7e328501afe9921455a705f196a
29,849
def truncate_top_k_2(x, k):
    """Keep top_k highest values elements for each row of a numpy array

    Args:
        x (np.Array): numpy array
        k (int): number of elements to keep for each row

    Returns:
        np.Array: processed array
    """
    s = x.shape
    # ind = np.argsort(x)[:, : s[1] - k]
    ind = np.argpartition(x, -k, axis=1)[:, :-k]
    rows = np.arange(s[0])[:, None]
    x[rows, ind] = 0
    return x
1e84987b01d4cbab9c97174886c87d88e541f380
29,850
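A quick check of truncate_top_k_2 above, assuming numpy is imported as np; note the function zeroes the input array in place and also returns it:

import numpy as np

x = np.array([[5., 1., 3., 2.],
              [0., 9., 4., 7.]])
print(truncate_top_k_2(x, k=2))
# [[5. 0. 3. 0.]
#  [0. 9. 0. 7.]]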
def permute_dimensions(x, pattern):
    """Permutes axes in a tensor.

    # Arguments
        pattern: should be a tuple of dimension indices, e.g. (0, 2, 1).

    # Returns
        A tensor.
    """
    return KerasSymbol(mx.sym.transpose(x.symbol, axes=pattern))
3665221ec55a01dcf2eaa3f51d716d08f09eed60
29,852
import copy


def relaxStructure(s, operation):
    """
    Performs a gulp relaxation (either relaxation of unit cell parameters
    only, or both unit cell and atomic positions).

    s: structure_class
    operation: string
        Specifies the gulp calculation to execute.

    Returns
    -------
    s : structure_class
        The object containing the relaxed configuration.
    {True or False}:
        Relaxation succeeded or failed.
    result: string
        The gulp output.
    """
    s2 = copy.copy(s)
    s.structure, s.energy, result, calc_time = gulp_calc.gulp_relaxation(
        copy.copy(s), operation, gulp_shells=[])
    return (s, True, result) if result == "converged" else (s2, False, result)
ccc8a2fe75d11693030fea6771bb127e3fc9ebcd
29,853
import time


def dos_gaussian_shift(energies, dos_total, projections, nGridpoints, smearing):
    """
    Produces a single gaussian function then shifts the gaussian around the grid.

    Advantages:
        + Very fast compared to other methods
    Disadvantages:
        - Produces an edge effect; the energy range should be larger than required
        - Very reliable, but not as accurate as the addition method, as the mean
          needs to be on the energy grid
        - Due to the edge effect, grids produced will vary in size
        - Grids can be made consistent but the edge effect will show in the data

    Parameters
    ----------
    energies : list
        list of eigenvalues, floating point numbers
    dos_total : list
        Density of states weightings
    nGridpoints : float
        Number of grid points to perform this method on
    smearing : float
        Smearing value

    Returns
    -------
    list, list
        A list of energies and smeared eigenvalues
    """
    # Start time for function:
    start = time.time()

    nComponents = len(projections[0])
    nColumns = nComponents + 1

    # Create grid for energy values:
    energyGrid = np.linspace(min_energy, max_energy, nGridpoints)

    # Final dos using np:
    final_dos = np.zeros((nColumns, nGridpoints))

    # Define gaussian function:
    func = gaus_func(energyGrid, 0, smearing)

    # Find max index of gaussian:
    maximum = func.argmax()

    if components:
        # Move gaussian around grid until mean of gaussian is nearest to the DOS value
        for index, item in enumerate(energies):
            maximum = func.argmax()
            idx = (np.abs(energyGrid - item)).argmin()
            rolled = np.roll(func, idx - maximum)
            final_dos[0] += rolled * dos_total[index]
            for index2, projection in enumerate(projections[index]):
                final_dos[index2 + 1] += rolled * projection
    else:
        for index, item in enumerate(energies):
            maximum = func.argmax()
            idx = (np.abs(energyGrid - item)).argmin()
            rolled = np.roll(func, idx - maximum)
            final_dos[0] += rolled * dos_total[index]

    # Remove 20% of grid due to edge effects:
    n = int(0.2 * func.size)
    final_dos = final_dos[:, n:-n]
    energyGrid = energyGrid[n:-n]

    # finish timing:
    end = time.time()
    print(f"Time elapsed (s), shift method: {end-start:.5f}")

    return energyGrid, final_dos
ff1ea14a96c217083eba01a5cde7deb668e1267d
29,854
def temp_h5_file(tmpdir_factory):
    """A fixture that fetches a temporary output dir/file for a test file
    that we want to read or write (so it doesn't clutter up the test
    directory when the automated tests are run)."""
    return str(tmpdir_factory.mktemp('data').join('test.h5'))
23ca5e58aa7afadcd18394bfa7ea6aa3a48c412e
29,855
def get_day_name(date):
    """
    Returns the day name for a given date.

    @param      date        datetime
    @return                 day name

    .. faqref::
        :tag: python
        :title: Get the day name from a date

        .. runpython::
            :showcode:

            import datetime
            dt = datetime.datetime(2016, 1, 1)
            print(dt.strftime("%A"))
    """
    return date.strftime("%A")
1e6b67d5b853156d5e6a8624c9644a08ebb4ee20
29,856
def get_samples(n_samples, data, labels=None, use_random_transpose=False):
    """Return some random samples of the training data."""
    indices = np.random.choice(len(data), n_samples, False)
    if np.issubdtype(data.dtype, np.bool_):
        sample_data = data[indices] * 2. - 1.
    else:
        sample_data = data[indices]
    if use_random_transpose:
        sample_data = np.array([random_transpose(x) for x in sample_data])
    if labels is None:
        return sample_data
    return sample_data, labels[indices]
7a9fc5256b438619af8e366802fc54db196536d7
29,857
import collections
from typing import Callable, Union

import torch


def apply_to_tensor(
    x: Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes],
    func: Callable
) -> Union[torch.Tensor, collections.Sequence, collections.Mapping, str, bytes]:
    """Apply a function on a tensor or mapping, or sequence of tensors.

    Args:
        x: input tensor or mapping, or sequence of tensors.
        func: the function to apply on ``x``.
    """
    return apply_to_type(x, torch.Tensor, func)
53f966bfbd6b68efa2bfeb72b8c34e38be008196
29,858
def status():
    """
    Returns a page showing the number of unprocessed certificate requests.
    """
    result = db.session.query(Request).filter(Request.generation_date == None).count()
    return render_template('status.html', requests=result)
9e04e870a4d3d707da078e5681c145726ba563c2
29,859
def weak_connect(sender, signal, connector, attr, idle=False, after=False):
    """ Function to connect some GObject with weak callback """
    wc = WeakCallback(connector, attr, idle)
    if after:
        wc.gobject_token = sender.connect_after(signal, wc)
    else:
        wc.gobject_token = sender.connect(signal, wc)
    # print "Connected", sender, signal, connector, attr, idle, after
    return wc.gobject_token
e18ba634c039cfb2d03649c76d1aad02f216c653
29,860
def load_user(id):
    """ Provides login_manager with a method to load a user """
    return User.get_by_id(int(id))
3aca05c0bf6ad62401c442ba813a8a8a646dabe4
29,861
def _compute_min_dfc(fnrs, fprs, thresholds, p_target, c_miss, c_fa):
    """
    Computes the minimum of the detection cost function. The comments refer to
    equations in Section 3 of the NIST 2016 Speaker Recognition Evaluation Plan.

    :param fnrs: the list of false negative rates
    :param fprs: the list of false positive rates
    :param thresholds: the list of decision thresholds
    :param p_target: a priori probability of the specified target speaker
    :param c_miss: cost of a missed detection
    :param c_fa: cost of a spurious detection
    :return: the minimum detection cost and accompanying threshold
    """
    min_c_det = float("inf")
    min_c_det_threshold = thresholds[0]
    for i in range(0, len(fnrs)):
        # See Equation (2). It is a weighted sum of false negative
        # and false positive errors.
        c_det = c_miss * fnrs[i] * p_target + c_fa * fprs[i] * (1 - p_target)
        if c_det < min_c_det:
            min_c_det = c_det
            min_c_det_threshold = thresholds[i]
    # See Equations (3) and (4). Now we normalize the cost.
    c_def = min(c_miss * p_target, c_fa * (1 - p_target))
    min_dcf = min_c_det / c_def
    return min_dcf, min_c_det_threshold
23931070ad23f2dc8b1fdc63d0e4635f9fede535
29,862
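A toy call to _compute_min_dfc above, assuming fnrs/fprs were swept over the same thresholds (e.g. from an ROC curve):

fnrs = [0.0, 0.1, 0.3]
fprs = [0.5, 0.2, 0.0]
thresholds = [-1.0, 0.0, 1.0]
min_dcf, thr = _compute_min_dfc(fnrs, fprs, thresholds,
                                p_target=0.01, c_miss=1.0, c_fa=1.0)
# Unnormalized costs are 0.495, 0.199, 0.003, so the minimum sits at
# threshold 1.0; normalizing by c_def = 0.01 gives min_dcf == 0.3.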
def parse_rsync_url(location):
    """Parse a rsync-style URL."""
    if ':' in location and '@' not in location:
        # SSH with no user@, zero or one leading slash.
        (host, path) = location.split(':', 1)
        user = None
    elif ':' in location:
        # SSH with user@host:foo.
        user_host, path = location.split(':', 1)
        if '@' in user_host:
            user, host = user_host.rsplit('@', 1)
        else:
            user = None
            host = user_host
    else:
        raise ValueError('not a valid rsync-style URL')
    return (user, host, path)
fc315c1a6b376cbb83b047246fee51ae936b68ef
29,863
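Expected behaviour of parse_rsync_url for the two SSH forms it accepts (a location with no colon raises ValueError):

assert parse_rsync_url('git@example.com:repo.git') == ('git', 'example.com', 'repo.git')
assert parse_rsync_url('example.com:/srv/repo') == (None, 'example.com', '/srv/repo')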
import random


def generate_address_street():
    """Concatenate number, street, and street suffix."""
    number = random.randint(1, 9999)
    street = last_names[random.randint(0, len(last_names) - 1)]
    suffix = address_street_suffix[random.randint(0, len(address_street_suffix) - 1)]
    return "{0} {1} {2}".format(number, street, suffix)
a7dafb282d1d0abb25ad6cb44ba6f2e0a9e190dd
29,865
def try_all_eliza_transformations(doc):
    """
    Try to do eliza transformation for all the functions and add the
    transformed string to the responses list
    """
    responses = []
    question = ask_do_you_like_to(doc)
    if question:
        responses.append(question)
    question = rephrase_question(doc)
    if question:
        responses.append(question)
    question = ask_why(doc)
    if question:
        responses.append(question)
    return responses
3a76b9a1fc422e8db1e02f41dd286ce385d09213
29,866
def get_jwt_subject():
    """Returns the subject from a valid access token."""
    token = get_token_auth_header()
    payload = verify_decode_jwt(token)
    if "sub" not in payload:
        abort(401)
    return payload["sub"]
a7f8bf3989dbdc1a0894d63d6fdc58e5c07356f4
29,867
import functools


def api(function):
    """
    Decorator of API functions that protects user code from
    unknown exceptions raised by gRPC or internal API errors.
    It will catch all exceptions and throw InternalError.

    :param function: function to be decorated
    :return: decorated function
    """
    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except (SyntaxError, TypeError, InternalError):
            raise
        except _Rendezvous as e:
            raise InternalError(str(e.code()), e.details())
        except Exception as e:
            raise InternalError(details=str(e)) from e
    return wrapper
1e023eed5986224967c6057962576afc4c84adb2
29,868
from contextlib import contextmanager

import mock


@contextmanager
def mock_signal_receiver(signal, wraps=None, **kwargs):
    """
    Taken from mock_django, as importing mock_django created issues with Django 1.9+.

    Temporarily attaches a receiver to the provided ``signal`` within the scope
    of the context manager.

    The mocked receiver is returned as the ``as`` target of the ``with``
    statement.

    To have the mocked receiver wrap a callable, pass the callable as the
    ``wraps`` keyword argument. All other keyword arguments provided are passed
    through to the signal's ``connect`` method.
    """
    if wraps is None:
        def wraps(*args, **kwargs):
            return None

    receiver = mock.Mock(wraps=wraps)
    signal.connect(receiver, **kwargs)
    yield receiver
    signal.disconnect(receiver)
d3f0a481609bf9491b159a7331e1fffc3a5bf92e
29,869
import urllib.request


def is_valid_cover(cover_metadata):
    """Fetch all sizes of cover from url and evaluate if they are valid."""
    syndetics_urls = build_syndetic_cover_urls(cover_metadata)
    if syndetics_urls is None:
        return False

    try:
        for size in ["small", "medium", "large"]:
            resp = urllib.request.urlopen(syndetics_urls[size])
            has_error = resp.getcode() != 200
            less_than_1_pixel = (
                int(resp.getheader("Content-Length")) <= MIN_CONTENT_LENGTH
            )
            if has_error or less_than_1_pixel:
                return False
    except Exception:
        return False
    return True
b41aa1f558d1080fc1a3a2d03180417b38b92931
29,870
def setup_go_func(func, arg_types=None, res_type=None):
    """
    Set up a Go function so it knows what types it should take and return.

    :param func: Specify the Go function from the library.
    :param arg_types: List containing the ctypes argument types the function takes. Default: None.
    :param res_type: The ctypes type that the function returns. Default: None.
    :return: Returns the func arg back, for cases when you want to set up the
        function and assign it to a variable in one line.
    """
    if arg_types is not None:
        func.argtypes = arg_types
    if res_type is not None:
        func.restype = res_type
    return func
05f48f4dfecdf0133613f76f235b1e82f14bc5a9
29,871
import ctypes


def xonly_pubkey_tweak_add(
    xonly_pubkey: Secp256k1XonlyPubkey,
    tweak32: bytes
) -> Secp256k1Pubkey:
    """
    Tweak an x-only public key by adding the generator multiplied with tweak32
    to it. Note that the resulting point can not in general be represented by
    an x-only pubkey because it may have an odd Y coordinate. Instead, the
    output_pubkey is a normal Secp256k1Pubkey.

    :param xonly_pubkey: initialized xonly pubkey
    :param tweak32: 32-byte tweak
    :return: tweaked public key
    :raises ValueError: if tweak32 is not of type bytes and length 32
    :raises Libsecp256k1Exception: arguments are invalid or the resulting
        public key would be invalid (only when the tweak is the negation of
        the corresponding secret key)
    """
    tweaked_pubkey = ctypes.create_string_buffer(INTERNAL_PUBKEY_LENGTH)
    result = lib.secp256k1_xonly_pubkey_tweak_add(
        secp256k1_context_verify, tweaked_pubkey, xonly_pubkey, tweak32
    )
    if result != 1:
        assert_zero_return_code(result)
        raise Libsecp256k1Exception(
            "arguments are invalid or the resulting public key "
            "would be invalid (only when the tweak is the negation "
            "of the corresponding secret key)"
        )
    return tweaked_pubkey
727b84ec239bb19d83fa9fe4e72b08ea17972e31
29,873
from typing import Optional


def get_organisms_df(url: Optional[str] = None) -> pd.DataFrame:
    """Convert tab separated txt files to pandas Dataframe.

    :param url: url from KEGG tab separated file
    :return: dataframe of the file
    :rtype: pandas.DataFrame
    """
    df = pd.read_csv(
        url or ensure_path(MODULE_NAME, KEGG_ORGANISM_URL, path='organisms.tsv'),
        sep='\t',
        header=None,
        names=[
            'kegg_id',
            'kegg_code',
            'name',
            # fourth column is the taxonomy hierarchy
        ],
        usecols=[0, 1, 2],
    )
    df['name'] = df['name'].map(lambda name: name.replace(')', '').split(' ('))
    return df
4b28571848076a785ae773410c70102e5a83d096
29,874
import random

import numpy as np


def split(dataset: Dataset, count: int, shuffle=False):
    """Split the Dataset into the specified number of pieces."""
    dataset_size = len(dataset)
    sub_size = dataset_size // count
    assert sub_size > 0
    indices = np.arange(dataset_size)
    if shuffle:
        random.shuffle(indices)
    return [
        dataset.slice(indices[o : o + sub_size])
        for o in range(0, dataset_size, sub_size)
    ]
d4f50f617fb65499190c7c5e014178d548a7dccb
29,876
import pathlib


def load_fixture(filename):
    """Load a fixture."""
    return (
        pathlib.Path(__file__)
        .parent.joinpath("fixtures", filename)
        .read_text(encoding="utf8")
    )
f1382161ad6226cd585a2ecbbe08dc486b3a5f2d
29,877
import re


def natural_sort(l):
    """ From http://stackoverflow.com/a/4836734 """
    def convert(text):
        return int(text) if text.isdigit() else text.lower()

    def alphanum_key(key):
        return [convert(c) for c in re.split('([0-9]+)', key)]

    return sorted(l, key=alphanum_key)
c1cd34aa4c9ea2323cb311d9af6f141aa85abef2
29,878
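natural_sort in action — numeric runs compare as integers rather than lexicographically:

files = ['img10.png', 'img2.png', 'img1.png']
print(natural_sort(files))  # ['img1.png', 'img2.png', 'img10.png']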
import inspect


def is_verifier(cls):
    """Determine if a class is a Verifier that can be instantiated"""
    return inspect.isclass(cls) and issubclass(cls, Verifier) and \
        not inspect.isabstract(cls)
83cd18155f23631f2e1dac1ec1eac07a5017809d
29,879
def get_bleu_score(references, hypothesis):
    """
    Args:
        references: list(list(list(str)))   # examples: list(examples)
        hypothesis: list(list(list(str)))   # hypotheses: list(list(str))
    """
    hypothesis = [hyp[0][0] for hyp in hypothesis]
    return 100.0 * bleu_score.corpus_bleu(list_of_references=references,
                                          hypotheses=hypothesis)
a2a17186555564a02acedf5540aedce0b1a14cd1
29,882
def load_json_link_index(out_dir, link):
    """Check for an existing link archive in the given directory,
    and load+merge it into the given link dict.
    """
    link = {
        **parse_json_link_index(out_dir),
        **link,
    }
    link.update({
        'history': link.get('history') or {},
    })
    check_link_structure(link)
    return link
58c034daa7305e06407af9cf226ff939544ee961
29,884
import numpy as np


def relative_phase(input_phase: float, output_phase: float) -> float:
    """
    Calculates the relative phase between two phases.

    :param input_phase: the input phase.
    :param output_phase: the output phase.
    :return: the relative phase.
    """
    phi = output_phase - input_phase
    if phi < -np.pi:
        return phi + 2 * np.pi
    elif phi > np.pi:
        return phi - 2 * np.pi
    else:
        return phi
d912754fe060582e5ffe9dc3aad0286b80ee945a
29,885
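relative_phase wraps the raw difference back into (-pi, pi]; for example:

import numpy as np

print(relative_phase(0.9 * np.pi, -0.9 * np.pi))  # 0.2*pi (~0.628), not -1.8*pi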
def jaccard_similarity(x, y):
    """
    Returns the Jaccard Similarity Coefficient (Jaccard Index) between two lists.

    From http://en.wikipedia.org/wiki/Jaccard_index: The Jaccard coefficient
    measures similarity between finite sample sets, and is defined as the size
    of the intersection divided by the size of the union of the sample sets.
    """
    intersection_cardinality = len(set.intersection(*[set(x), set(y)]))
    union_cardinality = len(set.union(*[set(x), set(y)]))
    return intersection_cardinality / float(union_cardinality)
81cf0c882ff4b06e79b102abb2d8f13755b68873
29,887
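For example:

print(jaccard_similarity([1, 2, 3], [2, 3, 4]))  # 2 shared / 4 total = 0.5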
def align_address_to_size(address, align):
    """Align the address to the given size."""
    return address + ((align - (address % align)) % align)
9496c969e257fb3c00ecddf8e941ddb0bd41155e
29,888
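align_address_to_size rounds an address up to the next multiple of align (and leaves already-aligned addresses alone):

assert align_address_to_size(0x1003, 0x10) == 0x1010  # rounds up
assert align_address_to_size(0x1000, 0x10) == 0x1000  # already aligned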
import shlex


def tokenizer_word(text_string, keep_phrases=False):
    """
    Tokenizer that tokenizes a string of text on spaces and new lines
    (regardless of however many of each).

    :param text_string: Python string object to be tokenized.
    :param keep_phrases: Boolean; will not split "quoted" text.
    :return: Array of strings, each is a word
    """
    text_string = str(text_string)
    if keep_phrases:
        tokens = shlex.split(text_string.replace('\n', ' ').replace('/', ' '))
    else:
        tokens = text_string.replace('\n', ' ').replace('/', ' ').split()
    return tokens
940f716072e9b2ce522c9854b2394327fbd1e934
29,889
def getiso():
    """Get the ISO level of the sensor."""
    global camera
    maxtint = 4
    iso = float(camera.analog_gain)     # get current ambient brightness 0..8
    iso = (iso * maxtint)               # adjust by max tint level
    iso = (256 - (maxtint * 8)) + iso   # clear - max tint + ISO tint
    return int(iso)
45fa48897cd297232fde00cbe49d7717608466ed
29,890
from typing import Dict, List, Sequence, Union


def get_manual_comparisons(db: cosem_db.MongoCosemDB,
                           cropno: Union[None, str, int, Sequence[Union[str, int]]] = None,
                           mode: str = "across_setups") -> \
        List[Union[Dict[str, str], Dict[str, Union[str, Sequence[str]]]]]:
    """
    Read best configurations optimized manually from corresponding csv files
    and translate into dictionaries that can be used for queries to the
    database with automatic evaluations.

    Args:
        db: Database with crop information.
        cropno: Specific crop number or list of crop numbers that should be
            included in queries.
        mode: "per_setup" for queries specifying the optimized manual iteration
            for each setup, "across_setups" (default) for queries specifying
            the optimized manual iteration and setup for each label, and "all"
            for both.

    Returns:
        List of corresponding queries.
    """
    if isinstance(cropno, int) or isinstance(cropno, str):
        cropno = [cropno]
    if mode == "across_setups":
        all_queries = _get_setup_queries(cropno, db)
    elif mode == "per_setup":
        all_queries = _get_iteration_queries(cropno, db)
    elif mode == "all":
        all_queries = _get_iteration_queries(cropno, db) + _get_setup_queries(cropno, db)
    else:
        raise ValueError("Unknown mode {mode:}".format(mode=mode))
    return all_queries
fc4a62d09f9df289b08a249d70875ce6ca19ed39
29,892
from math import pi, cos, sin


def draw_circle(center_x: float, center_y: float, radius: float = 0.3,
                segments: int = 360, fill: bool = False):
    """
    Returns an Object2D instance that draws a circle.

    Arguments:
    center_x : float : The x cord for the center of the circle.
    center_y : float : The y cord for the center of the circle.
    radius   : float : The radius of the circle.
    segments : int   : How many segments to make the circle from.
    fill     : bool  : Should the shape be filled.
    """
    edges = []
    cords = []

    for i in range(segments):
        theta = (2 * pi * i) / segments      # Get the current angle
        x = radius * cos(theta) + center_x   # Get the x cord
        y = radius * sin(theta) + center_y   # Get the y cord
        cords.append([x, y])

    if fill:
        cords.insert(0, [center_x, center_y])
        for i in range(len(cords) - 2):
            edges.append([0, i + 1, i + 2])
        edges.append([0, segments, 1])       # Fixes a little glitch
        return Object2D(cords, edges, draw_type='triangles')
    else:
        for i in range(len(cords) - 1):
            edges.append([i, i + 1])
        edges.append([segments - 1, 0])      # Fixes a little glitch
        return Object2D(cords, edges, draw_type='lines')
78caa0cbb25df7c947053a10d54f7ae3fd2fc8b2
29,893
def weighted_mean(values, weights):
    """Calculate the weighted mean.

    :param values: Array of values
    :type values: numpy.ndarray
    :param weights: Array of weights
    :type weights: numpy.ndarray
    :rtype: float
    """
    weighted_mean = (values * weights).sum() / weights.sum()
    return weighted_mean
886d7cff1555c40b448cda03e08620a0e2d69ede
29,894
def shared_cluster():
    """Create a shared cluster"""
    global _shared_cluster

    if _shared_cluster is None:
        cluster = PseudoHdfs4()
        atexit.register(cluster.stop)

        try:
            cluster.start()
        except Exception as ex:
            LOG.exception("Failed to fully bring up test cluster: %s" % (ex,))

        # Fix config to reflect the cluster setup.
        webhdfs_url = "http://localhost:%s/webhdfs/v1" % (cluster.dfs_http_port,)
        closers = [
            hadoop.conf.HDFS_CLUSTERS['default'].FS_DEFAULTFS.set_for_testing(cluster.fs_default_name),
            hadoop.conf.HDFS_CLUSTERS['default'].WEBHDFS_URL.set_for_testing(webhdfs_url),
            hadoop.conf.MR_CLUSTERS['default'].HOST.set_for_testing('localhost'),
            hadoop.conf.MR_CLUSTERS['default'].PORT.set_for_testing(cluster._jt_port),
            hadoop.conf.MR_CLUSTERS['default'].JT_THRIFT_PORT.set_for_testing(cluster.jt_thrift_port),
        ]

        old = hadoop.cluster.clear_caches()

        def restore_config():
            hadoop.cluster.restore_caches(old)
            for x in closers:
                x()

        cluster.shutdown_hook = restore_config
        _shared_cluster = cluster

    return _shared_cluster
fd51186f8d46ae236b3f4220750ab0f412354669
29,895
import reprlib


def _format_args(args):
    """Format function arguments.

    Special case for a single parameter: ('hello',) is formatted as ('hello').
    """
    # use reprlib to limit the length of the output
    args_repr = reprlib.repr(args)
    if len(args) == 1 and args_repr.endswith(',)'):
        args_repr = args_repr[:-2] + ')'
    return args_repr
a54f06358b629340c1f16ecc86eff15b8fca3bd3
29,896
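The single-element special case of _format_args:

print(_format_args(('hello',)))       # ('hello')
print(_format_args(('a', 'b', 'c')))  # ('a', 'b', 'c')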
import json
import logging

import requests
from requests.exceptions import HTTPError


def request(config, url_params={}):
    """Wrapper for sending GET to Facebook.

    Args:
        config: YAML object of config file.
        url_params: Dictionary of parameters to add to GET.

    Returns:
        HTTP response or error.
    """
    host = HOST + f"/{config['user_id']}/"
    params = {"fields": "id,name", "access_token": config['user_token']}
    params.update(url_params)
    try:
        response = requests.get(host, params=params)
        logging.info(f"Sending to Facebook: {response.status_code}")
        response.encoding = "utf-8"
        return json.dumps(response.text, indent=4)
    except HTTPError as e:
        return e
e4ef9315170ab7d1c7e39645bde46b6cbb9f9de9
29,897
import miniupnpc


def setup(hass, config):
    """Register a port mapping for Home Assistant via UPnP."""
    upnp = miniupnpc.UPnP()
    hass.data[DATA_UPNP] = upnp

    upnp.discoverdelay = 200
    upnp.discover()
    try:
        upnp.selectigd()
    except Exception:
        _LOGGER.exception("Error when attempting to discover an UPnP IGD")
        return False

    unit = config[DOMAIN].get(CONF_UNITS)
    discovery.load_platform(hass, 'sensor', DOMAIN, {'unit': unit}, config)

    port_mapping = config[DOMAIN].get(CONF_ENABLE_PORT_MAPPING)
    if not port_mapping:
        return True

    base_url = urlsplit(hass.config.api.base_url)
    host = base_url.hostname
    external_port = internal_port = base_url.port

    upnp.addportmapping(
        external_port, 'TCP', host, internal_port, 'Home Assistant', '')

    def deregister_port(event):
        """De-register the UPnP port mapping."""
        upnp.deleteportmapping(hass.config.api.port, 'TCP')

    hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, deregister_port)

    return True
ca5d7d90efb849412e2256dab3516deca1531539
29,898
def load_formatted_objects_json(fp):
    """A function to load formatted object data.

    The function assumes the input json is of the form:

    [
        {
            id: <number>,
            regions: [
                {
                    x: <number>,
                    y: <number>,
                    height: <number>,
                    width: <number>,
                    id: <number>,
                    phrase: "something cool",
                    image: <image id>,
                }
            ]
        }
    ]

    Will return a dictionary using the image name (id + .jpg) and bounding
    box information.

    Parameters
    -----------
    fp : string
        path to a json file

    Returns
    --------
    dictionary
    """
    parsed = json_load(fp)
    out = {}
    for item in parsed:
        # the formatted input only holds a number that is later turned into a
        # filename within densecap code
        src_img = "{0}.jpg".format(item['id'])
        regions = item['regions']
        out_regions = []
        for region in regions:
            formatted = dict(x=region['x'], y=region['y'],
                             h=region['height'], w=region['width'],
                             names=[region['phrase']])
            out_regions.append(formatted)
        out[src_img] = out_regions
    return out
2647f2c2cfc7530998361b19693ea6e187bf64f1
29,899
import datetime


def _event_last_observed(event: EventsV1Event) -> datetime.datetime:
    """
    Returns the last time an event was observed.
    """
    if event.series:
        series_data: EventsV1EventSeries = event.series
        return series_data.last_observed_time
    if event.event_time:
        return event.event_time
    # Fall back to event creation
    return event.metadata.creation_timestamp
c8a26c067df0375923b3aa57afaa48f5c5fc0cf8
29,900
import numpy


def bytes(length):
    """Returns random bytes.

    .. seealso:: :func:`numpy.random.bytes`
    """
    return numpy.random.bytes(length)
44055f168dbd9e6b2e2481f8d57b9edef0110430
29,901
def init_json():
    """
    This function initializes the JSON dict.
    Return : Dictionary
    """
    data_json = {}
    data_json['login'] = ""
    data_json['hash'] = ""
    data_json['duration'] = 0
    data_json['nbFiles'] = 0
    data_json['nbVirus'] = 0
    data_json['nbErrors'] = 0
    data_json['uuidUsb'] = ""
    data_json['viruses'] = []
    return data_json
9411f3a525df9e68f53fba94da679bd8d5b34013
29,902
def django_popup_view_field_javascript():
    """
    Return HTML for django_popup_view_field JavaScript.
    Adjust url in settings.

    **Tag name**::

        django_popup_view_field_javascript

    **Usage**::

        {% django_popup_view_field_javascript %}
    """
    temp = loader.get_template('django_popup_view_field/scripts_include.html')
    return temp.render({})
f2cf0631139ade2044aa577f906b4b8568b33713
29,903
def apply_filters(row):
    """Applies filters to the input data and returns transformed row."""
    return {k: COLUMN_FILTERS.get(k, lambda x: x)(v) for k, v in row.items()}
2f31dda70bedc35f8b1b66e5906c38c5d3202e2b
29,905
def save_convergence_statistics(
    inputs, results, dmf=None, display=True, json_path=None, report_path=None
):
    """ """
    s = Stats(inputs, results)
    if display:
        s.report()
    if report_path:
        with open(report_path, "w") as f:
            s.report(f)
    if json_path is not None:
        with open(json_path, "w") as f:
            s.to_json(f)
    if dmf is not None:
        s.to_dmf(dmf)
    return s
e06891c917abc3678e8fbcf0ff81cae46ac2e04a
29,906
def max_size(resize_info):
    """
    Compute and return the image size needed as the merge target from the resize info.

    :param resize_info: resize info
    :return: width, height
    """
    max_w, max_h = 0, 0
    for name, info in resize_info.items():
        pos = info['pos']
        size = info['size']
        max_w = max(max_w, pos[0] + size[0])
        max_h = max(max_h, pos[1] + size[1])
    return max_w, max_h
1e28f993b3b0fac077f234b6388a2d9042396f6b
29,908
def get_thru(path, specs=range(10), cameras='brz'):
    """Calculate the throughput in each camera for a single exposure.

    See https://github.com/desihub/desispec/blob/master/bin/desi_average_flux_calibration
    and DESI-6043. The result includes the instrument throughput as well as the
    fiber acceptance loss and atmospheric extinction.
    """
    calibs = {c: CoAdd(c) for c in cameras}
    exptime = None
    primary_area = 8.659e4  # cm2
    for (FCAL,), camera, spec in iterspecs(path, 'fluxcalib'):
        if exptime is None:
            hdr = FCAL[0].read_header()
            exptime = hdr['EXPTIME']
        else:
            if FCAL[0].read_header()['EXPTIME'] != exptime:
                raise RuntimeError(f'EXPTIME mismatch for fluxcalib in {path}')
        fluxcalib, ivar = FCAL['FLUXCALIB'].read(), FCAL['IVAR'].read()
        calibs[camera] += Spectrum(camera, np.median(fluxcalib, axis=0), np.median(ivar, axis=0))
    for camera in cameras:
        # Convert from (1e17 elec cm2 s / erg) to (elec/phot)
        calibs[camera] /= (primary_area * exptime) / (1e17 * erg_per_photon[cslice[camera]])
    return calibs
ec123a4f8843fcd5eb4aef87bf77b936687c544d
29,909
def quicksort(lyst):
    """This is a quicksort"""

    def partition_helper(lyst, first, last):
        pivot = lyst[first]
        left = (first + 1)
        right = last
        done = False
        while not done:
            while left <= right and lyst[left] <= pivot:
                left += 1
            while right >= left and lyst[right] >= pivot:
                right -= 1
            if right < left:
                done = True
            else:
                lyst[left], lyst[right] = lyst[right], lyst[left]
        lyst[first], lyst[right] = lyst[right], lyst[first]
        return right

    def quicksort_helper(lyst, first, last):
        if first < last:
            splitpoint = partition_helper(lyst, first, last)
            quicksort_helper(lyst, first, (splitpoint - 1))
            quicksort_helper(lyst, (splitpoint + 1), last)
        return lyst

    quicksort_helper(lyst, 0, (len(lyst) - 1))
    return lyst
33385c01b877a86a2970f33dc4d0bd9d456dc983
29,910
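quicksort sorts the list in place and also returns it:

data = [5, 3, 8, 1, 9, 2]
print(quicksort(data))  # [1, 2, 3, 5, 8, 9]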
def status_parameter_error():
    """Returns the value returned by the function calls to the library
    in case of parameter error.
    """
    r = call_c_function(petlink32_c.status_parameter_error,
                        [{'name': 'return_value', 'type': 'int', 'value': None}])
    return r.return_value
fbbdcd85fde27f200c1c749e03234baa8d725f1d
29,911
def df_to_dict(df: DataFrame) -> list[dict]:
    """Convert a DataFrame to a list of dicts."""
    # Grab the header and build a dict from each row
    head_list = list(df.columns)
    list_dic = []
    for i in df.values:
        a_line = dict(zip(head_list, i))
        list_dic.append(a_line)
    return list_dic
c52e628a78e2bd863a4a9926e739bff53195910a
29,912
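df_to_dict produces one dict per row, keyed by the column headers:

import pandas as pd

df = pd.DataFrame({'name': ['a', 'b'], 'score': [1, 2]})
print(df_to_dict(df))
# [{'name': 'a', 'score': 1}, {'name': 'b', 'score': 2}]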
def parse_describeprocess(html_response):
    """Parse WPS DescribeProcess response.

    Parameters
    ----------
    html_response : string
        xml document from a DescribeProcess WPS request.

    Returns
    -------
    out : list of dict
        'identifier' : ProcessDescription -> ows:Identifier
        'inputs' : ProcessDescription -> DataInputs -> Input -> ows:Identifier
        'outputs' : ProcessDescription -> ProcessOutputs -> Output -> ows:Identifier
    """
    # XML structure:
    # wps:ProcessDescriptions
    #   ProcessDescription (list)
    #     ows:Identifier (text)
    #     ows:Title (text)
    #     DataInputs (optional)
    #       Input (list)
    #         ows:Identifier (text)
    #         ows:Title (text)
    #         LiteralData (xor)
    #           ows:DataType
    #           [...]
    #     ProcessOutputs
    #       Output (list)
    #         ows:Identifier (text)
    #         ows:Title (text)
    #         LiteralOutput (xor)
    #           ows:DataType
    #           [...]

    processes = []
    process_descriptions = xml_children_as_dict(
        etree.fromstring(html_response))

    for process_description_el in process_descriptions['ProcessDescription']:
        d = {'inputs': [], 'outputs': []}
        process_description = xml_children_as_dict(process_description_el)

        d['identifier'] = process_description['ows:Identifier'][0].text

        if 'DataInputs' in process_description:
            data_inputs = xml_children_as_dict(
                process_description['DataInputs'][0])
            for input_element in data_inputs['Input']:
                input1 = xml_children_as_dict(input_element)
                d['inputs'].append(input1['ows:Identifier'][0].text)

        process_outputs = xml_children_as_dict(
            process_description['ProcessOutputs'][0])
        for output_element in process_outputs['Output']:
            output1 = xml_children_as_dict(output_element)
            d['outputs'].append(output1['ows:Identifier'][0].text)

        processes.append(d)

    return processes
80f7e866424ffa8cbdae2a94d31f6044722648c6
29,913
def variational_implicit_step(system, dt, p, x, z, t):
    """
    For Lagrangian functions of the form
    L(j) = 1/(2h^2) (x_{j+1} - x_j)^2 - 1/2 (V(x_j) + V(x_{j+1})) - 1/2 (F(z_j) + F(z_{j+1}))
    """
    tnew = t + dt
    xnew = (
        x
        + (dt - 0.5 * dt ** 2 * system.Fz(z, t)) * p
        - 0.5 * dt ** 2 * system.Vq(x, t)
    )
    (znew,) = fsolve(
        lambda znew: z
        - znew
        + 0.5 * dt * np.linalg.norm((xnew - x) / dt) ** 2
        - 0.5
        * dt
        * (
            system.V(x, t)
            + system.V(xnew, tnew)
            + system.F(z, t)
            + system.F(znew, tnew)
        ),
        [z],
    )
    pnew = (
        (1.0 - 0.5 * dt * system.Fz(z, t)) * p
        - 0.5 * dt * (system.Vq(x, t) + system.Vq(xnew, tnew))
    ) / (1.0 + 0.5 * dt * system.Fz(znew, tnew))
    return pnew, xnew, znew, tnew
c32e13ae37873983a3a88b91e7f0bfdf7ba9d043
29,914
def _validate_time_mode(mode, **kwargs):
    """Validate time mode."""
    return mode
e30fd9071bde102b4986fe9ef846a812f7c08ff7
29,916
def flatten() -> GraphBuilder:
    """ dl.flatten layer builder """
    def graph_builder(prev_layer: Metadata) -> Metadata:
        metadata = {}
        init_regularized_nodes(metadata, prev_layer)
        graph = prev_layer['graph']
        metadata['units'] = (np.prod(prev_layer['units']),)
        metadata['graph'] = rc.flatten(graph)
        return metadata
    return graph_builder
bf00faa00f5059c33887acafb0665ae37dc970e8
29,917
def _important() -> str:
    """Returns a query term matching messages that are important."""
    return 'is:important'
dced06645f5311b321d42cd3892627df5b30faec
29,918
async def root():
    """
    Dependency is "static". The value of Depends doesn't get passed into the
    function; we still get redirected half the time though.
    """
    return {"message": "Hello World"}
6d3b634444240275f56d30aa0c1fe3b3bb84ce24
29,919
from typing import Hashable

import encodings


def line_width(et: pd.DataFrame, lw_by: Hashable):
    """Default edge line width function."""
    if lw_by is not None:
        return encodings.data_linewidth(et[lw_by], et[lw_by])
    return pd.Series([1] * len(et), name="lw")
064f90d4974f64d9be99090c77cf24d30a34a9f0
29,920
def run_state_machine(ctx, callback):
    """Run the libmongocrypt state machine until completion.

    :Parameters:
      - `ctx`: A :class:`MongoCryptContext`.
      - `callback`: A :class:`MongoCryptCallback`.

    :Returns:
      The completed libmongocrypt operation.
    """
    while True:
        state = ctx.state
        # Check for terminal states first.
        if state == lib.MONGOCRYPT_CTX_ERROR:
            ctx._raise_from_status()
        elif state == lib.MONGOCRYPT_CTX_READY:
            return ctx.finish()
        elif state == lib.MONGOCRYPT_CTX_DONE:
            return None

        if state == lib.MONGOCRYPT_CTX_NEED_MONGO_COLLINFO:
            list_colls_filter = ctx.mongo_operation()
            coll_info = callback.collection_info(
                ctx.database, list_colls_filter)
            if coll_info:
                ctx.add_mongo_operation_result(coll_info)
            ctx.complete_mongo_operation()
        elif state == lib.MONGOCRYPT_CTX_NEED_MONGO_MARKINGS:
            mongocryptd_cmd = ctx.mongo_operation()
            result = callback.mark_command(ctx.database, mongocryptd_cmd)
            ctx.add_mongo_operation_result(result)
            ctx.complete_mongo_operation()
        elif state == lib.MONGOCRYPT_CTX_NEED_MONGO_KEYS:
            key_filter = ctx.mongo_operation()
            for key in callback.fetch_keys(key_filter):
                ctx.add_mongo_operation_result(key)
            ctx.complete_mongo_operation()
        elif state == lib.MONGOCRYPT_CTX_NEED_KMS:
            for kms_ctx in ctx.kms_contexts():
                with kms_ctx:
                    callback.kms_request(kms_ctx)
            ctx.complete_kms()
        else:
            raise MongoCryptError('unknown state: %r' % (state,))
37da937db46bea8e7952e72753ab27543215f8fe
29,921
def remove_na_arraylike(arr):
    """
    Return array-like containing only true/non-NaN values, possibly empty.
    """
    if is_extension_array_dtype(arr):
        return arr[notna(arr)]
    else:
        return arr[notna(lib.values_from_object(arr))]
e89d3218d053852ddbc553223d035c71615f7c21
29,922
def conv_nested(image, kernel):
    """A naive implementation of convolution filter.

    This is a naive implementation of convolution using 4 nested for-loops.
    This function computes convolution of an image with a kernel and outputs
    the result that has the same shape as the input image.

    Args:
        image: numpy array of shape (Hi, Wi).
        kernel: numpy array of shape (Hk, Wk).

    Returns:
        out: numpy array of shape (Hi, Wi).
    """
    Hi, Wi = image.shape
    Hk, Wk = kernel.shape
    out = np.zeros((Hi, Wi))

    ### YOUR CODE HERE
    x_h = int(Hk / 2)
    y_h = int(Wk / 2)
    for hi in range(Hi):
        for wi in range(Wi):
            for x in range(Hk):
                for y in range(Wk):
                    v_h = hi - x_h + x
                    v_w = wi - y_h + y
                    if v_h >= 0 and v_h < Hi and v_w >= 0 and v_w < Wi:
                        out[hi, wi] = out[hi, wi] + image[v_h, v_w] * kernel[Hk - x - 1, Wk - y - 1]
    ### END YOUR CODE

    return out
acfa5f275bc15a39357390ac356f7ee681dbf31a
29,923
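A tiny sanity check for conv_nested above, assuming numpy is imported as np — a 3x3 mean kernel applied to an impulse spreads it evenly over the neighbourhood:

import numpy as np

image = np.zeros((5, 5))
image[2, 2] = 1.0
kernel = np.ones((3, 3)) / 9.0
out = conv_nested(image, kernel)
assert np.isclose(out[1:4, 1:4].sum(), 1.0)  # all mass stays in the 3x3 block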
def getLatest(df):
    """
    This gets the data of the last day from the dataframe and appends it
    to the details.
    """
    df_info = df.iloc[:, 0:5]
    df_last = df.iloc[:, -1]
    df_info['latest'] = df_last
    return df_info
f42cae0552a4ac791d3499fa2ca1417a80a970ac
29,924
import time


def setup_camera(is_fullscreen=True):
    """
    Setup the PiCam to default PSVD settings, and return the camera as an object.

    Keyword Arguments:
    is_fullscreen -- Boolean value. True for fullscreen, false for window.
    """
    # ensure that camera is correctly installed and set it up to output to a
    # window and turn off AWB and exposure modes. If camera does not exist
    # print error message and quit program.
    camera = picamera.PiCamera()
    camera.resolution = s.PICTURE_RESOLUTION
    camera.preview_fullscreen = is_fullscreen
    camera.awb_mode = "off"
    # camera.exposure_mode = "off"
    if not is_fullscreen:
        camera.preview_window = s.CAMERA_WINDOW_SIZE
    time.sleep(s.WAKEUP_DELAY)  # camera wake-up time: 2 s
    return camera
301d046541c0463e8e3ab58fe429a3c47cbd960e
29,925
def CalculateGearyAutoMutability(ProteinSequence):
    """
    ####################################################################################
    Calculate the GearyAuto Autocorrelation descriptors based on Mutability.

    Usage:

    result = CalculateGearyAutoMutability(protein)

    Input: protein is a pure protein sequence.

    Output: result is a dict form containing 30 Geary Autocorrelation
    descriptors based on Mutability.
    ####################################################################################
    """
    result = CalculateEachGearyAuto(ProteinSequence, _Mutability, '_Mutability')
    return result
a9a7f92d742736f7a66c8bdc06980f393922ea4a
29,926
import numpy as np


def hamming_distance(a, b):
    """
    Returns the hamming distance between sequence a and b.
    Sequences must be 1D and have the same length.
    """
    return np.count_nonzero(a != b)
fe895c87867999159c57f23f96eab9d7b41edb8e
29,928
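For example:

import numpy as np

a = np.array([0, 1, 1, 0])
b = np.array([0, 1, 0, 1])
print(hamming_distance(a, b))  # 2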
def generate_url(mbid, level):
    """Generates AcousticBrainz end point url for given MBID."""
    return ACOUSTIC_BASE + mbid + level
96fe05dc3274730196dbc764c3e8f58f32b81a5f
29,930
from typing import List, Sequence


def averaged_knots_unconstrained(n: int, p: int, t: Sequence[float]) -> List[float]:
    """
    Returns an averaged knot vector from parametrization vector `t` for an
    unconstrained B-spline.

    Args:
        n: count of control points - 1
        p: degree
        t: parametrization vector, normalized [0..1]
    """
    assert t[0] == 0.0
    assert t[-1] == 1.0

    knots = [0.0] * (p + 1)
    knots.extend(sum(t[j: j + p]) / p for j in range(1, n - p + 1))
    if knots[-1] > 1.0:
        raise ValueError('Normalized [0..1] values required')
    knots.extend([1.0] * (p + 1))
    return knots
6da79c699a3420270efc938a6f0de659d4886060
29,931
def converge_launch_stack(desired_state, stacks):
    """
    Create steps that indicate how to transition from the state provided
    by the given parameters to the :obj:`DesiredStackGroupState` described by
    ``desired_state``.

    See note [Converging stacks] for more information.

    :param DesiredStackGroupState desired_state: The desired group state.
    :param set stacks: a set of :obj:`HeatStack` instances. This must only
        contain stacks that are being managed for the specified group.

    :rtype: :obj:`pbag` of `IStep`
    """
    config = desired_state.stack_config

    by_state = groupby(lambda stack: stack.get_state(), stacks)

    stacks_complete = by_state.get(StackState.CREATE_UPDATE_COMPLETE, [])
    stacks_failed = by_state.get(StackState.CREATE_UPDATE_FAILED, [])
    stacks_check_complete = by_state.get(StackState.CHECK_COMPLETE, [])
    stacks_check_failed = by_state.get(StackState.CHECK_FAILED, [])
    stacks_in_progress = by_state.get(StackState.IN_PROGRESS, [])
    stacks_delete_in_progress = by_state.get(StackState.DELETE_IN_PROGRESS, [])
    stacks_delete_failed = by_state.get(StackState.DELETE_FAILED, [])

    stacks_good = stacks_complete + stacks_check_complete
    stacks_amiss = (stacks_failed +
                    stacks_check_failed +
                    stacks_in_progress +
                    stacks_delete_in_progress)

    if stacks_delete_failed:
        reasons = [ErrorReason.String("Stacks in DELETE_FAILED found.")]
        return pbag([FailConvergence(reasons)])

    # If there are no stacks in CHECK_* or other work to be done, we assume
    # we're at the beginning of a convergence cycle and need to perform stack
    # checks.
    if stacks_complete and not (stacks_check_complete or stacks_amiss):
        return pbag([CheckStack(stack) for stack in stacks_complete])

    # Otherwise, if all stacks are in a good state and we have the right number
    # of stacks, we call update on the stacks in CHECK_COMPLETE and return
    # SUCCESS without waiting for it to finish (calling update on a stack in
    # CREATE_COMPLETE is essentially a no-op) so that there will be no stacks
    # in CREATE_* the next time otter tries to converge this group. This will
    # cause all of the stacks to be checked at that time and let otter know
    # if there are any stacks that have fallen into an error state.
    elif not stacks_amiss and len(stacks_good) == desired_state.capacity:
        return pbag([UpdateStack(stack=stack, stack_config=config, retry=False)
                     for stack in stacks_check_complete])

    def get_create_steps():
        create_stack = CreateStack(stack_config=config)
        good_or_fixable_stack_count = (len(stacks_good) +
                                       len(stacks_in_progress) +
                                       len(stacks_check_failed))
        return [create_stack] * (desired_state.capacity -
                                 good_or_fixable_stack_count)

    def get_scale_down_steps():
        stacks_in_preferred_order = (
            stacks_good + stacks_in_progress + stacks_check_failed)
        unneeded_stacks = stacks_in_preferred_order[desired_state.capacity:]
        return [DeleteStack(stack) for stack in unneeded_stacks]

    def get_fix_steps(scale_down_steps):
        num_stacks_to_update = len(stacks_check_failed) - len(scale_down_steps)
        stacks_to_update = (stacks_check_failed[:num_stacks_to_update]
                            if num_stacks_to_update > 0 else [])
        return [UpdateStack(stack=s, stack_config=config)
                for s in stacks_to_update]

    create_steps = get_create_steps()
    scale_down_steps = get_scale_down_steps()
    fix_steps = get_fix_steps(scale_down_steps)
    delete_stacks_failed_steps = [DeleteStack(stack) for stack in stacks_failed]

    converge_later = (
        [ConvergeLater([ErrorReason.String("Waiting for stacks to finish.")])]
        if stacks_delete_in_progress or stacks_in_progress
        else [])

    return pbag(create_steps +
                fix_steps +
                scale_down_steps +
                delete_stacks_failed_steps +
                converge_later)
3f615b38d3e303a63873f3dc8ed2d3699460f3b8
29,932
def strip_long_text(text, max_len, append=u'…'):
    """Returns text whose len is less than or equal to max_len.

    If the text is stripped, then `append` is added, but the resulting text
    will have `max_len` length anyway.
    """
    if len(text) < max_len - 1:
        return text
    return text[:max_len - len(append)] + append
02ce128f1de1dbeb2a2dcef5bc2b6eb8745322d3
29,934
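strip_long_text guarantees the result never exceeds max_len, counting the appended ellipsis:

print(strip_long_text('abcdefghij', 8))  # 'abcdefg…' — exactly 8 characters
print(strip_long_text('short', 8))       # 'short'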
import typing

import numpy


def function_rescale(data_and_metadata_in: _DataAndMetadataLike,
                     data_range: typing.Optional[DataRangeType] = None,
                     in_range: typing.Optional[DataRangeType] = None) -> DataAndMetadata.DataAndMetadata:
    """Rescale data and update intensity calibration.

    rescale(a, (0.0, 1.0))
    """
    data_and_metadata = DataAndMetadata.promote_ndarray(data_and_metadata_in)

    if not Image.is_data_valid(data_and_metadata.data):
        raise ValueError("Rescale: invalid data")

    used_data_range = data_range if data_range is not None else (0.0, 1.0)

    def calculate_data() -> _ImageDataType:
        data = data_and_metadata.data
        assert data is not None
        data_ptp = numpy.ptp(data) if in_range is None else in_range[1] - in_range[0]
        data_ptp_i = 1.0 / data_ptp if data_ptp != 0.0 else 1.0
        if in_range is not None:
            data_min = in_range[0]
        else:
            data_min = numpy.amin(data)
        data_span = used_data_range[1] - used_data_range[0]
        if data_span == 1.0 and used_data_range[0] == 0.0:
            return typing.cast(_ImageDataType, (data - data_min) * data_ptp_i)
        else:
            m = data_span * data_ptp_i
            return typing.cast(_ImageDataType, (data - data_min) * m + used_data_range[0])

    intensity_calibration = Calibration.Calibration()

    return DataAndMetadata.new_data_and_metadata(calculate_data(),
                                                 intensity_calibration=intensity_calibration,
                                                 dimensional_calibrations=data_and_metadata.dimensional_calibrations)
e312ee866c142b6d2166c450831ce5308d263aaa
29,935
def svn_diff_parse_next_patch(*args):
    """
    svn_diff_parse_next_patch(svn_patch_file_t patch_file, svn_boolean_t reverse,
        svn_boolean_t ignore_whitespace, apr_pool_t result_pool,
        apr_pool_t scratch_pool) -> svn_error_t
    """
    return _diff.svn_diff_parse_next_patch(*args)
49c2797c2798a0870cbc68e6ffe917d353cf41bb
29,936
def Sub(inputs, **kwargs):
    """Calculate A - B.

    Parameters
    ----------
    inputs : list of Tensor
        The inputs, representing A and B respectively.

    Returns
    -------
    Tensor
        The output tensor.

    """
    CheckInputs(inputs, 2)
    arguments = ParseArguments(locals())

    # op_type must be 'Sub' to match the A - B semantics documented above;
    # the original 'Add' here looks like a copy-paste bug.
    output = Tensor.CreateOperator(nout=1, op_type='Sub', **arguments)

    if inputs[0].shape is not None:
        output.shape = inputs[0].shape[:]

    return output
9da04c8898bb58db0db49eb46bb69b3591f6c99d
29,937
import numpy as np
from scipy import special


def psf_1d(x, *p):
    """Evaluate a 1D Airy-pattern point spread function.

    Arguments:
        x {np.ndarray} -- positions at which to evaluate the PSF

    Parameters packed in p:
        A {float} -- peak amplitude
        x0 {float} -- center position
        alpha {float} -- width scaling factor
        C {float} -- constant background offset

    Returns:
        np.ndarray -- A * (2*J1(r)/r)**2 + C with r = (x - x0) * alpha,
        using the limit value A at r == 0
    """
    A, x0, alpha, C = p
    r = (x - x0) * alpha
    y = np.zeros(r.shape)
    # (2*J1(r)/r)**2 -> 1 as r -> 0, so handle the singularity explicitly.
    y[r != 0] = A * (2 * special.j1(r[r != 0]) / r[r != 0])**2
    y[r == 0] = A
    return y + C
06b5d6a56db19ebc7d43b1d4a95805559e549273
29,938
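A short sketch of evaluating the PSF and fitting it to noisy samples with scipy.optimize.curve_fit (the "true" parameter values below are arbitrary illustrations):

import numpy as np
from scipy.optimize import curve_fit

np.random.seed(0)
x = np.linspace(-10.0, 10.0, 201)
true_params = (1.0, 0.5, 2.0, 0.1)          # A, x0, alpha, C
y_noisy = psf_1d(x, *true_params) + np.random.normal(0.0, 0.01, x.shape)

# curve_fit works directly with the f(x, *p) signature used by psf_1d.
popt, pcov = curve_fit(psf_1d, x, y_noisy, p0=(1.0, 0.0, 1.0, 0.0))
print(popt)  # should be close to true_params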
import datetime

# Note: `time` here must be a project-local helper module that wraps the
# Arrow library -- the standard-library `time` module has no `arrow_format`.
import time


def arrow_format(jinja_ctx, context, *args, **kw):
    """Format a datetime using an Arrow formatter string.

    The filter context must be a datetime or time object.

    :term:`Arrow` is a Python helper library for parsing and formatting datetimes.

    Example:

    .. code-block:: html+jinja

        <li>
          Offer created at {{ offer.created_at|arrow_format('YYYYMMDDHHMMss') }}
        </li>

    `See Arrow formatting <http://crsmithdev.com/arrow/>`__.
    """
    assert len(args) == 1, "We take exactly one formatter argument, got {}".format(args)
    assert isinstance(context, (datetime.datetime, datetime.time)), "Got context {}".format(context)
    return time.arrow_format(dt=context, dt_format=args[0])
739a1b97499a614dfabe9321ccd18126d1ebcad9
29,941
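For reference, the Arrow calls such a helper presumably delegates to look like this (a sketch against the public `arrow` API, not the project's actual helper):

import datetime

import arrow

dt = datetime.datetime(2024, 3, 1, 12, 30)

# arrow.get() wraps a datetime; .format() takes the same formatter strings
# the template filter above accepts.
print(arrow.get(dt).format("YYYY-MM-DD HH:mm"))  # 2024-03-01 12:30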
def jsonify_query_result(conn, query):
    """Deprecated: serialize the rows of a SQLAlchemy query into a list of
    dicts suitable for JSON encoding."""
    res = query.all()
    # Previously this executed the query through the connection instead:
    #   res = conn.execute(query)
    #   return [dict(r) for r in res]
    return [r._asdict() for r in res]
ca11226c6f6fc731089f1d257db02a6cb83bd145
29,942
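A self-contained sketch of using the helper with SQLAlchemy (assumes SQLAlchemy 1.4+, where column-level queries return Row objects exposing `_asdict()`; the model and data are invented for illustration):

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class User(Base):
    __tablename__ = "users"
    id = Column(Integer, primary_key=True)
    name = Column(String)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(User(name="alice"))
    session.commit()
    # Queries over explicit columns return Row objects supporting _asdict();
    # conn is unused by the helper, so None is fine here.
    query = session.query(User.id, User.name)
    print(jsonify_query_result(None, query))  # [{'id': 1, 'name': 'alice'}]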
from collections import deque

# `_Token` is the token type produced by the tokenizer elsewhere in this
# module; tokens carry at least `type`, `precedence` and (for functions)
# `index` attributes.


def _postfix_queue(token_expr):
    """
    Form a postfix queue from a tokenized expression using the shunting-yard
    algorithm.

    If the expression contains a function, an extra token recording whether
    that function received arguments is appended before the function token.
    If a function has several arguments, the RPN evaluator pops them from the
    stack until a comma is the top token on the stack.

    :return: queue of tokens ready for reverse polish calculation
    """
    stack = deque()
    queue = deque()
    have_args = deque()
    for token in token_expr:
        if token.type in {'FLOAT', 'INTEGER', 'CONST', 'COMPLEX'}:
            queue.append(token)
        elif token.type == 'FUNC':
            stack.append(token)
            # If the function has no arguments, record False before FUNC
            if token_expr[token.index + 1].type == 'RPARENT':
                have_args.append(False)
            else:
                have_args.append(True)
        elif not stack:
            stack.append(token)
        elif token.type == 'COMMA':
            while stack[-1].type != 'FUNC':
                queue.append(stack.pop())
            queue.append(token)
        elif token.type == 'LPARENT':
            stack.append(token)
        elif token.type == 'RPARENT':
            while stack[-1].type not in {'LPARENT', 'FUNC'}:
                queue.append(stack.pop())
                if not stack:
                    raise ArithmeticError("Parentheses error")
            if stack[-1].type == 'FUNC':
                queue.append(_Token('', 'ARGS', have_args.pop()))
                queue.append(stack.pop())
            else:
                stack.pop()
        elif token.type in {'UMINUS', 'UPLUS'} and stack[-1].type == 'POWER':
            # From the Python docs: The power operator binds more tightly
            # than unary operators on its left; it binds less tightly than
            # unary operators on its right.
            stack.append(token)
        elif token.precedence == stack[-1].precedence and \
                token.type in {'POWER', 'UMINUS', 'UPLUS'}:
            # Right-to-left associative operations
            stack.append(token)
        elif token.precedence <= stack[-1].precedence:
            while stack:
                if token.precedence <= stack[-1].precedence:
                    queue.append(stack.pop())
                    continue
                else:
                    break
            stack.append(token)
        else:
            stack.append(token)
    while stack:
        queue.append(stack.pop())
    return queue
777c87bf1c4ac123f65e4061c1e5e72f5abe90d9
29,943
def get_from_list_to_examples(task_proc):
    """
    Return a function that converts a 2D list (e.g. rows read from a CSV
    file) into a list of examples.

    The conversion can differ between DataProcessors.
    """
    if isinstance(task_proc, DefaultProcessor):
        return lambda l: task_proc._create_examples(l, "test")
    else:
        raise NotImplementedError('from_list_to_examples for %s is required '
                                  % (type(task_proc)))
e42d050074b9f83127cca95261d5b1300a5b453a
29,944
from pathlib import Path

import yaml


def read_rules(rule_file=None):
    """Read rules from a rule yaml file.

    Args:
        rule_file (str, optional): The path of the rule yaml file. Defaults to None,
            in which case the bundled default rule file is used.

    Returns:
        dict: dict object read from the yaml file, or None if the path is invalid.
    """
    default_rule_file = Path(__file__).parent / 'rule/default_rule.yaml'
    p = Path(rule_file) if rule_file else default_rule_file
    if not p.is_file():
        logger.error('DataDiagnosis: invalid rule file path - {}'.format(str(p.resolve())))
        return None
    baseline = None
    with p.open() as f:
        baseline = yaml.load(f, Loader=yaml.SafeLoader)
    return baseline
24def1cb09f1ecc464d38b866397d9821ac24293
29,945
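A quick round-trip sketch with a temporary rule file (rule contents invented for illustration; assumes `read_rules` and a module-level `logger` are in scope):

import tempfile
from pathlib import Path

import yaml

rules = {"superbench": {"rules": {"rule0": {"categories": "KernelLaunch"}}}}

with tempfile.TemporaryDirectory() as tmp:
    rule_path = Path(tmp) / "rule.yaml"
    rule_path.write_text(yaml.safe_dump(rules))
    print(read_rules(str(rule_path)) == rules)  # True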
import numpy as np
from pymatgen.core.structure import Structure
from pymatgen.io.ase import AseAtomsAdaptor


def localized_rattle(
    structure: Structure,
    defect_coords: np.array,
    stdev: float = 0.25,
):
    """
    Given a pymatgen Structure, apply a random distortion to the coordinates
    of the atoms within a radius of 5 A from the defect atom. The random
    distortion is drawn from a gaussian with standard deviation `stdev`.
    Args:
        structure : Structure
        defect_coords (np.array): fractional coordinates of the defect
            (note: `distance_and_image_from_frac_coords` below expects
            fractional, not cartesian, coordinates)
        stdev (float): standard deviation of the gaussian used for the rattle (in A)
            (default: 0.25)
    Returns:
        rattled structure"""
    aaa = AseAtomsAdaptor()
    structure_copy = structure.copy()

    # Classify sites into 2 lists: inside or outside the 5 A sphere
    sites_inside_cutoff, sites_outside_cutoff = [], []
    for site in structure_copy:
        distance = site.distance_and_image_from_frac_coords(defect_coords)[0]
        if distance < 5:
            sites_inside_cutoff.append(site)
        else:
            sites_outside_cutoff.append(site)

    # Apply the rattle to the sites within the 5 A sphere
    structure_inside_cutoff = structure_copy.from_sites(sites_inside_cutoff)
    ase_struct = aaa.get_atoms(structure_inside_cutoff)
    ase_struct.rattle(stdev=stdev)
    rattled_structure = aaa.get_structure(ase_struct)

    # Add the sites outside the 5 A sphere back to the rattled structure
    for site_outside_cutoff in sites_outside_cutoff:
        rattled_structure.append(
            site_outside_cutoff.specie, site_outside_cutoff.frac_coords
        )

    return rattled_structure
35be05f136c5f1050394a0ac335ad281c0e855c3
29,946
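A minimal usage sketch on a toy rock-salt-like cell (the structure and defect position are invented for illustration; requires pymatgen and ase):

from pymatgen.core import Lattice, Structure

# Toy cubic cell; the "defect" here is simply the first site.
struct = Structure(
    Lattice.cubic(5.6),
    ["Na", "Cl", "Na", "Cl"],
    [[0, 0, 0], [0.5, 0, 0], [0, 0.5, 0.5], [0.5, 0.5, 0.5]],
)
defect_frac_coords = struct[0].frac_coords

rattled = localized_rattle(struct, defect_frac_coords, stdev=0.15)
print(len(rattled) == len(struct))  # True: same number of sites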
def calculate_jaccard(set_1, set_2) -> float:
    """Calculate the similarity between two sets.

    Note: despite the name, this computes the overlap
    (Szymkiewicz-Simpson) coefficient -- the intersection size divided by
    the size of the smaller set -- rather than the classic Jaccard index
    (intersection over union).

    :param set set_1: set 1
    :param set set_2: set 2
    """
    intersection = len(set_1.intersection(set_2))
    smaller_set = min(len(set_1), len(set_2))
    return intersection / smaller_set
eddb25b2fdc0dd5b5d2505fc52cb1353ce74f89d
29,947
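A quick illustration of how the overlap coefficient above differs from the textbook Jaccard index:

a = {1, 2, 3}
b = {2, 3, 4, 5, 6}

overlap = calculate_jaccard(a, b)   # 2 / 3, intersection over smaller set
jaccard = len(a & b) / len(a | b)   # 2 / 6, intersection over union
print(round(overlap, 3), round(jaccard, 3))  # 0.667 0.333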
def bad_request(e) -> 'template':
    """Displays a custom 400 error handler page."""
    return render_template('error_handler.html',
                           code=400,
                           message="Bad request",
                           url=url_for('main.profile_page'),
                           back_to='Profile Page'), 400
e9f302cea41e6a3b51044be28850b948153170e7
29,948
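For context, such a handler is typically registered on the Flask app like this (a sketch; the blueprint name `main` is taken from the `url_for` call above):

from flask import Flask, render_template, url_for

app = Flask(__name__)

# Register bad_request for all HTTP 400 responses raised by the app.
app.register_error_handler(400, bad_request)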
def quarter_of_year(datetimes):
    """Return the quarter of the year for the given dates."""
    return ((_month_of_year(datetimes) - 1) // 3 + 1).astype(int)
49f3d8d63f9cc5c73b2c0c6cf9859c8be53e567e
29,949
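A runnable sketch, with a minimal numpy-based `_month_of_year` stand-in for the private helper this function assumes:

import numpy as np

def _month_of_year(datetimes):
    """Month number 1-12 for an array of numpy datetime64 values."""
    return datetimes.astype('datetime64[M]').astype(int) % 12 + 1

dates = np.array(['2024-01-15', '2024-05-02', '2024-12-31'],
                 dtype='datetime64[D]')
print(_month_of_year(dates))   # [ 1  5 12]
print(quarter_of_year(dates))  # [1 2 4]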
def runner_entrypoint(args): """ Run bonobo using the python command entrypoint directly (bonobo.commands.entrypoint). """ return entrypoint(args)
f1eda845fa442d831f12b501a45dff3c91297f2a
29,950