Dataset columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M). Each row below is a code snippet (content) followed by its sha1 hash and its id.
def number_from_string(s):
    """
    Parse and return number from string.
    Return float only if number is not an int.
    Assume number can be parsed from string.
    """
    try:
        return int(s)
    except ValueError:
        return float(s)
50cc7defe7c60b536d184aaf91c2831ab63043e1
15,900
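A minimal usage sketch for the number_from_string snippet above; the calls and outputs are illustrative, not taken from the original source.

print(number_from_string("42"))    # -> 42 (int)
print(number_from_string("-7"))    # -> -7 (int)
print(number_from_string("3.14"))  # -> 3.14 (float; int() raises ValueError first)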
def ennAvgPool(inplanes, kernel_size=1, stride=None, padding=0, ceil_mode=False):
    """enn Average Pooling."""
    in_type = build_enn_divide_feature(inplanes)
    return enn.PointwiseAvgPool(
        in_type,
        kernel_size,
        stride=stride,
        padding=padding,
        ceil_mode=ceil_mode)
ea48e911a48237dd7ba19f0515ca4cb2e02f2fa3
15,901
def acceptable(*args, acceptables):
    """
    If the characters in StringVars passed as arguments are in acceptables
    return True, else returns False
    """
    for arg in args:
        for char in arg:
            if char.lower() not in acceptables:
                return False
    return True
607cc752fb61e8a9348bfdd889afcbb8a8ee5189
15,902
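A short usage sketch for the acceptable snippet above; the character set shown is a hypothetical example, since acceptables is a keyword-only argument in the original signature.

allowed = set("abcdefghijklmnopqrstuvwxyz0123456789")
print(acceptable("Route66", "cargo", acceptables=allowed))  # -> True (comparison is lowercased)
print(acceptable("some-text", acceptables=allowed))         # -> False ('-' is not allowed)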
from typing import Optional from typing import List from typing import Union import warnings def get_confusion_matrix( ground_truth: np.ndarray, predictions: np.ndarray, labels: Optional[List[Union[str, float]]] = None) -> np.ndarray: """ Computes a confusion matrix based on predictions and ground truth vectors. The confusion matrix (a.k.a. contingency table) has predictions in rows and ground truth in columns. If the value order is not provide via the ``labels`` parameter, the ordering is based on the alphanumeric sorting of the unique values in both of the input arrays. Parameters ---------- ground_truth : numpy.ndarray An array holding the *true* target values. predictions : numpy.ndarray An array holding *predictions* of the target values. labels : List[string, number], optional (default=None) If a certain ordering of the labels in the confusion matrix is desired, it can be specified via this parameter. By default alphanumeric sorting is used. Warns ----- UserWarning Some of the labels provided by the user are not present in either of the input arrays. Raises ------ IncorrectShapeError The ``ground_truth`` and/or ``labels`` vectors are not 1-dimensional. The length of these two arrays does not agree. TypeError The ``labels`` parameter is not a list. ValueError The ``labels`` list empty, it contains duplicate entries or some of the labels present in either of the input array are not accounted for by the ``labels`` list. Returns ------- confusion_matrix : numpy.ndarray A confusion matrix. """ if not fuav.is_1d_array(ground_truth): raise IncorrectShapeError('The ground truth vector has to be ' '1-dimensional numpy array.') if not fuav.is_1d_array(predictions): raise IncorrectShapeError('The predictions vector has to be ' '1-dimensional numpy array.') if ground_truth.shape[0] != predictions.shape[0]: raise IncorrectShapeError('Both the ground truth and the predictions ' 'vectors have to have the same length.') all_values = np.concatenate([ground_truth, predictions]) if labels is None: ordering = np.sort(np.unique(all_values)).tolist() elif isinstance(labels, list): if not labels: raise ValueError('The labels list cannot be empty.') labels_set = set(labels) if len(labels_set) != len(labels): raise ValueError('The labels list contains duplicates.') extra_labels = labels_set.difference(all_values) if extra_labels: warnings.warn( 'Some of the given labels are not present in either of the ' 'input arrays: {}.'.format(extra_labels), UserWarning) unaccounted_labels = set(all_values).difference(labels_set) if unaccounted_labels: raise ValueError('The following labels are present in the input ' 'arrays but were not given in the labels ' 'parameter: {}.'.format(unaccounted_labels)) ordering = labels else: raise TypeError('The labels parameter has to either a list or None.') confusion_matrix_list = [] for pred in ordering: pdt = predictions == pred row = [np.logical_and(pdt, ground_truth == i).sum() for i in ordering] confusion_matrix_list.append(row) confusion_matrix = np.array(confusion_matrix_list) return confusion_matrix
e6e45bd987345c1fc773fc1d0eccf752b8ee637c
15,903
def atom_explicit_hydrogen_valences(gra):
    """ explicit hydrogen valences, by atom
    """
    return dict_.transform_values(atom_explicit_hydrogen_keys(gra), len)
2f37bfd890c0f15014b17c6bd32981231104055f
15,904
def get_average(pixels):
    """
    Given a list of pixels, finds the average red, blue, and green values

    Input:
        pixels (List[Pixel]): list of pixels to be averaged
    Returns:
        rgb (List[int]): list of average red, green, blue values across pixels
        respectively

    Assumes you are returning in the order: [red, green, blue]
    """
    # rgb of each pixel
    pixel_r = 0
    pixel_g = 0
    pixel_b = 0
    # how many pixels in the list[pixels]
    n = 0
    for pixel in pixels:
        n += 1
        pixel_r += pixel.red
        pixel_g += pixel.green
        pixel_b += pixel.blue
    pixel_avg = [pixel_r // n, pixel_g // n, pixel_b // n]
    return pixel_avg
9cd694505f8d445732bc178b5d645ff273b298d1
15,905
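A usage sketch for the get_average snippet above. The Pixel class is not included in the snippet, so a namedtuple stand-in with red/green/blue fields is assumed here.

from collections import namedtuple

Pixel = namedtuple("Pixel", ["red", "green", "blue"])  # stand-in for the original Pixel type
pixels = [Pixel(255, 0, 0), Pixel(0, 255, 0), Pixel(0, 0, 255)]
print(get_average(pixels))  # -> [85, 85, 85]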
def _leading_space_count(line):
    """Return number of leading spaces in line."""
    i = 0
    while i < len(line) and line[i] == ' ':
        i += 1
    return i
b28daa2845618df5030a79129bb7cec1167b149a
15,906
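A quick illustration of the _leading_space_count helper above; the example strings are hypothetical.

print(_leading_space_count("    return i"))  # -> 4
print(_leading_space_count("no indent"))     # -> 0
print(_leading_space_count("\tstill 0"))     # -> 0 (tabs are not counted)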
def _get_marker_indices(marker, line):
    """ method to find the start and end parameter markers
    on a template file line.  Used by write_to_template()
    """
    indices = [i for i, ltr in enumerate(line) if ltr == marker]
    start = indices[0:-1:2]
    end = [i + 1 for i in indices[1::2]]
    assert len(start) == len(end)
    return start, end
4e68f6629fd94920ddc6290c75d92e8de7b467bb
15,907
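A usage sketch for the _get_marker_indices helper above, using a hypothetical template line with '~' as the parameter marker.

line = "recharge  ~rch1~  ~rch2~"
start, end = _get_marker_indices("~", line)
print(start, end)                               # -> [10, 18] [16, 24]
print([line[s:e] for s, e in zip(start, end)])  # -> ['~rch1~', '~rch2~']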
import os


def get_number_of_images(dir):
    """
    Returns number of files in given directory
    Input: dir - full path of directory
    Output: number of files in directory
    """
    return len([name for name in os.listdir(dir)
                if os.path.isfile(os.path.join(dir, name))])
a964764466aea735558a8ccc832bd0a00616883e
15,908
def get_wrapper_depth(wrapper):
    """Return depth of wrapper function.

    .. versionadded:: 3.0
    """
    return wrapper.__wrapped__.__wrappers__ + (1 - wrapper.__depth__)
c1c31c45a059c4ee56b39322e966d30b742ef86e
15,909
def apiTest():
    """Tests the API connection to lmessage.  Returns true if it is connected."""
    try:
        result = api.add(2, 3)
    except:
        return False
    return result == 5
5d63720e78fe5e1bcecd2b1792a0f9bf6345595d
15,910
from scipy import stats as dists


def get_distribution(dist_name):
    """Fetches a scipy distribution class by name"""
    if dist_name not in dists.__all__:
        return None
    cls = getattr(dists, dist_name)
    return cls
bebdb2578dd191b1d0ee1aea96e88d6be4bc144c
15,911
# Note: this snippet relies on scikit-learn and on a local `utils` module
# (providing get_bin_indices and binning) that is not included here.
import sklearn.metrics
import sklearn.utils


def ece(y_probs, y_preds, y_true, balanced=False, bins="fd", **bin_args):
    """Compute the expected calibration error (ECE).

    Parameters:
        y_probs (np.array): predicted class probabilities
        y_preds (np.array): predicted class labels
        y_true (np.array): true class labels

    Returns:
        exp_ce (float): expected calibration error
    """
    sklearn.utils.check_consistent_length(y_preds, y_true)

    # define the bin function
    def bin_func(y_probs_bin, y_preds_bin, y_true_bin):
        acc = (y_preds_bin == y_true_bin).mean()
        conf = y_probs_bin.mean()
        return abs(acc - conf)

    # define the balanced bin function
    def balanced_bin_func(y_probs_bin, y_preds_bin, y_true_bin):
        balacc = sklearn.metrics.balanced_accuracy_score(y_true_bin, y_preds_bin)
        conf = y_probs_bin.mean()
        return abs(balacc - conf)

    # compute the full result
    bin_indices = utils.get_bin_indices(y_probs, bins=bins, lower=0, upper=1,
                                        **bin_args)
    func = balanced_bin_func if balanced else bin_func
    return utils.binning(y_probs, y_preds, y_true, bin_indices, func)
073d1190d71808de03002322679bb29d75a31258
15,912
def _call_or_get(value, menu=None, choice=None, string=None, obj=None, caller=None): """ Call the value, if appropriate, or just return it. Args: value (any): the value to obtain. It might be a callable (see note). Keyword Args: menu (BuildingMenu, optional): the building menu to pass to value if it is a callable. choice (Choice, optional): the choice to pass to value if a callable. string (str, optional): the raw string to pass to value if a callback. obj (Object): the object to pass to value if a callable. caller (Account or Object, optional): the caller to pass to value if a callable. Returns: The value itself. If the argument is a function, call it with specific arguments (see note). Note: If `value` is a function, call it with varying arguments. The list of arguments will depend on the argument names in your callable. - An argument named `menu` will contain the building menu or None. - The `choice` argument will contain the choice or None. - The `string` argument will contain the raw string or None. - The `obj` argument will contain the object or None. - The `caller` argument will contain the caller or None. - Any other argument will contain the object (`obj`). Thus, you could define callbacks like this: def on_enter(menu, caller, obj): def on_nomatch(string, choice, menu): def on_leave(caller, room): # note that room will contain `obj` """ if callable(value): # Check the function arguments kwargs = {} spec = getargspec(value) args = spec.args if spec.keywords: kwargs.update(dict(menu=menu, choice=choice, string=string, obj=obj, caller=caller)) else: if "menu" in args: kwargs["menu"] = menu if "choice" in args: kwargs["choice"] = choice if "string" in args: kwargs["string"] = string if "obj" in args: kwargs["obj"] = obj if "caller" in args: kwargs["caller"] = caller # Fill missing arguments for arg in args: if arg not in kwargs: kwargs[arg] = obj # Call the function and return its return value return value(**kwargs) return value
b5ebf790913bbdaab980ae7f050a96748f1fd3e6
15,913
import re


def is_shared_object(s):
    """
    Return True if s looks like a shared object file.

    Example: librt.so.1
    """
    # matches names such as librt.so.1 or libfoo.so.1.2
    so = re.compile(r'^[\w_\-]+\.so\.[0-9]+(\.[0-9]+)*$', re.IGNORECASE).match
    return so(s)
f6d2f5f589c468613004d06c7d213f899f31b7c4
15,914
def get_name(properties, lang):
    """Return the Place name from the properties field of the elastic response

    Here 'name' corresponds to the POI name in the language of the user
    request (i.e. 'name:{lang}' field).

    If lang is None or if name:lang is not in the properties,
    then name receives the local name value.

    'local_name' corresponds to the name in the language of the country where
    the POI is located.

    >>> get_name({}, 'fr') is None
    True

    >>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, None)
    'spontini'

    >>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, 'cz')
    'spontini'

    >>> get_name({'name':'spontini', 'name:en':'spontinien', 'name:fr':'spontinifr'}, 'fr')
    'spontinifr'
    """
    name = properties.get(f"name:{lang}")
    if name is None:
        name = properties.get("name")
    return name
82bd6b0fe7e35dae39767b899b56b24ff91f01cb
15,915
def get_task(name):
    """Return the chosen task."""
    tasks_json = load_json('tasks.json')
    return tasks_json[name]
44e39dd9757247212e8e9923fd3f7756fd3b0b9a
15,916
def aws_credentials(request: pytest.fixture, aws_utils: pytest.fixture, profile_name: str): """ Fixture for setting up temporary AWS credentials from assume role. :param request: _pytest.fixtures.SubRequest class that handles getting a pytest fixture from a pytest function/fixture. :param aws_utils: aws_utils fixture. :param profile_name: Named AWS profile to store temporary credentials. """ aws_credentials_obj = AwsCredentials(profile_name) original_access_key, original_secret_access_key, original_token = aws_credentials_obj.get_aws_credentials() aws_credentials_obj.set_aws_credentials_by_session(aws_utils.assume_session()) def teardown(): # Reset to the named profile using the original AWS credentials aws_credentials_obj.set_aws_credentials(original_access_key, original_secret_access_key, original_token) request.addfinalizer(teardown) return aws_credentials_obj
13d1549b74b597cf3b00f98a5012c4bae111eeeb
15,917
def mean_predictions(predicted): """ Calculate the mean of predictions that overlaps. This is donne mostly to be able to plot what the model is doing. ------------------------------------------------------- Args: predicted : numpy array Numpy array with shape (Number points to predict - prediction length -1, predictions length) ------------------------------------------------------- return: predictions_mean : list list with len of number to predict where each position is the mean of all predictions to that step """ array_global = [[] for _ in range((predicted.shape[0] + predicted.shape[1]))] for i in range(predicted.shape[0]): for l, value in enumerate(predicted[i]): array_global[i + l].append((float(value))) predictions_mean = [] for i in range(len(array_global) - 1): predictions_mean.append(np.array(array_global[i]).mean()) return predictions_mean
7ee19312ad17b97b27fe74a35df43ea4fa1ec709
15,918
import os import errno def update_diskspace(dmfilestat, cached=None): """Update diskspace field in dmfilestat object""" try: # search both results directory and raw data directory search_dirs = [ dmfilestat.result.get_report_dir(), dmfilestat.result.experiment.expDir, ] if not cached: cached = dm_utils.get_walk_filelist( search_dirs, list_dir=dmfilestat.result.get_report_dir() ) total_size = 0 # Create a list of files eligible to process # exclude onboard_results folder if thumbnail or if fullchip was reanalyzed from signal processing sigproc_results_dir = os.path.join( dmfilestat.result.get_report_dir(), "sigproc_results" ) exclude_onboard_results = dmfilestat.result.isThumbnail or ( "onboard_results" not in os.path.realpath(sigproc_results_dir) ) for start_dir in search_dirs: to_process = [] if os.path.isdir(start_dir): to_process, _ = dm_utils._file_selector( start_dir, dmfilestat.dmfileset.include, dmfilestat.dmfileset.exclude, [], exclude_onboard_results, add_linked_sigproc=True, cached=cached, ) # process files in list for path in to_process[1:]: try: # logger.debug("%d %s %s" % (j, 'diskspace', path), extra = logid) if not os.path.islink(path): total_size += os.lstat(path)[6] except Exception as inst: if inst.errno == errno.ENOENT: pass else: errmsg = "update_diskspace %s" % (inst) logger.error(errmsg, extra=logid) diskspace = float(total_size) / (1024 * 1024) except: diskspace = None raise finally: dmfilestat.diskspace = diskspace dmfilestat.save() return diskspace
a3b54b0612ac05ee92735aed5641f8b25bb22c2d
15,919
def find_best_classifier(data, possible_classifiers, target_classifier): """Given a list of points, a list of possible Classifiers to use as tests, and a Classifier for determining the true classification of each point, finds and returns the classifier with the lowest disorder. Breaks ties by preferring classifiers that appear earlier in the list. If the best classifier has only one branch, raises NoGoodClassifiersError.""" min_disorder = INF for test in possible_classifiers: avg_disorder = average_test_disorder(data, test, target_classifier) if avg_disorder < min_disorder: best_test = test min_disorder = avg_disorder if len(split_on_classifier(data, best_test))==1: raise NoGoodClassifiersError return best_test
7c3dc1f8fc0933f238b372fcd3bf3133c2958398
15,920
def get_product_type_name(stac_item):
    """ Create a ProductType name from a STAC Items metadata
    """
    properties = stac_item['properties']
    assets = stac_item['assets']

    parts = []

    platform = properties.get('platform') or properties.get('eo:platform')
    instruments = properties.get('instruments') or \
        properties.get('eo:instruments')
    constellation = properties.get('constellation') or \
        properties.get('eo:constellation')
    mission = properties.get('mission') or properties.get('eo:mission')

    if platform:
        parts.append(platform)

    if instruments:
        parts.extend(instruments)

    if constellation:
        parts.append(constellation)

    if mission:
        parts.append(mission)

    bands = properties.get('eo:bands')
    if not bands:
        bands = []
        for asset in assets.values():
            # default to an empty list when an asset has no 'eo:bands' key
            bands.extend(asset.get('eo:bands', []))

    parts.extend([band['name'] for band in bands])

    if not parts:
        raise RegistrationError(
            'Failed to generate Product type name from metadata'
        )

    return '_'.join(parts)
fc7351c513eae63233b32b86fe6e5098a1571c8a
15,921
def get_show_default():
    """ gets the defaults """
    return SHOW_DEFAULT
88f6b202ae16155b8ec87eb566535703e33033b7
15,922
import torch def sample_langevin_v2(x, model, stepsize, n_steps, noise_scale=None, intermediate_samples=False, clip_x=None, clip_grad=None, reject_boundary=False, noise_anneal=None, spherical=False, mh=False, temperature=None, norm=False, cut=True): """Langevin Monte Carlo x: torch.Tensor, initial points model: An energy-based model. returns energy stepsize: float n_steps: integer noise_scale: Optional. float. If None, set to np.sqrt(stepsize * 2) clip_x : tuple (start, end) or None boundary of square domain reject_boundary: Reject out-of-domain samples if True. otherwise clip. """ assert not ((stepsize is None) and (noise_scale is None)), 'stepsize and noise_scale cannot be None at the same time' if noise_scale is None: noise_scale = np.sqrt(stepsize * 2) if stepsize is None: stepsize = (noise_scale ** 2) / 2 noise_scale_ = noise_scale stepsize_ = stepsize if temperature is None: temperature = 1. # initial data x.requires_grad = True E_x = model(x) grad_E_x = autograd.grad(E_x.sum(), x, only_inputs=True)[0] if clip_grad is not None: grad_E_x = clip_vector_norm(grad_E_x, max_norm=clip_grad) E_y = E_x; grad_E_y = grad_E_x; l_samples = [x.detach().to('cpu')] l_dynamics = []; l_drift = []; l_diffusion = []; l_accept = [] for i_step in range(n_steps): noise = torch.randn_like(x) * noise_scale_ dynamics = - stepsize_ * grad_E_x / temperature + noise y = x + dynamics reject = torch.zeros(len(y), dtype=torch.bool) if clip_x is not None: if reject_boundary: accept = ((y >= clip_x[0]) & (y <= clip_x[1])).view(len(x), -1).all(dim=1) reject = ~ accept y[reject] = x[reject] else: y = torch.clamp(y, clip_x[0], clip_x[1]) if norm: y = y/y.sum(dim=(2,3)).view(-1,1,1,1) if spherical: y = y / y.norm(dim=1, p=2, keepdim=True) # y_accept = y[~reject] # E_y[~reject] = model(y_accept) # grad_E_y[~reject] = autograd.grad(E_y.sum(), y_accept, only_inputs=True)[0] E_y = model(y) grad_E_y = autograd.grad(E_y.sum(), y, only_inputs=True)[0] if clip_grad is not None: grad_E_y = clip_vector_norm(grad_E_y, max_norm=clip_grad) if mh: y_to_x = ((grad_E_x + grad_E_y) * stepsize_ - noise).view(len(x), -1).norm(p=2, dim=1, keepdim=True) ** 2 x_to_y = (noise).view(len(x), -1).norm(dim=1, keepdim=True, p=2) ** 2 transition = - (y_to_x - x_to_y) / 4 / stepsize_ # B x 1 prob = -E_y + E_x accept_prob = torch.exp((transition + prob) / temperature)[:,0] # B reject = (torch.rand_like(accept_prob) > accept_prob) # | reject y[reject] = x[reject] E_y[reject] = E_x[reject] grad_E_y[reject] = grad_E_x[reject] x = y; E_x = E_y; grad_E_x = grad_E_y l_accept.append(~reject) x = y; E_x = E_y; grad_E_x = grad_E_y if noise_anneal is not None: noise_scale_ = noise_scale / (1 + i_step) l_dynamics.append(dynamics.detach().cpu()) l_drift.append((- stepsize * grad_E_x).detach().cpu()) l_diffusion.append(noise.detach().cpu()) l_samples.append(x.detach().cpu()) if cut: x = x[x.var(dim=(2,3))>1e-6].view(-1,1,40,40) return {'sample': x.detach(), 'l_samples': l_samples, 'l_dynamics': l_dynamics, 'l_drift': l_drift, 'l_diffusion': l_diffusion, 'l_accept': l_accept}
a3dd79facb089afbeafc4e9845cf1324de75226b
15,923
def fpoly(x, m): """Compute the first `m` simple polynomials. Parameters ---------- x : array-like Compute the simple polynomials at these abscissa values. m : :class:`int` The number of simple polynomials to compute. For example, if :math:`m = 3`, :math:`x^0`, :math:`x^1` and :math:`x^2` will be computed. Returns ------- :class:`numpy.ndarray` """ if isinstance(x, np.ndarray): n = x.size else: n = 1 if m < 1: raise ValueError('Order of polynomial must be at least 1.') try: dt = x.dtype except AttributeError: dt = np.float64 leg = np.ones((m, n), dtype=dt) if m >= 2: leg[1, :] = x if m >= 3: for k in range(2, m): leg[k, :] = leg[k-1, :] * x return leg
335c73bf4008be1331d8f030266f5f89d072ed2c
15,924
import logging def get_custom_logger(context): """ Customizable template for creating a logger. What would work is to have the format and date format passed """ # Initialize Custom Logging # Timestamps with logging assist debugging algorithms # With long execution times manifest = context.gear_dict['manifest_json'] # Set suite (default to flywheel) try: suite = manifest['custom']['flywheel']['suite'] except KeyError: suite = 'flywheel' # Set gear_name gear_name = manifest['name'] log_name = '/'.join([suite, gear_name]) log_level = logging.INFO # Tweak the formatting fmt = '%(asctime)s.%(msecs)03d %(levelname)-8s [%(name)s %(funcName)s()]: %(message)s' dtfmt = '%Y-%m-%d %H:%M:%S' logging.basicConfig(level=log_level, format=fmt, datefmt=dtfmt) log = logging.getLogger(log_name) log.critical('{} log level is {}'.format(log_name, log_level)) return log
01b0d584fc81c3948fcdd3d0294c949cbd8b633f
15,925
import os def get_torch_core_binaries(module): """Return required files from the torch folders. Notes: So far only tested for Windows. Requirements for other platforms are unknown. """ binaries = [] torch_dir = module.getCompileTimeDirectory() extras = os.path.join(torch_dir, "lib") if os.path.isdir(extras): for f in os.listdir(extras): # apart from shared libs, also the C header files are required! if f.endswith((".dll", ".so", ".h")) or ".so." in f: item = os.path.join(extras, f) if os.path.isfile(item): binaries.append((item, ".")) # this folder exists in the Linux version extras = os.path.join(torch_dir, "bin") if os.path.isdir(extras): for f in os.listdir(extras): item = os.path.join(extras, f) if os.path.isfile(item): binaries.append((item, ".")) # this folder exists in the Linux version extras = os.path.join(torch_dir, "include") if os.path.isdir(extras): for root, _, files in os.walk(extras): for f in files: item = os.path.join(root, f) if os.path.isfile(item): binaries.append((item, ".")) return binaries
df1aa86f75fa444707ed3499b30f2806389d914c
15,926
def _function_fullname(f):
    """Return the full name of the callable `f`, including also its module name."""
    function, _ = getfunc(f)  # get the raw function also for OOP methods
    if not function.__module__:
        # At least macros defined in the REPL have `__module__=None`.
        return function.__qualname__
    return f"{function.__module__}.{function.__qualname__}"
eb6fd829081a4606c7be4520a15d627960360b8f
15,927
def dists2centroids_numpy(a):
    """
    :param a: dist ndarray, shape = (*, h, w, 4=(t, r, b, l))
    :return a: Box ndarray, shape is (*, h, w, 4=(cx, cy, w, h))
    """
    return corners2centroids_numpy(dists2corners_numpy(a))
a85122d871179a9d0fb7fa9b844caa448398184c
15,928
import math def heatmap(data_df, figsize=None, cmap="Blues", heatmap_kw=None, gridspec_kw=None): """ Plot a residue matrix as a color-encoded matrix. Parameters ---------- data_df : :class:`pandas.DataFrame` A residue matrix produced with :func:`~luna.analysis.residues.generate_residue_matrix`. figsize : tuple, optional Size (width, height) of a figure in inches. cmap : str, iterable of str The mapping from data values to color space. The default value is 'Blues'. heatmap_kw : dict, optional Keyword arguments for :func:`seaborn.heatmap`. gridspec_kw : dict, optional Keyword arguments for :class:`matplotlib.gridspec.GridSpec`. Used only if the residue matrix (``data_df``) contains interactions. Returns ------- : :class:`matplotlib.axes.Axes` or :class:`numpy.ndarray` of :class:`matplotlib.axes.Axes` """ data_df = data_df.reset_index() heatmap_kw = heatmap_kw or {} gridspec_kw = gridspec_kw or {} interactions = None if "interaction" in data_df.columns: interactions = sorted(data_df["interaction"].unique()) max_value = data_df[data_df.columns[2:]].max().max() else: max_value = data_df[data_df.columns[1:]].max().max() if not interactions: data_df.set_index('entry', inplace=True) fig = plt.figure(figsize=figsize) ax = sns.heatmap(data_df, cmap=cmap, vmax=max_value, vmin=0, **heatmap_kw) ax.set_xlabel("") ax.set_ylabel("") return ax else: ncols = 3 if "ncols" in gridspec_kw: ncols = gridspec_kw["ncols"] del gridspec_kw["ncols"] nrows = math.ceil(len(interactions) / ncols) fig, axs = plt.subplots(nrows, ncols, figsize=figsize, gridspec_kw=gridspec_kw) row, col = 0, 0 for i, interaction in enumerate(interactions): df = data_df[data_df["interaction"] == interaction].copy() df.drop(columns="interaction", inplace=True) df.set_index('entry', inplace=True) g = sns.heatmap(df, cmap=cmap, vmax=max_value, vmin=0, ax=axs[row][col], **heatmap_kw) g.set_title(interaction) g.set_xlabel("") g.set_ylabel("") col += 1 if col == ncols: row += 1 col = 0 if len(interactions) < nrows * ncols: diff = (nrows * ncols) - len(interactions) for i in range(1, diff + 1): axs[-1][-1 * i].axis('off') return axs
99ba802f82f9425fa3946253be78730b6216d9c9
15,929
import torch def combined_loss(x, reconstructed_x, mean, log_var, args): """ MSE loss for reconstruction, KLD loss as per VAE. Also want to output dimension (element) wise RCL and KLD """ # First, binary data loss1 = torch.nn.BCEWithLogitsLoss(size_average=False) loss1_per_element = torch.nn.BCEWithLogitsLoss( size_average=False, reduce=False ) binary_range = args.binary_real_one_hot_parameters['binary_range'] reconstructed_x1 = reconstructed_x[:, binary_range[0]: binary_range[1]] x1 = x[:, binary_range[0]: binary_range[1]] RCL1 = loss1(reconstructed_x1, x1) RCL1_per_element = loss1_per_element(reconstructed_x1, x1) # Next, real data loss2 = torch.nn.MSELoss(size_average=False) loss2_per_element = torch.nn.MSELoss(size_average=False, reduce=False) real_range = args.binary_real_one_hot_parameters['real_range'] reconstructed_x2 = reconstructed_x[:, real_range[0]: real_range[1]] x2 = x[:, real_range[0]: real_range[1]] RCL2 = loss2(reconstructed_x2, x2) RCL2_per_element = loss2_per_element(reconstructed_x2, x2) # Next, one-hot data loss3 = torch.nn.CrossEntropyLoss(size_average=True) loss3_per_element = torch.nn.CrossEntropyLoss( size_average=True, reduce=False ) one_hot_range = args.binary_real_one_hot_parameters['one_hot_range'] reconstructed_x3 = reconstructed_x[:, one_hot_range[0]: one_hot_range[1]] x3 = x[:, one_hot_range[0]: one_hot_range[1]] # This has 3 one-hot's. lets split it up x3_1 = x3[:, :19] x3_2 = x3[:, 19:19 + 19] x3_3 = x3[:, 19+19:] reconstructed_x3_1 = reconstructed_x3[:, :19] reconstructed_x3_2 = reconstructed_x3[:, 19:19 + 19] reconstructed_x3_3 = reconstructed_x3[:, 19+19:] _, labels1 = x3_1.max(dim=1) _, labels2 = x3_2.max(dim=1) _, labels3 = x3_3.max(dim=1) # print(labels.size(), reconstructed_x3.size(), x3.size()) RCL3_1 = loss3(reconstructed_x3_1, labels1.long()) RCL3_per_element_1 = loss3_per_element(reconstructed_x3_1, labels1.long()) RCL3_2 = loss3(reconstructed_x3_2, labels2.long()) RCL3_per_element_2 = loss3_per_element(reconstructed_x3_2, labels2.long()) RCL3_3 = loss3(reconstructed_x3_3, labels3.long()) RCL3_per_element_3 = loss3_per_element(reconstructed_x3_3, labels3.long()) KLD = -0.5 * torch.sum(1 + log_var - mean.pow(2) - log_var.exp()) KLD_per_element = -0.5 * (1 + log_var - mean.pow(2) - log_var.exp()) RCL = RCL1 + RCL2 + RCL3_1 + RCL3_2 + RCL3_3 RCL_per_element = torch.cat( ( RCL1_per_element, RCL2_per_element, RCL3_per_element_1.view([-1, 1]), RCL3_per_element_2.view([-1, 1]), RCL3_per_element_3.view([-1, 1]) ), 1 ) return RCL + args.beta_vae*KLD, RCL, KLD, RCL_per_element, KLD_per_element
162b2706f9643f66ebb0c3b000ea025d411029e2
15,930
def isfloat(string: str) -> bool:
    """
    This function receives a string and returns if it is a float or not.

    :param str string: The string to check.
    :return: A boolean representing if the string is a float.
    :rtype: bool
    """
    try:
        float(string)
        return True
    except (ValueError, TypeError):
        return False
ac6d8fcbbcf6b8cb442c50895576f417618a7429
15,931
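A few illustrative calls for the isfloat snippet above.

print(isfloat("3.14"))  # -> True
print(isfloat("1e-3"))  # -> True (scientific notation parses as float)
print(isfloat("abc"))   # -> False
print(isfloat(None))    # -> False (TypeError is caught)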
import re


def parse_path_kvs(file_path):
    """
    Find all key-value pairs in a file path; the pattern is *_KEY=VALUE_*.
    """
    parser = re.compile("(?<=[/_])[a-z0-9]+=[a-zA-Z0-9]+[.]?[0-9]*(?=[_/.])")
    kvs = parser.findall(file_path)
    kvs = [kv.split("=") for kv in kvs]
    return {kv[0]: to_number(kv[1]) for kv in kvs}
65d3711752808299272383f4b1328336ba9c463c
15,932
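A usage sketch for the parse_path_kvs snippet above. The helper to_number is referenced but not defined in the snippet, so a minimal stand-in is assumed here.

def to_number(s):  # stand-in for the original helper
    try:
        return int(s)
    except ValueError:
        try:
            return float(s)
        except ValueError:
            return s

print(parse_path_kvs("results/run_lr=0.001_seed=42/output.csv"))
# -> {'lr': 0.001, 'seed': 42}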
def user_count_by_type(utype: str) -> int:
    """Returns the total number of users that match a given type"""
    return get_count('users', 'type', (utype.lower(),))
232c4cc40ba31b4fb60f40708f2a38ae73096aea
15,933
import re


def node_label(label, number_of_ports, debug=None):
    """
    generate the HTML-like label

    <TABLE ALIGN="CENTER"><TR><TD COLSPAN="2">name</TD></TR>
    <TR>
      <TD PORT="odd">odd</TD>
      <TD PORT="even">even</TD>
    </TR>

    singleport:
    <TR>
      <TD PORT="port">port</TD>
    </TR>

    return a string
    """
    long_string = []
    # name = re.sub(r"[;: ]+", "\\\\n", label)  # LF do not work in HTML-like
    name = re.sub(r'[;: ]+', ' ', label)
    port_range = range(1, number_of_ports + 1)
    long_string.append('<<TABLE ALIGN="CENTER">')
    if number_of_ports % 2 == 1:
        long_string.extend(['<TR>', '<TD>', name, '</TD>', '</TR>'])
        long_string.append('<TR>')
        str_single = '<TD PORT="' + str(number_of_ports) + '">' + str(number_of_ports) + '</TD>'
        long_string.append(str_single)
        long_string.append('</TR>')
    else:
        long_string.extend(['<TR>', '<TD COLSPAN="2">', name, '</TD>', '</TR>'])
    for i in range(number_of_ports // 2):
        long_string.append('<TR>')
        odd = i * 2 + 1
        str_odd = '<TD PORT="' + str(odd) + '">' + str(odd).zfill(2) + '</TD>'
        long_string.append(str_odd)
        even = i * 2 + 2
        str_even = '<TD PORT="' + str(even) + '">' + str(even).zfill(2) + '</TD>'
        long_string.append(str_even)
        long_string.append('</TR>')
    long_string.append('</TABLE>>')
    return ''.join([str(elem) for elem in long_string])
07b5a5dab593e1e105d840989b5b053551610e25
15,934
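An illustrative call to the node_label function above for a hypothetical 4-port node. The function returns one unbroken string; it is wrapped across comment lines here for readability.

print(node_label("switch1: uplink", 4))
# <<TABLE ALIGN="CENTER"><TR><TD COLSPAN="2">switch1 uplink</TD></TR>
# <TR><TD PORT="1">01</TD><TD PORT="2">02</TD></TR>
# <TR><TD PORT="3">03</TD><TD PORT="4">04</TD></TR></TABLE>>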
from itertools import groupby


def grouperElements(liste, function=len):
    """
    Groups elements according to the key function given to it.
    For the kalaba as well as for the graphemes, we will need the length.
    """
    lexique = []
    data = sorted(liste, key=function)
    for k, g in groupby(data, function):
        lexique.append(list(g))
    return lexique
e75e8e379378ac1207ae0ee9521f630c04cff2f7
15,935
def SensorLocation_Cast(*args):
    """
    Cast(BaseObject o) -> SensorLocation
    SensorLocation_Cast(Seiscomp::Core::BaseObjectPtr o) -> SensorLocation
    """
    return _DataModel.SensorLocation_Cast(*args)
85a5a6f711c0c5d77f0b93b2e6f819bdfd466ce1
15,936
def fatorial(num=1, show=False):
    """
    -> Computes the factorial of a number.

    :param num: the number whose factorial is calculated
    :param show: (optional) show the calculation step by step
    :return: the factorial of num.
    """
    print('-=' * 20)
    fat = 1
    for i in range(num, 0, -1):
        fat *= i
    if show:
        resp = f'{str(num)}! = '
        while num > 1:
            resp += f'{str(num)} x '
            num -= 1
        resp += f'{str(num)} = {str(fat)}'
        return resp
    else:
        return fat
80ca60d2ba64a7089f3747a13c109de0bc7c159c
15,937
import os

import yaml


def read_rudder_config(path=None):
    """Reads the servo configuration from config.yml and returns a matching servo."""
    if path is None:
        path = os.path.dirname(os.path.abspath(__file__))
    with open(path + "/config.yml", "r") as yml:
        conf = yaml.full_load(yml)
    rudder_config = conf["rudder"]
    return rudder_config
079d1172c9109f174ac8f48927a0ff03a0466806
15,938
def linear_trend(series=None, coeffs=None, index=None, x=None, median=False): """Get a series of points representing a linear trend through `series` First computes the lienar regression, the evaluates at each dates of `series.index` Args: series (pandas.Series): data with DatetimeIndex as the index. coeffs (array or List): [slope, intercept], result from np.polyfit index (DatetimeIndex, list[date]): Optional. If not passing series, can pass the DatetimeIndex or list of dates to evaluate coeffs at. Converts to numbers using `matplotlib.dates.date2num` x (ndarray-like): directly pass the points to evaluate the poly1d Returns: Series: a line, equal length to arr, with same index as `series` """ if coeffs is None: coeffs = fit_line(series, median=median) if index is None and x is None: index = series.dropna().index if x is None: x = mdates.date2num(index) poly = np.poly1d(coeffs) linear_points = poly(x) return pd.Series(linear_points, index=index)
6bd09089ffd828fd3d408c0c2b03c3facfcfbd6b
15,939
def snapshot_metadata_get(context, snapshot_id):
    """Get all metadata for a snapshot."""
    return IMPL.snapshot_metadata_get(context, snapshot_id)
8dda987916cb772d6498cd295056ef2b5465c00d
15,940
def graph_from_tensors(g, is_real=True):
    """ """
    loop_edges = list(nx.selfloop_edges(g))
    if len(loop_edges) > 0:
        g.remove_edges_from(loop_edges)
    if is_real:
        subgraph = (g.subgraph(c) for c in nx.connected_components(g))
        g = max(subgraph, key=len)
        g = nx.convert_node_labels_to_integers(g)
    else:
        g = pick_connected_component_new(g)
    return g
7f43531f7cbf9221a6b00a56a24325b58f60ea84
15,941
def hook(t):
    """Calculate the progress from download callbacks (For progress bar)"""
    def inner(bytes_amount):
        t.update(bytes_amount)  # Update progress bar
    return inner
d8228b9dec203aaa32d268dea8feef52e8db6137
15,942
def delete(event, context):
    """
    Delete a cfn stack using an assumed role
    """
    stack_id = event["PhysicalResourceId"]
    if '[$LATEST]' in stack_id:
        # No stack was created, so exiting
        return stack_id, {}
    cfn_client = get_client("cloudformation", event, context)
    cfn_client.delete_stack(StackName=stack_id)
    return stack_id
555682546aa6f1bbbc133538003b51f02e744d70
15,943
from typing import Match import six def _rec_compare(lhs, rhs, ignore, only, key, report_mode, value_cmp_func, _regex_adapter=RegexAdapter): """ Recursive deep comparison implementation """ # pylint: disable=unidiomatic-typecheck lhs_cat = _categorise(lhs) rhs_cat = _categorise(rhs) ## NO VALS if ((lhs_cat == Category.ABSENT) or (rhs_cat == Category.ABSENT)) and \ (lhs_cat != Category.CALLABLE) and (rhs_cat != Category.CALLABLE): return _build_res( key=key, match=Match.PASS if lhs_cat == rhs_cat else Match.FAIL, lhs=fmt(lhs), rhs=fmt(rhs)) ## CALLABLES if lhs_cat == rhs_cat == Category.CALLABLE: match = Match.from_bool(lhs == rhs) return _build_res( key=key, match=match, lhs=(0, 'func', callable_name(lhs)), rhs=(0, 'func', callable_name(rhs))) if lhs_cat == Category.CALLABLE: result, error = compare_with_callable(callable_obj=lhs, value=rhs) return _build_res( key=key, match=Match.from_bool(result), lhs=(0, 'func', callable_name(lhs)), rhs='Value: {}, Error: {}'.format( rhs, error) if error else fmt(rhs)) if rhs_cat == Category.CALLABLE: result, error = compare_with_callable(callable_obj=rhs, value=lhs) return _build_res( key=key, match=Match.from_bool(result), lhs='Value: {}, Error: {}'.format( lhs, error) if error else fmt(lhs), rhs=(0, 'func', callable_name(rhs))) ## REGEXES if lhs_cat == rhs_cat == Category.REGEX: match = _regex_adapter.compare(lhs, rhs) return _build_res( key=key, match=match, lhs=_regex_adapter.serialize(lhs), rhs=_regex_adapter.serialize(rhs)) if lhs_cat == Category.REGEX: match = _regex_adapter.match(regex=lhs, value=rhs) return _build_res( key=key, match=match, lhs=_regex_adapter.serialize(lhs), rhs=fmt(rhs)) if rhs_cat == Category.REGEX: match = _regex_adapter.match(regex=rhs, value=lhs) return _build_res( key=key, match=match, lhs=fmt(lhs), rhs=_regex_adapter.serialize(rhs)) ## VALUES if lhs_cat == rhs_cat == Category.VALUE: response = value_cmp_func(lhs, rhs) match = Match.from_bool(response) return _build_res( key=key, match=match, lhs=fmt(lhs), rhs=fmt(rhs)) ## ITERABLE if lhs_cat == rhs_cat == Category.ITERABLE: results = [] match = Match.IGNORED for lhs_item, rhs_item in six.moves.zip_longest(lhs, rhs): # iterate all elems in both iterable non-mapping objects result = _rec_compare( lhs_item, rhs_item, ignore, only, key=None, report_mode=report_mode, value_cmp_func=value_cmp_func) match = Match.combine(match, result[1]) results.append(result) # two lists of formatted objects from a # list of objects with lhs/rhs attributes lhs_vals, rhs_vals = _partition(results) return _build_res( key=key, match=match, lhs=(1, lhs_vals), rhs=(1, rhs_vals)) ## DICTS if lhs_cat == rhs_cat == Category.DICT: match, results = _cmp_dicts( lhs, rhs, ignore, only, report_mode, value_cmp_func) lhs_vals, rhs_vals = _partition(results) return _build_res( key=key, match=match, lhs=(2, lhs_vals), rhs=(2, rhs_vals)) ## DIFF TYPES -- catch-all for unhandled # combinations, e.g. VALUE vs ITERABLE return _build_res( key=key, match=Match.FAIL, lhs=fmt(lhs), rhs=fmt(rhs))
b7d26ed038152ee98a7b50821f3485cdc66a29d4
15,944
def exists_job_onqueue(queuename, when, hour):
    """
    Check if a job is present on queue
    """
    scheduler = Scheduler(connection=Redis())
    jobs = scheduler.get_jobs()
    for job in jobs:
        if 'reset_stats_queue' in job.func_name:
            args = job.args
            if queuename == args[0] and when == args[1] and hour == args[2]:
                return True
    return False
165bb3da4746267d789d39ee30ebd9b098ea7c1e
15,945
def q_inv_batch_of_sequences(seq):
    """
    :param seq: (n_batch x n_frames x 32 x 4)
    :return:
    """
    n_batch = seq.size(0)
    n_frames = seq.size(1)
    n_joints = seq.size(2)
    seq = seq.reshape((n_batch * n_frames * n_joints, 4))
    seq = qinv(seq)
    seq = seq.reshape((n_batch, n_frames, n_joints, 4))
    return seq
9c2035a1864e47e99ac074815199217867da0c96
15,946
def msa_job_space_demand(job_space_demand):
    """
    Job space demand aggregated to the MSA.
    """
    df = job_space_demand.local
    return df.fillna(0).sum(axis=1).to_frame('msa')
044fe6e814c2773629b8f648b789ba99bbdf0108
15,947
def get_pdf_cdf_3(corr, bins_pdf, bins_cdf, add_point=True, cdf_bool=True, checknan=False): """ corr is a 3d array, the first dimension are the iterations, the second dimension is usually the cells the function gives back the pdf and the cdf add_point option duplicated the last point checknan checks if there are any nans in the set and gives nan as result for the pdf and cdf instead 0 as would be calculated naturally """ N1, N2, N3 = corr.shape pdfs = np.zeros((N1, N2, len(bins_pdf) - 1)) cdfs = np.zeros((N1, N2, len(bins_cdf) - 1)) for i in range(N1): pdfs[i], cdfs[i] = get_pdf_cdf_2(corr[i], bins_pdf, bins_cdf, add_point=False, cdf_bool=False, checknan=checknan) if cdf_bool: cdfs = np.cumsum(cdfs, axis=2)/corr.shape[2] if add_point: pdfs = add_point3(pdfs) cdfs = add_point3(cdfs) return pdfs, cdfs
0c6983bf6c3f77aebb7a9c667c54a560ed4a3cf0
15,948
import logging import requests import time def create_app(): """ Application factory to create the app and be passed to workers """ app = Flask(__name__) logging.basicConfig( filename='./logs/flask.log', level=logging.DEBUG, format='%(asctime)s %(levelname)-8s %(message)s', datefmt='%Y-%m-%d %H:%M:%S' ) app.config['SECRET_KEY'] = 'thisisthesecretkeyfortheflaskserver' #app.config['SESSION_TYPE'] = 'redis' server_ip = requests.get("http://ipinfo.io/ip").content.decode('utf-8') graphs = {} expiration = 300 # 5 minutes @app.route('/', methods=['GET', 'POST']) @app.route('/index', methods=['GET', 'POST']) def index(): return "", 418 # render_template('/index.html', graphs=graphs) @app.route('/graph/<ID>', methods=['GET', 'POST']) def graph(ID): """ Main graph display page. If in debug mode, serves raw source files. """ return render_template('/graph.html', development=app.config['DEBUG']) @app.route('/help') def tutorial(): """ Serve the tutorial page """ return render_template("/help.html", development=app.config['DEBUG']) @app.route('/src/<path:path>', methods=['GET']) def source(path): """ Serve source files in development mode """ if app.config['DEBUG']: return send_from_directory("src", path) else: return "", 418 @app.route('/create_graph', methods=['GET']) def create_graph(): """ receive graph JSON from external source """ logging.info("Received create_graph request") logging.info("Number of stored graphs: {}".format(len(list(graphs.keys())))) # remove expired graphs for ID in list(graphs.keys()): try: if time() - graphs[ID][1] > expiration: # temporary until we implement sessions logging.info("Removing graph ID: {}".format(ID)) del graphs[ID] except Exception as e: logging.error("Problem removing graph from dict: {} {}".format(ID,e)) continue ID = uuid4().hex # generate random uuid logging.info("Created id: {}".format(ID)) # store graph in index of all graphs with time created graphs[ID] = (request.json, time()) logging.info("Stored graph") # return url to the graph display url = "http://{}:5000/graph/{}".format(server_ip, ID) logging.info("Generated URL and returning it: {}".format(url)) return url @app.route('/get_graph/<ID>') def get_data(ID): """ Request graph JSON by ID """ stuff = graphs.get(ID) if stuff is None: data = { "error": "Graph does not exist.", "message": "The graph (ID: {}) does not exist. If this graph was used previously, it may have expired since.".format(ID)} return data, 410 return graphs[ID][0] return app
18f1de42aa395cddef482371e8115d01d3384888
15,949
import os def create_win_jupyter_console(folders): """ create a batch file to start jupyter @param folders see @see fn create_win_batches @return operations (list of what was done) """ text = ['@echo off', 'set CURRENT2=%~dp0', 'call "%CURRENT2%env.bat"', 'set JUPYTERC=%PYTHON_WINSCRIPTS%\\jupyter-console.exe', '"%JUPYTERC%" console'] # command jupyter console does not work yet even if the documentation says # so text = "\n".join(text) name = os.path.join(folders["config"], "jupyter_console.bat") with open(name, "w") as f: f.write(text) return [('batch', name)]
14e2e3843dea3d83da2f5c2917277e4703578419
15,950
def incidence_matrix( H, order=None, sparse=True, index=False, weight=lambda node, edge, H: 1 ): """ A function to generate a weighted incidence matrix from a Hypergraph object, where the rows correspond to nodes and the columns correspond to edges. Parameters ---------- H: Hypergraph object The hypergraph of interest order: int, optional Order of interactions to use. If None (default), all orders are used. If int, must be >= 1. sparse: bool, default: True Specifies whether the output matrix is a scipy sparse matrix or a numpy matrix index: bool, default: False Specifies whether to output dictionaries mapping the node and edge IDs to indices weight: lambda function, default=lambda function outputting 1 A function specifying the weight, given a node and edge Returns ------- I: numpy.ndarray or scipy csr_matrix The incidence matrix, has dimension (n_nodes, n_edges) rowdict: dict The dictionary mapping indices to node IDs, if index is True coldict: dict The dictionary mapping indices to edge IDs, if index is True """ edge_ids = H.edges if order is not None: edge_ids = [id_ for id_, edge in H._edge.items() if len(edge) == order + 1] if not edge_ids: return (np.array([]), {}, {}) if index else np.array([]) node_ids = H.nodes num_edges = len(edge_ids) num_nodes = len(node_ids) node_dict = dict(zip(node_ids, range(num_nodes))) edge_dict = dict(zip(edge_ids, range(num_edges))) if node_dict and edge_dict: if index: rowdict = {v: k for k, v in node_dict.items()} coldict = {v: k for k, v in edge_dict.items()} if sparse: # Create csr sparse matrix rows = [] cols = [] data = [] for node in node_ids: memberships = H.nodes.memberships(node) # keep only those with right order memberships = [i for i in memberships if i in edge_ids] if len(memberships) > 0: for edge in memberships: data.append(weight(node, edge, H)) rows.append(node_dict[node]) cols.append(edge_dict[edge]) else: # include disconnected nodes for edge in edge_ids: data.append(0) rows.append(node_dict[node]) cols.append(edge_dict[edge]) I = csr_matrix((data, (rows, cols))) else: # Create an np.matrix I = np.zeros((num_nodes, num_edges), dtype=int) for edge in edge_ids: members = H.edges.members(edge) for node in members: I[node_dict[node], edge_dict[edge]] = weight(node, edge, H) if index: return I, rowdict, coldict else: return I else: if index: return np.array([]), {}, {} else: return np.array([])
efbac24664f30a1cd424843042d7e203a0e96c37
15,951
def initial_landing_distance(interest_area, fixation_sequence): """ Given an interest area and fixation sequence, return the initial landing distance on that interest area. The initial landing distance is the pixel distance between the first fixation to land in an interest area and the left edge of that interest area (or, in the case of right-to-left text, the right edge). Technically, the distance is measured from the text onset without including any padding. Returns `None` if no fixation landed on the interest area. """ for fixation in fixation_sequence.iter_without_discards(): if fixation in interest_area: for char in interest_area: if fixation in char: # be sure not to find a fixation in the padding return abs(interest_area.onset - fixation.x) return None
b3512ea7cb149667e09c56541340122ec1dddcb1
15,952
import gzip
import pickle


def load_object(filename):
    """
    Load saved object from file
    :param filename: The file to load
    :return: the loaded object
    """
    with gzip.GzipFile(filename, 'rb') as f:
        return pickle.load(f)
f7e15216c371e1ab05169d40ca4df15611fa7978
15,953
from typing import Dict from typing import Tuple def list_events_command(client: Client, args: Dict) -> Tuple[str, Dict, Dict]: """Lists all events and return outputs in Demisto's format Args: client: Client object with request args: Usually demisto.args() Returns: Outputs """ max_results = args.get('max_results') event_created_date_before = args.get('event_created_date_before') event_created_date_after = args.get('event_created_date_after') raw_response = client.list_events( event_created_date_before=event_created_date_before, event_created_date_after=event_created_date_after, max_results=max_results) events = raw_response.get('event') if events: title = f'{INTEGRATION_NAME} - List events:' context_entry = raw_response_to_context(events) context = { f'{INTEGRATION_CONTEXT_NAME}.Event(val.ID && val.ID === obj.ID)': context_entry } # Creating human readable for War room human_readable = tableToMarkdown(title, context_entry) # Return data to Demisto return human_readable, context, raw_response else: return f'{INTEGRATION_NAME} - Could not find any events.', {}, {}
b4e3916ee8d65a47e2128453fd042d998184ea7b
15,954
import os def get_or_generate_vocab_inner(data_dir, vocab_filename, vocab_size, generator, max_subtoken_length=None, reserved_tokens=None): """Inner implementation for vocab generators. Args: data_dir: The base directory where data and vocab files are stored. If None, then do not save the vocab even if it doesn't exist. vocab_filename: relative filename where vocab file is stored vocab_size: target size of the vocabulary constructed by SubwordTextEncoder generator: a generator that produces tokens from the vocabulary max_subtoken_length: an optional integer. Set this to a finite value to avoid quadratic costs during vocab building. reserved_tokens: List of reserved tokens. `text_encoder.RESERVED_TOKENS` should be a prefix of `reserved_tokens`. If `None`, defaults to `RESERVED_TOKENS`. Returns: A SubwordTextEncoder vocabulary object. """ if data_dir and vocab_filename: vocab_filepath = os.path.join(data_dir, vocab_filename) if tf.gfile.Exists(vocab_filepath): tf.logging.info("Found vocab file: %s", vocab_filepath) return text_encoder.SubwordTextEncoder(vocab_filepath) else: vocab_filepath = None tf.logging.info("Generating vocab file: %s", vocab_filepath) vocab = text_encoder.SubwordTextEncoder.build_from_generator( generator, vocab_size, max_subtoken_length=max_subtoken_length, reserved_tokens=reserved_tokens) if vocab_filepath: tf.gfile.MakeDirs(data_dir) vocab.store_to_file(vocab_filepath) return vocab
6bb1faef913ebc3915487827576c2984fe614d84
15,955
def response_map(fetch_map):
    """Create an expected FETCH response map from the given request map.

    Most of the keys returned in a FETCH response are unmodified from the
    request. The exceptions are BODY.PEEK and BODY partial range. A BODY.PEEK
    request is answered without the .PEEK suffix. A partial range (e.g.
    BODY[]<0.1000>) has the octet count (1000) removed, since that information
    is provided in the literal size (and may be different if the data was
    truncated).
    """
    if not isinstance(fetch_map, dict):
        fetch_map = dict((v, v) for v in fetch_map)
    rmap = {}
    for k, v in fetch_map.items():
        for name in ('BODY', 'BINARY'):
            if k.startswith(name):
                k = k.replace(name + '.PEEK', name, 1)
                if k.endswith('>'):
                    k = k.rsplit('.', 1)[0] + '>'
        rmap[k] = v
    return rmap
42d992662e5bba62046c2fc1a50f0f8275798ef8
15,956
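A usage sketch for the response_map snippet above, with a hypothetical FETCH request list.

print(response_map(['UID', 'BODY.PEEK[HEADER]', 'BODY[]<0.1000>']))
# -> {'UID': 'UID',
#     'BODY[HEADER]': 'BODY.PEEK[HEADER]',
#     'BODY[]<0>': 'BODY[]<0.1000>'}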
def RigidTendonMuscle_getClassName():
    """RigidTendonMuscle_getClassName() -> std::string const &"""
    return _actuators.RigidTendonMuscle_getClassName()
8c6bd6604350e6e2a30ee48c018307bc68dea76f
15,957
import json import time import uuid def submit(): """Receives the new paste and stores it in the database.""" if request.method == 'POST': form = request.get_json(force=True) pasteText = json.dumps(form['pasteText']) nonce = json.dumps(form['nonce']) burnAfterRead = json.dumps(form['burnAfterRead']) pasteKeyHash = json.dumps(form['hash']) if burnAfterRead == "true": burnAfterRead = True else: burnAfterRead = False # Creates Expire time expireTime = json.dumps(form['expire_time']) expireTime = int(time.time()) + int(expireTime)*60 # set paste type pasteType = json.dumps(form['pasteType'])[1:-1] # cuts "'" out # print(type(form['nonce'])) db = get_db() # Creates random 64 bit int idAsInt = uuid.uuid4().int >> 65 db.execute('''insert into pastes (id, paste_text, nonce, expire_time, burn_after_read, paste_hash, paste_format) values (?, ?, ?, ?, ?, ?, ?)''', [idAsInt, pasteText, nonce, expireTime, burnAfterRead, pasteKeyHash, pasteType]) db.commit() # add text to sqlite3 db return jsonify(id=hex(idAsInt)[2:])
3f88b665b226c81785b0ecafe3389bb15dcbeaa4
15,958
def money_recall_at_k(recommended_list, bought_list, prices_recommended, prices_bought, k=5):
    """
    Revenue share coming from the relevant recommended items.

    :param recommended_list - list of recommended item ids
    :param bought_list - list of purchased item ids
    :param prices_recommended - list of prices of the recommended items
    :param prices_bought - list of prices of the purchased items
    """
    flags = np.isin(recommended_list[:k], bought_list)  # get recommend to bought matches
    prices = np.array(prices_recommended[:k])  # get prices of recommended items
    return flags @ prices / np.sum(prices_bought)
edeb6c56c5ce6a2af0321aee350c5f129737cab0
15,959
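A small worked example for the money_recall_at_k snippet above; the numbers are made up for illustration, and numpy (referenced as np in the snippet) is imported here.

import numpy as np

recommended = [143, 156, 1134, 991, 27]
bought = [143, 27, 889]
prices_recommended = [400, 60, 40, 90, 60]
prices_bought = [400, 60, 70]

# relevant recommended revenue = 400 + 60 = 460; total bought revenue = 530
print(money_recall_at_k(recommended, bought, prices_recommended, prices_bought, k=5))
# -> 0.8679...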
import networkx def get_clustering_fips( collection_of_fips, adj = None ): """ Finds the *separate* clusters of counties or territorial units that are clustered together. This is used to identify possibly *different* clusters of counties that may be separate from each other. If one does not supply an adjacency :py:class:`dict`, it uses the adjacency dictionary that :py:meth:`fips_adj_2018 <covid19_stats.COVID19Database.fips_adj_2018>` returns. Look at :download:`fips_2019_adj.pkl.gz </_static/gis/fips_2019_adj.pkl.gz>` to see what this dictionary looks like. :param list collection_of_fips: the :py:class:`list` of counties or territorial units, each identified by its `FIPS code`_. :param dict adj: optionally specified adjacency dictionary. Otherwise it uses the :py:meth:`fips_adj_2018 <covid19_stats.COVID19Database.fips_adj_2018>` returned dictionary. Look at :download:`fips_2019_adj.pkl.gz </_static/gis/fips_2019_adj.pkl.gz>` to see what this dictionary looks like. :returns: a :py:class:`list` of counties clustered together. Each cluster is a :py:class:`set` of `FIPS code`_\ s of counties grouped together. :rtype: list """ if adj is None: adj = COVID19Database.fips_adj_2018( ) fips_rem = set( collection_of_fips ) # ## our adjacency matrix from this subset = set(filter(lambda tup: all(map(lambda tok: tok in fips_rem, tup)), adj )) | \ set(map(lambda fips: ( fips, fips ), fips_rem )) G = networkx.Graph( sorted( subset ) ) # ## now greedy clustering algo fips_clusters = [ ] while len( fips_rem ) > 0: first_fips = min( fips_rem ) fips_excl = fips_rem - set([ first_fips, ]) fips_clust = [ first_fips ] for fips in fips_excl: try: dist = networkx.shortest_path_length( G, first_fips, fips ) fips_clust.append( fips ) except: pass fips_clusters.append( set( fips_clust ) ) fips_rem = fips_rem - set( fips_clust ) return fips_clusters
acdd6daa9b0b5d200d98271a4c989e5a5912a684
15,960
def stop_after(space_number):
    """ Decorator that determines when to stop tab-completion

    Decorator that tells command specific complete function (ex. "complete_use")
    when to stop tab-completion. Decorator counts number of spaces (' ') in line
    in order to determine when to stop.

    ex. "use exploits/dlink/specific_module " -> stop complete after 2 spaces
        "set rhost " -> stop completing after 2 spaces
        "run " -> stop after 1 space

    :param space_number: number of spaces (' ') after which tab-completion should stop
    :return:
    """
    def _outer_wrapper(wrapped_function):
        @wraps(wrapped_function)
        def _wrapper(self, *args, **kwargs):
            try:
                if args[1].count(" ") == space_number:
                    return []
            except Exception as err:
                logger.error(err)
            return wrapped_function(self, *args, **kwargs)
        return _wrapper
    return _outer_wrapper
f0ca0bb0f33c938f6a1de619f70b204e92b20974
15,961
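A usage sketch for the stop_after decorator above. It assumes the decorator lives in the same module and that `wraps` and a module-level `logger` are available, as the original module presumably provides; the Completer class below is purely illustrative.

from functools import wraps
import logging

logger = logging.getLogger(__name__)

class Completer:
    @stop_after(2)
    def complete_use(self, text, line, *ignored):
        return ["exploits/", "scanners/", "creds/"]

c = Completer()
print(c.complete_use("", "use "))                        # 1 space -> still completes
print(c.complete_use("", "use exploits/dlink/module "))  # 2 spaces -> []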
def find_cut_line(img_closed_original):
    # Handles the case where the front and back sides are stuck together:
    # the row with the fewest white pixels is taken as the cut line.
    """
    Forcefully split stuck-together regions according to a simple rule.

    :param img_closed_original: binarized image
    :return: processed binarized image
    """
    img_closed = img_closed_original.copy()
    img_closed = img_closed // 250
    # print(img_closed.shape)
    width_sum = img_closed.sum(axis=1)  # sum along the width to count white pixels per row
    start_region_flag = 0
    start_region_index = 0  # row where the ID-card region starts
    end_region_index = 0  # row where the ID-card region ends
    for i in range(img_closed_original.shape[0]):
        # the original image height is 1000; img_closed_original.shape[0] is used here instead
        if start_region_flag == 0 and width_sum[i] > 330:
            start_region_flag = 1
            start_region_index = i  # the first row with more than 330 white pixels starts the ID-card region
        if width_sum[i] > 330:
            end_region_index = i  # every row above 330 white pixels is treated as ID-card region; keep updating the end row
    # The row with the fewest white pixels inside the region is taken as the junction of the front and back sides.
    # argsort only sees the slice between the start and end rows, so the start row index is added back.
    min_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[0]
    img_closed_original[min_line_position][:] = 0
    for i in range(1, 11):  # tunable: consider the next 10 candidate rows
        temp_line_position = start_region_index + np.argsort(width_sum[start_region_index:end_region_index])[i]
        if abs(temp_line_position - min_line_position) < 30:  # only rows within [-30, 30] of the minimum row
            img_closed_original[temp_line_position][:] = 0  # force the row to zero
    return img_closed_original
28e5e64e15cb349df186752c669ae16d01e21549
15,962
def _search(progtext, qs=None): """ Perform memoized url fetch, display progtext. """ loadmsg = "Searching for '%s'" % (progtext) wdata = pafy.call_gdata('search', qs) def iter_songs(): wdata2 = wdata while True: for song in get_tracks_from_json(wdata2): yield song if not wdata2.get('nextPageToken'): break qs['pageToken'] = wdata2['nextPageToken'] wdata2 = pafy.call_gdata('search', qs) # # The youtube search api returns a maximum of 500 results length = min(wdata['pageInfo']['totalResults'], 500) slicer = IterSlicer(iter_songs(), length) # paginatesongs(slicer, length=length, msg=msg, failmsg=failmsg, loadmsg=loadmsg) func = slicer s = 0 e = 3 if callable(func): songs = (s, e) else: songs = func[s:e] return songs
55310c4ad05b597b48e32dde810eff9db51d66c0
15,963
import numpy


def img_to_vector(img_fn, label=0):
    """Read the first 32 characters of the first 32 rows of an image file.

    @return <ndarray>: a 1x(1024+1) numpy array with data and label, while the
    label defaults to 0.
    """
    img = ""
    for line in open(img_fn).readlines()[:32]:
        img += line[:32]
    # labels are always attached at the last position
    itera = [_ for _ in img + str(label)]
    return numpy.fromiter(itera, "f4")
f1d7161a0bc4d6ffebc6ee1b32eafb28c4d75f7f
15,964
import appdirs


def get_config():
    """Return a user configuration object."""
    config_filename = appdirs.user_config_dir(_SCRIPT_NAME, _COMPANY) + ".ini"
    config = _MyConfigParser()
    config.optionxform = str
    config.read(config_filename)
    config.set_filename(config_filename)
    return config
192ea496f80d77f241ec6deb6a4aa4b1ef7d17cf
15,965
import asyncio import websockets def launch_matchcomms_server() -> MatchcommsServerThread: """ Launches a background process that handles match communications. """ host = 'localhost' port = find_free_port() # deliberately not using a fixed port to prevent hardcoding fragility. event_loop = asyncio.new_event_loop() matchcomms_server = MatchcommsServer() start_server = websockets.serve(matchcomms_server.handle_connection, host, port, loop=event_loop) server = event_loop.run_until_complete(start_server) thread = Thread(target=event_loop.run_forever, daemon=True) thread.start() return MatchcommsServerThread( root_url=URL(scheme='ws', netloc=f'{host}:{port}', path='', params='', query='', fragment=''), _server=server, _event_loop=event_loop, _thread=thread, )
4c23c599a61f029972ae3e54ceb3066a4ce9f207
15,966
def acq_randmaxvar():
    """Initialise a RandMaxVar fixture.

    Returns
    -------
    RandMaxVar
        Acquisition method.
    """
    gp, prior = _get_dependencies_acq_fn()

    # Initialising the acquisition method.
    method_acq = RandMaxVar(model=gp, prior=prior)
    return method_acq
5f306d104032abc993ab7726e08453d5c18f2526
15,967
import json


def from_config(func):
    """Run a function from a JSON configuration file."""
    def decorator(filename):
        with open(filename, 'r') as file_in:
            config = json.load(file_in)
        return func(**config)
    return decorator
4342a5f6fab8f8274b9dfb762be3255672f4f332
15,968
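A usage sketch for the from_config decorator above, with a hypothetical greet function and config file; it reuses the json import from the snippet.

@from_config
def greet(name, punctuation="!"):
    return f"Hello, {name}{punctuation}"

with open("greet.json", "w") as f:
    json.dump({"name": "Ada", "punctuation": "?"}, f)

print(greet("greet.json"))  # -> Hello, Ada?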
def update_user(user, domain, password=None):
    """ create/update user record. if password is None, the user is removed.
    Password should already be SHA512-CRYPT'd """
    passwdf = PASSWDFILE % {"domain": domain}
    passwdb = KeyValueFile.open_file(passwdf, separator=":",
                                     lineformat=USERLINE + "\n")
    passwdb[user] = password
    return True
6e65be52fe0fb737c5189da295694bf482be9f5d
15,969
from itertools import takewhile


def puzzle_pieces(n):
    """Return a dictionary holding all 1, 3, and 7 k primes."""
    # NOTE: count_Kprimes and get_primes are helpers assumed to be defined elsewhere
    # in the same module (they generate k-almost-primes and primes respectively).
    kprimes = {key: [] for key in [7, 3, 1]}
    upper = 0
    for k in sorted(kprimes.keys(), reverse=True):
        if k == 7:
            kprimes[k].extend(count_Kprimes(k, 2, n))
            if not kprimes[k]:
                return []
            upper = n - kprimes[k][0]
        if k == 3:
            kprimes[k].extend(count_Kprimes(k, 2, upper))
            upper -= kprimes[k][0]
        if k == 1:
            primes = get_primes(upper)
            for p in takewhile(lambda x: x <= upper, primes):
                kprimes[k].append(p)
    return kprimes
4ad36f316a2dfa39aca9c2b574781f9199fb13ef
15,970
import warnings

import numpy as np
from scipy.stats import sigmaclip


def periodogram_snr(periodogram, periods, index_to_evaluate, duration, per_type,
                    freq_window_epsilon=3., rms_window_bin_size=100):
    """
    Calculate the periodogram SNR of the best period

    Assumes fixed frequency spacing for periods

    periodogram       - the periodogram values
    periods           - periods associated with the above values
    index_to_evaluate - index of period to examine
    duration          - total duration of the observations
    per_type          - which period search algorithm was used

    (optional)
    freq_window_epsilon - sets the size of the exclusion area
                          in the periodogram for the calculation
    rms_window_bin_size - number of points to include in
                          calculating the RMS for the SNR
    """
    # Some value checking
    if len(periodogram) != len(periods):
        raise ValueError("The lengths of the periodogram and the periods are not the same")
    if hasattr(index_to_evaluate, '__len__'):
        raise AttributeError("The index_to_evaluate has len attribute")
    if np.isnan(periodogram[index_to_evaluate]):
        raise ValueError("Selected periodogram value is nan")
    if np.isinf(periodogram[index_to_evaluate]):
        raise ValueError("Selected periodogram value is not finite")
    if per_type.upper() not in ['LS', 'PDM', 'BLS']:
        raise ValueError("Periodogram type " + per_type + " not recognized")

    # Setting up some parameters
    freq_window_size = freq_window_epsilon / duration
    delta_frequency = abs(1. / periods[1] - 1. / periods[0])
    freq_window_index_size = int(round(freq_window_size / delta_frequency))

    # More value checking
    if freq_window_index_size > len(periodogram):
        raise ValueError("freq_window_index_size is greater than total periodogram length")
    elif freq_window_index_size > .9 * len(periodogram):
        raise ValueError("freq_window_index_size is greater than 90% total length of periodogram")
    elif freq_window_index_size > .8 * len(periodogram):
        warnings.warn("freq_window_index_size is greater than 80% total length of periodogram")

    perdgm_window = []  # For storing values for RMS calculation

    # Which values to include in perdgm_window
    if index_to_evaluate > freq_window_index_size:
        perdgm_window.extend(periodogram[max(0, index_to_evaluate - freq_window_index_size - rms_window_bin_size + 1):index_to_evaluate - freq_window_index_size + 1].tolist())
    if index_to_evaluate + freq_window_index_size < len(periodogram):
        perdgm_window.extend(periodogram[index_to_evaluate + freq_window_index_size:index_to_evaluate + freq_window_index_size + rms_window_bin_size].tolist())
    perdgm_window = np.array(perdgm_window)

    # Include only finite values
    wherefinite = np.isfinite(perdgm_window)

    # Sigma clip
    vals, low, upp = sigmaclip(perdgm_window[wherefinite], low=3, high=3)

    # Calculate standard deviation
    stddev = np.std(vals)

    # Return
    if per_type.upper() == 'PDM':  # If PDM, use correct amplitude
        return (1. - periodogram[index_to_evaluate]) / stddev
    else:
        return periodogram[index_to_evaluate] / stddev
6b1f84d03796dc839cdb87b94bce69a8eef4f60e
15,971
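A quick smoke test of periodogram_snr above on synthetic data with evenly spaced frequencies (the function assumes fixed frequency spacing); the injected peak, grid and duration are made up for the example.

import numpy as np

rng = np.random.RandomState(42)
freqs = np.linspace(0.01, 10.0, 10000)   # evenly spaced frequencies
periods = 1.0 / freqs                    # periods on the same frequency grid
power = rng.normal(1.0, 0.1, size=freqs.size)
power[5000] = 25.0                       # inject an obvious peak
duration = 30.0                          # e.g. 30 days of observations

snr = periodogram_snr(power, periods, np.argmax(power), duration, 'LS')
print(snr)  # large value, since the peak stands far above the local scatter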
def derivative_overview(storage_service_id, storage_location_id=None): """Return a summary of derivatives across AIPs with a mapping created between the original format and the preservation copy. """ report = {} aips = AIP.query.filter_by(storage_service_id=storage_service_id) if storage_location_id: aips = aips.filter_by(storage_location_id=storage_location_id) aips = aips.all() all_aips = [] for aip in aips: if not aip.preservation_file_count > 0: continue aip_report = {} aip_report[fields.FIELD_TRANSFER_NAME] = aip.transfer_name aip_report[fields.FIELD_UUID] = aip.uuid aip_report[fields.FIELD_FILE_COUNT] = aip.original_file_count aip_report[fields.FIELD_DERIVATIVE_COUNT] = aip.preservation_file_count aip_report[fields.FIELD_RELATED_PAIRING] = [] original_files = File.query.filter_by( aip_id=aip.id, file_type=FileType.original ) for original_file in original_files: preservation_derivative = File.query.filter_by( file_type=FileType.preservation, original_file_id=original_file.id ).first() if preservation_derivative is None: continue file_derivative_pair = {} file_derivative_pair[ fields.FIELD_DERIVATIVE_UUID ] = preservation_derivative.uuid file_derivative_pair[fields.FIELD_ORIGINAL_UUID] = original_file.uuid original_format_version = original_file.format_version if original_format_version is None: original_format_version = "" file_derivative_pair[fields.FIELD_ORIGINAL_FORMAT] = "{} {} ({})".format( original_file.file_format, original_format_version, original_file.puid ) file_derivative_pair[fields.FIELD_DERIVATIVE_FORMAT] = "{}".format( preservation_derivative.file_format ) aip_report[fields.FIELD_RELATED_PAIRING].append(file_derivative_pair) all_aips.append(aip_report) report[fields.FIELD_ALL_AIPS] = all_aips report[fields.FIELD_STORAGE_NAME] = get_storage_service_name(storage_service_id) report[fields.FIELD_STORAGE_LOCATION] = get_storage_location_description( storage_location_id ) return report
ab688e89c9bc9cec408e022a487d824a229a80a9
15,972
import tarfile
from urllib.request import urlretrieve


def fetch_packages(vendor_dir, packages):
    """ Fetches all packages from github. """
    # NOTE: format_tar_path, ensure_vendor_owner_dir and format_tarball_url are
    # helpers assumed to be defined in the surrounding module.
    for package in packages:
        tar_filename = format_tar_path(vendor_dir, package)
        vendor_owner_dir = ensure_vendor_owner_dir(vendor_dir, package['owner'])
        url = format_tarball_url(package)

        print("Downloading {owner}/{project} {version}".format(**package))
        urlretrieve(url, tar_filename)

        with tarfile.open(tar_filename) as tar:
            tar.extractall(vendor_owner_dir, members=tar.getmembers())
    return packages
4589ce242ab8221a34ea87ce020f53a7874e73cb
15,973
def execute_search(search_term, sort_by, **kwargs): """ Simple search API to query Elasticsearch """ # Get the Elasticsearch client client = get_client() # Perform the search ons_index = get_index() # Init SearchEngine s = SearchEngine(using=client, index=ons_index) # Define type counts (aggregations) query s = s.type_counts_query(search_term) # Execute type_counts_response = s.execute() # Format the output aggregations, total_hits = aggs_to_json( type_counts_response.aggregations, "docCounts") # Setup initial paginator page_number = int(get_form_param("page", False, 1)) page_size = int(get_form_param("size", False, 10)) paginator = None if total_hits > 0: paginator = Paginator( total_hits, MAX_VISIBLE_PAGINATOR_LINK, page_number, page_size) # Perform the content query to populate the SERP # Init SearchEngine s = SearchEngine(using=client, index=ons_index) # Define the query with sort and paginator s = s.content_query( search_term, sort_by=sort_by, paginator=paginator, **kwargs) # Execute the query content_response = s.execute() # Update the paginator paginator = Paginator( content_response.hits.total, MAX_VISIBLE_PAGINATOR_LINK, page_number, page_size) # Check for featured results featured_result_response = None # Only do this if we have results and are on the first page if total_hits > 0 and paginator.current_page <= 1: # Init the SearchEngine s = SearchEngine(using=client, index=ons_index) # Define the query s = s.featured_result_query(search_term) # Execute the query featured_result_response = s.execute() # Return the hits as JSON return hits_to_json( content_response, aggregations, paginator, sort_by.name, featured_result_response=featured_result_response)
48ec250c6deceaca850230e4be2e0e282f5838e4
15,974
def last_char_to_aou(word): """Intended for abbreviations, returns "a" or "ä" based on vowel harmony for the last char.""" assert isinstance(word, str) ch = last_char_to_vowel(word) if ch in "aou": return "a" return "ä"
3a37e97e19e1ca90ccf26d81756db57445f68a26
15,975
import jax.numpy as jnp


def times_vector(mat, vec):
    """Returns the symmetric block-concatenated matrix multiplied by a vector.

    Specifically, each value in the vector is multiplied by a row of the full
    matrix. That is, the vector is broadcast and multiplied element-wise. Note
    this would be the transpose of full_mat * vec if full_mat represented the
    full symmetric matrix.

    Args:
      mat: The symmetric matrix represented as the concatenated blocks.
      vec: The vector, having the same dimension as the materialized matrix.
    """
    # NOTE: num_blocks_from_total_blocks is assumed to be defined in the same
    # module (it recovers the number of row blocks from the total block count).
    rows, cols = mat.shape
    num_blocks = num_blocks_from_total_blocks(cols // rows)
    multiplied = []
    for i in range(num_blocks):
        mat_block = mat[Ellipsis,
                        rows * ((i + 1) * i) // 2:rows * ((i + 1) * (i + 2)) // 2]
        vec_block = vec[Ellipsis, rows * i:rows * (i + 1)]
        multiplied.append(jnp.einsum("...ij,...i->ij", mat_block, vec_block))
    return jnp.concatenate(multiplied, axis=-1)
5b90ebd293535810c7ad8e1ad681033997e8c1c8
15,976
import pathlib
from typing import Union


def ensure_path(path: Union[str, pathlib.Path]) -> pathlib.Path:
    """
    Check if the input path is a string or Path object, and return a Path object.

    :param path: String or Path object with a path to a resource.
    :return: Path object instance
    """
    return path if isinstance(path, pathlib.Path) else pathlib.Path(path)
40cd2e1271f7f74adbf0928f769ca1a3d89acd50
15,977
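Trivial usage of ensure_path above; the file name is hypothetical.

import pathlib

p1 = ensure_path("data/results.csv")
p2 = ensure_path(pathlib.Path("data") / "results.csv")
print(isinstance(p1, pathlib.Path), p1 == p2)  # True True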
def examine_mode(mode): """ Returns a numerical index corresponding to a mode :param str mode: the subset user wishes to examine :return: the numerical index """ if mode == 'test': idx_set = 2 elif mode == 'valid': idx_set = 1 elif mode == 'train': idx_set = 0 else: raise NotImplementedError return idx_set
4fee6f018cacff4c760cb92ef250cad21b497697
15,978
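Example calls for examine_mode above; any string other than 'train'/'valid'/'test' raises NotImplementedError.

print(examine_mode('train'))  # 0
print(examine_mode('valid'))  # 1
print(examine_mode('test'))   # 2
try:
    examine_mode('holdout')
except NotImplementedError:
    print("unknown subset name")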
from pathlib import Path import subprocess def main(): """Validates individual trigger files within the raidboss Cactbot module. Current validation only checks that the trigger file successfully compiles. Returns: An exit status code of 0 or 1 if the tests passed successfully or failed, respectively. """ exit_status = 0 for filepath in Path(CactbotModule.RAIDBOSS.directory(), DATA_DIRECTORY).glob('**/*.js'): exit_status |= subprocess.call(['node', str(filepath)]) return exit_status
e14d0638fb0078225e4c20336ad40989895da1d0
15,979
def create_pinata(profile_name: str) -> Pinata: """ Get or create a Pinata SDK instance with the given profile name. If the profile does not exist, you will be prompted to create one, which means you will be prompted for your API key and secret. After that, they will be stored securely using ``keyring`` and accessed as needed without prompt. Args: profile_name (str): The name of the profile to get or create. Returns: :class:`~pinata.sdk.Pinata` """ try: pinata = Pinata.from_profile_name(profile_name) except PinataMissingAPIKeyError: set_keys_from_prompt(profile_name) pinata = Pinata.from_profile_name(profile_name) if not pinata: set_keys_from_prompt(profile_name) return Pinata.from_profile_name(profile_name)
a1b88b8bb5b85a73a8bce01860398a9cbf2d1491
15,980
import numpy as np


def create_tfid_weighted_vec(tokens, w2v, n_dim, tfidf):
    """
    Create train, test vecs using the tf-idf weighting method

    Parameters
    ----------
    tokens : np.array
        data (tokenized) where each line corresponds to a document
    w2v : gensim.Word2Vec
        word2vec model
    n_dim : int
        dimensionality of our word vectors
    tfidf :
        tf-idf weights passed through to build_doc_vector

    Returns
    -------
    vecs_w2v : np.array
        data ready for the model, shape (n_samples, n_dim)
    """
    # NOTE: build_doc_vector is assumed to be defined elsewhere in the module.
    vecs_w2v = np.concatenate(
        [build_doc_vector(doc, n_dim, w2v, tfidf) for doc in tokens])
    return vecs_w2v
8503932c2b268ff81752fb22e8640ce9413ad2e5
15,981
def miniimagenet(folder, shots, ways, shuffle=True, test_shots=None, seed=None, **kwargs): """Helper function to create a meta-dataset for the Mini-Imagenet dataset. Parameters ---------- folder : string Root directory where the dataset folder `miniimagenet` exists. shots : int Number of (training) examples per class in each task. This corresponds to `k` in `k-shot` classification. ways : int Number of classes per task. This corresponds to `N` in `N-way` classification. shuffle : bool (default: `True`) Shuffle the examples when creating the tasks. test_shots : int, optional Number of test examples per class in each task. If `None`, then the number of test examples is equal to the number of training examples per class. seed : int, optional Random seed to be used in the meta-dataset. kwargs Additional arguments passed to the `MiniImagenet` class. See also -------- `datasets.MiniImagenet` : Meta-dataset for the Mini-Imagenet dataset. """ defaults = { 'transform': Compose([Resize(84), ToTensor()]) } return helper_with_default(MiniImagenet, folder, shots, ways, shuffle=shuffle, test_shots=test_shots, seed=seed, defaults=defaults, **kwargs)
a9be1fff33b8e5163d6a5af4bd48dc71dcb88864
15,982
from django.core.exceptions import ImproperlyConfigured
from django.http import Http404, HttpResponseRedirect
from django.shortcuts import get_object_or_404
from django.urls import reverse


def process_account_request(request, order_id, receipt_code):
    """
    Process payment via online account like PayPal, Amazon ...etc
    """
    # NOTE: Order, Gateway, PayPal and DoorstepError are app-level classes from the
    # surrounding Doorstep project and are assumed to be importable there.
    order = get_object_or_404(Order, id=order_id, receipt_code=receipt_code)

    if request.method == "POST":
        gateway_name = request.POST["gateway_name"]
        gateway = get_object_or_404(Gateway, name=gateway_name)

        try:
            if gateway.name == Gateway.PAYPAL:
                processor = PayPal(gateway)
                return HttpResponseRedirect(processor.create_account_payment(order, request.user))
            else:
                raise ImproperlyConfigured('Doorstep doesn\'t yet support payment with %s account.'
                                           % gateway.get_name_display())
        except DoorstepError as e:
            request.session['processing_error'] = e.message
            return HttpResponseRedirect(reverse('payments_processing_message'))

    raise Http404
be5bdb027034e2f2791968755e41bbac762d1dda
15,983
def add_classification_categories(json_object, classes_file): """ Reads the name of classes from the file *classes_file* and adds them to the JSON object *json_object*. The function assumes that the first line corresponds to output no. 0, i.e. we use 0-based indexing. Modifies json_object in-place. Args: json_object: an object created from a json in the format of the detection API output classes_file: the list of classes that correspond to the output elements of the classifier Return: The modified json_object with classification_categories added. If the field 'classification_categories' already exists, then this function is a no-op. """ if ('classification_categories' not in json_object.keys()) or (len(json_object['classification_categories']) == 0): # Read the name of all classes with open(classes_file, 'rt') as fi: class_names = fi.read().splitlines() # remove empty lines class_names = [cn for cn in class_names if cn.strip()] # Create field with name *classification_categories* json_object['classification_categories'] = dict() # Add classes using 0-based indexing for idx, name in enumerate(class_names): json_object['classification_categories']['%i'%idx] = name else: print('WARNING: The input json already contains the list of classification categories.') return json_object
ef92902210f275238271c21e20f8f0eec90253b0
15,984
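A small sketch of add_classification_categories above; the class list file and detection JSON are made up, following the described format (one class name per line, 0-based indexing).

import tempfile

# Hypothetical classes file: one class name per line.
with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False) as fh:
    fh.write("empty\nanimal\nperson\n")

detections = {"images": [], "detection_categories": {"1": "animal"}}
detections = add_classification_categories(detections, fh.name)
print(detections["classification_categories"])
# {'0': 'empty', '1': 'animal', '2': 'person'}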
import copy def create_compound_states(reference_thermodynamic_state, top, protocol, region=None, restraint=False): """ Return alchemically modified thermodynamic states. Parameters ---------- reference_thermodynamic_state : ThermodynamicState object top : Topography or Topology object protocol : dict The dictionary ``{parameter_name: list_of_parameter_values}`` defining the protocol. All the parameter values list must have the same number of elements. region : str or list Atomic indices defining the alchemical region. restraint : bool If ligand exists, restraint ligand and receptor movements. """ create_compound_states.metadata = {} compound_state = _reference_compound_state(reference_thermodynamic_state, top, region=region, restraint=restraint) create_compound_states.metadata.update(_reference_compound_state.metadata) # init the array of compound states compound_states = [] protocol_keys, protocol_values = zip(*protocol.items()) for state_id, state_values in enumerate(zip(*protocol_values)): compound_states.append(copy.deepcopy(compound_state)) for lambda_key, lambda_value in zip(protocol_keys, state_values): if hasattr(compound_state, lambda_key): setattr(compound_states[state_id], lambda_key, lambda_value) else: raise AttributeError( 'CompoundThermodynamicState object does not ' 'have protocol attribute {}'.format(lambda_key)) return compound_states
9ef5c14628237f3754e8522d11aa6bcbe399e1b3
15,985
def initialize_binary_MERA_random(phys_dim, chi, dtype=tf.float64): """ initialize a binary MERA network of bond dimension `chi` isometries and disentanglers are initialized with random unitaries (not haar random) Args: phys_dim (int): Hilbert space dimension of the bottom layer chi (int): maximum bond dimension dtype (tf.dtype): dtype of the MERA tensors Returns: wC (list of tf.Tensor): the MERA isometries uC (list of tf.Tensor): the MERA disentanglers rho (tf.Tensor): initial reduced density matrix """ #Fixme: currently, passing tf.complex128 merely initializez imaginary part to 0.0 # make it random wC, uC, rho = initialize_binary_MERA_identities(phys_dim, chi, dtype=dtype) wC = [tf.cast(tf.random_uniform(shape=w.shape, dtype=dtype.real_dtype), dtype) for w in wC] wC = [misc_mera.w_update_svd_numpy(w) for w in wC] uC = [tf.cast(tf.random_uniform(shape=u.shape, dtype=dtype.real_dtype), dtype) for u in uC] uC = [misc_mera.u_update_svd_numpy(u) for u in uC] return wC, uC, rho
f0ba62a5c8605bf4e8967b5626cae9cd81992697
15,986
def tts_init():
    """
    Initialize chosen TTS.

    Returns:
        tts (TextToSpeech)
    """
    # NOTE: TTS_NAME, IBM_initialization and pytts_initialization are assumed to be
    # defined in the surrounding module.
    if TTS_NAME == "IBM":
        return IBM_initialization()
    elif TTS_NAME == "pytts":
        return pytts_initialization()
    raise ValueError("ERROR - WRONG TTS: %s" % TTS_NAME)
4de36b27298d015b808cbc4973daf02354780787
15,987
def string_dumper(dumper, value, _tag=u'tag:yaml.org,2002:str'): """ Ensure that all scalars are dumped as UTF-8 unicode, folded and quoted in the sanest and most readable way. """ if not isinstance(value, basestring): value = repr(value) if isinstance(value, str): value = value.decode('utf-8') style = None multilines = '\n' in value if multilines: literal_style = '|' style = literal_style return dumper.represent_scalar(_tag, value, style=style)
081e0adaa45072f2b75c9eb1374ce2009bf4fd1d
15,988
import math
from decimal import Decimal


def to_hours_from_seconds(value):
    """From seconds to hours, rounded up to a whole hour"""
    return Decimal(math.ceil((value / Decimal(60)) / Decimal(60)))
2ceb1f74690d26f0d0d8f60ffdc012b801dd6be3
15,989
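Worked examples for to_hours_from_seconds above (any fraction of an hour rounds up):

from decimal import Decimal

print(to_hours_from_seconds(Decimal(3600)))  # 1
print(to_hours_from_seconds(Decimal(3601)))  # 2
print(to_hours_from_seconds(Decimal(7200)))  # 2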
import arcpy


def extract_named_geoms(sde_floodplains=None, where_clause=None, clipping_geom_obj=None):
    """
    Clips SDE flood delineations to the boundary of FEMA floodplain changes, and then
    saves the geometry and DRAINAGE name to a list of dictionaries.

    :param sde_floodplains: {str} The file path to the UTIL.Floodplains layer
    :param where_clause: {str} The where clause used to isolate polygons of interest
    :param clipping_geom_obj: {arc geom obj} The geometry object representing the
        boundaries of the LOMR/FIRM update
    :return: {list} [{"SHAPE@": <Poly obj>, "DRAINAGE": "drain name"},...]
    """
    sde_fields = ['SHAPE@', 'DRAINAGE']
    with arcpy.da.SearchCursor(sde_floodplains, sde_fields, where_clause) as sCurs:
        named_geoms = []
        geom = None
        for row in sCurs:
            # if clipper.contains(row[0].centroid) or row[0].overlaps(clipper):
            geom = row[0].clip(clipping_geom_obj.extent)
            named_geoms.append({'SHAPE@': geom, 'DRAINAGE': str(row[1])})
    return named_geoms
d56b5caf8a11358db4fc43f51b8a29840698fd3a
15,990
import argparse


def parser_train():
    """
    Parse input arguments (train.py).
    """
    # NOTE: DATASETS, MODELS, SCHEDULERS, ATTACKS, str2bool and str2float are
    # module-level constants/helpers assumed to be defined alongside this parser.
    parser = argparse.ArgumentParser(description='Standard + Adversarial Training.')

    parser.add_argument('--augment', type=str2bool, default=True, help='Augment training set.')

    parser.add_argument('--batch-size', type=int, default=128, help='Batch size for training.')
    parser.add_argument('--batch-size-validation', type=int, default=256, help='Batch size for testing.')
    parser.add_argument('--num-samples-eval', type=int, default=512, help='Number of samples to use for margin calculations.')

    parser.add_argument('--data-dir', type=str, default='/cluster/scratch/rarade/data/')
    parser.add_argument('--log-dir', type=str, default='/cluster/home/rarade/adversarial-hat/logs/')
    parser.add_argument('--tmp-dir', type=str, default='/cluster/scratch/rarade/')

    parser.add_argument('-d', '--data', type=str, default='cifar10', choices=DATASETS, help='Data to use.')
    parser.add_argument('--desc', type=str, required=True,
                        help='Description of experiment. It will be used to name directories.')

    parser.add_argument('-m', '--model', choices=MODELS, default='resnet18', help='Model architecture to be used.')
    parser.add_argument('--normalize', type=str2bool, default=False, help='Normalize input.')
    parser.add_argument('--pretrained-file', type=str, default=None, help='Pretrained weights file name.')

    parser.add_argument('-ns', '--num-std-epochs', type=int, default=0, help='Number of standard training epochs.')
    parser.add_argument('-na', '--num-adv-epochs', type=int, default=0, help='Number of adversarial training epochs.')
    parser.add_argument('--adv-eval-freq', type=int, default=30, help='Adversarial evaluation frequency (in epochs).')

    parser.add_argument('--h', default=2.0, type=float, help='Parameter h to compute helper examples (x + h*r) for HAT.')
    parser.add_argument('--helper-model', type=str, default=None, help='Helper model weights file name for HAT.')
    parser.add_argument('--beta', default=None, type=float,
                        help='Stability regularization, i.e., 1/lambda in TRADES or weight of robust loss in HAT.')
    parser.add_argument('--gamma', default=1.0, type=float, help='Weight of helper loss in HAT.')
    parser.add_argument('--robust-loss', default='kl', choices=['ce', 'kl'], type=str, help='Type of robust loss in HAT.')

    parser.add_argument('--lr', type=float, default=0.21, help='Learning rate for optimizer (SGD).')
    parser.add_argument('--weight-decay', type=float, default=5e-4, help='Optimizer (SGD) weight decay.')
    parser.add_argument('--scheduler', choices=SCHEDULERS, default='cyclic', help='Type of scheduler.')
    parser.add_argument('--nesterov', type=str2bool, default=True, help='Use Nesterov momentum.')
    parser.add_argument('--clip-grad', type=float, default=None, help='Gradient norm clipping.')

    parser.add_argument('-a', '--attack', type=str, choices=ATTACKS, default='linf-pgd', help='Type of attack.')
    parser.add_argument('--attack-eps', type=str2float, default=8/255, help='Epsilon for the attack.')
    parser.add_argument('--attack-step', type=str2float, default=2/255, help='Step size for PGD attack.')
    parser.add_argument('--attack-iter', type=int, default=10, help='Max. number of iterations (if any) for the attack.')
    parser.add_argument('--keep-clean', type=str2bool, default=False, help='Use clean samples during adversarial training.')

    parser.add_argument('--debug', action='store_true', default=False,
                        help='Debug code. Run 1 epoch of training and evaluation.')
    parser.add_argument('--exp', action='store_true', default=False,
                        help='Store results for performing margin and curvature experiments later.')
    parser.add_argument('--mart', action='store_true', default=False, help='MART training.')

    parser.add_argument('--unsup-fraction', type=float, default=0.5, help='Ratio of unlabelled data to labelled data.')
    parser.add_argument('--aux-data-filename', type=str, help='Path to additional Tiny Images data.',
                        default='/cluster/scratch/rarade/cifar10s/ti_500K_pseudo_labeled.pickle')

    parser.add_argument('--seed', type=int, default=1, help='Random seed.')
    return parser
a12d4c392b8883c1bf195cb6e1fc9333b8a9fc1b
15,991
from typing import Sequence from typing import List from typing import Any def convert_examples_to_features(examples: Sequence[InputExampleTC], labels: List[str], tokenizer: Any, max_length: int = 512, ignore_lbl_id: int = -100 ) -> List[InputFeaturesTC]: """Converts sequence of ``InputExampleTC to list of ``InputFeaturesTC``. Args: examples (:obj:`list` of :obj:`InputExampleTC`): Sequence of ``InputExampleTC`` containing the examples to be converted to features. tokenizer (:obj): Instance of a transformer tokenizer that will tokenize the example tokens and convert them to model specific ids. max_length (int): the maximum length of the post-tokenized tokens and the respective associated fields in an InputFeaturesTC. Sequences longer will be truncated, sequences shorter will be padded. This length includes any special tokens that must be added such as [CLS] and [SEP] in BERT. ignore_lbl_id (int, optional): a value of a label id to be ignored, used for subword tokens. This is typically negative. Usually, -1 or `torch.nn.CrossEntropy().ignore_index`. Returns: If the input is a list of ``InputExamplesTC``, will return a list of task-specific ``InputFeaturesTC`` which can be fed to the model. """ logger.info(f'Using label list {labels}') label2id = {label: i for i, label in enumerate(labels)} all_features = [] for (ex_index, example) in enumerate(examples): if ex_index % 10000 == 0: logger.info("Converting example %d" % (ex_index)) feats, tks = convert_example_to_features(example=example, label2id=label2id, tokenizer=tokenizer, max_length=max_length, ignore_lbl_id=ignore_lbl_id) if ex_index < 5: log_example_features(example, feats, tks) all_features.append(feats) return all_features
f051cb9fd68aaf08da15e99f978a6bdc24fea5d3
15,992
import os def build_image(local_conda_channel, conda_env_file, container_tool, container_build_args=""): """ Build a container image from the Dockerfile in RUNTIME_IMAGE_PATH. Returns a result code and the name of the new image. """ variant = os.path.splitext(conda_env_file)[0].replace(utils.CONDA_ENV_FILENAME_PREFIX, "", 1) variant = variant.replace("-runtime", "") image_name = REPO_NAME + ":" + IMAGE_NAME + "-" + variant build_cmd = container_tool + " build " build_cmd += "-f " + os.path.join(RUNTIME_IMAGE_PATH, "Dockerfile") + " " build_cmd += "-t " + image_name + " " build_cmd += "--build-arg OPENCE_USER=" + OPENCE_USER + " " build_cmd += "--build-arg LOCAL_CONDA_CHANNEL=" + local_conda_channel + " " build_cmd += "--build-arg CONDA_ENV_FILE=" + conda_env_file + " " build_cmd += "--build-arg TARGET_DIR=" + TARGET_DIR + " " build_cmd += container_build_args + " " build_cmd += BUILD_CONTEXT print("Container build command: ", build_cmd) if os.system(build_cmd): raise OpenCEError(Error.BUILD_IMAGE, image_name) return image_name
73ce85ac078e902e6054605351d0e7b4aba7b10d
15,993
def update_setup_cfg(setupcfg: ConfigUpdater, opts: ScaffoldOpts): """Update `pyscaffold` in setupcfg and ensure some values are there as expected""" if "options" not in setupcfg: template = templates.setup_cfg(opts) new_section = ConfigUpdater().read_string(template)["options"] setupcfg["metadata"].add_after.section(new_section.detach()) # Add "PyScaffold" section if missing and update saved extensions setupcfg = templates.add_pyscaffold(setupcfg, opts) return setupcfg, opts
b08b0faa0645151b24d8eb40b2920e63caf764e9
15,994
def testable_renderable() -> CXRenderable: """ Provides a generic CXRenderable useful for testin the base class. """ chart: CanvasXpress = CanvasXpress( render_to="canvasId", data=CXDictData( { "y": { "vars": ["Gene1"], "smps": ["Smp1", "Smp2", "Smp3"], "data": [[10, 35, 88]] } } ), config=CXConfigs( CXGraphType(CXGraphTypeOptions.Bar) ) ) return SampleRenderable(chart)
3e37096e51e081da8c3fa43f973248252c0276dd
15,995
def secondSolution( fixed, c1, c2, c3 ): """ If given four tangent circles, calculate the other one that is tangent to the last three. @param fixed: The fixed circle touches the other three, but not the one to be calculated. @param c1, c2, c3: Three circles to which the other tangent circle is to be calculated. """ curf = fixed.curvature() cur1 = c1.curvature() cur2 = c2.curvature() cur3 = c3.curvature() curn = 2 * (cur1 + cur2 + cur3) - curf mn = (2 * (cur1*c1.m + cur2*c2.m + cur3*c3.m) - curf*fixed.m ) / curn return Circle( mn.real, mn.imag, 1/curn )
1a6aca3e5d6a26f77b1fbc432ff26fba441e02f7
15,996
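A numeric check of secondSolution above on the standard (-1, 2, 2, 3) Apollonian configuration. The original Circle class is not shown, so a minimal stand-in with a complex center .m, a .curvature() method and an (x, y, radius) constructor is assumed here; a negative radius encodes the enclosing circle with negative curvature.

class Circle:
    # Minimal stand-in for the missing Circle class (assumption, not the original).
    def __init__(self, x, y, r):
        self.m = complex(x, y)
        self.r = r
    def curvature(self):
        return 1.0 / self.r

outer = Circle(0, 0, -1)      # enclosing unit circle, curvature -1
c1 = Circle(0.5, 0, 0.5)      # curvature 2
c2 = Circle(-0.5, 0, 0.5)     # curvature 2
c3 = Circle(0, 2/3, 1/3)      # curvature 3

new = secondSolution(outer, c1, c2, c3)
print(new.curvature())  # ~15 = 2*(2 + 2 + 3) - (-1)
print(new.m)            # ~0.2667j, i.e. centered at (0, 4/15)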
import numpy as np
from mpl_toolkits.mplot3d.art3d import Line3DCollection


def collect3d(v1a, ga, v2a, use_nonan=True):
    """
    set desired line properties
    """
    v1a = np.real(v1a)
    ga = np.real(ga)
    v2a = np.real(v2a)

    # remove nans for linewidth stuff later.
    ga_nonan = ga[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]
    v1a_nonan = v1a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]
    v2a_nonan = v2a[~np.isnan(ga)*(~np.isnan(v1a))*(~np.isnan(v2a))]

    if use_nonan:
        sol = np.zeros((len(ga_nonan), 3))
        sol[:, 0] = v1a_nonan
        sol[:, 1] = ga_nonan
        sol[:, 2] = v2a_nonan
    else:
        sol = np.zeros((len(ga), 3))
        sol[:, 0] = v1a
        sol[:, 1] = ga
        sol[:, 2] = v2a

    sol = np.transpose(sol)

    points = np.array([sol[0, :], sol[1, :], sol[2, :]]).T.reshape(-1, 1, 3)
    segs = np.concatenate([points[:-1], points[1:]], axis=1)
    line3d = Line3DCollection(segs, linewidths=(1. + (v1a_nonan)/(.001 + np.amax(v1a_nonan))*6.), colors='k')

    return line3d
de53fcb859c8c95b1b95a4ad2ffea102a090e94e
15,997
import os def _DevNull(): """On Windows, sometimes the inherited stdin handle from the parent process fails. Workaround this by passing null to stdin to the subprocesses commands. This function can be used to create the null file handler. """ return open(os.devnull, 'r')
dc815c172fd45dee4b0ed47cbd9497ce7e643972
15,998
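Usage sketch for _DevNull above, passing the null handle as stdin to a child process; the 'python' executable name is an assumption about the environment.

import subprocess

# Feed the child an empty stdin so it can never block waiting for interactive input.
result = subprocess.run(
    ['python', '-c', 'import sys; print(len(sys.stdin.read()))'],
    stdin=_DevNull(), capture_output=True, text=True)
print(result.stdout.strip())  # 0 -- the null device yields no input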
import urllib import requests def get_job_priorities(rest_url): """This retrieves priorities of all active jobs""" url = urllib.parse.urljoin(rest_url, "/jobs/priorities") resp = requests.get(url) return resp.json()
020e825d531394798c041f32683bccfea19684c9
15,999
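Hypothetical call to get_job_priorities above; the host and port are made up and a reachable job-scheduler REST service is assumed, since the snippet does no error handling.

priorities = get_job_priorities("http://localhost:8080")
print(priorities)  # parsed JSON response; exact shape depends on the service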