Dataset columns (each row below is a content / sha1 / id triple):
    content : string, length 35 to 762k
    sha1    : string, length 40
    id      : int64, 0 to 3.66M
def plot_coarray(array, ax=None, show_location_errors=False):
    """Visualizes the difference coarray of the input array.

    Args:
        array (~doatools.model.arrays.ArrayDesign): A sensor array.
        ax (~matplotlib.axes.Axes): Matplotlib axes used for the plot. If not
            specified, a new figure will be created. Default value is ``None``.
        show_location_errors (bool): If set to ``True``, will visualize the
            perturbed array if the input array has location errors.

    Returns:
        The axes object containing the plot.
    """
    return _plot_array_impl(array, ax, True, show_location_errors)
e4a0d1fe4ab48b5050c55d44bd4ca4342cc9f9a9
13,843
def pad_sequence(sequences, batch_first=False, padding_value=0.0): """Pad a list of variable-length Variables. This method stacks a list of variable-length :obj:`nnabla.Variable` s with the padding_value. :math:`T_i` is the length of the :math:`i`-th Variable in the sequences. :math:`B` is the batch size equal to the length of the sequences. :math:`T` is the max of :math:`T_i` for all :math:`i`. :math:`*` is the remaining dimensions including none. .. note:: This function **must** be used the dynamic computation mode. Example: .. code-block:: python import numpy as np import nnabla as nn import nnabla.functions as F import nnabla.utils.rnn as rnn_utils nn.set_auto_forward(True) l2v = lambda ldata: nn.Variable.from_numpy_array(np.asarray(ldata)) a = l2v([1, 1, 1, 1]) b = l2v([2, 2, 2]) c = l2v([2, 2, 2]) d = l2v([3, 3]) e = l2v([3, 3]) sequences = [a, b, c, d, e] padded_sequence = rnn_utils.pad_sequence(sequences) print(padded_sequence.d) Args: sequences (list of :obj:`nnabla.Variable`): Sequence of the variable of (:math:`T_i`, :math:`*`) shape. batch_first (bool): If False, output is of (:math:`T`, :math:`B`, :math:`*`) shape, otherwise (:math:`B`, :math:`T`, :math:`*`). padding_value (float): Padding value. Returns: :obj:`nnabla.Variable` of (:math:`T`, :math:`B`, :math:`*`) or (:math:`B`, :math:`T`, :math:`*`) shape """ B = len(sequences) T = max([e.shape[0] for e in sequences]) shape0 = (B, T) if batch_first else (T, B) shape1 = sequences[0].shape[1:] padded_sequence = F.constant(padding_value, shape0 + shape1) for b, s in enumerate(sequences): l = s.shape[0] if batch_first: padded_sequence[b, :l, ...] = s else: padded_sequence[:l, b, ...] = s return padded_sequence
449c7681d39edc0494269aefd488aa44548a68df
13,845
import urllib import yaml import requests def _fetch_global_config(config_url, github_release_url, gh_token): """ Fetch the index_runner_spec configuration file from the Github release using either the direct URL to the file or by querying the repo's release info using the GITHUB API. """ if config_url: print('Fetching config from the direct url') # Fetch the config directly from config_url with urllib.request.urlopen(config_url) as res: # nosec return yaml.safe_load(res) # type: ignore else: print('Fetching config from the release info') # Fetch the config url from the release info if gh_token: headers = {'Authorization': f'token {gh_token}'} else: headers = {} tries = 0 # Sometimes Github returns usage errors and a retry will solve it while True: release_info = requests.get(github_release_url, headers=headers).json() if release_info.get('assets'): break if tries == _FETCH_CONFIG_RETRIES: raise RuntimeError(f"Cannot fetch config from {github_release_url}: {release_info}") tries += 1 for asset in release_info['assets']: if asset['name'] == 'config.yaml': download_url = asset['browser_download_url'] with urllib.request.urlopen(download_url) as res: # nosec return yaml.safe_load(res) raise RuntimeError("Unable to load the config.yaml file from index_runner_spec")
c436bfb7692ce0d100367691588d511ed95bce99
13,846
def parse_color(c, desc):
    """Check that a given value is a color."""
    return c
ebabefbd56de120a753723f1dccb0f7c12af2fe6
13,847
def __virtual__():
    """Only load gnocchiv1 if requirements are available."""
    if REQUIREMENTS_MET:
        return 'gnocchiv1'
    else:
        return False, ("The gnocchiv1 execution module cannot be loaded: "
                       "os_client_config or keystoneauth are unavailable.")
5dc2a83ba6a93a37f037978bfe89edf6ec2fe103
13,849
def setup():
    """Start headless Chrome in docker container."""
    options = webdriver.ChromeOptions()
    options.add_argument('--no-sandbox')
    options.add_argument('--headless')
    options.add_argument('--disable-gpu')
    driver = webdriver.Chrome(options=options)
    driver.implicitly_wait(5)
    return driver
79c135732b39513f270ac0f670ffddc89b576f75
13,850
import json


def lambdaResponse(statusCode, body, headers={}, isBase64Encoded=False):
    """
    A utility to wrap the lambda function call returns with the right status
    code, body, and switches.
    """
    # Make sure the body is a json object
    if not isinstance(body, str):
        body = json.dumps(body)
    # Make sure the content type is json
    header = headers
    header["Content-Type"] = "application/json"
    header["Access-Control-Allow-Origin"] = "*"
    response = {
        "isBase64Encoded": isBase64Encoded,
        "statusCode": statusCode,
        "headers": header,
        "body": body
    }
    return response
0159ba871c38ce550752d47ffea536c33a5d6b3e
13,851
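A minimal usage sketch for the lambdaResponse helper above; the handler name, event payload, and extra header are invented for illustration:

def handler(event, context):
    data = {"message": "ok", "items": [1, 2, 3]}
    # The body is serialized to JSON and content-type/CORS headers are added.
    return lambdaResponse(200, data, headers={"X-Request-Id": "example"})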
def singleton(class_):
    """
    Specify that a class is a singleton
    :param class_:
    :return:
    """
    instances = {}

    def getinstance(*args, **kwargs):
        if class_ not in instances:
            instances[class_] = class_(*args, **kwargs)
        return instances[class_]
    return getinstance
678205d133783f6b0720876546deed9ed7c59d72
13,852
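A short usage sketch for the singleton decorator above; the Config class and its argument are made up for illustration:

@singleton
class Config:
    def __init__(self, env="prod"):
        self.env = env

a = Config(env="dev")
b = Config()   # constructor arguments are ignored after the first instantiation
assert a is b and b.env == "dev"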
from typing import Optional from typing import Union from typing import List from datetime import datetime def is_datetime( value: Scalar, formats: Optional[Union[str, List[str]]] = None, typecast: Optional[bool] = True ) -> bool: """Test if a given string value can be converted into a datetime object for a given data format. The function accepts a single date format or a list of formates. If no format is given, ISO format is assumed as the default. Parameters ---------- value: scalar Scalar value that is tested for being a date. formats: string or list(string) Date format string using Python strptime() format directives. This can be a list of date formats. typecast: bool, default=True Attempt to parse string values as dates if True. Returns ------- bool """ if isinstance(value, datetime): return True elif not typecast or not isinstance(value, str): return False # Try to convert the given string to a datatime object with the format # that was specified at object instantiation. This will raise an # exception if the value does not match the datetime format string. # Duplicate code depending on whether format is a list of a string. if formats is None: # Issue \#39: dateutil.parse (falsely?) identifies the following # strings as dates. For column profiling we want to exclude these: # 14A; 271 1/2; 41-05; 6-8 # # As a work-around for now we expect a valid date to have at least six # characters (one for day, month, two for year and at least two # non-alphanumeric characters. # # An alternative solution was pointed out by @remram44: # https://gitlab.com/ViDA-NYU/datamart/datamart/-/blob/39462a5dca533a1e55596ddcbfc0ac7e98dce4de/lib_profiler/datamart_profiler/temporal.py#L63 # noqa: E501 # # All solutions seem to suffer from the problem that values like # 152-12 are valid dates (e.g., 152-12-01 in this case) but also # valid house numbers, for example. There is no good solution here. # For now we go with the assumption that if someone wants to specify # a date it should have at least a day, month and year separated by # some special (non-alphanumeric) charcater. if len(value) >= 6 and has_two_spec_chars(value): try: parse(value, fuzzy=False) return True except (OverflowError, TypeError, ValueError): pass else: return to_datetime_format(value=value, formats=formats) is not None return False
642fbe509c7b13a905dc4c65b43dcec20f36fb7e
13,853
def sortkey(d):
    """Split d on "_", reverse and return as a tuple."""
    parts = d.split("_")
    parts.reverse()
    return tuple(parts)
1d8f8864a3d0bfd7dae8711bca183317e0f3fc0e
13,854
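For illustration, sortkey above can be used as a key function so names sharing a suffix sort together (the example strings are invented):

names = ["alpha_2020", "beta_2019", "alpha_2019"]
print(sorted(names, key=sortkey))
# ['alpha_2019', 'beta_2019', 'alpha_2020'] -- ordered by the reversed parts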
def resolve_stream_name(streams, stream_name):
    """Returns the real stream name of a synonym."""
    if stream_name in STREAM_SYNONYMS and stream_name in streams:
        for name, stream in streams.items():
            if stream is streams[stream_name] and name not in STREAM_SYNONYMS:
                return name
    return stream_name
48fe2f5eca72b30bd669477807c9b7476eb4ef18
13,855
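resolve_stream_name above depends on a module-level STREAM_SYNONYMS collection that is not shown; in this purely illustrative sketch it is assumed to be a simple set of alias names:

STREAM_SYNONYMS = {"best"}                    # assumed constant for the example

class Stream:
    pass

hd = Stream()
streams = {"720p": hd, "best": hd}            # "best" aliases the 720p stream
print(resolve_stream_name(streams, "best"))   # -> "720p"
print(resolve_stream_name(streams, "720p"))   # -> "720p" (not a synonym)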
def get_split_cifar100_tasks(num_tasks, batch_size, run, paradigm, dataset):
    """
    Returns data loaders for all tasks of split CIFAR-100
    :param num_tasks:
    :param batch_size:
    :return:

    datasets = {}
    # convention: tasks starts from 1 not 0 !
    # task_id = 1 (i.e., first task) => start_class = 0, end_class = 4
    cifar_transforms = torchvision.transforms.Compose([torchvision.transforms.ToTensor(),])
    cifar_train = torchvision.datasets.CIFAR100('./data/', train=True, download=True, transform=cifar_transforms)
    cifar_test = torchvision.datasets.CIFAR100('./data/', train=False, download=True, transform=cifar_transforms)
    for task_id in range(1, num_tasks+1):
        train_loader, test_loader = get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
        datasets[task_id] = {'train': train_loader, 'test': test_loader}
    return datasets
    """
    """
    datasets = {}
    paradigm = 'class_iid'
    run = 0
    dataset = core50(paradigm, run)
    for task_id in range(0, num_tasks):
        train_loader, val, test_loader = dataset.getNextClasses(task_id)  #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
        datasets[task_id] = {'train': train_loader, 'test': test_loader}
    return datasets
    """
    datasets = {}
    #paradigm = 'class_iid'
    #run = 0
    #dataset = load_datasets(paradigm, run)
    if dataset == 'core50':
        for task_id in range(0, num_tasks):
            train_loader, test_loader = dataset_core50(task_id, batch_size, run, paradigm, dataset)  #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
            datasets[task_id] = {'train': train_loader, 'test': test_loader}
    if dataset == 'toybox':
        for task_id in range(0, num_tasks):
            train_loader, test_loader = dataset_toybox(task_id, batch_size, run, paradigm, dataset)  #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
            datasets[task_id] = {'train': train_loader, 'test': test_loader}
    if dataset == 'ilab':
        for task_id in range(0, num_tasks):
            train_loader, test_loader = dataset_ilab(task_id, batch_size, run, paradigm, dataset)  #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
            datasets[task_id] = {'train': train_loader, 'test': test_loader}
    if dataset == 'cifar100':
        for task_id in range(0, num_tasks):
            train_loader, test_loader = dataset_cifar100(task_id, batch_size, run, paradigm, dataset)  #get_split_cifar100(task_id, batch_size, cifar_train, cifar_test)
            datasets[task_id] = {'train': train_loader, 'test': test_loader}
    return datasets
003c74a55a4e9a1f645a6bc930abf65342abd0fc
13,856
def is_point_in_triangle(pt, v1, v2, v3):
    """Returns True if the 2D point pt is within the triangle defined by v1-3.

    https://www.gamedev.net/forums/topic/295943-is-this-a-better-point-in-triangle-test-2d/
    """
    b1 = sign(pt, v1, v2) < 0.0
    b2 = sign(pt, v2, v3) < 0.0
    b3 = sign(pt, v3, v1) < 0.0
    return ((b1 == b2) and (b2 == b3))
2ff58dfb4efe939513cc901772aa744296ebb960
13,857
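is_point_in_triangle above calls a sign helper that is not included in the row; the usual 2D cross-product definition (an assumption based on the linked article) and a quick check look like this:

def sign(p1, p2, p3):
    # z-component of the cross product of (p1 - p3) and (p2 - p3)
    return (p1[0] - p3[0]) * (p2[1] - p3[1]) - (p2[0] - p3[0]) * (p1[1] - p3[1])

print(is_point_in_triangle((0.25, 0.25), (0, 0), (1, 0), (0, 1)))  # True
print(is_point_in_triangle((1.0, 1.0), (0, 0), (1, 0), (0, 1)))    # False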
def precomputed_aug_experiment( clf, auged_featurized_x_train, auged_featurized_y_train, auged_featurized_x_train_to_source_idxs, auged_featurized_x_test, auged_featurized_y_test, auged_featurized_x_test_to_source_idxs, aug_iter, train_idxs_scores, n_aug_sample_points, update_scores=False, weight_aug_samples=False, use_loss=False, stratified_sampling_x_train_ks=None, ): """ This is a precomputed version of the aug_experiment. Here, we expect training sets to be augmented and featurized up front. This function will index into the augmented set (with featurization) to get the input that would be fed into the classifier. @param clf The classifier to use (e.g., logistic regression) @param auged_featurized_x_train The augmented and featurized training set. @param auged_featurized_y_train The labels of the training set. @param auged_featurized_x_train_to_source_idxs A list of idxs corresponding to the source of augmented images from the original training set. -1 means that the point is an original point. @param auged_featurized_x_test The augmented and featurized test set. @param auged_featurized_y_test The labels of the test set. @param auged_featurized_x_test_to_source_idxs A list of idxs corresponding to the source of augmented images from the original test set. -1 means that the point is an original point. @param aug_iter The policy to use. @param train_idxs_scores The scores to use for the policies (e.g., LOO influence or loss). @param stratified_sampling_x_train_ks The population type of each train sample for stratified sampling. Sampling is round robin in numeric order. @return An list of accuracies on the test set and a list of the points that were chosen for augmentation. """ influence_acc = [] aug_iter_idxs = [] original_mask_train = auged_featurized_x_train_to_source_idxs < 0 original_x_train = auged_featurized_x_train[original_mask_train] original_y_train = auged_featurized_y_train[original_mask_train] auged_x_train = np.copy(original_x_train) auged_y_train = np.copy(original_y_train) n_aug_sample_points = set(n_aug_sample_points) if weight_aug_samples: sample_weight = np.ones(len(original_x_train)) else: sample_weight = None if stratified_sampling_x_train_ks is not None: aug_idxs = stratified_sampling_to_aug_idxs( train_idxs_scores, aug_iter, stratified_sampling_x_train_ks, ) else: aug_idxs = np.array(list(aug_iter(train_idxs_scores))).flatten() assert len(np.unique(aug_idxs)) == len(aug_idxs) already_auged = set() while len(already_auged) < len(original_x_train): assert len(train_idxs_scores) == len(original_x_train) next_idxs = [idx for idx in aug_idxs if idx not in already_auged] idx = next_idxs[0] already_auged.add(idx) aug_mask = auged_featurized_x_train_to_source_idxs == idx x_aug_ = auged_featurized_x_train[aug_mask] auged_x_train = np.concatenate( [ auged_x_train, x_aug_, ], axis=0) y_aug_ = auged_featurized_y_train[aug_mask] auged_y_train = np.concatenate( [ auged_y_train, y_aug_, ], axis=0) if weight_aug_samples: # We downweight all points from the original train point rescale_weight = 1.0 / (len(x_aug_) + 1) weight_aug_ = np.full(len(x_aug_), rescale_weight) sample_weight = np.concatenate([ sample_weight, weight_aug_, ], axis=0) sample_weight[idx] = rescale_weight if len(already_auged) in n_aug_sample_points: fit_params = {"logistic_reg__sample_weight": sample_weight} clf.fit(auged_x_train, auged_y_train, **fit_params) aug_train_poisoned_acc = clf.score( auged_featurized_x_test, auged_featurized_y_test) influence_acc.append(aug_train_poisoned_acc) 
aug_iter_idxs.append(idx) if update_scores: if isinstance(clf, sklearn.model_selection.GridSearchCV): if use_loss: train_idxs_scores = (clf .best_estimator_ .named_steps["logistic_reg"] .log_losses(L2_alpha=0.0)) else: train_idxs_scores = (clf .best_estimator_ .named_steps["logistic_reg"] .LOO_influence()) else: if use_loss: train_idxs_scores = (clf .named_steps["logistic_reg"] .log_losses(L2_alpha=0.0)) else: train_idxs_scores = (clf .named_steps["logistic_reg"] .LOO_influence()) train_idxs_scores = train_idxs_scores[:len(original_x_train)] if stratified_sampling_x_train_ks is not None: aug_idxs = stratified_sampling_to_aug_idxs( train_idxs_scores, aug_iter, stratified_sampling_x_train_ks, ) else: aug_idxs = np.array( list(aug_iter(train_idxs_scores)) ).flatten() return influence_acc, aug_iter_idxs
50f03f08c7ce0777658ca3f84691b940f190e4cd
13,858
def get_yahoo_data(symbol, start_date, end_date): """Returns pricing data for a YAHOO stock symbol. Parameters ---------- symbol : str Symbol of the stock in the Yahoo. You can refer to this link: https://www.nasdaq.com/market-activity/stocks/screener?exchange=nasdaq. start_date : str Starting date (YYYY-MM-DD) of the period that you want to get data on end_date : str Ending date (YYYY-MM-DD) of the period you want to get data on Returns ------- pandas.DataFrame Stock data (in OHLCAV format) for the specified company and date range """ df = yf.download(symbol, start=start_date, end=end_date) df = df.reset_index() rename_dict = { "Date": "dt", "Open": "open", "High": "high", "Low": "low", "Close": "close", "Adj Close": "adj_close", "Volume": "volume", } rename_list = ["dt", "open", "high", "low", "close", "adj_close", "volume"] df = df.rename(columns=rename_dict)[rename_list].drop_duplicates() df["dt"] = pd.to_datetime(df.dt) return df.set_index("dt")
adc2a6186d96c76a75a62391c7f8d7534836f5bd
13,859
def first_n(m: dict, n: int):
    """Return first n items of dict"""
    return {k: m[k] for k in list(m.keys())[:n]}
57ccc9f8913c60c592b38211900fe8d28feffb4c
13,860
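Quick usage of first_n above (relies on Python 3.7+ insertion-ordered dicts):

m = {"a": 1, "b": 2, "c": 3}
print(first_n(m, 2))   # {'a': 1, 'b': 2}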
from typing import List from typing import Dict from typing import Union def listdictnp_combine( lst: List, method: str = "concatenate", axis: int = 0, keep_nested: bool = False, allow_error: bool = False, ) -> Dict[str, Union[np.ndarray, List]]: """Concatenate or stack a list of dictionaries contains numpys along with error handling Parameters ---------- lst : list list of dicts containings np arrays method : str 'concatenate' or 'stack' axis : int axis to concat or stack over keep_nested : bool keep nested structure of list or not allow_error : bool allow for error handling. If op does not succes, list is provided Returns ------- np.array OR list of np.array in case of error """ for k in range(len(lst)): assert ( lst[0].keys() == lst[k].keys() ), "Dict keys do not match in listdictnp_combine fct" # get keys keys = lst[0].keys() output_dict = dict() for key in keys: # merge nested list if keep_nested: tmp = [None] * len(lst) for k in range(len(lst)): tmp[k] = lst[k][key] else: tmp = list() for k in range(len(lst)): tmp = [*tmp, *lst[k][key]] # convert to numpy if possible output_dict[key] = listnp_combine( tmp, method=method, axis=axis, allow_error=allow_error ) return output_dict
b4527342c8a3b90c797e7ef88326c97b4933d1b0
13,861
def find_pure_symbol(symbols, clauses):
    """Find a symbol and its value if it appears only as a positive literal
    (or only as a negative) in clauses.
    >>> find_pure_symbol([A, B, C], [A|~B,~B|~C,C|A])
    (A, True)
    """
    for s in symbols:
        found_pos, found_neg = False, False
        for c in clauses:
            if not found_pos and s in disjuncts(c):
                found_pos = True
            if not found_neg and ~s in disjuncts(c):
                found_neg = True
        if found_pos != found_neg:
            return s, found_pos
    return None, None
657c011fb0ee865252e7deed4672cde08c6db2e9
13,862
def cross_entropy_emphasized_loss(labels, predictions, corrupted_inds, axis=0, alpha=0.3, beta=0.7, regularizer=None): """ Compute cross entropy loss over training examples that have been corrupted along certain dimensions :param labels: tensor of training example with no corruption added :param predictions: output tensor of autoencoder :param corrupted_inds: indices of corrupted dimensions (if any) :param axis: axis along which components are taken :param alpha: weight for error on components that were corrupted :param beta: weight for error on components that were not corrupted :return: cross entropy loss, emphasized by corrupted component weight """ assert (labels.shape[axis] == predictions.shape[axis]) assert (labels.dtype == predictions.dtype) num_elems = labels.shape[axis].value * FLAGS.batch_size # corrupted features x_c = tf.boolean_mask(labels, corrupted_inds) z_c = tf.boolean_mask(predictions, corrupted_inds) # uncorrupted features x = tf.boolean_mask(labels, ~corrupted_inds) z = tf.boolean_mask(predictions, ~corrupted_inds) # if training on examples with corrupted indices if x_c is not None: lhs = alpha * (-tf.reduce_sum(tf.add(tf.multiply(x_c, tf.log(z_c)), tf.multiply(1.0 - x_c, tf.log(1.0 - z_c))))) rhs = beta * (-tf.reduce_sum(tf.add(tf.multiply(x, tf.log(z)), tf.multiply(1.0 - x, tf.log(1.0 - z))))) else: lhs = 0 rhs = -tf.reduce_sum(tf.add(tf.multiply(labels, tf.log(predictions)), tf.multiply(1.0 - labels, tf.log(1.0 - predictions)))) return tf.add(lhs, rhs) / num_elems
e4ebb4e3198dea085789c81388522130ed867e3f
13,863
def get_process_list(node: Node): """Analyse the process description and return the Actinia process chain and the name of the processing result :param node: The process node :return: (output_objects, actinia_process_list) """ input_objects, process_list = check_node_parents(node=node) output_objects = [] # First analyse the data entry if "id" not in node.arguments: raise Exception("Process %s requires parameter <id>" % PROCESS_NAME) input_object = DataObject.from_string(node.arguments["id"]) spatial_extent = None if "spatial_extent" in node.arguments: spatial_extent = node.arguments["spatial_extent"] temporal_extent = None if "temporal_extent" in node.arguments: temporal_extent = node.arguments["temporal_extent"] bands = None if "bands" in node.arguments: bands = node.arguments["bands"] if input_object.is_strds() and \ (temporal_extent is not None or bands is not None): output_object = DataObject( name=create_output_name(input_object.name, PROCESS_NAME), datatype=input_object.datatype) else: output_object = input_object output_objects.append(output_object) node.add_output(output_object) pc = create_process_chain_entry(input_object, spatial_extent, temporal_extent, bands, output_object) process_list.extend(pc) return output_objects, process_list
00f5e6c767975def09fbea800a8b74cfcd12f935
13,864
def _validate_image_formation(the_sicd): """ Validate the image formation. Parameters ---------- the_sicd : sarpy.io.complex.sicd_elements.SICD.SICDType Returns ------- bool """ if the_sicd.ImageFormation is None: the_sicd.log_validity_error( 'ImageFormation attribute is not populated, and ImageFormType is {}. This ' 'cannot be valid.'.format(the_sicd.ImageFormType)) return False # nothing more to be done. alg_types = [] for alg in ['RgAzComp', 'PFA', 'RMA']: if getattr(the_sicd, alg) is not None: alg_types.append(alg) if len(alg_types) > 1: the_sicd.log_validity_error( 'ImageFormation.ImageFormAlgo is set as {}, and multiple SICD image formation parameters {} are set.\n' 'Only one image formation algorithm should be set, and ImageFormation.ImageFormAlgo ' 'should match.'.format(the_sicd.ImageFormation.ImageFormAlgo, alg_types)) return False elif len(alg_types) == 0: if the_sicd.ImageFormation.ImageFormAlgo is None: the_sicd.log_validity_warning( 'ImageFormation.ImageFormAlgo is not set, and there is no corresponding\n' 'RgAzComp, PFA, or RMA SICD parameters set. Setting ImageFormAlgo ' 'to "OTHER".'.format(the_sicd.ImageFormation.ImageFormAlgo)) the_sicd.ImageFormation.ImageFormAlgo = 'OTHER' return True elif the_sicd.ImageFormation.ImageFormAlgo != 'OTHER': the_sicd.log_validity_error( 'No RgAzComp, PFA, or RMA SICD parameters populated, but ImageFormation.ImageFormAlgo ' 'is set as {}.'.format(the_sicd.ImageFormation.ImageFormAlgo)) return False return True # there is exactly one algorithm type populated return _validate_image_form_parameters(the_sicd, alg_types[0])
b68c9a767e2499b8149389e2e207a0f05d20bf44
13,865
def handle_closet(player, level, reward_list): """ Handle a closet :param player: The player object for the player :param level: The level that the player is on :return reward: The reward given to the player """ # Print the dialogue for the closet print "You found a closet. It appears to be unlocked." print "Should you open it?" # Get the players move player_move = handle_options(player, ["Open the Closet!", "No! Its a trap!"]) reward = None if player_move == 1: # Decide what happens when the person opens the closet closet_outcome = randint(0, 5) if closet_outcome < level: # There is a rat inside the closet print "OH NO! There is a giant man eating rat in there!" handle_fight(player, 3, 10) else: # You get a helpful reward from the closet reward = reward_list[randint(0, len(reward_list)-1)] print "Congratulations! You found a " + reward + "!" print "This item increases your damage points by", 2 * level player.add_damage_points(2*level) return reward
ab170cb556fd688edeac80eac9bb7577df771a33
13,866
def module_path_to_test_path(module):
    """Convert a module locator to a proper test filename."""
    return "test_%s.py" % module_path_to_name(module)
17997d17d64686deec97d4aa9f23a14f04ff5516
13,867
def inspect_bom(filename):
    """Inspect file for bom."""
    encoding = None
    try:
        with open(filename, "rb") as f:
            encoding = has_bom(f.read(4))
    except Exception:  # pragma: no cover
        # print(traceback.format_exc())
        pass
    return encoding
84da40bc941053c4e6d18934c27b3e1d63318762
13,868
from packaging.specifiers import SpecifierSet


def parse_requirement(text):
    """
    Parse a requirement such as 'foo>=1.0'.

    Returns a (name, specifier) named tuple.
    """
    match = REQUIREMENT_RE.match(text)
    if not match:
        raise ValueError("Invalid requirement: %s" % text)
    name = match.group('name').strip()
    spec = SpecifierSet(match.group('specifier') or '')
    return Requirement(name, spec)
95dab6f3dd6784bf73233e80cfb946f904984a1d
13,869
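parse_requirement above relies on a module-level REQUIREMENT_RE and a Requirement named tuple that are not part of the row; one plausible, purely illustrative reconstruction:

import re
from collections import namedtuple
from packaging.specifiers import SpecifierSet

# Assumed definitions: a package name followed by an optional PEP 440 specifier.
REQUIREMENT_RE = re.compile(r'^(?P<name>[A-Za-z0-9._-]+)\s*(?P<specifier>.*)$')
Requirement = namedtuple('Requirement', ['name', 'specifier'])

print(parse_requirement('foo>=1.0'))
# Requirement(name='foo', specifier=<SpecifierSet('>=1.0')>)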
def H_split(k, N, eps):
    """Entropy of the split in binary search including overlap, specified by eps"""
    return (k / N) * (np.log(k) + H_epsilon(k, eps)) + \
        ((N - k) / N) * (np.log(N - k) + H_epsilon(N - k, eps))
555d5e56550851084fdfa148dc7936c75649a197
13,870
def date_features(inputs, features_slice, columns_index) -> tf.Tensor:
    """Return an input and output date tensors from the features tensor."""
    date = features(inputs, features_slice, columns_index)
    date = tf.cast(date, tf.int32)
    date = tf.strings.as_string(date)
    return tf.strings.reduce_join(date, separator="-", axis=-1, keepdims=True)
3362019b24a6f3104d858d2ddf17f0fae4060d7b
13,871
import pickle


def save_calib(filename, calib_params):
    """
    Saves calibration parameters as '.pkl' file.

    Parameters
    ----------
    filename : str
        Path to save file, must be '.pkl' extension
    calib_params : dict
        Calibration parameters to save

    Returns
    -------
    saved : bool
        Saved successfully.
    """
    if type(calib_params) != dict:
        raise TypeError("calib_params must be 'dict'")
    output = open(filename, 'wb')
    try:
        pickle.dump(calib_params, output)
    except:
        raise IOError("filename must be '.pkl' extension")
    output.close()
    saved = True
    return saved
6735c8a6e96158b9fc580b6e61609b5ae7733fe0
13,872
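An example call for save_calib above (the file name and parameter values are invented):

calib = {"mtx": [[1000.0, 0.0, 320.0]], "dist": [0.1, -0.05]}
save_calib("camera_calib.pkl", calib)   # writes the dict with pickle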
def context_to_dict(context):
    """convert a django context to a dict"""
    the_dict = {}
    for elt in context:
        the_dict.update(dict(elt))
    return the_dict
b319c6be4efa83c91eefa249c8be90824bc0158f
13,873
def returnItemsWithMinSupport(itemSet, transactionList, minSupport, freqSet):
    """calculates the support for items in the itemSet and returns a subset
    of the itemSet each of whose elements satisfies the minimum support"""
    _itemSet = set()
    localSet = defaultdict(int)
    for item in itemSet:
        for transaction in transactionList:
            if item.issubset(transaction):
                freqSet[item] += 1
                localSet[item] += 1
    for item, count in list(localSet.items()):
        support = float(count) / len(transactionList)
        if support >= minSupport:
            _itemSet.add(item)
    return _itemSet
e1290778548f198f87fc210c8a78bbfadaf0de9f
13,874
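A small self-contained run of returnItemsWithMinSupport above (the Apriori-style first pass); the frozenset transactions and the defaultdict freqSet mirror what the function expects:

from collections import defaultdict

transactionList = [frozenset({"a", "b"}), frozenset({"a", "c"}), frozenset({"b"})]
itemSet = {frozenset({"a"}), frozenset({"b"}), frozenset({"c"})}
freqSet = defaultdict(int)

# Keep items appearing in at least half of the transactions:
# 'a' and 'b' pass (2/3 support each), 'c' does not (1/3).
print(returnItemsWithMinSupport(itemSet, transactionList, 0.5, freqSet))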
def create_P(P_δ, P_ζ, P_ι):
    """
    Combine `P_δ`, `P_ζ` and `P_ι` into a single matrix.

    Parameters
    ----------
    P_δ : ndarray(float, ndim=1)
        Probability distribution over the values of δ.
    P_ζ : ndarray(float, ndim=2)
        Markov transition matrix for ζ.
    P_ι : ndarray(float, ndim=1)
        Probability distribution over the values of ι.

    Returns
    ----------
    P : ndarray(float, ndim=3)
        Joint probability distribution over the values of δ, ζ and ι.
        Probabilities vary by δ on the first axis, by ζ on the second axis,
        and by ι on the third axis.
    """
    P = \
        P_δ[:, None, None, None] * P_ζ[None, :, :, None] * \
        P_ι[None, None, None, :]
    return P
0afdef50c50563421bb7c6f3f928fa6b3e5f4733
13,875
import attrs def sel_nearest( dset, lons, lats, tolerance=2.0, unique=False, exact=False, dset_lons=None, dset_lats=None, ): """Select sites from nearest distance. Args: dset (Dataset): Stations SpecDataset to select from. lons (array): Longitude of sites to interpolate spectra at. lats (array): Latitude of sites to interpolate spectra at. tolerance (float): Maximum distance to use site for interpolation. unique (bool): Only returns unique sites in case of repeated inexact matches. exact (bool): Require exact matches. dset_lons (array): Longitude of stations in dset. dset_lats (array): Latitude of stations in dset. Returns: Selected SpecDataset at locations defined by (lons, lats). Note: Args `dset_lons`, `dset_lats` are not required but can improve performance when `dset` is chunked with site=1 (expensive to access station coordinates) and improve precision if projected coordinates are provided at high latitudes. """ coords = Coordinates(dset, lons=lons, lats=lats, dset_lons=dset_lons, dset_lats=dset_lats) station_ids = [] for lon, lat in zip(coords.lons, coords.lats): closest_id, closest_dist = coords.nearest(lon, lat) if closest_dist > tolerance: raise AssertionError( f"Nearest site from (lat={lat}, lon={lon}) is {closest_dist:g} " f"deg away but tolerance is {tolerance:g} deg." ) if exact and closest_dist > 0: raise AssertionError( f"Exact match required but no site at (lat={lat}, lon={lon}), " f"nearest site is {closest_dist} deg away." ) station_ids.append(closest_id) if unique: station_ids = list(set(station_ids)) dsout = dset.isel(**{attrs.SITENAME: station_ids}) # Return longitudes in the convention provided if coords.consistent is False: dsout.lon.values = coords._swap_longitude_convention(dsout.lon.values) dsout = dsout.assign_coords({attrs.SITENAME: np.arange(len(station_ids))}) return dsout
ebf22cdeb30215a76312f2cdd8223a2d24bf6af6
13,876
import datasets def evaluate(dataset, predictions, gts, output_folder): """evaluate dataset using different methods based on dataset type. Args: dataset: Dataset object predictions(dict): each item in the list represents the prediction results for one image. gt(dict): Ground truth for each batch output_folder: output folder, to save evaluation files or results. Returns: evaluation result """ args = dict( predictions=predictions, gts=gts, output_folder=output_folder, ) if isinstance(dataset, datasets.MNIST): return do_mnist_evaluation(**args) elif isinstance(dataset, datasets.MWPose): return do_mwpose_evaluation(dataset=dataset, **args) elif isinstance(dataset, datasets.ModelNetHdf): return do_modelnet_evaluation(**args) else: dataset_name = dataset.__class__.__name__ raise NotImplementedError("Unsupported dataset type {}.".format(dataset_name))
85c0232c53de091f2293d042b944fe8768a9ac91
13,877
from vlescrapertools import getAuthedSession def html_xml_save( s=None, possible_sc_link=None, table="htmlxml", course_presentation=None ): """Save the HTML and XML for a VLE page page.""" if not possible_sc_link: # should really raise error here print("need a link") if not s: if "learn2.open.ac.uk" in possible_sc_link: s = getAuthedSession() else: s = possible_sc_link typ, html_page_url, rawxml, html_src = get_sc_page(possible_sc_link, s) if typ: dbrowdict = { "possible_sc_link": possible_sc_link, "doctype": typ, "html_url": html_page_url, "xml": rawxml, "html_src": html_src, "course_presentation": course_presentation, "courseCode": "", "courseTitle": "", "itemTitle": "", } else: dbrowdict = {} # Get some metadata from the XML # Item/CourseCode # Item/CourseTitle # Item/ItemTitle if typ == "XML": root = etree.fromstring(rawxml.encode("utf-8")) # If the course code is contaminated by a presentation suffix, get rid of the presentation code dbrowdict["courseCode"] = flatten(root.find("CourseCode")).split("-")[0] dbrowdict["courseTitle"] = flatten(root.find("CourseTitle")) dbrowdict["itemTitle"] = flatten(root.find("ItemTitle")) if dbrowdict: DB[table].insert(dbrowdict) return typ, html_page_url, rawxml, html_src
37bb86769c86d851e3fec8dabc17534dfdecde60
13,878
def htmr(t,axis="z"): """ Calculate the homogeneous transformation matrix of a rotation respect to x,y or z axis. """ from sympy import sin,cos,tan if axis in ("z","Z",3): M = Matrix([[cos(t),-sin(t),0,0], [sin(t),cos(t),0,0], [0,0,1,0], [0,0,0,1]]) elif axis in ("y","Y",2): M = Matrix([[cos(t),0,sin(t),0], [0,1,0,0], [-sin(t),0,cos(t),0], [0,0,0,1]]) elif axis in ("x","X",1): M = Matrix([[1,0,0,0], [0,cos(t),-sin(t),0,], [0,sin(t),cos(t),0], [0,0,0,1]]) else: return eye(4) return M
b3941680f22b2eb48da15b2bb1a6e39c05e3b5c3
13,880
def vt(n, gm, gsd, dmin=None, dmax=10.): """Evaluate the total volume of the particles between two diameters. The CDF of the lognormal distribution is calculated using equation 8.12 from Seinfeld and Pandis. Mathematically, it is represented as: .. math:: V_t=\\frac{π}{6}∫_{-∞}^{∞}D_p^3n_N^e(ln D_p)d lnD_p \\;\\;(\mu m^3 cm^{-3}) Parameters ---------- n : float Total aerosol number concentration in units of #/cc gm : float Median particle diameter (geometric mean) in units of :math:`\mu m`. gsd : float Geometric Standard Deviation of the distribution. dmin : float The minimum particle diameter in microns. Default value is 0 :math:`\mu m`. dmax : float The maximum particle diameter in microns. Default value is 10 :math:`\mu m`. Returns ------- Volume | float Returns the total volume of particles between :math:`D_{min}` and :math:`D_{max}` in units of :math:`\mu m^3 cm^{-3}` See Also -------- opcsim.equations.pdf.dv_ddp opcsim.equations.pdf.dv_dlndp opcsim.equations.pdf.dv_dlogdp Examples -------- Integrate a sample distribution between 0 and 2.5 microns: >>> d = opcsim.AerosolDistribution() >>> d.add_mode(1e3, 100, 1.5, "mode 1") >>> n = opcsim.equations.cdf.vt(1e3, 0.1, 1.5, dmax=2.5) """ res = (np.pi/12.)*n*(gm**3) * np.exp(9./2.*(np.log(gsd)**2)) * \ erfc((1.5*np.sqrt(2) * np.log(gsd)) - (np.log(dmax/gm) / (np.sqrt(2) * np.log(gsd)))) if dmin is not None and dmin > 0.0: res -= vt(n, gm, gsd, dmin=None, dmax=dmin) return res
ba407dc86bbf3201bd597f729f2397ef9428e72b
13,881
def core_value_encode(origin):
    """
    Convert a UTF-8 string into "core socialist values" encoding.
    :param origin:
    :return:
    """
    hex_str = str2hex(origin)
    twelve = hex2twelve(hex_str)
    core_value_iter = twelve_2_core_value(twelve)
    return ''.join(core_value_iter)
7b81540f7e7184ec60fb6820e3548201d67eec29
13,882
def user_query_ahjs_is_ahj_official_of(self, request, queryset):
    """
    Admin action for the User model.
    Redirects the admin to a change list of AHJs the selected users are AHJ officials of.
    """
    model_name = 'ahj'
    field_key_pairs = [field_key_pair('AHJPK', 'AHJPK')]
    queryset = AHJUserMaintains.objects.filter(UserID__in=queryset, MaintainerStatus=True)
    return load_change_list_with_queryset(request, queryset, model_name, field_key_pairs)
4d97f25f2647a92a9690bf3360bd3fd63b03d631
13,883
def get_cache_node_count(
    cluster_id: str,
    configuration: Configuration = None,
    secrets: Secrets = None
) -> int:
    """Returns the number of cache nodes associated to the cluster

    :param cluster_id: str: the name of the cache cluster
    :param configuration: Configuration
    :param secrets: Secrets

    :example:
    {
        "type": "probe",
        "name": "validate cache node count",
        "tolerance": 3,
        "provider": {
            "type": "python",
            "module": "chaosaws.elasticache.probes",
            "func": "get_cache_node_count",
            "arguments": {
                "cluster_id": "MyTestCluster"
            }
        }
    }
    """
    response = describe_cache_cluster(
        cluster_id, configuration=configuration, secrets=secrets
    )
    return response["CacheClusters"][0].get("NumCacheNodes", 0)
e4a4b3cd6d0bf7416ffe5a3d86725a614ad1c41c
13,884
import string def top_sentences(query, sentences, idfs, n): """ Given a `query` (a set of words), `sentences` (a dictionary mapping sentences to a list of their words), and `idfs` (a dictionary mapping words to their IDF values), return a list of the `n` top sentences that match the query, ranked according to idf. If there are ties, preference should be given to sentences that have a higher query term density. """ # Process query. query = set( [ word.lower() for word in query if word not in string.punctuation and word not in nltk.corpus.stopwords.words("english") ] ) # Create a list tuples (sentence, sum_idfs, qt_density) to sort the sentences. results = [] for sentence, words in sentences.items(): # Determine the total sum of IDF values and query term density for each # sentence. sum_idfs = 0 for word in query: if word in words: sum_idfs += idfs[word] qt_density = sum(words.count(word) for word in query) / len(words) results.append((sentence, sum_idfs, qt_density)) # Sort sentences by their total sum of IDF values and query term density. ranked_sentences = [ sentence for sentence, sum_idfs, qt_density in sorted( results, key=itemgetter(1, 2), reverse=True ) ] # Return the 'n' top sentences. return ranked_sentences[:n]
5533b96848baea5afa614e691d2d2ae07c4a16a9
13,885
def load_distribution(label):
    """Load sample distributions as described by Seinfeld+Pandis Table 8.3.

    There are currently 7 options including: Urban, Marine, Rural, Remote
    continental, Free troposphere, Polar, and Desert.

    Parameters
    ----------
    label : {'Urban' | 'Marine' | 'Rural' | 'Remote Continental' | 'Free Troposphere' | 'Polar' | 'Desert'}
        Choose which sample distribution to load.

    Returns
    -------
    An instance of the AerosolDistribution class

    Examples
    --------

    >>> d = opcsim.load_distribution("Urban")

    """
    label = label.lower()
    if label not in DISTRIBUTION_DATA.keys():
        raise ValueError("Invalid label.")
    _tmp = AerosolDistribution(label)
    for each in DISTRIBUTION_DATA[label]:
        _tmp.add_mode(each[0], each[1], 10**each[2], each[3])
    return _tmp
3dfd2fea5c165c331255e3b350e1f92a37919726
13,886
from typing import List def split_4d_itk(img_itk: sitk.Image) -> List[sitk.Image]: """ Helper function to split 4d itk images into multiple 3 images Args: img_itk: 4D input image Returns: List[sitk.Image]: 3d output images """ img_npy = sitk.GetArrayFromImage(img_itk) spacing = img_itk.GetSpacing() origin = img_itk.GetOrigin() direction = np.array(img_itk.GetDirection()).reshape(4, 4) spacing = tuple(list(spacing[:-1])) assert len(spacing) == 3 origin = tuple(list(origin[:-1])) assert len(origin) == 3 direction = tuple(direction[:-1, :-1].reshape(-1)) assert len(direction) == 9 images_new = [] for i, t in enumerate(range(img_npy.shape[0])): img = img_npy[t] images_new.append( create_itk_image_spatial_props(img, spacing, origin, direction)) return images_new
21ad4f6c0cbdb05cf6f67469e3d32e732d1500ee
13,887
from bs4 import BeautifulSoup def parse_results(html, keyword): """[summary] Arguments: html {str} -- google search engine html response keyword {str} -- search term Returns: pandas.DataFrame -- Dataframe with the following columns ['keyword', 'rank', 'title', 'link', 'domain'] """ soup = BeautifulSoup(html, 'html.parser') found_results = [] rank = 1 result_block = soup.find_all('div', attrs={'class': 'g'}) for result in result_block: link = result.find('a', href=True) title = result.find('h3') # description = result.find('span', attrs={'class': 'st'}) if link and title: link = link['href'] title = title.get_text() # if description: # description = description.get_text() if link != '#': domain = DOMAIN_RE.findall(link)[0] found_results.append( {'keyword': keyword, 'rank': rank, 'title': title, 'link': link, 'domain': domain}) rank += 1 return pd.DataFrame(found_results, columns=['keyword', 'rank', 'title', 'link', 'domain'])
4c89e919b3f3285565efe5bdf5c4ec5b87664c79
13,888
def maybe_iter_configs_with_path(x, with_params=False):
    """
    Like x.maybe_iter_configs_with_path(), but returns [(x, [{}])] or [(x, {}, [{}])]
    if x is just a config object and not a Tuner object.
    """
    if is_tuner(x):
        return x.iter_configs_with_path(with_params=with_params)
    else:
        if with_params:
            return [(deepcopy(x), {}, [{}])]
        else:
            return [(deepcopy(x), {})]
947a62067f3eacb4d5c8ba419d8018ad2ab3320c
13,889
import typing


def median(vals: typing.List[float]) -> float:
    """Calculate median value of `vals`

    Arguments:
        vals {typing.List[float]} -- list of values

    Returns:
        float -- median value
    """
    # Use the middle element for odd-length input; for even-length input this
    # returns the lower of the two middle values.
    index = (len(vals) - 1) // 2
    return sorted(vals)[index]
9f840d11409a570a718fdfe56d7a282af43bc798
13,890
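Two quick checks for median above (note that even-length input returns the lower middle value rather than the average of the two middles):

print(median([3.0, 1.0, 2.0]))        # 2.0
print(median([4.0, 1.0, 3.0, 2.0]))   # 2.0 (lower median)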
def melody_mapper(notes):
    """
    Makes a map of a melody to be played

    each item in the list 'notes' should be formatted using these chars:
        duration - length in seconds the sound will be played
        note - the note to play
        sleep - time in seconds to pause

    (note, duration, sleep) example: [('A4', 1, 0.5), ('C3', 0.5, 0)]

    :param notes: List of notes
    :return: list of melody map info
    """
    m_map = {}
    num_of_notes = 1
    for note_info in notes:
        note, duration, sleep = note_info
        m_map[str(num_of_notes)] = {'note': note,
                                    'frequency': get_note(note)[1],
                                    'duration': duration,
                                    'sleep': sleep}
        num_of_notes += 1
    return m_map
cf4c8f7864e91e771d3a70bfc4d8a7f4edb38967
13,891
def sample_bounding_box_scale_balanced_black(landmarks): """ Samples a bounding box for cropping so that the distribution of scales in the training data is uniform. """ bb_min = 0.9 bb_old = image.get_bounding_box(landmarks) bb_old_shape = np.array((bb_old[2] - bb_old[0], bb_old[3] - bb_old[1])) bb_old_size = np.max(bb_old_shape) margin = (1 - bb_min) / 2 bb_old_min = np.round([bb_old[0] + bb_old_shape[0] * margin, bb_old[1] + bb_old_shape[1] * margin, bb_old[2] - bb_old_shape[0] * margin, bb_old[3] - bb_old_shape[1] * margin]) scale = np.random.random_sample() * 0.94 + 0.08 bb_crop_size = int(round(bb_old_size / scale)) bb_crop_start_x = np.random.random_integers(low=bb_old_min[2] - bb_crop_size, high=bb_old_min[0] + 1) bb_crop_start_y = np.random.random_integers(low=bb_old_min[3] - bb_crop_size, high=bb_old_min[1] + 1) bb_crop_end_x = bb_crop_start_x + bb_crop_size bb_crop_end_y = bb_crop_start_y + bb_crop_size bb_crop = [bb_crop_start_x, bb_crop_start_y, bb_crop_end_x, bb_crop_end_y] return np.array(bb_crop)
789cbe92803b77614ab8a018434745b2d9bba3a4
13,892
def get_trainable_layers(layers):
    """Returns a list of layers that have weights."""
    # Collect results in a separate list so the input argument is not shadowed.
    trainable_layers = []
    # Loop through all layers
    for l in layers:
        # If layer is a wrapper, find inner trainable layer
        l = find_trainable_layer(l)
        # Include layer if it has weights
        if l.get_weights():
            trainable_layers.append(l)
    return trainable_layers
2d3f00cb061a6c2ee7081468be564f0b8621441d
13,894
def outcome_from_application_return_code(return_code: int) -> outcome.Outcome:
    """Create either an :class:`outcome.Value` in the case of a 0 `return_code` or an
    :class:`outcome.Error` with a :class:`ReturnCodeError` otherwise.

    Args:
        return_code: The return code to be processed.

    Returns:
        The outcome wrapping the passed in return code.
    """
    if return_code == 0:
        return outcome.Value(return_code)
    return outcome.Error(qtrio.ReturnCodeError(return_code))
c5b786906e0f3fd99ed6660c55213b18139003c0
13,895
import re
from collections import defaultdict


def group_by_scale(labels):
    """ Utility that groups attribute labels by time scale """
    groups = defaultdict(list)
    # Extract scales from labels (assumes that the scale is given by the last
    # numeral in a label)
    for s in labels:
        m = re.findall("\d+", s)
        if m:
            groups[m[-1]].append(s)
        else:
            print("Bad attribute: ", s)
    return list(groups.values())
661ea03f8d463b1e0d5746df60e9e2cb969737ab
13,896
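Illustrative call of group_by_scale above (the attribute names are made up):

labels = ["rain_mm_24", "temp_c_24", "rain_mm_48", "station_name"]
print(group_by_scale(labels))
# prints a "Bad attribute" warning for 'station_name' and returns
# [['rain_mm_24', 'temp_c_24'], ['rain_mm_48']]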
def FontMapper_GetEncodingDescription(*args, **kwargs):
    """FontMapper_GetEncodingDescription(int encoding) -> String"""
    return _gdi_.FontMapper_GetEncodingDescription(*args, **kwargs)
0f154eaa616c3b18bc8828f63137c26c75397d56
13,897
from typing import Counter


def create_merged_ngram_dictionaries(indices, n):
    """Generate a single dictionary for the full batch.

    Args:
        indices: List of lists of indices.
        n: Degree of n-grams.

    Returns:
        Dictionary of hashed(n-gram tuples) to counts in the batch of indices.
    """
    ngram_dicts = []
    for ind in indices:
        ngrams = n_gram.find_all_ngrams(ind, n=n)
        ngram_counts = n_gram.construct_ngrams_dict(ngrams)
        ngram_dicts.append(ngram_counts)
    merged_gen_dict = Counter()
    for ngram_dict in ngram_dicts:
        merged_gen_dict += Counter(ngram_dict)
    return merged_gen_dict
bd313ea7eab835102e94f6c7d66fec8882531385
13,898
import base64


def compute_hash_base64(*fields):
    """bytes -> base64 string"""
    value = compute_hash(*fields)
    return base64.b64encode(value).decode()
b29b77b44a51417d63f8cae1970b5c1f4fb40317
13,899
def triplet_margin_loss(
    anchor,
    positive,
    negative,
    margin=0.1,
    p=2,
    use_cosine=False,
    swap=False,
    eps=1e-6,
    scope='',
    reduction=tf.losses.Reduction.SUM
):
    """
    Computes the triplet margin loss

    Args:
        anchor: The tensor containing the anchor embeddings
        positive: The tensor containing the positive embeddings
        negative: The tensor containing the negative embeddings
            The shapes of anchor, positive and negative must all be equal
        margin: The margin in the triplet loss
        p: The norm degree for pairwise distances
            Options: 1, 2
            Default: 2
        use_cosine: Should cosine distance be used?
        swap: Should we swap anchor and positive to get the harder negative?
        eps: A value used to prevent numerical instability
        reduction: The reduction method to use
    """
    assert anchor.shape == positive.shape == negative.shape
    assert p in {1, 2}

    if use_cosine:
        def dist_fn(labels, preds):
            return tf.losses.cosine_distance(
                labels, preds, axis=1, reduction=tf.losses.Reduction.NONE
            )
    elif p == 2:
        def dist_fn(labels, preds):
            return tf.losses.mean_squared_error(
                labels, preds, reduction=tf.losses.Reduction.NONE
            )
    elif p == 1:
        def dist_fn(labels, preds):
            return tf.losses.absolute_difference(
                labels, preds, reduction=tf.losses.Reduction.NONE
            )
    else:
        raise NotImplementedError()

    with tf.variable_scope(scope):
        pdist = dist_fn(anchor, positive)
        ndist = dist_fn(anchor, negative)

        if swap:
            # ndist_2 is the distance between positive and negative
            ndist_2 = dist_fn(positive, negative)
            ndist = tf.maximum(ndist, ndist_2)

        loss = tf.maximum(pdist - ndist + margin, 0)

        if reduction == tf.losses.Reduction.NONE:
            return loss
        elif reduction == tf.losses.Reduction.SUM:
            return tf.reduce_sum(loss)
        elif reduction == tf.losses.Reduction.MEAN:
            return tf.reduce_mean(loss)
        elif reduction == tf.losses.Reduction.SUM_OVER_BATCH_SIZE:
            return tf.reduce_sum(loss) / tf.cast(tf.shape(anchor)[0], loss.dtype)
        elif reduction == tf.losses.Reduction.SUM_BY_NONZERO_WEIGHTS:
            # Count only the triplets with a non-zero loss.
            nonzero = tf.reduce_sum(tf.cast(tf.greater(loss, 0), loss.dtype))
            return tf.reduce_sum(loss) / nonzero
        else:
            msg = '{} has not been implemented for triplet_margin_loss'.format(
                reduction)
            raise NotImplementedError(msg)
55e85a9ae98ab57458ae1a61a1dbd445deddd7cb
13,900
def f_raw(x, a, b):
    """
    The raw function call, performs no checks on valid parameters..
    :return:
    """
    return a * x + b
89bbe9e7a08e3bf4bf37c3efa695ed20fdca95c5
13,901
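f_raw above matches the f(x, *params) signature expected by scipy.optimize.curve_fit, so a line fit could look like the following sketch (the data values are invented):

import numpy as np
from scipy.optimize import curve_fit

x = np.array([0.0, 1.0, 2.0, 3.0])
y = 2.0 * x + 1.0 + np.array([0.01, -0.02, 0.03, -0.01])  # noisy line
(a_fit, b_fit), _ = curve_fit(f_raw, x, y)
print(a_fit, b_fit)   # close to 2 and 1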
from pyapprox.cython.barycentric_interpolation import \
    compute_barycentric_weights_1d_pyx


def compute_barycentric_weights_1d(samples, interval_length=None,
                                   return_sequence=False,
                                   normalize_weights=False):
    """
    Return barycentric weights for a sequence of samples. e.g. of sequence
    x0,x1,x2 where order represents the order in which the samples are added
    to the interpolant.

    Parameters
    ----------
    return_sequence : boolean
        True  - return [1],[1/(x0-x1),1/(x1-x0)],
                [1/((x0-x2)(x0-x1)),1/((x1-x2)(x1-x0)),1/((x2-x1)(x2-x0))]
        False - return [1/((x0-x2)(x0-x1)),1/((x1-x2)(x1-x0)),1/((x2-x1)(x2-x0))]

    Note
    ----
    If length of interval [a,b]=4C then weights will grow or decay
    exponentially at C^{-n} where n is number of points causing overflow or
    underflow.

    To minimize this effect multiply each x_j-x_k by C^{-1}. This has effect
    of rescaling all weights by C^n. In rare situations where n is so large
    randomize or use Leja ordering of the samples before computing weights.
    See Barycentric Lagrange Interpolation by Jean-Paul Berrut and
    Lloyd N. Trefethen 2004
    """
    if interval_length is None:
        scaling_factor = 1.
    else:
        scaling_factor = interval_length/4.

    C_inv = 1/scaling_factor
    num_samples = samples.shape[0]

    try:
        compute_barycentric_weights_1d_pyx
        weights = compute_barycentric_weights_1d_pyx(samples, C_inv)
    except (ImportError, ModuleNotFoundError) as e:
        msg = 'compute_barycentric_weights_1d extension failed'
        trace_error_with_msg(msg, e)

        weights = np.empty((num_samples, num_samples), dtype=float)
        weights[0, 0] = 1.
        for jj in range(1, num_samples):
            weights[jj, :jj] = C_inv * \
                (samples[:jj]-samples[jj])*weights[jj-1, :jj]
            weights[jj, jj] = np.prod(C_inv*(samples[jj]-samples[:jj]))
            weights[jj-1, :jj] = 1./weights[jj-1, :jj]

        weights[num_samples-1, :num_samples] = \
            1./weights[num_samples-1, :num_samples]

    if not return_sequence:
        result = weights[num_samples-1, :]
        # make sure magintude of weights is approximately O(1)
        # useful to sample sets like leja for gaussian variables
        # where interval [a,b] is not very useful
        # print('max_weights',result.min(),result.max())
        if normalize_weights:
            raise NotImplementedError('I do not think I want to support this option')
            result /= np.absolute(result).max()
            # result[I]=result
    else:
        result = weights

    assert np.all(np.isfinite(result)), (num_samples)
    return result
1711328af31b756c040455e0b03363def08e6504
13,902
import collections def _generate_conversions(): """ Generate conversions for unit systems. """ # conversions to inches to_inch = {'microinches': 1.0 / 1000.0, 'mils': 1.0 / 1000.0, 'inches': 1.00, 'feet': 12.0, 'yards': 36.0, 'miles': 63360, 'angstroms': 1.0 / 2.54e8, 'nanometers': 1.0 / 2.54e7, 'microns': 1.0 / 2.54e4, 'millimeters': 1.0 / 2.54e1, 'centimeters': 1.0 / 2.54e0, 'meters': 1.0 / 2.54e-2, 'kilometers': 1.0 / 2.54e-5, 'decimeters': 1.0 / 2.54e-1, 'decameters': 1.0 / 2.54e-3, 'hectometers': 1.0 / 2.54e-4, 'gigameters': 1.0 / 2.54e-11, 'AU': 5889679948818.897, 'light years': 3.72461748e17, 'parsecs': 1.21483369e18} # if a unit is known by other symbols, include them here synonyms = collections.defaultdict(list) synonyms.update({'millimeters': ['mm'], 'inches': ['in', '"'], 'feet': ["'"], 'meters': ['m']}) # add non- plural version of units to conversions # eg, millimeters -> millimeter for key in to_inch.keys(): if key[-2:] == 'es' and key != 'miles': synonyms[key].append(key[:-2]) elif key[-1] == 's': synonyms[key].append(key[:-1]) # update the dict with synonyms for key, new_keys in synonyms.items(): value = to_inch[key] for new_key in new_keys: to_inch[new_key] = value # convert back to regular dictionary and make keys all lower case to_inch = {k.strip().lower(): v for k, v in to_inch.items()} return to_inch
8fa4f625e693fe352b2bba0082d0b18c46f5bec1
13,903
def _ifail(repo, mynode, orig, fcd, fco, fca, toolconf):
    """
    Rather than attempting to merge files that were modified on both
    branches, it marks them as unresolved. The resolve command must be
    used to resolve these conflicts."""
    return 1
278bb52f96e1a82ce9966626be08bc6fdd0df65d
13,904
from typing import Pattern from typing import Optional from typing import Callable from typing import Union import logging def parser( text: str, *, field: str, pattern: Pattern[str], type_converter: Optional[Callable] = None, clean_up: Optional[Callable] = None, limit_size: Optional[int] = None, null_value: Optional[Union[str, int, bool, None]] = None, ) -> str: """ Returns text based on regex pattern and other provided conditions. :param text: Str. Text to parse. :param field: Str. Label for output info, eg 'charges', 'bail'. :param pattern: Pattern. Regex, compiled pattern used to search. :param type_converter: Callable. Optional. Set type for return value. Defaults to string converter. :param clean_up: Callable. Optional. Function that does any final formatting. :param limit_size: Int. Optional. Max number of chars in returned string. :param null_value: Any. Optional. Value to set when parse conditions aren't met. Default None. :return: Str. Desired pattern in text. """ # set default if no type converter func is provided if not type_converter: type_converter = lambda x: str(x) # parse logging.info("Attempting to extract charges from text with Regex...") try: match = pattern.search(text) final_value = match.group(field) logging.info(f"{field.upper()}, FIRST PASS: {final_value}") # Options if clean_up: final_value = clean_up(final_value) if limit_size: final_value = final_value[0:limit_size] # Trim final_value = final_value.strip() # Type final_value = type_converter(final_value) except (AttributeError, ValueError) as e: logging.info( "Parsing failed or couldn't find target value - setting " "to None" ) final_value = null_value logging.info(f"{field.upper()}, FINAL: {final_value}") return final_value
0b44fecf252399b3109efedffe0f561809982ea6
13,905
def colorize(text='', opts=(), **kwargs): """ Return your text, enclosed in ANSI graphics codes. Depends on the keyword arguments 'fg' and 'bg', and the contents of the opts tuple/list. Return the RESET code if no parameters are given. Valid colors: 'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white' Valid options: 'bold' 'underscore' 'blink' 'reverse' 'conceal' 'noreset' - string will not be auto-terminated with the RESET code Examples: colorize('hello', fg='red', bg='blue', opts=('blink',)) colorize() colorize('goodbye', opts=('underscore',)) print(colorize('first line', fg='red', opts=('noreset',))) print('this should be red too') print(colorize('and so should this')) print('this should not be red') """ code_list = [] if text == '' and len(opts) == 1 and opts[0] == 'reset': return '\x1b[%sm' % RESET for k, v in kwargs.items(): if k == 'fg': code_list.append(foreground[v]) elif k == 'bg': code_list.append(background[v]) for o in opts: if o in opt_dict: code_list.append(opt_dict[o]) if 'noreset' not in opts: text = '%s\x1b[%sm' % (text or '', RESET) return '%s%s' % (('\x1b[%sm' % ';'.join(code_list)), text or '')
02ad24710413770cebdaa4265a1d40c69212ecc8
13,907
# Note: the original row imported "from re import T", which does not exist.
# The calls to T.Compose/T.ToTensor below match torchvision.transforms, so
# that import (and PIL for Image.open) is assumed here.
from torchvision import transforms as T
from PIL import Image


def get_prediction(img_path, threshold):
    """
    get_prediction
      parameters:
        - img_path - path of the input image
        - threshold - threshold value for prediction score
      method:
        - Image is obtained from the image path
        - the image is converted to image tensor using PyTorch's Transforms
        - image is passed through the model to get the predictions
        - class, box coordinates are obtained, but only prediction score > threshold
          are chosen.
    """
    img = Image.open(img_path)
    transform = T.Compose([T.ToTensor()])
    img = transform(img)
    pred = model([img])
    pred_class = [COCO_INSTANCE_CATEGORY_NAMES[i] for i in list(pred[0]['labels'].numpy())]
    pred_boxes = [[(i[0], i[1]), (i[2], i[3])] for i in list(pred[0]['boxes'].detach().numpy())]
    pred_score = list(pred[0]['scores'].detach().numpy())
    pred_t = [pred_score.index(x) for x in pred_score if x > threshold][-1]
    pred_boxes = pred_boxes[:pred_t + 1]
    pred_class = pred_class[:pred_t + 1]
    return pred_boxes, pred_class
d6df91fb464b072b06ef759ad53aa00fb7d624ec
13,908
def make_fixed_size(protein, shape_schema, msa_cluster_size, extra_msa_size,
                    num_res, num_templates=0):
    """Guess at the MSA and sequence dimensions to make fixed size."""
    pad_size_map = {
        NUM_RES: num_res,
        NUM_MSA_SEQ: msa_cluster_size,
        NUM_EXTRA_SEQ: extra_msa_size,
        NUM_TEMPLATES: num_templates,
    }

    for k, v in protein.items():
        if k == 'extra_cluster_assignment':
            continue
        shape = list(v.shape)
        schema = shape_schema[k]
        assert len(shape) == len(schema), f'Rank mismatch between ' + \
            f'shape and shape schema for {k}: {shape} vs {schema}'
        pad_size = [pad_size_map.get(s2, None) or s1 for (s1, s2) in zip(shape, schema)]
        padding = [(0, p - v.shape[i]) for i, p in enumerate(pad_size)]
        if padding:
            protein[k] = np.pad(v, padding)
            # reshape returns a new array, so assign the result back.
            protein[k] = protein[k].reshape(pad_size)
    return protein
f74306815dd7cd5291305c7b5c67cae4625c4d38
13,909
def plot_skymap_tract(skyMap, tract=0, title=None, ax=None): """ Plot a tract from a skyMap. Parameters ---------- skyMap: lsst.skyMap.SkyMap The SkyMap object containing the tract and patch information. tract: int [0] The tract id of the desired tract to plot. title: str [None] Title of the tract plot. If None, the use `tract <id>`. ax: matplotlib.axes._subplots.AxesSubplot [None] The subplot object to contain the tract plot. If None, then make a new one. Returns ------- matplotlib.axes._subplots.AxesSubplot: The subplot containing the tract plot. """ if title is None: title = 'tract {}'.format(tract) tractInfo = skyMap[tract] tractBox = afw_geom.Box2D(tractInfo.getBBox()) tractPosList = tractBox.getCorners() wcs = tractInfo.getWcs() xNum, yNum = tractInfo.getNumPatches() if ax is None: fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) tract_center = wcs.pixelToSky(tractBox.getCenter()) .getPosition(afw_geom.degrees) ax.text(tract_center[0], tract_center[1], '%d' % tract, size=16, ha="center", va="center", color='blue') for x in range(xNum): for y in range(yNum): patchInfo = tractInfo.getPatchInfo([x, y]) patchBox = afw_geom.Box2D(patchInfo.getOuterBBox()) pixelPatchList = patchBox.getCorners() path = make_patch(pixelPatchList, wcs) patch = patches.PathPatch(path, alpha=0.1, lw=1) ax.add_patch(patch) center = wcs.pixelToSky(patchBox.getCenter()) .getPosition(afw_geom.degrees) ax.text(center[0], center[1], '%d,%d'%(x,y), size=6, ha="center", va="center") skyPosList = [wcs.pixelToSky(pos).getPosition(afw_geom.degrees) for pos in tractPosList] ax.set_xlim(max(coord[0] for coord in skyPosList) + 1, min(coord[0] for coord in skyPosList) - 1) ax.set_ylim(min(coord[1] for coord in skyPosList) - 1, max(coord[1] for coord in skyPosList) + 1) ax.grid(ls=':',color='gray') ax.set_xlabel("RA (deg.)") ax.set_ylabel("Dec (deg.)") ax.set_title(title) return ax
a8f1b25d8afedfbb0ed643b7954e615932031419
13,910
import json


def label(vertex):
    """ Graph vertex label in dot format """
    label = f"{vertex.name} {vertex.state or ''}\n{vertex.traceback or ''}"
    label = json.dumps(label).replace("\\n", r"\l")
    return f"[label={label}]"
a8604cfd837afbdba8b8ee7666d81df4b015ad2a
13,911
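# Sketch of using label() above to emit a Graphviz dot file; the Vertex type here is a
# hypothetical stand-in with the name/state/traceback attributes the function expects.
from collections import namedtuple

Vertex = namedtuple('Vertex', 'name state traceback')
vertices = [
    Vertex('build', 'done', None),
    Vertex('deploy', 'failed', 'TimeoutError'),
]
lines = ['digraph tasks {']
for v in vertices:
    lines.append(f'  "{v.name}" {label(v)};')
lines.append('}')
print('\n'.join(lines))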
import six import hashlib def compute_hashes_from_fileobj(fileobj, chunk_size=1024 * 1024): """Compute the linear and tree hash from a fileobj. This function will compute the linear/tree hash of a fileobj in a single pass through the fileobj. :param fileobj: A file like object. :param chunk_size: The size of the chunks to use for the tree hash. This is also the buffer size used to read from `fileobj`. :rtype: tuple :return: A tuple of (linear_hash, tree_hash). Both hashes are returned in hex. """ # Python 3+, not binary if six.PY3 and hasattr(fileobj, 'mode') and 'b' not in fileobj.mode: raise ValueError('File-like object must be opened in binary mode!') linear_hash = hashlib.sha256() chunks = [] chunk = fileobj.read(chunk_size) while chunk: # It's possible to get a file-like object that has no mode (checked # above) and returns something other than bytes (e.g. str). So here # we try to catch that and encode to bytes. if not isinstance(chunk, bytes): chunk = chunk.encode(getattr(fileobj, 'encoding', '') or 'utf-8') linear_hash.update(chunk) chunks.append(hashlib.sha256(chunk).digest()) chunk = fileobj.read(chunk_size) if not chunks: chunks = [hashlib.sha256(b'').digest()] return linear_hash.hexdigest(), bytes_to_hex(tree_hash(chunks))
8c6aed21ae59ecb3e5449ee0856be1d032108aa6
13,912
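# The record above relies on two helpers that are not shown. Below is a plausible sketch
# of what they do, following the Amazon Glacier tree-hash scheme this code appears to
# implement; treat these as assumptions, not the library's actual source.
import binascii
import hashlib


def bytes_to_hex(data):
    """Hex-encode a bytes value and return it as a str."""
    return binascii.hexlify(data).decode('ascii')


def tree_hash(fo_hashes):
    """Reduce a list of SHA-256 chunk digests pairwise until one root digest remains."""
    hashes = list(fo_hashes)
    while len(hashes) > 1:
        new_hashes = []
        # Hash adjacent pairs together; an odd trailing digest is carried up unchanged.
        for i in range(0, len(hashes), 2):
            if i + 1 < len(hashes):
                new_hashes.append(hashlib.sha256(hashes[i] + hashes[i + 1]).digest())
            else:
                new_hashes.append(hashes[i])
        hashes = new_hashes
    return hashes[0]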
def imshow(axim, img, amp_range=None, extent=None,\ interpolation='nearest', aspect='auto', origin='upper',\ orientation='horizontal', cmap='jet') : """ extent - list of four image physical limits for labeling, cmap: 'gray_r' #axim.cla() """ imsh = axim.imshow(img, interpolation=interpolation, aspect=aspect, origin=origin, extent=extent, cmap=cmap) if amp_range is not None : imsh.set_clim(amp_range[0],amp_range[1]) return imsh
3483690b01c5d182877c3bf944fa5409d4cb9e69
13,913
def get_total(): """ Return the rounded total as properly rounded string. Credits: https://github.com/dbrgn/coverage-badge """ cov = coverage.Coverage() cov.load() total = cov.report(file=Devnull()) class Precision(coverage.results.Numbers): """ A class for using the percentage rounding of the main coverage package, with any percentage. To get the string format of the percentage, use the ``pc_covered_str`` property. """ def __init__(self, percent): self.percent = percent @property def pc_covered(self): return self.percent return Precision(total).pc_covered_str
9df511f0d895721061642c2fb88268490e27cc0b
13,914
def _infer_subscript_list(context, index): """ Handles slices in subscript nodes. """ if index == ':': # Like array[:] return ValueSet([iterable.Slice(context, None, None, None)]) elif index.type == 'subscript' and not index.children[0] == '.': # subscript basically implies a slice operation, except for Python 2's # Ellipsis. # e.g. array[:3] result = [] for el in index.children: if el == ':': if not result: result.append(None) elif el.type == 'sliceop': if len(el.children) == 2: result.append(el.children[1]) else: result.append(el) result += [None] * (3 - len(result)) return ValueSet([iterable.Slice(context, *result)]) elif index.type == 'subscriptlist': return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)]) # No slices return context.infer_node(index)
bde1de5e7604d51e6c85e429ceb2102d79e91ca6
13,915
def count_by_guess(dictionary, correctly=False): """ Count the number of correctly/incorrectly guessed images for a dataset :param dictionary: :param correctly: :return: """ guessed = 0 for response in dictionary: guessed = guessed + count_by_guess_user(response, correctly) return guessed
d1328a63d3029707131f1932be1535dabb62ab66
13,916
def get_game_by_index(statscursor, table, index): """ Holds get_game_by_index db related data """ query = "SELECT * FROM " + table + " WHERE num=:num" statscursor.execute(query, {'num': index}) return statscursor.fetchone()
754a83f2281ad095ffc32eb8a03c95490bd5f815
13,917
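# Self-contained usage sketch for get_game_by_index above, using an in-memory SQLite
# database; the table layout and game data are made up for illustration.
import sqlite3

conn = sqlite3.connect(':memory:')
statscursor = conn.cursor()
statscursor.execute(
    'CREATE TABLE games (num INTEGER, home TEXT, away TEXT, home_score INTEGER, away_score INTEGER)')
statscursor.executemany(
    'INSERT INTO games VALUES (?, ?, ?, ?, ?)',
    [(1, 'Lions', 'Bears', 21, 14), (2, 'Hawks', 'Owls', 7, 10)],
)
print(get_game_by_index(statscursor, 'games', 2))  # (2, 'Hawks', 'Owls', 7, 10)
conn.close()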
import boto3


def create_queue():
    """Create the SQS queue and return the SQS client and queue resource."""
    # CONFIG and QUEUE_NAME are expected to be defined at module level.
    conn = boto3.client('sqs', region_name=CONFIG['region'])
    queue_metadata = conn.create_queue(QueueName=QUEUE_NAME, Attributes={'VisibilityTimeout': '3600'})
    if 'queue_tags' in CONFIG:
        conn.tag_queue(QueueUrl=queue_metadata['QueueUrl'], Tags=CONFIG['queue_tags'])
    # Get the SQS queue object from the queue URL.
    sqs = boto3.resource('sqs', region_name=CONFIG['region'])
    queue = sqs.Queue(queue_metadata['QueueUrl'])
    return conn, queue
ae61c542182bc1238b76bf94991e50809bace595
13,918
def db_describe(table, **args): """Return the list of columns for a database table (interface to `db.describe -c`). Example: >>> run_command('g.copy', vector='firestations,myfirestations') 0 >>> db_describe('myfirestations') # doctest: +ELLIPSIS {'nrows': 71, 'cols': [['cat', 'INTEGER', '20'], ... 'ncols': 22} >>> run_command('g.remove', flags='f', type='vector', name='myfirestations') 0 :param str table: table name :param list args: :return: parsed module output """ if 'database' in args and args['database'] == '': args.pop('database') if 'driver' in args and args['driver'] == '': args.pop('driver') s = read_command('db.describe', flags='c', table=table, **args) if not s: fatal(_("Unable to describe table <%s>") % table) cols = [] result = {} for l in s.splitlines(): f = l.split(':') key = f[0] f[1] = f[1].lstrip(' ') if key.startswith('Column '): n = int(key.split(' ')[1]) cols.insert(n, f[1:]) elif key in ['ncols', 'nrows']: result[key] = int(f[1]) else: result[key] = f[1:] result['cols'] = cols return result
6265a2f6dcc26fcd1fcebb5ead23abfb37cfa179
13,919
def objective_func(x, cs_objects, cs_data): """ Define the objective function :param x: 1D array containing the voltages to be set :param args: tuple containing all extra parameters needed :return: average count rate for 100 shots """ x = np.around(x,2) try: flag_range = 0 for i in xrange(len(x)): if (x[i] <= float(cs_objects[i,4])) or (x[i] >= float(cs_objects[i,5])): flag_range = 1 raise ValueError for i in xrange(len(x)): if flag_range == 0: if int(cs_objects[i,2]) != -1: cs.call_process2(cs_objects[i,0], cs_objects[i,1], "I:1,D:1", cs.pack_ch_val([int(cs_objects[i,2])], [x[i]])) else: cs.call_process2(cs_objects[i,0], cs_objects[i,1], "D:1", cs.pack_val([x[i]])) else: return time.sleep(1) flag = 0 value = total_counts(flag, *cs_data) # value = scop.rosen(x) return value except ValueError: print "Value error : value went out of bound"
677b6455b0db177a3a4f716ced3dd309c711cf74
13,920
def getHPELTraceLogAttribute(nodename, servername, attributename): """ This function returns an attribute of the HPEL Trace Log for the specified server. Function parameters: nodename - the name of the node on which the server to be configured resides. servername - the name of the server whose HPEL Trace is to be configured. attributename - the following attribute names can be specified: - 'dataDirectory' - Specifies the name of the directory where the HPEL logs will be stored. - 'bufferingEnabled' - Specifies whether or not log record buffering should be enabled. Valid values are 'true' and 'false'. - 'fileSwitchEnabled' - Specifies whether or not a new log file should be started each day. Valid values are 'true' and 'false'. - 'fileSwitchTime' - If 'fileSwitchEnabled' is set to 'true', this field specifies the time that new log file should be started. A value from 0 - 23 should be specified. A value of 0 means 12 AM 1 means 1 AM, 2 means 2 AM, ..., 23 means 11 PM. If a value greater than 23 is entered, this field will be set to 0 (12 AM). - 'memoryBufferSize' - Specifies the size (in MB) of the memory trace buffer. - 'outOfSpaceAction' - Specifies which action to take if the hard disk runs out of space. Valid values are 'StopLogging', 'StopServer', and 'PurgeOld'. - 'purgeBySizeEnabled' - Specifies whether or not to purge the logs based on size. Valid values are 'true' and 'false'. - 'purgeByTimeEnabled' - Specifies whether or not to purge the logs based on time. Valid values are 'true' and 'false'. - 'purgeMaxSize' - Specifies the maximum total size of the logs (in MB). - 'purgeMinTime' - Specifies the minimum amount of time to keep the logs (in hours). - 'storageType' - Specifies whether the trace log should be written to a directory or to memory. Valid values are 'DIRECTORY' and 'MEMORYBUFFER'. """ m = "getHPELTraceLogAttribute:" sop (m, "Entering function...") sop (m, "Calling getNodeId() with nodename = %s." % (nodename)) nodeID = getNodeId(nodename) sop (m, "Returned from getNodeID; returned nodeID = %s" % nodeID) if nodeID == "": raise "Could not find node name '%s'" % (nodename) else: sop (m, "Calling getServerId() with nodename = %s and servername = %s." % (nodename, servername)) serverID = getServerId(nodename, servername) sop (m, "Returned from getServerID; returned serverID = %s" % serverID) if serverID == None: raise "Could not find server '%s' on node '%s'" % (servername, nodename) else: serviceName = "HighPerformanceExtensibleLogging" sop (m, "Calling AdminConfig.list with serviceName = %s and serverID = %s." % (serviceName, serverID)) HPELID = AdminConfig.list(serviceName, serverID) sop (m, "Returned from AdminConfig.list; HPELID = %s" % HPELID) sop (m, "Calling AdminConfig.list to get the config ID of the HPEL Trace object.") HPELTraceID = AdminConfig.list("HPELTrace", HPELID) sop (m, "Returned from AdminConfig.list; HPELTraceID = %s" % HPELTraceID) sop(m, "Calling AdminConfig.showAttribute to get the value of attribute = %s" % ( attributename )) attributevalue = AdminConfig.showAttribute(HPELTraceID, attributename) sop (m, "Returned from AdminConfig.showAttribute; attributevalue = %s" % ( attributevalue )) sop (m, "Exiting function...") return attributevalue #endif #endif
8003066ec41ee07dab311690d0687d7f79e6952a
13,921
import pandas as pd


def dispersionTable(adata):
    """
    Parameters
    ----------
    adata
        Annotated data object with dispersion-fit results stored under
        ``adata.uns["ispFitInfo"]["blind"]``.

    Returns
    -------
    disp_df: :class:`pandas.DataFrame`
        DataFrame with gene_id, mean_expression, dispersion_fit and
        dispersion_empirical columns.
    """
    if adata.uns["ispFitInfo"]["blind"] is None:
        raise RuntimeError(
            "No dispersion model found. Please call estimateDispersions() before calling this function.")

    disp_df = pd.DataFrame({"gene_id": adata.uns["ispFitInfo"]["blind"]["disp_table"]["gene_id"],
                            "mean_expression": adata.uns["ispFitInfo"]["blind"]["disp_table"]["mu"],
                            "dispersion_fit": adata.uns["ispFitInfo"]["blind"]["disp_table"]["blind"]["mu"],
                            "dispersion_empirical": adata.uns["ispFitInfo"]["blind"]["disp_table"]["disp"]})

    return disp_df
7f7b4c122ffc42402248ec55155c774c77fbad51
13,922
def L10_indicator(row): """ Determine the Indicator of L10 as one of five indicators """ if row < 40: return "Excellent" elif row < 50: return "Good" elif row < 61: return "Fair" elif row <= 85: return "Poor" else: return "Hazard"
10656a76e72f99f542fd3a4bc2481f0ef7041fa9
13,923
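# Usage sketch: applying L10_indicator above to a pandas Series of noise-level readings;
# the sample values are made up for illustration.
import pandas as pd

readings = pd.Series([35.2, 47.9, 58.0, 72.5, 90.1], name='L10')
print(readings.apply(L10_indicator).tolist())
# ['Excellent', 'Good', 'Fair', 'Poor', 'Hazard']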
def create_ip_record(
    heartbeat_df: pd.DataFrame, az_net_df: pd.DataFrame = None
) -> IpAddress:
    """
    Generate ip_entity record for provided IP value.

    Parameters
    ----------
    heartbeat_df : pd.DataFrame
        A dataframe of heartbeat data for the host
    az_net_df : pd.DataFrame
        Optional dataframe of Azure network data for the host

    Returns
    -------
    IpAddress
        Details of the IP data collected

    """
    ip_entity = IpAddress()

    # Produce ip_entity record using available dataframes
    ip_hb = heartbeat_df.iloc[0]
    ip_entity.Address = ip_hb["ComputerIP"]
    ip_entity.hostname = ip_hb["Computer"]  # type: ignore
    ip_entity.SourceComputerId = ip_hb["SourceComputerId"]  # type: ignore
    ip_entity.OSType = ip_hb["OSType"]  # type: ignore
    ip_entity.OSName = ip_hb["OSName"]  # type: ignore
    ip_entity.OSVMajorVersion = ip_hb["OSMajorVersion"]  # type: ignore
    ip_entity.OSVMinorVersion = ip_hb["OSMinorVersion"]  # type: ignore
    ip_entity.ComputerEnvironment = ip_hb["ComputerEnvironment"]  # type: ignore
    ip_entity.OmsSolutions = [  # type: ignore
        sol.strip() for sol in ip_hb["Solutions"].split(",")
    ]
    ip_entity.VMUUID = ip_hb["VMUUID"]  # type: ignore
    ip_entity.SubscriptionId = ip_hb["SubscriptionId"]  # type: ignore
    geoloc_entity = GeoLocation()  # type: ignore
    geoloc_entity.CountryName = ip_hb["RemoteIPCountry"]  # type: ignore
    geoloc_entity.Longitude = ip_hb["RemoteIPLongitude"]  # type: ignore
    geoloc_entity.Latitude = ip_hb["RemoteIPLatitude"]  # type: ignore
    ip_entity.Location = geoloc_entity  # type: ignore

    # If Azure network data present add this to host record
    if az_net_df is not None and not az_net_df.empty:
        if len(az_net_df) == 1:
            priv_addr_str = az_net_df["PrivateIPAddresses"].loc[0]
            ip_entity["private_ips"] = convert_to_ip_entities(priv_addr_str)
            pub_addr_str = az_net_df["PublicIPAddresses"].loc[0]
            ip_entity["public_ips"] = convert_to_ip_entities(pub_addr_str)
        else:
            if "private_ips" not in ip_entity:
                ip_entity["private_ips"] = []
            if "public_ips" not in ip_entity:
                ip_entity["public_ips"] = []

    return ip_entity
63deb15081f933b0a445d22eed25646782af4221
13,924
import re def extract_version(version_file_name): """Extracts the version from a python file. The statement setting the __version__ variable must not be indented. Comments after that statement are allowed. """ regex = re.compile(r"^__version__\s*=\s*['\"]([^'\"]*)['\"]\s*(#.*)?$") with open(version_file_name, "r") as version_file: lines = version_file.read().splitlines() for line in reversed(lines): version_match = regex.match(line) if version_match: return version_match.group(1) else: raise RuntimeError("Unable to find version string.")
1cc70ba4bf69656bb8d210a49c236e38eba59513
13,925
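# Self-contained usage sketch for extract_version above: write a throwaway version module
# to a temporary directory and read the version string back out of it.
import os
import tempfile

with tempfile.TemporaryDirectory() as tmpdir:
    version_file = os.path.join(tmpdir, '_version.py')
    with open(version_file, 'w') as handle:
        handle.write('__version__ = "1.2.3"  # bump on release\n')
    print(extract_version(version_file))  # 1.2.3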
import numpy as np


def powerlaw_loglike(data, theta):
    """Return the natural logarithm of the likelihood P(data | theta) for our model
    of the ice flow.

    data is expected to be a tuple of numpy arrays = (x, y, sigma)
    theta is expected to be an array of parameters = (intercept, slope)
    """
    x, y, sigma = data
    n = len(x)
    # powerlaw_model is expected to be defined elsewhere in the module.
    model = powerlaw_model(x, theta)
    lnlike = -0.5 * (n * np.log(2. * np.pi)
                     + np.sum(2. * np.log(sigma) + (y - model)**2 / sigma**2))
    return lnlike
98650e66d2a16762b2534be9083b6b92e0d9e9fd
13,926
def get_conv(dim=3): """Chooses an implementation for a convolution layer.""" if dim == 3: return nn.Conv3d elif dim == 2: return nn.Conv2d else: raise ValueError('dim has to be 2 or 3')
4152984ecf7220dc4693013ee567822a2487e225
13,927
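# Usage sketch for get_conv above: pick the convolution class for the data dimensionality
# and instantiate it; the channel counts and input size are arbitrary.
import torch
import torch.nn as nn

conv_cls = get_conv(dim=2)                      # -> nn.Conv2d
conv = conv_cls(3, 16, kernel_size=3, padding=1)
out = conv(torch.randn(1, 3, 32, 32))
print(out.shape)  # torch.Size([1, 16, 32, 32])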
async def create_mute_role(bot, ctx): """Create the mute role for a guild""" perms = discord.Permissions( send_messages=False, read_messages=True) mute_role = await ctx.guild.create_role( name='Muted', permissions=perms, reason='Could not find a muted role in the process of muting or unmuting.') await bot.config.update_one({"_id": ctx.guild.id}, {'$set': {"mute_role": mute_role.id}}, upsert=True) for channel in ctx.guild.channels: try: await channel.set_permissions(mute_role, read_messages=True, send_messages=False) except discord.Forbidden: continue except discord.HTTPException: continue return mute_role
9128de3a7f4f841e47531699a878a1c18d8be9d5
13,930
import json import uuid def build_request_data(useralias, req_node): """build_request_data :param useralias: user alias for directory name :param req_node: simulated request node """ if "file" not in req_node: return None use_uniques = req_node["unique_names"] use_file = req_node["file"].format( useralias) use_data = json.loads(open(use_file, 'r').read()) if use_uniques: if "title" in use_data: use_data["title"] = "{}_{}".format( use_data["title"], str(uuid.uuid4())) if "full_file" in use_data: use_data["full_file"] = \ use_data["full_file"].format( str(uuid.uuid4())) if "clean_file" in use_data: use_data["clean_file"] = \ use_data["clean_file"].format( str(uuid.uuid4())) if "csv_file" in use_data: use_data["csv_file"] = \ use_data["csv_file"].format( str(uuid.uuid4())) if "meta_file" in use_data: use_data["meta_file"] = \ use_data["meta_file"].format( str(uuid.uuid4())) if "meta_suffix" in use_data: use_data["meta_suffix"] = \ use_data["meta_suffix"].format( str(uuid.uuid4())) return use_data
938c79c290e1e4c086e6d48f71cbd0b965d36b36
13,931
def _get_stmt_lists(self): """ Returns a tuple of the statement lists contained in this `ast.stmt` node. This method should only be called by an `ast.stmt` node. """ if self.is_simple(): return () elif self.is_body(): return (self.body,) elif self.is_body_orelse(): return (self.body, self.orelse) elif self.is_body_finally(): return (self.body, self.finalbody) else: # Every statement has to be simple or complex. assert(False)
0ec85481bc4261ae77ced0ae32c72081ef80c651
13,932
def get_article(name): """a general function to get an article, returns None if doesn't exist """ article = None if name is not None: try: article = Article.objects.get(name=name) except Article.DoesNotExist: pass return article
d69e801a1d18ccf81753cc35ce2afa645b304fba
13,933
def abbreviateLab(lab): """Lab names are very long and sometimes differ by punctuation or typos. Abbreviate for easier comparison.""" labAbbrev = apostropheSRe.sub('', lab) labAbbrev = firstLetterRe.sub(r'\1', labAbbrev, count=0) labAbbrev = spacePunctRe.sub('', labAbbrev, count=0) return labAbbrev
dce4a1d0f6302a2968fe701d067b209fb61b8930
13,935
def backproject(depth, intrinsics, instance_mask): """ Back-projection, use opencv camera coordinate frame. """ cam_fx = intrinsics[0, 0] cam_fy = intrinsics[1, 1] cam_cx = intrinsics[0, 2] cam_cy = intrinsics[1, 2] non_zero_mask = (depth > 0) final_instance_mask = np.logical_and(instance_mask, non_zero_mask) idxs = np.where(final_instance_mask) z = depth[idxs[0], idxs[1]] x = (idxs[1] - cam_cx) * z / cam_fx y = (idxs[0] - cam_cy) * z / cam_fy pts = np.stack((x, y, z), axis=1) return pts, idxs
9828197b646342ec76cc21b1083540d0fe62978f
13,936
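# Self-contained sketch for backproject above, using a tiny synthetic depth map and a
# made-up pinhole intrinsics matrix (fx=fy=100, cx=cy=2).
import numpy as np

depth = np.zeros((4, 4), dtype=np.float32)
depth[1:3, 1:3] = 2.0                      # a small 2x2 patch at 2 m depth
intrinsics = np.array([[100.0, 0.0, 2.0],
                       [0.0, 100.0, 2.0],
                       [0.0, 0.0, 1.0]])
instance_mask = depth > 0                  # pretend the whole patch is one instance
pts, idxs = backproject(depth, intrinsics, instance_mask)
print(pts.shape)   # (4, 3) -> one 3D point per masked pixel
print(pts[:, 2])   # all points sit at z = 2.0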
def if_any( _data, *args, _names=None, _context=None, **kwargs, ): """Apply the same predicate function to a selection of columns and combine the results True if any element is True. See Also: [`across()`](datar.dplyr.across.across) """ if not args: args = (None, None) elif len(args) == 1: args = (args[0], None) _cols, _fns, *args = args _data = _context.meta.get("input_data", _data) return IfAny( _data, _cols, _fns, _names, args, kwargs, ).evaluate(_context)
41bf4a14cc8b16845f7d0dd8138871a7ccfad66f
13,937
def Gaussian(y, model, yerr): """Returns the loglikelihood for a Gaussian distribution. In this calculation, it is assumed that the parameters are true, and the loglikelihood that the data is drawn from the distribution established by the parameters is calculated Parameters ---------- model : array_like theoretical model data to be compared against y : array_like data points yerr : standard deviations on individual data points, assumed to be gaussian Returns ------- float loglikelihood for the data.""" inv_sigma2 = 1.0/(yerr**2.0) return -0.5*(np.sum((y-model)**2*inv_sigma2 - np.log(inv_sigma2)))
d9eaa41b95006a9d17907582b804a4921f672141
13,940
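# Sketch of evaluating the Gaussian log-likelihood above on synthetic data: the "model"
# is just a straight line here, and the noise level is made up.
import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 50)
model = 2.0 * x + 1.0                      # theoretical prediction
yerr = np.full_like(x, 0.5)                # assumed per-point Gaussian sigma
y = model + rng.normal(0.0, yerr)          # noisy observations
print(Gaussian(y, model, yerr))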
def clean_us_demographics(us_demographics_spark, spark_session): """ Clean data from us_demographics Args: us_demographics (object): Pyspark dataframe object spark_session (object): Pyspark session Returns: (object): Pyspark dataframe with cleaned data """ spark = spark_session us_demographics_spark.createOrReplaceTempView('us_demographics') dum = spark.sql(""" SELECT City, State, cast(`Median Age` as float) as Median_Age, cast(`Male Population` as int) as Male_Population, cast(`Female Population` as int) as Female_Population, cast(`Total Population` as int) as Total_Population, cast(`Number of Veterans` as int) as Number_of_Veterans, cast(`Foreign-born` as int) as Foregin_born, cast(`Average Household Size` as float) as Average_Household_Size, `State Code` as State_Code,Race, cast(Count as int) FROM us_demographics """) us_demographics_spark_cleaned = dum.dropDuplicates() us_demographics_spark_cleaned = us_demographics_spark_cleaned.na.drop() us_demographics_spark_race = us_demographics_spark_cleaned.groupBy(['City','State']).pivot("Race").agg(F.first("Count")) us_demographics_spark_race = us_demographics_spark_race.select('City', 'State', F.col('American Indian and Alaska Native').alias('American_Indian_and_Alaska_Native'), 'Asian', F.col('Black or African-American').alias('Black_or_African_American'), F.col('Hispanic or Latino').alias('Hispanic_or_Latino'), 'White') us_demographics_spark_cleaned = us_demographics_spark_cleaned.drop('Race', 'Count') us_demographics_spark_cleaned = us_demographics_spark_cleaned.dropDuplicates() us_demographics_spark_cleaned = us_demographics_spark_cleaned.join(us_demographics_spark_race, ['State', 'City']) us_demographics_spark_cleaned = us_demographics_spark_cleaned.fillna( {'American_Indian_and_Alaska_Native':0, 'Asian':0, 'Black_or_African_American':0, 'Hispanic_or_Latino':0, 'White':0}) us_demographics_spark_cleaned = us_demographics_spark_cleaned.orderBy(['City','State']) return us_demographics_spark_cleaned
dcf812bf64a2f6c3b908d895488e1a57e1729301
13,941
import datetime


def parse_date(date=None):
    """
    Parse a string in YYYY-MM-DD format into a datetime.date object. Throws
    ValueError if input is invalid

    :param date: string in YYYY-MM-DD format giving a date
    :return: a datetime.date object corresponding to the date given
    """
    if date is None:
        raise ValueError
    fields = date.split('-')
    if len(fields) != 3:
        raise ValueError
    return datetime.date(year=int(fields[0]), month=int(fields[1]), day=int(fields[2]))
a4c6cef85dabd445dd308fdd5f2c20a38accd6de
13,942
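# Usage sketch for parse_date above.
print(parse_date('2023-07-04'))        # 2023-07-04
try:
    parse_date('04/07/2023')           # wrong format -> ValueError
except ValueError:
    print('rejected non YYYY-MM-DD input')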
def status(): """ Incoming status handler: forwarded by ForwardServerProvider """ req = jsonex_loads(request.get_data()) status = g.provider._receive_status(req['status']) return {'status': status}
3a50ff8d829a7bf37b84871897335345496dbc49
13,943
def get_feature_extractor_info(): """Return tuple of pretrained feature extractor and its best-input image size for the extractor""" return get_pretrained_feature_extractor(), K_MODEL_IMAGE_SIZE
bdec6d5a2d402f659b9a001f4082f6b5e33ca3cc
13,944
import networkx def nx_find_connected_limited(graph, start_set, end_set, max_depth=3): """Return the neurons in end_set reachable from start_set with limited depth.""" reverse_graph = graph.reverse() reachable = [] for e in end_set: preorder_nodes = list( ( networkx.algorithms.traversal.depth_first_search.dfs_preorder_nodes( reverse_graph, source=e, depth_limit=max_depth ) ) ) for s in start_set: if s in preorder_nodes: reachable.append(e) break return reachable
4322f4231be73b575d05442f09608c71c3b9f605
13,945
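# Self-contained sketch for nx_find_connected_limited above: in this toy directed graph,
# 'd' is reachable from 'a' within 3 hops but 'e' is 4 hops away, so only 'd' is returned.
import networkx

g = networkx.DiGraph()
g.add_edges_from([('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'e')])
print(nx_find_connected_limited(g, start_set=['a'], end_set=['d', 'e'], max_depth=3))  # ['d']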
def hexbyte_2integer_normalizer(first_int_byte, second_int_byte):
    """Function to normalize two integer bytes to a single integer

    Transform two integer bytes to their two-digit hex representations and
    concatenate them into a single integer

    Parameters
    __________
    first_int_byte, second_int_byte : int
        integer values to normalize (0 to 255)

    Returns
    _______
    integer : int
        Single normalized integer
    """
    # Format each byte as a zero-padded two-digit hex string; this also handles a value
    # of 0 correctly, which the previous str.lstrip('0x') approach would mangle.
    first_hex = f'{first_int_byte:02x}'
    second_hex = f'{second_int_byte:02x}'
    hex_string = f'{first_hex}{second_hex}'
    integer = int(hex_string, 16)
    return integer
a3bbe75014b6e08607314b615440039bab245f04
13,946
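# Usage sketch for hexbyte_2integer_normalizer above: combining a high and a low byte is
# equivalent to (high << 8) | low.
assert hexbyte_2integer_normalizer(0x12, 0x34) == 0x1234
assert hexbyte_2integer_normalizer(0, 255) == 255
assert hexbyte_2integer_normalizer(0x12, 0x34) == (0x12 << 8) | 0x34
print('normalizer checks passed')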
def make_window(signal, sample_spacing, which=None, alpha=4): """Generate a window function to be used in PSD analysis. Parameters ---------- signal : `numpy.ndarray` signal or phase data sample_spacing : `float` spacing of samples in the input data which : `str,` {'welch', 'hann', None}, optional which window to produce. If auto, attempts to guess the appropriate window based on the input signal alpha : `float`, optional alpha value for welch window Notes ----- For 2D welch, see: Power Spectral Density Specification and Analysis of Large Optical Surfaces E. Sidick, JPL Returns ------- `numpy.ndarray` window array """ s = signal.shape if which is None: # attempt to guess best window ysamples = int(round(s[0] * 0.02, 0)) xsamples = int(round(s[1] * 0.02, 0)) corner1 = signal[:ysamples, :xsamples] == 0 corner2 = signal[-ysamples:, :xsamples] == 0 corner3 = signal[:ysamples, -xsamples:] == 0 corner4 = signal[-ysamples:, -xsamples:] == 0 if corner1.all() and corner2.all() and corner3.all() and corner4.all(): # four corners all "black" -- circular data, Welch window is best # looks wrong but 2D welch takes x, y while indices are y, x y, x = (e.arange(N) - (N / 2) for N in s) which = window_2d_welch(x, y) else: # if not circular, square data; use Hanning window y, x = (e.hanning(N) for N in s) which = e.outer(y, x) else: if type(which) is str: # known window type wl = which.lower() if wl == 'welch': y, x = (e.arange(N) - (N / 2) for N in s) which = window_2d_welch(x, y, alpha=alpha) elif wl in ('hann', 'hanning'): y, x = (e.hanning(N) for N in s) which = e.outer(y, x) else: raise ValueError('unknown window type') return which
5ef18c990225b6610ee10c848ab4ee0b2ce0fc9b
13,950
from typing import Dict from typing import Union def set_units( df: pd.DataFrame, units: Dict[str, Union[pint.Unit, str]] ) -> pd.DataFrame: """Make dataframe unit-aware. If dataframe is already unit-aware, convert to specified units. If not, assume values are in specified unit. Parameters ---------- df : pd.DataFrame units : Dict[str, Union[pint.Unit, str]] key = column name, value = unit to set to that column Returns ------- pd.DataFrame Same as input dataframe, but with specified units. """ df = df.copy() # don't change incoming dataframe for name, unit in units.items(): df[name] = set_unit(df[name], unit) return df
8a0cf821e3e0d1ba7b1b8c3dbdddb5f517ea0acb
13,951
def address_repr(buf, reverse: bool = True, delimit: str = "") -> str: """Convert a buffer into a hexlified string.""" order = range(len(buf) - 1, -1, -1) if reverse else range(len(buf)) return delimit.join(["%02X" % buf[byte] for byte in order])
6b4b8921d6280cd688c3bfcfca82b2b5546001e7
13,952
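# Usage sketch for address_repr above, e.g. for printing an nRF24-style radio address;
# the address bytes are made up for illustration.
addr = bytes([0xE7, 0xE7, 0xE7, 0xE7, 0x01])
print(address_repr(addr))                              # 01E7E7E7E7 (reversed byte order)
print(address_repr(addr, reverse=False, delimit=':'))  # E7:E7:E7:E7:01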
import re def _highlight(line1, line2): """Returns the sections that should be bolded in the given lines. Returns: two tuples. Each tuple indicates the start and end of the section of the line that should be bolded for line1 and line2 respectively. """ start1 = start2 = 0 match = re.search(r'\S', line1) # ignore leading whitespace if match: start1 = match.start() match = re.search(r'\S', line2) if match: start2 = match.start() length = min(len(line1), len(line2)) - 1 bold_start1 = start1 bold_start2 = start2 while (bold_start1 <= length and bold_start2 <= length and line1[bold_start1] == line2[bold_start2]): bold_start1 += 1 bold_start2 += 1 match = re.search(r'\s*$', line1) # ignore trailing whitespace bold_end1 = match.start() - 1 match = re.search(r'\s*$', line2) bold_end2 = match.start() - 1 while (bold_end1 >= bold_start1 and bold_end2 >= bold_start2 and line1[bold_end1] == line2[bold_end2]): bold_end1 -= 1 bold_end2 -= 1 if bold_start1 - start1 > 0 or len(line1) - 1 - bold_end1 > 0: return (bold_start1 + 1, bold_end1 + 2), (bold_start2 + 1, bold_end2 + 2) return None, None
d9bf7667e24d21e6f91b656af0697765c2b74f55
13,953
def get_detected_objects_new(df, siglim=5, Terr_lim=3, Toffset=2000): """ Get a dataframe with only the detected objects. :param df: A DataFrame such as one output by get_ccf_summary with N > 1 :param siglim: The minimum significance to count as detected :param Terr_lim: The maximum number of standard deviations of (Measured - Actual) to allow for detected objects :param Toffset: The absolute difference to allow between the true and measured temperature. :return: A dataframe similar to df, but with fewer rows """ S = get_initial_uncertainty(df) S['Tdiff'] = S.Tmeas - S.Tactual mean, std = S.Tdiff.mean(), S.Tdiff.std() detected = S.loc[(S.significance > siglim) & (S.Tdiff - mean < Terr_lim * std) & (abs(S.Tdiff) < Toffset)] return pd.merge(detected[['Primary', 'Secondary']], df, on=['Primary', 'Secondary'], how='left')
7662086053c093b9eb19ffe7c56f5cf7914b1ab8
13,955
def cmp(a, b): """ Python 3 does not have a cmp function, this will do the cmp. :param a: first object to check :param b: second object to check :return: """ # convert to lower case for string comparison. if a is None: return -1 if type(a) is str and type(b) is str: a = a.lower() b = b.lower() # if list has string element, convert string to lower case. if type(a) is list and type(b) is list: a = [x.lower() if type(x) is str else x for x in a] b = [x.lower() if type(x) is str else x for x in b] a.sort() b.sort() return (a > b) - (a < b)
c82837a0d8887f55fdd1175b5d828742529b3e37
13,956