Dataset columns:
  content — string (lengths 35 to 762k)
  sha1    — string (length 40)
  id      — int64 (0 to 3.66M)
def integrated_circular_gaussian(X=None, Y=None, sigma=0.8):
    """Create a circular Gaussian that is integrated over pixels

    This is typically used for the model PSF,
    working well with the default parameters.

    Parameters
    ----------
    X, Y: `numpy.ndarray`
        The x,y-coordinates to evaluate the integrated Gaussian.
        If `X` and `Y` are `None` then they will both be given the
        default value `numpy.arange(-7, 8)`, resulting in a
        `15x15` centered image.
    sigma: `float`
        The standard deviation of the Gaussian.

    Returns
    -------
    image: `numpy.ndarray`
        A Gaussian function integrated over `X` and `Y`.
    """
    # Either both or neither must be given (the original check missed the
    # case where X is given but Y is not).
    if (X is None) != (Y is None):
        raise Exception(
            f"Either X and Y must be specified, or neither must be specified, got X={X} and Y={Y}")
    if X is None:
        X = np.arange(-7, 8)
        Y = X

    result = integrated_gaussian(X, sigma)[None, :] * integrated_gaussian(Y, sigma)[:, None]
    return result / np.sum(result)
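# Illustrative usage (a sketch, not part of the original row): the snippet
# references a 1-D `integrated_gaussian` helper it does not define; the
# erf-based stand-in below is an assumption consistent with pixel integration.
import numpy as np
from scipy.special import erf

def integrated_gaussian(X, sigma):
    # Integral of a unit Gaussian over pixels centered at X (hypothetical helper).
    sq2 = np.sqrt(2) * sigma
    return 0.5 * (erf((X + 0.5) / sq2) - erf((X - 0.5) / sq2))

psf = integrated_circular_gaussian(sigma=0.8)
assert psf.shape == (15, 15)
assert abs(psf.sum() - 1.0) < 1e-9  # explicitly normalized to unit flux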
63201f6c37fba1e3750881cd692057c2bd5011b0
26,686
def nPairsToFracPairs(hd_obj, all_pairs_vs_rp, redshift_limit=2):
    """
    Function to convert the number of pairs into a fractional number density per shell
    @redshift_limit :: the initial redshift limit set on the sample (needed for opening dir)
    """
    num_pairs = all_pairs_vs_rp[1:] - all_pairs_vs_rp[:-1]

    # get shell volume and projected radius bins
    r_p, shell_volume = aimm.shellVolume()

    # normalization
    total_num_pairs = len(hd_obj)
    N = total_num_pairs * (total_num_pairs - 1)

    # fractional number density
    f_pairs = num_pairs / (N * shell_volume[:len(num_pairs)])
    return f_pairs, error(num_pairs) / (N * shell_volume[:len(num_pairs)])
d9d8f72d8f05cff4e984b43f4a22da406dfe1c05
26,687
def default_decode(events, mode='full'):
    """Decode a XigtCorpus element."""
    event, elem = next(events)
    root = elem  # store root for later instantiation
    while (event, elem.tag) not in [('start', 'igt'), ('end', 'xigt-corpus')]:
        event, elem = next(events)
    igts = None
    if event == 'start' and elem.tag == 'igt':
        igts = (
            decode_igt(e) for e in iter_elements(
                'igt', events, root, break_on=[('end', 'xigt-corpus')]
            )
        )
    xc = decode_xigtcorpus(root, igts=igts, mode=mode)
    return xc
36e0b4b13cb357d74cee20623e5a71cf9a5dd02a
26,689
def attention_guide(dec_lens, enc_lens, N, T, g, dtype=None):
    """Build that W matrix. shape(B, T_dec, T_enc)
    W[i, n, t] = 1 - exp(-(n/dec_lens[i] - t/enc_lens[i])**2 / (2g**2))

    See also:
    Tachibana, Hideyuki, Katsuya Uenoyama, and Shunsuke Aihara. 2017.
    “Efficiently Trainable Text-to-Speech System Based on Deep Convolutional
    Networks with Guided Attention.” ArXiv:1710.08969 [Cs, Eess], October.
    http://arxiv.org/abs/1710.08969.
    """
    dtype = dtype or paddle.get_default_dtype()
    dec_pos = paddle.arange(0, N).astype(dtype) / dec_lens.unsqueeze(-1)  # n/N  # shape(B, T_dec)
    enc_pos = paddle.arange(0, T).astype(dtype) / enc_lens.unsqueeze(-1)  # t/T  # shape(B, T_enc)
    W = 1 - paddle.exp(-(dec_pos.unsqueeze(-1) - enc_pos.unsqueeze(1))**2 /
                       (2 * g**2))

    dec_mask = sequence_mask(dec_lens, maxlen=N)
    enc_mask = sequence_mask(enc_lens, maxlen=T)
    mask = dec_mask.unsqueeze(-1) * enc_mask.unsqueeze(1)
    mask = paddle.cast(mask, W.dtype)

    W *= mask
    return W
2af05dedb5260e52150d96b181fab063cd17efb8
26,690
def two_step_colormap(left_max, left, center='transparent', right=None, right_max=None, name='two-step'):
    """Colormap using lightness to extend range

    Parameters
    ----------
    left_max : matplotlib color
        Left end of the colormap.
    left : matplotlib color
        Left middle of the colormap.
    center : matplotlib color | 'transparent'
        Color for the middle value; 'transparent' to make the middle
        transparent (default).
    right : matplotlib color
        Right middle of the colormap (if not specified, the colormap ends at
        the location specified by ``center``).
    right_max : matplotlib color
        Right end of the colormap.
    name : str
        Name for the colormap.

    Examples
    --------
    Standard red/blue::

        >>> cmap = plot.two_step_colormap('black', 'red', 'transparent', 'blue', 'black', name='red-blue')
        >>> plot.ColorBar(cmap, 1)

    Or somewhat more adventurous::

        >>> cmap = plot.two_step_colormap('black', (1, 0, 0.3), 'transparent', (0.3, 0, 1), 'black', name='red-blue-2')
    """
    if center == 'transparent':
        center_ = None
        transparent_middle = True
    else:
        center_ = _to_rgb(center, False)
        transparent_middle = False

    left_max_ = _to_rgb(left_max, transparent_middle)
    left_ = _to_rgb(left, transparent_middle)
    is_symmetric = right is not None
    if is_symmetric:
        right_ = _to_rgb(right, transparent_middle)
        right_max_ = _to_rgb(right_max, transparent_middle)
    else:
        right_ = right_max_ = None

    kind = (is_symmetric, transparent_middle)
    if kind == (False, False):
        clist = (
            (0.0, center_),
            (0.5, left_),
            (1.0, left_max_),
        )
    elif kind == (False, True):
        clist = (
            (0.0, (*left_[:3], 0)),
            (0.5, left_),
            (1.0, left_max_),
        )
    elif kind == (True, False):
        clist = (
            (0.0, left_max_),
            (0.25, left_),
            (0.5, center_),
            (0.75, right_),
            (1.0, right_max_),
        )
    elif kind == (True, True):
        clist = (
            (0.0, left_max_),
            (0.25, left_),
            (0.5, (*left_[:3], 0)),
            (0.5, (*right_[:3], 0)),
            (0.75, right_),
            (1.0, right_max_),
        )
    else:
        raise RuntimeError

    cmap = LocatedLinearSegmentedColormap.from_list(name, clist)
    cmap.set_bad('w', alpha=0.)
    cmap.symmetric = is_symmetric
    return cmap
226dfd9a9beaadf5a47167c6080cdb3ba8fa522f
26,692
def _broadcast_arg(U, arg, argtype, name):
    """Broadcasts plotting option `arg` to all factors.

    Args:
        U : KTensor
        arg : argument provided by the user
        argtype : expected type for arg
        name : name of the variable, used for error handling

    Returns:
        iterable version of arg of length U.ndim
    """
    # if input is not iterable, broadcast it to all dimensions of the tensor
    if arg is None or isinstance(arg, argtype):
        return [arg for _ in range(U.ndim)]

    # check if iterable input is valid
    elif np.iterable(arg):
        if len(arg) != U.ndim:
            raise ValueError('Parameter {} was specified as a sequence of '
                             'incorrect length. The length must match the '
                             'number of tensor dimensions '
                             '(U.ndim={})'.format(name, U.ndim))
        elif not all([isinstance(a, argtype) for a in arg]):
            raise TypeError('Parameter {} specified as a sequence of '
                            'incorrect type. '
                            'Expected {}.'.format(name, argtype))
        else:
            return arg

    # input is not iterable and is not the correct type.
    else:
        raise TypeError('Parameter {} specified as a {}.'
                        ' Expected {}.'.format(name, type(arg), argtype))
3a441b9156f7cf614b2ab2967159349252802bed
26,693
import numpy as np
# the filtering and spline routines used below come from scipy,
# not the stdlib `signal` module
from scipy import signal, interpolate


def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.05, f3=15):
    """
    Mimic a hand-drawn line from (x, y) data


    Definition
    ----------
    def xkcd_line(x, y, xlim=None, ylim=None, mag=1.0, f1=30, f2=0.05, f3=15):


    Input
    -----
    x, y       array_like; arrays to be modified


    Optional Input
    --------------
    xlim, ylim data range; the assumed plot range for the modification.
               If not specified, they will be guessed from the data
    mag        float; the magnitude of the distortion (default: 1.0)
    f1, f2, f3 int, float, int; filtering parameters.
               f1 gives the size of the window (default: 30)
               f2 gives the high-frequency cutoff (default: 0.05)
               f3 gives the size of the filter (default: 15)


    Output
    ------
    x, y       ndarrays; the modified lines


    References
    ----------
    See xkcd below.


    Examples
    --------
    for line in ax.lines:
        x, y = line.get_data()
        x_int, y_int = xkcd_line(x, y, xlim, ylim, mag, f1, f2, f3)
        line.set_data(x_int, y_int)


    License
    -------
    This file is part of the JAMS Python package, distributed under the MIT
    License. The JAMS Python package originates from the former UFZ Python
    library, Department of Computational Hydrosystems, Helmholtz Centre for
    Environmental Research - UFZ, Leipzig, Germany.

    Copyright (c) 2013-2019 Matthias Cuntz

    Permission is hereby granted, free of charge, to any person obtaining a
    copy of this software and associated documentation files (the "Software"),
    to deal in the Software without restriction, including without limitation
    the rights to use, copy, modify, merge, publish, distribute, sublicense,
    and/or sell copies of the Software, and to permit persons to whom the
    Software is furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included
    in all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
    THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.


    History
    -------
    Written, MC, Mar 2013
    """
    # assure array
    x = np.asarray(x)
    y = np.asarray(y)

    # get limits for rescaling
    if xlim is None:
        xlim = (x.min(), x.max())
    if ylim is None:
        ylim = (y.min(), y.max())
    if xlim[1] == xlim[0]:
        xlim = ylim
    if ylim[1] == ylim[0]:
        ylim = xlim

    # scale the data
    x_scaled = (x - xlim[0]) * 1. / (xlim[1] - xlim[0])
    y_scaled = (y - ylim[0]) * 1. / (ylim[1] - ylim[0])

    # compute the total distance along the path
    dx = x_scaled[1:] - x_scaled[:-1]
    dy = y_scaled[1:] - y_scaled[:-1]
    dist_tot = np.sum(np.sqrt(dx*dx + dy*dy))

    # number of interpolated points is proportional to the distance
    Nu = int(200 * dist_tot)
    u = np.arange(-1, Nu + 1) * 1. / (Nu - 1)

    # interpolate curve at sampled points
    k = min(3, x.size - 1)
    res = interpolate.splprep([x_scaled, y_scaled], s=0, k=k)
    x_int, y_int = interpolate.splev(u, res[0])

    # we perturb perpendicular to the drawn line
    dx = x_int[2:] - x_int[:-2]
    dy = y_int[2:] - y_int[:-2]

    # horizontal or vertical lines
    # np.sign(np.cumsum(np.random.random(dx.size)-0.5)) emulates something
    # like a Brownian motion, i.e. auto-correlated random walks around 0;
    # just the sign interests here.
    eps = np.maximum(np.abs(np.amax(x_scaled) - np.amin(x_scaled)),
                     np.abs(np.amax(y_scaled) - np.amin(y_scaled))) / Nu
    if np.all(np.abs(dx) < eps):
        dx = np.sign(np.cumsum(np.random.random(dx.size)-0.5)) * eps
    if np.all(np.abs(dy) < eps):
        dy = np.sign(np.cumsum(np.random.random(dx.size)-0.5)) * eps
    # equal distances
    if np.all(np.sign(dx) == np.sign(dx[0])):
        dx *= np.sign(np.cumsum(np.random.random(dx.size)-0.5))
    if np.all(np.sign(dy) == np.sign(dy[0])):
        dy *= np.sign(np.cumsum(np.random.random(dx.size)-0.5))
    dist = np.sqrt(dx * dx + dy * dy)

    # create a filtered perturbation
    coeffs = mag * np.random.normal(0, 0.01, x_int.size - 2)
    b = signal.firwin(f1, f2*dist_tot, window=('kaiser', f3))
    response = signal.lfilter(b, 1, coeffs)

    x_int[1:-1] += response * dy / dist
    y_int[1:-1] += response * dx / dist

    # un-scale data
    x_int = x_int[1:-1] * (xlim[1] - xlim[0]) + xlim[0]
    y_int = y_int[1:-1] * (ylim[1] - ylim[0]) + ylim[0]

    return x_int, y_int
ea36487d6e2f4f9d5d0bc9d5cea23459a5b8a5a4
26,695
def generate_mutation() -> str:
    """
    Retrieve staged instances and generate the mutation query
    """
    staged = Node._get_staged()
    # localns = {x.__name__: x for x in Node._nodes}
    # localns.update({"List": List, "Union": Union, "Tuple": Tuple})
    # annotations = get_type_hints(Node, globalns=globals(), localns=localns)

    # query = ['{', '\tset {']
    query = list()
    for uid, node in staged.items():
        subject, passed = _parse_subject(uid)
        edges = node.edges

        line = f'{subject} <{node.__class__.__name__}> "true" .'
        query.append(line)

        line = f'{subject} <_type> "{node.__class__.__name__}" .'
        query.append(line)

        for pred, obj in edges.items():
            # annotation = annotations.get(pred, "")
            if not isinstance(obj, list):
                obj = [obj]
            for o in obj:
                facets = []
                if isinstance(o, tuple) and hasattr(o, "obj"):
                    for facet in o.__class__._fields[1:]:
                        val = _raw_value(getattr(o, facet))
                        facets.append(f"{facet}={val}")
                    o = o.obj
                if not isinstance(o, (list, tuple, set)):
                    out = [o]
                else:
                    out = o

                for output in out:
                    output = _make_obj(node, pred, output)
                    if facets:
                        # join into a separate variable so `facets` stays a
                        # list across loop iterations (the original rebound it
                        # to a string, corrupting subsequent iterations)
                        facets_str = ", ".join(facets)
                        line = f"{subject} <{pred}> {output} ({facets_str}) ."
                    else:
                        line = f"{subject} <{pred}> {output} ."
                    query.append(line)

    query = "\n".join(query)
    Node._clear_staged()
    return query
789e6042226ed25451d7055bc9b383b81fd10ddf
26,696
from datetime import datetime


def start(fund: Fund, start_date: datetime) -> Fund:
    """
    Starts the fund by setting the added USD and the market value of the
    manager as the current market value. Meaning that at the beginning
    there is only the manager's positions.

    :param fund: The fund to start
    :param start_date: The date to calculate the market value
    :return: The fund
    """
    current_market_value = calculations.get_market_value(fund, start_date.date())
    fund.added_usd = current_market_value
    fund.market_value = current_market_value
    fund.save()

    manager = fund.get_manager()
    manager.added_usd = current_market_value
    manager.market_value = current_market_value
    manager.save()

    __rebalance_investors_ownership(fund)

    return fund
e7f4a273b4c48eb3f9e440f663fee45847df902a
26,697
def _make_experiment(exp_id=1, path="./Results/Tmp/test_FiftyChain"):
    """
    Each file specifying an experimental setup should contain a
    make_experiment function which returns an instance of the Experiment
    class with everything set up.

    @param exp_id: number used to seed the random number generators
    @param path: output directory where logs and results are stored
    """

    ## Domain:
    domain = FiftyChain()

    ## Representation
    # discretization only needed for continuous state spaces, discarded otherwise
    representation = Tabular(domain)

    ## Policy
    policy = eGreedy(representation, epsilon=0.2)

    ## Agent
    agent = SARSA(representation=representation, policy=policy,
                  discount_factor=domain.discount_factor,
                  learn_rate=0.1)
    checks_per_policy = 3
    max_steps = 50
    num_policy_checks = 3

    experiment = Experiment(**locals())
    return experiment
6cf51f8957e091175445b36aa1d6ee7b22465835
26,698
def find_largest_digit_helper(n, max_n=0):
    """
    :param n: int, the integer to inspect
    :param max_n: int, the largest digit found so far
    :return: int, the largest single digit in n
    """
    # Special case: once the maximum digit 9 is reached, no further comparison is needed
    if n == 0 or max_n == 9:
        return max_n
    else:
        # convert negative values to positive
        if n < 0:
            n *= -1
        # use the remainder to extract the last digit
        unit_n = n % 10
        # compare the last digit against the current maximum
        if unit_n > max_n:
            max_n = unit_n
        # local variables are released as the recursion unwinds,
        # so the recursive call must be placed on the return statement
        return find_largest_digit_helper(n // 10, max_n)
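# Illustrative usage (a sketch, not part of the original row):
assert find_largest_digit_helper(12345) == 5
assert find_largest_digit_helper(-28531) == 8   # sign is ignored
assert find_largest_digit_helper(0) == 0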
cd60a0cdb7cdfba6e2374a564bb39f1c95fe8931
26,699
def build_stats(loss, eval_result, time_callback):
    """Normalizes and returns dictionary of stats.

    Args:
        loss: The final loss at training time.
        eval_result: Output of the eval step. Assumes first value is
            eval_loss and second value is accuracy_top_1.
        time_callback: Time tracking callback likely used during keras.fit.

    Returns:
        Dictionary of normalized results.
    """
    stats = {}
    if loss:
        stats["loss"] = loss

    if eval_result:
        stats["eval_loss"] = eval_result[0]
        stats["eval_hit_rate"] = eval_result[1]

    if time_callback:
        timestamp_log = time_callback.timestamp_log
        stats["step_timestamp_log"] = timestamp_log
        stats["train_finish_time"] = time_callback.train_finish_time
        if len(timestamp_log) > 1:
            stats["avg_exp_per_second"] = (
                time_callback.batch_size * time_callback.log_steps *
                (len(time_callback.timestamp_log) - 1) /
                (timestamp_log[-1].timestamp - timestamp_log[0].timestamp))

    return stats
cddde6bf9bd2797c94bc392be77f7be19a46271e
26,700
import random


def random_resource_code2() -> str:
    """One random book name chosen at random. This fixture exists so that
    we can have a separate book chosen in a two language document request."""
    book_ids = list(bible_books.BOOK_NAMES.keys())
    return random.choice(book_ids)
04ea455fa85eea32c2c7e9d7d3a3dc98759b937b
26,701
def _to_str(value):
    """Helper function to make sure unicode values are converted to UTF-8.

    Args:
        value: String or Unicode text to convert to UTF-8.

    Returns:
        UTF-8 encoded string of `value`; otherwise `value` remains unchanged.
    """
    if isinstance(value, unicode):  # Python 2: `unicode` is the text type
        return value.encode('utf-8')
    return value
46186757a475c2b5fa877e8fb62c32a27770e6b7
26,702
def get_loss(loss_name):
    """Get loss from LOSS_REGISTRY based on loss_name."""
    if loss_name not in LOSS_REGISTRY:
        raise Exception(NO_LOSS_ERR.format(loss_name, LOSS_REGISTRY.keys()))
    loss = LOSS_REGISTRY[loss_name]
    return loss
d91bde7ce34e2d4fe38a5c86a93ba96d153eb7c1
26,703
def collate_fn(batch):
    """
    Data collater. Assumes each instance is a dict.
    Applies different collation rules for each field.

    Args:
        batch: List of loaded elements via Dataset.__getitem__
    """
    collated_batch = {}
    # iterate over keys
    for key in batch[0]:
        try:
            collated_batch[key] = default_collate([elem[key] for elem in batch])
        except TypeError:
            collated_batch[key] = [elem[key] for elem in batch]

    return collated_batch
718a6945d71a485fd4dbbbeaac374afbb9256621
26,704
def _rec_eval_in(g, a, v, i, j, K):
    """Recursive helper for :func:`dmp_eval_in`."""
    if i == j:
        return dmp_eval(g, a, v, K)

    v, i = v - 1, i + 1

    return dmp_strip([_rec_eval_in(c, a, v, i, j, K) for c in g], v)
51fbd9a45b4e1722ef98a5ab543575980d56b66b
26,705
def normalize(array):
    """
    Normalize a 4 (or Nx4) element array/list/numpy.array for use as a quaternion

    :param array: 4 or Nx4 element list/array
    :returns: normalized array
    :rtype: numpy array
    """
    quat = np.array(array)
    return np.squeeze(quat / np.sqrt(np.sum(quat * quat, axis=-1, keepdims=True)))
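# Illustrative usage (a sketch, not part of the original row; assumes numpy as np):
import numpy as np

q = normalize([1.0, 2.0, 3.0, 4.0])
assert np.isclose(np.linalg.norm(q), 1.0)    # unit quaternion
rows = normalize([[1, 0, 0, 0], [0, 2, 0, 0]])
assert rows.shape == (2, 4)                  # Nx4 input is normalized row-wise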
020b1fb9b1050192254274ac3d716e655a5ff003
26,706
def dose_class_baseline(dose_num, df_test, df_targets):
    """Calculate the PR- baseline for each dose treatment"""
    dose_cpds_index = df_test[df_test['dose'] == dose_num].index
    df_class_targets = df_targets.loc[dose_cpds_index].reset_index(drop=True)
    class_baseline_score = calculate_baseline(df_class_targets)
    return class_baseline_score
0e0178573fc3ccfb08c8f898d264efa84fd10962
26,707
def xrange(mn: int, mx: int = None) -> list:
    """Built-in range function, but actually gives you a range between
    mn and mx, inclusive of both endpoints.
    Range:  range(5)  -> [0, 1, 2, 3, 4]
    XRange: xrange(5) -> [0, 1, 2, 3, 4, 5]"""
    return list(range(0 if mx is None else mn,
                      mn + 1 if mx is None else mx + 1))
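# Illustrative usage (a sketch, not part of the original row):
assert xrange(5) == [0, 1, 2, 3, 4, 5]   # single-arg form: 0..mn inclusive
assert xrange(2, 5) == [2, 3, 4, 5]      # two-arg form: mn..mx inclusive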
4ab3059a51966cefd43008c4aa4c50cf42cb8fa2
26,708
def sous_tableaux(arr: list, n: int) -> list:
    """
    Description:
        Split a list into sub-lists.

    Parameters:
        arr: {list} -- List to split
        n: {int} -- Number of elements per sub-list

    Returns:
        {list} -- List of sub-lists

    Example:
        >>> sous_tableaux([0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1], 3)
        [[0, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1], [0, 1, 1], [0, 0, 1]]
    """
    if n <= 0:
        return []
    if len(arr) < n:
        return []

    return [arr[i:i + n] for i in range(0, len(arr), n)]
4f0a627ea00beafb5b6bc77490e71631e8a55e28
26,710
from collections import namedtuple
from datetime import date, timedelta


def get_previous_tweets(date_entry):
    """Return details about previous Tweets. Namely, retrieve details about
    the date_entry-th Tweets from 7 days ago, 30 days ago, and a random
    number of days ago. If a given Tweet does not exist, its corresponding
    entry in the output will be empty.

    Args:
        date_entry: An integer representing the number of Tweets tweeted
            before the desired one on the date it was tweeted.

    Returns:
        A namedtuple with name PreviousTweets and fields with names
        "last_week", "last_month", and "random", where each name points to
        a dictionary containing the Tweet's Twitter ID, the word associated
        with the Tweet, and the URL to the Tweet.

    Raises:
        AWSClientError: If any AWS query fails.
        TypeError: If one or more inputs has an unexpected type.
        ValueError: If the Tweet entry falls outside of the expected range.
    """
    today = date.today()
    tweets_by_date = {
        "last_week": get_tweets_on_date(
            today - timedelta(days=7), date_entry=date_entry),
        "last_month": get_tweets_on_date(
            today - timedelta(days=30), date_entry=date_entry),
        "random": get_tweets_on_random_date(date_entry=date_entry),
    }
    twitter_client = TwitterAPIClient()
    table_schema = DynamoDBTable.TWEETS.value.schema
    for date_key in ("last_week", "last_month", "random"):
        tweets = tweets_by_date[date_key]
        tweets_by_date[date_key] = dict()
        if not isinstance(tweets, list):
            continue
        if not tweets:
            continue
        if len(tweets) > 1:
            pass
        tweet = tweets[0]
        if not validate_item_against_schema(table_schema, tweet):
            continue
        tweet_id = tweet["TweetId"]
        if not twitter_client.tweet_exists(tweet_id):
            continue
        word = tweet["Word"]
        url = tweet_url(TWITTER_USER_USERNAME, tweet_id)
        tweets_by_date[date_key] = dict(tweet_id=tweet_id, word=word, url=url)
    PreviousTweets = namedtuple(
        "PreviousTweets", "last_week last_month random")
    return PreviousTweets(
        last_week=tweets_by_date["last_week"],
        last_month=tweets_by_date["last_month"],
        random=tweets_by_date["random"])
a4b91b87f3cc897e720f745a1c0ad1097292774b
26,711
def changed_cat_keys(dt):
    """Returns keys for categories, changed after specified time"""
    return [root_category_key()]
fbc4d0380bb1deaf7f1214d526d1c623cceb4676
26,712
def create_client(CLIENT_ID, CLIENT_SECRET):
    """Creates Taboola Client object with the given ID and secret."""
    client = TaboolaClient(CLIENT_ID, client_secret=CLIENT_SECRET)
    return client
bea955f5d944e47f11c74ae97c5472dc3c512217
26,713
def set_values_at_of_var_above_X_lat_2_avg(lat_above2set=65, ds=None,
                                           use_avg_at_lat=True,
                                           res='0.125x0.125', var2set=None,
                                           only_consider_water_boxes=True,
                                           fixed_value2use=None,
                                           save2NetCDF=True):
    """
    Set values above a latitude to the monthly lon average

    Parameters
    -------
    lat_above2set (float): latitude to set values above
    fixed_value2use (float): value to set selected latitudes (lat_above2set)
    var2set (str): variable in dataset to set to new value
    res (str): horizontal resolution of dataset (e.g. 4x5)
    only_consider_water_boxes (bool): only update non-water grid boxes
    ds (xr.Dataset): xarray dataset to use for plotting
    save2NetCDF (bool): save outputted dataset as a NetCDF file

    Returns
    -------
    (xr.Dataset)
    """
    print(var2set)
    # local variables
    folder = utils.get_file_locations('data_root') + '/data/'
    # Get existing file
    if isinstance(ds, type(None)):
        filename = 'Oi_prj_feature_variables_{}.nc'.format(res)
        ds = xr.open_dataset(folder + filename)
    # get the average value at lat
    avg = ds[var2set].sel(lat=lat_above2set, method='nearest')
    # get index of lat to set values from
    idx = AC.find_nearest(avg['lat'].values, ds['lat'].values)
    # Setup a bool for values above or equal to lat
    bool_ = avg['lat'].values <= ds['lat'].values
    # Just use a fixed value?
    if not use_avg_at_lat:
        assert type(fixed_value2use) != int, 'Fixed value must be a float!'
        if isinstance(fixed_value2use, float):
            print('Set all values above lat to: {}'.format(fixed_value2use))
            avg[:] = fixed_value2use
    # Make sure there is one value per month
    if len(avg.shape) == 1:
        try:
            avg = np.ma.array([avg.values] * 12)
        except AttributeError:
            avg = np.ma.array([avg] * 12)
    # Only consider the water boxes
    if only_consider_water_boxes:
        # add LWI to array
        if res == '0.125x0.125':
            folderLWI = utils.get_file_locations('AC_tools')
            folderLWI += '/data/LM/TEMP_NASA_Nature_run/'
            filenameLWI = 'ctm.nc'
            LWI = xr.open_dataset(folderLWI + filenameLWI)
            bool_water = LWI.to_array().values[0, :, idx, :] == 0.0
        else:
            LWI = AC.get_LWI_map(res=res)[..., 0]
            bool_water = (LWI[:, idx] == 0.0)
            # Use the annual value for every month
            bool_water = np.ma.array([bool_water] * 12)
        # Set land/ice values to NaN
        for n_month in range(12):
            avg[n_month, ~bool_water[n_month]] = np.NaN
    # get the average over lon
    avg = np.nanmean(avg, axis=-1)
    pstr = '>={}N = monthly avg. @ lat (avg={:.2f},min={:.2f},max={:.2f})'
    print(pstr.format(lat_above2set, avg.mean(), avg.min(), avg.max()))
    # Get the data
    values = ds[var2set].values
    # Update the values above the specific lat
    # Do this on a monthly basis if data is monthly.
    if len(values.shape) == 3:
        for month in np.arange(values.shape[0]):
            # Updated array of values
            arr = np.zeros(values[month, bool_, :].shape)
            arr[:] = avg[month]
            # now replace values
            values[month, bool_, :] = arr
            del arr
    else:
        # Updated array of values
        arr = np.zeros(values[bool_, :].shape)
        arr[:] = np.nanmean(avg)
        # now replace values
        values[bool_, :] = arr
    ds[var2set].values = values
    # Update the history attribute to record the update.
    attrs = ds.attrs
    try:
        History = attrs['History']
    except KeyError:
        attrs['History'] = ''
    hist_str = "; '{}' above lat ({}N) set to monthly lon average at that lat.;"
    hist_str = hist_str.format(var2set, lat_above2set)
    attrs['History'] = attrs['History'] + hist_str
    # Save updated file
    if save2NetCDF:
        ext_str = '_INTERP_NEAREST_DERIVED_UPDATED_{}'.format(var2set)
        filename = 'Oi_prj_feature_variables_{}{}.nc'.format(res, ext_str)
        ds.to_netcdf(filename)
    else:
        return ds
6040523af84f5046af373de2aa22447e66aef181
26,714
def _plot_xkcd(plot_func, *args, **kwargs):
    """
    Plot with *plot_func*, *args* and **kwargs**, but in xkcd style.
    """
    with plt.xkcd():
        fig = plot_func(*args, **kwargs)
    return fig
a4bc526c115c54f37c5171b82639adf0c0a3f888
26,716
import requests


def unfollow_user():
    """UnfollowUser"""
    auth = request.headers
    user = request.args.get('userId')
    req = requests.post(
        '/api/v1/IsAuthenticated',
        {'id': auth['Authorization']}
    )
    # the original discarded the parsed JSON and read attributes off the
    # Response object; this assumes the auth endpoint returns JSON with
    # 'authenticated' and 'user_id' keys
    data = req.json()
    if data.get('authenticated'):
        cur = MY_SQL.connection.cursor()
        # MySQL drivers use %s placeholders and expect parameters as a tuple
        cur.execute(
            '''DELETE from follows WHERE followerid = %s AND followingid = %s''',
            (data.get('user_id'), user)
        )
        output = 'Done'
    else:
        output = 'Not Authenticated'
    return output
8e40b633b960070a4d433610f4f0d4e7f8b89a12
26,717
def isOverlaysEnabled():
    """Returns whether or not the current client's quality overlay system
    is currently enabled.

    Returns:
        bool: True (1) if overlays are currently enabled.
    """
    return False
d433b86b38bfa3c3ed28705888ef12710aaf4f96
26,718
def load_image(path, size=None, grayscale=False):
    """
    Load the image from the given file-path and resize it
    to the given size if not None.
    """
    # Load the image using opencv
    if not grayscale:
        # BGR format
        image = cv2.imread(path)
    else:
        # grayscale format
        image = cv2.imread(path, cv2.IMREAD_GRAYSCALE)

    # Resize image if desired.
    if size is not None:
        image = cv2.resize(image, size)

    # Convert image to numpy array and scale pixels so they fall between 0.0 and 1.0
    image = np.array(image) / 255.0

    # Convert 2-dim gray-scale array to 3-dim BGR array.
    if len(image.shape) == 2:
        image = np.repeat(image[:, :, np.newaxis], 3, axis=2)

    return image
2d3d4a625a690800c2d5db8f8577e9c06a36001a
26,719
def brent_min(f, bracket, fnvals=None, tolerance=1e-6, max_iterations=50):
    """\
    Given a univariate function f and a tuple bracket=(x1,x2,x3) bracketing
    a minimum, find a local minimum of f (with fn value) using Brent's
    method. Optionally pass in the tuple fnvals=(f(x1),f(x2),f(x3)) as a
    parameter.
    """
    x1, x2, x3 = bracket
    if fnvals is None:
        f1, f2, f3 = f(x1), f(x2), f(x3)  # fixed: was f(xx), a typo for f(x2)
    else:
        f1, f2, f3 = fnvals
    if not f1 > f2 < f3:
        raise MinimizationException("initial triple does not bracket a minimum")
    if not x1 < x3:  # ensure x1, x2, x3 in ascending order
        x1, f1, x3, f3 = x3, f3, x1, f1
    a, b = x1, x3
    e = 0.
    x = w = v = x2
    fw = fv = fx = f2  # f(x2) is already known; avoid recomputing f(x)
    for j in range(max_iterations):
        xm = (a + b) / 2
        accuracy = tolerance * abs(x) + LITTLE
        if abs(x - xm) < (2 * accuracy - (b - a) / 2):
            return x, fx
        if abs(e) > accuracy:
            # fit a parabola through (v, fv), (w, fw), (x, fx)
            r = (x - w) * (fx - fv)
            q = (x - v) * (fx - fw)
            p = (x - v) * q - (x - w) * r
            q = 2 * (q - r)
            if q > 0:
                p = -p
            q = abs(q)
            etemp = e
            e = d
            if abs(p) >= abs(q * etemp) / 2 or p <= q * (a - x) or p >= q * (b - x):
                if x >= xm:
                    e = a - x
                else:
                    e = b - x
                d = (2 - GOLDEN) * e
            else:
                # accept parabolic fit
                d = p / q
                u = x + d
                if u - a < 2 * accuracy or b - u < 2 * accuracy:
                    d = accuracy * sgn(xm - x)
        else:
            if x >= xm:
                e = a - x
            else:
                e = b - x
            d = (2 - GOLDEN) * e
        if abs(d) >= accuracy:
            u = x + d
        else:
            u = x + accuracy * sgn(d)
        fu = f(u)
        if fu <= fx:
            if u >= x:
                a = x
            else:
                b = x
            v, w, x = w, x, u
            fv, fw, fx = fw, fx, fu
        else:
            if u < x:
                a = u
            else:
                b = u
            if fu <= fw or w == x:  # fixed: was `fu < -fw`, a typo
                v, w, fv, fw = w, u, fw, fu
            elif fu <= fv or v == x or v == w:  # fixed: was `fu <= fw`, per Brent's method
                v, fv = u, fu
    raise MinimizationException("too many iterations")
ef4010e00ca67d1751b7f8eea497afc59e76364c
26,720
def best_validity(source):
    """
    Retrieves best clustering result based on the relative validity metric
    """
    # try:
    cols = ['min_cluster_size', 'min_samples', 'validity_score', 'n_clusters']
    df = pd.DataFrame(source, columns=cols)

    df['validity_score'] = df['validity_score'].fillna(0)
    best_validity = df.loc[df['validity_score'].idxmax()]
    # except TypeError:
    #     best_validity = None

    return best_validity
ba830ccca8c9f62758ecd8655576efb58892cdbc
26,721
import json


def normalize_cell_value(value):
    """Process value for writing into a cell.

    Args:
        value: any type of variable

    Returns:
        json serialized value if value is list or dict, else value
    """
    if isinstance(value, (dict, list)):
        return json.dumps(value)
    return value
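# Illustrative usage (a sketch, not part of the original row):
assert normalize_cell_value({"a": 1}) == '{"a": 1}'   # dicts are JSON-encoded
assert normalize_cell_value([1, 2]) == '[1, 2]'       # so are lists
assert normalize_cell_value(42) == 42                 # scalars pass through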
8ef421814826c452cdb6528c0645133f48bd448a
26,722
import numpy


def _ancestry2paths(A):
    """Convert edge x edge ancestry matrix to tip-to-tip path x edge split
    metric matrix. The paths will be in the same triangular matrix order as
    produced by distanceDictAndNamesTo1D, provided that the tips appear in
    the correct order in A"""
    tips = [i for i in range(A.shape[0]) if sum(A[:, i]) == 1]
    paths = []
    for (tip1, tip2) in triangularOrder(tips):
        path = A[tip1] ^ A[tip2]
        paths.append(path)
    return numpy.array(paths)
732ef3bbccff4696650c24a983fdbc338f1d8e24
26,723
def geometric_mean(x, axis=-1, check_for_greater_than_zero=True):
    """
    Return the geometric mean of matrix x along axis, ignore NaNs.
    Raise an exception if any element of x is zero or less.
    """
    if (x <= 0).any() and check_for_greater_than_zero:
        msg = 'All elements of x (except NaNs) must be greater than zero.'
        raise ValueError(msg)  # fixed: Python 2 `raise ValueError, msg` syntax
    x = x.copy()
    m = np.isnan(x)
    np.putmask(x, m, 1.0)
    m = np.asarray(~m, np.float64)
    m = m.sum(axis)
    x = np.log(x).sum(axis)
    g = 1.0 / m
    x = np.multiply(g, x)
    x = np.exp(x)
    idx = np.ones(x.shape)
    if idx.ndim == 0:
        if m == 0:
            idx = np.nan
    else:
        np.putmask(idx, m == 0, np.nan)
    x = np.multiply(x, idx)
    return x
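# Illustrative usage (a sketch, not part of the original row; assumes numpy as np):
import numpy as np

x = np.array([[1.0, 4.0, np.nan], [2.0, 8.0, 32.0]])
gm = geometric_mean(x)
assert np.allclose(gm, [2.0, 8.0])  # NaNs ignored: sqrt(1*4)=2, cbrt(2*8*32)=8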
485780f7766857333a240d059d2bb1c526d3f5a8
26,724
def mongodb():
    """
    Simple form to get and set a note in MongoDB
    """
    return None
a6de90429bb3ad3e23191e52e1b43484435747f9
26,725
def get_single_blog(url):
    """Get a blog by the specified url"""
    blog = Blog.get_or_none(Blog.url == url)
    if blog is None:
        return errors.not_found()

    user = get_user_from_request()

    has_access = Blog.has_access(blog, user)
    if not has_access:
        return errors.no_access()

    blog_dict = blog.to_json()
    blog_dict = Vote.add_votes_info(blog_dict, 2, user)

    return jsonify({"success": 1, "blog": blog_dict})
b4a32278681af7eecbebc29ed06b88c7860a39c0
26,726
def test_section():
    """Returns a testing scope context to be used in 'with' statement
    and captures testing code.

    Example::

        with autograd.train_section():
            y = model(x)
            compute_gradient([y])
            with autograd.test_section():
                # testing, IO, gradient updates...
    """
    return TrainingStateScope(False)
6bcbc9aaaaeee5a9b5d8b6a3307f1fb69bf726ae
26,727
from typing import Optional

# `select` here is the SQLAlchemy query constructor, not the stdlib
# `select` module the original snippet imported
from sqlalchemy import select


async def get_installation_owner(metadata_account_id: int,
                                 mdb_conn: morcilla.core.Connection,
                                 cache: Optional[aiomcache.Client],
                                 ) -> str:
    """Load the native user ID who installed the app."""
    user_login = await mdb_conn.fetch_val(
        select([MetadataAccount.owner_login])
        .where(MetadataAccount.id == metadata_account_id))
    if user_login is None:
        raise ResponseError(NoSourceDataError(detail="The installation has not started yet."))
    return user_login
925780ce87c14758cf98191cf39effcaf09a8aaa
26,728
def get_cflags():
    """Get the cflags for compiling Python source code"""
    flags = ['-I' + get_path('include'), '-I' + get_path('platinclude')]
    flags.extend(getvar('CFLAGS').split())
    # Note: Extra cflags not valid for cgo.
    for not_go in ('-fwrapv', '-Wall'):
        if not_go in flags:
            flags.remove(not_go)
    return ' '.join(flags)
f1c171d5a70127bda98a3ef7625c60336641ea1f
26,729
import requests
from requests.exceptions import RequestException


def request_post_json(url, headers, data):
    """Makes a POST request and returns the JSON response"""
    try:
        response = requests.post(url, headers=headers, data=data, timeout=10)
        if response.status_code == 201:
            return response.json()
        else:
            error_message = None
            try:
                json_response = response.json()
                if len(json_response) > 0 and "errorMessage" in json_response[0]:
                    error_message = json_response[0]["errorMessage"]
            except ValueError:
                # Raised by response.json() if JSON couldn't be decoded
                logger.error('Radarr returned non-JSON error result: {}', response.content)
            raise RadarrRequestError(
                "Invalid response received from Radarr: %s" % response.content,
                logger,
                status_code=response.status_code,
                error_message=error_message,
            )
    except RequestException as e:
        raise RadarrRequestError("Unable to connect to Radarr at %s. Error: %s" % (url, e))
82d7ce423024ca1af8a7c513f3d689fbc77c591a
26,730
import random


def get_rand_number(min_value, max_value):
    """
    This function gets a random number from a uniform distribution between
    the two input values [min_value, max_value] inclusively

    Args:
    - min_value (float)
    - max_value (float)

    Return:
    - Random number between this range (float)
    """
    span = max_value - min_value  # renamed from `range` to avoid shadowing the builtin
    choice = random.uniform(0, 1)
    return min_value + span * choice
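# Illustrative usage (a sketch, not part of the original row):
sample = get_rand_number(2.0, 5.0)
assert 2.0 <= sample <= 5.0  # a uniform draw stays within the requested range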
0eec094d05b291c7c02207427685d36262e643e5
26,731
def read(string):
    """
    Given a single interval from a GFFv2 file, returns an Interval object.
    Will return meta lines if they start with #, track, or browser.
    """
    if string.startswith(metalines):
        return interval(_is_meta=True, seqname=string)

    values = []
    cols = string.split(delimiter)
    for field in required_fields:
        values.append((field, cols.pop(0)))
    try:
        for field in optional_fields:
            values.append((field, cols.pop(0)))
    except IndexError:
        pass
    if cols:
        # If there are still fields remaining after consuming all
        # the required and optional fields
        raise IndexError("Too many columns: {}".format(cols))

    fields = dict(values)
    i = interval(**fields)
    # Account for 0-based indexing
    i['start'] += 1
    return i
e4651216c9694935bc879c012b1fe74f529cb41d
26,734
from collections import defaultdict
from pathlib import Path
from typing import Dict, Union

import numpy as np


def average_results(results_path: Union[Path, str],
                    split_on: str = " = ") -> Dict[str, float]:
    """
    Average accuracy values from a file.

    Parameters
    ----------
    results_path : Union[Path, str]
        The file to read results from.
    split_on : str
        The symbol which separates an accuracy's key from its value.

    Returns
    -------
    averages : Dict[str, float]
        A dictionary mapping each accuracy key to its average value.
    """
    averages = defaultdict(list)
    with open(results_path, "r") as results_file:
        for line in results_file:
            if split_on not in line:
                continue
            line_split = line.split(split_on)
            if len(line_split) != 2:
                continue
            key, value = line_split
            key = key.strip()
            if "accuracy" in key:
                averages[key].append(float(value))
    return {key: np.mean(value_list) for key, value_list in averages.items()}
28b896d567d6ef18662766a2da64c6bdb262f3d7
26,735
from functools import wraps


def dict_collection_only(method):
    """
    Guard that ensures a method only runs when every item in the
    clumper collection is a dictionary.
    """

    @wraps(method)
    def wrapped(clumper, *args, **kwargs):
        if not clumper.only_has_dictionaries:
            non_dict = next(d for d in clumper if not isinstance(d, dict))
            raise ValueError(
                f"The `{method}` won't work unless all items are dictionaries. Found: {non_dict}."
            )
        return method(clumper, *args, **kwargs)

    return wrapped
a84c3588942378157674e6862d2f1a8c785ba569
26,737
def country_name(country_id):
    """
    Returns a country name

    >>> country_name(198)
    u'Spain'
    """
    if country_id == '999':
        # Added for internal call - ie flag/phone.png
        return _('internal call').title()
    try:
        obj_country = Country.objects.get(id=country_id)
        return obj_country.countryname
    except Exception:  # narrowed from a bare `except:`
        return _('unknown').title()
fdb44061d795e42d9e312bc25f8335a41c91ca11
26,738
def KLdist(P, Q):
    """
    KLDIST   Kullback-Leibler distance.
    D = KLDIST(P,Q) calculates the Kullback-Leibler distance (information
    divergence) of the two input distributions.
    """
    P2 = P[P * Q > 0]
    Q2 = Q[P * Q > 0]
    P2 = P2 / np.sum(P2)
    Q2 = Q2 / np.sum(Q2)
    D = np.sum(P2 * np.log(P2 / Q2))
    return D
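# Illustrative usage (a sketch, not part of the original row; assumes numpy as np):
import numpy as np

P = np.array([0.5, 0.5])
Q = np.array([0.9, 0.1])
assert KLdist(P, P) == 0.0   # identical distributions have zero divergence
assert KLdist(P, Q) > 0.0    # divergence is positive otherwise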
380796f3688c5ad8483ba50ddc940eb797e4a973
26,744
def homepage(request: HttpRequest) -> HttpResponse:
    """
    Render the home page of the application.
    """
    context = make_context(request)

    person = get_person(TARGET_NICK)
    if not person:
        return render(request, "404.html", status=404)
    context["person"] = person

    technology_set = person.technology_set.order_by(Length("name").asc()).all()
    context["technologies"] = columns_to_rows(technology_set)

    protocol_set = person.protocol_set.order_by(Length("name").asc()).all()
    context["protocols"] = columns_to_rows(protocol_set)

    return render(request, "index.html", context=context)
42f749a38543b456b603b5b7b923d5637ab84abe
26,746
def _call_rmat(scale, num_edges, create_using, mg):
    """
    Simplifies calling RMAT by requiring only specific args that are varied
    by these tests and hard-coding all others.
    """
    return rmat(scale=scale,
                num_edges=num_edges,
                a=0.1,
                b=0.2,
                c=0.3,
                seed=24,
                clip_and_flip=False,
                scramble_vertex_ids=True,
                create_using=create_using,
                mg=mg)
cf68a7e436919ad5296438708898eeb233112651
26,747
def value_loss(old_value):
    """value loss for ppo"""
    def loss(y_true, y_pred):
        vpredclipped = old_value + K.clip(y_pred - old_value,
                                          -LOSS_CLIPPING, LOSS_CLIPPING)
        # Unclipped value
        vf_losses1 = K.square(y_pred - y_true)
        # Clipped value
        vf_losses2 = K.square(vpredclipped - y_true)
        vf_loss = .5 * K.mean(K.maximum(vf_losses1, vf_losses2))
        return vf_loss
    return loss
0888a411be6fa7e41469d15a2798cbebda46db01
26,748
import torch
import warnings


def split_by_worker(urls):
    """Selects a subset of urls based on Torch get_worker_info.
    Used as a shard selection function in Dataset."""
    urls = list(urls)
    assert isinstance(urls, list)
    worker_info = torch.utils.data.get_worker_info()
    if worker_info is not None:
        wid = worker_info.id
        num_workers = worker_info.num_workers
        if wid == 0 and len(urls) < num_workers:
            warnings.warn(f"num_workers {num_workers} > num_shards {len(urls)}")
        return urls[wid::num_workers]
    else:
        return urls
1ddcf436fecc4359367b783f9c1c62fe84782468
26,749
def runge_kutta4(y, x, dx, f):
    """computes 4th order Runge-Kutta for dy/dx.

    Parameters
    ----------
    y : scalar
        Initial/current value for y
    x : scalar
        Initial/current value for x
    dx : scalar
        difference in x (e.g. the time step)
    f : ufunc(y, x)
        Callable function (y, x) that you supply to compute dy/dx for
        the specified values.
    """
    k1 = dx * f(y, x)
    k2 = dx * f(y + 0.5*k1, x + 0.5*dx)
    k3 = dx * f(y + 0.5*k2, x + 0.5*dx)
    k4 = dx * f(y + k3, x + dx)

    return y + (k1 + 2*k2 + 2*k3 + k4) / 6.
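# Illustrative usage (a sketch, not part of the original row): one RK4 step on
# dy/dx = y, whose exact solution is e^x, so y(0.1) should be close to exp(0.1).
import math

y1 = runge_kutta4(1.0, 0.0, 0.1, lambda y, x: y)
assert abs(y1 - math.exp(0.1)) < 1e-7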
0f79962a3bd7bbe49bd3ae3eff6d5496182fbea8
26,751
def genBinaryFileRDD(sc, path, numPartitions=None):
    """
    Read files from a directory to a RDD.

    :param sc: SparkContext.
    :param path: str, path to files.
    :param numPartitions: int, number of partitions to use for reading files.
    :return: RDD with a pair of key and value: (filePath: str, fileData: BinaryType)
    """
    numPartitions = numPartitions or sc.defaultParallelism
    rdd = sc.binaryFiles(path, minPartitions=numPartitions).repartition(numPartitions)
    # rdd = rdd.map(lambda x: (x[0], bytearray(x[1])))
    return rdd
85ef3c657b932946424e2c32e58423509f07ceae
26,752
import requests


def generate():
    """
    Generate a classic image quote

    :rtype: InspiroBotImageResponse
    :return: The generated response
    """
    try:
        r = requests.get("{}?generate=true".format(url()))
    except requests.RequestException:  # narrowed from a bare `except:`
        raise InsprioBotError("API request failed. Failed to connect")

    if r.status_code != 200:
        raise InsprioBotError("API request failed. Invalid response code ({})".format(r.status_code))

    return InspiroBotImageResponse(r.text)
bc9a49909d9191f922a5c781d9fc68c97de92456
26,753
import pickle

import numpy as np


def inference_lstm(im_path, model_path, tok_path, max_cap_len=39):
    """
    Perform inference using a model trained to predict LSTM.
    """
    tok = pickle.load(open(tok_path, 'rb'))
    model = load_model(
        model_path, custom_objects={'RepeatVector4D': RepeatVector4D})
    encoder = ImageEncoder(random_transform=False)
    im_encoding = encoder.process(im_path)

    def encode_partial_cap(partial_cap, im):
        input_text = [[tok.word_index[w]
                       for w in partial_cap if w in tok.word_index]]
        input_text = pad_sequences(input_text, maxlen=max_cap_len, padding='post')
        im = np.array([im])
        return [im, input_text]

    partial_cap = ['starttoken']
    EOS_TOKEN = 'endtoken'
    while True:
        inputs = encode_partial_cap(partial_cap, im_encoding)
        preds = model.predict(inputs)[0, len(partial_cap), :]
        next_idx = np.argmax(preds, axis=-1)
        next_word = tok.index_word[next_idx]
        # fixed: the length cap was hard-coded to 39, ignoring max_cap_len
        if next_word == EOS_TOKEN or len(partial_cap) == max_cap_len:
            break
        partial_cap.append(next_word)

    print(' '.join(partial_cap[1:]))
81cec1407b6227d7f65a697900467b16b2fce96e
26,754
from collections import deque


def iterative_proof_tree_bfs(rules_dict: RulesDict, root: int) -> Node:
    """Takes in an iterative pruned rules_dict and returns iterative proof tree."""
    root_node = Node(root)
    queue = deque([root_node])
    while queue:
        v = queue.popleft()
        rule = sorted(rules_dict[v.label])[0]
        if rule != ():
            children = [Node(i) for i in rule]
            queue.extend([child for child in children if child.label != root])
            v.children = children
    return root_node
c7ce6f1e48f9ac04f68b94e07dbcb82162b2abba
26,755
def get_host_buffer_init(arg_name, num_elem, host_data_type, host_init_val):
    """Get host code snippet: init host buffer"""
    src = get_snippet("snippet/clHostBufferInit.txt")
    src = src.replace("ARG_NAME", arg_name)
    src = src.replace("NUM_ELEM", str(num_elem))
    src = src.replace("HOST_DATA_TYPE", host_data_type)
    src = src.replace("HOST_INIT_VALUE", host_init_val)
    return src
7f94d2727a3c6f861f5c6402c5c8d2211d32dd71
26,756
def get_setting(setting, override=None):
    """Get setting.

    Get a setting from the `muses` conf module, falling back to the default.
    If neither the setting nor a default is available, `override` is returned.

    :param setting: String with setting name
    :param override: Value to use when no setting is available. Defaults to None.
    :return: Setting value.
    """
    attr_name = 'MUSES_{0}'.format(setting)
    if hasattr(settings, attr_name):
        return getattr(settings, attr_name)
    else:
        if hasattr(defaults, setting):
            return getattr(defaults, setting)
        else:
            return override
7e4a05ee3b077023e04693a37d3cbeaaa6025d8d
26,757
import re


def extract_year_month_from_key(key):
    """
    Given an AWS S3 `key` (str) for a file, extract and return the year (int)
    and month (int) specified in the key after 'ano=' and 'mes='.
    """
    year = int(re.search(r'ano=(\d{4})', key).group(1))
    # a digit-run match handles both one- and two-digit months
    # (the original fixed-width slice truncated months 10-12 to one digit)
    month = int(re.search(r'mes=(\d{1,2})', key).group(1))
    return year, month
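# Illustrative usage (a sketch, not part of the original row; the key layout
# is a hypothetical Hive-style partitioned path):
assert extract_year_month_from_key("bucket/ano=2020/mes=7/data.csv") == (2020, 7)
assert extract_year_month_from_key("bucket/ano=2019/mes=11/data.csv") == (2019, 11)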
b52dc08d393900b54fca3a4939d351d5afe0ef3c
26,758
def depolarize(p: float) -> DepolarizingChannel:
    r"""Returns a DepolarizingChannel with given probability of error.

    This channel applies one of four disjoint possibilities: nothing (the
    identity channel) or one of the three pauli gates. The disjoint
    probabilities of the three gates are all the same, p / 3, and the
    identity is done with probability 1 - p. The supplied probability must
    be a valid probability or else this constructor will raise a ValueError.

    This channel evolves a density matrix via (a raw docstring keeps the
    `\r` in `\rho` from being read as a carriage return)

        \rho -> (1 - p) \rho
                + (p / 3) X \rho X + (p / 3) Y \rho Y + (p / 3) Z \rho Z

    Args:
        p: The probability that one of the Pauli gates is applied. Each of
            the Pauli gates is applied independently with probability p / 3.

    Raises:
        ValueError: if p is not a valid probability.
    """
    return DepolarizingChannel(p)
247dd040844cdd3cd44336ca097b98fcf2f3cac3
26,759
import re


def verilog_to_circuit(
    netlist,
    name,
    infer_module_name=False,
    blackboxes=None,
    warnings=False,
    error_on_warning=False,
    fast=False,
):
    """
    Creates a new Circuit from a module inside Verilog code.

    Parameters
    ----------
    netlist: str
        Verilog code.
    name: str
        Module name.
    infer_module_name: bool
        If True and no module named `name` is found, parse the first
        module in the netlist.
    blackboxes: seq of BlackBox
        Blackboxes in module.
    warnings: bool
        If True, warnings about unused nets will be printed.
    error_on_warning: bool
        If True, unused nets will raise `VerilogParsingWarning` exceptions.
    fast: bool
        If True, uses the `fast_parse_verilog_netlist` function from
        parsing/fast_verilog.py. This function is faster for parsing very
        large netlists, but makes stringent assumptions about the netlist
        and does not provide error checking. Read the docstring for
        `fast_parse_verilog_netlist` in order to confirm that `netlist`
        adheres to these assumptions before using this flag.

    Returns
    -------
    Circuit
        Parsed circuit.
    """
    if blackboxes is None:
        blackboxes = []

    if fast:
        return fast_parse_verilog_netlist(netlist, blackboxes)

    # parse module (raw f-strings keep `\s` as a regex escape)
    regex = rf"(module\s+{name}\s*\(.*?\);(.*?)endmodule)"
    m = re.search(regex, netlist, re.DOTALL)
    try:
        module = m.group(1)
    except AttributeError:
        if infer_module_name:
            regex = rf"(module\s+(.*?)\s*\(.*?\);(.*?)endmodule)"
            m = re.search(regex, netlist, re.DOTALL)
            try:
                module = m.group(1)
            except AttributeError:
                raise ValueError("Could not read netlist: no modules found")
        else:
            raise ValueError(f"Could not read netlist: {name} module not found")

    return parse_verilog_netlist(module, blackboxes, warnings, error_on_warning)
4dc8e59ff8bea29f32e64219e3c38ed7bfec4aef
26,760
def load_all_channels(event_id=0):
    """Returns a 3-D dataset corresponding to all the electrodes for a single
    subject and a single event. The first two columns of X give the spatial
    dimensions, and the third dimension gives the time."""
    info = load_waveform_data(eeg_data_file())
    locs = load_channel_locs(channel_locs_file())
    nchan, ntime, nev = info.waveforms.shape
    X = np.zeros((0, 3))
    y = np.zeros(0)
    for c in range(nchan):
        curr_X = np.zeros((ntime, 3))
        curr_X[:, 0] = locs[c, 0]
        curr_X[:, 1] = locs[c, 1]
        curr_X[:, 2] = info.times
        curr_y = info.waveforms[c, :, event_id].astype(float)
        X = np.vstack([X, curr_X])
        y = np.concatenate([y, curr_y])
    return X, y
17dd6dfc196a3f88f4bf7f0128da1a0b027f9072
26,761
def parentheses_cleanup(xml):
    """Clean up where parentheses exist between paragraph and emphasis tags"""
    # We want to treat None's as blank strings
    def _str(x):
        return x or ""

    for em in xml.xpath("//P/*[position()=1 and name()='E']"):
        par = em.getparent()
        left, middle, right = _str(par.text), _str(em.text), _str(em.tail)
        has_open = '(' in left[-1:] + middle[:1]
        has_close = ')' in middle[-1:] + right[:1]

        if not left.endswith('(') and middle.startswith('(') and has_close:
            # Move '(' out
            par.text = _str(par.text) + "("
            em.text = em.text[1:]

        if middle.endswith(')') and not right.startswith(')') and has_open:
            # Move ')' out
            em.text = em.text[:-1]
            em.tail = ")" + _str(em.tail)
b5a476cd6fd9b6a2ab691fcec63a33e6260d48f2
26,762
import numpy


def filter_atoms(coordinates, num_atoms=None, morphology="sphere"):
    """
    Filter the atoms so that the crystal has a specific morphology with a
    given number of atoms

    Params:
        coordinates (array): The atom coordinates
        num_atoms (int): The number of atoms
        morphology (str): The morphology of the crystal

    Returns:
        array: The filtered coordinates
    """

    def filter_atoms_sphere(coordinates, num_atoms):
        # Get the centre of mass
        x = coordinates["x"]
        y = coordinates["y"]
        z = coordinates["z"]
        c = numpy.array([x, y, z]).T
        centre_of_mass = numpy.mean(c, axis=0)

        # Compute all square distances
        sq_distance = numpy.sum((c - centre_of_mass) ** 2, axis=1)

        # Get the selection of the closest n atoms
        index = numpy.argsort(sq_distance)
        return coordinates[index[0:num_atoms]]

    # If the number of atoms is not set then return as is
    if num_atoms is None or morphology is None:
        return coordinates

    # Check the number of atoms
    assert len(coordinates) >= num_atoms

    # Filter the atoms into the given morphology
    return {"sphere": filter_atoms_sphere}[morphology](coordinates, num_atoms)
9763f2c7b14a26d089bf58a4c7e82e2d4a0ae2bd
26,763
def weekly():
    """The weekly status page."""
    db = get_session(current_app)
    # select id, user_id, created, strftime('%Y%W', created),
    #     date(created, 'weekday 1'), content
    # from status order by 4, 2, 3;
    return render_template(
        'status/weekly.html',
        week=request.args.get('week', None),
        statuses=paginate(
            db.query(Status).filter_by(reply_to=None).order_by(
                desc(WeekColumnClause("created")),
                Status.user_id,
                desc(Status.created)),
            request.args.get('page', 1),
            startdate(request),
            enddate(request),
            per_page=100),
    )
b5c1e5a8d981fb217492241e8ee140898d47b633
26,765
import pathlib
from typing import List, Sequence


def parse_source_files(
    src_files: List[pathlib.Path],
    platform_overrides: Sequence[str],
) -> List[LockSpecification]:
    """
    Parse a sequence of dependency specifications from source files

    Parameters
    ----------
    src_files :
        Files to parse for dependencies
    platform_overrides :
        Target platforms to render meta.yaml files for
    """
    desired_envs = []
    for src_file in src_files:
        if src_file.name == "meta.yaml":
            desired_envs.append(
                parse_meta_yaml_file(src_file, list(platform_overrides))
            )
        elif src_file.name == "pyproject.toml":
            desired_envs.append(parse_pyproject_toml(src_file))
        else:
            desired_envs.append(parse_environment_file(src_file, pip_support))
    return desired_envs
47a6e66b56ca0d4acd60a6b388c9f58d0cccbb2c
26,766
def delete_registry(
        service_account_json, project_id, cloud_region, registry_id):
    """Deletes the specified registry."""
    # [START iot_delete_registry]
    print('Delete registry')
    client = get_client(service_account_json)
    registry_name = 'projects/{}/locations/{}/registries/{}'.format(
        project_id, cloud_region, registry_id)

    registries = client.projects().locations().registries()
    return registries.delete(name=registry_name).execute()
    # [END iot_delete_registry]
baa8cad0d324f2e564052822f9d17f45a581a397
26,767
def bundle(cls: type) -> Bundle:
    """
    # Bundle Definition Decorator

    Converts a class-body full of Bundle-storable attributes (Signals, other
    Bundles) to an `hdl21.Bundle`. Example Usage:

    ```python
    import hdl21 as h

    @h.bundle
    class Diff:
        p = h.Signal()
        n = h.Signal()

    @h.bundle
    class DisplayPort:
        main_link = Diff()
        aux = h.Signal()
    ```

    Bundles may also define a `Roles` enumeration, inline within this
    class-body. `Roles` are optional pieces of enumerated endpoint-labels
    which aid in dictating `Signal` directions. Each `Signal` accepts
    optional source (`src`) and destination (`dest`) fields which (if set)
    must be one of these roles.

    ```python
    import hdl21 as h
    from enum import Enum, auto

    @h.bundle
    class HasRoles:
        class Roles(Enum):
            HOST = auto()
            DEVICE = auto()

        tx = h.Signal(src=Roles.HOST, dest=Roles.DEVICE)
        rx = h.Signal(src=Roles.DEVICE, dest=Roles.HOST)
    ```
    """
    if cls.__bases__ != (object,):
        raise RuntimeError(f"Invalid @hdl21.bundle inheriting from {cls.__bases__}")

    # Create the Bundle object
    bundle = Bundle(name=cls.__name__)

    protected_names = ["signals", "bundles"]
    dunders = dict()
    unders = dict()

    # Take a lap through the class dictionary, type-check everything
    # and assign relevant attributes to the bundle
    for key, val in cls.__dict__.items():
        if key in protected_names:
            raise RuntimeError(f"Invalid field name {key} in bundle {cls}")
        elif key.startswith("__"):
            dunders[key] = val
        elif key.startswith("_"):
            unders[key] = val
        elif key == "Roles":
            # Special-case the upper-cased `Roles`, as it'll often be a class-def
            setattr(bundle, "roles", val)
        else:
            setattr(bundle, key, val)

    # And return the bundle
    return bundle
bf1b68791dbdc5b6350d561db4d784ff92c0bbae
26,768
import calendar
import time


def previousMidnight(when):
    """Given a time_t 'when', return the greatest time_t <= when that
    falls on midnight, GMT."""
    yyyy, MM, dd = time.gmtime(when)[0:3]
    return calendar.timegm((yyyy, MM, dd, 0, 0, 0, 0, 0, 0))
0821eb46115a1e5b1489c4f4dbff78fab1d811b5
26,769
def compute_net_results(net, archname, test_data, df):
    """
    For a given network, test on appropriate test data and return dataframes
    with results and predictions (named obviously)
    """
    pretrain_results = []
    pretrain_predictions = []
    tune_results = []
    tune_predictions = []

    for idx in range(5):
        results_df, predictions_df = compute_pretrained_results(
            net, archname, idx, test_data)
        pretrain_results.append(results_df)
        pretrain_predictions.append(predictions_df)
    pretrain_results = pd.concat(pretrain_results, axis=1)

    for idx in range(5):
        for test_idx in range(5):
            results_df, predictions_df = compute_tuned_results(
                net, archname, idx, test_idx, test_data, df)
            tune_results.append(results_df)
            tune_predictions.append(predictions_df)
    tune_results = pd.concat(tune_results, axis=1, join='inner').stack().unstack()

    return pretrain_results, pretrain_predictions, tune_results, tune_predictions
b971f269bbee7e48f75327e3b01d73c77ec1f06c
26,770
def _distance_along_line(start, end, distance, dist_func, tol):
    """Point at a distance from start on the segment from start to end.

    It doesn't matter which coordinate system start is given in, as long
    as dist_func takes points in that coordinate system.

    Parameters
    ----------
    start : tuple
        Starting point for the line.
    end : tuple
        Outer bound on point's location.
    distance : float
        Positive distance to travel.
    dist_func : callable
        Two-argument function which returns distance.
    tol : float
        Relative error in distance to allow.

    Returns
    -------
    np.ndarray, shape (2, 1)
        Coordinates of a point.
    """
    initial_distance = dist_func(start, end)
    if initial_distance < distance:
        raise ValueError("End is closer to start ({}) than "
                         "given distance ({}).".format(
                             initial_distance, distance))
    if tol <= 0:
        raise ValueError("Tolerance is not positive: {}".format(tol))

    # Binary search for a point at the given distance.
    left = start
    right = end
    while not np.isclose(dist_func(start, right), distance, rtol=tol):
        midpoint = (left + right) / 2
        # If midpoint is too close, search in second half.
        if dist_func(start, midpoint) < distance:
            left = midpoint
        # Otherwise the midpoint is too far, so search in first half.
        else:
            right = midpoint
    return right
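# Illustrative usage (a sketch, not part of the original row; assumes numpy
# arrays for the endpoints and a plain Euclidean distance function):
import numpy as np

def euclid(p, q):
    return float(np.hypot(*(np.asarray(q) - np.asarray(p))))

pt = _distance_along_line(np.array([0.0, 0.0]), np.array([10.0, 0.0]),
                          distance=4.0, dist_func=euclid, tol=1e-9)
assert np.allclose(pt, [4.0, 0.0], atol=1e-6)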
2f168b068cc434fe9280e2cdf84ae3f0f93eb844
26,771
def exploits_listing(request, option=None):
    """
    Generate the Exploit listing page.

    :param request: Django request.
    :type request: :class:`django.http.HttpRequest`
    :param option: Action to take.
    :type option: str of either 'jtlist', 'jtdelete', 'csv', or 'inline'.
    :returns: :class:`django.http.HttpResponse`
    """
    user = request.user
    if user.has_access_to(ExploitACL.READ):
        if option == "csv":
            return generate_exploit_csv(request)
        return generate_exploit_jtable(request, option)
    else:
        return render_to_response(
            "error.html",
            {'error': 'User does not have permission to view Exploit listing.'},
            RequestContext(request))
941ab4e3da6273f17f4a180aebe62d35f7133080
26,773
from typing import Sequence, Tuple


def find_command(tokens: Sequence[str]) -> Tuple[Command, Sequence[str]]:
    """Looks up a command based on tokens and returns the command if it
    was found, or None if it wasn't."""
    if len(tokens) >= 3 and tokens[1] == '=':
        var_name = tokens[0]
        command_string = tokens[2:]
        rvalue, args2 = find_command(command_string)
        if not rvalue:
            raise KeyError('could not find command: %s' % ' '.join(command_string))
        return AssignCommand(var_name, rvalue), args2
    elif tokens[0] in aliased_commands:
        return aliased_commands[tokens[0]], tokens[1:]
    else:
        return None, tokens
4b46b03f6dd0fb4a6cbcda855029d0a42958a49f
26,774
def clean(df: pd.DataFrame,
          completelyInsideOtherBias: float = 0.7,
          filterCutoff: float = 0.65,
          algo: str = "jaccard",
          readFromFile: bool = True,
          writeToFile: bool = True,
          doBias: bool = True) -> pd.DataFrame:
    """Main function to completely clean a restaurant dataset.

    Args:
        df: a pandas DataFrame
        completelyInsideOtherBias: float parameter for the bias function
        filterCutoff: float parameter specifying at which distance value to
            cut off the distance list
        algo: to use for text distance comparison, default = "jaccard"
        readFromFile: if a cached text distance matrix should be read from
            a file, default = True
        writeToFile: if the calculated text distance matrix should be
            written to a file, default = True
        doBias: if the bias function should be applied, default = True

    Returns:
        a deduplicated pandas DataFrame
    """
    global eqRing
    df = preProcess(df)
    distances = calcDistances(df.cname.unique(), completelyInsideOtherBias,
                              algo, readFromFile, writeToFile, doBias)
    filteredDistances = list(filter(lambda x: x[2] >= filterCutoff, distances))
    eqRing = convertToEqualityRings(filteredDistances)
    return dedupe(df, eqRing)
6c7873958c61bab357abe1d099c57e681c265067
26,775
def create_list(value, sublist_nb, sublist_size):
    """
    Create a list of sublist_nb sublists, each of length sublist_size.
    Each sublist is filled with the value `value`.
    """
    out = []
    tmp = []
    for i in range(sublist_nb):
        for j in range(sublist_size):
            tmp.append(value)
        out.append(tmp)
        tmp = []
    return out
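# Illustrative usage (a sketch, not part of the original row):
assert create_list(0, 2, 3) == [[0, 0, 0], [0, 0, 0]]  # 2 sublists of 3 zeros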
1ecf6c88390167584d1835430c359a7ed6d6b40b
26,776
import re
from datetime import datetime


def parse_time(date_str: str) -> datetime:
    """
    Parses out a string-formatted date into a well-structured datetime in
    UTC. Supports any of the following formats:

    - hh:mm
      In this format, we treat the value of the hh section to be 24hr
      format. If a user types in 1:00, it will be interpreted as 1am,
      not 1pm.
    - hh:mm(am|pm)
      In this format, we treat the value of the hh section to be 12hr
      format, and we rely on the am/pm flag to determine whether it is in
      the morning or the afternoon.
    """
    match = re.match(r"(\d?\d):(\d\d)(am|pm)?", date_str)
    if match is None:
        raise ValueError()
    groups = match.groups()
    hour = int(groups[0])
    minute = int(groups[1])
    if groups[2] == "pm" and hour < 12:
        hour += 12
    now = get_now()
    time = datetime(
        year=now.year,
        month=now.month,
        day=now.day,
        hour=hour,
        minute=minute,
        second=0,
        microsecond=0,
    )
    return time
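# Illustrative usage (a sketch, not part of the original row; `get_now` is a
# stand-in for the undefined helper the function expects):
from datetime import datetime

def get_now():
    # hypothetical helper returning the current UTC datetime
    return datetime.utcnow()

assert parse_time("1:30").hour == 1      # 24hr reading
assert parse_time("1:30pm").hour == 13   # pm flag adds 12 hours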
6009342d1e9c1c3f9b255758adf685e4fe7ca2f0
26,778
def gen_r_cr():
    """
    Generate the R-Cr table.
    """
    r_cr = [0] * 256
    for i in range(256):
        r_cr[i] = int(1.40199 * (i - 128))
    return r_cr
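# Illustrative usage (a sketch, not part of the original row): the table maps
# a Cr byte to its red contribution in YCbCr -> RGB conversion,
# R = Y + 1.402 * (Cr - 128), precomputed for all 256 Cr values.
table = gen_r_cr()
assert table[128] == 0                      # neutral chroma adds nothing
assert table[255] == int(1.40199 * 127)     # maximum positive contribution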
43e014bb62c40d038c5fbd124e834e98e9edb5e3
26,779
def fallible_to_exec_result_or_raise(
    fallible_result: FallibleProcessResult, description: ProductDescription
) -> ProcessResult:
    """Converts a FallibleProcessResult to a ProcessResult or raises an error."""
    if fallible_result.exit_code == 0:
        return ProcessResult(
            stdout=fallible_result.stdout,
            stdout_digest=fallible_result.stdout_digest,
            stderr=fallible_result.stderr,
            stderr_digest=fallible_result.stderr_digest,
            output_digest=fallible_result.output_digest,
        )
    raise ProcessExecutionFailure(
        fallible_result.exit_code,
        fallible_result.stdout,
        fallible_result.stderr,
        description.value,
    )
6b46a78897f0fcbd10e4a0c9b733f1834f638af0
26,780
import geometry_msgs.msg
from skrobot.coordinates import Coordinates

def tf_pose_to_coords(tf_pose):
    """Convert TransformStamped to Coordinates

    Parameters
    ----------
    tf_pose : geometry_msgs.msg.Transform or geometry_msgs.msg.TransformStamped
        transform pose.

    Returns
    -------
    ret : skrobot.coordinates.Coordinates
        converted coordinates.
    """
    if isinstance(tf_pose, geometry_msgs.msg.Transform):
        transform = tf_pose
    elif isinstance(tf_pose, geometry_msgs.msg.TransformStamped):
        transform = tf_pose.transform
    else:
        raise TypeError('{} not supported'.format(type(tf_pose)))
    if transform.rotation.w == 0.0 and \
       transform.rotation.x == 0.0 and \
       transform.rotation.y == 0.0 and \
       transform.rotation.z == 0.0:
        transform.rotation.w = 1.0
    return Coordinates(pos=[transform.translation.x,
                            transform.translation.y,
                            transform.translation.z],
                       rot=[transform.rotation.w,
                            transform.rotation.x,
                            transform.rotation.y,
                            transform.rotation.z])
3bfaf7d566e90c9ac0c0d8a34060497e2c0c0f78
26,781
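A hedged usage sketch for tf_pose_to_coords; it only runs where ROS geometry_msgs and skrobot are installed, and it exercises the all-zero-quaternion branch.

from geometry_msgs.msg import Transform

# A default-constructed Transform carries an all-zero quaternion; the function
# substitutes the identity rotation for it.
t = Transform()
t.translation.x = 1.0
coords = tf_pose_to_coords(t)
print(coords.translation)  # should print something like [1. 0. 0.]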
import numpy as np

def make_graph_indep_graphnet_functions(units,
        node_or_core_input_size,
        node_or_core_output_size = None,
        edge_input_size = None, edge_output_size = None,
        global_input_size = None, global_output_size = None, aggregation_function = 'mean', **kwargs):
    """
    A wrapper that creates the functions that are needed for a graph-independent GN block.
    Takes care of some flags that control a more general factory method for avoiding clutter.

    Usage:
      gn_core = GraphNet(**make_graph_indep_graphnet_functions(15, 20))

    * If only "node_or_core_input_size" is defined, the rest of the inputs are assumed the same.
    * If only "node_or_core_input_size" and "node_or_core_output_size" are defined, then all
      corresponding input and output sizes are the same.

    Parameters:
      units: the width of the created MLPs
      node_or_core_input_size  : the input shape of the node MLP (or the input size of global and edge MLPs if no other input is defined).
      node_or_core_output_size : the output shape of the node MLP (or the output sizes of global and edge MLPs if no other inputs are defined).
      edge_input_size          : [None] the edge state input size
      edge_output_size         : [None] the edge state output size
      global_input_size        : [None] ...
      global_output_size       : [None] ...
    """
    if node_or_core_input_size is None:
        raise ValueError("You should provide node_or_core_input_size (or the sizes of all the involved states)!")

    if node_or_core_output_size is None:
        node_or_core_output_size = node_or_core_input_size
    if edge_input_size is None:
        edge_input_size = node_or_core_input_size
    if edge_output_size is None:
        edge_output_size = node_or_core_output_size
    if global_input_size is None:
        global_input_size = node_or_core_input_size
    if global_output_size is None:
        global_output_size = node_or_core_output_size

    # Just in case it is called from another wrapper that uses kwargs, check if the named inputs are repeated:
    kwargs_forbidden = ['graph_indep', 'create_global_function', 'use_global_input', 'use_global_to_edge','use_global_to_node']
    assert(np.all([k not in kwargs_forbidden for k in kwargs.keys()]))

    return make_mlp_graphnet_functions(units,
            node_or_core_input_size, node_or_core_output_size,
            edge_input_size = edge_input_size, edge_output_size = edge_output_size,
            global_output_size = global_output_size, global_input_size = global_input_size,
            use_global_input = True, use_global_to_edge=False, use_global_to_node=False,
            create_global_function=True, graph_indep=True,
            aggregation_function = aggregation_function, **kwargs)
63694e7765896d0b369b65d4362edca29b6592d0
26,782
import typing def is_generic(t): """ Checks if t is a subclass of typing.Generic. The implementation is done per Python version, as the typing module has changed over time. Args: t (type): Returns: bool """ # Python 3.6, 3.5 if hasattr(typing, "GenericMeta"): if isinstance(t, typing.GenericMeta): return True else: # Python 3.7+ try: if typing.Generic in t.mro(): return True except AttributeError: pass return False
b085d7799ffe034b4bdeccea250d36f4ff372aea
26,783
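A small check of is_generic across both code paths: a user-defined class subclassing typing.Generic is detected on Python 3.5-3.6 via GenericMeta and on 3.7+ via the MRO.

import typing

T = typing.TypeVar("T")

class Box(typing.Generic[T]):
    pass

assert is_generic(Box) is True   # subclasses typing.Generic
assert is_generic(int) is False  # plain builtin type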
def getGpsTime(dt):
    """getGpsTime returns GPS time (seconds since midnight Sat/Sun) for a datetime
    """
    total = 0
    days = (dt.weekday() + 1) % 7  # this makes Sunday = 0, Monday = 1, etc.
    total += days * 3600 * 24
    total += dt.hour * 3600
    total += dt.minute * 60
    total += dt.second
    return total
16caa558741d8d65b4b058cf48a591ca09f82234
26,784
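A worked check for getGpsTime: 2021-01-04 was a Monday, so the offset into the GPS week (Sunday = day 0) is one full day plus the time of day.

from datetime import datetime

# Monday 01:00 is one day plus one hour into the GPS week.
assert getGpsTime(datetime(2021, 1, 4, 1, 0, 0)) == 24 * 3600 + 3600  # 90000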
# NOTE: the original import here ("from re import T") was broken: re.T is the
# re.TEMPLATE flag, not a transforms module. "T" is assumed to be a DETR-style
# transforms module (it must provide RandomResize with a max_size argument,
# which plain torchvision.transforms does not).
import datasets.transforms as T

def make_support_transforms():
    """
    Transforms for support images during inference stage.

    For transforms of support images during training, please visit dataset.py and dataset_fewshot.py
    """
    normalize = T.Compose([
        T.ToTensor(),
        T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])

    scales = [512, 528, 544, 560, 576, 592, 608, 624, 640, 656, 672, 688, 704]

    return T.Compose([
        T.RandomHorizontalFlip(),
        T.RandomResize(scales, max_size=768),
        normalize,
    ])
38d17b780fc8faf6a77074c53ef36733feb1f756
26,785
from collections import defaultdict

def single_label_normal_score(y_pred, y_gold):
    """
    this function will compute the score by simple exact-match comparison
    Example 1:
        y_pred=[1,2,3]
        y_gold=[2,2,3]
        score is 2/3
    Example 2:
        it can also compute the score the same way but for each label
        y_pred=[1,2,3,2,3]
        y_gold=[2,2,3,1,3]
        in this case we see that for label "1", it appears once in y_pred, and not in y_gold,
        thus accuracy for "1" is 0. Similarly, label "2" appears twice in y_pred, and once it
        matches y_gold, thus accuracy for "2" is 1/2. Likewise, the accuracy for "3" is 1
    :param y_pred: a list of labels, must be same length as y_gold
    :param y_gold: a list of labels, must be same length as y_pred
    :return: total_score: float of the total score calculated by example 1
             label_wise_accuracy: a dictionary, where keys are labels, values are float scores
             of the label calculated by example 2
    """
    assert len(y_pred) == len(y_gold), 'y_pred and y_gold need to have same length'
    count = 0
    label_wise_score = defaultdict(lambda: defaultdict(int))
    for index, pred in enumerate(y_pred):
        gold = y_gold[index]
        if pred == gold:
            count += 1
            label_wise_score[pred]['total'] += 1
            label_wise_score[pred]['correct'] += 1
        else:
            label_wise_score[pred]['total'] += 1
    label_wise_accuracy = dict()
    for label in label_wise_score.keys():
        try:
            rate = label_wise_score[label]['correct'] / label_wise_score[label]['total']
        except ZeroDivisionError:
            rate = 0
        label_wise_accuracy[label] = rate
    total_score = count / len(y_gold)
    return total_score, label_wise_accuracy
3f906aca6cc5280b932c2dc0a73bfcedad63bd65
26,786
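The second docstring example of single_label_normal_score, run end to end: three of five predictions match, and the per-label rates come out as described.

total, per_label = single_label_normal_score([1, 2, 3, 2, 3], [2, 2, 3, 1, 3])
assert total == 3 / 5
assert per_label == {1: 0.0, 2: 0.5, 3: 1.0}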
import numpy as np

def get_tuning_curves(
    spike_times: np.ndarray,
    variable_values: np.ndarray,
    bins: np.ndarray,
    n_frames_sample=10000,
    n_repeats: int = 10,
    sample_frac: float = 0.4,
) -> dict:
    """
    Get tuning curves of firing rate wrt variables.
    Spike times and variable values are both in milliseconds

    Returns a dictionary of n_repeats values at each bin in bins
    with the firing rate for a random sample of the data.
    """
    # get max 1 spike per 1ms bin
    spike_times = np.unique(spike_times.astype(int))  # in ms

    # get which frames are in which bin
    in_bin_indices = get_samples_in_bin(variable_values, bins)

    # get tuning curves
    tuning_curves = {(v + bins[1] - bins[0]): [] for v in bins[:-1]}
    for _ in range(n_repeats):
        # sample n_frames_sample frames from each bin
        # (bins with too few frames get an empty sample and yield NaN below)
        sampled_frames = [
            np.random.choice(v, size=n_frames_sample, replace=True)
            if len(v) > n_frames_sample / 3
            else []
            for v in in_bin_indices.values()
        ]

        # get firing rate for each bin
        for i, b in enumerate(tuning_curves.keys()):
            if len(sampled_frames[i]) > 0:
                # get spikes in the bin's sampled frames
                spikes_in_bin = spike_times[
                    np.isin(spike_times, sampled_frames[i])
                ]
                tuning_curves[b].append(
                    len(spikes_in_bin) / n_frames_sample * 1000
                )  # in Hz
            else:
                tuning_curves[b].append(np.nan)

    return tuning_curves
ecdc4a71cc5a65dbb51dd69ef7a710c8ff596fec
26,787
def get_custom_feeds_ip_list(client: PrismaCloudComputeClient) -> CommandResults: """ Get all the BlackListed IP addresses in the system. Implement the command 'prisma-cloud-compute-custom-feeds-ip-list' Args: client (PrismaCloudComputeClient): prisma-cloud-compute client. Returns: CommandResults: command-results object. """ if feeds := client.get_custom_ip_feeds(): if "modified" in feeds: feeds["modified"] = parse_date_string_format(date_string=feeds.get("modified", "")) if "_id" in feeds: feeds.pop("_id") table = tableToMarkdown( name="IP Feeds", t=feeds, headers=["modified", "feed"], removeNull=True, headerTransform=lambda word: word[0].upper() + word[1:] ) else: table = "No results found." return CommandResults( outputs_prefix="PrismaCloudCompute.CustomFeedIP", outputs=feeds, readable_output=table, outputs_key_field="digest", raw_response=feeds )
bcaf44dcefe0fda10943b29cae5e9ba72e561e27
26,789
import logging
from logging.handlers import RotatingFileHandler, TimedRotatingFileHandler

def setup_new_file_handler(logger_name, log_level, log_filename, formatter, filter=None):
    """
    Sets up new file handler for given logger
    :param logger_name: name of logger to which filelogger is added
    :param log_level: logging level
    :param log_filename: path to log file
    :param formatter: formatter for file logger
    :param filter: filter for file logger
    :return: logging.FileHandler object
    """
    global write_mode
    global _kind
    global _interval
    global _backup_count
    global _compress_after_rotation
    global _compress_command
    global _compressed_file_extension
    logger = logging.getLogger(logger_name)
    if _kind is None:
        cfh = logging.FileHandler(log_filename, write_mode)
    elif _kind == 'time':
        if _compress_after_rotation:
            cfh = CompressedTimedRotatingFileHandler(compress_command=_compress_command,
                                                     compressed_file_extension=_compressed_file_extension,
                                                     filename=log_filename, when='S', interval=_interval,
                                                     backupCount=_backup_count)
        else:
            cfh = TimedRotatingFileHandler(filename=log_filename, when='S', interval=_interval,
                                           backupCount=_backup_count)
    else:
        if _compress_after_rotation:
            cfh = CompressedRotatingFileHandler(compress_command=_compress_command,
                                                compressed_file_extension=_compressed_file_extension,
                                                filename=log_filename, mode=write_mode,
                                                backupCount=_backup_count, maxBytes=_interval)
        else:
            cfh = RotatingFileHandler(filename=log_filename, mode=write_mode,
                                      backupCount=_backup_count, maxBytes=_interval)
    cfh.setLevel(log_level)
    cfh.setFormatter(formatter)
    if filter:
        cfh.addFilter(filter)
    logger.addHandler(cfh)
    return cfh
1ffd48250d17232eea94f13dd52628993ea04c2e
26,791
import hashlib def _gen_version(fields): """Looks at BotGroupConfig fields and derives a digest that summarizes them. This digest is going to be sent to the bot in /handshake, and bot would include it in its state (and thus send it with each /poll). If server detects that the bot is using older version of the config, it would ask the bot to restart. Args: fields: dict with BotGroupConfig fields (without 'version'). Returns: A string that going to be used as 'version' field of BotGroupConfig tuple. """ # Just hash JSON representation (with sorted keys). Assumes it is stable # enough. Add a prefix and trim a bit, to clarify that is it not git hash or # anything like that, but just a dumb hash of the actual config. fields = fields.copy() fields['auth'] = [a._asdict() for a in fields['auth']] digest = hashlib.sha256(utils.encode_to_json(fields)).hexdigest() return 'hash:' + digest[:14]
0052c655ca355182d0e962e37ae046f63d1a5066
26,792
import logging

def get_callee_account(global_state, callee_address, dynamic_loader):
    """
    Gets the callee's account from the global state
    :param global_state: state to look in
    :param callee_address: address of the callee
    :param dynamic_loader: dynamic loader to use
    :return: Account belonging to callee
    """
    environment = global_state.environment
    accounts = global_state.accounts

    try:
        return global_state.accounts[callee_address]
    except KeyError:
        # We have a valid call address, but contract is not in the modules list
        logging.info("Module with address " + callee_address + " not loaded.")

    if dynamic_loader is None:
        raise ValueError("dynamic loader is not set")

    logging.info("Attempting to load dependency")

    try:
        code = dynamic_loader.dynld(environment.active_account.address, callee_address)
    except Exception as e:
        logging.info("Unable to execute dynamic loader: " + str(e))
        raise ValueError("unable to load dependency " + callee_address)

    if code is None:
        logging.info("No code returned, not a contract account?")
        raise ValueError("no code returned for " + callee_address)

    logging.info("Dependency loaded: " + callee_address)

    callee_account = Account(callee_address, code, callee_address, dynamic_loader=dynamic_loader)
    accounts[callee_address] = callee_account

    return callee_account
28a95b2155b1f72a2683ac7c7029d1d5739305f3
26,794
from typing import Iterable def get_products_with_summaries() -> Iterable[ProductWithSummary]: """ The list of products that we have generated reports for. """ index_products = {p.name: p for p in STORE.all_dataset_types()} products = [ (index_products[product_name], get_product_summary(product_name)) for product_name in STORE.list_complete_products() ] if not products: raise RuntimeError( "No product reports. " "Run `python -m cubedash.generate --all` to generate some." ) return products
0d9e23fecfebd66ba251bc62b750e2d43b20c7fa
26,795
def _round_to_4(v): """Rounds up for aligning to the 4-byte word boundary.""" return (v + 3) & ~3
c79736b4fe9e6e447b59d9ab033181317e0b80de
26,797
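The bit trick in _round_to_4: adding 3 and then clearing the two low bits rounds any non-negative integer up to the next multiple of 4.

assert _round_to_4(0) == 0
assert _round_to_4(5) == 8
assert _round_to_4(8) == 8   # already aligned values are unchanged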
def bool_like(value, name, optional=False, strict=False): """ Convert to bool or raise if not bool_like Parameters ---------- value : object Value to verify name : str Variable name for exceptions optional : bool Flag indicating whether None is allowed strict : bool If True, then only allow bool. If False, allow types that support casting to bool. Returns ------- converted : bool value converted to a bool """ if optional and value is None: return value extra_text = ' or None' if optional else '' if strict: if isinstance(value, bool): return value else: raise TypeError('{0} must be a bool{1}'.format(name, extra_text)) if hasattr(value, 'squeeze') and callable(value.squeeze): value = value.squeeze() try: return bool(value) except Exception: raise TypeError('{0} must be a bool (or bool-compatible)' '{1}'.format(name, extra_text))
42d16ae228140a0be719fbd238bdc25dafc1cb64
26,799
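A few calls exercising bool_like's optional and strict switches.

assert bool_like(True, "flag") is True
assert bool_like(1.0, "flag") is True                  # bool-compatible cast
assert bool_like(None, "flag", optional=True) is None  # None allowed when optional

try:
    bool_like(1.0, "flag", strict=True)
except TypeError as err:
    print(err)  # "flag must be a bool"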
import matplotlib.pyplot as plt
import numpy as np

def process_2d_sawtooth(data, period, samplerate, resolution, width, verbose=0,
                        start_zero=True, fig=None):
    """ Extract a 2D image from a double sawtooth signal

    Args:
        data (numpy array): measured trace
        period (float): period of the full signal
        samplerate (float): sample rate of the acquisition device
        resolution (list): resolution nx, ny. The nx corresponds to the fast oscillating sawtooth
        width (list of float): width parameter of the sawtooth signals
        verbose (int): verbosity level
        start_zero (bool): Default is True
        fig (int or None): figure handle

    Returns:
        processed_data (list of arrays): the extracted 2D arrays
        results (dict): contains metadata
    """
    npoints_expected = int(period * samplerate)  # expected number of points
    npoints = data.shape[0]
    nchannels = data.shape[1]
    period_x = period / (resolution[1])
    period_y = period
    if verbose:
        print('process_2d_sawtooth: expected %d data points, got %d' % (npoints_expected, npoints,))
    if np.abs(npoints - npoints_expected) > 0:
        raise Exception('process_2d_sawtooth: expected %d data points, got %d' % (npoints_expected, npoints,))

    full_trace = False
    if start_zero and (not full_trace):
        padding_x_time = ((1 - width[0]) / 2) * period_x
        padding_y_time = ((1 - width[1]) / 2) * period_y
        sawtooth_centre_pixels = ((1 - width[1]) / 2 + .5 * width[1]) * period * samplerate
        start_forward_slope_step_pixels = ((1 - width[1]) / 2) * period * samplerate
        end_forward_slope_step_pixels = ((1 - width[1]) / 2 + width[1]) * period * samplerate
    else:
        if full_trace:
            padding_x_time = 0
            padding_y_time = 0
            sawtooth_centre_pixels = .5 * width[1] * period * samplerate
        else:
            padding_x_time = 0
            padding_y_time = 0
            sawtooth_centre_pixels = .5 * width[1] * period * samplerate
        start_forward_slope_step_pixels = 0
        end_forward_slope_step_pixels = (width[1]) * period * samplerate

    padding_x = int(padding_x_time * samplerate)
    padding_y = int(padding_y_time * samplerate)
    width_horz = width[0]
    width_vert = width[1]
    res_horz = int(resolution[0])
    res_vert = int(resolution[1])

    if resolution[0] % 32 != 0 or resolution[1] % 32 != 0:
        # due to rounding of the digitizer memory buffers, other resolutions are not supported
        raise Exception('resolution for digitizer is not a multiple of 32 (%s) ' % (resolution,))

    if full_trace:
        npoints_forward_x = int(res_horz)
        npoints_forward_y = int(res_vert)
    else:
        npoints_forward_x = int(width_horz * res_horz)
        npoints_forward_y = int(width_vert * res_vert)

    if verbose:
        print('process_2d_sawtooth: number of points in forward trace (horizontal) %d, vertical %d' % (npoints_forward_x, npoints_forward_y,))
        print('  horizontal mismatch %d/%.1f' % (npoints_forward_x, width_horz * period_x * samplerate))

    processed_data = []
    row_offsets = res_horz * np.arange(0, npoints_forward_y).astype(int) + int(padding_y) + int(padding_x)
    for channel in range(nchannels):
        row_slices = [data[idx:(idx + npoints_forward_x), channel] for idx in row_offsets]
        processed_data.append(np.array(row_slices))

    if verbose:
        print('process_2d_sawtooth: processed_data shapes: %s' % ([array.shape for array in processed_data]))

    if fig is not None:
        # plot2Dline is an external plotting helper assumed to be available in this module
        pixel_to_axis = 1. / samplerate
        times = np.arange(npoints) / samplerate
        plt.figure(fig)
        plt.clf()
        plt.plot(times, data[:, :], '.-', label='raw data')
        plt.title('Processing of digitizer trace')
        plt.axis('tight')

        for row_offset in row_offsets:
            plot2Dline([-1, 0, pixel_to_axis * row_offset, ], ':', color='r', linewidth=.8, alpha=.5)
        plot2Dline([-1, 0, pixel_to_axis * sawtooth_centre_pixels], '-c', linewidth=1, label='centre of sawtooth', zorder=-10)
        plot2Dline([0, -1, 0, ], '-', color=(0, 1, 0, .41), linewidth=.8)
        plot2Dline([-1, 0, pixel_to_axis * start_forward_slope_step_pixels], ':k', label='start of step forward slope')
        plot2Dline([-1, 0, pixel_to_axis * end_forward_slope_step_pixels], ':k', label='end of step forward slope')
        plot2Dline([-1, 0, 0, ], '-', color=(0, 1, 0, .41), linewidth=.8, label='start trace')
        plot2Dline([-1, 0, period, ], '-', color=(0, 1, 0, .41), linewidth=.8, label='end trace')
        # plot2Dline([0, -1, data[0,3], ], '--', color=(1, 0, 0, .41), linewidth=.8, label='first value of data')
        plt.legend(numpoints=1)

        if verbose >= 2:
            plt.figure(fig + 10)
            plt.clf()
            plt.plot(row_slices[0], '.-r', label='first trace')
            plt.plot(row_slices[-1], '.-b', label='last trace')
            plt.plot(row_slices[int(len(row_slices) / 2)], '.-c')
            plt.legend()

    return processed_data, {'row_offsets': row_offsets, 'period': period}
f7b212d54637e04294cda336e154910eb718b3e5
26,800
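A hedged, synthetic smoke test for process_2d_sawtooth: the values below are chosen so the trace length equals period * samplerate exactly and the resolution is a multiple of 32, as the function requires; plot2Dline is only needed when fig is not None, so no plotting helpers are assumed here.

import numpy as np

period, samplerate = 1.0, 1024.0
data = np.random.rand(int(period * samplerate), 1)   # one channel of fake trace

images, meta = process_2d_sawtooth(data, period, samplerate,
                                   resolution=[32, 32], width=[0.9, 0.9])
print(images[0].shape)          # (28, 28): int(0.9 * 32) forward-slope points per axis
print(meta['row_offsets'][:3])  # start index of the first few extracted rows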
def lower_threshold_projection(projection, thresh=1e3):
    """
    An ugly but effective workaround to get a higher-resolution curvature
    of the great-circle paths. This is useful when plotting the great-circle
    paths in a relatively small region.

    Parameters
    ----------
    projection : class
        Should be one of the cartopy projection classes, e.g.,
        cartopy.crs.Mercator

    thresh : float
        Smaller values achieve higher resolutions. Default is 1e3

    Returns
    -------
    Instance of the input (`projection`) class

    Example
    -------
    proj = lower_threshold_projection(cartopy.crs.Mercator, thresh=1e3)

    Note that the cartopy.crs.Mercator was not initialized (i.e., there are
    no brackets after the word `Mercator`)
    """
    class LowerThresholdProjection(projection):

        @property
        def threshold(self):
            return thresh

    return LowerThresholdProjection()
165c657f1ec875f23df21ef412135e27e9e443c6
26,801
def post_benchmark(name, kwargs=None): """ Postprocess benchmark """ if kwargs is None: kwargs = {} post = benchmarks[name].post return post(**kwargs)
900f60435ae31e8ec09312f23ab9f98a723d22af
26,802
def tag_view_pagination_counts(request,hc,urls,tag_with_items): """ retrieve the pagination counts for a tag """ hc.browser.get(urls['https_authority']) try: po = hc.catalog.load_pageobject('TagsPage') po.goto_page() po.search_for_content([tag_with_items]) po = hc.catalog.load_pageobject('TagsViewPage') # counts has (pagination_start, pagination_end, pagination_total) counts = po.get_pagination_counts() current_url = po.current_url() finally: hc.browser.close() r = {'start' : counts[0], 'end' : counts[1], 'total' : counts[2], 'url' : current_url} return r
7130ffdbb2b8f90fadf1a159d15726012511ddee
26,803
from docutils import nodes

def arts_docserver_role(name, rawtext, text, lineno, inliner,
                        options=None, content=None):
    """Create a link to ARTS docserver.

    Parameters:
        name (str): The role name used in the document.
        rawtext (str): The entire markup snippet, with role.
        text (str): The text marked with the role.
        lineno (int): The line number where rawtext appears in the input.
        inliner (Inliner): The inliner instance that called us.
        options (dict): Directive options for customization.
        content (list): The directive content for customization.

    Returns:
        list, list: Nodes to insert into the document, System messages.
    """
    if content is None:
        content = []

    if options is None:
        options = {}

    url = 'http://radiativetransfer.org/docserver-trunk/all/{}'.format(text)
    node = nodes.reference(rawtext, text, refuri=url, **options)

    return [node], []
8f20cc4adb9f7fa17514116fe984562d9f0174f3
26,804
from math import sqrt

import pandas as pd
import statsmodels.api as sm
from matplotlib import pyplot as plt
from sklearn.metrics import (mean_absolute_error, mean_absolute_percentage_error,
                             mean_squared_error, r2_score)

def arima_model(splits, arima_order, graph=False):
    """
    Evaluate an ARIMA model for a given order (p,d,q) and also forecast the next one time step.

    Split data in train and test. Train the model using t = (1, ..., t) and predict the next
    time step (t+1). Then add the (t+1) value from the test dataset to history and fit the
    model again using t = (1, ..., t, t+1). Then, it predicts the next instant (t+2), and so
    on up to t=N where N=len(test).
    Finally, with the predictions and observations from the test dataset, the metrics
    MSE, MAE, MAPE and R2 are calculated.

    Args:
        splits: Dictionary with training, validation and test data.
        arima_order: Tuple. Contains the arguments p, d, q for the ARIMA model.
        graph: Boolean. Plot the predictions and test dataset.

    Returns:
        Dictionary of metrics (RMSE, MSE, MAE, MAPE, R2), or None if fitting failed.
    """
    try:
        # prepare training dataset
        train = splits['train']['y'].astype('float32')
        val_y = splits['val']['y'].astype('float32')
        test_y = splits['test']['y'].astype('float32')
        test = pd.concat([val_y, test_y])

        history = [x for x in train]

        predictions = list()
        for t in range(len(test)):
            model = sm.tsa.ARIMA(history, order=arima_order)
            model_fit = model.fit(disp=0)
            yhat = model_fit.forecast()[0]  # predict one step ahead of the last value in history
            predictions.append(yhat)
            history.append(test[t])

        # Metrics
        mse = mean_squared_error(test, predictions)
        rmse = sqrt(mean_squared_error(test, predictions))
        mae = mean_absolute_error(test, predictions)
        mape = mean_absolute_percentage_error(test, predictions)
        r2 = r2_score(test, predictions)
        metrics = {"RMSE": rmse, "MSE": mse, "MAE": mae, "MAPE": mape, "R2": r2}

        if graph:
            # plot forecasts against actual outcomes
            test.plot()
            plt.plot(predictions, color='red')
            plt.title('ARIMA Fit')
            plt.ylabel(test.name)
            plt.xlabel('Time [days]')
            plt.legend(['Data test', 'Forecast'], loc='upper left')
            # plt.show()

        return metrics

    except Exception:
        return None
07ca1349e5ebe02793fbf798cbd4e5ce20128a74
26,805
import ctypes

def srfscc(srfstr, bodyid):
    """
    Translate a surface string, together with a body ID code, to the
    corresponding surface ID code. The input surface string may contain
    a name or an integer ID code.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/srfscc_c.html

    :param srfstr: Surface name or ID string.
    :type srfstr: str
    :param bodyid: ID code of body associated with surface.
    :type bodyid: int
    :return: Integer surface ID code, and whether the input string was a surface name.
    :rtype: Tuple[int, bool]
    """
    srfstr = stypes.stringToCharP(srfstr)
    bodyid = ctypes.c_int(bodyid)
    code = ctypes.c_int()
    isname = ctypes.c_int()
    libspice.srfscc_c(srfstr, bodyid, ctypes.byref(code), ctypes.byref(isname))
    return code.value, bool(isname.value)
9f32b6929c2fd5d482db5f79f4b00f1fe41b114b
26,806
import json
from os import environ

import requests

import mlrun.errors

def pr_comment(
    message: str,
    repo: str = None,
    issue: int = None,
    token=None,
    server=None,
    gitlab=False,
):
    """push comment message to Git system PR/issue

    :param message: comment message text
    :param repo: repo name (org/repo)
    :param issue: pull-request/issue number
    :param token: git system security token
    :param server: url of the git system
    :param gitlab: set to True for GitLab (MLRun will try to auto detect the Git system)
    """
    if ("CI_PROJECT_ID" in environ) or (server and "gitlab" in server):
        gitlab = True
    token = token or environ.get("GITHUB_TOKEN") or environ.get("GIT_TOKEN")
    if gitlab:
        server = server or "gitlab.com"
        headers = {"PRIVATE-TOKEN": token}
        repo = repo or environ.get("CI_PROJECT_ID")
        # auto detect GitLab pr id from the environment
        issue = issue or environ.get("CI_MERGE_REQUEST_IID")
        repo = repo.replace("/", "%2F")
        url = f"https://{server}/api/v4/projects/{repo}/merge_requests/{issue}/notes"
    else:
        server = server or "api.github.com"
        repo = repo or environ.get("GITHUB_REPOSITORY")
        # auto detect pr number if not specified, in github the pr id is identified as an issue id
        # we try and read the pr (issue) id from the github actions event file/object
        if not issue and "GITHUB_EVENT_PATH" in environ:
            with open(environ["GITHUB_EVENT_PATH"]) as fp:
                data = fp.read()
                event = json.loads(data)
                if "issue" not in event:
                    raise mlrun.errors.MLRunInvalidArgumentError(
                        f"issue not found in github actions event\ndata={data}"
                    )
                issue = event["issue"].get("number")
        headers = {
            "Accept": "application/vnd.github.v3+json",
            "Authorization": f"token {token}",
        }
        url = f"https://{server}/repos/{repo}/issues/{issue}/comments"
    resp = requests.post(url=url, json={"body": str(message)}, headers=headers)
    if not resp.ok:
        errmsg = f"bad pr comment resp!!\n{resp.text}"
        raise IOError(errmsg)
    return resp.json()["id"]
7115ec59fca36459a15e07906ce5c58b305bcfdc
26,807
from typing import Sequence

import numpy as np

def windowed_run_count_1d(arr: Sequence[bool], window: int) -> int:
    """Return the number of consecutive true values in array for runs at least as long as given duration.

    Parameters
    ----------
    arr : Sequence[bool]
      Input array (bool).
    window : int
      Minimum duration of consecutive run to accumulate values.

    Returns
    -------
    int
      Total number of true values part of a consecutive run at least `window` long.
    """
    v, rl = rle_1d(arr)[:2]
    return np.where(v * rl >= window, rl, 0).sum()
ff7b18380cd77ad046fc5d5aa678e7393c72d495
26,810
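windowed_run_count_1d relies on an rle_1d helper that is not shown above; the sketch below supplies a minimal run-length encoder under the assumption that rle_1d returns run values, run lengths, and run start indices (and that both functions live in the same module), so the calls are reproducible.

import numpy as np

def rle_1d(arr):
    # Minimal run-length encoding: values, lengths, start indices of each run.
    arr = np.asarray(arr)
    starts = np.r_[0, np.nonzero(arr[1:] != arr[:-1])[0] + 1]
    lengths = np.diff(np.r_[starts, arr.size])
    return arr[starts], lengths, starts

arr = np.array([True, True, True, False, True, True, False])
assert windowed_run_count_1d(arr, window=2) == 5  # runs of 3 and 2 both count
assert windowed_run_count_1d(arr, window=3) == 3  # only the run of 3 qualifies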