Columns:
content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (0 to 3.66M)
def get_dcgan_args(parser, args=[]):
    """Parameters determining the DCGAN setup."""
    # DCGAN:
    # ------------------------------------------------------------------------
    parser.add_argument(
        "--lam", type=float, default=10, help="Factor for scaling gradient penalty"
    )
    # NOTE: argparse's type=bool treats any non-empty string as True;
    # an action="store_true" flag is usually what is wanted here.
    parser.add_argument(
        "--wgan",
        type=bool,
        default=False,
        help="Determine if WGAN training should be activated",
    )
    parser.add_argument(
        "--p_drop",
        type=float,
        default=0.1,
        help="Dropout probability for the Discriminator network",
    )
    # ------------------------------------------------------------------------
    return parser
28d00721fad62ecbc381190b05d81fe578860f8e
9,443
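A minimal usage sketch for the `get_dcgan_args` record above (hypothetical values; assumes only the standard library):

import argparse

parser = argparse.ArgumentParser()
parser = get_dcgan_args(parser)
opts = parser.parse_args(["--lam", "5", "--p_drop", "0.2"])
print(opts.lam, opts.wgan, opts.p_drop)  # 5.0 False 0.2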
from pathlib import Path

import numpy as np


def store_tabular_data(filepath: Path, use_stem: bool = True) -> None:
    """Reads the tabular data from filepath and stores it in-memory to be
    plotted asynchronously.

    Args:
        filepath (Path): The tabular data file to be read and stored.
        use_stem (bool, optional): Only store the filename (without extension).
            Defaults to True.
    """
    # Declare global variables locally (defined at module level elsewhere,
    # along with the read_tabular_data helper)
    global data_glob
    global data_glob_changed

    floats = read_tabular_data(filepath)
    if floats == []:
        print('Skipping empty file', filepath)
        return None

    # Check that the array is not ragged; each line must be the same length!
    # I'm not exactly sure why this happens, but it seems like maybe the file
    # contents are not being flushed to disk before getting read back in again.
    # When I manually check the files afterwards, the data is all present.
    lengths = [len(x) for x in floats]
    if not all([length == lengths[0] for length in lengths]):
        print('Warning! Skipping ragged data in', filepath)
        return None

    data = np.array(floats)
    if use_stem:
        filepath = Path(filepath.stem)

    # Update an existing entry in place if this file was stored before...
    for i in range(len(data_glob)):
        (p, data_old_) = data_glob[i]
        if filepath == p:
            data_glob[i] = (filepath, data)
            data_glob_changed = True
            return None

    # ...otherwise append a new entry.
    data_glob.append((filepath, data))
    data_glob_changed = True
    return None
98c1c74aefe855690ad67ba0c6f09bd574c877ce
9,445
import io
import pkgutil

import pandas as pd


def load_uci_credit_card(return_X_y=False, as_frame=False):
    """Loads the UCI Credit Card Dataset.

    This dataset contains a sample of the
    [Default of Credit Card Clients Dataset](https://www.kaggle.com/uciml/default-of-credit-card-clients-dataset).

    Example:

    ```python
    from skorecard import datasets
    df = datasets.load_uci_credit_card(as_frame=True)
    ```

    Args:
        return_X_y (bool): If True, returns `(data, target)` instead of a dict object.
        as_frame (bool): give the pandas dataframe instead of X, y matrices (default=False).

    Returns:
        (pd.DataFrame, dict or tuple) features and target, as follows:

        - if as_frame is True: returns pd.DataFrame with y as a target
        - if return_X_y is True: returns a tuple: (X, y)
        - if both are False (default setting): returns a dictionary where the
          key `data` contains the features, and the key `target` is the target
    """  # noqa
    file = pkgutil.get_data("skorecard", "data/UCI_Credit_Card.zip")
    df = pd.read_csv(io.BytesIO(file), compression="zip")
    df = df.rename(columns={"default.payment.next.month": "default"})

    if as_frame:
        return df[["EDUCATION", "MARRIAGE", "LIMIT_BAL", "BILL_AMT1", "default"]]

    X, y = (
        df[["EDUCATION", "MARRIAGE", "LIMIT_BAL", "BILL_AMT1"]],
        df["default"].values,
    )
    if return_X_y:
        return X, y

    return {"data": X, "target": y}
ae388efcf82e0e6ff5be40ff5293d0b23d474735
9,446
import numpy as np
from scipy import odr


def quad_lsq(x, y, verbose=False, itmax=200, iparams=[]):
    """
    Fits a parabola to the data; handy because it fits directly for the
    parabola parameters in the form y = B_0 * (x - B_1)**2 + B_2. This is
    computationally slower than poly_lsq, so beware of its usage for
    time-consuming operations. Uses scipy odrpack, but for least squares.

    Parameters
    ----------
    x, y : 1-D arrays
        Data to fit.
    verbose : bool or int, optional
        Can be 0,1,2 for different levels of output (False or True are the
        same as 0 or 1)
    itmax : int, optional
        Maximum number of iterations.
    iparams : 1D array, optional
        Initial parameters B_0, B_1, B_2.

    Returns
    -------
    coeff : 1-D array
        Parabola coefficients
    err : 1-D array
        Standard error (1-sigma) on the coefficients.
    """
    # Internal definition of the quadratic
    def _quadratic(B, x):
        return B[0] * (x - B[1]) * (x - B[1]) + B[2]

    def _quad_fjd(B, x):
        return 2 * B[0] * (x - B[1])

    def _quad_fjb(B, x):
        # Rows are the derivatives w.r.t. B[0], B[1] and B[2], in that order.
        # (The original concatenated them in reverse parameter order, which
        # handed ODRPACK a wrong Jacobian.)
        _ret = np.concatenate((x * x - 2 * B[1] * x + B[1] * B[1],
                               2 * B[0] * (B[1] - x),
                               np.ones(x.shape, float)))
        _ret.shape = (3,) + x.shape
        return _ret

    if len(iparams):  # any() would wrongly ignore an all-zero initial guess
        def _quad_est(data):
            return tuple(iparams)
    else:
        def _quad_est(data):
            return (1., 1., 1.)

    quadratic = odr.Model(_quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb,
                          estimate=_quad_est)
    mydata = odr.Data(x, y)
    myodr = odr.ODR(mydata, quadratic, maxit=itmax)
    # Set type of fit to least-squares:
    myodr.set_job(fit_type=2)
    if verbose == 2:
        myodr.set_iprint(final=2)
    fit = myodr.run()
    # Display results:
    if verbose:
        fit.pprint()
    if fit.stopreason[0] == 'Iteration limit reached':
        print('(WWW) quad_lsq: iteration limit reached, result not reliable!')
    # Results and errors
    coeff = fit.beta
    err = fit.sd_beta
    return coeff, err
02dda2ba78ac6754b913941f2204ef4aa26d3f36
9,447
import re
from typing import Tuple


def _parse_cli_variable(mapping_str: str) -> Tuple[str, str]:
    """Checks that the input is of shape `name:value` and then splits it into a tuple."""
    match = re.match(r"(?P<name>.+?):(?P<value>.+)", mapping_str)
    if match is None:
        raise ValueError(f'CLI variable input {mapping_str} is not of form `"name:value"`')
    parsed = match.groupdict()
    return parsed["name"], parsed["value"]
f701b7e85c45c2df35e1252721cd3215357909ba
9,449
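A quick usage sketch for `_parse_cli_variable` above (hypothetical inputs):

name, value = _parse_cli_variable("env:production")
print(name, value)  # env production
# Values may themselves contain ':' because the name group is non-greedy:
print(_parse_cli_variable("url:http://host"))  # ('url', 'http://host')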
import json

# HttpResponse is Django's; _get_sentry_api and _sentry_authorizables_to_path
# are helpers from the surrounding (Hue) module.
from django.http import HttpResponse


def list_privileges_by_role(request, role):
    """
    List sentry privileges by role

    :param request:
    :param role: role name
    :return: A JSON array of SentryPrivileges: [p1, p2, p3...]
    """
    sentry_privileges = _get_sentry_api(request.user).list_sentry_privileges_by_role("cdap", role)
    sentry_privileges = [{"actions": p["action"],
                          "authorizables": _sentry_authorizables_to_path(p["authorizables"])}
                         for p in sentry_privileges]
    return HttpResponse(json.dumps(sentry_privileges), content_type="application/json")
fbb488f6d55b3a51646bc0c74f4861677cc16912
9,450
from typing import Any, Union

import torch


def to_torch_as(x: Any, y: torch.Tensor) -> Union[Batch, torch.Tensor]:
    """Return an object without np.ndarray.

    Same as ``to_torch(x, dtype=y.dtype, device=y.device)``.
    (``Batch`` and ``to_torch`` are provided by the surrounding library.)
    """
    assert isinstance(y, torch.Tensor)
    return to_torch(x, dtype=y.dtype, device=y.device)
c6d71e0b903b611653b07e0f55666672dc123602
9,451
def atexit_shutdown_grace_period(grace_period=-1.0):
    """Return and optionally set the default worker cache shutdown grace period.

    This only affects the `atexit` behavior of the default context
    corresponding to :func:`trio_parallel.run_sync`. Existing and future
    `WorkerContext` instances are unaffected.

    Args:
        grace_period (float): The time in seconds to wait for workers to exit
            before issuing SIGKILL/TerminateProcess and raising
            `BrokenWorkerError`. Pass `math.inf` to wait forever. Pass a
            negative value or use the default value to return the current
            value without modifying it.

    Returns:
        float: The current grace period in seconds.

    .. note::

       This function is subject to threading race conditions."""
    global ATEXIT_SHUTDOWN_GRACE_PERIOD
    if grace_period >= 0.0:
        ATEXIT_SHUTDOWN_GRACE_PERIOD = grace_period
    return ATEXIT_SHUTDOWN_GRACE_PERIOD
f7440172f40b00069b149254a689521373dbded0
9,454
def get_point(points, cmp, axis):
    """
    Get a point based on the values of either the x or y axis.

    :cmp: Integer less than or greater than 0, representing respectively
          the < and > signs.
    :returns: the index of the point matching the constraints
    """
    index = 0
    for i in range(len(points)):
        if cmp < 0:
            if points[i][axis] < points[index][axis]:
                index = i
        else:
            if points[i][axis] > points[index][axis]:
                index = i
    return index
b59035d390e83b45a0131e28c4acf7e302cf3e45
9,456
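A small usage sketch for `get_point` above (hypothetical points; axis 0 is x, axis 1 is y):

points = [(1, 5), (3, 2), (0, 7)]
print(get_point(points, -1, 0))  # 2 -> index of smallest x, point (0, 7)
print(get_point(points, 1, 0))   # 1 -> index of largest x, point (3, 2)
print(get_point(points, 1, 1))   # 2 -> index of largest y, point (0, 7)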
import pathlib


def create_jobs_list(chunks, outdir, *filters):
    # TODO: figure out the packing/unpacking
    """
    Create a list of dictionaries that hold information for the given chunks

    Arguments:
        chunks: list: A list of lists. Each nested list contains the
                filepaths to be processed
        outdir: Path object: The directory where results will be written
        filters: Callables

    Return:
        jobs_list: list: A list of dictionaries that holds information for the
                   execution of each chunk. Of the form
                   [{'chunk_id': int, (0,1,2,...)
                     'out_fp': Path object, (outdir/chunk_<chunk_id>.fa.gz)
                     'fastas': list of Path objects, ([PosixPath('path/to/PATRIC.faa'),...])
                     'filters': list of functions}]
    """
    jobs_list = []
    for i, chunk in enumerate(chunks):
        chunk_id = f"chunk_{i}"
        chunk_out = f"{chunk_id}.fa.gz"
        out_fp = outdir / pathlib.Path(chunk_out)
        # chunk_skipped = f"{chunk_id}.skipped.txt"
        chunk_fastas = chunk
        chunk_dict = {
            "chunk_id": chunk_id,
            "fastas": chunk_fastas,
            "out_fp": out_fp,
            # Should there be an `if filters` or `if len(filters) != 0` here?
            "filters": [f for f in filters],
        }
        jobs_list.append(chunk_dict)
    return jobs_list
433992eb34bc1f80d12f8cdcee3dbd99d04d22c1
9,458
import torch


def per_symbol_to_per_seq_probs(per_symbol_probs, tgt_out_idx):
    """Gather per-symbol probabilities into per-seq probabilities"""
    # per_symbol_probs shape: batch_size, seq_len, candidate_size
    # tgt_out_idx shape: batch_size, seq_len
    # output shape: batch_size, 1
    return torch.prod(
        torch.gather(per_symbol_probs, 2, tgt_out_idx.unsqueeze(-1)).squeeze(2),
        dim=1,
        keepdim=True,
    )
fc39ac129b8bbffcb602c73bc67fcc44b1d354ed
9,459
import nashpy as nash
import numpy as np


def solve_game(payoffs):
    """
    Given the payoff matrix for a zero-sum normal-form game, return the first
    mixed equilibrium found (there may be multiple).
    Returns a tuple of numpy arrays.
    """
    # .vertex_enumeration()
    # .lemke_howson(initial_dropped_label=0) - does not return *all* equilibria
    game = nash.Game(payoffs)
    equilibria = game.lemke_howson_enumeration()
    # equilibria = game.support_enumeration()  # non_degenerate=False, tol=10e-16
    equilibrium = next(equilibria, None)
    # Lemke-Howson couldn't find an equilibrium OR returned an error -
    # the game may be degenerate; try the other approaches.
    # (The original also printed equilibrium[0] and equilibrium[1] here,
    # which crashes when equilibrium is None.)
    print(equilibrium)
    if (equilibrium is None
            or np.isnan(equilibrium[0]).any()
            or np.isnan(equilibrium[1]).any()
            or (equilibrium[0].shape != (payoffs.shape[0],)
                or equilibrium[1].shape != (payoffs.shape[1],))):
        print('\n\n\n\n\nuh oh! degenerate solution')
        print('payoffs are\n', payoffs)
        equilibria = game.vertex_enumeration()
        equilibrium = next(equilibria, None)
        if equilibrium is None:
            print('\n\n\n\n\nuh oh x2! degenerate solution again!!')
            print('payoffs are\n', payoffs)
            equilibria = game.support_enumeration()  # non_degenerate=False, tol=10e-16
            equilibrium = next(equilibria, None)
    assert equilibrium is not None
    return equilibrium
9eb0dd84592f9a2d135c79322f6c812b775b0e74
9,460
from functools import reduce

import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler


def zone_features(df, zfeatures, aufeatures):
    """Create zone features from the data

    Args:
        df (DataFrame): Input dataframe
        zfeatures (list): List of zone median features
        aufeatures (list): List of zone autocorr features

    Return:
        2 dataframes
    """
    # Medians over the last 1, 3, 6 and 12 months
    zones_1y = df[(df['ds'] >= '2018-03-09') & (df['ds'] < '2019-03-10')].groupby(
        ['zone_code'], as_index=False).agg({
            'max_user': 'median',
            'bandwidth_total': 'median'})
    zones_1y.columns = ['zone_code', 'median_user_1y', 'median_bw_1y']

    zones_1m = df[(df['ds'] >= '2019-02-09') & (df['ds'] < '2019-03-10')].groupby(
        ['zone_code'], as_index=False).agg({
            'max_user': 'median',
            'bandwidth_total': 'median'})
    zones_1m.columns = ['zone_code', 'median_user_1m', 'median_bw_1m']

    zones_3m = df[(df['ds'] >= '2018-12-09') & (df['ds'] < '2019-03-10')].groupby(
        ['zone_code'], as_index=False).agg({
            'max_user': 'median',
            'bandwidth_total': 'median'})
    zones_3m.columns = ['zone_code', 'median_user_3m', 'median_bw_3m']

    zones_6m = df[(df['ds'] >= '2018-09-09') & (df['ds'] < '2019-03-10')].groupby(
        ['zone_code'], as_index=False).agg({
            'max_user': 'median',
            'bandwidth_total': 'median'})
    zones_6m.columns = ['zone_code', 'median_user_6m', 'median_bw_6m']

    # Autocorrelation features (dict-of-dict agg; deprecated in newer pandas)
    zones_autocorr = df[(df['ds'] >= '2018-12-09') & (df['ds'] < '2019-03-10')].groupby(
        ['zone_code'], as_index=False).agg({
            'max_user': {
                'lag_user_1d': lambda x: pd.Series.autocorr(x, 24),
                'lag_user_3d': lambda x: pd.Series.autocorr(x, 3 * 24),
                'lag_user_1w': lambda x: pd.Series.autocorr(x, 24 * 7),
            },
            'bandwidth_total': {
                'lag_bw_1d': lambda x: pd.Series.autocorr(x, 24),
                'lag_bw_3d': lambda x: pd.Series.autocorr(x, 3 * 24),
                'lag_bw_1w': lambda x: pd.Series.autocorr(x, 24 * 7),
            }
        }).fillna(0)
    # NOTE: these two calls return new objects that the original discarded;
    # the explicit column assignment below makes them no-ops anyway.
    zones_autocorr.columns.droplevel()
    zones_autocorr.reset_index()
    zones_autocorr.columns = ['zone_code', 'lag_user_1d', 'lag_user_3d', 'lag_user_1w',
                              'lag_bw_1d', 'lag_bw_3d', 'lag_bw_1w']

    zones = reduce(lambda x, y: pd.merge(x, y, on='zone_code', how='inner'),
                   [zones_1m, zones_3m, zones_6m, zones_1y])

    # Scale the zone features
    scale1, scale2 = MinMaxScaler(), MinMaxScaler()
    zones[zfeatures] = scale1.fit_transform(zones[zfeatures])
    zones_autocorr[aufeatures] = scale2.fit_transform(zones_autocorr[aufeatures])
    return zones, zones_autocorr
fb055e1c2fea040c95422818fbd6d16a97bf873f
9,461
from typing import List


def get_active_validator_indices(validators: List[ValidatorRecord]) -> List[int]:
    """
    Gets indices of active validators from ``validators``.
    """
    return [i for i, v in enumerate(validators) if is_active_validator(v)]
14719147b49f903240e19fbaa46da8a40315a5cf
9,462
def parse_decodes(sentences, predictions, lengths, label_vocab):
    """Parse the padding result

    Args:
        sentences (list): the tagging sentences.
        predictions (list): the prediction tags.
        lengths (list): the valid length of each sentence.
        label_vocab (dict): the label vocab.

    Returns:
        outputs (list): the formatted output.
    """
    predictions = [x for batch in predictions for x in batch]
    lengths = [x for batch in lengths for x in batch]
    id_label = dict(zip(label_vocab.values(), label_vocab.keys()))

    outputs = []
    for idx, end in enumerate(lengths):
        sent = sentences[idx][:end]
        tags = [id_label[x] for x in predictions[idx][:end]]
        sent_out = []
        tags_out = []
        words = ""
        for s, t in zip(sent, tags):
            if t.endswith('-B') or t == 'O':
                if len(words):
                    sent_out.append(words)
                tags_out.append(t.split('-')[0])
                words = s
            else:
                words += s
        if len(sent_out) < len(tags_out):
            sent_out.append(words)
        outputs.append(''.join(
            [str((s, t)) for s, t in zip(sent_out, tags_out)]))
    return outputs
bf40d8570e0a552853108e860fd193c0d9940e98
9,463
import datetime


def get_weekday(start_date, end_date, weekday_nums, repeat=None):
    """
    Get the dates that fall on each given weekday within a date range.

    :param start_date:
    :param end_date:
    :param weekday_nums: list, numbers 0 ~ 6 corresponding to the weekdays
    :param repeat:
    :return:
    """
    # date_pattern1 is a module-level format string defined elsewhere.
    # (The original imported `from datetime import datetime` and then called
    # datetime.datetime.strptime, which fails; `import datetime` fixes this.)
    sdate = datetime.datetime.strptime(start_date, date_pattern1)
    edate = datetime.datetime.strptime(end_date, date_pattern1)
    if not repeat:
        edate += datetime.timedelta(days=1)
    weekdays = []
    for weekday_num in weekday_nums:
        tmp_date = sdate
        while tmp_date < edate:
            now_weekday = tmp_date.weekday()
            tmp_date += datetime.timedelta(
                days=(((int(weekday_num) + 6) % 7 - now_weekday + 7) % 7))
            if tmp_date < edate:
                weekdays.append(tmp_date.strftime(date_pattern1))
                tmp_date += datetime.timedelta(days=7)
            else:
                break
    return weekdays
65e0495951647cbb6648a3a68d7fd2c7e1e2e88b
9,464
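A usage sketch for `get_weekday` above. The module-level `date_pattern1` is not shown in the record; '%Y-%m-%d' is assumed here, and in this numbering convention weekday_num 1 maps to Monday:

date_pattern1 = '%Y-%m-%d'  # assumed module-level format string

# All Mondays in January 2021:
print(get_weekday('2021-01-01', '2021-01-31', [1]))
# ['2021-01-04', '2021-01-11', '2021-01-18', '2021-01-25']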
def context_processor(target):
    """
    Decorator that allows context processors with parameters to be assigned
    (and executed properly) in a RequestContext

    Example::

        return render_to_response(
            template_name,
            context_instance=RequestContext(
                request,
                processors=[
                    test_processor1,
                    test_processor2(val1=test_val1, val2=test_val2),
                ]
            )
        )
    """
    def cp_wrapper(*args, **kwargs):
        if (len(args) == 1 and len(kwargs) == 0) \
                or (len(args) == 0 and len(kwargs) == 1 and 'request' in kwargs):
            return target(*args, **kwargs)
        else:
            def get_processor(request):
                return target(request, *args, **kwargs)
            return get_processor
    return cp_wrapper
842395b29aedbfe23bb3332bf343b12d26519d97
9,465
def data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_delete(uuid, local_id):  # noqa: E501
    """data_context_path_computation_context_path_comp_serviceuuid_end_pointlocal_id_capacity_bandwidth_profile_committed_burst_size_delete

    removes tapi.common.CapacityValue  # noqa: E501

    :param uuid: Id of path-comp-service
    :type uuid: str
    :param local_id: Id of end-point
    :type local_id: str

    :rtype: None
    """
    return 'do some magic!'
a3bc85df9fa77b210573058b640e47f41930ae0d
9,466
import json
import typing


def decode_messages(fit_bytes: bytes) -> typing.List[typing.Dict]:
    """Decode serialized messages.

    Arguments:
        fit_bytes: Encoded messages

    Returns:
        Decoded messages
    """
    # `schemas` is imported from the surrounding package.
    messages = []
    for line in fit_bytes.splitlines():
        payload = json.loads(line)
        messages.append(schemas.WriterMessage().load(payload))
    return messages
c56a805b5c2ffee3b48be7ae88ad6a91cddd4cc5
9,467
def iresnet101(pretrained=False, progress=True, **kwargs):
    """
    Constructs the IResNet-101 model trained on
    Glint360K (https://github.com/deepinsight/insightface/tree/master/recognition/partial_fc#4-download).

    .. note::
        The required input size of the model is 112x112.

    Args:
        pretrained (bool): Whether to download the pre-trained model on Glint360K. Default: ``False``
        progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``

    For example:

    .. code-block:: python

        >>> import flowvision
        >>> iresnet101 = flowvision.models.face_recognition.iresnet101(pretrained=False, progress=True)
    """
    return _iresnet(
        "iresnet101", IBasicBlock, [3, 13, 30, 3], pretrained, progress, **kwargs
    )
d986282b805de959cfa2d6707532d23f1c23c31b
9,469
from typing import Dict

# The token helpers are assumed to come from flask_jwt_extended in the
# surrounding application; `User` is the application's user model.
from flask_jwt_extended import create_access_token, create_refresh_token


def get_full_jwt(user: User) -> Dict:
    """
    Get a full JWT response from the username and uid token.
    """
    return {
        'access_token': create_access_token(identity=user, fresh=True),
        'refresh_token': create_refresh_token(identity=user)
    }
bbc4bc12352671878edc392717d58636475001c3
9,470
import re
from datetime import datetime

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd


def GridSearch_Prophet(prophet_grid, metric='mape'):
    """
    GridSearch tool to determine the optimal parameters for prophet

    Args:
        - prophet_grid: List of parameters. Enter it as list(ParameterGrid(prophet_grid))
        - metric: String. Not used yet. May be used to change the metric used
          to sort the tested models.

    Return:
        - mape_table: Pandas dataframe. Shows the tested parameters and the
          median of the Mean Absolute Percentage Error calculated over 1 day.
    """
    # mape_table summarizes the mean of mape according to tested parameters
    mape_table = pd.DataFrame.from_dict(prophet_grid)
    mape_table = mape_table[['device', 'parameter', 'begin', 'end',
                             'sampling_period_min', 'interval_width',
                             'daily_fo', 'changepoint_prior_scale']]
    mape_table['mape'] = np.nan

    # Loop Prophet over the prophet_grid and store the data
    a = 0
    name = re.sub("[']", '', str(mape_table.iloc[0, 0])) + '_Prediction_' + \
        str(mape_table.iloc[a, 1])
    for prophet_instance in prophet_grid:
        print('\nprophet_instance nb ' + str(a))
        # Run Prophet (`prophet` is a helper from the surrounding module)
        df_pred, mape = prophet(**prophet_instance)
        # Store the mape
        mape_table.iloc[a, 8] = mape

        # Save the df_pred and figure if the mape_table has 1 row (best model)
        if mape_table.shape[0] == 1:
            # Calculate diff between begin and end
            begin_str = mape_table.iloc[a, 2]
            end_str = mape_table.iloc[a, 3]
            d1 = datetime.strptime(begin_str, "%Y-%m-%d")
            d2 = datetime.strptime(end_str, "%Y-%m-%d")
            pred_duration = abs((d2 - d1).days)
            # Generate the generic name
            model_name = re.sub("[']", '', str(mape_table.iloc[0, 0])) + '_' + \
                str(mape_table.iloc[a, 3]) + \
                '_cps_' + str(mape_table.iloc[a, 7]) + '_fo_' + str(mape_table.iloc[a, 6]) + \
                '_' + str('{:02d}'.format(pred_duration)) + 'td'
            # Save the figure
            folder_name = '/Users/guillaume/Documents/DS2020/XXXX/XXXX/figures/best/'
            fig_name = folder_name + model_name + '.png'
            plt.savefig(fig_name, bbox_inches="tight")
            # Save the df_pred (prediction and actual values) as a csv
            folder_name = '/Users/guillaume/Documents/DS2020/XXXX/XXXX/data/processed/'
            csv_name = folder_name + model_name + '.csv'
            df_pred.to_csv(csv_name)
        # elif a+1 == mape_table.shape[0]:
        #     # Store the complete mape_table if this is the last prediction
        #     folder_name = '/Users/guillaume/Documents/DS2020/XXXX/XXXX/data/processed/'
        #     mape_table_name = folder_name + re.sub("[']", '', str(
        #         mape_table.iloc[0, 0])) + '_' + str(mape_table.iloc[a, 3]) + '_mape_table.csv'

        mape_table = mape_table.sort_values('mape')
        # mape_table.to_csv(mape_table_name)
        a += 1
    return mape_table
324f6468109bfa52258d1ad6645692395be7859a
9,471
def _check_max_features(importances, max_features):
    """Interpret the max_features value"""
    n_features = len(importances)
    if max_features is None:
        max_features = n_features
    elif isinstance(max_features, int):
        max_features = min(n_features, max_features)
    elif isinstance(max_features, float):
        max_features = int(n_features * max_features)
    return max_features
816daf9d99ac4ecd2d5024a3be63f793d7669e1f
9,472
import dask.array as da
from toolz import curry


# The docstring shows partial application (calling with `func` alone), but the
# flattened original took both arguments at once; toolz's @curry restores the
# behaviour the doctest demonstrates (an assumption about the original code).
@curry
def map_blocks(func, data):
    """Curried version of Dask's map_blocks

    Args:
        func: the function to map
        data: a Dask array

    Returns:
        a new Dask array

    >>> f = map_blocks(lambda x: x + 1)
    >>> f(da.arange(4, chunks=(2,)))
    dask.array<lambda, shape=(4,), dtype=int64, chunksize=(2,)>
    """
    return da.map_blocks(func, data)
ab97911bb147ceb6d5350fcd16300926d2a89f8e
9,473
def premises_to_syllogism(premises):
    """
    >>> premises_to_syllogism(["Aab", "Ebc"])
    'AE1'
    """
    figure = {"abbc": "1", "bacb": "2", "abcb": "3", "babc": "4"}[premises[0][1:] + premises[1][1:]]
    return premises[0][0] + premises[1][0] + figure
a048d44acea1eb4c9346880a74547a9cd100ebf0
9,475
import re


def fix_fits_keywords(header):
    """
    Update header keywords to change '-' to '_', as columns with '-'
    are not allowed in SQL
    """
    new_header = {}
    for key in header.keys():
        new_key = key.replace('-', '_')
        new_header[new_key] = header[key]

    # Temporary fix - needs to be removed
    # Making it backwards compatible with older files.
    # Check the FILETYPE is present; if not, get it from the filename
    if 'FILETYPE' not in header.keys():
        logger.warning("Adding FILETYPE from FITSNAME pattern to header for compatibility")
        # Try to get it from the filename
        if re.search('_passthrough.fits', header['FITSNAME']):
            new_header['FILETYPE'] = 'psth'
        elif re.search('_fltd.fits', header['FITSNAME']):
            new_header['FILETYPE'] = 'filtered'
        # For headers without FILETYPE (i.e.: yearly) we set it to raw
        else:
            raise Exception("ERROR: Cannot provide suitable FILETYPE from header or pattern")
        logger.warning(f"Added FILETYPE {new_header['FILETYPE']} from pattern")
    return new_header
0d8a2f502252051857a131944a4c31ba8ec9ff0e
9,476
def is_sum_lucky(x, y):
    """Return a string describing whether or not the sum of the inputs is lucky.

    This function first makes sure the inputs are valid and then calculates
    the sum. Then, it determines the message to return based on whether or
    not that sum should be considered "lucky".
    """
    if x is not None and y is not None:
        result = x + y
        if result == 7:
            return 'a lucky number!'
        return 'an unlucky number!'
    return 'just a normal number'
081b5e8cc2657a00ea160e398fb00f84187e2ab6
9,478
import asyncio


def unsync_function(func, *args, **kwargs):
    """Runs an async function in a standard blocking way and returns output"""
    return asyncio.run(func(*args, **kwargs))
cd7c19bf226b78c9e3c4b19325e7acb4fcc90e21
9,479
from typing import Any, Dict, Iterable, List, Tuple, Union


def zip_results(name: str, recipes: Iterable[Recipe], cache=CacheType.Auto) \
        -> Recipe[Union[List[Tuple[Any, ...]], Dict[Any, Tuple[Any, ...]]]]:
    """
    Create a Recipe that zips the outputs from a number of recipes into
    elements, similar to Python's built-in zip(). Notably, dictionaries are
    handled a bit differently, in that a dictionary is returned with keys
    mapping to tuples from the different inputs, i.e.::

        {"1": 1} zip {"1": "one"} -> {"1": (1, "one")}

    :param name: The name to give the created Recipe
    :param recipes: The recipes to zip. These must return lists or dictionaries
    :param cache: The type of caching to use for this Recipe
    :return: The created Recipe
    """
    def _zip_results(*iterables: Union[List, Dict]) \
            -> Union[List[Tuple[Any, ...]], Dict[Any, Tuple[Any, ...]]]:
        # Sanity checks
        if not iterables or len(iterables) == 0:
            return []
        if any(not isinstance(iterable, Iterable) for iterable in iterables):
            raise ValueError("Cannot zip non-iterable inputs")
        first_iterable = iterables[0]
        if any(not isinstance(iterable, type(first_iterable)) for iterable in iterables):
            raise ValueError("Cannot zip inputs of different types")
        num_items = len(first_iterable)
        if any(len(iterable) != num_items for iterable in iterables):
            raise ValueError("Cannot zip inputs of different length")

        # Handle the actual zipping operation
        if isinstance(first_iterable, list):
            return list(zip(*iterables))
        elif isinstance(first_iterable, dict):
            return {
                key: tuple(iterable[key] for iterable in iterables)
                for key in first_iterable.keys()
            }
        else:
            raise ValueError("Type: {} not supported in _zip_results()".format(type(first_iterable)))

    return Recipe(_zip_results, recipes, name, transient=False, cache=cache)
a1e0b7aa2d5071e485f49b0b7aa43343f8760ab2
9,480
def get_muscle_reference_dictionary():
    """
    Values taken from:

    @article{bashkatov2011optical,
      title={Optical properties of skin, subcutaneous, and muscle tissues: a review},
      author={Bashkatov, Alexey N and Genina, Elina A and Tuchin, Valery V},
      journal={Journal of Innovative Optical Health Sciences},
      volume={4},
      number={01},
      pages={9--38},
      year={2011},
      publisher={World Scientific}
    }
    """
    reference_dict = dict()

    # wavelength [nm] -> (absorption [1/cm], scattering [1/cm]); the original
    # spelled out one TissueProperties block per wavelength with identical
    # values for all remaining fields, so the blocks are built in a loop here.
    optical_properties = {
        650: (1.04, 87.5),
        700: (0.48, 81.8),
        750: (0.41, 77.1),
        800: (0.28, 70.4),
        850: (0.3, 66.7),
        900: (0.32, 62.1),
        950: (0.46, 59.0),
    }

    for wavelength, (absorption, scattering) in optical_properties.items():
        values = TissueProperties()
        values[Tags.DATA_FIELD_ABSORPTION_PER_CM] = absorption
        values[Tags.DATA_FIELD_SCATTERING_PER_CM] = scattering
        values[Tags.DATA_FIELD_ANISOTROPY] = 0.9
        values[Tags.DATA_FIELD_GRUNEISEN_PARAMETER] = \
            calculate_gruneisen_parameter_from_temperature(37.0)
        values[Tags.DATA_FIELD_SEGMENTATION] = SegmentationClasses.MUSCLE
        values[Tags.DATA_FIELD_OXYGENATION] = 0.175
        values[Tags.DATA_FIELD_DENSITY] = 1090.4
        values[Tags.DATA_FIELD_SPEED_OF_SOUND] = 1588.4
        values[Tags.DATA_FIELD_ALPHA_COEFF] = 1.09
        reference_dict[wavelength] = values

    return reference_dict
b2bcedabce6309a11d0b1f8424ccefc06d7c8dee
9,481
import shlex
from subprocess import Popen, PIPE


def smartquotes(text):
    """
    Runs text through pandoc for smartquote correction.

    This script accepts a paragraph of input and outputs typographically
    correct text using pandoc. Note that line breaks are not retained.
    """
    command = shlex.split('pandoc --smart -t plain')
    com = Popen(command, shell=False, stdin=PIPE, stdout=PIPE, stderr=PIPE)
    out, err = com.communicate(text.encode('utf-8'))
    com_out = out.decode('utf-8')
    text = com_out.replace('\n', ' ').strip()
    return text
bab6ec252495d8e279cdcde7f51f60331117bae2
9,483
import numpy as np


def get_nearest_stations_xy(x, y, variable, n=1, stations=None, ignore=None):
    """Find the KNMI stations that measure 'variable' closest to the
    x, y coordinates.

    Parameters
    ----------
    x : int or float
        x coordinate in RD
    y : int or float
        y coordinate in RD
    variable : str
        measurement variable e.g. 'RD' or 'EV24'
    n : int, optional
        number of stations you want to return. The default is 1.
    stations : pd.DataFrame, optional
        if None stations will be obtained using the get_stations function.
        The default is None.
    ignore : list, optional
        list of stations to ignore. The default is None.

    Returns
    -------
    list
        station numbers.
    """
    if stations is None:
        stations = get_stations(variable=variable)
    if ignore is not None:
        stations.drop(ignore, inplace=True)
    if stations.empty:
        return None

    d = np.sqrt((stations.x - x) ** 2 + (stations.y - y) ** 2)
    return d.nsmallest(n).index.to_list()
2d19e64054eb0813919e2a286c686b91e6d0a6f4
9,484
import networkx as nx


def parseStdInput():
    """Obtain a graph by parsing the standard input as per the format
    specified in the PACE Challenge.

    NOTE: currently a stub; it returns a fixed 4-cycle instead of actually
    reading from stdin.
    """
    edges = [(1, 2), (2, 3), (3, 4), (4, 1)]
    G = nx.Graph()
    G.add_edges_from(edges)
    return G
4e26d50c590321241101586d9e83b2d53c7324ea
9,485
from string import Formatter


def strfdelta(tdelta, fmt):
    """
    Get a string from a timedelta.
    """
    f, d = Formatter(), {}
    l = {"D": 86400, "H": 3600, "M": 60, "S": 1}
    k = list(map(lambda x: x[1], list(f.parse(fmt))))
    rem = int(tdelta.total_seconds())

    for i in ("D", "H", "M", "S"):
        if i in k and i in l.keys():
            d[i], rem = divmod(rem, l[i])

    return f.format(fmt, **d)
01e7d3678cc88a08ec91e64dd59037294f17d9fe
9,486
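A usage sketch for `strfdelta` above (hypothetical format string):

from datetime import timedelta

print(strfdelta(timedelta(days=1, hours=2, minutes=5), "{D}d {H}h {M}m {S}s"))
# 1d 2h 5m 0s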
def imputation_Y(X, model):
    """Perform imputation. Don't normalize for depth.

    Args:
        X: feature matrix from h5.
        model: a trained scBasset model.

    Returns:
        array: a peak*cell imputed accessibility matrix. Sequencing depth
        isn't corrected for.
    """
    Y_impute = model.predict(X)
    return Y_impute
75e2de758c3544655d4332098d4398255770d7c3
9,488
def format_percent(x, _pos=None):
    """
    Usage: plt.gca().yaxis.set_major_formatter(format_percent)
    """
    x = 100 * x
    if abs(x - round(x)) > 0.05:
        return r"${:.1f}\%$".format(x)
    else:
        return r"${:.0f}\%$".format(x)
27362ffa3b5726c135afdf034208eeca8d7c4f60
9,489
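A usage sketch for `format_percent` above. Wrapping it in FuncFormatter is the conservative way to register it (recent matplotlib versions also accept the bare callable, as the docstring suggests):

import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter

fig, ax = plt.subplots()
ax.plot([0, 1], [0.0, 0.875])
ax.yaxis.set_major_formatter(FuncFormatter(format_percent))
fig.savefig('percent_axis.png')  # y ticks render as e.g. $50\%$, $87.5\%$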
def is_row_and_col_balanced(T1, T2):
    """
    Partial latin squares T1 and T2 are balanced if the symbols appearing in
    row r of T1 are the same as the symbols appearing in row r of T2, for
    each r, and if the same condition holds on columns.

    EXAMPLES::

        sage: from sage.combinat.matrices.latin import *
        sage: T1 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
        sage: T2 = matrix([[0,1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
        sage: is_row_and_col_balanced(T1, T2)
        True
        sage: T2 = matrix([[0,3,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1], [-1,-1,-1,-1]])
        sage: is_row_and_col_balanced(T1, T2)
        False
    """
    for r in range(T1.nrows()):
        val1 = set(x for x in T1.row(r) if x >= 0)
        val2 = set(x for x in T2.row(r) if x >= 0)
        if val1 != val2:
            return False
    for c in range(T1.ncols()):
        val1 = set(x for x in T1.column(c) if x >= 0)
        val2 = set(x for x in T2.column(c) if x >= 0)
        if val1 != val2:
            return False
    return True
f0a9d1522da2fc079d4021603198e79c438de727
9,490
def submit(ds, entry_name, molecule, index):
    """
    Submit an optimization job to a QCArchive server.

    Parameters
    ----------
    ds : qcportal.collections.OptimizationDataset
        The QCArchive OptimizationDataset object that this calculation
        belongs to
    entry_name : str
        The base entry name that the conformation belongs to. Usually this is
        a canonical SMILES, but it can be anything as it represents a key in a
        dictionary-like datastructure. This will be used as an entry name in
        the dataset.
    molecule : QCMolecule
        The JSON representation of a QCMolecule, which has geometry and
        connectivity present, among others
    index : int
        The conformation identifier of the molecule. This is used to make the
        entry names unique, since each conformation must have its own unique
        entry in the dataset.

    Returns
    -------
    (unique_id, success) : tuple
        unique_id : str
            The unique_id that was submitted to the dataset. This is the name
            of the new entry in the dataset.
        success : bool
            Whether the dataset was able to successfully add the entry. If
            this is False, then the entry with the name corresponding to
            unique_id was already present in the dataset.
    """
    # This workaround prevents cmiles from crashing if OE is installed but has
    # no license. Even though rdkit is specified, protomer enumeration is OE-
    # specific and still attempted.
    # oe_flag = cmiles.utils.has_openeye
    # cmiles.utils.has_openeye = False
    # attrs = cmiles.generator.get_molecule_ids(molecule, toolkit="rdkit")
    # cmiles.utils.has_openeye = oe_flag

    CIEHMS = "canonical_isomeric_explicit_hydrogen_mapped_smiles"
    molecule["extras"] = {CIEHMS: entry_name}
    attrs = {CIEHMS: entry_name}

    unique_id = entry_name + f"-{index}"

    success = False
    try:
        ds.add_entry(unique_id, molecule, attributes=attrs, save=False)
        success = True
    except KeyError:
        pass

    return unique_id, success
50a30a25af59906ce5636ce8a176e29befd27d60
9,491
def list_isos(apiclient, **kwargs):
    """Lists all available ISO files."""
    cmd = listIsos.listIsosCmd()
    # A plain loop instead of the original list comprehension, which was used
    # purely for its setattr side effects.
    for k, v in kwargs.items():
        setattr(cmd, k, v)
    return apiclient.listIsos(cmd)
ad3117c6fc2c7bc4543372d306d0d476918d5898
9,492
from typing import Optional, Union

from .....main import _get_bot


async def edit_message_live_location(
    token: str = TOKEN_VALIDATION,
    latitude: float = Query(..., description='Latitude of new location'),
    longitude: float = Query(..., description='Longitude of new location'),
    chat_id: Optional[Union[int, str]] = Query(None, description='Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)'),
    message_id: Optional[int] = Query(None, description='Required if inline_message_id is not specified. Identifier of the message to edit'),
    inline_message_id: Optional[str] = Query(None, description='Required if chat_id and message_id are not specified. Identifier of the inline message'),
    horizontal_accuracy: Optional[float] = Query(None, description='The radius of uncertainty for the location, measured in meters; 0-1500'),
    heading: Optional[int] = Query(None, description='Direction in which the user is moving, in degrees. Must be between 1 and 360 if specified.'),
    proximity_alert_radius: Optional[int] = Query(None, description='Maximum distance for proximity alerts about approaching another chat member, in meters. Must be between 1 and 100000 if specified.'),
    reply_markup: Optional[Json['InlineKeyboardMarkupModel']] = Query(None, description='A JSON-serialized object for a new inline keyboard.'),
) -> JSONableResponse:
    """
    Use this method to edit live location messages. A location can be edited
    until its live_period expires or editing is explicitly disabled by a call
    to stopMessageLiveLocation. On success, if the edited message is not an
    inline message, the edited Message is returned, otherwise True is returned.

    https://core.telegram.org/bots/api#editmessagelivelocation
    """
    reply_markup: Optional[InlineKeyboardMarkupModel] = parse_obj_as(
        Optional[InlineKeyboardMarkupModel],
        obj=reply_markup,
    )

    bot = await _get_bot(token)
    try:
        entity = await get_entity(bot, chat_id)
    except BotMethodInvalidError:
        assert isinstance(chat_id, int) or (isinstance(chat_id, str) and len(chat_id) > 0 and chat_id[0] == '@')
        entity = chat_id
    except ValueError:
        raise HTTPException(404, detail="chat not found?")
    # end try

    result = await bot.edit_message_live_location(
        latitude=latitude,
        longitude=longitude,
        entity=entity,
        message_id=message_id,
        inline_message_id=inline_message_id,
        horizontal_accuracy=horizontal_accuracy,
        heading=heading,
        proximity_alert_radius=proximity_alert_radius,
        reply_markup=reply_markup,
    )
    data = await to_web_api(result, bot)
    return r_success(data.to_array())
39eef452e570e4b00b08aa66aba6d4253bce154f
9,493
import numpy as np


def process_rollout(rollout, gamma, lambda_=1.0):
    """
    Given a rollout, compute its returns and the advantage.
    (`discount` and `Batch` are defined in the surrounding module.)
    """
    batch_si = np.asarray(rollout.states)
    batch_a = np.asarray(rollout.actions)
    rewards = np.asarray(rollout.rewards)
    action_reward = np.concatenate((batch_a, rewards[:, np.newaxis]), axis=1)
    vpred_t = np.asarray(rollout.values + [rollout.r])

    rewards_plus_v = np.asarray(rollout.rewards + [rollout.r])
    batch_r = discount(rewards_plus_v, gamma)[:-1]
    delta_t = rewards + gamma * vpred_t[1:] - vpred_t[:-1]
    # This formula for the advantage comes from "Generalized Advantage
    # Estimation": https://arxiv.org/abs/1506.02438
    batch_adv = discount(delta_t, gamma * lambda_)

    features = rollout.features
    batch_pc = np.asarray(rollout.pixel_changes)
    return Batch(batch_si, batch_a, action_reward, batch_adv, batch_r,
                 rollout.terminal, features, batch_pc)
da37f8b55294df5204f18772552e72d2131dd072
9,494
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Add sensors for passed config_entry in HA."""
    coordinator: IotawattUpdater = hass.data[DOMAIN][config_entry.entry_id]
    created = set()

    @callback
    def _create_entity(key: str) -> IotaWattSensor:
        """Create a sensor entity."""
        created.add(key)
        return IotaWattSensor(
            coordinator=coordinator,
            key=key,
            mac_address=coordinator.data["sensors"][key].hub_mac_address,
            name=coordinator.data["sensors"][key].getName(),
            entity_description=ENTITY_DESCRIPTION_KEY_MAP.get(
                coordinator.data["sensors"][key].getUnit(),
                IotaWattSensorEntityDescription("base_sensor"),
            ),
        )

    async_add_entities(_create_entity(key) for key in coordinator.data["sensors"])

    @callback
    def new_data_received():
        """Check for new sensors."""
        entities = [
            _create_entity(key)
            for key in coordinator.data["sensors"]
            if key not in created
        ]
        if entities:
            async_add_entities(entities)

    coordinator.async_add_listener(new_data_received)
171d65acf5227ed9027481bcc2eb773bee52bbca
9,495
import calendar
from datetime import datetime


def calculate_cost(cost, working_days_flag, month, nr_of_passes):
    """Calculate the monthly tolls cost"""
    if working_days_flag:
        # working_days() is a helper defined elsewhere in the module
        passes = working_days(month) * nr_of_passes
    else:
        # The original called datetime.datetime.now(), which fails under
        # `from datetime import datetime`.
        now = datetime.now()
        passes = calendar.monthrange(now.year, month)[1] * nr_of_passes

    total_cost = 0
    for i in range(1, passes + 1):
        if 1 <= i <= 5:
            total_cost += cost
        elif 6 <= i <= 10:
            total_cost += cost - (cost * 15 / 100)
        elif 11 <= i <= 20:
            total_cost += cost - (cost * 30 / 100)
        elif 21 <= i <= 30:
            total_cost += cost - (cost * 40 / 100)
        elif 31 <= i <= 40:
            total_cost += cost - (cost * 50 / 100)
        elif 41 <= i <= 60:
            total_cost += cost - (cost * 60 / 100)
        else:
            total_cost += cost
    return total_cost
5221e0dedd56d7d3302aa88cdf9ad7feb67173a3
9,496
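A worked example of the discount tiers in `calculate_cost` above (hypothetical toll of 2.00, one pass per day in a 30-day month such as April, working_days_flag=False):

# Tier arithmetic for 30 passes at a 2.00 toll:
#   passes  1-5  :  5 * 2.00        = 10.00
#   passes  6-10 :  5 * 2.00 * 0.85 =  8.50
#   passes 11-20 : 10 * 2.00 * 0.70 = 14.00
#   passes 21-30 : 10 * 2.00 * 0.60 = 12.00
#   total                           = 44.50
print(calculate_cost(2.0, False, 4, 1))  # 44.5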
from re import search


def e_dl() -> str:
    """Fetch size of archives to be downloaded for next system update."""
    # TMERGE_LOGFILE is a module-level constant pointing at the emerge log.
    size = 'Calculating...'
    with open(file=TMERGE_LOGFILE, mode='r', encoding='utf-8') as log_file:
        for line in list(log_file)[::-1]:
            reqex = search(r'(Size of downloads:.)([0-9,]*\s[KMG]iB)', line)
            if reqex is not None:
                size = reqex.group(2)
                break
    print(size)
    return size
1639d6cd0e78ca4f4adfceb75875f6b0de398a63
9,497
def get_model_fn():
    """Returns the model definition."""

    def model_fn(features, labels, mode, params):
        """Returns the model function."""
        feature = features['feature']
        print(feature)
        labels = labels['label']
        one_hot_labels = model_utils.get_label(
            labels,
            params,
            FLAGS.src_num_classes,
            batch_size=FLAGS.train_batch_size)

        def get_logits():
            """Return the logits."""
            avg_pool = model.conv_model(feature, mode)
            name = 'final_dense_dst'
            with tf.variable_scope('target_CLS'):
                logits = tf.layers.dense(
                    inputs=avg_pool, units=FLAGS.src_num_classes, name=name)
            return logits

        logits = get_logits()
        logits = tf.cast(logits, tf.float32)

        dst_loss = tf.losses.softmax_cross_entropy(
            logits=logits,
            onehot_labels=one_hot_labels,
        )
        dst_l2_loss = FLAGS.weight_decay * tf.add_n([
            tf.nn.l2_loss(v)
            for v in tf.trainable_variables()
            if 'batch_normalization' not in v.name and 'kernel' in v.name
        ])
        loss = dst_loss + dst_l2_loss

        train_op = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            cur_finetune_step = tf.train.get_global_step()
            update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            with tf.control_dependencies(update_ops):
                finetune_learning_rate = lr_schedule()
                optimizer = tf.train.AdamOptimizer(finetune_learning_rate)
                train_op = tf.contrib.slim.learning.create_train_op(loss, optimizer)
                with tf.variable_scope('finetune'):
                    train_op = optimizer.minimize(loss, cur_finetune_step)
        else:
            train_op = None

        eval_metrics = None
        if mode == tf.estimator.ModeKeys.EVAL:
            eval_metrics = model_utils.metric_fn(labels, logits)

        if mode == tf.estimator.ModeKeys.TRAIN:
            with tf.control_dependencies([train_op]):
                tf.summary.scalar('classifier/finetune_lr', finetune_learning_rate)
        else:
            train_op = None

        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=loss,
            train_op=train_op,
            eval_metric_ops=eval_metrics,
        )

    return model_fn
ef006ff79c6979a61a745ebfecd599858ded0418
9,498
def build_node(idx, node_type):
    """
    Build node list

    :idx: a value-to-id mapping dict
    :node_type: a string describing the node type
    :returns: a list of records of the nodes extracted from the mapping
    """
    return rekey(idx, 'value', 'id:ID', {':LABEL': node_type})
cf9cb20b152aa55ef7f37ee1e2f513d166e2b7c5
9,499
def residual_block(filters, repetitions, kernel_size=(3, 3), strides=(2, 2),
                   is_first_layer=False):
    """Builds a residual block with repeating bottleneck blocks.
    """
    def f(input):
        for i in range(repetitions):
            init_strides = (1, 1)
            if i == 0 and not is_first_layer:
                init_strides = strides
            input = basic_block(
                filters=filters, kernel_size=kernel_size,
                init_strides=init_strides,
                is_first_block_of_first_layer=(is_first_layer and i == 0))(input)
        return input

    return f
d3771289e034c4cd06f38caa715c925d7b947ab1
9,500
import numpy as np


def vpg_omega(X, Y, Gamma=1, sigma=1, polarIn=False):
    """Vorticity distribution for a 2D Gaussian vortex patch"""
    if polarIn:
        r = X
    else:
        r = np.sqrt(X ** 2 + Y ** 2)
    omega_z = Gamma / (np.pi * sigma) * (np.exp(-r ** 2 / sigma ** 2))
    return omega_z
d7964152c9d21defc395e2a31d8709fe9c5d94c8
9,501
from typing import Tuple


def get_outgroup(tree: CassiopeiaTree, triplet: Tuple[str, str, str]) -> str:
    """Infers the outgroup of a triplet from a CassiopeiaTree.

    Finds the outgroup based on the depth of the latest-common-ancestors
    of each pair of items. The pair with the deepest LCA is the ingroup
    and the remaining leaf is the outgroup. We infer the depth of the LCA
    from the number of shared ancestors.

    Args:
        tree: CassiopeiaTree
        triplet: A tuple of three leaves constituting a triplet.

    Returns:
        The outgroup (i.e. the most distal leaf in the triplet.)
    """
    i, j, k = triplet[0], triplet[1], triplet[2]

    i_ancestors = tree.get_all_ancestors(i)
    j_ancestors = tree.get_all_ancestors(j)
    k_ancestors = tree.get_all_ancestors(k)

    ij_common = len(set(i_ancestors) & set(j_ancestors))
    ik_common = len(set(i_ancestors) & set(k_ancestors))
    jk_common = len(set(j_ancestors) & set(k_ancestors))

    out_group = "None"
    if ij_common > jk_common and ij_common > ik_common:
        out_group = k
    elif ik_common > jk_common and ik_common > ij_common:
        out_group = j
    elif jk_common > ij_common and jk_common > ik_common:
        out_group = i
    return out_group
c48e7121a8622876b6fb1269f881da4afe9cd8da
9,502
# NOTE: `call` here is the request helper from the surrounding API client
# module, not unittest.mock.call; the original `from unittest.mock import call`
# import appears to have been added in error and is dropped.


def delete_host(resource_root, host_id):
    """
    Delete a host by id

    @param resource_root: The root Resource object.
    @param host_id: Host id
    @return: The deleted ApiHost object
    """
    return call(resource_root.delete, "%s/%s" % (HOSTS_PATH, host_id), ApiHost)
8d4349c0722517e0f4f8d74ea74b2d74bbc08227
9,503
from typing import Any, List, Tuple, Union

import numpy as np
import torch.nn as nn
from PIL import Image


def get_preds(model: nn.Module, image: Union[np.ndarray, str], **kwargs) -> Tuple[List]:
    """
    Generate predictions for the given `image` using `model`.
    (`transforms` and `_get_logger` come from the surrounding module.)
    """
    logger = _get_logger(name=__name__)
    # Load in the image if a path string is given
    if isinstance(image, str):
        image = Image.open(image).convert("RGB")
    # Convert PIL image to array
    image = np.array(image)
    # Convert image to a tensor
    tensor_image = transforms(image=image)["image"]
    # Generate predictions
    model.eval()
    pred = model.predict([tensor_image])
    # Gather the bboxes, scores & labels from the preds
    pred_boxes = pred[0]["boxes"]   # bounding boxes
    pred_class = pred[0]["labels"]  # predicted class labels
    pred_score = pred[0]["scores"]  # predicted scores
    # Process detections
    boxes = list(pred_boxes.cpu().numpy())
    clas = list(pred_class.cpu().numpy())
    scores = list(pred_score.cpu().numpy())
    return boxes, clas, scores
8778f43fd65bccca8fc9372454aba2a7cd2544d5
9,504
import numpy as np


def init_w(w, n):
    """
    :purpose:
    Initialize a weight array consisting of 1s if none is given.
    This is called at the start of each function containing a w param.

    :params:
    w : a weight vector, if one was given to the initial function, else None
        NOTE: w MUST be an array of np.float64. So, even if you want a boolean
        w, convert it to np.float64 (using w.astype(np.float64)) before
        passing it to any function
    n : the desired length of the vector of 1s (often set to len(u))

    :returns:
    w : an array of 1s with shape (n,) if w is None, else return w unchanged
    """
    if w is None:
        return np.ones(n)
    else:
        return w
2157f12410c2a909a32f37b9fcae4a489361fb6e
9,507
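A short usage sketch for `init_w` above (hypothetical weights):

import numpy as np

print(init_w(None, 3))                        # [1. 1. 1.]
print(init_w(np.array([0.5, 1.0, 2.0]), 3))   # [0.5 1.  2. ]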
def _ensure_min_resources(progs, cores, memory, min_memory):
    """Ensure the settings match the minimum resources required by the used programs.
    """
    for p in progs:
        if p in min_memory:
            if not memory or cores * memory < min_memory[p]:
                memory = float(min_memory[p]) / cores
    return cores, memory
f311259242a73a7bc527e3601765c95153a08748
9,508
import ctypes


def ctypes_pointer(name):
    """Create a ctypes type representing a C pointer to a custom data type ``name``."""
    return type("c_%s_p" % name, (ctypes.c_void_p,), {})
d87f10ac06391379a24f166272fd42fa938e3676
9,509
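A quick usage sketch for `ctypes_pointer` above (hypothetical type name):

c_mytype_p = ctypes_pointer("mytype")
print(c_mytype_p.__name__)                       # c_mytype_p
print(issubclass(c_mytype_p, ctypes.c_void_p))   # True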
import numpy as np
import pandas as pd


def generate_linear_data(n, betas, sigma):
    """Generate pandas df with x and y variables related by a linear equation.
    Export data as csv.

    :param n: Number of observations.
    :param betas: beta parameters.
    :param sigma: standard deviation
    :return: None
    """
    x = np.linspace(start=0.0, stop=1.0, num=n)
    y = betas[0] + betas[1] * x + np.random.normal(loc=1, scale=sigma, size=n)
    df = pd.DataFrame({'x': x, 'y': y})
    df.to_csv('data/train_data.csv', index=False)
    return None
2f8b99a3c11ecf75afee51bd5df31f22efaddf58
9,510
from math import sqrt

import numpy as np


def vrotate_3D(vec: np.ndarray, ref: np.ndarray) -> np.ndarray:
    """Rotates a vector in a 3D space.

    Returns the rotation matrix for `vec` to match the orientation of a
    reference vector `ref`.
    https://math.stackexchange.com/questions/180418/calculate-rotation-matrix-to-align-vector-a-to-vector-b-in-3d/476311#476311

    Parameters
    ----------
    vec
        Vector to rotate, as a numpy 1D array
    ref
        Reference vector, as a numpy 1D array

    Returns
    -------
    np.ndarray
        (3,3) rotation matrix, as a numpy 2D array
    """
    def norm(A):
        return sqrt(np.dot(A, A))

    # G = np.matrix([
    #     [np.dot(A, B), -norm(np.cross(A, B)), 0.0],
    #     [norm(np.cross(A, B)), np.dot(A, B), 0.0],
    #     [0.0, 0.0, 1.0]
    # ])
    # F = np.matrix([
    #     A,
    #     (B-np.dot(A, B)*A)/norm(B-np.dot(A, B)*A),
    #     np.cross(B, A)/norm(np.cross(B, A))
    # ])
    # return F.I*G*F
    V = np.cross(vec, ref)
    S = norm(V)
    if abs(S) < 1.0e-6:
        # Already collinear, nothing to do
        return np.eye(3)
    else:
        C = np.dot(vec, ref)
        Vx = np.matrix([[0.0, -V[2], V[1]],
                        [V[2], 0.0, -V[0]],
                        [-V[1], V[0], 0.0]])
        return np.eye(3) + Vx + Vx ** 2 * (1.0 - C) / S ** 2
4cea9d84d8fba2dd5bd9399b83ca9f1aca79b830
9,512
def asymptotic_decay(learning_rate, t, max_iter):
    """Decay function of the learning process.

    Parameters
    ----------
    learning_rate : float
        current learning rate.
    t : int
        current iteration.
    max_iter : int
        maximum number of iterations for the training.
    """
    return learning_rate / (1 + t / (max_iter / 2))
7cc699caed4ddcbde67f5d6e4199fc8479364585
9,513
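A worked example of `asymptotic_decay` above (hypothetical values): the rate halves by mid-training and falls to a third at the end.

print(asymptotic_decay(0.5, 0, 100))    # 0.5
print(asymptotic_decay(0.5, 50, 100))   # 0.25  -> 0.5 / (1 + 50/50)
print(asymptotic_decay(0.5, 100, 100))  # 0.1666... -> 0.5 / (1 + 100/50)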
def get_cell_content(browser, author):
    """Get novel cells.

    Collects the text of all cells written by `author` and returns it
    joined by newlines.
    """
    content = list()
    cells = browser.find_all(class_='t t2')
    for cell in cells:
        if cell.find(class_='r_two').b.string != author:
            continue
        for cell_content in cell.find(
                class_=['tpc_content do_not_catch', 'tpc_content']).strings:
            content.append(cell_content.strip())
    return "\n".join(content)
eb498d937b8ffd51ef7805a30940833e09571ed5
9,514
def triangle_area(a, h):
    """Given the length of a side and the height, return the area of a triangle.
    >>> triangle_area(5, 3)
    7.5
    """
    return a * h / 2.0
9890d5e8332e667fab6dd672f62ca852f6f8f8c0
9,515
import logging
import urllib.parse

from nicos.core import ConfigurationError


def create_mongo_handler(config):
    """
    :param config: configuration dictionary
    :return: a MongoLogHandler if 'mongo_logger' is in the options, else None
    """
    if hasattr(config, 'mongo_logger'):
        url = urllib.parse.urlparse(config.mongo_logger)
        if not url.netloc:
            raise ConfigurationError('mongo_logger: invalid url')
        mongo_handler = MongoLogHandler()
        mongo_handler.setLevel(logging.WARNING)
        return mongo_handler
c7ac39574a21c44519ae57c6fb40b8f6ca679311
9,516
def split_and_load(data, ctx_list, batch_axis=0, even_split=True):
    """Splits an NDArray into `len(ctx_list)` slices along `batch_axis` and
    loads each slice to one context in `ctx_list`.

    Parameters
    ----------
    data : NDArray
        A batch of data.
    ctx_list : list of Context
        A list of Contexts.
    batch_axis : int, default 0
        The axis along which to slice.
    even_split : bool, default True
        Whether to force all slices to have the same number of elements.

    Returns
    -------
    list of NDArray
        Each corresponds to a context in `ctx_list`.
    """
    if len(ctx_list) == 1:
        return [d.as_in_context(ctx_list[0]) for d in data]

    size = len(data)
    num_slice = len(ctx_list)
    step = size // num_slice
    # Collect the loaded slices; the original discarded the results of
    # as_in_context() and returned the input unchanged.
    slices = []
    for i in range(num_slice):
        slices.append([data[k].as_in_context(ctx_list[i])
                       for k in range(i * step, (i + 1) * step)])
    return slices
4b8f0d1b6b256895da3e37fbb4b1be0cd0da5c46
9,517
def _get_scoped_outputs(comp, g, explicit_outs):
    """Return a list of output varnames scoped to the given name."""
    cnamedot = comp.name + '.'
    outputs = set()
    if explicit_outs is None:
        explicit_outs = ()

    for u, v in g.list_connections():
        if u.startswith(cnamedot):
            outputs.add(u)
    outputs.update([n for n in explicit_outs if n.startswith(cnamedot)])

    if not outputs:
        return None
    return [n.split('.', 1)[1] for n in outputs]
8ff2cfe49dc3d892c4ed4adaeb9300e9395c790b
9,518
def judge_1d100_with_6_ver(target: int, dice: int):
    """Judge a 1d100 dice result and return the text and color for the message.

    The result is critical, success, failure or fumble.

    Arguments:
        target {int} -- target value (ex. skill value)
        dice {int} -- dice value

    Returns:
        message {string}
        rgb_color {string}
    """
    if dice <= target:
        if dice <= 5:
            return "クリティカル", yig.config.COLOR_CRITICAL
        return "成功", yig.config.COLOR_SUCCESS
    if dice >= 96:
        return "ファンブル", yig.config.COLOR_FUMBLE
    return "失敗", yig.config.COLOR_FAILURE
f870f6ffee3bb90046eb2f1660e827b899c59f04
9,519
def get_normals(self, indices=None, loc="center"):
    """Return the array of the normals coordinates.

    Parameters
    ----------
    self : MeshVTK
        a MeshVTK object
    indices : list
        list of the points to extract (optional)
    loc : str
        localization of the normals ("center" or "point")

    Returns
    -------
    normals : ndarray
        Normals coordinates
    """
    # Get surfaces
    surf = self.get_surf()

    if loc == "center":
        normals = surf.cell_normals
    elif loc == "point":
        if self.node_normals is None:
            self.surf.compute_normals(
                cell_normals=False, point_normals=True, inplace=True
            )
            self.node_normals = self.surf["Normals"]
        normals = self.node_normals

    if indices is None:
        return normals
    else:
        return normals[indices, :]
5d08247f70e1012eef7d525ae63f7aebe294e700
9,520
def string_to_weld_literal(s):
    """
    Converts a string to a UTF-8 encoded Weld literal byte-vector.

    Examples
    --------
    >>> string_to_weld_literal('hello')
    '[104c,101c,108c,108c,111c]'
    """
    return "[" + ",".join([str(b) + 'c' for b in list(s.encode('utf-8'))]) + "]"
d85b016091988c9307cbed56aafdd5766c3c9be5
9,521
def verify_model_licensed(class_name: str, model_path: str):
    """
    Load a licensed model from HDD
    """
    try:
        m = eval(class_name).load(model_path)
        return m
    except Exception:
        print(f"Could not load Annotator class={class_name} located in {model_path}."
              f" Try updating spark-nlp-jsl")
057987d838982a85925f70c93ff2f4166b038cec
9,522
def examine_api(api):
    """Find all style issues in the given parsed API."""
    global failures
    failures = {}
    for key in sorted(api.keys()):
        examine_clazz(api[key])
    return failures
c94efa9a2be66e30597c63b376adf74bd2ef6462
9,523
def launch():
    """Initialize the module."""
    return BinCounterWorker(BinCounter, PT_STATS_RESPONSE, STATS_RESPONSE)
07bfc99731088a8572616aa1cbfbd0be74db5492
9,525
def make_map(source):
    """Creates a Bokeh figure displaying the source data on a map

    Args:
        source: A GeoJSONDataSource object containing bike data

    Returns:
        A Bokeh figure with a map displaying the data
    """
    tile_provider = get_provider(Vendors.STAMEN_TERRAIN_RETINA)

    TOOLTIPS = [
        ('bikes available', '@bikes'),
    ]

    p = figure(x_range=(-8596413.91, -8558195.48),
               y_range=(4724114.13, 4696902.60),
               x_axis_type="mercator", y_axis_type="mercator",
               width=1200, height=700,
               tooltips=TOOLTIPS)
    p.add_tile(tile_provider)
    p.xaxis.visible = False
    p.yaxis.visible = False

    p.circle(x='x', y='y', size='size', color='color', alpha=0.7, source=source)

    color_bar_palette = viridis(256)
    color_mapper = LinearColorMapper(palette=color_bar_palette, low=0, high=100)
    color_bar = ColorBar(color_mapper=color_mapper,
                         background_fill_alpha=0.7,
                         title='% Full',
                         title_text_align='left',
                         title_standoff=10)
    p.add_layout(color_bar)

    label = Label(x=820, y=665, x_units='screen', y_units='screen',
                  text='Dot size represents total docks in station',
                  render_mode='css',
                  border_line_color=None,
                  background_fill_color='white',
                  background_fill_alpha=0.7)
    p.add_layout(label)

    return p
51578186a1fabd071e31e46b20568c23c79bc693
9,526
def rotate_images(images, rot90_scalars=(0, 1, 2, 3)):
    """Return the input image and its 90, 180, and 270 degree rotations."""
    images_rotated = [
        images,  # 0 degrees
        tf.image.flip_up_down(tf.image.transpose_image(images)),  # 90 degrees
        tf.image.flip_left_right(tf.image.flip_up_down(images)),  # 180 degrees
        tf.image.transpose_image(tf.image.flip_up_down(images))   # 270 degrees
    ]

    results = tf.stack([images_rotated[i] for i in rot90_scalars])
    results = tf.reshape(results, [-1] + images.get_shape().as_list()[1:])
    return results
dd151b83918eba9b62a91b499273772e66af6ba9
9,528
from typing import List


# NOTE: `[str]` is not a valid type annotation; List[str] is the correct form.
# Also note this function shadows the standard library's `csv` module name.
def csv(args: List[str]) -> str:
    """Create a string of comma-separated values."""
    return ','.join(args)
1e48583c236940f2af10f8e050af8ad70ace51f6
9,529
import types


def _update_class(oldclass, newclass):
    """Update a class object."""
    # XXX What about __slots__?
    olddict = oldclass.__dict__
    newdict = newclass.__dict__
    # PDF changed to remove use of set as not in Jython 2.2
    for name in olddict.keys():
        if name not in newdict:
            delattr(oldclass, name)
    for name in newdict.keys():
        if name not in ["__dict__", "__doc__"]:
            if name not in olddict:
                setattr(oldclass, name, newdict[name])
                continue
            new = getattr(newclass, name)
            old = getattr(oldclass, name, None)
            if new == old:
                continue
            if old is None:
                setattr(oldclass, name, new)
                continue
            if isinstance(new, types.MethodType):
                changed = _update_method(old, new)
                setattr(oldclass, name, changed)
            elif isinstance(new, types.FunctionType):
                # __init__ is a function
                changed = _update_function(old, new)
                setattr(oldclass, name, changed)
            else:
                # Fallback to just replace the item
                setattr(oldclass, name, new)
    return oldclass
123eb4eadf7bf6ee65ae5df6ae9ed6df444c25d3
9,530
def mean_by_weekday(day, val):
    """
    Returns a list containing the weekday abbreviation and the mean
    start and end of presence for that day.
    """
    return [day_abbr[day], mean(val['start']), mean(val['end'])]
8aa7ac3dde83db88b44d2178ba19c5b731af683c
9,533
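A minimal sketch of how `mean_by_weekday` above could be exercised, assuming its module-level `day_abbr` is `calendar.day_abbr` and `mean` is a plain arithmetic mean (both names are undefined in the record, so these are assumptions):

from calendar import day_abbr  # 'Mon', 'Tue', ...

def mean(values):  # stand-in for the project's own helper
    return sum(values) / len(values)

presence = {'start': [28800, 30600], 'end': [61200, 63000]}  # seconds since midnight
print(mean_by_weekday(0, presence))  # -> ['Mon', 29700.0, 62100.0]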
def parse_metrics(match, key):
    """Gets the metrics out of the parsed logger stream"""
    elements = match.split(' ')[1:]
    elements = filter(lambda x: len(x) > 2, elements)
    elements = [float(e) for e in elements]
    metrics = dict(zip(['key', 'precision', 'recall', 'f1'], [key] + elements))
    return metrics
70de1ad16edfe827e0a851c719d902695696700f
9,534
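For illustration, assuming the logger emits space-separated lines like 'entity 0.910 0.880 0.895' (the exact line format is an assumption), `parse_metrics` above yields:

metrics = parse_metrics('entity 0.910 0.880 0.895', 'ner')
# -> {'key': 'ner', 'precision': 0.91, 'recall': 0.88, 'f1': 0.895}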
def minecraftify(clip: vs.VideoNode, div: float = 64.0, mod: int | None = None) -> vs.VideoNode:
    """
    Function that transforms your clip into a Minecraft.

    Idea from Meme-Maji's Kobayashi memery (love you varde).

    :param clip:    Input clip
    :param div:     How much to divide the clip's resolution with
    :param mod:     Force the downscaled clip to be MOD# compliant

    :return:        A Minecraft.
    """
    ow, oh = round(clip.width/div), round(clip.height/div)

    if mod is not None:
        ow, oh = force_mod(ow, mod), force_mod(oh, mod)

    i444 = core.resize.Bicubic(clip, format=vs.YUV444PS)
    down = Point().scale(i444, ow, oh)
    return Point().scale(down, clip.width, clip.height)
4f8338cfe2df8bff8d4f2c7571fa38688e39496c
9,535
def processGOTerm(goTerm):
    """
    In an object representing a GO term, replace single-element lists
    with their only member.
    Returns the modified object as a dictionary.
    """
    ret = dict(goTerm)  # Input is a defaultdict, might express unexpected behaviour
    for key, value in ret.items():
        if len(value) == 1:
            ret[key] = value[0]
    return ret
541916a0060726bbc972b784f9a011541e7c8128
9,536
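A small demonstration of the single-element unwrapping in `processGOTerm` above; the field names are hypothetical, chosen only for illustration:

from collections import defaultdict

term = defaultdict(list)
term['id'] = ['GO:0008150']
term['name'] = ['biological_process']
term['is_a'] = ['GO:0003674', 'GO:0005575']
print(processGOTerm(term))
# -> {'id': 'GO:0008150', 'name': 'biological_process',
#     'is_a': ['GO:0003674', 'GO:0005575']}   (multi-element lists stay lists)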
import urllib
import xmlrpclib


def searchxapian_show(request):
    """ Displays the content of a document. """
    SORT_BY = {
        -1: _(u'Relevanz'),
        0: _(u'URL'),
        1: _(u'Überschrift/Titel'),
        2: _(u'Datum der letzten Änderung')
    }
    if request.path.find('index.html') < 0:
        my_path = request.path.replace('searchxapian', 'index.html/searchxapian')
    else:
        my_path = request.path
    item_container = get_item_container(my_path, '/searchxapian/')

    def get_sort_by_choices():
        ret = []
        ret.append((-1, SORT_BY[-1]))  # see SORT_BY
        ret.append((0, SORT_BY[0]))
        ret.append((1, SORT_BY[1]))
        ret.append((2, SORT_BY[2]))
        return ret

    def get_domain_choices():
        ret = []
        ret.append(('', _(u'Alle Seiten')))
        if item_container != None:
            url = item_container.container.site.url[7:]
            ret.append((url, _(u'Nur Seiten der Domaine <i>') + url + '</i>'))
        return ret

    class DmsItemForm(forms.Form):
        query = forms.CharField(required=False, max_length=60,
                                widget=forms.TextInput(attrs={'size': 60}))
        sort_by = forms.CharField(widget=forms.Select(choices=get_sort_by_choices(),
                                                      attrs={'size': 4, 'style': 'width:60%'}))
        domain = forms.ChoiceField(required=False, choices=get_domain_choices(),
                                   widget=forms.RadioSelect())

    def get_prev_next(query, offset, delta, domain, sort_by, count):
        aquery = u'query=%s' % urllib.quote_plus(query)
        if domain == '':
            site = ''
        else:
            site = '&domain=' + domain
        show_prev = ''
        show_next = ''
        show_middle = ''
        n_start = 0
        if count > offset + 10*delta:
            show_next_more = True
            count = offset + 10*delta
        else:
            show_next_more = False
        if offset > 10*delta:
            show_prev_more = True
            n_start = offset - 10*delta
        else:
            show_prev_more = False
        n = n_start
        while n < count:
            if n < offset:
                show_prev += show_link(u'./?%s&offset=%i&sort_by=%i%s' % (aquery, n, sort_by, site),
                                       smart_unicode(n), url_class='navLink') + ' '
            elif n == offset:
                show_middle = '<b>%i</b> ' % n
            else:
                show_next += show_link(u'./?%s&offset=%i&sort_by=%i%s' %
                                       (aquery, n, sort_by, site),
                                       smart_unicode(n), url_class='navLink') + ' '
            n += delta
        if show_next_more:
            show_next += show_link(u'./?%s&offset=%i&sort_by=%i%s' %
                                   (aquery, n, sort_by, site),
                                   ' &raquo; Weiter', url_class='navLink')
        if show_prev_more:
            show_prev = show_link(u'./?%s&offset=%i&sort_by=%i%s' %
                                  (aquery, n_start-delta, sort_by, site),
                                  'Zurück &laquo; ', url_class='navLink') + show_prev
        if count < delta:
            show_middle = ''
        return show_prev, show_middle, show_next

    def get_search_results(request):
        sort_by = -1
        offset = 0
        delta = 20
        count = -1
        if show_errors:
            data = request.POST.copy()
            query = data['query']
            domain = data['domain']
        else:
            data = {'query': '', 'sort_by': -1, }
            query = ''
            domain = ''
        if params.has_key('offset'):
            offset = int(params['offset'])
        if params.has_key('sort_by'):
            sort_by = int(params['sort_by'])
        if params.has_key('domain'):
            domain = params['domain']
        if params.has_key('query'):
            query = params['query']
            data = {'query': query, 'sort_by': sort_by, 'domain': domain}
        s = xmlrpclib.Server('http://localhost:3000')
        sort_by = int(data['sort_by'])
        ascending = sort_by == 2
        res = s.search(query, offset, delta, domain, sort_by, ascending)
        return res, query, offset, delta, domain, sort_by, data

    def get_link_list(rs):
        results = []
        for r in rs:
            this_link = show_link(r['url'], r['title']) + u' {%s}' % r['percent']
            # --- see SORT_BY
            if sort_by == 0:
                this_link += '<br />' + r['url']
            elif sort_by == 2:
                this_link += ', ' + get_german_date(r['date'])
            results.append(this_link)
        return results

    app_name = 'searchxapian'
    my_title = _(u'Suchanfrage stellen')
    if item_container != None:
        my_absolute_url = item_container.get_absolute_url()
    else:
        my_absolute_url = './'
    show_errors = (request.method == 'POST')
    params = request.GET.copy()
    if params != {} or show_errors:
        res, query, offset, delta, domain, sort_by, data = get_search_results(request)
        query = decode_html(query)
        # --- turn the raw data into a list
        count = res['count']
        rs = res['results']
        results = get_link_list(rs)
        if query.find('&') >= 0:
            q = query
        else:
            try:
                q = encode_html(query.decode('iso-8859-1'))
            except:
                q = encode_html(query)
        show_prev, show_middle, show_next = \
            get_prev_next(q, offset, delta, domain, sort_by, count)
    else:
        sort_by = -1
        query = ''
        count = 20
        data = {'query': '', 'sort_by': sort_by, 'domain': '', }
        results = []
        show_prev = ''
        show_middle = ''
        show_next = ''
    f = DmsItemForm(data)
    # --- order, headings, help texts
    tabs = [('tab_base', ['query', ]),
            ('tab_more', ['sort_by', 'domain', ])]
    # --- assemble the form
    content = get_tabbed_form(tabs, help_form, app_name, f)
    # --- external search engines
    search_engines = get_search_engines()
    links = []
    for engine in search_engines:
        if query.find('&') < 0:
            url = engine.url_query % (urllib.quote_plus(encode_html(query.decode('iso-8859-1'))),
                                      SEARCH_DOMAIN)
        else:
            url = engine.url_query % (urllib.quote_plus(query), SEARCH_DOMAIN)
        links.append(show_link(url, engine.name, url_class="navLink"))
    t = get_template('utils/info_slot_right_list_simple.html')
    c = Context({'header': _(u'Externe Suche'), 'links': links})
    slot_info_right = t.render(c)
    # --- back to the starting page
    back_link = show_link(my_absolute_url, _(u'Zur Ausgangsseite ...'), url_class="navLink")
    t = get_template('utils/info_slot_right.html')
    c = Context({'header': _(u'Ausgangsseite'), 'info': back_link})
    slot_info_right += '<br /><br />\n' + t.render(c)
    vars = get_item_vars_add(request, item_container, app_name, my_title,
                             content, show_errors)
    vars['next'] = get_site_url(item_container, 'searchxapian/')
    vars['path'] = item_container.container.path + 'searchxapian/'
    vars['sub_title'] = ''
    vars['slot_right_info'] = slot_info_right
    vars['action'] = ''
    vars['results'] = results
    vars['count'] = count
    vars['show_prev'] = show_prev
    vars['show_middle'] = show_middle
    vars['show_next'] = show_next
    vars['sort_by'] = SORT_BY[sort_by]
    vars['google_search'] = 'google'
    vars['no_top_main_navigation'] = True
    return render_to_response('app/searchxapian/base.html', vars)
bd1f252107bfcf2aa02cf58ef6d1a302d71edbd8
9,537
import re


def html2plaintext(html, body_id=None, encoding='utf-8'):
    """ From an HTML text, convert the HTML to plain text.
    If @param body_id is provided then this is the tag where the
    body (not necessarily <body>) starts.
    """
    ## (c) Fry-IT, www.fry-it.com, 2007
    ## <[email protected]>
    ## download here: http://www.peterbe.com/plog/html2plaintext
    html = ustr(html)
    tree = etree.fromstring(html, parser=etree.HTMLParser())

    if body_id is not None:
        source = tree.xpath('//*[@id=%s]' % (body_id,))
    else:
        source = tree.xpath('//body')
    if len(source):
        tree = source[0]

    url_index = []
    i = 0
    for link in tree.findall('.//a'):
        url = link.get('href')
        if url:
            i += 1
            link.tag = 'span'
            link.text = '%s [%s]' % (link.text, i)
            url_index.append(url)

    html = ustr(etree.tostring(tree, encoding=encoding))
    # \r char is converted into &#13;, must remove it
    html = html.replace('&#13;', '')

    html = html.replace('<strong>', '*').replace('</strong>', '*')
    html = html.replace('<b>', '*').replace('</b>', '*')
    html = html.replace('<h3>', '*').replace('</h3>', '*')
    html = html.replace('<h2>', '**').replace('</h2>', '**')
    html = html.replace('<h1>', '**').replace('</h1>', '**')
    html = html.replace('<em>', '/').replace('</em>', '/')
    html = html.replace('<tr>', '\n')
    html = html.replace('</p>', '\n')
    html = re.sub('<br\s*/?>', '\n', html)
    html = re.sub('<.*?>', ' ', html)
    html = html.replace(' ' * 2, ' ')

    # strip all lines
    html = '\n'.join([x.strip() for x in html.splitlines()])
    html = html.replace('\n' * 2, '\n')

    for i, url in enumerate(url_index):
        if i == 0:
            html += '\n\n'
        html += ustr('[%s] %s\n') % (i + 1, url)
    return html
70a7af7e557b6cffac05e33a7a394fdccbf7bc84
9,538
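A rough input/output sketch for `html2plaintext` above. It is not standalone: it assumes the snippet's `ustr` behaves like `str` and that `etree` is `lxml.etree`, so treat the output as approximate:

text = html2plaintext('<p>Hello <b>world</b>, see '
                      '<a href="http://example.com">this</a>.</p>')
# roughly: 'Hello *world*, see this [1] .\n\n[1] http://example.com\n'
# (bold becomes *...*, links become numbered references listed at the end)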
def MooreSpace(q):
    """
    Triangulation of the mod `q` Moore space.

    INPUT:

    - ``q`` -- integer, at least 2

    This is a simplicial complex with simplices of dimension 0, 1, and 2,
    such that its reduced homology is isomorphic to `\\ZZ/q\\ZZ` in
    dimension 1, zero otherwise.

    If `q=2`, this is the real projective plane. If `q>2`, then construct
    it as follows: start with a triangle with vertices 1, 2, 3. We take a
    `3q`-gon forming a `q`-fold cover of the triangle, and we form the
    resulting complex as an identification space of the `3q`-gon. To
    triangulate this identification space, put `q` vertices `A_0`, ...,
    `A_{q-1}`, in the interior, each of which is connected to 1, 2, 3
    (two facets each: `[1, 2, A_i]`, `[2, 3, A_i]`). Put `q` more vertices
    in the interior: `B_0`, ..., `B_{q-1}`, with facets `[3, 1, B_i]`,
    `[3, B_i, A_i]`, `[1, B_i, A_{i+1}]`, `[B_i, A_i, A_{i+1}]`. Then
    triangulate the interior polygon with vertices `A_0`, `A_1`, ...,
    `A_{q-1}`.

    EXAMPLES::

        sage: simplicial_complexes.MooreSpace(2)
        Minimal triangulation of the real projective plane
        sage: simplicial_complexes.MooreSpace(3).homology()[1]
        C3
        sage: simplicial_complexes.MooreSpace(4).suspension().homology()[2]
        C4
        sage: simplicial_complexes.MooreSpace(8)
        Triangulation of the mod 8 Moore space
    """
    if q <= 1:
        raise ValueError("the mod q Moore space is only defined if q is at least 2")
    if q == 2:
        return RealProjectivePlane()
    facets = []
    for i in range(q):
        Ai = "A" + str(i)
        Aiplus = "A" + str((i+1) % q)
        Bi = "B" + str(i)
        facets.append([1, 2, Ai])
        facets.append([2, 3, Ai])
        facets.append([3, 1, Bi])
        facets.append([3, Bi, Ai])
        facets.append([1, Bi, Aiplus])
        facets.append([Bi, Ai, Aiplus])
    for i in range(1, q-1):
        Ai = "A" + str(i)
        Aiplus = "A" + str((i+1) % q)
        facets.append(["A0", Ai, Aiplus])
    return UniqueSimplicialComplex(facets,
                                   name='Triangulation of the mod {} Moore space'.format(q))
448e948782d530f6b1ee0909fae02b66606da94d
9,540
def show(tournament_name, params=[], filter_response=True):
    """Retrieve a single tournament record by `tournament name`"""
    utils._validate_query_params(params=params,
                                 valid_params=VALID_PARAMS,
                                 route_type='tournament')
    uri = TOURNAMENT_PREFIX + tournament_name
    response = api.get(uri, params)
    if filter_response:
        response = _filter_tournament_response(response, params)
    return response
d854c97e312a0bd6860a5c7fa7cbd36cd79d4ffd
9,541
from typing import Optional
from datetime import datetime


def create(arxiv_id: ArXivID,
           arxiv_ver: int,
           resource_type: str,
           resource_id: str,
           description: str,
           creator: Optional[str]) -> Relation:
    """
    Create a new relation for an e-print.

    Parameters
    ----------
    arxiv_id: ArXivID
        The arXiv ID of the e-print.
    arxiv_ver: int
        The version of the e-print.
    resource_type: str
        The type of the corresponding resource.
    resource_id: str
        An identifier of the resource e.g., DOI.
    description: str
        A description for the relation.
    creator: Optional[str]
        Info of the user/app who requested this relation creation.

    Returns
    -------
    Relation
        The newly-created relation.
    """
    # store it to DB
    rel_data = RelationDB(rel_type=RelationType.ADD,
                          arxiv_id=str(arxiv_id),
                          arxiv_ver=arxiv_ver,
                          resource_type=resource_type,
                          resource_id=resource_id,
                          description=description,
                          added_at=datetime.now(UTC),
                          creator=creator,
                          supercedes_or_suppresses=None)
    try:
        db.session.add(rel_data)
        db.session.commit()
    except Exception as e:
        db.session.rollback()
        raise StorageError from e

    # return the result
    return relation_from_DB(rel_data)
e1cbe374bba359b66d8564134ee27ac777c4a16e
9,543
import numpy as np


def p_contain_resist(D, t, f_y, f_u=None):
    """Pressure containment resistance in accordance with DNVGL-ST-F101.
    (press_contain_resis)

    Reference:
    DNVGL-ST-F101 (2017-12)
        sec:5.4.2.2 eq:5.8 p:94 $p_{b}(t)$
    """
    if f_u is None:
        f_cb = f_y
    else:
        f_cb = np.minimum(f_y, f_u/1.15)
    p_b = (2*t/(D-t) * f_cb * 2/np.sqrt(3))
    return p_b
1c771eebf2ed43115b8ae32405172cd8576d66a2
9,544
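A worked call with illustrative pipe numbers (the values are made up; only unit consistency matters):

D = 0.3239    # outer diameter [m]
t = 0.0159    # wall thickness [m]
f_y = 450e6   # yield stress [Pa]
f_u = 535e6   # tensile strength [Pa]
p_b = p_contain_resist(D, t, f_y, f_u)
# f_cb = min(450e6, 535e6/1.15) = 450e6, so
# p_b = 2*0.0159/(0.3239-0.0159) * 450e6 * 2/sqrt(3) ≈ 5.37e7 Pa (~53.7 MPa)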
def revalue(request):
    """Modify parameters of other (miscellaneous) devices."""
    value = request.GET.get('value')
    name = request.GET.get('name')
    others = Machines().filter_machines(OtherMachineInfo, pk=request.GET.get('dID'))[0]
    if name == 'remark':
        others.remark = value
    elif name == 'machine_name':
        others.machine_name = value
    elif name == 'reson_str':
        others.reson_str = value
    elif name == 'oth_cab_id':
        return '再考虑考虑'
    others.save()
    return JsonResponse({'is_ok': 1})
0a7ab179932466e171a119c87871e04e2a3ae252
9,545
import authl.handlers.indieauth
import json


def indieauth_endpoint():
    """ IndieAuth token endpoint """
    if 'me' in flask.request.args:
        # A ticket request is being made
        me_url = flask.request.args['me']
        try:
            endpoint, _ = authl.handlers.indieauth.find_endpoint(me_url, rel='ticket_endpoint')
        except RuntimeError:
            endpoint = None
        if not endpoint:
            raise http_error.BadRequest("Could not get ticket endpoint")
        LOGGER.info("endpoint: %s", endpoint)
        send_auth_ticket(me_url, flask.request.url_root, endpoint)
        return "Ticket sent", 202

    if 'grant_type' in flask.request.form:
        # token grant
        if flask.request.form['grant_type'] == 'ticket':
            # TicketAuth
            if 'ticket' not in flask.request.form:
                raise http_error.BadRequest("Missing ticket")
            ticket = parse_token(flask.request.form['ticket'])
            LOGGER.info("Redeeming ticket for %s; scopes=%s", ticket['me'], ticket['scope'])

            scopes = set(ticket.get('scope', '').split())
            if 'ticket' not in scopes:
                raise http_error.BadRequest("Missing 'ticket' scope")
            scopes.remove('ticket')
            scope = ' '.join(scopes)

            token = get_token(ticket['me'], config.token_lifetime, scope)
            response = {
                'access_token': token,
                'token_type': 'Bearer',
                'me': ticket['me'],
                'expires_in': config.token_lifetime,
                'refresh_token': get_token(ticket['me'], config.token_lifetime, ticket['scope'])
            }
            if scope:
                response['scope'] = scope
            return json.dumps(response), {'Content-Type': 'application/json'}

        raise http_error.BadRequest("Unknown grant type")

    if 'action' in flask.request.form:
        raise http_error.BadRequest()

    if 'Authorization' in flask.request.headers:
        # ticket verification
        parts = flask.request.headers['Authorization'].split()
        if parts[0].lower() == 'bearer':
            token = parse_token(parts[1])
            return json.dumps(token), {'Content-Type': 'application/json'}
        raise http_error.Unauthorized("Invalid authorization header")

    raise http_error.BadRequest()
29809af2c243a08b675738b0169bdc794965c934
9,546
def policy_simulation_c(model, var, ages):
    """ policy simulation for couples """
    if var == 'd':
        return {
            'hs': lifecycle_c(model, var=var, MA=[0], ST_w=[1, 3], ages=ages, calc='sum')['y'][0]
                  + lifecycle_c(model, var=var, MA=[1], ST_h=[1, 3], ages=ages, calc='sum')['y'][0],
            'hs_f': lifecycle_c(model, var=var, MA=[0], ST_w=[1, 3], ages=ages, calc='sum')['y'][0],
            'hs_m': lifecycle_c(model, var=var, MA=[1], ST_h=[1, 3], ages=ages, calc='sum')['y'][0],
            'base': lifecycle_c(model, var=var, MA=[0, 1], ages=ages, calc='sum')['y'][0],
            'base_f': lifecycle_c(model, var=var, MA=[0], ages=ages, calc='sum')['y'][0],
            'base_m': lifecycle_c(model, var=var, MA=[1], ages=ages, calc='sum')['y'][0],
            'ls': lifecycle_c(model, var=var, MA=[0], ST_w=[0, 2], ages=ages, calc='sum')['y'][0]
                  + lifecycle_c(model, var=var, MA=[1], ST_h=[0, 2], ages=ages, calc='sum')['y'][0],
            'ls_f': lifecycle_c(model, var=var, MA=[0], ST_w=[0, 2], ages=ages, calc='sum')['y'][0],
            'ls_m': lifecycle_c(model, var=var, MA=[1], ST_h=[0, 2], ages=ages, calc='sum')['y'][0]
        }
    if var == 'probs':
        return {
            'base_f': retirement_probs_c(model, ma=0),
            'base_m': retirement_probs_c(model, ma=1)
        }
    if var == 'GovS':
        return lifecycle_c(model, var=var, MA=[0, 1], ages=ages, calc='total_sum')['y'][0]
    if var == 'RetAge':
        return {
            'hs': np.mean(np.concatenate((RetAge_C(model, ma=0, ST_w=[1, 3]),
                                          RetAge_C(model, ma=1, ST_h=[1, 3])))),
            'base_m': np.mean(RetAge_C(model, ma=1)),
            'base_f': np.mean(RetAge_C(model, ma=0)),
            'base': np.mean(np.concatenate((RetAge_C(model, ma=0),
                                            RetAge_C(model, ma=1)))),
            'ls': np.mean(np.concatenate((RetAge_C(model, ma=0, ST_w=[0, 2]),
                                          RetAge_C(model, ma=1, ST_h=[0, 2]))))
        }
d81ddd950eafb23b8cb219638b358a65084ae08d
9,547
def emit_obj_db_entry(target, source, env):
    """Emitter for object files. We add each object file
    built into a global variable for later use"""
    for t in target:
        if t is None:  # skip missing targets
            continue
        OBJ_DB.append(t)
    return target, source
e02c2b4e3f3b1aad15097c6b4701407ef1902b77
9,548
def listtimes(list, c):
    """repeats each element in the list c times"""
    ret = []
    for i in range(0, len(list)):
        ret.extend([list[i]] * c)
    return ret
8aef63677a1a926f355644187d58b47e437e152c
9,549
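`listtimes` in action; note it repeats each element in place rather than multiplying values:

assert listtimes([1, 2, 3], 2) == [1, 1, 2, 2, 3, 3]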
def split_audio_ixs(n_samples, rate=STEP_SIZE_EM, min_coverage=0.75):
    """
    Create audio/mel slice indices for the audio clip.

    Args:
        n_samples: length of the audio clip in samples
        rate: how many slices should start per second of audio
        min_coverage: minimum fraction of a full slice that the final,
            partial slice must cover in order to be kept
    Returns:
        wav_slices, mel_slices: lists of `slice` objects into the waveform
        and the mel spectrogram, respectively
    """
    assert 0 < min_coverage <= 1

    # Compute how many frames separate two partial utterances
    samples_per_frame = int((SAMPLING_RATE * WINDOW_STEP_DIARIZATION / 1000))
    n_frames = int(np.ceil((n_samples + 1) / samples_per_frame))
    frame_step = int(np.round((SAMPLING_RATE / rate) / samples_per_frame))
    assert 0 < frame_step, "The rate is too high"
    assert frame_step <= H_L, "The rate is too low, it should be %f at least" % \
        (SAMPLING_RATE / (samples_per_frame * H_L))

    wav_slices, mel_slices = [], []
    steps = max(1, n_frames - H_L + frame_step + 1)
    for i in range(0, steps, frame_step):
        mel_range = np.array([i, i + H_L])
        wav_range = mel_range * samples_per_frame
        mel_slices.append(slice(*mel_range))
        wav_slices.append(slice(*wav_range))

    last_wav_range = wav_slices[-1]
    coverage = (n_samples - last_wav_range.start) / \
        (last_wav_range.stop - last_wav_range.start)
    if coverage < min_coverage and len(mel_slices) > 1:
        mel_slices = mel_slices[:-1]
        wav_slices = wav_slices[:-1]

    return wav_slices, mel_slices
d3a71082c9f551dffb5a0457ba79fc3318f6df6a
9,551
def new(w: int, h: int, fmt: str, bg: int) -> 'Image':
    """ Creates a new image with the given size and format and fills it with the bg color """
    if fmt not in ('RGB', 'RGBA', 'L', 'LA'):
        raise ValueError('invalid format')
    c = len(fmt)
    image = Image()
    image.im = _new_image(w, h, c)
    lib.image_draw_rect(image.im, 0, 0, w, h, bg)
    return image
c570eab9d62def584a2a12b8a228b30c57cfed76
9,552
def eval_f(f, xs):
    """Takes a function f = f(x) and a list xs of values that should be used
    as arguments for f. The function eval_f should apply the function f
    subsequently to every value x in xs, and return a list fs of function
    values. I.e. for an input argument xs=[x0, x1, x2, ..., xn] the function
    eval_f(f, xs) should return [f(x0), f(x1), f(x2), ..., f(xn)]."""
    return [f(x) for x in xs]  # alternatively: return list(map(f, xs))
00c6ed7fc59b213a3ec9fec9feeb3d91b1522061
9,553
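Exactly what the docstring of `eval_f` promises:

squares = eval_f(lambda x: x * x, [0, 1, 2, 3])
assert squares == [0, 1, 4, 9]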
def cie94_loss(x1: Tensor, x2: Tensor, squared: bool = False, **kwargs) -> Tensor:
    """
    Computes the L2-norm over all pixels of the CIE94 Color-Difference for two RGB inputs.

    Parameters
    ----------
    x1 : Tensor
        First input.
    x2 : Tensor
        Second input (of size matching x1).
    squared : bool
        Returns the squared L2-norm.

    Returns
    -------
    ΔE_94_l2 : Tensor
        The L2-norm over all pixels of the CIE94 Color-Difference.
    """
    ΔE_94_squared = rgb_cie94_color_difference(x1, x2, squared=True, **kwargs).flatten(1)
    ε = kwargs.get('ε', 0)
    if squared:
        return ΔE_94_squared.sum(1)
    return ΔE_94_squared.sum(1).clamp_min(ε).sqrt()
1044585ce4cf8158caa3b969a8b94001681815db
9,554
def get_current_user_id() -> str:
    """
    This function gets the id of the current user that is signed in to the
    Azure CLI.

    In order to get this information, it looks like there are two different
    services, "Microsoft Graph" (developer.microsoft.com/graph) and "Azure AD
    Graph" (graph.windows.net), the latter being deprecated
    (https://devblogs.microsoft.com/microsoft365dev/microsoft-graph-or-azure-ad-graph/).
    I think these services correspond to two different python libraries, msal
    (https://docs.microsoft.com/en-us/python/api/overview/azure/active-directory?view=azure-python)
    and adal
    (https://docs.microsoft.com/en-us/python/api/adal/adal?view=azure-python),
    but these libraries don't appear to do anything super useful on their own.

    The deprecated Azure Graph API seems to correspond to a higher-level
    library azure-graphrbac, which does seem to have the functionality we
    need: azure.graphrbac.GraphRbacManagementClient.signed_in_user, but is
    deprecated along with Azure Graph
    (https://github.com/Azure/azure-sdk-for-python/issues/14022#issuecomment-752279618).
    The msgraph library that we use here seems to be a not-very-high-level
    library for Microsoft Graph
    (https://github.com/microsoftgraph/msgraph-sdk-python-core).

    As a side note, another way to get this information is to use the command
    line to call `az ad signed-in-user show`, but that appears to be relying
    on the deprecated Azure Graph API as it gives a deprecation warning.
    """
    # crucial scopes parameter is needed, see
    # https://github.com/microsoftgraph/msgraph-sdk-python-core/issues/106#issuecomment-969281260
    with get_credential() as credential:
        client = GraphClient(
            credential=credential, scopes=["https://graph.microsoft.com"]
        )
        # https://docs.microsoft.com/en-us/graph/api/user-get?view=graph-rest-1.0&tabs=http
        result = client.get("/me")
        return result.json()["id"]
79a557762c9c4c2a6546370f492f879f3f046f67
9,555
def scale_labels(subject_labels):
    """Saves two lines of code by wrapping up the fitting and transform
    methods of the LabelEncoder.

    Parameters
    :param subject_labels: ndarray
        Label array to be scaled
    :return: ndarray
        Scaled label array
    """
    encoder = preprocessing.LabelEncoder()
    _ = encoder.fit(subject_labels)
    return encoder.transform(subject_labels)
e7c4e4c01f7bc7b43519f1eaf97ff9ce0fda9bbd
9,556
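A short demonstration of `scale_labels` above, assuming the snippet's `preprocessing` is `sklearn.preprocessing`; the encoder assigns integer codes in sorted label order:

import numpy as np

labels = np.array(['rest', 'task', 'rest', 'task', 'task'])
print(scale_labels(labels))  # -> [0 1 0 1 1]  ('rest' -> 0, 'task' -> 1)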
def _get_basemap(grid_metadata_dict):
    """Creates basemap.

    M = number of rows in grid
    N = number of columns in grid

    :param grid_metadata_dict: Dictionary created by
        `grids.create_equidistant_grid`.
    :return: basemap_object: Basemap handle (instance of
        `mpl_toolkits.basemap.Basemap`).
    :return: basemap_x_matrix_metres: M-by-N numpy array of x-coordinates under
        Basemap projection (different than pyproj projection).
    :return: basemap_y_matrix_metres: Same but for y-coordinates.
    """
    x_matrix_metres, y_matrix_metres = grids.xy_vectors_to_matrices(
        x_unique_metres=grid_metadata_dict[grids.X_COORDS_KEY],
        y_unique_metres=grid_metadata_dict[grids.Y_COORDS_KEY]
    )

    projection_object = grid_metadata_dict[grids.PROJECTION_KEY]

    latitude_matrix_deg, longitude_matrix_deg = (
        projections.project_xy_to_latlng(
            x_coords_metres=x_matrix_metres, y_coords_metres=y_matrix_metres,
            projection_object=projection_object)
    )

    standard_latitudes_deg, central_longitude_deg = _get_lcc_params(
        projection_object)

    basemap_object = Basemap(
        projection='lcc', lat_1=standard_latitudes_deg[0],
        lat_2=standard_latitudes_deg[1], lon_0=central_longitude_deg,
        rsphere=projections.DEFAULT_EARTH_RADIUS_METRES,
        ellps=projections.SPHERE_NAME, resolution=RESOLUTION_STRING,
        llcrnrx=x_matrix_metres[0, 0], llcrnry=y_matrix_metres[0, 0],
        urcrnrx=x_matrix_metres[-1, -1], urcrnry=y_matrix_metres[-1, -1]
    )

    basemap_x_matrix_metres, basemap_y_matrix_metres = basemap_object(
        longitude_matrix_deg, latitude_matrix_deg)

    return basemap_object, basemap_x_matrix_metres, basemap_y_matrix_metres
caeac576f5a6345378c71e8e0e690f9bafda0995
9,557
import types


def unary_math_intr(fn, intrcode):
    """
    Implement the math function *fn* using the LLVM intrinsic *intrcode*.
    """
    @lower(fn, types.Float)
    def float_impl(context, builder, sig, args):
        res = call_fp_intrinsic(builder, intrcode, args)
        return impl_ret_untracked(context, builder, sig.return_type, res)

    unary_math_int_impl(fn, float_impl)
    return float_impl
cd3a4c22dab5ea1776987a717c32fbbc71d75da7
9,558
def is_is_int(a):
    """Return `True` if `a` is an expression of the form IsInt(b).

    >>> x = Real('x')
    >>> is_is_int(IsInt(x))
    True
    >>> is_is_int(x)
    False
    """
    return is_app_of(a, Kind.IS_INTEGER)
d7565102a228119ba3157e9569495c5531ea5d74
9,559
def getTestSuite(select="unit"): """ Get test suite select is one of the following: "unit" return suite of unit tests only "component" return suite of unit and component tests "all" return suite of unit, component and integration tests "pending" return suite of pending tests name a single named test to be run """ testdict = { "unit": [ "testUnits" , "testNull" ], "component": [ "testComponents" , "testReadMe" , "testCreateFile" , "testRewriteFile" , "testUpdateFile" , "testDeleteFile" ], "integration": [ "testIntegration" ], "pending": [ "testPending" , "testWebDAVFile" , "testWebDAVFileUrlLib" ] } return TestUtils.getTestSuite(TestWebDAVAccess, testdict, select=select)
8ea94ad556dd77d28d5abdb2034b51858c996042
9,560
import torch


def normalize_channel_wise(tensor: torch.Tensor, mean: torch.Tensor,
                           std: torch.Tensor) -> torch.Tensor:
    """Normalizes given tensor channel-wise

    Parameters
    ----------
    tensor: torch.Tensor
        Tensor to be normalized
    mean: torch.Tensor
        Mean to be subtracted
    std: torch.Tensor
        Std to be divided by

    Returns
    -------
    result: torch.Tensor
    """
    if len(tensor.size()) != 3:
        raise ValueError('tensor must have exactly 3 dimensions (C x H x W)')

    for channel in range(tensor.size(0)):
        tensor[channel, :, :] -= mean[channel]
        tensor[channel, :, :] /= std[channel]

    return tensor
862a5497d9c4379a974e8e2543acc0c1282faea5
9,561
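A minimal check of the channel-wise normalization above; note the input tensor is modified in place and also returned:

import torch

img = torch.ones(3, 4, 4)
mean = torch.tensor([1.0, 1.0, 1.0])
std = torch.tensor([2.0, 2.0, 2.0])
out = normalize_channel_wise(img, mean, std)
assert torch.all(out == 0)  # (1 - 1) / 2 == 0 for every channel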
from tqdm import tqdm


def load_images(shot_paths):
    """
    images = {
        shot1: {
            frame_id1: PIL image1,
            ...
        },
        ...
    }
    """
    images = list(tqdm(map(load_image, shot_paths),
                       total=len(shot_paths),
                       desc='loading images'))
    images = {k: v for k, v in images}
    return images
4916c68e1b4255d066bc624284cde77036764dd6
9,562
import numpy


def rmSingles(fluxcomponent, targetstring='target'):
    """ Filter out targets in fluxcomponent that have only one ALMA source. """
    nindiv = len(fluxcomponent)
    flagger = numpy.zeros(nindiv)
    for icomp in range(nindiv):
        target = fluxcomponent[targetstring][icomp]
        match = fluxcomponent[targetstring] == target
        nmatch = fluxcomponent[targetstring][match].size
        if nmatch == 1:
            flagger[icomp] = 1

    goodflag = flagger == 0
    fluxcomponent = fluxcomponent[goodflag]

    return fluxcomponent
013d5f3169fd1dcb277733627ecd5b0135bc33fb
9,563
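A toy-table sketch for `rmSingles` above, assuming `fluxcomponent` behaves like a numpy record array; the column name 'target' matches the default `targetstring`:

import numpy

flux = numpy.rec.fromrecords(
    [('A', 1.0), ('A', 2.0), ('B', 3.0)], names='target,flux')
kept = rmSingles(flux)
# 'B' appears only once, so only the two 'A' rows survive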