Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def pairwise_to_multiple(pwise, ref_seq, moltype, info=None): """ turns pairwise alignments to a reference into a multiple alignment Parameters ---------- pwise Series of pairwise alignments to ref_seq as [(non-refseq name, aligned pair), ...] ref_seq The sequence common in all pairwise alignments moltype molecular type for the returned alignment info info object Returns ------- ArrayAlign """ if not hasattr(ref_seq, "name"): raise TypeError(f"ref_seq must be a cogent3 sequence, not {type(ref_seq)}") refseqs = [s for _, aln in pwise for s in aln.seqs if s.name == ref_seq.name] ref_gaps = _gap_union(refseqs) m = gap_coords_to_map(ref_gaps, len(ref_seq)) aligned = [Aligned(m, ref_seq)] for other_name, aln in pwise: curr_ref = aln.named_seqs[ref_seq.name] curr_ref_gaps = dict(curr_ref.map.get_gap_coordinates()) other_seq = aln.named_seqs[other_name] other_gaps = dict(other_seq.map.get_gap_coordinates()) diff_gaps = _combined_refseq_gaps(curr_ref_gaps, ref_gaps) inject = _gaps_for_injection(other_gaps, diff_gaps, len(other_seq.data)) if inject: m = gap_coords_to_map(inject, len(other_seq.data)) other_seq = Aligned(m, other_seq.data) aligned.append(other_seq) # default to ArrayAlign return Alignment(aligned, moltype=moltype, info=info).to_type( array_align=True, moltype=moltype )
36f8d63ba9a53aaa448bcf0c782f748edafc25fa
24,341
def get_dashboard(title: str): """Get a dashboard by title""" dashboards = sdk.search_dashboards(title=title) if not dashboards: print(f"dashboard {title} was not found") return None return dashboards[0]
3738557ef1ef2dee35382df7da86cf373908974c
24,342
import numpy as np
import torch

def translate_tensor(tensor, input_size=32, nt=2):
    """
    Data augmentation function to enforce periodic boundary conditions.
    Inputs are arbitrarily translated in each dimension.
    """
    ndim = len(tensor[0, 0, :].shape)
    t = input_size // nt
    t_vec = np.linspace(0, (nt - 1) * t, nt).astype(int)
    for i in range(len(tensor)):
        if ndim == 2:
            # translate by a random number of units in each axis
            tensor1 = torch.roll(tensor[i, 0, :], (np.random.choice(t_vec), np.random.choice(t_vec)), (0, 1))
        elif ndim == 3:
            tensor1 = torch.roll(tensor[i, 0, :], (np.random.choice(input_size), np.random.choice(input_size), np.random.choice(input_size)), (0, 1, 2))
        else:
            raise ValueError("translate_tensor only supports 2-D or 3-D fields")
        if i == 0:
            newtensor = tensor1.unsqueeze(0).unsqueeze(0)  # add back channel dim and batch dim
        else:
            newtensor = torch.cat((newtensor, tensor1.unsqueeze(0).unsqueeze(0)), dim=0)
    return newtensor
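A minimal usage sketch for the augmentation above (illustrative sizes; assumes the function as fixed here, with numpy and torch imported):

x = torch.arange(16.).reshape(1, 1, 4, 4)      # batch of one single-channel 4x4 field
out = translate_tensor(x, input_size=4, nt=2)  # randomly rolled along both axes
assert out.shape == x.shape                    # periodic translation preserves shape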
12280e33331adb6924b36eebc65c85a10f937d58
24,343
def _get_job_resources(args): """Extract job-global resources requirements from input args. Args: args: parsed command-line arguments Returns: Resources object containing the requested resources for the job """ logging = param_util.build_logging_param( args.logging) if args.logging else None timeout = param_util.timeout_in_seconds(args.timeout) log_interval = param_util.log_interval_in_seconds(args.log_interval) return job_model.Resources( min_cores=args.min_cores, min_ram=args.min_ram, machine_type=args.machine_type, disk_size=args.disk_size, disk_type=args.disk_type, boot_disk_size=args.boot_disk_size, image=args.image, regions=args.regions, zones=args.zones, logging=logging, logging_path=None, service_account=args.service_account, scopes=args.scopes, cpu_platform=args.cpu_platform, network=args.network, subnetwork=args.subnetwork, use_private_address=args.use_private_address, accelerator_type=args.accelerator_type, accelerator_count=args.accelerator_count, nvidia_driver_version=None, timeout=timeout, log_interval=log_interval, ssh=args.ssh, enable_stackdriver_monitoring=args.enable_stackdriver_monitoring, max_retries=args.retries, max_preemptible_attempts=args.preemptible, block_external_network=args.block_external_network)
fbbf596c721369890a14581863d4dce0fb24eb42
24,344
import torch
import torch.nn.functional as F

def cosine_distance(input1, input2):
    """Computes cosine distance.

    Args:
        input1 (torch.Tensor): 2-D feature matrix.
        input2 (torch.Tensor): 2-D feature matrix.

    Returns:
        torch.Tensor: distance matrix.
    """
    input1_normed = F.normalize(input1, p=2, dim=1)
    input2_normed = F.normalize(input2, p=2, dim=1)
    distmat = 1 - torch.mm(input1_normed, input2_normed.t())
    return distmat
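A short usage sketch (random embeddings, purely illustrative):

a = torch.randn(4, 128)    # 4 query embeddings
b = torch.randn(6, 128)    # 6 gallery embeddings
d = cosine_distance(a, b)  # pairwise distances, values in [0, 2]
assert d.shape == (4, 6)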
e4aed2f8f0439797312977d674ccc0351a90402b
24,347
import numpy as np

def pad_sequences(sequences, maxlen=None, dtype='int32', padding='post',
                  truncating='post', value=0.):
    """pad_sequences.

    Pad each sequence to the same length: the length of the longest sequence.
    If maxlen is provided, any sequence longer than maxlen is truncated to
    maxlen. Truncation happens off either the beginning or the end (default)
    of the sequence. Supports pre-padding and post-padding (default).

    Arguments:
        sequences: list of lists where each element is a sequence.
        maxlen: int, maximum length.
        dtype: type to cast the resulting sequence.
        padding: 'pre' or 'post', pad either before or after each sequence.
        truncating: 'pre' or 'post', remove values from sequences larger than
            maxlen either in the beginning or in the end of the sequence.
        value: float, value to pad the sequences to the desired value.

    Returns:
        x: `numpy array` with dimensions (number_of_sequences, maxlen)

    Credits: From Keras `pad_sequences` function.
    """
    lengths = [len(s) for s in sequences]
    nb_samples = len(sequences)
    if maxlen is None:
        maxlen = np.max(lengths)
    x = (np.ones((nb_samples, maxlen)) * value).astype(dtype)
    for idx, s in enumerate(sequences):
        if len(s) == 0:
            continue  # empty list was found
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError("Truncating type '%s' not understood" % truncating)
        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError("Padding type '%s' not understood" % padding)
    return x
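A worked example of the padding and truncation behaviour (post-padding, post-truncation defaults):

seqs = [[1, 2, 3], [4, 5], [6, 7, 8, 9, 10]]
pad_sequences(seqs, maxlen=4)
# array([[1, 2, 3, 0],
#        [4, 5, 0, 0],
#        [6, 7, 8, 9]], dtype=int32)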
f69c199861e17575185d0c40371f85b1fa5f2458
24,348
def get_regions(service_name, region_cls=None, connection_cls=None): """ Given a service name (like ``ec2``), returns a list of ``RegionInfo`` objects for that service. This leverages the ``endpoints.json`` file (+ optional user overrides) to configure/construct all the objects. :param service_name: The name of the service to construct the ``RegionInfo`` objects for. Ex: ``ec2``, ``s3``, ``sns``, etc. :type service_name: string :param region_cls: (Optional) The class to use when constructing. By default, this is ``RegionInfo``. :type region_cls: class :param connection_cls: (Optional) The connection class for the ``RegionInfo`` object. Providing this allows the ``connect`` method on the ``RegionInfo`` to work. Default is ``None`` (no connection). :type connection_cls: class :returns: A list of configured ``RegionInfo`` objects :rtype: list """ endpoints = load_regions() if service_name not in endpoints: raise BotoClientError( "Service '%s' not found in endpoints." % service_name ) if region_cls is None: region_cls = RegionInfo region_objs = [] for region_name, endpoint in endpoints.get(service_name, {}).items(): region_objs.append( region_cls( name=region_name, endpoint=endpoint, connection_cls=connection_cls ) ) return region_objs
b63982d14c415d082c729595c85fee0833e75d8f
24,349
import math def sol_dec(day_of_year): """ Calculate solar declination from day of the year. Based on FAO equation 24 in Allen et al (1998). :param day_of_year: Day of year integer between 1 and 365 or 366). :return: solar declination [radians] :rtype: float """ _check_doy(day_of_year) return 0.409 * math.sin(((2.0 * math.pi / 365.0) * day_of_year - 1.39))
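A quick sanity check (assuming the module-level _check_doy validator accepts the value): day 172 puts the argument of sin at almost exactly pi/2, so the declination is near its maximum.

sol_dec(172)   # approximately 0.409 rad (June solstice)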
20c0c491a9ad99a324754c8bc2f6a32e6c6b1f51
24,350
import unittest def makeTestSuiteV201111(): """Set up test suite using v201111. Returns: TestSuite test suite using v201111. """ suite = unittest.TestSuite() suite.addTests(unittest.makeSuite(NetworkServiceTestV201111)) return suite
39a7ecb94bce33e3edbbc52ae584e4fa36c95c42
24,351
def build_series(df):
    """
    Return a series tuple where:
    the first element is a list of dates,
    the second element is the series of the daily-type variables,
    the third element is the series of the current-type variables,
    the fourth element is the series of the cum-type variables.
    :param df: pd.DataFrame
    :return: tuple
    """
    dates = df[DATE_KEY].apply(lambda x: x.strftime(CHART_DATE_FMT)).tolist()
    # each group of series is sorted by its largest value, descending
    series_daily = sorted([
        {"id": col, "name": VARS[col]["title"], "data": df[col].tolist()}
        for col in DAILY_QUANTITIES
    ], key=lambda x: max(x["data"]), reverse=True)
    series_cum = sorted([
        {"id": col, "name": VARS[col]["title"], "data": df[col].tolist()}
        for col in CUM_QUANTITIES
    ], key=lambda x: max(x["data"]), reverse=True)
    series_current = sorted([
        {"id": col, "name": VARS[col]["title"], "data": df[col].tolist()}
        for col in NON_CUM_QUANTITIES
    ], key=lambda x: max(x["data"]), reverse=True)
    series = (dates, series_daily, series_current, series_cum)
    return series
d5ca0f87c46a6061544481ca01a88ad29def4c7e
24,352
import numpy as np

def lnZ(df_mcmc):
    """
    Compute log Z(1) from PTMCMC traces stored in DataFrame.

    Parameters
    ----------
    df_mcmc : pandas DataFrame, as outputted from run_ptmcmc.
        DataFrame containing output of a parallel tempering MCMC run.
        Only needs to contain columns pertinent to computing ln Z, which
        are 'beta_ind', 'lnlike', and 'beta'.

    Returns
    -------
    output : float
        ln Z as computed by thermodynamic integration. This is equivalent
        to what is obtained by calling
        `sampler.thermodynamic_integration_log_evidence(fburnin=0)` where
        `sampler` is an emcee.PTSampler instance.

    Notes
    -----
    .. This is useful when the DataFrame from a PTSampler is too large to
       store in RAM.
    """
    # Average the log likelihood over the samples
    log_mean = np.zeros(len(df_mcmc['beta_ind'].unique()))
    for i, b in enumerate(df_mcmc['beta_ind'].unique()):
        log_mean[i] = df_mcmc['lnlike'][df_mcmc['beta_ind'] == b].mean()
    # Set of betas (temperatures)
    betas = np.concatenate((np.array(df_mcmc['beta'].unique()), (0,)))
    # Approximate quadrature
    return np.dot(log_mean, -np.diff(betas))
621d6db5a9126608d61688bd79ae09e03d25d114
24,353
import numpy as np
import scipy.constants as const  # assumed alias: the body uses const.physical_constants

def p2h(p, T=293., P0=1000., m=28.966, unit_p='mbar'):
    """
    Returns an elevation from barometric pressure

    Parameters
    ----------
    p: {float, array}
        barometric pressure in mbar or torr specified with unit_p
    T: float, optional
        Temperature in K
    P0: float, optional
        Pressure at reference altitude in hPa (default = 1000.)
    m: float, optional
        average mass of gas molecules in u (default = 28.966)
    unit_p: {[mbar], torr}, optional

    Source
    ------
    http://en.wikipedia.org/wiki/Barometric_formula
    """
    if unit_p == 'torr':
        p = unit_conversion.torr2mbar(p)
    k = const.physical_constants['Boltzmann constant'][0]
    g = const.physical_constants['standard acceleration of gravity'][0]
    m *= 1 / const.physical_constants['Avogadro constant'][0] / 1000.
    h = (np.log(P0) - np.log(p)) * ((k * T) / (m * g))
    return h
646eac27f7723116e9ea5cd9dcef226cc6c45cc5
24,354
from typing import Callable

def upon_teardown(f: Callable):
    """Use this decorator to mark your ploogin function as a handler to call upon teardown."""
    return PlooginEventHandler(event=PlooginEvents.TEARDOWN, f=f)
289db179d427c9ebccd1ff408e4606d4f6e97bd5
24,355
def get_train_test_indices_drone(df, frac, seed=None):
    """Split indices of a DataFrame with binary and balanced labels into balanced subindices.

    Args:
        df (pd.DataFrame): {0,1}-labeled data
        frac (float): fraction of indices in first subset
        seed (int): random seed used as random state in np.random and as
            argument for random.seed()

    Returns:
        train_indices (list): balanced subset of indices corresponding to rows in the DataFrame
        test_indices (list): balanced subset of indices corresponding to rows in the DataFrame
    """
    split_idx = int(len(df) * frac / 2)
    df_with = df[df['label'] == 1]
    df_without = df[df['label'] == 0]
    np.random.seed(seed)
    df_with_train = df_with.sample(n=split_idx, random_state=seed)
    df_with_test = df_with.drop(df_with_train.index)
    df_without_train = df_without.sample(n=split_idx, random_state=seed)
    df_without_test = df_without.drop(df_without_train.index)
    train_indices = list(df_without_train.index) + list(df_with_train.index)
    test_indices = list(df_without_test.index) + list(df_with_test.index)
    return train_indices, test_indices
f27f893f8cfcc48718d0ca5166e2eafdb57bbad2
24,356
import re

import requests
from bs4 import BeautifulSoup

# `headers` is a module-level constant in the original source; a minimal
# stand-in (assumed, not from the original) would be:
headers = {'User-Agent': 'Mozilla/5.0'}

def get_subs(choice, chatid, obj):
    """Return subtitle download links."""
    url = "https://yts-subs.com" + obj.get_url(chatid, int(choice))
    try:
        response = requests.get(url, headers=headers)
    except Exception as e:
        print(e)
        raise Exception("Invalid url")
    soup = BeautifulSoup(response.content, 'html5lib')
    table = soup.find('tbody')
    results = table.findAll('tr')
    href = []
    message = []
    for i, result in enumerate(results):
        link = result.find('a')['href']
        link = link.replace('subtitles', 'subtitle')
        language = result.findAll('td', {'class': 'flag-cell'})[0].text.strip()
        title = result.find('a').text.strip()
        title = re.findall("subtitle (.*)", title)[0]
        title = re.sub(r'(\[.*\])', '', title)
        title = f"{language}: {title}"
        link = f"https://yifysubtitles.org{link}.zip"
        href.append(link)
        message.append(title)
        if i == 55:
            break
    return href, message
60b19f1004771546cfe530be55fa793d24500df0
24,357
def get_shape(rhoa_range): """ Find anomaly `shape` from apparent resistivity values framed to the best points. :param rhoa_range: The apparent resistivity from selected anomaly bounds :attr:`~core.erp.ERP.anom_boundaries` :type rhoa_range: array_like or list :returns: - V - W - K - C - M - U :Example: >>> from watex.core.erp import get_shape >>> x = [60, 70, 65, 40, 30, 31, 34, 40, 38, 50, 61, 90] >>> shape = get_shape (rhoa_range= np.array(x)) ...U """ shape ='V' try: minlocals_ix, = argrelextrema(rhoa_range, np.less) except : minlocals_ix = argrelextrema(rhoa_range, np.less) try : maxlocals_ix, = argrelextrema(rhoa_range, np.greater) except : maxlocals_ix = argrelextrema(rhoa_range, np.greater) value_of_median = np.median(rhoa_range) coef_UH = 1.2 c_=[rhoa_range[0] , rhoa_range[-1] ] if len(minlocals_ix)==0 : if len(maxlocals_ix)==0 and\ (max(c_) and min(c_)) > value_of_median : return 'U' return 'C' if len(minlocals_ix) ==1 : if max(c_) > np.median(rhoa_range) and min(c_) < value_of_median/2: return 'C' elif rhoa_range[minlocals_ix] > value_of_median or \ rhoa_range[minlocals_ix] > max(c_): return 'M' if len(minlocals_ix)>1 : if (max(c_) or min(c_))> value_of_median : shape ='W' if max(c_) > value_of_median and\ min(c_) > value_of_median: if rhoa_range[maxlocals_ix].mean()> value_of_median : if coef_UH * rhoa_range[minlocals_ix].mean(): shape ='H' coef_UH = 1. if rhoa_range[minlocals_ix].mean() <= coef_UH * \ rhoa_range[maxlocals_ix].mean(): shape = 'U' else : shape ='K' elif (rhoa_range[0] and rhoa_range[-1]) < np.median(rhoa_range): shape = 'M' return shape return shape
7de41fb432c733f434853e74e873ecac1542c877
24,358
def elslib_D2(*args): """ * For elementary surfaces from the gp package (cones, cylinders, spheres and tori), computes: - the point P of parameters (U, V), and - the first derivative vectors Vu and Vv at this point in the u and v parametric directions respectively, and - the second derivative vectors Vuu, Vvv and Vuv at this point. :param U: :type U: float :param V: :type V: float :param C: :type C: gp_Cone :param P: :type P: gp_Pnt :param Vu: :type Vu: gp_Vec :param Vv: :type Vv: gp_Vec :param Vuu: :type Vuu: gp_Vec :param Vvv: :type Vvv: gp_Vec :param Vuv: :type Vuv: gp_Vec :rtype: void :param U: :type U: float :param V: :type V: float :param C: :type C: gp_Cylinder :param P: :type P: gp_Pnt :param Vu: :type Vu: gp_Vec :param Vv: :type Vv: gp_Vec :param Vuu: :type Vuu: gp_Vec :param Vvv: :type Vvv: gp_Vec :param Vuv: :type Vuv: gp_Vec :rtype: void :param U: :type U: float :param V: :type V: float :param S: :type S: gp_Sphere :param P: :type P: gp_Pnt :param Vu: :type Vu: gp_Vec :param Vv: :type Vv: gp_Vec :param Vuu: :type Vuu: gp_Vec :param Vvv: :type Vvv: gp_Vec :param Vuv: :type Vuv: gp_Vec :rtype: void :param U: :type U: float :param V: :type V: float :param T: :type T: gp_Torus :param P: :type P: gp_Pnt :param Vu: :type Vu: gp_Vec :param Vv: :type Vv: gp_Vec :param Vuu: :type Vuu: gp_Vec :param Vvv: :type Vvv: gp_Vec :param Vuv: :type Vuv: gp_Vec :rtype: void """ return _ElSLib.elslib_D2(*args)
c99d089ba0aa95ed1134be53a491f690feb03edd
24,359
def readiness(): """Handle GET requests that are sent to /api/v1/readiness REST API endpoint.""" return flask.jsonify({}), 200
7c1edf3b965ad1f2b7356d634135a75846886b21
24,360
def camino_minimo(origen, dest, grafo, aeropuertos_por_ciudad, pesado=True):
    """Gets the shortest path from one vertex of the graph to another."""
    camino = []
    costo = float("inf")
    for aeropuerto_i in aeropuertos_por_ciudad[origen]:
        for aeropuerto_j in aeropuertos_por_ciudad[dest]:
            if pesado:
                distancia, predecesores = utils.dijkstra(grafo, aeropuerto_i, aeropuerto_j)
            else:
                predecesores, distancia = utils.bfs(grafo, aeropuerto_i, aeropuerto_j)
            if distancia[aeropuerto_j] < costo:
                costo = distancia[aeropuerto_j]
                camino.clear()
                utils.armar_camino(distancia, predecesores, camino, aeropuerto_i, aeropuerto_j)
            distancia.clear()
            predecesores.clear()
    return costo, camino
a0ef06265ce754fa1a4289761139e86e241f14fc
24,361
def extract_name_from_uri_or_curie(item, schema=None):
    """Extract name from uri or curie

    :arg str item: an URI or curie
    :arg dict schema: a JSON-LD representation of schema
    """
    # if schema is provided, look into the schema for the label
    if schema:
        name = [record["rdfs:label"] for record in schema["@graph"] if record['@id'] == item]
        if name:
            return name[0]
        else:
            return extract_name_from_uri_or_curie(item)
    # handle curie, get the last element after ":"
    elif 'http' not in item and len(item.split(":")) == 2:
        return item.split(":")[-1]
    # handle URI, get the last element after "/"
    elif len(item.split("//")[-1].split('/')) > 1:
        return item.split("//")[-1].split('/')[-1]
    # otherwise, raise ValueError
    else:
        raise ValueError('{} should be converted to either URI or curie'.format(item))
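Two illustrative calls covering the URI and curie branches:

extract_name_from_uri_or_curie("http://schema.org/Person")   # 'Person'
extract_name_from_uri_or_curie("bts:Gene")                   # 'Gene'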
08125457496c9d563f96a4f2a54a560c56c01af8
24,362
def external_forces(cod_obj):
    """Evaluate drift and structure-tensor components at the actual cone position."""
    x_pos, y_pos, z_pos = cod_obj.pos_list[-1]
    # Drift vector components (drift signal, all directions)
    divx = forcep['divxp']([x_pos, y_pos, z_pos])[0]
    divy = forcep['divyp']([x_pos, y_pos, z_pos])[0]
    divz = forcep['divzp']([x_pos, y_pos, z_pos])[0]
    # Structure tensor components (diffusion metric / diffusive tensor components)
    divxx = forcep['stpxx']([x_pos, y_pos, z_pos])[0]
    divxy = forcep['stpxy']([x_pos, y_pos, z_pos])[0]
    divxz = forcep['stpxz']([x_pos, y_pos, z_pos])[0]
    divyy = forcep['stpyy']([x_pos, y_pos, z_pos])[0]
    divyz = forcep['stpyz']([x_pos, y_pos, z_pos])[0]
    divzz = forcep['stpzz']([x_pos, y_pos, z_pos])[0]
    return [divx, divy, divz], [divxx, divxy, divxz, divyy, divyz, divzz]
2dbd13b3e09b0eb7e09b10c0607a9e0e7a87c11a
24,365
def charis_font_spec_css(): """Font spec for using CharisSIL with Pisa (xhtml2pdf).""" return """ @font-face {{ font-family: 'charissil'; src: url('{0}/CharisSIL-R.ttf'); }} @font-face {{ font-family: 'charissil'; font-style: italic; src: url('{0}/CharisSIL-I.ttf'); }} @font-face {{ font-family: 'charissil'; font-weight: bold; src: url('{0}/CharisSIL-B.ttf'); }} @font-face {{ font-family: 'charissil'; font-weight: bold; font-style: italic; src: url('{0}/CharisSIL-BI.ttf'); }} """.format(static_path('fonts'))
a812b65da61d333031dac878ebfdf9e4afe4b448
24,366
def set_symbols(pcontracts, dt_start="1980-1-1", dt_end="2100-1-1", n=None, spec_date={}): # 'symbol':[,] """ Args: pcontracts (list): list of pcontracts(string) dt_start (datetime/str): start time of all pcontracts dt_end (datetime/str): end time of all pcontracts n (int): last n bars spec_date (dict): time range for specific pcontracts """ global _simulator _simulator = ExecuteUnit(pcontracts, dt_start, dt_end, n, spec_date) return _simulator
9450e8355fa88d6794d58de7311992bb4bab357f
24,367
def estimate_vol_gBM(data1, data2, time_incr=0.1):
    """
    Estimate vol and correlation of two geometric Brownian motion samples
    with time samples on a grid with mesh size time_incr using
    estimate_vol_2d_rv_incr; the drift and mean-reversion parameters are set to 0.
    ----------
    args:
        data1       data array for X1
        data2       data array for X2
        time_incr   time increment
        log=True    if True, then estimation based on log of data1 and data2,
                    else in plain format.
    output:
        [0, 0, sigma_1], [0, 0, sigma_2], rho
        format to be used directly in a LOBLinear model object
    """
    sigma_bid, sigma_ask, rho = estimate_vol_2d_rv_incr(data1, data2, time_incr, log=True)
    return [float(0), float(0), sigma_bid], [float(0), float(0), sigma_ask], rho
4ecc5fb7f97c41db2f9b7347db43bda70d7b6c14
24,368
import requests def get_coin_total(credentials_file: str, coin: str) -> float: """ Get the current total amount of your coin Args: credentials_file: A JSON file containing Coinbase Pro credentials coin: The coin requested Returns: coin_total: The total amount of the coin you hold in your account """ # Instantiate Coinbase API and query the price coin_total = 0 coinbase_creds = get_cbpro_creds_from_file(credentials_file) coinbase_auth = CoinbaseProAuth(coinbase_creds[0], coinbase_creds[1], coinbase_creds[2]) api_query = "accounts" result = requests.get(API_URL + api_query, auth=coinbase_auth).json() for account in result: if account['currency'] == coin: coin_total = float(account['balance']) return coin_total
cf71d211cf44e0b215af8ce219a12179c005f52a
24,370
from datetime import datetime def datetime_to_serial(dt): """ Converts the given datetime to the Excel serial format """ if dt.tzinfo: raise ValueError("Doesn't support datetimes with timezones") temp = datetime(1899, 12, 30) delta = dt - temp return delta.days + (float(delta.seconds) + float(delta.microseconds) / 1E6) / (60 * 60 * 24)
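A worked example (1899-12-30 is day 0 of the Excel serial epoch; the time of day becomes the fraction):

datetime_to_serial(datetime(2024, 1, 1, 12, 0))   # 45292.5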
3142bfc9d33ddf782c0a6485898e6ed6bcc00418
24,371
from copy import deepcopy def compute_transitive_closure(graph): """Compute the transitive closure of a directed graph using Warshall's algorithm. :arg graph: A :class:`collections.abc.Mapping` representing a directed graph. The dictionary contains one key representing each node in the graph, and this key maps to a :class:`collections.abc.MutableSet` of nodes that are connected to the node by outgoing edges. This graph may contain cycles. This object must be picklable. Every graph node must be included as a key in the graph. :returns: The transitive closure of the graph, represented using the same data type. .. versionadded:: 2020.2 """ # Warshall's algorithm closure = deepcopy(graph) # (assumes all graph nodes are included in keys) for k in graph.keys(): for n1 in graph.keys(): for n2 in graph.keys(): if k in closure[n1] and n2 in closure[k]: closure[n1].add(n2) return closure
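A small worked example of the closure (a reaches c only through b; note every node appears as a key):

graph = {"a": {"b"}, "b": {"c"}, "c": set()}
closure = compute_transitive_closure(graph)
assert closure["a"] == {"b", "c"}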
62a7191759614f495f5297379544fa3cdf77fcfa
24,372
import numpy as np

def A_intermediate(f1, f2, f3, v1, v2, v3, d1, d3):
    """Solves system of equations for intermediate amplitude matching"""
    Mat = np.array(
        [
            [1.0, f1, f1 ** 2, f1 ** 3, f1 ** 4],
            [1.0, f2, f2 ** 2, f2 ** 3, f2 ** 4],
            [1.0, f3, f3 ** 2, f3 ** 3, f3 ** 4],
            [0.0, 1.0, 2 * f1, 3 * f1 ** 2, 4 * f1 ** 3],
            [0.0, 1.0, 2 * f3, 3 * f3 ** 2, 4 * f3 ** 3],
        ],
        dtype="float",
    )
    a = np.array([v1, v2, v3, d1, d3], dtype="float")
    return np.linalg.solve(Mat, a)
484c17d0a176a1e666f2ebe447d74f3c83845918
24,373
def _evaluate_criterion(criterion, params, criterion_kwargs): """Evaluate the criterion function for the first time. The comparison_plot_data output is needed to initialize the database. The criterion value is stored in the general options for the tao pounders algorithm. Args: criterion (callable): Python function that takes a pandas DataFrame with parameters as the first argument and returns a value or array to be minimized and data for the comparison plot. params (pd.DataFrame): See :ref:`params`. criterion_kwargs (dict): Additional keyword arguments for criterion. Returns: fitness_eval (float): The scalar criterion value. comparison_plot_data (np.array or pd.DataFrame): Data for the comparison_plot. """ criterion_out, comparison_plot_data = criterion(params, **criterion_kwargs) if np.any(np.isnan(criterion_out)): raise ValueError( "The criterion function evaluated at the start parameters returns NaNs." ) elif np.isscalar(criterion_out): fitness_eval = criterion_out else: fitness_eval = np.mean(np.square(criterion_out)) return fitness_eval, comparison_plot_data
bbb0b2cdb4fb4e12d18b6e34c1878a3eaa40059a
24,374
def group_by(collection, callback=None): """Creates an object composed of keys generated from the results of running each element of a `collection` through the callback. Args: collection (list|dict): Collection to iterate over. callback (mixed, optional): Callback applied per iteration. Returns: dict: Results of grouping by `callback`. Example: >>> results = group_by([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], 'a') >>> assert results == {1: [{'a': 1, 'b': 2}], 3: [{'a': 3, 'b': 4}]} >>> results = group_by([{'a': 1, 'b': 2}, {'a': 3, 'b': 4}], {'a': 1}) >>> assert results == {False: [{'a': 3, 'b': 4}],\ True: [{'a': 1, 'b': 2}]} .. versionadded:: 1.0.0 """ ret = {} cbk = pyd.iteratee(callback) for value in collection: key = cbk(value) ret.setdefault(key, []) ret[key].append(value) return ret
5ca9e3867a1e340da92c223b8ba60a2bdcf2bc0b
24,375
from nltk.stem import WordNetLemmatizer

def lemmatize_verbs(words):
    """lemmatize verbs in tokenized word list"""
    lemmatizer = WordNetLemmatizer()
    lemmas = []
    for word in words:
        lemma = lemmatizer.lemmatize(word, pos='v')
        lemmas.append(lemma)
    return lemmas
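Illustrative call (requires the WordNet data, e.g. via nltk.download('wordnet')):

lemmatize_verbs(['running', 'flies', 'was'])   # ['run', 'fly', 'be']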
6d37cc6c4f52b062872586f56cb4d459d3fa5cb0
24,376
def loadfromensembl(homology, kingdom='fungi', sequence='cdna', additional='type=orthologues', saveonfiles=False, normalized=False, setnans=False, number=0, by="entropy", using="normal", getCAI=None): """ Load from ensembl the datas required in parameters ( look at PyCUB.get_data for more information) returns a fully populated homology object. Args: homology: str the homology code additional: str additional information on the retrieved sequence kingdom: str flags the relevant kingdom of you current session [fungi,plants,bacteria, animals] sequence: str flags the type of sequence you consider the full genome is (coding or non coding or full) [cds, all, cda] by: str flags what type of computation should be done [entropy,frequency, entropylocation] normalized: bool to true if should we normalize the entorpy by length saveonfiles: bool to true if the retrieved data should be saved on a file setnans: bool to true if nans should be set to NaN instead of an avg value using: the method to compute the partition function if using entropy location getCAI: wether or not to compute CAI !! need to have called the corresponding function on Pycub before hand !! Returns: a populated PyCUB.homology of the homology object by [names, taxons, full, lenmat, homocode, nans, KaKs_Scores, similarity_scores, proteinids, GCcount, geneids, refs, ecai, refgene, refprot, tot_volume, mean_hydrophobicity, glucose_cost, synthesis_steps, isoelectricpoint,cai, conservation, uncounted] OR None if the homology is empty Raises: ConnectionError: "tried 50 times but still not able to connect" """ server = "http://rest.ensemblgenomes.org" print 'homology: ' + homology + ' : ' + str(number) ext = "/homology/id/" + homology + '?' if sequence is not None: # dna cdna cds ncrna Protein EMBL GENBANK MySQL TSV GTF GFF3 ext += 'sequence=' + sequence if kingdom is not None: ext += ';compara=' + kingdom if additional is not None: ext += ';' + additional try: r = requests.get(server + ext, headers={"Content-Type": "application/json"}) except ConnectionError: print "problem at " + homology if number > 50: raise IOError("tried 50 times but still not able to connect") return loadfromensembl(homology, kingdom=kingdom, sequence=sequence, additional=additional, saveonfiles=saveonfiles, normalized=normalized, setnans=setnans, number=number + 1, by=by, using=using) if not r.ok: r.raise_for_status() data = r.json()['data'] if not data: return None data = data[0]['homologies'] if not data: return None if saveonfiles: with open('utils/data/' + homology + '.json', "wb") as code: code.write(json.dump(data)) species, GCcount, lenmat, H, nans, similarities, KaKs_Scores, taxons, proteinids,\ geneid, ref, ecai, cai, refgene, refprot, vol, cost, hydrophob, synthcost, isoepoint, conservation, others = process( data, normalized=normalized, setnans=setnans, by=by, getCAI=getCAI) if by == 'entropyLocation': H = getloc(H, np.array(lenmat), using=using) # here we add two things into names but only as a temporary saving measures removed by the # application fo preprocessing in homoset. 
homo = h.homology(names=[species, taxons], full=H, lenmat=lenmat, homocode=homology, nans=nans, KaKs_Scores=KaKs_Scores, similarity_scores=similarities, proteinids=proteinids, GCcount=GCcount, geneids=geneid, ref=ref, ecai=ecai, cai=cai, refgene=refgene, refprot=refprot, tot_volume=vol, mean_hydrophobicity=hydrophob, glucose_cost=cost, synthesis_steps=synthcost, isoelectricpoint=isoepoint, conservation=conservation, othercods=others) homo.order(withtaxons=True) # a first ordering of the data, usefull afterward in the preprocessing return homo
b7c3adee4ba4b61c0828b830b5f53578da75211c
24,377
def dereference(reference_buffer, groups):
    """ find a reference within a group """
    if len(reference_buffer) > 0:
        ref_number = int(''.join(reference_buffer)) - 1
        return groups[ref_number % len(groups)] + ' '
    return ''
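Illustrative behaviour (reference buffers hold 1-indexed digits; note the trailing space the function appends):

groups = ['alpha', 'beta']
dereference(['2'], groups)   # 'beta '
dereference([], groups)      # ''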
c76234051e81a16f44690de46435e9856996d677
24,378
def get_main_corpora_info(): """Create dict with the main corpora info saved in CORPORA_SOURCES :return: Dictionary with the corpora info to be shown :rtype: dict """ table = [] for corpus_info in CORPORA_SOURCES: corpus_id = CORPORA_SOURCES.index(corpus_info) + 1 props = corpus_info["properties"] corpus_name = pretty_string( f"{corpus_info['name']} ({props['slug']})", 2 ) table.append({ "id": corpus_id, "name": corpus_name, "lang": props["language"], "size": props["size"], "docs": props["doc_quantity"], "words": props["word_quantity"], "granularity": pretty_string('\n'.join(props["granularity"]), 1), "license": pretty_string(props["license"], 1), }) return table
d0a642e98248eabdbaa018991774612f33caca8f
24,379
def start_compare_analysis(api_token, project_id, kind, url, username, password, target_branch, target_revision):
    """
    Start a compare analysis through the GraphQL API.
    :param api_token: the access token to the GraphQL API
    :param project_id: identifier of the project to use as source
    :param kind: kind of the target repository (Github, Gitlab, Git)
    :param url: URL of the target repository
    :param username: username of the target repository
    :param password: password of the target repository
    :param target_branch: branch of the target repository
    :param target_revision: revision of the target repository
    :return: the compare analysis identifier or None on exception.
    """
    try:
        args = []
        args.append("projectId: " + str(project_id))
        args.append("targetKind: " + kind)
        args.append("targetUrl: \"" + url + "\"")
        if target_revision:
            args.append("targetRevision: \"" + target_revision + "\"")
        if target_branch:
            args.append("targetBranch: \"" + target_branch + "\"")
        args_string = ",".join(args)
        query = """ mutation { createCompareAnalysis(""" + args_string + """){id}} """
        response_json = do_graphql_query(api_token, {"query": query})
        return response_json["createCompareAnalysis"]["id"]
    except KeyError:
        log.error("Error while starting new analysis")
        return None
10482ed334f5522894b271e9d69803b4c804cb09
24,380
def get_matrix_in_format(original_matrix, matrix_format): """Converts matrix to format Parameters ---------- original_matrix : np.matrix or scipy matrix or np.array of np. arrays matrix to convert matrix_format : string format Returns ------- matrix : scipy matrix matrix in given format """ if isinstance(original_matrix, np.ndarray): return SPARSE_FORMAT_TO_CONSTRUCTOR[matrix_format](original_matrix) if original_matrix.getformat() == matrix_format: return original_matrix return original_matrix.asformat(matrix_format)
837b47ccb4d0bf608907dd13ed1bedd0cb780058
24,381
def _convert_name(name, recurse=True, subs=None): """ From an absolute path returns the variable name and its owner component in a dict. Names are also formatted. Parameters ---------- name : str Connection absolute path and name recurse : bool If False, treat the top level of each name as the source/target component. subs: tuple or None Character pairs with old and substitute characters Returns ------- dict(str, str) """ def convert(name): sep = '.' name = name.replace('@', sep) name_items = name.split(sep) if recurse: if len(name_items) > 1: comp = name_items[-2] # -1 is variable name, before that -2 is the component name path = name.rsplit(sep, 1)[0] else: msg = ('The name "{}" cannot be processed. The separator character is "{}", ' 'which does not occur in the name.') raise ValueError(msg.format(name, sep)) else: comp = name_items[0] path = comp var = name_items[-1] var = _replace_chars(var, substitutes=subs) return {'comp': comp, 'var': var, 'abs_name': _format_name(name), 'path': _format_name(path)} if isinstance(name, list): # If a source has multiple targets return [convert(n) for n in name] else: # string return convert(name)
aa331f8616e3996d78a2bd278b10e2e806d56440
24,383
def _orthogonalize(constraints, X): """ Orthogonalize spline terms with respect to non spline terms. Parameters ---------- constraints: numpy array constraint matrix, non spline terms X: numpy array spline terms Returns ------- constrained_X: numpy array orthogonalized spline terms """ Q, _ = np.linalg.qr(constraints) # compute Q Projection_Matrix = np.matmul(Q,Q.T) constrained_X = X - np.matmul(Projection_Matrix,X) return constrained_X
01eb69ffa30d48c84c76915e33be39b201fda73e
24,384
def tril(input, diagonal=0, name=None): """ This op returns the lower triangular part of a matrix (2-D tensor) or batch of matrices :attr:`input`, the other elements of the result tensor are set to 0. The lower triangular part of the matrix is defined as the elements on and below the diagonal. Args: input (Variable): The input variable which is a Tensor. Support data types: ``float64``, ``float32``, ``int32``, ``int64``. diagonal (int, optional): The diagonal to consider, default value is 0. If :attr:`diagonal` = 0, all elements on and below the main diagonal are retained. A positive value includes just as many diagonals above the main diagonal, and similarly a negative value excludes just as many diagonals below the main diagonal. The main diagonal are the set of indices :math:`\{(i, i)\}` for :math:`i \in [0, \min\{d_{1}, d_{2}\} - 1]` where :math:`d_{1}, d_{2}` are the dimensions of the matrix. name (str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`. Returns: Variable: Tensor, results of lower triangular operation by the specified diagonal of input tensor, it's data type is the same as input's Tensor. Raises: TypeError: diagonal is not a int type. ValueError: dimension of :attr:`input` is less than 2. Examples: .. code-block:: python import numpy as np import paddle.fluid as fluid data = np.arange(1, 13, dtype="int64").reshape(3,-1) # array([[ 1, 2, 3, 4], # [ 5, 6, 7, 8], # [ 9, 10, 11, 12]]) x = fluid.data(shape=(-1, 4), dtype='int64', name='x') exe = fluid.Executor(fluid.CPUPlace()) # example 1, default diagonal tril = fluid.layers.tril(x) tril_out, = exe.run(fluid.default_main_program(), feed={"x": data}, fetch_list=[tril], return_numpy=True) # array([[ 1, 0, 0, 0], # [ 5, 6, 0, 0], # [ 9, 10, 11, 0]]) .. code-block:: python # example 2, positive diagonal value import paddle.fluid as fluid import numpy as np data = np.arange(1, 13, dtype="int64").reshape(3,-1) x = fluid.data(shape=(-1, 4), dtype='int64', name='x') exe = fluid.Executor(fluid.CPUPlace()) tril = fluid.layers.tril(x, diagonal=2) tril_out, = exe.run(fluid.default_main_program(), feed={"x": data}, fetch_list=[tril], return_numpy=True) # array([[ 1, 2, 3, 0], # [ 5, 6, 7, 8], # [ 9, 10, 11, 12]]) .. code-block:: python # example 3, negative diagonal value import paddle.fluid as fluid import numpy as np data = np.arange(1, 13, dtype="int64").reshape(3,-1) x = fluid.data(shape=(-1, 4), dtype='int64', name='x') exe = fluid.Executor(fluid.CPUPlace()) tril = fluid.layers.tril(x, diagonal=-1) tril_out, = exe.run(fluid.default_main_program(), feed={"x": data}, fetch_list=[tril], return_numpy=True) # array([[ 0, 0, 0, 0], # [ 5, 0, 0, 0], # [ 9, 10, 0, 0]]) """ return _tril_triu_op(LayerHelper('tril', **locals()))
62eb0c83cc633db655859160ea5df1fc0158086a
24,386
def rename_and_merge_columns_on_dict(data_encoded, rename_encoded_columns_dict, **kwargs): """ Parameters ---------- data_encoded: pandas.DataFrame with numerical columns rename_encoded_columns_dict: dict of columns to rename in data_encoded **kwargs inplace:bool, default=False decides if data_encoded is edited inplace or if a copy is returned Returns ------- pandas.DataFrame with columns renamed according to rename_encoded_columns_dict, columns that share the same name after renaming are merged by adding the columns up Example ------- data_encoded: x y z 0 0 1 1 0 1 0 1 0 rename_encoded_columns_dict: {'y': 'x'} return: x z 0 1 1 1 1 0 """ if 'inplace' not in kwargs: kwargs['inplace'] = False if kwargs['inplace']: data_copy = data_encoded else: data_copy = data_encoded.copy() data_copy.rename(columns=rename_encoded_columns_dict, inplace=True) for col in data_copy.columns: df_col = data_copy[col] # if column name col appears more than once in data_encoded.columns -> df_col is DataFrame (else it is a Series) if isinstance(df_col, pd.DataFrame): # add index to identical column names: [cap-shape_x0, cap-shape_x1, ...] df_col.columns = [col + str(i) for i in range(0, len(df_col.columns))] # drop identical columns col from DataFrame data_copy.drop(columns=col, inplace=True) # create column of zeros and add the numerical columns up col_merged = pd.Series(np.zeros(len(data_copy)), dtype=int) for col_indexed in df_col.columns: col_merged += df_col[col_indexed] data_copy[col] = col_merged if kwargs['inplace']: data_encoded = data_encoded.reindex(sorted(data_encoded.columns), axis=1) return else: data_copy = data_copy.reindex(sorted(data_copy.columns), axis=1) return data_copy
cb8767a102ad421674381182a2ea65468613abee
24,388
from typing import List def _get_public_props(obj) -> List[str]: """Return the list of public props from an object.""" return [prop for prop in dir(obj) if not prop.startswith('_')]
7b3be3e186bc009329ed417c6685fb2503a7c993
24,389
from typing import List

def get_vd_html(
    voronoi_diagram: FortunesAlgorithm,
    limit_sites: List[SiteToUse],
    xlim: Limit,
    ylim: Limit,
) -> str:
    """Return the voronoi diagram plot as HTML."""
    figure = get_vd_figure(
        voronoi_diagram, limit_sites, xlim, ylim, voronoi_diagram.SITE_CLASS
    )
    html = get_html(figure)
    return html
1fbec3a3bf2c23d878e9c4b7d1779fb2526560e0
24,390
import numpy as np
import numpy.random as rn  # assumed alias: the body calls rn.normal

def sample_normal_mean_jeffreys(s1, ndata, prec):
    """Samples the mean of a normal distribution"""
    return rn.normal(s1 / ndata, 1 / np.sqrt(prec * ndata))
391cd72ea307903278e94bbc6b323f0997759f10
24,391
def pipeline_report_build(submission: Submission, stdout: str, passed: bool, **_):
    """
    POSTed json should be of the shape:

    {
      "stdout": "build logs...",
      "passed": True
    }

    :param submission:
    :param stdout:
    :param passed:
    :return:
    """
    if len(stdout) > MYSQL_TEXT_MAX_LENGTH:
        stdout = stdout[:MYSQL_TEXT_MAX_LENGTH]
    # Log the build being reported
    logger.info(
        "submission build reported",
        extra={
            "type": "build_report",
            "submission_id": submission.id,
            "assignment_id": submission.assignment_id,
            "owner_id": submission.owner_id,
            "passed": passed,
            "stdout": stdout,
        },
    )
    # Update submission build
    submission.build.stdout = stdout
    submission.build.passed = passed
    # If the build did not pass, then the submission pipeline is done
    if passed is False:
        submission.processed = True
        submission.state = "Build did not succeed"
    # Add and commit
    db.session.add(submission)
    db.session.add(submission.build)
    db.session.commit()
    # Report success
    return success_response("Build successfully reported.")
cbd07d2642b511f301a7e82581d71de3d04a66c6
24,393
import random def flatten(episode, context_length, include_labels=True, delimiter='\n'): """ Flatten the data into single example episodes. This is used to make conditional training easier and for a fair comparison of methods. """ context = deque(maxlen=context_length if context_length > 0 else None) new_episode = [] for ex in episode: context.append(ex.get('text', '')) # add context if len(context) > 1: ex.force_set('text', delimiter.join(context)) # set episode_done to be True ex.force_set('episode_done', True) labels = ex.get('labels', ex.get('eval_labels', None)) if labels is not None and include_labels: context.append(random.choice(labels)) new_episode.append(ex) return new_episode
37c44cb5e442e3d257230f151ce11a0012658ef5
24,394
def calc_binsize(num_bins, t_start, t_stop):
    """
    Calculates the size of bins :attr:`binsize` from the three parameters
    :attr:`num_bins`, :attr:`t_start` and :attr:`t_stop`.

    Parameters
    ----------
    num_bins : int
        Number of bins
    t_start : quantities.Quantity
        Start time
    t_stop : quantities.Quantity
        Stop time

    Returns
    -------
    binsize : quantities.Quantity
        Size of bins calculated from the given parameters.

    Raises
    ------
    ValueError
        Raised when :attr:`t_stop` is smaller than :attr:`t_start`.
    """
    if num_bins is not None and t_start is not None and t_stop is not None:
        if t_stop < t_start:
            raise ValueError("t_stop (%s) is smaller than t_start (%s)"
                             % (t_stop, t_start))
        return (t_stop - t_start) / num_bins
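A quick usage sketch (assuming the quantities package that the signature expects):

import quantities as pq
calc_binsize(10, 0 * pq.s, 1 * pq.s)   # 0.1 s per bin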
0eb42e56aebfd29aa76190b4837171e5cfb94e82
24,395
def d_within(geom, gdf, distance): """Find the subset of a GeoDataFrame within some distance of a shapely geometry""" return _intersects(geom, gdf, distance)
463be3ff9c3eb7f002dc652047b96fbc15ba05b4
24,396
def make_params(args, nmax=None): """Format GET parameters for the API endpoint. In particular, the endpoint requires that parameters be sorted alphabetically by name, and that filtering is done only on one parameter when multiple filters are offered. """ if nmax and len(args) > nmax: raise ValueError("Too many parameters supplied") return [(k, stringify(args[k])) for k in sorted(args.keys())]
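Illustrative call (assuming the module's stringify helper passes plain strings through unchanged):

make_params({"zip": "02138", "address": "1600 Penn"})
# [('address', '1600 Penn'), ('zip', '02138')]  (keys sorted alphabetically)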
406d23a5090b901c20a4ac10dc182fbc3051e61e
24,397
def remap(value, oldMin, oldMax, newMin, newMax): """ Remaps the value to a new min and max value Args: value: value to remap oldMin: old min of range oldMax: old max of range newMin: new min of range newMax: new max of range Returns: The remapped value in the new range """ return newMin + (((value - oldMin) / (oldMax - oldMin)) * (newMax - newMin))
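A concrete example, mapping a 10-bit ADC reading onto a 0-5 V scale:

volts = remap(512, 0, 1023, 0.0, 5.0)   # approximately 2.502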
c0e53ce2b2169b08d271f7077e552762c572cf1f
24,398
async def construct_unit_passport(unit: Unit) -> str: """construct own passport, dump it as .yaml file and return a path to it""" passport = _get_passport_dict(unit) path = f"unit-passports/unit-passport-{unit.uuid}.yaml" _save_passport(unit, passport, path) return path
e4f1e90bbe82b1cb425cf5834fafbd1f36258454
24,399
import warnings

import pandas as pd

def certification_to_csv(stats, filepath, product_id):
    """Writes certification outputs to the file specified.

    Parameters
    ----------
    stats : list of dict
        list of statistical outputs from the function
        `thermostat.compute_summary_statistics()`
    filepath : str
        filepath specification for location of output CSV file.
    product_id : str
        identifier of the certified product.

    Returns
    -------
    df : pd.DataFrame
        DataFrame containing data output to CSV.
    """
    if stats is None:
        warnings.warn("No certification data to export.")
        return None
    labels = [i.get("label") for i in stats]
    sw_version = stats[labels.index("all_tau_cvrmse_savings_p01_filter_heating")][
        "sw_version"
    ]
    certification_data = []
    for column_filter, column_data in DATA_COLUMNS:
        stats_column_number = labels.index(column_filter)
        value = stats[stats_column_number].get(column_data, None)
        row = [
            product_id,
            sw_version,
            COLUMN_LOOKUP[column_data]["metric"],
            FILTER_LOOKUP[column_filter]["filter"],
            FILTER_LOOKUP[column_filter]["region"],
            COLUMN_LOOKUP[column_data]["statistic"],
            FILTER_LOOKUP[column_filter]["season"],
            value,
        ]
        certification_data.append(row)
    output_dataframe = pd.DataFrame(certification_data, columns=CERTIFICATION_HEADERS)
    output_dataframe.to_csv(
        filepath, index=False, columns=CERTIFICATION_HEADERS, float_format="%.2f"
    )
    return output_dataframe
23f1a84fa2d9c5ad25eb04d23ea9646cf2849286
24,400
def get_ipns_link(name: str) -> str: """Get the ipns link with the name of it which we remember it by Args: name (str): Name we call ipns link Returns: str: Returns the IPNS url Raises: ValueError: if link not found >>> import random >>> key_name = str(random.getrandbits(32 * 8)) # get random, or else throws duplicate key error >>> create_new_ipns_link(key_name) != '' True """ keys = IPFS_CLIENT.key.list() does_match = lambda x: x['Name'] == name.lower() matches = list(filter(does_match, keys['Keys'])) if len(matches) == 0: raise ValueError(f'IPNS link not found with name: "{name}"!') ipns_id = matches[0]['Id'] # get first match return f'{IPNS_PATH}{ipns_id}'
1c171e0539013aecd3e45a7cf7ca2c2907df3955
24,401
import numpy as np

def joint_sim(num_samp, num_dim, noise=0.5):
    """
    Function for generating a joint-normal simulation.

    :param num_samp: number of samples for the simulation
    :param num_dim: number of dimensions for the simulation
    :param noise: noise level of the simulation, defaults to 0.5
    :return: the data matrix and a response array
    """
    gauss_noise = np.random.normal(loc=0, scale=1, size=(num_samp, 1))
    if num_dim > 1:
        kappa = 1
    else:
        kappa = 0
    rho = 1 / (2 * num_dim)
    sig = np.diag(np.ones(shape=(2 * num_dim)))
    sig[num_dim:(2 * num_dim), 0:num_dim] = rho
    sig[0:num_dim, num_dim:(2 * num_dim)] = rho
    samp = np.random.multivariate_normal(cov=sig, mean=np.zeros(2 * num_dim),
                                         size=num_samp)
    if num_dim == 1:
        y = samp[:, num_dim:(2 * num_dim)] + kappa * noise * gauss_noise
        x = samp[:, 0:num_dim]
    else:
        y = samp[:, (num_dim + 1):(2 * num_dim)] + kappa * noise * gauss_noise
        x = samp[:, 0:num_dim]
    return x, y
71296c5093aa3113b7df70cb8966ac9ca06ccb31
24,402
import numpy as np

def calculate_seasonal_tilt(axial_tilt, degrees):
    """Find the seasonal tilt offset from axial tilt and orbit (in degrees)

    axial_tilt -- The planet's tilt. e.g. Earth's tilt is 23.44 degrees.
    degrees -- How far along is the planet in its orbit around its star?
        (between 0 and 360. 0/360 and 180 are equinoxes. 90 and 270 are solstices.)
    """
    # NOTE: IRL the tilt of a planet doesn't actually change as it orbits.
    # What does change is the *relative* angle of incoming sunlight.
    return np.sin(degrees * np.pi / 180) * axial_tilt
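A quick check at the cardinal points of an Earth-like orbit:

calculate_seasonal_tilt(23.44, 90)    # 23.44, northern summer solstice
calculate_seasonal_tilt(23.44, 180)   # ~0.0, equinox
calculate_seasonal_tilt(23.44, 270)   # -23.44, northern winter solstice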
a31e072f95d9d856b2c2d7549b7f96a97a4d6b60
24,403
import logging
from functools import partial

def extractWithoutOrder(query, choices, processor=default_processor, scorer=default_scorer, score_cutoff=0):
    """Select the best match in a list or dictionary of choices.

    Find best matches in a list or dictionary of choices, return a
    generator of tuples containing the match and its score. If a dictionary
    is used, also returns the key for each match.

    Arguments:
        query: An object representing the thing we want to find.
        choices: An iterable or dictionary-like object containing choices
            to be matched against the query. Dictionary arguments of
            {key: value} pairs will attempt to match the query against
            each value.
        processor: Optional function of the form f(a) -> b, where a is the query or
            individual choice and b is the choice to be used in matching.

            This can be used to match against, say, the first element of
            a list:

                lambda x: x[0]

            Defaults to fuzzywuzzy.utils.full_process().
        scorer: Optional function for scoring matches between the query and
            an individual processed choice. This should be a function
            of the form f(query, choice) -> int.

            By default, fuzz.WRatio() is used and expects both query and
            choice to be strings.
        score_cutoff: Optional argument for score threshold. No matches with
            a score less than this number will be returned. Defaults to 0.

    Returns:
        Generator of tuples containing the match and its score.

        If a list is used for choices, then the result will be 2-tuples.
        If a dictionary is used, then the result will be 3-tuples containing
        the key for each match.

        For example, searching for 'bird' in the dictionary
        {'bard': 'train', 'dog': 'man'}
        may return
        ('train', 22, 'bard'), ('man', 0, 'dog')
    """
    # Catch generators without lengths
    def no_process(x):
        return x
    try:
        if choices is None or len(choices) == 0:
            return
    except TypeError:
        pass

    # If the processor was removed by setting it to None
    # perform a noop as it still needs to be a function
    if processor is None:
        processor = no_process

    # Run the processor on the input query.
    processed_query = processor(query)

    if len(processed_query) == 0:
        logging.warning(u"Applied processor reduces input query to empty string, "
                        "all comparisons will have score 0. "
                        "[Query: \'{0}\']".format(query))

    # Don't run full_process twice
    if scorer in [fuzz.WRatio, fuzz.QRatio,
                  fuzz.token_set_ratio, fuzz.token_sort_ratio,
                  fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio,
                  fuzz.UWRatio, fuzz.UQRatio] \
            and processor == utils.full_process:
        processor = no_process

    # Only process the query once instead of for every choice
    if scorer in [fuzz.UWRatio, fuzz.UQRatio]:
        pre_processor = partial(utils.full_process, force_ascii=False)
        scorer = partial(scorer, full_process=False)
    elif scorer in [fuzz.WRatio, fuzz.QRatio,
                    fuzz.token_set_ratio, fuzz.token_sort_ratio,
                    fuzz.partial_token_set_ratio, fuzz.partial_token_sort_ratio]:
        pre_processor = partial(utils.full_process, force_ascii=True)
        scorer = partial(scorer, full_process=False)
    else:
        pre_processor = no_process
    processed_query = pre_processor(processed_query)

    try:
        # See if choices is a dictionary-like object.
        for key, choice in choices.items():
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score, key)
    except AttributeError:
        # It's a list; just iterate over it.
        for choice in choices:
            processed = pre_processor(processor(choice))
            score = scorer(processed_query, processed)
            if score >= score_cutoff:
                yield (choice, score)
784a619b06fed48a5b7d4c8f4c711da954125c9c
24,404
from copy import deepcopy

def mark_dts_nn(marked_dict):
    """Loops through a dictionary representation of the XML-text where
    determiners have been "focus"-marked. Finds the "focus"-marked determiners
    and looks for their nouns from the words after the determiner until the
    end of the current sentence. The found noun is then marked with
    "focus": 2. Once the first noun of the right type for the determiner is
    found, it stops looking and moves on to the next determiner. This is an
    add-on to make the second approach of marking both determiners and their
    nouns possible.

    Found an issue with single word sentences (often only a bracket or another
    such character in the XML-text). The "isinstance()" check on word_meta is
    a bandaid-fix for this. It simply skips these one-word sentences, since
    they most likely are not relevant to the issue at hand and because I found
    no relatively quick fix for the issue.

    Args:
        marked_dict: a dictionary representation of the XML-text, with the
            added word metadata attribute "focus" (only determiners marked).

    Returns:
        nn_marked_dict: a dictionary representation of the XML-text, with the
            added word metadata attribute "focus" for both determiners (1)
            and their nouns (2)."""
    nn_marked_dict = deepcopy(marked_dict)
    for paragraph in nn_marked_dict['corpus']['text']['lessontext']['paragraph']:
        sentence_lvl = paragraph['sentence']
        if isinstance(sentence_lvl, dict):
            for word_meta in sentence_lvl['w']:
                if isinstance(word_meta, dict):
                    if word_meta['focus'] == 1:
                        start = sentence_lvl['w'].index(word_meta)
                        for noun_meta in sentence_lvl['w'][start:]:
                            if noun_meta['msd'] == 'NN.NEU.SIN.IND.NOM' or noun_meta['msd'] == 'NN.UTR.SIN.IND.NOM' or noun_meta['msd'] == 'NN.UTR.SIN.IND.GEN':
                                noun_meta['focus'] = 2
                                break
        elif isinstance(sentence_lvl, list):
            for sentence in sentence_lvl:
                for word_meta in sentence['w']:
                    if isinstance(word_meta, dict):
                        if word_meta['focus'] == 1:
                            start = sentence['w'].index(word_meta)
                            for noun_meta in sentence['w'][start:]:
                                if noun_meta['msd'] == 'NN.NEU.SIN.IND.NOM' or noun_meta['msd'] == 'NN.UTR.SIN.IND.NOM' or noun_meta['msd'] == 'NN.UTR.SIN.IND.GEN':
                                    noun_meta['focus'] = 2
                                    break
        else:
            print("Found something that is not a dict/list!")
    return nn_marked_dict
2db6e21a3ea1f4249ef13fd7a235839a8a2d1871
24,405
def reservation_rollback(context, reservations, project_id=None, user_id=None): """Roll back quota reservations.""" return IMPL.reservation_rollback(context, reservations, project_id=project_id, user_id=user_id)
fcb5a82522320ffe6a6d262eb6571153aaa55b29
24,406
def encrypt(
    security_control: SecurityControlField,
    system_title: bytes,
    invocation_counter: int,
    key: bytes,
    plain_text: bytes,
    auth_key: bytes,
) -> bytes:
    """
    Encrypts bytes according to the security context.
    """
    if not security_control.encrypted and not security_control.authenticated:
        raise NotImplementedError("encrypt() only handles authenticated encryption")
    if len(system_title) != 8:
        raise ValueError(f"System Title must be of length 8, not {len(system_title)}")
    # initialization vector is 12 bytes long and consists of the system_title (8 bytes)
    # and invocation_counter (4 bytes)
    iv = system_title + invocation_counter.to_bytes(4, "big")
    # Making sure the keys are of correct length for specified security suite
    validate_key(security_control.security_suite, key)
    validate_key(security_control.security_suite, auth_key)
    # Construct an AES-GCM Cipher object with the given key and iv. Allow for
    # truncating the auth tag
    encryptor = Cipher(
        algorithms.AES(key),
        modes.GCM(initialization_vector=iv, tag=None, min_tag_length=TAG_LENGTH),
    ).encryptor()
    # associated_data will be authenticated but not encrypted,
    # it must also be passed in on decryption.
    associated_data = security_control.to_bytes() + auth_key
    encryptor.authenticate_additional_data(associated_data)
    # Encrypt the plaintext and get the associated ciphertext.
    # GCM does not require padding.
    ciphertext = encryptor.update(plain_text) + encryptor.finalize()
    # dlms uses a tag length of 12, not the default of 16. Since we have set the
    # minimum tag length to 12 it is ok to truncate the tag down to 12 bytes.
    tag = encryptor.tag[:TAG_LENGTH]
    return ciphertext + tag
f03066da2ab54e784063f01255b9f3f53050a2cf
24,407
from math import atan2, pi

def leftmost_turn(segment, point, zs):
    """Find the line segment intersecting at the leftmost angle relative to the initial segment.

    Arguments:
    segment -- ((x0, y0), (x1, y1)): where we started and the direction travelling in
    point -- (x, y): where we intersected one or more alternative line segments
    zs -- set of points defining directions to move in
    """
    (x0, y0), (x1, y1) = segment
    x, y = point
    if len(zs) == 1:
        return (x, y), zs.pop()
    theta = atan2(y1 - y, x1 - x)  # Direction currently headed.

    def fun(p):
        xn, yn = p
        phi = atan2(yn - y, xn - x) - theta
        if phi < -pi:
            phi += 2 * pi
        elif phi > pi:
            phi -= 2 * pi
        # Tie-breaker is length of segment:
        len2 = (yn - y) * (yn - y) + (xn - x) * (xn - x)
        return phi, len2

    zm = max(zs, key=fun)
    return (x, y), zm
bfe1650a92e38612461942ddfcee5faaad96ad5f
24,408
def maya_window(): """Get Maya MainWindow as Qt. Returns: QtWidgets.QWidget: Maya main window as QtObject """ return to_qwidget("MayaWindow")
bea4ef97a14bb93a461f0dd54dbb6e9a25a14a63
24,409
def soap2Dict(soapObj): """A recursive version of sudsobject.asdict""" if isinstance(soapObj, sudsobject.Object): return {k: soap2Dict(v) for k, v in soapObj} elif isinstance(soapObj, list): return [soap2Dict(v) for v in soapObj] return soapObj
46d5b767640a1b8c506f85d03580508d9b2278f0
24,410
def generate_sequential_BAOAB_string(force_group_list, symmetric=True): """Generate BAOAB-like schemes that break up the "V R" step into multiple sequential updates E.g. force_group_list=(0,1,2), symmetric=True --> "V0 R V1 R V2 R O R V2 R V1 R V0" force_group_list=(0,1,2), symmetric=False --> "V0 R V1 R V2 R O V0 R V1 R V2 R" """ VR = [] for i in force_group_list: VR.append("V{}".format(i)) VR.append("R") if symmetric: return " ".join(VR + ["O"] + VR[::-1]) else: return " ".join(VR + ["O"] + VR)
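Two illustrative splittings for force groups (0, 1):

generate_sequential_BAOAB_string((0, 1), symmetric=True)
# 'V0 R V1 R O R V1 R V0'
generate_sequential_BAOAB_string((0, 1), symmetric=False)
# 'V0 R V1 R O V0 R V1 R'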
7710775e365f0caae81a9737feec18c662790bde
24,411
def _get_active_tab(visible_tabs, request_path): """ return the tab that claims the longest matching url_prefix if one tab claims '/a/{domain}/data/' and another tab claims '/a/{domain}/data/edit/case_groups/' then the second tab wins because it's a longer match. """ matching_tabs = sorted( (url_prefix, tab) for tab in visible_tabs for url_prefix in tab.url_prefixes if request_path.startswith(url_prefix) ) if matching_tabs: _, tab = matching_tabs[-1] return tab
ac9cd34d4b4ee1c1c0356499b389c1f6a7195585
24,412
import logging


def fit_scale_heights(data, masks, min_lat=None, max_lat=None,
                      deredden=False, fig_names=None,
                      return_smoothed=False, smoothed_width=None,
                      xlim=None, ylim=None, robust=True, n_boot=10000):
    """
    Fits scale height data and returns slopes

    Parameters
    ----------
    data: `skySurvey`
        WHAM skySurvey object of full sky (requires track keyword), or spiral arm section
    masks: `list like`
        longitude masks to use
    min_lat: `u.Quantity`
        min latitude to fit
    max_lat: `u.Quantity`
        max latitude to fit
    deredden: `bool`
        if True, also fits dereddened slopes
    fig_names: `str`
        if provided, saves figures following this name
    return_smoothed: `bool`
        if True, returns smoothed longitude and slope estimates
    smoothed_width: `u.Quantity`
        width to smooth data to in longitude
    robust: `bool`
        if True, uses statsmodels' robust linear model (sm.RLM)
    n_boot: `int`
        only if robust = True; number of bootstrap resamples
    """

    # Default values
    if min_lat is None:
        min_lat = 5 * u.deg
    elif not hasattr(min_lat, "unit"):
        min_lat *= u.deg
    if max_lat is None:
        max_lat = 35 * u.deg
    elif not hasattr(max_lat, "unit"):
        max_lat *= u.deg
    if smoothed_width is None:
        smoothed_width = 5 * u.deg
    elif not hasattr(smoothed_width, "unit"):
        smoothed_width *= u.deg

    # initialize data arrays
    slopes_pos = []
    slopes_neg = []
    slopes_pos_dr = []
    slopes_neg_dr = []
    intercept_pos = []
    intercept_neg = []
    intercept_pos_dr = []
    intercept_neg_dr = []
    slopes_pos_err = []
    slopes_neg_err = []
    slopes_pos_dr_err = []
    slopes_neg_dr_err = []
    intercept_pos_err = []
    intercept_neg_err = []
    intercept_pos_dr_err = []
    intercept_neg_dr_err = []
    median_longitude = []
    median_distance = []

    for ell2 in range(len(masks)):
        xx = data["tan(b)"][masks[ell2]]
        yy = np.log(data["INTEN"][masks[ell2]])
        nan_mask = np.isnan(yy)
        nan_mask |= np.isinf(yy)
        if deredden:
            zz = np.log(data["INTEN_DERED"][masks[ell2]])
            nan_mask_z = np.isnan(zz)
            nan_mask_z |= np.isinf(zz)

        median_longitude.append(np.median(data["GAL-LON"][masks[ell2]]))
        if deredden:
            median_distance.append(np.median(data["DISTANCE"][masks[ell2]]))

        y_min = np.tan(min_lat)
        y_max = np.tan(max_lat)

        if not robust:
            if hasattr(stats, "siegelslopes"):
                slope_estimator = stats.siegelslopes
            else:
                logging.warning("Installed version of scipy does not have "
                                "the siegelslopes method in scipy.stats!")
                slope_estimator = stats.theilslopes

            siegel_result_pos = slope_estimator(
                yy[(xx > y_min) & (xx < y_max) & ~nan_mask],
                xx[(xx > y_min) & (xx < y_max) & ~nan_mask])
            siegel_result_neg = slope_estimator(
                yy[(xx < -y_min) & (xx > -y_max) & ~nan_mask],
                xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask])
            if deredden:
                siegel_result_pos_dr = slope_estimator(
                    zz[(xx > y_min) & (xx < y_max) & ~nan_mask_z],
                    xx[(xx > y_min) & (xx < y_max) & ~nan_mask_z])
                siegel_result_neg_dr = slope_estimator(
                    zz[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z],
                    xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z])

            slopes_pos.append(siegel_result_pos[0])
            slopes_neg.append(siegel_result_neg[0])
            intercept_pos.append(siegel_result_pos[1])
            intercept_neg.append(siegel_result_neg[1])
            if deredden:
                slopes_pos_dr.append(siegel_result_pos_dr[0])
                slopes_neg_dr.append(siegel_result_neg_dr[0])
                intercept_pos_dr.append(siegel_result_pos_dr[1])
                intercept_neg_dr.append(siegel_result_neg_dr[1])

            if fig_names is not None:
                figure_name = "{0}_{1}.png".format(fig_names, ell2)
                if xlim is None:
                    xlim = np.array([-0.9, 0.9])
                if ylim is None:
                    ylim = np.array([-4.6, 3.2])

                fig = plt.figure()
                ax = fig.add_subplot(111)
                ax2 = ax.twiny()

                ax.scatter(xx, yy, color="k", alpha=0.8)
                if deredden:
                    ax.scatter(xx, zz, color="grey", alpha=0.8)

                ax.set_xlabel(r"$\tan$(b)", fontsize=12)
                ax.set_ylabel(r"$\log$($H\alpha$ Intensity / R)", fontsize=12)
                ax.set_title(r"${0:.1f} < l < {1:.1f}$".format(
                    data["GAL-LON"][masks[ell2]].min(),
                    data["GAL-LON"][masks[ell2]].max()), fontsize=14)

                ax2.plot(np.degrees(np.arctan(xlim)), np.log([0.1, 0.1]),
                         ls=":", lw=1, color="k", label="0.1 R")
                ax2.fill_between([-min_lat.value, min_lat.value],
                                 [ylim[0], ylim[0]], [ylim[1], ylim[1]],
                                 color=pal[1], alpha=0.1,
                                 label=r"$|b| < 5\degree$")

                line_xx = np.linspace(y_min, y_max, 10)
                line_yy_pos = siegel_result_pos[0] * line_xx + siegel_result_pos[1]
                line_yy_neg = siegel_result_neg[0] * -line_xx + siegel_result_neg[1]

                ax.plot(line_xx, line_yy_pos, color="r", lw=3, alpha=0.9,
                        label=r"$H_{{n_e^2}} = {0:.2f} D$".format(1 / -siegel_result_pos[0]))
                ax.plot(-line_xx, line_yy_neg, color="b", lw=3, alpha=0.9,
                        label=r"$H_{{n_e^2}} = {0:.2f} D$".format(1 / siegel_result_neg[0]))
                if deredden:
                    line_yy_pos_dr = siegel_result_pos_dr[0] * line_xx + siegel_result_pos_dr[1]
                    line_yy_neg_dr = siegel_result_neg_dr[0] * -line_xx + siegel_result_neg_dr[1]
                    ax.plot(line_xx, line_yy_pos_dr, color="r", lw=3, alpha=0.9, ls="--",
                            label=r"Dered: $H_{{n_e^2}} = {0:.2f} D$".format(
                                1 / -siegel_result_pos_dr[0]))
                    ax.plot(-line_xx, line_yy_neg_dr, color="b", lw=3, alpha=0.9, ls="--",
                            label=r"Dered: $H_{{n_e^2}} = {0:.2f} D$".format(
                                1 / siegel_result_neg_dr[0]))

                ax.set_xlim(xlim)
                ax.set_ylim(ylim)
                ax2.set_xlabel(r"$b$ (deg)", fontsize=12)
                ax2.set_xlim(np.degrees(np.arctan(xlim)))
                ax.legend(fontsize=12, loc=1)
                ax2.legend(fontsize=12, loc=2)

                plt.tight_layout()
                plt.savefig(figure_name, dpi=300)
                del fig
                plt.close()

        else:
            yy_pos = yy[(xx > y_min) & (xx < y_max) & ~nan_mask]
            xx_pos = xx[(xx > y_min) & (xx < y_max) & ~nan_mask]
            yy_neg = yy[(xx < -y_min) & (xx > -y_max) & ~nan_mask]
            xx_neg = xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask]

            if (len(yy_pos) < 5) | (len(yy_neg) < 5):
                # too few points to bootstrap a fit; record NaNs for this mask
                slopes_pos.append(np.nan)
                slopes_neg.append(np.nan)
                slopes_pos_err.append(np.nan)
                slopes_neg_err.append(np.nan)
                intercept_pos.append(np.nan)
                intercept_neg.append(np.nan)
                intercept_pos_err.append(np.nan)
                intercept_neg_err.append(np.nan)
                if deredden:
                    slopes_pos_dr.append(np.nan)
                    slopes_neg_dr.append(np.nan)
                    slopes_pos_dr_err.append(np.nan)
                    slopes_neg_dr_err.append(np.nan)
                    intercept_pos_dr.append(np.nan)
                    intercept_neg_dr.append(np.nan)
                    intercept_pos_dr_err.append(np.nan)
                    intercept_neg_dr_err.append(np.nan)
            else:
                if deredden:
                    zz_dr_pos = zz[(xx > y_min) & (xx < y_max) & ~nan_mask_z]
                    xx_dr_pos = xx[(xx > y_min) & (xx < y_max) & ~nan_mask_z]
                    zz_dr_neg = zz[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z]
                    xx_dr_neg = xx[(xx < -y_min) & (xx > -y_max) & ~nan_mask_z]

                    def slope_int_estimator_pos_dr(inds, YY=zz_dr_pos, XX=xx_dr_pos):
                        """estimate slope using sm.RLM"""
                        XX = XX[inds]
                        YY = YY[inds]
                        XX = sm.add_constant(XX)
                        res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit()
                        return res.params

                    def slope_int_estimator_neg_dr(inds, YY=zz_dr_neg, XX=xx_dr_neg):
                        """estimate slope using sm.RLM"""
                        XX = XX[inds]
                        YY = YY[inds]
                        XX = sm.add_constant(XX)
                        res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit()
                        return res.params

                def slope_int_estimator_pos(inds, YY=yy_pos, XX=xx_pos):
                    """estimate slope using sm.RLM"""
                    XX = XX[inds]
                    YY = YY[inds]
                    XX = sm.add_constant(XX)
                    res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit()
                    return res.params

                def slope_int_estimator_neg(inds, YY=yy_neg, XX=xx_neg):
                    """estimate slope using sm.RLM"""
                    XX = XX[inds]
                    YY = YY[inds]
                    XX = sm.add_constant(XX)
                    res = sm.RLM(YY, XX, M=sm.robust.norms.HuberT()).fit()
                    return res.params

                boot_pos = bootstrap(np.arange(len(yy_pos)),
                                     func=slope_int_estimator_pos, n_boot=n_boot)
                boot_neg = bootstrap(np.arange(len(yy_neg)),
                                     func=slope_int_estimator_neg, n_boot=n_boot)

                slopes_pos.append(np.mean(boot_pos[:, 1], axis=0))
                slopes_neg.append(np.mean(boot_neg[:, 1], axis=0))
                slopes_pos_err.append(np.std(boot_pos[:, 1], axis=0))
                slopes_neg_err.append(np.std(boot_neg[:, 1], axis=0))
                intercept_pos.append(np.mean(boot_pos[:, 0], axis=0))
                intercept_neg.append(np.mean(boot_neg[:, 0], axis=0))
                intercept_pos_err.append(np.std(boot_pos[:, 0], axis=0))
                intercept_neg_err.append(np.std(boot_neg[:, 0], axis=0))

                if deredden:
                    boot_pos_dr = bootstrap(np.arange(len(zz_dr_pos)),
                                            func=slope_int_estimator_pos_dr, n_boot=n_boot)
                    boot_neg_dr = bootstrap(np.arange(len(zz_dr_neg)),
                                            func=slope_int_estimator_neg_dr, n_boot=n_boot)

                    slopes_pos_dr.append(np.mean(boot_pos_dr[:, 1], axis=0))
                    slopes_neg_dr.append(np.mean(boot_neg_dr[:, 1], axis=0))
                    slopes_pos_dr_err.append(np.std(boot_pos_dr[:, 1], axis=0))
                    slopes_neg_dr_err.append(np.std(boot_neg_dr[:, 1], axis=0))
                    intercept_pos_dr.append(np.mean(boot_pos_dr[:, 0], axis=0))
                    intercept_neg_dr.append(np.mean(boot_neg_dr[:, 0], axis=0))
                    intercept_pos_dr_err.append(np.std(boot_pos_dr[:, 0], axis=0))
                    intercept_neg_dr_err.append(np.std(boot_neg_dr[:, 0], axis=0))

                if fig_names is not None:
                    figure_name = "{0}_{1}.png".format(fig_names, ell2)
                    if xlim is None:
                        xlim = np.array([-0.9, 0.9])
                    if ylim is None:
                        ylim = np.array([-4.6, 3.2])

                    fig = plt.figure()
                    ax = fig.add_subplot(111)
                    ax2 = ax.twiny()

                    ax.scatter(xx, yy, color="k", alpha=0.8)
                    if deredden:
                        ax.scatter(xx, zz, color="grey", alpha=0.8)

                    ax.set_xlabel(r"$\tan$(b)", fontsize=12)
                    ax.set_ylabel(r"$\log$($H\alpha$ Intensity / R)", fontsize=12)
                    ax.set_title(r"${0:.1f} < l < {1:.1f}$".format(
                        data["GAL-LON"][masks[ell2]].min(),
                        data["GAL-LON"][masks[ell2]].max()), fontsize=14)

                    ax2.plot(np.degrees(np.arctan(xlim)), np.log([0.1, 0.1]),
                             ls=":", lw=1, color="k", label="0.1 R")
                    ax2.fill_between([-min_lat.value, min_lat.value],
                                     [ylim[0], ylim[0]], [ylim[1], ylim[1]],
                                     color=pal[1], alpha=0.1,
                                     label=r"$|b| < 5\degree$")

                    line_xx = np.linspace(y_min, y_max, 100)

                    def get_slope_conf_band(boot_res, X=line_xx):
                        yy = [[res[0] + res[1] * X] for res in boot_res]
                        yy = np.vstack(yy)
                        return np.percentile(yy, (5, 95), axis=0)

                    line_yy_pos = slopes_pos[-1] * line_xx + intercept_pos[-1]
                    line_yy_neg = slopes_neg[-1] * -line_xx + intercept_neg[-1]
                    line_yy_pos_range = get_slope_conf_band(boot_pos)
                    line_yy_neg_range = get_slope_conf_band(boot_neg, X=-line_xx)

                    ax.plot(line_xx, line_yy_pos, color="r", lw=3, alpha=0.9,
                            label=r"$H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(
                                1 / -slopes_pos[-1],
                                np.abs(1 / slopes_pos[-1] * slopes_pos_err[-1] / slopes_pos[-1])))
                    ax.fill_between(line_xx, line_yy_pos_range[0], line_yy_pos_range[1],
                                    color="r", alpha=0.2)
                    ax.plot(-line_xx, line_yy_neg, color="b", lw=3, alpha=0.9,
                            label=r"$H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(
                                1 / slopes_neg[-1],
                                np.abs(-1 / slopes_neg[-1] * slopes_neg_err[-1] / slopes_neg[-1])))
                    ax.fill_between(-line_xx, line_yy_neg_range[0], line_yy_neg_range[1],
                                    color="b", alpha=0.2)

                    if deredden:
                        line_yy_pos_dr = slopes_pos_dr[-1] * line_xx + intercept_pos_dr[-1]
                        line_yy_neg_dr = slopes_neg_dr[-1] * -line_xx + intercept_neg_dr[-1]
                        line_yy_pos_range_dr = get_slope_conf_band(boot_pos_dr)
                        line_yy_neg_range_dr = get_slope_conf_band(boot_neg_dr, X=-line_xx)

                        ax.plot(line_xx, line_yy_pos_dr, color="r", lw=3, alpha=0.9, ls="--",
                                label=r"Dered: $H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(
                                    1 / -slopes_pos_dr[-1],
                                    np.abs(1 / slopes_pos_dr[-1] * slopes_pos_dr_err[-1] / slopes_pos_dr[-1])))
                        ax.fill_between(line_xx, line_yy_pos_range_dr[0], line_yy_pos_range_dr[1],
                                        color="r", alpha=0.2)
                        ax.plot(-line_xx, line_yy_neg_dr, color="b", lw=3, alpha=0.9, ls="--",
                                label=r"Dered: $H_{{n_e^2}} = ({0:.2f} \pm {1:.2f}) D$".format(
                                    1 / slopes_neg_dr[-1],
                                    np.abs(-1 / slopes_neg_dr[-1] * slopes_neg_dr_err[-1] / slopes_neg_dr[-1])))
                        ax.fill_between(-line_xx, line_yy_neg_range_dr[0], line_yy_neg_range_dr[1],
                                        color="b", alpha=0.2)

                    ax.set_xlim(xlim)
                    ax.set_ylim(ylim)
                    ax2.set_xlabel(r"$b$ (deg)", fontsize=12)
                    ax2.set_xlim(np.degrees(np.arctan(xlim)))
                    ax.legend(fontsize=12, loc=1)
                    ax2.legend(fontsize=12, loc=2)

                    plt.tight_layout()
                    plt.savefig(figure_name, dpi=300)
                    del fig
                    plt.close()

    # assemble results (error estimates are only available from the robust fits)
    results = {
        "median_longitude": np.array(median_longitude),
        "slopes_pos": np.array(slopes_pos),
        "slopes_neg": np.array(slopes_neg),
        "intercept_pos": np.array(intercept_pos),
        "intercept_neg": np.array(intercept_neg),
    }
    if robust:
        results["slopes_pos_err"] = np.array(slopes_pos_err)
        results["slopes_neg_err"] = np.array(slopes_neg_err)
        results["intercept_pos_err"] = np.array(intercept_pos_err)
        results["intercept_neg_err"] = np.array(intercept_neg_err)
    if deredden:
        results["median_distance"] = np.array(median_distance)
        results["slopes_pos_dr"] = np.array(slopes_pos_dr)
        results["slopes_neg_dr"] = np.array(slopes_neg_dr)
        results["intercept_pos_dr"] = np.array(intercept_pos_dr)
        results["intercept_neg_dr"] = np.array(intercept_neg_dr)
        if robust:
            results["slopes_pos_dr_err"] = np.array(slopes_pos_dr_err)
            results["slopes_neg_dr_err"] = np.array(slopes_neg_dr_err)
            results["intercept_pos_dr_err"] = np.array(intercept_pos_dr_err)
            results["intercept_neg_dr_err"] = np.array(intercept_neg_dr_err)

    if return_smoothed:
        median_longitude = np.array(median_longitude)  # needed for boolean masking below
        results["smoothed_longitude"] = np.arange(np.min(median_longitude),
                                                  np.max(median_longitude),
                                                  0.25)
        if deredden:
            distance_interp = interp1d(median_longitude, median_distance)
            results["smoothed_distance"] = distance_interp(results["smoothed_longitude"])

        smoothed_slope_pos_ha = np.zeros((3, len(results["smoothed_longitude"])))
        smoothed_slope_neg_ha = np.zeros((3, len(results["smoothed_longitude"])))
        smoothed_slope_pos_ha_dr = np.zeros((3, len(results["smoothed_longitude"])))
        smoothed_slope_neg_ha_dr = np.zeros((3, len(results["smoothed_longitude"])))

        for ell, lon in enumerate(results["smoothed_longitude"]):
            window = ((median_longitude <= lon + smoothed_width.value / 2) &
                      (median_longitude > lon - smoothed_width.value / 2))
            smoothed_slope_pos_ha[:, ell] = np.nanpercentile(
                np.array(slopes_pos)[window], (10, 50, 90))
            smoothed_slope_neg_ha[:, ell] = np.nanpercentile(
                np.array(slopes_neg)[window], (10, 50, 90))
            if deredden:
                smoothed_slope_pos_ha_dr[:, ell] = np.nanpercentile(
                    np.array(slopes_pos_dr)[window], (10, 50, 90))
                smoothed_slope_neg_ha_dr[:, ell] = np.nanpercentile(
                    np.array(slopes_neg_dr)[window], (10, 50, 90))

        results["smoothed_slopes_pos"] = smoothed_slope_pos_ha
        results["smoothed_slopes_neg"] = smoothed_slope_neg_ha
        if deredden:
            results["smoothed_slopes_pos_dr"] = smoothed_slope_pos_ha_dr
            results["smoothed_slopes_neg_dr"] = smoothed_slope_neg_ha_dr

    return results
fe2cd6d1cc1dfa18b7a78593e326f80ee99222bc
24,414
def get_aircon_mock(said):
    """Get a mock of an air conditioner."""
    mock_aircon = mock.Mock(said=said)
    mock_aircon.connect = AsyncMock()
    mock_aircon.fetch_name = AsyncMock(return_value="TestZone")
    mock_aircon.get_online.return_value = True
    mock_aircon.get_power_on.return_value = True
    mock_aircon.get_mode.return_value = whirlpool.aircon.Mode.Cool
    mock_aircon.get_fanspeed.return_value = whirlpool.aircon.FanSpeed.Auto
    mock_aircon.get_current_temp.return_value = 15
    mock_aircon.get_temp.return_value = 20
    mock_aircon.get_current_humidity.return_value = 80
    mock_aircon.get_humidity.return_value = 50
    mock_aircon.get_h_louver_swing.return_value = True

    mock_aircon.set_power_on = AsyncMock()
    mock_aircon.set_mode = AsyncMock()
    mock_aircon.set_temp = AsyncMock()
    mock_aircon.set_humidity = AsyncMock()
    mock_aircon.set_fanspeed = AsyncMock()
    mock_aircon.set_h_louver_swing = AsyncMock()

    return mock_aircon
68833445b94b2194f73c9b699d925bb92dca010b
24,416
def mutation(individual):
    """
    Shuffle certain parameters of the network to keep evolving it.
    Concretely:
    - thresh, tau_v, tau_t, alpha_v, alpha_t, q
    """
    individual[0].update_params()
    return individual,
8ccd373f991cbf2e8161e6bbe32375ca8826e48c
24,417
def truncate_repeated_single_step_traversals_in_sub_queries(
    compound_match_query: CompoundMatchQuery,
) -> CompoundMatchQuery:
    """For each sub-query, remove one-step traversals that overlap a previous traversal location."""
    lowered_match_queries = []
    for match_query in compound_match_query.match_queries:
        new_match_query = truncate_repeated_single_step_traversals(match_query)
        lowered_match_queries.append(new_match_query)

    return compound_match_query._replace(match_queries=lowered_match_queries)
b5d264640fb65ff7162209a714257b0a65128e89
24,418
def intersect(list1, list2):
    """
    Compute the intersection of two sorted lists.

    Returns a new sorted list containing only elements that are in
    both list1 and list2.

    This function can be iterative.
    """
    result_list = []
    idx1 = 0
    idx2 = 0
    while idx1 < len(list1) and idx2 < len(list2):
        if list1[idx1] == list2[idx2]:
            result_list.append(list1[idx1])
            idx1 += 1
            idx2 += 1
        elif list1[idx1] < list2[idx2]:
            idx1 += 1
        elif list1[idx1] > list2[idx2]:
            idx2 += 1
        else:
            print('error in func intersect!!!')
            return
    return result_list
d0f50b466108f685dc74d227554ab057cac018ae
24,419
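A quick sanity check of the two-pointer walk above, with illustrative inputs:

# Usage sketch for intersect(); inputs are made up for illustration.
evens = [2, 4, 6, 8, 10]
squares = [1, 4, 9, 16]
print(intersect(evens, squares))  # [4] -- the only value in both sorted lists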
import typing


def get_parent_project_ids(project_id: int, only_if_child_can_add_users_to_parent: bool = False) -> typing.List[int]:
    """
    Return the list of parent project IDs for an existing project.

    :param project_id: the ID of an existing project
    :param only_if_child_can_add_users_to_parent: whether to only return parent
        projects that someone with GRANT permissions on this project can
        (transitively) add users to
    :return: list of project IDs
    """
    subproject_relationships: typing.Iterable[SubprojectRelationship] = SubprojectRelationship.query.filter_by(
        child_project_id=project_id
    ).all()
    parent_project_ids = []
    for subproject_relationship in subproject_relationships:
        if subproject_relationship.child_can_add_users_to_parent or not only_if_child_can_add_users_to_parent:
            parent_project_ids.append(subproject_relationship.parent_project_id)
    return parent_project_ids
b0c9d2241a0b114b3fcf531592b7f05000596fec
24,420
def integral_length(v):
    """
    Compute the integral length of a given rational vector.

    INPUT:

    - ``v`` - any object which can be converted to a list of rationals

    OUTPUT:

    Rational number ``r`` such that ``v = r u``, where ``u`` is the
    primitive integral vector in the direction of ``v``.

    EXAMPLES::

        sage: lattice_polytope.integral_length([1, 2, 4])
        1
        sage: lattice_polytope.integral_length([2, 2, 4])
        2
        sage: lattice_polytope.integral_length([2/3, 2, 4])
        2/3
    """
    data = [QQ(e) for e in list(v)]
    ns = [e.numerator() for e in data]
    ds = [e.denominator() for e in data]
    return gcd(ns) / lcm(ds)
54d2b2726bea848e1a5836425516371fc09f54b3
24,422
def load_classification_pipeline(
    model_dir: str = "wukevin/tcr-bert", multilabel: bool = False, device: int = 0
) -> TextClassificationPipeline:
    """
    Load the pipeline object that does classification
    """
    try:
        tok = ft.get_pretrained_bert_tokenizer(model_dir)
    except OSError:
        tok = ft.get_aa_bert_tokenizer(64)

    if multilabel:
        model = BertForSequenceClassificationMulti.from_pretrained(model_dir)
        pipeline = TextMultiClassificationPipeline(
            model=model,
            tokenizer=tok,
            device=device,
            framework="pt",
            task="multilabel_classification",
            return_all_scores=True,
        )
    else:
        model = BertForSequenceClassification.from_pretrained(model_dir)
        pipeline = TextClassificationPipeline(
            model=model, tokenizer=tok, return_all_scores=True, device=device
        )
    return pipeline
0811cdc4ddaac3992e1cec7f43d88df276356c5c
24,423
from math import tan

from PIL import Image


def skew_image(img, angle):
    """
    Skew image using some math

    :param img: PIL image object
    :param angle: Angle in radians (function doesn't do well outside the range -1 -> 1, but still works)
    :return: PIL image object
    """
    width, height = img.size
    # Get the width that is to be added to the image based on the angle of skew
    xshift = tan(abs(angle)) * height
    new_width = width + int(xshift)
    if new_width < 0:
        return img

    # Apply transform
    img = img.transform(
        (new_width, height),
        Image.AFFINE,
        (1, angle, -xshift if angle > 0 else 0, 0, 1, 0),
        Image.BICUBIC,
    )
    return img
5b52a87edc44669e9fad82efd5c594df12edee41
24,424
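A minimal usage sketch for skew_image, assuming Pillow is installed; the input image is generated in memory rather than loaded from disk:

from PIL import Image

canvas = Image.new("RGB", (200, 100), "white")  # illustrative blank image
skewed = skew_image(canvas, 0.3)                # skew by 0.3 rad (~17 degrees)
print(skewed.size)                              # width grows by int(tan(0.3) * height) = 30 px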
import logging


def test_process_bto_order_high_risk(monkeypatch, capsys, caplog):
    """BTO order should be correctly processed with high risk flag set"""
    caplog.set_level(logging.INFO)
    monkeypatch.setitem(USR_SET, "high_risk_ord_value", 1000)
    monkeypatch.setitem(USR_SET, "buy_limit_percent", 0.03)
    monkeypatch.setitem(USR_SET, "SL_percent", 0.25)
    monkeypatch.setitem(VALID_ORD_INPUT, "contract_price", 2.00)
    flags = {"SL": None, "risk_level": "high risk", "reduce": None}
    monkeypatch.setitem(VALID_ORD_INPUT, "flags", flags)

    def mock_place_order(acct_num, order_spec):
        built_order = order_spec.build()
        assert built_order["price"] == "2.06"
        assert built_order["orderLegCollection"][0]["quantity"] == 4
        assert built_order["orderStrategyType"] == "TRIGGER"
        assert built_order["childOrderStrategies"][0]["orderType"] == "STOP"
        assert built_order["childOrderStrategies"][0]["stopPrice"] == "1.50"
        return "PASSAR"

    client = tda.client.Client
    monkeypatch.setattr(client, "place_order", mock_place_order)
    am.process_bto_order(client, "1234567890", VALID_ORD_INPUT, USR_SET)

    captured = capsys.readouterr()
    assert captured.out.split()[-1] == "PASSAR"
    logged = caplog.text
    assert logged.split()[-1] == "PASSAR"
0981a09686670ad8d941a438514832c17b541863
24,425
import types
import doctest


def _load_tests_from_module(tests, module, globs, setUp=None, tearDown=None):
    """Load tests from module, iterating through submodules."""
    for attr in (getattr(module, x) for x in dir(module) if not x.startswith("_")):
        if isinstance(attr, types.ModuleType):
            suite = doctest.DocTestSuite(
                attr,
                globs,
                setUp=setUp,
                tearDown=tearDown,
                optionflags=doctest.ELLIPSIS,
            )
            tests.addTests(suite)
    return tests
068eb24fd826192730bfb7dde2c978ef42fb8475
24,426
def calculate_full_spectrum(xs, cp, ep=None, betas=(0, 0), data=None):
    """Direct solution of the k-eigenvalue problem in integral transport by
    the collision probability method. Input data are the xs list and the
    collision probabilities in cp. Only isotropic scattering is allowed. A
    relation of albedo for the partial currents can be used at the boundary."""
    st, ss, chi, nsf = xs
    G, I = nsf.shape
    check_xs(xs)
    betaL, betaR = betas

    if (betaL < 0) or (betaL > 1):
        raise ValueError("betaL (left albedo) is not in valid range")
    elif betaL > 0:
        if ep is None:
            raise ValueError("betaL > 0, but no input escape probs")
        if data is None:
            raise ValueError("input mesh data is needed for VjoSbL")
        else:
            # r, geo, V, Sb = data.xi, data.geometry_type, data.Vi, data.Si[0]
            # V = calculate_volumes(r, geo)
            # Sb = calculate_surfaces(r, geo)[0]
            VjoSbL = data.Vi / data.Si[0]
    if (betaR < 0) or (betaR > 1):
        raise ValueError("betaR (right albedo) is not in valid range")
    elif betaR > 0:
        if ep is None:
            raise ValueError("betaR > 0, but no input escape probs")
        if data is None:
            raise ValueError("input mesh data is needed for VjoSbR")
        else:
            VjoSbR = data.Vi / data.Si[-1]

    def get_rt(rpjx, st):
        total_collision = np.dot(rpjx, st)
        if data.geometry_type != 'slab':
            reflection, transmission = 1 - total_collision, 0
        else:
            reflection, transmission = 0, 1 - total_collision
        return reflection, transmission

    GI = G * I
    PS = np.zeros((GI, GI),)
    PX, F = np.zeros((GI, I),), np.zeros((I, GI),)
    # X = np.zeros_like(PX)
    Is = np.arange(I)
    for g in range(G):
        idx = slice(I * g, I * (g + 1))
        pji = np.transpose(cp[g, :, :] / st[g, :])  # reduced CP

        # apply b.c. (escape probs are only needed when an albedo is non-zero)
        if ep is not None:
            eaj, ebj = -ep[g, 0, :, 1], ep[g, -1, :, 0]
        if betaL > 0:
            # pja and pjb are both needed if refl at both sides
            pja = 4 * VjoSbL * eaj
        if betaR > 0:
            pjb = 4 * VjoSbR * ebj

        if betaL > 0:
            r, t = get_rt(pja, st[g, :])
            coef = betaL / (1 - betaL * (r + t**2 * betaR / (1 - betaR * r)))
            pji += coef * np.dot(np.diag(eaj), np.tile(pja, (I, 1)))
            if betaR > 0:
                coef *= betaR * t
                pji += coef * np.dot(np.diag(eaj), np.tile(pjb, (I, 1)))
        if betaR > 0:
            r, t = get_rt(pjb, st[g, :])
            coef = betaR / (1 - betaR * (r + t**2 * betaL / (1 - betaL * r)))
            pji += coef * np.dot(np.diag(ebj), np.tile(pjb, (I, 1)))
            if betaL > 0:
                coef *= betaL * t
                pji += coef * np.dot(np.diag(ebj), np.tile(pja, (I, 1)))

        # X[Is + g * I, Is] = chi[g,:]
        F[Is, Is + g * I] = nsf[g, :]
        PX[idx, :] = pji * chi[g, :]
        for gg in range(G):
            jdx = slice(I * gg, I * (gg + 1))
            PS[idx, jdx] = pji * ss[g, gg, 0, :]

    PS *= -1
    PS[np.diag_indices_from(PS)] += 1
    H = np.dot(F, np.dot(np.linalg.inv(PS), PX))
    return np.linalg.eig(H)
ad145dc3fc5ae57f6512cb01b1119a2fc150b4bd
24,427
def get_connectors_by_type(type: str):
    """
    Convenience method for `get_connectors()`.
    """
    return get_connectors(type)
7e41c2a37173a4d72d7d947aa5a166c23f102da0
24,428
def crawl(alphabet, initial, accepts, follow):
    """
    Create a new FSM from the above conditions.
    """
    states = [initial]
    accepting = set()
    transition = dict()

    i = 0
    while i < len(states):
        state = states[i]

        if accepts(state):
            accepting.add(i)

        transition[i] = dict()
        for symbol in alphabet:
            try:
                next_states = follow(state, symbol)
            except OblivionError:
                continue
            else:
                try:
                    j = states.index(next_states)
                except ValueError:
                    j = len(states)
                    states.append(next_states)
                transition[i][symbol] = j
        i += 1

    return FSM(
        alphabet=alphabet,
        states=range(len(states)),
        initial=0,
        accepting=accepting,
        transition=transition,
        __validation__=False,
    )
c72b743ed4d06691fea020e2e66236a54d53df5f
24,429
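A self-contained sketch of driving crawl(); the FSM container and OblivionError below are minimal hypothetical stand-ins for the library types the function assumes:

class OblivionError(Exception):
    """Raised by follow() when a symbol leads nowhere."""

class FSM:
    # Stand-in: just stores what crawl() hands it (extra kwargs ignored).
    def __init__(self, alphabet, states, initial, accepting, transition, **_):
        self.alphabet, self.states = alphabet, states
        self.initial, self.accepting, self.transition = initial, accepting, transition

# DFA over {a, b} accepting strings with an even number of 'a's;
# abstract states are parities (0/1), renumbered by crawl() as discovered.
fsm = crawl(
    alphabet={"a", "b"},
    initial=0,
    accepts=lambda parity: parity == 0,
    follow=lambda parity, sym: parity ^ 1 if sym == "a" else parity,
)
print(fsm.transition)  # {0: {'a': 1, 'b': 0}, 1: {'a': 0, 'b': 1}} (symbol order may vary)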
def mpncovresnet101(pretrained=False, **kwargs):
    """Constructs a ResNet-101 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = MPNCOVResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['mpncovresnet101']))
    return model
bad52e5b47a84faabdb9d82fb50e585ee287b392
24,430
def cathegory_encoder(data, labelCathegory=labelCathegory):
    """Encode categorical labels"""
    for k in labelCathegory:
        encoder = sklearn.preprocessing.LabelEncoder()
        encoder.fit(list(data[k].values))
        data[k] = encoder.transform(list(data[k].values))
    return data
dc4c549e58097d219ade1b7140a9e09356692cd8
24,431
def split_dataset(args, dataset):
    """Split the dataset

    Parameters
    ----------
    args : dict
        Settings
    dataset
        Dataset instance

    Returns
    -------
    train_set
        Training subset
    val_set
        Validation subset
    test_set
        Test subset
    """
    train_ratio, val_ratio, test_ratio = map(float, args['split_ratio'].split(','))
    if args['split'] == 'scaffold':
        train_set, val_set, test_set = ScaffoldSplitter.train_val_test_split(
            dataset, frac_train=train_ratio, frac_val=val_ratio,
            frac_test=test_ratio, scaffold_func='smiles')
    elif args['split'] == 'random':
        train_set, val_set, test_set = RandomSplitter.train_val_test_split(
            dataset, frac_train=train_ratio, frac_val=val_ratio,
            frac_test=test_ratio)
    else:
        raise ValueError(
            "Expect the splitting method to be 'scaffold' or 'random', "
            "got {}".format(args['split']))

    return train_set, val_set, test_set
1fbaac75655694bc1ca3a5e8ed06d31401d3dd9c
24,433
def depthwise_conv2d_nchw(inputs, weight, bias=None, stride=1, padding=0, dilation=1):
    """Depthwise convolution 2d NCHW layout

    Args:
    -----------------------------
    inputs  : tvm.te.tensor.Tensor
        shape [batch, channel, height, width]
    weight  : tvm.te.tensor.Tensor
        shape [in_channel, factor, kernel_height, kernel_width]
    bias    : (optional:None) tvm.te.tensor.Tensor
        shape [out_channel]
    stride  : (optional:1) int or tuple
    padding : (optional:0) int or tuple
    dilation: (optional:1) int
    -----------------------------

    Returns:
    -----------------------------
    tvm.te.tensor.Tensor
        shape [batch, out_channel, output_height, output_width]
    -----------------------------
    """
    batch_size, in_channel, in_h, in_w = inputs.shape
    _in_channel, factor, k_h, k_w = weight.shape
    assert_print(_in_channel.value == in_channel.value)
    out_channel = in_channel * factor

    stride = (stride, stride) if isinstance(stride, (int, tvm.tir.IntImm)) else stride
    padding = (padding, padding) if isinstance(padding, (int, tvm.tir.IntImm)) else padding
    dilation = (dilation, dilation) if isinstance(dilation, (int, tvm.tir.IntImm)) else dilation
    assert_print(isinstance(stride, tuple) and len(stride) == 2)
    assert_print(isinstance(padding, tuple) and len(padding) == 2)
    assert_print(isinstance(dilation, tuple) and len(dilation) == 2)

    out_h = (in_h + 2 * padding[0] - dilation[0] * (k_h - 1) - 1) // stride[0] + 1
    out_w = (in_w + 2 * padding[1] - dilation[1] * (k_w - 1) - 1) // stride[1] + 1
    rh = tvm.te.reduce_axis((0, k_h))
    rw = tvm.te.reduce_axis((0, k_w))

    padded = zero_pad2d(inputs, padding=padding)
    output = tvm.te.compute(
        (batch_size, out_channel, out_h, out_w),
        lambda b, c, h, w: tvm.te.sum(
            (padded[b, c // factor,
                    h * stride[0] + rh * dilation[0],
                    w * stride[1] + rw * dilation[1]]
             * weight[c // factor, c % factor, rh, rw]),
            axis=[rw, rh]
        )
    )
    if bias is not None:
        output = tvm.te.compute(
            (batch_size, out_channel, out_h, out_w),
            lambda b, c, h, w: output[b, c, h, w] + bias[c]
        )
    return output
bd4f5f0f7dc3a12adefce0e19fa010919e9b9407
24,434
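A usage sketch, assuming TVM with the legacy te schedule API is installed and the module's zero_pad2d/assert_print helpers are importable alongside the function:

import tvm
from tvm import te

# Hypothetical shapes: batch 1, 32 channels, 56x56 input, 3x3 kernel, factor 1.
data = te.placeholder((1, 32, 56, 56), name="data")
kernel = te.placeholder((32, 1, 3, 3), name="kernel")
out = depthwise_conv2d_nchw(data, kernel, stride=1, padding=1)

s = te.create_schedule(out.op)
mod = tvm.build(s, [data, kernel, out], target="llvm")  # compile for CPU
print(out.shape)  # [1, 32, 56, 56]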
def xtransformed(geo, transformation):
    """Returns a copy of the transformed Rhino Geometry object.

    Args:
        geo (:class:`Rhino.Geometry.GeometryBase`): a Rhino Geometry object
        transformation (:class:`Transformation`): the transformation.

    Returns:
        (:class:`Rhino.Geometry.GeometryBase`): the transformed geometry
    """
    T = xform_from_transformation(transformation)
    geo_copy = geo.Duplicate()
    geo_copy.Transform(T)
    return geo_copy
9d21ad58358bff07b10e18c7c3593cca68f07541
24,435
def function_calls(libfuncs):
    """
    libfuncs is the set of library functions called in the script.
    Returns the list of all library functions required in the script.
    """
    libfuncs2 = set()
    while libfuncs:
        func = libfuncs.pop()
        libfuncs2.add(func)
        for func in called_functions(func):
            if func not in libfuncs and func not in libfuncs2:
                libfuncs.add(func)
    return sorted(list(libfuncs2))
3c6e29930f0a59cc2ad5a3b24ca22c07f3fca28b
24,436
from types import SimpleNamespace


def preprocess_yaml_config(config: SimpleNamespace, prefix_keys=False) -> SimpleNamespace:
    """
    Preprocess a simple namespace.

    Currently,
    - prepend the prefix key to all the configuration parameters
    - change 'None' strings to None values

    :param config: The SimpleNamespace containing the configuration.
    :return: Preprocessed configuration as a SimpleNamespace
    """
    # Make sure there's a prefix in the configuration
    assert 'prefix' in config.__dict__, 'Please include a prefix in the yaml.'

    if prefix_keys:
        # Grab the prefix from the yaml file
        prefix = config.prefix
        # Prepend the prefix to all the keys, and get rid of the prefix entry itself
        config = SimpleNamespace(**{f'{prefix}_{k}': v
                                    for k, v in config.__dict__.items() if k != 'prefix'})

    # Change 'None' to None in the top level: recommended behavior is to use null instead of None in the yaml
    for key, value in config.__dict__.items():
        config.__dict__[key] = value if value != 'None' else None

    return config
52c4e79334bc95c573b795a6962e83d949cf9639
24,437
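A quick usage sketch with an illustrative configuration:

from types import SimpleNamespace

cfg = SimpleNamespace(prefix="model", lr=0.01, scheduler="None")
out = preprocess_yaml_config(cfg, prefix_keys=True)
print(out)  # namespace(model_lr=0.01, model_scheduler=None)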
from numpy.core import isinf, errstate


def gisinf(x):
    """
    Like isinf, but always raise an error if type not supported instead of
    returning a TypeError object.

    Notes
    -----
    `isinf` and other ufunc sometimes return a NotImplementedType object
    instead of raising any exception. This function is a wrapper to make
    sure an exception is always raised.

    This should be removed once this problem is solved at the Ufunc level.
    """
    with errstate(invalid="ignore"):
        st = isinf(x)
        if isinstance(st, type(NotImplemented)):
            raise TypeError("isinf not supported for this type")
    return st
cc525ffc10e87b44a5cee3e93fc1c4466bc7a171
24,438
def make_const(g,  # type: base_graph.BaseGraph
               name,  # type: str
               value,  # type: np.ndarray
               uniquify_name=False  # type: bool
               ):
    """
    Convenience method to add a `Const` op to a `gde.Graph`.

    Args:
      g: The graph that the node should be added to
      name: Name for the new `Const` node
      value: Value to use for the constant
      uniquify_name: if True, generate unique names by appending a numeric
        suffix in the event of a name collision. Otherwise name collisions
        result in an error.

    Returns `gde.Node` object representing the new node.
    """
    dtype = tf.as_dtype(value.dtype)
    ret = g.add_node(name, "Const", uniquify_name=uniquify_name)
    ret.add_attr("dtype", dtype)
    ret.add_attr("value", value)
    ret.set_outputs_from_pairs([(dtype, tf.TensorShape(value.shape))])
    return ret
fd8493c6ea33c2fd4f930f78fd906ddb5fcdf12e
24,439
def find_maxima(x):
    """Finds the indices of the relative maxima of x."""
    idx = []
    N = len(x)
    if x[1] < x[0]:
        idx.append(0)
    for i in range(1, N - 1):
        if x[i-1] < x[i] and x[i+1] < x[i]:
            idx.append(i)
    if x[-2] < x[-1]:
        idx.append(N - 1)
    return idx
8be862981e46ac2534a78354adf52993ca78426a
24,440
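A short demonstration; note the boundary rule counts an endpoint as a maximum when it exceeds its single neighbor:

signal = [3, 1, 4, 1, 5, 2]
print(find_maxima(signal))  # [0, 2, 4]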
import numpy as np


def _transform_rankings(Y):
    """Transform the rankings to integer."""
    Yt = np.zeros(Y.shape, dtype=np.int64)
    Yt[np.isfinite(Y)] = Y[np.isfinite(Y)]
    Yt[np.isnan(Y)] = RANK_TYPE.RANDOM.value
    Yt[np.isinf(Y)] = RANK_TYPE.TOP.value
    return Yt
7a89bc4dd2ff1ad8b00456198f4051ab9030ccbc
24,441
import io
import gzip


def gzip_bytes(bytes_obj):
    """Compress bytes (or a str) as gzip in memory; returns the io.BytesIO buffer."""
    if isinstance(bytes_obj, (str,)):
        bytes_obj = bytes_obj.encode()
    out_ = io.BytesIO()
    with gzip.GzipFile(fileobj=out_, mode='w') as fo:
        fo.write(bytes_obj)
    return out_
68d0a6b3c64b8633a3084114f617ccd792a688f9
24,447
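A usage sketch showing a round trip; gzip.decompress recovers the original payload from the returned buffer:

import gzip

buf = gzip_bytes("hello gzip")          # io.BytesIO holding the gzip stream
print(gzip.decompress(buf.getvalue()))  # b'hello gzip'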
def ones_like(other_ary):
    """
    Create a PitchArray with all entries equal to 1, whose shape and dtype
    are the same as other_ary
    """
    result = PitchArray(other_ary.shape, other_ary.dtype)
    result.fill(1)
    return result
7bbdbdaa409de3986db66c98eedc3670d2483b2b
24,448
def inference_multiview(views, n_classes, keep_prob):
    """
    views: N x V x W x H x C tensor
    """
    n_views = views.get_shape().as_list()[1]

    # transpose views: (NxVxWxHxC) -> (VxNxWxHxC)
    views = tf.transpose(views, perm=[1, 0, 2, 3, 4])

    view_pool = []
    for i in range(n_views):
        # set reuse True for i > 0, for weight-sharing
        reuse = (i != 0)
        view = tf.gather(views, i)  # NxWxHxC

        conv1 = _conv('conv1', view, [11, 11, 3, 96], [1, 4, 4, 1], 'VALID', reuse=reuse)
        lrn1 = None
        pool1 = _maxpool('pool1', conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
        conv2 = _conv('conv2', pool1, [5, 5, 96, 256], group=2, reuse=reuse)
        lrn2 = None
        pool2 = _maxpool('pool2', conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
        conv3 = _conv('conv3', pool2, [3, 3, 256, 384], reuse=reuse)
        conv4 = _conv('conv4', conv3, [3, 3, 384, 384], group=2, reuse=reuse)
        conv5 = _conv('conv5', conv4, [3, 3, 384, 256], group=2, reuse=reuse)
        pool5 = _maxpool('pool5', conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')

        dim = np.prod(pool5.get_shape().as_list()[1:])
        reshape = tf.reshape(pool5, [-1, dim])

        view_pool.append(reshape)

    pool5_vp = _view_pool(view_pool, 'pool5_vp')
    print('pool5_vp', pool5_vp.get_shape().as_list())

    fc6 = _fc('fc6', pool5_vp, 4096, dropout=keep_prob)
    fc7 = _fc('fc7', fc6, 4096, dropout=keep_prob)
    fc8 = _fc('fc8', fc7, n_classes)

    return fc8
b9fd30db4d130aad29333d80a24c9cac6a6ce580
24,449
def trueReturn(data, msg):
    """Build the JSON response for a successful operation."""
    result = {
        "status": True,
        "data": data,
        "msg": msg
    }
    return JSONResponse(content=result)
7eabfe62bb0cf11b92d146cae3171fe391c27d5f
24,450
from pathlib import Path
import re


def parse_slurm_times(job_id: str, path: Path = Path.cwd()) -> float:
    """Parses the file slurm-{job_id}.out and returns, in milliseconds,
    the time measured by Slurm.

    Args:
        job_id (str): The ID of the Slurm job whose output file to parse.
        path (Path): The path where to look for the slurm output.
            Defaults to the current working directory.

    Returns:
        float: The time elapsed by the application, in milliseconds.
    """
    real_time = None
    out_file = path / f"slurm-{job_id}.out"
    try:
        with open(out_file, "r") as file:
            lines = file.readlines()
            for line in lines:
                if line.startswith("real"):
                    time = re.split("\t", line)[-1].strip()
                    real_time = parse_milliseconds(time)
        if real_time:
            return real_time
        raise ValueError(
            "Could not parse time of slurm output,"
            f" content set to {real_time} !"
        )
    except FileNotFoundError:
        raise FileNotFoundError("Slurm output was not generated.")
22cc642aa711ab302772273d3d05f7d5615e21d1
24,451
import torch
import torch.nn.functional as F


def bprl(positive: torch.Tensor, negative: torch.Tensor) -> torch.Tensor:
    """
    Bayesian Personalized Ranking Loss
    https://arxiv.org/ftp/arxiv/papers/1205/1205.2618.pdf
    """
    dist = positive - negative
    return -F.logsigmoid(dist).mean()
0fb13f41c27880e821548298a369091f0b96c0c1
24,452
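A usage sketch with made-up score tensors; the loss shrinks as positive items outrank negative ones:

import torch

pos = torch.tensor([2.0, 1.5, 0.3])  # scores for observed (positive) items
neg = torch.tensor([0.5, 1.0, 0.4])  # scores for sampled (negative) items
loss = bprl(pos, neg)
print(loss)  # -mean(log sigmoid(pos - neg))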
def select2_js_url():
    """
    Return the full url to the Select2 JavaScript library

    Default: ``None``

    # Example

        {% select2_js_url %}
    """
    return sl2.select2_js_url()
6866c7ad1a00e8d23c15f94fd9169412213aa4f0
24,453
def _SharedSuffix(pattern1, pattern2):
  """Returns the shared suffix of two patterns."""
  return _SharedPrefix(pattern1[::-1], pattern2[::-1])[::-1]
c48792aaaf3e470571cbf4d16f6af0b00a671c3f
24,454
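_SharedSuffix leans on a _SharedPrefix helper that is not shown here; below is a minimal hypothetical stand-in plus a demo of the reverse-then-prefix trick:

import os

def _SharedPrefix(pattern1, pattern2):
  # Stand-in: longest common leading substring.
  return os.path.commonprefix([pattern1, pattern2])

print(_SharedSuffix("night", "light"))  # 'ight'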
def vgg16(num_class):
    """VGG 16-layer model (configuration "D") with batch normalization"""
    model = VGG(make_layers(cfg['D'], batch_norm=True), num_classes=num_class)
    return model
abdb0a48bd5190cd7c7e50193f3d950af5195770
24,457
def IFS(*args) -> Function:
    """
    Evaluates multiple conditions and returns a value that corresponds to
    the first true condition.

    Learn more: https://support.google.com/docs/answer/7014145
    """
    return Function("IFS", args)
395c67b524b4cccbeabba73666bc1a8f78668ff2
24,458
def idwt_joined_(w, rec_lo, rec_hi, mode):
    """Computes single level discrete wavelet reconstruction"""
    n = len(w)
    m = n // 2
    ca = w[:m]
    cd = w[m:]
    x = idwt_(ca, cd, rec_lo, rec_hi, mode)
    return x
4b7371a36abc4bd094a3cd86faa1005ff5d6fd69
24,459
def _get_pattern_nts(rule):
    """Return a list of NT names present in given rule."""
    nt_names = []
    for bt in rule.ipattern.bits:
        if bt.is_nonterminal():
            nt_name = bt.nonterminal_name()
            nt_names.append(nt_name)
    return nt_names
e690e9187aaff0cf3138444db085e15adfda3847
24,460