Dataset schema:
  content — string, lengths 35 to 762k
  sha1    — string, length 40
  id      — int64, 0 to 3.66M
def function_3():
    """This is a Function prototype in Python"""
    print("Printing Docs String")
    return 0
4268904e75772b9fef804931e3a3564fda333bc7
25,647
def client():
    """client fixture"""
    return testing.TestClient(app=service.microservice.start_service(),
                              headers=CLIENT_HEADERS)
ea9997f9904057f0ffdc3175f081acb7e21e719d
25,648
def get_character_bullet(index: int) -> str:
    """Takes an index and converts it to a string containing a-z, ie.
    0 -> 'a'
    1 -> 'b'
    .
    .
    .
    26 -> 'aa'
    27 -> 'ab'
    """
    result = chr(ord('a') + index % 26)  # 0-25 maps to a-z
    if index > 25:
        current = index // 26
        while current > 0:
            # 26 letters per "digit"; the original used `% 25` and
            # `current // 26`, which break once the prefix wraps past 'z'
            result = chr(ord('a') + (current - 1) % 26) + result
            current = (current - 1) // 26
    return result
357f68feb302f11a996b5446c642ad9ca1f0f8d3
25,649
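A quick usage sketch for the bullet-lettering record above (assuming get_character_bullet is in scope):

# Expected bijective lettering: 0 -> 'a', 25 -> 'z', 26 -> 'aa'
for i in (0, 1, 25, 26, 27):
    print(i, get_character_bullet(i))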
def update_det_cov(
        res: OptResult,
        jacobian: JacobianValue):
    """Calculates the inv hessian of the deterministic variables.

    Note that this modifies res.
    """
    covars = res.hess_inv
    for v, grad in jacobian.items():
        for det, jac in grad.items():
            cov = propagate_uncertainty(covars[v], jac)
            covars[det] = covars.get(det, 0.) + cov
    return res
c505654be6f08dcf037337104b4077f6003db876
25,651
def simplex_init_modified(A, b, c):
    """
    Attempt to find a basic feasible vector for the linear program

       max:   c*x
       ST:    Ax=b
              x>=0,

    where A is a (m,n) matrix.

    Input Parameters:
        A - (m,n) constraint matrix
        b - (m,1) vector appearing in the constraint equation above
        c - (1,n) vector giving the coefficients of the objective function

    Output Parameters:
        istatus - integer parameter reporting the condition of the initialization:
            istatus = 0  indicates a basic feasible vector was found
            istatus = 4  indicates that the initialization procedure failed
            istatus = 16 indicates that the problem is infeasible
        iB - integer vector of length m specifying the indices of the basic variables
        iN - integer vector of length n-m specifying the indices of the nonbasic variables
        xB - vector of length m specifying the values of the basic variables
    """
    # Work on copies so the caller's A and b are not mutated
    # (the original aliased A_new/b_new to A/b and modified them in place).
    A_new, b_new = A.copy(), b.copy()
    neg = find_negative_index(b)
    A_new[neg] = -A[neg]
    b_new[neg] = -b[neg]
    A_new = np.hstack((A_new, np.eye(b.shape[0])))

    # phase-I problem setup
    c_phase_I = np.zeros(A_new.shape[1]).reshape(1, -1)
    c_phase_I[0, c.shape[1]:] = np.ones(b.shape[0])
    iB = np.arange(c.shape[1], c.shape[1] + b.shape[0]) + 1  # index begins with 1 for input
    iN = np.arange(0, c.shape[1]) + 1
    xB = np.matrix(np.copy(b))

    istatus_step = 1000
    while istatus_step != -1:
        try:
            istatus_step, iB, iN, xB, Binv = simplex_step(A_new, b_new, c_phase_I,
                                                          iB, iN, xB, irule=0)
        except np.linalg.LinAlgError:
            raise ValueError("iB cannot form a basis!")
        if istatus_step == 16:
            # return a 5-tuple for consistency with the success path
            istatus, iB, iN, xB, tableau = 4, None, None, None, None
            return istatus, iB, iN, xB, tableau

    iB = iB - 1
    # the original used reshape(1, -2), which raises ValueError in numpy
    optimal_cost = np.matmul(c_phase_I[0, iB].reshape(1, -1), xB)
    if optimal_cost > 0:
        istatus, iB, iN, xB, tableau = 16, None, None, None, None
        return istatus, iB, iN, xB, tableau

    if optimal_cost == 0:
        # print("optimal basis is found!")
        istatus = 0
        artificial_idx = np.arange(c.shape[1], c.shape[1] + b.shape[0])
        artificial_in_basis = np.intersect1d(artificial_idx, iB)
        tableau = np.matmul(Binv, A_new)
        # c_new = np.concatenate((c, np.zeros(A.shape[0]).reshape(1, -1)), axis=1)
        # reduced_cost = c - np.matmul(np.matmul(c_new[0, iB], Binv), A)
        if len(artificial_in_basis) == 0:
            # print("no artificial variable in the final basis")
            return istatus, iB + 1, iN, xB, tableau[:, 0:A.shape[1]]
        else:
            # print("artificial variable in the final basis")
            for xl in artificial_in_basis:
                row_l = tableau[np.where(iB == xl), :c.shape[1]]
                if np.sum(row_l) == 0:
                    tableau = np.delete(tableau, np.where(iB == xl), axis=0)
                    xB = np.delete(xB, np.where(iB == xl))
                    iB = np.delete(iB, np.where(iB == xl))
            iN = np.setdiff1d(range(c.shape[1]), iB)
            iB = iB + 1
            iN = iN + 1
            xB = xB.reshape(-1, 1)
            return istatus, iB, iN, xB, tableau[:, 0:A.shape[1]]
fd415eedaec1138812fb054656c45450a53535b8
25,652
def LeakyRelu(
    alpha: float,
    do_stabilize: bool = False) -> InternalLayer:
  """Leaky ReLU nonlinearity, i.e. `alpha * min(x, 0) + max(x, 0)`.

  Args:
    alpha: slope for `x < 0`.
    do_stabilize: set to `True` for very deep networks.

  Returns:
    `(init_fn, apply_fn, kernel_fn)`.
  """
  return ABRelu(alpha, 1, do_stabilize)
93a9f103c42979e5107291f818d387eb06feb41b
25,654
def load_interp2d(xz_data_path: str, y: list):
    """
    Setup 2D interpolation

    Example:
        x1, y1, z1, x2, y2, z2\n
        1, 3, 5, 1, 4, 6\n
        2, 3, 6, 2, 4, 7\n
        3, 3, 7, 3, 4, 8\n

        xz_data_path will lead to a file such as:
        1,5,6\n
        2,6,7\n
        3,7,8\n

        y will be: [3, 4]

    :param xz_data_path: path to csv file with columnated data, e.g. 'x1,z1,z2,...,zn'
    :param y: list of *constant* values for the second independent variable
    :return initialized interp2d instance
    """
    data = np.genfromtxt(xz_data_path, delimiter=',')
    _, num_col = data.shape
    num_series = num_col - 1

    # check to make sure number of columns and length of 'y' match
    if num_series != len(y):
        # the original built this exception but never raised it
        raise ValueError("Number of columns in '{}' inconsistent with 'y'".format(xz_data_path))

    x = data[:, 0]
    z = []
    for idx in range(1, num_series + 1):
        z.append(data[:, idx])
    return interp2d(x, y, z)
0883c317c44a97a8e38c615285315adb22a091c5
25,655
def save_new_party(json_data):
    """Saves a new party in the database.

    Args:
        json_data (json): party details
    Returns:
        json: api endpoint response
    """
    # Deserialize the input against the party schema and
    # check if the input values throw validation errors
    try:
        data = party_schema.load(json_data)
    except ValidationError as e:
        return jsonify({
            "status": 400,
            "error": e.messages
        }), 400

    party_name = data['party_name']
    hq_address = data['hq_address']
    logo_url = data['logo_url']

    # Query database for party by name
    party_by_name = Party.get_party_by_name(party_name)
    party = db().get_single_row(*party_by_name)

    if party is None:  # if name is not taken
        new_party = Party(
            party_name=party_name,
            hq_address=hq_address,
            logo_url=logo_url
        )
        save_changes(new_party)

        # 1. serialize the input for the response
        # 2. return serialized, properly formatted json to the api endpoint
        party_saved = db().get_single_row(*party_by_name)
        response = party_schema.dump(party_saved)
        response_object = jsonify({
            "status": 201,
            "data": [response]
        })
        return response_object, 201

    # default response when the name is taken
    return jsonify({
        "status": 409,
        "error": "Try a different party name; the provided name is taken."
    }), 409
e6a11646c1aa13bfceabb1c143308fc985b3f59d
25,656
def cost_function(theta, X, y, lamda=0.01, regularized=False):
    """
    Compute the cost for logistic regression, with or without regularization.

    Computes the cost of using theta as the parameter for (regularized)
    logistic regression. Uses `lamda` instead of `lambda` because of the
    keyword conflict.

    :param X: numpy array of shape (m,n) - Training data
    :param theta: numpy array (n,1) - Weights
    :param y: numpy array of shape (m,1) - Training predictions
    :param lamda: float - Regularization parameter
    :param regularized: bool (default: False) - if True the returned cost is regularized
    :return J: Cost of the theta values for the given dataset
    """
    # initial values
    m = y.size
    if not isinstance(theta, np.ndarray):
        theta = np.array(theta).reshape([-1, 1])

    h = sigmoid(X @ theta)
    J = (-(y.T @ np.log(h)) - ((1 - y.T) @ np.log(1 - h))) / m
    if regularized:
        # regularization value added to cost;
        # note we do not add regularization for the first theta
        J = J + ((theta[1:].T @ theta[1:]) * (lamda / (2 * m)))
    return J
e4002fc30455be730e6ba46db85588b113e24451
25,658
from pathlib import Path

import numpy as np
import torch


def read_image_numpy(input_filename: Path) -> torch.Tensor:
    """
    Read a NumPy file with Torch and return a torch.Tensor.

    :param input_filename: Source image file path.
    :return: torch.Tensor of shape (C, H, W).
    """
    numpy_array = np.load(input_filename)
    torch_tensor = torch.from_numpy(numpy_array)
    return torch_tensor
987185e7b207ecae1abcf01fd5ea939ace0fb869
25,659
def solution(n: int = 4000000) -> int:
    """Returns the sum of all even Fibonacci elements that are lower than
    or equal to n.

    >>> solution(10)
    10
    >>> solution(15)
    10
    >>> solution(2)
    2
    >>> solution(1)
    0
    >>> solution(34)
    44
    """
    fib = [0, 1]
    i = 0
    while fib[i] <= n:
        fib.append(fib[i] + fib[i + 1])
        if fib[i + 2] > n:
            break
        i += 1
    total = 0
    for j in range(len(fib) - 1):
        if fib[j] % 2 == 0:
            total += fib[j]
    return total
b2c3983b9888ae8a10b4ceca2faf5d943b17fbe3
25,661
def triu(m: ndarray, k: int = 0) -> ndarray:
    """
    Upper triangle of an array.

    Note: only k=0 is honoured by the underlying ArrayFire call below;
    the k argument is currently ignored.
    """
    af_array = af.data.upper(m._af_array, is_unit_diag=False)
    return ndarray(af_array)
00b0b4a301b0b59214a53d8b741c7417bac95f3d
25,662
def generate_annotation(overlay_path, img_dim, ext):
    """
    Generate custom annotation for one image from its DDSM overlay.

    Args:
    ----------
    overlay_path: string
        Overlay file path
    img_dim: tuple
        (img_height, img_width)
    ext: string
        Image file format

    Returns:
    ----------
    pandas.DataFrame
        columns: ['NAME','FEATURE','SEVERITY','X1','Y1','X2','Y2','HEIGHT','WIDTH']
        NAME        <--> image filename with extension
        FEATURE     <--> lesion_type: mass or calcifications
        X1,Y1,X2,Y2 <--> xyrb bounding box
        HEIGHT      <--> image height
        WIDTH       <--> image width
    """
    myColumns = ["NAME", "FEATURE", "SEVERITY", "X1", "Y1", "X2", "Y2", "HEIGHT", "WIDTH"]
    sdf = pd.DataFrame(columns=myColumns)
    H, W = img_dim
    overlay = get_overlay_info(overlay_path)
    total_abnormalities = overlay["total_abnormalities"]
    name = overlay["name"]
    name = str(name) + ext
    for i in range(1, total_abnormalities + 1):
        abnormality = overlay[i]
        lesion_type = abnormality["lesion_type"]
        lesion_type = '_'.join(lesion_type)
        pathology_type = abnormality["pathology_type"][0]
        boundary = abnormality["boundary"]
        x, y, w, h = cv2.boundingRect(boundary)
        X1 = int(x)
        Y1 = int(y)
        X2 = int(x + w)
        Y2 = int(y + h)
        data = [str(name), str(lesion_type), str(pathology_type), X1, Y1, X2, Y2, H, W]
        label = pd.DataFrame([data], columns=myColumns)
        sdf = sdf.append(label, ignore_index=True)
    return sdf
e9d334c834063ee27b5b9a9d597f084bc735f794
25,663
# the original `from datetime import datetime` would make the
# `datetime.date(...)` call below fail; import the module instead
import datetime

import bs4


def parse_episode_page(loc, contents):
    """Parse a page describing a single podcast episode.

    @param loc: The URL of this page.
    @type loc: basestring
    @param contents: The raw HTML contents of the episode page from which
        episode information should be parsed.
    @type contents: basestring
    @return: Dictionary describing the episode. Contains keys name (str value),
        date (datetime.date), loc (url - str value), duration (seconds - int),
        and orig_tags (tags applied to episode - list of str)
    @rtype: dict
    """
    soup = bs4.BeautifulSoup(contents)
    header = soup.find(class_='centerPosts')
    title = header.find('strong').contents[0]
    date_str = soup.find(class_='pdateS').find('em').contents[0]
    date_components = date_str.replace(',', ' ').split(' ')
    year = int(date_components[2])
    month = common.MONTH_ABBRV[date_components[0]]
    day = int(date_components[1])
    episode_date = datetime.date(year, month, day)
    tags = sorted(set(map(
        lambda x: x.contents[0],
        soup.findAll('a', rel='tag')
    )))
    duration_str = soup.find(class_='podpress_mediafile_dursize').contents[0]
    duration_str_clean = duration_str.replace('[ ', '').replace(' ]', '')
    duration = common.interpret_duration(duration_str_clean)
    return {
        'title': title,
        'date': episode_date,
        'tags': tags,
        'loc': loc,
        'duration': duration
    }
805e466c15741ee004059817efa70da66e470871
25,664
import bitstring


def _bitarray_to_message(barr):
    """Decodes a bitarray with length multiple of 5 to a byte message
    (removing the padded zeros if found)."""
    padding_len = len(barr) % 8
    if padding_len > 0:
        return bitstring.Bits(bin=barr.bin[:-padding_len]).bytes
    else:
        return barr.bytes
79e601bc30519e42c8dbf2369deea5b36a5851ff
25,665
def align_address_to_page(address: int) -> int:
    """Align the address to a page."""
    a = align_address(address) >> DEFAULT_PAGE_ALIGN_SHIFT
    return a << DEFAULT_PAGE_ALIGN_SHIFT
1211d3c1a3ae6b1bd183f3d1b1cfb1097fc7dc40
25,667
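A minimal, self-contained sketch of how the page-alignment record above behaves; the 12-bit shift and the identity align_address helper are assumptions for the demo, not definitions from the source module:

DEFAULT_PAGE_ALIGN_SHIFT = 12          # assumed: 4 KiB pages

def align_address(address):            # assumed identity helper for the demo
    return address

def align_address_to_page(address):
    a = align_address(address) >> DEFAULT_PAGE_ALIGN_SHIFT
    return a << DEFAULT_PAGE_ALIGN_SHIFT

assert align_address_to_page(0x1234) == 0x1000  # low bits are masked off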
def getNamespace(modelName):
    """Get the namespace from the rig root

    Args:
        modelName (str): Rig top node name

    Returns:
        str: Namespace
    """
    if not modelName:
        return ""

    if len(modelName.split(":")) >= 2:
        nameSpace = ":".join(modelName.split(":")[:-1])
    else:
        nameSpace = ""

    return nameSpace
abfb4c54f2dd1b54563f6c7c84e902ed4ee77b01
25,669
import re


def compile_rules(environment):
    """Compiles all the rules from the environment into a list of rules."""
    e = re.escape
    rules = [
        (
            len(environment.comment_start_string),
            TOKEN_COMMENT_BEGIN,
            e(environment.comment_start_string),
        ),
        (
            len(environment.block_start_string),
            TOKEN_BLOCK_BEGIN,
            e(environment.block_start_string),
        ),
        (
            len(environment.variable_start_string),
            TOKEN_VARIABLE_BEGIN,
            e(environment.variable_start_string),
        ),
    ]

    if environment.line_statement_prefix is not None:
        rules.append(
            (
                len(environment.line_statement_prefix),
                TOKEN_LINESTATEMENT_BEGIN,
                r"^[ \t\v]*" + e(environment.line_statement_prefix),
            )
        )
    if environment.line_comment_prefix is not None:
        rules.append(
            (
                len(environment.line_comment_prefix),
                TOKEN_LINECOMMENT_BEGIN,
                r"(?:^|(?<=\S))[^\S\r\n]*" + e(environment.line_comment_prefix),
            )
        )

    return [x[1:] for x in sorted(rules, reverse=True)]
ca7971de422f66e9c9574c13306610e84a000271
25,670
def compute_window_based_feature(seq, sample_freq, func_handle, window_length,
                                 window_stride, verbose=False, **kwargs):
    """Use this function to compute any metric within a sliding window.

    Parameters
    ----------
    seq : 1D array-like object
        e.g. a blood volume pulse sequence, a continuous blood pressure
        sequence, a heart rate sequence etc.
    sample_freq : float
        the sampling frequency of the sequence; if the signal is not sampled
        at a constant sampling frequency, then resample it beforehand
    func_handle : function handle
        the handle of the function to apply on each window
    window_length : float
        the length of each window in seconds
    window_stride : float
        the stride between two consecutive windows in seconds
    verbose : bool, optional
        if the intermediate results of each window should be collected and
        returned as well, by default False

    Returns
    -------
    numpy.ndarray of shape [n,] where n corresponds to the number of windows built
        the extracted metric value for each window
    dict containing {'w_data', 'w_masks'}
        the intermediate results of each window and the corresponding boolean
        mask used to extract the window; only returned when verbose is True

    Raises
    ------
    TypeError
        if a 'ref_hr_bpm' key is set in kwargs to compute the snr metric but
        the corresponding value is not a list
    """
    # create timestamps for the sequence
    seq = np.squeeze(seq)
    seq_ts = np.arange(0, len(seq)) * 1 / sample_freq

    res = []
    ver = {'w_data': [], 'w_masks': []}

    # set loop indexes
    ts = 0
    i = 0

    # check kwargs
    ref_hr_bpm = kwargs.pop('ref_hr_bpm', None)
    if ref_hr_bpm is not None:
        ref_hr_bpm = np.squeeze(ref_hr_bpm)

    while ts + window_length <= seq_ts[-1]:
        mask = (seq_ts >= ts) & (seq_ts < ts + window_length)
        if ref_hr_bpm is not None:
            kwargs['ref_hr_bpm'] = ref_hr_bpm[i]
        out = func_handle(seq[mask], sample_freq, verbose=True, **kwargs)
        res.append(out[0])
        if verbose:
            ver['w_data'].append(out[1])
            ver['w_masks'].append(mask)
        ts += window_stride
        i += 1

    if verbose:
        return np.asarray(res), ver
    return np.asarray(res)
4ab084d3459c617640e404b5232db4557b22c8b8
25,672
def read_cif(filename):
    """
    Read a cif file, mainly for pyxtal cif output.
    Be cautious when using it to read other cif files.

    Args:
        filename: path of the structure file

    Return:
        pyxtal structure
    """
    species = []
    coords = []
    with open(filename, 'r') as f:
        lines = f.readlines()
        for i, line in enumerate(lines):
            if line.startswith('_symmetry_Int_Tables_number'):
                sg = int(line.split()[-1])
            elif line.startswith('_cell_length_a'):
                a = float(lines[i].split()[-1])
                b = float(lines[i+1].split()[-1])
                c = float(lines[i+2].split()[-1])
                alpha = float(lines[i+3].split()[-1])
                beta = float(lines[i+4].split()[-1])
                gamma = float(lines[i+5].split()[-1])
            elif line.startswith('_symmetry_cell_setting'):
                lat_type = line.split()[-1]
            elif line.startswith('_symmetry_space_group_name_H-M '):
                symbol = line.split()[-1]
                if eval(symbol) in ["Pn", "P21/n", "C2/n"]:
                    diag = True
                else:
                    diag = False
            elif line.find('_atom_site') >= 0:
                s = i
                while True:
                    s += 1
                    if lines[s].find('_atom_site') >= 0:
                        pass
                    elif len(lines[s].split()) <= 3:
                        break
                    else:
                        tmp = lines[s].split()
                        pos = [float(tmp[-4]), float(tmp[-3]), float(tmp[-2])]
                        species.append(tmp[0])
                        coords.append(pos)
                break

    wp0 = Group(sg)[0]
    lattice = Lattice.from_para(a, b, c, alpha, beta, gamma, lat_type)
    sites = []
    for specie, coord in zip(species, coords):
        pt, wp, _ = WP_merge(coord, lattice.matrix, wp0, tol=0.1)
        sites.append(atom_site(wp, pt, specie, diag))
    return lattice, sites
d6d164a6425d088a17bb449b75e875047a5fbc29
25,673
import random


def custom_data_splits(src_sents, trg_sents, val_samples=3000, seed=SEED):
    """
    Splits data based on a custom number of validation/test samples.

    :param src_sents: the source sentences
    :param trg_sents: the target sentences
    :param val_samples: number of validation/test samples
    :param seed: the random seed
    :return: training, validation and test dataset splits
    """
    assert len(src_sents) == len(trg_sents)
    data = list(zip(src_sents, trg_sents))
    num_samples = len(data)
    print("Total samples: ", num_samples)
    print("Shuffling data....")
    random.seed(seed)  # 30
    random.shuffle(data)

    val_set = data[:val_samples]
    test_set = data[val_samples:val_samples + val_samples]
    train_set = data[val_samples + val_samples:]

    print("Total train:", len(train_set))
    print("Total validation:", len(val_set))
    print("Total test:", len(test_set))
    print("All:", len(test_set) + len(train_set) + len(val_set))

    samples = train_set[:5] + val_set[:5] + test_set[:5]
    train_set = list(zip(*train_set))
    val_set = list(zip(*val_set))
    test_set = list(zip(*test_set))
    samples_set = list(zip(*samples))
    return train_set, val_set, test_set, samples_set
5a4754ce9fe400248a46f4868aeaa0b96ebd5760
25,674
def normalize(output):
    """Convert a null or empty value to the placeholder '暂无' ("none available")."""
    if not output:
        return '暂无'
    else:
        return output
18af58c74325522a64dcfd98a75f55e677c01ca3
25,675
def sgd(args):
    """
    Wrapper of torch.optim.SGD (PyTorch >= 1.0.0).
    Implements stochastic gradient descent (optionally with momentum).
    """
    # -1 is used as a "not set" sentinel for the numeric hyperparameters
    # (the original also had a no-op line resetting nesterov to itself)
    args.lr = 0.01 if args.lr == -1 else args.lr
    args.weight_decay = 0 if args.weight_decay == -1 else args.weight_decay
    args.momentum = 0 if args.momentum == -1 else args.momentum
    args.dampening = 0 if args.dampening == -1 else args.dampening

    def sgd_wrapper(param_groups):
        pytorch_support(required_version='1.0.0', info_str='Optimizer - SGD')
        return optim.SGD(
            param_groups,
            lr=args.lr,
            momentum=args.momentum,
            dampening=args.dampening,
            weight_decay=args.weight_decay,
            nesterov=args.nesterov)

    return sgd_wrapper
17a852165766bcf02f92bac4c847684f2dcfb133
25,676
def normal_conjugates_known_scale_posterior(prior, scale, s, n):
  """Posterior Normal distribution with conjugate prior on the mean.

  This model assumes that `n` observations (with sum `s`) come from a
  Normal with unknown mean `loc` (described by the Normal `prior`)
  and known variance `scale**2`. The "known scale posterior" is
  the distribution of the unknown `loc`.

  Accepts a prior Normal distribution object, having parameters
  `loc0` and `scale0`, as well as known `scale` values of the predictive
  distribution(s) (also assumed Normal),
  and statistical estimates `s` (the sum(s) of the observations) and
  `n` (the number(s) of observations).

  Returns a posterior (also Normal) distribution object, with parameters
  `(loc', scale'**2)`, where:

  ```
  mu ~ N(mu', sigma'**2)
  sigma'**2 = 1/(1/sigma0**2 + n/sigma**2),
  mu' = (mu0/sigma0**2 + s/sigma**2) * sigma'**2.
  ```

  Distribution parameters from `prior`, as well as `scale`, `s`, and `n`,
  will broadcast in the case of multidimensional sets of parameters.

  Args:
    prior: `Normal` object of type `dtype`:
      the prior distribution having parameters `(loc0, scale0)`.
    scale: tensor of type `dtype`, taking values `scale > 0`.
      The known stddev parameter(s).
    s: Tensor of type `dtype`. The sum(s) of observations.
    n: Tensor of type `int`. The number(s) of observations.

  Returns:
    A new Normal posterior distribution object for the unknown observation
    mean `loc`.

  Raises:
    TypeError: if dtype of `s` does not match `dtype`, or `prior` is not a
      Normal object.
  """
  if not isinstance(prior, normal.Normal):
    raise TypeError("Expected prior to be an instance of type Normal")

  if s.dtype != prior.dtype:
    raise TypeError(
        "Observation sum s.dtype does not match prior dtype: %s vs. %s"
        % (s.dtype, prior.dtype))

  n = math_ops.cast(n, prior.dtype)
  scale0_2 = math_ops.square(prior.scale)
  scale_2 = math_ops.square(scale)
  scalep_2 = 1.0 / (1 / scale0_2 + n / scale_2)
  return normal.Normal(
      loc=(prior.loc / scale0_2 + s / scale_2) * scalep_2,
      scale=math_ops.sqrt(scalep_2))
0bc94999ee10ce63ba0156510a9807523de6c085
25,677
def make_matrix(num_rows, num_cols, entry_fn):
    """Returns the num_rows x num_cols matrix
    whose (i,j)th entry is entry_fn(i, j)"""
    return [[entry_fn(i, j)             # given i, create a list
             for j in range(num_cols)]  # [entry_fn(i, 0), ...]
            for i in range(num_rows)]
f706773245730eab3ce6cf41b0f6e81fbe3d52ab
25,678
def add_relationtoforeignsign(request):
    """Add a new relationtoforeignsign instance"""
    if request.method == "POST":
        form = RelationToForeignSignForm(request.POST)
        if form.is_valid():
            sourceid = form.cleaned_data['sourceid']
            loan = form.cleaned_data['loan']
            other_lang = form.cleaned_data['other_lang']
            other_lang_gloss = form.cleaned_data['other_lang_gloss']
            try:
                gloss = Gloss.objects.get(pk=int(sourceid))
            except Gloss.DoesNotExist:
                # Translators: HttpResponseBadRequest
                return HttpResponseBadRequest(_("Source gloss not found."),
                                              content_type='text/plain')

            rel = RelationToForeignSign(gloss=gloss, loan=loan,
                                        other_lang=other_lang,
                                        other_lang_gloss=other_lang_gloss)
            rel.save()

            return HttpResponseRedirect(
                reverse('dictionary:admin_gloss_view',
                        kwargs={'pk': gloss.id}) + '?editrelforeign')
        else:
            print(form)
            # Translators: HttpResponseBadRequest
            return HttpResponseBadRequest(_("Form not valid"),
                                          content_type='text/plain')

    # fallback to redirecting to the requesting page
    return HttpResponseRedirect('/')
44e6a80ed4596b9dae48ce8f4ed37927feb1ec71
25,679
def check_url(url):
    """
    Check if a URL exists without downloading the whole file.
    We only check the URL header.
    """
    good_codes = [httplib.OK, httplib.FOUND, httplib.MOVED_PERMANENTLY]
    return get_server_status_code(url) in good_codes
f6dede6aaf41f404c182052cd4dc5708b9a0b879
25,680
from datetime import datetime


def ap_time_filter(value):
    """
    Converts a datetime or a string in hh:mm format into AP style.
    """
    if isinstance(value, basestring):
        value = datetime.strptime(value, '%I:%M')
    value_tz = _set_timezone(value)
    value_year = value_tz.replace(year=2016)
    return value_year.strftime('%-I:%M')
0539cd58bfa4b7ee647ac88a58bcac93108d4819
25,682
import numpy as np


def make_signal(time, amplitude=1, phase=0, period=1):
    """
    Make an arbitrary sinusoidal signal with given amplitude, phase and
    period over a specific time interval.

    Parameters
    ----------
    time : np.ndarray
        Time series in number of days.
    amplitude : float, optional
        A specific amplitude (defaults to 1).
    phase : float, optional
        A given phase offset in degrees (defaults to 0).
    period : float, optional
        A period for the sine wave (defaults to 1).

    Returns
    -------
    signal : np.ndarray
        The time series with the given parameters.
    """
    signal = (amplitude *
              np.sin((2 * np.pi * 1 / period * (time - np.min(time)) +
                      np.deg2rad(phase))))
    return signal
9f940922ae2a4bf1e3ff7d1c13351f4d07c40ca8
25,683
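A short usage sketch for the sine-wave generator above, assuming make_signal is in scope:

import numpy as np

time = np.linspace(0, 10, 1000)                        # ten days, 1000 samples
sig = make_signal(time, amplitude=2, phase=90, period=5)
# a 2-unit amplitude sine with a 5-day period, shifted by a quarter cycle
print(sig.shape, float(sig.max()))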
from sklearn import linear_model


def train_data(X, y):
    """
    :param X: numpy array for date(0-5), school_id
    :param y: output for the data provided
    :return: the learned linear regression model
    """
    regression = linear_model.LinearRegression()
    regression.fit(X, y)
    return regression
abaa0ba6f02ed111b6ec9b0945e9e26c643836be
25,684
import re

from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize


def clean_text(s, stemmer, lemmatiser):
    """
    Takes a string as input and cleans it by removing non-ascii characters,
    lowercasing it, removing stopwords and lemmatising/stemming it.

    - Input:
        * s (string)
        * stemmer (object that stems a string)
        * lemmatiser (object that lemmatises a string)
    - Output:
        * text (string)
    """
    stop_words = set(stopwords.words('english'))

    # Remove non-ASCII characters
    text = removeNonAscii(s)
    text = text.lower()

    # Remove any undesired character (the original reused `s` as the loop
    # variable here, shadowing the input parameter)
    for ch in ['#', '|', '*', '.', ',', ';', '!', ':']:
        text = text.replace(ch, '')

    # Remove digits
    for digit in [str(x) for x in range(10)]:
        text = text.replace(digit, '')

    text = text.replace('\n', ' ')
    text = re.sub(' +', ' ', text)

    # Apply stemmer/lemmatiser
    word_tokens = word_tokenize(text)
    words = []
    for w in word_tokens:
        if w in stop_words:
            continue
        if stemmer:
            words.append(stemmer.stem(w))
        if lemmatiser:
            words.append(lemmatiser.lemmatize(w))
    text = ' '.join(words)
    return text
0bcb14378c6b72e24526c7eff9f1daf2b6871152
25,685
def make_rst_sample_table(data):
    """Format sample table"""
    if data is None:
        return ""
    else:
        tab_tt = tt.Texttable()
        tab_tt.set_precision(2)
        tab_tt.add_rows(data)
        return tab_tt.draw()
160b28355f1bea80878417f2a92e5dc31dde66cd
25,686
def is_thunk(space, w_obj):
    """Check if an object is a thunk that has not been computed yet."""
    while 1:
        w_alias = w_obj.w_thunkalias
        if w_alias is None:
            return space.w_False
        if w_alias is w_NOT_COMPUTED_THUNK:
            return space.w_True
        w_obj = w_alias
1918a7d79d02a2a20e6f7ead8b7a2dc6cfe05a85
25,687
def plot_roc_curve(
    fpr,
    tpr,
    roc_auc=None,
    ax=None,
    figsize=None,
    style="seaborn-ticks",
    **kwargs,
):
    """Plots a receiver operating characteristic (ROC) curve.

    Args:
        fpr: an array of false positive rates
        tpr: an array of true positive rates
        roc_auc (None): the area under the ROC curve
        ax (None): an optional matplotlib axis to plot in
        figsize (None): an optional ``(width, height)`` for the figure, in
            inches
        style ("seaborn-ticks"): a style to use for the plot
        **kwargs: optional keyword arguments for matplotlib's ``plot()``

    Returns:
        a matplotlib figure
    """
    with plt.style.context(style):
        display = skm.RocCurveDisplay(fpr=fpr, tpr=tpr, roc_auc=roc_auc)
        display.plot(ax=ax, **kwargs)
        if figsize is not None:
            display.figure_.set_size_inches(*figsize)

    return display.figure_
d4d6f9d33857598a16b04097de035a5a7a3f354b
25,688
def my_place_or_yours(our_address: Address, partner_address: Address) -> Address:
    """Convention to compare two addresses.

    Compares lexicographical order and returns the preceding address.
    """
    if our_address == partner_address:
        raise ValueError("Addresses to compare must differ")

    sorted_addresses = sorted([our_address, partner_address])
    return sorted_addresses[0]
991b2d44042520eea28817f33cbb9421d7b99a78
25,689
def get_dataset_json(met, version):
    """Generate HySDS dataset JSON from met JSON."""
    return {
        "version": version,
        "label": met['data_product_name'],
        "starttime": met['sensingStart'],
    }
d84f3652866c83e8c1618a9f87bc3bf6b5c6a0cf
25,690
def encode(value):
    """
    pyg_mongo.encoder is similar to pyg_base.encoder, the only exception
    being that bson.objectid.ObjectId values, used by mongodb to generate
    the document _id, are not encoded.

    Parameters
    ----------
    value : value/document to be encoded

    Returns
    -------
    encoded value/document
    """
    return encode_(value, ObjectId)
fa7dec607dca66736e3b9203bf97289a0ffdd733
25,692
def locate_line_segments(isolated_edges):
    """
    Extracts line segments from observed lane edges using the Hough line
    transform.

    :param isolated_edges: Lane edges returned from isolated_lane_edges()
    :return: Line segments extracted by HoughLinesP()
    """
    rho = 1
    theta = np.pi / 180
    threshold = 10
    min_line_length = 8
    max_line_gap = 4
    segments = cv2.HoughLinesP(isolated_edges, rho, theta, threshold,
                               np.array([]), min_line_length, max_line_gap)
    return segments
3b26da0535b327dfac4b268552209c75481bb4d2
25,693
def proj(A, B):
    """Returns the projection of A onto the hyper-plane defined by B"""
    return A - (A * B).sum() * B / (B ** 2).sum()
982cdfb1564166dce14432bf24404f066e2acee3
25,694
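A self-contained worked example for the projection record above. The formula removes the component of A along B, so the result lies in the hyper-plane orthogonal to B:

import numpy as np

def proj(A, B):
    return A - (A * B).sum() * B / (B ** 2).sum()

A = np.array([3.0, 4.0])
B = np.array([1.0, 0.0])
P = proj(A, B)                          # -> [0., 4.]
assert np.isclose((P * B).sum(), 0.0)   # orthogonal to B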
def v6_multimax(iterable):
    """Return a list of all maximum values.

    Bonus 2: Make the function work with lazy iterables.

    Our earlier solutions fail this requirement because they loop through
    the iterable twice, and generators can only be looped over once.
    Instead, we keep track of the maximum values as we loop and manually
    build up a list of maximums.
    """
    maximums = []
    for item in iterable:
        if not maximums or maximums[0] == item:
            maximums.append(item)
        elif item > maximums[0]:
            maximums = [item]
    return maximums
5539adb0dcb6c9db4f8f2f68487fc13c6aa8d067
25,695
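A quick check of the generator-friendly behaviour, assuming v6_multimax is in scope:

# works with a one-shot generator because it iterates only once
nums = (n for n in [4, 1, 4, 2, 4])
assert v6_multimax(nums) == [4, 4, 4]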
import traceback


def format_traceback_string(exception):
    """Format exception traceback as a single string.

    Args:
        exception: Exception object.

    Returns:
        Full exception traceback as a string.
    """
    return '\n'.join(
        traceback.TracebackException.from_exception(exception).format()
    )
debdf53966b26b6562671bf48d283a3bf10d85d5
25,696
def get_stored_file(file_id):
    """Get the "stored file" or the summary about the file."""
    return JsonResponse(StoredFile.objects(id=ObjectId(file_id)).first())
860f6f5dd24e5ebaf59fff1f4c82f4b5c7ce6da5
25,697
def _compute_array_job_index():
    # type: () -> int
    """
    Computes the absolute index of the current array job. This is determined
    by summing the compute-environment-specific environment variable and the
    offset (if one is set). The offset will be set and used when the user
    requests that the job runs in a number of slots less than the size of
    the input.

    :rtype: int
    """
    offset = 0
    if _os.environ.get("BATCH_JOB_ARRAY_INDEX_OFFSET"):
        offset = int(_os.environ.get("BATCH_JOB_ARRAY_INDEX_OFFSET"))
    return offset + int(_os.environ.get(_os.environ.get("BATCH_JOB_ARRAY_INDEX_VAR_NAME")))
5c9b451af75f894ad49dc8aa95b7c1a80e6e9c96
25,698
def make_transpose(transpose_name, input_name, input_type, perm):
    """Makes a transpose node.

    Args:
        transpose_name: name of the transpose op.
        input_name: name of the op to be the transpose op's input.
        input_type: type of the input node.
        perm: permutation array, e.g. [0, 2, 3, 1] for NCHW to NHWC.

    Returns:
        A (transpose, permutation) pair of NodeDefs to be added to a GraphDef.
    """
    perm_bytes = np.array(perm, dtype=np.int32).tobytes()
    perm_def = PERMUTE_TMPL % (transpose_name + '/perm', len(perm))
    perm_node = tf.compat.v1.NodeDef()
    text_format.Merge(perm_def, perm_node)
    perm_node.attr['value'].tensor.tensor_content = perm_bytes

    transpose_def = TRANSPOSE_TMPL % (
        transpose_name, input_name, perm_node.name)
    transpose_node = tf.compat.v1.NodeDef()
    text_format.Merge(transpose_def, transpose_node)
    transpose_node.attr['T'].type = input_type
    return transpose_node, perm_node
21e05caed8a439f748f3fa939b5bff9864c2525d
25,699
def is_batch_norm(layer):
    """Return True if `layer` is a batch normalisation layer."""
    classname = layer.__class__.__name__
    return classname.find('BatchNorm') != -1
6494b75a3fbfbfd55ff43b05536a1094290ea915
25,700
import torch


def predictive_entropy(y_input, y_target):
    """
    Computes the entropy of predictions by the model.

    :param y_input: Tensor [N, samples, class]
    :param y_target: Tensor [N] - Not used here.
    :return: mean entropy over all examples
    """
    y_input = torch.exp(y_input)  # model output is log_softmax so we exponentiate

    # Average over all the samples to marginalize over epsilon;
    # y_input is now [N, class] and we want its entropy
    y_posterior = torch.mean(y_input, dim=1)

    # We add a small constant to each term to avoid infinities
    epsilon = 1e-25
    y_posterior += epsilon

    entropy = -torch.mean(y_posterior * torch.log(y_posterior), dim=1)  # [N] entropy on each example
    return torch.mean(entropy).cpu().numpy()
6c3c4c3cfc93d0c19e2662b54a9b6d41146264d5
25,701
def exec_cmd(cmd_args, *args, **kw):
    """
    Execute a shell call using Subprocess.  All additional `*args` and
    `**kwargs` are passed directly to subprocess.Popen.  See `Subprocess
    <http://docs.python.org/library/subprocess.html>`_ for more information
    on the features of `Popen()`.

    :param cmd_args: List of command line arguments.
    :type cmd_args: list.
    :param args: Additional arguments are passed to Popen().
    :param kwargs: Additional keyword arguments are passed to Popen().
    :returns: The (stdout, stderror, return_code) of the command.
    :rtype: tuple

    Usage:

    .. code-block:: python

        from cement.utils import shell

        stdout, stderr, exitcode = shell.exec_cmd(['echo', 'helloworld'])

    """
    if 'stdout' not in kw.keys():
        kw['stdout'] = PIPE
    if 'stderr' not in kw.keys():
        kw['stderr'] = PIPE
    proc = Popen(cmd_args, *args, **kw)
    (stdout, stderr) = proc.communicate()
    proc.wait()
    return (stdout, stderr, proc.returncode)
c946fce186e56d19c2e182e2061f4f9739a2ce59
25,702
# the original imported only datetime, but the function also needs timedelta
from datetime import datetime, timedelta


def roundTime(dt=None, roundTo=1):
    """Round a datetime object to any time period (in seconds).

    dt : datetime.datetime object, default now.
    roundTo : Closest number of seconds to round to, default 1 second.

    Author: Thierry Husson 2012 - Use it as you want but don't blame me.
    http://stackoverflow.com/questions/3463930/how-to-round-the-minute-of-a-datetime-object-python/10854034#10854034
    """
    if dt is None:
        dt = datetime.now()
    seconds = total_seconds(dt - dt.min)
    # // is a floor division, not a comment on the following line:
    rounding = (seconds + roundTo / 2) // roundTo * roundTo
    return dt + timedelta(0, rounding - seconds, -dt.microsecond)
c17cbf9092cc2a88cb486afd1a7ff0ad984987bd
25,703
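A small usage sketch, assuming roundTime and its total_seconds helper are in scope:

from datetime import datetime

dt = datetime(2020, 1, 1, 10, 44, 59)
print(roundTime(dt, roundTo=30 * 60))   # nearest half hour -> 2020-01-01 10:30:00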
# the original imported OrderedDict from typing and omitted the BytesIO
# and ElementTree imports the body needs
from collections import OrderedDict
from io import BytesIO
from xml.etree.ElementTree import ElementTree

import requests


def get_imagery_layers(url):
    """
    Get the list of available image layers that can be used as background
    or foreground based on the URL to a WTML (WorldWide Telescope image
    collection) file.

    Parameters
    ----------
    url : `str`
        The URL of the image collection.
    """
    available_layers = OrderedDict()

    # Get the XML describing the available surveys
    response = requests.get(url)
    assert response.ok

    b = BytesIO(response.content)
    e = ElementTree()
    t = e.parse(b)

    for survey in t.iter('ImageSet'):
        name = survey.attrib['Name']
        thumbnail_url = survey.find('ThumbnailUrl').text
        if not thumbnail_url:
            thumbnail_url = None
        available_layers[name] = {'thumbnail': thumbnail_url}

    return available_layers
9e5a37552d18ebd1994c892c2af07bd3a3445ac1
25,704
def _typecheck(op1, op2):
    """Check the type of parameters used and return the correct enum type."""
    if isinstance(op1, CipherText) and isinstance(op2, CipherText):
        return ParamTypes.CTCT
    elif isinstance(op1, PlainText) and isinstance(op2, PlainText):
        return ParamTypes.PTPT
    elif isinstance(op1, CipherText) and isinstance(op2, PlainText):
        return ParamTypes.CTPT
    elif isinstance(op1, PlainText) and isinstance(op2, CipherText):
        return ParamTypes.PTCT
    else:
        return None
872a05347ac26324e26f7444798c977f5cfad2fa
25,705
def vm_update_cb(result, task_id, vm_uuid=None, new_node_uuid=None):
    """
    A callback function for api.vm.base.views.vm_manage.
    """
    vm = Vm.objects.select_related('dc').get(uuid=vm_uuid)
    _vm_update_cb_done(result, task_id, vm)
    msg = result.get('message', '')
    force = result['meta']['apiview']['force']

    if result['returncode'] == 0 and (force or msg.find('Successfully updated') >= 0):
        json = result.pop('json', None)
        try:  # save json from smartos
            json_active = vm.json.load(json)
        except Exception as e:
            logger.exception(e)
            logger.error('Could not parse json output from PUT vm_manage(%s). Error: %s', vm_uuid, e)
            raise TaskException(result, 'Could not parse json output')

        vm_delete_snapshots_of_removed_disks(vm)  # Do this before updating json and json_active
        vm.json = json_active
        update_fields = ['enc_json', 'enc_json_active', 'changed']
        ignored_changed_vm_attrs = (
            'set_customer_metadata',
            'remove_customer_metadata',
            'create_timestamp',
            'boot_timestamp',
            'autoboot',
            'vnc_port',
            'update_disks',
        )

        if new_node_uuid:
            update_dict = vm.json_update()
            for i in ignored_changed_vm_attrs:
                update_dict.pop(i, None)

            if update_dict:
                raise TaskException(result, 'VM definition on compute node differs from definition in DB in '
                                            'following attributes: %s' % ','.join(update_dict.keys()))
            update_fields.append('node_id')

        old_json_active = vm.json_active
        vm.json_active = json_active

        if new_node_uuid:
            node = Node.objects.get(uuid=new_node_uuid)
            vm.set_node(node)

        with transaction.atomic():
            vm.save(update_node_resources=True, update_storage_resources=True, update_fields=update_fields)
            vm_update_ipaddress_usage(vm)
            vm_json_active_changed.send(task_id, vm=vm, old_json_active=old_json_active)  # Signal!

        if new_node_uuid:
            vm_node_changed.send(task_id, vm=vm, force_update=True)  # Signal!
            result['message'] = 'Node association successfully changed on VM %s' % vm.hostname

        if vm.json_changed():
            vm_update(vm)

    else:
        logger.error('Found nonzero returncode in result from PUT vm_manage(%s). Error: %s', vm_uuid, msg)
        raise TaskException(result, 'Got bad return code (%s). Error: %s' % (result['returncode'], msg))

    task_log_cb_success(result, task_id, vm=vm, **result['meta'])
    return result
3d7ad728cb6c3ddd8fe3638f98223635378ce8d7
25,706
from typing import Union


def _class_effective_mesh_size(
    geo_graph: geograph.GeoGraph, class_value: Union[int, str]
) -> Metric:
    """
    Return effective mesh size of given class.

    Definition taken from:
    https://pylandstats.readthedocs.io/en/latest/landscape.html
    """
    class_areas = geo_graph.df["geometry"][
        geo_graph.df["class_label"] == class_value
    ].area
    total_area = geo_graph.get_metric("total_area").value
    description = (
        "A <= MESH <= A ; MESH approaches its minimum when there is a single"
        " corresponding patch of one pixel, and approaches its maximum when the "
        "landscape consists of a single patch."
    )
    return Metric(
        value=np.sum(class_areas ** 2) / total_area,
        name=f"effective_mesh_size_class={class_value}",
        description=description,
        variant="conventional",
        unit="CRS.unit**2",
    )
fd98849f6a7ff9d9cf17ecd15a3bcd790f6dcb6f
25,707
def noam_schedule(step, warmup_step=4000):
    """Original Transformer schedule."""
    if step <= warmup_step:
        return step / warmup_step
    return (warmup_step ** 0.5) * (step ** -0.5)
ad42f6f478f06c2641cb189db769c4a6e0272f6f
25,708
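A small sketch of the warmup behaviour, assuming noam_schedule is in scope: the multiplier ramps linearly to 1.0 at warmup_step, then decays as step ** -0.5.

for step in (1, 2000, 4000, 16000):
    print(step, noam_schedule(step))
# 1 -> 0.00025, 2000 -> 0.5, 4000 -> 1.0, 16000 -> 0.5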
def word_list_to_long(val_list, big_endian=True):
    """Word list (16 bits int) to long list (32 bits int)

    By default word_list_to_long() uses big endian order. To use little
    endian, set the big_endian param to False.

    :param val_list: list of 16 bits int value
    :type val_list: list
    :param big_endian: True for big endian/False for little (optional)
    :type big_endian: bool
    :returns: list of 32 bits int value
    :rtype: list
    """
    # allocate list for long int
    long_list = [None] * int(len(val_list) / 2)
    # fill registers list with register items
    for i, item in enumerate(long_list):
        if big_endian:
            long_list[i] = (val_list[i * 2] << 16) + val_list[(i * 2) + 1]
        else:
            long_list[i] = (val_list[(i * 2) + 1] << 16) + val_list[i * 2]
    # return long list
    return long_list
954d1cefc521c2f8fd88492858590df8bc0ce120
25,710
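A quick check of both byte orders, assuming word_list_to_long is in scope:

# two 16-bit registers -> one 32-bit value
assert word_list_to_long([0x1234, 0x5678]) == [0x12345678]
assert word_list_to_long([0x1234, 0x5678], big_endian=False) == [0x56781234]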
from typing import List


def _show_problems_info(id_tournament: int) -> List:
    """
    Return information about the problems of a tournament given its id
    (id_tournament).
    """
    # the original hardcoded contestId=1477 instead of interpolating the argument
    return loop.run_until_complete(get_request(
        f'https://codeforces.com/api/contest.standings'
        f'?contestId={id_tournament}&from=1&count=5&showUnofficial=true'))['problems']
36bafa02b2523c538fefbb5d77496dfddcbda6a1
25,711
from typing import Dict
from typing import Any


def make_shell_context() -> Dict[str, Any]:
    """Make objects available during shell"""
    return {
        "db": db,
        "api": api,
        "Playlist": Playlist,
        "User": User,
        "BlacklistToken": BlacklistToken,
    }
c2121fc95a0916021338d2b39debcc7d88933982
25,712
async def find_user_by_cards(app, cards, fields=["username"]):
    """Find a user by a list of cards assigned to them.

    Parameters
    ----------
    app : aiohttp.web.Application
        The aiohttp application instance
    cards : list
        The list of cards to search for
    fields : list, default=["username"]
        The fields to be returned in the user document

    Returns
    -------
    user : dict
        The user document
    """
    if not isinstance(cards, list):
        cards = [cards]

    projection = {}
    for field in fields:
        projection[field] = 1

    if "_id" not in fields:
        projection["_id"] = 0

    return await app["db"].users.find_one({"cards": cards}, projection)
ef5b20ea668b39eda51c859a3b33f1af30a644f5
25,713
def _calc_y_from_dataframe(trafo_df, baseR):
    """
    Calculate the shunt susceptance y from the transformer dataframe.

    INPUT:
        **trafo_df** (DataFrame) - The dataframe in net.trafo which contains
        transformer calculation values.

    RETURN:
        **susceptance** (1d array, np.complex128) - The susceptance in pu in
        the form (-b_img, -b_real)
    """
    # Calculate susceptance
    unl_squared = trafo_df["vn_lv_kv"].values ** 2
    b_real = trafo_df["pfe_kw"].values / (1000. * unl_squared) * baseR
    b_img = (trafo_df["i0_percent"].values / 100. * trafo_df["sn_kva"].values / 1000.) ** 2 \
        - (trafo_df["pfe_kw"].values / 1000.) ** 2
    b_img[b_img < 0] = 0
    b_img = np.sqrt(b_img) * baseR / unl_squared
    return -b_real * 1j - b_img
f6b3493dd56d93a269b2c82431a5ef0a6a7ff946
25,714
def mutual_coherence(A, B):
    """Mutual coherence between two dictionaries A and B."""
    max_val, index = mutual_coherence_with_index(A, B)
    return max_val
8e6f8d499e84394ef1af551d4fea9ab9a259c05a
25,715
def pdf_page_enumeration(pdf):
    """Generate a list of pages, using /PageLabels (if it exists).

    Returns a list of labels."""
    try:
        pagelabels = pdf.trailer["/Root"]["/PageLabels"]
    except:
        # ("No /Root/PageLabels object"), so infer the list.
        return range(1, pdf.getNumPages() + 1)

    # Select the item that is most likely to contain the information you
    # desire; e.g.
    #   {'/Nums': [0, IndirectObject(42, 0)]}
    # here, we only have "/Nums".
    try:
        pagelabels_nums = pdf.trailer["/Root"]["/PageLabels"]["/Nums"]
    except:
        raise CommandError("Malformed PDF, /Root/PageLabels but no .../Nums object")

    # At this point we have either the object or the list.
    # Make it a list.
    if isinstance(pagelabels_nums, (list,)):
        pagelabels_nums_list = pagelabels_nums
    else:
        pagelabels_nums_list = list(pagelabels_nums)

    labels = []
    style = None  # default style = '/D'
    prefix = ''
    next_pageno = 1
    for i in range(0, pdf.getNumPages()):
        if len(pagelabels_nums_list) > 0 and i >= pagelabels_nums_list[0]:
            pagelabels_nums_list.pop(0)  # discard index
            pnle = pagelabels_nums_list.pop(0)
            style = pnle.get('/S', '/D')
            prefix = pnle.get('/P', '')
            next_pageno = pnle.get('/St', 1)
        pageno_str = ''
        if style == '/D':
            pageno_str = str(next_pageno)
        elif style == '/A':
            pageno_str = int_to_page_alpha(next_pageno, 'A')
        elif style == '/a':
            pageno_str = int_to_page_alpha(next_pageno, 'a')
        elif style == '/R':
            pageno_str = int_to_roman(next_pageno)
        elif style == '/r':
            pageno_str = int_to_roman(next_pageno).lower()
        else:
            raise CommandError("Malformed PDF: unknown page numbering style " + style)
        labels.append(prefix + pageno_str)
        next_pageno += 1
    return labels
133c97f4d25dc562d08a7d45c9f85cbe04776162
25,716
import torch


def transform_points_torch(points, homography):
    """Transforms input points according to homography.

    Args:
        points: [..., H, W, 3]; pixel (u,v,1) coordinates.
        homography: [..., 3, 3]; desired matrix transformation

    Returns:
        output_points: [..., H, W, 3]; transformed (u,v,w) coordinates.
    """
    # Because the points have two additional dimensions as they vary across
    # the width and height of an image, we need to reshape to multiply by
    # the per-image homographies.
    points_orig_shape = points.shape
    points_reshaped_shape = list(homography.shape)
    points_reshaped_shape[-2] = -1

    points_reshaped = torch.reshape(points, points_reshaped_shape)
    transformed_points = torch.matmul(points_reshaped, transpose_torch(homography))
    transformed_points = torch.reshape(transformed_points, points_orig_shape)
    return transformed_points
f45bf1b94c360241272bc084adcf6fee1b9f3afe
25,717
def _sane_fekete_points(directions, n_dim):
    """
    Get fekete points for a DirectionalSimulator object.
    Use the get_directions function for other use cases.
    """
    if directions is None:
        n_dir = n_dim * 80
    elif isinstance(directions, int):
        n_dir = directions
    else:
        try:
            n_dir, n_dim_dir = directions.shape
        except AttributeError:
            err_msg = "Only an integer or a numpy array is accepted as "
            err_msg += "directions."
            raise TypeError(err_msg)
        if n_dim != n_dim_dir:
            err_msg = "Number of dimensions of the directions does not "
            err_msg += "match the number of marginal distributions"
            raise ValueError(err_msg)
        return directions
    return fekete_points(n_dim, n_dir, max_iters=100, tolerance=1e-12)
f1d0a7dfa1438f2a4071536fb048504074e8b95d
25,718
def backoffPolicy(initialDelay=1.0, maxDelay=60.0, factor=1.5,
                  jitter=_goodEnoughRandom):
    """
    A timeout policy for L{ClientService} which computes an exponential
    backoff interval with configurable parameters.

    @since: 16.1.0

    @param initialDelay: Delay for the first reconnection attempt (default
        1.0s).
    @type initialDelay: L{float}

    @param maxDelay: Maximum number of seconds between connection attempts
        (default 60 seconds, or one minute).  Note that this value is before
        jitter is applied, so the actual maximum possible delay is this value
        plus the maximum possible result of C{jitter()}.
    @type maxDelay: L{float}

    @param factor: A multiplicative factor by which the delay grows on each
        failed reattempt.  Default: 1.5.
    @type factor: L{float}

    @param jitter: A 0-argument callable that introduces noise into the
        delay.  By default, C{random.random}, i.e. a pseudorandom
        floating-point value between zero and one.
    @type jitter: 0-argument callable returning L{float}

    @return: a 1-argument callable that, given an attempt count, returns a
        floating point number; the number of seconds to delay.
    @rtype: see L{ClientService.__init__}'s C{retryPolicy} argument.
    """
    def policy(attempt):
        try:
            delay = min(initialDelay * (factor ** min(100, attempt)), maxDelay)
        except OverflowError:
            delay = maxDelay
        return delay + jitter()
    return policy
679185a35f4e830ded528d59d77b2bf91a548999
25,719
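A deterministic usage sketch for the backoff policy above (zero jitter is an assumption made only so the output is reproducible):

policy = backoffPolicy(initialDelay=1.0, maxDelay=60.0, factor=1.5,
                       jitter=lambda: 0.0)   # zero jitter for the demo
print([round(policy(attempt), 2) for attempt in range(5)])
# -> [1.0, 1.5, 2.25, 3.38, 5.06]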
def block_group(inputs,
                filters,
                strides,
                block_fn,
                block_repeats,
                conv2d_op=None,
                activation=tf.nn.swish,
                batch_norm_activation=nn_ops.BatchNormActivation(),
                dropblock=nn_ops.Dropblock(),
                drop_connect_rate=None,
                data_format='channels_last',
                name=None,
                is_training=False):
  """Creates one group of blocks for NAS-FPN."""
  if block_fn == 'conv':
    inputs = conv2d_op(
        inputs,
        filters=filters,
        kernel_size=(3, 3),
        padding='same',
        data_format=data_format,
        name='conv')
    inputs = batch_norm_activation(
        inputs, is_training=is_training, relu=False, name='bn')
    inputs = dropblock(inputs, is_training=is_training)
    return inputs

  if block_fn != 'bottleneck':
    raise ValueError('Block function {} not implemented.'.format(block_fn))

  _, _, _, num_filters = inputs.get_shape().as_list()
  block_fn = nn_blocks.bottleneck_block
  use_projection = not (num_filters == (filters * 4) and strides == 1)

  return resnet.block_group(
      inputs=inputs,
      filters=filters,
      strides=strides,
      use_projection=use_projection,
      block_fn=block_fn,
      block_repeats=block_repeats,
      activation=activation,
      batch_norm_activation=batch_norm_activation,
      dropblock=dropblock,
      drop_connect_rate=drop_connect_rate,
      data_format=data_format,
      name=name,
      is_training=is_training)
990dae88aa1fcad078094f3a667ce5ae48f37521
25,720
def GeneratePublicKeyDataFromFile(path):
  """Generate public key data from a path.

  Args:
    path: (bytes) the public key file path given by the command.

  Raises:
    InvalidArgumentException: if the public key file path provided does not
      exist or is too large.

  Returns:
    A public key encoded using the UTF-8 charset.
  """
  try:
    public_key_data = arg_parsers.FileContents()(path).strip()
  except arg_parsers.ArgumentTypeError as e:
    raise gcloud_exceptions.InvalidArgumentException(
        'public_key_file',
        '{}. Please double check your input and try again.'.format(e))
  return public_key_data.encode('utf-8')
a233b31c3ca2328952b09592fe054aff69c5d4ce
25,721
def make_keyword_html(keywords):
    """This function makes a section of HTML code for a list of keywords.

    Args:
        keywords: A list of strings where each string is a keyword.

    Returns:
        A string containing HTML code for displaying keywords, for example:
        '<strong>Ausgangsw&ouml;rter:</strong> Nature, Plants, Fauna'
    """
    res_html = '<strong>Ausgangsw&ouml;rter:</strong> '
    for word in keywords[:-1]:
        res_html += word + ', '
    res_html += keywords[-1]
    return res_html
71e35245ad7b2fe2c67f6a4c27d53374945089bd
25,723
def is_match(set, i):
    """Checks if the three cards all have the same characteristic.

    Args:
        set (2D-list): a set of three cards
        i (int): characteristic

    Returns:
        boolean: boolean
    """
    if (set[0][i] == set[1][i] and set[1][i] == set[2][i]):
        return True
    return False
bd4063dba02f10d7d9d4093aa8f0df8920db17b3
25,724
def notGroup(states, *stateIndexPairs):
    """Like group, but will add a DEFAULT transition to a new end state,
    causing anything in the group to not match by going to a dead state.

    XXX I think this is right...
    """
    start, dead = group(states, *stateIndexPairs)
    finish = len(states)
    states.append([])
    states[start].append((DEFAULT, finish))
    return start, finish
9fecae45c8cadc2ba2a4a7962416b8b31e8b1bce
25,725
def test_softsign():
    """Test using a reference softsign implementation."""
    def softsign(x):
        return np.divide(x, np.ones_like(x) + np.absolute(x))

    x = K.placeholder(ndim=2)
    f = K.function([x], [activations.softsign(x)])
    test_values = get_standard_values()
    result = f([test_values])[0]
    expected = softsign(test_values)
    assert_allclose(result, expected, rtol=1e-05)
1de242e1a545ca7a182c3e3b086a551c0774d578
25,728
def run(
    package_out_dir,
    package_tests_dir,
    work_dir,
    packages):
  """Deploys built *.cipd packages locally and runs tests against them.

  Used to verify the packaged code works when installed as a CIPD package;
  this is important for the infra_python package that has a non-trivial
  structure.

  Args:
    package_out_dir: where to search for built packages.
    package_tests_dir: where to search for test scripts.
    work_dir: where to install/update packages into.
    packages: names of *.cipd files in package_out_dir or [] for all.

  Returns:
    0 on success, 1 on error.
  """
  # Discover what to test.
  paths = []
  if not packages:
    # Enumerate all known tests in tests/*.py and filter them based on
    # availability of corresponding *.cipd package in package_out_dir. It will
    # skip any cross-compiled packages, since they have additional '+<platform>'
    # suffix in the package file name.
    for test in os.listdir(package_tests_dir):
      if not test.endswith('.py'):
        continue
      pkg_file = os.path.join(
          package_out_dir, os.path.splitext(test)[0] + '.cipd')
      if os.path.exists(pkg_file):
        paths.append(pkg_file)
  else:
    for name in packages:
      abs_path = os.path.join(package_out_dir, name)
      if not os.path.isfile(abs_path):
        raise TestException('No such package file: %s' % name)
      paths.append(abs_path)
  paths = sorted(paths)
  if not paths:
    print 'Nothing to test.'
    return 0

  cipd_client = find_cipd()
  if not cipd_client:
    return 1

  # Run all tests sequentially. Most of them are extra fast.
  nuke_temp = False
  if not work_dir:
    work_dir = tempfile.mkdtemp(suffix='cipd_test')
    nuke_temp = True
  work_dir = os.path.abspath(work_dir)
  try:
    fail = False
    for path in paths:
      name = os.path.splitext(os.path.basename(path))[0]
      test_script = os.path.join(package_tests_dir, '%s.py' % name)
      if not os.path.isfile(test_script):
        print 'Skipping tests for %s - no such file: %s' % (name, test_script)
        continue
      try:
        run_test(
            cipd_client=cipd_client,
            package=path,
            work_dir=os.path.join(work_dir, name),
            test_script=test_script)
        print ''
        print 'PASS'
      except TestException as exc:
        print >> sys.stderr, ''
        print >> sys.stderr, 'FAILED! ' * 10
        print >> sys.stderr, 'Tests for %s failed: %s' % (name, exc)
        fail = True
    return 1 if fail else 0
  finally:
    if nuke_temp:
      try:
        shutil.rmtree(work_dir, ignore_errors=True)
      except OSError as exc:
        print >> sys.stderr, 'Failed to delete %s: %s' % (work_dir, exc)
277613b42502725d751cfdc0493f545f4fb46125
25,729
def get_published_questions(quiz):
    """
    Returns the QuerySet of the published questions for the given quiz.
    """
    questions = get_questions_by_quiz(quiz)  # questions are ordered by serial number
    return questions.filter(published=True)
19cec8b57954ae68605de1acefef57f0a68671da
25,730
import numpy as np


def moving_average(x, window):
    """
    :param int window: odd windows preserve phase

    From http://nbviewer.ipython.org/github/demotu/BMC/blob/master/notebooks/DataFiltering.ipynb
    """
    return np.convolve(x, np.ones(window) / window, "same")
c0bd4438d54e3a26bf398f019ca40eec62db83a2
25,731
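A tiny smoke test for the moving-average record above, assuming the function is in scope:

import numpy as np

x = np.array([0.0, 0.0, 3.0, 0.0, 0.0])
print(moving_average(x, 3))   # -> [0. 1. 1. 1. 0.]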
def validate_interval_avg_data(in_data):
    """Validates input to get_avg_since for correct fields.

    Note: this only rejects unexpected keys; it does not verify that all
    expected keys are present.

    Args:
        in_data: dictionary received from POST request

    Returns:
        boolean: whether in_data contains only the expected fields
    """
    expected_keys = {"patient_id", "heart_rate_average_since"}
    for key in in_data.keys():
        if key not in expected_keys:
            return False
    return True
0fcf927c3912bea594554fdffc73312a7da4d628
25,732
def _get_cm_control_command(action='--daemon', cm_venv_name='CM', ex_cmd=None):
    """
    Compose a system level command used to control (i.e., start/stop) CloudMan.
    Accepted values to the ``action`` argument are: ``--daemon``,
    ``--stop-daemon`` or ``--reload``. Note that this method will check if a
    virtualenv ``cm_venv_name`` exists and, if it does, the returned control
    command will include activation of the virtualenv.

    If the extra command ``ex_cmd`` is provided, insert that command into the
    returned activation command.

    Example return string: ``cd /mnt/cm; [ex_cmd]; sh run.sh --daemon``
    """
    if _virtualenv_exists(cm_venv_name):
        cmd = _with_venvburrito("workon {0}; cd {1}; {3}; sh run.sh {2}"
                                .format(cm_venv_name, CM_HOME, action, ex_cmd))
    else:
        cmd = "cd {0}; {2}; sh run.sh {1}".format(CM_HOME, action, ex_cmd)
    return cmd
d6c4448da86ddd790977c4c0c150b748dd8f26b7
25,733
def get_all_db_data() -> list:
    """Return all rows from the database."""
    cursor.execute('''SELECT * FROM news''')
    res = cursor.fetchall()
    return res
52f0e59f892898f15a361c5d1de56898f48ac2d7
25,734
def astra_projector(vol_interp, astra_vol_geom, astra_proj_geom, ndim, impl):
    """Create an ASTRA projector configuration dictionary.

    Parameters
    ----------
    vol_interp : {'nearest', 'linear'}
        Interpolation type of the volume discretization. This determines
        the projection model that is chosen.
    astra_vol_geom : dict
        ASTRA volume geometry.
    astra_proj_geom : dict
        ASTRA projection geometry.
    ndim : {2, 3}
        Number of dimensions of the projector.
    impl : {'cpu', 'cuda'}
        Implementation of the projector.

    Returns
    -------
    proj_id : int
        Handle for the created ASTRA internal projector object.
    """
    if vol_interp not in ('nearest', 'linear'):
        raise ValueError("`vol_interp` '{}' not understood"
                         ''.format(vol_interp))
    impl = str(impl).lower()
    if impl not in ('cpu', 'cuda'):
        raise ValueError("`impl` '{}' not understood"
                         ''.format(impl))

    if 'type' not in astra_proj_geom:
        raise ValueError('invalid projection geometry dict {}'
                         ''.format(astra_proj_geom))

    if ndim == 3 and impl == 'cpu':
        raise ValueError('3D projectors not supported on CPU')

    ndim = int(ndim)

    proj_type = astra_proj_geom['type']
    if proj_type not in ('parallel', 'fanflat', 'fanflat_vec',
                         'parallel3d', 'parallel3d_vec', 'cone', 'cone_vec'):
        raise ValueError('invalid geometry type {!r}'.format(proj_type))

    # Mapping from interpolation type and geometry to ASTRA projector type.
    # "I" means probably mathematically inconsistent. Some projectors are
    # not implemented, e.g. CPU 3d projectors in general.
    type_map_cpu = {'parallel': {'nearest': 'line',
                                 'linear': 'linear'},  # I
                    'fanflat': {'nearest': 'line_fanflat',
                                'linear': 'line_fanflat'},  # I
                    'parallel3d': {'nearest': 'linear3d',  # I
                                   'linear': 'linear3d'},  # I
                    'cone': {'nearest': 'linearcone',  # I
                             'linear': 'linearcone'}}  # I
    type_map_cpu['fanflat_vec'] = type_map_cpu['fanflat']
    type_map_cpu['parallel3d_vec'] = type_map_cpu['parallel3d']
    type_map_cpu['cone_vec'] = type_map_cpu['cone']

    # GPU algorithms not necessarily require a projector, but will in future
    # releases making the interface more coherent regarding CPU and GPU
    type_map_cuda = {'parallel': 'cuda',  # I
                     'parallel3d': 'cuda3d'}  # I
    type_map_cuda['fanflat'] = type_map_cuda['parallel']
    type_map_cuda['fanflat_vec'] = type_map_cuda['fanflat']
    type_map_cuda['cone'] = type_map_cuda['parallel3d']
    type_map_cuda['parallel3d_vec'] = type_map_cuda['parallel3d']
    type_map_cuda['cone_vec'] = type_map_cuda['cone']

    # create config dict
    proj_cfg = {}
    if impl == 'cpu':
        proj_cfg['type'] = type_map_cpu[proj_type][vol_interp]
    else:  # impl == 'cuda'
        proj_cfg['type'] = type_map_cuda[proj_type]
    proj_cfg['VolumeGeometry'] = astra_vol_geom
    proj_cfg['ProjectionGeometry'] = astra_proj_geom
    proj_cfg['options'] = {}

    # Add the hacky 1/r^2 weighting exposed in intermediate versions of
    # ASTRA
    if (proj_type in ('cone', 'cone_vec') and
            astra_supports('cone3d_hacky_density_weighting')):
        proj_cfg['options']['DensityWeighting'] = True

    if ndim == 2:
        return astra.projector.create(proj_cfg)
    else:
        return astra.projector3d.create(proj_cfg)
df3458ea09d2a9bffdced2738404aec419e7b48f
25,735
def __hit(secret_number, choice):
    """Check if the choice is equal to the secret number."""
    return secret_number == choice
55bee8370a2480b5ca84cd5f478fd8eb367276bd
25,736
def minimal_product_data(setup_data):
    """Valid product data (only required fields)"""
    return {
        'name': 'Bar',
        'rating': .5,
        'brand_id': 1,
        'categories_ids': [1],
        'items_in_stock': 111,
    }
ecf027704ea8533d71468527335201a021d8ae4f
25,737
def flatten_column_array(df, columns, separator="|"):
    """Flattens a column of JSON-object lists into a column of
    separator-joined name strings: List column -> String column."""
    df[columns] = (
        df[columns].applymap(lambda x: separator.join(
            [str(json_nested["name"]) for json_nested in x]))
    )
    return df
770b519a5b086d872e4bd16bc92663f693453745
25,738
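# Minimal before/after sketch for `flatten_column_array` (assumes pandas;
# the illustrative 'genres' column holds lists of dicts with a 'name' key):
import pandas as pd

df = pd.DataFrame({
    'genres': [[{'name': 'Drama'}, {'name': 'Comedy'}],
               [{'name': 'Action'}]],
})
flatten_column_array(df, ['genres'])
#          genres
# 0  Drama|Comedy
# 1        Action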
from typing import List


def part2(lines: List[List[int]]):
    """Tile the risk grid 5x5, incrementing each copy's values by the tile
    distance (bx + by) with wrap-around from 9 back to 1, then return the
    cost of the best path through the enlarged grid."""
    grid = Grid.from_text(lines)
    lim_x = grid.width()
    lim_y = grid.height()

    for by in range(5):
        for bx in range(5):
            if bx == by == 0:
                continue
            for dy in range(lim_y):
                for dx in range(lim_x):
                    grid[bx * lim_x + dx, by * lim_y + dy] = \
                        ((lines[dy][dx] + bx + by - 1) % 9) + 1

    best = perform_seek(grid)
    return best  # 2914
c11c452e71b61b7cda98acda6908832aec7bca60
25,739
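# The tile-increment rule from `part2` in isolation: each copy's risk grows
# by the tile distance (bx + by) and wraps from 9 back to 1, never to 0:
for risk in (1, 8, 9):
    print([((risk + inc - 1) % 9) + 1 for inc in range(5)])
# [1, 2, 3, 4, 5]
# [8, 9, 1, 2, 3]
# [9, 1, 2, 3, 4]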
def translate_point(point, y_offset=0, x_offset=0):
    """Translate points.

    This method is mainly used together with image transforms, such as
    padding and cropping, which translate the top left point of the image
    to the coordinate :math:`(y, x) = (y_{offset}, x_{offset})`.

    Args:
        point (~numpy.ndarray or list of arrays): See the table below.
        y_offset (int or float): The offset along the y axis.
        x_offset (int or float): The offset along the x axis.

    .. csv-table::
        :header: name, shape, dtype, format

        :obj:`point`, ":math:`(R, K, 2)` or :math:`[(K, 2)]`", \
        :obj:`float32`, ":math:`(y, x)`"

    Returns:
        ~numpy.ndarray: Points modified by the translation.
    """
    if isinstance(point, np.ndarray):
        out_point = point.copy()
        out_point[:, :, 0] += y_offset
        out_point[:, :, 1] += x_offset
    else:
        out_point = []
        for pnt in point:
            out_pnt = pnt.copy()
            out_pnt[:, 0] += y_offset
            out_pnt[:, 1] += x_offset
            out_point.append(out_pnt)
    return out_point
fffd18a2df12e8d51b0ea30fe378da37aa245d5d
25,740
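# Quick sketch for `translate_point`: shifting two points of one instance
# by (y_offset, x_offset) = (5, 10):
import numpy as np

point = np.array([[[0.0, 0.0], [1.0, 2.0]]], dtype=np.float32)  # (R=1, K=2, 2)
print(translate_point(point, y_offset=5, x_offset=10))
# [[[ 5. 10.]
#   [ 6. 12.]]]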
def _get_tau_var(tau, tau_curriculum_steps): """Variable which increases linearly from 0 to tau over so many steps.""" if tau_curriculum_steps > 0: tau_var = tf.get_variable('tau', [], initializer=tf.constant_initializer(0.0), trainable=False) tau_var = tau_var.assign( tf.minimum(float(tau), tau_var + float(tau) / tau_curriculum_steps)) else: tau_var = tf.get_variable('tau', [], initializer=tf.constant_initializer(float(tau)), trainable=False) return tau_var
a4ba777a70df55f22299e415e7998dc303c0b3cf
25,741
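# The ramp logic of `_get_tau_var` in plain Python (assumes tau=1.0 reached
# over 4 curriculum steps): the value climbs linearly, then saturates.
tau, steps = 1.0, 4
val, history = 0.0, []
for _ in range(6):
    val = min(tau, val + tau / steps)
    history.append(val)
print(history)  # [0.25, 0.5, 0.75, 1.0, 1.0, 1.0]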
import binascii

from pyDes import CBC, PAD_PKCS5, des


def des_descrypt(s):
    """DES decryption.

    :param s: encrypted string, hex-encoded
    :return: decrypted string
    """
    iv = constants.gk
    k = des(iv, CBC, iv, pad=None, padmode=PAD_PKCS5)
    de = k.decrypt(binascii.a2b_hex(s), padmode=PAD_PKCS5)
    return de
2a1224ec5a197928aedc6b4168762bac7f287624
25,742
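# Hedged round-trip sketch for `des_descrypt` (assumes `constants.gk` is an
# 8-byte DES key as pyDes requires; `des_encrypt` is a hypothetical
# counterpart written here only for illustration):
import binascii
from pyDes import CBC, PAD_PKCS5, des

def des_encrypt(s):
    """DES-encrypt `s` and return the ciphertext hex-encoded."""
    iv = constants.gk
    k = des(iv, CBC, iv, pad=None, padmode=PAD_PKCS5)
    return binascii.b2a_hex(k.encrypt(s, padmode=PAD_PKCS5))

assert des_descrypt(des_encrypt(b'secret')) == b'secret'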
def get_compound_coeff_func(phi=1.0, max_cost=2.0):
    """
    Cost function from the EfficientNets paper to compute candidate values
    for the alpha, beta and gamma parameters respectively. These values are
    then used to train models, and the validation accuracy is used to select
    the best base parameter set at phi = 1.

    # Arguments:
        phi: The base power of the parameters. Kept as 1 for the initial
            search of base parameters.
        max_cost: The maximum permissible cost. A user-defined constant,
            generally set to 2.

    # Returns:
        A function which accepts a numpy vector of 3 values, and computes
        the squared error between the `max_cost` value and the cost computed
        as `cost = x[0] * (x[1] ** 2) * (x[2] ** 2)`.

    # References:
        - [EfficientNet: Rethinking Model Scaling for Convolutional Neural
          Networks](https://arxiv.org/abs/1905.11946)
    """
    def compound_coeff(x, phi=phi, max_cost=max_cost):
        depth = alpha = x[0]
        width = beta = x[1]
        resolution = gamma = x[2]

        # scale by power. Phi is generally kept as 1.0 during search.
        alpha = alpha ** phi
        beta = beta ** phi
        gamma = gamma ** phi

        # compute the cost function
        cost = alpha * (beta ** 2) * (gamma ** 2)
        return (cost - max_cost) ** 2

    return compound_coeff
ec2e3e07a93741827c934e05d2e4e7e5e4a54901
25,744
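# Hedged sketch of the coefficient search the docstring describes (assumes
# scipy is available; the starting point and bounds are illustrative, not
# taken from the paper):
import numpy as np
from scipy.optimize import minimize

cost_fn = get_compound_coeff_func(phi=1.0, max_cost=2.0)
res = minimize(cost_fn, x0=np.array([1.2, 1.1, 1.15]),
               method='L-BFGS-B', bounds=[(1.0, 2.0)] * 3)
alpha, beta, gamma = res.x  # candidate depth / width / resolution coefficients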
import re def get_kver_bin(path, split=False, proc=None): """ Get version of a kernel binary at 'path'. The 'split' and 'proc' arguments are the same as in 'get_kver()'. """ if not proc: proc = Procs.Proc() cmd = f"file -- {path}" stdout = proc.run_verify(cmd)[0].strip() msg = f"ran this command: {cmd}, got output:\n{stdout}" matchobj = re.match(r".* Linux kernel.* executable .*", stdout) if not matchobj: raise Error(f"file at '{path}'{proc.hostmsg} is not a Linux kernel binary file\n{msg}") matchobj = re.match(r".* version ([^ ]+) .*", stdout) if not matchobj: raise Error(f"{msg}\nFailed to find kernel version in the output.") kver = matchobj.group(1) if split: return split_kver(kver) return kver
212da809d1c2dc52e7bf7cee2aafd89d9437eadb
25,745
from datetime import datetime


def post_apply_become_provider_apply_id_accept(request: HttpRequest, apply_id, **kwargs) -> JsonResponse:
    """Approve an application to become a device provider.

    :param request: the view request
    :type request: HttpRequest
    :param apply_id: identifier of the application to approve
    :param kwargs: extra keyword arguments
    :type kwargs: Dict
    :return: JsonResponse
    :rtype: JsonResponse
    """
    handle_reason: str = request.POST.get('handle_reason', '')
    applications: QuerySet = PermApply.objects.filter(apply_id=apply_id)
    if len(applications) == 0:
        return JsonResponse(common.create_error_json_obj(303, 'Application does not exist'), status=400)
    application: PermApply = applications.first()
    if application.status != common.PENDING:
        return JsonResponse(common.create_error_json_obj(304, 'Application has already been handled'), status=400)
    applicant: User = application.applicant
    applicant.change_group('provider')
    applicant.save()
    application.status = common.APPROVED
    application.handler = request.user
    application.handle_time = int(datetime.now(timezone.utc).timestamp())
    application.handle_reason = handle_reason
    application.save()
    pm.send_system_message_to_by_user(application.applicant, common.PM_IMPORTANT,
                                      common.create_prem_apply_handle_message(common.APPROVED))
    mail.send_perm_apply_accept(applicant.email, application)
    return common.create_success_json_res_with({})
1440d2ae9495b75c398000d3367d68fdb84f2c00
25,746
import tokenize def get_var_info(line, frame): """Given a line of code and a frame object, it obtains the value (repr) of the names found in either the local or global scope. """ tokens = utils.tokenize_source(line) loc = frame.f_locals glob = frame.f_globals names_info = [] names = [] for tok in tokens: if tok.type == tokenize.NAME: name = tok.string if name in names: continue names.append(name) result = "" if name in loc: result = format_var_info(tok, loc) elif name in glob: result = format_var_info(tok, glob, _global=True) if result: names_info.append(result) if names_info: names_info.append("") return "\n".join(names_info)
d584e1c83a9bf7d0134be1251b92cb789a7ac51c
25,747
from typing import Optional, Union


def MPS_SimAddRule(event_mask: Union[IsoSimulatorEvent,
                                     FeliCaSimulatorEvent,
                                     VicinitySimulatorEvent,
                                     NfcSimulatorEvent,
                                     Type2TagSimulatorEvent],
                   delay: float,
                   execute_count: Optional[int],
                   pattern_condition: Optional[ActionConditionDataPattern],
                   remote_command: str) -> int:
    """Adds a simulation rule

    Parameters
    ----------
    event_mask : IsoSimulatorEvent, Type2TagSimulatorEvent, \
        FeliCaSimulatorEvent, VicinitySimulatorEvent \
        or NfcSimulatorEvent
        Mask of events which triggers the rule
    delay : float
        Delay between event occurrence and rule execution in s
    execute_count : int
        Rule executions count, or None if always active
    pattern_condition : ActionConditionDataPattern
        Pattern condition
    remote_command : str
        Remote command to run when the rule conditions are met

    Returns
    -------
    int
        Rule identifier
    """
    if isinstance(event_mask, IsoSimulatorEvent):
        protocol = _SimulatorProtocol.CL_14443_SIMULATOR
    elif isinstance(event_mask, FeliCaSimulatorEvent):
        protocol = _SimulatorProtocol.CL_FELICA_SIMULATOR
    elif isinstance(event_mask, VicinitySimulatorEvent):
        protocol = _SimulatorProtocol.CL_VICINITY_SIMULATOR
    elif isinstance(event_mask, NfcSimulatorEvent):
        protocol = _SimulatorProtocol.CL_NFC_SIMULATOR
    elif isinstance(event_mask, Type2TagSimulatorEvent):
        protocol = _SimulatorProtocol.CL_TAG_TYPE2_SIMULATOR
    else:
        raise TypeError('event_mask must be an instance of '
                        'IsoSimulatorEvent IntFlag, '
                        'FeliCaSimulatorEvent IntFlag, '
                        'VicinitySimulatorEvent IntFlag, '
                        'NfcSimulatorEvent IntFlag or '
                        'Type2TagSimulatorEvent IntFlag')
    # Unit auto-selection
    computed_unit, [computed_delay] = _unit_autoselect(NfcUnit.UNIT_S,
                                                       [delay])
    _check_limits(c_uint32, computed_delay, 'delay')
    if execute_count is not None:
        _check_limits(c_uint32, execute_count, 'execute_count')
        count = execute_count
    else:
        count = 0xFFFFFFFF  # Always active
    rule_id = c_uint32()
    if pattern_condition is None:
        CTS3Exception._check_error(_MPuLib.MPS_SimAddRule(
            c_uint8(0),
            c_uint32(protocol),
            c_uint32(event_mask),
            c_uint32(computed_delay),
            c_uint32(computed_unit),
            c_uint32(count),
            c_uint32(0),
            c_uint32(0),
            c_uint32(0),
            c_uint32(0),
            None,
            c_uint32(len(remote_command)),
            remote_command.encode('ascii'),
            byref(rule_id)))
    elif isinstance(pattern_condition, ActionConditionDataPattern):
        length = c_uint32(len(pattern_condition.pattern))
        # Pad mask and pattern each to the fixed 256-byte buffer that the
        # C struct expects.
        temp = bytearray(pattern_condition.mask
                         + b'\x00' * (256 - len(pattern_condition.mask)))
        mask = (c_uint8 * 256).from_buffer(temp)
        temp = bytearray(pattern_condition.pattern
                         + b'\x00' * (256 - len(pattern_condition.pattern)))
        pattern = (c_uint8 * 256).from_buffer(temp)
        condition_ctypes = _ActionConditionDataPattern(length, mask, pattern)
        CTS3Exception._check_error(_MPuLib.MPS_SimAddRule(
            c_uint8(0),
            c_uint32(protocol),
            c_uint32(event_mask),
            c_uint32(computed_delay),
            c_uint32(computed_unit),
            c_uint32(count),
            c_uint32(0),
            c_uint32(0),
            c_uint32(0),
            c_uint32(516),
            byref(condition_ctypes),
            c_uint32(len(remote_command)),
            remote_command.encode('ascii'),
            byref(rule_id)))
    else:
        raise TypeError('pattern_condition must be an instance of '
                        'ActionConditionDataPattern')
    return rule_id.value
d945885635eba27c74edb33eb72e6bac3791a190
25,749
import json def to_pretty_json(obj): """Encode to pretty-looking JSON string""" return json.dumps(obj, sort_keys=False, indent=4, separators=(',', ': '))
b325c4e6e150e089da1d9027299831bd1576e57f
25,751
def parse_access_token(request):
    """Extract the bearer access token from the request's Authorization
    header, or return None if it is missing or malformed."""
    try:
        auth_header = request.headers.get('Authorization')
        return auth_header.split(" ")[1]
    except Exception:
        return None
a51d51d83cba5fc8e8eb7b9a9147a0219e2bcb20
25,752
def postscriptWeightNameFallback(info): """ Fallback to the closest match of the *openTypeOS2WeightClass* in this table: === =========== 100 Thin 200 Extra-light 300 Light 400 Normal 500 Medium 600 Semi-bold 700 Bold 800 Extra-bold 900 Black === =========== """ value = getAttrWithFallback(info, "openTypeOS2WeightClass") value = int(round(value * .01) * 100) if value < 100: value = 100 elif value > 900: value = 900 name = _postscriptWeightNameOptions[value] return name
4521375a668c81fdee9a3fc20391f633a60af777
25,753
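# The rounding/clamping rule from `postscriptWeightNameFallback` in
# isolation (note that Python 3's round() is banker's rounding, so 250
# maps to 200, not 300):
for weight in (50, 249, 251, 640, 1000):
    value = min(max(int(round(weight * .01) * 100), 100), 900)
    print(weight, '->', value)
# 50 -> 100, 249 -> 200, 251 -> 300, 640 -> 600, 1000 -> 900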
import torch.nn as nn


def down_spatial(in_planes, out_planes):
    """Downsample 21x21 feature maps to 5x5: (21 - 5)//4 + 1 == 5."""
    return nn.Sequential(nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=4),
                         nn.BatchNorm2d(out_planes))
2570eb4f837d45a3683f8594ef9ba0fa5b996445
25,754
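# Shape sanity check for `down_spatial`, assuming PyTorch is available:
import torch

block = down_spatial(in_planes=64, out_planes=128)
x = torch.randn(1, 64, 21, 21)
print(block(x).shape)  # torch.Size([1, 128, 5, 5]), since (21 - 5)//4 + 1 == 5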
def add_integral_control(
        plant, regulator=None,
        integrator_ugf=None, integrator_time_constant=None, **kwargs):
    """Match and return an integral gain.

    This function finds an integral gain such that the UGF of the
    integral control matches that of the specified regulator.
    If ``integrator_ugf`` or ``integrator_time_constant`` is specified
    instead, these will be matched instead.

    Parameters
    ----------
    plant : TransferFunction
        The transfer function representation of the system to be
        feedback controlled.
    regulator : TransferFunction, optional
        The pre-regulator.
        Use ``kontrol.regulator.feedback.proportional_derivative()`` or
        ``kontrol.regulator.feedback.critical_damping()`` to make one
        for oscillator-like systems.
    integrator_ugf : float, optional
        The unity gain frequency (Hz) of the integral control.
        This is the inverse of the integration time constant.
        If ``integrator_time_constant is not None``, then this value
        will be ignored.
        If set to None, it'll be set to match the first UGF of the
        derivative control.
        Defaults to None.
    integrator_time_constant : float, optional
        The integration time constant (s) for integral control.
        Setting this will override the ``integrator_ugf`` argument.
        Defaults to None.

    Returns
    -------
    ki : float
        The integral control gain.
    """
    s = control.tf("s")
    oltf_int = 1/s * plant.dcgain()
    if integrator_time_constant is not None:
        integrator_ugf = 1/integrator_time_constant
        ki = 1 / abs(oltf_int(1j*2*np.pi*integrator_ugf))
    elif integrator_ugf is not None:
        ki = 1 / abs(oltf_int(1j*2*np.pi*integrator_ugf))
    elif regulator is not None:
        oltf = plant * regulator
        _, _, _, _, ugf, _ = control.stability_margins(
            oltf, returnall=True)
        ki = 1 / abs(oltf_int(1j*min(ugf)))
    else:
        raise ValueError("At least one of regulator, integrator_ugf, or "
                         "integrator_time_constant must be specified.")
    return ki
16c6efe598e60e325f7be2fe018156f89deaac11
25,755
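# Hedged usage sketch for `add_integral_control` (assumes python-control
# and numpy are available; the plant is an illustrative damped oscillator,
# not from any real system):
import control
import numpy as np

s = control.tf('s')
wn, q = 2 * np.pi * 1.0, 10.0                  # 1 Hz resonance, Q = 10
plant = wn**2 / (s**2 + wn/q * s + wn**2)

ki = add_integral_control(plant, integrator_ugf=0.1)
integral_control = ki / s  # ki/s * plant.dcgain() crosses unity at 0.1 Hz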
def script(): """Render the required Javascript""" return Response(response=render_template("settings/settings.js"), status=200, mimetype="application/javascript")
d879fded0ebf2e160d3dcbc541ae07bc08571b8e
25,756
def repr_helper(tuple_gen_exp, ind=2):
    """
    Given a sequence of 2-tuples, return a nice string like:

    .. code-block:: python

        (1, 'hi'),
        (2, 'there'),
        (40, 'you')

    ->

    .. code-block:: python

        [ 1] : hi
        [ 2] : there
        [40] : you
    """
    lines = []
    k_v = list(tuple_gen_exp)
    if not k_v:
        return " "*ind + "(empty)"
    max_len_k = max(len(str(k_vp[0])) for k_vp in k_v)
    for k, v in sorted(k_v):
        lines.append(" "*ind + "[%s] : %s" % (lpad(k, max_len_k), v))
    return "\n".join(lines)
a80739ac09167ce582bf35dc8e7ce1c7654ba6e2
25,757
from IPython import get_ipython def in_ipython() -> bool: """Return true if we're running in an IPython interactive shell.""" try: return get_ipython().__class__.__name__ == 'TerminalInteractiveShell' except Exception: pass return False
f0a92dfc8c02da2761c5f2074b3928943e7abd8f
25,758
def demo_loss_accuracy_curve(): """Make a demo loss-accuracy curve figure.""" steps = np.arange(101) loss = np.exp(-steps * 0.1) * 20. + np.random.normal(size=101) * 2. loss = loss - np.min(loss) + .2 valid_steps = np.arange(0, 101, 10) valid_loss = (np.exp(-valid_steps * 0.1) * 25. + np.random.normal(size=11) * 0.5) valid_loss = valid_loss - np.min(valid_loss) valid_acc = np.exp(-valid_loss * 0.1) return Section( 'Training Metrics', loss_accuracy_curve( metrics=[ {'name': 'loss', 'steps': steps, 'values': loss}, {'name': 'valid loss', 'steps': valid_steps, 'values': valid_loss}, ], secondary_metrics=[ {'name': 'valid acc', 'steps': valid_steps, 'values': valid_acc}, ], title='Training Loss & Validation Loss / Accuracy' ) )
1d530d0f4f6c830a974fd634c636f691595f1d38
25,759
def text_filter(sentence: str) -> str:
    """Keep only CJK characters, the listed full-width punctuation marks and
    digits; newlines are replaced with '。' first.

    :param sentence: input text
    :return: filtered text
    """
    line = sentence.replace('\n', '。')
    # keep CJK characters (U+4E00..U+9FA5), full-width punctuation and digits
    linelist = [word for word in line
                if u'\u4e00' <= word <= u'\u9fa5'
                or word in [',', '。', '?', '!', ':']
                or word.isdigit()]
    return ''.join(linelist)
9c0949b2e9b374f1aa5392b5a4c215ebff21171b
25,761
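# Quick demo of `text_filter`: ASCII letters are dropped; CJK characters,
# the listed full-width punctuation and digits survive; '\n' becomes '。'.
print(text_filter('Hello,世界!\n共100行'))
# ,世界!。100行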
import math


def get_lr_schedule(base_lr, global_batch_size, base_batch_size=None,
                    scaling=None, n_warmup_epochs=0, warmup_factor=-1,
                    decay_schedule={}, is_root=True):
    """Get the learning rate schedule function"""
    if scaling == 'linear':
        scale_factor = global_batch_size / base_batch_size
    elif scaling == 'sqrt':
        scale_factor = math.sqrt(global_batch_size / base_batch_size)
    else:
        scale_factor = 1.
    peak_lr = base_lr * scale_factor
    init_lr = peak_lr * warmup_factor if warmup_factor >= 0 else base_lr

    # MLPerf logging
    # NOTE: there is currently a confusing mismatch between the parameter
    # naming convention in this implementation and MLPerf's hyperparameter
    # conventions. Here we define base LR to be the LR at a baseline batch
    # size and the "peak" LR to be the value scaled according to current batch
    # size. We will leave things as-is for now.
    if is_root:
        mllogger = mllog.get_mllogger()
        mllogger.event(key=mllog.constants.OPT_BASE_LR, value=peak_lr)
        mllogger.event(key=mllog.constants.OPT_LR_WARMUP_EPOCHS,
                       value=n_warmup_epochs)
        mllogger.event(key=mllog.constants.OPT_LR_WARMUP_FACTOR,
                       value=warmup_factor if warmup_factor >= 0 else init_lr / peak_lr)
        decay_name = decay_schedule['name']
        if decay_name == 'step':
            decay_steps = decay_schedule.copy()
            decay_steps.pop('name')
            mllogger.event(key=mllog.constants.OPT_LR_DECAY_BOUNDARY_EPOCHS,
                           value=sorted(decay_steps.keys()))
            mllogger.event(key=mllog.constants.OPT_LR_DECAY_FACTOR,
                           value=max(decay_steps.values()) if len(decay_steps) > 0 else 1)

    return partial(_lr_schedule, init_lr=init_lr, peak_lr=peak_lr,
                   n_warmup_epochs=n_warmup_epochs,
                   decay_schedule=decay_schedule)
385d9f01992dc650732420580803b147068f70fc
25,762
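# The two scaling rules from `get_lr_schedule` in isolation (assumes a base
# LR tuned at batch size 256; numbers are illustrative):
import math

base_lr, base_bs, global_bs = 0.1, 256, 1024
print(base_lr * (global_bs / base_bs))           # linear scaling -> 0.4
print(base_lr * math.sqrt(global_bs / base_bs))  # sqrt scaling   -> 0.2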
import math def stdp_values(values, period=None): """Returns list of running population standard deviations. :param values: list of values to iterate and compute stat. :param period: (optional) # of values included in computation. * None - includes all values in computation. :rtype: list of windowed population standard deviations. Examples: >>> values = [34, 30, 29, 34, 38, 25, 35] >>> results = stdp_values(values, 3) #using 3 period window. >>> ["%.2f" % x for x in results] ['0.00', '2.00', '2.16', '2.16', '3.68', '5.44', '5.56'] """ results = _varbases(values, period, population=True) _sqrt = math.sqrt return [_sqrt(x) for x in results]
b3be172dc377325b75ac7f8fe908751b47ecca58
25,763