Dataset schema: content — string (35 to 762k chars); sha1 — string (40 chars); id — int64 (0 to 3.66M).
def similarity(item, user, sim_dict):
    """Similarity between an item and a user (a set of items)."""
    if user not in sim_dict or item not in sim_dict[user]:
        return 0
    else:
        return sim_dict[user][item]
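A quick usage sketch (hypothetical data): sim_dict maps each user to a dict of item scores, so a missing user or item falls back to 0.

sim_dict = {"alice": {"book": 0.9}}
assert similarity("book", "alice", sim_dict) == 0.9
assert similarity("film", "alice", sim_dict) == 0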
e63eec781f7ed9fa72d21d1119daf9ce89d39b1b
4,400
import requests


def get_total_result(req_url):
    """Return the total number of hits (int) for the specified query.

    Args:
        req_url (str): The request query that decides the data.
    """
    # NOTE: `headers` is assumed to be defined at module level elsewhere.
    r = requests.get(req_url, headers=headers)
    json = r.json()
    return json['result']['totalHits']
8c6ff54fbb285fc765afd8bb5e5ba3195ec624d0
4,401
import numpy as np  # assumed import; np is used but not imported in the snippet


def lorentz(x, FWHM, x0=0):
    """Return the Lorentzian lineshape."""
    return FWHM / 2 / np.pi * ((x - x0)**2 + (FWHM / 2)**2)**-1
92c21b7b99b600b2dc622d2ea09b0bd3e39f8047
4,402
TREE = '#'  # assumed map symbol for a tree (not defined in the original snippet)


def count_collision(strMap: list[str], right: int, down: int) -> int:
    """Read the map and count how many trees would be encountered if someone
    starts from the top-left corner."""
    mapWidth = len(strMap[0])  # All lines are assumed to have the same width
    xCoord, yCoord = right % mapWidth, down
    count = 0
    while yCoord < len(strMap):
        if strMap[yCoord][xCoord] == TREE:
            count += 1
        xCoord = (xCoord + right) % mapWidth
        yCoord += down
    return count
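A minimal usage sketch, assuming the Advent-of-Code-style map format and the hypothetical TREE = '#' constant noted above:

demo_map = [
    "..##.......",
    "#...#...#..",
    ".#....#..#.",
]
print(count_collision(demo_map, right=3, down=1))  # number of '#' hit on the slope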
b233224a407493757ba7976ede22499b21afe068
4,403
def svn_config_find_group(*args):
    """svn_config_find_group(svn_config_t cfg, char key, char master_section, apr_pool_t pool) -> char"""
    return _core.svn_config_find_group(*args)
3974798a82f2dfb77957e8de1c4814725d82c3a9
4,404
def _database_exists():
    """Checks for existence of database"""
    _require_environment()
    database = _get_database_name()
    with settings(hide('warnings'), warn_only=True):
        result = run(MYSQL_PREFIX % "\"SHOW DATABASES LIKE '%(NAME)s';\"" % database)
    if database['NAME'] in result:
        return True
    else:
        print('Database %(NAME)s does not exist' % database)
        return False
94cbffb4d7e62d6c9fcae7d0966c6d595ddf7907
4,405
def EncoderDecoder(d_model, d_ff, n_heads, dropout, layer_idx, mode,
                   ff_activation):
  """Transformer encoder-decoder layer.

  The input is a triple (decoder_input, mask, encoder) where the mask is
  created from the original source to prevent attending to the padding part
  of the encoder.

  Args:
    d_model: int: depth of embedding
    d_ff: int: depth of feed-forward layer
    n_heads: int: number of attention heads
    dropout: float: dropout rate (how much to drop out)
    layer_idx: which layer are we at (for bookkeeping)
    mode: str: 'train' or 'eval'
    ff_activation: the non-linearity in feed-forward layer

  Returns:
    the layer, returning a triple (decoder_activations, mask, encoder).
  """
  decoder_self_attention = [                  #  vecs_d  pmask  vecs_e
      tl.LayerNorm(),                         #  vecs_d  .....  ......
      tl.BasicCausalAttention(
          d_model, n_heads=n_heads, dropout=dropout, mode=mode),
      tl.Dropout(rate=dropout, mode=mode),    #  vecs_d  .....  ......
  ]
  decoder_to_encoder_attention = [      # vecs_d         masks         vecs_e
      tl.LayerNorm(),                   # vecs_d         masks         vecs_e
      tl.Parallel([], [], tl.Dup()),    # ______         _____  vecs_e vecs_e
      tl.Parallel([], tl.Swap()),       # ______         vecs_e masks  ......
      tl.Parallel([], tl.Dup()),        # ______  vecs_e vecs_e .....  ......
      tl.AttentionQKV(                  # (q k v masks ... --> vecs_d masks ...)
          d_model, n_heads=n_heads, dropout=dropout, mode=mode),
      tl.Dropout(rate=dropout, mode=mode),    # vecs_d mask vecs_e
  ]
  feed_forward = [
      FeedForward(d_model, d_ff, dropout, layer_idx, mode, ff_activation),
  ]
  return tl.Serial(                               # vecs_d masks vecs_e
      tl.Residual(decoder_self_attention),        # vecs_d masks vecs_e
      tl.Residual(decoder_to_encoder_attention),  # vecs_d masks vecs_e
      tl.Residual(feed_forward),                  # vecs_d masks vecs_e
  )
8dd8032d2b0b270f49f21adedb22474565827801
4,406
from functools import reduce
from operator import concat

import numpy as np  # assumed import; np is used but not imported in the snippet


def group_normalize(strokes):
    """Normalize a multistroke drawing."""
    # operator.concat is binary, so reduce is needed to join all strokes
    # (the original called concat(strokes) directly, which raises TypeError).
    long_stroke = reduce(concat, strokes)
    x_min = min(long_stroke.x)
    x_max = max(long_stroke.x)
    y_min = min(long_stroke.y)
    y_max = max(long_stroke.y)
    x_range = float(x_max - x_min)
    y_range = float(y_max - y_min)
    normalized_strokes = []
    for stroke in strokes:
        x = ((np.array(stroke.x) - x_min) / x_range).tolist()
        y = ((np.array(stroke.y) - y_min) / y_range).tolist()
        normalized_strokes.append(Stroke(x, y))
    return normalized_strokes
ec7a44573b2334b69d3d878bd381a0ced1fdd304
4,407
def _get_sa_bracket(myimt, saset):
    """
    For a given SA IMT, look through the input SAs and return a tuple of
    a) a pair of IMT strings representing the periods bracketing the
    given period; or b) the single IMT representing the first or last
    period in the input list if the given period is off the end of the
    list.

    Args:
        myimt (IMT): The IMT whose period to search for in the input list.
        saset (list): A list of SA IMTs.

    Returns:
        tuple: One or two strings representing the IMTs closest to or
        bracketing the input IMT.
    """
    if not len(saset):
        return ()
    #
    # Stick the target IMT into a copy of the list of SAs, then sort
    # the list by period.
    #
    ss = saset.copy()
    ss.append(myimt)
    tmplist = sorted(ss, key=_get_period_from_imt)
    nimt = len(tmplist)
    #
    # Get the index of the target IMT in the sorted list
    #
    myix = tmplist.index(myimt)
    #
    # If the target IMT is off the end of the list, return the
    # appropriate endpoint; else return the pair of IMTs that
    # bracket the target.
    #
    if myix == 0:
        return (tmplist[1], )
    elif myix == nimt - 1:
        return (tmplist[-2], )
    else:
        return (tmplist[myix - 1], tmplist[myix + 1])
2588fb1a45a008ec81770f69b5f6ee815f1f2511
4,408
def fb83(A, B, eta=1., nu=None):
    """
    Generates the FB8 distribution using the orthogonal vectors A and B,
    where A = gamma1*kappa and B = gamma2*beta (gamma3 is inferred).

    A may not have length zero, but it may be arbitrarily close to zero.
    B may have length zero; if so, an arbitrary value for gamma2
    (orthogonal to gamma1) is chosen.
    """
    kappa = norm(A)
    beta = norm(B)
    gamma1 = A / kappa
    if beta == 0.0:
        gamma2 = __generate_arbitrary_orthogonal_unit_vector(gamma1)
    else:
        gamma2 = B / beta
    theta, phi, psi = FB8Distribution.gammas_to_spherical_coordinates(
        gamma1, gamma2)
    gamma1, gamma2, gamma3 = FB8Distribution.spherical_coordinates_to_gammas(
        theta, phi, psi)
    return FB8Distribution(gamma1, gamma2, gamma3, kappa, beta, eta, nu)
1162c99eb3964512d935db8e5ce19bfc6eb5b391
4,409
def replicate(pta, ptac, p0, coefficients=False):
    """Create replicated residuals conditioned on the data.

    Here pta is a standard marginalized-likelihood PTA, and ptac is a
    hierarchical-likelihood version of pta with coefficients=True for all
    GPs. This function:

    - calls utils.get_coefficients(pta, p0) to get a realization of the GP
      coefficients conditioned on the data and on the hyperparameters in p0;
    - calls ptac.get_delay() to compute the resulting realized GPs at the toas;
    - adds measurement noise (including ECORR) consistent with the
      hyperparameters.

    To use this (pending further development), you need to set combine=False
    on the pta/ptac GPs, and method='sparse' on the ptac EcorrKernelNoise.

    Returns a list of replicated residuals, one list element per pulsar."""

    # GP delays
    if not coefficients:
        p0 = get_coefficients(pta, p0)
    ds = ptac.get_delay(params=p0)

    # note: the proper way to cache the Nmat computation is to give
    # a `sample` method to csc_matrix_alt and ndarray_alt, which
    # would then save the factorization in the instance
    nmats = ptac.get_ndiag(params=p0)

    for d, nmat in zip(ds, nmats):
        if isinstance(nmat, sps.csc_matrix):
            # add EFAC/EQUAD/ECORR noise
            # use xx' = I => (Lx)(Lx)' = LL' with LL' = PNP'
            # hence N[P[:, np.newaxis], P[np.newaxis, :]] = LL'
            # see https://scikit-sparse.readthedocs.io/en/latest/cholmod.html
            ch = cholesky(nmat)
            d[ch.P()] += ch.L() @ np.random.randn(len(d))
        elif isinstance(nmat, np.ndarray):
            # diagonal case, nmat will be ndarray_alt instance
            d += np.sqrt(nmat) * np.random.randn(len(d))
        else:
            raise NotImplementedError(
                "Cannot take Nmat factor; "
                "you may need to set the EcorrKernelNoise to 'sparse'."
            )

    return ds
addab624fb2314a004a7454ca3a3199539baabf9
4,410
import pandas as pd  # assumed import for the return annotation


def load_danube() -> pd.DataFrame:
    """
    The danube dataset contains ranks of base flow observations from the
    Global River Discharge project of the Oak Ridge National Laboratory
    Distributed Active Archive Center (ORNL DAAC), a NASA data center. The
    measurements are monthly average flow rates for two stations, situated
    at Scharding (Austria) on the Inn river and at Nagymaros (Hungary) on
    the Danube.

    The data have been pre-processed to remove any time trend. Specifically,
    Bacigal et al. (2011) extracted the raw data and obtained the fast
    Fourier transformed centered observations. The negative spectrum is
    retained and a linear time series model with 12 seasonal components is
    fitted. Residuals are then extracted and an AR model is fitted to the
    series, the selection being done based on the AIC criterion with an
    imposed maximum order of 3; the number of autoregressive components may
    differ for each series.

    This data frame contains the following columns:

    inn: A numeric vector containing the rank of pre-whitened level
    observations of the Inn river at Scharding.

    donau: A numeric vector containing the rank of pre-whitened level
    observations of the Danube river at Nagymaros.
    """
    return _load_file('danube.csv')
f1ae04e37e69acf1fa805953f58c96633136be09
4,411
import numpy as np  # assumed import; np is used but not imported in the snippet


def get_index(grid_mids, values):
    """Get the indices of values in an array.

    Args:
        grid_mids: array of grid centers
        values: array of values

    Returns:
        indices
    """
    diff = np.diff(grid_mids)
    diff = np.concatenate((diff, diff[-1:]))
    edges = np.concatenate((grid_mids - diff/2, grid_mids[-1:] + diff[-1:]/2))
    ind = np.digitize(np.array(values), edges) - 1
    ind[ind > grid_mids.shape[0] - 1] = grid_mids.shape[0] - 1
    return ind
b8277e84ddaae5c951ad032f3a738d1f9c02feac
4,412
def validate_incoming_edges(graphs, param=None):
    """
    If a node of a certain type has more than a threshold of incoming
    edges, mark the possible stitches as a bad stitch.
    """
    param = param or {}
    res = {}
    i = 0
    for candidate in graphs:
        res[i] = 'ok'
        for node, values in candidate.nodes(data=True):
            if values[stitcher.TYPE_ATTR] not in list(param.keys()):
                continue
            tmp = param[values[stitcher.TYPE_ATTR]]
            if len(candidate.in_edges(node)) >= tmp:
                res[i] = 'node ' + str(node) + ' has too many edges: ' + \
                         str(len(candidate.in_edges(node)))
        i += 1
    return res
7872ad52c942d986725d7dc1e089ad91850b5c71
4,413
def face_area(bounding_box, correction):
    """Expand the face area to a square.

    Face detectors clip very close to the face, which is unhelpful when you
    want to capture the whole head.

    Arguments:
        bounding_box: original bounding box
        correction: horizontal correction value

    Returns:
        4-element list - bounding box for the expanded area (ints)
    """
    x_1, y_1, x_2, y_2 = bounding_box
    x_1 = x_1 + correction
    x_2 = x_2 + correction
    x_center = int(x_1 + (x_2 - x_1) / 2)
    y_center = int(y_1 + (y_2 - y_1) / 2)
    factor = 2
    square_factor = int(max(x_2 - x_1, y_2 - y_1) * factor / 2)
    x_1p = x_center - square_factor
    y_1p = y_center - square_factor
    x_2p = x_1p + square_factor * 2
    y_2p = y_1p + square_factor * 2
    return [x_1p, y_1p, x_2p, y_2p]
b4c47b01989acb706e9c959cd29902b6045a7fad
4,414
def ut_to_dt(ut):
    """Convert a universal time in days to a dynamical time in days."""
    # As at July 2020, TAI is 37 sec ahead of UTC, and TDT is 32.184 sec
    # ahead of TAI, so TDT - UTC = 69.184 sec.
    return ut + 69.184/SEC_IN_DAY
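A worked example, assuming SEC_IN_DAY = 86400 is the module-level constant the function refers to: the offset is 69.184 / 86400 ≈ 0.0008007 days.

SEC_IN_DAY = 86400  # assumed module-level constant
print(ut_to_dt(2459046.5))  # ≈ 2459046.5008007 (UT plus ~0.0008 days)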
1f9af7758c53d32494013280b401393e5723d358
4,415
from pathlib import Path

import h5py  # assumed import; h5py is used but not imported in the snippet
from numpy import ndarray  # assumed import for the return annotation


def _read_group_h5(filename: Path, groupname: str) -> ndarray:
    """Return group content.

    Args:
        filename: path of hdf5 file.
        groupname: name of group to read.

    Returns:
        content of group.
    """
    try:
        with h5py.File(filename, 'r') as h5f:
            data = h5f[groupname][()]
    except OSError as err:
        # h5py doesn't always include the filename in its error messages
        err.args += (filename,)
        raise
    return data
3febc0c0322ca9d9a7f30b4d9cf94ce32f5d3109
4,416
def _clip_bbox(min_y, min_x, max_y, max_x):
    """Clip bounding box coordinates between 0 and 1.

    Args:
        min_y: Normalized bbox coordinate of type float between 0 and 1.
        min_x: Normalized bbox coordinate of type float between 0 and 1.
        max_y: Normalized bbox coordinate of type float between 0 and 1.
        max_x: Normalized bbox coordinate of type float between 0 and 1.

    Returns:
        Clipped coordinate values between 0 and 1.
    """
    min_y = tf.clip_by_value(min_y, 0.0, 1.0)
    min_x = tf.clip_by_value(min_x, 0.0, 1.0)
    max_y = tf.clip_by_value(max_y, 0.0, 1.0)
    max_x = tf.clip_by_value(max_x, 0.0, 1.0)
    return min_y, min_x, max_y, max_x
bb5b2ed23626e26004ae87bdd7fc03b2d177f38f
4,417
def hot(df, hot_maps, drop_cold=True, ret_hots_only=False, verbose=False):
    """One-hot encode the mapped columns of a dataframe.

    df: pd.DataFrame
    hot_maps: dict mapping a column name (str) in df to its hot_map;
        a hot_map maps each unique row value to a one-hot vector
    ---
    returns dataframe
    """
    if verbose:
        print(f"hot_df cols: {df.columns}")
    ret = []
    for col_name, hot_map in hot_maps.items():
        ret.append(hot_col(df[col_name], hot_map))
    if ret_hots_only:
        return ret
    ret = pd.concat([df] + ret, axis=1)
    if drop_cold:
        ret = ret.drop(list(hot_maps.keys()), axis=1)
    return ret
b0912ae22aa3ee34acde76e89c5f926c9d309492
4,418
def menu(function_text):
    """
    Decorator for a plain-text handler.

    :param function_text: function which is set as a handler in the bot class
    :return: wrapped handler
    """
    def wrapper(self, bot, update):
        self.text_menu(bot, update)
        function_text(self, bot, update)
    return wrapper
dc68a46aaf402cd5ce3bd832a0f2a661b5cbc71b
4,419
def create_delete_classes(system_id_or_identifier, **kwargs):
    """Create or delete classes for a classification system.

    :param system_id_or_identifier: The id or identifier of a classification system
    """
    if request.method == "DELETE":
        data.delete_classes(system_id_or_identifier)
        return {'message': f'Classes of {system_id_or_identifier} deleted'}, 204

    if request.method == "POST":
        args = request.get_json()
        errors = ClassMetadataForm().validate(args)
        if errors:
            return abort(400, str(errors))
        classes = data.insert_classes(system_id_or_identifier=system_id_or_identifier,
                                      classes_files_json=args['classes'])
        result = ClassesSchema(exclude=['classification_system_id']).dump(classes, many=True)
        return jsonify(result), 201
4f48ebb7fe80854d255f47fb795e83b54f9f60b3
4,420
def ajax_upload_key():
    """Ajax upload a functionary key.

    Key files are stored to the db in their dictionary representation.
    """
    functionary_key = request.files.get("functionary_key", None)
    functionary_name = request.form.get("functionary_name", None)

    if not functionary_name:
        flash("Something went wrong: We don't know which functionary"
              " this key belongs to", "alert-danger")
        return jsonify({"error": True})

    if not functionary_key:
        flash("Something went wrong: No file uploaded", "alert-danger")
        return jsonify({"error": True})

    if functionary_key.filename == "":
        flash("Something went wrong: No file selected", "alert-danger")
        return jsonify({"error": True})

    try:
        # We try to load the public key to check the format
        key = securesystemslib.keys.import_rsakey_from_public_pem(
            functionary_key.read())
        securesystemslib.formats.PUBLIC_KEY_SCHEMA.check_match(key)

        file_name = functionary_key.filename
        functionary_db_item = {
            "functionary_name": functionary_name,
            "file_name": file_name,
            "key_dict": key
        }

        # Clumsy update or insert for functionary array embedded subdocument
        # NOTE: Unfortunately we can't "upsert" on arrays but must first try to
        # update and if that does not work insert.
        # https://docs.mongodb.com/manual/reference/operator/update/positional/#upsert
        # https://stackoverflow.com/questions/23470658/mongodb-upsert-sub-document
        query_result = mongo.db.session_collection.update_one(
            {
                "_id": session["id"],
                "functionaries.items.functionary_name": functionary_name
            },
            {
                "$set": {"functionaries.items.$": functionary_db_item}
            })

        if not query_result.matched_count:
            query_result = mongo.db.session_collection.update_one(
                {
                    "_id": session["id"],
                    # This query part should deal with concurrent requests
                    "functionaries.items.functionary_name": {"$ne": functionary_name}
                },
                {
                    "$push": {"functionaries.items": functionary_db_item}
                }, upsert=True)

            flash("Added key '{fn}' for functionary '{functionary}'"
                  .format(fn=file_name, functionary=functionary_name),
                  "alert-success")
        else:
            flash("Updated key '{fn}' for functionary '{functionary}'"
                  .format(fn=file_name, functionary=functionary_name),
                  "alert-success")

        # TODO: Throw more rocks at query_result

    except Exception as e:
        flash("Could not store uploaded file. Error: {}".format(e),
              "alert-danger")
        return jsonify({"error": True})

    return jsonify({"error": False})
cd5cca4ff1a3283224362ccacab8c20c450eeaf6
4,421
def add_latents_to_dataset_using_tensors(args, sess, tensors, data):
    """Get latent representations from the model.

    Args:
        args: Arguments from parser in train_grocerystore.py.
        sess: Tensorflow session.
        tensors: Tensors used for extracting latent representations.
        data: Data used during epoch.

    Returns:
        Data dictionary filled with latent representations.
    """
    latents = sess.run(tensors['latents'],
                       feed_dict={tensors['x']: data['features']})
    data['latents'] = latents

    if args.use_private:
        latents_ux = sess.run(tensors['latents_ux'],
                              feed_dict={tensors['x']: data['features']})
        data['latents_ux'] = latents_ux

    if args.use_text:
        all_captions = load_captions(data['captions'], data['labels'])
        latents_uw = sess.run(tensors['latents_uw'],
                              feed_dict={tensors['captions']: all_captions})
        data['latents_uw'] = latents_uw

    if args.use_iconic:
        batch_size = args.batch_size
        n_examples = len(data['iconic_image_paths'])
        n_batches = int(np.ceil(n_examples / batch_size))
        latents_ui = np.zeros([n_examples, args.z_dim])
        for i in range(n_batches):
            start = i * batch_size
            end = start + batch_size
            if end > n_examples:
                end = n_examples
            iconic_images = load_iconic_images(data['iconic_image_paths'][start:end])
            latents_ui[start:end] = sess.run(tensors['latents_ui'],
                                             feed_dict={tensors['iconic_images']: iconic_images})
        data['latents_ui'] = latents_ui

    return data
550c7c878b43737b5fcabbb007062774f404b0b3
4,422
import numpy as np  # assumed import; np is used but not imported in the snippet


def normal_distribution_parameter_estimation(data):
    """Estimate normal distribution parameters (unbiased estimation for sigma).

    :param data: a list, each element is a real number, the value of some attribute
        e.g. [0.46, 0.376, 0.264, 0.318, 0.215, 0.237, 0.149, 0.211]
    :return miu: the estimated mean of the normal distribution based on 'data'
        e.g. 0.27875
    :return sigma: the estimated standard deviation of the normal distribution
        based on 'data', e.g. 0.10092394590553255
    """
    miu = np.mean(data)   # estimate the mean of the normal distribution
    sigma = 0             # initial sigma
    data_num = len(data)  # the number of data points

    # estimate sigma of the normal distribution
    for each_data in data:
        sigma = sigma + (each_data - miu) ** 2
    sigma = sigma / (data_num - 1)  # unbiased estimation (divide by n - 1)
    sigma = sigma ** 0.5

    return miu, sigma
ce8ca8010f98fdd2067285fea4779507fe7e958b
4,423
def reverse_complement(seq):
    """
    ARGS:
        seq : sequence with _only_ A, T, C, G or N (case sensitive)

    RETURN:
        rcSeq : reverse complement of the sequence passed to it.

    DESCRIPTION:

    DEBUG:
        Compared several sequences. Is working.

    FUTURE:
    """
    rcSeq = ""      # Reverse complement sequence

    # Complement
    for char in seq:
        if char == 'A':
            rcSeq += 'T'
            continue
        if char == 'T':
            rcSeq += 'A'
            continue
        if char == 'G':
            rcSeq += 'C'
            continue
        if char == 'C':
            rcSeq += 'G'
            continue
        if char == 'N':
            rcSeq += 'N'
            continue
        if char not in "ATCGN":
            exit_with_error("ERROR! char %s is not a valid sequencing character!\n" % (char))

    # Reverse
    rcSeq = rcSeq[::-1]
    return rcSeq
a6dee7ccb862e21534fd1736e345438f9df18fac
4,424
def compose(chosung, joongsung, jongsung=u''):
    """Return a Hangul letter composed of the specified chosung, joongsung, and jongsung.

    @param chosung
    @param joongsung
    @param jongsung the terminal Hangul letter. This is optional if you do not need a jongsung.
    """
    if jongsung is None:
        jongsung = u''

    try:
        chosung_index = CHOSUNGS.index(chosung)
        joongsung_index = JOONGSUNGS.index(joongsung)
        jongsung_index = JONGSUNGS.index(jongsung)
    except Exception:
        raise NotHangulException(
            'No valid Hangul character can be generated using given combination of chosung, joongsung, and jongsung.')

    return chr(0xAC00 +
               chosung_index * NUM_JOONGSUNGS * NUM_JONGSUNGS +
               joongsung_index * NUM_JONGSUNGS +
               jongsung_index)
047d0cf68a558d795a5bf71b0ebe686a41208af7
4,425
from typing import Dict
from typing import List


def generate_markdown_metadata(metadata_obj: Dict[str, str]) -> List[str]:
    """generate_markdown_metadata

    Add some basic metadata to the top of the file in HTML tags.
    """
    metadata: List[str] = ["<!---"]
    passed_metadata: List[str] = [
        f"    {key}: {value}" for key, value in metadata_obj.items()
    ]
    metadata.extend(passed_metadata)
    metadata.append("    Tags:")
    metadata.append("--->")
    metadata.append(f"# Diary for {metadata_obj['Date']}")
    metadata.append("")
    return metadata
02ef3952c265276f4e666f060be6cb1d4a150cca
4,426
import numpy as np  # assumed import; np is used but not imported in the snippet


def fftshift(x: np.ndarray):
    """Shift the FFT spectrum.

    By default the FFT spectrum is not symmetric about zero frequency;
    fftshift swaps the left and right halves of the spectrum.

    :Parameters:
        - x: spectrum sequence
    :Returns: the shifted spectrum
    """
    N = x.size
    return np.append(x[N//2:], x[:N//2])
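A usage sketch on a length-8 spectrum; the two halves are swapped:

spec = np.arange(8)
print(fftshift(spec))  # -> [4 5 6 7 0 1 2 3]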
beaf2dcd0d5c9ff0b9bd83d326b3d3a9f6471968
4,427
from typing import List
from typing import Tuple

from sklearn.datasets import fetch_20newsgroups  # assumed import; used below


def get_20newsgroups_data(
    train_test,
    categories=None,
    max_text_len: int = None,
    min_num_tokens=0,
    random_state=42,
) -> List[Tuple[str, str]]:
    """
    Categories:
    'alt.atheism', 'comp.graphics', 'comp.os.ms-windows.misc',
    'comp.sys.ibm.pc.hardware', 'comp.sys.mac.hardware', 'comp.windows.x',
    'misc.forsale', 'rec.autos', 'rec.motorcycles', 'rec.sport.baseball',
    'rec.sport.hockey', 'sci.crypt', 'sci.electronics', 'sci.med',
    'sci.space', 'soc.religion.christian', 'talk.politics.guns',
    'talk.politics.mideast', 'talk.politics.misc', 'talk.religion.misc'
    """
    data = fetch_20newsgroups(
        subset=train_test,
        shuffle=True,
        remove=("headers", "footers", "quotes"),
        categories=categories,
        random_state=random_state,
    )
    target_names = data.target_names

    def truncate_to_maxlen(text):
        if max_text_len is not None:
            return text[0:min(len(text), max_text_len)]
        else:
            return text

    text_target_tuples = [
        (truncate_to_maxlen(d), target_names[target])
        for d, target in zip(data.data, data.target)
        if len(d.split(" ")) > min_num_tokens
    ]

    return text_target_tuples
6053f967ac1fb782cab28fe401e384940703e384
4,428
def crossdomain(allowed_origins=None, methods=None, headers=None,
                max_age=21600, attach_to_all=True,
                automatic_options=True, credentials=False):
    """
    http://flask.pocoo.org/snippets/56/
    """
    if methods is not None:
        methods = ', '.join(sorted(x.upper() for x in methods))
    if headers is not None and not isinstance(headers, str):
        headers = ', '.join(x.upper() for x in headers)
    if isinstance(allowed_origins, str):
        # always have allowed_origins as a list of strings.
        allowed_origins = [allowed_origins]
    if isinstance(max_age, timedelta):
        max_age = max_age.total_seconds()

    def get_methods():
        if methods is not None:
            return methods
        options_resp = current_app.make_default_options_response()
        return options_resp.headers['allow']

    def decorator(f):
        def wrapped_function(*args, **kwargs):
            # Get a hold of the request origin
            origin = request.environ.get('HTTP_ORIGIN')

            if automatic_options and request.method == 'OPTIONS':
                resp = current_app.make_default_options_response()
            else:
                resp = make_response(f(*args, **kwargs))
            if not attach_to_all and request.method != 'OPTIONS':
                return resp

            h = resp.headers

            # if the origin matches any of our allowed origins, set the
            # access control header appropriately
            allow_origin = (origin if origin is not None and
                            allowed_origins is not None and
                            origin in allowed_origins else None)

            h['Access-Control-Allow-Origin'] = allow_origin
            h['Access-Control-Allow-Methods'] = get_methods()
            h['Access-Control-Max-Age'] = str(max_age)
            if credentials:
                h['Access-Control-Allow-Credentials'] = 'true'
            if headers is not None:
                h['Access-Control-Allow-Headers'] = headers
            return resp

        f.provide_automatic_options = False
        return update_wrapper(wrapped_function, f)
    return decorator
9d352718406f62eaeb184a081b4223b67f6200f3
4,429
from typing import Callable


def make_vector_gradient(bcs: Boundaries) -> Callable:
    """Make a discretized vector gradient operator for a cylindrical grid.

    |Description_cylindrical|

    Args:
        bcs (:class:`~pde.grids.boundaries.axes.Boundaries`):
            |Arg_boundary_conditions|

    Returns:
        A function that can be applied to an array of values
    """
    assert isinstance(bcs.grid, CylindricalGrid)
    bcs.check_value_rank(1)

    # calculate preliminary quantities
    gradient_r = make_gradient(bcs.extract_component(0))
    gradient_z = make_gradient(bcs.extract_component(1))
    gradient_phi = make_gradient(bcs.extract_component(2))

    @jit_allocate_out(out_shape=(3, 3) + bcs.grid.shape)
    def vector_gradient(arr, out=None):
        """Apply the gradient operator to array `arr`."""
        gradient_r(arr[0], out=out[:, 0])
        gradient_z(arr[1], out=out[:, 1])
        gradient_phi(arr[2], out=out[:, 2])
        return out

    return vector_gradient
61d4f1a29d4a81e57ad37a6b963e201e5deabc06
4,430
from subprocess import check_output  # assumed import; check_output is used below


def exec_in_terminal(command):
    """Run a command in the terminal and get the output, stripping the
    last newline.

    Args:
        command: a string or list of strings
    """
    return check_output(command).strip().decode("utf8")
1186649cebbd20559f7de0ba8aa743d70f35c924
4,431
def replace_string(original, start, end, replacement):
    """Replace the half-open range [start, end) of |original| with |replacement|."""
    return original[0:start] + replacement + original[end:]
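A usage sketch showing the half-open range semantics:

print(replace_string("hello world", 6, 11, "there"))  # -> 'hello there'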
c71badb26287d340170cecdbae8d913f4bdc14c6
4,432
def edit_mod():
    """Admin endpoint used for sub transfers."""
    if not current_user.is_admin():
        abort(403)
    form = EditModForm()

    try:
        sub = Sub.get(fn.Lower(Sub.name) == form.sub.data.lower())
    except Sub.DoesNotExist:
        return jsonify(status='error', error=[_("Sub does not exist")])

    try:
        user = User.get(fn.Lower(User.name) == form.user.data.lower())
    except User.DoesNotExist:
        return jsonify(status='error', error=[_("User does not exist")])

    if form.validate():
        try:
            sm = SubMod.get((SubMod.sid == sub.sid) & (SubMod.uid == user.uid))
            sm.power_level = 0
            sm.invite = False
            sm.save()
        except SubMod.DoesNotExist:
            SubMod.create(sid=sub.sid, uid=user.uid, power_level=0)
        misc.create_sublog(misc.LOG_TYPE_SUB_TRANSFER, current_user.uid, sub.sid,
                           comment=user.name, admin=True)
        return jsonify(status='ok')
    return jsonify(status="error", error=get_errors(form))
debab16603e2cfe412eb0f819753fb7571a9c803
4,433
def get_current_info(symbol_list, columns='*'):
    """Retrieve the latest data (15 minute delay) for the provided symbols."""
    columns = ','.join(columns)
    symbols = __format_symbol_list(symbol_list)

    yql = ('select %s from %s where symbol in (%s)'
           % (columns, FINANCE_TABLES['quotes'], symbols))
    response = execute_yql_query(yql)
    return __validate_response(response, 'quote')
a13df0f44b31ac091a5283958cdb1aa675fe9bdc
4,434
def dictionarify_recpat_data(recpat_data):
    """
    Convert a list of flat dictionaries (single-record dicts) into one
    dictionary. If the given data structure is already a dictionary,
    it is left unchanged.
    """
    return {track_id[0]: patterns[0] for track_id, patterns in
            [zip(*item.items()) for item in recpat_data]} \
        if not isinstance(recpat_data, dict) else recpat_data
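A usage sketch (hypothetical data): each single-record dict contributes one key/value pair.

flat = [{1: "pat_a"}, {2: "pat_b"}]
print(dictionarify_recpat_data(flat))  # -> {1: 'pat_a', 2: 'pat_b'}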
d1cdab68ab7445aebe1bbcce2f220c73d6db308f
4,435
def _get_qualified_name(workflow_name, job_name):
    """Construct a qualified name from workflow name and job name."""
    return workflow_name + _NAME_DELIMITER + job_name
29881480a9db33f18ff4b01abcdd1aaf39781f36
4,436
def normalize_each_time_frame(input_array):
    """Normalize each time frame.

    - Input: 3D numpy array
    - Output: 3D numpy array
    """
    for i in range(input_array.shape[0]):
        max_value = np.amax(input_array[i, :, :])
        if max_value != 0:
            input_array[i, :, :] = input_array[i, :, :] / max_value
    return input_array
bee7f41f17e4e24a654426f65c6a73c518abafca
4,437
def pre_process_data(full_data):
    """Pre-process data: drop invalid values."""
    clean_data = full_data[(full_data["Temp"] > -10)]
    return clean_data
6172d4a77f5805c60ae9e4f146da2bd8283beef0
4,438
def invalid_grant(_):
    """Handle the Invalid Grant error when doing OAuth."""
    del current_app.blueprints['google'].token
    flash(("InvalidGrant Error"), category="danger")
    return redirect(url_for('index'))
95b8b20d3d96b46387c6dd23ede9b54c6b056da1
4,439
import difflib


def diff_text(a, b):
    """
    Perform a diffing algorithm on two pieces of text. Returns a string of
    HTML containing the content of both texts with <span> tags inserted
    indicating where the differences are.
    """
    def tokenise(text):
        """
        Tokenise a string by splitting it into individual characters and
        grouping the alphanumeric ones together. This means that punctuation,
        whitespace, CJK characters, etc. become separate tokens and
        words/numbers are merged together to form bigger tokens.

        This makes the output of the diff easier to read as words are not
        broken up.
        """
        tokens = []
        current_token = ""

        for c in text:
            if c.isalnum():
                current_token += c
            else:
                if current_token:
                    tokens.append(current_token)
                    current_token = ""
                tokens.append(c)

        if current_token:
            tokens.append(current_token)

        return tokens

    a_tok = tokenise(a)
    b_tok = tokenise(b)
    sm = difflib.SequenceMatcher(lambda t: len(t) <= 4, a_tok, b_tok)

    changes = []

    for op, i1, i2, j1, j2 in sm.get_opcodes():
        if op == 'replace':
            for token in a_tok[i1:i2]:
                changes.append(('deletion', token))
            for token in b_tok[j1:j2]:
                changes.append(('addition', token))
        elif op == 'delete':
            for token in a_tok[i1:i2]:
                changes.append(('deletion', token))
        elif op == 'insert':
            for token in b_tok[j1:j2]:
                changes.append(('addition', token))
        elif op == 'equal':
            for token in a_tok[i1:i2]:
                changes.append(('equal', token))

    # Merge adjacent changes which have the same type. This just cleans up
    # the HTML a bit.
    merged_changes = []
    current_value = []
    current_change_type = None
    for change_type, value in changes:
        if change_type != current_change_type:
            if current_change_type is not None:
                merged_changes.append((current_change_type, ''.join(current_value)))
            current_value = []
            current_change_type = change_type
        current_value.append(value)

    if current_value:
        merged_changes.append((current_change_type, ''.join(current_value)))

    return TextDiff(merged_changes)
e15348e942ac3e6936872ec61f123a9241f49eba
4,440
import traceback
from operator import or_
from time import time
from urllib.parse import quote_plus

from common.aes import encrypt
from common.datatypes import PasswordResetToken
from config import AUTH_TOKEN, RESET_PASSWORD_EXPIRE_SECONDS
from utils import send_mail


def require_reset_password():
    """Request a password reset.

    Parameters:
    {
        "identifier": "user identifier"
    }
    Returns:
    {
        "code": 0,        # non-zero indicates an error
        "message": "qwq"  # the error message when code is non-zero
    }
    """
    if config.USE_PHONE_WHEN_REGISTER_AND_RESETPASSWD:
        return make_response(-1, message="Email-based password reset is currently disabled")
    if db.session.query(User).filter(User.email == request.form["identifier"]).count() > 1:
        return make_response(-1, message="This email belongs to multiple users; please use your username instead")
    query = db.session.query(User).filter(or_(
        User.email == request.form["identifier"],
        User.username == request.form["identifier"]))
    if query.count() == 0:
        return make_response(-1, message="Incorrect username or email")
    user: User = query.one()
    raw_json = PasswordResetToken(
        user.id, int(time()) + RESET_PASSWORD_EXPIRE_SECONDS, AUTH_TOKEN).as_json()
    to_send_token = encrypt(config.AUTH_PASSWORD, raw_json)
    to_send_token = quote_plus(quote_plus(to_send_token))
    try:
        send_mail(config.RESET_PASSWORD_EMAIL.format(
            reset_token=to_send_token), "Reset password", user.email)
    except Exception:
        return make_response(-1, message=traceback.format_exc())
    return make_response(0, message="A password-reset email has been sent to your mailbox; please also check the spam folder")
aa1c14755485fe3ac5fc294e43fa1d4e610e0a83
4,441
def coerce_affine(affine, *, ndim, name=None):
    """Coerce a user input into an affine transform object.

    If the input is already an affine transform object, that same object is
    returned with a name change if the given name is not None. If the input
    is None, an identity affine transform object of the given dimensionality
    is returned.

    Parameters
    ----------
    affine : array-like or napari.utils.transforms.Affine
        An existing affine transform object or an array-like that is its
        transform matrix.
    ndim : int
        The desired dimensionality of the transform. Ignored if affine is an
        Affine transform object.
    name : str
        The desired name of the transform.

    Returns
    -------
    napari.utils.transforms.Affine
        The input coerced into an affine transform object.
    """
    if affine is None:
        affine = Affine(affine_matrix=np.eye(ndim + 1), ndim=ndim)
    elif isinstance(affine, np.ndarray):
        affine = Affine(affine_matrix=affine, ndim=ndim)
    elif isinstance(affine, list):
        affine = Affine(affine_matrix=np.array(affine), ndim=ndim)
    elif not isinstance(affine, Affine):
        raise TypeError(
            trans._(
                'affine input not recognized. must be either napari.utils.transforms.Affine or ndarray. Got {dtype}',
                deferred=True,
                dtype=type(affine),
            )
        )
    if name is not None:
        affine.name = name
    return affine
66900e32b83100004d2ea62a742fc0afe8a26cbb
4,442
def provider_filtered_machines(request, provider_uuid, identity_uuid,
                               request_user=None):
    """Return all filtered machines. Uses the most common, default
    filtering method.
    """
    identity = Identity.objects.filter(uuid=identity_uuid)
    if not identity:
        raise ObjectDoesNotExist()
    try:
        esh_driver = prepare_driver(request, provider_uuid, identity_uuid)
    except Exception:
        # TODO: Observe the change of 'Fail loudly' here
        # and clean up the noise, rather than hide it.
        logger.exception(
            "Driver could not be prepared - Provider: %s , Identity: %s"
            % (provider_uuid, identity_uuid))
        esh_driver = None
    if not esh_driver:
        raise LibcloudInvalidCredsError()
    logger.debug(esh_driver)
    return list_filtered_machines(esh_driver, provider_uuid, request_user)
f6251e9b72649f37489c5a9d97d7b3835d400cbe
4,443
def known_peaks():
    """Return a list of Peak instances with data (identified)."""
    peak1 = Peak(
        name="Test1Known",
        r_time=5.00,
        mz=867.1391,
        charge="+",
        inchi_key="IRPOHFRNKHKIQA-UHFFFAOYSA-N",
    )
    peak2 = Peak(
        name="Test2Known",
        r_time=8.00,
        mz=260.0297,
        charge="-",
        inchi_key="HXXFSFRBOHSIMQ-FPRJBGLDSA-N",
    )
    return [peak1, peak2]
3f7d5eb5b16d61f09c0c10f32e9d8d40324e2d5d
4,444
def explode_sheet_music(sheet_music):
    """Split unformatted sheet music into formatted lines, respecting
    LINE_LENGTH_LIM and LINES_LIMIT, and return a list of such lines.
    """
    split_music = sheet_music.split(',')
    split_music = list(map(lambda note: note + ',', split_music))
    split_list = []
    counter = 0
    line_counter = 1
    for note in split_music:
        if line_counter > LINES_LIMIT - 1:
            break
        if counter + len(note) > LINE_LENGTH_LIM - 2:
            split_list[-1] = split_list[-1].rstrip(',')
            split_list[-1] += END_OF_LINE_CHAR
            counter = 0
            line_counter += 1
        split_list.append(note)
        counter += len(note)
    return split_list
f89ae58a0deb315c61419bd381cd0bf84f079c3e
4,445
import numpy as np  # assumed import; np is used but not imported in the snippet


def norm_coefficient(m, n):
    """Calculate the normalization coefficient for the (m, n) Zernike mode.

    Parameters
    ----------
    m : int
        m-th azimuthal Zernike index
    n : int
        n-th radial Zernike index

    Returns
    -------
    norm_coeff : float
        Noll normalization coefficient
    """
    norm_coeff = np.sqrt(2 * (n + 1) / (1 + (m == 0)))
    return norm_coeff
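Two worked values: the m == 0 case halves the argument, so (0, 0) gives sqrt(2/2) = 1.0 and (1, 1) gives sqrt(4) = 2.0.

print(norm_coefficient(0, 0))  # -> 1.0
print(norm_coefficient(1, 1))  # -> 2.0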
1632ffac5e771e4ab16b3f7918d9543ffd67171e
4,446
def get_waveglow(ckpt_url):
    """Initialize the WaveGlow vocoder model with weights.

    Used to generate realistic audio from a mel-spectrogram.
    """
    wn_config = {
        'n_layers': hp.wg_n_layers,
        'n_channels': hp.wg_n_channels,
        'kernel_size': hp.wg_kernel_size
    }
    audio_config = {
        'wav_value': hp.wg_wav_value,
        'sampling_rate': hp.wg_sampling_rate
    }
    model = WaveGlow(
        n_mel_channels=hp.wg_n_mel_channels,
        n_flows=hp.wg_n_flows,
        n_group=hp.wg_n_group,
        n_early_every=hp.wg_n_early_every,
        n_early_size=hp.wg_n_early_size,
        wn_config=wn_config
    )
    load_checkpoint(ckpt_url, model)
    model.set_train(False)
    return model, audio_config
a5b494299fae98be2bb5f764ed7a53fc42d36eff
4,447
def user_exists(keystone, user):
    """Return True if the user already exists."""
    return user in [x.name for x in keystone.users.list()]
17d99e12c0fc128607a815f0b4ab9897c5d45578
4,448
import itertools
from typing import Dict
from typing import List


def gen_cartesian_product(*args: List[Dict]) -> List[Dict]:
    """Generate the Cartesian product of the given lists
    (presumably used for parametrization).

    Args:
        args (list of list): lists to be combined via Cartesian product

    Returns:
        list: Cartesian product as a list

    Examples:
        >>> arg1 = [{"a": 1}, {"a": 2}]
        >>> arg2 = [{"x": 111, "y": 112}, {"x": 121, "y": 122}]
        >>> args = [arg1, arg2]
        >>> gen_cartesian_product(*args)
        >>> # same as below
        >>> gen_cartesian_product(arg1, arg2)
        [
            {'a': 1, 'x': 111, 'y': 112},
            {'a': 1, 'x': 121, 'y': 122},
            {'a': 2, 'x': 111, 'y': 112},
            {'a': 2, 'x': 121, 'y': 122}
        ]
    """
    if not args:
        return []
    elif len(args) == 1:
        return args[0]

    # Past the checks above, we only get here when len(args) >= 2.
    product_list = []
    # itertools.product(*args) yields the Cartesian product, equivalent to
    # nested for loops; each item is a tuple of dicts, e.g.
    #   ({'a': 1}, {'x': 111, 'y': 112}), ({'a': 1}, {'x': 121, 'y': 122}), ...
    for product_item_tuple in itertools.product(*args):
        product_item_dict = {}
        # merge the dicts of the tuple into a single dict
        for item in product_item_tuple:
            product_item_dict.update(item)
        product_list.append(product_item_dict)

    return product_list
cbe85f440f399b523aa70bc10733ea175dc93f7a
4,449
import pandas as pd  # assumed import; pd is used but not imported in the snippet


def get_234_df(x):
    """Get the dataframe for models 2.1, 2.2 and 2.3.

    input: x, the subCategory we want
    output: the dataframe only for x
    """
    styles = pd.read_csv("styles.csv", error_bad_lines=False)
    styles = styles.drop(["productDisplayName"], axis=1)
    styles = styles.drop(["year"], axis=1)
    styles = styles[(styles.masterCategory == 'Apparel') |
                    (styles.masterCategory == 'Footwear')]
    styles = styles.drop(styles[styles["subCategory"] == "Innerwear"].index)
    styles = styles.dropna()
    styles = df_drop(styles, "subCategory",
                     ["Apparel Set", "Dress", "Loungewear and Nightwear",
                      "Saree", "Socks"])
    styles["subCategory"] = styles["subCategory"].transform(
        lambda x: "Footwear" if (x in ["Shoes", "Flip Flops", "Sandal"]) else x)
    styles = styles.drop(labels=[6695, 16194, 32309, 36381, 40000], axis=0)
    styles = styles[styles.subCategory == x]
    group_color(styles)
    styles.baseColour = styles.colorgroup
    return styles
c9d456ae058492e5e242bbde2288885158681f98
4,450
def appropriate_bond_orders(params, smrts_mol, smrts):
    """Checks if a SMARTS substring specification has appropriate bond orders
    given the user-specified mode.

    :param params: A dictionary of the user parameters and filters.
    :type params: dict
    :param smrts_mol: RDKit mol object of the SMARTS string.
    :type smrts_mol: RDKit mol object.
    :param smrts: The SMARTS string.
    :type smrts: str
    :return: 'True' if it validates, 'False' otherwise.
    :rtype: bool
    """
    # Test if double bonds are inappropriately specified.
    if params["mode"] == "NONE" and (
        ".pdb" in params["ligand_exts"] or ".pdbqt" in params["ligand_exts"]
    ):
        bond_orders = [b.GetBondTypeAsDouble() for b in smrts_mol.GetBonds()]
        bond_orders = [o for o in bond_orders if o != 1.0]
        if len(bond_orders) > 0:
            # So it has bonds with orders greater than 1
            output.error(
                "When processing PDB- and PDBQT-formatted ligands in NONE "
                + "mode, LigGrep ignores bond orders and simply "
                + "assumes that all appropriately juxtaposed atoms are "
                + "connected by single bonds. But one (or more) of your "
                + "filters describes a substructure with bonds of higher "
                + "orders: " + smrts,
                params,
            )
            return False
    return True
045abda277716812694cc1093256742e1d67a016
4,451
def train(model, train_path, val_path, steps_per_epoch, batch_size,
          records_path):
    """Train the Keras graph model.

    Parameters:
        model (keras Model): The Model defined in build_model
        train_path (str): Path to training data
        val_path (str): Path to validation data
        steps_per_epoch (int): len(training_data) / batch_size
        batch_size (int): Size of mini-batches used during training
        records_path (str): Path + prefix to output directory

    Returns:
        loss (ndarray): An array with the validation loss at each epoch
    """
    adam = Adam(lr=0.001)
    model.compile(loss='binary_crossentropy', optimizer=adam)
    train_generator = data_generator(train_path, batch_size, seqlen=500)
    val_generator = data_generator(val_path, 200000, seqlen=500)
    validation_data = next(val_generator)
    precision_recall_history = PrecisionRecall(validation_data)
    # adding check-pointing
    checkpointer = ModelCheckpoint(records_path + 'model_epoch{epoch}.hdf5',
                                   verbose=1, save_best_only=False)
    # defining parameters for early stopping
    # earlystop = EarlyStopping(monitor='val_loss', mode='min', verbose=1,
    #                           patience=5)
    # training the model..
    hist = model.fit_generator(epochs=15, steps_per_epoch=steps_per_epoch,
                               generator=train_generator,
                               validation_data=validation_data,
                               callbacks=[precision_recall_history,
                                          checkpointer])
    loss, val_pr = save_metrics(hist, precision_recall_history,
                                records_path=records_path)
    return loss, val_pr
24a8080a8b4738f7eb32846729b006ca2237a576
4,452
def Mcnu_to_m1m2(Mc, nu):
    """Convert a (chirp mass, symmetric mass ratio) pair to (m1, m2)."""
    q = nu_to_q(nu)
    M = Mcq_to_M(Mc, q)
    return Mq_to_m1m2(M, q)
8b4eb6e49549607bda0ea9a17baec8c4d0b38cb6
4,453
import functools


def _AccumulateActions(args):
    """Given program arguments, determines what actions we want to run.

    Returns [(ResultsReportCtor, str)], where ResultsReportCtor can construct
    a ResultsReport, and the str is the file extension for the given report.
    """
    results = []
    # The order of these is arbitrary.
    if args.json:
        results.append((JSONResultsReport, 'json'))
    if args.text:
        results.append((TextResultsReport, 'txt'))
    if args.email:
        email_ctor = functools.partial(TextResultsReport, email=True)
        results.append((email_ctor, 'email'))
    # We emit HTML if nothing else was specified.
    if args.html or not results:
        results.append((HTMLResultsReport, 'html'))
    return results
73925fe55e6986e1222a5e88f804caaa9793044a
4,454
def build_predictions_dictionary(data, class_label_map):
    """Builds a predictions dictionary from predictions data in a CSV file.

    Args:
        data: Pandas DataFrame with the predictions data for a single image.
        class_label_map: Class labelmap from string label name to an integer.

    Returns:
        Dictionary with keys suitable for passing to
        OpenImagesDetectionChallengeEvaluator.add_single_detected_image_info:
            standard_fields.DetectionResultFields.detection_boxes: float32
                numpy array of shape [num_boxes, 4] containing `num_boxes`
                detection boxes of the format [ymin, xmin, ymax, xmax] in
                absolute image coordinates.
            standard_fields.DetectionResultFields.detection_scores: float32
                numpy array of shape [num_boxes] containing detection scores
                for the boxes.
            standard_fields.DetectionResultFields.detection_classes: integer
                numpy array of shape [num_boxes] containing 1-indexed
                detection classes for the boxes.
    """
    dictionary = {
        standard_fields.DetectionResultFields.detection_classes:
            data['LabelName'].map(lambda x: class_label_map[x]).to_numpy(),
        standard_fields.DetectionResultFields.detection_scores:
            data['Score'].to_numpy().astype(float)
    }

    if 'Mask' in data:
        segments, boxes = _decode_raw_data_into_masks_and_boxes(
            data['Mask'], data['ImageWidth'], data['ImageHeight'])
        dictionary[standard_fields.DetectionResultFields.detection_masks] = segments
        dictionary[standard_fields.DetectionResultFields.detection_boxes] = boxes
    else:
        dictionary[standard_fields.DetectionResultFields.detection_boxes] = data[[
            'YMin', 'XMin', 'YMax', 'XMax'
        ]].to_numpy().astype(float)

    return dictionary
738e1c9c4568bc689ecda9765c3412d36f2d73ec
4,455
def create_file_link(link_id, file_id, parent_share_id, parent_datastore_id):
    """DB wrapper to create a link between a file and a datastore or a share.

    Takes care of "degenerated" tree structures (e.g. a child has two parents).
    In addition, checks if the link already exists, as this is a crucial part
    of the access rights system.

    :param link_id:
    :param file_id:
    :param parent_share_id:
    :param parent_datastore_id:
    :return:
    """
    try:
        File_Link.objects.create(
            link_id=link_id,
            file_id=file_id,
            parent_datastore_id=parent_datastore_id,
            parent_share_id=parent_share_id
        )
    except Exception:  # the original used a bare `except:`; narrowed slightly
        return False
    return True
0c4abe1d5aa4bce8bd489f8bec1ae900a9194631
4,456
from collections import defaultdict  # assumed import; defaultdict is used below


def deptree(lines):
    """Build a tree of what step depends on what other step(s).

    Test input becomes
    {'A': set(['C']), 'C': set([]), 'B': set(['A']),
     'E': set(['B', 'D', 'F']), 'D': set(['A']), 'F': set(['C'])}

    A depends on C
    B depends on A
    C depends on nothing (starting point)
    D depends on A
    E depends on B, D, F
    F depends on C
    """
    coll = defaultdict(set)
    for line in lines:
        parts = line.split()
        coll[parts[7]].add(parts[1])
        if parts[1] not in coll:
            coll[parts[1]] = set()
    return dict(coll)
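A usage sketch on instruction lines of the format the function assumes (word index 1 is the prerequisite, word index 7 the dependent step):

lines = [
    "Step C must be finished before step A can begin.",
    "Step C must be finished before step F can begin.",
    "Step A must be finished before step B can begin.",
]
print(deptree(lines))  # -> {'A': {'C'}, 'C': set(), 'F': {'C'}, 'B': {'A'}}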
9a435a0e78dd3a68c97df4bcd2e03583841de216
4,457
from datetime import datetime


def get_datetime(time_str, model="0"):
    """Format a time string: convert '20200120.110227' to
    '2020-01-20 11:02:27' and return a datetime object.
    """
    if model == "0":
        time_str = get_time(time_str)
    # the original called datetime.datetime.strptime, which fails under
    # `from datetime import datetime`; datetime.strptime is the intent
    time = datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S")
    return time
568c46efab9366f64fc0e286cf9876cd48e7a9bb
4,458
def gather_parent_cnvs(vcf, fa, mo):
    """Create BEDTools corresponding to parent CNVs for coverage-based
    inheritance.
    """
    cnv_format = '{0}\t{1}\t{2}\t{3}\t{4}\n'
    fa_cnvs = ''
    mo_cnvs = ''

    for record in vcf:
        # Do not include variants from sex chromosomes
        if record.chrom in sex_chroms:
            continue

        # Process biallelic CNVs
        if record.info['SVTYPE'] in 'DEL DUP'.split() \
                and 'MULTIALLELIC' not in record.filter:
            # Father
            fa_ac = get_AC(get_GT(record, fa))
            if fa_ac != 'NA':
                if int(fa_ac) > 0:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop),
                                                record.info['SVTYPE'], fa_ac)
                    fa_cnvs = fa_cnvs + new_cnv
            # Mother
            mo_ac = get_AC(get_GT(record, mo))
            if mo_ac != 'NA':
                if int(mo_ac) > 0:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop),
                                                record.info['SVTYPE'], mo_ac)
                    mo_cnvs = mo_cnvs + new_cnv

        # Process multiallelic CNVs
        if record.info['SVTYPE'] == 'MCNV' and 'MULTIALLELIC' in record.filter:
            # Father
            fa_ac = get_GT(record, fa).split('/')[1]
            if fa_ac != 'None':
                fa_ac = int(fa_ac)
                if fa_ac < 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DEL',
                                                str(2 - fa_ac))
                    fa_cnvs = fa_cnvs + new_cnv
                elif fa_ac > 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DUP',
                                                str(fa_ac - 2))
                    fa_cnvs = fa_cnvs + new_cnv
            # Mother
            mo_ac = get_GT(record, mo).split('/')[1]
            if mo_ac != 'None':
                mo_ac = int(mo_ac)
                if mo_ac < 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DEL',
                                                str(2 - mo_ac))
                    mo_cnvs = mo_cnvs + new_cnv
                elif mo_ac > 2:
                    new_cnv = cnv_format.format(record.chrom, str(record.pos),
                                                str(record.stop), 'DUP',
                                                str(mo_ac - 2))
                    mo_cnvs = mo_cnvs + new_cnv

    fa_cnvs = pbt.BedTool(fa_cnvs, from_string=True)
    mo_cnvs = pbt.BedTool(mo_cnvs, from_string=True)

    return fa_cnvs, mo_cnvs
2d685586a917bbd94f87758221b4d06c2d6ad7c1
4,459
import json


def create_images():
    """Create new images.

    Internal Parameters:
        image (FileStorage): Image

    Returns:
        success (boolean)
        image (list)
    """
    # vars
    image_file = request.files.get('image')
    validate_image_data({"image": image_file})
    image_url_set = create_img_set(image_file)

    # create image
    image = Image(**{
        "user_id": auth_user_id(),
        "url": json.dumps(image_url_set)
    })

    try:
        image.insert()
        # return the result
        return jsonify({
            'success': True,
            'image': image.format()
        })
    except Exception:
        abort(400)
682bb2f6044265fbd6c29c4ff6581d6bc2469edb
4,460
from pathlib import Path
from typing import Optional
from typing import Union


def docx2python(
    docx_filename: Union[str, Path],
    image_folder: Optional[str] = None,
    html: bool = False,
    paragraph_styles: bool = False,
    extract_image: bool = None,
) -> DocxContent:
    """Unzip a docx file and extract contents.

    :param docx_filename: path to a docx file
    :param image_folder: optionally specify an image folder
        (images in docx will be copied to this folder)
    :param html: bool, extract some formatting as html
    :param paragraph_styles: prepend the paragraph's style (if any, else "")
        to each paragraph. This will only be useful with ``*_runs``
        attributes.
    :param extract_image: bool, extract images from document (default True)
    :return: DocxContent object
    """
    if extract_image is not None:
        warn(
            "'extract_image' is no longer a valid argument for docx2python. If an "
            "image_folder is given as an argument to docx2python, images will be "
            "written to that folder. A folder can be provided later with "
            "``docx2python(filename).write_images(image_folder)``. Image files are "
            "available as before with ``docx2text(filename).images`` attribute."
        )
    docx_context = DocxReader(docx_filename, html, paragraph_styles)
    docx_content = DocxContent(docx_context, locals())
    if image_folder:
        _ = docx_content.images
    return docx_content
557ba8502b62ffc771a7d3b6f88a8b769dd55d68
4,461
def parcel_analysis(con_imgs, parcel_img, msk_img=None, vcon_imgs=None,
                    design_matrix=None, cvect=None, fwhm=8,
                    smooth_method='default', res_path=None):
    """Helper function for Bayesian parcel-based analysis.

    Given a sequence of independent images registered to a common space
    (for instance, a set of contrast images from a first-level fMRI
    analysis), perform a second-level analysis assuming constant effects
    throughout parcels defined from a given label image in reference space.
    Specifically, a model of the following form is assumed:

        Y = X * beta + variability,

    where Y denotes the input image sequence, X is a design matrix, and
    beta are parcel-wise parameter vectors. The algorithm computes the
    Bayesian posterior probability of cvect'*beta, where cvect is a given
    contrast vector, in each parcel using an expectation propagation scheme.

    Parameters
    ----------
    con_imgs: sequence of nipy-like images
        Images input to the group analysis.
    parcel_img: nipy-like image
        Label image where each label codes for a parcel.
    msk_img: nipy-like image, optional
        Binary mask to restrict analysis. By default, analysis is carried
        out on all parcels with nonzero value.
    vcon_imgs: sequence of nipy-like images, optional
        First-level variance estimates corresponding to `con_imgs`. This is
        useful if the input images are "noisy". By default, first-level
        variances are assumed to be zero.
    design_matrix: array, optional
        If None, a one-sample analysis model is used. Otherwise, an array
        with shape (n, p) where `n` matches the number of input scans, and
        `p` is the number of regressors.
    cvect: array, optional
        Contrast vector of interest. The method makes an inference on the
        contrast defined as the dot product cvect'*beta, where beta are the
        unknown parcel-wise effects. If None, `cvect` is assumed to be
        np.array((1,)). However, the `cvect` argument is mandatory if
        `design_matrix` is provided.
    fwhm: float, optional
        A parameter that represents the localization uncertainty in
        reference space in terms of the full width at half maximum of an
        isotropic Gaussian kernel.
    smooth_method: str, optional
        One of 'default' and 'spm'. Setting `smooth_method=spm` results in
        simply smoothing the input images using a Gaussian kernel, while
        the default method involves more complex smoothing in order to
        propagate spatial uncertainty into the inference process.
    res_path: str, optional
        An existing path to write output images. If None, no output is
        written.

    Returns
    -------
    pmap_mu_img: nipy image
        Image of posterior contrast means for each parcel.
    pmap_prob_img: nipy image
        Corresponding image of posterior probabilities of positive contrast.
    """
    p = ParcelAnalysis(con_imgs, parcel_img, parcel_info=None,
                       msk_img=msk_img, vcon_imgs=vcon_imgs,
                       design_matrix=design_matrix, cvect=cvect,
                       fwhm=fwhm, smooth_method=smooth_method,
                       res_path=res_path)
    return p.parcel_maps()
8abface7ad72f5ca2679dc9a8ea6cedd93f681a5
4,462
from functools import wraps  # assumed import; wraps is used but not imported in the snippet


def memoize(fn):
    """Simple memoization decorator for functions and methods; assumes that
    all arguments to the function can be hashed and compared.
    """
    memoized_values = {}

    @wraps(fn)
    def wrapped_fn(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        try:
            return memoized_values[key]
        except KeyError:
            memoized_values[key] = fn(*args, **kwargs)
            return memoized_values[key]

    return wrapped_fn
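A usage sketch: the wrapped body runs once per distinct argument tuple.

@memoize
def slow_square(x):
    print("computing", x)
    return x * x

slow_square(4)  # prints "computing 4"
slow_square(4)  # served from the cache; prints nothing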
2a48fad065e04a7eed9b9865adc0640f2a7cff9f
4,463
from argparse import Namespace  # assumed import for the annotation

import pandas as pd  # assumed import; pd is used but not imported in the snippet
import torch
from pydantic import BaseModel  # noqa: E0611


def validate(
    args: Namespace,
    model: BaseModel
) -> pd.DataFrame:
    """Perform the validation.

    Parameters
    ----------
    args : Namespace
        Arguments to configure the model and the validation.
    model : BaseModel
        The model to be used for validation.

    Returns
    -------
    pd.DataFrame
        A DataFrame with the metric results.

    See Also
    --------
    ptlflow.models.base_model.base_model.BaseModel : The parent class of the
        available models.
    """
    model.eval()
    if torch.cuda.is_available():
        model = model.cuda()

    dataloaders = model.val_dataloader()
    dataloaders = {model.val_dataloader_names[i]: dataloaders[i]
                   for i in range(len(dataloaders))}

    metrics_df = pd.DataFrame()
    metrics_df['model'] = [args.model]
    metrics_df['checkpoint'] = [args.pretrained_ckpt]

    for dataset_name, dl in dataloaders.items():
        metrics_mean = validate_one_dataloader(args, model, dl, dataset_name)
        metrics_df[[f'{dataset_name}-{k}' for k in metrics_mean.keys()]] = \
            list(metrics_mean.values())

    args.output_path.mkdir(parents=True, exist_ok=True)
    metrics_df.T.to_csv(args.output_path / 'metrics.csv', header=False)

    metrics_df = metrics_df.round(3)
    return metrics_df
456ec24e1639970db285e260028e9ba3bd4d2e31
4,464
def stats(last_day=None, timeframe=None, dates_sources=None):
    """See :class:`bgpranking.api.get_stats`."""
    query = {'method': 'stats'}
    query.update({'last_day': last_day, 'timeframe': timeframe,
                  'dates_sources': dates_sources})
    return __prepare_request(query)
5da42848926372fa5fe90338529ab47396203fd8
4,465
def restore_purchases() -> None:
    """restore_purchases() -> None

    (internal)
    """
    return None
7f047cdfe892bd724c2203d846762c3b3786d7c2
4,466
import time


def sim_mat(fc7_feats):
    """
    Given a matrix of features, generate the similarity matrix S and sparsify it.

    :param fc7_feats: the fc7 features
    :return: matrix_S - the sparsified matrix S
    """
    t = time.time()
    pdist_ = spatial.distance.pdist(fc7_feats)
    print('Created distance matrix' + ' ' + str(time.time() - t) + ' sec')

    t = time.time()
    dist_mat = spatial.distance.squareform(pdist_)
    print('Created square distance matrix' + ' ' + str(time.time() - t) + ' sec')
    del pdist_

    t = time.time()
    sigmas = np.sort(dist_mat, axis=1)[:, 7] + 1e-16
    matrice_prodotti_sigma = np.dot(sigmas[:, np.newaxis], sigmas[np.newaxis, :])
    print('Generated Sigmas' + ' ' + str(time.time() - t) + ' sec')

    t = time.time()
    dist_mat /= -matrice_prodotti_sigma
    print('Computed dists/-sigmas' + ' ' + str(time.time() - t) + ' sec')
    del matrice_prodotti_sigma

    t = time.time()
    # in-place exponential, equivalent to W = np.exp(-(dist_mat / matrice_prodotti_sigma))
    W = np.exp(dist_mat, dist_mat)
    np.fill_diagonal(W, 0.)

    # sparsify the matrix
    k = int(np.floor(np.log2(fc7_feats.shape[0])) + 1)
    n = W.shape[0]
    print('Created inplace similarity matrix' + ' ' + str(time.time() - t) + ' sec')

    t = time.time()
    for x in W:
        x[np.argpartition(x, n - k)[:(n - k)]] = 0.0
    print('Sparsify the matrix' + ' ' + str(time.time() - t) + ' sec')

    t = time.time()
    m1 = W[np.triu_indices(n, k=1)]
    m2 = W.T[np.triu_indices(n, k=1)]
    W = spatial.distance.squareform(np.maximum(m1, m2))
    print('Symmetrized the similarity matrix' + ' ' + str(time.time() - t) + ' sec')

    return W
6b896a4912f4a9a8fc765674ad47e17aea73bfa0
4,467
def text_split(in_text, insert_points, char_set):
    """Return the input text split into text and nonce strings."""
    nonce_key = []
    encrypted_nonce = ""
    in_list = list(in_text)

    for pos in range(3967):
        if insert_points[pos] >= len(in_list) - 1:
            point = len(in_list) - 2
        else:
            point = insert_points[pos]
        char = in_list[point]
        in_list.pop(point)
        nonce_key.append(char)
        if char != char_set[-1]:  # was `is not`, which is unreliable for strings
            break

    length = ((len(nonce_key) - 1) * (len(char_set) - 2)) \
        + char_set.index(nonce_key[len(nonce_key) - 1])

    for pos in range(length):
        if insert_points[pos + len(nonce_key)] >= len(in_list) - 1:
            point = len(in_list) - 2
        else:
            point = insert_points[pos + len(nonce_key)]
        char = in_list[point]
        in_list.pop(point)
        encrypted_nonce = encrypted_nonce + char

    return "".join(in_list), encrypted_nonce
15f496513e63236b0df7e2d8a8949a8b2e632af4
4,468
import decimal

import numpy as np  # assumed import; np is used but not imported in the snippet


def f_approximation(g_matrix, coefficients_array):
    """Return a vector with the approximated value of f, given the
    coefficients a_k.
    """
    # PRECSION and ROUNDING_MODE are assumed to be module-level constants.
    decimal.getcontext().prec = PRECSION
    decimal.getcontext().rounding = ROUNDING_MODE

    num_of_xs = len(g_matrix[0])
    num_of_coefficients = len(g_matrix)
    f_approx_array = np.full(num_of_xs, decimal.Decimal('0'))

    for i in range(0, num_of_xs):
        approx_sum = 0
        for k in range(0, num_of_coefficients):
            approx_sum += coefficients_array[k] * g_matrix[k][i]
        f_approx_array[i] = approx_sum

    return f_approx_array
f5f8ce78b07e877c521a6374548e21273c61dcee
4,469
def _module_exists(module_name):
    """
    Checks if a module exists.

    :param str module_name: module to check existence of

    :returns: **True** if module exists and **False** otherwise
    """
    try:
        __import__(module_name)
        return True
    except ImportError:
        return False
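A usage sketch:

print(_module_exists("json"))          # -> True
print(_module_exists("no_such_mod"))   # -> False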
8f3ed2e97ee6dbb41d6e84e9e5595ec8b6f9b339
4,470
def users(request):
    """Show a list of users and their puzzles."""
    context = {'user_list': []}
    for user in User.objects.all().order_by('username'):
        objs = Puzzle.objects.filter(user=user,
                                     pub_date__lte=timezone.now()).order_by('-number')
        if objs:
            puzzle_list = []
            for puz in objs:
                puzzle_list.append({'number': puz.number, 'date': get_date_string(puz)})
            context['user_list'].append({'name': user.username, 'puzzles': puzzle_list})
    return render(request, 'puzzle/users.html', context)
abf953394af6baff08bedf252796eb0d89cba3f4
4,471
import numpy as np

from xidplus.stan_fit import get_stancode


def MIPS_SPIRE_gen(phot_priors,sed_prior_model,chains=4,seed=5363,iter=1000,max_treedepth=10,adapt_delta=0.8):
    """
    Fit the MIPS 24um band and the three SPIRE bands

    :param phot_priors: list of xidplus.prior class objects. Order (MIPS24,SPIRE250,SPIRE350,SPIRE500)
    :param sed_prior_model: xidplus.sed.sed_prior class
    :param chains: number of chains
    :param iter: number of iterations
    :return: pystan fit object
    """

    prior24=phot_priors[0]
    prior250=phot_priors[1]
    prior350=phot_priors[2]
    prior500=phot_priors[3]

    #input data into a dictionary

    XID_data = {
        'nsrc': prior250.nsrc,
        'bkg_prior': [prior24.bkg[0],prior250.bkg[0], prior350.bkg[0], prior500.bkg[0]],
        'bkg_prior_sig': [prior24.bkg[1],prior250.bkg[1], prior350.bkg[1], prior500.bkg[1]],
        'conf_prior_sig': [0.0001, 0.1, 0.1, 0.1],
        'z_median': prior24.z_median,
        'z_sig': prior24.z_sig,
        'npix_psw': prior250.snpix,
        'nnz_psw': prior250.amat_data.size,
        'db_psw': prior250.sim,
        'sigma_psw': prior250.snim,
        'Val_psw': prior250.amat_data,
        'Row_psw': prior250.amat_row.astype(np.int64),  # np.long was removed in NumPy 1.24
        'Col_psw': prior250.amat_col.astype(np.int64),
        'npix_pmw': prior350.snpix,
        'nnz_pmw': prior350.amat_data.size,
        'db_pmw': prior350.sim,
        'sigma_pmw': prior350.snim,
        'Val_pmw': prior350.amat_data,
        'Row_pmw': prior350.amat_row.astype(np.int64),
        'Col_pmw': prior350.amat_col.astype(np.int64),
        'npix_plw': prior500.snpix,
        'nnz_plw': prior500.amat_data.size,
        'db_plw': prior500.sim,
        'sigma_plw': prior500.snim,
        'Val_plw': prior500.amat_data,
        'Row_plw': prior500.amat_row.astype(np.int64),
        'Col_plw': prior500.amat_col.astype(np.int64),
        'npix_mips24': prior24.snpix,
        'nnz_mips24': prior24.amat_data.size,
        'db_mips24': prior24.sim,
        'sigma_mips24': prior24.snim,
        'Val_mips24': prior24.amat_data,
        'Row_mips24': prior24.amat_row.astype(np.int64),
        'Col_mips24': prior24.amat_col.astype(np.int64),
        'nTemp': sed_prior_model.shape[0],
        'nz': sed_prior_model.shape[2],
        'nband': sed_prior_model.shape[1],
        'SEDs': sed_prior_model,
    }

    #see if model has already been compiled. If not, compile and save it
    model_file='/XID+MIPS_SPIRE_SED_gen'
    sm = get_stancode(model_file)

    fit = sm.sampling(data=XID_data,iter=iter,chains=chains,seed=seed,verbose=True,control=dict(max_treedepth=max_treedepth,adapt_delta=adapt_delta))
    #return fit data
    return fit
be3d168e6a7a5a8159e83371059cfcbd1f0c187e
4,472
def check_cstr(solver, indiv):
    """Check the number of constraint violations of the individual

    Parameters
    ----------
    solver : Solver
        Global optimization problem solver
    indiv : individual
        Individual of the population

    Returns
    -------
    is_feasible : bool
        Individual feasibility
    """

    # An invalid simulation violates every constraint
    if not indiv.is_simu_valid:
        indiv.cstr_viol = len(solver.problem.constraint)
        return True  # To avoid adding further errors to an infeasible individual

    # Browse constraints
    for constraint in solver.problem.constraint:
        # Compute the value to compare
        var_val = constraint.get_variable(indiv.output)

        # Compare the value with the constraint
        type_const = constraint.type_const
        if type_const == "<=":
            if var_val > constraint.value:
                indiv.cstr_viol += 1
        elif type_const in ["==", "="]:
            if var_val != constraint.value:
                indiv.cstr_viol += 1
        elif type_const == ">=":
            if var_val < constraint.value:
                indiv.cstr_viol += 1
        elif type_const == "<":
            if var_val >= constraint.value:
                indiv.cstr_viol += 1
        elif type_const == ">":
            if var_val <= constraint.value:
                indiv.cstr_viol += 1
        else:
            raise ValueError("Wrong type of constraint")

    return indiv.cstr_viol == 0
2aa52d2badfb45d8e289f8314700648ddc621252
4,473
def _FixFsSelectionBit(key, expected): """Write a repair script to fix a bad fsSelection bit. Args: key: The name of an fsSelection flag, eg 'ITALIC' or 'BOLD'. expected: Expected value, true/false, of the flag. Returns: A python script to fix the problem. """ if not _ShouldFix('fsSelection'): return None op = '|=' verb = 'set' mask = bin(fonts.FsSelectionMask(key)) if not expected: op = '&=' verb = 'unset' mask = '~' + mask return 'ttf[\'OS/2\'].fsSelection %s %s # %s %s' % (op, mask, verb, key)
6dda9ccbb565857c4187afc4dada6dd84653b427
4,474
import numpy as np
from scipy.ndimage import binary_dilation


def dilation_dist(path_dilation, n_dilate=None):
    """
    Compute surface of distances with dilation
    :param path_dilation: binary array with zeros everywhere except for paths
    :param n_dilate: how often to do dilation --> defines the radius of the corridor
    :returns: 2dim array of same shape as path_dilation, with values from
              0 (never reached by any dilation) up to n_dilate + 1 (on the path itself)
    """
    saved_arrs = [path_dilation]
    if n_dilate is None:
        # compute number of iterations: maximum distance of pixel to line
        x_coords, y_coords = np.where(path_dilation)
        x_len, y_len = path_dilation.shape
        # dilate as much as the largest distance from the sides
        n_dilate = max(
            [
                np.min(x_coords), x_len - np.max(x_coords),
                np.min(y_coords), y_len - np.max(y_coords)
            ]
        )

    # dilate
    for _ in range(n_dilate):
        path_dilation = binary_dilation(path_dilation)
        saved_arrs.append(path_dilation)
    saved_arrs = np.sum(np.array(saved_arrs), axis=0)
    return saved_arrs
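A hedged demo with an invented grid: a straight horizontal path dilated twice.

import numpy as np

path = np.zeros((7, 9), dtype=bool)
path[3, 2:7] = True
surface = dilation_dist(path, n_dilate=2)
# surface is 3 on the path, 2 and 1 in the surrounding rings, 0 elsewhere.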
0d35ec5a0a14b026f0df228ae752f104502b82ba
4,475
import numpy as np


def plot_rgb_phases(absolute, phase):
    """
    Calculates a visualization of an inverse Fourier transform, where the absolute value is plotted as brightness and
    the phase is plotted as color.

    :param absolute: 2D numpy array containing the absolute value
    :param phase: 2D numpy array containing phase information in units of pi (should range from -1 to +1!)
    :return: numpy array containing red, green and blue values
    """
    brightness = absolute / absolute.max()  # normalize once instead of per channel
    red = 0.5 * (np.sin(phase * np.pi) + 1) * brightness
    green = 0.5 * (np.sin(phase * np.pi + 2 / 3 * np.pi) + 1) * brightness
    blue = 0.5 * (np.sin(phase * np.pi + 4 / 3 * np.pi) + 1) * brightness
    return np.dstack([red, green, blue])
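A hedged example visualizing the spectrum of random data (input arrays are invented):

import numpy as np

spectrum = np.fft.fft2(np.random.rand(64, 64))
rgb = plot_rgb_phases(np.abs(spectrum), np.angle(spectrum) / np.pi)  # shape (64, 64, 3)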
d2f12df2af25925ae9607ef102f4b0fc1cb01373
4,476
import uuid
from sys import float_info


def layer_view_attachment_warning():
    """A layer with an unlimited attachment value should trigger a warning"""
    content = {
        'id': str(uuid.uuid4()),
        '_type': 'CatXL',
        'attachment': {
            'currency': 'USD',
            'value': float_info.max,
        }
    }
    return convert_to_analyzere_object(content, LayerView)
9a5340a1c726ee39029f8475aed15a99c17aff6d
4,477
import logging
import os
import sqlite3


def create_resfinder_sqlite3_db(dbfile, mappings):
    """
    Create and fill an sqlite3 DB with ResFinder mappings.
    Expects mappings to be a list of tuples:
    (header, symbol, family, class, extra)
    """
    logging.info("Creating sqlite3 db: %s ...", dbfile)

    if os.path.isfile(dbfile):
        logging.warning("Overwriting previously existing dbfile: %s", dbfile)
        os.remove(dbfile)
        logging.debug("Removed pre-existing dbfile: %s", dbfile)

    con = sqlite3.connect(dbfile)
    con.execute("CREATE TABLE resfinder(header TEXT PRIMARY KEY, symbol TEXT, family TEXT, class TEXT, extra TEXT)")
    con.executemany("INSERT INTO resfinder VALUES (?,?,?,?,?)", mappings)
    num_mappings = con.execute("SELECT Count(*) FROM resfinder").fetchone()[0]
    con.commit()
    logging.debug("Inserted %i mappings in to sqlite3 DB", num_mappings)
    return con
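A hedged usage sketch; the mappings and file name below are made up:

mappings = [
    ("blaTEM-1B_1_AY458016", "blaTEM-1B", "blaTEM", "beta-lactam", ""),
    ("tet(A)_4_AJ517790", "tet(A)", "tet", "tetracycline", ""),
]
con = create_resfinder_sqlite3_db("resfinder.sqlite3", mappings)
con.close()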
39fbdffc3088c5265b8bf388c5aa2d7530f8ca87
4,478
def normal_shock_pressure_ratio(M, gamma): """Gives the normal shock static pressure ratio as a function of upstream Mach number.""" return 1.0+2.0*gamma/(gamma+1.0)*(M**2.0-1.0)
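A quick numeric check against the standard textbook value: for M = 2 and gamma = 1.4, the ratio is 1 + (2 * 1.4 / 2.4) * (4 - 1) = 4.5.

print(normal_shock_pressure_ratio(2.0, 1.4))  # 4.5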
30d0a339b17bab2b662fecd5b19073ec6478a1ec
4,479
from typing import Tuple import numpy def _lorentz_berthelot( epsilon_1: float, epsilon_2: float, sigma_1: float, sigma_2: float ) -> Tuple[float, float]: """Apply Lorentz-Berthelot mixing rules to a pair of LJ parameters.""" return numpy.sqrt(epsilon_1 * epsilon_2), 0.5 * (sigma_1 + sigma_2)
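A hedged example with approximate argon/krypton LJ parameters (values are illustrative, in kcal/mol and angstrom):

eps, sigma = _lorentz_berthelot(0.238, 0.325, 3.405, 3.630)
# eps ~= sqrt(0.238 * 0.325), sigma == 0.5 * (3.405 + 3.630)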
b27c282cec9f880442be4e83f4965c0ad79dfb1e
4,480
def verify_password_str(password, password_db_str): """Verify password matches database string.""" split_password_db = password_db_str.split('$') algorithm = split_password_db[0] salt = split_password_db[1] return password_db_str == generate_password_str(algorithm, salt, password)
467dcbfa1dbf1af0d7cd343f00149fc8322053e5
4,481
def get_ical_file_name(zip_file): """Gets the name of the ical file within the zip file.""" ical_file_names = zip_file.namelist() if len(ical_file_names) != 1: raise Exception( "ZIP archive had %i files; expected 1." % len(ical_file_names) ) return ical_file_names[0]
7013840891844358f0b4a16c7cefd31a602d9eae
4,482
from string import ascii_letters, digits

from hypothesis import strategies as st


@st.composite  # required so that hypothesis injects the 'draw' argument
def unquoted_str(draw):
    """Generate strings compatible with our definition of an unquoted string."""
    start = draw(st.text(alphabet=(ascii_letters + "_"), min_size=1))
    body = draw(st.text(alphabet=(ascii_letters + digits + "_")))
    return start + body
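A usage sketch as a property-based test (test name and property are invented for illustration):

from hypothesis import given

@given(unquoted_str())
def test_unquoted_str_start(s):
    # Every generated value starts with a letter or underscore.
    assert s[0].isalpha() or s[0] == "_"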
7927e828a82786f45749e25e25376f48479c0662
4,483
from typing import List from typing import Optional from typing import Any from typing import Callable def _reduce_attribute(states: List[State], key: str, default: Optional[Any] = None, reduce: Callable[..., Any] = _mean) -> Any: """Find the first attribute matching key from states. If none are found, return default. """ attrs = list(_find_state_attributes(states, key)) if not attrs: return default if len(attrs) == 1: return attrs[0] return reduce(*attrs)
bfc4ca6826e05b04ae9e1af6d3c167935bceda6f
4,484
def sync_garmin(fit_file): """Sync generated fit file to Garmin Connect""" garmin = GarminConnect() session = garmin.login(ARGS.garmin_username, ARGS.garmin_password) return garmin.upload_file(fit_file.getvalue(), session)
8e604a0461f503d83b5a304081020d54acd7577c
4,485
from typing import Callable, List

from igraph import Graph  # assumed: the Graph API used below (topological_sorting, successors) matches python-igraph


def get_paths(graph: Graph, filter: Callable) -> List:
    """
    Collect all the paths consisting of valid vertices.
    Return one path at a time because the vertex indices may be modified.
    """
    result = []
    if filter is None:
        return result

    visited = set()
    vs = graph.topological_sorting()
    for vertex in vs:
        if not filter(vertex, graph) or vertex in visited:
            continue
        visited.add(vertex)
        path = [vertex]
        slist = graph.successors(vertex)
        while len(set(slist)) == 1 and filter(slist[0], graph) and slist[0] not in visited:
            cur = slist[0]
            path.append(cur)
            visited.add(cur)
            slist = graph.successors(cur)
        if len(path) > 0:
            result.append(path)
    return result
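A hedged usage sketch, assuming python-igraph supplies the Graph API:

import igraph

g = igraph.Graph(directed=True, edges=[(0, 1), (1, 2), (1, 3)])
keep_all = lambda v, graph: True
print(get_paths(g, keep_all))  # e.g. [[0, 1], [2], [3]]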
f7e1679bae48781010257b4fe8e980964dee80ce
4,486
from flask import Flask
from flask_cors import CORS

# 'api' (the application's flask-restx Api instance) and PKG_NAME are defined
# at package level in the source.


def create_app(app_name=PKG_NAME):
    """Initialize the core application."""
    app = Flask(app_name)
    CORS(app)
    with app.app_context():
        # Register the Restx Api
        api.init_app(app)

        return app
1773b0a84253aa6a1bca5c6f6aec6cd6d59b74fa
4,487
import warnings
from io import StringIO as cStringIO
from typing import Callable, Dict

import sympy
from pysmt.environment import get_env, reset_env
from pysmt.smtlib.parser import SmtLibParser

# SMTToSympyWalker, default_interpreted_constants and
# default_interpreted_unary_functions are defined elsewhere in this package.


def smtlib_to_sympy_constraint(
    smtlib_input: str,
    interpreted_constants: Dict[str, Callable] = default_interpreted_constants,
    interpreted_unary_functions: Dict[str, Callable] = default_interpreted_unary_functions):
  """Convert SMTLIB(v2) constraints into sympy constraints analyzable via SYMPAIS.

  This function is experimental and introduced as an example.
  It is implemented on top of PySMT (https://github.com/pysmt/pysmt).
  Additional features can be added extending the `SMTToSympyWalker` class.

  Args:
    smtlib_input: SMT constraint as a string in SMTLIB(v2) format, as accepted by PySMT
    interpreted_constants: predefined interpreted constants to be declared in the SMT problem.
      Default: E (Euler), PI
    interpreted_unary_functions: predefined interpreted functions Real -> Real.
      Default: sin, cos, tan, asin, acos, atan, log, exp, sqrt

  Returns:
    The sympy constraint equivalent to the SMTLIB input.
  """
  interpreted_symbols_declarations = '\n'.join(
      [f'(declare-const {cname} Real)' for cname in interpreted_constants.keys()])
  # Note: a separating newline was missing here in the original.
  interpreted_symbols_declarations += '\n' + '\n'.join([
      f'(declare-fun {fname} (Real) Real)'
      for fname in interpreted_unary_functions.keys()
  ])
  smtlib_with_interpreted_symbols = (
      interpreted_symbols_declarations + '\n' + smtlib_input)

  reset_env()
  parser = SmtLibParser()
  script = parser.get_script(cStringIO(smtlib_with_interpreted_symbols))
  f = script.get_last_formula()
  converter = SMTToSympyWalker(get_env(), interpreted_constants,
                               interpreted_unary_functions)
  f_sympy = converter.walk(f)
  f_sympy = sympy.logic.simplify_logic(f_sympy)
  f_sympy = sympy.simplify(f_sympy)
  if f_sympy.atoms(sympy.logic.Or):
    warnings.warn(
        'Disjunctive constraints are not supported by RealPaver. Consider '
        'replacing it with an adequate interval constraint propagation tool '
        'to benefit from all the features of SYMPAIS')
  return f_sympy
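A hedged usage sketch (the walker and default tables come from the surrounding package; the constraint below is invented):

smtlib_problem = """
(declare-const x Real)
(assert (and (> x 0.0) (< (* x x) 2.0)))
"""
constraint = smtlib_to_sympy_constraint(smtlib_problem)
print(constraint)  # roughly: (x > 0) & (x**2 < 2)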
d04782272cd13fcb7eafdb4b8f9cb7b1fd857dcc
4,488
async def revert(app, change_id: str) -> dict:
    """
    Revert a history change given by the passed ``change_id``.

    :param app: the application object
    :param change_id: a unique id for the change
    :return: the updated OTU

    """
    db = app["db"]

    change = await db.history.find_one({"_id": change_id}, ["index"])

    if change["index"]["id"] != "unbuilt" or change["index"]["version"] != "unbuilt":
        raise virtool.errors.DatabaseError(
            "Change is included in a build and is not revertible"
        )

    otu_id, otu_version = change_id.split(".")

    if otu_version != "removed":
        otu_version = int(otu_version)

    _, patched, history_to_delete = await patch_to_version(app, otu_id, otu_version - 1)

    # Remove the old sequences from the collection.
    await db.sequences.delete_many({"otu_id": otu_id})

    if patched is not None:
        patched_otu, sequences = virtool.otus.utils.split(patched)

        # Add the reverted sequences to the collection.
        for sequence in sequences:
            await db.sequences.insert_one(sequence)

        # Replace the existing otu with the patched one. If it doesn't exist, insert it.
        await db.otus.replace_one({"_id": otu_id}, patched_otu, upsert=True)

    else:
        await db.otus.delete_one({"_id": otu_id})

    await db.history.delete_many({"_id": {"$in": history_to_delete}})

    return patched
ad5484639f0a70913b17799534fa52b8531b3356
4,489
def days_away(date):
    """Takes in the string form of a date and returns the absolute number of
    days between today and that date."""
    mod_date = string_to_date(date)
    return abs((current_date() - mod_date).days)
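Hedged usage; string_to_date and current_date are module helpers, and the date format below is assumed:

days_away('2030-01-01')  # e.g. 1826 when run on 2025-01-01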
f76b10d9e72d8db9e42d7aba7481e63cf1382502
4,490
def node_constraints_transmission(model): """ Constrains e_cap symmetrically for transmission nodes. """ m = model.m # Constraint rules def c_trans_rule(m, y, x): y_remote, x_remote = transmission.get_remotes(y, x) if y_remote in m.y_trans: return m.e_cap[y, x] == m.e_cap[y_remote, x_remote] else: return po.Constraint.NoConstraint # Constraints m.c_transmission_capacity = po.Constraint(m.y_trans, m.x, rule=c_trans_rule)
0fc51f39b63324c73503b349cfd38da4c9816c50
4,491
import matplotlib.pyplot as plt

# PLOT_STYLES is a module-level prop-cycle constant defined elsewhere in the source.


def plot_mtf(faxis, MTF, labels=None):
    """Plot the MTF. Return the figure reference."""
    fig_lineplot = plt.figure()
    plt.rc('axes', prop_cycle=PLOT_STYLES)

    for i in range(MTF.shape[0]):
        plt.plot(faxis, MTF[i, :])

    plt.xlabel('spatial frequency [cycles/length]')
    plt.ylabel('Radial MTF')
    plt.gca().set_ylim([0, 1])
    if labels is not None:
        plt.legend([str(n) for n in labels])
    plt.title("Modulation Transfer Function for various angles")

    return fig_lineplot
dac09628a72666a4f4e3e8aae4263cb9f2688fa2
4,492
import enum


def forward_ref_structure_hook(context, converter, data, forward_ref):
    """Applied to ForwardRef model and enum annotations

    - Map reserved words in json keys to appropriate (safe) names in model.
    - handle ForwardRef types until github.com/Tinche/cattrs/pull/42/ is fixed
        Note: this is the reason we need a "context" param and have to use a
        partial func to register the hook. Once the issue is resolved we can
        remove "context" and the partial.
    """
    data = hooks.tr_data_keys(data)
    actual_type = eval(forward_ref.__forward_arg__, context, locals())
    if issubclass(actual_type, enum.Enum):
        instance = converter.structure(data, actual_type)
    elif issubclass(actual_type, model.Model):
        # cannot use converter.structure - recursion error
        instance = converter.structure_attrs_fromdict(data, actual_type)
    else:
        raise DeserializeError(f"Unknown type to deserialize: {actual_type}")
    return instance
acbbf365c7a80c7a9f5230bcd038c2c286ae58c5
4,493
def cross(x: VariableLike, y: VariableLike) -> VariableLike: """Element-wise cross product. Parameters ---------- x: Left hand side operand. y: Right hand side operand. Raises ------ scipp.DTypeError If the dtype of the input is not vector3. Returns ------- : The cross product of the input vectors. """ return _call_cpp_func(_cpp.cross, x, y)
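A hedged usage sketch with scipp (the vector-construction API below reflects the scipp docs; exact signatures may vary by version):

import scipp as sc

a = sc.vectors(dims=['pos'], values=[[1, 0, 0]], unit='m')
b = sc.vectors(dims=['pos'], values=[[0, 1, 0]], unit='m')
print(sc.cross(a, b))  # [0, 0, 1] with unit m^2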
372156ba869e3dabb2421e1ea947fdc710c316eb
4,494
def delete(service, name, parent_id=None, appProperties=defaults.GDRIVE_USE_APPPROPERTIES): """ Delete a file/folder on Google Drive Parameters ---------- service : googleapiclient.discovery.Resource Google API resource for GDrive v3 name : str Name of file/folder parent_id : str, optional Parent ID of folder containing file (to narrow search) appProperties : bool Search for application-specific files using ``appProperties`` Returns ------- str ID of deleted file/folder """ name_id = exists(service, name, parent_id=parent_id) resp = service.files().delete(fileId=name_id).execute() return name_id
e2653005d8d0e53df80119869586542b08405c55
4,495
def _is_LoginForm_in_this_frame(driver, frame):
    """ Check whether the given frame contains a login form """
    driver.switch_to.frame(frame)  # switch into this frame
    if _is_LoginForm_in_this_page(driver):
        return True
    else:
        driver.switch_to.parent_frame()  # switch back if nothing was found
        return False
16a4b3af1d5cf9abe2efee6856a37b520fc2a1fc
4,496
def parse_range_header(specifier, len_content):
    """Parses a range header into a list of pairs (start, stop)"""
    if not specifier or '=' not in specifier:
        return []

    ranges = []
    unit, byte_set = specifier.split('=', 1)
    unit = unit.strip().lower()

    if unit != "bytes":
        return []

    for val in byte_set.split(","):
        val = val.strip()
        if '-' not in val:
            return []

        if val.startswith("-"):
            # suffix-byte-range-spec: this form specifies the last N
            # bytes of an entity-body
            start = len_content + int(val)
            if start < 0:
                start = 0
            stop = len_content
        else:
            # byte-range-spec: first-byte-pos "-" [last-byte-pos]
            start, stop = val.split("-", 1)
            start = int(start)
            # Add 1 to make stop exclusive (HTTP spec is inclusive)
            stop = int(stop) + 1 if stop else len_content
            if start >= stop:
                return []

        ranges.append((start, stop))

    return ranges
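A few illustrative calls; the expected results follow directly from the code above:

parse_range_header("bytes=0-499", 1000)    # [(0, 500)]
parse_range_header("bytes=-500", 1000)     # [(500, 1000)]  last 500 bytes
parse_range_header("bytes=500-", 1000)     # [(500, 1000)]
parse_range_header("bytes=0-0,-1", 1000)   # [(0, 1), (999, 1000)]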
2a408abe816684bf42b2253495088338bf4cac2b
4,497
import numpy as np


def acf(x, lags=None):
    """
    Computes the empirical autocorrelation function.

    :param x: array (n,), sequence of data points
    :param lags: int, maximum lag to compute the ACF for. If None, this is set to n-1. Default is None.

    :return gamma: array (lags + 1,), values of the ACF at lags 0 to lags
    """
    gamma = np.correlate(x, x, mode='full')  # Size here is always 2*len(x)-1
    gamma = gamma[int((gamma.size - 1) / 2):]  # Keep only second half
    if lags is not None and lags < len(gamma):
        gamma = gamma[0:lags + 1]
    return gamma / gamma[0]
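A quick sanity check: white noise decorrelates almost immediately.

import numpy as np

rho = acf(np.random.randn(10_000), lags=5)
print(rho[0])  # exactly 1.0; the remaining entries are near 0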
9d47df88255ec8c9ae373c501f8b70af8f3c4ebc
4,498
import os
import functools


def copyInfotainmentServerFiles(tarName, targetId=None):
    """ Stuff the server binary into a tar file """
    # grab the pre-built binary
    osImage = getSetting('osImage', targetId=targetId)
    infotainmentBinDir = getBinDir('infotainment-server', targetId=targetId)
    cpFilesToBuildDir(infotainmentBinDir, pattern="infotainment_server", targetId=targetId)
    tarFiles = ["infotainment_server"]

    infotainmentAppDir = getCyberphysAppDir('infotainment-server')
    runtimeFilesDir = os.path.join(infotainmentAppDir, osImage)
    if osImage == 'debian':
        cpFilesToBuildDir(runtimeFilesDir, pattern="infotainment-server.service", targetId=targetId)
        tarFiles += ["infotainment-server.service"]
    elif osImage == 'FreeBSD':
        cpFilesToBuildDir(runtimeFilesDir, pattern="infotainment-server.sh", targetId=targetId)
        tarFiles += ["infotainment-server.sh"]
    else:
        logAndExit(f"Installing infotainment-server is not supported on <{osImage}>", exitCode=EXIT.Dev_Bug)

    buildDirPathTuplePartial = functools.partial(buildDirPathTuple, targetId=targetId)
    filesList = map(buildDirPathTuplePartial, tarFiles)
    return filesList
53d7d586e74281f3ac9061e8b1028d3efb62be07
4,499