Dataset columns:
content: string (lengths 35 to 762k)
sha1: string (length 40)
id: int64 (range 0 to 3.66M)
import numpy


def undiskify(z):
    """Maps SL(2)/U(1) poincare disk coord to Lie algebra generator-factor."""
    # Conventions match (2.13) in https://arxiv.org/abs/1909.10969
    return 2 * numpy.arctanh(abs(z)) * numpy.exp(1j * numpy.angle(z))
9ac4cd521ca64decd082a34e35e0d080d3190e13
20,000
def mean_vertex_normals(vertex_count, faces, face_normals, **kwargs):
    """
    Find vertex normals from the mean of the faces that contain that vertex.

    Parameters
    -----------
    vertex_count : int
      The number of vertices faces refer to
    faces : (n, 3) int
      List of vertex indices
    face_normals : (n, 3) float
      Normal vector for each face

    Returns
    -----------
    vertex_normals : (vertex_count, 3) float
      Normals for every vertex
      Vertices unreferenced by faces will be zero.
    """
    def summed_sparse():
        # use a sparse matrix of which face contains each vertex to
        # figure out the summed normal at each vertex
        # allow cached sparse matrix to be passed
        if 'sparse' in kwargs:
            sparse = kwargs['sparse']
        else:
            sparse = index_sparse(vertex_count, faces)
        summed = sparse.dot(face_normals)
        return summed

    def summed_loop():
        # loop through every face, in tests was ~50x slower than
        # doing this with a sparse matrix
        summed = np.zeros((vertex_count, 3))
        for face, normal in zip(faces, face_normals):
            summed[face] += normal
        return summed

    try:
        summed = summed_sparse()
    except BaseException:
        log.warning(
            'unable to generate sparse matrix! Falling back!',
            exc_info=True)
        summed = summed_loop()

    # invalid normals will be returned as zero
    vertex_normals = util.unitize(summed)
    return vertex_normals
767214b5c2ba701de5288009ee4ebfb90378446c
20,001
def get_beam_jobs():
    """Returns the list of all registered Apache Beam jobs.

    Returns:
        list(BeamJob). The list of registered Apache Beam jobs.
    """
    return [beam_job_domain.BeamJob(j) for j in jobs_registry.get_all_jobs()]
24e22d487fdbdb02917011e94a6d5b985de67640
20,002
def z_norm(dataset, max_seq_len=50):
    """Normalize data in the dataset."""
    processed = {}
    text = dataset['text'][:, :max_seq_len, :]
    vision = dataset['vision'][:, :max_seq_len, :]
    audio = dataset['audio'][:, :max_seq_len, :]
    for ind in range(dataset["text"].shape[0]):
        vision[ind] = np.nan_to_num(
            (vision[ind] - vision[ind].mean(0, keepdims=True)) /
            (np.std(vision[ind], axis=0, keepdims=True)))
        audio[ind] = np.nan_to_num(
            (audio[ind] - audio[ind].mean(0, keepdims=True)) /
            (np.std(audio[ind], axis=0, keepdims=True)))
        text[ind] = np.nan_to_num(
            (text[ind] - text[ind].mean(0, keepdims=True)) /
            (np.std(text[ind], axis=0, keepdims=True)))
    processed['vision'] = vision
    processed['audio'] = audio
    processed['text'] = text
    processed['labels'] = dataset['labels']
    return processed
8cf40069b2a8c042d357fab3b1e3aaf13c15c69e
20,003
def days_upto(year):
    """
    Return the number of days from the beginning of the test period
    to the beginning of the year specified
    """
    return sum([days_in_year(y) for y in range(2000, year)])
f87295a53d839e2ce895ef5fe5490b77377d28eb
20,004
def read_DEM(fn=None, fjord=None): """ Reads in the DEM (only accepts GeoTiffs right now) into an XArray Dataarray with the desired format. """ # intake.open_rasterio accepts a list of input files and may effectively do what this function does! # try using cropped versions of the input files. Doesn't seem to make a difference r.e. crashing ''' cropped_fn = fn.rpartition(".tif")[0] + "_cropped.tif" print(cropped_fn) if os._exists(cropped_fn): fn = cropped_fn elif fjord != None: bbox = fjord_props.get_fjord_bounds(fjord) ds = rioxarray.open_rasterio(fn) trimmed_ds = ds.rio.slice_xy(*bbox) trimmed_ds.rio.to_raster(fn.rpartition(".tif")[0] + "_cropped.tif") del ds del trimmed_ds fn = cropped_fn ''' # try bringing in the rasters as virtual rasters (i.e. lazily loading) with rasterio.open(fn) as src: # print('Source CRS:' +str(src.crs)) # print(src.is_tiled) # print(src.block_shapes) with WarpedVRT(src,src_crs=src.crs,crs=src.crs) as vrt: # warp_mem_limit=12000,warp_extras={'NUM_THREADS':2}) as vrt: # print('Destination CRS:' +str(vrt.crs)) darr = xr.open_rasterio(vrt) # ds = rioxarray.open_rasterio(vrt).chunk({'x':1500,'y':1500,'band':1}).to_dataset(name='HLS_Red') # Rasterio automatically checks that the file exists # ultimately switch to using rioxarray, but it causes issues down the pipeline so it will need to be debugged through # with rioxarray.open_rasterio(fn) as src: # with xr.open_rasterio(fn) as darr: # darr = src # open_rasterio automatically brings the geotiff in as a DataArray with 'band' as a dimensional coordinate # we rename it and remove the band as a coordinate, since our DEM only has one dimension # squeeze removes dimensions of length 0 or 1, in this case our 'band' # Then, drop('band') actually removes the 'band' dimension from the Dataset darr = darr.rename('elevation').squeeze().drop_vars('band') # darr = darr.rename({'band':'dtime'}) # if we wanted to instead convert it to a dataset # attr = darr.attrs # darr = darr.to_dataset(name='elevation').squeeze().drop('band') # darr.attrs = attr # attr=None # newest version of xarray (0.16) has promote_attrs=True kwarg. Earlier versions don't... # darr = darr.to_dataset(name='elevation', promote_attrs=True).squeeze().drop('band') # mask out the nodata values, since the nodatavals attribute is wrong darr = darr.where(darr != -9999.) # the gdalwarp geoid files have this extra attribute in the geoTiff, which when brought in # ultimately causes a "__module__" related error when trying to plot with hvplot try: del darr.attrs["units"] except KeyError: pass if fjord != None: # USE RIOXARRAY - specifically, slicexy() which can be fed the bounding box # darr = darr.rio.slice_xy(fjord_props.get_fjord_bounds(fjord)) bbox = fjord_props.get_fjord_bounds(fjord) if pd.Series(darr.y).is_monotonic_increasing: darr = darr.sel(x=slice(bbox[0], bbox[2]), y=slice(bbox[1], bbox[3])) else: darr = darr.sel(x=slice(bbox[0], bbox[2]), y=slice(bbox[3], bbox[1])) return darr
23abd9bfe9a9c498bee43138245e3b38107eef6c
20,005
def choose_field_size(): """a function that crafts a field""" while True: print('Пожалуйста, задайте размер поля (число от 3 до 5):') try: field_size = int(input()) except ValueError: continue if field_size == 3: print('\nПоле для игры:\n') rows = {'a': 1, 'b': 2, 'c': 3} columns = [1, 2, 3] field = [[[' '], [' '], [' ']], [[' '], [' '], [' ']], [[' '], [' '], [' ']]] rows_name = ['a', 'b', 'c'] print(' 1 2 3\n') for row_num in range(len(field)): print(rows_name[row_num], sep='', end='') for cell in field[row_num]: print(cell[0], '|', sep='', end='') print('\n --------------', sep='', end='') print('\n') break elif field_size == 4: print(""" 1 2 3 4 a | | | -------------- b | | | -------------- c | | | -------------- d | | |""") break elif field_size == 5: print(""" 1 2 3 4 5 a | | | | ------------------ b | | | | ------------------ c | | | | ------------------ d | | | | ------------------ e | | | |""") break else: continue return field, rows, columns
ed370aca1f13a9f93bb96e483885c67e1bd30317
20,006
def delete_submission_change(id):
    """Delete a post.

    Ensures that the post exists and that the logged in user is the
    author of the post.
    """
    db = get_db()
    db.execute('DELETE FROM submission_change WHERE id = ?', (id,))
    db.commit()
    return jsonify(status='ok')
a886dfd89939ca10d95877bd16bda313ccb9353d
20,007
from datetime import datetime


def get_search_response(db, search_term):
    """Method to get search result from db or google api.

    Args:
        db: The database object.
        search_term: The search term.

    Returns:
        String: List of relevant links separated by line break.
    """
    # Find if the search results for the term is stored in mongo.
    response = (
        db["SearchResults"].find_one({"searchTerm": search_term}) or {}
    ).get("result")
    if not response:
        # Fetch search results from Google API if not found in mongo.
        response = get_google_search_response(search_term)
        # Cache the results in mongo where lastSearchedOn is a TTL index
        # with timeout of 3600 seconds.
        db["SearchResults"].insert_one(
            {
                "searchTerm": search_term,
                "lastSearchedOn": datetime.now(),
                "result": response
            }
        )
    return response
11180ad1ee57d2a778439fd260d5201d3723cfe7
20,008
def lattice_2d_rescale_wave_profile(kfit, X, dT, Z_C, Y_C, v, dx=1.):
    """
    Fit the wave profile (X, dT) to the ODE solution (X_C, dT_C)
    """
    # recenter the profile around 0
    k0 = np.argmax(dT)
    x0 = X[k0]
    Z = kfit * (X.copy() - x0)

    # retain a window corresponding to the input ODE solution
    zlo = max(np.min(Z_C), np.min(Z))
    zhi = min(np.max(Z_C), np.max(Z))
    idx = (Z >= zlo) & (Z <= zhi)
    Z = Z[idx]
    Y = dT.copy()[idx]
    if (len(Z) > len(Z_C)):
        raise ValueError("Increase resolution of ODE solution!")

    # rescale Y
    Y /= (v * kfit / 2.)
    return Z, Y
8caea59a092efd9632e4b705400f40aa0ebbec44
20,009
def extent_switch_ijk_kji(extent_in: npt.NDArray[np.int_]) -> npt.NDArray[np.int_]:
    """Returns equivalent grid extent switched either way between simulator and python protocols."""
    # reverse order of elements in extent
    dims = extent_in.size
    result = np.zeros(dims, dtype='int')
    for d in range(dims):
        result[d] = extent_in[dims - d - 1]
    return result
c960591fa1f3b31bd0877fd75845a17fff8eff50
20,010
def create_data(f, x_vals):
    """Assumes f is a function of one argument
       x_vals is an array of suitable arguments for f
       Returns array containing results of applying f
       to the elements of x_vals"""
    y_vals = []
    for i in x_vals:
        # apply f to the element itself, as described in the docstring
        y_vals.append(f(i))
    return np.array(y_vals)
5f74402586c3f7d02c8d6146d5256dbccdf49e81
20,011
def register(): """注册""" req_dict = request.get_json() phone = req_dict.get("phone") password = req_dict.get("password") password2 = req_dict.get("password2") sms_code = req_dict.get("sms_code") phone = str(phone) sms_code = str(sms_code) # 校验参数 if not all([phone, password, password2, sms_code]): return jsonify(code=400, msg="参数不完整") if password != password2: return jsonify(code=400, msg="两次密码不一致") # 从redis中取出短信验证码 try: real_sms_code = redis_store.get("sms_code_%s" % phone) except Exception as e: current_app.logger.error(e) return jsonify(code=4001, msg="读取真实短信验证码异常") # 判断短信验证码是否过期 if real_sms_code is None: return jsonify(code=4002, msg="短信验证码失效") # 删除redis中的短信验证码,防止重复使用校验 try: redis_store.delete("sms_code_%s" % phone) except Exception as e: current_app.logger.error(e) # 判断用户填写短信验证码的正确性 if real_sms_code != sms_code: return jsonify(code=4003, msg="短信验证码错误") # 判断用户的手机是否注册过 try: user = User.query.filter_by(phone=phone).first() except Exception as e: current_app.logger.error(e) return jsonify(code=400, msg="数据库异常") else: if user is not None: # 表示已被注册 return jsonify(code=400, msg="手机已被注册") # 保存用户的注册数据到数据库中 avatar = constant.ADMIN_AVATAR_URL # 用户头像 user = User(username=phone, phone=phone, password=password, avatar=avatar) try: db.session.add(user) db.session.commit() except Exception as e: db.session.rollback() current_app.logger.error(e) return jsonify(code=400, msg="查询数据库异常") # 保存登录状态到session中 session["username"] = phone session["phone"] = phone session["user_id"] = user.id session["avatar"] = user.avatar # 返回结果 return jsonify(code=200, msg="注册成功")
f9e6bc8dc30cb967843d0f47fada1b4b62c6b130
20,012
def cpp_flag(compiler):
    """Return the -std=c++[11/14/17] compiler flag.

    The newer version is preferred over c++11 (when it is available).
    """
    flags = ["-std=c++17", "-std=c++14", "-std=c++11"]
    for flag in flags:
        if has_flag(compiler, flag):
            return flag
    raise RuntimeError(
        "Unsupported compiler -- at least C++11 support is needed!"
    )
a21a0a8efcad62cc26ff033877c366d7d6acf09d
20,013
def to_null(string):
    """
    Usage::

        {{ string|to_null }}
    """
    return 'null' if string is None else string
1868ca2c7474a8134f2dbb0b0e542ca659bf4940
20,014
import warnings


def get_mtime(path, mustExist=True):
    """
    Get mtime of a path, even if it is inside a zipfile
    """
    warnings.warn("Don't use this function", DeprecationWarning)
    try:
        return zipio.getmtime(path)
    except IOError:
        if not mustExist:
            return -1
        raise
6c661fb5d7a874a8173ec509b07401e2120da95b
20,015
def get_table_8():
    """Table 8: Correction factors for dimming control of the lighting
    equipment installed in each lighting zone of the main living room.

    Args:

    Returns:
        list: Table 8 correction factors for dimming control of the lighting
        equipment installed in each lighting zone of the main living room
    """
    table_8 = [
        (0.9, 1.0),
        (0.9, 1.0),
        (1.0, 1.0)
    ]
    return table_8
89470f0242982755104dbb2afe0198e2f5afa5f4
20,016
import requests
import warnings


def query_epmc(query):
    """
    Parameters
    ----------
    query :

    Returns
    -------
    """
    url = "https://www.ebi.ac.uk/europepmc/webservices/rest/search?query="
    page_term = "&pageSize=999"  ## Usual limit is 25
    request_url = url + query + page_term
    r = requests.get(request_url)
    if r.status_code == 200:
        return r
    else:
        warnings.warn("request to " + str(query) + " has failed to return 200, and has returned " + str(r.status_code))
        pass
a8da1ee3253d51738f1d556548f6bccf17b32b53
20,017
def is_default_array_type(f, type_map=TYPE_MAP):
    """
    Check whether the field is an array and is made up of default
    types, e.g. u8 or s16.
    """
    return f.type_id == 'array' and type_map.get(f.options['fill'].value, None)
ec0e7a26261cc72e473d2c365bc452b2eeab396f
20,018
def delete_police_station_collection():
    """
    Helper function to delete station collection in db.
    """
    result = PoliceStation.objects().delete()
    return result
7b3cc89269695fa494eb12a7b904fabd1974f3d8
20,019
def compute_asvspoof_tDCF( asv_target_scores, asv_nontarget_scores, asv_spoof_scores, cm_bonafide_scores, cm_spoof_scores, cost_model, ): """ Compute t-DCF curve as in ASVSpoof2019 competition: Fix ASV threshold to EER point and compute t-DCF curve over thresholds in CM. Code for this is mainly taken from the ASVSpoof2019 competition t-DCF implementation: https://www.asvspoof.org/ Parameters: asv_target_scores (ndarray): Array of ASV target (bonafide) scores (should be high) asv_nontarget_scores (ndarray): Array of ASV nontarget (bonafide) scores (should be low) asv_spoof_scores (ndarray): Array of ASV spoof scores (should be low) cm_bonafide_scores (ndarray): Array of CM target (bonafide) scores (should be high) cm_spoof_scores (ndarray): Array of CM nontarget (spoof) scores (should be low) cost_model (CostParameters): CostParameters object containing cost parameters Returns: tdcf_curve (ndarray): Array of normalized t-DCF values at different CM thresholds cm_thresholds (ndarray): Array of different CM thresholds, corresponding to values in tdcf_curve. """ # Fix ASV FAR and miss to values at EER (with legit samples) asv_frr, asv_far, asv_thresholds = compute_det(asv_target_scores, asv_nontarget_scores) asv_frr_eer, asv_far_eer, asv_eer_threshold = compute_eer(asv_frr, asv_far, asv_thresholds) p_asv_miss = asv_frr_eer p_asv_fa = asv_far_eer # Fraction of spoof samples that were rejected by asv. # Note that speaker labels are not used here, just raw number # of spoof samples rejected by asv in general p_asv_spoof_miss = np.sum(asv_spoof_scores < asv_eer_threshold) / len(asv_spoof_scores) # Copy/pasta from t-DCF implementation in ASVSpoof2019 competition # Obtain miss and false alarm rates of CM p_cm_miss, p_cm_fa, cm_thresholds = compute_det(cm_bonafide_scores, cm_spoof_scores) # See ASVSpoof2019 evaluation plan for more information on these C1 = cost_model.p_tar * (cost_model.c_cm_miss - cost_model.c_asv_miss * p_asv_miss) - \ cost_model.p_nontar * cost_model.c_asv_fa * p_asv_fa # Cost for CM false-accept: # How often we have spoof samples * # Cost of accepting a spoof * # how often ASV accepts spoof C2 = cost_model.c_cm_fa * cost_model.p_spoof * (1 - p_asv_spoof_miss) # Obtain t-DCF curve for all thresholds tDCF = C1 * p_cm_miss + C2 * p_cm_fa # Normalized t-DCF tDCF_norm = tDCF if min(C1, C2) == 0: tDCF_norm = tDCF else: tDCF_norm = tDCF / np.minimum(C1, C2) return tDCF_norm, cm_thresholds
27819737d7a1a84db10d78cce4c5edd16548e774
20,020
def all_divisor(n, includeN=True):
    """
    >>> all_divisor(28)
    [1, 2, 4, 7, 14, 28]
    >>> all_divisor(28, includeN=False)
    [1, 2, 4, 7, 14]

    Derived from https://qiita.com/LorseKudos/items/9eb560494862c8b4eb56
    """
    lower_divisors, upper_divisors = [], []
    i = 1
    while i * i <= n:
        if n % i == 0:
            lower_divisors.append(i)
            if i != n // i:
                upper_divisors.append(n // i)
        i += 1
    upper_divisors = upper_divisors[::-1]
    if not includeN:
        upper_divisors.pop()
    return lower_divisors + upper_divisors
2fa0eb58eac30030cfbbfdcce62bc91cb36f218e
20,021
def isTrue(value, noneIsFalse=True):
    """
    Returns True if <value> is one of the valid string representations
    for True. By default, None is considered False.
    """
    if not value:
        if noneIsFalse:
            return False
        else:
            return None
    else:
        return value.lower() in TRUE_STRINGS
16e69dc43ef2034d210803e8cc3ebf2ae13e13b2
20,022
import pickle


def read_doc_labels(input_dir):
    """
    :param input_dir:
    :return: doc labels
    """
    with open(input_dir + "doc_labels.pkl", 'rb') as fin:
        labels = pickle.load(fin)
    return labels
c0246f8e09441782a7437177877cc1e4d83ecb40
20,023
import os


def which(bin_dir, program):
    """
    rough equivalent of the 'which' command to find external programs
    (current script path is tested first, then PATH envvar)
    """
    def is_executable(fpath):
        return os.path.exists(fpath) and os.access(fpath, os.X_OK)

    fpath, fname = os.path.split(program)
    if fpath:
        if is_executable(program):
            return program
    else:
        progpath = os.path.join(bin_dir, program)
        if is_executable(progpath):
            return progpath
        for path in os.environ["PATH"].split(os.pathsep):
            progpath = os.path.join(path, program)
            if is_executable(progpath):
                return progpath
    return None
4c7ecc9e43574d95591b4de5df92c5693eda70a3
20,024
def search(catalog_number):
    """
    A top level `catalog_number` search that returns a list of result dicts.
    Usually catalog numbers are unique but not always hence the returned list.
    """
    results = query(catalog_number)
    result_list = []
    for result in results:
        dict_result = vars(result)["data"]
        result_list.append(result_filter(dict_result))
    return result_list
2a8ce325250cbaa5a9f307ef707c74c5101d84d3
20,025
import requests import urllib from io import StringIO def fetch_csv_for_date(dt, session=None): """ Fetches the whole month of the give datetime returns the data as a DataFrame throws an exception data is not available """ if not session: session = requests.session() # build the parameters and fill in the requested date # TODO find something prettier than string concatenation which works # TODO find out whether VIEWSTATE stays valid or needs to be fetched before making the post request datestr = dt.strftime("%m/%d/%Y") parameters = { "__EVENTTARGET": "", "__EVENTARGUMENT": "", "__VIEWSTATE": "/wEPDwUKLTM2ODQwNzIwMw9kFgJmD2QWAgIDD2QWAgIBD2QWCAIBD2QWAmYPZBYCAgMPDxYCHgRUZXh0BTNTaXN0ZW1hIGRlIEluZm9ybWFjacOzbiBkZWwgTWVyY2Fkby4gw4FyZWEgUMO6YmxpY2FkZAIFDzwrABEDAA8WBB4LXyFEYXRhQm91bmRnHgtfIUl0ZW1Db3VudGZkARAWABYAFgAMFCsAAGQCCQ9kFgJmD2QWAgIDD2QWAmYPZBYEZg9kFgYCAQ8PFgQFBE1pbkQGAECJX4pw0wgFBE1heEQGAMBI0Tg61wgPFg4eB01pbkRhdGUGAECJX4pw0wgeDFNlbGVjdGVkRGF0ZQYAwEjRODrXCB4HTWF4RGF0ZQYAwEjRODrXCB4VRW5hYmxlRW1iZWRkZWRTY3JpcHRzZx4cRW5hYmxlRW1iZWRkZWRCYXNlU3R5bGVzaGVldGceElJlc29sdmVkUmVuZGVyTW9kZQspclRlbGVyaWsuV2ViLlVJLlJlbmRlck1vZGUsIFRlbGVyaWsuV2ViLlVJLCBWZXJzaW9uPTIwMTQuMi43MjQuNDUsIEN1bHR1cmU9bmV1dHJhbCwgUHVibGljS2V5VG9rZW49MTIxZmFlNzgxNjViYTNkNAEeF0VuYWJsZUFqYXhTa2luUmVuZGVyaW5naGQWBGYPFCsACA8WEB8ABRMyMDE5LTA5LTE2LTAwLTAwLTAwHhFFbmFibGVBcmlhU3VwcG9ydGgfBmceDUxhYmVsQ3NzQ2xhc3MFB3JpTGFiZWwfCWgfB2ceBFNraW4FB0RlZmF1bHQfCAsrBAFkFggeBVdpZHRoGwAAAAAAAFlABwAAAB4KUmVzaXplTW9kZQspclRlbGVyaWsuV2ViLlVJLlJlc2l6ZU1vZGUsIFRlbGVyaWsuV2ViLlVJLCBWZXJzaW9uPTIwMTQuMi43MjQuNDUsIEN1bHR1cmU9bmV1dHJhbCwgUHVibGljS2V5VG9rZW49MTIxZmFlNzgxNjViYTNkNAAeCENzc0NsYXNzBRFyaVRleHRCb3ggcmlIb3Zlch4EXyFTQgKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FEXJpVGV4dEJveCByaUVycm9yHxACggIWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRNyaVRleHRCb3ggcmlGb2N1c2VkHxACggIWBh8NGwAAAAAAAFlABwAAAB8PBRNyaVRleHRCb3ggcmlFbmFibGVkHxACggIWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRRyaVRleHRCb3ggcmlEaXNhYmxlZB8QAoICFggfDRsAAAAAAABZQAcAAAAfDgsrBQAfDwURcmlUZXh0Qm94IHJpRW1wdHkfEAKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FEHJpVGV4dEJveCByaVJlYWQfEAKCAmQCAg8PFgQfDwUxUmFkQ2FsZW5kYXJNb250aFZpZXcgUmFkQ2FsZW5kYXJNb250aFZpZXdfRGVmYXVsdB8QAgJkFgRmDw8WAh4MVGFibGVTZWN0aW9uCyopU3lzdGVtLldlYi5VSS5XZWJDb250cm9scy5UYWJsZVJvd1NlY3Rpb24AFgIeBXN0eWxlBQ1kaXNwbGF5Om5vbmU7FgJmDw9kFgIeBXNjb3BlBQNjb2xkAgcPZBYCZg8PFgYeCkNvbHVtblNwYW4CBB8PBQlyY0J1dHRvbnMfEAICZGQCBQ8PFgQFBE1pbkQGAECJX4pw0wgFBE1heEQGAMBI0Tg61wgPFg4fAwYAQIlfinDTCB8EBgDASNE4OtcIHwUGAMBI0Tg61wgfBmcfB2cfCAsrBAEfCWhkFgRmDxQrAAgPFhAfAAUTMjAxOS0wOS0xNi0wMC0wMC0wMB8KaB8GZx8LBQdyaUxhYmVsHwloHwdnHwwFB0RlZmF1bHQfCAsrBAFkFggfDRsAAAAAAABZQAcAAAAfDgsrBQAfDwURcmlUZXh0Qm94IHJpSG92ZXIfEAKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FEXJpVGV4dEJveCByaUVycm9yHxACggIWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRNyaVRleHRCb3ggcmlGb2N1c2VkHxACggIWBh8NGwAAAAAAAFlABwAAAB8PBRNyaVRleHRCb3ggcmlFbmFibGVkHxACggIWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRRyaVRleHRCb3ggcmlEaXNhYmxlZB8QAoICFggfDRsAAAAAAABZQAcAAAAfDgsrBQAfDwURcmlUZXh0Qm94IHJpRW1wdHkfEAKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FEHJpVGV4dEJveCByaVJlYWQfEAKCAmQCAg8PFgQfDwUxUmFkQ2FsZW5kYXJNb250aFZpZXcgUmFkQ2FsZW5kYXJNb250aFZpZXdfRGVmYXVsdB8QAgJkFgRmDw8WAh8RCysGABYCHxIFDWRpc3BsYXk6bm9uZTsWAmYPD2QWAh8TBQNjb2xkAgcPZBYCZg8PFgYfFAIEHw8FCXJjQnV0dG9ucx8QAgJkZAIHDw8WBAUETWluRAYAQIlfinDTCAUETWF4RAYAwEjRODrXCA8WDh8DBgBAiV+KcNMIHwQGAMBI0Tg61wgfBQYAwEjRODrXCB8GZx8HZx8ICysEAR8JaGQWBGYPFCsACA8WEB8ABRMyMDE5LTA5LTE2LTAwLTAwLTAwHwpoHwZnHwsFB3JpTGFiZWwfCWgfB2cfDAUHRGVmYXVsdB8ICysEAWQWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRFyaVRleHRCb3ggcmlIb3Zlch8QAoICFggfDRsAAAAAAABZQAcAAAAfDgsrBQA
fDwURcmlUZXh0Qm94IHJpRXJyb3IfEAKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FE3JpVGV4dEJveCByaUZvY3VzZWQfEAKCAhYGHw0bAAAAAAAAWUAHAAAAHw8FE3JpVGV4dEJveCByaUVuYWJsZWQfEAKCAhYIHw0bAAAAAAAAWUAHAAAAHw4LKwUAHw8FFHJpVGV4dEJveCByaURpc2FibGVkHxACggIWCB8NGwAAAAAAAFlABwAAAB8OCysFAB8PBRFyaVRleHRCb3ggcmlFbXB0eR8QAoICFggfDRsAAAAAAABZQAcAAAAfDgsrBQAfDwUQcmlUZXh0Qm94IHJpUmVhZB8QAoICZAICDw8WBB8PBTFSYWRDYWxlbmRhck1vbnRoVmlldyBSYWRDYWxlbmRhck1vbnRoVmlld19EZWZhdWx0HxACAmQWBGYPDxYCHxELKwYAFgIfEgUNZGlzcGxheTpub25lOxYCZg8PZBYCHxMFA2NvbGQCBw9kFgJmDw8WBh8UAgQfDwUJcmNCdXR0b25zHxACAmRkAgEPZBYCAgEPPCsADgIAFCsAAg8WDB8BZx8HZx8GZx8CAgEfCWgfCAsrBAFkFwIFD1NlbGVjdGVkSW5kZXhlcxYABQtFZGl0SW5kZXhlcxYAARYCFgsPAgYUKwAGPCsABQEAFgQeCERhdGFUeXBlGSsCHgRvaW5kAgI8KwAFAQAWBB8VGSsCHxYCAxQrAAUWAh8WAgRkZGQFBmNvbHVtbhQrAAUWAh8WAgVkZGQFB2NvbHVtbjEUKwAFFgIfFgIGZGRkBQdjb2x1bW4yPCsABQEAFgQfFRkrAh8WAgdkZRQrAAALKXlUZWxlcmlrLldlYi5VSS5HcmlkQ2hpbGRMb2FkTW9kZSwgVGVsZXJpay5XZWIuVUksIFZlcnNpb249MjAxNC4yLjcyNC40NSwgQ3VsdHVyZT1uZXV0cmFsLCBQdWJsaWNLZXlUb2tlbj0xMjFmYWU3ODE2NWJhM2Q0ATwrAAcACyl0VGVsZXJpay5XZWIuVUkuR3JpZEVkaXRNb2RlLCBUZWxlcmlrLldlYi5VSSwgVmVyc2lvbj0yMDE0LjIuNzI0LjQ1LCBDdWx0dXJlPW5ldXRyYWwsIFB1YmxpY0tleVRva2VuPTEyMWZhZTc4MTY1YmEzZDQBZGQWDB8BZx4USXNCb3VuZFRvRm9yd2FyZE9ubHloHgVfcWVsdBkpZ1N5c3RlbS5EYXRhLkRhdGFSb3dWaWV3LCBTeXN0ZW0uRGF0YSwgVmVyc2lvbj00LjAuMC4wLCBDdWx0dXJlPW5ldXRyYWwsIFB1YmxpY0tleVRva2VuPWI3N2E1YzU2MTkzNGUwODkeCERhdGFLZXlzFgAeBV8hQ0lTFwAfAgIBZGYWBGYPFCsAA2RkZGQCAQ8WBRQrAAIPFgwfAWcfF2gfGBkrCR8ZFgAfGhcAHwICAWQXAwULXyFJdGVtQ291bnQCAQUIXyFQQ291bnRkBQZfIURTSUMCARYCHgNfc2UWAh4CX2NmZBYGZGRkZGRkFgJnZxYCZg9kFghmD2QWAmYPZBYQZg8PFgQfAAUGJm5ic3A7HgdWaXNpYmxlaGRkAgEPDxYEHwAFBiZuYnNwOx8daGRkAgIPDxYCHwAFEU1lcyBkZSBPcGVyYWNpw7NuZGQCAw8PFgIfAAUcTm8uIGRlIExpcXVpZGFjacOzbiBBc29jaWFkYWRkAgQPDxYCHwAFA0NzdmRkAgUPDxYCHwAFA1BkZmRkAgYPDxYCHwAFBEh0bWxkZAIHDw8WAh8ABRVGZWNoYSBkZSBQdWJsaWNhY2nDs25kZAIBDw8WAh8daGQWAmYPZBYQZg8PFgIfAAUGJm5ic3A7ZGQCAQ8PFgIfAAUGJm5ic3A7ZGQCAg8PFgIfAAUGJm5ic3A7ZGQCAw8PFgIfAAUGJm5ic3A7ZGQCBA8PFgIfAAUGJm5ic3A7ZGQCBQ8PFgIfAAUGJm5ic3A7ZGQCBg8PFgIfAAUGJm5ic3A7ZGQCBw8PFgIfAAUGJm5ic3A7ZGQCAg8PFgIeBF9paWgFATBkFhBmDw8WAh8daGRkAgEPDxYEHwAFBiZuYnNwOx8daGRkAgIPDxYCHwAFD1NlcHRpZW1icmUgMjAxOWRkAgMPDxYCHwAFATBkZAIED2QWAmYPDxYEHg1BbHRlcm5hdGVUZXh0ZR4HVG9vbFRpcGVkZAIFD2QWAmYPDxYEHx9lHyBlZGQCBg9kFgJmDw8WBB8fZR8gZWRkAgcPDxYCHwAFGTE0LzEwLzIwMTkgMDU6MDA6MDEgYS4gbS5kZAIDD2QWAmYPDxYCHx1oZGQCCw8PFggfB2cfCWgfCAsrBAEfBmdkFgRmDw8WBh8JaB8GZx8ICysEAWRkAgEPFCsAAhQrAAIUKwACDxYOHwdnHhNFbmFibGVFbWJlZGRlZFNraW5zZx8JaB4URW5hYmxlUm91bmRlZENvcm5lcnNnHg1FbmFibGVTaGFkb3dzaB8GZx8ICysEAWRkZGRkGAIFHl9fQ29udHJvbHNSZXF1aXJlUG9zdEJhY2tLZXlfXxYMBSdjdGwwMCRDb250ZW50UGxhY2VIb2xkZXIxJEZlY2hhQ29uc3VsdGEFJmN0bDAwJENvbnRlbnRQbGFjZUhvbGRlcjEkRmVjaGFJbmljaWFsBSRjdGwwMCRDb250ZW50UGxhY2VIb2xkZXIxJEZlY2hhRmluYWwFK2N0bDAwJENvbnRlbnRQbGFjZUhvbGRlcjEkRGVzY2FyZ2FyUmVwb3J0ZXMFKmN0bDAwJENvbnRlbnRQbGFjZUhvbGRlcjEkR3JpZFJhZFJlc3VsdGFkbwVAY3RsMDAkQ29udGVudFBsYWNlSG9sZGVyMSRHcmlkUmFkUmVzdWx0YWRvJGN0bDAwJGN0bDA0JGdiY2NvbHVtbgVBY3RsMDAkQ29udGVudFBsYWNlSG9sZGVyMSRHcmlkUmFkUmVzdWx0YWRvJGN0bDAwJGN0bDA0JGdiY2NvbHVtbjEFQWN0bDAwJENvbnRlbnRQbGFjZUhvbGRlcjEkR3JpZFJhZFJlc3VsdGFkbyRjdGwwMCRjdGwwNCRnYmNjb2x1bW4yBSVjdGwwMCRDb250ZW50UGxhY2VIb2xkZXIxJE5vdGlmQXZpc29zBS5jdGwwMCRDb250ZW50UGxhY2VIb2xkZXIxJE5vdGlmQXZpc29zJFhtbFBhbmVsBS9jdGwwMCRDb250ZW50UGxhY2VIb2xkZXIxJE5vdGlmQXZpc29zJFRpdGxlTWVudQUoY3RsMDAkQ29udGVudFBsYWNlSG9sZGVyMSRidG5DZXJyYXJQYW5lbAUfY3RsMDAkQ29udGVudFBsYWNlSG9sZGVyMSRjdGwwMA88KwAMAQhmZHAKRKrT54JyF09yAgRL16DIn42vcyspzOtg86mdF/6Z", "__VIEWSTATEGENERATOR": "5B6503FA", "__EVENTVALIDATION": 
"/wEdABPIFpMnlAgkSZvMhE+vOQYa0gsvRcXibJrviW3Dmsx0G+jYKkdCU41GOhiZPOlFyBecIegvepvm5r48BtByTWSkIC/PSPgmtogq3vXUp+YNvsMPaGT0F8ZMY05tsTP7KXY5p77wXhhk2nxxmhBw8yYO6yoq09PpCPpnHhKGI5XXqN0NAXFS9Kcv7U1TgXuCACxTET4yjIt6nVt9qCHIyzbla16U6SvCvrhBDl88f4l+A2AwM+Efhx0eY7z5UUNUDwDoCL/OENuuNNFPCRAmSpT1/nxKmb/ucFs0tCWRV4G4iLScixGy8IhVeNkOJJPR8q4msGM8DGO6o6g/gMszmMRrbD50rXo0f8u6b2IB+RzVpsHxVceaRLBN56ddyVdqKV1RL0jZlTtb1Prpo6YdA7cH301O2Ez19CJOtDoyAWUZ982dVJTM6fLOsQokHcEDIxQ=", "ctl00_ContentPlaceHolder1_FechaConsulta_ClientState": "{\"minDateStr\":\"" + datestr + "+0:0:0\",\"maxDateStr\":\"" + datestr + "+0:0:0\"}", "ctl00$ContentPlaceHolder1$GridRadResultado$ctl00$ctl04$gbccolumn.x": "10", "ctl00$ContentPlaceHolder1$GridRadResultado$ctl00$ctl04$gbccolumn.y": "9", } # urlencode the data in the weird form which is expected by the API # plus signs MUST be contained in the date strings but MAY NOT be contained in the VIEWSTATE... data = urllib.parse.urlencode(parameters, quote_via=urllib.parse.quote).replace("%2B0", "+0") response = session.post(MX_PRODUCTION_URL, data=data, headers={"Content-Type": "application/x-www-form-urlencoded"}) response.raise_for_status() # API returns normally status 200 but content type text/html when data is missing if "Content-Type" not in response.headers or response.headers["Content-Type"] != 'application/octet-stream': raise Exception("Error while fetching csv for date {}: No CSV was returned by the API. Probably the data for this date has not yet been published.".format(datestr)) # skip non-csv data, the header starts with "Sistema" csv_str = response.text csv_str = csv_str[csv_str.find("\"Sistema\""):] return pd.read_csv(StringIO(csv_str), parse_dates={'instante' : [1, 2]}, date_parser=parse_date)
6e65a9941390beb826eace939d03af4427133029
20,026
def calculate_mean_probas(time_ser, model):
    """Calculate the metric to evaluate based on average probabilities

    Args:
        time_ser (np.ndarray): dynophore time series
        model (HMM): Fitted HMM

    Returns:
        np.float: Probability of predicting the given time series based
        on the fitted model
    """
    probas = model.predict_proba(time_ser)
    states = model.predict(time_ser)
    prob_ser = np.zeros(probas.shape)
    for i in range(len(states)):
        prob_ser[i, states[i]] = probas[i, states[i]]
    return np.mean(np.mean(prob_ser, axis=0))
b8320a24c01e56c89b5d706630190a118d803ffa
20,027
def compute_t(i, automata_list, target_events):
    """
    Compute alphabet needed for processing L{automata_list}[i-1] in the
    sequential abstraction procedure.

    @param i: Number of the automaton in the L{automata_list}
    @type  i: C{int} in range(1, len(automata_list)+1)

    @param automata_list: List of automata
    @type  automata_list: C{list} of L{Automaton}

    @param target_events: List of events to preserve after abstraction
    @type  target_events: C{set} of L{Event}

    @return: New alphabet for the next step in sequential abstraction
    @rtype:  C{set} of L{Event}
    """
    processed = set()
    for j in range(0, i):
        processed = processed.union(automata_list[j].alphabet)
    unprocessed = target_events.copy()
    for j in range(i, len(automata_list)):
        unprocessed = unprocessed.union(automata_list[j].alphabet)
    result = processed.intersection(unprocessed)
    processed.clear()
    unprocessed.clear()
    return result
88fc64aaf917d23a29e9400cf29705e6b20665c3
20,028
def cal_min_sim(y):
    """Calculate the minimal value given multiple trajectories from different isomers"""
    y = y.copy()
    if len(y.shape) == 2:
        # add one more dimension if only two provided
        y = y[np.newaxis, :]
    n_sim, nT, nP = y.shape
    y_min_sim = np.min(y, axis=0)
    return y_min_sim
efbee1f3d8a88ac447609019a431d3ac6469f2cf
20,029
async def create_rsa_key( hub, ctx, name, vault_url, key_ops=None, enabled=None, expires_on=None, not_before=None, tags=None, **kwargs, ): """ .. versionadded:: 2.0.0 Create a new RSA key or, if name is already in use, create a new version of the key. Requires the keys/create permission. Key properties can be specified as keyword arguments. :param name: The name of the new key. Key names can only contain alphanumeric characters and dashes. :param vault_url: The URL of the vault that the client will access. :param key_ops: A list of permitted key operations. Possible values include: 'decrypt', 'encrypt', 'sign', 'unwrap_key', 'verify', 'wrap_key'. :param enabled: Whether the key is enabled for use. :param expires_on: When the key will expire, in UTC. This parameter must be a string representation of a Datetime object in ISO-8601 format. :param not_before: The time before which the key can not be used, in UTC. This parameter must be a string representation of a Datetime object in ISO-8601 format. :param tags: Application specific metadata in the form of key-value pairs. CLI Example: .. code-block:: bash azurerm.keyvault.key.create_rsa_key test_name test_vault """ result = {} kconn = await hub.exec.azurerm.keyvault.key.get_key_client(ctx, vault_url, **kwargs) try: key = kconn.create_rsa_key( name=name, key_operations=key_ops, enabled=enabled, expires_on=expires_on, not_before=not_before, tags=tags, ) result = _key_as_dict(key) except (KeyVaultErrorException, ValidationError, HttpResponseError) as exc: result = {"error": str(exc)} return result
f22a520bc82bed0447440a80639a1f6ef575e718
20,030
import re


def _parse_size_string(size):
    """
    Parse a capacity string.

    Takes a string representing a capacity and returns the size in bytes,
    as an integer. Accepts strings such as "5", "5B", "5g", "5GB",
    " 5 GiB ", etc. Case insensitive. See `man virsh` for more details.

    :param size: The size string to parse.
    :returns: The number of bytes represented by `size`, as an integer.
    """
    # Base values for units.
    BIN = 1024
    DEC = 1000

    POWERS = {"": 0, "k": 1, "m": 2, "g": 3, "t": 4}

    # If an integer is passed, treat it as a string without units.
    size = str(size).lower()

    regex = r"\s*(\d+)\s*([%s])?(i?b)?\s*$" % "".join(POWERS.keys())

    match = re.compile(regex).match(size)

    if not match:
        msg = "The size string '%s' is not of a valid format." % size
        raise AnsibleFilterError(to_text(msg))

    number = match.group(1)
    power = match.group(2)
    unit = match.group(3)

    if not power:
        power = ""

    if unit == "b":
        base = DEC
    else:
        base = BIN

    return int(number) * (base ** POWERS[power])
6ad10ba10380eaa7a8acd6bbeb52b537fdcf3864
20,031
import os


def connect(cfg=None, jar=None):
    """
    Connect to MF using a token with authority to access the data collection

    :return: A new :py:class:`Session`

    Example::

        >>> from MFQuery.MF import MF
        >>> cfg = "$HOME/aterm.cfg"
        >>> jar = "$HOME/aterm.jar"
        >>> wath = MF.connect(cfg, jar)  # doctest: +SKIP
        >>> outputs = wath.query()  # doctest: +SKIP
    """
    # if user didn't pass configuration and jar file assume they're in $HOME
    if cfg is None:
        cfg = os.environ.get('ATERMCFG', "$HOME/aterm.cfg")
    if jar is None:
        jar = os.environ.get('ATERMJAR', "$HOME/aterm.jar")
    session = Session(cfg, jar)
    return session
f05e959ca7cf9c83d1d8e81eebadf9e8c6244774
20,032
def get_exp_lr(base_lr, xs, power=4e-10):
    """Get learning rates for each step."""
    ys = []
    for x in xs:
        ys.append(base_lr / np.exp(power * x**2))
    return ys
58486de08742d2467a4178d1ac0544c0d1f2055c
20,033
import time


def dashboard():
    """ Main dashboard function. Run stats across all accounts. """
    start = time.time()
    instance_count = 0
    user_count = 0
    sg_count = 0
    elb_count = 0
    aws_accounts = AwsAccounts()
    accounts = aws_accounts.all()
    pool = Pool(10)
    results = pool.map(get_account_stats, accounts)
    pool.close()
    pool.join()
    for acc_result in results:
        instance_count += acc_result['InstanceCount']
        user_count += acc_result['UserCount']
        sg_count += acc_result['SecurityGroupCount']
        elb_count += acc_result['ELBCount']
    end = time.time()
    result = dict(
        Time=(end - start),
        Summary=dict(
            AccountsCount=len(accounts),
            InstanceCount=instance_count,
            UserCount=user_count,
            SecurityGroupCount=sg_count,
            ELBCount=elb_count))
    return result
e5138d8527ecb5712db6205757432d31efde8f2b
20,034
def strip_new_line(str_json):
    """
    Strip \n new line

    :param str_json: string
    :return: string
    """
    # kill new line breaks caused by triple quoted raw strings
    str_json = str_json.replace('\n', '')
    return str_json
f2faaa80dca000586a32a37cdf3dff793c0a2d9b
20,035
def fromAtoB(x1, y1, x2, y2, color='k', connectionstyle="arc3,rad=-0.4", shrinkA=10, shrinkB=10, arrowstyle="fancy", ax=None): """ Draws an arrow from point A=(x1,y1) to point B=(x2,y2) on the (optional) axis ``ax``. .. note:: See matplotlib documentation. """ if ax is None: return pl.annotate("", xy=(x2, y2), xycoords='data', xytext=(x1, y1), textcoords='data', arrowprops=dict( arrowstyle=arrowstyle, # linestyle="dashed", color=color, shrinkA=shrinkA, shrinkB=shrinkB, patchA=None, patchB=None, connectionstyle=connectionstyle), ) else: return ax.annotate("", xy=(x2, y2), xycoords='data', xytext=(x1, y1), textcoords='data', arrowprops=dict( arrowstyle=arrowstyle, # linestyle="dashed", color=color, shrinkA=shrinkA, shrinkB=shrinkB, patchA=None, patchB=None, connectionstyle=connectionstyle), )
a7b14ae62d26f203da0fb3f26c7aa7652fb9a345
20,036
import torch


def exp(input_):
    """Wrapper of `torch.exp`.

    Parameters
    ----------
    input_ : DTensor
        Input dense tensor.
    """
    return torch.exp(input_._data)
01449c87486a7145b26d313de7254cb784d94a7b
20,037
import torch def capsule_sdf(mesh_verts, mesh_normals, query_points, query_normals, caps_rad, caps_top, caps_bot, foreach_on_mesh): """ Find the SDF of query points to mesh verts Capsule SDF formulation from https://iquilezles.org/www/articles/distfunctions/distfunctions.htm :param mesh_verts: (batch, V, 3) :param mesh_normals: (batch, V, 3) :param query_points: (batch, Q, 3) :param caps_rad: scalar, radius of capsules :param caps_top: scalar, distance from mesh to top of capsule :param caps_bot: scalar, distance from mesh to bottom of capsule :param foreach_on_mesh: boolean, foreach point on mesh find closest query (V), or foreach query find closest mesh (Q) :return: normalized sdf + 1 (batch, V or Q) """ # TODO implement normal check? if foreach_on_mesh: # Foreach mesh vert, find closest query point knn_dists, nearest_idx, nearest_pos = pytorch3d.ops.knn_points(mesh_verts, query_points, K=1, return_nn=True) # TODO should attract capsule middle? capsule_tops = mesh_verts + mesh_normals * caps_top capsule_bots = mesh_verts + mesh_normals * caps_bot delta_top = nearest_pos[:, :, 0, :] - capsule_tops normal_dot = torch.sum(mesh_normals * batched_index_select(query_normals, 1, nearest_idx.squeeze(2)), dim=2) else: # Foreach query vert, find closest mesh point knn_dists, nearest_idx, nearest_pos = pytorch3d.ops.knn_points(query_points, mesh_verts, K=1, return_nn=True) # TODO should attract capsule middle? closest_mesh_verts = batched_index_select(mesh_verts, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3) closest_mesh_normals = batched_index_select(mesh_normals, 1, nearest_idx.squeeze(2)) # Shape (batch, V, 3) capsule_tops = closest_mesh_verts + closest_mesh_normals * caps_top # Coordinates of the top focii of the capsules (batch, V, 3) capsule_bots = closest_mesh_verts + closest_mesh_normals * caps_bot delta_top = query_points - capsule_tops normal_dot = torch.sum(query_normals * closest_mesh_normals, dim=2) bot_to_top = capsule_bots - capsule_tops # Vector from capsule bottom to top along_axis = torch.sum(delta_top * bot_to_top, dim=2) # Dot product top_to_bot_square = torch.sum(bot_to_top * bot_to_top, dim=2) h = torch.clamp(along_axis / top_to_bot_square, 0, 1) # Could avoid NaNs with offset in division here dist_to_axis = torch.norm(delta_top - bot_to_top * h.unsqueeze(2), dim=2) # Distance to capsule centerline return dist_to_axis / caps_rad, normal_dot
f34a734496738464ba601caab8985efc59809e54
20,038
from datetime import datetime


def get_lat_lon(fp, fs=FS):
    """ get lat lon values for concat dataset """
    logger.info(f"{str(datetime.now())} : Retrieving lat lon")
    with xr.open_dataset(fs.open(fp)) as ds:
        lat, lon = ds["latitude"].values, ds["longitude"].values
    logger.info(f"{str(datetime.now())} : Retrieved lat lon")
    return lat, lon
a99614463121edb99c290ddea8d6bb7b298498f1
20,039
def one_hot_encode(vec, vals=10):
    """
    For use to one-hot encode the 10 possible labels
    """
    n = len(vec)
    out = np.zeros((n, vals))
    out[range(n), vec] = 1
    return out
079c4c505464659248631b3e5c3d1345557d922b
20,040
def COUNTA(*args) -> Function:
    """
    Returns a count of the number of values in a dataset.

    Learn more: https://support.google.com/docs/answer/3093991
    """
    return Function("COUNTA", args)
c8e876e80a0414eab915b6eb0efc9917b12edb19
20,041
import json


def decode_url_json_string(json_string):
    """
    Load a string representing serialised json into Python objects.

    :param json_string:
    :return:
    """
    strings = json.loads(h.unescape(json_string),
                         object_pairs_hook=parse_json_pairs)
    return strings
6f616e5e6037024ebdab6e63aa90c13c60fca40c
20,042
import argparse def init_argparser(): """ Define and parse commandline arguments. """ # training settings parser = argparse.ArgumentParser(description="PyTorch MNIST Example") parser.add_argument("--experiment", type=str, help="Choose the experiment.") parser.add_argument( "--batch-size", type=int, default=64, metavar="N", help="input batch size for training (default: 64)", ) parser.add_argument( "--test-batch-size", type=int, metavar="N", help="input batch size for testing (default: same as --batch-size)", ) parser.add_argument( "--log-level", default="info", choices=["verbose", "info", "warning", "error", "debug"], help="Log level", ) parser.add_argument( "--result-dir", default="results", help="path to the result directory", metavar="DIR", ) parser.add_argument( "--reuse-base-dir", help="path to the an already existing base directory (e.g. to continue certain experiments)", metavar="DIR", ) parser.add_argument( "--epochs", type=int, default=10, metavar="N", help="number of epochs to train (default: 10)", ) parser.add_argument( "--lr", type=float, default=0.01, metavar="N", help="learning rate (default: 0.01)", ) parser.add_argument( "--cuda", action="store_true", default=False, help="Enable CUDA training" ) parser.add_argument( "--cuda-device-id", nargs="+", type=int, default=[0], help="Cuda device ids. E.g. [0,1,2]. Use -1 for all GPUs available and -2 for cpu only.", ) parser.add_argument( "--debug", action="store_true", default=False, help="Enable debugging." ) parser.add_argument( "--experiment-name", type=str, help="Set the experiment name", required=True ) parser.add_argument("--net", type=str, help="Define network", required=True) parser.add_argument( "--n-gaussians", type=int, default=3, metavar="N", help="number of possible independence combinations of gaussians", ) parser.add_argument( "--njobs", type=int, default=4, metavar="S", help="Number of threads (default: 4)", ) parser.add_argument( "--seed", type=int, default=1, metavar="S", help="random seed (default: 1)" ) parser.add_argument( "--tag", default="", type=str, help="Tag to identify runs in the result directory and tensorboard overviews", ) parser.add_argument( "--resnet-arch", default="resnet18", type=str, choices=["resnet18", "resnet34", "resnet50", "resnet101", "resnet152"], help="Resnet architecture", ) parser.add_argument( "--log-interval", type=int, default=10, metavar="N", help="how many batches to wait before logging training status", ) parser.add_argument( "--dataset", type=str, choices=[ "iris-2d", "wine-2d", "diabetes", "audit", "banknotes", "ionosphere", "sonar", "wheat-2d", "synth-8-easy", "synth-64-easy", "synth-8-hard", "synth-64-hard", ], ) parser.add_argument( "--force-overfit", action="store_true", default=False, help="Force overfitting (set num train samples to 1000)", ) parser.add_argument( "--save-model", action="store_true", default=False, help="For Saving the current Model", ) parser.add_argument( "--l2", type=float, default=0.0, help="L2 weight decay parameter. (default: 0.0)", ) return parser # args = parser.parse_args() # ensure_dir(args.result_dir) # if args.debug: # args.epochs = 2 # if args.n_digits > args.n_labels: # raise Exception("Option --n-digits has to be <= --n-labels.") # return args
1d435b4d2b714e94c5d4c1fc575ee93db98d93e4
20,043
def svn_wc_merge2(*args):
    """
    svn_wc_merge2(enum svn_wc_merge_outcome_t merge_outcome, char left,
        char right, char merge_target, svn_wc_adm_access_t adm_access,
        char left_label, char right_label, char target_label,
        svn_boolean_t dry_run, char diff3_cmd,
        apr_array_header_t merge_options, apr_pool_t pool) -> svn_error_t
    """
    return apply(_wc.svn_wc_merge2, args)
271e596810b7ee604532f34612e349ae30b108c5
20,044
import torch def test_finetuning_callback_warning(tmpdir): """Test finetuning callbacks works as expected.""" seed_everything(42) class FinetuningBoringModel(BoringModel): def __init__(self): super().__init__() self.backbone = nn.Linear(32, 2, bias=False) self.layer = None self.backbone.has_been_used = False def training_step(self, batch, batch_idx): output = self(batch) loss = self.loss(batch, output) return {"loss": loss} def forward(self, x): self.backbone.has_been_used = True x = self.backbone(x) return x def train_dataloader(self): return DataLoader(RandomDataset(32, 64), batch_size=2) def configure_optimizers(self): optimizer = torch.optim.SGD(self.parameters(), lr=0.1) return optimizer chk = ModelCheckpoint(dirpath=tmpdir, save_last=True) model = FinetuningBoringModel() model.validation_step = None callback = TestBackboneFinetuningWarningCallback(unfreeze_backbone_at_epoch=3, verbose=False) with pytest.warns(UserWarning, match="Did you init your optimizer in"): trainer = Trainer(limit_train_batches=1, default_root_dir=tmpdir, callbacks=[callback, chk], max_epochs=2) trainer.fit(model) assert model.backbone.has_been_used trainer = Trainer(max_epochs=3) trainer.fit(model, ckpt_path=chk.last_model_path)
c7bec2c256ece471b0f3d9f56a74dcb2c7ad186a
20,045
from contextlib import suppress


def idempotent(function):
    """Swallows 304 errors, making actions repeatable."""
    @wraps(function)
    def decorator(*args, **kwargs):
        with suppress(GitlabCreateError):
            return function(*args, **kwargs)
    return decorator
4012ce715a8344a7a9eb7e27a7d96f0e3b9c8f6d
20,046
def newline_formatter(func):
    """ Wrap a formatter function so a newline is appended if needed to the output """

    def __wrapped_func(*args, **kwargs):
        """ Wrapper function that appends a newline to result of original function """
        result = func(*args, **kwargs)

        # The result may be a string, or bytes. In python 2 they are the same, but in
        # python 3, they are not. First, check for strings as that works the same in
        # python 2 and 3, THEN check for bytes, as that implementation is python 3
        # specific. If it's neither (future proofing), we use a regular new line
        line_ending = "\n"
        if isinstance(result, str):
            line_ending = "\n"
        elif isinstance(result, bytes):
            # We are redefining the variable type on purpose since python broke
            # backwards compatibility between 2 & 3.
            line_ending = b"\n"

        # Avoid double line endings
        if not result.endswith(line_ending):
            result += line_ending

        return result

    # Return the wrapper
    return __wrapped_func
71af6af25aa93e0e8f80958b5caf5266f598c878
20,047
from typing import List from typing import Tuple def sigma_splitter(float_arr: List[float]) -> Tuple[List[List[int]], List[List[int]], List[List[int]]]: """ separates the NCOF score into the 1-3 sigma outliers for the NCOF input @param float_arr: List[float] @return: inliers , pos_outliers , neg_outliers: List[List[int]], List[List[int]], List[List[int]] """ "calculates the mean and std of the input score" mean = np.mean(float_arr) std = np.std(float_arr) "calculate which indexes that are input inliers" inliers = np.where(np.logical_and(float_arr >= mean - std, float_arr <= mean + std)) inliers = inliers[0].tolist() "Calculates the 1-sigma postive outliers" one_pos_sigma = np.where(np.logical_and(mean + std <= float_arr, float_arr < mean + 2 * std)) "Calculates the 2-sigma postive outliers" two_pos_sigma = np.where(np.logical_and(mean + 2 * std <= float_arr, float_arr < mean + 3 * std)) "Calculates the 3-sigma postive outliers" three_pos_sigma = np.where(mean + 3 * std <= float_arr) "Calculates the 1-sigma negative outliers" one_neg_sigma = np.where(np.logical_and(mean - 2 * std < float_arr, float_arr <= mean - std)) "Calculates the 2-sigma negative outliers" two_neg_sigma = np.where(np.logical_and(mean - 3 * std < float_arr, float_arr <= mean - 2 * std)) "Calculates the 3-sigma negative outliers" three_neg_sigma = np.where(float_arr <= mean - 3 * std) "stores the positive outliers in a list of lists" pos_outliers = [one_pos_sigma[0], two_pos_sigma[0], three_pos_sigma[0]] pos_outliers = [l.tolist() for l in pos_outliers] "stores the negative outliers in a list of lists" neg_outliers = [one_neg_sigma[0], two_neg_sigma[0], three_neg_sigma[0]] neg_outliers = [l.tolist() for l in neg_outliers] "OUTPUT: list of indexes" "inliers: list of all inliers" "pos_outliers: list of 3 lists that corresponds to the 1,2,3 positive sigma outlers" "neg_outliers: list of 3 lists that corresponds to the 1,2,3 negative sigma outlers" return inliers, pos_outliers, neg_outliers
824c3d11ffa1fb81763cdf815de3e37a7b8aa335
20,048
import torch


def cosine_beta_schedule(timesteps, s=0.008, thres=0.999):
    """
    cosine schedule
    as proposed in https://openreview.net/forum?id=-NEXDKk8gZ
    """
    steps = timesteps + 1
    x = torch.linspace(0, timesteps, steps, dtype=torch.float64)
    alphas_cumprod = torch.cos(((x / timesteps) + s) / (1 + s) * torch.pi * 0.5) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return torch.clip(betas, 0, thres)
a1969deafdb282955a53b15978a055d15f0678a0
20,049
from typing import Callable
from re import T
from typing import Optional
from typing import Dict
from typing import Any

import yaml


def construct_from_yaml(
    constructor: Callable[..., T],
    yaml_dict: Optional[Dict[str, Any]] = None,
) -> T:
    """Build ``constructor`` from ``yaml_dict``

    Args:
        constructor (Callable): The constructor to test (such as an Hparams class)
        yaml_dict (Dict[str, Any], optional): The YAML. Defaults to ``None``,
            which is equivalent to an empty dictionary.
    """
    yaml_dict = {} if yaml_dict is None else yaml_dict
    # ensure that yaml_dict is actually a dictionary of only json-serializable objects
    yaml_dict = yaml.safe_load(yaml.safe_dump(yaml_dict))
    instance = hp.create(constructor, yaml_dict, cli_args=False)
    return instance
5cb92f8af0b0ab49069e88b74b7b10fdf2cc797d
20,050
import tokenize


def text_to_document(text, language="en"):
    """ Returns string text as list of Sentences """
    splitter = _sentence_splitters[language]
    utext = unicode(text, 'utf-8') if isinstance(text, str) else text
    sentences = splitter.tokenize(utext)
    return [tokenize(text, language) for text in sentences]
2f196c51a979a2f9a849ebd6f89203c907406789
20,051
def get_top_playlists_route(type):
    """An endpoint to retrieve the "top" of a certain demographic of playlists or albums.

    This endpoint is useful in generating views like:
        - Top playlists
        - Top Albums
        - Top playlists of a certain mood
        - Top playlists of a certain mood from people you follow

    Args:
        type: (string) The `type` (same as repost/save type) to query from.
        limit?: (number) default=16, max=100
        mood?: (string) default=None
        filter?: (string) Optional filter to include (supports 'followees') default=None
    """
    args = to_dict(request.args)
    if 'limit' in request.args:
        args['limit'] = min(request.args.get('limit', type=int), 100)
    else:
        args['limit'] = 16

    if 'mood' in request.args:
        args['mood'] = request.args.get('mood')
    else:
        args['mood'] = None

    if "with_users" in request.args:
        args["with_users"] = parse_bool_param(request.args.get("with_users"))

    try:
        playlists = get_top_playlists(type, args)
        return api_helpers.success_response(playlists)
    except exceptions.ArgumentError as e:
        return api_helpers.error_response(str(e), 400)
2012e95073291669a6bb881afa853961922160c5
20,052
import urllib.parse


def parse_host(incomplete_uri: str) -> str:
    """Get netloc/host from incomplete uri."""
    # without // it is interpreted as relative
    return urllib.parse.urlparse(f"//{incomplete_uri}").netloc
099284e970756055f2616d484014db210cb04a76
20,053
import functools
import operator


def inner_by_delta(vec1: Vec, vec2: Vec):
    """Compute the inner product of two vectors by delta.

    The two vectors are assumed to be from the same base and have the same
    number of indices, or ValueError will be raised.
    """
    indices1 = vec1.indices
    indices2 = vec2.indices
    if vec1.label != vec2.label or len(indices1) != len(indices2):
        raise ValueError(
            'Invalid vectors to computer inner product by delta',
            (vec1, vec2)
        )

    return functools.reduce(operator.mul, (
        KroneckerDelta(i, j) for i, j in zip(indices1, indices2)
    ), Integer(1))
660f7d73e6d73cd4dfdf73d532e90e5a29f38481
20,054
def remove_mapping(rxn_smi: str, keep_reagents: bool = False) -> str:
    """
    Removes all atom mapping from the reaction SMILES string

    Parameters
    ----------
    rxn_smi : str
        The reaction SMILES string whose atom mapping is to be removed
    keep_reagents : bool (Default = False)
        whether to keep the reagents in the output reaction SMILES string

    Returns
    -------
    str
        The reaction SMILES string with all atom mapping removed

    Also see: clean_rxn_smis_50k_one_phase, clean_rxn_smis_FULL_one_phase
    """
    rxn = rdChemReactions.ReactionFromSmarts(rxn_smi, useSmiles=True)
    if not keep_reagents:
        rxn.RemoveAgentTemplates()

    prods = [mol for mol in rxn.GetProducts()]
    for prod in prods:
        for atom in prod.GetAtoms():
            if atom.HasProp("molAtomMapNumber"):
                atom.ClearProp("molAtomMapNumber")

    rcts = [mol for mol in rxn.GetReactants()]
    for rct in rcts:
        for atom in rct.GetAtoms():
            if atom.HasProp("molAtomMapNumber"):
                atom.ClearProp("molAtomMapNumber")

    return rdChemReactions.ReactionToSmiles(rxn)
fb16648fee136359bc8ef96684824319221a3359
20,055
def generate_bot_master_get_results_message(message_id, receiving_host, receiving_port):
    """
    :rtype : fortrace.net.proto.genericmessage_pb2.GenericMessage
    :type receiving_port: int
    :type receiving_host: str
    :type message_id: long
    :param message_id: the id of this message
    :param receiving_host: the host that receives the order
    :param receiving_port: the host's port
    :return: the message to be generated
    """
    m = genericmessage_pb2.GenericMessage()
    m.message_type = messagetypes_pb2.BOTMASTERGETRESULT
    m.message_id = message_id
    m.Extensions[botmastermessage_pb2.bm_result].receiving_host = receiving_host
    m.Extensions[botmastermessage_pb2.bm_result].receiving_port = receiving_port
    assert m.IsInitialized()
    return m
4c46a9c1bf69022092b7df4b48e302a87d2d7b90
20,056
import fileinput from datetime import datetime def readLogData(username,level,root='.'): """ Extracts key events from a log """ filename = getFilename(username,level,extension='log',root=root) log = [] start = None for line in fileinput.input(filename): elements = line.split() if elements[2] == MESSAGE_TAG: now = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S') log.insert(0,{'type': 'message','content': ' '.join(elements[3:]), 'time': now-start}) elif elements[2] == LOCATION_TAG: now = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S') index = symbol2index(elements[3],level) waypoint = WAYPOINTS[level][index] log.insert(0,{'type': 'location','destination': waypoint['name'], 'buildingNo': index+1,'buildingTotal': len(WAYPOINTS[level]), 'time': now-start}) elif elements[2] == CREATE_TAG: start = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S') log.insert(0,{'type': 'create', 'time': 'Start','start': start}) elif elements[2] == COMPLETE_TAG: now = datetime.datetime.strptime('%s %s' % (elements[0][1:],elements[1][:-1]),'%Y-%m-%d %H:%M:%S') log.insert(0,{'type': 'complete','success': elements[3] == 'success', 'time': now-start}) elif elements[2] == USER_TAG: log[0]['choice'] = elements[3] log[0]['location'] = WAYPOINTS[level][symbol2index(elements[4],level)]['name'] log[0]['danger'] = elements[5] log[0]['dead'] = elements[6] log[0]['image'] = elements[7] log[0]['content'] = ' '.join(elements[8:])[1:-1] if ') (' in log[0]['content']: log[0]['content'],log[0]['ack'] = log[0]['content'].split(') (') else: log[0]['ack'] = '' fileinput.close() return log
f94c3e715d021b206ef46766fdc0e6051784615e
20,057
def get_type1(pkmn):
    """get_type1(pkmn) returns Type 1 of the Pokémon with the name 'pkmn'
    """
    return __pokemon__[pkmn]['Type 1']
c4290f695160f2f1962f1dca158359e250a4803a
20,058
def load_json(fname):
    """
    Load a JSON file containing a riptide object (or list/dict/composition thereof)
    """
    with open(fname, 'r') as f:
        return from_json(f.read())
93f771ae0ba31974b564e1520412fab5719b08be
20,059
def get_stock_data(symbol, start_date, end_date, source="phisix", format="c"): """Returns pricing data for a specified stock and source. Parameters ---------- symbol : str Symbol of the stock in the PSE or Yahoo. You can refer to these links: PHISIX: https://www.pesobility.com/stock YAHOO: https://www.nasdaq.com/market-activity/stocks/screener?exchange=nasdaq start_date : str Starting date (YYYY-MM-DD) of the period that you want to get data on end_date : str Ending date (YYYY-MM-DD) of the period you want to get data on source : str First source to query from ("pse", "yahoo"). If the stock is not found in the first source, the query is run on the other source. format : str Format of the output data Returns ------- pandas.DataFrame Stock data (in the specified `format`) for the specified company and date range """ df_columns = [DATA_FORMAT_COLS[c] for c in format] if source == "phisix": # The query is run on 'phisix', but if the symbol isn't found, the same query is run on 'yahoo'. df = get_pse_data(symbol, start_date, end_date, format=format) if df is None: df = get_yahoo_data(symbol, start_date, end_date) elif source == "yahoo": # The query is run on 'yahoo', but if the symbol isn't found, the same query is run on 'phisix'. df = get_yahoo_data(symbol, start_date, end_date) if df is None: df = get_pse_data(symbol, start_date, end_date) else: raise Exception("Source must be either 'phisix' or 'yahoo'") missing_columns = [col for col in df_columns if col not in df.columns] # Fill missing columns with np.nan for missing_column in missing_columns: df[missing_column] = np.nan if len(missing_columns) > 0: print("Missing columns filled w/ NaN:", missing_columns) return df[df_columns]
94171c950198f0975c4232f232ec9be93bd3f2a3
20,060
def extract_borderless(result) -> list:
    """
    extracts borderless masks from result

    Args:
        result:

    Returns:
        a list of the borderless tables. Each array describes a borderless
        table bounding box. The two coordinates in the array are the top
        right and bottom left coordinates of the bounding box.
    """
    result_borderless = []
    for r in result[0][2]:
        if r[4] > .85:  # keep only detections above the 0.85 confidence threshold
            result_borderless.append(r[:4].astype(int))
    return result_borderless
e81844a5deb553bf8d7380ebec8a76fec219ee72
20,061
import itertools def get_frequent_length_k_itemsets(transactions, min_support=0.2, k=1, frequent_sub_itemsets=None): """Returns all the length-k itemsets, from the transactions, that satisfy min_support. Parameters ---------- transactions : list of list min_support : float, optional From 0.0 to 1.0. Percentage of transactions that should contain an itemset for it to be considered frequent. k : int, optional Length that the frequent itemsets should be frequent_sub_itemsets : frozenset of frozenset, optional Facilitates candidate pruning by the Apriori property. Length-k itemset candidates that aren't supersets of at least 1 frequent sub-itemset are pruned. Returns ------- list of frozenset list of float """ if min_support <= 0 or min_support > 1: raise ValueError('min_support must be greater than 0 and less than or equal to 1.0') if k <= 0: raise ValueError('k must be greater than 0') all_items = set() if frequent_sub_itemsets: for sub_itemset in frequent_sub_itemsets: all_items = all_items.union(sub_itemset) else: for transaction in transactions: all_items = all_items.union(transaction) all_length_k_itemsets = itertools.product(all_items, repeat=k) all_length_k_itemsets = frozenset(frozenset(itemset) for itemset in all_length_k_itemsets) all_length_k_itemsets = frozenset(filter(lambda itemset: len(itemset) == k, all_length_k_itemsets)) # Remove itemsets that don't have a frequent sub-itemset to take advantage # of the Apriori property pruned_length_k_itemsets = all_length_k_itemsets if frequent_sub_itemsets: pruned_length_k_itemsets = set() for itemset in all_length_k_itemsets: has_frequent_sub_itemset = False for sub_itemset in frequent_sub_itemsets: if sub_itemset.issubset(itemset): has_frequent_sub_itemset = True if has_frequent_sub_itemset: pruned_length_k_itemsets.add(itemset) frequent_itemsets = [] frequent_supports = [] supports = support(transactions, pruned_length_k_itemsets) for itemset, itemset_support in supports.items(): if itemset_support >= min_support: frequent_itemsets.append(itemset) frequent_supports.append(itemset_support) return frequent_itemsets, frequent_supports
a293b48c62ebbafda7fa89abb6792f04c4ff1371
20,062
from telebot import types


def create_news_markup():
    """
    Creates the reply keyboard for movie news
    :return: telebot.types.ReplyKeyboardMarkup
    """
    news_markup = types.ReplyKeyboardMarkup()
    news_markup.row(Commands.GET_BACK_COMMAND)
    return news_markup
654ec227d07fe914c795931f48ce634e0a4a6fc3
20,063
import datetime

from django.db.models import Q


def get_bibtex_query_set(params):
    """Returns bibtex objects which match the search parameters.

    Args:
        params: dict which is made by `parse_GET_params`

    Returns:
        QuerySet
    """
    bibtex_queryset = Bibtex.objects.all()

    # Book_style
    book_style = params.get("book_style")
    if (book_style is not None) and (book_style != "ALL"):
        # TODO: Make this better (remove the if statement)
        if (book_style == "AWARD") or (book_style == "KEYNOTE"):
            bibtex_queryset = bibtex_queryset.filter(bib_type=book_style)
        else:
            bibtex_queryset = bibtex_queryset.filter(
                book__style=book_style,
                bib_type="SAMEASBOOK",
            )

    # Filter by published year
    period_method = params.get("period_method", "ACADEMIC_YEAR")
    year = params.get("period_year", datetime.datetime.now().year)
    if period_method == "YEAR":
        bibtex_queryset = bibtex_queryset.filter(
            pub_date__gte=datetime.date(int(year), 1, 1),
            pub_date__lte=datetime.date(int(year), 12, 31),
        )
    elif period_method == "ACADEMIC_YEAR":
        bibtex_queryset = bibtex_queryset.filter(
            pub_date__gte=datetime.date(int(year), 4, 1),
            pub_date__lte=datetime.date(int(year) + 1, 3, 31),
        )
    else:
        pass

    # Keywords
    keywords = params.get("keywords")
    if keywords is not None:
        keywords_list = keywords.split(" ")
        for keyword in keywords_list:
            bibtex_queryset = bibtex_queryset.filter(
                Q(title__icontains=keyword)
                | Q(book__title__icontains=keyword)
                | Q(book__abbr__icontains=keyword)
                | Q(authors__name_en__icontains=keyword)
                | Q(authors__name_ja__icontains=keyword)
            ).distinct()

    # Tags
    tags = params.get("tags")
    if tags is not None:
        tags_list = tags.split(" ")
        for tag in tags_list:
            bibtex_queryset = bibtex_queryset.filter(
                Q(tags__name__icontains=tag)
            ).distinct()

    # Sort
    sort = params.get("sort")
    if sort is None:
        return bibtex_queryset.order_by("-pub_date", "book", "title")
    elif sort == "ascending":
        return bibtex_queryset.order_by("-pub_date", "book", "title")
    elif sort == "desending":
        return bibtex_queryset.order_by("pub_date", "book", "title")
0f19b4cccf3a44ca3fd5e907a2cd07a24208badf
20,064
def _generate_description_from(command, name, description):
    """
    Generates description from the command and its optionally given description.

    If both `description` and `command.__doc__` are missing, defaults to `name`.

    Parameters
    ----------
    command : `None` or `callable`
        The command's function.
    name : `str` or `None`
        The command's name, if name defaulting should be applied.
    description : `Any`
        The command's description.

    Returns
    -------
    description : `str`
        The generated description.

    Raises
    ------
    ValueError
        If `description` length is out of range [2:100].
    """
    while True:
        if (description is not None) or isinstance(description, str):
            break

        if command is not None:
            description = getattr(command, '__doc__', None)
            if (description is not None) and isinstance(description, str):
                break

        if name is not None:
            description = name
            break

        return

    description = normalize_description(description)

    if description is None:
        description_length = 0
    else:
        description_length = len(description)

    if (
        description_length < APPLICATION_COMMAND_DESCRIPTION_LENGTH_MIN or
        description_length > APPLICATION_COMMAND_DESCRIPTION_LENGTH_MAX
    ):
        raise ValueError(
            f'`description` length is out of the expected range '
            f'[{APPLICATION_COMMAND_DESCRIPTION_LENGTH_MIN}:{APPLICATION_COMMAND_DESCRIPTION_LENGTH_MAX}], got '
            f'{description_length!r}; {description!r}.'
        )

    return description
e2f782f7e74635b3c50273b36b837e48d7999f4f
20,065
def uses_na_format(station: str) -> bool: """ Returns True if the station uses the North American format, False if the International format """ if station[0] in NA_REGIONS: return True elif station[0] in IN_REGIONS: return False elif station[:2] in M_NA_REGIONS: return True elif station[:2] in M_IN_REGIONS: return False raise BadStation("Station doesn't start with a recognized character set")
b3158a85ae9b1ba45ebeb3de27491650d7f4c4c8
20,066
def openFile(prompt,key = "r",defaulttype = None, defaultname = None):
    """
    Method to open a text file with sanity checking, optional defaults and reprompt on failure.
    This is the main callable function used to open files.

    :param prompt: the prompt to be displayed
    :type prompt: str
    :param key: the key passed to open, default is "r" (read)
    :type key: str
    :param defaulttype: the default extension which will be added if not supplied, (default to None)
    :type defaulttype: str
    :param defaultname: the default filename, (defaults to None)
    :type defaultname: str
    :return: the opened file descriptor.

    The file name is processed to expand environment variables and user names\
    so for example $ENV/dir/file.data or ~user/dir/file.data are expanded
    """

    while True:
        filename = getFilename(prompt,defaulttype,defaultname)      # Get the filename
        try:
            filestream = open(filename,str(key))                 # try and open
            return filestream
        except IOError:
            logger.error("Failed to open file '{0:s}' with key '{1:s}'".\
                         format(filename,str(key)))
e9985872c0beb15eaa5bafa543eefb01f5fd8413
20,067
import numpy as np


def dsphere(n=100, d=2, r=1, noise=None, ambient=None):
    """
    Sample `n` data points on a d-sphere.

    Parameters
    -----------
    n : int
        Number of data points in shape.
    d : int
        Intrinsic dimension of the sphere (points are sampled in d+1 coordinates).
    r : float
        Radius of sphere.
    noise : float, default=None
        Standard deviation of Gaussian noise added to the sampled points.
    ambient : int, default=None
        Embed the sphere into a space with ambient dimension equal to `ambient`. The sphere is randomly
        rotated in this high dimensional space.
    """
    data = np.random.randn(n, d+1)

    # Normalize points to the sphere
    data = r * data / np.sqrt(np.sum(data**2, 1)[:, None])

    if noise:
        data += noise * np.random.randn(*data.shape)

    if ambient:
        assert ambient > d, "Must embed in higher dimensions"
        data = embed(data, ambient)

    return data
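# Hedged usage sketch (illustration only): sampling a noisy circle (a 1-sphere).
# The `ambient` option is skipped here because it relies on the external embed() helper.
circle = dsphere(n=500, d=1, r=2.0, noise=0.05)
print(circle.shape)  # (500, 2)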
8957a328c2025fbdb3741b004f2fb3825f19e4d9
20,068
def topological_sort_by_down(start_nodes=None, all_nodes=None):
    """
    Topological sort method in the downstream direction.
    Only one of 'start_nodes' and 'all_nodes' needs to be given.

    Args:
        start_nodes (list[NodeGraphQt.BaseNode]):
            (Optional) the start update nodes of the graph.
        all_nodes (list[NodeGraphQt.BaseNode]):
            (Optional) if 'start_nodes' is None the function can calculate
            start nodes from 'all_nodes'.

    Returns:
        list[NodeGraphQt.BaseNode]: sorted nodes.
    """
    if not start_nodes and not all_nodes:
        return []
    if start_nodes:
        start_nodes = __remove_BackdropNode(start_nodes)
    if all_nodes:
        all_nodes = __remove_BackdropNode(all_nodes)

    if not start_nodes:
        start_nodes = [n for n in all_nodes if not _has_input_node(n)]
    if not start_nodes:
        return []

    if not [n for n in start_nodes if _has_output_node(n)]:
        return start_nodes

    graph = _build_down_stream_graph(start_nodes)
    return _sort_nodes(graph, start_nodes, True)
22a36d4f8225ae2978459796f115059e2bbb8d62
20,069
import os def check_directories(directories): """Checks if all given directories are really directories and on the same device. Parameters: directories (list of strings) - The directories to check. Returns: The tuple (ok, ok_dirs) where ok is a boolean and ok_dirs a list of directories (as strings). If the given directories contained no existing directories or it contained at least two directories that are not on the same device, then ok is False and ok_dirs is empty. Otherwise ok is True and ok_dirs contains all directories in the given directories that really exist. """ ok_dirs = [] for d in directories: if not os.path.exists(d): print("'%s' does not exist. Ignoring." % d) continue if not os.path.isdir(d): print("'%s' is no directory. Ignoring." % d) continue ok_dirs.append(d) if len(ok_dirs) == 0: print("No existing directory given. Exiting.") return False, [] prev_dir = None prev_device = None for d in ok_dirs: current_device = os.stat(d).st_dev if prev_device is not None and current_device != prev_device: print("'%s' and '%s' are not on the same device. Exiting." % \ (d, prev_dir)) return False, [] prev_dir = d prev_device = current_device return True, ok_dirs
779195d7509beb4b13ed237fec654514c6226586
20,070
import os
from urllib.request import urlretrieve


def download_accessions(force_download=False):
    """Downloads the compound accessions

    :param bool force_download: If true, overwrites a previously cached file
    :rtype: str
    """
    if os.path.exists(ACCESSION_DATA_PATH) and not force_download:
        log.info('using cached data at %s', ACCESSION_DATA_PATH)
    else:
        log.info('downloading %s to %s', ACCESSION_URL, ACCESSION_DATA_PATH)
        urlretrieve(ACCESSION_URL, ACCESSION_DATA_PATH)

    return ACCESSION_DATA_PATH
6a8871acd1b2d171ffd654718bc7e75f87627df7
20,071
from datetime import datetime


def parseYear(year, patterns):
    """This function returns a string representing a year based on the input and a list of possible patterns.

    >>> parseYear('2021', ['%Y'])
    '2021'

    >>> parseYear('2021', ['(%Y)', '%Y'])
    '2021'

    >>> parseYear('(2021)', ['%Y', '(%Y)'])
    '2021'
    """
    parsedYear = None
    for p in patterns:
        try:
            tmp = datetime.strptime(year, p).date().year
            parsedYear = str(tmp)
            break
        except ValueError:
            pass
    if parsedYear is None:
        return year
    else:
        return parsedYear
743378c868a2439f721e428f676092f9da0a2e7a
20,072
import numpy as np

# Assumption: Model and Parameters come from lmfit, matching the
# Model(...).fit(..., params=..., nan_policy=...) API used below.
from lmfit import Model, Parameters


def fit_oxy_nii(target_row, velocity_column = None, data_column = None, IP = "center", **kwargs):
    """
    Fits oxygen bright line to spectrum for future subtraction

    Parameters
    ----------
    target_row: `SkySurvey` row
        Row to match spectra to
    data_column: 'str', optional, must be keyword
        Name of data column, default of "DATA"
    velocity_column: 'str', optional, must be keyword
        Name of velocity column, default of "VELOCITY"
    **kwargs: dict
        keywords passed to Model.fit()
    """
    if velocity_column is None:
        velocity_column = "VELOCITY_GEO"
    if data_column is None:
        data_column = "DATA"

    def bright_atm(x, baseline, amp, mean, std):
        g = c_component(amp, mean, std, IP = IP)
        y = np.zeros_like(x)
        y += baseline
        y += g(x)
        return y

    bright_atm_model = Model(bright_atm)
    params = Parameters()
    params.add("baseline", value = np.nanmin(target_row[data_column]))
    params.add("amp", value = np.nanmax(target_row[data_column]))
    params.add("mean", value = -281.3)
    params.add("std", value = 3)

    exclusion_mask = (target_row[velocity_column] < -315) | (target_row[velocity_column] > -215)

    res = bright_atm_model.fit(target_row[data_column][np.invert(exclusion_mask)],
                               x = target_row[velocity_column][np.invert(exclusion_mask)],
                               params = params,
                               nan_policy = "omit",
                               **kwargs)

    return res
1251bf102abaec690fc97117c2409e2f5e89f35b
20,073
import json
import logging
import urllib.error
import urllib.request


def get(url, accept=None, headers=None):
    """
    Make a basic HTTP call to CMR using the GET action

    Parameters:
        url (string): resource to get
        accept (string): encoding of the returned data, some form of json is expected
        headers (dictionary): HTTP headers to apply
    """
    logger.debug(" Headers->CMR= %s", headers)
    req = urllib.request.Request(url)
    if accept is not None:
        apply_headers_to_request(req, {'Accept': accept})
    apply_headers_to_request(req, headers)
    try:
        #pylint: disable=R1732
        # the mock code does not support this in tests
        resp = urllib.request.urlopen(req)
        response = resp.read()
        raw_response = response.decode('utf-8')
        if resp.status == 200:
            obj_json = json.loads(raw_response)
            if isinstance(obj_json, list):
                data = obj_json
                obj_json = {"hits": len(data), "items" : data}
            #print (obj_json)
            head_list = {}
            for head in resp.getheaders():
                head_list[head[0]] = head[1]
            if logger.getEffectiveLevel() == logging.DEBUG:
                stringified = str(common.mask_dictionary(head_list, ["cmr-token", "authorization"]))
                logger.debug(" CMR->Headers = %s", stringified)
            #obj_json['http-headers'] = head_list
        elif resp.status == 204:
            obj_json = {}
            head_list = {}
            for head in resp.getheaders():
                head_list[head[0]] = head[1]
            obj_json['http-headers'] = head_list
        else:
            if raw_response.startswith("{") and raw_response.endswith("}"):
                return json.loads(raw_response)
            return raw_response
        return obj_json
    except urllib.error.HTTPError as exception:
        raw_response = exception.read()
        try:
            obj_json = json.loads(raw_response)
            obj_json['code'] = exception.code
            obj_json['reason'] = exception.reason
            return obj_json
        except json.decoder.JSONDecodeError as err:
            return err
        return raw_response
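# Hedged usage sketch (illustration only): queries the public CMR collection search
# endpoint; the URL, query string, and header values are example inputs, not taken
# from the original source.
results = get(
    "https://cmr.earthdata.nasa.gov/search/collections.json?page_size=1",
    accept="application/json",
    headers={"Client-Id": "example-client"},
)
print(results.get("hits", results) if isinstance(results, dict) else results)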
c956a6d7f434b686263f0b179b2e6cd060cd35b0
20,074
# Assumed dependencies: astropy and the `reproject` package.
from astropy.io import fits
from reproject import reproject_from_healpix


def image_reproject_from_healpix_to_file(source_image_hdu, target_image_hdu_header, filepath=None):
    """ reproject from healpix image to normal wcs image

    :param source_image_hdu: the HDU object of the source image (HEALPix)
    :param target_image_hdu_header: the FITS header of the target image (WCS)
    :param filepath: the output file path
    :return: (array, footprint) when `filepath` is None; otherwise the result is written to `filepath`
    """
    array, footprint = reproject_from_healpix(source_image_hdu, target_image_hdu_header)

    if filepath is not None:
        # write file
        fits.writeto(filepath, array, target_image_hdu_header, clobber=True)  # clobber=OVERWRITE
    else:
        # return array & footprint
        return array, footprint
b261663f18ccdf095c0b6e20c02d2ebc0282b713
20,075
def flux_reddening_wl(wl, flux_wl, ebv, Rv=None, law=LawFitz, mode=ReddeningLaw.MW): """ Apply extinction curves to flux(lambda) values :param wl: [A] :param flux_wl: [ergs s^-1 cm^-2 A^-1] :param ebv: E(B-V) :param Rv: R_V :param law: the variant of extinction curves :param mode: type of extinction curves (MW, LMC, SMC) :return: reddening flux """ if Rv is None: Rv = law.Rv[mode] A_lambda = law.Almd(wl, ebv, Rv=Rv) res = flux_wl * 10 ** (-0.4 * A_lambda) return res
668d1824d988989a3411c798614aeb1bc6a63cb6
20,076
import string
from random import choice


def genRandomString(
        size: int = 5,
        upper: bool = False,
        lower: bool = False,
        mix: bool = False,
        numbers: bool = True) -> str:
    """
    Generates a random string of the given size and content.

    :param numbers: Numbers are included in the string. Default True.
    :param upper: Uppercase only. Default False.
    :param lower: Lowercase only. Default False.
    :param mix: Mix lowercase and uppercase. Default False.
    :param size: Size of the desired string.
    :return: String
    """
    chars = ''
    if upper:
        chars = string.ascii_uppercase
    elif lower:
        chars = string.ascii_lowercase
    elif mix:
        chars = string.ascii_letters
    if numbers:
        chars = chars + string.digits
    return ''.join(choice(chars) for _ in range(size))
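# Hedged usage sketch (illustration only): an 8-character uppercase alphanumeric token.
token = genRandomString(size=8, upper=True, numbers=True)
print(token)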
a63a2be76675bbb42da2a4cd0ae20db8be723ee3
20,077
def process_whole_image(model, images, num_crops=4, receptive_field=61, padding=None): """Slice images into num_crops * num_crops pieces, and use the model to process each small image. Args: model: model that will process each small image images: numpy array that is too big for model.predict(images) num_crops: number of slices for the x and y axis to create sub-images receptive_field: receptive field used by model, required to pad images padding: type of padding for input images, one of {'reflect', 'zero'} Returns: model_output: numpy array containing model outputs for each sub-image """ if K.image_data_format() == 'channels_first': channel_axis = 1 row_axis = len(images.shape) - 2 col_axis = len(images.shape) - 1 else: channel_axis = len(images.shape) - 1 row_axis = len(images.shape) - 3 col_axis = len(images.shape) - 2 if not padding: padding_layers = get_padding_layers(model) if padding_layers: padding = 'reflect' if 'reflect' in padding_layers[0] else 'zero' if str(padding).lower() not in {'reflect', 'zero'}: raise ValueError('Expected `padding_mode` to be either `zero` or ' '`reflect`. Got ', padding) # Split the frames into quarters, as the full image size is too large crop_x = images.shape[row_axis] // num_crops crop_y = images.shape[col_axis] // num_crops # Set up receptive field window for padding win_x, win_y = (receptive_field - 1) // 2, (receptive_field - 1) // 2 # instantiate matrix for model output model_output_shape = tuple(list(model.layers[-1].output_shape)[1:]) if channel_axis == 1: output = np.zeros((images.shape[0], model_output_shape[1], *images.shape[2:])) else: output = np.zeros((*images.shape[0:-1], model_output_shape[-1])) expected_input_shape = get_cropped_input_shape(images, num_crops, receptive_field) if expected_input_shape != model.input_shape[1:]: raise ValueError('Expected model.input_shape to be {}. Got {}. Use ' '`get_cropped_input_shape()` to recreate your model ' ' with the proper input_shape'.format( expected_input_shape, model.input_shape[1:])) # pad the images only in the x and y axes pad_width = [] for i in range(len(images.shape)): if i == row_axis: pad_width.append((win_x, win_x)) elif i == col_axis: pad_width.append((win_y, win_y)) else: pad_width.append((0, 0)) if str(padding).lower() == 'reflect': padded_images = np.pad(images, pad_width, mode='reflect') else: padded_images = np.pad(images, pad_width, mode='constant', constant_values=0) for i in range(num_crops): for j in range(num_crops): e, f = i * crop_x, (i + 1) * crop_x + 2 * win_x g, h = j * crop_y, (j + 1) * crop_y + 2 * win_y if images.ndim == 5: if channel_axis == 1: predicted = model.predict(padded_images[:, :, :, e:f, g:h]) else: predicted = model.predict(padded_images[:, :, e:f, g:h, :]) else: if channel_axis == 1: predicted = model.predict(padded_images[:, :, e:f, g:h]) else: predicted = model.predict(padded_images[:, e:f, g:h, :]) # if using skip_connections, get the final model output if isinstance(predicted, list): predicted = predicted[-1] # if the model uses padding, trim the output images to proper shape # if model does not use padding, images should already be correct if padding: predicted = trim_padding(predicted, win_x, win_y) a, b = i * crop_x, (i + 1) * crop_x c, d = j * crop_y, (j + 1) * crop_y if images.ndim == 5: if channel_axis == 1: output[:, :, :, a:b, c:d] = predicted else: output[:, :, a:b, c:d, :] = predicted else: if channel_axis == 1: output[:, :, a:b, c:d] = predicted else: output[:, a:b, c:d, :] = predicted return output
3e9ab9485662f9bae40217c60837d8d8cba020d3
20,078
from scipy.spatial.distance import cdist, pdist, squareform


def compute_covariance(model, xy, XY=None):
    """Returns the covariance matrix for a given set of data.

    `model` maps pairwise distances to covariances and `xy` holds point
    coordinates (one point per row). If `XY` is given, the cross-covariance
    between `xy` and `XY` is returned instead.
    """
    if xy.size == 1:
        dist = 0
    elif XY is None:
        dist = squareform(pdist(xy))
    else:
        dist = cdist(xy, XY)

    C = model(dist)

    return C
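# Hedged usage sketch (illustration only): an exponential covariance model with
# made-up coordinates; the model function and range value are arbitrary examples.
import numpy as np


def exp_model(h):
    # exponential covariance: C(h) = exp(-h / range), with range = 0.5
    return np.exp(-np.asarray(h) / 0.5)


points = np.array([[0.0, 0.0], [0.0, 1.0], [1.0, 1.0]])
C = compute_covariance(exp_model, points)
print(C.shape)  # (3, 3)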
b898ef57155898c75797033e057c6cab4e2487bc
20,079
import sqlite3

import pandas as pd


def prob1(cur: sqlite3.Cursor) -> pd.DataFrame:
    """List how many stops are in the database.

    Parameters
    ----------
    cur (sqlite3.Cursor) : The cursor for the database we're accessing.

    Returns
    -------
    (pd.DataFrame) : Table with the solution.
    """
    cur.execute("SELECT COUNT(*) FROM stops;")
    return pd.DataFrame(cur.fetchall())
ed6a3a316e89177a6224fd7513ca5c098940e312
20,080
import tensorflow as tf  # TensorFlow 1.x graph-mode API (variable scopes, get_variable)


def q_fn(x):
    """
    The Q-function assesses all possible actions that can be taken, given a state.
    Feed-forward neural network with two hidden layers. All layers are fully connected,
    biases initialized with 0. The constants above define the layer sizes.
    :param x: Batch input tensor to the network.
    :return: Q-value estimates for the available actions (linear output, no softmax applied).
    """
    with tf.variable_scope('dense1') as scope:
        weights = tf.get_variable('weights', [INPUT_SIZE, DENSE1_UNITS],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=1.0 / DENSE1_UNITS))
        biases = tf.get_variable('biases', shape=[DENSE1_UNITS], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.0, dtype=tf.float32))
        pre_activation = tf.add(tf.matmul(x, weights), biases, name='pre_activation')
        dense1 = tf.sigmoid(pre_activation, name=scope.name)

    with tf.variable_scope('dense2') as scope:
        weights = tf.get_variable('weights', [DENSE1_UNITS, DENSE2_UNITS],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=1.0 / DENSE2_UNITS))
        biases = tf.get_variable('biases', shape=[DENSE2_UNITS], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.0, dtype=tf.float32))
        pre_activation = tf.add(tf.matmul(dense1, weights), biases, name='pre_activation')
        dense2 = tf.sigmoid(pre_activation, name=scope.name)

    with tf.variable_scope('actions') as scope:
        weights = tf.get_variable('weights', [DENSE2_UNITS, NUM_ACTIONS],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=1.0 / NUM_ACTIONS))
        biases = tf.get_variable('biases', shape=[NUM_ACTIONS], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.0, dtype=tf.float32))
        action_q = tf.add(tf.matmul(dense2, weights), biases, name='action_q_value')

    return action_q
4c4fece48773d020c321fd433aa75caa7bc258ee
20,081
def getPlayer(env, name, decoder): """Get user's player data""" players = getPlayers(env, decoder) if name in players.keys(): return players[name] else: return False
fd98e481abab6e7f53bf66d0285f506d06f5e6ea
20,082
import re def get_config_errors(conf, filename="<no name>"): """ Validate a configuration object and return the list of errors found. """ rv = [] # Give a clearer error message than what jsonschema would give # Something like: None is not of type 'object' if not isinstance(conf, dict): msg = "config must be an object containing 'db_objects'" rv.append(located_message(None, filename, msg)) return rv errors = list(validator.iter_errors(conf)) for error in errors: loc = location_from_error(conf, error) rv.append(located_message(loc, filename, error.message)) for obj in conf.get("db_objects", ()): if isinstance(obj, dict): rv.extend(_get_rule_errors(obj, filename)) # sort by line number def lineno(s): m = re.search(r":(\d+)", s) return int(m.group(1)) if m is not None else 0 rv.sort(key=lineno) return rv
4020d5dd33f840dc6c0e3c24be77acf80a606d57
20,083
def process_vcf( info ):
    """
    Process a chunk of VCF lines. `info` is a zipped tuple of the line chunk and the
    other needed vars:
    info[0] = list of vcf lines from VCF object iterator.
    info[1] = clf object
    info[2] = dataset dictionary
    info[3] = filter arg supplied by user
    info[4] = min classification frequency supplied by user (defaults to None)

    """
    #sys.stderr.write("... running process VCF with job id %d \n" %(os.getpid() ) )

    #parse the args to function
    line_list = info[0] #list of lines from VCF obj
    clf = info[1] #randomForest object
    dataset = info[2] #dataset with class names
    filter = info[3] #filter arg supplied by user
    minclassfreq = info[4]

    #iterate over lines in the chunked data
    return_list = []
    for line in line_list:
        line = line.strip().split("\t")
        vdat = parse_vcf_data( line[7] ) #parse all of vcf appended data
        filter_bool = run_filters( vdat, filtering=filter ) #boolean of whether line info passes filters
        if filter_bool:
            _x = vdat[ 'AT' ].split(",") #create list from data in 'AT' field
            _x = _x[1:]
            #results = classify_data( _x, clf, dataset['target_names'] )
            results = classify_data( _x, clf, dataset['target_names'], minclassfreq )
            line[7] = line[7] + ";" + results #append data to correct vcf column
            #print "\t".join( line ) #print results to stdout
            print_line = "\t".join( line )
            return_list.append( print_line )
        else:
            return_list.append( None )

    #return the full list of updated line data
    return return_list
389146cd88804935ee2aae85a9e5d84684f81b7e
20,084
import opencue  # assumed: the OpenCue Python client (pycue), which exposes opencue.api


def createLayerOnFrameDepend(job, layer, onjob, onlayer, onframe):
    """Creates a layer on frame dependency

    @type  job: string
    @param job: the name of the dependent job
    @type  layer: string
    @param layer: the name of the dependent layer
    @type  onjob: string
    @param onjob: the name of the job to depend on
    @type  onlayer: string
    @param onlayer: the name of the layer to depend on
    @type  onframe: int
    @param onframe: the number of the frame to depend on
    @rtype:  Depend
    @return: the created dependency"""
    __is_valid(job, ERR_INVALID_ER_JOB)
    __is_valid(layer, ERR_INVALID_ER_LAYER)
    __is_valid(onjob, ERR_INVALID_ON_JOB)
    __is_valid(onlayer, ERR_INVALID_ON_LAYER)
    __is_valid(onframe, ERR_INVALID_ON_FRAME)

    logger.debug(
        "creating lof depend from %s/%s to %s/%s-%04d",
        job, layer, onjob, onlayer, onframe)
    depend_er_layer = opencue.api.findLayer(job, layer)
    depend_on_frame = opencue.api.findFrame(onjob, onlayer, onframe)
    return depend_er_layer.createDependencyOnFrame(depend_on_frame)
45b49c1406e678213f6d2d93d0cba4dae5d2a824
20,085
from sklearn.metrics import f1_score


def compute_task1_f1_score(truth, solutions):
    """
    compute f1 score for task 1
    :param truth: list of ground truth values for all problem-ids
    :param solutions: list of solutions for all problem-ids
    :return: f1 score
    """
    task1_truth, task1_solution = extract_task_results(truth, solutions, 'multi-author')
    return f1_score(task1_truth, task1_solution, average='micro')
2e44603c547062d85023fb405d3ab511d3ca40d3
20,086
def multiply(t1,t2): """ Multiplies (expands) two binary expressions t1 and t2 based on the distributive rule Args: t1 (str): first binary expression t2 (str): second binary expression Returns: A string representing the expansion of the boolean algebraic expressions """ t1 = t1.split('+') t2 = t2.split('+') prod = '' for m in t1: temp = "" for n in t2: if t1.index(m) == len(t1)-1 and t2.index(n) == len(t2)-1: if m!=n: temp=(temp+m+n) else: temp += m else: if m!=n: temp=temp + m+n+'+' else: temp+=m+'+' prod+=temp return prod
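# Hedged usage sketch (illustration only): expanding (x + y)(x + z) with the
# distributive rule; a repeated literal within one product term collapses (x*x -> x).
print(multiply("x+y", "x+z"))  # x+xz+yx+yz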
0078ee94420722600be31edc74a86b1932c4d2f2
20,087
def source_remove_all(obj_type, obj_id, name, analyst=None):
    """
    Remove all instances of a source from a top-level object.

    :param obj_type: The CRITs type of the top-level object.
    :type obj_type: str
    :param obj_id: The ObjectId to search for.
    :type obj_id: str
    :param name: The name of the source.
    :type name: str
    :param analyst: The user performing the removal.
    :type analyst: str
    :returns: dict with keys "success" (boolean) and "message" (str)
    """

    obj = class_from_id(obj_type, obj_id)
    if not obj:
        return {'success': False,
                'message': 'Unable to find object in database.'}
    try:
        result = obj.remove_source(source=name, remove_all=True)
        obj.save(username=analyst)
        return result
    except ValidationError as e:
        return {'success': False, 'message': e}
c9c7860f27b230ea9834c4a58324da70ebee9e30
20,088
def driver(dbname): """ Determine driver module :Parameters: `dbname` : ``str`` DB name (section token in db.conf) :Return: Driver module :Rtype: ``module`` :Exceptions: - `DBConfigurationError` : DB not configured - `KeyError` : DB name not found - `ImportError` : Driver not found """ return _connection.driver(dbname)
01a2b0e10975879f944bbaf51b44d8eef6b97996
20,089
from typing import Union

import numpy as np


def d1tile_x_d2(d1: Union[float, np.ndarray], d2: np.ndarray) -> np.ndarray:
    """
    Create array of repeated values with dimensions that match those of energy array
    Useful to multiply frequency-dependent values to frequency-time matrices

    :param d1: 1D input vector, nominally frequency/scale multipliers
    :param d2: 2D array, first dimension should be the same size as d1
    :return: array with matching values
    """
    shape_out = d2.shape

    if len(shape_out) == 1:
        d1_matrix = np.tile(d1, (shape_out[0]))
    elif len(shape_out) == 2:
        d1_matrix = np.tile(d1, (shape_out[1], 1)).T
    else:
        raise TypeError('Cannot handle a d2 array of shape {}.'.format(str(d2.shape)))
    if d1_matrix.shape == d2.shape:
        d1_x_d2 = d1_matrix * d2
    else:
        raise TypeError('Shape mismatch: tiled d1 has shape {} but d2 has shape {}.'.format(
            d1_matrix.shape, d2.shape))
    return d1_x_d2
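# Hedged usage sketch (illustration only): scale each frequency row of a made-up
# time-frequency energy matrix by a per-frequency multiplier.
multipliers = np.array([1.0, 2.0, 4.0])  # one value per frequency/scale
tfr = np.ones((3, 5))                    # frequency x time energy matrix
scaled = d1tile_x_d2(multipliers, tfr)
print(scaled[:, 0])  # [1. 2. 4.]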
68721f7f9ab1b60f77e8199ad917dd47b19aaa95
20,090
import math
import random

import numpy as np


def get_all_gradients_for_Q4( theta, X, Y ):
    """
    Do the same thing as Q(iv), but only for storing and observing the sample gradient
    and the whole gradient at each Q(iv) step.
    Outputs the sample gradient and whole gradient data.
    """

    # Get difference as Euclidean distance
    def get_difference( old_theta, new_theta ):
        difference_mat = old_theta - new_theta
        difference_square = np.multiply( difference_mat, difference_mat )
        difference = math.sqrt( np.sum( difference_square ) )
        return difference

    # Contains all gradient_i
    grad_i_val_observe = []
    grad_val_observe = []

    # Set random seed
    random.seed( 1 )

    # Get updated theta
    def get_new_theta( old_theta, eta ):

        # Code for using single sample gradient
        random_i = random.randint( 0, X.shape[0] - 1 )
        grad_i_val = get_grad_f_i( old_theta, X, Y, random_i )

        # Get the whole gradient to observe
        grad_val = get_grad_f( old_theta, X, Y )

        # Scale by the size N (multiply by 10,000)
        grad_i_val = grad_i_val * X.shape[0]

        # Store grad_val to observe Q(v)
        grad_i_val_list = grad_i_val.tolist()
        grad_i_val_list = grad_i_val_list[0]
        grad_val_list = grad_val.tolist()
        grad_val_list = grad_val_list[0]
        grad_i_val_observe.append( grad_i_val_list )
        grad_val_observe.append( grad_val_list )

        new_theta = old_theta - ( eta * grad_i_val )
        return new_theta

    ############################################################
    precision = 0.01                                           #
    eta = 0.000000008                                          #
    ############################################################

    old_theta = theta
    new_theta = get_new_theta( old_theta, eta )
    difference = get_difference( old_theta, new_theta )

    while difference > precision:
        old_theta = new_theta
        new_theta = get_new_theta( old_theta, eta )

        # Get new difference
        difference = get_difference( old_theta, new_theta )

        value = op_func( new_theta, X, Y )

        # Showing information...
        print()
        print("difference: " + str( difference ))
        print("theta: ")
        print(new_theta)
        print("function value: " + str( value ))

    return grad_i_val_observe, grad_val_observe
d508b826f552d844cf95f9d5515c5eb1512dfbcb
20,091
import numpy as np


def findSubsetIndices(min_lat,max_lat,min_lon,max_lon,lats,lons):
    """Find the indices in `lats` and `lons` closest to the given bounding box.

    Returns an array [minI, maxI, minJ, maxJ], where I indexes `lons`
    (min_lon/max_lon) and J indexes `lats` (min_lat/max_lat).
    """
    # Array to store the results returned from the function
    res=np.zeros((4),dtype=np.float64)
    minLon=min_lon; maxLon=max_lon

    distances1 = []; distances2 = []
    indices=[]; index=1

    for point in lats:
        s1 = max_lat-point # (vector subtract)
        s2 = min_lat-point # (vector subtract)
        distances1.append((np.dot(s1, s1), point, index))
        distances2.append((np.dot(s2, s2), point, index-1))
        index=index+1

    distances1.sort()
    distances2.sort()
    indices.append(distances1[0])
    indices.append(distances2[0])

    distances1 = []; distances2 = []; index=1

    for point in lons:
        s1 = maxLon-point # (vector subtract)
        s2 = minLon-point # (vector subtract)
        distances1.append((np.dot(s1, s1), point, index))
        distances2.append((np.dot(s2, s2), point, index-1))
        index=index+1

    distances1.sort()
    distances2.sort()
    indices.append(distances1[0])
    indices.append(distances2[0])

    # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices
    minJ=indices[1][2]
    maxJ=indices[0][2]
    minI=indices[3][2]
    maxI=indices[2][2]

    res[0]=minI; res[1]=maxI; res[2]=minJ; res[3]=maxJ
    return res
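# Hedged usage sketch (illustration only): made-up 1-degree latitude/longitude grids.
lats = np.arange(50.0, 80.0, 1.0)
lons = np.arange(-60.0, -20.0, 1.0)
minI, maxI, minJ, maxJ = findSubsetIndices(60.0, 70.0, -50.0, -40.0, lats, lons)
print(int(minI), int(maxI), int(minJ), int(maxJ))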
7d41b33fd492fa8b5cd62c368210bcbea2c7bc89
20,092
def call(stoptime, seconds, method=None): """ Returns a dict with route, direction, stop, call time and source. Call time is in UTC. """ result = dict(stoptime._asdict(), call_time=toutc(seconds), source=method or "I") result["deviation"] = result["call_time"] - stoptime.datetime return result
bfa78ec89b60bf1140b8b24bc91fd1a2dd9c2a63
20,093
import re


def sanitize_value(val):
    """Strip whitespace and formatting characters from `val`, normalize decimal
    separators, signs and percent values, and convert the result to a float."""
    val = re.sub(u"(\xa0|\s)", '', val)
    val = val.replace(',', '.')
    # positive or negative multiplier
    mult = 1
    if '-' in val and len(val) > 1:
        mult = -1
        val = val.replace('-', '')
    elif '-' in val:
        val = '0'
    if val is not None:
        if '%' in val:
            val = float(val.replace('%', ''))
        return float(val) * mult
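# Hedged usage sketch (illustration only): typical inputs with non-breaking spaces,
# comma decimals, signs and percent values.
print(sanitize_value(u"1\xa0234,56"))  # 1234.56
print(sanitize_value("-2,5%"))         # -2.5
print(sanitize_value("-"))             # 0.0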
0fc67bf519674575451f4fc029bee658ea2bd2da
20,094
def getObjectInfo(fluiddb, about): """ Gets object info for an object with the given about tag. """ return fluiddb.about[about].get()
8614edaf44944fcc11882ac2fcaa31ba31d48d30
20,095
import warnings def __getattr__(name): """Get attribute.""" deprecated = __deprecated__.get(name) if deprecated: warnings.warn( "'{}' is deprecated. Use '{}' instead.".format(name, deprecated[0]), category=DeprecationWarning, stacklevel=(3 if PY37 else 4) ) return deprecated[1] raise AttributeError("module '{}' has no attribute '{}'".format(__name__, name))
c904f1221492e8f08786918dd496d3d6861fd35e
20,096
def get_model_and_assets(): """Returns a tuple containing the model XML string and a dict of assets.""" return common.read_model('finger.xml'), common.ASSETS
cacb53de08eef5695a5464fbb72a1706ff489276
20,097
# Assumption: this handler belongs to a FastAPI/Starlette app; RedirectResponse is
# imported accordingly (fastapi.responses re-exports Starlette's response classes).
from fastapi.responses import RedirectResponse


async def process_logout():
    """
    Purge the login information from the user's session/cookie data
    :return: Redirect to main body
    """
    # Simply destroy the cookies in this session and get rid of the creds, redirect to landing
    response = RedirectResponse("/")
    # Process the destruction from main app/test result
    response.delete_cookie("user")
    response.delete_cookie("flow")
    return response
98d9c8c40e0a1fe224538b353a2a803fdd7fce76
20,098
from typing import Optional def _lex_label(label: str) -> _LexedLabel: """Splits the label into packages and target.""" match = _LABEL_LEXER.match(label) if match is None: raise ValueError(f'{label} is not an absolute Bazel label') groups = match.groupdict() packages: Optional[str] = groups['packages'] target: Optional[str] = groups['target'] if packages is None and target is None: raise ValueError(f'{label} cannot be empty') init = packages.split('/') if packages else [] last = target[1:] if target else init[-1] return init, last
f067d5e81b02a4242d8459b41d49c302459f416b
20,099