content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, 0 to 3.66M
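The rows below are content / sha1 / id triples: a code snippet, a 40-character hash, and a numeric id. Assuming the sha1 column is simply the SHA-1 digest of the raw snippet text (an assumption; the header above does not state how the hash is computed), a record can be sanity-checked with a short Python sketch like the following, where check_record is a hypothetical helper:

import hashlib

def check_record(content: str, sha1: str) -> bool:
    # Hypothetical check: hash the raw snippet text (assumes UTF-8 encoding)
    # and compare it with the value stored in the sha1 column.
    return hashlib.sha1(content.encode("utf-8")).hexdigest() == sha1.lower()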
def string_between(string, start, end):
    """
    Returns a new string between the start and end range.

    Args:
        string (str): the string to split.
        start (str): string to start the split at.
        end (str): string to stop the split at.

    Returns:
        new string between start and end.
    """
    try:
        return str(string).split(str(start), 1)[1].split(str(end))[0]
    except IndexError:
        return ""
fc6f2a3def4112140539c90abe6304f5daa8c1f4
18,601
def _parse_line(line: str):
    """
    Parse one config line. Fields are comma-separated; up to three fields are
    currently supported: the first is the display name, the second is the name
    stored in the XML, and the third is a regex pattern for an attached value.
    :param line:
    :return:
    """
    line = line.strip()
    config = line.split(",")
    if len(config) == 2:
        return config[0], config[1], ""
    elif len(config) == 3:
        return config[0] + INTER_FLAG, config[1], config[2]
    elif len(config) == 1:
        return config[0], config[0], ""
    else:
        raise Exception("Invalid configuration: {}".format(line))
6f325886c82eb5cdb438bd76c893b4ef42c319f5
18,602
def harvest_zmat(zmat: str) -> Molecule:
    """Parses the contents of the Cfour ZMAT file into array and coordinate information.

    The coordinate info is converted into a rather dinky Molecule (no fragment,
    but does read charge, mult, unit). Return qcdb.Molecule. Written for findif
    zmat* where geometry always Cartesian and Bohr.
    """
    zmat = zmat.splitlines()[1:]  # skip comment line
    Nat = 0
    readCoord = True
    isBohr = ""
    charge = 0
    mult = 1
    molxyz = ""
    for line in zmat:
        if line.strip() == "":
            readCoord = False
        elif readCoord:
            molxyz += line + "\n"
            Nat += 1
        else:
            if line.find("CHARGE") > -1:
                idx = line.find("CHARGE")
                charge = line[idx + 7:]
                idxc = charge.find(",")
                if idxc > -1:
                    charge = charge[:idxc]
                charge = int(charge)
            if line.find("MULTIPLICITY") > -1:
                idx = line.find("MULTIPLICITY")
                mult = line[idx + 13:]
                idxc = mult.find(",")
                if idxc > -1:
                    mult = mult[:idxc]
                mult = int(mult)
            if line.find("UNITS=BOHR") > -1:
                isBohr = " bohr"

    molxyz = f"{Nat}{isBohr}\n{charge} {mult}\n" + molxyz
    mol = Molecule(
        validate=False,
        **qcel.molparse.to_schema(
            qcel.molparse.from_string(molxyz, dtype="xyz+", fix_com=True, fix_orientation=True)["qm"],
            dtype=2,
        ),
    )
    return mol
aa8b89781c00d4939e7083613cb13fdf9eb4f710
18,603
def interaction_fingerprint_list(interactions, residue_dict, interaction_dict):
    """
    Create list of fingerprints for all given structures.
    """
    fp_list = []
    for sites in interactions.values():
        for site_name, site_interactions in sites.items():
            if not site_name.startswith("LIG"):
                continue  # fragments are labeled as LIG; other "sites" detected by PLIP are XRC artefacts
            for interaction_type, dataframe in site_to_dataframes(site_interactions).items():
                if dataframe is not None:
                    residue_nos = dataframe["RESNR"].tolist()
                    fp = interaction_fingerprint(
                        residue_dict, interaction_dict, residue_nos, interaction_type
                    )
                    fp_list.append(fp)
    return fp_list
79bc84b1e4ceda4cb1d43653eec96e6f17956fa9
18,606
def get_dynamic_client(
    access_token: str, project_id: str, cluster_id: str, use_cache: bool = True
) -> CoreDynamicClient:
    """
    Build a client object for accessing a Kubernetes cluster from the token, cluster_id, etc.

    :param access_token: bcs access_token
    :param project_id: project ID
    :param cluster_id: cluster ID
    :param use_cache: whether to use the cache
    :return: CoreDynamicClient for the specified cluster
    """
    if use_cache:
        return _get_dynamic_client(access_token, project_id, cluster_id)
    # If the cache is not used, create and return a fresh instance directly.
    return generate_core_dynamic_client(access_token, project_id, cluster_id)
41735ffc0ff1722d528c7022988ff47925318c33
18,607
def bucket(db, dummy_location):
    """File system location."""
    b1 = Bucket.create()
    db.session.commit()
    return b1
755db301053bab638b1963cb5c6f985760d51688
18,609
def standardize_str(string):
    """Returns a standardized form of the string-like argument.

    This will convert from a `unicode` object to a `str` object.
    """
    return str(string)
ea007582363cd1eeee34d4b342a39581fd876c3a
18,610
import json


def lambda_handler(event, context):
    """
    Return per-day availability information for a staff member.

    Parameters
    ----------
    event : dict
        Parameters passed in from the front end.
    context : dict
        The invocation context.

    Returns
    -------
    return_calendar : dict
        The staff member's per-day availability (availability is only
        returned for days that have reservations).
    """
    # Log and validate the parameters
    logger.info(event)
    req_param = event['queryStringParameters']
    if req_param is None:
        error_msg_display = common_const.const.MSG_ERROR_NOPARAM
        return utils.create_error_response(error_msg_display, 400)
    param_checker = validation.HairSalonParamCheck(req_param)  # noqa 501
    if error_msg := param_checker.check_api_staff_calendar_get():
        error_msg_display = ('\n').join(error_msg)
        logger.error(error_msg_display)
        return utils.create_error_response(error_msg_display, 400)

    try:
        # Fetch the staff member's availability for the requested month by staff ID
        staff_calendar = get_staff_calendar(req_param)
    except Exception as e:
        logger.exception('Occur Exception: %s', e)
        return utils.create_error_response('Error')

    body = json.dumps(
        staff_calendar,
        default=utils.decimal_to_int,
        ensure_ascii=False)
    return utils.create_success_response(body)
8f44cd7f7aa3b62c0fd0c4253f92a05749f94bb4
18,611
import torch


def init_process_group_and_set_device(world_size, process_id, device_id, config):
    """
    This function needs to be called on each spawned process to initiate learning using
    DistributedDataParallel. The function initiates the process' process group and assigns
    it a single GPU to use during training.
    """
    config.world_size = world_size
    config.rank = process_id
    torch.cuda.set_device(device_id)
    device = torch.device(f'cuda:{device_id}')
    config.device = device
    if world_size > 1:
        config.distributed = True
        torch.distributed.init_process_group(
            torch.distributed.Backend.NCCL,
            world_size=world_size,
            rank=process_id
        )
        torch.distributed.barrier(device_ids=[device_id])
        utils.setup_for_distributed(config.rank == 0)
    else:
        config.distributed = False
    return device
5647e76b71d6d865487cdf348580ee8c58ba9bc5
18,612
from meerschaum.utils.warnings import error, warn from typing import Tuple from typing import Any def yes_no( question : str = '', options : Tuple[str, str] = ('y', 'n'), default : str = 'y', wrappers : Tuple[str, str] = ('[', ']'), icon : bool = True, yes : bool = False, noask : bool = False, interactive : bool = False, **kw : Any ) -> bool: """ Print a question and prompt the user with a yes / no input. Returns True for 'yes', False for 'no'. :param question: The question to print to the user. :param options: The y/n options. The first is always considered `True`, and all options must be lower case. This behavior may be modifiable change in the future. :param default: The default option. Is represented with a capital to distinguish that it's the default.\ E.g. [y/N] would return False by default. :param wrappers: Text to print around the '[y/n]' options. Defaults to ('[', ']'). :param icon: If True, prepend the configured question icon. :param interactive: Not implemented. Was planning on using prompt_toolkit, but for some reason I can't figure out how to make the default selection 'No'. """ default = options[0] if yes else default noask = yes or noask ending = f" {wrappers[0]}" + "/".join( [ o.upper() if o.lower() == default.lower() else o.lower() for o in options ] ) + f"{wrappers[1]}" while True: try: answer = prompt(question + ending, icon=icon, detect_password=False, noask=noask) success = True except KeyboardInterrupt: success = False if not success: error(f"Error getting response. Aborting...", stack=False) if answer == "": answer = default if answer.lower() in options: break warn('Please enter a valid reponse.', stack=False) return answer.lower() == options[0].lower()
930e0c7f56c0d94e9a00928df764998fd84506b1
18,613
def load(filename, instrument=None, **kw):
    """
    Return a probe for NCNR data.
    """
    header, data = parse_file(filename)
    return _make_probe(geometry=Polychromatic(), header=header, data=data, **kw)
fbe07b4036cf87f91e07f062bd33d75cae0f08ee
18,614
def get_repeat():
    """get_repeat() -> (delay, interval)
    see how held keys are repeated
    """
    check_video()
    delay, interval = ffi.new('int*'), ffi.new('int*')
    sdl.SDL_GetKeyRepeat(delay, interval)
    return (delay[0], interval[0])
63d4b854b199ec5a55aa6dbbfb2f9e859d892ffa
18,615
def growth(params, ns, rho=None, theta=1.0, gamma=None, h=0.5, sel_params=None):
    """
    exponential growth or decay model
    params = (nu, T)
        nu - final size
        T  - time in past size changes begin
    """
    nu, T = params
    if rho is None:
        print("Warning: no rho value set. Simulating with rho = 0.")
        rho = 0.0
    if gamma is None:
        gamma = 0.0
    gamma = make_floats(gamma)
    rho = make_floats(rho)
    theta = make_floats(theta)
    sel_params = make_floats(sel_params)
    F = equilibrium(ns, rho=rho, theta=theta, gamma=gamma, h=h, sel_params=sel_params)
    nu_func = lambda t: np.exp(np.log(nu) * t / T)
    F.integrate(nu_func, T, rho=rho, theta=theta, gamma=gamma, h=h, sel_params=sel_params)
    return F
f797262cfc98f6194fd170ca435949baa123ccf5
18,616
def mse(y_true, y_pred):
    """ Mean Squared Error """
    return K.mean(K.square(_error(y_true, y_pred)))
0bfaa4b8042af681a8c398f8004c8dc1b4838302
18,617
from sklearn.neighbors import NearestNeighbors
from sklearn.feature_extraction.text import TfidfVectorizer
from typing import Iterable


def knn_name_matching(
        A: Iterable[str],
        B: Iterable[str],
        vectorizer_kws: dict = {},
        nn_kws: dict = {},
        max_distance: float = None,
        return_B=True) -> list:
    """
    Nearest neighbor name matching of sentences in B to A.
    """
    # vectorize the B documents after fitting on A
    vectorizer = TfidfVectorizer(**vectorizer_kws)
    Xa = vectorizer.fit_transform(A)
    Xb = vectorizer.transform(B)

    # find nearest neighbor matching
    neigh = NearestNeighbors(n_neighbors=1, **nn_kws)
    neigh.fit(Xa)
    if max_distance is None:
        indices = neigh.kneighbors(Xb, return_distance=False).flatten()
    else:
        indices, distances = neigh.kneighbors(Xb)
        indices, distances = indices.flatten(), distances.flatten()
        indices = indices[distances <= max_distance]

    if return_B:
        result = [(B[i], A[idx]) for i, idx in enumerate(indices)]
    else:
        result = [A[idx] for idx in indices]
    return result
fcde5e8d50d5696edf9df467f11f61db53b2c765
18,618
import logging


def setup_platform(hass, config, add_devices, discovery_info=None):
    """ Sets up the ISY994 platform. """
    logger = logging.getLogger(__name__)
    devs = []
    # verify connection
    if ISY is None or not ISY.connected:
        logger.error('A connection has not been made to the ISY controller.')
        return False

    # import dimmable nodes
    for (path, node) in ISY.nodes:
        if node.dimmable and SENSOR_STRING not in node.name:
            if HIDDEN_STRING in path:
                node.name += HIDDEN_STRING
            devs.append(ISYLightDevice(node))

    add_devices(devs)
8ad612cd7f9e9f95f8c299b79e2aafca9c5c331f
18,619
def frames_downsample(arFrames: np.array, nFramesTarget: int) -> np.array:
    """ Adjust number of frames (eg 123) to nFramesTarget (eg 79)
    works also if there are originally fewer frames than nFramesTarget
    """
    nSamples, _, _, _ = arFrames.shape
    if nSamples == nFramesTarget:
        return arFrames

    # down/upsample the list of frames
    fraction = nSamples / nFramesTarget
    index = [int(fraction * i) for i in range(nFramesTarget)]
    liTarget = [arFrames[i, :, :, :] for i in index]
    #print("Change number of frames from %d to %d" % (nSamples, nFramesTarget))
    #print(index)

    return np.array(liTarget)
5f597ed79cd31bf146f65e9a5f17dadf42b31bc7
18,620
def icon_dir():
    """pathname of the directory from which to load custom icons"""
    return module_dir() + "/icons"
f531fcd312a36ab3ebcc3d16540cb8de7657340a
18,621
def post_question():
    """ Post a question."""
    q_data = request.get_json()
    # No data provided
    if not q_data:
        abort(make_response(jsonify({'status': 400, 'message': 'No data sent'}), 400))
    else:
        try:
            data = QuestionSchema().load(q_data)
            if not MeetupModel().exists('id', data['meetup_id']):
                abort(make_response(jsonify({'status': 404, 'message': 'Meetup not found'}), 404))
            else:
                data['user_id'] = get_jwt_identity()
                question = QuestionModel().save(data)
                result = QuestionSchema().dump(question)
                return jsonify({
                    'status': 201,
                    'message': 'Question posted successfully',
                    'data': result}), 201
        # return errors alongside valid data
        except ValidationError as errors:
            # errors.messages
            valid_data = errors.valid_data
            abort(make_response(jsonify({'status': 400,
                                         'message': 'Invalid data.',
                                         'errors': errors.messages,
                                         'valid_data': valid_data}), 400))
e78e71ef415e465a6ac95d5525d1eba067f51a99
18,623
def infinitegenerator(generatorfunction):
    """Decorator that makes a generator replay indefinitely

    An "infinite" parameter is added to the generator, that if set to True
    makes the generator loop indefinitely.
    """
    def infgenerator(*args, **kwargs):
        if "infinite" in kwargs:
            infinite = kwargs["infinite"]
            del kwargs["infinite"]
        else:
            infinite = False
        if infinite:
            while True:
                for elem in generatorfunction(*args, **kwargs):
                    yield elem
        else:
            for elem in generatorfunction(*args, **kwargs):
                yield elem
    return infgenerator
6915a16dd765195e0344b5ebd255c1aca7737699
18,624
import string
import random


def id_generator(size=6, chars=string.ascii_uppercase + string.digits):
    """Credit: http://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits-in-python"""
    return ''.join(random.choice(chars) for _ in range(size))
b5842d56e548d4054230d300e6fc5e05a18ce18c
18,626
def _update_objective(C_obj, Q, QN, R, R0, xr, z_init, u_init, const_offset, u_prev, N, nx, nu):
    """
    Construct MPC objective function
    :return:
    """
    res = np.hstack(
        ((C_obj.T @ Q @ (C_obj @ z_init[:, :-1] - xr[:, :-1])).T.flatten(),
         C_obj.T @ QN @ (C_obj @ z_init[:, -1] - xr[:, -1]),
         (R @ (u_init + const_offset)).T.flatten()))

    # Jitter regularization linear objective:
    res[(N + 1) * nx:(N + 1) * nx + nu] -= R0 @ u_prev

    return res
ed22a62a9f9dadfb1d6dc71fc3735cadb299ff2a
18,627
import numpy as np
import torch


def convert(M: any) -> torch.Tensor:
    """
    Convert Scipy sparse matrix to pytorch sparse tensor.

    Parameters
    ----------
    M : any
        Scipy sparse matrix.

    Returns
    -------
    Ms : torch.Tensor
        pytorch sparse tensor.
    """
    M = M.tocoo()
    indices = torch.from_numpy(np.vstack((M.row, M.col))).long()
    values = torch.from_numpy(M.data)
    shape = torch.Size(M.shape)
    Ms = torch.sparse_coo_tensor(indices, values, shape)
    return Ms
d65b4d1200c36baec6b37550c0e53c79c5a6fcaa
18,629
import math


def process_lines(lines):
    """
    It classifies the lines and combines them into a CombinedLine
    :param lines: np.array with all the lines detected in the image.
        It should be the output of a HoughLinesP function
    :return: np.array with 2 lines
    """
    lines_l = CombinedLine()
    lines_r = CombinedLine()
    for line in lines[:, 0]:
        # the slope of the line
        slope = math.atan2(line[3] - line[1], line[2] - line[0])
        # Filter almost horizontal lines
        if not filter_by_slope(slope):
            continue
        # Classifies lines into left and right lane lines and adds them to the corresponding CombinedLine
        if slope > 0:
            lines_r.add(line)
        else:
            lines_l.add(line)

    # The max_y coordinate gives an approximation of the bottom of the image
    max_y = max(lines_l.point_bottom[1], lines_r.point_bottom[1])

    # Calculate the intersection, it gives an approximation of the horizon
    intersection = CombinedLine.intersection(lines_l, lines_r)

    # A parameter to cut the horizon below intersection
    p_horizon = 1.1

    # The output is created using the horizon and max_y as y coordinates and calculating the xs
    return np.array([[[lines_l.x(intersection[1] * p_horizon), intersection[1] * p_horizon,
                       lines_l.x(max_y), max_y],
                      [lines_r.x(intersection[1] * p_horizon), intersection[1] * p_horizon,
                       lines_r.x(max_y), max_y]]], dtype=np.int16)
37c23e2008c7cf67f937da0720b8e64fac8d7367
18,630
def get_balances_with_token(token: str):
    """Returns all entries where a token is involved"""
    token = token.lower()
    conn = create_connection()
    with conn:
        cursor = conn.cursor()
        fiat = confighandler.get_fiat_currency().lower()
        cursor.execute(
            f"SELECT date,balance_btc,balance_{fiat} FROM cbalancehistory WHERE token = '{token}'")
        return cursor.fetchall()
e6a88362edf1e482877f8afca30b0db576663572
18,631
def relative_cumulative_gain_curve(df: pd.DataFrame, treatment: str, outcome: str, prediction: str, min_rows: int = 30, steps: int = 100, effect_fn: EffectFnType = linear_effect) -> np.ndarray: """ Orders the dataset by prediction and computes the relative cumulative gain curve curve according to that ordering. The relative gain is simply the cumulative effect minus the Average Treatment Effect (ATE) times the relative sample size. Parameters ---------- df : Pandas' DataFrame A Pandas' DataFrame with target and prediction scores. treatment : Strings The name of the treatment column in `df`. outcome : Strings The name of the outcome column in `df`. prediction : Strings The name of the prediction column in `df`. min_rows : Integer Minimum number of observations needed to have a valid result. steps : Integer The number of cumulative steps to iterate when accumulating the effect effect_fn : function (df: pandas.DataFrame, treatment: str, outcome: str) -> int or Array of int A function that computes the treatment effect given a dataframe, the name of the treatment column and the name of the outcome column. Returns ---------- relative cumulative gain curve: float The relative cumulative gain according to the predictions ordering. """ ate = effect_fn(df, treatment, outcome) size = df.shape[0] n_rows = list(range(min_rows, size, size // steps)) + [size] cum_effect = cumulative_effect_curve(df=df, treatment=treatment, outcome=outcome, prediction=prediction, min_rows=min_rows, steps=steps, effect_fn=effect_fn) return np.array([(effect - ate) * (rows / size) for rows, effect in zip(n_rows, cum_effect)])
78c2a889100b936a82a615ff0c07028cd1e10021
18,632
def add_curve_scatter(axis, analysis_spot, color_idx):
    """Add one or more scatter curves that spot events

    Arguments:
        axis : a pyplot x-y axis
        analysis_spot : a dictionary { 'name': [<datetime>, ...], ... }
    """
    curves = []
    # each spot analysis has a different y value
    spot_value = 0
    axis.set_ylim(-1, len(analysis_spot) + 1)
    axis.get_yaxis().set_visible(False)
    for name in analysis_spot:
        data = analysis_spot[name]
        color = get_color(color_idx)
        t = data
        data_spot = [spot_value for _x in data]
        p = axis.scatter(t, data_spot, color=color, label=name)
        curves.append(p)
        spot_value += 1
        color_idx += 1
    return curves
ac2fb5c970de52f64de94dc42914f078e8d727c9
18,633
def get_character_journal(character_ccp_id, page = 1, page_limit=5): """ :param self: :param character_ccp_id: :param oldest_entry: :param page_limit: :return: """ character = EVEPlayerCharacter.get_object(character_ccp_id) if not character.has_esi_scope('esi-wallet.read_character_wallet.v1'): return None client = EsiClient(authenticating_character=character) journal_entries, _ = client.get("/v4/characters/%s/wallet/journal/?page=%s" % (character_ccp_id,page)) formatted_entries = [] for entry in journal_entries: e = verify_journal_entry(entry, character) formatted_entries.append(e) # pagination logic if formatted_entries and page <= page_limit: older_entries = get_character_journal( character_ccp_id = character_ccp_id, page = page + 1, page_limit = page_limit ) else: older_entries = [] return journal_entries + older_entries
3768c7e3548338dc63521cfa045d7da03061a9e2
18,634
def get_entropy(labels):
    """Calculates entropy using the formula `-Sum(Prob(class) * log2(Prob(class)))`
    for each class in labels."""
    assert len(labels.shape) == 1
    _, count = get_unique_classes_count(labels)
    probabilities = count / labels.shape
    return -np.sum(probabilities * np.log2(probabilities))
244e1be13ade51bc2aa5320b8f1575339559bc24
18,635
def store():
    """Database storage fixture."""
    in_memory_database = Database.in_memory(echo=False)
    in_memory_database.create_tables()
    return DBResultStorage(in_memory_database)
bf29f10b8f93c341d0e757268784944638458c0a
18,636
import json def test_homology(dash_threaded): """Test the display of a basic homology""" prop_type = 'dict' prop_val = { "chrOne": { "organism": "9606", "start": [10001, 105101383], "stop": [27814790, 156030895], }, "chrTwo": { "organism": "9606", "start": [3000000, 125101383], "stop": [9000000, 196130895], }, } def assert_callback(prop_value, nclicks, input_value): answer = '' if nclicks is not None: answer = FAIL if PROP_TYPES[prop_type](input_value) == prop_value: answer = PASS return answer template_test_component( dash_threaded, APP_NAME, assert_callback, ideogram_test_props_callback, 'homology', json.dumps(prop_val), prop_type=prop_type, component_base=COMPONENT_REACT_BASE, perspective="comparative", chromosomes=["1", "2"], **BASIC_PROPS ) driver = dash_threaded.driver # assert the absence of homology region regions = driver.find_elements_by_class_name('syntenicRegion') assert len(regions) == 0 # trigger a change of the component prop btn = wait_for_element_by_css_selector(driver, '#test-{}-btn'.format(APP_NAME)) btn.click() # assert the presence of homology region regions = wait_for_elements_by_css_selector(driver, '.syntenicRegion') assert len(regions) > 0
c03f0d0a33da034c5dfba1a77284bba100e8aca9
18,637
def load_vocab(filename):
    """Loads vocab from a file

    Args:
        filename: (string) the format of the file must be one word per line.

    Returns:
        d: dict[word] = index
    """
    word2id = dict()
    with open(filename, 'r', encoding='utf-8') as f:
        for idx, word in enumerate(f):
            word = word.strip()
            word2id[word] = idx

    id2word = {v: k for k, v in word2id.items()}
    assert len(word2id) == len(id2word)
    return word2id, id2word
b4c2ea26647d85d610d9293fe8e847ae873d8bf8
18,640
def load_labels(label_path):
    """
    Load labels for VOC2012. Labels must be provided as txt files, like my label.txt.
    The label path can be changed when running the training code with --label_path.

    label : { label name : label color }
    index : [ [label color], [label color] ]
    """
    with open(label_path, "r") as f:
        lines = f.readlines()

    label = {}
    index = []
    for line in lines:
        sp = line.split()
        label[sp[0]] = [int(sp[1]), int(sp[2]), int(sp[3])]
        index.append([int(sp[3]), int(sp[2]), int(sp[1])])

    return label, index
9c0388eb533293912b95ca020cbf3c9e9cb331d3
18,641
def _make_function_ptr_ctype(restype, argtypes):
    """Return a function pointer ctype for the given return type and argument types.

    This ctype can for example be used to cast an existing function to a different signature.
    """
    if restype != void:
        try:
            restype.kind
        except AttributeError:
            raise TypeError("restype ({}) has no kind attribute. This usually means that restype is an array type, which is not a valid return type.".format(restype))

    argdecls = []
    for i, argtype in enumerate(argtypes):
        if argtype is ...:
            if i != len(argtypes) - 1:
                raise ValueError("... can only be the last argtype")
            else:
                argdecls.append("...")
        else:
            argdecls.append(ffi.getctype(argtype))

    return ffi.getctype(restype, "(*)({})".format(",".join(argdecls)))
86381f41face07d00e976c28977292e0c80c367d
18,642
import re


def parse_matl_results(output):
    """Convert MATL output to a custom data structure.

    Takes all of the output and parses it out into sections to pass back to
    the client which indicates stderr/stdout/images, etc.
    """
    result = list()
    parts = re.split(r'(\[.*?\][^\n].*\n?)', output)

    for part in parts:
        if part == '':
            continue

        # Strip a single trailing newline
        part = part.rstrip('\n')

        item = {}
        if part.startswith('[IMAGE'):
            item = process_image(re.sub(r'\[IMAGE.*?\]', '', part), part.startswith('[IMAGE]'))
        elif part.startswith('[AUDIO]'):
            item = process_audio(part.replace('[AUDIO]', ''))
        elif part.startswith('[STDERR]'):
            item = {'type': 'stderr', 'value': part.replace('[STDERR]', '')}
        elif part.startswith('[STDOUT]'):
            item = {'type': 'stdout2', 'value': part.replace('[STDOUT]', '')}
        else:
            item = {'type': 'stdout', 'value': part}

        if item:
            result.append(item)

    return result
ec3a7c5c77edaea08a6cdf207f5848485fd33d8f
18,643
def astToMongo(ast):
    """Run the AST-to-mongo helper function after converting it to a not-free equivalent AST."""
    return _astToMongo_helper(_eliminate_not(ast))
be2020aa325054147bdbc4c919b19e1c03a10953
18,644
def write_seqs_fasta(out_fp_seqs_fasta: str, out_fp_seqs_qza: str,
                     tsv_pd: pd.DataFrame) -> str:
    """
    Write the fasta sequences.

    :param out_fp_seqs_fasta: output sequences fasta file name.
    :param out_fp_seqs_qza: output sequences qiime2 Artefact file name.
    :param tsv_pd: table which feature names are sequences.
    """
    with open(out_fp_seqs_fasta, 'w') as fas_o:
        for seq in tsv_pd.index:
            fas_o.write('>%s\n%s\n' % (seq.strip(), seq.strip()))
    cmd = run_import(
        out_fp_seqs_fasta, out_fp_seqs_qza, 'FeatureData[Sequence]')
    return cmd
c8af3e589fef023a5e0d2a784f404d682edc1276
18,645
def f(x, y):
    """
    Takes in two numpy arrays that are result of meshgrid.
    Returns a numpy array with points representing the iteration number for divergence
    """
    max_iter = 100  # maximum number of iterations
    c = x + 1j * y
    z = np.zeros((N, N), dtype=complex)
    r = np.zeros((N, N), dtype=int)  # return
    mask = np.full((N, N), True, dtype=bool)
    for i in range(0, max_iter, 1):
        z[mask] = z[mask]**2 + c[mask]  # z_i = z_i-1**2 + c
        r[mask] = i  # i is the iteration number at which point escapes (diverges)
        # if point ever becomes larger than 2, the sequence will escape to infinity:
        # https://en.wikipedia.org/wiki/Mandelbrot_set#Basic_properties
        mask[np.abs(z) > 2] = False  # points that diverge
    return r, mask
d742e48612f4d8063fd4f4f779d014716fb7fb5f
18,646
from typing import List


def _expand_param_name(param: BaseDescriptor) -> List[str]:
    """
    Get expanded param names

    :param param: The param to expand
    """
    if not getattr(param, 'expand', False):
        raise ValueError('Cannot expand param that does not have the expand kwarg')

    new_arg_names = _get_expanded_param_names(param)
    prefix = getattr(param, 'prefix', '')
    new_arg_names = [prefix + n for n in new_arg_names]
    return new_arg_names
856eece57948111fa59507496fccd3d30b5bcc55
18,647
def accuracy_top_k(output, target, top_k=(1,)):
    """Computes the precision@k for the specified values of k"""
    max_k = max(top_k)
    batch_size = target.size(0)

    _, pred = output.topk(max_k, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in top_k:
        correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(1.0 / batch_size))
    return res
e615b001d6c95cc64ff6ba123337c9cf8ca9bae9
18,648
from typing import Optional


def softmax(data: NodeInput, axis: int, name: Optional[str] = None) -> Node:
    """Apply softmax operation on each element of input tensor.

    :param data: The tensor providing input data.
    :param axis: An axis along which Softmax should be calculated. Can be positive or negative.
    :param name: Optional name for the node.
    :return: The new node with softmax operation applied on each element.
    """
    return _get_node_factory_opset8().create("Softmax", [as_node(data)], {"axis": axis})
c066251ba39e3de429d784d643460bd9ee9027a9
18,649
def get_duts_mac_address(duts): """ This is used to get the Duts and its mac addresses mapping :param duts: List of DUTs :return : Duts and its mac addresses mapping """ duts_mac_addresses = {} cmd = "show platform syseeprom" for dut in duts: if st.is_vsonic(dut): mac = basic.get_ifconfig_ether(dut) duts_mac_addresses[dut] = mac continue eeprom_details = st.show(dut, cmd, skip_error_check=True) if not eeprom_details: iteration=3 for i in range(1, iteration+1): st.wait(2) eeprom_details = st.show(dut, cmd, skip_error_check=True) if eeprom_details: break if not eeprom_details and i >= iteration + 1: st.log("EEPROM data not found for {}".format(dut)) st.report_fail("eeprom_data_not_found", dut) st.log("EEPROM DETAILS -- {}".format(eeprom_details)) if eeprom_details: for data in eeprom_details: if "tlv_name" in data and data["tlv_name"] == "Base MAC Address": duts_mac_addresses[dut] = data["value"].replace(":","") st.log("DUT MAC ADDRESS -- {}".format(duts_mac_addresses)) return duts_mac_addresses
7a3eb9a7fde99a97a04a8b28278790ff42130c79
18,650
def crop(sample, crop_area, in_crop_threshold): """Crop an image to a given area and transform target accordingly. Args: sample: { "image": PIL.Image, "bboxes": Numpy array :math:`(N, 4)` (XYXY format), "keypoints": Numpy array :math:`(N, n, 2)`, (optional) ... } crop_area: An array or list of four numbers (coordinates of the crop box). in_crop_threshold: Float, a threshold for dropping detection targets that intersect too little with the crop_area. Returns: A tuple of image crop (PIL.Image) and transformed targets. """ transformed_sample = {} crop_area = np.array(crop_area) bboxes = sample["bboxes"] intersections = intersection(bboxes, crop_area) bbox_areas = (bboxes[:,2:] - bboxes[:,:2]).prod(axis=1) in_crop = (intersections/bbox_areas > in_crop_threshold) bboxes = bboxes[in_crop] - np.tile(crop_area[:2], 2) transformed_sample["bboxes"] = bboxes if "keypoints" in sample.keys(): keypoints = sample["keypoints"] keypoints = keypoints[in_crop] - crop_area[:2] transformed_sample["keypoints"] = keypoints image = sample["image"] image = _crop_image(image, crop_area) transformed_sample["image"] = image for key in sample.keys(): if key in ["image", "bboxes", "keypoints"]: continue try: transformed_sample[key] = np.array(sample[key])[in_crop] except: transformed_sample[key] = deepcopy(sample[key]) return transformed_sample
264f5bbd9407246c38e539b6e750ea39933ec7af
18,651
from datetime import datetime, timedelta


def get_cpu_utilization(mqueries, region, days):
    """ Gets CPU utilization for instances """
    client = SESSION.client('cloudwatch', region_name=region)
    time_from = (datetime.now() - timedelta(days=days))
    time_to = datetime.now()
    response = client.get_metric_data(
        MetricDataQueries=mqueries,
        StartTime=time_from,
        EndTime=time_to
    )
    return response['MetricDataResults']
f2ce39905e34eab66f7ff59735c9365d4c80eb86
18,652
def std_opt_end_independent(policy_net, target_net, optimizer, memory, batch_size=128, GAMMA=0.99, device='cuda'): """ Apply the standard procedure to an ensemble of deep Q network. """ if len(memory) < batch_size: return 0 total_loss = 0 for ens_num in range(policy_net.get_num_ensembles()): state_batch, action_batch, reward_batch, n_state_batch, done_batch = memory.sample( batch_size) state_batch = state_batch.to(device) action_batch = action_batch.to(device) reward_batch = reward_batch.to(device) n_state_batch = n_state_batch.to(device) done_batch = done_batch.to(device) q = policy_net(state_batch, ens_num=ens_num).gather(1, action_batch) nq = target_net(n_state_batch, ens_num=ens_num).max(1)[0].detach() # Compute the expected Q values expected_state_action_values = ( nq * GAMMA)*(1.-done_batch[:, 0]) + reward_batch[:, 0] # Compute Huber loss loss = F.smooth_l1_loss(q, expected_state_action_values.unsqueeze(1)) total_loss += loss # Optimize the model optimizer.zero_grad() total_loss.backward() optimizer.step() return total_loss.detach() / policy_net.get_num_ensembles()
ea27f65a2227e2a7d4c0c11c7664f79f4c6c5f63
18,653
from typing import Dict
from typing import Tuple
import logging
import requests
import re


def scrape_forecast_products() -> Dict[str, Tuple[str, str]]:
    """
    Get list of forecast products by scraping state overview pages
    """
    logging.info("Scraping list of BOM forecast products")
    products = dict()
    for state in STATES:
        url = f"http://www.bom.gov.au/{state}/forecasts/precis.shtml"
        r = requests.get(url, timeout=10)
        pattern = r'/forecasts/(?P<town>.+?).shtml">Detailed'
        for town in re.findall(pattern, r.text):
            product = get_town_forecast_product_id(state, town)
            if product:
                products[town] = (product, state)
    return products
ea67bc047d03bc36b2d82f253df54c24d8624ed3
18,654
def plate_from_list_spreadsheet( filename, sheet_name=0, num_wells="infer", wellname_field="wellname" ): """Create a plate from a Pandas dataframe where each row contains the name of a well and metadata on the well. Parameters ---------- filename Path to the spreadsheet file. sheet_name Index or name of the spreadsheet to use. num_wells Number of wells in the Plate to be created. If left to default 'infer', the size of the plate will be chosen as the smallest format (out of 96, 384 and 1536 wells) which contains all the well names. wellname_field="wellname" Name of the column of the spreadsheet giving the well names """ if ".xls" in filename: # includes xlsx dataframe = pd.read_excel(filename, sheet_name=sheet_name) elif filename.endswith(".csv"): dataframe = pd.read_csv(filename) return plate_from_dataframe( dataframe, wellname_field=wellname_field, num_wells=num_wells, data={"filename": filename}, )
b082a3900ffebf87f443d17e8b934c73c35b910d
18,655
from typing import Optional def displaced_species_along_mode(species: Species, mode_number: int, disp_factor: float = 1.0, max_atom_disp: float = 99.9) -> Optional[Species]: """ Displace the geometry along a normal mode with mode number indexed from 0, where 0-2 are translational normal modes, 3-5 are rotational modes and 6 is the largest magnitude imaginary mode (if present). To displace along the second imaginary mode we have mode_number=7 Arguments: species (autode.species.Species): mode_number (int): Mode number to displace along Keyword Arguments: disp_factor (float): Distance to displace (default: {1.0}) max_atom_disp (float): Maximum displacement of any atom (Å) Returns: (autode.species.Species): Raises: (autode.exceptions.CouldNotGetProperty): """ logger.info(f'Displacing along mode {mode_number} in {species.name}') mode_disp_coords = species.normal_mode(mode_number) if mode_disp_coords is None: logger.error('Could not get a displaced species. No normal mode ' 'could be found') return None coords = species.coordinates disp_coords = coords.copy() + disp_factor * mode_disp_coords # Ensure the maximum displacement distance any single atom is below the # threshold (max_atom_disp), by incrementing backwards in steps of 0.05 Å, # for disp_factor = 1.0 Å for _ in range(20): if np.max(np.linalg.norm(coords - disp_coords, axis=1)) < max_atom_disp: break disp_coords -= (disp_factor / 20) * mode_disp_coords # Create a new species from the initial disp_species = Species(name=f'{species.name}_disp', atoms=species.atoms.copy(), charge=species.charge, mult=species.mult) disp_species.coordinates = disp_coords return disp_species
ec0ada73abbbccb6ec24c1afacaa106e851d69a9
18,656
def constant(pylist, dtype=None, ragged_rank=None, inner_shape=None, name=None, row_splits_dtype=dtypes.int64): """Constructs a constant RaggedTensor from a nested Python list. Example: ```python >>> ragged.constant([[1, 2], [3], [4, 5, 6]]).eval() RaggedTensorValue(values=[1, 2, 3, 4, 5, 6], splits=[0, 2, 3, 6]) ``` All scalar values in `pylist` must have the same nesting depth `K`, and the returned `RaggedTensor` will have rank `K`. If `pylist` contains no scalar values, then `K` is one greater than the maximum depth of empty lists in `pylist`. All scalar values in `pylist` must be compatible with `dtype`. Args: pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that is not a `list`, `tuple` or `np.ndarray` must be a scalar value compatible with `dtype`. dtype: The type of elements for the returned `RaggedTensor`. If not specified, then a default is chosen based on the scalar values in `pylist`. ragged_rank: An integer specifying the ragged rank of the returned `RaggedTensor`. Must be nonnegative and less than `K`. Defaults to `max(0, K - 1)` if `inner_shape` is not specified. Defaults to `max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified. inner_shape: A tuple of integers specifying the shape for individual inner values in the returned `RaggedTensor`. Defaults to `()` if `ragged_rank` is not specified. If `ragged_rank` is specified, then a default is chosen based on the contents of `pylist`. name: A name prefix for the returned tensor (optional). row_splits_dtype: data type for the constructed `RaggedTensor`'s row_splits. One of `tf.int32` or `tf.int64`. Returns: A potentially ragged tensor with rank `K` and the specified `ragged_rank`, containing the values from `pylist`. Raises: ValueError: If the scalar values in `pylist` have inconsistent nesting depth; or if ragged_rank or inner_shape are incompatible with `pylist`. """ def ragged_factory(values, row_splits): row_splits = constant_op.constant(row_splits, dtype=row_splits_dtype) return ragged_tensor.RaggedTensor.from_row_splits(values, row_splits, validate=False) with ops.name_scope(name, "RaggedConstant"): return _constant_value(ragged_factory, constant_op.constant, pylist, dtype, ragged_rank, inner_shape)
a08da0da42a6b291671f33e52fc49c5c5a66e22c
18,657
def partition(lst, fn): """Partition lst by predicate. - lst: list of items - fn: function that returns True or False Returns new list: [a, b], where `a` are items that passed fn test, and `b` are items that failed fn test. >>> def is_even(num): ... return num % 2 == 0 >>> def is_string(el): ... return isinstance(el, str) >>> partition([1, 2, 3, 4], is_even) [[2, 4], [1, 3]] >>> partition(["hi", None, 6, "bye"], is_string) [['hi', 'bye'], [None, 6]] """ good = [] bad = [] for item in lst: if fn(item): good.append(item) else: bad.append(item) # [[fn(item) for item in lst], [item for item in lst if item not in good]] return [good, bad] # ACTUAL comp way to do it. put the value added first [val for val in lst if fn(val)] # return [ # [val for val in lst if fn(val)], # [val for val in lst if not fn(val)] # ]
94fc669744458952b75b225af489c440da98c740
18,659
def get_page(token, size):
    """Return a portion of the S3 bucket objects."""
    if token:
        response = client.list_objects_v2(
            Bucket=s3_bucket_name,
            MaxKeys=size,
            Prefix=s3_pbject_prefix,
            ContinuationToken=token,
        )
    else:
        response = client.list_objects_v2(
            Bucket=s3_bucket_name,
            MaxKeys=size,
            Prefix=s3_pbject_prefix,
        )
    return response
363e9566a09b4c73a4949a9cdc43c4706342058d
18,660
import click


def num_physical_shards_option(f):
    """
    Function to parse/validate the --num-physical-shards CLI option to dirbs-db repartition.

    :param f: obj
    :return: options obj
    """
    def callback(ctx, param, value):
        if value is not None:
            if value < 1 or value > 100:
                raise click.BadParameter('Number of physical IMEI shards must be between 1 and 100')
        return value

    return click.option('--num-physical-shards',
                        expose_value=True,
                        type=int,
                        help='The number of physical IMEI shards that tables in DIRBS Core should be split into.',
                        callback=callback)(f)
f53eb8003533da0f8562456517110ad92beeea01
18,662
def _ls(method_name, ls_type, path=None, log_throwing=True): """ Private helper method shared by various API methods :param method_name: calling method name :param ls_type: the WLST return type requested :param path: the path (default is the current path) :param log_throwing: whether or not to log the throwing message if the path location is not found :return: the result of the WLST ls(returnMap='true') call :raises: PyWLSTException: if a WLST error occurs """ _method_name = method_name _logger.finest('WLSDPLY-00028', method_name, ls_type, path, class_name=_class_name, method_name=_method_name) if path is not None: # ls(path, returnMap='true') is busted in earlier versions of WLST so go ahead and # change directories to the specified path to workaround this current_path = get_pwd() cd(path) try: result = wlst.ls(ls_type, returnMap='true', returnType=ls_type) except (wlst.WLSTException, offlineWLSTException), e: pwe = exception_helper.create_pywlst_exception('WLSDPLY-00029', path, ls_type, _get_exception_mode(e), _format_exception(e), error=e) if log_throwing: _logger.throwing(class_name=_class_name, method_name=_method_name, error=pwe) cd(current_path) raise pwe cd(current_path) else: current_path = get_pwd() try: result = wlst.ls(ls_type, returnMap='true', returnType=ls_type) except (wlst.WLSTException, offlineWLSTException), e: pwe = exception_helper.create_pywlst_exception('WLSDPLY-00029', current_path, ls_type, _get_exception_mode(e), _format_exception(e), error=e) _logger.throwing(class_name=_class_name, method_name=_method_name, error=pwe) raise pwe _logger.finest('WLSDPLY-00030', method_name, ls_type, current_path, result, class_name=_class_name, method_name=_method_name) return result
39027d9963a2d621707f5934a440f671915ae7fb
18,663
def serialize_input_str(tx, prevout_n, sequence, script_sig):
    """
    Based on project: https://github.com/chaeplin/dashmnb.
    """
    s = ['CTxIn(']
    s.append('COutPoint(%s, %s)' % (tx, prevout_n))
    s.append(', ')
    if tx == '00' * 32 and prevout_n == 0xffffffff:
        s.append('coinbase %s' % script_sig)
    else:
        script_sig2 = script_sig
        if len(script_sig2) > 24:
            script_sig2 = script_sig2[0:24]
        s.append('scriptSig=%s' % script_sig2)
    if sequence != 0xffffffff:
        s.append(', nSequence=%d' % sequence)
    s.append(')')
    return ''.join(s)
c90f194e2627bc2b5aca61066b6febf1fa189bb6
18,664
def evidence():
    """
    Confirm prohibition number and last name matches VIPS and
    applicant business rules satisfied to submit evidence.
    """
    if request.method == 'POST':
        # invoke middleware functions
        args = helper.middle_logic(business.is_okay_to_submit_evidence(),
                                   prohibition_number=request.form['prohibition_number'],
                                   driver_last_name=request.form['last_name'],
                                   config=Config)
        if 'error_string' not in args:
            return jsonify(dict({"data": {"is_valid": True}}))

        return jsonify(dict({
            "data": {
                "is_valid": False,
                "error": args.get('error_string'),
            }
        }))
f8314d33d86dfa6c0fff5ca7b007cd0a7aa8eeed
18,665
def skop(p, rule="b3s23"): """Return a list of pairs (Pattern, minimum population) representing the smallest known oscillators of the specified period in the given rule. Assumes that the local installation of lifelib knows about said rule.""" rule = sanirule(rule) rmod = import_module(f"..{aliases.get(rule, rule)}", __name__) cands = [] for line in rmod.fixeds.split("\n"): words = line.split(maxsplit=3) lp, apg, mp = words[:3] if int(lp) == p: source = words[3] if len(words) > 3 else None cands.append((rmod.lt.pattern(apg), int(mp), source)) for cfunc in rmod.cfuncs: if (out := cfunc(p)): cands.append(out + (() if len(out) > 2 else (None,))) if not cands: return [] cands = [trip if trip[1] else (trip[0], minpop(trip[0]), trip[2]) for trip in cands] mp = min(trip[1] for trip in cands) return list(filter(lambda trip: trip[1] == mp, cands))
e2fb0de582d4e124f21066ee2c43deafa4c882e5
18,666
def relabel_nodes_with_contiguous_numbers(graph_nx, start=0):
    """ Creates a shallow copy """
    mapping = {n: (idx + start) for idx, n in enumerate(list(graph_nx.nodes()))}
    return nx.relabel.relabel_nodes(graph_nx, mapping, copy=True), mapping
457cb714179fe8b7d5536858e37807d0ebd60770
18,667
import yaml


def get_yaml_frontmatter(file):
    """
    Get the yaml front matter and the contents of the given file-like object.
    """
    line = file.readline()
    if line != "---\n":
        return (None, line + file.read())

    frontmatter = []
    for line in file:
        if line == "---\n":
            break
        else:
            frontmatter.append(line)

    return (yaml.load('\n'.join(frontmatter)), file.read())
741941e2b0ab2786eb2e808c9199844514fd713e
18,668
def haversine(coordinate1, coordinate2):
    """
    returns the distance between two coordinates using the haversine formula
    """
    lon1 = coordinate1['Longitude']
    lat1 = coordinate1['Latitude']
    lon2 = coordinate2['Longitude']
    lat2 = coordinate2['Latitude']

    lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])
    dlon = lon2 - lon1
    dlat = lat2 - lat1
    a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2
    c = 2 * asin(sqrt(a))
    distance = EARTH_CIRCUMFERENCE * c
    return distance
533aa0fab71bfae78b7f465e11575e444a352987
18,669
def f1(R1, R2, R3):
    """ f1 switching function """
    R01, R02, R03 = 1.160, 1.160, 2.320
    alpha1 = 1.0
    rho1 = R1 - R01
    rho2 = R2 - R02
    rho3 = R3 - R03
    return 0.5 * (1 - adf.tanh(0.5 * alpha1 * (3 * rho1 - rho2 - rho3)))
4ec1d5bf3160e3fa3bd817fc016dcc803ccf5c82
18,670
def getUser(client, attrs):
    """Get the user, create it as needed.
    """
    try:
        return client.assertedSearch("User [name='%s']" % attrs['name'])[0]
    except icat.SearchResultError:
        user = client.new("user")
        initobj(user, attrs)
        user.create()
        return user
97d084ee5eae18224c981a0348bbbb73457e8827
18,671
def _fn_pow_(self, b):
    """ Power function: f = pow(a, b)
    >>> f =
    >>> a = f.pow(b)
    >>> a = f ** b
    """
    return _fn_make_fun_(self, b, Ostap.MoreRooFit.Power, 'pow_%s_%s')
843c793f061b33659220bf06d177a28971bb76b2
18,672
def funder_trans(params): """ :param params: :return: """ if 6 > len(params): LOG.error('funder_trans: Invalid params {}!'.format(params)) return None selfpubkey = params[0] otherpubkey = params[1] addressFunding = params[2] scriptFunding = params[3] deposit = params[4] asset_type = params[6] founding_txid = params[5] asset_id = get_asset_type_id(asset_type) C_tx = createCTX(addressFunding=addressFunding, balanceSelf=deposit, balanceOther=deposit, pubkeySelf=selfpubkey, pubkeyOther=otherpubkey, fundingScript=scriptFunding, asset_id=asset_id,fundingTxId=founding_txid) RD_tx = createRDTX(addressRSMC=C_tx["addressRSMC"], addressSelf=pubkeyToAddress(selfpubkey), balanceSelf=deposit, CTxId=C_tx["txId"], RSMCScript=C_tx["scriptRSMC"], asset_id=asset_id) return {"C_TX":C_tx,"R_TX":RD_tx}
a92b58ba23274c555e3caf17053c4c41feb5cf58
18,673
def get_replacements_by_guid(replacements_by_name): """Returns a lookup table that is by-guid rather than by-name.""" brush_lookup = BrushLookup.get() def guid_or_name_to_guid(guid_or_name): if guid_or_name in brush_lookup.guid_to_name: return guid_or_name elif guid_or_name in brush_lookup.name_to_guids: return brush_lookup.get_unique_guid(guid_or_name) else: raise LookupError("Not a known brush or brush guid: %r" % guid_or_name) dct = {} for before, after in replacements_by_name: before_guid = guid_or_name_to_guid(before) if after is True: after_guid = before_guid elif after is None: after_guid = None else: after_guid = guid_or_name_to_guid(after) dct[before_guid] = after_guid return dct
47d6e23999c3e414e6e182ad3db334aaa29234f4
18,674
from typing import Callable def _make_divergence_numba_1d(bcs: Boundaries) -> Callable: """make a 1d divergence operator using numba compilation Args: dim (int): The number of support points for each axes boundaries (:class:`~pde.grids.boundaries.axes.Boundaries`): {ARG_BOUNDARIES_INSTANCE} dx (float): The discretization Returns: A function that can be applied to an array of values """ dim_x = bcs.grid.shape[0] scale = 0.5 / bcs.grid.discretization[0] region_x = bcs[0].make_region_evaluator() @jit_allocate_out(out_shape=(dim_x,)) def divergence(arr, out=None): """ apply gradient operator to array `arr` """ for i in range(dim_x): valm, _, valp = region_x(arr[0], (i,)) out[i] = (valp - valm) * scale return out return divergence
f9298f0778b40d183714cfa50767e17b53258f47
18,675
def draw_predicted_rectangle(image_arr, y, x, half_height, half_width): """Draws a rectangle onto the image at the provided coordinates. Args: image_arr: Numpy array of the image. y: y-coordinate of the rectangle (normalized to 0-1). x: x-coordinate of the rectangle (normalized to 0-1). half_height: Half of the height of the rectangle (normalized to 0-1). half_width: Half of the width of the rectangle (normalized to 0-1). Returns: Modified image (numpy array) """ assert image_arr.shape[0] == 3, str(image_arr.shape) height = image_arr.shape[1] width = image_arr.shape[2] tl_y, tl_x, br_y, br_x = unnormalize_prediction(y, x, half_height, half_width, \ img_height=height, img_width=width) image_arr = np.copy(image_arr) * 255 image_arr = np.rollaxis(image_arr, 0, 3) return draw_rectangle(image_arr, tl_y, tl_x, br_y, br_x)
b7b796135ee8da7cef103f6406cae19c2129ed59
18,676
from typing import Sequence
from typing import Optional


def my_max(seq: Sequence[ItemType]) -> Optional[ItemType]:
    """Maximum element of a sequence

    Uses a dynamic programming approach.

    :param seq: the sequence
    :type seq: Sequence[ItemType]
    :return: the maximum element of the sequence
    :rtype: ItemType
    """
    if not seq:
        return None
    if len(seq) == 2:
        if seq[0] >= seq[1]:
            return seq[0]
        return seq[1]
    new_max = my_max(seq[1:])
    if new_max is not None:
        if seq[0] >= new_max:
            return seq[0]
        return new_max
    return seq[0]
43fbac6f57d2548f09eff145f58f8f5c9c7c8b92
18,677
def correct_pm0(ra, dec, pmra, pmdec, dist, vlsr=vlsr0, vx=0, vy=0, vz=0): """Corrects the proper motion for the speed of the Sun Arguments: ra - RA in deg dec -- Declination in deg pmra -- pm in RA in mas/yr pmdec -- pm in declination in mas/yr dist -- distance in kpc Returns: (pmra,pmdec) the tuple with the proper motions corrected for the Sun's motion """ C = acoo.ICRS(ra=ra * auni.deg, dec=dec * auni.deg, radial_velocity=0 * auni.km / auni.s, distance=dist * auni.kpc, pm_ra_cosdec=pmra * auni.mas / auni.year, pm_dec=pmdec * auni.mas / auni.year) kw = dict(galcen_v_sun=acoo.CartesianDifferential( np.array([vx + 11.1, vy + vlsr + 12.24, vz + 7.25]) * auni.km / auni.s)) frame = acoo.Galactocentric(**kw) Cg = C.transform_to(frame) Cg1 = acoo.Galactocentric(x=Cg.x, y=Cg.y, z=Cg.z, v_x=Cg.v_x * 0, v_y=Cg.v_y * 0, v_z=Cg.v_z * 0, **kw) C1 = Cg1.transform_to(acoo.ICRS()) return ((C.pm_ra_cosdec - C1.pm_ra_cosdec).to_value(auni.mas / auni.year), (C.pm_dec - C1.pm_dec).to_value(auni.mas / auni.year))
986ddf5d01ebacbd699fe80054be32e2e241d8b3
18,678
def clean_output_type_names(df: pd.DataFrame) -> pd.DataFrame:
    """Convenience function for cleaning up output type names

    The `outputs_clean` dict is located in the defaults submodule

    :param df: Input data frame to be cleaned up
    :type df: pandas DataFrame
    :return: DataFrame with output type names cleaned up.
    :rtype: pandas DataFrame
    """
    df.replace(to_replace=outputs_clean, inplace=True)
    return df
5732d4b7279046d649cef32a40c0caacf3c368a9
18,679
def analyze_sentiment(input_text):
    """
    Using VADER perform sentiment analysis on the given text
    """
    sentiment_analyzer = SentimentIntensityAnalyzer()
    sentiment_dict = sentiment_analyzer.polarity_scores(input_text)
    return sentiment_dict
9134a321970ef049580050431c88af53a0e80bc9
18,680
def get_free_header(filepath, needed_keys=(), original_name=None, observatory=None):
    """Return the complete unconditioned header dictionary of a reference file.

    DOES NOT hijack warnings.  DOES NOT verify checksums.

    Original name is used to determine file type for web upload temporary files
    which have no distinguishable extension. Original name is browser-side name
    for file.

    get_free_header() is a cached function to prevent repeat file reads.
    Although parameters are given default values, for caching to work correctly
    even default parameters should be specified positionally.

    Since get_free_header() is cached, loading file updates requires first
    clearing the function cache.
    """
    file_obj = file_factory(filepath, original_name, observatory)
    header = file_obj.get_header(needed_keys, checksum=False)
    log.verbose("Header of", repr(filepath), "=", log.PP(header), verbosity=90)
    return header
f3f6460bc2af6a2005df0aeac009b2cbc602faa6
18,681
def clean_ice(options, args): """ Clean all orphaned VMs """ if len(args) < 2: print "The iceage command requires a run name. See --help" return 1 dbname = args[1] cb = CloudInitD(options.database, db_name=dbname, log_level=options.loglevel, logdir=options.logdir, terminate=False, boot=False, ready=True) ha = cb.get_iaas_history() for h in ha: state = h.get_state() handle = h.get_service_iaas_handle() if state == "running": if handle != h.get_id(): print_chars(2, "Terminating an orphaned VM %s\n" % (h.get_id()), bold=True) h.terminate() elif h.get_context_state() == cloudinitd.service_state_initial: print_chars(2, "Terminating pre-staged VM %s\n" % (h.get_id()), bold=True) h.terminate() return 0
fde1f35749ac40945f4225180c53d3cdc45e3067
18,682
def ADD_CIPD_FILE(api, pkg, platform, image, customization, success=True):
    """ mock add cipd file to unpacked image step """
    return ADD_FILE(
        api, image, customization,
        '[CACHE]\\Pkgs\\CIPDPkgs\\resolved-instance_id-of-latest----------' +
        '\\{}\\{}\\*'.format(pkg, platform), success)
b6d5ed44c21bbb11c10e607ffe7773fb53909987
18,683
def row_to_columns(row):
    """Takes a row as a string and returns it as a list of columns."""
    return [column for column in row.split() if column.strip() != '']
837477f2e9c160b93c339a9753e0598ac56c819e
18,684
def circ_diagonal_mode_mat(bk):
    """Diagonal matrix of radial coefficients for all modes/wavenumbers.

    Parameters
    ----------
    bk : (M, N+1) numpy.ndarray
        Vector containing values for all wavenumbers :math:`M` and modes up to
        order :math:`N`

    Returns
    -------
    Bk : (M, 2*N+1, 2*N+1) numpy.ndarray
        Multidimensional array containing diagonal matrices with input
        vector on main diagonal.
    """
    if len(bk.shape) == 1:
        bk = bk[np.newaxis, :]
    K, N = bk.shape
    Bk = np.zeros([K, N, N], dtype=complex)
    for k in range(K):
        Bk[k, :, :] = np.diag(bk[k, :])
    return np.squeeze(Bk)
52cc6154e692a10c714c2a4574b7fea5ea464f2c
18,685
def log_updater(log, repetition, average_loss, optimization_time):
    """
    Function to update the log object.
    """
    index = repetition + 1
    log["losses"] = log["losses"] + [[index, average_loss]]
    log["times"] = log["times"] + [[index, optimization_time]]
    return log
6517437987693a275a4d8d66416d7b94d0049ec5
18,686
import queue import threading def execute_function_multithreaded(fn, args_list, block_until_all_done=True, max_concurrent_executions=1000): """ Executes fn in multiple threads each with one set of the args in the args_list. :param fn: function to be executed :type fn: :param args_list: :type args_list: list(list) :param block_until_all_done: if is True, function will block until all the threads are done and will return the results of each thread's execution. :type block_until_all_done: bool :param max_concurrent_executions: :type max_concurrent_executions: int :return: If block_until_all_done is False, returns None. If block_until_all_done is True, function returns the dict of results. { index: execution result of fn with args_list[index] } :rtype: dict """ result_queue = queue.Queue() worker_queue = queue.Queue() for i, arg in enumerate(args_list): arg.append(i) worker_queue.put(arg) def fn_execute(): while True: try: arg = worker_queue.get(block=False) except queue.Empty: return exec_index = arg[-1] res = fn(*arg[:-1]) result_queue.put((exec_index, res)) threads = [] number_of_threads = min(max_concurrent_executions, len(args_list)) for _ in range(number_of_threads): thread = threading.Thread(target=fn_execute) if not block_until_all_done: thread.daemon = True thread.start() threads.append(thread) # Returns the results only if block_until_all_done is set. results = None if block_until_all_done: # Because join() cannot be interrupted by signal, a single join() # needs to be separated into join()s with timeout in a while loop. have_alive_child = True while have_alive_child: have_alive_child = False for t in threads: t.join(0.1) if t.is_alive(): have_alive_child = True results = {} while not result_queue.empty(): item = result_queue.get() results[item[0]] = item[1] if len(results) != len(args_list): raise RuntimeError( 'Some threads for func {func} did not complete ' 'successfully.'.format(func=fn.__name__)) return results
fc7fdfee1cdf04f10d7f7cea25230a4fd361b88e
18,687
def volume_encryption_metadata_get(context, volume_id, session=None):
    """Return the encryption metadata for a given volume."""
    volume_ref = _volume_get(context, volume_id)
    encryption_ref = volume_type_encryption_get(context,
                                                volume_ref['volume_type_id'])

    values = {
        'encryption_key_id': volume_ref['encryption_key_id'],
    }

    if encryption_ref:
        for key in ['control_location', 'cipher', 'key_size', 'provider']:
            values[key] = encryption_ref[key]

    return values
21794d8d2ccdb5e68cd766cee035bfdc14d51ebc
18,688
def test_shift_to_other_frame(hlwm, direction, frameindex, clients_per_frame): """ in a frame grid with 3 columns, where the middle column has 3 rows, we put the focused window in the middle, and then invoke 'shift' with the given 'direction'. Then, it is checked that the window stays focused but now resides in the frame with the given 'frameindex' """ winid, _ = hlwm.create_client() def otherclients(): # put 'otherclients'-many clients in every other frame winids = hlwm.create_clients(clients_per_frame) return ' '.join(winids) layout_131 = f""" (split horizontal:0.66:0 (split horizontal:0.5:1 (clients vertical:0 {otherclients()}) (split vertical:0.66:0 (split vertical:0.5:1 (clients vertical:0 {otherclients()}) (clients vertical:0 {winid})) (clients vertical:0 {otherclients()}))) (clients vertical:0 {otherclients()})) """ hlwm.call(['load', layout_131]) assert hlwm.attr.clients.focus.winid() == winid assert hlwm.attr.tags.focus.tiling.focused_frame.index() == '0101' hlwm.call(['shift', direction]) # the window is still focused assert hlwm.attr.clients.focus.winid() == winid # but it's now in another frame assert hlwm.attr.tags.focus.tiling.focused_frame.index() == frameindex
afeb04d178bd729fccae01118bc59e8e7b0c09dc
18,689
def perform_tick(gamefield):
    """
    Perform a tick. A tick is one round where each cell has a rule check
    """
    tick_changes = get_tick_changes(gamefield)
    activate_rules(gamefield, tick_changes)
    return gamefield
45b1f26f4040fd5a317eaf348ac6e919e249f3b5
18,691
import ctypes def get_cairo_surface(pygame_surface): """ Black magic. """ class Surface(ctypes.Structure): _fields_ = [ ( 'HEAD', ctypes.c_byte * object.__basicsize__), ( 'SDL_Surface', ctypes.c_void_p)] class SDL_Surface(ctypes.Structure): _fields_ = [ ( 'flags', ctypes.c_uint), ( 'SDL_PixelFormat', ctypes.c_void_p), ( 'w', ctypes.c_int), ( 'h', ctypes.c_int), ( 'pitch', ctypes.c_ushort), ( 'pixels', ctypes.c_void_p)] surface = Surface.from_address(id(pygame_surface)) ss = SDL_Surface.from_address(surface.SDL_Surface) pixels_ptr = ctypes.pythonapi.PyMemoryView_FromMemory(ctypes.c_void_p(ss.pixels), ss.pitch * ss.h, PyBUF_WRITE) pixels = ctypes.cast(pixels_ptr, ctypes.py_object).value return cairo.ImageSurface.create_for_data(pixels, cairo.FORMAT_RGB24, ss.w, ss.h, ss.pitch)
2e8972c1c57a354527c0f3bb9b3700b80cebc45f
18,692
import io


def string_out_table(dat, columns, caption, preferred_sizes=None, table_size="footnotesize"):
    """
    - dat: (Dict String (Array String)), dict of arrays of data for the table
    - columns: (Array String), the column names in desired order
    - caption: None or string, caption text to place below the table
    - preferred_sizes: None or (Array Integer), the preferred column sizes;
      column will be at least that size
    - table_size: None or string, if string, one of "Huge", "huge", "LARGE",
      "Large", "large", "normalsize", "small", "footnotesize", "scriptsize",
      "tiny", the table size
    RETURN: string of the table in Markdown
    """
    if preferred_sizes is None:
        preferred_sizes = [0] * len(columns)
    the_str = ""
    with io.StringIO() as handle:
        if table_size is not None:
            handle.write(f"\\pandocbegin{{{table_size}}}\n\n")
        handle.write(make_table_from_dict_of_arrays(
            dat, columns=columns, preferred_sizes=preferred_sizes))
        if caption is not None:
            handle.write(f"\nTable: {caption}\n")
        if table_size is not None:
            handle.write(f"\n\\pandocend{{{table_size}}}\n\n")
        the_str = handle.getvalue()
    return the_str
c09d3878d39d23a313fa57da9c62f98e2f4fa26b
18,693
def getCreationDate(pdf): """Return the creation date of a document.""" r = string_at(libc.pycpdf_getCreationDate(pdf.pdf)).decode() checkerror() return r
abede03b35ab87fd4532a8cf7348a992f7406df5
18,694
from typing import Annotated


async def fast_dependencies(
    _: Annotated[int, Dependant(dep_without_delays)]
) -> Response:
    """An endpoint with dependencies that execute instantly"""
    return Response()
e4983c262ae5a0327af5128941de8658881dbce7
18,695
from rdflib.term import BNode  # assuming rdflib's blank-node type


def _pretty_print_bnode(bnode: BNode):
    """Return a display string for a blank node."""
    return f'😶 {bnode}'
6cf9f8e55315d8387c31708481751d9e584d497b
18,696
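A one-line check for _pretty_print_bnode above, assuming rdflib is installed (blank-node identifiers are generated randomly):

from rdflib.term import BNode

print(_pretty_print_bnode(BNode()))  # e.g. "😶 N2f3a9c..." with a random id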
import itertools

import numpy


def combine_div(range1, range2):
    """ Combiner for Divide operation.

    >>> import gast as ast
    >>> combine(Range(-1, 5), Range(3, 8), ast.Div())
    Range(low=-1, high=1)
    >>> combine(Range(-1, 5), Range(-5, -4), ast.Div())
    Range(low=-2, high=0)
    >>> combine(Range(-1, 5), Range(-5, 3), ast.Div())
    Range(low=-inf, high=inf)
    """
    if range2.low <= 0 and range2.high >= 0:
        return UNKNOWN_RANGE
    if 0 in range2:
        return UNKNOWN_RANGE
    # NOTE: the doctest results above assume Python 2 integer-division
    # semantics for '/'.
    res = [v1 / v2 for v1, v2 in itertools.product(range1, range2)]
    return Range(numpy.min(res), numpy.max(res))
13fad21174e216fd2341d715eef85806ce65def0
18,698
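A self-contained sketch of the same interval-division rule using plain tuples, to illustrate why a divisor interval containing zero yields an unbounded result. Range and UNKNOWN_RANGE above are only mimicked here, and true division is used, so the printed values differ from the integer-division doctests:

import itertools

def div_interval(a, b):
    # a and b are closed intervals given as (low, high) tuples.
    if b[0] <= 0 <= b[1]:
        # The divisor interval contains zero, so the quotient is unbounded.
        return (float('-inf'), float('inf'))
    quotients = [x / y for x, y in itertools.product(a, b)]
    return (min(quotients), max(quotients))

print(div_interval((-1, 5), (3, 8)))     # (-0.333..., 1.666...)
print(div_interval((-1, 5), (-5, -4)))   # (-1.25, 0.25)
print(div_interval((-1, 5), (-5, 3)))    # (-inf, inf)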
def air(pos, res=None, shape=None, rowmajor=False, rad=None, ref=None):
    """Sets up an Airy system. See the build function for details."""
    pos, res, shape, mid = validate(pos, res, shape, rowmajor)
    if rad is None:
        if pos.ndim != 2:
            raise ValueError("Airy requires either rad or pos[2,2]")
        w = angdist(mid[0]*deg2rad, pos[0,1]*deg2rad, mid[0]*deg2rad, pos[1,1]*deg2rad)*rad2deg
        h = angdist(pos[0,0]*deg2rad, mid[1]*deg2rad, pos[1,0]*deg2rad, mid[1]*deg2rad)*rad2deg
        rad = (w+h)/4
    w = WCS(naxis=2)
    w.wcs.ctype = ["RA---AIR", "DEC--AIR"]
    w.wcs.set_pv([(2, 1, 90-rad)])
    if ref == "standard":  # string comparison needs ==, not 'is'
        ref = None
    return finalize(w, pos, res, shape, ref=ref)
79fc27074e43bd2f4d714e3d4fa7e6854d02ce14
18,699
def get_config_type(service_name):
    """
    get the config tmp_type based on service_name
    """
    if service_name == "HDFS":
        tmp_type = "hdfs-site"
    elif service_name == "HDFS":
        # NOTE: this branch repeats the HDFS check above and is therefore
        # unreachable; "core-site" is never returned as written.
        tmp_type = "core-site"
    elif service_name == "MAPREDUCE":
        tmp_type = "mapred-site"
    elif service_name == "HBASE":
        tmp_type = "hbase-site"
    elif service_name == "OOZIE":
        tmp_type = "oozie-site"
    elif service_name == "HIVE":
        tmp_type = "hive-site"
    elif service_name == "WEBHCAT":
        tmp_type = "webhcat-site"
    else:
        tmp_type = "global"
    return tmp_type
2fec790e67bdba757f8dffe058fae1d508b7d237
18,700
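A quick look at the mapping produced by get_config_type as written (note that "core-site" is unreachable because of the repeated HDFS branch):

for svc in ["HDFS", "MAPREDUCE", "HBASE", "UNKNOWN"]:
    print(svc, "->", get_config_type(svc))
# HDFS -> hdfs-site
# MAPREDUCE -> mapred-site
# HBASE -> hbase-site
# UNKNOWN -> global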
def is_music(file: File) -> bool: """See if the ext is a Music type.""" return file.ext in { "aac", "m4a", "mp3", "ogg", "wma", "mka", "opus", "alac", "ape", "flac", "wav", }
7e35a4f63c656d61d534a1a0116c84c6fc30fefd
18,703
import numpy as np
import torch


def sqeuclidean_pdist(x, y=None):
    """Fast and efficient implementation of
    ||X - Y||^2 = ||X||^2 + ||Y||^2 - 2 X^T Y
    Input: x is a Nxd matrix
           y is an optional Mxd matrix
    Output: dist is a NxM matrix where dist[i,j] is the square norm between
            x[i,:] and y[j,:]
            if y is not given then use 'y=x'.
            i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
    """
    x_norm = (x**2).sum(1).unsqueeze(1)
    if y is not None:
        y_t = torch.transpose(y, 0, 1)
        y_norm = (y**2).sum(1).unsqueeze(0)
    else:
        y_t = torch.transpose(x, 0, 1)
        y_norm = x_norm.squeeze().unsqueeze(0)

    dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
    # get rid of NaNs
    dist[torch.isnan(dist)] = 0.
    # clamp negative stuff to 0
    dist = torch.clamp(dist, 0., np.inf)
    # ensure diagonal is 0 if y is None
    if y is None:
        dist[dist == torch.diag(dist)] = 0.
    return dist
7b9077ed847847bd1030b6f850259717ccc586fe
18,704
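A quick consistency check for sqeuclidean_pdist above against torch.cdist, assuming PyTorch is installed:

import torch

x = torch.randn(4, 3)
y = torch.randn(5, 3)

d_fast = sqeuclidean_pdist(x, y)
d_ref = torch.cdist(x, y, p=2) ** 2   # reference squared Euclidean distances

print(torch.allclose(d_fast, d_ref, atol=1e-5))   # True, up to float error
print(sqeuclidean_pdist(x).diag())                # zeros on the diagonal when y is None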
import h5py


def len(file, path):
    """Get the length of the first dimension of a dataset.

    Note: this helper shadows the built-in ``len`` within its module.

    Args:
        file: path to the HDF5 file.
        path: path of the dataset inside the file.

    Returns:
        The length of the dataset's first dimension.
    """
    with h5py.File(file, mode='r') as h5_file:
        length = h5_file[path].len()
        return length
d64d4ca0076a2bf76c5cb11dbba68adec15e34fa
18,707
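A small round-trip example for the h5py length helper above; it assumes h5py and NumPy are installed, the file and dataset names are arbitrary, and the call resolves to the helper because it shadows the built-in len in its module:

import h5py
import numpy as np

# Write a throwaway dataset, then read back its first-dimension length.
with h5py.File('example.h5', 'w') as f:
    f.create_dataset('data/points', data=np.zeros((128, 3)))

print(len('example.h5', 'data/points'))  # 128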
import theano.tensor as T


def loop(step_fn, n_steps,
         sequences=None, outputs_info=None, non_sequences=None,
         go_backwards=False):
    """
    Helper function to unroll for loops. Can be used to unroll theano.scan.
    The parameter names are identical to theano.scan, please refer to the
    theano.scan documentation for more information.

    Note that this function does not support the truncate_gradient
    setting from theano.scan.

    Parameters
    ----------
    step_fn : function
        Function that defines calculations at each step.

    sequences : TensorVariable or list of TensorVariables
        List of TensorVariable with sequence data. The function iterates
        over the first dimension of each TensorVariable.

    outputs_info : list of TensorVariables
        List of tensors specifying the initial values for each recurrent
        value. Specify output_info to None for non-arguments to
        the step_function

    non_sequences: list of TensorVariables
        List of theano.shared variables that are used in the step function.

    n_steps: int
        Number of steps to unroll.

    go_backwards: bool
        If true the recursion starts at sequences[-1] and iterates backwards.

    Returns
    -------
    List of TensorVariables. Each element in the list gives the recurrent
    values at each time step.
    """
    if not isinstance(sequences, (list, tuple)):
        sequences = [] if sequences is None else [sequences]

    # When backwards reverse the recursion direction
    counter = range(n_steps)
    if go_backwards:
        counter = counter[::-1]

    output = []
    # ====== check if outputs_info is None ====== #
    if outputs_info is not None:
        prev_vals = outputs_info
    else:
        prev_vals = []
    output_idx = [i for i in range(len(prev_vals)) if prev_vals[i] is not None]
    # ====== check if non_sequences is None ====== #
    if non_sequences is None:
        non_sequences = []
    # ====== Main loop ====== #
    for i in counter:
        step_input = [s[i] for s in sequences] + \
                     [prev_vals[idx] for idx in output_idx] + \
                     non_sequences
        out_ = step_fn(*step_input)
        # The returned values from step can be either a TensorVariable,
        # a list, or a tuple.  Below, we force it to always be a list.
        if isinstance(out_, T.TensorVariable):
            out_ = [out_]
        if isinstance(out_, tuple):
            out_ = list(out_)
        output.append(out_)
        prev_vals = output[-1]

    # iterate over each scan output and convert it to same format as scan:
    # [[output11, output12,...output1n],
    #  [output21, output22,...output2n],...]
    output_scan = []
    for i in range(len(output[0])):
        l = map(lambda x: x[i], output)
        output_scan.append(T.stack(*l))

    return output_scan
8abd7c0ccfcabd3e44eca10c6d30a7d9a7add627
18,708
import torch


def get_model(hidden_size=20, n_hidden=5, in_dim=2, out_dim=1,
              penultimate=False, use_cuda=True, bn=False):
    """ Initialize the model and send to gpu """
    # Net is the model class defined elsewhere in this module.
    model = Net(in_dim, out_dim,
                n_hidden=n_hidden,
                hidden_size=hidden_size,
                activation=torch.nn.ReLU(),
                bias=True,
                penultimate=penultimate,
                bn=bn)
    if use_cuda:
        model = model.cuda()
    return model
fd7169276a2a420ce59733ac9687a289e7c3b0af
18,709
import numpy as np


def convert_sweep(sweep, sweep_loc, new_sweep_loc, AR, taper):
    """This converts arbitrary sweep into a desired sweep given wing geometry.

    Assumptions:
    None

    Source:
    N/A

    Inputs:
    sweep               [radians]
    sweep_loc           [unitless]
    new_sweep_loc       [unitless]
    AR                  [unitless]
    taper               [unitless]

    Outputs:
    new_sweep, the sweep at new_sweep_loc   [radians]

    Properties Used:
    N/A
    """
    sweep_LE  = np.arctan(np.tan(sweep) + 4*sweep_loc*
                          (1-taper)/(AR*(1+taper)))

    new_sweep = np.arctan(np.tan(sweep_LE) - 4*new_sweep_loc*
                          (1-taper)/(AR*(1+taper)))

    return new_sweep
17b52460f8a9d32cc2e6f407ccad2851c3edb1f8
18,710
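A numeric sanity check for convert_sweep above, assuming NumPy; the angle is passed in radians because the function applies np.tan directly. Converting a quarter-chord sweep to the leading edge and back should return the original value:

import numpy as np

sweep_qc = np.deg2rad(25.0)   # 25 deg quarter-chord sweep
AR, taper = 8.0, 0.4

# Quarter chord (0.25) to leading edge (0.0), then back again.
sweep_le = convert_sweep(sweep_qc, sweep_loc=0.25, new_sweep_loc=0.0, AR=AR, taper=taper)
sweep_rt = convert_sweep(sweep_le, sweep_loc=0.0, new_sweep_loc=0.25, AR=AR, taper=taper)

print(np.rad2deg(sweep_le))            # leading-edge sweep in degrees
print(np.isclose(sweep_rt, sweep_qc))  # True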
def is_circular(linked_list):
    """
    Determine whether the Linked List is circular or not

    Args:
       linked_list(obj): Linked List to be checked
    Returns:
       bool: Return True if the linked list is circular, return False otherwise

    The way we'll do this is by having two pointers, called "runners", moving
    through the list at different rates. Typically we have a "slow" runner
    which moves at one node per step and a "fast" runner that moves at two
    nodes per step.

    If a loop exists in the list, the fast runner will eventually move behind
    the slow runner as it moves to the beginning of the loop. Eventually it
    will catch up to the slow runner and both runners will be pointing to the
    same node at the same time. If this happens then you know there is a loop
    in the linked list.
    """

    slow = linked_list.head
    fast = linked_list.head

    # the fast runner reaches the end first if there is no loop, so checking
    # fast and fast.next for None is enough
    while fast and fast.next:
        slow = slow.next
        # move the fast runner two nodes per step so it outpaces the slow runner
        fast = fast.next.next
        if fast == slow:
            return True

    # If we get to a node where fast doesn't have a next node or doesn't exist
    # itself, the list has an end and isn't circular
    return False
5a641df602f983de78c9c74b825847412aa54c21
18,711
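A minimal harness for is_circular above; the Node and LinkedList classes below are hypothetical, since the snippet only assumes a .head attribute and .next links:

class Node:
    def __init__(self, value):
        self.value = value
        self.next = None

class LinkedList:
    def __init__(self, values):
        self.head = None
        prev = None
        for v in values:
            node = Node(v)
            if prev is None:
                self.head = node
            else:
                prev.next = node
            prev = node

ll = LinkedList([1, 2, 3, 4])
print(is_circular(ll))   # False

# Close the list into a loop: the last node points back to the head.
node = ll.head
while node.next:
    node = node.next
node.next = ll.head
print(is_circular(ll))   # True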
from typing import Any, List

import pandas as pd
from sklearn.model_selection import KFold


def bm_cv(
    X_train: pd.DataFrame,
    y_train: pd.Series,
    cv: int,
    metrics: List[Any],
    metrics_proba: List[Any],
    metric_kwargs: dict,
    model_dict: dict,
):
    """
    Perform cross validation benchmark with all models specified under
    model_dictionary, using the metrics defined.

    Args:
        X_train: Array of features, used to train the model
        y_train: Array of labels, used to train the model
        cv: Number of cross-validation folds
        metrics: List of metrics that we will use to score our validation
            performance
        metrics_proba: List of metrics that we will use to score our validation
            performance. This is only applicable for classification problems.
            The metrics under `metrics_proba` use the predicted probability
            instead of the predicted class
        metric_kwargs: Dictionary containing the extra arguments needed for
            specific metrics, listed in metrics and metrics_proba
        model_dict: Model dictionary, containing the model_name as the key and
            catalyst.ml.model object as value.

    Returns:
        DataFrame, which contains all of the metric values for each of the
        models specified under model_dictionary, as well as the
        cross-validation index.
    """
    # cv_split and bm are helper functions defined elsewhere in this module.
    result_cv_df = pd.DataFrame()
    kf = KFold(n_splits=cv, shuffle=True, random_state=42)
    for cv_idx, (dev_idx, val_idx) in enumerate(kf.split(X_train)):
        X_dev, X_val, y_dev, y_val = cv_split(X_train, y_train, dev_idx, val_idx)
        df = bm(
            X_dev,
            y_dev,
            X_val,
            y_val,
            metrics,
            metrics_proba,
            metric_kwargs,
            model_dict,
        )
        df["cv_idx"] = cv_idx
        result_cv_df = pd.concat([result_cv_df, df])
    return result_cv_df
d1d1944fd4802f196ca7a1a78cf0f55222f6886c
18,712
def index_get(array, *argv):
    """
    checks if an index is available in the array and returns it
    :param array: the data array
    :param argv: index integers
    :return: None if not available or the return value
    """
    try:
        for index in argv:
            array = array[index]
        return array

    # there is either no info available or no popular times
    # TypeError: rating/rating_n/populartimes wrong or not available
    except (IndexError, TypeError):
        return None
d7fbf0011fd14da905d167735e6900b1bbaf1a8f
18,713
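A short demonstration of the safe nested lookup in index_get above (the data layout is made up):

popular_times = [[9, 10, 30], [10, 55, 80]]

print(index_get(popular_times, 1, 2))   # 80
print(index_get(popular_times, 5, 0))   # None (index out of range)
print(index_get(None, 0))               # None (value is not indexable)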
from os import environ


def _env_vars_available() -> bool:
    """
    Returns:
        `True` if all required environment variables for the Postgres
        connection are set, `False` otherwise
    """
    # DBConfigProviderEnvVarBasedImpl is defined elsewhere in this module.
    return all(
        env_var in environ
        for env_var in DBConfigProviderEnvVarBasedImpl.required_env_vars
    )
8fcc9c06115056bbe8b3b691d192186c0313aeef
18,714
def precisionatk_implementation(y_true, y_pred, k):
    """Function to calculate precision at k for a given sample

    Arguments:
        y_true {list} -- list of actual classes for the given sample
        y_pred {list} -- list of predicted classes for the given sample
        k {int} -- top k predictions we are interested in
    """
    # if k = 0, return 0, as we should never have k = 0
    # (k is always >= 1)
    if k == 0:
        return 0

    # we are only interested in the top k predictions
    y_pred = y_pred[:k]

    # convert predictions to set
    pred_set = set(y_pred)

    # convert actual values to set
    true_set = set(y_true)

    # find common values in both
    common_values = pred_set.intersection(true_set)

    # return length of common values over k
    return len(common_values) / len(y_pred[:k])
945caa95b32681939569ca675475e2527dbdee78
18,715
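A worked example for precisionatk_implementation above, using small integer labels:

y_true = [1, 2, 3]
y_pred = [2, 5, 3, 1, 8]

# Of the top-3 predictions [2, 5, 3], two are relevant -> 2/3.
print(precisionatk_implementation(y_true, y_pred, k=3))  # 0.666...
print(precisionatk_implementation(y_true, y_pred, k=1))  # 1.0 (the top prediction, 2, is relevant)
print(precisionatk_implementation(y_true, y_pred, k=0))  # 0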
import pandas


def add_plane_data(
    data_frame: pandas.DataFrame,
    file_path: str,
    target_col: str = const.DF_PLANE_COL_NAME
) -> pandas.DataFrame:
    """Merges DataFrame with information about the flight planes

    Args:
        data_frame (pandas.DataFrame): Source DataFrame
        file_path (str): Source file path
        target_col (str): Target column to merge

    Returns:
        pandas.DataFrame: Source DataFrame with additional information,
        or None if information about some planes is missing
    """
    planes = df_fileloader.load_agenda(file_path)

    data_frame[target_col] = data_frame[target_col].astype(str)
    planes[target_col] = planes[target_col].astype(str)

    data_frame = pandas.merge(data_frame, planes, how='outer', on=[target_col], indicator=True)

    unmatched = data_frame.query('_merge == "left_only"').groupby([target_col]).size().reset_index(name='count')

    if not unmatched.empty:
        err_msg = 'There\'s missing information about the following planes:'
        for index, row in unmatched.iterrows():
            err_msg += '\n {} with {} occurrences.'.format(row[target_col], row['count'])
        utility.eprint(err_msg)
        return

    return data_frame.query('_merge == "both"').drop(columns=['_merge'])
0dbe3987cb4ee26f0ae6670173c65a3622ca9b5d
18,716