content: string, lengths 35 to 762k
sha1: string, length 40
id: int64, 0 to 3.66M
def cyk(word: str, cfg: CFG) -> bool: """ Checks whether grammar derive the word. This function is applicable to any CFG. Parameters ---------- word: str A word to derive in cfg cfg: CFG A CFG to derive a word Returns ------- bool: Whether grammar derive the word """ word_len = len(word) if not word_len: return cfg.generate_epsilon() cnf = cfg.to_normal_form() terminal_productions = [ production for production in cnf.productions if len(production.body) == 1 ] variable_productions = [ production for production in cnf.productions if len(production.body) == 2 ] matrix = [[set() for _ in range(word_len)] for _ in range(word_len)] for i in range(word_len): matrix[i][i].update( production.head.value for production in terminal_productions if production.body[0].value == word[i] ) for length in range(1, word_len): for start in range(word_len - length): end = start + length for current in range(start, end): matrix[start][end].update( production.head.value for production in variable_productions if production.body[0].value in matrix[start][current] and production.body[1].value in matrix[current + 1][end] ) return cnf.start_symbol.value in matrix[0][word_len - 1]
08fd0790f01ab5ff968564f2b684b833d7cda355
10,400
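A minimal usage sketch for the cyk() entry above. It assumes the CFG type is pyformlang.cfg.CFG (whose API matches the calls used: to_normal_form, generate_epsilon, productions, start_symbol) and that cyk() is already in scope; the grammar below is purely illustrative.

from pyformlang.cfg import CFG

# Balanced a^n b^n strings: S -> a S b | a b
grammar = CFG.from_text("S -> a S b | a b")

print(cyk("aabb", grammar))   # True: derivable
print(cyk("abab", grammar))   # False: not derivable
print(cyk("", grammar))       # False: this grammar does not generate the empty word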
import sys def all_dynamic_dt_needed_paths(f, paths): """ Return a dictionary of all the DT_NEEDED => Library Paths for a given ELF file obtained by recursively following linkage. """ with open(f, 'rb') as file: try: readelf = ReadElf(file) eclass = readelf.elf_class() # This needs to be iterated until we traverse the entire linkage tree dt_needed = readelf.dynamic_dt_needed() dt_needed_paths = dynamic_dt_needed_paths(dt_needed, eclass, paths) for n, lib in dt_needed_paths.items(): dt_needed_paths = dict(all_dynamic_dt_needed_paths(lib, paths), **dt_needed_paths) except ELFError as ex: sys.stderr.write('ELF error: %s\n' % ex) sys.exit(1) return dt_needed_paths
e110b7372f075c8dafd3b84826395289ffb19d8b
10,401
def macd(df, ewa_short, ewa_long, ewa_signal, price_col="adj_close"): """Moving Average Convergence Divergence Parameters: ----------- df : DataFrame Input dataframe. ewa_short : int Exponentially weighted average time-window for a short time-span. A common choice for the short time-window is 12 intervals. ewa_long : int Exponentially weighted average time-window for a longer time-span. A common choice for the long time-window is 26 intervals. ewa_signal : int Time-window for the EWA of the difference between long and short averages. price_col : str Column name in `df` used for defining the current indicator (e.g. "open", "close", etc.) Returns: -------- (macd_histogram, label) : tuple of (Series, str) The MACD histogram (the MACD line minus its signal line) and the constant string 'stationary'. """ ewa_short = int(ewa_short) ewa_long = int(ewa_long) ewa_signal = int(ewa_signal) ewa12 = df[price_col].ewm(span=ewa_short).mean() ewa26 = df[price_col].ewm(span=ewa_long).mean() macd_ts = ewa12 - ewa26 signal_line = macd_ts.ewm(span=ewa_signal).mean() return macd_ts - signal_line, 'stationary'
3140f67371394244b66b9048d273e0d5fee5e471
10,402
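A self-contained sketch of the same EWM-based computation used in the macd() entry above, run on synthetic prices with the default "adj_close" column; only pandas and numpy are assumed, and the series names are illustrative.

import numpy as np
import pandas as pd

prices = pd.DataFrame({"adj_close": 100 + np.cumsum(np.random.randn(300))})

ewa_short = prices["adj_close"].ewm(span=12).mean()
ewa_long = prices["adj_close"].ewm(span=26).mean()
macd_line = ewa_short - ewa_long               # MACD line
signal_line = macd_line.ewm(span=9).mean()     # signal line
histogram = macd_line - signal_line            # what the entry above returns (plus the 'stationary' tag)

print(histogram.tail())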
from typing import Union from typing import Any from enum import Enum def make_annotation(field: ModelField): """ Convert a field annotation type to form data accepted type. The method convert structural field such as `BaseModel` and `Dict` to a str. Such as the model's value is supplied as a serialized JSON string format. Such string will be converted back to a dictionary, and used for initialize previous field. """ field_outer_type = field.outer_type_ is_literal = False # check outer type if isgeneric(field_outer_type): # outer type is a generic class if field_outer_type.__origin__ is Union: # only Union is valid generic class inner_types = field_outer_type.__args__ else: return str, True else: inner_types = (field_outer_type,) field_outer_type = None # check inner types inner_types_new = list() for inner_type in inner_types: if inner_type in (str, int, float, ..., Any): # inner type of `str`, `int` and `float` will be natively used as form data value inner_types_new.append(inner_type) elif issubclass(inner_type, Enum): inner_types_new.append(_make_form_enum(inner_type)) else: # other types will be converted to string literal is_literal = True inner_types_new.append(str) if field_outer_type is None: field_outer_type = inner_types_new[0] else: # set new generic type args field_outer_type = field_outer_type.__origin__[tuple(inner_types_new)] return field_outer_type, is_literal
9abba2c30302554d06c5a734ba13892ce5933811
10,403
def texture_symmetry_predict_patches(classifier, data=None, data_backup_file='FeaturesForPreds'): """Predict if symetric pairs of patches taken in a dermoscopic image are similar or not using features extracted with the `texture_symmetry_features()` function and stored in the "FeatureForPreds.csv" file. # Arguments : classifier: The trained random forest classifier (with patchesDataSet). data: As returned by the texture_symmetry_features function (optional). data_backup_filename: Only if data is None, file to load data from. # Outputs : preds: The predictions (0 if non similar, 1 if similar). nonSimilarNum: Int. The number of non similar matches. similarNum: Int. The number of similar matches. """ if data is None: data = pd.read_csv(f"{package_path()}/data/patchesDataSet/{data_backup_file}.csv", index_col=False) features = list(data) del features[0] else: features = list(data) toPredict = data[features] preds = classifier.predict(toPredict) nonSimilarNum = list(preds).count(0) similarNum = list(preds).count(1) return preds, nonSimilarNum, similarNum
87f5323b70b027992dc3ed56a536f43f0d8a8fd2
10,404
def instantiateSong(fileName): """Create an AudioSegment with the data from the given file""" ext = detectFormat(fileName) if(ext == "mp3"): return pd.AudioSegment.from_mp3(fileName) elif(ext == "wav"): return pd.AudioSegment.from_wav(fileName) elif(ext == "ogg"): return pd.AudioSegment.from_ogg(fileName) elif(ext == "flv"): return pd.AudioSegment.from_flv(fileName) elif(ext == "m4a"): return pd.AudioSegment.from_file(fileName, "mp4") else: return pd.AudioSegment.from_file(fileName, ext)
16d5daab7b4a8b0e62845339c5a7c51618e15cee
10,405
def get_href(link: bs4.element.Tag) -> str: """If a link has an href attribute, return it :param link: The link to be checked :returns: An href """ if (link.has_attr("href")): return (link["href"])
d9f9d9e9303cc6a7e57ca60f3f2b5582e99aa8a8
10,406
from typing import Dict import ast def get_contrib_requirements(filepath: str) -> Dict: """ Parse the python file from filepath to identify a "library_metadata" dictionary in any defined classes, and return a requirements_info object that includes a list of pip-installable requirements for each class that defines them. Note, currently we are handling all dependencies at the module level. To support future expandability and detail, this method also returns per-class requirements in addition to the concatenated list. Args: filepath: the path to the file to parse and analyze Returns: A dictionary: { "requirements": [ all_requirements_found_in_any_library_metadata_in_file ], class_name: [ requirements ] } """ with open(filepath) as file: tree = ast.parse(file.read()) requirements_info = {"requirements": []} for child in ast.iter_child_nodes(tree): if not isinstance(child, ast.ClassDef): continue current_class = child.name for node in ast.walk(child): if isinstance(node, ast.Assign): try: target_ids = [target.id for target in node.targets] except (ValueError, AttributeError): # some assignment types assign to non-node objects (e.g. Tuple) target_ids = [] if "library_metadata" in target_ids: library_metadata = ast.literal_eval(node.value) requirements = library_metadata.get("requirements", []) requirements_info[current_class] = requirements requirements_info["requirements"] += requirements return requirements_info
3b25fa4c4185f0e77f1efeab40a1bfd199e950dd
10,407
import base64 import requests import json import time import sys def watch(endpoint, key): """watch sends watch request to etcd. Examples: curl -L http://localhost:2379/v3alpha/watch \ -X POST -d ''{"create_request": {"key":"Zm9v"} }' """ # Python 2 # key_str = base64.b64encode(key) # Python 3 base64 requires utf-08 encoded bytes # Python 3 JSON encoder requires string key_str = base64.b64encode(bytes(key, "utf-8")).decode() req = {'create_request': {"key": key_str}} while True: try: rresp = requests.post(endpoint + '/v3alpha/watch', data=json.dumps(req), stream=True) for line in rresp.iter_lines(): # filter out keep-alive new lines if line: decoded_line = line.decode('utf-8') resp = json.loads(decoded_line) if 'result' not in resp: log.warning('{0} does not have result'.format(resp)) return '' if 'created' in resp['result']: if resp['result']['created']: log.warning('watching {0}'.format(key)) continue if 'events' not in resp['result']: log.warning('{0} returned no events: {1}'.format(key, resp)) return None if len(resp['result']['events']) != 1: log.warning('{0} returned >1 event: {1}'.format(key, resp)) return None if 'kv' in resp['result']['events'][0]: if 'value' in resp['result']['events'][0]['kv']: val = resp['result']['events'][0]['kv']['value'] return base64.b64decode(val) else: log.warning('no value in ', resp) return None else: log.warning('no kv in ', resp) return None except requests.exceptions.ConnectionError as err: log.warning('Connection error: {0}'.format(err)) time.sleep(5) except: log.warning('Unexpected error:', sys.exc_info()[0]) raise
7aabdaaeca048b2380d7f6672b29e033da3dc587
10,408
def __zedwalther(kin): """ Calculate the z-parameter for the Walther equation (ASTM D341). Parameters ---------- kin: scalar The kinematic viscosity of the lubricant. Returns ------- zed: scalar The z-parameter. """ zed = kin + 0.7 + 10 ** (-1.47 - 1.84 * kin - 0.51 * kin ** 2) return zed
d01a716da03230436c5f511cc65f9e7c96732d99
10,409
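The z-parameter above feeds the Walther/ASTM D341 relation log10(log10(Z)) = A - B*log10(T). Below is a hypothetical sketch, not from the source, that fits A and B from two reference points and interpolates kinematic viscosity, reusing __zedwalther(); the function name and reference values are illustrative.

import numpy as np

def walther_interp(t_query, t1, nu1, t2, nu2):
    # Fit A and B of log10(log10(Z)) = A - B*log10(T) from two (T, nu) points;
    # temperatures in kelvin, kinematic viscosities in cSt.
    ll1 = np.log10(np.log10(__zedwalther(nu1)))
    ll2 = np.log10(np.log10(__zedwalther(nu2)))
    b = (ll1 - ll2) / (np.log10(t2) - np.log10(t1))
    a = ll1 + b * np.log10(t1)
    z = 10 ** (10 ** (a - b * np.log10(t_query)))
    # Invert Z = nu + 0.7 + ...; the correction term is negligible above ~2 cSt.
    return z - 0.7

print(walther_interp(323.15, 313.15, 32.0, 373.15, 5.4))  # roughly the 50 degC viscosity of an ISO VG 32 oil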
def o1_cosmologies_list(): """ Return the list of $\\sigma_8$ values used in training Q1 :return: A numpy array of 22 $\\sigma_8$ values """ return np.array([0.969, 0.654, 1.06, 0.703, 1.1615, 0.759, 0.885, 0.6295, 0.605, 0.7205, 1.1685, 1.179, 0.857, 1.123, 0.843, 0.5245, 0.99, 0.7485, 0.528, 1.1265, 0.8535, 0.9165])
f7fae1d4301631c6ad33090a6c0bceed94380345
10,410
def chrelerr(fbest, stop): """ checks whether the required tolerance for a test function with known global minimum has already been achieved Input: fbest function value to be checked stop(0) relative error with which a global minimum with not too small absolute value should be reached stop(1) global minimum function value of a test function stop(2) if abs(fglob) is very small, we stop if the function value is less than stop(2) Output: flag = 0 the required tolerance has been achieved = 1 otherwise """ fglob = stop[1] if fbest - fglob <= max(stop[0] * abs(fglob), stop[2]): return 0 return 1
c90ad548ea9490cdb5a43cfb3559d7f26a0c57fc
10,411
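A small example of the stop convention used by chrelerr() above: stop[0] is the relative tolerance, stop[1] the known global minimum, stop[2] the absolute fallback tolerance. The 5-dimensional Styblinski-Tang minimum used here is only an illustrative choice.

stop = [1e-2, -39.16599 * 5, 1e-8]   # e.g. 5-dimensional Styblinski-Tang test function

print(chrelerr(-195.5, stop))   # 0: within 1% of the global minimum
print(chrelerr(-180.0, stop))   # 1: tolerance not yet reached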
from scipy.stats import binom def prop_test(df): """ Inspired from R package caret confusionMatrix.R """ x = np.diag(df).sum() n = df.sum().sum() p = (df.sum(axis=0) / df.sum().sum()).max() d = { "statistic": x, # number of successes "parameter": n, # number of trials "null.value": p, # probability of success "p.value": binom.sf(x - 1, n, p), # see https://en.wikipedia.org/wiki/Binomial_test } return(d)
e2b584435cdcc25b091b0d0c17a04b07790a89cd
10,412
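A usage sketch for prop_test() above on a 2x2 confusion matrix (rows = predicted, columns = actual); it assumes numpy is bound as np in the entry's module, as the code implies, and the counts are made up.

import pandas as pd

cm = pd.DataFrame([[50, 10],
                   [5, 35]],
                  index=["pred_0", "pred_1"], columns=["true_0", "true_1"])

result = prop_test(cm)
# statistic = 85 correct predictions out of parameter = 100 trials,
# tested against the no-information rate null.value = 0.55 (largest column share).
print(result["p.value"])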
import time def time_and_log_query( fn ): """ Decorator to time operation of method From High Performance Python, p.27 """ @wraps( fn ) def measure_time( *args, **kwargs ): t1 = time.time() result = fn( *args, **kwargs ) t2 = time.time() elapsed = t2 - t1 log_query( elapsed ) log_query_timestamp() # print(("@timefn:%s took %s seconds" % (fn.__name__, elapsed))) return result return measure_time
75d2bb057afd63c9abbfd0c392c533236238fe15
10,413
def parse_anchor_body(anchor_body): """ Given the body of an anchor, parse it to determine what topic ID it's anchored to and what text the anchor uses in the source help file. This always returns a 2-tuple, though based on the anchor body in the file it may end up thinking that the topic ID and the text are identical. """ c_pos = anchor_body.find(':') if c_pos >= 0: id_val = anchor_body[:c_pos] anchor_body = anchor_body[c_pos+1:] id_val = id_val or anchor_body else: id_val = anchor_body return (id_val.casefold().rstrip(), anchor_body.strip())
5e86ac489727ec4da69f7ca14152cb79da541f3a
10,414
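Two illustrative calls showing the behaviour of parse_anchor_body() above; the anchor strings are made up.

print(parse_anchor_body("settings:Customizing Settings"))
# ('settings', 'Customizing Settings')               explicit topic id before the colon

print(parse_anchor_body("Customizing Settings"))
# ('customizing settings', 'Customizing Settings')   id falls back to the casefolded text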
def func_parameters(func_name): """ Generates function parameters for a particular function. Parameters ---------- func_name : string Name of function. Returns -------- d : integer Size of dimension. g : gradient of objective function. `g(x, *func_args) -> 1-D array with shape (d, )` where `x` is a 1-D array with shape(d, ) and func_args is a tuple of arguments needed to compute the gradient. func_args : tuple Arguments passed to f and g. bound_1 : integer Lower bound used to generate starting points. bound_2 : integer Upper bound used to generate starting points. """ if func_name == 'styb': d = 5 g = mt_obj.styblinski_tang_gradient func_args = () bound_1 = -5 bound_2 = 5 elif func_name == 'qing': d = 5 g = mt_obj.qing_gradient func_args = (d,) bound_1 = -3 bound_2 = 3 elif func_name == 'zak': d = 10 g = mt_obj.zakharov_grad func_args = (d,) bound_1 = -5 bound_2 = 10 elif func_name == 'hart': d = 6 g = mt_obj.hartmann6_grad a, c, p = mt_obj.hartmann6_func_params() func_args = d, a, c, p bound_1 = 0 bound_2 = 1 return d, g, func_args, bound_1, bound_2
4bcd62167cae79c456754349e35209ba4c932caf
10,415
def range_overlap(range1, range2): """ determine range1 is within range2 (or is completely the same) :param range range1: a range :param range range2: another range :rtype: bool :return: True, range1 is subset of range2, False, not the case """ result = all([ range1.start >= range2.start, range1.stop <= range2.stop ]) return result
3df4edf59ea473ad7b832256443a1e4e8c7e0ce9
10,416
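Quick examples for range_overlap() above, which tests whether range1 is contained in range2.

print(range_overlap(range(2, 5), range(0, 10)))   # True: 2..4 lies inside 0..9
print(range_overlap(range(0, 10), range(2, 5)))   # False: containment is not symmetric
print(range_overlap(range(3, 7), range(3, 7)))    # True: identical ranges count as contained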
import fnmatch import os def _expand_query_list(session, queries, recursive=False, verbose=False): """This function expands ls queries by resolving relative paths, expanding wildcards and expanding recursive queries. If the user provides no queries, the method defaults to a single nonrecursive query for the current working directory.""" results = [] # If no queries are supplied by the user, default to a query for the # current working directory if len(queries) == 0: queries = [get_cwd()] # Wildcard expansion is performed first, so it can be combined with other types # of expansion, such as recursive expansion of subcollections later. Each collection # or data object is expanded only once. preprocessed_queries = [] already_expanded = {} for query in queries: # Currently only wildcards without a collection path are supported # e.g. "*.dat", but not "../*.dat" or "*/data.dat". if "/" not in query and ("?" in query or "*" in query): for d in get_dataobjects_in_collection(session, get_cwd()): if fnmatch.fnmatch(d["name"], query) and d["full_name"] not in already_expanded: preprocessed_queries.append(d["full_name"]) already_expanded[d["full_name"]] = 1 for c in get_direct_subcollections(session, get_cwd()): parent, coll = os.path.split(c["name"]) if fnmatch.fnmatch(coll, query) and c["name"] not in already_expanded: preprocessed_queries.append(c["name"]) already_expanded[c["name"]] = 1 else: preprocessed_queries.append(query) for query in preprocessed_queries: absquery = convert_to_absolute_path(query) if collection_exists(session, absquery): results.append({"original_query": query, "expanded_query": absquery, "expanded_query_type": "collection"}) if verbose: print_debug("Argument \"{}\" is a collection.".format(query)) if recursive: for subcollection in get_subcollections(session, absquery): if verbose: print_debug("Recursively adding subcollection " + subcollection + " to queries.") results.append({"original_query": query, "expanded_query": subcollection, "expanded_query_type": "collection"}) elif dataobject_exists(session, absquery): results.append({"original_query": query, "expanded_query": absquery, "expanded_query_type": "dataobject"}) if verbose: print_debug("Argument \"{}\" is a data object.".format(query)) else: print_error( "Query \"{}\" could not be resolved. Ignoring ... ".format(query)) return results
83d5190b15e682416657305309f07fa85c142c5d
10,417
import sys def qsnorm(p): """ rational approximation for x where q(x)=d, q being the cumulative normal distribution function. taken from Abramowitz & Stegun p. 933 |error(x)| < 4.5*10**-4 """ d = p if d < 0. or d > 1.: print('d not in (0,1)') sys.exit() x = 0. if (d - 0.5) > 0: d = 1. - d if (d - 0.5) < 0: t2 = -2. * np.log(d) t = np.sqrt(t2) x = t - old_div((2.515517 + .802853 * t + .010328 * t2), (1. + 1.432788 * t + .189269 * t2 + .001308 * t * t2)) if p < 0.5: x = -x return x
016544dc01abe8e748f3633e4038540ea8f99985
10,418
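qsnorm() above approximates the inverse of the standard normal CDF. A quick comparison against scipy's exact quantile function, assuming the entry's module also provides np and old_div, which the code references but does not import.

from scipy.stats import norm

for p in (0.025, 0.5, 0.975):
    print(p, qsnorm(p), norm.ppf(p))   # the rational approximation is accurate to ~4.5e-4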
def check_auth(username, password): """This function is called to check if a username / password combination is valid. """ user = User.query.filter(User.name == username, User.password_hash == encrypt_password(password)).first() return user is not None
e664f6885b68581d0f647a252db7b0176f54b8c8
10,419
def redirect_success(): """Save complete jsPsych dataset to disk.""" if request.is_json: ## Retrieve jsPsych data. JSON = request.get_json() ## Save jsPsch data to disk. write_data(session, JSON, method='pass') ## Flag experiment as complete. session['complete'] = 'success' write_metadata(session, ['complete','code_success'], 'a') ## DEV NOTE: ## This function returns the HTTP response status code: 200 ## Code 200 signifies the POST request has succeeded. ## The corresponding jsPsych function handles the redirect. ## For a full list of status codes, see: ## https://developer.mozilla.org/en-US/docs/Web/HTTP/Status return ('', 200)
23e4a91df3ea1bedf99dfc59c94cff24d0dd9d45
10,420
import tempfile def fill_region(compound, n_compounds, region, overlap=0.2, seed=12345, edge=0.2, temp_file=None): """Fill a region of a box with a compound using packmol. Parameters ---------- compound : mb.Compound or list of mb.Compound Compound or list of compounds to be put in region. n_compounds : int or list of int Number of compounds to be put in region. region : mb.Box or list of mb.Box Region to be filled by compounds. overlap : float, units nm, default=0.2 Minimum separation between atoms of different molecules. seed : int, default=12345 Random seed to be passed to PACKMOL. edge : float, units nm, default=0.2 Buffer at the edge of the region to not place molecules. This is necessary in some systems because PACKMOL does not account for periodic boundary conditions in its optimization. temp_file : str, default=None File name to write PACKMOL's raw output to. Returns ------- filled : mb.Compound If using mulitple regions and compounds, the nth value in each list are used in order. For example, if the third compound will be put in the third region using the third value in n_compounds. """ _check_packmol(PACKMOL) if not isinstance(compound, (list, set)): compound = [compound] if not isinstance(n_compounds, (list, set)): n_compounds = [n_compounds] if compound is not None and n_compounds is not None: if len(compound) != len(n_compounds): msg = ("`compound` and `n_compounds` must be of equal length.") raise ValueError(msg) # See if region is a single region or list if isinstance(region, Box): # Cannot iterate over boxes region = [region] elif not any(isinstance(reg, (list, set, Box)) for reg in region): region = [region] region = [_validate_box(reg) for reg in region] # In angstroms for packmol. overlap *= 10 # Build the input file and call packmol. filled_pdb = tempfile.mkstemp(suffix='.pdb')[1] input_text = PACKMOL_HEADER.format(overlap, filled_pdb, seed) for comp, m_compounds, reg in zip(compound, n_compounds, region): m_compounds = int(m_compounds) compound_pdb = tempfile.mkstemp(suffix='.pdb')[1] comp.save(compound_pdb, overwrite=True) reg_mins = reg.mins * 10 reg_maxs = reg.maxs * 10 reg_maxs -= edge * 10 # Apply edge buffer input_text += PACKMOL_BOX.format(compound_pdb, m_compounds, reg_mins[0], reg_mins[1], reg_mins[2], reg_maxs[0], reg_maxs[1], reg_maxs[2]) _run_packmol(input_text, filled_pdb, temp_file) # Create the topology and update the coordinates. filled = Compound() for comp, m_compounds in zip(compound, n_compounds): for _ in range(m_compounds): filled.add(clone(comp)) filled.update_coordinates(filled_pdb) return filled
dc125e905a8b6238d79724d66a4d19fa54d130bd
10,421
def morris_traversal(root): """ Morris (in-order) traversal is a tree traversal algorithm that does not employ the use of recursion or a stack. In this traversal, links are created as successors and nodes are printed using these links. Finally, the changes are reverted back to restore the original tree. root = Node(4) temp = root temp.left = Node(2) temp.right = Node(8) temp = temp.left temp.left = Node(1) temp.right = Node(5) """ inorder_traversal = [] # set current to root of binary tree current = root while current is not None: if current.left is None: inorder_traversal.append(current.data) current = current.right else: # find the in-order predecessor (previous) of current previous = current.left while previous.right is not None and previous.right != current: previous = previous.right # make current the right child of its predecessor if previous.right is None: previous.right = current current = current.left # fix the right child of the predecessor else: previous.right = None inorder_traversal.append(current.data) current = current.right return inorder_traversal
1770e1df3811edb6bebb64729e2eddef34348dc4
10,422
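A minimal Node class matching the interface assumed by morris_traversal() above (.data, .left, .right), followed by a run on the tree from its docstring; the class is an illustrative stand-in, not the original project's.

class Node:
    def __init__(self, data):
        self.data = data
        self.left = None
        self.right = None

root = Node(4)
root.left = Node(2)
root.right = Node(8)
root.left.left = Node(1)
root.left.right = Node(5)

print(morris_traversal(root))   # [1, 2, 5, 4, 8] -- in-order, with no recursion or stack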
def normalize_matrix_rows(A): """ Normalize the rows of an array. :param A: An array. :return: Array with rows normalized. """ return A / np.linalg.norm(A, axis=1)[:, None]
cd04f8a77954c53e97f9025d35c232b755577d6d
10,423
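A short check for normalize_matrix_rows() above, which scales each row to unit Euclidean length; numpy is assumed to be bound as np in the entry's module.

import numpy as np

A = np.array([[3.0, 4.0],
              [0.0, 2.0]])
print(normalize_matrix_rows(A))
# [[0.6 0.8]
#  [0.  1. ]]
print(np.linalg.norm(normalize_matrix_rows(A), axis=1))   # [1. 1.]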
def clear_cache() -> int: """ Clear the local cache of formats, menus and other resources read from the server. :return: return code """ return IC_clearresourse()
3e94b618dd988d477517e25f3e7cca23163596f4
10,424
import os import re def get_files_by_ymd(dir_path, time_start, time_end, ext=None, pattern_ymd=None): """ :param dir_path: directory to walk :param time_start: start time :param time_end: end time :param ext: file extension, e.g. '.hdf5' :param pattern_ymd: regex pattern used to extract the time, e.g. r".*(\d{8})_(\d{4})_" :return: list """ files_found = [] if pattern_ymd is not None: pattern = pattern_ymd else: pattern = r".*(\d{8})" for root, dirs, files in os.walk(dir_path): for file_name in files: if ext is not None: if '.' not in ext: ext = '.' + ext if os.path.splitext(file_name)[1].lower() != ext.lower(): continue re_result = re.match(pattern, file_name) if re_result is not None: time_file = ''.join(re_result.groups()) else: continue if int(time_start) <= int(time_file) <= int(time_end): files_found.append(os.path.join(root, file_name)) files_found.sort() return files_found
fe8dc79444fda4e9d60e77edd2f823f3ab2443a1
10,425
def transform(shiftX=0.0, shiftY=0.0, rotate=0.0, skew=0.0, scale=1.0): """ Returns an NSAffineTransform object for transforming layers. Apply an NSAffineTransform t object like this: Layer.transform_checkForSelection_doComponents_(t,False,True) Access its transformation matrix like this: tMatrix = t.transformStruct() # returns the 6-float tuple Apply the matrix tuple like this: Layer.applyTransform(tMatrix) Component.applyTransform(tMatrix) Path.applyTransform(tMatrix) Chain multiple NSAffineTransform objects t1, t2 like this: t1.appendTransform_(t2) """ myTransform = NSAffineTransform.transform() if rotate: myTransform.rotateByDegrees_(rotate) if scale != 1.0: myTransform.scaleBy_(scale) if not (shiftX == 0.0 and shiftY == 0.0): myTransform.translateXBy_yBy_(shiftX,shiftY) if skew: skewStruct = NSAffineTransformStruct() skewStruct.m11 = 1.0 skewStruct.m22 = 1.0 skewStruct.m21 = tan(radians(skew)) skewTransform = NSAffineTransform.transform() skewTransform.setTransformStruct_(skewStruct) myTransform.appendTransform_(skewTransform) return myTransform
fa6b0eb4a84ae7fa13bab1ebb12591abe5362373
10,426
def get_entity(text, tokens): """Extract NER (named-entity) results from the predicted tag sequence. """ # If the text is shorter than the configured max_len, keep only the first len(text) tokens text_len = len(text) tokens = tokens[:text_len] entities = [] entity = "" for idx, char, token in zip(range(text_len), text, tokens): if token.startswith("O") or token.startswith(app.model_configs["tag_padding"]): token_prefix = token token_suffix = None else: token_prefix, token_suffix = token.split("-") if token_prefix == "S": entities.append([token_suffix, char]) entity = "" elif token_prefix == "B": if entity != "": entities.append([tokens[idx-1].split("-")[-1], entity]) entity = "" else: entity += char elif token_prefix == "I": if entity != "": entity += char else: entity = "" else: if entity != "": entities.append([tokens[idx-1].split("-")[-1], entity]) entity = "" else: continue return entities
ee261ceda4443b8c0f0c4663c23c0a422971f72b
10,427
def new_func(message): """ new func :param message: :return: """ def get_message(message): """ get message :param message: :return: """ print('Got a message:{}'.format(message)) return get_message(message)
c5f23b0cd3cebfdd2d36398a3ace18342d6de37c
10,428
import torch import numpy import random def process_midi(raw_mid, max_seq, random_seq, condition_token=False, interval = False, octave = False, fusion=False, absolute=False, logscale=False, label = 0): """ ---------- Author: Damon Gwinn ---------- Takes in pre-processed raw midi and returns the input and target. Can use a random sequence or go from the start based on random_seq. ---------- """ if interval and octave: x = torch.full((max_seq, ), TOKEN_PAD_OCTAVE_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device()) tgt = torch.full((max_seq, ), TOKEN_PAD_OCTAVE_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device()) elif interval and not octave: x = torch.full((max_seq, ), TOKEN_PAD_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device()) tgt = torch.full((max_seq, ), TOKEN_PAD_INTERVAL, dtype=TORCH_LABEL_TYPE, device=cpu_device()) elif octave and fusion and absolute: x = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE, dtype=TORCH_LABEL_TYPE, device=cpu_device()) tgt = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION_ABSOLUTE, dtype=TORCH_LABEL_TYPE, device=cpu_device()) elif octave and fusion: x = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION, dtype=TORCH_LABEL_TYPE, device=cpu_device()) tgt = torch.full((max_seq,), TOKEN_PAD_OCTAVE_FUSION, dtype=TORCH_LABEL_TYPE, device=cpu_device()) elif not interval and octave: x = torch.full((max_seq, ), TOKEN_PAD_OCTAVE, dtype=TORCH_LABEL_TYPE, device=cpu_device()) tgt = torch.full((max_seq, ), TOKEN_PAD_OCTAVE, dtype=TORCH_LABEL_TYPE, device=cpu_device()) elif logscale: x = torch.full((max_seq, ), TOKEN_PAD_RELATIVE, dtype=TORCH_LABEL_TYPE, device=cpu_device()) tgt = torch.full((max_seq, ), TOKEN_PAD_RELATIVE, dtype=TORCH_LABEL_TYPE, device=cpu_device()) else: x = torch.full((max_seq, ), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device()) tgt = torch.full((max_seq, ), TOKEN_PAD, dtype=TORCH_LABEL_TYPE, device=cpu_device()) raw_len = len(raw_mid) full_seq = max_seq + 1 # Performing seq2seq if(raw_len == 0): return x, tgt if(raw_len < full_seq): if interval and logscale and absolute: start_pitch = -1 last_pitch = -1 data_temp = numpy.array([]) for token in raw_mid: token_cpu = token.cpu().detach().numpy() if token_cpu in range(128, 128+255): if start_pitch == -1: start_pitch = token_cpu - 127 last_pitch = token_cpu -127 token_cpu = 127 data_temp = numpy.append(start_pitch, data_temp) # 앞에 절대음 토큰 else: token_cpu = (token_cpu-last_pitch)+127 last_pitch = last_pitch + token_cpu - 127 data_temp = numpy.append(data_temp, token_cpu) else: data_temp = numpy.append(data_temp, token_cpu) raw_mid = torch.tensor(data_temp[:], dtype=TORCH_LABEL_TYPE, device=cpu_device()) x[:raw_len] = raw_mid tgt[:raw_len-1] = raw_mid[1:] if interval and octave: tgt[raw_len] = TOKEN_END_OCTAVE_INTERVAL elif interval and not octave: tgt[raw_len] = TOKEN_END_INTERVAL elif octave and fusion and absolute: tgt[raw_len] = TOKEN_END_OCTAVE_FUSION_ABSOLUTE elif octave and fusion: tgt[raw_len] = TOKEN_END_OCTAVE_FUSION elif not interval and octave: tgt[raw_len] = TOKEN_END_OCTAVE elif logscale: tgt[raw_len] = TOKEN_END_RELATIVE else: tgt[raw_len] = TOKEN_END else: # Randomly selecting a range if(random_seq): end_range = raw_len - full_seq start = random.randint(SEQUENCE_START, end_range) # Always taking from the start to as far as we can else: start = SEQUENCE_START end = start + full_seq data = raw_mid[start:end] # 음차 만들어주기 if interval and logscale and absolute: start_pitch = -1 last_pitch = -1 data_temp = numpy.array([]) for token in data: token_cpu = 
token.cpu().detach().numpy() if token_cpu in range(128, 128+255): if start_pitch == -1: start_pitch = token_cpu - 127 last_pitch = token_cpu -127 token_cpu = 127 data_temp = numpy.append(start_pitch, data_temp) # 앞에 절대음 토큰 else: token_cpu = (token_cpu-last_pitch)+127 last_pitch = last_pitch + token_cpu - 127 data_temp = numpy.append(data_temp, token_cpu) else: data_temp = numpy.append(data_temp, token_cpu) data_temp = numpy.append(data_temp, token_cpu) data = torch.tensor(data_temp, dtype=TORCH_LABEL_TYPE, device=cpu_device()) # condition_token이 true면 label에 따라 조건코드를 추가해주자 if condition_token: if label == 0: data = torch.tensor(CONDITION_CLASSIC) + raw_mid[start:end] elif label == 1: data = torch.tensor(CONDITION_POP) + raw_mid[start:end] x = data[:max_seq] tgt = data[1:full_seq] # print("x:",x) # print("tgt:",tgt) return x, tgt
ae90ddf6c5c18a22298eb1b863a7a90a3f4c6a9f
10,429
def _rack_models(): """ Models list (for racks) """ models = list(Rack.objects. \ values_list('rack_model', flat=True).distinct()) models.sort() return models
6192656d82bee5227c19cd1c3446077027457251
10,430
def confidence_ellipse(cov, means, ax, n_std=3.0, facecolor='none', **kwargs): """ Create a plot of the confidence ellipse described by the covariance matrix *cov* and the means of *x* and *y*. Parameters ---------- cov : array-like, shape (2, 2) Covariance matrix means: array-like, shape (2, ) Means array ax : matplotlib.axes.Axes The axes object to draw the ellipse into. n_std : float The number of standard deviations to determine the ellipse's radii. Returns ------- matplotlib.patches.Ellipse Other parameters ---------------- kwargs : `~matplotlib.patches.Patch` properties """ pearson = cov[0, 1] / np.sqrt(cov[0, 0] * cov[1, 1]) # Using a special case to obtain the eigenvalues of this # two-dimensional dataset. ell_radius_x = np.sqrt(1 + pearson) ell_radius_y = np.sqrt(1 - pearson) ellipse = Ellipse((0, 0), width=ell_radius_x * 2, height=ell_radius_y * 2, facecolor=facecolor, **kwargs) # Calculating the standard deviation of x from # the square root of the variance and multiplying # by the given number of standard deviations. scale_x = np.sqrt(cov[0, 0]) * n_std mean_x = means[0] # calculating the standard deviation of y ... scale_y = np.sqrt(cov[1, 1]) * n_std mean_y = means[1] transf = transforms.Affine2D() \ .rotate_deg(45) \ .scale(scale_x, scale_y) \ .translate(mean_x, mean_y) ellipse.set_transform(transf + ax.transData) return ax.add_patch(ellipse)
eb7ac51f6e24ca41855232b1c73f054e6538f4d4
10,431
def catalogResolveURI(URI): """Do a complete resolution lookup of an URI """ ret = libxml2mod.xmlCatalogResolveURI(URI) return ret
20eefbe64bde8a57e7ce56538c1fa0da45922bfa
10,432
def high_low_difference(dataframe: pd.DataFrame, scale: float = 1.0, constant: float = 0.0) -> pd.DataFrame: """ Returns an allocation based on the difference in high and low values. This has been added as an example with multiple series and parameters. parameters: scale: determines amplitude factor. constant: scalar value added to the allocation size. """ dataframe[PandasEnum.ALLOCATION.value] = (dataframe["high"] - dataframe["low"]) * scale + constant return dataframe
f821f5ed7d3bc714ed9e75f4cba21e4173297148
10,433
def e_x(x, terms=10): """Approximates e^x using a given number of terms of the Maclaurin series """ n = np.arange(terms) return np.sum((x ** n) / fac(n))
ad924f4b7d713a64b6fa68c44d14a1a3aeff2650
10,434
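e_x() above relies on an unimported, vectorised factorial named fac; scipy.special.factorial is one assumption that fits, since n is an array. A self-contained sketch under that assumption, checked against np.exp.

import numpy as np
from scipy.special import factorial as fac   # assumed implementation of the entry's fac

def e_x(x, terms=10):
    """Approximates e^x using the first `terms` terms of the Maclaurin series."""
    n = np.arange(terms)
    return np.sum((x ** n) / fac(n))

print(e_x(1.0), np.exp(1.0))    # ~2.7182815 vs 2.7182818...
print(e_x(1.0, terms=20))       # more terms move the estimate closer to np.exp(1.0)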
from bs4 import BeautifulSoup import re async def parser(html: str) -> list: """Parse the search-result page. Args: html (str): source of the returned page Returns: list: the first 3 image-search results (all of them if fewer than 3; a str message if there are none) """ if "No hits found" in html: return "No matching results found!" soup = BeautifulSoup(html, "lxml").find_all("table", class_="itg gltc")[0].contents all_list = [] for index, item in enumerate(soup): if index == 0: continue elif index > 3: break imdata = { "type": item.find("div", class_=re.compile(r"cn ct\d")).string, "title": item.find("div", class_="glink").string, "link": item.find("td", class_="gl3c glname").contents[0].attrs["href"], "page_count": item.find("td", class_="gl4c glhide").contents[1].string, "im_seq": "", } imdata["im_seq"] = await dl_image(imdata["link"]) all_list.append(imdata) return all_list
e676259e3bfe02a5c4fc7f6deb339d617ab5ff63
10,435
def set_namespace_root(namespace): """ Stores the GO ID for the root of the selected namespace. Parameters ---------- namespace : str A string containing the desired namespace. E.g. biological_process, cellular_component or molecular_function. Returns ------- list The list of GO ID's of the root terms of the selected namespace. """ if namespace == 'biological_process': namespace_list = ['GO:0008150'] elif namespace == 'cellular_component': namespace_list = ['GO:0005575'] elif namespace == 'molecular_function': namespace_list = ['GO:0003674'] else: namespace_list = ['GO:0008150', 'GO:0005575', 'GO:0003674'] return namespace_list
2719b2766912ad8caf3427513c7affa1cdb92eb3
10,436
import itertools import operator def get_commit(oid): """ Get a commit object by its oid. """ parents = [] commit = data.get_object(oid, 'commit').decode() lines = iter(commit.splitlines()) for line in itertools.takewhile(operator.truth, lines): key, value = line.split(' ', 1) if key == 'tree': tree = value elif key == 'parent': parents.append(value) else: assert False, f'Unknown field {key}' message = '\n'.join(lines) return Commit(tree=tree, parents=parents, message=message)
e0e928253ddce7d0087775eedfe6859ddc7e1200
10,437
def _gaussian_dilated_conv2d_oneLearned(x, kernel_size, num_o, dilation_factor, name, top_scope, biased=False): """ Dilated conv2d with antecedent gaussian filter and without BN or relu. """ num_x = x.shape[3].value filter_size = dilation_factor - 1 sigma = _get_sigma(top_scope) # create kernel grid ax = np.arange(-filter_size // 2 + 1., filter_size // 2 + 1.) xx, yy = np.meshgrid(ax, ax) kernel = np.exp(-(xx**2 + yy**2)) mask = np.zeros([filter_size,filter_size, 1, 1, 1], dtype=np.float32) mask[:, :, 0, 0, 0] = kernel w_gauss_value = tf.Variable(tf.constant(0.0, shape=[filter_size,filter_size, 1,1,1]), name='w_gauss_value',trainable=False) # create gaussian filter w_gauss_value = tf.add(w_gauss_value, tf.constant(mask, dtype=tf.float32)) w_gauss_value = tf.div(w_gauss_value, tf.exp(2.0 * sigma**2)) w_gauss_value = tf.div(w_gauss_value, tf.reduce_sum(w_gauss_value)) # perform separable convolution o_gauss = tf.expand_dims(x, -1) o_gauss = tf.nn.conv3d(o_gauss, w_gauss_value, strides=[1,1,1,1,1], padding='SAME') o_gauss = tf.squeeze(o_gauss, -1) with tf.variable_scope(name) as scope: # perform dilated convolution w = tf.get_variable('weights', shape=[kernel_size, kernel_size, num_x, num_o]) o = tf.nn.atrous_conv2d(o_gauss, w, dilation_factor, padding='SAME') if biased: b = tf.get_variable('biases', shape=[num_o]) o = tf.nn.bias_add(o, b) return o
b05d1e4dd84ac9396fba64b4a158549ed0f11694
10,438
def get_cos_similarity(hy_vec, ref_vec): """ measure similarity from two vec """ return (1 - spatial.distance.cosine(hy_vec, ref_vec))
7bdb483ab443a8253317d2d0cca82701b2c762ec
10,439
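get_cos_similarity() above assumes spatial is scipy.spatial; the underlying call it wraps, on illustrative vectors.

from scipy import spatial

v1 = [1.0, 0.0, 1.0]
v2 = [1.0, 0.0, 1.0]
v3 = [0.0, 1.0, 0.0]

print(1 - spatial.distance.cosine(v1, v2))   # 1.0 (same direction)
print(1 - spatial.distance.cosine(v1, v3))   # 0.0 (orthogonal)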
def addDepthDimension (ds): """ Create depth coordinate Parameters ---------- ds : xarray DataSet OOI Profiler mooring data for one profiler Returns ------- ds : xarray DataSet dataset with iDEPTH coordinate set as a dimension """ if ( 'prof_depth' not in ds ): raise TypeError('Couldn\'t find prof_depth data variable') if ( 'actual_range' not in ds.prof_depth.attrs ): raise TypeError('Couldn\'t find prof_depth range attribute') iDEPTH = arange(max(abs(ds.prof_depth.attrs['actual_range'])) + 1) return ds.expand_dims({"iDEPTH":iDEPTH})
9940bc21af373f738c1b0ab682a6cae048e21ba0
10,440
def divide_dataset_by_dataarray(ds, dr, varlist=None): """ Divides variables in an xarray Dataset object by a single DataArray object. Will also make sure that the Dataset variable attributes are preserved. This method can be useful for certain types of model diagnostics that have to be divided by a counter array. For example, local noontime J-value variables in a Dataset can be divided by the fraction of time it was local noon in each grid box, etc. Args: ----- ds: xarray Dataset The Dataset object containing variables to be divided. dr: xarray DataArray The DataArray object that will be used to divide the variables of ds. Keyword Args (optional): ------------------------ varlist: list of str If passed, then only those variables of ds that are listed in varlist will be divided by dr. Otherwise, all variables of ds will be divided by dr. Returns: -------- ds_new : xarray Dataset A new xarray Dataset object with its variables divided by dr. """ # ----------------------------- # Check arguments # ----------------------------- if not isinstance(ds, xr.Dataset): raise TypeError("The ds argument must be of type xarray.Dataset!") if not isinstance(dr, xr.DataArray): raise TypeError("The dr argument must be of type xarray.DataArray!") if varlist is None: varlist = ds.data_vars.keys() # ----------------------------- # Do the division # ----------------------------- # Keep all Dataset attributes with xr.set_options(keep_attrs=True): # Loop over variables for v in varlist: # Divide each variable of ds by dr ds[v] = ds[v] / dr return ds
cdf519c425a3622d2293971650eb0325eda76ba8
10,441
def count_words(my_str): """ Count the number of words in a sentence using the string split function. INPUT - "This is testing program" OUTPUT - 4 """ my_str_list = my_str.split(" ") return len(my_str_list)
731291937205fd0b9cb9153b4ee95d42416a5124
10,442
from datetime import datetime import pytz def suggest_create(): """Create a suggestion for a resource.""" descriptors = Descriptor.query.all() for descriptor in descriptors: if descriptor.is_option_descriptor and \ descriptor.name != 'supercategories': choices = [(str(i), v) for i, v in enumerate(descriptor.values)] if descriptor.name == 'city': setattr( ResourceSuggestionForm, descriptor.name, SelectField(choices=choices)) else: setattr( ResourceSuggestionForm, descriptor.name, SelectMultipleField(choices=choices)) for descriptor in descriptors: if not descriptor.is_option_descriptor and \ descriptor.name != 'report count': setattr(ResourceSuggestionForm, descriptor.name, TextAreaField()) # Add form fields asking for the suggester's name, email, and phone number. # Dynamically added here so that form's fields are displayed in the # correct order. # setattr(ResourceSuggestionForm, 'contact_information', # FormField(ContactInformationForm)) form = ResourceSuggestionForm() if form.validate_on_submit(): resource_suggestion = ResourceSuggestion( name=form.name.data, # contact_name=form.contact_information.contact_name.data, # contact_email=form.contact_information.contact_email.data, # contact_phone_number=form.contact_information.contact_phone_number. # data, # additional_information=form.contact_information. # additional_information.data, submission_time=datetime.now(pytz.timezone('US/Eastern'))) if form.address.data: resource_suggestion.address = form.address.data save_associations(resource_suggestion, form, descriptors, False) db.session.add(resource_suggestion) try: db.session.commit() # app = create_app(os.getenv('FLASK_CONFIG') or 'default') # contact_email = app.config['ADMIN_EMAIL'] # get_queue().enqueue( # send_email, # recipient=contact_email, # subject='New Suggestion', # template='suggestion/email/suggestion', # # name=form.contact_name.data, # # email=form.contact_email.data, # # phone=form.contact_phone_number.data, # # message=form.suggestion_text.data, # resource_name=form.name.data, # resource_address=form.address.data, # ) flash('Thanks for the suggestion!', 'success') return redirect(url_for('main.index')) except IntegrityError: db.session.rollback() flash('Database error occurred. Please try again.', 'error') return render_template('suggestion/suggest.html', form=form, name=None)
368667911b4eea8debb76ec8d44a17939d7022d4
10,443
def remove_dataset_tags(): """Command for removing tags from a dataset.""" command = Command().command(_remove_dataset_tags).lock_dataset() return command.require_migration().with_commit(commit_only=DATASET_METADATA_PATHS)
dcb45a70b5fb61a70de5acc8c2954771f4dfaed6
10,444
from typing import Optional def delete_device(connection: Connection, id: str, error_msg: Optional[str] = None): """Delete a device. Args: connection: MicroStrategy REST API connection object id: ID of the device error_msg (string, optional): Custom Error Message for Error Handling Returns: Complete HTTP response object. Expected status is 204. """ url = f"{connection.base_url}/api/v2/devices/{id}" return connection.delete(url=url)
5beda713239ee46048247d1cfb2952abbc8d1739
10,445
import tokenize def build_model(): """ Build a ML pipeline with RandomForest classifier GriSearch :return: GridSearch Output """ pipeline = Pipeline([ ('vect', CountVectorizer(tokenizer=tokenize)), ('tfidf', TfidfTransformer()), ('clf', MultiOutputClassifier(RandomForestClassifier())) ]) parameters = {'clf__estimator__n_estimators': [50, 60], 'clf__estimator__min_samples_split': [2, 3, 4], 'clf__estimator__criterion': ['entropy', 'gini'] } cv = GridSearchCV(pipeline, param_grid=parameters) return cv
3c38d1e94c78f83fd1edfc91c6b16c67180d0ab6
10,446
def basic_auth(func): """Decorator for basic auth""" def wrapper(request, *args, **kwargs): try: if is_authenticated(request): return func(request, *args, **kwargs) else: return HttpResponseForbidden() except Exception, ex: return HttpResponse(json.dumps({'success': False, 'error': ex.message}), mimetype='text/json') return wrapper
652894c4d9eaf8022c0a783d85fde61a8bfdc5eb
10,447
async def test_transmute(request, user: str, env: str=None, group: [str]=None): """ API Description: Transmute Get. This will show in the swagger page (localhost:8000/api/v1/). """ return { "user": user, "env": env, "group": group, }
8b3cf64fdd44b43227d72d63bf38e341a3c20d40
10,448
from . import imagemaker from . import modspeclist from . import axecommands from . import realworld from . import configfile from .inputchecks import InputChecker import os import shutil def simdispim(incat=None, config=None, lambda_psf=None, dispim_name=None, model_spectra=None, model_images=None, nx=None, ny=None, exptime=None, bck_flux=0.0, extraction=True, extrfwhm=3.0, orient=True, slitless_geom=True, adj_sens=True, silent=True): """ Main function for the task SIMDISPIM This module is the high level wrapper function for the task SIMDISPIM. All necessary actions are done, feedback is given to the user @param incat: name of model object table @type incat: string @param config: aXe configuration file name @type config: string @param lambda_psf: wavelength the object shapes were determined at @type lambda_psf: float @param dispim_name: name of dispersed image @type dispim_name: string @param model_spectra: name of model spectra file @type model_spectra: string @param model_images: name of model images @type model_image: string @param nx: number of pixels in x @type nx: int @param ny: number of pixels in y @type ny: int @param exptime: exposure time @type exptime: dloat @param bck_flux: flux in background @type bck_flux: float @param extraction: flag for default extraction @type extraction: boolean @param extrfwhm: multiplier for extraction width @type extrfwhm: float @param orient: flag for tilted extraction @type orient: boolean @param slitless_geom: flag for slitless optimized extraction @type slitless_geom: boolean @param adj_sens: flag for adjusted flux conversion @type adj_sens: boolean @param silent: flag for silent run @type silen: boolean """ # give brief feedback print('\nSIMDISPIM: Starting ...') # just set the environments axe_setup(axesim=True) if incat == None or config==None: print(__doc__) return 1 # check the input parameters in_check = InputChecker(taskname='simdispim') # for the 'simdisp'-task in_check.check_simdispim_input(incat, config, lambda_psf, model_spectra, model_images, nx, ny, exptime, bck_flux, extraction, extrfwhm, orient, slitless_geom, adj_sens=adj_sens) if dispim_name == None: # derive the output name pos = incat.rfind('.') if pos < 0: dirima_name = incat + '_direct.fits' grisima_name = incat + '_slitless.fits' else: dirima_name = incat[:pos] + '_direct.fits' grisima_name = incat[:pos] + '_slitless.fits' else: dirima_name = dispim_name.replace('.fits','_direct.fits') grisima_name = dispim_name # make a full path to the # direct image as dummy and as final output dummy_dirima_path = getIMAGE(get_random_filename('t', '.fits')) dummy_grisima_path = getIMAGE(get_random_filename('t', '.fits')) final_dirima_path = getOUTSIM(dirima_name) final_grisima_path = getOUTSIM(grisima_name) try: # to convert the background value # to a float bck_flux = float(bck_flux) except ValueError: # now it must be a file; # check for its existence if not os.path.isfile(getCONF(bck_flux)): err_msg = 'Missing background image: ' + getCONF(bck_flux) raise aXeSIMError(err_msg) # store the path to the # background image bck_flux = getCONF(bck_flux) # load the aXe configuration file conf = configfile.ConfigFile(getCONF(config)) # make the simulation configuration # file pointing the correct extensions config_simul = conf.axesim_prep() # delete the object # explicitly del conf # load the simulation configuration file conf_simul = configfile.ConfigFile(getCONF(config_simul)) # make sure a reasonable default # for lambda_psf is given if needed if lambda_psf == None: lambda_psf = 
conf_simul.confirm_lambda_psf() print('SIMDISPIM: Input Model Object List: %s' % getIMAGE(incat)) print('SIMDISPIM: Input aXe configuration file: %s' % getCONF(config)) if model_spectra != None: print('SIMDISPIM: Input Model Spectra: %s' % getIMAGE(model_spectra)) if model_images != None: print('SIMDISPIM: Input Model Spectra: %s' % getIMAGE(model_images)) print('SIMDISPIM: Fixed wavlength for PSF: %s' % str(lambda_psf)) print('SIMDISPIM: Background flux/image: %s' % str(bck_flux)) if exptime != None: print('SIMDISPIM: Input exposure time: %s' % str(exptime)) if nx == None and ny == None: print('SIMDISPIM: Input image dimensions: %s' % 'AUTO') else: print('SIMDISPIM: Input image dimensions: (%s,%s)' % (str(nx),str(ny))) print('SIMDISPIM: Output dispersed image: %s' % final_grisima_path) if extraction: print('SIMDISPIM: Extraction width scaling: %.2f' % extrfwhm) print('SIMDISPIM: Extraction tilted: %s' % str(orient)) print('SIMDISPIM: Extraction slitless optimized: %s' % str(slitless_geom)) print('SIMDISPIM: Size-adjusted flux conversion: %s' % str(adj_sens)) print('SIMDISPIM: Output extracted spectra: %s' % final_grisima_path.replace('.fits', '_2.SPC.fits')) print('SIMDISPIM: Output stamp images: %s' % final_grisima_path.replace('.fits', '_2.STP.fits')) print('') # create the dummy image maker i_maker = imagemaker.DummyImages(getCONF(config_simul), dummy_grisima_path, dummy_dirima_path, nx, ny) # nake the dummy images i_maker.makeImages() # load the model object table inobjects = modspeclist.ModelObjectTable(getIMAGE(incat)) # fill the model object table inobjects.fill_columns(i_maker.WCSimage, i_maker.WCSext) # load the object to make the grism simulations grismator = axecommands.DispImator(i_maker, config_simul, getIMAGE(incat), lambda_psf, model_spectra, model_images) grismator.run(silent=silent) grismator.mopup() # get the name of the result image, which is the contamination image result_image = getOUTPUT(os.path.basename(dummy_grisima_path).replace('.fits','_2.CONT.fits')) # convert the 'contamination' image into # a full output image with three extensions # and noise (if desired) rworld = realworld.RealWorld(result_image, extname='SCI', exptime=exptime, bck_flux=bck_flux, rdnoise=conf_simul['RDNOISE'], instrument=conf_simul['INSTRUMENT']) rworld.make_real() # move the resulting image to the correct # name and place shutil.move(result_image, final_grisima_path) # check whether an extraction # is desired if extraction: # create and extractor extractor = axecommands.DummyExtractor(i_maker, final_grisima_path, config_simul, getIMAGE(incat), bck_flux, extrfwhm, orient, slitless_geom, adj_sens, lambda_mark=lambda_psf) # make the extraction extractor.prepare_extraction() extractor.run(silent=silent) extractor.mopup() # delete the dummy images i_maker.deleteImages() # give brief feedback print('SIMDISPIM: Done ...\n') return 0
b35f79035e26f485c6ce8ea2f475f8a45e960ad4
10,449
def ref_from_rfgc(sample): """ rename columns from RFGC catalog """ ref = dict( ra = sample['RAJ2000'], dec = sample['DEJ2000'], a = sample['aO'], b = sample['bO'], PA = sample['PA'] ) return ref
f93f4dfefc107c082f5454a59fb7a145ab9e9e60
10,450
import optparse def build_cmdline(): """ creates OptionParser instance and populates command-line options and returns OptionParser instance (cmd) """ cmd=optparse.OptionParser(version=__version__) cmd.add_option('-c', '', dest='config_fname',type="string", help='WHM/WHMCS configuration file', metavar="FILE") cmd.add_option('-s', '', dest="whm_section", type="string", help="WHM server to use. Specify section name. eg: -s ds01", metavar="SERVER") cmd.add_option('','--search', action="store", dest='search', type="string", help="Search client by DNS domain name or cPanel username", metavar="STRING") cmd.add_option('-d', '', dest='whmcs_deptid', type="int", help="WHMCS Department ID", metavar="INT") cmd.add_option('-m', '', dest='whmcs_ticketmsg_fname', type="string", help="WHMCS abuse ticket template file", metavar='FILE') cmd.add_option('-r', '', dest='whm_suspendmsg_fname', type="string", help='cPanel account suspension reason template file', metavar='FILE') cmd.add_option('-f', '', dest='whmcs_proofmsg_fname', type="string", help='Abuse proof file which will be appended to abuse ticket message', metavar='FILE') cmd.add_option('', '--subject', dest='whmcs_subject', type="string", help='Specify abuse ticket subject title.', metavar="STRING") cmd.add_option('-y', '--allyes', dest='allyes', action="store_true", default=False, help='Assume yes as an answer to any question which would be asked') return cmd
c72dddfbf9bc728d06bae73bf028a85bc16d8261
10,451
import urllib def get_repo_slugname(repo): """ >>> get_repo_slugname("https://build.frida.re") build.frida.re >>> get_repo_slugname("https://build.frida.re/./foo/bar") build.frida.re >>> get_repo_slugname("://build.frida.re") build.frida.re """ parse_result = urllib.parse.urlparse(repo) return parse_result.netloc
a36eec2c30018d3dbb298649d9d4c03586e60263
10,452
def lowess(x, y, f=2. / 3., itera=3): """lowess(x, y, f=2./3., iter=3) -> yest Lowess smoother: Robust locally weighted regression. The lowess function fits a nonparametric regression curve to a scatterplot. The arrays x and y contain an equal number of elements; each pair (x[i], y[i]) defines a data point in the scatterplot. The function returns the estimated (smooth) values of y. The smoothing span is given by f. A larger value for f will result in a smoother curve. The number of robustifying iterations is given by iter. The function will run faster with a smaller number of iterations. """ n = len(x) r = int(ceil(f * n)) h = [np.sort(np.abs(x - x[i]))[r] for i in range(n)] #h = [ (np.abs(x - x[i]))[r] for i in range(n)] w = np.clip(np.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0) w = np.nan_to_num(w, nan=0.0) w = (1 - w ** 3) ** 3 s= np.diagonal(w) yest = np.zeros(n) delta = np.ones(n) for iteration in range(itera): for i in range(n): weights = delta * w[:, i] b = np.array([np.sum(weights * y), np.sum(weights * y * x)]) A = np.array([[np.sum(weights), np.sum(weights * x)], [np.sum(weights * x), np.sum(weights * x * x)]]) beta = linalg.solve(A, b) yest[i] = beta[0] + beta[1] * x[i] residuals = y - yest s = np.median(np.abs(residuals)) delta = np.clip(residuals / (6.0 * s), -1, 1) delta = (1 - delta ** 2) ** 2 return yest
9fd5543ab76d4ec61a08ad703e734122fe1fb718
10,453
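A smoke test for the lowess() entry above on a noisy sine; it assumes the entry's module provides np (numpy), ceil (math.ceil) and linalg (numpy.linalg or scipy.linalg), which the code references but does not import.

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0, 2 * np.pi, 100)
y = np.sin(x) + rng.normal(scale=0.3, size=x.size)

yest = lowess(x, y, f=0.25, itera=3)       # smoothed estimate at each x[i]
print(np.max(np.abs(yest - np.sin(x))))    # typically well below the raw noise amplitude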
import asyncio import aiohttp async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool: """Set up a config entry.""" tibber_connection = tibber.Tibber( access_token=entry.data[CONF_ACCESS_TOKEN], websession=async_get_clientsession(hass), time_zone=dt_util.DEFAULT_TIME_ZONE, ) hass.data[DOMAIN] = tibber_connection async def _close(event): await tibber_connection.rt_disconnect() entry.async_on_unload(hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close)) try: await tibber_connection.update_info() except asyncio.TimeoutError as err: raise ConfigEntryNotReady from err except aiohttp.ClientError as err: _LOGGER.error("Error connecting to Tibber: %s ", err) return False except tibber.InvalidLogin as exp: _LOGGER.error("Failed to login. %s", exp) return False hass.config_entries.async_setup_platforms(entry, PLATFORMS) # set up notify platform, no entry support for notify component yet, # have to use discovery to load platform. hass.async_create_task( discovery.async_load_platform( hass, "notify", DOMAIN, {CONF_NAME: DOMAIN}, hass.data[DATA_HASS_CONFIG] ) ) return True
64d893959afa3c5af6e0f293f691183a07d04363
10,454
def scenario_mask_vulnerable(plot=plt, show=False): """ creates scenario with different groups that are more or less vulnerable Args: plot: plot to show show (bool): variable if graphic should be shown Returns: plot: plot to show ani_humans: animation of the humans ani_stack: animation of the stackplot """ # variables that influence the simulation prob, infection_radius, number_of_humans, temperature, number_vulnerable_humans, number_humans_with_mask = ask_for_different_input() number_standard_humans = number_of_humans - \ number_vulnerable_humans - number_humans_with_mask # plot setup fig = plot.figure(figsize=(10, 4)) # for healthy and vulnerable humans plot_humans = fig.add_subplot(1, 2, 1) plot_humans.axes.xaxis.set_visible(False) plot_humans.axes.yaxis.set_visible(False) # for stackplot plot_stack = fig.add_subplot(1, 2, 2) plot_stack.set_frame_on(False) plot_stack.axes.xaxis.set_visible(False) plot_stack.axes.yaxis.set_visible(False) # setting up the list of humans global_humans, energy = init.init_sys( temperature, prob, number_of_humans, infection_radius=infection_radius, world_limit=world_limit, ) global_humans = init.make_vulnerable( global_humans, number_of_humans, number_vulnerable_humans, infection_radius, prob) global_humans = init.wear_mask( global_humans, number_of_humans, number_humans_with_mask, infection_radius, prob) inf = [] suc = [] rec = [] inf_mask = [] suc_mask = [] rec_mask = [] inf_vulnerable = [] suc_vulnerable = [] rec_vulnerable = [] steps = [] # animation of the movement of humans ani_humans = animation.FuncAnimation( fig, scenario_basic_animation, fargs=[global_humans, plot_humans, time_step, energy], interval=plot_refresh_rate, ) # animation of the stackplot ani_stack = animation.FuncAnimation( fig, stack_animation_mask_vulnerable, fargs=[ global_humans, plot_stack, time_step, inf_vulnerable, inf, inf_mask, rec_vulnerable, rec, rec_mask, suc_vulnerable, suc, suc_mask, steps, number_of_humans, infection_radius], interval=plot_refresh_rate) if show: plot.show() return plot, ani_humans, ani_stack
aebdf569f2670ebb5ffcea5ccd1aad504f2447ae
10,455
def _operator_parser(expr, first, current): """This method parses the expression string and substitutes the temporal operators with numerical values. Supported operators for relative and absolute time are: - td() - the time delta of the current interval in days and fractions of days or the unit in case of relative time - start_time() - The start time of the interval from the begin of the time series in days and fractions of days or the unit in case of relative time - end_time() - The end time of the current interval from the begin of the time series in days and fractions of days or the unit in case of relative time Supported operators for absolute time: - start_doy() - Day of year (doy) from the start time [1 - 366] - start_dow() - Day of week (dow) from the start time [1 - 7], the start of the week is monday == 1 - start_year() - The year of the start time [0 - 9999] - start_month() - The month of the start time [1 - 12] - start_week() - Week of year of the start time [1 - 54] - start_day() - Day of month from the start time [1 - 31] - start_hour() - The hour of the start time [0 - 23] - start_minute() - The minute of the start time [0 - 59] - start_second() - The second of the start time [0 - 59] - end_doy() - Day of year (doy) from the end time [1 - 366] - end_dow() - Day of week (dow) from the end time [1 - 7], the start of the week is monday == 1 - end_year() - The year of the end time [0 - 9999] - end_month() - The month of the end time [1 - 12] - end_week() - Week of year of the end time [1 - 54] - end_day() - Day of month from the end time [1 - 31] - end_hour() - The hour of the end time [0 - 23] - end_minute() - The minute of the end time [0 - 59] - end_second() - The minute of the end time [0 - 59] The modified expression is returned. """ is_time_absolute = first.is_time_absolute() expr = _parse_td_operator(expr, is_time_absolute, first, current) expr = _parse_start_time_operator(expr, is_time_absolute, first, current) expr = _parse_end_time_operator(expr, is_time_absolute, first, current) expr = _parse_start_operators(expr, is_time_absolute, current) expr = _parse_end_operators(expr, is_time_absolute, current) return expr
fcee6006bdd9e96950b6e09f516895d89241a19a
10,456
def _read_data(filename):
    """
    Read the script and return it as a string
    :param filename:
    :return:
    """
    javascript_path = _get_data_absolute_path(filename)
    with open(javascript_path) as javascript:
        return javascript.read()
73b2b3bc94831b761b29c8430044045217fd36ad
10,457
import os def xonshconfig(env): """Ensures and returns the $XONSHCONFIG""" xcd = env.get("XONSH_CONFIG_DIR") xc = os.path.join(xcd, "config.json") return xc
b2fe3b2766bed761b7d74419b0c2c7b7d27c56e5
10,458
import os import bz2 import lzma import gzip def file_open(filename, mode='r', encoding='utf8'): """Open file with implicit gzip/bz2 support Uses text mode by default regardless of the compression. In write mode, creates the output directory if it does not exist. """ if 'w' in mode and not os.path.isdir(os.path.dirname(filename)): os.makedirs(os.path.dirname(filename)) if filename.endswith('.bz2'): if mode in {'r', 'w', 'x', 'a'}: mode += 't' return bz2.open(filename, mode=mode, encoding=encoding) if filename.endswith('.xz'): if mode in {'r', 'w', 'x', 'a'}: mode += 't' return lzma.open(filename, mode=mode, encoding=encoding) if filename.endswith('.gz'): if mode in {'r', 'w', 'x', 'a'}: mode += 't' return gzip.open(filename, mode=mode, encoding=encoding) return open(filename, mode=mode, encoding=encoding)
ad4b3a02273e02339ef3a6d2c365fc3a087692ea
10,459
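A minimal usage sketch for file_open above (an illustration, not part of the original snippet); it assumes the function is in scope and round-trips a gzip-compressed text file. The directory component of the path is created automatically by the write-mode makedirs call.

# Hypothetical round-trip through a .gz file; "out/example.txt.gz" is an arbitrary path.
path = "out/example.txt.gz"

with file_open(path, mode="w") as fh:   # 'w' becomes 'wt' and gzip.open is used
    fh.write("hello compressed world\n")

with file_open(path) as fh:             # read back as text
    print(fh.read())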
def schoollist(): """ Return all the schools. Return an empty schools object if no schools :return: """ items = get_schools() if items: return response_for_schools_list(get_schools_json_list(items)) return response_for_schools_list([])
3293fe590b3e7754c400c90da15a373fda909b13
10,460
from datetime import datetime


def getNumNullops(duration, max_sample=1.0):
    """Return number of do-nothing loop iterations."""
    for amount in [2**x for x in range(100)]:  # 1,2,4,8,...
        begin = datetime.now()
        for ii in range(amount):
            pass
        elapsed = (datetime.now() - begin).total_seconds()
        if elapsed > max_sample:
            break
    return int(amount/elapsed*duration)
5d3114267c1d844e95fb2fd4f9123914ba69dafc
10,461
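A rough usage sketch for getNumNullops (an assumption that the function above is in scope): the returned iteration count is used as a crude busy-wait of roughly half a second.

from datetime import datetime   # already imported by the snippet, repeated for completeness

n = getNumNullops(0.5)                      # iterations that take about 0.5 s
start = datetime.now()
for _ in range(n):
    pass
print((datetime.now() - start).total_seconds())   # should land near 0.5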
def get_dependencies_from_wheel_cache(ireq): """Retrieves dependencies for the given install requirement from the wheel cache. :param ireq: A single InstallRequirement :type ireq: :class:`~pip._internal.req.req_install.InstallRequirement` :return: A set of dependency lines for generating new InstallRequirements. :rtype: set(str) or None """ if ireq.editable or not is_pinned_requirement(ireq): return matches = WHEEL_CACHE.get(ireq.link, name_from_req(ireq.req)) if matches: matches = set(matches) if not DEPENDENCY_CACHE.get(ireq): DEPENDENCY_CACHE[ireq] = [format_requirement(m) for m in matches] return matches return
e3bb4f57a989f4f8c049ae68262511a97110204d
10,462
def hbp_fn(): """Create a ReLU layer with HBP functionality.""" return HBPReLU()
3f8b8aaa460ae786b292e98891761b1596e369cc
10,463
def xtrans(r): """RBDA Tab. 2.2, p. 23: Spatial coordinate transform (translation of origin). Calculates the coordinate transform matrix from A to B coordinates for spatial motion vectors, in which frame B is translated by an amount r (3D vector) relative to frame A. """ r1,r2,r3 = r return matrix.sqr(( 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, r3, -r2, 1, 0, 0, -r3, 0, r1, 0, 1, 0, r2, -r1, 0, 0, 0, 1))
58620149cff92a261c3a3c500fdf3b7308aded67
10,464
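The snippet builds the transform with scitbx's matrix.sqr; the following is a hedged NumPy re-statement of the same 6x6 matrix (xtrans_np is a hypothetical helper, not the original code), which makes the RBDA block structure X = [[1, 0], [-r_cross, 1]] explicit.

import numpy as np

def xtrans_np(r):
    """NumPy sketch of the RBDA translation transform for spatial motion vectors."""
    r1, r2, r3 = r
    rx = np.array([[0.0, -r3,  r2],
                   [r3,  0.0, -r1],
                   [-r2,  r1, 0.0]])   # cross-product (skew-symmetric) matrix
    X = np.eye(6)
    X[3:, :3] = -rx                    # lower-left block matches the snippet above
    return X

print(xtrans_np((1.0, 2.0, 3.0)))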
def get_chop_flux(obs, chunk_method="nanmedian", method="nanmean",
                  err_type="internal", weight=None, on_off=True):
    """
    Calculate the flux in chopped data. The data will first be processed in
    each chop chunk by chunk_method, unless chunk_method is set to None or
    'none', in which case the data is left as it is. Then the data is
    separated into an on-chop and an off-chop part, and the difference of the
    two is the flux. The function supports two ways to calculate the error:
    if err_type is 'internal', the difference between the means of all on-
    and off-chop data is the flux, and the combined error of the two parts is
    the final error; if err_type is 'external', the difference of each on-off
    pair is taken first, and the mean and error of these differences are
    used. The method used to calculate the mean in this step is given by the
    method argument, which supports 'mean', 'nanmean', 'median', 'nanmedian'.

    :param obs: Obs or ObsArray object containing data and chop_
    :type obs: Obs or ObsArray
    :param str chunk_method: str, method parameter passed to chunk_proc() to
        chunk the data as the first step. If set to None or 'none', the data
        will skip the chunk step and the flux will be extracted from the raw
        data
    :param str method: str, the method parameter passed to
        weighted_proc_along_axis() to calculate the flux and error, suggested
        values are "nanmean" or "nanmedian"
    :param str err_type: str, allowed values are 'internal' and 'external'
    :param weight: Obs or ObsArray object containing weight, should be of the
        same type as obs. If left None, all data points are treated with the
        same weight.
    :type weight: Obs or ObsArray
    :param bool on_off: bool flag to compute the flux as on chop - off chop;
        if False, the flux is off chop - on chop
    :return: tuple of (flux, error, weight) objects of the same type as input
        obs
    :rtype: tuple
    :raises TypeError: invalid input type
    :raises ValueError: invalid method value
    """
    if not isinstance(obs, Obs):
        raise TypeError("Invalid input type for obs, expect Obs/ObsArray.")
    obs = obs.copy()
    mean_obs = obs.proc_along_time(method="nanmean")
    if obs.empty_flag_ or obs.chop_.empty_flag_:
        raise ValueError("obs data_ or chop_ is empty.")
    if weight is None:
        weight = obs.replace(arr_in=np.ones(obs.shape_))
    weight = weight.copy()
    weight.fill_by_mask(mask=np.isnan(obs.data_), fill_value=np.nan)

    if (chunk_method is None) or chunk_method.strip().lower() == "none":
        obs_chunk_on = obs.take_by_flag_along_time(chop=True)
        obs_chunk_off = obs.take_by_flag_along_time(chop=False)
        wt_chunk_on = weight.take_by_flag_along_time(flag_arr=obs.chop_.data_)
        wt_chunk_off = weight.take_by_flag_along_time(flag_arr=~obs.chop_.data_)
    else:
        obs_chunk = obs.chunk_proc(method=chunk_method)
        obs_chunk_on = obs_chunk.take_by_flag_along_time(chop=True)
        obs_chunk_off = obs_chunk.take_by_flag_along_time(chop=False)
        wt_chunk_method = "nansum" if chunk_method.strip().lower()[:3] == "nan" \
            else "sum"
        wt_chunk = weight.chunk_proc(chunk_edge_idxs=obs.chop_.chunk_edge_idxs_,
                                     method=wt_chunk_method)
        wt_chunk_on = wt_chunk.take_by_flag_along_time(
            flag_arr=obs_chunk.chop_.data_)
        wt_chunk_off = wt_chunk.take_by_flag_along_time(
            flag_arr=~obs_chunk.chop_.data_)

    if err_type.strip().lower()[0] == "i":
        obs_chunk_on_mean, obs_chunk_on_err, obs_chunk_on_wt = \
            weighted_proc_along_axis(obs=obs_chunk_on, method=method,
                                     weight=wt_chunk_on, axis=-1)
        obs_chunk_off_mean, obs_chunk_off_err, obs_chunk_off_wt = \
            weighted_proc_along_axis(obs=obs_chunk_off, method=method,
                                     weight=wt_chunk_off, axis=-1)
        obs_flux = obs_chunk_on_mean - obs_chunk_off_mean
        obs_err = np.sqrt(obs_chunk_on_err ** 2 + obs_chunk_off_err ** 2)
        obs_wt = obs_chunk_on_wt + obs_chunk_off_wt
    elif err_type.strip().lower()[0] == "e":
        flag_arr1, flag_arr2 = get_match_phase_flags(
            chop1=obs_chunk_on.chop_, chop2=obs_chunk_off.chop_,
            match_same_phase=False)
        if (len(flag_arr1) != 0) and (len(flag_arr2) != 0):
            obs_chunk_on_match = obs_chunk_on.take_by_flag_along_time(
                flag_arr=flag_arr1)
            obs_chunk_off_match = obs_chunk_off.take_by_flag_along_time(
                flag_arr=flag_arr2)
            wt_chunk_on_match = wt_chunk_on.take_by_flag_along_time(
                flag_arr=flag_arr1)
            wt_chunk_off_match = wt_chunk_off.take_by_flag_along_time(
                flag_arr=flag_arr2)
            obs_chunk_diff = obs_chunk_on_match - obs_chunk_off_match
            wt_chunk_diff = 1 / (1 / wt_chunk_on_match + 1 / wt_chunk_off_match)
            wt_chunk_diff.fill_by_mask(mask=~np.isfinite(wt_chunk_diff.data_),
                                       fill_value=np.nan)
            obs_flux, obs_err, obs_wt = weighted_proc_along_axis(
                obs=obs_chunk_diff, method=method, weight=wt_chunk_diff,
                axis=-1)
        else:
            obs_flux, obs_err, obs_wt = (
                mean_obs.replace(
                    arr_in=np.full(mean_obs.shape_, fill_value=np.nan)),
                mean_obs.replace(
                    arr_in=np.full(mean_obs.shape_, fill_value=np.nan)),
                mean_obs.replace(
                    arr_in=np.full(mean_obs.shape_, fill_value=0)))
    else:
        raise ValueError("Invalid value for err_type.")

    if not on_off:
        obs_flux *= -1
    obs_flux = mean_obs.replace(arr_in=obs_flux.data_)
    obs_err = mean_obs.replace(arr_in=obs_err.data_)
    obs_wt = mean_obs.replace(arr_in=obs_wt.data_)

    return obs_flux, obs_err, obs_wt
010d7038ec0e9b3fa683b53077f78181bf656e5d
10,465
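A simplified NumPy illustration of the two err_type conventions described in the docstring, using plain arrays instead of the Obs/ObsArray machinery (all names below are assumptions made for the sketch, not part of the original code).

import numpy as np

rng = np.random.default_rng(0)
on = 1.0 + rng.normal(0.0, 0.1, 100)    # on-chop samples: signal plus noise
off = rng.normal(0.0, 0.1, 100)         # off-chop samples: noise only

# 'internal': difference of the two means, errors combined in quadrature
flux_int = on.mean() - off.mean()
err_int = np.sqrt(on.std(ddof=1)**2 / on.size + off.std(ddof=1)**2 / off.size)

# 'external': statistics of the paired on-off differences
diff = on - off
flux_ext = diff.mean()
err_ext = diff.std(ddof=1) / np.sqrt(diff.size)

print(flux_int, err_int, flux_ext, err_ext)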
def my_example_embeddings_method(paths, embedding_size, default_value=1):
    """
    :param paths: (list) a list of BGP paths; a BGP path is a list of integers (ASNs)
    :param embedding_size: (int) the size of the embedding
    :param default_value: (int) the value for the embeddings
    :return: (pandas dataframe object) a dataframe with index the ASN numbers included in the paths
             where each row has <embedding_size> embeddings all with the same <default_value>
    """
    unique_ASNs = set()
    for path in paths:
        unique_ASNs.update(path)
    columns = ['embedding_' + str(i) for i in range(embedding_size)]
    # pandas rejects a raw set as an index, so pass a sorted list instead
    data = pd.DataFrame(default_value, index=sorted(unique_ASNs), columns=columns)

    return data
48e0c1b1089c236c74cdb82dde021cd9bebd62bf
10,466
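A small usage example for my_example_embeddings_method, assuming the function and its pandas import are in scope; the ASNs are arbitrary illustrative values.

paths = [[3356, 1299, 2914], [1299, 6939], [2914, 174]]
emb = my_example_embeddings_method(paths, embedding_size=4, default_value=1)

print(emb.shape)             # (5, 4) - one row per unique ASN
print(list(emb.index))       # [174, 1299, 2914, 3356, 6939]
print(emb.iloc[0].tolist())  # [1, 1, 1, 1]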
def process_prompt_choice(value, prompt_type):
    """Convert command value to business value."""
    if value is not None:
        idx = prompt_type(value)
        return idx
    raise CommandError("The choice does not exist, please choose again.")
b38f6f43da369928cea0058466578425a9b66024
10,467
import operator def summarise_input(rawimg, labelimg): """This function takes as input: 'rawimg' (the data) and 'labelimg' (the cell boundary cartoon) Then using the z=1, channel=1 frame, produces a summary table for inspection. It also calculates which label is the background, assuming it is the largest cell. It returns the following: (e.g. if there are three labels labeled 17, 20, 41. Where "20" is the background. This function will return: 1. A list of all the labels (e.g. [17,20,41]) 2. The number of labels (e.g. 3) 3. The index of the background (e.g. 1) 4. The label name of the background (e.g. 20) """ #Take a snapshot of the image at z=1 and c=1 for this analysis inputimg=Duplicator().run(rawimg, 1, 1, 1, 1, 1, 1); #ImagePlus imp, int firstC, int lastC, int firstZ, int lastZ, int firstT, int lastT) results = ArrayList() im = IntensityMeasures( inputimg, labelimg ) results.add( im.getMean() ) results.add( im.getStdDev() ) results.add( im.getNumberOfVoxels()) results.add( im.getMin() ) results.add( im.getMax() ) results.add( im.getMedian() ) results.add( im.getMode() ) mergedTable = ResultsTable() numLabels = results.get(0).getCounter() ###Create a dictionary to store data### d={} d["label"]=[] for i in xrange(results.size()): #for each heading (mean, std. dev. etc.) measure = results.get( i ).getColumnHeading( 0 ) d[measure]=[] ###################################### for i in xrange(numLabels): mergedTable.incrementCounter() label = results.get( 0 ).getLabel( i ) #obtains the 0-indexed ith label, regardless of its string-name. d["label"].append(label) mergedTable.addLabel(label) for j in xrange(results.size()): measure = results.get( j ).getColumnHeading( 0 ) value = results.get( j ).getValue( measure, i ) mergedTable.addValue( measure, value ) d[measure].append(value) if show_table: mergedTable.show( inputimg.getShortTitle() +"-intensity-measurements" ) ###Ensure labels file is in the correct format: ### #Labels sometimes have gaps (e.g. labels=[4,40,82] is possible). #The Python script stores them in a python list, and accesses them by “python indexes” (i.e. their order, starting with 0) #In this example, label 4 would have a python index of 0 and label 40 would have a python index of 1 etc. tmp=map(int, d["label"]) #convert label numbers (strings) to integers assert sorted(tmp) == tmp, "FATAL ERROR: The labels provided are not in numerical order, \ whereas this script was written assuming they are. \ If this error occurs, it means the script needs editing" ################################################### if manually_assign_backgroundlayer_to_label: background_label_index=tmp.index(manually_assign_backgroundlayer_to_label) print("The background has been manually selected as label {} (i.e. python index {})".format(manually_assign_backgroundlayer_to_label, background_label_index)) else: background_label_index, background_number_of_voxels = max(enumerate(d["NumberOfVoxels"]), key=operator.itemgetter(1)) print("The auto-selected background is at label {} (i.e. python index {})".format(d["label"][background_label_index], background_label_index)) return d["label"], numLabels, background_label_index, d["label"][background_label_index]
9442f8ecf4f00ad21895b4878395a99ec18b2019
10,468
import tempfile def create_inchi_from_ctfile_obj(ctf, **options): """Create ``InChI`` from ``CTfile`` instance. :param ctf: Instance of :class:`~ctfile.ctfile.CTfile`. :type ctf: :class:`~ctfile.ctfile.CTfile` :return: ``InChI`` string. :rtype: :py:class:`str` """ # apply fixed hydrogen layer when atom charges are present atom_charges = [atom.charge for atom in ctf.atoms if atom.charge != '0'] if atom_charges: options.update({'fixedH': '-xF'}) with tempfile.NamedTemporaryFile(mode='w') as moltempfh, tempfile.NamedTemporaryFile(mode='r') as inchitempfh: moltempfh.write(ctf.writestr(file_format='ctfile')) moltempfh.flush() openbabel.convert(input_file_path=moltempfh.name, output_file_path=inchitempfh.name, input_format='mol', output_format='inchi', **options) inchi_result = inchitempfh.read() return inchi_result.strip()
19f41603e37087aa6ca6fc79850b3456f86864e4
10,469
import io def get_info(df, verbose = None,max_cols = None, memory_usage = None, null_counts = None): """ Returns the .info() output of a dataframe """ assert type(df) is pd.DataFrame buffer = io.StringIO() df.info(verbose, buffer, max_cols, memory_usage, null_counts) return buffer.getvalue()
48e7e3f004c10125b2fece8a19950d05ac888032
10,470
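A usage sketch for get_info, assuming the function above is in scope:

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": ["x", "y", None]})
summary = get_info(df)   # the text DataFrame.info() would normally print to stdout
print(summary)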
import os


def get_directory_size(directory):
    """Get directory disk usage in MB."""
    directory_size = 0
    for (path, dirs, files) in os.walk(directory):
        for file in files:
            directory_size += os.path.getsize(os.path.join(path, file))
    return directory_size / (1024 * 1024.0)
eb6f4aefa9746cd4488475ec0cbf891c9dcb0091
10,471
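A quick check of get_directory_size (illustrative only; it assumes the function above is in scope):

import os
import tempfile

tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "blob.bin"), "wb") as fh:
    fh.write(b"\0" * (512 * 1024))           # 0.5 MB payload

print(round(get_directory_size(tmp), 2))     # roughly 0.5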
def iwave_modes(N2, dz, k=None):
    """
    Calculates the eigenvalues and eigenfunctions of the internal wave
    eigenvalue problem:

    $$ \frac{d^2 \phi}{dz^2} + \frac{N^2}{c^2} \phi = 0 $$

    with boundary conditions \phi = 0 at the top and bottom.
    """

    nz = N2.shape[0]

    # Remove the surface values
    dz2 = 1/dz**2

    # Construct the LHS matrix, A
    A = np.diag(-1*dz2*np.ones((nz-1)),-1) + \
        np.diag(2*dz2*np.ones((nz,)),0) + \
        np.diag(-1*dz2*np.ones((nz-1)),1)

    # BC's
    A[0,0] = -1.
    A[0,1] = 0.
    A[-1,-1] = -1.
    A[-1,-2] = 0.

    # Construct the RHS matrix i.e. put N^2 along diagonals
    B = np.diag(N2,0)

    # Solve... (use scipy not numpy)
    w, phi = linalg.eig(A, b=B)

    c = 1. / np.power(w, 0.5) # since term is ... + N^2/c^2 \phi

    # Sort by the eigenvalues
    idx = np.argsort(c)[::-1] # descending order

    # Calculate the actual phase speed
    cn = np.real( c[idx] )

    return phi[:,idx], cn
c3f930421916a2618ab69af4bab984f18cf962cc
10,472
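A consistency check for iwave_modes, under the assumption that the function above plus numpy (as np) and scipy's linalg are in scope. For a constant N^2 profile over depth H, the analytic mode speeds are c_n = N H / (n pi), which the first few numerical values should approximate:

import numpy as np
from scipy import linalg   # the snippet above relies on this being imported

H = 500.0                  # water depth [m] (arbitrary test value)
nz = 101
dz = H / (nz - 1)
N = 1e-2                   # constant buoyancy frequency [rad/s]
N2 = np.full(nz, N**2)

phi, cn = iwave_modes(N2, dz)
analytic = N * H / (np.arange(1, 4) * np.pi)

print(cn[:3].round(3))     # numerical mode-1..3 phase speeds
print(analytic.round(3))   # ~[1.592, 0.796, 0.531]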
def run_sql_migration(config, migration): """ Returns bool Runs all statements in a SQL migration file one-at-a-time. Uses get_statements as a generator in a loop. """ conn = config['conn'] write_log(config, "SQL migration from file '{}'".format(migration['filename'])) with open(migration['filename'], 'r') as sqlFile: for stmt in get_statements(sqlFile): write_log(config, "Executing statement:\n{}".format(stmt)) pre_statement(config, migration, stmt) with conn.cursor() as cur: cur.execute(stmt) post_statement(config, migration, stmt) return True
9a7eacb52f1ce3648f5fc336ef74ca89b3cb267b
10,473
from typing import Dict from typing import Any def get_variable_type(n: int, data: Dict[str, Any]) -> str: """Given an index n, and a set of data, return the type of a variable with the same index.""" if n in data[s.BOOL_IDX]: return VariableTypes.BINARY elif n in data[s.INT_IDX]: return VariableTypes.INTEGER return VariableTypes.CONTINUOUS
84b8efdf684aa7843edc938bc387df414d6e761a
10,474
import json
import time


def online_decompilation_main(result_path, path):
    """
    :param result_path: base output folder; contract information is written to
        result/ as a JSON file, contract source code to source_code_path/,
        opcodes to opcode_path/, and downloaded pages to html_path/
    :param path: JSON file containing the address information of all contracts
    :return: total number of contracts, number of timeouts, and the list of
        addresses that timed out
    """
    # url = input("please input the contract tx:")
    # url = sys.argv[0]
    online_decompiler_result_save_file = result_path + "result/"
    solidity_code_result = result_path + "source_code_path/"
    opcode_result = result_path + "opcode_path/"
    html_path = result_path + "html_path/"
    f = open(path)
    data = json.load(f)  # data is a list of dictionaries, one per contract
    all_num = 0
    time_out = 0
    list = []
    l1 = path.split("/")
    list2 = []
    result_json_name = l1[-1]
    for i in data:
        print(all_num, end=' ')
        all_num = all_num + 1
        url = i.get("address")
        dict = {"address": url}
        dict["tx_count"] = i.get("tx_count")
        dict["parse_lose"] = False
        dict["parse_timeout_information"] = ""
        start = time.time()
        try:
            http_get(url, html_path)  # fetch the page for this contract address and store the HTML under html_path
        except Exception as e:
            time_out = time_out + 1
            list2.append(url)
            print(e)
            pass
            continue
        # dict["parsetime"] = 0
        # dict["size"]
        str1, str2 = parsehtml(url, html_path)  # parse the html file corresponding to the contract
        if(str1 == ""):
            dict["parse_lose"] = True
            dict["parse_information"] = "parse html fail~!"
        end = time.time()
        dict["parsetime"] = end - start
        dict["size"] = len(str1)
        # print("url",url)
        # print(end-start)
        save_to_file(solidity_code_result + url + ".sol", str1)
        save_to_file(opcode_result + url + ".txt", str2)
        list.append(dict)  # collect the parsed contract information; the list is written out below
    write_list_to_json(list, result_json_name, online_decompiler_result_save_file)
    return all_num, time_out, list2
    # write_list_to_json saves all parsed contract information as a JSON file in the result folder
c7e130c4dd3e148dd14d45a4cb21b36abe72094e
10,475
def filter_unit_name(merged_df:pd.DataFrame)->pd.DataFrame:
    """
    Iteratively selects names that are close together based on the Levenshtein
    distance (the number of inserted/deleted/substituted characters needed to
    make two strings identical).

    TODO: this iterative approach is very inefficient and would not scale.
    Future work would speed up this algorithm.
    TODO: the max_dist parameter has been manually tuned to be 6. In future,
    some thought should be put into how to calculate this programmatically.
    """
    # accepted string distance between names
    max_dist = 6
    filter_df = pd.DataFrame(columns=merged_df.columns)
    for dist in range(max_dist):
        for index, row in merged_df.iterrows():
            # this checks if the unit_name is already in
            # filtered_df, if so skip the row
            # unit_name_entso is row index 4
            if not any(filter_df.unit_name.isin([row[4]])):
                # UNIT_platts is index 10
                if editdistance.eval(row[4], row[10]) < dist:
                    filter_df = filter_df.append(row)
    return filter_df
2fbf8090f955fbd0ebe09035557db98bbf14355d
10,476
def calculate_snr( Efield: np.ndarray, freqRange: tuple, h_obs: float = 525.0, Nants: int = 1, gain: float = 10.0, ) -> np.ndarray: """ given a peak electric field in V/m and a frequency range, calculate snr Parameters Efield: np.ndarray peak electric field in V/m freqRange: float tuple with low and high end of frequency band in MHz h_obs: float height in km above the earth surface of your observer (default = 525km) Nants: int number of antennas phased together (default = 1) gain: float gain of the antenna(s) in dBi Returns SNR for each trial """ df = ( 10.0 # efields made with 10 MHz bins, would need to redo for different bin size ) freqs = np.arange(freqRange[0], freqRange[1], df) + df / 2.0 V_sig = Nants * voltage_from_field(Efield, freqs, gain) V_noise = np.sqrt(Nants * np.sum(noise_voltage(freqs, h_obs) ** 2.0)) V_sigsum = np.sum(V_sig, axis=1) # print(V_sigsum.mean()) # print(V_noise) return V_sigsum / V_noise
86caa7e8fe7d0ac7d47bebb2ae34e25679d23013
10,477
def retrieve_obj_indices(batch_cls: np.ndarray, num_classes: int): """Helper function to save the object indices for later. E.g. a batch of 3 samples with varying number of objects (1, 3, 1) will produce a mapping [[0], [1,2,3], [4]]. This will be needed later on in the bipartite matching. Parameters ---------- batch_cls : np.ndarray Batch class targets of shape [Batch Size, #Queries, 1]. num_classes : int Number of target classes. Returns ------- obj_indices : list Object indices indicating for each sample at which position the associated objects are. """ obj_indices = [] batch_size = batch_cls.shape[0] for idx in np.arange(0, batch_size, dtype=np.int32): sample = batch_cls[idx] object_indices = np.where(sample != num_classes)[0] num_objects_in_sample = len(object_indices) if idx == 0: sample_obj_indices = np.arange(0, num_objects_in_sample, dtype=np.int32) obj_indices.append(sample_obj_indices.tolist()) last_num_objects = num_objects_in_sample else: start, upto = last_num_objects, last_num_objects + num_objects_in_sample sample_obj_indices = np.arange(start, upto, dtype=np.int32) obj_indices.append(sample_obj_indices.tolist()) last_num_objects = upto return obj_indices
6361a1533f09239782cb96427e686d404bbcf9b5
10,478
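A small check that reproduces the mapping from the docstring, assuming retrieve_obj_indices and numpy are in scope; class id 5 plays the role of the "no object" label here.

import numpy as np

num_classes = 5
batch_cls = np.full((3, 4, 1), num_classes)   # 3 samples, 4 queries each
batch_cls[0, 0, 0] = 1                        # sample 0: one object
batch_cls[1, :3, 0] = [2, 0, 3]               # sample 1: three objects
batch_cls[2, 2, 0] = 4                        # sample 2: one object

print(retrieve_obj_indices(batch_cls, num_classes))   # [[0], [1, 2, 3], [4]]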
def get_code_type(code):
    """
    Determine which type a code belongs to; currently only ['fund', 'stock']
    are supported.
    :return: str, the code type: 'fund' for funds, 'stock' for stocks
    """
    if code.startswith(('00', '30', '60')):
        return 'stock'

    return 'fund'
6fc389ec053080b596368920adcd00e99e159817
10,479
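Two illustrative calls (the codes are arbitrary examples): prefixes 00/30/60 are treated as stocks, everything else as funds.

print(get_code_type("600519"))   # 'stock'  (starts with '60')
print(get_code_type("161725"))   # 'fund'   (no matching prefix)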
import profile def sgd(lr, tparams, grads, inp, cost, opt_ret=None): """ Stochastic gradient descent (SGD) optimizer :param lr: :param tparams: :param grads: :param inp: :param cost: :param opt_ret: :return f_grad_shared, f_update: """ gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k) for k, p in tparams.items()] gsup = [(gs, g) for gs, g in zip(gshared, grads)] outs = [cost] if opt_ret is not None: # opt_ret should be a dict outs += list(opt_ret.values()) f_grad_shared = theano.function(inp, outs, updates=gsup, profile=profile) pup = [(p, p - lr * g) for p, g in zip(itervalues(tparams), gshared)] f_update = theano.function([lr], [], updates=pup, profile=profile) return f_grad_shared, f_update
f9dc199a65e807b47a2f95bb5f20cf3ce4dfef0d
10,480
def build_empty_indexes(ngram_len): """ Build and return the nested indexes structure. The resulting index structure can be visualized this way:: 1. The unigrams index is in indexes[1] with this structure: {1: { u1: {index_docid1: [posting_list1], index_docid2: [posting_list2]}, u2: {index_docid1: [posting_list3], index_docid3: [posting_list4]} } } 2. The bigrams index is in indexes[2] with this structure: {2: { u3, u4: {index_docid1: [posting_list7], index_docid2: [posting_list6]}, u5, u6: {index_docid1: [posting_list5], index_docid3: [posting_list8]} } } and so on, until ngram_len """ indexes = {} for i in range(1, ngram_len + 1): indexes[i] = defaultdict(posting_list) return indexes
019d141a7f02838de3e7464fae5f8dddf0ff7394
10,481
def test_if_in_for_tensor(): """ Feature: JIT Fallback Description: Test fallback with control flow. Expectation: No exception. """ @ms_function def control_flow_for(): x = Tensor(7) y = Tensor(0) for _ in range(3): if y < Tensor(10): y += x return y res = control_flow_for() assert res == 14
6de3a6d41ed1bdae4493ad0a4a6eb8304e7a546c
10,482
def as_dicts(results): """Convert execution results to a list of tuples of dicts for better comparison.""" return [result.to_dict(dict_class=dict) for result in results]
f7d3a77c0ef82439137c2ed6c706afc64d597256
10,483
def merge_dicts(dict_to_merge, merged_dict): """Recursively merge the contents of dict_to_merge into merged_dict. Values that are already present in merged_dict will be overwritten if they are also present in dict_to_merge""" for key, value in iteritems(dict_to_merge): if isinstance(merged_dict.get(key), dict): merge_dicts(value, merged_dict[key]) else: merged_dict[key] = value return merged_dict
867d88d796ce51e075f29f1d530dd8d63b05c531
10,484
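A usage sketch for merge_dicts; the snippet relies on an iteritems helper, assumed here to come from six (an assumption, since the import is not part of the snippet).

from six import iteritems   # assumed source of the iteritems helper

defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 5433}, "debug": True}

print(merge_dicts(overrides, defaults))
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}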
import importlib def _backend_name_to_class(backend_str: str): """ Convert a backend string to the test configuration class for the backend. """ known_backends = _get_all_backends() if backend_str not in known_backends: raise ValueError( f'Unknown backend {backend_str}. ' f'Known backends: {known_backends}' ) conftest = importlib.import_module( f'ibis.backends.{backend_str}.tests.conftest' ) return conftest.TestConf
1ab3aeb0fb16629197a943ff8fba92cacd692b77
10,485
def concat_allocator_cmd(allocator, cmd): """add env variable for different allocator modes.""" new_cmd = cmd if allocator == "direct": new_cmd = "DIRECT_BUFFER=1 " + cmd elif allocator == "unified": new_cmd = "UNIFIED_BUFFER=1 " + cmd elif allocator == "je_direct": new_cmd = "JEMALLOC=1 DIRECT_BUFFER=1 " + cmd elif allocator == "je_cycle": new_cmd = "JEMALLOC=1 " + cmd elif allocator == "je_unified": new_cmd = "JEMALLOC=1 UNIFIED_BUFFER=1 " + cmd return new_cmd
b0275705d9a148c4b197e10847a0846e1e96d822
10,486
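A short illustration of the environment-variable prefixes produced for each allocator mode (assuming concat_allocator_cmd is in scope; the command string is arbitrary).

for mode in ("direct", "unified", "je_direct", "je_cycle", "je_unified", "other"):
    print(mode, "->", concat_allocator_cmd(mode, "./run_benchmark"))
# "other" falls through unchanged: no recognised mode, no prefix added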
from typing import Tuple from typing import Optional from typing import List def generate_property_comment( description: intermediate.PropertyDescription, ) -> Tuple[Optional[Stripped], Optional[List[Error]]]: """Generate the documentation comment for the given property.""" return _generate_summary_remarks_constraints(description)
21e7655b6efb98cbcac776fb988b7af483d9ebc3
10,487
def create_set(X, y, inds):
    """
    Select the entries of X (a list) and y (an ndarray) given by inds and
    wrap them in a SignalAndTarget.
    """
    new_X = []
    for i in inds:
        new_X.append(X[i])
    new_y = y[inds]
    return SignalAndTarget(new_X, new_y)
8f983d948449a39d539e8cf021585b936d23882d
10,488
def find_dates():
    """
    Find valid dates
    """
    text = read_file()
    valid = []

    for i, c in enumerate(text):
        # Find "-" which we use as an identifier for possible dates
        if c == "-":
            try:
                date = validate_date_string(i, text)
                if date:
                    valid.append(date)
            except ValueError:
                continue

    print(", ".join(valid))
    return True
0af1558438e997685bd793125063be35ec466b36
10,489
def handle_400_error(_error): """Return a http 400 error to client""" return make_response(jsonify({'error': 'Misunderstood'}), 400)
76f85fc2eef7737a24178ca495357d0d0c752472
10,490
def control_norm_backward(grad_out, ustream, vstream, abkw, cache):
    """
    Implements the backward pass of the control norm.

    For each incoming sample it does:

        grad = grad_out - (1 - abkw) * vstream * out
        vstream = vstream + grad * out
        grad = grad / scale
        grad_in = grad - (1 - abkw) * ustream
        ustream = ustream + grad_in
    """
    out, scale = cache
    grad_in = np.empty_like(grad_out)
    for idx in range(grad_out.shape[0]):
        grad = grad_out[idx] - (1 - abkw) * vstream * out[idx]
        vstream += grad * out[idx]
        grad = grad / scale[idx]
        grad_in[idx] = grad - (1 - abkw) * ustream
        ustream += grad_in[idx]

    return grad_in, ustream, vstream, (None, )
c42abb380addc595b1fb4d54e56313536d26fccc
10,491
from pathlib import Path from typing import Any def get_random_asset_id_of_dataset( db: Session = Depends(deps.get_db), dataset_id: int = Path(..., example="12"), viz_client: VizClient = Depends(deps.get_viz_client), current_user: models.User = Depends(deps.get_current_active_user), current_workspace: models.Workspace = Depends(deps.get_current_workspace), ) -> Any: """ Get random asset from specific dataset """ dataset = crud.dataset.get_with_task(db, user_id=current_user.id, id=dataset_id) if not dataset: raise DatasetNotFound() offset = get_random_asset_offset(dataset) assets = viz_client.get_assets( user_id=current_user.id, repo_id=current_workspace.hash, # type: ignore branch_id=dataset.task_hash, # type: ignore keyword=None, offset=offset, limit=1, ) if assets.total == 0: raise AssetNotFound() return {"result": assets.items[0]}
02e6e28c27fc5720c9968e89209b3b3222fa7dcd
10,492
def seconds_to_hours(s): """Convert seconds to hours: :param s: Number of seconds :type s: Float :return: Number of hours :rtype: Float """ return float(s) / 3600
9bf9a7b408bf49714c4e873f59ec5433cc4f1ecf
10,493
def assign_change_priority(zone: dict, change_operations: list) -> None: """ Given a list of change operations derived from the difference of two zones files, assign a priority integer to each change operation. The priority integer serves two purposes: 1. Identify the relative order the changes. The target of an alias record will have a higher priority, since it needs to be present when we commit our change transaction. 2. Group together all change operations that can be committed together in the same ResourceRecordSet change transaction. """ rr_prio = defaultdict(int) def is_same_zone(change: dict) -> bool: return change["zone"]["id"] == zone["id"] def is_alias(change: ComparableRecord) -> bool: record = change["record"] return record.alias_dns_name is not None and is_same_zone(change) def is_new_alias(change: ComparableRecord) -> bool: return is_alias(change) and change["operation"] in ("CREATE", "UPSERT") for change in change_operations: if is_new_alias(change): record = change["record"] rr_prio[record.alias_dns_name] += 1 for change in change_operations: if is_new_alias(change): record = change["record"] rr_prio[record.alias_dns_name] += rr_prio[record.name] for change in change_operations: record = change["record"] change["prio"] = rr_prio[record.name]
6e5e538b8d7e6a7a1d4296bf94875814a47054ec
10,494
def contigs_n_bases(contigs): """Returns the sum of all n_bases of contigs.""" return sum(c.n_bases for c in contigs)
57bbc1712739bf8501ad95a5aa72adece6803bc3
10,495
def parse_input_fn_result(result): """Gets features, labels, and hooks from the result of an Estimator input_fn. Args: result: output of an input_fn to an estimator, which should be one of: * A 'tf.data.Dataset' object: Outputs of `Dataset` object must be a tuple (features, labels) with same constraints as below. * A tuple (features, labels): Where `features` is a `Tensor` or a dictionary of string feature name to `Tensor` and `labels` is a `Tensor` or a dictionary of string label name to `Tensor`. Both `features` and `labels` are consumed by `model_fn`. They should satisfy the expectation of `model_fn` from inputs. Returns: Tuple of features, labels, and input_hooks, where features are as described above, labels are as described above or None, and input_hooks are a list of SessionRunHooks to be included when running. Raises: ValueError: if the result is a list or tuple of length != 2. """ input_hooks = [] if isinstance(result, dataset_ops.DatasetV2): iterator = dataset_ops.make_initializable_iterator(result) input_hooks.append(_DatasetInitializerHook(iterator)) result = iterator.get_next() return parse_iterator_result(result) + (input_hooks,)
3cada76012f3a56d30bcc29c3658ef32df26d605
10,496
def demosaic(cfa, pattern='RGGB'): """ Returns the demosaiced *RGB* colourspace array from given *Bayer* CFA using bilinear interpolation. Parameters ---------- CFA : array_like *Bayer* color filter array (CFA). pattern : unicode, optional **{'RGGB', 'BGGR', 'GRBG', 'GBRG'}**, Arrangement of the colour filters on the pixel array. Returns ------- ndarray *RGB* colourspace array. Notes ----- - The definition output is not clipped in range [0, 1] : this allows for direct HDRI / radiance image generation on *Bayer* CFA data and post demosaicing of the high dynamic range data as showcased in this `Jupyter Notebook <https://github.com/colour-science/colour-hdri/\ blob/develop/colour_hdri/examples/\ examples_merge_from_raw_files_with_post_demosaicing.ipynb>`_. References ---------- - :cite:`Losson2010c` Examples -------- >>> CFA = np.array( ... [[0.30980393, 0.36078432, 0.30588236, 0.3764706], ... [0.35686275, 0.39607844, 0.36078432, 0.40000001]]) >>> demosaic(CFA) array([[[ 0.69705884, 0.17941177, 0.09901961], [ 0.46176472, 0.4509804 , 0.19803922], [ 0.45882354, 0.27450981, 0.19901961], [ 0.22941177, 0.5647059 , 0.30000001]], <BLANKLINE> [[ 0.23235295, 0.53529412, 0.29705883], [ 0.15392157, 0.26960785, 0.59411766], [ 0.15294118, 0.4509804 , 0.59705884], [ 0.07647059, 0.18431373, 0.90000002]]]) >>> CFA = np.array( ... [[0.3764706, 0.360784320, 0.40784314, 0.3764706], ... [0.35686275, 0.30980393, 0.36078432, 0.29803923]]) >>> demosaic(CFA, 'BGGR') array([[[ 0.07745098, 0.17941177, 0.84705885], [ 0.15490197, 0.4509804 , 0.5882353 ], [ 0.15196079, 0.27450981, 0.61176471], [ 0.22352942, 0.5647059 , 0.30588235]], <BLANKLINE> [[ 0.23235295, 0.53529412, 0.28235295], [ 0.4647059 , 0.26960785, 0.19607843], [ 0.45588237, 0.4509804 , 0.20392157], [ 0.67058827, 0.18431373, 0.10196078]]]) """ cfa = np.asarray(cfa) R_m, G_m, B_m = masks(cfa.shape, pattern) H_G = np.asarray([[0, 1, 0], [1, 4, 1], [0, 1, 0]]) / 4 H_RB = np.asarray([[1, 2, 1], [2, 4, 2], [1, 2, 1]]) / 4 R = convolve(cfa * R_m, H_RB) G = convolve(cfa * G_m, H_G) B = convolve(cfa * B_m, H_RB) return np.concatenate( [R[..., np.newaxis], G[..., np.newaxis], B[..., np.newaxis]], axis=-1 )
af50a6a8f19cbcbf60cc9e590fa12c12df65e0ca
10,497
import copy


def overwrite_core_fields(new_metadata, old_metadata):
    """For fields like dc and project_metadata, overwrite the items in
    old_metadata with the corresponding fields from new_metadata."""
    old_metadata = copy.deepcopy(old_metadata)
    for cat in ['dc', 'project_metadata']:
        if cat not in new_metadata:
            continue
        for newk, newv in new_metadata[cat].items():
            log.debug('Replacing old field [{}][{}] with {}'.format(cat, newk, newv))
            old_metadata[cat][newk] = newv
    return old_metadata
d6ceb246aee13331046d719e8b8f8dfc794b568a
10,498
import random


def compare_skill(embedding, idx=None):
    """Display a skill and the skills it is most similar to in the embedding.

    Args:
        embedding (array): skills embedding
        idx (int): index used to select a skill, defaults to None
            (if None, a random index is chosen)

    Returns:
        df: dataframe of a skill and the skills it is closest to in the
            embedding by cosine similarity
    """
    if idx is None:
        description = embedding[random.randint(0, len(embedding) - 1)]
    else:
        description = embedding[idx]

    return (
        skills[["preferredLabel", "description"]]
        .assign(cosine_scores=util.pytorch_cos_sim(description, embedding)[0])
        .sort_values(by=["cosine_scores"], ascending=False)
        .head(10)
    )
f885e744a3f32ba297a2429e7c69a2d7c37670da
10,499