content: string (lengths 35 – 762k)
sha1: string (length 40)
id: int64 (range 0 – 3.66M)
import requests def username(UID: str) -> str: """ Get a user's username from their user ID. >>> username("zx7gd1yx") '1' >>> username("7j477kvj") 'AnInternetTroll' >>> username("Sesame Street") Traceback (most recent call last): ... utils.UserError: User with uid 'Sesame Street' not found. """ R: dict = requests.get(f"{API}/users/{UID}").json() try: return R["data"]["names"]["international"] except KeyError: raise UserError(f"User with uid '{UID}' not found.")
c2d66af182a970783ef6e2236c1db3e5a3f80b50
11,076
import logging def handle_exceptions(func): """Decorator that logs any exception raised by the wrapped function and returns None instead.""" logging.basicConfig(level=logging.INFO) def wrapper_func(*args, **kwargs): try: return func(*args, **kwargs) except Exception as e: logging.error(f'{func.__name__} raised an error: {e}') return None return wrapper_func
2d5c428e65cfb823d1afbf2d2c77f98b8722d685
11,077
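A minimal usage sketch for the decorator above; the divide function and its arguments are illustrative and assume handle_exceptions is in scope:

@handle_exceptions
def divide(a, b):
    return a / b

print(divide(10, 2))  # -> 5.0
print(divide(1, 0))   # logs "divide raised an error: division by zero" and returns None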
import numpy as np def apply_hamming_window(image): """Apply a 2-D Hamming window to the image to suppress edge effects (e.g., before cross-correlation).""" window_h = np.hamming(image.shape[0]) window_v = np.hamming(image.shape[1]) image = np.multiply(image.T, window_h).T return np.multiply(image, window_v)
f319506e9a51350664683ede7411e677bbf96ab3
11,078
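A quick sketch of what the record above computes: the two 1-D window multiplications are equivalent to scaling the image by the outer product of row and column Hamming windows (assumes apply_hamming_window and numpy are in scope):

import numpy as np

image = np.ones((4, 6))
window_2d = np.outer(np.hamming(4), np.hamming(6))
np.testing.assert_allclose(apply_hamming_window(image), image * window_2d)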
from math import pi from typing import Tuple from functools import reduce import numpy as np from numpy import dot, exp, cos from scipy.special import erfc def calc_ewald_sum(dielectric_tensor: np.ndarray, real_lattice_set: np.ndarray, reciprocal_lattice_set: np.ndarray, mod_ewald_param: float, root_det_epsilon: float, volume: float, ) -> Tuple[float, float]: """Return real and reciprocal Ewald summations at given parameters""" epsilon_inv = np.linalg.inv(dielectric_tensor) real_sum = 0 # Skip the potential caused by the defect itself for v in real_lattice_set: root_r_inv_epsilon_r = np.sqrt(reduce(dot, [v.T, epsilon_inv, v])) real_sum += \ erfc(mod_ewald_param * root_r_inv_epsilon_r) / root_r_inv_epsilon_r real_part = real_sum / (4 * pi * root_det_epsilon) # Ewald reciprocal part # sum exp(-g * epsilon * g / (4 * ewald ** 2)) / g * epsilon * g [1/A] reciprocal_sum = 0 for g in reciprocal_lattice_set: g_epsilon_g = reduce(dot, [g.T, dielectric_tensor, g]) reciprocal_sum += \ (exp(- g_epsilon_g / 4.0 / mod_ewald_param ** 2) / g_epsilon_g * cos(dot(g, np.zeros(3)))) # [A^2] reciprocal_part = reciprocal_sum / volume return real_part, reciprocal_part
5be08f833c8e44a4afeab48af0f5160278fbf88a
11,079
import time import numpy as np def proximal_descent( x0, grad, prox, step_size, momentum='fista', restarting=None, max_iter=100, early_stopping=True, eps=np.finfo(np.float64).eps, obj=None, benchmark=False): """ Proximal descent algorithm. Parameters ---------- x0 : array, shape (n_length, ), initial variables grad : func, gradient function prox : func, proximal operator function step_size : float, step-size for the gradient descent momentum : str or None, (default='fista'), momentum to choose, possible choices are ('fista', 'greedy', None) restarting : str or None, (default=None), restarting to choose, possible choices are ('obj', 'descent', None), if restarting == 'obj', obj function should be given max_iter : int, (default=100), maximum number of iterations to perform the analysis early_stopping : bool, (default=True), whether to early stop the analysis eps : float, (default=np.finfo(np.float64).eps), stopping parameter w.r.t. the evolution of the cost-function obj : func, (default=None), cost-function benchmark : bool, (default=False), whether or not to save the cost-function and the duration of computation of each iteration Returns ------- x : array, shape (n_atoms, n_voxels), the estimated variable pobj : array or None, shape (n_iter,) or (3 * n_iter,), the saved cost-function times : array or None, shape (n_iter,) or (3 * n_iter,), the saved duration per step """ if benchmark and obj is None: raise ValueError("If 'benchmark' is set True 'obj' should be given.") if restarting == 'obj' and obj is None: raise ValueError("If 'restarting' is set to 'obj', 'obj' should be given.") x_old, x, y, y_old = np.copy(x0), np.copy(x0), np.copy(x0), np.copy(x0) t = t_old = 1 if benchmark: pobj, times = [obj(y)], [0.0] for ii in range(max_iter): if benchmark: t0 = time.process_time() y -= step_size * grad(y) x = prox(y, step_size) if momentum == 'fista': t = 0.5 * (1.0 + np.sqrt(1.0 + 4.0 * t_old**2)) y = x + (t_old - 1.0) / t * (x - x_old) elif momentum == 'greedy': y = x + (x - x_old) elif momentum is None: y = x restarted = False if restarting == 'obj' and (ii > 0) and (pobj[-1] > pobj[-2]): if momentum == 'fista': x = x_old t = 1.0 elif momentum == 'greedy': y = x restarted = True if restarting == 'descent': angle = (y_old - x).ravel().dot((x - x_old).ravel()) if angle >= 0.0: if momentum == 'fista': x = x_old t = 1.0 elif momentum == 'greedy': y = x restarted = True if benchmark: t1 = time.process_time() pobj.append(obj(y)) converged = np.linalg.norm(x - x_old) < eps * np.linalg.norm(x_old) if early_stopping and converged and not restarted: break t_old = t x_old = x y_old = y if benchmark: times.append(t1 - t0) if benchmark: return x, np.array(pobj), np.array(times) else: return x
e6a05c2ef4295b67e3bc3ac2b1608b16d43bc09e
11,080
from stable_baselines_custom.common.atari_wrappers import wrap_deepmind def wrap_atari_dqn(env): """ wrap the environment in atari wrappers for DQN :param env: (Gym Environment) the environment :return: (Gym Environment) the wrapped environment """ return wrap_deepmind(env, frame_stack=True, scale=False)
6c47492fe412b5620f22db17a45aa42968ed9a62
11,082
def get_Theta_CR_i_d_t(pv_setup, Theta_A_d_t, I_s_i_d_t): """Weighted-average solar-cell module temperature (6) Args: pv_setup(str): PV array mounting type ('架台設置型' rack-mounted, '屋根置き型' roof-mounted, or 'その他' other) Theta_A_d_t(ndarray): outdoor air temperature (℃) at time t on date d I_s_i_d_t(ndarray): solar irradiance per unit area (W/m2) on the mounting plane of PV array i at time t on date d Returns: ndarray: weighted-average solar-cell module temperature of PV array i at time t on date d """ # Coefficients f_A, f_B if pv_setup == '架台設置型': f_A_i = get_table_6()[0][0] f_B_i = get_table_6()[0][1] elif pv_setup == '屋根置き型': f_A_i = get_table_6()[1][0] f_B_i = get_table_6()[1][1] elif pv_setup == 'その他': f_A_i = get_table_6()[2][0] f_B_i = get_table_6()[2][1] else: raise NotImplementedError() # Wind speed at the PV array installation surface V_i_d_t = get_V_i_d_t() return Theta_A_d_t + (f_A_i/(f_B_i * V_i_d_t**0.8 + 1)+2) * I_s_i_d_t * 10**(-3) - 2
6c96d9c4692de19909feccf647fd39126358b29c
11,083
from typing import Set def or_equality(input_1: Variable, input_2: Variable, output: Variable) -> Set[Clause]: """ Encode an OR-Gate into a CNF. :param input_1: variable representing the first input of the OR-Gate :param input_2: variable representing the second input of the OR-Gate :param output: variable representing the output of the OR-Gate :return: A set of clauses encoding the OR-Gate """ return { frozenset([-input_1, output]), frozenset([-input_2, output]), frozenset([input_1, input_2, -output]) }
f101b1d7ae3d70e7849133562cd274275f8419a8
11,084
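A brute-force check of the encoding above, using the hypothetical integer literals 1, 2, 3 as the input and output variables: the three clauses are satisfied exactly when output == (input_1 or input_2):

from itertools import product

clauses = {frozenset([-1, 3]), frozenset([-2, 3]), frozenset([1, 2, -3])}

def satisfied(assignment, clauses):
    # a literal l is true iff assignment[abs(l)] == (l > 0)
    return all(any(assignment[abs(l)] == (l > 0) for l in c) for c in clauses)

for a, b, o in product([False, True], repeat=3):
    assert satisfied({1: a, 2: b, 3: o}, clauses) == (o == (a or b))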
import math def keyPosition_to_keyIndex(key_position: int, key: int) -> int: """ Convert a key position to the index of the key the note belongs to. Args ---- key_position : int -> the key position key : int -> total number of keys (enter 4 for a 4K chart) Returns ------- int -> the key index, an integer from 0 to key-1 for the given key count """ return math.floor(key_position * key / 512)
e6edcc1711a283336da046e1f8f174cc7ff87760
11,085
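An illustrative sketch of the mapping above for a 4K chart: the 512-wide position space splits into four 128-wide columns (assumes keyPosition_to_keyIndex is in scope):

for pos in (64, 192, 320, 448):
    print(keyPosition_to_keyIndex(pos, 4))  # -> 0, 1, 2, 3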
from masonite.routes import Patch def patch(url, controller): """Shortcut for Patch HTTP class. Arguments: url {string} -- The url you want to use for the route controller {string|object} -- This can be a string controller or a normal object controller Returns: masonite.routes.Patch -- The Masonite Patch class. """ return Patch().route(url, controller)
c267ca8c2e2c55369584a94cd07aaf26b0b7ae4b
11,087
def get_user(message: discord.Message, username: str): """ Get member by discord username or osu username. """ member = utils.find_member(guild=message.guild, name=username) if not member: for key, value in osu_tracking.items(): if value["new"]["username"].lower() == username.lower(): member = discord.utils.get(message.guild.members, id=int(key)) return member
323ac71e24e4da516263df3a4683ed5fd87138ce
11,088
import colorsys import matplotlib.colors import numpy as np def resaturate_color(color, amount=0.5): """ Saturates the given color by setting its saturation to the given amount. Input can be matplotlib color string, hex string, or RGB tuple. """ if not isinstance(color, np.ndarray) and color in matplotlib.colors.cnames: color = matplotlib.colors.cnames[color] hls = colorsys.rgb_to_hls(*matplotlib.colors.to_rgb(color)) new_hls = hls[0], hls[1], amount new_color = colorsys.hls_to_rgb(*new_hls) return tuple(np.minimum(np.maximum(0, new_color), 1))
2bd1b9b4d9e1d11390efc79f56a89bf7555cbe71
11,089
import arcpy def create_reach_segment(upstream_point, downstream_point, polyline, identifier="HA", junctionID=0, isEnd=False): """Returns a polyline based on two bounding vertices found on the line. """ part = polyline.getPart(0) total_length = polyline.length lineArray = arcpy.Array() #Identifies bounding vertices and associated distance along the line. if isEnd: last_point = polyline.lastPoint upstream_point_dist = round(total_length - polyline.measureOnLine(downstream_point, False), 2) downstream_point_dist = round(total_length - polyline.measureOnLine(last_point, False), 2) else: upstream_point_dist = round(total_length - polyline.measureOnLine(upstream_point, False), 2) downstream_point_dist = round(total_length - polyline.measureOnLine(downstream_point, False), 2) #Retrieves all vertices between bounding vertices of a polyline. for pnt in part: pnt_dist = round(total_length - polyline.measureOnLine(pnt, False), 2) if pnt_dist <= upstream_point_dist and pnt_dist >= downstream_point_dist: if lineArray.count == 0: lineArray.add(upstream_point) lineArray.add(pnt) else: lineArray.add(pnt) #Makes sure the ending downstream point is added to the array if lineArray[lineArray.count - 1].X != downstream_point.X or lineArray[lineArray.count - 1].Y != downstream_point.Y: lineArray.add(downstream_point) #Creates a new polyline from point array new_polyline = arcpy.Polyline(lineArray) identifier = str(identifier) junc = identifier if identifier.upper().find('J') == len(identifier) - 1: identifier = identifier.upper()[0:len(identifier) - 1] + 'R' else: identifier = identifier.upper() + 'R' return {'name': identifier, 'polyline': new_polyline, 'DJunc': junc, 'JuncID': junctionID}
c378fb05c1eda5cde35d5caf60a9d732578ae6d8
11,090
def sample_recipe(user, **params): """Helper for creating recipes so these fields need not be written every single time. defaults.update(params) overrides any default field that also appears in params; keys unique to params are appended to defaults. """ defaults = { 'title': 'Sample recipe', 'time_minutes': 10, 'price': 5.00 } defaults.update(params) return Recipe.objects.create(user=user, **defaults)
11fe56c88cc0c641b1c04b279b2346615b2257c9
11,091
def _unary_geo(op, left, *args, **kwargs): # type: (str, np.array[geoms]) -> np.array[geoms] """Unary operation that returns new geometries""" # ensure 1D output, see note above data = np.empty(len(left), dtype=object) data[:] = [getattr(geom, op, None) for geom in left] return data
d302bdb41c74f7b127df4ccd24dd6bc56c694a56
11,092
import shlex import subprocess as sp def map_func(h, configs, args): """Run the polygons command line in parallel. """ if args.verbose: cmd = "python {} -i {}/threshold{}.tif -o {}/threshold{}.shp -v".format( configs["path"]["polygons"], configs["path"]["output"], h, configs["path"]["output"], h ) print(cmd) else: cmd = "python {} -i {}/threshold{}.tif -o {}/threshold{}.shp".format( configs["path"]["polygons"], configs["path"]["output"], h, configs["path"]["output"], h ) cmd_args = shlex.split(cmd) stdout, stderr = sp.Popen( cmd_args, stdin=sp.PIPE, stdout=sp.PIPE, stderr=sp.PIPE ).communicate() if args.verbose: print(stdout, stderr) return True
4ff4e961b2d0eb9a19b277a0b8e2ef165aa43819
11,093
def check_health(request: HttpRequest) -> bool: """Check app health.""" return True
20d572edd68e1518e51cbdbe331c17798bc850fe
11,095
def return_galo_tarsilo(message): """Middle function for returning the "gaucho" video. Parameters ---------- message : telebot.types.Message The message object. Returns ------- msg : str URL of the video. """ return 'https://www.youtube.com/watch?v=MVYEwZFixJ8'
58307b763d139dc38220b9a93af15644ccd32959
11,096
def preimage_func(f, x): """Pre-image a function at a set of input points. Parameters ---------- f : typing.Callable The function we would like to pre-image. The output type must be hashable. x : typing.Iterable Input points at which we would like to evaluate `f`. `x` must be of a type acceptable by `f`. Returns ------- D : dict(object, list(object)) This dictionary maps the output of `f` to the list of `x` values that produce it. """ D = {} for xx in x: D.setdefault(f(xx), []).append(xx) return D
6ca0496aff52cff1ce07e327f845df4735e3266a
11,097
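A small usage sketch of the helper above, grouping integers by their residue mod 3 (assumes preimage_func is in scope):

D = preimage_func(lambda n: n % 3, range(10))
print(D)  # -> {0: [0, 3, 6, 9], 1: [1, 4, 7], 2: [2, 5, 8]}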
import warnings import ete3 from ete3 import Tree import dendropy as dp def load_tree(tree, fmt=None): """ Load a tree into an ete3 tree data structure. tree: some sort of tree. can be an ete3.Tree (returns self), a dendropy Tree (converts to newick and drops root), a newick file or a newick string. fmt: format for reading tree from newick. 0-9 or 100. See ete3 documentation for how these are read (http://etetoolkit.org/docs/latest/tutorial/tutorial_trees.html#reading-and-writing-newick-trees). As of ETE3.1.1, these numbers mean: | ====== ============================================== | FORMAT DESCRIPTION | ====== ============================================== | 0 flexible with support values | 1 flexible with internal node names | 2 all branches + leaf names + internal supports | 3 all branches + all names | 4 leaf branches + leaf names | 5 internal and leaf branches + leaf names | 6 internal branches + leaf names | 7 leaf branches + all names | 8 all names | 9 leaf names | 100 topology only | ====== ============================================== if fmt is None, try to parse without a format descriptor, then these formats in numerical order. Returns an ete3 tree object. """ # Already an ete3 tree. if type(tree) is ete3.TreeNode: return tree # Convert dendropy tree into newick (drop root) if type(tree) is dp.Tree: tree = tree.as_string(schema="newick",suppress_rooting=True) # If we get here, we need to convert. If fmt is not specified, try to parse # without a format string. if fmt is None: try: t = Tree(tree) except ete3.parser.newick.NewickError: # Try all possible formats now, in succession w = "\n\nCould not parse tree without format string. Going to try different\n" w += "formats. Please check output carefully.\n\n" warnings.warn(w) formats = list(range(10)) formats.append(100) t = None for f in formats: try: t = Tree(tree,format=f) w = f"\n\nSuccessfully parsed tree with format style {f}.\n" w += "Please see ete3 documentation for details:\n\n" w += "http://etetoolkit.org/docs/latest/tutorial/tutorial_trees.html#reading-and-writing-newick-trees\n\n" warnings.warn(w) break except ete3.parser.newick.NewickError: continue if t is None: err = "\n\nCould not parse tree!\n\n" raise ValueError(err) else: # Try a conversion with the specified format t = Tree(tree,format=fmt) return t
efc727fee6f12b4a8bc0e8c2b2319be2a820df13
11,100
from typing import Dict def load_extract(context, extract: Dict) -> str: """ Upload extract to Google Cloud Storage. Return GCS file path of uploaded file. """ return context.resources.data_lake.upload_df( folder_name="nwea_map", file_name=extract["filename"], df=extract["value"] )
c9d5fedf6f2adcb871abf4d9cead057b0627267a
11,101
import numpy as np import colorcet as cc def _make_default_colormap(): """Return the default colormap, with custom first colors.""" colormap = np.array(cc.glasbey_bw_minc_20_minl_30) # Reorder first colors. colormap[[0, 1, 2, 3, 4, 5]] = colormap[[3, 0, 4, 5, 2, 1]] # Replace first two colors. colormap[0] = [0.03137, 0.5725, 0.9882] colormap[1] = [1.0000, 0.0078, 0.0078] return colormap
ca6275fc60efe198be5a89662d791f6c47e45b24
11,102
import numpy as np def poly_to_box(poly): """Convert a polygon into a tight bounding-box array [x_min, y_min, x_max, y_max].""" box = np.zeros(4, dtype=np.float32) box[0] = min(poly[:, 0]) box[2] = max(poly[:, 0]) box[1] = min(poly[:, 1]) box[3] = max(poly[:, 1]) return box
4fb8cea86494c34832f43dbf7f942a214dc2e010
11,103
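A minimal usage sketch for the record above, with a hypothetical triangle as an (N, 2) array of [x, y] vertices (assumes poly_to_box and numpy are in scope):

import numpy as np

poly = np.array([[2.0, 1.0], [5.0, 4.0], [3.0, 6.0]])
print(poly_to_box(poly))  # -> [2. 1. 5. 6.], i.e. [x_min, y_min, x_max, y_max]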
import torch def default_collate(batch): """Puts each data field into a tensor with outer dimension batch size""" error_msg = "batch must contain tensors, numbers, dicts or lists; found {}" elem_type = type(batch[0]) if isinstance(batch[0], torch.Tensor): return torch.stack(batch, 0) elif ( elem_type.__module__ == "numpy" and elem_type.__name__ != "str_" and elem_type.__name__ != "string_" ): # pragma: no cover elem = batch[0] if elem_type.__name__ == "ndarray": return torch.stack([torch.from_numpy(b) for b in batch], 0) if elem.shape == (): # scalars py_type = float if elem.dtype.name.startswith("float") else int return numpy_type_map[elem.dtype.name](list(map(py_type, batch))) elif isinstance(batch[0], int_classes): # pragma: no cover return torch.LongTensor(batch) elif isinstance(batch[0], float): # pragma: no cover return torch.DoubleTensor(batch) elif isinstance(batch[0], string_classes): # pragma: no cover return batch elif isinstance(batch[0], container_abcs.Mapping): # pragma: no cover return {key: default_collate([d[key] for d in batch]) for key in batch[0]} elif isinstance(batch[0], container_abcs.Sequence): # pragma: no cover transposed = zip(*batch) return [default_collate(samples) for samples in transposed] raise TypeError((error_msg.format(type(batch[0]))))
576366ac5e57a84a015ffa3e5e80e8d4b62ac329
11,104
def updatestatus(requestdata, authinfo, acldata, supportchan, session): """Update the /Status page of a user.""" if requestdata[2] in acldata['wikis']: wikiurl = str('https://' + acldata['wikis'][requestdata[2]]['url'] + '/w/api.php') sulgroup = acldata['wikis'][requestdata[2]]['sulgroup'] else: return 'Wiki could not be found' if requestdata[0] in acldata['users']: if sulgroup in acldata['users'][requestdata[0]]['groups']: request = [acldata['users'][requestdata[0]]['groups'][sulgroup], requestdata[3]] else: return f"Data not found for {sulgroup} in {requestdata[0]}, Keys were: {acldata['users'][requestdata[0]].keys()}" elif requestdata[1][0] in acldata['sulgroups'][sulgroup]['cloaks']: request = [requestdata[1][1], requestdata[3]] else: ERRNOAUTH = "You don't seem to be authorised to use this plugin. Check you are signed into NickServ and try again." if supportchan is None: return ERRNOAUTH return f'{ERRNOAUTH} If this persists, ask for help in {supportchan}.' return mwapi.main( performer=request[0], target=str('User:' + (str(request[0]) + '/Status')), action='create', reason=str('Updating status to ' + str(request[1]) + ' per ' + str(request[0])), url=wikiurl, authinfo=[authinfo[0], authinfo[1]], content=str(request[1]), session=session, )
f305f1c4ceb6b4cfd949a1005a961b710e81740f
11,105
def sortByTimeStamps(paths): """Sorts the given list of file paths by their time-stamp :paths: The file paths to sort by time-stamp :returns: A sorted list of file paths """ sortedPaths = [] timeStamps = [] # Extract the YYYYMMDD & HHMMSS timestamps from the file paths for p in paths: timeStamp = getTimeStamps(p) timeStamps.append((int(timeStamp[0]), int(timeStamp[1]))) # Sort the (date, time) tuples in ascending order timeStamps = sorted(timeStamps, key=lambda x: (int(x[0]), int(x[1]))) # Sort the paths by comparing to the sorted timestamps for t in timeStamps: for p in paths: timeStamp = getTimeStamps(p) if (int(timeStamp[0]), int(timeStamp[1])) == t: sortedPaths.append(p) return sortedPaths
01d60e0f3d793ca17f04462911406d03a6c3ddf0
11,106
def get_xml_nk_bands(xml_tree): """ Function to specifically get kpoint (cartesian) coordinates and corresponding eigenvalues (in Hartree) """ k_points_car = [] k_eigenvalues = [] k_occupations = [] for ks_energies in xml_tree.iter(tag='ks_energies'): k_points_car.append( get_xml_data(ks_energies,'k_point',as_type=float) ) k_eigenvalues.append( get_xml_data(ks_energies,'eigenvalues',as_type=float) ) k_occupations.append( get_xml_data(ks_energies,'occupations',as_type=float) ) k_points_car = np.array(k_points_car) k_eigenvalues = np.array(k_eigenvalues) k_occupations = np.array(k_occupations) return k_points_car, k_eigenvalues, k_occupations
e510995ee468552d395c179aa8713f159b1ad0e1
11,107
def enumerate(server, directory_list, filenames): """ Enumerate directories and files on the web server. """ print('\n[*] Enumerating resources.') to_search = [server] directories = [] resources = [] print('[*] Recursively searching for directories.') while len(to_search) != 0: base_url = to_search.pop(0) print('[*] Searching for directories in {0}'.format(base_url)) to_search.extend(check(base_url, directory_list)) directories.append(base_url) resources.append(base_url) if len(filenames) > 0: print('\n[*] Searching for files.') for url in directories: resources.extend(check(url, filenames, False)) return resources
e9b2eb94b71b48dcc032448369a413cc4c1790ba
11,108
import numpy as np import pandas as pd def deep_equals(x, y): """Test two objects for equality in value. Correct if x/y are one of the following valid types: types compatible with != comparison pd.Series, pd.DataFrame, np.ndarray lists, tuples, or dicts of a valid type (recursive) Important note: this function will return "not equal" if the types of x, y are different; for instance, bool and numpy.bool are *not* considered equal Parameters ---------- x: object y: object Returns ------- bool - True if x and y are equal in value; x and y do not need to be equal in reference """ if type(x) != type(y): return False # we now know all types are the same # so now we compare values if isinstance(x, pd.Series): if x.dtype != y.dtype: return False # if columns are object, recurse over entries and index if x.dtype == "object": index_equal = x.index.equals(y.index) return index_equal and deep_equals(list(x.values), list(y.values)) else: return x.equals(y) elif isinstance(x, pd.DataFrame): if not x.columns.equals(y.columns): return False # if columns are equal and at least one is object, recurse over Series if sum(x.dtypes == "object") > 0: return np.all([deep_equals(x[c], y[c]) for c in x.columns]) else: return x.equals(y) elif isinstance(x, np.ndarray): if x.dtype != y.dtype: return False return np.array_equal(x, y, equal_nan=True) # recursion through lists, tuples and dicts elif isinstance(x, (list, tuple)): return _tuple_equals(x, y) elif isinstance(x, dict): return _dict_equals(x, y) elif x != y: return False return True
27f5dc79e5c3b9e8a08a4bbd0db847995f0fa9ef
11,109
import re def _is_valid_img_uri(uri: str) -> bool: """ Returns True if the string is empty or is a JPEG data URI that can be saved in the database. """ regex = r"data:image/jpeg;base64," return not uri or bool(re.match(regex, uri))
0836bfa447b42fb7ed24fc897de8fb40c6e593b2
11,113
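A usage sketch of the validator above; the base64 payload is an illustrative placeholder (assumes _is_valid_img_uri is in scope):

print(_is_valid_img_uri("data:image/jpeg;base64,/9j/4AAQ"))  # True
print(_is_valid_img_uri(""))                                 # True: empty values are allowed
print(_is_valid_img_uri("http://example.com/a.jpg"))         # False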
def update_config(a, b, mode="default"): """Update the configuration a with b.""" if not b: return a from_version = get_config_version(a) to_version = get_config_version(b) if from_version == 1 and to_version == 2: # When updating the configuration to a newer version, we clear all user fields. a = {k: v for k, v in a.items() if k in _non_user_fields} return replace_config(a, b) if mode == "default" or mode == "merge": return merge_config(a, b) if mode == "replace": return replace_config(a, b) raise ValueError("Invalid configuration update mode: %s" % mode)
464adc3a4daeedb246d911caab5477ff4d55841e
11,114
def create_app(config_name='DevelopmentConfig'): """Create the Flask application from a given config object type. Args: config_name (string): Config instance name. Returns: Flask Application with config instance scope. """ app = Flask(__name__) {{cookiecutter.package_name | upper}}(app, config_name=config_name) return app
6022c976ffa2bf6afa692bb96c5c53bfed4a7d32
11,116
import numpy as np def label_accuracy_score(hist): """Returns the mean IU evaluation result computed from a confusion-matrix histogram.""" with np.errstate(divide='ignore', invalid='ignore'): iu = np.diag(hist) / ( hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist) ) mean_iu = np.nanmean(iu) return mean_iu
5e129604d476f17e0cfd7a30f785775266763432
11,117
def get_page_title(title: str): """ Return the page title shown on browser tabs """ return f'{title} | NeuraHS'
3df2de16325cf0c4c849e7d09111ea87e36c309a
11,118
def make_pointer_union_printer(val): """Factory for an llvm::PointerUnion printer.""" pointer, value = get_pointer_int_pair(val['Val']) if not pointer or not value: return None pointer_type = val.type.template_argument(int(value)) string = 'llvm::PointerUnion containing %s' % pointer_type return make_printer(string, [('pointer', pointer.cast(pointer_type))])
40d12a45a05fb49dd32b1a450b7dff23ab0ece7c
11,119
def get_paramvals_percentile(table, percentile, chi2_arr): """ Isolates the lowest chi^2 values up to the given percentile and takes a random sample of 10 Parameters ---------- table: pandas dataframe Mcmc chain dataframe percentile: int Percentile to use chi2_arr: array Array of chi^2 values Returns --------- mcmc_table_pctl: pandas dataframe Random sample of 10 rows from the lowest chi^2 values bf_params: array Best-fit parameters corresponding to the smallest chi^2 """ percentile = percentile/100 table['chi2'] = chi2_arr table = table.sort_values('chi2').reset_index(drop=True) slice_end = int(percentile*len(table)) mcmc_table_pctl = table[:slice_end] # Best fit params are the parameters that correspond to the smallest chi2 bf_params = mcmc_table_pctl.drop_duplicates().reset_index(drop=True).\ values[0][:5] # Sample a random 10 of the lowest chi2 rows mcmc_table_pctl = mcmc_table_pctl.drop_duplicates().sample(10) return mcmc_table_pctl, bf_params
1d83c54b61446aecf0a7fcbf4d8ae49e96a25b3f
11,120
def get_text_from_span(s, span): """ Return the text from the given (start, end) indices of a text (list of words) """ start, end = span return " ".join(s[start:end])
df58cf8056039b183dc421c94baa22176fe23e84
11,121
import time import struct def __timestamp(): """Generate timestamp data for pyc header.""" today = time.time() ret = struct.pack(b'=L', int(today)) return ret
477c8473026c706785b4091bbbf647b86eaa560f
11,122
def reverse_index(alist, value): """Find the index of the last occurrence of an element""" return len(alist) - alist[::-1].index(value) - 1
21fc4e17a91000085123ea4be42c72cb27a3482c
11,123
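A quick usage sketch (assumes reverse_index is in scope):

vals = [3, 1, 4, 1, 5]
print(reverse_index(vals, 1))  # -> 3, the index of the last occurrence of 1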
def generate_twist(loops, non_interacting=False): """Generate initial configuration to start braid moves where the active end has crossed outside the loops and they have an initial twist. Format: ' │ │ │┃' '┏━━━━━┛' '┃│ │ │ ' '┗━┓│ │ ' ' │┃│ │ ' '┏│┛│ │ ' '┃│ │ │ ' Keyword arguments: non_interacting -- loops which the active end cannot interact with (default False) -- if False, all loops are interactable -- if Integer (n), n loops randomly selected to be non-interactive -- if List (j,k,l), loops j, k and l (from left) are non-interactive """ # we can use the peppino generator for the first part of this configuration # we just add the additional lines spaces = (loops * 2) + 1 row_1, row_2, row_3 = generate_peppino(loops, non_interacting) if row_3[1] == "┆": first_loop = "┆" else: first_loop = "│" # row 4 row_4 = list(row_3) # add first crossing row_4[0] = "┗" row_4[1] = "━" row_4[2] = "┓" # row 5 row_5 = list(row_3) row_5[0] = " " row_5[1] = first_loop row_5[2] = "┃" # row 6 row_6 = list(row_3) row_6[0] = "┏" row_6[1] = first_loop row_6[2] = "┛" # row 7 row_7 = list(row_3) return ( row_1, row_2, row_3, "".join(row_4), "".join(row_5), "".join(row_6), "".join(row_7), )
fdeb58b49d2e559c4d0ccfc24e439057683f7e96
11,124
def IsShuttingDown(_shutting_down=_shutting_down): """ Whether the interpreter is currently shutting down. For use in finalizers, __del__ methods, and similar; it is advised to early bind this function rather than look it up when calling it, since at shutdown module globals may be cleared. """ return _shutting_down[0]
6cbc5d3388ee8eb0cabbb740fc5e0b8f2ac4714a
11,126
import numpy as np def ellipse_center(a): """ Return the center (x0, y0) of a fitted ellipse. Parameters ---------- a : fitted_ellipse_obj """ b, c, d, f, g, a = a[1]/2, a[2], a[3]/2, a[4]/2, a[5], a[0] num = b*b - a*c x0 = (c*d - b*f)/num y0 = (a*f - b*d)/num return np.array([x0, y0])
66487a641c35d2c1c1c8a8c7c0bb129eda55f4c4
11,127
def _to_average_temp(name, temperature_map): """ Converts the list of temperatures associated to a label to a list of average temperatures. If the sensor does not exist, it will return _default_temperature. If the high or critical temperature thresholds are invalid, it will use the values from _default_temperature instead. :param name: Name of the sensor to check. :param temperature_map: Dictionary of temperatures, as returned by psutil.sensors_temperatures :return: List containing the current, high and critical temperatures of the label. """ if name not in temperature_map: return _default_temperature temps = [0.0, 0.0, 0.0] for temp in temperature_map[name]: current = temp.current if temp.current is not None and temp.current > -50.0 else _default_temperature[0] high = temp.high if temp.high is not None and temp.high > 0.0 else _default_temperature[1] critical = temp.critical if temp.critical is not None and temp.critical > 0.0 else _default_temperature[2] temps[0] += current temps[1] += high temps[2] += critical size = float(len(temperature_map[name])) temps[0] = _round(temps[0] / size) temps[1] = _round(temps[1] / size) temps[2] = _round(temps[2] / size) return temps
88c3b5d0bdd64f782a26a7dc11d44dc39e6efc82
11,128
import numpy as np def segments_decode(aseg): """ Decode segments. Parameters ---------- aseg : numpy.ndarray of uint32 Returns ------- segments : list of list of int """ sentinel = 2 ** 32 - 1 segments = [] l = [] for x in list(aseg): if x == sentinel: segments.append(l) l = [] else: l.append(x) return segments
d5edf85ae489b62c8820c3616a75a9ca305f06ec
11,129
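A round-trip sketch of the decoder above: the encoder is assumed to terminate each segment with the uint32 sentinel 2**32 - 1 (assumes segments_decode and numpy are in scope):

import numpy as np

SENTINEL = 2 ** 32 - 1
aseg = np.array([1, 2, SENTINEL, 7, SENTINEL], dtype=np.uint32)
print(segments_decode(aseg))  # two segments: [1, 2] and [7]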
def cvGetReal3D(*args): """cvGetReal3D(CvArr arr, int idx0, int idx1, int idx2) -> double""" return _cv.cvGetReal3D(*args)
4130a4f9571bdea1c9e54b5fcf7d1d0f5c3ce083
11,130
def get_wf_double_FF_opt( molecule, pcm_dielectric, linked=False, qchem_input_params=None, name="douple_FF_opt", db_file=">>db_file<<", **kwargs, ): """ Firework 1 : write QChem input for an FF optimization, run FF_opt QCJob, parse directory and insert into db, pass relaxed molecule to fw_spec and on to fw2, Firework 2 : write QChem input for an optimization in the presence of a PCM, using the molecule passed from fw1, run FF_opt QCJob, parse directory and insert into db Args: molecule (Molecule): input molecule to be optimized and run. pcm_dielectric (float): The PCM dielectric constant. max_cores (int): Maximum number of cores to parallelize over. Defaults to 32. qchem_input_params (dict): Specify kwargs for instantiating the input set parameters. Basic uses would be to modify the default inputs of the set, such as dft_rung, basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See pymatgen/io/qchem/sets.py for default values of all input parameters. For instance, if a user wanted to use a more advanced DFT functional, include a pcm with a dielectric of 30, and use a larger basis, the user would set qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set": "6-311++g**"}. However, more advanced customization of the input is also possible through the overwrite_inputs key which allows the user to directly modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to inputs.py to print an actual input file. For instance, if a user wanted to set the sym_ignore flag in the rem section of the input file to true, then they would set qchem_input_params = {"overwrite_inputs": "rem": {"sym_ignore": "true"}}. Of course, overwrite_inputs could be used in conjunction with more typical modifications, as seen in the test_double_FF_opt workflow test. qchem_cmd (str): Command to run QChem. db_file (str): path to file containing the database credentials. kwargs (keyword arguments): additional kwargs to be passed to Workflow Returns: Workflow """ first_qchem_input_params = qchem_input_params or {} # Optimize the molecule in vacuum fw1 = FrequencyFlatteningOptimizeFW( molecule=molecule, name="first_FF_no_pcm", qchem_cmd=">>qchem_cmd<<", max_cores=">>max_cores<<", qchem_input_params=first_qchem_input_params, linked=linked, db_file=db_file, ) # Optimize the molecule in PCM second_qchem_input_params = {"pcm_dielectric": pcm_dielectric} for key in first_qchem_input_params: second_qchem_input_params[key] = first_qchem_input_params[key] fw2 = FrequencyFlatteningOptimizeFW( name="second_FF_with_pcm", qchem_cmd=">>qchem_cmd<<", max_cores=">>max_cores<<", qchem_input_params=second_qchem_input_params, linked=linked, db_file=db_file, parents=fw1, ) fws = [fw1, fw2] wfname = f"{molecule.composition.reduced_formula}:{name}" return Workflow(fws, name=wfname, **kwargs)
d21b04035d41beb3a24e9cbba45c420bd8d9b727
11,131
def get_element_action_names(element): """Get a list of all the actions the specified accessibility object can perform. Args: element: The AXUIElementRef representing the accessibility object Returns: an array of actions the accessibility object can perform (empty if the accessibility object supports no actions) """ error_code, names = AXUIElementCopyActionNames(element, None) error_messages = { kAXErrorIllegalArgument: "One or both of the arguments is an illegal value.", kAXErrorInvalidUIElement: "The AXUIElementRef is invalid.", kAXErrorFailure: "There was some sort of system memory failure.", kAXErrorCannotComplete: "The function cannot complete " "because messaging has failed in some way.", kAXErrorNotImplemented: "The process does not fully support the accessibility API.", } check_ax_error(error_code, error_messages) return names
f906f9565eb72b060d9e4c69ab052dc6001f192a
11,133
import re def ParseFile(fname): """Parse a microcode.dat file and return the component parts Args: fname: Filename to parse Returns: 3-Tuple: date: String containing date from the file's header license_text: List of text lines for the license file microcodes: Dict of Microcode objects from the file, keyed by name """ re_date = re.compile(r'/\* *(.* [0-9]{4}) *\*/$') re_license = re.compile(r'/[^-*+] *(.*)$') re_name = re.compile(r'/\* *(.*)\.inc *\*/', re.IGNORECASE) microcodes = {} license_text = [] date = '' data = [] name = None with open(fname) as fd: for line in fd: line = line.rstrip() m_date = re_date.match(line) m_license = re_license.match(line) m_name = re_name.match(line) if m_name: if name: microcodes[name] = Microcode(name, data) name = m_name.group(1).lower() data = [] elif m_license: license_text.append(m_license.group(1)) elif m_date: date = m_date.group(1) else: data.append(line) if name: microcodes[name] = Microcode(name, data) return date, license_text, microcodes
2774157dd256f11268a7ea4ee3d941e7aea1ca4f
11,134
import numpy def cal_q_vel(guidance_v): """ For now, optimize with the default reference velocity; switch to the coarse guidance velocity once debugging matures. :return: """ q_vel = numpy.zeros((1, n_t + 1)) if flag_obs == 0: q_vel[0][0] = -ref_v q_vel[0][n_t] = ref_v if flag_obs == 1: for i in range(n_t + 1): if i < 1: q_vel[0][i] = -guidance_v[0][i] elif i >= n_t: q_vel[0][i] = guidance_v[0][i - 1] else: q_vel[0][i] = guidance_v[0][i - 1] - guidance_v[0][i] # print('q_vel:', numpy.shape(q_vel), q_vel) return q_vel
b7551e7b911c5e0fd27a1e90f00c1e1a3a60f53f
11,135
def tf_decode( ref_pts, ref_theta, bin_x, res_x_norm, bin_z, res_z_norm, bin_theta, res_theta_norm, res_y, res_size_norm, mean_sizes, Ss, DELTAs, R, DELTA_THETA, ): """Turns bin-based box3d format into an box_3d Input: ref_pts: (B,p,3) [x,y,z] ref_theta: (B,p) [ry] or a constant value bin_x: (B,p,K), bin assignments along X-axis res_x_norm: (B,p,K), normalized residual corresponds to bin_x bin_z: (B,p,K), bin assignments along Z-axis res_z_norm: (B,p,K), normalized residual corresponds to bin_z bin_theta: (B,p,K), bin assignments for orientation res_theta_norm: (B,p,K), normalized residual corresponds to bin_theta res_y: (B,p,K), residual w.r.t. ref_pts along Y-axis res_size_norm: (B,p,K,3), residual w.r.t. the average object size [l,w,h] mean_sizes, (B,p,K,3), average object size [l,w,h] Ss: XZ search range for different classes [-Ss, +Ss] DELTAs: XZ_BIN_LENs for different classes R: THETA search range [-R, +R] DELTA_THETA: THETA_BIN_LEN = 2 * R / NUM_BIN_THETA Output: boxes_3d: (B,p,K,7) 3D box in box_3d format [x, y, z, l, w, h, ry] """ ndims = ref_pts.shape.ndims dx = (tf.to_float(bin_x) + 0.5) * DELTAs - Ss + res_x_norm * DELTAs dz = (tf.to_float(bin_z) + 0.5) * DELTAs - Ss + res_z_norm * DELTAs if ndims == 3: # rpn K = tf.shape(bin_x)[2] if isinstance(ref_theta, tf.Tensor): # rotate along y all_rys = ref_theta ry_sin = tf.sin(all_rys) ry_cos = tf.cos(all_rys) rot_mats = tf.stack( [ tf.stack([ry_cos, ry_sin], axis=2), tf.stack([-ry_sin, ry_cos], axis=2), ], axis=3, ) rot_mats = tf.tile(tf.expand_dims(rot_mats, 2), [1, 1, K, 1, 1]) dxz_rot = tf.matmul( rot_mats, tf.expand_dims(tf.stack([dx, dz], axis=3), axis=3), transpose_a=True, transpose_b=True, ) dxz_rot = tf.squeeze(tf.matrix_transpose(dxz_rot), axis=3) dx = dxz_rot[:, :, :, 0] dz = dxz_rot[:, :, :, 1] else: assert ref_theta == 0 ref_pts_tiled = tf.tile(tf.expand_dims(ref_pts, axis=2), [1, 1, K, 1]) x = dx + ref_pts_tiled[:, :, :, 0] z = dz + ref_pts_tiled[:, :, :, 2] y = res_y + ref_pts_tiled[:, :, :, 1] elif ndims == 2: # rcnn K = tf.shape(bin_x)[1] if isinstance(ref_theta, tf.Tensor): # rotate along y all_rys = ref_theta ry_sin = tf.sin(all_rys) ry_cos = tf.cos(all_rys) rot_mats = tf.stack( [ tf.stack([ry_cos, ry_sin], axis=1), tf.stack([-ry_sin, ry_cos], axis=1), ], axis=2, ) rot_mats = tf.tile(tf.expand_dims(rot_mats, 1), [1, K, 1, 1]) dxz_rot = tf.matmul( rot_mats, tf.expand_dims(tf.stack([dx, dz], axis=2), axis=2), transpose_a=True, transpose_b=True, ) dxz_rot = tf.squeeze(tf.matrix_transpose(dxz_rot), axis=2) dx = dxz_rot[:, :, 0] dz = dxz_rot[:, :, 1] else: assert ref_theta == 0 ref_pts_tiled = tf.tile(tf.expand_dims(ref_pts, axis=1), [1, K, 1]) x = dx + ref_pts_tiled[:, :, 0] z = dz + ref_pts_tiled[:, :, 2] y = res_y + ref_pts_tiled[:, :, 1] ref_theta = tf.tile(tf.expand_dims(ref_theta, axis=1), [1, K]) theta = ( ref_theta + (tf.to_float(bin_theta) + 0.5) * DELTA_THETA - R + res_theta_norm * 0.5 * DELTA_THETA ) size = mean_sizes + res_size_norm * mean_sizes if ndims == 3: l = size[:, :, :, 0] w = size[:, :, :, 1] h = size[:, :, :, 2] # combine all boxes_3d = tf.stack([x, y, z, l, w, h, theta], axis=3) # y+h/2 elif ndims == 2: l = size[:, :, 0] w = size[:, :, 1] h = size[:, :, 2] # combine all boxes_3d = tf.stack([x, y, z, l, w, h, theta], axis=2) # y+h/2 return boxes_3d
720252aaad2b8d380d30d871e97d47b2c9309a68
11,136
import gin import numpy as np def _histogram_discretize(target, num_bins=gin.REQUIRED): """Discretization based on histograms.""" discretized = np.zeros_like(target) for i in range(target.shape[0]): discretized[i, :] = np.digitize(target[i, :], np.histogram( target[i, :], num_bins)[1][:-1]) return discretized
14108b9208dca586f7fd39dac3a5a17f1e5a2928
11,137
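A small worked example of the discretizer above: np.digitize against the histogram's lower bin edges maps each value to a bin id in 1..num_bins (assumes _histogram_discretize is in scope; passing num_bins explicitly sidesteps the gin default):

import numpy as np

target = np.array([[0.0, 0.2, 0.4, 0.6, 0.8, 1.0]])
print(_histogram_discretize(target, num_bins=2))  # -> [[1. 1. 1. 2. 2. 2.]]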
def apply_acl(instance, content): """Apply ACLs.""" any_acl_applied = False if not isinstance(instance, roleable.Roleable): return any_acl_applied instance_acl_dict = {(l.ac_role_id, p.id): l for p, l in instance.access_control_list} person_ids = set() for role_id, data in content.get("access_control_list", {}).items(): person_ids |= {i["id"] for i in data["added"] + data["deleted"]} person_dict = {p.id: p for p in all_models.Person.query.filter( all_models.Person.id.in_(person_ids)) } acr_dict = {r.id: r for r in ACR.get_ac_roles_for(instance.type).values()} for role_id, data in content.get("access_control_list", {}).items(): role_id = int(role_id) if role_id not in acr_dict: continue for add in data["added"]: if (role_id, add["id"]) not in instance_acl_dict: instance.add_person_with_role_id(person_dict[add["id"]], role_id) any_acl_applied = True for delete in data["deleted"]: if (role_id, delete["id"]) in instance_acl_dict: instance.acr_id_acl_map[role_id].remove_person( person_dict[delete["id"]] ) any_acl_applied = True return any_acl_applied
134f4ae98018626712c2f918ce5b501129169a30
11,138
import json def serialize(results): """Serialize a ``QueryDict`` into json.""" serialized = {} for result in results: serialized.update(result.to_dict()) return json.dumps(serialized, indent=4)
1ce996e1172344ba72ccbb9487b51b0efc30fa5c
11,139
def allowed_once (cave, visited): """Only allows small caves to be visited once. Returns False if `cave` is small and already in `visited`. """ return big(cave) or (small(cave) and cave not in visited)
f3619c1d230de50fab539103084457413f30a74e
11,140
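An illustrative sketch of the rule above; big and small are assumed helpers that classify caves by letter case (the usual convention for this puzzle):

def big(cave): return cave.isupper()
def small(cave): return cave.islower()

print(allowed_once("A", {"b"}))  # True: big caves may always be revisited
print(allowed_once("b", {"b"}))  # False: small cave already visited
print(allowed_once("c", {"b"}))  # True: small cave not yet visited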
from datetime import datetime import json def _serialize_examstruct(exam): """ Serialize the exam structure for, e.g., the cache. The dates, especially, need work before JSON """ assert isinstance(exam, dict) date_fmt = '%Y-%m-%d %H:%M:%S' assert isinstance(exam['start'], datetime) assert isinstance(exam['end'], datetime) safe = exam.copy() safe['start'] = exam['start'].strftime(date_fmt) safe['end'] = exam['end'].strftime(date_fmt) return json.dumps(safe)
3c553986bfd6b565bbdc34218ca01d984d3aab69
11,141
def _analysis_test_impl(ctx): """Implementation function for analysis_test. """ _ignore = [ctx] return [AnalysisTestResultInfo( success = True, message = "All targets succeeded analysis", )]
5f006c817581b771bf3d1f5b3cc7861cd98e8958
11,142
import warnings import numpy as np from sklearn.metrics import mean_squared_error from IPython.display import display, Math def CD_Joint(CD_J_AS = None, Ypred = None, beta = None, zeta = None, active_set = None, lam = None, P = None, P_interaction = None, Y = None, B = None, B_interaction = None, S = None, S_interaction = None, I = None, interaction_terms = None, r = None, max_iter = None, tol = 1e-4, full_set = None, MaxSuppSize_main = None, MaxSuppSize_interaction = None, verbose = False, path = None): """Cyclic Block Coordinate Descent over the full set of main/interaction effects. Args: CD_J_AS: a callable function that optimizes over a reduced set of main effects, callable. Ypred: numpy array of shape (N, ). beta: coefficients for main/interaction effects, 2 lists of arrays of shapes [ [(Ki+1, 1), ...], [(Kij+1, 1), ...]] zeta: binary vector to track which main effects are in the active set, 2 bool arrays of shape [(1, d), (1, Imax)] active_set: indices of main effects to optimize over, a numpy int array. lam: regularization parameters [lam_1, lam_2], list of floats. P: B^T*B + 2*N*(lam_1*S_i + eps*I) matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...]. eps is a small epsilon for numerical stability. P_interaction: B^T*B + 2*N*(lam_1*S_ij + eps*I) matrices for interaction effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...]. eps is a small epsilon for numerical stability. Y: training target responses, a float numpy array of shape (N,). B: B-spline transformed sparse matrices for main effects, list of sparse matrices of shapes [(N, Ki+1), ...]. B_interaction: B-spline transformed sparse matrices for interaction effects, list of sparse matrices of shapes [(N, Kij+1), ...]. S: Smoothness matrices for main effects, list of sparse matrices of shapes [(Ki+1, Ki+1), ...]. S_interaction: Smoothness matrices for interaction effects, list of sparse matrices of shapes [(Kij+1, Kij+1), ...]. I: number of possible main/interaction effects, int scalars. interaction_terms: list of interaction effects to consider if only a subset need to be considered, a 2D numpy array of shape (Imax, 2). r: relative scaling factor for L0 penalty between main and interaction effects. We consider r=1.0 (corresponds to alpha symbol in the paper), float scalar. max_iter: maximum number of Cyclic BCD on the active set, int scalar. tol: relative loss termination criteria for stopping, a float scalar. full_set: indices of all main effects, a numpy int array. main_terms: list of main effects to consider if only a subset need to be considered, not supported yet. MaxSuppSize_main: Stop L0 regularization if the active set of main effects is larger than MaxSuppSize_main and move to the next smoothing lambda setting and start L0 regularization, int scalar. MaxSuppSize_interaction: Stop L0 regularization if the active set of interaction effects is larger than MaxSuppSize_interaction and move to the next smoothing lambda setting and start L0 regularization, int scalar. verbose: for printing optimization steps, bool scalar. path: for logging, str. Returns: Ypred: Updated prediction, numpy array of shape (N, ). beta: Updated coefficients for main effects, list of arrays of shapes [(Ki+1, 1), ...]. zeta: Updated binary vector to track which main effects are in the active set, a bool array of shape (1, d). delta: Updated coefficients for interaction effects, list of arrays of shapes [(Kij+1, 1), ...]. alpha: Updated binary vector to track which interaction effects are in the active set, a bool array of shape (1, Imax). active_set: Updated indices of nonzero main effects, a numpy int array. active_interaction_set: Updated indices of nonzero interaction effects, a numpy int array. MaxSuppSize_flag: indicates Maximum Support size is reached, bool scalar. """ N = Y.shape[0] delta = beta[1] beta = beta[0] alpha = zeta[1] zeta = zeta[0] active_interaction_set = active_set[1] active_set = active_set[0] full_interaction_set = full_set[1] full_set = full_set[0] Bspam = B Bspam_interaction = B_interaction Pspam = P Pspam_interaction = P_interaction d = I[0] dinteraction = I[1] MaxSuppSize_flag = 0 eps = 1e-8 warnings.filterwarnings("error") res = Y-Ypred beta_p = [(P.solve((B.transpose()).dot(res))).reshape(-1,1) for B, P in zip(Bspam, Pspam)] res_p = np.array([np.linalg.norm(res-B.dot(bp)) for B, bp in zip(Bspam, beta_p)]) active_set = np.arange(d) # if active_set is None: # A = int(np.ceil(0.1*d)) # active_set = res_p.argsort()[:A] # else: # A = np.minimum(np.maximum(int(np.ceil(0.2*len(active_set))),10), 50) # active_set = np.union1d(active_set, res_p.argsort()[:A]) res = Y-Ypred delta_p = [(P.solve((B.transpose()).dot(res))).reshape(-1,1) for B, P in zip(Bspam_interaction, Pspam_interaction)] res_p = np.array([np.linalg.norm(res-B.dot(dp)) for B, dp in zip(Bspam_interaction, delta_p)]) if active_interaction_set is None: A = int(np.ceil(0.01*dinteraction)) active_interaction_set = res_p.argsort()[:A] else: A = np.minimum(np.maximum(int(np.ceil(0.2*len(active_interaction_set))),10), 50) active_interaction_set = np.union1d(active_interaction_set, res_p.argsort()[:A]) ''' Coordinate Descent over full set ''' for it in range(max_iter): Ypred, beta, zeta, delta, alpha = CD_J_AS(Ypred = Ypred, beta = [beta, delta], zeta = [zeta, alpha], active_set = [active_set, active_interaction_set], lam = [lam[0], lam[1]], P = Pspam, P_interaction = Pspam_interaction) active_set = np.where(zeta[0,:] == 1)[0] active_interaction_set = np.where(alpha[0,:] == 1)[0] if (len(np.where(zeta[0,:] == 1)[0]) > MaxSuppSize_main) or (len(np.where(alpha[0,:] == 1)[0]) > MaxSuppSize_interaction): MaxSuppSize_flag = 1 break J = 0.5*mean_squared_error(Y, Ypred)+\ lam[0]*sum([(np.transpose(beta[k])).dot(S[k].dot(beta[k]))[0,0] for k in active_set])+\ lam[0]*sum([(np.transpose(delta[k])).dot(S_interaction[k].dot(delta[k]))[0,0] for k in active_interaction_set])+\ eps*sum([np.dot(beta[k][:,0],beta[k][:,0]) for k in active_set])+\ eps*sum([np.dot(delta[k][:,0],delta[k][:,0]) for k in active_interaction_set])+\ lam[1]*(np.count_nonzero(zeta[0,:]))+\ r*lam[1]*(np.count_nonzero(alpha[0,:])) if verbose == True: display(Math(r'Iteration: {}, Obj: {:.0f}, '.format(it, J)+', \sum_{j \in S^c} z_j: '+'{} \leq {}.'.format(np.count_nonzero(zeta[0,:]), len(active_set))+'\sum_{ij \in S^c} z_{ij}: '+'{} \leq {}.'.format(np.count_nonzero(alpha[0,:]),len(active_interaction_set)))) for j in [x for x in full_set if x not in active_set]: if zeta[0,j]==1: Ypred -= Bspam[j].dot(beta[j]) res = Y-Ypred beta[j], zeta[:,j] = utilities.solve(B=Bspam[j], P=Pspam[j], y=res, beta=beta[j], S=S[j], lam=[lam[0], lam[1]]) if zeta[0,j]==1: Ypred += Bspam[j].dot(beta[j]) for j in [x for x in full_interaction_set if x not in active_interaction_set]: if alpha[0,j]==1: Ypred -= Bspam_interaction[j].dot(delta[j]) res = Y-Ypred delta[j], alpha[:,j] = utilities.solve(B=Bspam_interaction[j], P=Pspam_interaction[j], y=res, beta=delta[j], S=S_interaction[j], lam=[lam[0], r*lam[1]]) if alpha[0,j]==1: Ypred += Bspam_interaction[j].dot(delta[j]) if np.count_nonzero(zeta[0,:])==active_set.shape[0] and np.count_nonzero(alpha[0,:])==active_interaction_set.shape[0]: if np.sum(sorted(active_set) == np.where(zeta[0,:] == 1)[0])==active_set.shape[0] and np.sum(sorted(active_interaction_set) == np.where(alpha[0,:] == 1)[0])==active_interaction_set.shape[0]: #print('Active set converged') active_set = np.where(zeta[0,:] == 1)[0] active_interaction_set = np.where(alpha[0,:] == 1)[0] break active_set = np.where(zeta[0,:] == 1)[0] active_interaction_set = np.where(alpha[0,:] == 1)[0] # for i in active_set: # Pspam[i] = sp.linalg.splu((Bspam[i].transpose()).dot(Bspam[i])+2*N*(lam[0]*S[i]+eps*sp.csr_matrix(np.identity(Bspam[i].shape[1])))) # for i in active_interaction_set: # Pspam_interaction[i] = sp.linalg.splu((Bspam_interaction[i].transpose()).dot(Bspam_interaction[i])+2*N*(lam[0]*S_interaction[i]+eps*sp.csr_matrix(np.identity(Bspam_interaction[i].shape[1])))) if(it == max_iter-1): with open(path+'/Warning.txt', "a") as f: f.write('Warning: CD over full set did not converge within the chosen max_iter!') f.write('\lambda_1: {:.7f},\lambda_2: {:.7f}'.format(lam[0], lam[1])) return Ypred, beta, zeta, delta, alpha, active_set, active_interaction_set, MaxSuppSize_flag
780bbd6a44dcfacf55a22390a6f7ee8c98e2d2f0
11,144
from typing import Tuple def testAllCallbacksSmokeTest( args_count: int, type_checker: TypeCheckerFixture ) -> None: """ Parametrized test to do basic checking over all Callbacks (except Callback0). We generate functions with too much arguments, too few, and correct number, and check that the errors are as expected. This should be enough to catch copy/paste errors when declaring the Callback overloads. """ def gen_signature_and_args(count: int) -> Tuple[str, str, str]: # Generates "v1: int, v2: int" etc signature = ", ".join(f"v{i}: int" for i in range(count)) # Generates "10, 20" etc args = ", ".join(f"{i+1}0" for i in range(count)) # Generates "int, int" etc types = ", ".join("int" for _ in range(count)) return signature, args, types sig_too_few, args_too_few, types_too_few = gen_signature_and_args(args_count - 1) sig_too_many, args_too_many, types_too_many = gen_signature_and_args(args_count + 1) sig_ok, args_ok, types_ok = gen_signature_and_args(args_count) type_checker.make_file( f""" from oop_ext.foundation.callback import Callback{args_count} c = Callback{args_count}[{types_ok}]() def too_few_func({sig_too_few}) -> None: ... c.Register(too_few_func) c({args_too_few}) def too_many_func({sig_too_many}) -> None: ... c.Register(too_many_func) c({args_too_many}) def ok_func({sig_ok}) -> None: ... c.Register(ok_func) c({args_ok}) """ ) result = type_checker.run() result.assert_errors( [ "has incompatible type", "Missing positional argument", "has incompatible type", "Too many arguments", ] )
8459b040f2c7dc145a6a41ddebd4edb24873d704
11,145
import pandas as pd def transform_unnamed_cols_range(df: pd.DataFrame, columns_range: range, new_column_name_prefix: str, inplace=False) -> object: """ This function transforms a range of columns, assuming the presence of the following schema in the dataframe: |base_column_name|Unnamed_n|Unnamed_n+1|Unnamed_n+2|--- |option_1 |NaN |NaN |NaN |--- |----------------|NaN |option_3 |NaN |--- |----------------|option_2 |NaN |NaN |--- |----------------|NaN |NaN |option_4 |--- Without a precise order, only one cell per row is marked as "option_x", and the following schema will be given as output: |base_column_name_option_1|base_column_name_option_2 |base_column_name_option_3|base_column_name_option_4|--- Also, it will replace cell values from these columns with binary data (1, 0) according to the presence or not of the corresponding categorical value. :param df: input dataframe to be processed :param columns_range: range of columns from input dataframe to be transformed :param new_column_name_prefix: new column name to be added as base_name to the rename map :param inplace: If False, return a copy. Otherwise, do operation inplace and return None. :return: input dataframe with Unnamed columns dropped and string values transformed to binary values (0,1) """ # extracting columns of interest df_target_columns = df.iloc[:, columns_range] return _even_out_categorical_as_binaries(df, df_target_columns.columns, new_column_name_prefix=new_column_name_prefix, inplace=inplace)
c54394531cec3aeef6e1717d3db0be17852ade9b
11,146
def shingles(tokens, n): """ Return n-sized shingles from a list of tokens. >>> assert list(shingles([1, 2, 3, 4], 2)) == [(1, 2), (2, 3), (3, 4)] """ return zip(*[tokens[i:-n + i + 1 or None] for i in range(n)])
93e8f3828bf4b49397e09cb46565199dcd7a68be
11,147
import json from os.path import dirname, join def load_json(filename): """Load JSON file as dict.""" with open(join(dirname(__file__), filename), "rb") as fp: return json.load(fp)
3ce3a92b4a11a005709ea3fab003d73133627183
11,148
def getLayerList(layer_list, criterionFn): """Returns a list of all of the layers in the stack that match the given criterion function, including substacks.""" matching_layer = [] for layer in layer_list: if criterionFn(layer): matching_layer.append(layer) if hasattr(layer, 'layerStack'): matching_layer.extend(getLayerList(layer.layerStack().layerList(), criterionFn)) if layer.hasMaskStack(): matching_layer.extend(getLayerList(layer.maskStack().layerList(), criterionFn)) if hasattr(layer, 'hasAdjustmentStack') and layer.hasAdjustmentStack(): matching_layer.extend(getLayerList(layer.adjustmentStack().layerList(), criterionFn)) return matching_layer
5e09065b350f1305a2fcd45379751fac6552031e
11,149
import numpy as np def getBiLinearMap(edge0, edge1, edge2, edge3): """Get the UV coordinates on a square defined from spacing on the edges""" if len(edge0) != len(edge1): raise ValueError("getBiLinearMap: The len of edge0 and edge1 are not the same") if len(edge2) != len(edge3): raise ValueError("getBiLinearMap: The len of edge2 and edge3 are not the same") N = len(edge0) M = len(edge2) UV = np.zeros((N, M, 2)) UV[:, 0, 0] = edge0 UV[:, 0, 1] = 0.0 UV[:, -1, 0] = edge1 UV[:, -1, 1] = 1.0 UV[0, :, 0] = 0.0 UV[0, :, 1] = edge2 UV[-1, :, 0] = 1.0 UV[-1, :, 1] = edge3 for i in range(1, N - 1): x1 = edge0[i] y1 = 0.0 x2 = edge1[i] y2 = 1.0 for j in range(1, M - 1): x3 = 0 y3 = edge2[j] x4 = 1.0 y4 = edge3[j] UV[i, j] = calcIntersection(x1, y1, x2, y2, x3, y3, x4, y4) return UV
a75626a846c18418db8dbb98afdb25ab0c903969
11,150
def _parse_orientation(response: HtmlResponse): """Parse Orientation. Returns None if not available or is unknown. """ value = response.css('th:contains("Ausrichtung") + td ::text').get() if value: if value == "unbekannt" or value == "verschieden": return None fk_value = { "Nord": "N", "Nordost": "NO", "Ost": "O", "Südost": "SO", "Süd": "S", "Südwest": "SW", "West": "W", "Nordwest": "NW", } return Orientation.objects.get(name=fk_value[value]) else: return None
338fb6dbc8e3f1c0e116f766f86a01b110c922f2
11,152
def binaryread(file, vartype, shape=(1,), charlen=16): """ Uses numpy to read from binary file. This was found to be faster than the struct approach and is used as the default. """ # read a string variable of length charlen if vartype == str: result = file.read(charlen * 1) else: # find the number of values nval = np.prod(shape) result = np.fromfile(file, vartype, nval) if nval == 1: result = result # [0] else: result = np.reshape(result, shape) return result
221e0a71271eea4a31423a94244c12784af7fef2
11,153
import numpy as np def subsample_data(neuron_data, sample_size=10000): """ Acquire a subsample of the neuron dataset by sampling a set of neurons without replacement. Params ----------- neuron_data (array-like): Array of neuron traces with shape (n_neurons, n_timepoints) sample_size (int): Number of neurons to sample Returns ----------- rand_ix (array-like): Array containing the chosen indices sample_neurons (array-like): Array with shape (sample_size, neuron_data.shape[1]) containing a subset of the neuron traces. """ # Get random indices sampling without replacement rand_ix = np.random.choice( np.arange(neuron_data.shape[0]), size=sample_size, replace=False ) # Get subsample by choosing indices along rows sample_neurons = neuron_data[rand_ix, :] return rand_ix, sample_neurons
801d0d618576e14b67b33bf9071c135409362bfe
11,154
def connect_db(): """Connects to the specific database.""" mongo = MongoClient(DATABASE_URL,replicaset=MONGO_REPLICASET) #if COLLECTION_NAME in mongo[DATABASE_NAME].collection_names(): collection = mongo[DATABASE_NAME][COLLECTION_NAME] #else: # mongo[DATABASE_NAME].create_collection(COLLECTION_NAME) # collection = mongo[DATABASE_NAME][COLLECTION_NAME] # collection.createIndex( { "timestamp": 1 }, { 'unique': True } ) return collection
0e037a2bfb8687d4ff2b477a59c3f5ba99335c44
11,156
def transient(func): """ decorator to make a function execution transient. meaning that before starting the execution of the function, a new session with a new transaction will be started, and after the completion of that function, the new transaction will be rolled back without the consideration or affecting the parent transaction which by default is scoped to request. the corresponding new session will also be removed after function execution. note that you *should not* commit, flush or rollback anything inside a transient function, the `@transient` decorator will handle rollback operation when needed. otherwise, unexpected behaviors may occur. also note that you *should not* remove the corresponding session from session factory when using `@transient` decorator. the removal operation will be handled by decorator itself and if you remove session manually, it will cause broken chain of sessions and unexpected behaviour. this decorator also supports multiple `@transient` usage in a single call hierarchy. for example: def service_root(): store = get_current_store() value = EntityRoot() store.add(value) service_a() @atomic def service_a(): store = get_current_store() value = EntityA() store.add(value) service_b() @transient def service_b(): store = get_current_store() value = EntityB() store.add(value) service_c() @transient def service_c(): value = EntityC() value.save() in the above example, if the call hierarchy starts with `service_root()`, at the end, the data of `service_root` and `service_a` will be persisted into database. but the data of `service_b` and `service_c` will not be persisted because they are decorated as transient. :param function func: function. :returns: function result. """ def decorator(*args, **kwargs): """ decorates the given function and makes its execution transient. :param object args: function arguments. :param object kwargs: function keyword arguments. :returns: function result. """ store = database_services.get_atomic_store() try: result = func(*args, **kwargs) return result finally: store.rollback() factory = database_services.get_current_session_factory() factory.remove(atomic=True) return update_wrapper(decorator, func)
454c808d15bbdddd800db70ec56d228f432921f8
11,157
import simplejson

# HTTP400 and the tiddler helpers come from the surrounding TiddlyWeb
# application (import path assumed).
from tiddlyweb.web.http import HTTP400


def make_mapping(environ, start_response):
    """
    Establish a mapping, storing the provided URI as a field on a
    tiddler in the PRIVATEER bag. Accepted data is either a JSON
    dictionary with a 'uri' key or a POST CGI form with a 'uri' query
    parameter. Respond with a Location header containing the URI of
    the mapping.
    """
    uri = None
    try:
        content_type = environ['tiddlyweb.type']
    except KeyError:
        content_type = None

    if content_type == 'application/json':
        try:
            length = environ['CONTENT_LENGTH']
            content = environ['wsgi.input'].read(int(length))
            data = simplejson.loads(content)
            uri = data['uri']
        except (KeyError, IOError, simplejson.JSONDecodeError) as exc:
            raise HTTP400('Unable to parse input: %s' % exc)
    else:
        try:
            uri = environ['tiddlyweb.query']['uri'][0]
        except (KeyError, IndexError) as exc:
            raise HTTP400('Unable to parse input: %s' % exc)

    if uri:
        title_uuid = _make_mapping_tiddler(environ, uri)
    else:
        raise HTTP400('No uri for mapping provided')

    start_response('201 Created', [
        ('Location', _mapping_uri(environ, title_uuid))])
    return []
e90a72bce2132d703504230d41f3e807ea77d7a2
11,158
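A hedged client-side sketch of how make_mapping would be exercised over HTTP; the endpoint URL is hypothetical, only the JSON body shape matters to the handler:

import requests

resp = requests.post("http://example.com/mappings",
                     json={"uri": "http://example.org/some/page"})
print(resp.status_code)               # expect 201
print(resp.headers.get("Location"))   # URI of the new mapping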
def create_space_magnitude_region(region, magnitudes):
    """Simple wrapper to create a space-magnitude region."""
    if not isinstance(region, (CartesianGrid2D, QuadtreeGrid2D)):
        raise TypeError("region must be CartesianGrid2D or QuadtreeGrid2D")
    # bind to region class
    if magnitudes is None:
        raise ValueError("magnitudes should not be None if creating space-magnitude region.")
    region.magnitudes = magnitudes
    region.num_mag_bins = len(region.magnitudes)
    return region
64f4606c74ad38bd34ade7673074124e3d3faa48
11,159
import numpy as np


def get_productivity(coin_endowments):
    """Returns the total coin inside the simulated economy.

    Args:
        coin_endowments (ndarray): The array of coin endowments for each of
            the agents in the simulated economy.

    Returns:
        Total coin endowment (float).
    """
    return np.sum(coin_endowments)
e6dfe2485bce54599bc919d9a2b2235b90166702
11,161
def prefix_attrs(source, keys, prefix): """Rename some of the keys of a dictionary by adding a prefix. Parameters ---------- source : dict Source dictionary, for example data attributes. keys : sequence Names of keys to prefix. prefix : str Prefix to prepend to keys. Returns ------- dict Dictionary of attributes with some keys prefixed. """ out = {} for key, val in source.items(): if key in keys: out[f"{prefix}{key}"] = val else: out[key] = val return out
e1c8102fddf51cd7af620f9158419bff4b3f0c57
11,162
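A short usage sketch for prefix_attrs (attribute names are illustrative):

attrs = {"units": "K", "standard_name": "air_temperature", "history": "raw"}
out = prefix_attrs(attrs, keys=["units", "standard_name"], prefix="original_")
# {'original_units': 'K', 'original_standard_name': 'air_temperature', 'history': 'raw'}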
from uuid import uuid4

# TH1 comes from PyROOT; the histogram branch is only exercised for ROOT
# histogram objects.
from ROOT import TH1


def add(coefficient_1, value_1, coefficient_2, value_2):
    """Provides an addition algebra for various types, including scalars and
    histogram objects.

    Incoming values are not modified.

    Args:
        coefficient_1: The first coefficient, a scalar
        value_1: The first value, a histogram or scalar
        coefficient_2: The second coefficient, a scalar
        value_2: The second value, a histogram or scalar

    Returns:
        The value of the expression:
            ((coefficient_1 * value_1) + (coefficient_2 * value_2))
    """
    # Verify that the incoming types match
    if type(value_1) != type(value_2):
        raise ValueError('values must be of the same type')

    # Handle based on type
    if isinstance(value_1, TH1):
        # Create the result as a uniquely-named clone
        result = value_1.Clone(uuid4().hex)

        # Add the histograms
        result.Add(value_1, value_2, coefficient_1, coefficient_2)
    else:
        # Create the result
        result = ((coefficient_1 * value_1) + (coefficient_2 * value_2))

    # All done
    return result
70bccba3d504325a66090104ffc4d464649f2b32
11,164
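Scalar usage of add is straightforward; the histogram branch follows the same algebra (sketch, no ROOT objects needed here):

assert add(2, 3.0, -1, 1.0) == 5.0    # 2*3.0 + (-1)*1.0
assert add(0.5, 10, 0.5, 20) == 15.0  # a simple weighted sum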
import time


def kotlin_object_type_summary(lldb_val, internal_dict={}):
    """Hook that is run by lldb to display a Kotlin object."""
    start = time.monotonic()
    log(lambda: f"kotlin_object_type_summary({lldb_val.unsigned:#x}: {lldb_val.GetTypeName()})")
    fallback = lldb_val.GetValue()
    if lldb_val.GetTypeName() != "ObjHeader *":
        if lldb_val.GetValue() is None:
            bench(start, lambda: "kotlin_object_type_summary:({:#x}) = NULL".format(lldb_val.unsigned))
            return NULL
        bench(start, lambda: "kotlin_object_type_summary:({:#x}) = {}".format(lldb_val.unsigned, lldb_val.signed))
        return lldb_val.value

    if lldb_val.unsigned == 0:
        bench(start, lambda: "kotlin_object_type_summary:({:#x}) = NULL".format(lldb_val.unsigned))
        return NULL
    tip = internal_dict["type_info"] if "type_info" in internal_dict.keys() else type_info(lldb_val)

    if not tip:
        bench(start, lambda: "kotlin_object_type_summary:({0:#x}) = fallback:{0:#x}".format(lldb_val.unsigned))
        return fallback

    value = select_provider(lldb_val, tip, internal_dict)
    bench(start, lambda: "kotlin_object_type_summary:({:#x}) = value:{:#x}".format(lldb_val.unsigned, value._valobj.unsigned))
    start = time.monotonic()
    str0 = value.to_short_string()

    bench(start, lambda: "kotlin_object_type_summary:({:#x}) = str:'{}...'".format(lldb_val.unsigned, str0[:3]))
    return str0
79883644017bfc35c77a17a3e5da4b5913864ef2
11,165
def _grep_first_pair_of_parentheses(s):
    """
    Return the first matching pair of parentheses in a code string.

    INPUT:

    A string

    OUTPUT:

    A substring of the input, namely the part between the first
    (outermost) matching pair of parentheses (including the parentheses).

    Parentheses between single or double quotation marks do not count.
    If no matching pair of parentheses can be found, a ``SyntaxError``
    is raised.

    EXAMPLES::

        sage: from sage.misc.sageinspect import _grep_first_pair_of_parentheses
        sage: code = 'def foo(a="\'):", b=4):\n return'
        sage: _grep_first_pair_of_parentheses(code)
        '(a="\'):", b=4)'
        sage: code = 'def foo(a="%s):", \'b=4):\n return'%("'")
        sage: _grep_first_pair_of_parentheses(code)
        Traceback (most recent call last):
        ...
        SyntaxError: The given string does not contain balanced parentheses

    """
    out = []
    single_quote = False
    double_quote = False
    escaped = False
    level = 0
    for c in s:
        if level > 0:
            out.append(c)
        if c == '(' and not single_quote and not double_quote and not escaped:
            level += 1
        elif c == '"' and not single_quote and not escaped:
            double_quote = not double_quote
        elif c == "'" and not double_quote and not escaped:
            single_quote = not single_quote
        elif c == ')' and not single_quote and not double_quote and not escaped:
            if level == 1:
                return '(' + ''.join(out)
            level -= 1
        elif c == "\\" and (single_quote or double_quote):
            escaped = not escaped
        else:
            escaped = False
    raise SyntaxError("The given string does not contain balanced parentheses")
7441c1b8734c211b9b320e195155719452cf7407
11,167
import requests


def login():
    """Log in to the local REST API with test credentials and return the
    access token."""
    url = "http://127.0.0.1:5001/rest/login"
    data = {"username": "kivanc", "password": "1234"}
    r = requests.post(url, json=data)
    output = r.json()
    return output["access_token"]
a2b4bd68110fd053c48988f7cc490c88f148bc1f
11,168
import tqdm
import torch


def evaluation_per_relation(triples: dict, model: EvaluationModel, batch_size: int = 4):
    """Evaluate the model separately on each relation.

    :param triples: dict mapping a relation id to a numpy array of triples
        with shape (n, 3), i.e. rows of (subject, predicate, object) ids.
    :param model: the evaluation model wrapper.
    :param batch_size: evaluation batch size.
    :returns: dict mapping each relation id to its evaluation metrics.
    """
    # Evaluate per relation and store scores/evaluation measures
    score_per_rel = dict()
    for k in tqdm.tqdm(triples.keys()):
        # use API to evaluate model and generate model output for error analysis
        sub = torch.tensor(triples[k][:, 0]).cuda()
        pra = torch.tensor(triples[k][:, 1]).cuda()
        obj = torch.tensor(triples[k][:, 2]).cuda()
        score_per_rel[k] = model.evaluate_only_metrics(sub, pra, obj, batch_size=batch_size)
    return score_per_rel
73262587c181fa285b97479110f49ea4dd178946
11,170
from datetime import date


def check_upload():
    """
    Check whether today's record has already been uploaded.

    db_helper and the flask session are provided by the surrounding module.
    """
    ctime = date.today()  # today's date
    data = db_helper.fetchone('select id from record where ctime = %s and user_id = %s',
                              (ctime, session['user_info']['id']))
    return data
2dfceb7cc91668b3a41920b931e946188332c6e4
11,171
def get_package_object(): """Gets a sample package for the submission in Dev Center.""" package = { # The file name is relative to the root of the uploaded ZIP file. "fileName" : "bin/super_dev_ctr_api_sim.appxupload", # If you haven't begun to upload the file yet, set this value to "PendingUpload". "fileStatus" : "PendingUpload" } return package
d65329372f356325c08ecb814f48ad856b9509bc
11,172
from math import inf

import yaml


def load_config(config_file="config.yaml"):
    """Load a YAML config file to initialize fragment factories.

    Example config file:

    # config.yaml
    name: My LDF server
    maintainer: chuck Norris <[email protected]>

    datasets:
      - name: DBpedia-2016-04
        description: DBpedia dataset, version 2016-04
        backend: hdt-file
        file: /home/chuck-norris/dbpedia-2016-04.hdt
      - name: Chuck-Norris-facts
        description: Best Chuck Norris facts ever
        backend: rdf-file
        format: nt
        file: /home/chuck-norris/facts.nt
    """
    with open(config_file) as f:
        config = yaml.safe_load(f)
    # set page size, i.e. the number of triples per page
    quota = config['quota'] if 'quota' in config else 75
    max_results = config['max_results'] if 'max_results' in config else inf
    config['quota'] = quota
    for c in config["datasets"]:
        if 'quota' not in c:
            c['quota'] = quota
        if 'max_results' not in c:
            c['max_results'] = max_results
        if 'queries' not in c:
            c['queries'] = []
    # build graphs (Graph is provided by the surrounding package)
    graphs = {c["name"]: Graph(c) for c in config["datasets"]}
    return (config, graphs)
b5ee03a3b30f4374da05469cd3a289566eb26540
11,176
def find_start_end(grid):
    """
    Finds the source and destination block indexes from the list.

    Args
        grid: <list> the world grid blocks represented as a list of blocks
              (see Tutorial.pdf)

    Returns
        eb_index: <int> source (emerald) block index in the list
        rb_index: <int> destination (redstone) block index in the list
        air_block: <list> indexes of air blocks
        diamond_block: <list> indexes of diamond blocks
    """
    eb_index = None
    rb_index = None
    air_block = []
    diamond_block = []
    for counter, block in enumerate(grid):
        if block == 'diamond_block':
            diamond_block.append(counter)
        elif block == 'air':
            air_block.append(counter)
        elif block == 'emerald_block':
            eb_index = counter
        elif block == 'redstone_block':
            rb_index = counter
    return (eb_index, rb_index, air_block, diamond_block)
d617af3d6ebf9a2c9f42250214e3fe52d2017170
11,178
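A small usage sketch for find_start_end with a toy grid:

grid = ['air', 'emerald_block', 'diamond_block', 'air', 'redstone_block']
start, end, air, diamonds = find_start_end(grid)
# start == 1, end == 4, air == [0, 3], diamonds == [2]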
from typing import Optional import inspect def find_method_signature(klass, method: str) -> Optional[inspect.Signature]: """Look through a class' ancestors and fill out the methods signature. A class method has a signature. But it might now always be complete. When a parameter is not annotated, we might want to look through the ancestors and determine the annotation. This is very useful when you have a base class that has annotations, and child classes that are not. Examples -------- >>> class Parent: ... ... def foo(self, x: int) -> int: ... ... >>> find_method_signature(Parent, 'foo') <Signature (self, x: int) -> int> >>> class Child(Parent): ... ... def foo(self, x, y: float) -> str: ... ... >>> find_method_signature(Child, 'foo') <Signature (self, x: int, y: float) -> str> """ m = getattr(klass, method) sig = inspect.signature(m) params = [] for param in sig.parameters.values(): if param.name == "self" or param.annotation is not param.empty: params.append(param) continue for ancestor in inspect.getmro(klass): try: ancestor_meth = inspect.signature(getattr(ancestor, m.__name__)) except AttributeError: break try: ancestor_param = ancestor_meth.parameters[param.name] except KeyError: break if ancestor_param.annotation is not param.empty: param = param.replace(annotation=ancestor_param.annotation) break params.append(param) return_annotation = sig.return_annotation if return_annotation is inspect._empty: for ancestor in inspect.getmro(klass): try: ancestor_meth = inspect.signature(getattr(ancestor, m.__name__)) except AttributeError: break if ancestor_meth.return_annotation is not inspect._empty: return_annotation = ancestor_meth.return_annotation break return sig.replace(parameters=params, return_annotation=return_annotation)
17d3e7d554720766ca62cb4ad7a66c42f947fc1c
11,179
def format_long_calc_line(line: LongCalcLine) -> LongCalcLine:
    """
    Return line with .latex attribute formatted with line breaks suitable
    for positioning within the LaTeX "aligned" environment.
    """
    latex_code = line.latex
    long_latex = latex_code.replace("=", "\\\\&=")  # Change all...
    long_latex = long_latex.replace("\\\\&=", "&=", 1)  # ...except the first one
    line_break = "\\\\\n"
    comment_space = ""
    comment = ""
    if line.comment:
        comment_space = "\\;"
        comment = format_strings(line.comment, comment=True)
    line.latex = f"{long_latex}{comment_space}{comment}{line_break}"
    return line
a6f19b7f3a1876f3b6b0c88baddfd02b16901b41
11,180
from riddle import emr, feature_importance
from riddle.models import MLP
import time
import pickle

import numpy as np


def run(data_fn, prop_missing=0., max_num_feature=-1,
        feature_selection='random', k=10, data_dir='_data', out_dir='_out'):
    """Run RIDDLE classification interpretation pipeline.

    Arguments:
        data_fn: string
            data file filename
        prop_missing: float
            proportion of feature observations which should be randomly
            masked; values in [0, 1)
        max_num_feature: int
            maximum number of features to use
        feature_selection: string
            feature selection method; values = {'random', 'frequency', 'chi2'}
        k: int
            number of partitions for k-fold cross-validation
        data_dir: string
            directory where data files are located
        out_dir: string
            outer directory where outputs (e.g., results) should be saved
    """
    start = time.time()

    base_out_dir = get_base_out_dir(out_dir, 'riddle', data_fn, prop_missing,
                                    max_num_feature, feature_selection)
    recursive_mkdir(base_out_dir)

    # get common data
    x_unvec, y, idx_feat_dict, idx_class_dict, icd9_descript_dict, perm_indices = (
        get_preprocessed_data(data_dir, data_fn, prop_missing=prop_missing))
    num_feature = len(idx_feat_dict)
    num_class = len(idx_class_dict)

    list_sums_D, list_sums_D2, list_sums_contribs = [], [], []

    for k_idx in range(k):
        full_out_dir = '{}/k_idx={}'.format(base_out_dir, k_idx)
        print('\nPartition k = {}'.format(k_idx))
        x_train_unvec, y_train, _, _, x_test_unvec, y_test = emr.get_k_fold_partition(
            x_unvec, y, k_idx=k_idx, k=k, perm_indices=perm_indices)

        if max_num_feature > 0:  # select features and re-encode
            feat_encoding_dict, idx_feat_dict = select_features(
                x_train_unvec, y_train, idx_feat_dict,
                method=feature_selection, num_feature=num_feature,
                max_num_feature=max_num_feature)
            x_test_unvec = subset_reencode_features(
                x_test_unvec, feat_encoding_dict)
            num_feature = max_num_feature

        # interpret
        start = time.time()

        temp_mlp = MLP(num_feature=num_feature, num_class=num_class)
        hdf5_path = full_out_dir + '/model.h5'
        sums_D, sums_D2, sums_contribs, pairs = \
            feature_importance.get_diff_sums(
                hdf5_path,
                x_test_unvec,
                process_x_func=temp_mlp.process_x,
                num_feature=num_feature,
                num_class=num_class)

        with open(full_out_dir + '/sums_D.pkl', 'wb') as f:
            pickle.dump(sums_D, f)
        with open(full_out_dir + '/sums_D2.pkl', 'wb') as f:
            pickle.dump(sums_D2, f)
        with open(full_out_dir + '/sums_contribs.pkl', 'wb') as f:
            pickle.dump(sums_contribs, f)

        list_sums_D.append(sums_D)
        list_sums_D2.append(sums_D2)
        list_sums_contribs.append(sums_contribs)

    def compute_total_sums(list_sums):
        total_sums = list_sums[0]
        for i in range(1, len(list_sums)):
            for j in range(len(total_sums)):
                total_sums[j] = np.add(total_sums[j], list_sums[i][j])
        return total_sums

    total_sums_D = compute_total_sums(list_sums_D)
    total_sums_D2 = compute_total_sums(list_sums_D2)
    total_sums_contribs = compute_total_sums(list_sums_contribs)

    num_sample = len(x_unvec)
    run_interpretation_summary(
        x_unvec, y, total_sums_D, total_sums_D2, total_sums_contribs,
        idx_feat_dict=idx_feat_dict, idx_class_dict=idx_class_dict,
        icd9_descript_dict=icd9_descript_dict, pairs=pairs,
        num_sample=num_sample, full_out_dir=base_out_dir)

    print('Computed DeepLIFT scores and analysis in {:.4f} seconds'
          .format(time.time() - start))
    print('-' * 72)
    print()
ac28216cbea67b0bdc6d2b3f617c24c975623415
11,181
import numpy as np
from numpy import trapz


def h_lgn(t, mu, sigma, normalize=False):
    """
    Log-normal density

    Args:
        t:         input argument (array)
        mu:        mean parameter (-infty, infty)
        sigma:     std parameter > 0
        normalize: trapz integral normalization over t

    Returns:
        function values
    """
    y = np.zeros(len(t))
    y[t > 0] = 1 / (t[t > 0] * sigma * np.sqrt(2 * np.pi)) * \
        np.exp(-(np.log(t[t > 0]) - mu)**2 / (2 * sigma**2))
    y[np.isinf(y) | np.isnan(y)] = 0  # Protect against under/overflows

    if normalize:
        y /= np.abs(trapz(x=t, y=y))  # abs for numerical protection

    return y
63bd6ea48f5ea28c5631b3ce259066d3624d038b
11,182
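A quick sanity check for h_lgn on a dense grid; the density should integrate to roughly one even without the normalize flag:

import numpy as np

t = np.linspace(0, 50, 20001)
y = h_lgn(t, mu=0.0, sigma=0.5)
print(np.trapz(y, t))  # ~1.0 for a well-resolved grid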
from copy import copy

from .background import set_background_alignment


def align_background(data, align='auto'):
    """
    Determine the Qz value associated with the background measurement.

    The *align* flag determines which background points are matched
    to the sample points.  It can be 'sample' if background is
    measured using an offset from the sample angle, or 'detector'
    if it is offset from detector angle.   If *align* is 'auto', then
    use 'Qz_target' to align the background scan.

    For 'auto' alignment without Qz_target set, we can only distinguish
    relative and constant offsets, and cannot determine which of sample
    and detector is offset from the specular condition, so we must rely
    on convention. If the offset is constant for each angle, then it is
    assumed to be a sample offset. If the offset is proportional to the
    angle (and therefore offset divided by angle is constant), then it
    is assumed to be a detector offset. If neither condition is met, it
    is assumed to be a sample offset.

    The 'auto' test is robust: 90% of the points should be within 5% of the
    median value of the vector for the offset to be considered a constant.

    **Inputs**

    data (refldata) : background data with unknown $q$

    align (opt:auto|sample|detector) : angle which determines $q_z$

    **Returns**

    output (refldata) : background with known $q$

    2015-12-17 Paul Kienzle
    2020-10-16 Paul Kienzle rename 'offset' to 'align'
    """
    data = copy(data)
    set_background_alignment(data, align)
    return data
a8b33aa5440cf6c212d964d58720bef771fe2083
11,183
from pyomo.environ import units as pyunits


def get_bounds_from_config(b, state, base_units):
    """
    Method to take a 3- or 4-tuple state definition config argument and
    return tuples for the bounds and default value of the Var object.

    Expects the form (lower, default, upper, units) where units is optional

    Args:
        b - StateBlock on which the state vars are to be constructed
        state - name of state var as a string (to be matched with config dict)
        base_units - base units of state var to be used if conversion required

    Returns:
        bounds - 2-tuple of state var bounds in base units
        default_val - default value of state var in base units
    """
    try:
        var_config = b.params.config.state_bounds[state]
    except (KeyError, TypeError):
        # State definition missing
        return (None, None), None

    if len(var_config) == 4:
        # Units provided, need to convert values
        bounds = (pyunits.convert_value(var_config[0],
                                        from_units=var_config[3],
                                        to_units=base_units),
                  pyunits.convert_value(var_config[2],
                                        from_units=var_config[3],
                                        to_units=base_units))
        default_val = pyunits.convert_value(var_config[1],
                                            from_units=var_config[3],
                                            to_units=base_units)
    else:
        bounds = (var_config[0], var_config[2])
        default_val = var_config[1]

    return bounds, default_val
c9e757a2032178e656f7bbc27519bd1650eb9a79
11,184
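For context, a hedged sketch of the two state_bounds entry shapes this function accepts (names and values are hypothetical):

from pyomo.environ import units as pyunits

state_bounds = {
    # 3-tuple: (lower, default, upper), already in base units
    "flow_mol": (0, 100, 1000),
    # 4-tuple: (lower, default, upper, units), converted to base units
    "temperature": (27.0, 95.0, 127.0, pyunits.degC),
}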
import numpy as np


def read_train_data():
    """Read the SVHN train and extra sets and concatenate them.

    train_data.shape = (73257, 32, 32, 3)
    train_label.shape = (73257,)
    extra_data.shape = (531131, 32, 32, 3)
    extra_label.shape = (531131,)
    data.shape = (604388, 32, 32, 3)
    labels.shape = (604388,)
    """
    # read_images and full_data_dir are provided by the surrounding module
    train_data, train_label = read_images(full_data_dir + 'train_32x32.mat')
    extra_data, extra_label = read_images(full_data_dir + 'extra_32x32.mat')
    data = np.concatenate((train_data, extra_data))
    label = np.concatenate((train_label, extra_label))
    return data, label
d6e5c06ceb3a95e20e8ae301d83e1f480fc48591
11,185
import numpy as np
import scipy.special as sps


def laguerreFunction(n, alpha, t, normalized=True):
    """Evaluate Laguerre function using scipy.special.

    mu is a weight function defined elsewhere in this module (presumably
    the generalized Laguerre weight mu(alpha, t) = t**alpha * exp(-t)).
    """
    if normalized:
        Z = np.exp(.5 * sps.gammaln(n + 1) - .5 * sps.gammaln(n + alpha + 1))
    else:
        Z = 1
    return Z * np.sqrt(mu(alpha, t)) * sps.eval_genlaguerre(n, alpha, t)
6c48f3ddaed9db7d748ad8fc972a132795ad3916
11,186
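A hedged sanity check for laguerreFunction, assuming mu is the standard generalized Laguerre weight t**alpha * exp(-t) (defined here for the example): normalized functions should be orthonormal under plain trapezoidal integration.

import numpy as np

def mu(alpha, t):
    # assumed weight function; not part of the original snippet
    return t**alpha * np.exp(-t)

t = np.linspace(0, 80, 40001)
f2 = laguerreFunction(2, 0.0, t)
f3 = laguerreFunction(3, 0.0, t)
print(np.trapz(f2 * f2, t))  # ~1.0
print(np.trapz(f2 * f3, t))  # ~0.0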
def end(s): """Select the mobile or weight hanging at the end of a side.""" assert is_side(s), "must call end on a side" return branches(s)[0]
2bcbc61e989287d714e9401660e58bd2f54c6fe6
11,187
def get_ca_pos_from_atoms(df, atoms): """Look up alpha carbon positions of provided atoms.""" ca = df[df['atom_name'] == 'CA'].reset_index() nb = ca.reindex(atoms) nb = nb.reset_index().set_index('index') return nb
c069db751d94f6626be5d56e7b286ef3c873c04e
11,188
def LoadComponent(self,filename): # real signature unknown; restored from __doc__ """ LoadComponent(self: object,filename: str) -> object LoadComponent(self: object,stream: Stream) -> object LoadComponent(self: object,xmlReader: XmlReader) -> object LoadComponent(self: object,filename: TextReader) -> object LoadComponent(self: object,reader: XamlXmlReader) -> object """ return object()
17b893a6e91f4ef62b8ba18646d9dc2005c52ccd
11,190
def split_bits(word: int, amounts: list):
    """
    Takes in a word and a list of bit amounts and returns the bits in
    the word split up. See the doctests for concrete examples

    >>> [bin(x) for x in split_bits(0b1001111010000001, [16])]
    ['0b1001111010000001']

    >>> [bin(x) for x in split_bits(0b1001111010000001, [8,8])]
    ['0b10011110', '0b10000001']

    not the whole 16 bits!
    >>> [bin(x) for x in split_bits(0b1001111010000001, [8])]
    Traceback (most recent call last):
    AssertionError: expected to split exactly one word

    This is a test splitting MOVE.B (A1),D4
    >>> [bin(x) for x in split_bits(0b0001001010000100, [2,2,3,3,3,3])]
    ['0b0', '0b1', '0b1', '0b10', '0b0', '0b100']
    """
    nums = []
    pos = 0
    for amount in amounts:
        # get a group of "amount" 1's
        mask = 2**amount - 1

        # shift mask to the left so it aligns where the last
        # iteration ended off
        shift = 16 - amount - pos
        mask = mask << shift

        # update location in the word
        pos += amount

        # extract the relevant bits
        bits = word & mask

        # shift back and insert into the list to be returned
        nums.append(bits >> shift)

    assert pos == 16, 'expected to split exactly one word'
    return nums
556a389bb673af12a8b11d8381914bf56f7e0599
11,191
def global_tracer(ot_tracer): """A function similar to one OpenTracing users would write to initialize their OpenTracing tracer. """ set_global_tracer(ot_tracer) return ot_tracer
87207d92179a0b23f20806e3e93ec7e78b1b31f1
11,192
import requests


def getListProjectsInGroup(config, grp):
    """
    Get the list of projects in a group.
    """
    print("Retrieve projects of group: %s " % grp.name)
    data = None
    __prjLst = gitlabProjectList(grp)
    if (DUMMY_DATA):
        testFile = getFullFilePath(ISSUES_GRP_TEST_FILE)
        with open(testFile, 'rt') as f:
            data = f.read()
    else:
        # retrieve data from the server, following X-Next-Page pagination
        url = getApiUrl(config, "groups/%s/projects" % grp.id)
        logD("URL " + url)
        hdrs = {"PRIVATE-TOKEN": config.getToken()}
        __totalPage = 0
        __page = 1
        while True:
            logD("Page %d" % (__page))
            params = {'page': __page}
            logD("header %s" % hdrs)
            resp = requests.get(url, headers=hdrs, params=params)
            logD("resp status_code %s" % resp.status_code)
            if (resp.status_code == 200):
                data = resp.content
                logD(resp.headers)
                next_page = resp.headers.get('X-Next-Page')
                if next_page:
                    __page = int(next_page)
                else:
                    __page = 0
                logD("next page %d" % (__page))
            else:
                __page = 0
                break

            if (data is not None) and len(data) > 0:
                logD("data %s" % data)
                __prjLst.parseData(data)
            __totalPage += 1

            if (config.getMaxProject() is not None) and (__prjLst.getLen() >= config.getMaxProject()):
                print("Reach max %s/%s" % (__prjLst.getLen(), config.getMaxProject()))
                break
            if (__page == 0):
                # reached the last page, stop
                break
            if (__totalPage > 500):
                # 500 pages is suspicious; bail out to avoid an endless loop
                print("SOMETHING WRONG, total is too big, out")
                break
        print("Total pages %d" % (__totalPage))
    return __prjLst
1c926e8b855cba502229ab1c31c9706c20882a1c
11,193
import numpy as np


def group_naptan_datatypes(gdf, naptan_column='LocalityName'):
    """Group the NaPTAN dataset into subsets keyed by the given naptan
    column, reducing each group to its centroid.

    Args:
        gdf (GeoDataFrame): the NaPTAN stop data.
        naptan_column (str, optional): column to group by.
            Defaults to 'LocalityName'.

    Returns:
        GeoDataFrame: one row per group, with a centroid geometry.
    """
    # collapse dataset to the minimum set of possibly useful columns
    gdf2 = gdf[['LocalityName', 'NptgLocalityCode', 'AreaName',
                'StopAreaCode', 'Latitude', 'Longitude']]
    # calculate the centroid of each given naptan segment
    gdf3 = gdf2.groupby([naptan_column], as_index=False)[
        ['Latitude', 'Longitude']].apply(lambda x: np.mean(x, axis=0))
    # convert the lat/lon into centroid geometry points
    # (geo is the project's geometry helper module)
    gdf4 = geo.calculate_naptan_geometry(gdf3)
    # save output to csv
    gdf4.to_csv(f'{naptan_column}.csv', encoding='utf-8', sep=',')
    return gdf4
d4cca1180f1b3d6622c7c2fd5df1cdd1b369c5b3
11,194
def get_facts_by_name_and_value(api_url=None, fact_name=None, fact_value=None,
                                verify=False, cert=list()):
    """
    Returns facts by name and value

    :param api_url: Base PuppetDB API url
    :param fact_name: Name of fact
    :param fact_value: Value of fact
    :param verify: Whether to verify the server's TLS certificate
    :param cert: Client certificate/key pair, if required
    """
    return utils._make_api_request(api_url,
                                   '/facts/{0}/{1}'.format(fact_name, fact_value),
                                   verify, cert)
bdce7473bff944609ffdb191948b008ca4a1422a
11,195
def produce_phase(pipeline_run):
    """Extract the metric, context and normalized score from a pipeline
    run's Produce phase."""
    scores = pipeline_run['run']['results']['scores']

    if len(scores) > 1:
        raise ValueError('This run has more than one score!')

    scores = scores[0]

    return {
        'metric': scores['metric']['metric'],
        'context': pipeline_run['context'],
        'normalized_score': scores['normalized']
    }
7ed003281eac240a407dac1d03a5e3f5a6e5b2cd
11,196
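A minimal usage sketch for produce_phase with a hand-built run document (field values are illustrative):

pipeline_run = {
    'context': 'TESTING',
    'run': {'results': {'scores': [
        {'metric': {'metric': 'F1_MACRO'}, 'normalized': 0.87}
    ]}},
}
print(produce_phase(pipeline_run))
# {'metric': 'F1_MACRO', 'context': 'TESTING', 'normalized_score': 0.87}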
import json
from unittest.mock import AsyncMock, MagicMock, patch


async def init_integration(hass: HomeAssistant, use_nickname=True) -> MagicMock:
    """Set up the Mazda Connected Services integration in Home Assistant and
    return the mocked API client."""
    get_vehicles_fixture = json.loads(load_fixture("mazda/get_vehicles.json"))
    if not use_nickname:
        get_vehicles_fixture[0].pop("nickname")
    get_vehicle_status_fixture = json.loads(
        load_fixture("mazda/get_vehicle_status.json")
    )

    config_entry = MockConfigEntry(domain=DOMAIN, data=FIXTURE_USER_INPUT)
    config_entry.add_to_hass(hass)

    client_mock = MagicMock(
        MazdaAPI(
            FIXTURE_USER_INPUT[CONF_EMAIL],
            FIXTURE_USER_INPUT[CONF_PASSWORD],
            FIXTURE_USER_INPUT[CONF_REGION],
            aiohttp_client.async_get_clientsession(hass),
        )
    )
    client_mock.get_vehicles = AsyncMock(return_value=get_vehicles_fixture)
    client_mock.get_vehicle_status = AsyncMock(return_value=get_vehicle_status_fixture)
    client_mock.lock_doors = AsyncMock()
    client_mock.unlock_doors = AsyncMock()

    with patch(
        "homeassistant.components.mazda.config_flow.MazdaAPI",
        return_value=client_mock,
    ), patch("homeassistant.components.mazda.MazdaAPI", return_value=client_mock):
        assert await hass.config_entries.async_setup(config_entry.entry_id)
        await hass.async_block_till_done()

    return client_mock
96756b011f66786c0c8a8704446546c0751de13f
11,197
def get_app_domain():
    """
    Returns the full URL to the domain. Callers generally append a path
    string to the returned value.
    """
    url = settings.INCOMEPROPERTYEVALUATOR_APP_HTTP_PROTOCOL
    url += settings.INCOMEPROPERTYEVALUATOR_APP_HTTP_DOMAIN
    return url
0a9c58a179c281072104fb5b7859b2d0ef8426ae
11,198