Columns: content (string, lengths 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def value_iteration(model, maxiter=100):
    """
    Solves the supplied environment with value iteration.

    Parameters
    ----------
    model : python object
        Holds information about the environment to solve
        such as the reward structure and the transition dynamics.
    maxiter : int
        The maximum number of iterations to perform.

    Return
    ------
    val_ : numpy array of shape (N, 1)
        Value function of the environment where N is the number
        of states in the environment.
    pi : numpy array of shape (N, 1)
        Optimal policy of the environment.
    """
    # initialize the value function and policy
    pi = np.ones((model.num_states, 1))
    val_ = np.zeros((model.num_states, 1))

    for i in range(maxiter):
        # initialize delta
        delta = 0
        # perform Bellman update for each state
        for state in range(model.num_states):
            # store old value
            tmp = val_[state].copy()
            # compute the value function
            val_[state] = np.max(
                np.sum((model.R[state] + model.gamma * val_) * model.P[state, :, :], 0)
            )
            # find maximum change in value
            delta = np.max((delta, np.abs(tmp - val_[state])))
        # stopping criteria
        if delta <= EPS * (1 - model.gamma) / model.gamma:
            print("Value iteration converged after %d iterations." % i)
            break

    # compute the policy
    for state in range(model.num_states):
        pi[state] = np.argmax(np.sum(val_ * model.P[state, :, :], 0))

    return val_, pi
e04ffc27be47470f466832a14f9ecf9910d18f27
15,125
from typing import Callable
import inspect
import abc


def node(function: Callable):
    """A decorator that registers a function to execute when a node runs"""
    sig = inspect.signature(function)
    args = []
    for (name, param) in sig.parameters.items():
        value = param.default
        if value is inspect.Parameter.empty:
            raise TypeError(f"{name} must have a type (e.g. {name}=InputTable)")
        if inspect.isclass(value) and issubclass(value, _NodeInterfaceEntry):
            if value.__class__ in (type, abc.ABCMeta):
                value = value()
            # noinspection PyCallingNonCallable
            args.append(value(name))
        else:
            raise TypeError(f"{name} is not a valid node parameter type")
    return NodeFunction(function, args)
65ed2c383e1354a0663daef98b78b73382ea65ea
15,126
def mg_refractive(m, mix):
    """Maxwell-Garnett EMA for the refractive index.

    Args:
        m: Tuple of the complex refractive indices of the media.
        mix: Tuple of the volume fractions of the media, len(mix)==len(m)
            (if sum(mix)!=1, these are taken relative to sum(mix))

    Returns:
        The Maxwell-Garnett approximation for the complex refractive index of
        the effective medium

    If len(m)==2, the first element is taken as the matrix and the second as
    the inclusion. If len(m)>2, the media are mixed recursively so that the
    last element is used as the inclusion and the second to last as the
    matrix, then this mixture is used as the last element on the next
    iteration, and so on.
    """
    if len(m) == 2:
        cF = float(mix[1]) / (mix[0]+mix[1]) * \
            (m[1]**2-m[0]**2) / (m[1]**2+2*m[0]**2)
        er = m[0]**2 * (1.0+2.0*cF) / (1.0-cF)
        m = np.sqrt(er)
    else:
        m_last = mg_refractive(m[-2:], mix[-2:])
        mix_last = mix[-2] + mix[-1]
        m = mg_refractive(m[:-2] + (m_last,), mix[:-2] + (mix_last,))
    return m
57712b6abd9b6a5a642767fa91b6212729b697dc
15,127
def locateObjLocation(data, questionDict, questionIdict):
    """
    Locate the object of "where" questions.
    Very naive heuristic: take the noun immediately after "where".
    """
    where = questionDict['where']
    for t in range(data.shape[0] - 1):
        if data[t, 0] == where:
            for u in range(t + 1, data.shape[0]):
                word = questionIdict[data[u, 0] - 1]
                lexname = lookupLexname(word)
                if (lexname is not None and
                        lexname.startswith('noun')) or (lexname is None):
                    return data[u, 0]
    print('not found')
    return data[-1, 0]
4b0b8ff892e7d6fdbd9b1cf9d7a9ce7a50ba90c2
15,128
from typing import Union
from typing import List


def mkshex(shapes: Union[CSVShape, List[CSVShape]]) -> Schema:
    """Convert list of csv2shape Shapes to ShExJSG Schema object."""
    # pylint: disable=invalid-name
    # One- and two-letter variable names do not conform to snake-case naming style
    if isinstance(shapes, CSVShape):
        shapes = [shapes]
    schema_shexjsg = Schema()
    for s in shapes:
        shape_id = IRIREF(s.shapeID)
        if s.start:
            if schema_shexjsg.start:
                print(f"Multiple start shapes: <{schema_shexjsg.start}>, <{shape_id}>")
            else:
                schema_shexjsg.start = shape_id
        shape = Shape(id=shape_id)
        for csv_tc in s.tc_list:
            add_triple_constraint(shape, csv_tc)
        if not schema_shexjsg.shapes:
            schema_shexjsg.shapes = [shape]
        else:
            schema_shexjsg.shapes.append(shape)
    return schema_shexjsg
3cc83d3a23ca982f30c6b4b64553801e404ef1b3
15,130
def get_unsigned_js_val(abs_val: int, max_unit: int, abs_limit: int) -> int:
    """Get unsigned remapped joystick value in reverse range (for example, if
    the limit is 2000 and the input value is also 2000, the value returned
    will be 1. With the same limit, if the input value is 1, the output value
    will be 2000. The same applies to the values in between). This evenly
    divides the value so that the maximum joystick range is remapped to a
    value in the range of the specified limit.

    abs_val - The current joystick value
    max_unit - The maximum value to remap the joystick value
    abs_limit - The maximum range of the joystick
    """
    inc = abs_limit / max_unit

    # ignoring signs to keep results positive
    if abs_val > 0:
        abs_val *= -1

    val = int((abs_val / inc) + max_unit)

    # if the value is zero, return 1 (maximum range)
    if val == 0:
        val = 1

    return val
6e77d76423ffeef756291924d00cbdbb2c03cc07
15,131
def to_xyz(struct, extended_xyz: bool = True, print_stds: bool = False, print_forces: bool = False, print_max_stds: bool = False, print_energies: bool = False, predict_energy=None, dft_forces=None, dft_energy=None, timestep=-1, write_file: str = '', append: bool = False, labels=None) -> str: """ Function taken from the FLARE python package by Vandermause et al. at: https://github.com/mir-group/flare Reference: Vandermause, J., Torrisi, S. B., Batzner, S., Xie, Y., Sun, L., Kolpak, A. M. & Kozinsky, B. On-the-fly active learning of interpretable Bayesian force fields for atomistic rare events. npj Comput Mater 6, 20 (2020). https://doi.org/10.1038/s41524-020-0283-z Convenience function which turns a structure into an extended .xyz file; useful for further input into visualization programs like VESTA or Ovito. Can be saved to an output file via write_file. :param print_stds: Print the stds associated with the structure. :param print_forces: :param extended_xyz: :param print_max_stds: :param write_file: :return: """ species_list = [Z_to_element(x) for x in struct.coded_species] xyz_str = '' xyz_str += f'{len(struct.coded_species)} \n' # Add header line with info about lattice and properties if extended # xyz option is called. if extended_xyz: cell = struct.cell xyz_str += f'Lattice="{cell[0,0]} {cell[0,1]} {cell[0,2]}' xyz_str += f' {cell[1,0]} {cell[1,1]} {cell[1,2]}' xyz_str += f' {cell[2,0]} {cell[2,1]} {cell[2,2]}"' if timestep > 0: xyz_str += f' Timestep={timestep}' if predict_energy: xyz_str += f' PE={predict_energy}' if dft_energy is not None: xyz_str += f' DFT_PE={dft_energy}' xyz_str += ' Properties=species:S:1:pos:R:3' if print_stds: xyz_str += ':stds:R:3' stds = struct.stds if print_forces: xyz_str += ':forces:R:3' forces = struct.forces if print_max_stds: xyz_str += ':max_std:R:1' stds = struct.stds if labels: xyz_str += ':tags:R:1' clustering_labels = struct.local_energy_stds if print_energies: if struct.local_energies is None: print_energies = False else: xyz_str += ':local_energy:R:1' local_energies = struct.local_energies if dft_forces is not None: xyz_str += ':dft_forces:R:3' xyz_str += '\n' else: xyz_str += '\n' for i, pos in enumerate(struct.positions): # Write positions xyz_str += f"{species_list[i]} {pos[0]} {pos[1]} {pos[2]}" # If extended XYZ: Add in extra information if print_stds and extended_xyz: xyz_str += f" {stds[i,0]} {stds[i,1]} {stds[i,2]}" if print_forces and extended_xyz: xyz_str += f" {forces[i,0]} {forces[i,1]} {forces[i,2]}" if print_energies and extended_xyz: xyz_str += f" {local_energies[i]}" if print_max_stds and extended_xyz: xyz_str += f" {np.max(stds[i,:])} " if labels and extended_xyz: xyz_str += f" {clustering_labels[i]} " if dft_forces is not None: xyz_str += f' {dft_forces[i, 0]} {dft_forces[i,1]} ' \ f'{dft_forces[i, 2]}' if i < (len(struct.positions) - 1): xyz_str += '\n' # Write to file, optionally if write_file: if append: fmt = 'a' else: fmt = 'w' with open(write_file, fmt) as f: f.write(xyz_str) f.write("\n") return xyz_str
729a5429d2c6b4cc0c63462577b91a582bf197ed
15,132
def load_file(filename: str):
    """Load the .xls file and return as a dataframe object."""
    df = pd.read_csv(filename, delimiter='\t')
    return df
09a7f6abc67bf80651dffe5d7698798f5dfc5be8
15,133
def loadRegexList(regexListFile):
    """Returns regexList, registries, internetSources"""
    regexList = []
    registries = set()
    internetSourceTypes = set()
    libLF.log('Loading regexes from {}'.format(regexListFile))
    with open(regexListFile, 'r') as inStream:
        for line in inStream:
            line = line.strip()
            if len(line) == 0:
                continue
            try:
                # Build the Regex
                regex = libLF.Regex()
                regex.initFromNDJSON(line)
                regexList.append(regex)
                registries = registries.union(regex.registriesUsedIn())
                internetSourceTypes = internetSourceTypes.union(regex.internetSourcesAppearedIn())
            except KeyboardInterrupt:
                raise
            except BaseException as err:
                libLF.log('Exception parsing line:\n  {}\n  {}'.format(line, err))
    libLF.log('Loaded {} Regex\'es'.format(len(regexList)))
    return regexList, list(registries), list(internetSourceTypes)
7a3fe4c269aa4c868684384417f3c1d0229fcad8
15,134
def _decode_and_center_crop(image_bytes, image_size, resize_method=None):
    """Crops to center of image with padding then scales image_size."""
    shape = tf.shape(image_bytes)
    image_height = shape[0]
    image_width = shape[1]

    padded_center_crop_size = tf.cast(
        ((image_size / (image_size + CROP_PADDING)) *
         tf.cast(tf.minimum(image_height, image_width), tf.float32)),
        tf.int32)

    offset_height = ((image_height - padded_center_crop_size) + 1) // 2
    offset_width = ((image_width - padded_center_crop_size) + 1) // 2
    image = tf.image.crop_to_bounding_box(image_bytes, offset_height, offset_width,
                                          padded_center_crop_size, padded_center_crop_size)
    image = _resize_image(image, image_size, resize_method)
    return image
933bfb91a84f9fe403adf9cbfc9efeb57d1e50f0
15,135
import typing

import numpy


def match_beacons_translate_only(
        sensor_a_beacons: typing.Set[typing.Tuple[int, int, int]],
        sensor_b_beacons: numpy.ndarray,
        min_matching: int,
) -> typing.Optional[numpy.ndarray]:
    """
    Search for matching beacons between `sensor_a_beacons` and `sensor_b_beacons`,
    assuming their orientation matches.
    Returns either the offset of sensor_b relative to sensor_a,
    or None if no 12 matching beacons were found.
    """
    # naive approach: full search
    for beacon_a in sensor_a_beacons:
        for beacon_b_num in range(sensor_b_beacons.shape[0]):
            # assume sensor_a_beacons[beacon_a_num] is the same beacon as
            # sensor_b_beacons[beacon_b_num]
            sensor_b_relative_to_sensor_a = beacon_a - sensor_b_beacons[beacon_b_num]
            sensor_b_beacons_relative_to_sensor_a = sensor_b_beacons + sensor_b_relative_to_sensor_a
            m = num_matching_beacons(sensor_a_beacons, sensor_b_beacons_relative_to_sensor_a)
            if m >= min_matching:
                return sensor_b_relative_to_sensor_a
    return None
0fd12c05d9ee159e301c7bad9d1ddcaa3009a960
15,136
def txm_log():
    """
    Return the logger.
    """
    return __log__
3b03daf2075549dc4d333e5a47d8e9a1cef21152
15,137
def remove_list_by_name(listslist, name):
    """
    Finds a list in a lists of lists by its name, removes and returns it.

    :param listslist: A list of Twitter lists.
    :param name: The name of the list to be found.
    :return: The list with the name, if it was found. None otherwise.
    """
    for i in range(len(listslist)):
        if listslist[i].name == name:
            return listslist.pop(i)
356a7d12f3b2af9951327984ac6d55ccb844bf72
15,138
import math


def song_clicks_metric(ranking):
    """
    Spotify p
    :param ranking:
    :return:
    """
    if 1 in ranking:
        first_idx = ranking.index(1)
        return math.floor(first_idx / 10)
    return 51


@staticmethod
def print_subtest_results(sub_test_names, metric_names, results):
    (num_subtest, num_metrics) = results.shape
    print('{0: <15}'.format("Subtest"), "\t", end="")
    for i in range(num_metrics):
        print(metric_names[i], "\t", end="")
    print()
    for st in range(num_subtest):
        print('{0: <15}'.format(sub_test_names[st]), "\t", end="")
        for m in range(num_metrics):
            print(np.round(results[st][m], decimals=3), "\t", end="")
        print()


@staticmethod
def print_overall_results(metric_names, results):
    print('{0: <15}'.format(""), "\t", end="")
    for i in range(len(metric_names)):
        print(metric_names[i], "\t", end="")
    print()
    print('{0: <15}'.format("Overall"), "\t", end="")
    for m in range(len(metric_names)):
        print(np.round(results[m], decimals=3), "\t", end="")
    print()
ec6400e7929a2ab0f7f691fffa0ecb3be039b012
15,139
import copy


def merge_reports(master: dict, report: dict):
    """
    Merge classification reports into a master list
    """
    keys = master.keys()
    ret = copy.deepcopy(master)
    for key in keys:
        scores = report[key]
        for score, value in scores.items():
            ret[key][score] += [value]
    return ret
3ac633c38a8bb73a57841138cba8cbb80091cf04
15,140
def multi_layer_images(): """ Returns complex images (with sizes) for push and pull testing. """ # Note: order is from base layer down to leaf. layer1_bytes = layer_bytes_for_contents( "layer 1 contents", mode="", other_files={"file1": "from-layer-1",} ) layer2_bytes = layer_bytes_for_contents( "layer 2 contents", mode="", other_files={"file2": "from-layer-2",} ) layer3_bytes = layer_bytes_for_contents( "layer 3 contents", mode="", other_files={"file1": "from-layer-3", "file3": "from-layer-3",} ) layer4_bytes = layer_bytes_for_contents( "layer 4 contents", mode="", other_files={"file3": "from-layer-4",} ) layer5_bytes = layer_bytes_for_contents( "layer 5 contents", mode="", other_files={"file4": "from-layer-5",} ) return [ Image( id="layer1", bytes=layer1_bytes, parent_id=None, size=len(layer1_bytes), config={"internal_id": "layer1"}, ), Image( id="layer2", bytes=layer2_bytes, parent_id="layer1", size=len(layer2_bytes), config={"internal_id": "layer2"}, ), Image( id="layer3", bytes=layer3_bytes, parent_id="layer2", size=len(layer3_bytes), config={"internal_id": "layer3"}, ), Image( id="layer4", bytes=layer4_bytes, parent_id="layer3", size=len(layer4_bytes), config={"internal_id": "layer4"}, ), Image( id="someid", bytes=layer5_bytes, parent_id="layer4", size=len(layer5_bytes), config={"internal_id": "layer5"}, ), ]
08b35fa4202a7d25ec415ed3b6d1ae6a9f37fd9c
15,142
from typing import Iterable


def user_teams(config: Config, email: str) -> Iterable[Team]:
    """Return the teams a user is expected to be a member of.

    Only the teams in which the user is a direct member are returned.
    The ancestors of these teams are not returned.
    """
    names = config.by_member.get(email)
    if not names:
        return []
    return (_get_team_exists(config, x) for x in names)
669ec82b68e8e530dafeee4e272c85743bde7db1
15,143
import torch


def se3_transform(g, a, normals=None):
    """ Applies the SE3 transform

    Args:
        g: SE3 transformation matrix of size ([1,] 3/4, 4) or (B, 3/4, 4)
        a: Points to be transformed (N, 3) or (B, N, 3)
        normals: (Optional). If provided, normals will be transformed

    Returns:
        transformed points of size (N, 3) or (B, N, 3)
    """
    R = g[..., :3, :3]  # (B, 3, 3)
    p = g[..., :3, 3]  # (B, 3)

    if len(g.size()) == len(a.size()):
        b = torch.matmul(a, R.transpose(-1, -2)) + p[..., None, :]
    else:
        raise NotImplementedError
        b = R.matmul(a.unsqueeze(-1)).squeeze(-1) + p  # No batch. Not checked

    if normals is not None:
        rotated_normals = normals @ R.transpose(-1, -2)
        return b, rotated_normals
    else:
        return b
9d8ca31dd6df6382e6a45fb80f30b61e9902da5c
15,144
def summarize_single_OLS(regression, col_dict, name, is_regularized=False):
    """Return dataframe aggregating over-all stats from a dictionary-like object
    containing OLS result objects."""
    reg = regression
    try:
        col_dict['rsquared'][name] = reg.rsquared
    except AttributeError:
        col_dict['rsquared'][name] = 'NA'

    try:
        col_dict['rsquared_adj'][name] = reg.rsquared_adj
    except AttributeError:
        col_dict['rsquared_adj'][name] = 'NA'

    col_dict['f_pvalue'][name] = reg.f_pvalue
    col_dict['condition_number'][name] = reg.condition_number
    col_dict['regularized'][name] = is_regularized

    if not is_regularized:
        outliers = reg.outlier_test(method='fdr_bh')['fdr_bh(p)'] <= 0.05
        col_dict['n_outliers'][name] = (outliers).sum()
        col_dict['outliers'][name] = ','.join(outliers.index[outliers].values)
    else:
        col_dict['n_outliers'][name] = "NA"
        col_dict['outliers'][name] = "NA"

    col_dict['aic'][name] = reg.aic
    return col_dict
b7dd8dfac6cf1b743491ae4e1abfc20fb73e8f31
15,145
def simplify(polynom):
    """Simplifies a function with binary variables
    """
    polynom = Poly(polynom)
    new_polynom = 0
    variables = list(polynom.free_symbols)
    for var_i in variables:
        coefficient_i = polynom.as_expr().coeff(var_i) / 2
        coefficient_i += polynom.as_expr().coeff(var_i ** 2)
        new_polynom += coefficient_i.as_coefficients_dict()[1] * var_i
        for var_j in variables:
            if var_j != var_i:
                coefficient_j = coefficient_i.coeff(var_j)
                new_polynom += coefficient_j.as_coefficients_dict()[1] * \
                    var_i * var_j
    return new_polynom + polynom.as_expr().as_coefficients_dict()[1]
62647c9a7530df8b73644e7af96b77b06bfb5285
15,146
def process_login():
    """Log user into site.

    Find the user's login credentials located in the 'request',
    look up the user, and store them in the session.
    """
    user_login = request.get_json()

    if crud.get_user_by_email(user_login['email']):
        current_user = crud.get_user_by_email(user_login['email'])
        print(current_user)
        if current_user.password == user_login['password']:
            session['user'] = current_user.user_name
            flash("You've logged in successfully. Welcome to your Shelve-It account.")
            return jsonify({'status': "ok. you are logged in!",
                            "user": current_user.user_name})
        else:
            session['user'] = 'unknown'
            return jsonify({'status': "incorrect password"})
    else:
        session['user'] = 'needs_to_register'
        flash("No account with that email exists. Please create one or try again")
        return jsonify({'status': "no user with that email"})
39e0498370e06ca3203c1212552ce435b1d047e0
15,147
def is_int(var):
    """
    is this an integer (ie, not a float)?
    """
    return isinstance(var, int)
09924c6ea036fc7ee1add6ccbefc3fb0c9696345
15,148
def returnstringpacket(pkt):
    """Returns a packet as hex string"""
    myString = ""
    for c in pkt:
        myString += "%02x" % c
    return myString
866ef7c69f522d4a2332798bdf97a966740ea0e4
15,149
def GetIndicesMappingFromTree(tree):
    """
    GetIndicesMappingFromTree
    =========================
    reuse bill's idea to give the indexes of all nodes (may they be a sub tree or a single leaf)
    gives a list of indices of every sublist. To do that, I add one thing:
    the last element of an index is the length of the present list.

    e.g.
    - get_indices_mapping_from_tree([1,2,3,4,5,6,7,8,9])
      gives: [([0], 9)]
    - get_indices_mapping_from_tree([1,[2,3],4,5,6,7,8,9])
      gives: [([0], 8), ([1], 2)]
    - get_indices_mapping_from_tree([1,[2,3,7],4,5,6,7,8,9])
      gives: [([0], 8), ([1], 3)]
    - get_indices_mapping_from_tree([1,[2,3,7],4,[5,[6,[7,8,9]]]])
      gives: [([0], 4), ([1], 3), ([3], 2), ([3, 1], 2), ([3, 1, 1], 3)]

    @param tree: a nested list representing a tree
    @return: a nested list representing the indexes of the nested lists by depth
    """
    q = deque([([], tree)])
    list_of_index_lists = [([0], len(tree))]
    while q:
        (indices, sub_tree) = q.popleft()
        list_of_index_lists.append((indices, len(sub_tree)))
        for (ordinal, sst) in enumerate(sub_tree[1:]):
            if isinstance(sst, list):
                idxs = indices[:]
                idxs.append(ordinal + 1)
                q.append((idxs, sst))
    list_of_index_lists.pop(1)
    return list_of_index_lists
d18e85943273a1f4a75951f3f3fda176853b06e0
15,150
import random


def generate_pairwise(params, n_comparisons=10):
    """Generate pairwise comparisons from a Bradley--Terry model.

    This function samples comparison pairs independently and uniformly at
    random over the ``len(params)`` choose 2 possibilities, and samples the
    corresponding comparison outcomes from a Bradley--Terry model parametrized
    by ``params``.

    Parameters
    ----------
    params : array_like
        Model parameters.
    n_comparisons : int
        Number of comparisons to be returned.

    Returns
    -------
    data : list of (int, int)
        Pairwise-comparison samples (see :ref:`data-pairwise`).
    """
    n = len(params)
    items = tuple(range(n))
    params = np.asarray(params)
    data = list()
    for _ in range(n_comparisons):
        # Pick the pair uniformly at random.
        a, b = random.sample(items, 2)
        if compare((a, b), params) == a:
            data.append((a, b))
        else:
            data.append((b, a))
    return tuple(data)
96bea4a192d81eaf9a43f8ae493187d826dcdb21
15,153
def render_json(fun):
    """
    Decorator for views which return a dictionary that encodes the dictionary
    into a JSON string and sets the mimetype of the response to
    application/json.
    """
    @wraps(fun)
    def wrapper(request, *args, **kwargs):
        response = fun(request, *args, **kwargs)
        try:
            return JSONResponse(response)
        except TypeError:
            # The response isn't JSON serializable.
            return response
    return wrapper
15984f0fe7a6a5fbc5a6c9b360bb2780854868b4
15,154
from typing import List


def count_smileys_concise(arr: List[str]) -> int:
    """
    Another person's implementation.
    Turns the list into a string, then uses findall() on that string.
    Turning the result into a list makes it possible to return the length of that list.
    So this version is more concise, but uses more space.

    O(n) where n is length of arr.
    """
    return len(list(findall(r"[:;][-~]?[)D]", " ".join(arr))))
8fbd353422cbac9840294af3d0a6022d8a45e4e1
15,155
def transform_dead_op_vars(graph, translator=None):
    """Remove dead operations and variables that are passed over a link
    but not used in the target block. Input is a graph."""
    return transform_dead_op_vars_in_blocks(list(graph.iterblocks()),
                                            [graph], translator)
c10fde9cca58732bf6bd17018e963ee629a1796d
15,156
import torch def test_reconstruction_torch(): """Test that input reconstruction via backprop has decreasing loss.""" if skip_all: return None if run_without_pytest else pytest.skip() if cant_import('torch'): return None if run_without_pytest else pytest.skip() device = 'cuda' if torch.cuda.is_available() else 'cpu' J = 6 Q = 8 N = 1024 n_iters = 30 jtfs = TimeFrequencyScattering1D(J, N, Q, J_fr=4, average_fr=False, frontend='torch', out_type='array', sampling_filters_fr=('exclude', 'resample'), max_pad_factor=1, max_pad_factor_fr=2, pad_mode_fr='conj-reflect-zero', ).to(device) y = torch.from_numpy(echirp(N, fmin=1).astype('float32')).to(device) Sy = jtfs(y) div = Sy.max() Sy /= div torch.manual_seed(0) x = torch.randn(N, device=device) x /= torch.max(torch.abs(x)) x.requires_grad = True optimizer = torch.optim.SGD([x], lr=140000, momentum=.9, nesterov=True) loss_fn = torch.nn.MSELoss() losses, losses_recon = [], [] for i in range(n_iters): optimizer.zero_grad() Sx = jtfs(x) Sx /= div loss = loss_fn(Sx, Sy) loss.backward() optimizer.step() losses.append(float(loss.detach().cpu().numpy())) xn, yn = x.detach().cpu().numpy(), y.detach().cpu().numpy() losses_recon.append(float(rel_l2(yn, xn))) # unsure why CPU's worse th = 1e-5 if device == 'cuda' else 2e-5 th_end_ratio = 50 if device == 'cuda' else 30 th_recon = 1.05 end_ratio = losses[0] / losses[-1] assert end_ratio > th_end_ratio, end_ratio assert min(losses) < th, "{:.2e} > {}".format(min(losses), th) assert min(losses_recon) < th_recon, "{:.2e} > {}".format(min(losses_recon), th_recon) if metric_verbose: print(("\nReconstruction (torch):\n(end_start_ratio, min_loss, " "min_loss_recon) = ({:.1f}, {:.2e}, {:.2f})").format( end_ratio, min(losses), min(losses_recon)))
070e7e52ce44c2a875a7ad418ffb985a1827d8c6
15,157
def to_pixels(Hinv, loc):
    """
    Given H^-1 and (x, y, z) in world coordinates,
    returns (c, r) in image pixel indices.
    """
    loc = to_image_frame(Hinv, loc).astype(int)
    return (loc[1], loc[0])
09dff4d2045c64d753aa8229f44f049f1a6936c3
15,158
def calc_distance_between_points_two_vectors_2d(v1, v2):
    """calc_distance_between_points_two_vectors_2d [pairwise distance between vectors points]

    Arguments:
        v1 {[np.array]} -- [description]
        v2 {[type]} -- [description]

    Raises:
        ValueError -- [description]
        ValueError -- [description]
        ValueError -- [description]

    Returns:
        [type] -- [description]

    testing:
    >>> v1 = np.zeros((2, 5))
    >>> v2 = np.zeros((2, 5))
    >>> v2[1, :] = [0, 10, 25, 50, 100]
    >>> d = calc_distance_between_points_two_vectors_2d(v1.T, v2.T)
    """
    # Check dataformats
    if not isinstance(v1, np.ndarray) or not isinstance(v2, np.ndarray):
        raise ValueError("Invalid argument data format")
    if not v1.shape[1] == 2 or not v2.shape[1] == 2:
        raise ValueError("Invalid shape for input arrays")
    if not v1.shape[0] == v2.shape[0]:
        raise ValueError("Error: input arrays should have the same length")

    # Calculate distance
    if v1.shape[1] < 20000 and v1.shape[0] < 20000:
        # For short vectors use cdist
        dist = distance.cdist(v1, v2, "euclidean")
        dist = dist[:, 0]
    else:
        dist = [calc_distance_between_points_2d(p1, p2) for p1, p2 in zip(v1, v2)]
    return dist
75d00fae9dbe8353e1b53d12428de054e267a528
15,161
import heapq
import random


def get_nearest_list_index(node_list, guide_node):
    """
    Finds nearest nodes among node_list, using the metric given by
    weighted_norm and chooses one of them at random.

    Parameters
    ----------
    node_list : list
        list of nodes corresponding to one of the two search trees
        growing towards each other.
    guide_node : dict
        node that has been randomly chosen to expand towards

    Returns
    -------
    min_ind : int
        index of the chosen node
    min_dist_choice : float
        distance between the chosen node and the guide_node
    """
    k_nearest = int(len(node_list) / 100) + 1
    dlist = [weighted_norm(node, guide_node) for node in node_list]
    k_min_dist_list = heapq.nsmallest(k_nearest, dlist)
    min_dist_choice = random.choice(k_min_dist_list)
    min_ind = dlist.index(min_dist_choice)
    return min_ind, min_dist_choice
7d8a373a589e87dc04f72150424685c088b535fb
15,162
def get_extensions_from_dir(path: str) -> list[str]:
    """Gets all files that end with ``.py`` in a directory and returns a python dotpath."""
    dirdotpath = ".".join(path.split(sep)[1:])  # we ignore the first part because we don't want to add the ``./``.
    return [f"{dirdotpath}.{file}" for file in listdir(path) if file.endswith(".py")]
c5a12241270f970733c055493534c7f5e8548fd2
15,163
def to_normalized_exacta_dividends(x, scr=-1):
    """ Convert 2-d representation of probabilities to dividends
    :param x:
    :param scr:
    :return:
    """
    fx = to_normalized_dividends(to_flat_exacta(x), scr=scr)
    return from_flat_exacta(fx, diag_value=scr)
1c216908752326333185da7d21e7657f722e20f1
15,164
def CSVcreation():
    """This function allows access to the page for the creation of CSVs"""
    if "logged_in" in session and session["logged_in"] == True:
        print("User login", session["username"])
        try:
            count1 = managedb.getCountLoginDB(session["username"])
            if count1 == 0:
                return redirect(url_for('index'))
            return render_template('CSVcreation.html', data=data)
        except Exception as e:
            print("Error DB:", str(e))
            return redirect(url_for('index'))
    return redirect(url_for('index'))
33af7221ab77d8d0d40b60d220ce8e59ba728f0f
15,165
import types


def generate_copies(func, phis):
    """
    Emit stores to stack variables in predecessor blocks.
    """
    builder = Builder(func)
    vars = {}
    loads = {}

    # Allocate a stack variable for each phi
    builder.position_at_beginning(func.startblock)
    for block in phis:
        for phi in phis[block]:
            vars[phi] = builder.alloca(types.Pointer(phi.type))

    # Generate loads in blocks containing the phis
    for block in phis:
        leaders = list(block.leaders)
        last_leader = leaders[-1] if leaders else block.head
        builder.position_after(last_leader)
        for phi in phis[block]:
            loads[phi] = builder.load(vars[phi])

    # Generate copies (store to stack variables)
    for block in phis:
        for phi in phis[block]:
            preds, args = phi.args
            var = vars[phi]
            phi_args = [loads.get(arg, arg) for arg in args]
            for pred, arg in zip(preds, phi_args):
                builder.position_before(pred.terminator)
                builder.store(arg, var)

    # Replace phis
    for block in phis:
        for phi in phis[block]:
            phi.replace_uses(loads[phi])
            phi.delete()

    return vars, loads
5ee76907970dea569c34d3bd4a5f57456bed7eb4
15,167
from typing import Sequence


def calculate_dv(wave: Sequence):
    """
    Given a wavelength array, calculate the minimum ``dv`` of the array.

    Parameters
    ----------
    wave : array-like
        The wavelength array

    Returns
    -------
    float
        delta-v in units of km/s
    """
    return C.c_kms * np.min(np.diff(wave) / wave[:-1])
8e29af2644a97948330a4a5fcaaeb2e49ddad831
15,168
def check_link_errors(*args, visit=(), user="user", **kwargs):
    """
    Crawl site starting from the given base URL and raise an error if the
    resulting error dictionary is not empty.

    Notes:
        Accept the same arguments of the :func:`crawl` function.
    """
    errors, visited = crawl(*args, **kwargs)
    for url in visit:
        if url not in visited:
            errors[url] = f"URL was not visited by {user}"
    if errors:
        for url, code in errors.items():
            if isinstance(code, int):
                print(f"URL {url} returned invalid status code: {code}")
            else:
                print(f"Invalid URL {url} encountered at {code}")
        raise AssertionError(errors, visited)
    return visited
571b03e555894560128530c6e751c50a4aed0e21
15,169
def cart3_to_polar2(xyz_array):
    """
    Convert 3D cartesian coordinates into 2D polar coordinates.

    This is a simple routine for converting a set of 3D cartesian vectors into
    spherical coordinates, where the position (0, 0) lies along the x-direction.

    Parameters
    ----------
    xyz_array : ndarray of float
        Cartesian coordinates, need not be of unit vector length. Shape is
        (3, coord_shape).

    Returns
    -------
    lon_array : ndarray of float
        Longitude coordinates, which increases in the counter-clockwise direction.
        Units of radians, shape is (coord_shape,).
    lat_array : ndarray of float
        Latitude coordinates, where 0 falls on the equator of the sphere.
        Units of radians, shape is (coord_shape,).
    """
    if not isinstance(xyz_array, np.ndarray):
        raise ValueError("xyz_array must be an ndarray.")
    if xyz_array.ndim == 0:
        raise ValueError("xyz_array must have ndim > 0")
    if xyz_array.shape[0] != 3:
        raise ValueError("xyz_array must be length 3 across the zeroth axis.")

    # The longitude coord is relatively easy to calculate, just take the X and Y
    # components and find the arctan of the pair.
    lon_array = np.mod(np.arctan2(xyz_array[1], xyz_array[0]), 2.0 * np.pi, dtype=float)

    # If we _knew_ that xyz_array was always of length 1, then this call could be a much
    # simpler one to arcsin. But to make this generic, we'll use the length of the XY
    # component along with arctan2.
    lat_array = np.arctan2(
        xyz_array[2], np.sqrt((xyz_array[0:2] ** 2.0).sum(axis=0)), dtype=float
    )

    # Return the two arrays
    return lon_array, lat_array
37220bd026ae48bf5a9914117075a10a51efba5a
15,170
import requests


def deploy_release(rel_id, env_id):
    """deploy_release will start deploying a release to a given environment"""
    uri = config.OCTOPUS_URI + "/api/deployments"
    r = requests.post(uri, headers=config.OCTOPUS_HEADERS, verify=False,
                      json={'ReleaseId': rel_id, 'EnvironmentId': env_id})
    return r.json()
08eae9366a3233704f65a6f952801cdd3ffbe867
15,171
def create_static_route(dut, next_hop=None, static_ip=None, shell="vtysh", family='ipv4',
                        interface=None, vrf=None):
    """
    To create static route
    Author: Prudvi Mangadu ([email protected])

    :param dut:
    :param next_hop:
    :param static_ip:
    :param shell: sonic|vtysh
    :param family: ipv4|ipv6
    :return:
    """
    if not static_ip:
        st.log("Provide static_ip")
        return False
    if shell == "vtysh":
        if family.lower() == "ipv4" or family.lower() == "":
            if next_hop:
                command = "ip route {} {}".format(static_ip, next_hop)
            else:
                command = "ip route {}".format(static_ip)
        elif family.lower() == "ipv6":
            command = "ipv6 route {} {}".format(static_ip, next_hop)
        if interface:
            command += " {}".format(interface)
        if vrf:
            command += " vrf {}".format(vrf)
        st.config(dut, command, type='vtysh')
    else:
        if family.lower() == "ipv4" or family.lower() == "":
            if next_hop:
                command = "ip route add {} via {}".format(static_ip, next_hop)
            else:
                command = "ip route add {}".format(static_ip)
        elif family.lower() == "ipv6":
            if next_hop:
                command = "ip -6 route add {} via {}".format(static_ip, next_hop)
            else:
                command = "ip -6 route add {}".format(static_ip)
        if interface:
            command += " dev {}".format(interface)
        st.config(dut, command)
9097f016eaeb85e9b84351d50cac71d88779b1c1
15,172
def _markfoundfiles(arg, initargs, foundflags):
    """Mark file flags as found."""
    try:
        pos = initargs.index(arg) - 1
    except ValueError:
        pos = initargs.index("../" + arg) - 1

    # In cases where there is a single input file as the first parameter. This
    # should cover cases such as:
    # exec input.file
    # exec input.file > output.file
    if arg == initargs[0]:
        foundflags.append("<")

    # Other cases should pretty much be formats like:
    # exec -flag file -flag file -flag file
    elif (len(initargs) > 1 and initargs[pos][0] == "-"
            and initargs[pos] not in foundflags):
        foundflags.append(initargs[pos])

    # Or cases like exec -flag file -flag file inputfile > outputfile
    elif (len(initargs) > 1 and initargs[pos][0] != "-"
            and initargs[pos] not in foundflags):
        foundflags.append("<")

    return foundflags
e27ca91de403a6364cbebc8ee4ee835a9335dccc
15,173
def part_a(puzzle_input):
    """
    Calculate the answer for part_a.

    Args:
        puzzle_input (list): Formatted as the provided input from the website.

    Returns:
        string: The answer for part_a.
    """
    recipes_to_make = int(''.join(puzzle_input))
    elf_index_1 = 0
    elf_index_2 = 1
    recipies = [3, 7]
    while len(recipies) < recipes_to_make + 10:
        new_recipes = recipies[elf_index_1] + recipies[elf_index_2]
        if new_recipes >= 10:
            recipies.append(1)
            recipies.append(new_recipes - 10)
        else:
            recipies.append(new_recipes)
        elf_index_1 = (elf_index_1 + (recipies[elf_index_1] + 1)) % len(recipies)
        elf_index_2 = (elf_index_2 + (recipies[elf_index_2] + 1)) % len(recipies)
    return ''.join(map(str, recipies[recipes_to_make:recipes_to_make + 10]))
50e1cf923184a15747322528a47bad248c03dfa2
15,174
def _CompareFields(field, other_field):
    """Checks if two ProtoRPC fields are "equal".

    Compares the arguments, rather than the id of the elements (which is
    the default __eq__ behavior) as well as the class of the fields.

    Args:
        field: A ProtoRPC message field to be compared.
        other_field: A ProtoRPC message field to be compared.

    Returns:
        Boolean indicating whether the fields are equal.
    """
    field_attrs = _GetFieldAttributes(field)
    other_field_attrs = _GetFieldAttributes(other_field)
    if field_attrs != other_field_attrs:
        return False
    return field.__class__ == other_field.__class__
d6ce0b7f7caafd17dff188679800dee2dbe8e791
15,175
def loadtxt_rows(filename, rows, single_precision=False):
    """
    Load only certain rows
    """
    # Open the file
    f = open(filename, "r")

    # Storage
    results = {}

    # Row number
    i = 0

    # Number of columns
    ncol = None

    while True:
        # Read the line and split by commas
        line = f.readline()
        cells = line.split(",")

        # Quit when you see a different number of columns
        if ncol is not None and len(cells) != ncol:
            break

        # Non-comment lines
        if cells[0] != "#":
            # If it's the first one, get the number of columns
            if ncol is None:
                ncol = len(cells)

            # Otherwise, include in results
            if i in rows:
                if single_precision:
                    results[i] = np.array([float(cell) for cell in cells],
                                          dtype="float32")
                else:
                    results[i] = np.array([float(cell) for cell in cells])
            i += 1

    results["ncol"] = ncol
    return results
9393cf7df8f24910a81e7d55128164a9bb467d91
15,177
def create_signal(frequencies, amplitudes, number_of_samples, sample_rate):
    """Create a signal of given frequencies and their amplitudes.
    """
    timesamples = arange(number_of_samples) / sample_rate
    signal = zeros(len(timesamples))
    for frequency, amplitude in zip(frequencies, amplitudes):
        signal += amplitude * sin(2*pi*frequency*timesamples)
    return signal, timesamples
58876fd45e96d221220ccc4ad0129cf48912d691
15,178
import logging import requests from datetime import datetime def serp_goog(q, cx, key, c2coff=None, cr=None, dateRestrict=None, exactTerms=None, excludeTerms=None, fileType=None, filter=None, gl=None, highRange=None, hl=None, hq=None, imgColorType=None, imgDominantColor=None, imgSize=None, imgType=None, linkSite=None, lowRange=None, lr=None, num=None, orTerms=None, relatedSite=None, rights=None, safe=None, searchType=None, siteSearch=None, siteSearchFilter=None, sort=None, start=None): """Query Google and get search results in a DataFrame. For each parameter, you can supply single or multiple values / arguments. If you pass multiple arguments, all the possible combinations of arguments (the product) will be requested, and you will get one DataFrame combining all queries. See examples below. :param q: The search expression. :param cx: The custom search engine ID to use for this request. :param key: The API key of your custom search engine. :param c2coff: Enables or disables Simplified and Traditional Chinese Search. The default value for this parameter is 0 (zero), meaning that the feature is enabled. Supported values are:1: Disabled0: Enabled (default) :param cr: Restricts search results to documents originating in a particular country. You may use Boolean operators in the cr parameter's value.Google Search determines the country of a document by analyzing:the top- level domain (TLD) of the document's URLthe geographic location of the Web server's IP addressSee the Country Parameter Values page for a list of valid values for this parameter. :param dateRestrict: Restricts results to URLs based on date. Supported values include:d[number]: requests results from the specified number of past days. - d[number]: requests results from the specified number of past days. - w[number]: requests results from the specified number of past weeks. - m[number]: requests results from the specified number of past months. - y[number]: requests results from the specified number of past years. :param exactTerms: Identifies a phrase that all documents in the search results must contain. :param excludeTerms: Identifies a word or phrase that should not appear in any documents in the search results. :param fileType: Restricts results to files of a specified extension. A list of file types indexable by Google can be found in Search Console Help Center. :param filter: Controls turning on or off the duplicate content filter.See Automatic Filtering for more information about Google's search results filters. Note that host crowding filtering applies only to multi-site searches.By default, Google applies filtering to all search results to improve the quality of those results. Acceptable values are: "0": Turns off duplicate content filter. "1": Turns on duplicate content filter. :param gl: Geolocation of end user. The gl parameter value is a two-letter country code. The gl parameter boosts search results whose country of origin matches the parameter value. See the Country Codes page for a list of valid values.Specifying a gl parameter value should lead to more relevant results. This is particularly true for international customers and, even more specifically, for customers in English- speaking countries other than the United States. :param highRange: Specifies the ending value for a search range.Use lowRange and highRange to append an inclusive search range of lowRange...highRange to the query. :param hl: Sets the user interface language. 
Explicitly setting this parameter improves the performance and the quality of your search results.See the Interface Languages section of Internationalizing Queries and Results Presentation for more information, and Supported Interface Languages for a list of supported languages. :param hq: Appends the specified query terms to the query, as if they were combined with a logical AND operator. :param imgColorType: Returns black and white, grayscale, or color images: mono, gray, and color. Acceptable values are: "color": color "gray": gray "mono": mono :param imgDominantColor: Returns images of a specific dominant color. Acceptable values are: "black": black "blue": blue "brown": brown "gray": gray "green": green "orange": orange "pink": pink "purple": purple "red": red "teal": teal "white": white "yellow": yellow :param imgSize: Returns images of a specified size. Acceptable values are: "huge": huge "icon": icon "large": large "medium": medium "small": small "xlarge": xlarge "xxlarge": xxlarge :param imgType: Returns images of a type. Acceptable values are: "clipart": clipart "face": face "lineart": lineart "news": news "photo": photo :param linkSite: Specifies that all search results should contain a link to a particular URL :param lowRange: Specifies the starting value for a search range. Use lowRange and highRange to append an inclusive search range of lowRange...highRange to the query. :param lr: Restricts the search to documents written in a particular language (e.g., lr=lang_ja). Acceptable values are: "lang_ar": Arabic "lang_bg": Bulgarian "lang_ca": Catalan "lang_cs": Czech "lang_da": Danish "lang_de": German "lang_el": Greek "lang_en": English "lang_es": Spanish "lang_et": Estonian "lang_fi": Finnish "lang_fr": French "lang_hr": Croatian "lang_hu": Hungarian "lang_id": Indonesian "lang_is": Icelandic "lang_it": Italian "lang_iw": Hebrew "lang_ja": Japanese "lang_ko": Korean "lang_lt": Lithuanian "lang_lv": Latvian "lang_nl": Dutch "lang_no": Norwegian "lang_pl": Polish "lang_pt": Portuguese "lang_ro": Romanian "lang_ru": Russian "lang_sk": Slovak "lang_sl": Slovenian "lang_sr": Serbian "lang_sv": Swedish "lang_tr": Turkish "lang_zh- CN": Chinese (Simplified) "lang_zh-TW": Chinese (Traditional) :param num: Number of search results to return.Valid values are integers between 1 and 10, inclusive. :param orTerms: Provides additional search terms to check for in a document, where each document in the search results must contain at least one of the additional search terms. :param relatedSite: Specifies that all search results should be pages that are related to the specified URL. :param rights: Filters based on licensing. Supported values include: cc_publicdomain, cc_attribute, cc_sharealike, cc_noncommercial, cc_nonderived, and combinations of these. :param safe: Search safety level. Acceptable values are: "active": Enables SafeSearch filtering. "off": Disables SafeSearch filtering. (default) :param searchType: Specifies the search type: image. If unspecified, results are limited to webpages. Acceptable values are: "image": custom image search. :param siteSearch: Specifies all search results should be pages from a given site. :param siteSearchFilter: Controls whether to include or exclude results from the site named in the siteSearch parameter. Acceptable values are: "e": exclude "i": include :param sort: The sort expression to apply to the results. :param start: The index of the first result to return.Valid value are integers starting 1 (default) and the second result is 2 and so forth. 
For example &start=11 gives the second page of results with the default "num" value of 10 results per page.Note: No more than 100 results will ever be returned for any query with JSON API, even if more than 100 documents match the query, so setting (start + num) to more than 100 will produce an error. Note that the maximum value for num is 10. The following function call will produce two queries: "hotel" in the USA, and "hotel" in France >>> serp_goog(q='hotel', gl=['us', 'fr'], cx='YOUR_CX', key='YOUR_KEY') The below function call will prouce four queries and make four requests: "fligts" in UK "fligts" in Australia "tickets" in UK "tickets" in Australia 'cr' here refers to 'country restrict', which focuses on content originating from the specified country. >>> serp_goog(q=['flights', 'tickets'], cr=['countryUK', 'countryAU'], cx='YOUR_CX', key='YOUR_KEY') """ params = locals() supplied_params = {k: v for k, v in params.items() if params[k] is not None} for p in supplied_params: if isinstance(supplied_params[p], (str, int)): supplied_params[p] = [supplied_params[p]] for p in supplied_params: if p in SERP_GOOG_VALID_VALS: if not set(supplied_params[p]).issubset(SERP_GOOG_VALID_VALS[p]): raise ValueError('Please make sure you provide a' ' valid value for "{}", valid values:\n' '{}'.format(p, sorted(SERP_GOOG_VALID_VALS[p]))) params_list = _dict_product(supplied_params) base_url = 'https://www.googleapis.com/customsearch/v1?' specified_cols = ['searchTerms', 'rank', 'title', 'snippet', 'displayLink', 'link', 'queryTime', 'totalResults'] responses = [] for param in params_list: param_log = ', '.join([k + '=' + str(v) for k, v in param.items()]) logging.info(msg='Requesting: ' + param_log) resp = requests.get(base_url, params=param) if resp.status_code >= 400: raise Exception(resp.json()) responses.append(resp) result_df = pd.DataFrame() for i, resp in enumerate(responses): request_metadata = resp.json()['queries']['request'][0] del request_metadata['title'] search_info = resp.json()['searchInformation'] if int(search_info['totalResults']) == 0: df = pd.DataFrame(columns=specified_cols, index=range(1)) df['searchTerms'] = request_metadata['searchTerms'] # These keys don't appear in the response so they have to be # added manually for missing in ['lr', 'num', 'start', 'c2coff']: if missing in params_list[i]: df[missing] = params_list[i][missing] else: df = pd.DataFrame(resp.json()['items']) df['cseName'] = resp.json()['context']['title'] start_idx = request_metadata['startIndex'] df['rank'] = range(start_idx, start_idx + len(df)) for missing in ['lr', 'num', 'start', 'c2coff']: if missing in params_list[i]: df[missing] = params_list[i][missing] meta_columns = {**request_metadata, **search_info} df = df.assign(**meta_columns) df['queryTime'] = datetime.datetime.now(tz=datetime.timezone.utc) df['queryTime'] = pd.to_datetime(df['queryTime']) if 'image' in df: img_df = json_normalize(df['image']) img_df.columns = ['image.' 
+ c for c in img_df.columns] df = pd.concat([df, img_df], axis=1) result_df = result_df.append(df, sort=False, ignore_index=True) ordered_cols = (list(set(params_list[i]).difference({'q', 'key', 'cx'})) + specified_cols) non_ordered = result_df.columns.difference(set(ordered_cols)) final_df = result_df[ordered_cols + list(non_ordered)] if 'pagemap' in final_df: pagemap_df = pd.DataFrame() for p in final_df['pagemap']: try: temp_pagemap_df = json_normalize(p) pagemap_df = pagemap_df.append(temp_pagemap_df, sort=False) except Exception as e: temp_pagemap_df = pd.DataFrame({'delete_me': None}, index=range(1)) pagemap_df = pagemap_df.append(temp_pagemap_df, sort=False) pagemap_df = pagemap_df.reset_index(drop=True) if 'delete_me' in pagemap_df: del pagemap_df['delete_me'] for col in pagemap_df: if col in final_df: pagemap_df = pagemap_df.rename(columns={col: 'pagemap_' + col}) final_df = pd.concat([final_df, pagemap_df], axis=1) if 'metatags' in pagemap_df: metatag_df = pd.DataFrame() for m in pagemap_df['metatags']: try: temp_metatags_df = json_normalize(m) metatag_df = metatag_df.append(temp_metatags_df, sort=False) except Exception as e: temp_metatags_df = pd.DataFrame({'delete_me': None}, index=range(1)) metatag_df = metatag_df.append(temp_metatags_df, sort=False) metatag_df = metatag_df.reset_index(drop=True) if 'delete_me' in metatag_df: del metatag_df['delete_me'] for col in metatag_df: if col in final_df: metatag_df = metatag_df.rename(columns={col: 'metatag_' + col}) final_df = pd.concat([final_df, metatag_df], axis=1) return final_df
ca1b32d2795c035aab8578f0dc36f4a8dd503bec
15,179
def get_wiki_modal_data(term):
    """
    runs the wikipedia helper functions and creates the wikipedia data
    ready for the modal
    """
    return_data = False
    summary_data = get_wiki_summary(term=term)
    related_terms = get_similar_search(term=term)

    if summary_data:
        return_data = {
            'wiki_term': term,
            'summary_data': summary_data,
            'related_terms': related_terms
        }
    return return_data
2d19c8ac1b3d261b3866b69a6a70d78ddac0ad0c
15,181
def format_taxa_to_js(otu_coords, lineages, prevalence, min_taxon_radius=0.5, max_taxon_radius=5, radius=1.0): """Write a string representing the taxa in a PCoA plot as javascript Parameters ---------- otu_coords : array_like Numpy array where the taxa is positioned lineages : array_like Label for each of these lineages prevalence : array_like Score of prevalence for each of the taxa that is drawn min_taxon_radius : float, optional Smallest radius for a sphere. max_taxon_radius : float, optional Largest radius for a spehere. radius : float, optional Base radius for a sphere. Outputs ------- str JavaScript string where the taxa information is written to create the spheres representing each of these, will return only the variable declaration if the inputs are empty. Notes ----- These parameters should work more as constants and once we find out that there's a value that is too big to be presented, the proper checks should be put into place. Currently we haven't found such cases in any study* min_taxon_radius: minimum value for the radius of the spheres on the plot max_taxon_radious: maximum value for the radius of the spheres on the plot radius: default value size """ js_biplots_string = [] js_biplots_string.append('\nvar g_taxaPositions = new Array();\n') # if we have prevalence scores, calculate the taxa radii values if len(prevalence): taxa_radii = radius * (min_taxon_radius + (max_taxon_radius - min_taxon_radius) * prevalence) else: taxa_radii = [] index = 0 # write the data in the form of a dictionary for taxa_label, taxa_coord, t_radius in zip(lineages, otu_coords, taxa_radii): js_biplots_string.append("g_taxaPositions['%d'] = { 'lineage': '%s', " "'x': %f, 'y': %f, 'z': %f, 'radius': %f};\n" % (index, taxa_label, taxa_coord[0], taxa_coord[1], taxa_coord[2], t_radius)) index += 1 js_biplots_string.append('\n') # join the array of strings as a single string return ''.join(js_biplots_string)
46052620ee7d4092761e728d78d6ab7b6abb6b45
15,182
def compute_heading(mag_read):
    """
    Computes the compass heading from the magnetometer X and Y.

    Returns a float in degrees between 0 and 360.
    """
    return ((atan2(mag_read[1], mag_read[0]) * 180) / pi) + 180
c160e7a69aa0d4bdfe232f45094e863d0d8dd478
15,184
def ConvertTrieToFlatPaths(trie, prefix=None):
    """Flattens the trie of paths, prepending a prefix to each."""
    result = {}
    for name, data in trie.items():
        if prefix:
            name = prefix + '/' + name

        if len(data) != 0 and not 'results' in data:
            result.update(ConvertTrieToFlatPaths(data, name))
        else:
            result[name] = data

    return result
c226f3c9d72ca04d5dfe3267a92888bc6255d649
15,185
from typing import List
from pathlib import Path


def get_root_version_for_subset_version(root_dataset_path: str,
                                        sub_dataset_version: str,
                                        sub_dataset_path: MetadataPath
                                        ) -> List[str]:
    """
    Get the versions of the root that contains the given
    sub_dataset_version at the given sub_dataset_path, if any exists.
    If the configuration does not exist return an empty iterable.
    """
    root_path = Path(root_dataset_path).resolve()
    current_path = (root_path / sub_dataset_path).resolve()

    # Ensure that the sub-dataset path is under the root-dataset path
    current_path.relative_to(root_path)

    current_version = sub_dataset_version
    current_path = current_path.parent
    while len(current_path.parts) >= len(root_path.parts):
        # Skip intermediate directories, i.e. check only on git
        # repository roots.
        if len(tuple(current_path.glob(".git"))) == 0:
            current_path = current_path.parent
            continue

        current_version = find_version_containing(current_path, current_version)
        if current_version == "":
            return []
        current_path = current_path.parent

    return [current_version]
b77da6b9f35e50e463dfba8cd2d710c357615d36
15,186
def subject() -> JsonCommandTranslator:
    """Get a JsonCommandTranslator test subject."""
    return JsonCommandTranslator()
eed4b66f06a0257b2070e17b7cffa9f9005b6b0d
15,187
import torch


def accuracy(output, target, cuda_enabled=True):
    """
    Compute accuracy.

    Args:
        output: [batch_size, 10, 16, 1] The output from DigitCaps layer.
        target: [batch_size] Labels for dataset.

    Returns:
        accuracy (float): The accuracy for a batch.
    """
    batch_size = target.size(0)

    v_length = torch.sqrt((output**2).sum(dim=2, keepdim=True))
    softmax_v = F.softmax(v_length, dim=1)
    assert softmax_v.size() == torch.Size([batch_size, 10, 1])

    _, max_index = softmax_v.max(dim=1)
    assert max_index.size() == torch.Size([batch_size, 1])

    pred = max_index.view(batch_size)  # max_index.squeeze()
    # assert pred.size() == torch.Size([batch_size])

    if cuda_enabled:
        target = target.cuda()
        pred = pred.cuda()

    correct_pred = torch.eq(target, pred.data)  # tensor
    # correct_pred_sum = correct_pred.sum() # scalar. e.g: 6 correct out of 128 images.
    acc = correct_pred.float().mean()  # e.g: 6 / 128 = 0.046875
    return acc
fc795bf54bfccfeea6bb3e8f1f81aa7282499d39
15,188
import sqlite3


def save_message(my_dict):
    """
    Saves a message if it is not a duplicate.
    """
    conn = sqlite3.connect(DB_STRING)
    # Create a query cursor on the db connection
    queryCurs = conn.cursor()

    if my_dict.get('message_status') == None:
        my_dict['message_status'] = "Unconfirmed"

    queryCurs.execute("SELECT rowid FROM Messages WHERE sender = ? and destination = ? and stamp = ? and hash = ?",
                      (my_dict.get('sender'), my_dict.get('destination'),
                       my_dict.get('stamp'), my_dict.get('hash'),))
    data = queryCurs.fetchone()

    if data == None:
        queryCurs.execute('''INSERT INTO Messages (sender, destination, message, stamp, markdown,
                          encoding, encryption, hashing, hash, decryption_key, file, filename,
                          content_type, message_status) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?)''',
                          (my_dict.get('sender'), my_dict.get('destination'), my_dict.get('message'),
                           my_dict.get('stamp'), my_dict.get('markdown'), my_dict.get('encoding'),
                           my_dict.get('encryption'), my_dict.get('hashing'), my_dict.get('hash'),
                           my_dict.get('decryptionKey'), my_dict.get('file'), my_dict.get('filename'),
                           my_dict.get('content_type'), my_dict.get('message_status')))
        conn.commit()

    conn.close()
    return True
57f65f949c731b8120dec1b0f1b77b3d29505497
15,189
from typing import List


def getKFolds(train_df: pd.DataFrame, seeds: List[str]) -> List[List[List[int]]]:
    """Generates len(seeds) folds for train_df

    Usage:
        # 5 folds
        folds = getKFolds(train_df, [42, 99, 420, 120, 222])
        for fold, (train_idx, valid_idx, test_idx) in enumerate(folds):
            train_fold = train.iloc[train_idx]
            valid_fold = train.iloc[valid_idx]
            ...

    Returns:
        folds: list of [train, val, test] indices for each
    """
    folds = []
    for seed in seeds:
        train, val, test = split_into_train_val_test(train_df, seed=seed)
        folds.append([list(train), list(val), list(test)])
    return folds
adc25fad4530bf0f134033d95a1d936fb7eb2653
15,190
def redownload_window() -> str:
    """The number of days for which the performance data will be redownloaded"""
    return '30'
d5cc816f426f26586870def4797b91a05e37825a
15,191
def clean_flight_probs(flight_probs: np.ndarray, rng: np.random.Generator) -> np.ndarray:
    """
    Round off probabilities in flight_probs to 0 or 1 with random bias of the current probability

    :param flight_probs: a vector of inclusion probabilities after the landing phase
    :param rng: a random number generator
    :returns: a vector of inclusion probabilities that have been rounded off
    """
    for i in range(len(flight_probs)):
        if flight_probs[i] - 0 > tol and flight_probs[i] < 1 - tol:
            flight_probs[i] = 1 if rng.random() < flight_probs[i] else 0
    return flight_probs
f7127433781df86dabe699575be740f775310194
15,192
async def question(session: AskSession):
    """
    Ask the user for an answer on which LeetCode problem they want to anticipate.
    """
    return await session.prompt(
        message="Enter the problem URL from LeetCode site: ",
        validator=LeetCodeUrlValidator(session)
    )
a4ac5fd194736d2850e70ee5ac89e3569abf4410
15,193
import copy


def mongo_instance(instance_dict, ts_dt):
    """An instance as a model."""
    dict_copy = copy.deepcopy(instance_dict)
    dict_copy["status_info"]["heartbeat"] = ts_dt
    return Instance(**dict_copy)
32b0547cad0d84400a879814790eed6219ddb84a
15,194
def get_conversion_option(shape_records):
    """Prompts user for conversion options"""
    print("1 - Convert to a single zone")
    print("2 - Convert to one zone per shape (%d zones) (this can take a while)" % (len(shape_records)))
    import_option = int(input("Enter your conversion selection: "))
    return import_option
7608c588960eb3678970e0d4467c67ff9f17a331
15,196
def base_conditional(Kmn, Lm, Knn, f, *, full_cov=False, q_sqrt=None, white=False): """ Given a g1 and g2, and distribution p and q such that p(g2) = N(g2;0,Kmm) p(g1) = N(g1;0,Knn) p(g1|g2) = N(g1;0,Knm) And q(g2) = N(g2;f,q_sqrt*q_sqrt^T) This method computes the mean and (co)variance of q(g1) = \int q(g2) p(g1|g2) :param Kmn: M x N :param Kmm: M x M :param Knn: N x N or N :param f: M x R :param full_cov: bool :param q_sqrt: None or R x M x M (lower triangular) :param white: bool :return: N x R or R x N x N """ # compute kernel stuff num_func = tf.shape(f)[1] # R # Compute the projection matrix A A = tf.matrix_triangular_solve(Lm, Kmn, lower=True) # compute the covariance due to the conditioning if full_cov: fvar = Knn - tf.matmul(A, A, transpose_a=True) fvar = tf.tile(fvar[None, :, :], [num_func, 1, 1]) # R x N x N else: fvar = Knn - tf.reduce_sum(tf.square(A), 0) fvar = tf.tile(fvar[None, :], [num_func, 1]) # R x N # another backsubstitution in the unwhitened case if not white: A = tf.matrix_triangular_solve(tf.transpose(Lm), A, lower=False) # construct the conditional mean fmean = tf.matmul(A, f, transpose_a=True) if q_sqrt is not None: if q_sqrt.get_shape().ndims == 2: LTA = A * tf.expand_dims(tf.transpose(q_sqrt), 2) # R x M x N elif q_sqrt.get_shape().ndims == 3: L = q_sqrt A_tiled = tf.tile(tf.expand_dims(A, 0), tf.stack([num_func, 1, 1])) LTA = tf.matmul(L, A_tiled, transpose_a=True) # R x M x N else: # pragma: no cover raise ValueError("Bad dimension for q_sqrt: %s" % str(q_sqrt.get_shape().ndims)) if full_cov: fvar = fvar + tf.matmul(LTA, LTA, transpose_a=True) # R x N x N else: fvar = fvar + tf.reduce_sum(tf.square(LTA), 1) # R x N if not full_cov: fvar = tf.transpose(fvar) # N x R return fmean, fvar
a6ddc7d2904836d7fa83557057dc42e25a8b8a9b
15,197
def find_shortest_dijkstra_route(graph, journey):
    """
    all_pairs_dijkstra_path() and all_pairs_dijkstra_path_length()
    both return a generator, hence the use of dict().
    """
    all_paths = dict(nx.all_pairs_dijkstra_path(graph))
    all_lengths = dict(nx.all_pairs_dijkstra_path_length(graph))

    if len(all_paths) != len(all_lengths):
        print("Path count is not equal to path length count, "
              "maybe some links are missing a weight?")
        return False

    shortest_path = []
    for destination, path in all_paths[journey[0]].items():
        # If all nodes in our journey are in the current path being checked
        if all(node in path for node in journey):
            if (len(shortest_path) == 0) or (len(path) < len(shortest_path)):
                shortest_path = path

    # A path of n nodes has n - 1 connections between them
    total = max(len(shortest_path) - 1, 0)
    print("\nShortest dijkstra journey: {} connection(s)".format(total))

    if len(shortest_path) < 1:
        print("No shortest dijkstra path found!\n")
        return False
    else:
        print("{} hop(s) {}\n".format(len(shortest_path) - 1, shortest_path))

    return shortest_path
49689cc3f4b03fa6589369bf0d085ee2dbe64d5d
15,198
from functools import reduce
import operator


def product_consec_digits(number, consecutive):
    """
    Returns the products of every run of `consecutive` adjacent digits
    in `number`; take max() of the result for the largest such product.
    """
    digits = [int(dig) for dig in str(number)]
    max_start = len(digits) - consecutive
    return [reduce(operator.mul, digits[i:i + consecutive])
            for i in range(max_start + 1)]
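# A minimal usage sketch for product_consec_digits above (the input number is
# illustrative only): taking max() of the returned list gives the largest product
# of `consecutive` adjacent digits, as in the classic Project-Euler-style problem.
if __name__ == "__main__":
    products = product_consec_digits(2345176, 3)
    print(products)       # [24, 60, 20, 35, 42]  -> product of each 3-digit window
    print(max(products))  # 60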
2df16f7445e6d579b632e86904b77ec93e52a1f3
15,199
def WeightedCrossEntropyLoss(alpha=0.5): """ Calculates the Weighted Cross-Entropy Loss, which applies a factor alpha, allowing one to trade off recall and precision by up- or down-weighting the cost of a positive error relative to a negative error. A value alpha > 1 decreases the false negative count, hence increasing the recall. Conversely, setting alpha < 1 decreases the false positive count and increases the precision. """ def _gradient(yhat, dtrain, alpha): """Compute the weighted cross-entropy gradient. Args: yhat (np.array): Margin predictions dtrain: The XGBoost / LightGBM dataset alpha (float): Scale applied Returns: grad: Weighted cross-entropy gradient """ y = dtrain.get_label() yhat = clip_sigmoid(yhat) grad = (y * yhat * (alpha - 1)) + yhat - (alpha * y) return grad def _hessian(yhat, dtrain, alpha): """Compute the weighted cross-entropy hessian. Args: yhat (np.array): Margin predictions dtrain: The XGBoost / LightGBM dataset alpha (float): Scale applied Returns: hess: Weighted cross-entropy Hessian """ y = dtrain.get_label() yhat = clip_sigmoid(yhat) hess = (y * (alpha - 1) + 1) * yhat * (1 - yhat) return hess def weighted_cross_entropy( yhat, dtrain, alpha=alpha ): """ Calculate gradient and hessian for weight cross-entropy, Args: yhat (np.array): Predictions dtrain: The XGBoost / LightGBM dataset alpha (float): Scale applied Returns: grad: Weighted cross-entropy gradient hess: Weighted cross-entropy Hessian """ grad = _gradient(yhat, dtrain, alpha=alpha) hess = _hessian(yhat, dtrain, alpha=alpha) return grad, hess return weighted_cross_entropy
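# A hedged usage sketch for WeightedCrossEntropyLoss above. It assumes xgboost is
# installed, that the clip_sigmoid helper referenced by the factory is defined in
# the same module, and that alpha > 1 is chosen to penalise false negatives more
# heavily. The dataset here is synthetic and purely illustrative.
import numpy as np
import xgboost as xgb

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
y = (X[:, 0] + 0.5 * rng.normal(size=200) > 0).astype(int)

dtrain = xgb.DMatrix(X, label=y)
booster = xgb.train(
    params={"max_depth": 3, "eta": 0.1},
    dtrain=dtrain,
    num_boost_round=20,
    obj=WeightedCrossEntropyLoss(alpha=2.0),  # custom objective: recall-weighted
)
margin_preds = booster.predict(dtrain, output_margin=True)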
5746bab38e39dd6f688ea648f29e7c30d7827466
15,201
def expand_stylesheet(abbr: str, config: Config): """ Expands given *stylesheet* abbreviation (a special Emmet abbreviation designed for stylesheet languages like CSS, SASS etc.) and outputs it according to options provided in config """ return stringify_stylesheet(stylesheet_abbreviation(abbr, config), config)
17a65d1d6f6f2205a71e6e0ab653ef723672d756
15,202
def generate_legacy_dir(ctx, config, manifest, layers): """Generate a intermediate legacy directory from the image represented by the given layers and config to /image_runfiles. Args: ctx: the execution context config: the image config file manifest: the image manifest file layers: the list of layer tarballs Returns: The filepaths generated and runfiles to be made available. config: the generated config file. layers: the generated layer tarball files. temp_files: all the files generated to be made available at runtime. """ # Construct image runfiles for input to pusher. image_files = [] + layers if config: image_files += [config] if manifest: image_files += [manifest] path = "image_runfiles/" layer_files = [] # Symlink layers to ./image_runfiles/<i>.tar.gz for i in range(len(layers)): layer_symlink = ctx.actions.declare_file(path + str(i) + ".tar.gz") layer_files.append(layer_symlink) ctx.actions.run_shell( outputs = [layer_symlink], inputs = [layers[i]], command = "ln {src} {dst}".format( src = layers[i].path, dst = layer_symlink.path, ), ) # Symlink config to ./image_runfiles/config.json config_symlink = ctx.actions.declare_file(path + "config.json") ctx.actions.run_shell( outputs = [config_symlink], inputs = [config], command = "ln {src} {dst}".format( src = config.path, dst = config_symlink.path, ), ) return { "config": config_symlink, "layers": layer_files, "temp_files": [config_symlink] + layer_files, }
6001820e63ac3586625f7ca29311d717cc1e4c07
15,203
def workflow_key(workflow): """Return text search key for workflow""" # I wish tags were in the manifest :( elements = [workflow['name']] elements.extend(workflow['tags']) elements.extend(workflow['categories']) elements.append(workflow['author']) return ' '.join(elements)
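# A small illustrative call to workflow_key above; the workflow dict mirrors the
# fields the function reads (name, tags, categories, author) and is hypothetical.
example_workflow = {
    "name": "Resize images",
    "tags": ["images", "batch"],
    "categories": ["Productivity"],
    "author": "jdoe",
}
print(workflow_key(example_workflow))
# -> "Resize images images batch Productivity jdoe"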
57347705b605e68a286dd953de5bb157ac50628e
15,204
def get_logits(input_ids,mems,input_mask,target_mask): """Builds the graph for calculating the final logits""" is_training = False cutoffs = [] train_bin_sizes = [] eval_bin_sizes = [] proj_share_all_but_first = True n_token = FLAGS.n_token batch_size = FLAGS.batch_size features = {"input": input_ids} inp = tf.transpose(features["input"], [1, 0]) input_mask = tf.transpose(input_mask, [1, 0]) target_mask = tf.transpose(target_mask, [1, 0]) tgt = None inp_perms, tgt_perms, head_tgt = None, None, None if FLAGS.init == "uniform": initializer = tf.initializers.random_uniform( minval=-FLAGS.init_range, maxval=FLAGS.init_range, seed=None) elif FLAGS.init == "normal": initializer = tf.initializers.random_normal( stddev=FLAGS.init_std, seed=None) proj_initializer = tf.initializers.random_normal( stddev=FLAGS.proj_init_std, seed=None) tie_projs = [False for _ in range(len(cutoffs) + 1)] if proj_share_all_but_first: for i in range(1, len(tie_projs)): tie_projs[i] = True tf.logging.info("Vocab size : {}".format(n_token)) tf.logging.info("Batch size : {}".format(batch_size)) logits, new_mems = model.transformer( dec_inp=inp, target=tgt, mems=mems, n_token=n_token, n_layer=FLAGS.n_layer, d_model=FLAGS.d_model, d_embed=FLAGS.d_embed, n_head=FLAGS.n_head, d_head=FLAGS.d_head, d_inner=FLAGS.d_inner, dropout=0, dropatt=0, initializer=initializer, is_training=is_training, mem_len=FLAGS.seq_len+FLAGS.max_decode_length, cutoffs=cutoffs, div_val=1, tie_projs=tie_projs, input_perms=inp_perms, target_perms=tgt_perms, head_target=head_tgt, same_length=FLAGS.same_length, clamp_len=FLAGS.clamp_len, use_tpu=FLAGS.use_tpu, untie_r=FLAGS.untie_r, proj_same_dim=True, bidirectional_mask=FLAGS.bi_mask, infer=True, target_mask=target_mask, input_mask=input_mask, tgt_len=1) return logits,new_mems
4719104fdbb693411a9614e8a4048cbf6b932d1f
15,205
def _api_get_scripts(name, output, kwargs): """ API: accepts output """ return report(output, keyword="scripts", data=list_scripts())
88f002646cdec6911a76aa16cec2939b32cffd33
15,207
import requests
from requests.exceptions import HTTPError


def get_children(key):
    """
    Lists all direct child usages for a name usage
    :return: list of species
    """
    api_url = 'http://api.gbif.org/v1/species/{key}/children'.format(
        key=key
    )
    try:
        response = requests.get(api_url)
        json_response = response.json()
        if json_response['results']:
            return json_response['results']
        return None
    except (HTTPError, KeyError) as e:
        print(e)
        return None
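# Hedged usage sketch for get_children above: it requires network access to the
# public GBIF API, and the key below is only an illustrative example; substitute
# any valid GBIF name-usage key.
if __name__ == "__main__":
    children = get_children(2435194)
    if children:
        print([child.get("scientificName") for child in children][:5])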
8d4a4ca4c1231ca2c7d98f7c0cede5ecdac003d5
15,208
def _extend(obj, *args): """ adapted from underscore-py Extend a given object with all the properties in passed-in object(s). """ args = list(args) for src in args: obj.update(src) for k, v in src.items(): if v is None: del obj[k] return obj
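# Quick illustration of _extend above: later sources override earlier keys, and a
# key mapped to None in a source removes that key from the (mutated) result.
base = {"a": 1, "b": 2}
print(_extend(base, {"b": 3, "c": 4}, {"a": None}))
# -> {'b': 3, 'c': 4}   ('a' removed because the last source set it to None)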
9fe1bffcd05ac44a3587b53a71f592c462975482
15,209
from asgiref.sync import async_to_sync
import functools


def async_test(func):
    """
    Wrap async_to_sync with another function because Pytest complains about
    collecting the resulting callable object as a test because it's not a true
    function:

        PytestCollectionWarning: cannot collect 'test_foo' because it is not
        a function.
    """
    sync_func = async_to_sync(func)

    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return sync_func(*args, **kwargs)

    return wrapper
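# Minimal usage sketch for async_test above (the test name and coroutine body are
# illustrative): pytest collects the wrapper as a plain synchronous function.
import asyncio

@async_test
async def test_addition_is_fast():
    await asyncio.sleep(0)
    assert 1 + 1 == 2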
10127bd083230404a7bb79d764502e6354f44b5a
15,210
import logging
import logging.config


def get_logger(lname, logfile):
    """logging setup
    logging config - to be moved to file at some point
    """
    logger = logging.getLogger(lname)
    logging.config.dictConfig({
        'version': 1,
        'disable_existing_loggers': False,
        'formatters': {
            'standard': {
                'format': '%(levelname)s:\t%(message)s'
            },
            'verbose': {
                'format': '%(levelname)s:\t%(message)s\tFROM: %(name)s'
            }
        },
        'handlers': {
            'stdout': {
                'level': 'INFO',
                'formatter': 'verbose',
                'class': 'logging.StreamHandler'
            },
            'logfile': {
                'level': 'INFO',
                'formatter': 'standard',
                'class': 'logging.FileHandler',
                'filename': logfile
            }
        },
        'loggers': {
            '': {
                'handlers': ['stdout', 'logfile'],
                'level': 'INFO',
                'propagate': True
            }
        }
    })

    return logger
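# Usage sketch for get_logger above; the logger name and file path are arbitrary.
logger = get_logger("myapp", "myapp.log")
logger.info("pipeline started")  # goes to stdout (verbose) and myapp.log (standard)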
0a4795f383077b52b84afb882f090e0f9140fd0f
15,211
def get_percentiles(data, percentiles, integer_valued=True): """Returns a dict of percentiles of the data. Args: data: An unsorted list of datapoints. percentiles: A list of ints or floats in the range [0, 100] representing the percentiles to compute. integer_valued: Whether or not the values are all integers. If so, interpolate to the nearest datapoint (instead of computing a fractional value between the two nearest datapoints). Returns: A dict mapping each element of percentiles to the computed result. """ # Ensure integer datapoints for cleaner binning if necessary. interpolation = 'nearest' if integer_valued else 'linear' results = np.percentile(data, percentiles, interpolation=interpolation) return {percentiles[i]: results[i] for i in range(len(percentiles))}
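# Small self-contained example for get_percentiles above using integer latencies,
# assuming a NumPy version that still accepts the interpolation= keyword used by the
# function; with integer_valued=True the reported values are actual datapoints.
import numpy as np

latencies_ms = [12, 15, 11, 40, 13, 90, 14, 16, 13, 12]
print(get_percentiles(latencies_ms, [50, 90, 99]))
# e.g. {50: 13, 90: 40, 99: 90}  (each value is the nearest datapoint)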
763c0c1a724b55ac4bb5b83a6831fa5aa44993fd
15,212
def estimate_poster_dedpul(diff, alpha=None, quantile=0.05, alpha_as_mean_poster=False, max_it=100, **kwargs): """ Estimates posteriors and priors alpha (if not provided) of N in U with dedpul method :param diff: difference of densities f_p / f_u for the sample U, np.array (n,), output of estimate_diff() :param alpha: priors, share of N in U (estimated if None) :param quantile: if alpha is None, relaxation of the estimate of alpha; here alpha is estimaeted as infinum, and low quantile is its relaxed version; share of posteriors probabilities that we allow to be negative (with the following zeroing-out) :param kwargs: dummy :return: tuple (alpha, poster), e.g. (priors, posteriors) of N in U for the U sample, represented by diff """ if alpha_as_mean_poster and (alpha is not None): poster = 1 - diff * (1 - alpha) poster[poster < 0] = 0 cur_alpha = np.mean(poster) if cur_alpha < alpha: left_border = alpha right_border = 1 else: left_border = 0 right_border = alpha poster_zero = 1 - diff poster_zero[poster_zero < 0] = 0 if np.mean(poster_zero) > alpha: left_border = -50 right_border = 0 # return 0, poster_zero it = 0 try_alpha = cur_alpha while (abs(cur_alpha - alpha) > kwargs.get('tol', 10 ** -5)) and (it < max_it): try_alpha = (left_border + (right_border - left_border) / 2) poster = 1 - diff * (1 - try_alpha) poster[poster < 0] = 0 cur_alpha = np.mean(poster) if cur_alpha > alpha: right_border = try_alpha else: left_border = try_alpha it += 1 alpha = try_alpha if it >= max_it: print('Exceeded maximal number of iterations in finding mean_poster=alpha') else: if alpha is None: alpha = 1 - 1 / max(np.quantile(diff, 1 - quantile, interpolation='higher'), 1) poster = 1 - diff * (1 - alpha) poster[poster < 0] = 0 return alpha, poster
5d7fe900e379418f38f6097ac8024984fc2e66fa
15,213
def get_short_topic_name(test_run_name): """Returns the collection name for the DLQ. Keyword arguments: test_run_name -- the unique id for this test run """ return test_run_name[3:] if test_run_name.startswith("db.") else test_run_name
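# Behaviour sketch for get_short_topic_name above: the "db." prefix is stripped,
# anything else is returned unchanged (run names here are hypothetical).
print(get_short_topic_name("db.core.claimant"))   # -> "core.claimant"
print(get_short_topic_name("core.claimant"))      # -> "core.claimant"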
6901ecd14b9cde9e0d8b7b62d11cf3c04b3b4a2e
15,214
from shapely.geometry import Point, LineString


def cut_in_two(line):
    """
    Cuts input line into two lines of equal length

    Parameters
    ----------
    line : shapely.LineString
        input line

    Returns
    ----------
    list (LineString, LineString, Point)
        two lines and the middle point cutting input line
    """
    # Get final distance value
    distance = line.length / 2

    # Cuts a line in two at a distance from its starting point
    if distance <= 0.0 or distance >= line.length:
        return [LineString(line)]

    coords = list(line.coords)
    for i, p in enumerate(coords):
        pd = line.project(Point(p))
        if pd == distance:
            # Return the midpoint itself (not the projected distance) to match the docstring
            return [LineString(coords[: i + 1]), LineString(coords[i:]), Point(p)]
        if pd > distance:
            cp = line.interpolate(distance)
            return [
                LineString(coords[:i] + [(cp.x, cp.y)]),
                LineString([(cp.x, cp.y)] + coords[i:]),
                cp,
            ]
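# Usage sketch for cut_in_two above (requires shapely); the line coordinates are
# arbitrary and only illustrate the two halves plus the cutting point.
from shapely.geometry import LineString

line = LineString([(0, 0), (4, 0), (10, 0)])
first_half, second_half, midpoint = cut_in_two(line)
print(first_half.length, second_half.length)  # 5.0 5.0
print(midpoint)                               # POINT (5 0)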
95df9b6b3995a930b6772a5137db3a14f10b4b26
15,215
from importlib.machinery import SourceFileLoader
from os.path import join


def get_processor(aid):
    """
    Return the processor module for a given achievement.

    Args:
        aid: the achievement id
    Returns:
        The processor module

    """
    try:
        path = get_achievement(aid)["processor"]
        base_path = api.config.get_settings()["achievements"]["processor_base_path"]
        return SourceFileLoader(path[:-3], join(base_path, path)).load_module()
    except FileNotFoundError:
        raise PicoException("Achievement processor is offline.")
941e998e0e3ee81a6e22903976959e7696dd11ef
15,216
import locale import re def parse_price(price): """ Convert string price to numbers """ if not price: return 0 price = price.replace(',', '') return locale.atoi(re.sub('[^0-9,]', "", price))
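# Usage sketch for parse_price above. locale.atoi honours the current locale's
# thousands separator, so the results shown assume the default "C" locale.
print(parse_price("$1,299"))   # -> 1299
print(parse_price(""))         # -> 0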
bb90aa90b38e66adc73220665bb5e6458bfe5374
15,217
def render_content(template, context={}, request=None): """Renderiza el contenido para un email a partir de la plantilla y el contexto. Deben existir las versiones ".html" y ".txt" de la plantilla. Adicionalmente, si se recibe el request, se utilizará para el renderizado. """ if request: context_class = RequestContext(request, context) else: context_class = Context(context) template = Template(template) return { "text_content": template.render(context_class), "html_content": template.render(context_class) }
0ef06bb3d42f737e9ae112a852460595b8bb1824
15,218
def calculate_psi(expected, actual, buckettype="bins", breakpoints=None, buckets=10, axis=0): """Calculate the PSI (population stability index) across all variables Args: expected: numpy matrix of original values actual: numpy matrix of new values buckettype: type of strategy for creating buckets, bins splits into even splits, quantiles splits into quantile buckets, customize split into customized buckets breakpoints: if buckettype is customizer, pass a numpy array as breakpoints buckets: number of quantiles to use in bucketing variables axis: axis by which variables are defined, 0 for vertical, 1 for horizontal Returns: psi_values: ndarray of psi values for each variable """ def psi(expected_array, actual_array, buckets, breaks=None): """Calculate the PSI for a single variable Args: expected_array: numpy array of original values actual_array: numpy array of new values buckets: number of percentile ranges to bucket the values into breaks: default None, customize breakpoints Returns: psi_value: calculated PSI value """ breakpoints = np.arange(0, buckets + 1) / (buckets) * 100 if buckettype == 'bins': breakpoints = scale_range(breakpoints, np.min(expected_array), np.max(expected_array)) elif buckettype == 'quantiles': breakpoints = np.stack([np.percentile(expected_array, b) for b in breakpoints]) elif buckettype == 'customize': assert breaks is not None, "buckettype is customize, breakpoints should not be None" breakpoints = breaks expected_percents = np.histogram(expected_array, breakpoints)[0] / len(expected_array) actual_percents = np.histogram(actual_array, breakpoints)[0] / len(actual_array) psi_value = sum(sub_psi(expected_percents[i], actual_percents[i]) for i in range(0, len(expected_percents))) return psi_value if len(expected.shape) == 1: psi_values = np.empty(len(expected.shape)) else: psi_values = np.empty(expected.shape[axis]) for i in range(0, len(psi_values)): if len(psi_values) == 1: psi_values = psi(expected, actual, buckets, breakpoints) elif axis == 0: psi_values[i] = psi(expected[:,i], actual[:,i], buckets, breakpoints) elif axis == 1: psi_values[i] = psi(expected[i,:], actual[i,:], buckets, breakpoints) return psi_values
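# Hedged usage sketch for calculate_psi above. The function relies on scale_range
# and sub_psi helpers that are not shown here; the versions below are common minimal
# implementations assumed only for illustration, and the 1-D data is synthetic.
import numpy as np

def scale_range(values, new_min, new_max):
    # rescale breakpoint percentages onto the observed value range
    values = (values - values.min()) / (values.max() - values.min())
    return values * (new_max - new_min) + new_min

def sub_psi(e_perc, a_perc, eps=1e-4):
    # per-bucket PSI contribution, guarding against empty buckets
    e_perc = max(e_perc, eps)
    a_perc = max(a_perc, eps)
    return (e_perc - a_perc) * np.log(e_perc / a_perc)

rng = np.random.default_rng(1)
expected = rng.normal(0.0, 1.0, size=5000)
actual = rng.normal(0.2, 1.1, size=5000)   # slightly shifted population
print(calculate_psi(expected, actual, buckettype="bins", buckets=10))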
d5250a93e784ce13cc24a1d16a88929d33426c1c
15,219
import requests def get_sid(token): """ Obtain the sid from a given token, returns None if failed connection or other error preventing success Do not use manually """ r = requests.get(url=str(URL + "app"), headers={'Accept': 'text/plain', 'authorization': token, 'Content-Type': 'application/json;charset=utf-8'}) cookie = r.headers.get('set-cookie') # If successful, then the cookie was set if cookie: return cookie.split("connect.sid=", 1)[1].strip("; Path=/; HttpOnly") return None
eaa26681f988b8c27fecf489bbf1bb1d5c460810
15,221
from urllib.parse import quote_plus


def generer_lien(mots, commande="http://www.lextutor.ca/cgi-bin/conc/wwwassocwords.pl?lingo=French&KeyWordFormat=&Maximum=10003&LineWidth=100&Gaps=no_gaps&store_dic=&is_refire=true&Fam_or_Word=&Source=http%3A%2F%2Fwww.lextutor.ca%2Fconc%2Ffr%2F&unframed=true&SearchType=equals&SearchStr={0}&Corpus=Fr_le_monde.txt&ColloSize=&SortType=right&AssocWord=&Associate={1}", contextes=["right", "left"]):
    """
    Returns a dict of concordance links, keyed by context then by word, e.g.
    {'right': {'ce': '<link_right>'}, 'left': {'ce': '<link_left>'}}
    """
    liens = {}
    for mot in mots:
        for contexte in contextes:
            command = commande.format(quote_plus(mot, encoding="ISO 8859-1"), contexte)
            # use setdefault so links for several words are kept under the same context
            liens.setdefault(contexte, {})[mot] = command
    return liens
0a646603fb538468a4ae29b102cb8250479fedce
15,222
import numpy def ring_forming_scission_grid(zrxn, zma, npoints=(7,)): """ Build forward WD grid for a ring forming scission reaction # the following allows for a 2-d grid search in the initial ts_search # for now try 1-d grid and see if it is effective """ # Obtain the scan coordinate scan_name = ring_forming_scission_scan_coordinate(zrxn, zma) # Build the grid npoints1 = npoints[0] brk_bnd_len = _ts_bnd_len(zma, scan_name) if brk_bnd_len is not None: r1min = brk_bnd_len + 0.1 * phycon.ANG2BOHR r1max = brk_bnd_len + 0.7 * phycon.ANG2BOHR else: r1min = (1.54 + 0.1) * phycon.ANG2BOHR r1max = (1.54 + 0.7) * phycon.ANG2BOHR grid1 = numpy.linspace(r1min, r1max, npoints1) grid = tuple(val.item() for val in grid1) return grid
de6e521ae28603b5afea5148f98a65f578e7b349
15,223
import re def parse_proj(lines): """ parse a project file, looking for section definitions """ section_regex_start = re.compile( '\s*([0-9A-F]+) /\* ([^*]+) \*/ = {$', re.I) section_regex_end = re.compile('\s*};$') children_regex = re.compile('\s*([0-9A-F]+) /\* ([^*]+) \*/,', re.I) children_regex_start = re.compile('\s*children = \(') children_regex_end = re.compile('\s*\);') group_regex = re.compile('\s*sourceTree = ([^;]+);') file_reference_regex = re.compile( '\s*([0-9A-F]+) /\* ([^*]+) \*/ = .* ' + 'path = "?([^;"]+)"?; sourceTree = ([^;]+);', re.I) entries = {} current_section = None got_children = False for line in lines: if current_section: end = section_regex_end.match(line) if end: current_section = None continue # look for the children marker, or append to children if got_children: if children_regex_end.match(line): got_children = False else: child_match = children_regex.match(line) if child_match: id = child_match.groups()[0] name = child_match.groups()[1] current_section.add_link(Link(id, name)) elif children_regex_start.match(line): got_children = True else: # no children, try to match a sourceTree = ...; line group = group_regex.match(line) if group: current_section.location = group.groups()[0] else: # try for a new section new_section_matches = section_regex_start.match(line) if new_section_matches: id = new_section_matches.groups()[0] name = new_section_matches.groups()[1] current_section = Section(id, name) entries[id] = current_section else: # no new section, check for a plain FileReference file_ref_captures = file_reference_regex.match(line) if file_ref_captures: id = file_ref_captures.groups()[0] name = file_ref_captures.groups()[1] path = file_ref_captures.groups()[2] location = file_ref_captures.groups()[3] entries[id] = FileReference(id, name, path, location) return entries
28e979a6a3c82f5669704375e8a6104f406af33f
15,224
import torch


def mot_decode(heat, wh, reg=None, cat_spec_wh=False, K=100):
    """
    Decode multi-object detection results.
    """
    batch, cat, height, width = heat.size()  # N×C×H×W

    # heat = torch.sigmoid(heat)

    # perform nms on heatmaps
    heat = _nms(heat)  # 3×3 max pooling by default, reducing candidate detections to 1/9 of the feature map

    scores, inds, clses, ys, xs = _topk(scores=heat, K=K)
    if reg is not None:
        reg = _tranpose_and_gather_feat(reg, inds)
        reg = reg.view(batch, K, 2)
        xs = xs.view(batch, K, 1) + reg[:, :, 0:1]
        ys = ys.view(batch, K, 1) + reg[:, :, 1:2]
    else:
        xs = xs.view(batch, K, 1) + 0.5
        ys = ys.view(batch, K, 1) + 0.5

    wh = _tranpose_and_gather_feat(wh, inds)

    if cat_spec_wh:
        wh = wh.view(batch, K, cat, 2)
        clses_ind = clses.view(batch, K, 1, 1).expand(batch, K, 1, 2).long()
        wh = wh.gather(2, clses_ind).view(batch, K, 2)
    else:
        wh = wh.view(batch, K, 2)

    clses = clses.view(batch, K, 1).float()  # object class
    scores = scores.view(batch, K, 1)

    bboxes = torch.cat([xs - wh[..., 0:1] / 2,   # left
                        ys - wh[..., 1:2] / 2,   # top
                        xs + wh[..., 0:1] / 2,   # right
                        ys + wh[..., 1:2] / 2],  # down
                       dim=2)
    detections = torch.cat([bboxes, scores, clses], dim=2)

    return detections, inds
22d5c8f85bd90936c46faf73ecb6c520466fb6da
15,225
def get_descriptors(smiles): """ Use RDkit to get molecular descriptors for the given smiles string """ mol = Chem.MolFromSmiles(smiles) return pd.Series({name: func(mol) for name, func in descList.items()})
2107b4e1d13c2a7a02e15392fe38e1448d1772c2
15,226
def mro(*bases): """Calculate the Method Resolution Order of bases using the C3 algorithm. Suppose you intended creating a class K with the given base classes. This function returns the MRO which K would have, *excluding* K itself (since it doesn't yet exist), as if you had actually created the class. Another way of looking at this, if you pass a single class K, this will return the linearization of K (the MRO of K, *including* itself). """ seqs = [list(C.__mro__) for C in bases] + [list(bases)] res = [] while True: non_empty = list(filter(None, seqs)) if not non_empty: # Nothing left to process, we're done. return tuple(res) for seq in non_empty: # Find merge candidates among seq heads. candidate = seq[0] not_head = [s for s in non_empty if candidate in s[1:]] if not_head: # Reject the candidate. candidate = None else: break if not candidate: raise TypeError("inconsistent hierarchy, no C3 MRO is possible") res.append(candidate) for seq in non_empty: # Remove candidate. if seq[0] == candidate: del seq[0]
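# Worked example for mro above: for a diamond B(A), C(A), the C3 linearization of a
# class that would inherit from (B, C) is B, C, A, object.
class A: pass
class B(A): pass
class C(A): pass

print(mro(B, C))  # (<class 'B'>, <class 'C'>, <class 'A'>, <class 'object'>)
print(mro(B))     # same as B.__mro__: (B, A, object)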
87d259d00b073c8728833d8608fed5e4f484a987
15,227
def ends_with(s, suffix, ignore_case=False): """ suffix: str, list, or tuple """ if is_str(suffix): suffix = [suffix] suffix = list(suffix) if ignore_case: for idx, suf in enumerate(suffix): suffix[idx] = to_lowercase(suf) s = to_lowercase(s) suffix = tuple(suffix) return s.endswith(suffix)
4b92596f95bb482a196bf2b8a07a6a954f526045
15,228
def compareVersion(self, version1, version2):
    """
    :type version1: str
    :type version2: str
    :rtype: int
    """
    # split into integer components, then compare position by position
    version1 = [int(val) for val in version1.split(".")]
    version2 = [int(val) for val in version2.split(".")]

    if len(version1) > len(version2):
        min_version = version2
        max_version = version1
    else:
        min_version = version1
        max_version = version2

    # Compare component-wise up to the shorter length
    for i in range(len(min_version)):
        if version1[i] > version2[i]:
            return 1
        elif version1[i] < version2[i]:
            return -1

    if len(version1) == len(version2):
        return 0

    # The longer version wins only if it has a non-zero component left
    for j in range(i + 1, len(max_version)):
        if max_version[j] > 0:
            return 1 if max_version == version1 else -1

    return 0
70ff77595f61620e1dac32d29be510e0906b505b
15,229
import glob import re def create_capital(): """ Use fy and p-t-d capital sets and ref sets to make capital datasets """ adopted = glob.glob(conf['temp_data_dir'] \ + "/FY*_ADOPT_CIP_BUDGET.xlsx") proposed = glob.glob(conf['temp_data_dir'] \ + "/FY*_PROP_CIP_BUDGET.xlsx") todate = glob.glob(conf['temp_data_dir'] \ + "/FY*_2DATE_CIP_BUDGET.xlsx") budgets = adopted + proposed + todate fund_ref = pd.read_csv(prod_path \ + "/budget_reference_funds_datasd_v1.csv",dtype={'fund_number':str}) proj_ref = pd.read_csv(prod_path \ + "/budget_reference_projects_datasd_v1.csv",dtype={'project_number':str}) accounts_ref = pd.read_csv(prod_path \ + "/budget_reference_accounts_datasd_v1.csv",dtype={'account_number':str}) for count, budget in enumerate(budgets): fy_pattern = re.compile(r'([0-9][0-9])') this_fy = fy_pattern.findall(budget) if "2DATE" in budget: out_fname = prod_path \ + "/budget_capital_ptd_FY{}_datasd_v1.csv".format(this_fy[0]) elif "PROP" in budget: out_fname = prod_path \ + "/budget_capital_FY{}_prop_datasd_v1.csv".format(this_fy[0]) else: out_fname = prod_path \ + "/budget_capital_FY{}_datasd_v1.csv".format(this_fy[0]) df = pd.read_excel(budget) df = df.iloc[:, [0,1,2,3]] df.columns = ['amount','code','project_number','object_number'] df['code'] = df['code'].astype(str) df['project_number'] = df['project_number'].astype(str) df['object_number'] = df['object_number'].astype(str) df = pd.merge(df, fund_ref[['fund_type','fund_number']], left_on='code', right_on='fund_number', how='left') df = pd.merge(df, proj_ref[['asset_owning_dept','project_name','project_number']], left_on='project_number', right_on='project_number', how='left') df = pd.merge(df, accounts_ref[['account','account_number']], left_on='object_number', right_on='account_number', how='left') df = df[['amount', 'fund_type', 'fund_number', 'asset_owning_dept', 'project_name', 'project_number', 'account', 'account_number']] general.pos_write_csv(df,out_fname) return "Successfully created capital budgets"
64abc2c73e1455d42b94039cf857534a03075c41
15,230
def gen_gt_from_quadrilaterals(gt_quadrilaterals, input_gt_class_ids, image_shape, width_stride, box_min_size=3):
    """
    Generate fixed-width gt boxes from gt quadrilaterals.
    :param gt_quadrilaterals: GT quadrilateral coordinates, [n, (x1,y1,x2,y2,x3,y3,x4,y4)]
    :param input_gt_class_ids: GT quadrilateral class ids, usually 1, [n]
    :param image_shape:
    :param width_stride: stride of the split, usually 16
    :param box_min_size: minimum size of the GT boxes after splitting
    :return: gt_boxes: [m, (y1,x1,y2,x2)]
             gt_class_ids: [m]
    """
    h, w = list(image_shape)[:2]
    x_array = np.arange(0, w + 1, width_stride, np.float32)  # x coordinates at fixed-width intervals
    # min and max x of each quadrilateral
    x_min_np = np.min(gt_quadrilaterals[:, ::2], axis=1)
    x_max_np = np.max(gt_quadrilaterals[:, ::2], axis=1)
    gt_boxes = []
    gt_class_ids = []
    for i in np.arange(len(gt_quadrilaterals)):
        xs = get_xs_in_range(x_array, x_min_np[i], x_max_np[i])  # x coordinates that fall inside the quadrilateral
        ys_min, ys_max = get_min_max_y(gt_quadrilaterals[i], xs)
        # print("xs:{}".format(xs))
        # generate fixed-width gt boxes for each quadrilateral
        for j in range(len(xs) - 1):
            x1, x2 = xs[j], xs[j + 1]
            y1, y2 = np.min(ys_min[j:j + 2]), np.max(ys_max[j:j + 2])
            gt_boxes.append([y1, x1, y2, x2])
            gt_class_ids.append(input_gt_class_ids[i])
    gt_boxes = np.reshape(np.array(gt_boxes), (-1, 4))
    gt_class_ids = np.reshape(np.array(gt_class_ids), (-1,))
    # filter out boxes that are too small
    height = gt_boxes[:, 2] - gt_boxes[:, 0]
    width = gt_boxes[:, 3] - gt_boxes[:, 1]
    indices = np.where(np.logical_and(height >= 8, width >= 2))
    return gt_boxes[indices], gt_class_ids[indices]
4dfd81bd7a0f20334385bc9e1c9681d371e6f609
15,231
def monotonic(l: list): """Return True is list elements are monotonically increasing or decreasing. >>> monotonic([1, 2, 4, 20]) True >>> monotonic([1, 20, 4, 10]) False >>> monotonic([4, 1, 0, -10]) True """ #[SOLUTION] if l == sorted(l) or l == sorted(l, reverse=True): return True return False
1f8a34943e288ea9695f040be91f18cfe82a6e48
15,232
import math def get_weight(stats): """ Return a data point weight for the result. """ if stats is None or 'ci_99_a' not in stats or 'ci_99_b' not in stats: return None try: a = stats['ci_99_a'] b = stats['ci_99_b'] if math.isinf(a) or math.isinf(b): # Infinite interval is due to too few samples --- consider # weight as missing return None return 2 / abs(b - a) except ZeroDivisionError: return None
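# Quick illustration of get_weight above: the weight is 2 / width of the 99% CI, and
# missing or infinite intervals yield None.
print(get_weight({'ci_99_a': 9.5, 'ci_99_b': 10.5}))          # 2.0
print(get_weight({'ci_99_a': float('-inf'), 'ci_99_b': 1}))   # None
print(get_weight(None))                                       # None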
7e44032bc9e51e5fe7522c3f51ead5e733d4107a
15,236
import torch
from torch import Tensor
from typing import Tuple
from sklearn.linear_model import BayesianRidge


def get_true_posterior(X: Tensor, y: Tensor) -> Tuple[Tensor, Tensor, float, float, float]:
    """
    Get the parameters of the true posterior of a linear regression model fit to the given data.

    Args:
        X: The features, of shape (n_samples, n_features).
        y: The targets, of shape (n_samples,).

    Returns:
        mean: The posterior mean, of shape (n_features,).
        covar: The posterior covariance, of shape (n_features, n_features).
        bias: The posterior bias.
        alpha: The precision of the Gaussian prior.
        beta: The precision of Gaussian target noise.
    """
    br = BayesianRidge()
    br.fit(X.numpy(), y.numpy())

    mean = torch.from_numpy(br.coef_).float()
    covar = torch.from_numpy(br.sigma_).float()
    bias = br.intercept_
    alpha = br.lambda_
    beta = br.alpha_

    return mean, covar, bias, alpha, beta
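# Hedged usage sketch for get_true_posterior above: fit the Bayesian-ridge posterior
# to a small synthetic linear dataset (requires torch and scikit-learn).
import torch

torch.manual_seed(0)
X = torch.randn(200, 3)
true_w = torch.tensor([1.0, -2.0, 0.5])
y = X @ true_w + 0.1 * torch.randn(200)

mean, covar, bias, alpha, beta = get_true_posterior(X, y)
print(mean)          # close to [1.0, -2.0, 0.5]
print(covar.shape)   # torch.Size([3, 3])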
3431513d52905ec51bbe7b694af02a8274cbf48e
15,237
def findmax(engine,user,measure,depth): """Returns a list of top (user,measure) pairs, sorted by measure, up to a given :depth""" neighbors = engine.neighbors(user) d = {v:measure(user,v) for v in neighbors} ranked = sorted(neighbors,key=lambda v:d[v],reverse=True) return list((v,d[v]) for v in ranked[:depth])
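# Toy illustration of findmax above: the "engine" and similarity measure below are
# hypothetical stand-ins for whatever graph/recommender object the caller supplies.
class ToyEngine:
    def __init__(self, graph):
        self.graph = graph

    def neighbors(self, user):
        return self.graph.get(user, [])

engine = ToyEngine({"alice": ["bob", "carol", "dave"]})
shared_items = {("alice", "bob"): 3, ("alice", "carol"): 5, ("alice", "dave"): 1}
measure = lambda u, v: shared_items.get((u, v), 0)

print(findmax(engine, "alice", measure, depth=2))
# -> [('carol', 5), ('bob', 3)]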
ecf6d72f8c689f1b7af78a714e55d8fbfe57f2ad
15,238
from collections import OrderedDict


def cart_update(request, pk):
    """
    Add/Remove single product (possible multiple qty of product) to cart
    :param request: Django's HTTP Request object, pk: Primary key of products to be added to cart
    :return: Success message
    """
    if request.method == 'GET':
        sess = request.session
        qty = request.GET.get('qty', False)
        if qty:
            # Initialize a cart and its qty in session if they don't exist
            sess['cart_qty'] = sess.get('cart_qty', 0) + int(qty)
            sess['cart'] = sess.get('cart', OrderedDict())

            # Used when adding to cart and the product is not already in the cart
            new_cart_item = {'qty': 0, 'pk': str(pk)}

            # Update cart item quantity of new/existing item
            sess['cart'][str(pk)] = sess['cart'].get(str(pk), new_cart_item)
            new_qty = sess['cart'][str(pk)]['qty'] + int(qty)
            new_qty_above_max = Product.objects.get(pk=pk).quantity < new_qty

            if not new_qty_above_max:
                # Sets new quantity to 0 in case quantity has gone negative
                sess['cart'][str(pk)]['qty'] = int((abs(new_qty) + new_qty) / 2)
                return JsonResponse({'success': True})
            return JsonResponse({
                'success': False,
                'msg': 'Max quantity of this product has already been added.'
            })
1673b299a41bdccaf6d0a27b15fbf85a0bb7028f
15,239
def mlp_gradient(x, y, ws, bs, phis, alpha):
    """
    Return the gradients of the cost with respect to the weights and biases of each layer.

    :param x: a list of lists representing the x matrix.
    :param y: a list of lists of output values.
    :param ws: a list of weight matrices (one for each layer)
    :param bs: a list of biases (one for each layer)
    :param phis: a list of activation functions
    :param alpha: the L2 regularization coefficient applied to the weights
    :return: (result_w, result_b), the gradients of J with respect to the weights and biases for k=1..l
    """
    hs = mlp_feed_forward(x, ws, bs, phis)
    D = mlp_propagate_error(x, y, ws, bs, phis, hs)
    result_w = []
    result_b = []

    w_1 = np.dot(np.transpose(x), D[0])
    step = np.multiply(alpha, ws[0])
    w_1 = np.add(w_1, step)
    w_1 = np.ndarray.tolist(w_1)
    result_w.append(w_1)

    for layers in range(1, len(ws)):
        w_2 = np.dot(np.transpose(hs[layers]), D[layers])
        w_2 = np.add(w_2, np.multiply(alpha, ws[layers]))
        result_w.append(w_2)

    for layers in range(len(ws)):
        ones = np.ones((len(x), 1))
        b_1 = np.dot(np.transpose(ones), D[layers])
        result_b.append(b_1)

    result_w = np.reshape(result_w, (1, -1))
    return result_w, result_b
0e148d5b3b343a982d9332637c4f51be8b3afa3b
15,241
import torch def src_one(y: torch.Tensor, D: torch.Tensor, *, k=None, device=None) -> torch.Tensor: """ y = Dx :param y: image (h*w) :param D: dict (class_sz, train_im_sz, h*w) :param k: :param device: pytorch device :return: predict tensor(int) """ assert y.dim() == 1 assert D.dim() == 3 assert y.size(dim=0) == D.size(dim=2) class_sz, train_im_sz, n_features = D.shape # n_features=h*w D_x = D.view(class_sz * train_im_sz, n_features) D_x = D_x.permute([1, 0]) # D_x(n_features, class_sz*train_im_sz) # y(n_features) a = omp(D_x, y, k=k, device=device) # a(class_sz*train_im_sz) X_i = D.permute([0, 2, 1]) # X_i(class_sz, h*w, train_im_sz) a_i = a.view(class_sz, train_im_sz, 1) # a(class_sz, train_im_sz, 1) y_p = torch.matmul(X_i, a_i).view(class_sz, n_features) e_y = torch.mean((y - y_p) ** 2, dim=1) return torch.argmin(e_y)
b779e3313fb707bb6659fe48f59b030b9c9ae7d3
15,242