Dataset columns:
- content: string (length 35 – 762k)
- sha1: string (length 40)
- id: int64 (0 – 3.66M)
def separable_hnn(num_points, input_h_s=None, input_model=None,
                  save_path='temp_save_path', train=True, epoch_save=100):
    """
    Separable Hamiltonian network.

    :return:
    """
    if input_h_s:
        h_s = input_h_s
        model = input_model
    else:
        h_s = HNN1DWaveSeparable(nn.Sequential(
            nn.Linear(3*num_points, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 20),
            nn.Tanh(),
            nn.Linear(20, 1))).to(device)
        model = DENNet(h_s, case='1DWave').to(device)

    if train:
        learn_sep = Learner(model, num_boundary=num_boundary,
                            save_path=save_path, epoch_save=epoch_save)
        logger = TensorBoardLogger('separable_logs')
        trainer_sep = pl.Trainer(min_epochs=701, max_epochs=701, logger=logger, gpus=1)
        trainer_sep.fit(learn_sep)

    return h_s, model
c9912f69b4367a2ed83ce367970551f31e0cb087
28,671
def get_n_largest(n, lst, to_compare=lambda x: x):
    """ This returns largest n elements from list in descending order """
    largests = [lst[0]]*n  # this will be in descending order
    for x in lst[1:]:
        if to_compare(x) <= to_compare(largests[-1]):
            continue
        else:
            for i, y in enumerate(largests):
                if to_compare(x) >= to_compare(y):
                    largests = largests[:i] + [x] + largests[i:-1]
                    break
    return largests
4ef85d8656ae152ecab65d3a01bce7f885c47577
28,672
from natsort import natsorted
import collections
from typing import Optional
from typing import Union


def base_scatter(
        x: Optional[Union[np.ndarray, list]],
        y: Optional[Union[np.ndarray, list]],
        hue: Optional[Union[np.ndarray, list]] = None,
        ax=None,
        title: str = None,
        x_label: str = None,
        y_label: str = None,
        color_bar: bool = False,
        bad_color: str = "lightgrey",
        dot_size: int = None,
        palette: Optional[Union[str, list]] = 'stereo',
        invert_y: bool = True,
        legend_ncol=2,
        show_legend=True,
        show_ticks=False,
        vmin=None,
        vmax=None,
        SegmentedColormap=None,
):  # scatter plot of the spatial distribution of the expression matrix after clustering
    """
    scatter plotter

    :param invert_y: whether to invert y-axis.
    :param x: x position values
    :param y: y position values
    :param hue: each dot's values, use for color set, eg. ['1', '3', '1', '2']
    :param ax: matplotlib Axes object
    :param title: figure title
    :param x_label: x label
    :param y_label: y label
    :param color_bar: show color bar or not, color_values must be int array or list when color_bar is True
    :param bad_color: the name list of clusters to show.
    :param dot_size: marker size.
    :param palette: customized colors
    :param legend_ncol: number of legend columns
    :param show_legend:
    :param show_ticks:
    :param vmin:
    :param vmax:
    :return: matplotlib Axes object

    color_values must be int array or list when color_bar is True
    """
    if not ax:
        _, ax = plt.subplots(figsize=(7, 7))
    dot_size = 120000 / len(hue) if dot_size is None else dot_size
    # add a color bar
    if color_bar:
        colors = conf.linear_colors(palette)
        cmap = ListedColormap(colors)
        cmap.set_bad(bad_color)
        sns.scatterplot(x=x, y=y, hue=hue, ax=ax, palette=cmap,
                        size=hue, sizes=(dot_size, dot_size), vmin=vmin, vmax=vmax)
        if vmin is None and vmax is None:
            norm = plt.Normalize(hue.min(), hue.max())
            sm = plt.cm.ScalarMappable(cmap=cmap, norm=norm)
            sm.set_array([])
            ax.figure.colorbar(sm)
        ax.legend_.remove()
    else:
        g = natsorted(set(hue))
        colors = conf.get_colors(palette)
        color_dict = collections.OrderedDict(dict([(g[i], colors[i]) for i in range(len(g))]))
        sns.scatterplot(x=x, y=y, hue=hue, hue_order=g, palette=color_dict,
                        size=hue, sizes=(dot_size, dot_size), ax=ax)
        handles, labels = ax.get_legend_handles_labels()
        ax.legend_.remove()
        ax.legend(handles, labels, ncol=legend_ncol, bbox_to_anchor=(1.02, 1),
                  loc='upper left', borderaxespad=0, frameon=False)
        for lh in ax.legend_.legendHandles:
            lh.set_alpha(1)
            lh._sizes = [40]

    if invert_y:
        ax.invert_yaxis()
    if not show_legend:
        ax.legend_.remove()
    if not show_ticks:
        ax.set_aspect('equal', adjustable='datalim')

    ax.set_title(title, fontsize=18, fontweight='bold')
    ax.set_ylabel(y_label, fontsize=15)  # set the y-axis label
    ax.set_xlabel(x_label, fontsize=15)  # set the x-axis label

    if not show_ticks:
        ax.set_yticks([])
        ax.set_xticks([])
    return ax
72e159af3ffad86e66b53789368edbb3a7bc406a
28,673
def lambda_sum_largest_canon(expr, real_args, imag_args, real2imag):
    """Canonicalize nuclear norm with Hermitian matrix input.
    """
    # Divide by two because each eigenvalue is repeated twice.
    real, imag = hermitian_canon(expr, real_args, imag_args, real2imag)
    real.k *= 2
    if imag_args[0] is not None:
        real /= 2
    return real, imag
41e2d460fc5d18d65e1d7227093ecaf88a925151
28,674
def dp_palindrome_length(dp, S, i, j):
    """
    Recursive function for finding the length of the longest palindromic
    sequence in a string.

    This is the algorithm covered in the lecture. It uses memoization to
    improve performance; dp ("dynamic programming") is a Python dict
    containing previously computed values.
    """
    if i == j:
        return 1
    if (i, j) in dp:
        return dp[(i, j)]
    if S[i] == S[j]:
        if i + 1 == j:
            dp[(i, j)] = 2
        else:
            dp[(i, j)] = 2 + dp_palindrome_length(dp, S, i + 1, j - 1)
    else:
        dp[(i, j)] = max(
            dp_palindrome_length(dp, S, i + 1, j),
            dp_palindrome_length(dp, S, i, j - 1))
    return dp[(i, j)]
10a8ac671674ba1ef57cd473413211a339f94e62
28,675
def ellip_enclose(points, color, inc=1, lw=2, nst=2):
    """
    Plot the minimum ellipse around a set of points.

    Based on:
    https://github.com/joferkington/oost_paper_code/blob/master/error_ellipse.py
    """
    def eigsorted(cov):
        vals, vecs = np.linalg.eigh(cov)
        order = vals.argsort()[::-1]
        return vals[order], vecs[:, order]

    x = points[:, 0]
    y = points[:, 1]
    cov = np.cov(x, y)
    vals, vecs = eigsorted(cov)
    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))
    w, h = 2 * nst * np.sqrt(vals)
    center = np.mean(points, 0)
    ell = patches.Ellipse(center, width=inc * w, height=inc * h, angle=theta,
                          facecolor=color, alpha=0.2, lw=0)
    edge = patches.Ellipse(center, width=inc * w, height=inc * h, angle=theta,
                           facecolor='none', edgecolor=color, lw=lw)
    return ell, edge
c6f3fabfb306f29c5c09ffee732d5afea2c1fe33
28,676
def catalog_dictionary_per_observation(cats, obs_nums, targets, defaults): """Translate a dictionary of catalogs from a case of either: 1. Separate catalogs for each target name 2. Separate catalogs for each target name and instrument into a dictionary of catalogs for each instrument and observation Parameters ---------- cats : dict Dictionary of catalogs. Can be: Same catalogs for all instruments within each observation catalogs = {'my_targ_1': {'point_source': 'ptsrc1.cat', 'galaxy': 'galaxy1.cat', 'extended': 'ex1.cat'}, 'my_targ_2': {'point_source': 'ptsrc2.cat', 'galaxy': 'galaxy2.cat', 'extended': 'ex2.cat'}} Different catalogs for each instrument in each observation catalogs = {'my_targ_1': {'nircam': {'point_source': 'ptsrc1.cat', 'galaxy': 'galaxy1.cat', 'extended': 'ex1.cat'}, 'niriss': {'pointsource': 'ptsrc_nis.cat', 'galaxy': 'galaxy_nis.cat'}}, 'my_targ_2': {'nircam': {'point_source': 'ptsrc2.cat', 'galaxy': 'galaxy2.cat', 'extended': 'ex2.cat'}}} obs_nums : numpy.ndarray 1D array of observation ID numbers targets : numpy.ndarray 1d array of target names, with a 1:1 correspondence to obs_nums defaults : dict Dictionary of default catalog values Returns ------- obs_cats : dict Dictionary of catalogs per observation, with keys that match those in the defaults obs_cats = {'001': {'nircam': {'PointsourceCatalog': 'ptsrc1.cat', 'GalaxyCatalog': 'galaxy1.cat', 'ExtendedCatalog': 'ex1.cat'}, 'niriss': {'PointsourceCatalog': 'ptsrc_nis.cat', 'GalaxyCatalog': 'galaxy_nis.cat'}, } '002': {'nircam': {'PointsourceCatalog': 'ptsrc2.cat', 'GalaxyCatalog': 'galaxy2.cat', 'ExtendedCatalog': 'ex2.cat'}, 'niriss': {'PointsourceCatalog': 'ptsrc_nis2.cat', 'GalaxyCatalog': 'galaxy_nis2.cat'} } } """ # Set up the output dictionary. Populate with keys for all observations # and default catalog values to cover any entries in obs_cats that are # note present obs_cats = {} for number in obs_nums: obs_cats[number] = {'nircam': {}, 'niriss': {}, 'fgs': {}, 'miri':{}, 'nirspec': {}} for cat_type in POSSIBLE_CATS: obs_cats[number]['nircam'][CAT_TYPE_MAPPING[cat_type]] = defaults[CAT_TYPE_MAPPING[cat_type]] obs_cats[number]['niriss'][CAT_TYPE_MAPPING[cat_type]] = defaults[CAT_TYPE_MAPPING[cat_type]] obs_cats[number]['fgs'][CAT_TYPE_MAPPING[cat_type]] = defaults[CAT_TYPE_MAPPING[cat_type]] obs_cats[number]['miri'][CAT_TYPE_MAPPING[cat_type]] = 'None' obs_cats[number]['nirspec'][CAT_TYPE_MAPPING[cat_type]] = 'None' # Loop over the keys in the top level of the input dictionary for key1 in cats: # Find the observation numbers that use this target match = np.array(targets) == key1 # Check to see if the second level of the input dictionary is # a dictionary of catalogs, or a dictionary of instruments keys2 = cats[key1].keys() keys_present = [True if poss in keys2 else False for poss in POSSIBLE_CATS] if any(keys_present): # Dictionary contains catalog names, so we use the same catalogs # for all instruments # Loop over the observation numbers that use this target and # populate the entries for each with the catalog names. 
In # this case the catalog names are the same for all instruments for obs_number in obs_nums[match]: for key2 in keys2: obs_cats[obs_number]['nircam'][CAT_TYPE_MAPPING[key2]] = cats[key1][key2] obs_cats[obs_number]['niriss'][CAT_TYPE_MAPPING[key2]] = cats[key1][key2] obs_cats[obs_number]['fgs'][CAT_TYPE_MAPPING[key2]] = cats[key1][key2] else: # Dictionary contains instrument names # Loop over observation numbers that use this target and # populate the different catalogs for each instrument for obs_number in obs_nums[match]: for instrument in keys2: ctypes = cats[key1][instrument].keys() for ctype in ctypes: obs_cats[obs_number][instrument][CAT_TYPE_MAPPING[ctype]] = cats[key1][instrument][ctype] return obs_cats
b418e0315b242c251d6796636fdd3fdbcfefbfa5
28,677
def sierpinkspi(p1, p2, p3, degree, draw, image, colors):
    """ Draw Sierpinski Triangles. """
    colour = colors
    draw.polygon(((p1[0], p1[1]), (p2[0], p2[1]), (p3[0], p3[1])), fill=colour[degree])
    if degree > 0:
        sierpinkspi(p1, mid(p1, p2), mid(p1, p3), degree - 1, draw, image, colors)
        sierpinkspi(p2, mid(p1, p2), mid(p2, p3), degree - 1, draw, image, colors)
        sierpinkspi(p3, mid(p1, p3), mid(p2, p3), degree - 1, draw, image, colors)
    else:
        return image
c43662d50a655eed4298e34d2f9830e678a0ca96
28,678
def generate_depth_map(camera, Xw, shape): """Render pointcloud on image. Parameters ---------- camera: Camera Camera object with appropriately set extrinsics wrt world. Xw: np.ndarray (N x 3) 3D point cloud (x, y, z) in the world coordinate. shape: np.ndarray (H, W) Output depth image shape. Returns ------- depth: np.array Rendered depth image. """ assert len(shape) == 2, 'Shape needs to be 2-tuple.' # Move point cloud to the camera's (C) reference frame from the world (W) Xc = camera.p_cw * Xw # Project the points as if they were in the camera's frame of reference uv = Camera(K=camera.K).project(Xc).astype(int) # Colorize the point cloud based on depth z_c = Xc[:, 2] # Create an empty image to overlay H, W = shape depth = np.zeros((H, W), dtype=np.float32) in_view = np.logical_and.reduce([(uv >= 0).all(axis=1), uv[:, 0] < W, uv[:, 1] < H, z_c > 0]) uv, z_c = uv[in_view], z_c[in_view] depth[uv[:, 1], uv[:, 0]] = z_c return depth
f219d2128bdecf56e8e03aef7d6249b518d55f06
28,679
def create_own_child_column(X):
    """
    Replaces the column 'relationship' with a binary one called own-child
    """
    new_column = X['relationship'] == 'own-child'
    X_transformed = X.assign(own_child=new_column)
    X_transformed = X_transformed.drop('relationship', axis=1)
    return X_transformed
303ec8f073920f0bba6704740b200c7f3306b7bd
28,681
def find_next_gate(wires, op_list):
    """Given a list of operations, finds the next operation that acts on at least
    one of the same set of wires, if present.

    Args:
        wires (Wires): A set of wires acted on by a quantum operation.
        op_list (list[Operation]): A list of operations that are implemented after
            the operation that acts on ``wires``.

    Returns:
        int or None: The index, in ``op_list``, of the earliest gate that uses one
        or more of the same wires, or ``None`` if no such gate is present.
    """
    next_gate_idx = None

    for op_idx, op in enumerate(op_list):
        if len(Wires.shared_wires([wires, op.wires])) > 0:
            next_gate_idx = op_idx
            break

    return next_gate_idx
287a3b2905f86dff0c75027bcba6bd00bab82fd8
28,682
def FDilatedConv1d(xC, xP, nnModule):
    """1D DILATED CAUSAL CONVOLUTION"""
    convC = nnModule.convC  # current
    convP = nnModule.convP  # previous
    output = F.conv1d(xC, convC.weight, convC.bias) + \
        F.conv1d(xP, convP.weight, convP.bias)
    return output
900065f6618f1b4c12191b1363ce6706ec28d222
28,683
def load_spans(file):
    """
    Loads the predicted spans
    """
    article_id, span_interval = ([], [])
    with open(file, 'r', encoding='utf-8') as f:
        for line in f.readlines():
            art_id, span_begin, span_end = [int(x) for x in line.rstrip().split('\t')]
            span_interval.append((span_begin, span_end))
            article_id.append(art_id)
    return article_id, span_interval
8f8de31e1d1df7f0d2a44d8f8db7f846750bd89f
28,684
def is_stupid_header_row(row):
    """returns true if we believe row is what the EPN-TAP people used as
    section separators in the columns table.

    That is: the text is red:-)
    """
    try:
        perhaps_p = row.contents[0].contents[0]
        perhaps_span = perhaps_p.contents[0]
        if perhaps_span.get("style") == 'color: rgb(255,0,0);':
            return True
    except (AttributeError, KeyError):
        pass  # Fall through to False
    return False
124108520486c020d2da64a8eb6f5d266990ae02
28,685
def get_cli_parser() -> ArgumentParser:
    """Return an ArgumentParser instance."""
    parser = ArgumentParser(description="CLI options for Alice and Bob key share")
    parser.add_argument('-p', help='Prime p for information exchange', type=int)
    parser.add_argument('-g', help='Prime g for information exchange', type=int)
    parser.add_argument('--bits', help="The number of bits for the private encryption key",
                        type=int, default=512)
    return parser
2ca9feff2940064163d8b5724b647ab56f4ea5e6
28,686
import re


def _get_http_and_https_proxy_ip(creds):
    """
    Get the http and https proxy ip.

    Args:
        creds (dict): Credential information according to the dut inventory
    """
    return (re.findall(r'[0-9]+(?:\.[0-9]+){3}', creds.get('proxy_env', {}).get('http_proxy', ''))[0],
            re.findall(r'[0-9]+(?:\.[0-9]+){3}', creds.get('proxy_env', {}).get('https_proxy', ''))[0])
b18d89718456830bdb186b3b1e120f4ae7c673c7
28,687
def geometric_expval(p):
    """
    Expected value of geometric distribution.
    """
    return 1. / p
3afb3adb7e9dafa03026f22074dfcc1f81c58ac8
28,689
def make_ticc_dataset( clusters=(0, 1, 0), n_dim=3, w_size=5, break_points=None, n_samples=200, n_dim_lat=0, sparsity_inv_matrix=0.5, T=9, rand_seed=None, **kwargs): """Generate data as the TICC method. Library implementation of `generate_synthetic_data.py`, original can be found at https://github.com/davidhallac/TICC """ if (len(clusters) * n_samples) % T != 0: raise ValueError( 'n_clusters * n_samples should be a multiple of n_times ' 'to avoid having samples in the same time period in different ' 'clusters') id_cluster = np.repeat(np.asarray(list(clusters)), n_samples) y = np.repeat(np.arange(T), len(clusters) * n_samples // T) cluster_mean = np.zeros(n_dim) cluster_mean_stack = np.zeros(n_dim * w_size) clusters = np.unique(list(clusters)) # Generate two inverse matrices precisions = {} covs = {} for i, cluster in enumerate(clusters): precisions[cluster] = make_ticc( rand_seed=i, num_blocks=w_size, n_dim_obs=n_dim, n_dim_lat=n_dim_lat, sparsity_inv_matrix=sparsity_inv_matrix, **kwargs) covs[cluster] = linalg.pinvh(precisions[cluster]) # Data matrix X = np.empty((id_cluster.size, n_dim)) precs = [] n = n_dim for i, label in enumerate(id_cluster): # for num in range(old_break_pt, break_pt): if i == 0: # conditional covariance and mean cov_tom = covs[label][:n_dim, :n_dim] mean = cluster_mean_stack[n_dim * (w_size - 1):] elif i < w_size: cov = covs[label][:(i + 1) * n, :(i + 1) * n] Sig11, Sig22, Sig21, Sig12 = _block_matrix(cov, i * n, i * n) Sig21Theta11 = Sig21.dot(linalg.pinvh(Sig11)) cov_tom = Sig22 - Sig21Theta11.dot(Sig12) # sigma2|1 mean = cluster_mean + Sig21Theta11.dot( X[:i].flatten() - cluster_mean_stack[:i * n_dim]) else: cov = covs[label][:w_size * n, :w_size * n] Sig11, Sig22, Sig21, Sig12 = _block_matrix( cov, (w_size - 1) * n, (w_size - 1) * n) Sig21Theta11 = Sig21.dot(linalg.pinvh(Sig11)) cov_tom = Sig22 - Sig21Theta11.dot(Sig12) # sigma2|1 mean = cluster_mean + Sig21Theta11.dot( X[i - w_size + 1:i].flatten() - cluster_mean_stack[:(w_size - 1) * n_dim]) X[i] = np.random.multivariate_normal(mean, cov_tom) precs.append(linalg.pinvh(cov_tom)) id_cluster_group = [] for c in np.unique(y): idx = np.where(y == c)[0] # check samples at same time belong to a single cluster assert np.unique(id_cluster[idx]).size == 1 id_cluster_group.append(id_cluster[idx][0]) data = Bunch( X=X, y=y, id_cluster=id_cluster, covs=covs, precs=precs, id_cluster_group=np.asarray(id_cluster_group)) return data
7c77d5ea4ff9e87681b0494333c49e24360b7072
28,690
from dustmaps import sfd
from dustmaps import planck


def get_dustmap(sourcemap, useweb=False):
    """ get the dustmap (from the dustmaps package) of the given source.

    Parameters
    ---------
    sourcemap: [string]
        origin of the MW extinction information.
        currently implemented: planck, sfd

    useweb: [bool] -optional-
        shall this query from the web
        = only implemented for sfd, ignored otherwise =

    Returns
    -------
    dustmaps.Dustmap
    """
    if sourcemap.lower() == "sfd":
        return sfd.SFDQuery() if not useweb else sfd.SFDWebQuery()
    if sourcemap.lower() == "planck":
        return planck.PlanckQuery()
    raise NotImplementedError(f"Only Planck and SFD maps implemented. {sourcemap} given.")
a5daec02601c968d25942afe1577ad301bbb6a55
28,692
def make_retro_pulse(x, y, z, zenith, azimuth):
    """Retro pulses originate from a DOM with an (x, y, z) coordinate and
    (potentially) a zenith and azimuth orientation (though for now the latter
    are ignored).
    """
    pulse = I3CLSimFlasherPulse()
    pulse.type = I3CLSimFlasherPulse.FlasherPulseType.retro
    pulse.pos = I3Position(x, y, z)
    pulse.dir = I3Direction(zenith, azimuth)
    pulse.time = 0.0
    pulse.numberOfPhotonsNoBias = 10000.

    # Following values don't make a difference
    pulse.pulseWidth = 1.0 * I3Units.ns
    pulse.angularEmissionSigmaPolar = 360.0 * I3Units.deg
    pulse.angularEmissionSigmaAzimuthal = 360.0 * I3Units.deg

    return pulse
de6fa8905276122c501b5a80842a12abfa2a81f1
28,693
def shiftLeft(col, numBits):
    """Shift the given value numBits left.

    >>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
    [Row(r=42)]
    """
    sc = SparkContext._active_spark_context
    return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits))
769cbcb4f66473bdeb789c1326aa58e763c4f320
28,694
def lerp(x0: float, x1: float, p: float) -> float:
    """
    Interpolates linearly between two values such that when p=0 the
    interpolated value is x0 and at p=1 it's x1
    """
    return (1 - p) * x0 + p * x1
c4114dcb5636e70b30cd72a6e7ceab1cd683fa8d
28,695
def discard_events(library, session, event_type, mechanism):
    """Discards event occurrences for specified event types and mechanisms in a session.

    Corresponds to viDiscardEvents function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param event_type: Logical event identifier.
    :param mechanism: Specifies event handling mechanisms to be disabled.
                      (Constants.QUEUE, .Handler, .SUSPEND_HNDLR, .ALL_MECH)
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    return library.viDiscardEvents(session, event_type, mechanism)
72010fae64bb0a1e615ce859d150f7f24f2c7171
28,696
def getSpeed(spindle=0):
    """Gets the interpreter's speed setting for the specified spindle.

    Args:
        spindle (int, optional) : The number of the spindle to get the speed of.
            If ``spindle`` is not specified spindle 0 is assumed.

    Returns:
        float: The interpreter speed setting, with any override applied if
        override enabled.
    """
    raw_speed = STAT.settings[2]
    if raw_speed == 0:
        raw_speed = abs(DEFAULT_SPEED)
    return raw_speed
a7c759ff91c079aacd77d7aa0141f42aa9ca60af
28,697
def appointment() -> any: """ Defines route to appointment booking page. :return: String of HTML template for appointment booking page or homepage if booking was successful. """ if request.method == 'POST': user_input = request.form.to_dict() try: request_is_valid(request=user_input) except ValueError as e: flash(f"{e}", category='error') return render_template('appointment.html', user_input=user_input, slots=[dt.time(hour=h, minute=m) for h in range(8, 23) for m in [0, 15, 30, 45]], today=dt.date.today(), max_days=dt.date.today() + dt.timedelta(days=14)) add_person(person=user_input) try: app_added = add_appointment(email=user_input['email1'], appointment_day=user_input['appointment_day'], appointment_time=user_input['appointment_time']) except TypeError as e: flash(f"{e}", category='error') return redirect(url_for('views.appointment')) if app_added: send_booking_confirmation(email=user_input['email1'], first_name=user_input['first_name'], appointment_day=user_input['appointment_day'], appointment_time=user_input['appointment_time']) flash('Appointment booked successfully! Please check your inbox for the booking confirmation.', category='success') else: flash('Appointment is already booked! Please check your inbox for the booking confirmation.', category='error') return redirect(url_for('views.home')) return render_template('appointment.html', slots=[dt.time(hour=h, minute=m) for h in range(8, 23) for m in [0, 15, 30, 45]], today=dt.date.today(), max_days=dt.date.today()+dt.timedelta(days=14))
5c55c0387300f21cfea45809bdf534ace4137fc6
28,698
def wait_for_task(task, actionName='job', hideResult=False):
    """
    Waits and provides updates on a vSphere task
    """
    while task.info.state == vim.TaskInfo.State.running:
        time.sleep(2)

    if task.info.state == vim.TaskInfo.State.success:
        if task.info.result is not None and not hideResult:
            out = '%s completed successfully, result: %s' % (actionName, task.info.result)
            print out
        else:
            out = '%s completed successfully.' % actionName
            print out
    else:
        out = '%s did not complete successfully: %s' % (actionName, task.info.error)
        raise task.info.error
        print out

    return task.info.result
c750238117579236b159bf2389e947e08c8af979
28,700
def _get_candidate_names():
    """Common setup sequence for all user-callable interfaces."""
    global _name_sequence
    if _name_sequence is None:
        _once_lock.acquire()
        try:
            if _name_sequence is None:
                _name_sequence = _RandomNameSequence()
        finally:
            _once_lock.release()
    return _name_sequence
bc42c4af0822fcaa419047644e9d4b9d064a42fd
28,701
def relative_difference(x: np.array, y: np.array) -> np.array:
    """ Returns the relative difference estimator for two Lagrange multipliers. """
    maximum = np.max([x, y])
    minimum = np.min([x, y])
    difference = maximum - minimum
    return np.abs(difference) / np.max(np.abs([x, y, difference, 1.]))
6b169a3deb3f6ed91958744521aa8028451fb3d8
28,702
def ConvertPngToYuvBarcodes(input_directory='.', output_directory='.'):
    """Converts PNG barcodes to YUV barcode images.

    This function reads all the PNG files from the input directory which are in
    the format frame_xxxx.png, where xxxx is the number of the frame, starting
    from 0000. The frames should be consecutive numbers. The output YUV file is
    named frame_xxxx.yuv. The function uses ffmpeg to do the conversion.

    Args:
        input_directory(string): The input directory to read the PNG barcodes from.
        output_directory(string): The output directory to write the YUV files to.

    Return:
        (bool): True if the conversion was without errors.
    """
    return helper_functions.PerformActionOnAllFiles(
        input_directory, 'barcode_', 'png', 0, _ConvertToYuvAndDelete,
        output_directory=output_directory, pattern='barcode_')
43cc0dd4126b0699212064e445608c82123ad7b9
28,703
from pathlib import Path


def _ignore_on_copy(directory, contents):  # pylint: disable=unused-argument
    """Provides list of items to be ignored.

    Args:
        directory (Path): The path to the current directory.
        contents (list): A list of files in the current directory.

    Returns:
        list: A list of files to be ignored.
    """
    # shutil passes strings, so ensure a Path
    directory = Path(directory)

    if directory.name == "material":
        return ["mkdocs_theme.yml", "main.html", "404.html"]
    if directory.name == "partials":
        return ["integrations"]
    if directory.name == "images":
        return ["favicon.png"]
    return []
3a551f6a252406b88fb19c0dc8180631cd5996ce
28,704
def registDeptUser(request):
    """
    Registers a Hometax cash receipt department user account.
    - https://docs.popbill.com/htcashbill/python/api#RegistDeptUser
    """
    try:
        # Popbill member business registration number
        CorpNum = settings.testCorpNum

        # Hometax department user account ID
        DeptUserID = "deptuserid"

        # Hometax department user account password
        DeptUserPWD = "deptuserpwd"

        response = htCashbillService.registDeptUser(CorpNum, DeptUserID, DeptUserPWD)

        return render(request, 'response.html', {'code': response.code, 'message': response.message})
    except PopbillException as PE:
        return render(request, 'exception.html', {'code': PE.code, 'message': PE.message})
e5f923ac4290fd029eafdf6e408d7847be9d0c6b
28,705
import torch


def generate_fake_data_loader():
    """ Generate fake-DataLoader with four batches, i.e. a list with sub-lists of samples and labels.
    It has four batches with three samples each. """
    samples1 = torch.tensor([[2., 2., 2., 2.], [2., 2., 0., 0.], [0., 0., 2., 2.]])
    samples2 = torch.tensor([[1., 2., 3., 4.], [1., 1., 2., 2.], [2., 2., 2., 2.]])
    labels1 = torch.tensor([0, 0, 1])
    labels2 = torch.tensor([1, 1, 0])
    return [[samples1, labels1], [samples1, labels2], [samples2, labels1], [samples2, labels2]]
4d86ab464653f5766a44f03e41fd2c26714cabf1
28,709
def get_RV_K( P_days, mp_Mearth, Ms_Msun, ecc=0.0, inc_deg=90.0, nsamples=10000, percs=[50, 16, 84], return_samples=False, plot=False, ): """Compute the RV semiamplitude in m/s via Monte Carlo P_days : tuple median and 1-sigma error mp_Mearth : tuple median and 1-sigma error Ms_Msun : tuple median and 1-sigma error """ if ( isinstance(P_days, tuple), isinstance(Ms_Msun, tuple), isinstance(mp_Mearth, tuple), ): # generate samples P_days = np.random.rand(nsamples) * P_days[1] + P_days[0] mp_Mearth = np.random.rand(nsamples) * mp_Mearth[1] + mp_Mearth[0] Ms_Msun = np.random.rand(nsamples) * Ms_Msun[1] + Ms_Msun[0] P = P_days * u.day.to(u.second) * u.second Ms = Ms_Msun * u.Msun.to(u.kg) * u.kg mp = mp_Mearth * u.Mearth.to(u.kg) * u.kg inc = np.deg2rad(inc_deg) K_samples = ( (2 * np.pi * c.G / (P * Ms * Ms)) ** (1.0 / 3) * mp * np.sin(inc) / unumpy.sqrt(1 - ecc ** 2) ).value K, K_lo, K_hi = np.percentile(K_samples, percs) K, K_siglo, K_sighi = K, K - K_lo, K_hi - K if plot: _ = hist(K_samples, bins="scott") if return_samples: return (K, K_siglo, K_sighi, K_samples) else: return (K, K_siglo, K_sighi)
11cdb7bfeef27d5a05638d74232e105a22fa0222
28,711
def arc(color, start_angle, stop_angle, width, height, x=None, y=None,
        thickness=1, anchor='center', **kwargs):
    """
    Function to make an arc.

    :param color: color to draw arc
    :type color: str or List[str]
    :param start_angle: angle to start drawing arc at
    :type start_angle: int
    :param stop_angle: angle to stop drawing arc at
    :type stop_angle: int
    :param thickness: thickness of arc in pixels
    :type thickness: int
    :param args: left top corner of arc and width and height of arc
    :type args: two Tuples (left, top), (width, height) or four ints left, top, width, height
    :return: Arc object created
    """
    return Arc((x, y), width, height, start_angle, stop_angle, anchor, color, thickness, **kwargs)
42c0a53632315ff03b92c53cbc172a0cfd08f5a7
28,712
def calculate_shapley_value(g, prob_vals, maxIter=20000): """ This algorithm is based on page 29 of the following paper: https://arxiv.org/ftp/arxiv/papers/1402/1402.0567.pdf :param g: the graph :param prob_vals: a list. it contains the weight of each node in the graph :param maxIter: maximum number of iterations. for 6-12 nodes, the value should be near 2000. for 1000 nodes, this value is around 200000 :return: """ ## first block n_nodes = len(g) node_list = list(range(0, n_nodes)) shapley_val_list = [0] * n_nodes ##second block for i in range(0, maxIter): shuffle(node_list) P = [] for node in node_list: ## forming the subgraph based on the nodes in P subgraph2 = nx.Graph() if P: subgraph2_nodes = P subgraph2.add_nodes_from(subgraph2_nodes) if len(subgraph2_nodes) > 1: for x in range(0, len(subgraph2_nodes)): for y in range(x + 1, len(subgraph2_nodes)): if g.has_edge(subgraph2_nodes[x], subgraph2_nodes[y]): subgraph2.add_edge(subgraph2_nodes[x], subgraph2_nodes[y]) map_val2 = tshp.get_map_value(subgraph2, prob_vals) else: map_val2 = 0 ## adding extra node to get map value 1 subgraph2.add_node(node) if len(subgraph2) > 1: nbrs = set(g.neighbors(node)) for nbr in nbrs - set([node]): if subgraph2.has_node(nbr): subgraph2.add_edge(node, nbr) map_val1 = tshp.get_map_value(subgraph2, prob_vals) shapley_val_list[node] += (map_val1 - map_val2) P.append(node) ## third block for i in range(0, n_nodes): shapley_val_list[i] = shapley_val_list[i]/float(maxIter) ## fourth block return shapley_val_list
41329a17f0914597bcf457ea04e9dc0a7053ae62
28,713
from typing import List
from typing import Dict
from typing import Any


async def complete_multipart_upload(bucket: str, s3_key: str, parts: List, upload_id: str) -> Dict[str, Any]:
    """Complete multipart upload to s3.

    Args:
        bucket (str): s3 bucket
        s3_key (str): s3 prefix
        parts (List): all parts info
        upload_id (str): multipart upload Id

    Returns:
        Dict[str, Any]: response of operation
    """
    response = await S3['client']['obj'].complete_multipart_upload(
        Bucket=bucket,
        Key=s3_key,
        UploadId=upload_id,
        MultipartUpload={'Parts': parts})
    return response
01441cbc196f594bead4dd9a9b17fe1a3c8bfa4d
28,714
def build_mask(module='A', pixscale=0.03):
    """Create coronagraphic mask image

    Return a truncated image of the full coronagraphic mask layout
    for a given module. +V3 is up, and +V2 is to the left.
    """
    if module == 'A':
        names = ['MASK210R', 'MASK335R', 'MASK430R', 'MASKSWB', 'MASKLWB']
    elif module == 'B':
        names = ['MASKSWB', 'MASKLWB', 'MASK430R', 'MASK335R', 'MASK210R']
    allims = [coron_trans(name, module, pixscale) for name in names]
    return np.concatenate(allims, axis=1)
97e068fe8eef6e8fdd65b1e426428001cf549332
28,715
async def update_login_me( *, password: str = Body(...), new_email: tp.Optional[EmailStr] = Body(None, alias='newEmail'), new_password: tp.Optional[str] = Body(None, alias='newPassword'), current_user: models.User = Depends(common.get_current_user), uow: IUnitOfWork = Depends(common.get_uow), ) -> models.User: """Updates the user's login credentials.""" user = uow.user.authenticate(current_user.email, password) if not user: raise HTTPException( status_code=400, detail="Incorrect password" ) elif not user.is_active: raise HTTPException( status_code=400, detail="Inactive user", ) user_in = schema.UserUpdate() if new_email: user_in.email = new_email if new_password: user_in.password = new_password with uow: new_user = uow.user.update(obj=user, obj_in=user_in) return new_user
ec3f56ee474d19a4fd89c51940f3a198322672a1
28,716
def comp_sharpness(is_stationary, signal, fs, method='din', skip=0):
    """ Acoustic sharpness calculation according to different methods:
        Aures, Von Bismarck, DIN 45692, Fastl

    Parameters:
    ----------
    is_stationary: boolean
        True if the signal is stationary, false if it is time varying
    signal: numpy.array
        time history values
    fs: integer
        sampling frequency
    method: string
        'din' by default, 'aures', 'bismarck', 'fastl'
    skip : float
        number of second to be cut at the beginning of the analysis

    Outputs
    ------
    S : float
        sharpness value
    """
    if method != 'din' and method != 'aures' and method != 'fastl' and method != 'bismarck':
        raise ValueError("ERROR: method must be 'din', 'aures', 'bismarck', 'fastl'")

    loudness = comp_loudness(is_stationary, signal, fs)

    if method == 'din':
        S = comp_sharpness_din(loudness['values'], loudness['specific values'], is_stationary)
    elif method == 'aures':
        S = comp_sharpness_aures(loudness['values'], loudness['specific values'], is_stationary)
    elif method == 'bismarck':
        S = comp_sharpness_bismarck(loudness['values'], loudness['specific values'], is_stationary)
    elif method == 'fastl':
        S = comp_sharpness_fastl(loudness['values'], loudness['specific values'], is_stationary)

    if is_stationary == False:
        # Cut transient effect
        time = np.linspace(0, len(signal) / fs, len(S))
        cut_index = np.argmin(np.abs(time - skip))
        S = S[cut_index:]

    output = {
        "name": "sharpness",
        "method": method,
        "values": S,
        "skip": skip
    }

    return output
a8ae39740c90e824081e3979d5ff2b5c96a8ad75
28,717
def load_hobbies(path='data', extract=True):
    """
    Downloads the 'hobbies' dataset, saving it to the output
    path specified and returns the data.
    """
    # name of the dataset
    name = 'hobbies'
    data = _load_file_data(name, path, extract)
    return data
e60e024d0fe1766c599a3b693f51522cb7d7303a
28,718
def is_viable(individual):
    """
    evaluate.evaluate() will set an individual's fitness to NaN and the
    attributes `is_viable` to False, and will assign any exception triggered
    during the individuals evaluation to `exception`. This just checks the
    individual's `is_viable`; if it doesn't have one, this assumes it is viable.

    :param individual: to be checked if viable
    :return: True if individual is viable
    """
    if hasattr(individual, 'is_viable'):
        return individual.is_viable
    else:
        return True
c1e5c839f362e99800dcd1a996be9345cabb4261
28,719
def combine_counts(hits1, hits2, multipliers=None, total_reads=0, unmatched_1="Unknown", unmatched_2="Unknown", ): """ compile counts into nested dicts """ total_counted = 0 counts = {} # keep track of all unique hit ids from set 2 types2 = set() if multipliers is None: def _get_increment(read): return 1 def _update_hits(increment, counts, hit1, hit2): """ Just add 1 to get raw numbers """ h1counts = counts.setdefault(hit1, {}) h1counts[hit2] = h1counts.get(hit2, 0) + increment else: # if a mult table was given, use it to get total count if total_reads == 0: total_reads = len(multipliers) def _get_increment(read): """ get multiplier. Use pop to see leftovers """ return multipliers.pop(read, 1) def _update_hits(increment, counts, hit1, hit2): """ count both raw numbers and multiplied """ h1counts = counts.setdefault(hit1, {}) count_tuple = h1counts.get(hit2, (0, 0)) count_tuple = (count_tuple[0] + 1, count_tuple[1] + increment) h1counts[hit2] = count_tuple # Start by using reads from hits1 as index for (read, hit_list1) in hits1.items(): # remove hits from hits2 as we go, so we know what didn't match hits1 # default to umatched_2 total_counted += 1 increment = _get_increment(read) hit_list2 = hits2.pop(read, [unmatched_2, ]) for hit2 in hit_list2: for hit1 in hit_list1: _update_hits(increment, counts, hit1, hit2) types2.add(hit2) # remaining reads in hits2 counted as Unknown # we know these don't exist in hits1 hit1 = unmatched_1 for read, hit_list2 in hits2.items(): total_counted += 1 increment = _get_increment(read) for hit2 in hit_list2: _update_hits(increment, counts, hit1, hit2) types2.add(hit2) # if a total was given if total_reads > 0: unknown_counts = counts.setdefault(unmatched_1, {}) if multipliers is None: unknown_counts[unmatched_2] = total_reads - total_counted else: unknown_counts[unmatched_2] = (total_reads - total_counted, sum(multipliers.values())) return (counts, types2)
505e91f6538267e40f438926df201cf25cb1a3f9
28,720
def root():
    """Serves the website home page"""
    return render_template("index.html")
676c966da523108bd9802c2247cf320993815124
28,721
import string
import random


def getCookie():
    """
    This function will return a randomly generated cookie
    :return: A cookie
    """
    lettersAndDigits = string.ascii_lowercase + string.digits
    cookie = 'JSESSIONID='
    cookie += ''.join(random.choice(lettersAndDigits) for ch in range(31))
    return cookie
6fff76d37921174030fdaf9d4cb8a39222c8906c
28,722
def get_authenticated_igramscraper(username: str, password: str):
    """Gets an authenticated igramscraper Instagram client instance."""
    client = Instagram()
    client.with_credentials(username, password)
    #client.login(two_step_verificator=True)
    client.login(two_step_verificator=False)
    return client
c8f7cf4500aa82f11cf1b27a161d75a7261ee84a
28,723
def read_in_nn_path(path):
    """
    Read in NN from a specified path
    """
    tmp = np.load(path)
    w_array_0 = tmp["w_array_0"]
    w_array_1 = tmp["w_array_1"]
    w_array_2 = tmp["w_array_2"]
    b_array_0 = tmp["b_array_0"]
    b_array_1 = tmp["b_array_1"]
    b_array_2 = tmp["b_array_2"]
    x_min = tmp["x_min"]
    x_max = tmp["x_max"]
    wavelength_payne = tmp["wavelength_payne"]
    NN_coeffs = (w_array_0, w_array_1, w_array_2, b_array_0, b_array_1, b_array_2, x_min, x_max)
    tmp.close()
    return NN_coeffs, wavelength_payne
3f2366ab9fd4b4625c8b7d00b1191429678b466b
28,724
def crop_zeros(array, remain=0, return_bound=False): """ Crop the edge zero of the input array. Parameters ---------- array : numpy.ndarray 2D numpy array. remain : int The number of edges of all zeros which you want to remain. return_bound : str or bool Select the mode to manipulate the drawing. True: return array and bound. 'only_bound': return bound. Others: return array. Returns ------- out : np.ndarray, optional Cropped array. left_bound : int, optional The edge of the left cropping. right_bound : int, optional The edge of the right cropping. upper_bound : int, optional The edge of the upper cropping. lower_bound : int, optional The edge of the lower cropping. References ---------- https://stackoverflow.com/questions/48987774/how-to-crop-a-numpy-2d-array-to-non-zero-values """ row = array.any(1) if row.any(): row_size, col_size = array.shape col = array.any(0) left_bound = np.max([col.argmax() - remain, 0]) right_bound = np.min([col_size - col[::-1].argmax() + remain, col_size - 1]) # col[::-1] is reverse of col upper_bound = np.max([row.argmax() - remain, 0]) lower_bound = np.min([row_size - row[::-1].argmax() + remain, row_size - 1]) # row[::-1] is reverse of row out = array[upper_bound:lower_bound, left_bound:right_bound] else: left_bound = None right_bound = None upper_bound = None lower_bound = None out = np.empty((0, 0), dtype=bool) if isinstance(return_bound, bool) and return_bound: return out, (left_bound, right_bound, upper_bound, lower_bound) elif return_bound == 'only_bound': return left_bound, right_bound, upper_bound, lower_bound else: return out
13cb5a0a289ef622d3dd663777e6a0d2814b5104
28,725
def go_info_running(data, info_name, arguments):
    """Returns "1" if go is running, otherwise "0"."""
    return '1' if 'modifier' in hooks else '0'
8027d0106e379156225c87db1959110fcfac6777
28,726
def letra_mas_comun(cadena: str) -> str:
    """ Letter
    Parameters:
        cadena (str): The string in which we want to find the most common letter
    Returns:
        str: The most common letter in the input string; if two letters tie,
        the alphabetically later one is returned.
    """
    letras_en_conteo = {}
    mayor_conteo = 0
    letra_moda = ""
    for cada_caracter in cadena:
        if cada_caracter >= "A" and cada_caracter <= "z":
            # Count each character
            if cada_caracter not in letras_en_conteo:
                letras_en_conteo[cada_caracter] = 1
            else:
                letras_en_conteo[cada_caracter] += 1
            # Check for the most frequently counted character
            if letras_en_conteo[cada_caracter] == mayor_conteo:
                if cada_caracter > letra_moda:
                    letra_moda = cada_caracter
            elif letras_en_conteo[cada_caracter] > mayor_conteo:
                mayor_conteo = letras_en_conteo[cada_caracter]
                letra_moda = cada_caracter
    print(letras_en_conteo)
    print(letra_moda)
    return letra_moda
c36a753717365164ca8c3089b398d9b6e358ef3f
28,727
def dwt2(image, wavelet, mode="symmetric", axes=(-2, -1)):
    """Computes single level wavelet decomposition for 2D images
    """
    wavelet = ensure_wavelet_(wavelet)
    image = promote_arg_dtypes(image)
    dec_lo = wavelet.dec_lo
    dec_hi = wavelet.dec_hi
    axes = tuple(axes)
    if len(axes) != 2:
        raise ValueError("Expected two dimensions")
    # make sure that axes are positive
    axes = [a + image.ndim if a < 0 else a for a in axes]
    ca, cd = dwt_axis(image, wavelet, axes[0], mode)
    caa, cad = dwt_axis(ca, wavelet, axes[1], mode)
    cda, cdd = dwt_axis(cd, wavelet, axes[1], mode)
    return caa, (cda, cad, cdd)
4ee7e1f3c19bb1b0bf8670598f1744b7241b235d
28,728
def recommended_global_tags_v2(release, base_tags, user_tags, metadata): """ Determine the recommended set of global tags for the given conditions. This function is called by b2conditionsdb-recommend and it may be called by conditions configuration callbacks. While it is in principle not limited to the use case of end user analysis this is expected to be the main case as in the other cases the production manager will most likely set the global tags explicitly in the steering file. Parameters: release (str): The release version that the user has set up. base_tags (list(str)): The global tags of the input files or default global tags in case of no input. user_tags (list(str)): The global tags provided by the user. metadata (list): The EventMetaData objects of the input files or None in case of no input. Returns: A dictionary with the following keys: tags : list of recommended global tags (mandatory) message: a text message for the user (optional) release: a recommended release (optional) """ # gather information that we may want to use for the decision about the recommended GT: # existing GTs, release used to create the input data existing_master_tags = [tag for tag in base_tags if tag.startswith('master_') or tag.startswith('release-')] existing_data_tags = [tag for tag in base_tags if tag.startswith('data_')] existing_mc_tags = [tag for tag in base_tags if tag.startswith('mc_')] existing_analysis_tags = [tag for tag in base_tags if tag.startswith('analysis_')] data_release = metadata[0]['release'] if metadata else None # if this is run-independent MC we don't want to show data tags (all other cases, we do) if metadata: is_mc = bool(metadata[0]['isMC']) experiments = [int(metadata[0]['experimentLow']), int(metadata[0]['experimentHigh'])] is_run_independent_mc = experiments[0] == experiments[1] and experiments[0] in [0, 1002, 1003] else: is_run_independent_mc = False # now construct the recommendation result = {'tags': [], 'message': ''} # recommended release recommended_release = supported_release(release) if (release.startswith('release') or release.startswith('light')) and recommended_release != release: result['message'] += 'You are using %s, but we recommend to use %s.\n' % (release, recommended_release) result['release'] = recommended_release # tag to be used for (raw) data processing, depending on the release used for the processing # data_tags provides a mapping of supported release to the recommended data GT data_tags = {_supported_releases[-1]: 'data_reprocessing_proc9'} data_tag = data_tags.get(recommended_release, None) # tag to be used for run-dependent MC production, depending on the release used for the production # mc_tags provides a mapping of supported release to the recommended mc GT mc_tags = {_supported_releases[-1]: 'mc_production_mc12'} mc_tag = mc_tags.get(recommended_release, None) # tag to be used for analysis tools, depending on the release used for the analysis # analysis_tags provides a mapping of supported release to the recommended analysis GT analysis_tags = {_supported_releases[-1]: 'analysis_tools_light-2106-rhea'} analysis_tag = analysis_tags.get(recommended_release, None) # In case of B2BII we do not have metadata if metadata == []: result['tags'] = ['B2BII'] else: # If we have a master GT this means either we are generating events # or we read a file that was produced with it. So we keep it as last GT. 
result['tags'] += existing_master_tags # Always use online GT result['tags'].insert(0, 'online') # Prepend the data GT if the file is not run-independent MC if metadata is None or not is_run_independent_mc: if data_tag: result['tags'].insert(0, data_tag) else: result['message'] += 'WARNING: There is no recommended data global tag.' # Prepend the MC GT if we generate events (no metadata) # or if we read a file that was produced with a MC GT if metadata is None or existing_mc_tags: if mc_tag: result['tags'].insert(0, mc_tag) else: result['message'] += 'WARNING: There is no recommended mc global tag.' # Prepend the analysis GT if analysis_tag: result['tags'].insert(0, analysis_tag) else: result['message'] += 'WARNING: There is no recommended analysis global tag.' # What else do we want to tell the user? if result['tags'] != base_tags: result['message'] += 'The recommended tags differ from the base tags: %s' % ' '.join(base_tags) + '\n' result['message'] += 'Use the default conditions configuration if you want to take the base tags.\n' return result
8396dcc2d54a5e36dfe5485d33ef439059a944c6
28,729
def plot_corelation_matrix(data):
    """
    Plotting the correlation matrix on the dataset using the numeric columns only.
    """
    corr = data.select_dtypes(include=['float64', 'int64']).iloc[:, 1:].corr()

    # Generate a mask for the upper triangle
    mask = np.zeros_like(corr, dtype=np.bool)
    mask[np.triu_indices_from(mask)] = True

    # Set up the matplotlib figure
    f, ax = plt.subplots(figsize=(22, 22))

    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(220, 10, as_cmap=True)

    sns.heatmap(
        corr, mask=mask, cmap=cmap, center=0.0,
        vmax=1, square=True, linewidths=.5, ax=ax
    )
    return corr
49e89f3ba844f0bf9676bca4051c72ad1305294f
28,730
import urllib
from bs4 import BeautifulSoup


def product_by_id(product_id):
    """
    Get Product description by product id
    :param product_id: Id of the product
    :return:
    """
    host = "https://cymax.com/"
    site_data = urllib.urlopen(host + str(product_id) + '--C0.htm').read()
    soup = BeautifulSoup(site_data)
    product = soup.find_all("div", class_="product-item")
    # if search result is more than one item,
    # it's most likely returning all items
    if len(product) == 1:
        product_description = product[0].find(class_="product-description").getText()
        product_img = product[0].find(class_="product-item-img")["src"]
        return product_description, product_img
2f2f3abfd0dcf5a124ae4a1bd3975734fbac7783
28,731
def overlap(X, window_size, window_step): """ Create an overlapped version of X Parameters ---------- X : ndarray, shape=(n_samples,) Input signal to window and overlap window_size : int Size of windows to take window_step : int Step size between windows Returns ------- X_strided : shape=(n_windows, window_size) 2D array of overlapped X """ if window_size % 2 != 0: raise ValueError("Window size must be even!") # Make sure there are an even number of windows before stridetricks append = np.zeros((window_size - len(X) % window_size)) X = np.hstack((X, append)) ws = window_size ss = window_step a = X valid = len(a) - ws nw = (valid) // ss out = np.ndarray((nw, ws), dtype=a.dtype) for i in range(nw): # "slide" the window along the samples start = i * ss stop = start + ws out[i] = a[start: stop] return out
4f53be9c87d0ce9800a6e1b1d96ae4786eace78b
28,732
def may_ozerov_depth_3_complexity(n, k, w, mem=inf, hmap=1, memory_access=0): """ Complexity estimate of May-Ozerov algorithm in depth 3 using Indyk-Motwani for NN search [MayOze15] May, A. and Ozerov, I.: On computing nearest neighbors with applications to decoding of binary linear codes. In: Annual International Conference on the Theory and Applications of Cryptographic Techniques. pp. 203--228. Springer (2015) expected weight distribution:: +-------------------------+---------------------+---------------------+ | <-----+ n - k - l+----->|<--+ (k + l) / 2 +-->|<--+ (k + l) / 2 +-->| | w - 2p | p | p | +-------------------------+---------------------+---------------------+ INPUT: - ``n`` -- length of the code - ``k`` -- dimension of the code - ``w`` -- Hamming weight of error vector - ``mem`` -- upper bound on the available memory (as log2), default unlimited - ``hmap`` -- indicates if hashmap is being used (default: true) - ``memory_access`` -- specifies the memory access cost model (default: 0, choices: 0 - constant, 1 - logarithmic, 2 - square-root, 3 - cube-root or deploy custom function which takes as input the logarithm of the total memory usage) EXAMPLES:: >>> from .estimator import may_ozerov_depth_3_complexity >>> may_ozerov_depth_3_complexity(n=100,k=50,w=10) # doctest: +SKIP """ solutions = max(0, log2(binom(n, w)) - (n - k)) time = inf memory = 0 r = _optimize_m4ri(n, k, mem) i_val = [20, 200, 20, 10] i_val_inc = [10, 10, 10, 10] params = [-1 for _ in range(4)] while True: stop = True for p in range(max(params[0] - i_val_inc[0] // 2, 0), min(w // 2, i_val[0]), 2): for l in range(max(params[1] - i_val_inc[1] // 2, 0), min(n - k - (w - 2 * p), i_val[1])): k1 = (k + l) // 2 for p2 in range(max(params[2] - i_val_inc[2] // 2, p // 2 + ((p // 2) % 2)), p + i_val[2], 2): for p1 in range(max(params[3] - i_val_inc[3] // 2, (p2 + 1) // 2), min(p2 + i_val[3], k1 - p2 // 2)): L1 = binom(k1, p1) if log2(L1) > time: continue reps1 = (binom(p2, p2 // 2) * binom(k1 - p2, p1 - p2 // 2)) ** 2 l1 = int(ceil(log2(reps1))) if l1 > l: continue L12 = max(1, L1 ** 2 // 2 ** l1) reps2 = (binom(p, p // 2) * binom(k1 - p, p2 - p // 2)) ** 2 L1234 = max(1, L12 ** 2 // 2 ** (l - l1)) tmp_mem = log2((2 * L1 + L12 + L1234) + _mem_matrix(n, k, r)) if tmp_mem > mem: continue Tp = max( log2(binom(n, w)) - log2(binom(n - k - l, w - 2 * p)) - 2 * log2(binom(k1, p)) - solutions, 0) Tg = _gaussian_elimination_complexity(n, k, r) T_tree = 4 * _list_merge_complexity(L1, l1, hmap) + 2 * _list_merge_complexity(L12, l - l1, hmap) + _indyk_motwani_complexity( L1234, n - k - l, w - 2 * p, hmap) T_rep = int(ceil(2 ** (max(l - log2(reps2), 0) + 3 * max(l1 - log2(reps1), 0)))) tmp = Tp + log2(Tg + T_rep * T_tree) tmp += __memory_access_cost(tmp_mem, memory_access) if tmp < time: time = tmp memory = tmp_mem params = [p, l, p2, p1] for i in range(len(i_val)): if params[i] >= i_val[i] - i_val_inc[i] / 2: i_val[i] += i_val_inc[i] stop = False if stop: break break par = {"l": params[1], "p": params[0], "p1": params[3], "p2": params[2], "depth": 3} res = {"time": time, "memory": memory, "parameters": par} return res
b390a515626185912cbc234fbebd492a0e154bbb
28,733
import json import copy def unpack_single_run_meta(storage, meta, molecules): """Transforms a metadata compute packet into an expanded QC Schema for multiple runs. Parameters ---------- db : DBSocket A live connection to the current database. meta : dict A JSON description of the metadata involved with the computation molecules : list of str, dict A list of molecule ID's or full JSON molecules associated with the run. Returns ------- ret : tuple(dict, list) A dictionary of JSON representations with keys built in. Examples -------- >>> meta = { "procedure": "single", "driver": "energy", "method": "HF", "basis": "sto-3g", "options": "default", "program": "psi4", } >>> molecules = [{"geometry": [0, 0, 0], "symbols" : ["He"]}] >>> unpack_single_run_meta(storage, meta, molecules) """ # Get the required molecules indexed_molecules = {k: v for k, v in enumerate(molecules)} raw_molecules_query = storage.mixed_molecule_get(indexed_molecules) # Pull out the needed options if meta["options"] is None: option_set = {} else: option_set = storage.get_options(program=meta["program"], name=meta["options"], with_ids=False)["data"][0] del option_set["name"] del option_set["program"] # Create the "universal header" task_meta = json.dumps({ "schema_name": "qc_schema_input", "schema_version": 1, "program": meta["program"], "driver": meta["driver"], "keywords": option_set, "model": { "method": meta["method"], "basis": meta["basis"] }, "qcfractal_tags": { "program": meta["program"], "options": meta["options"] } }) tasks = {} indexer = copy.deepcopy(meta) for idx, mol in raw_molecules_query["data"].items(): data = json.loads(task_meta) data["molecule"] = mol indexer["molecule"] = mol["id"] tasks[interface.schema.format_result_indices(indexer)] = data return tasks, []
3a3237067b4e52a5f7cb7d5ecc314061eaaa2b15
28,734
def getKey(event):
    """Returns the Key Identifier of the given event.

    Available Codes:
    https://www.w3.org/TR/2006/WD-DOM-Level-3-Events-20060413/keyset.html#KeySet-Set
    """
    if hasattr(event, "key"):
        return event.key
    elif hasattr(event, "keyIdentifier"):
        if event.keyIdentifier in ["Esc", "U+001B"]:
            return "Escape"
        else:
            return event.keyIdentifier
    return None
0935ad4cb1ba7040565647b2e26f265df5674e1d
28,735
def get_long_season_name(short_name):
    """convert short season name of format 1718 to long name like 2017-18.
    Past generations: sorry this doesn't work for 1999 and earlier!
    Future generations: sorry this doesn't work for the 2100s onwards!
    """
    return '20' + short_name[:2] + '-' + short_name[2:]
314ef85571af349e2e31ab4d08497a04e19d4118
28,736
from typing import List
from typing import Any
from typing import Dict


def make_variables_snapshots(*, variables: List[Any]) -> str:
    """
    Make snapshots of specified variables.

    Parameters
    ----------
    variables : list
        Variables to make snapshots.

    Returns
    -------
    snapshot_name : str
        Snapshot name to be used.
    """
    ended: Dict[int, bool] = {}
    snapshot_name: str = ''
    for variable in variables:
        if not isinstance(variable, RevertInterface):
            continue
        var_id: int = id(variable)
        if var_id in ended:
            continue
        if snapshot_name == '':
            snapshot_name = variable._get_next_snapshot_name()
        variable._run_all_make_snapshot_methods(snapshot_name=snapshot_name)
        ended[var_id] = True
    return snapshot_name
d6a7bf5be51ebe7f4fb7985b2a440548c502d4ec
28,737
def sext_to(value, n):
    """Extend `value` to length `n` by replicating the msb (`value[-1]`)"""
    return sext(value, n - len(value))
683316bd7259d624fddb0d9c947c7a06c5f28c7e
28,738
def parse_matching_pairs(pair_txt):
    """Get list of image pairs for matching

    Arg:
        pair_txt: file contains image pairs and essential matrix with line format
                  image1 image2 sim w p q r x y z ess_vec

    Return:
        list of 3d-tuple contains (q=[wpqr], t=[xyz], essential matrix)
    """
    im_pairs = {}
    f = open(pair_txt)
    for line in f:
        cur = line.split()
        im1, im2 = cur[0], cur[1]
        q = np.array([float(i) for i in cur[3:7]], dtype=np.float32)
        t = np.array([float(i) for i in cur[7:10]], dtype=np.float32)
        ess_mat = np.array([float(i) for i in cur[10:19]], dtype=np.float32).reshape(3, 3)
        im_pairs[(im1, im2)] = (q, t, ess_mat)
    f.close()
    return im_pairs
6697e63a091b23701e0751c59f8dc7fe0e582a97
28,739
import threading
from collections import OrderedDict


def compile_repo_info(repos, all=False, fetch=False):
    """Compiles all the information about found repos."""
    # global to allow for threading work
    global git_info
    git_info = {}
    max_ = len(repos)
    threads = []
    for i, repo in enumerate(repos):
        t = threading.Thread(target=process_repo, args=(repo, fetch))
        threads.append(t)
        t.start()
    for thread in threads:
        thread.join()
    git_info = OrderedDict(sorted(git_info.items(), key=lambda t: t[0]))
    output_table = create_repo_table(git_info, fetch, all)
    return output_table
b3cbdcdd53ce2c5274990520756390f396156aaa
28,740
def histogram2d(x, y, bins=10, range=None, weights=None, density=False): # pylint: disable=redefined-builtin """ Computes the multidimensional histogram of some data. Note: Deprecated numpy argument `normed` is not supported. Args: x (Union[list, tuple, Tensor]): An array with shape `(N,)` containing the x coordinates of the points to be histogrammed. y (Union[list, tuple, Tensor]): An array with shape `(N,)` containing the y coordinates of the points to be histogrammed. bins (Union[int, tuple, list], optional): The bin specification: If int, the number of bins for the two dimensions ``(nx=ny=bins)``. If array_like, the bin edges for the two dimensions ``(x_edges=y_edges=bins)``. If [int, int], the number of bins in each dimension ``(nx, ny = bins)``. If [array, array], the bin edges in each dimension ``(x_edges, y_edges = bins)``. A combination [int, array] or [array, int], where int is the number of bins and array is the bin edges. range(Union[list, tuple], optional): has shape (2, 2), the leftmost and rightmost edges of the bins along each dimension (if not specified explicitly in the bins parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range will be considered outliers and not tallied in the histogram. weights (Union[list, tuple, Tensor], optional): An array with shape `(N,)` of values `w_i` weighing each sample `(x_i, y_i)`. density (boolean, optional): If False, the default, returns the number of samples in each bin. If True, returns the probability density function at the bin, ``bin_count / sample_count / bin_volume``. Returns: (Tensor, Tensor, Tensor), the values of the bi-directional histogram and the bin edges along the first and second dimensions. Raises: ValueError: if `range` does not have the same size as the number of samples. Supported Platforms: ``Ascend`` ``GPU`` ``CPU`` Examples: >>> from mindspore import numpy as np >>> x = np.arange(5) >>> y = np.arange(2, 7) >>> print(np.histogram2d(x, y, bins=(2, 3))) (Tensor(shape=[2, 3], dtype=Float32, value= [[ 2.00000000e+00, 0.00000000e+00, 0.00000000e+00], [ 0.00000000e+00, 1.00000000e+00, 2.00000000e+00]]), Tensor(shape=[3], dtype=Float32, value= [ 0.00000000e+00, 2.00000000e+00, 4.00000000e+00]), Tensor(shape=[4], dtype=Float32, value= [ 2.00000000e+00, 3.33333349e+00, 4.66666698e+00, 6.00000000e+00])) """ count, bin_edges = histogramdd((x, y), bins=bins, range=range, weights=weights, density=density) return count, bin_edges[0], bin_edges[1]
8b537168cb7248ccd2959c95ae4fb742b81aa225
28,741
def get_project_arg_details(): """ **get_project_arg_details** obtains project details from arguments and then returns them :return: """ project_id = request.args.get('project_id') names = request.args.get('names') cell = request.args.get('cell') email = request.args.get('email') website = request.args.get('website') facebook = request.args.get('facebook') twitter = request.args.get('twitter') company = request.args.get('company') freelancing = request.args.get('freelancing') project_type = request.args.get('project-type') project_title = request.args.get('project-title') project_description = request.args.get('project-description') estimated_budget = request.args.get('estimated-budget') start_date = request.args.get('start-date') project_status = request.args.get('project-status') return (cell, company, email, facebook, freelancing, names, project_description, project_id, project_status, project_title, project_type, start_date, twitter, website)
5efcaebf0efe89a5d8fa5f52d50777041b545177
28,742
def vibronic_ls(x, s, sigma, gamma, e_vib, kt=0, n_max=None, m_max=None):
    """
    Produce a vibronic (Franck-Condon) lineshape.
    The vibronic transition amplitude is computed relative to 0 (ie: relative to the electronic transition energy).
    Lines are broadened using a Voigt profile.

    Parameters
    ----------
    x : np.ndarray
        Energy values. x==0 is the 0->0 line (no vibrational quanta change)
    s : float
        Huang-Rhys parameter S
    e_vib : float
        Energy of a vibrational quantum
    sigma : float
        Width (1/e^2) of gaussian component
    gamma : float
        Width of Lorentzian component
    kt : float
        Thermal energy. If >0, will compute transitions from vibrationally excited states. Default 0.
    n_max : int
        Largest vibrational number in final manifold. If not supplied, a guess is provided, but may not be adequate.
    m_max : int
        Largest vibrational number in original manifold. If not supplied, a guess is provided, but may not be adequate.
    """
    # determine n, m values
    if m_max is None:
        m_max = 0 if kt == 0 else int(kt / e_vib * 10)  # found that factor with my thumb
    if n_max is None:
        n_max = m_max + int(10 * s)
    n = np.arange(n_max + 1)
    m = np.arange(m_max + 1)
    fcf = vibronic_intensity(m, n, s, e_vib, kt)
    n, m = np.meshgrid(n, m)
    dvib = n - m
    y = np.zeros_like(x)
    for d, f in zip(dvib.flatten(), fcf.flatten()):
        y += voigt(x, f, d * e_vib, sigma, gamma)
    return y
428f0c44566cf3a824902fc9f7fb8012089d1b89
28,743
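For reference on vibronic_ls above: at kT = 0 the Franck-Condon factors that vibronic_intensity presumably reduces to follow the textbook Poisson progression in the Huang-Rhys parameter S (standard result, not taken from this code):

I_{0 \to n} = e^{-S} \frac{S^{n}}{n!}

so the 0->0 line carries weight e^{-S} and the mean number of vibrational quanta excited is S.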
import re import requests def handleFunction(command,func): """ Function to calculate, Translate """ try: # re.search(r"(?i)"+func,' '.join(SET_OF_FUNCTIONS)) if("calculate" == func.lower()): func,command = command.split() try: return eval(command) except: return "Sorry! We are unable to calculate this expression." elif("translate" == func.lower()): command = re.split(r'\s',command) isoLan = findISO(command[len(command)-1]) if isoLan == None: translation = "Sorry! we are unable to translate into this language" return translation translator= Translator(to_lang=isoLan) translation = translator.translate(' '.join(command[1:len(command)-2])) return translation elif("temperature" == func.lower() or "weather" == func.lower()): command = re.split(r'\s',command) cityName = (command[len(command)-1]).capitalize() temp = getTemp(cityName) if temp: temp_in_celcius = "It is "+str(round(temp[0]-273,2))+" C, "+temp[1] return temp_in_celcius return "Sorry we are unable to calculate temperature at this moment. Please try after sometime." elif re.search(r"(.)* ?horoscope ?(.)*",command,re.I): for sign in ZODIAC_SIGNS: if re.search(r'\b'+sign+r'\b',command,re.I): zodiac_sign = re.search(r'\b'+sign+r'\b',command,re.I).group(0) API_response = requests.get(url = "http://horoscope-api.herokuapp.com/horoscope/today/"+zodiac_sign) return API_response.json()['horoscope'] return "Please choose appropriate zodiac sign" else: return None except: return None
c5ff05b0b31a7441f7efaf9ce76c496f3f708eea
28,744
import json


def auth():
    """Returns worker_id. !!!currently!!! does not have auth logic."""
    response_body = {}
    status_code = 200
    try:
        auth_token = request.args.get("auth_token", None)
        resp = fl_events_auth({"auth_token": auth_token}, None)
        resp = json.loads(resp)["data"]
    except Exception as e:
        status_code = 401
        resp = {"error_auth_failed": str(e)}  # str() so the error is JSON serialisable
    return Response(json.dumps(resp), status=status_code, mimetype="application/json")
bbbeb0dbf7401b11e56399890f43a799f859eb87
28,745
def get_templates_environment(templates_dir): """Create and return a Jinja environment to deal with the templates.""" env = Environment( loader=PackageLoader('charmcraft', 'templates/{}'.format(templates_dir)), autoescape=False, # no need to escape things here :-) keep_trailing_newline=True, # they're not text files if they don't end in newline! optimized=False, # optimization doesn't make sense for one-offs undefined=StrictUndefined) # fail on undefined return env
9f3571ce4cb8f18f64912e6c259bc2f1022698f2
28,747
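A hedged usage sketch for get_templates_environment above (the directory, template name and variables are made up; only the standard Jinja2 Environment API is assumed):

env = get_templates_environment("init")            # hypothetical templates subdirectory
template = env.get_template("metadata.yaml.j2")    # hypothetical template file
rendered = template.render(name="my-charm")        # StrictUndefined raises if a variable is missing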
import numpy as np def return_U_given_sinusoidal_u1(i,t,X,u1,**kwargs): """ Takes in current step (i), numpy.ndarray of time (t) of shape (N,), state numpy.ndarray (X) of shape (8,), and previous input scalar u1 and returns the input U (shape (2,)) for this time step. ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ **kwargs ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1) Bounds - must be a (2,2) list with each row in ascending order. Default is given by Activation_Bounds. """ assert (np.shape(t) == (len(t),)) and (str(type(t)) == "<class 'numpy.ndarray'>"),\ "t must be a numpy.ndarray of shape (len(t),)." assert np.shape(X) == (8,) and str(type(X)) == "<class 'numpy.ndarray'>", "X must be a (8,) numpy.ndarray" assert str(type(u1)) in ["<class 'int'>","<class 'float'>","<class 'numpy.float64'>"], \ "u1 must be an int or a float." Bounds = kwargs.get("Bounds",Activation_Bounds) assert type(Bounds) == list and np.shape(Bounds) == (2,2), "Bounds for Muscle Activation Control must be a (2,2) list." assert Bounds[0][0]<Bounds[0][1],"Each set of bounds must be in ascending order." assert Bounds[1][0]<Bounds[1][1],"Each set of bounds must be in ascending order." Coefficient1,Coefficient2,Constraint1 = return_constraint_variables(t[i],X) assert Coefficient1!=0 and Coefficient2!=0, "Error with Coefficients. Shouldn't both be zero" if Constraint1 < 0: assert not(Coefficient1 > 0 and Coefficient2 > 0), "Infeasible activations. (Constraint1 < 0, Coefficient1 > 0, Coefficient2 > 0)" if Constraint1 > 0: assert not(Coefficient1 < 0 and Coefficient2 < 0), "Infeasible activations. (Constraint1 > 0, Coefficient1 < 0, Coefficient2 < 0)" u2 = (Constraint1 - Coefficient1*u1)/Coefficient2 NextU = np.array([u1,u2]) assert (Bounds[0][0]<=u1<=Bounds[0][1]) and (Bounds[1][0]<=u2<=Bounds[1][1]), "Error! Choice of u1 results in infeasible activation along backstepping constraint." return(NextU)
b5faba122af139f29f20dbce983b84fe5c0c277c
28,748
def verify(s): """ Check if the cube definition string s represents a solvable cube. @param s is the cube definition string , see {@link Facelet} @return 0: Cube is solvable<br> -1: There is not exactly one facelet of each colour<br> -2: Not all 12 edges exist exactly once<br> -3: Flip error: One edge has to be flipped<br> -4: Not all 8 corners exist exactly once<br> -5: Twist error: One corner has to be twisted<br> -6: Parity error: Two corners or two edges have to be exchanged """ count = [0] * 6 # new int[6] try: for i in range(54): assert s[i] in colors count[colors[s[i]]] += 1 except: return -1 for i in range(6): if count[i] != 9: return -1 fc = FaceCube(s) cc = fc.toCubieCube() return cc.verify()
d3e765af153a7400d84e59c72d292a9ccd9170f5
28,749
from typing import cast import copy def copy_jsons(o: JSONs) -> MutableJSONs: """ Make a new, mutable copy of a JSON array. >>> a = [{'a': [1, 2]}, {'b': 3}] >>> b = copy_jsons(a) >>> b[0]['a'].append(3) >>> b [{'a': [1, 2, 3]}, {'b': 3}] >>> a [{'a': [1, 2]}, {'b': 3}] """ return cast(MutableJSONs, copy.deepcopy(o))
c9fffefe0dd541e20a7a3bef503e0b1af847909d
28,750
def string_to_dot(typed_value): # type: (TypedValue) -> Tuple[List[str], List[str]] """Serialize a String object to Graphviz format.""" string = f'{typed_value.value}'.replace('"', r'\"') dot = f'_{typed_value.name} [shape="record", color="#A0A0A0", label="{{{{String | {string}}}}}"]' return [dot], []
287d2c886aca5ca940b323b751af91e33ed54fc4
28,751
def resolve_country_subdivisions(_, info, alpha_2): """ Country resolver :param info: QraphQL request context :param alpha_2: ISO 3166 alpha2 code :param code: ISO 3166-2 code """ return CountrySubdivision.list_for_country(country_code=alpha_2)
56ffa7343f1da686819c85dee54770cd4d1564d3
28,752
from typing import Tuple def bool2bson(val: bool) -> Tuple[bytes, bytes]: """Encode bool as BSON Boolean.""" assert isinstance(val, bool) return BSON_BOOLEAN, ONE if val else ZERO
3d4456f6db88939966997b8a49c3d766b1ef4ba1
28,753
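The BSON_BOOLEAN, ONE and ZERO constants used by bool2bson are defined elsewhere in the module; in the BSON spec the boolean element type byte is 0x08 and the value is a single 0x01/0x00 byte, so they are presumably along these lines (an assumption, not taken from this repo):

BSON_BOOLEAN = b"\x08"  # BSON element type tag for booleans
ONE = b"\x01"           # encoded True
ZERO = b"\x00"          # encoded False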
def isbn13_to_isbn10 (isbn_str, cleanse=True):
    """
    Convert an ISBN-13 to an ISBN-10.

    :Parameters:
        isbn_str : string
            The ISBN as a string, e.g. " 978-0-940016-73-6 ". It should be 13
            digits after normalisation.
        cleanse : boolean
            If true, formatting will be stripped from the ISBN before conversion.

    :Returns:
        A normalised ISBN-10, e.g. "0940016737", or ``None`` if no conversion
        is possible.

    For example::

        >>> isbn13_to_isbn10 ("978-0-940016-73-6")
        '0940016737'
        >>> isbn13_to_isbn10 ("9780940016736", cleanse=False)
        '0940016737'
        >>> isbn13_to_isbn10 ("979-1-234-56789-6")
        >>> isbn13_to_isbn10 ("978-3-8055-7505-8")
        '380557505X'
        >>> isbn13_to_isbn10 ("978-0-940016-73-6", cleanse=False)
        Traceback (most recent call last):
        ...
        AssertionError: input '978-0-940016-73-6' is not 13 digits

    """
    ## Preconditions:
    if (cleanse):
        isbn_str = clean_isbn (isbn_str)
    assert (len (isbn_str) == 13), "input '%s' is not 13 digits" % isbn_str
    if (not isbn_str.startswith ('978')):
        return None
    ## Main:
    isbn_str = isbn_str[3:-1]
    isbn_str += isbn10_checksum (isbn_str)
    ## Return:
    assert (len (isbn_str) == 10), "output ISBN-10 is '%s'" % isbn_str
    return isbn_str
b39d6d9f7a850a8b0edbb6b9502f4d6bb73f8848
28,754
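The isbn10_checksum helper called by isbn13_to_isbn10 is defined elsewhere; the standard ISBN-10 check-digit rule it presumably implements is sketched below, together with the arithmetic behind the first doctest: weights 10..2 over '094001673' give 0+81+32+0+0+5+24+21+6 = 169, 169 mod 11 = 4, and 11 - 4 = 7.

def isbn10_checksum_sketch(digits9):
    # digits9: the first nine digits of the ISBN-10 as a string
    total = sum(int(d) * w for d, w in zip(digits9, range(10, 1, -1)))
    check = (11 - total % 11) % 11
    return "X" if check == 10 else str(check)

assert isbn10_checksum_sketch("094001673") == "7"
assert isbn10_checksum_sketch("380557505") == "X"  # matches the '380557505X' doctest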
import json def import_data(): """Import datasets to internal memory""" with open('data/names.json') as f: data_names = json.load(f) with open('data/issues.json') as f: data_issues = json.load(f) with open('data/disasters.json') as f: data_disasters = json.load(f) with open('data/options.json') as f: data_options = json.load(f) return data_names, data_issues, data_disasters, data_options
11db10c2c56b6b714ecffa57510c9a79abfa1d86
28,755
def synthesize_ntf_dunn(order=3, osr=64, H_inf=1.5): """ Alias of :func:`ntf_dunn` .. deprecated:: 0.11.0 Function has been moved to the :mod:`NTFdesign` module with name :func:`ntf_dunn`. """ warn("Function superseded by ntf_dunn in " "NTFdesign module", PyDsmDeprecationWarning) return ntf_dunn(order, osr, H_inf)
2920ec676ab070ebb5f7c95e245baf078b723fae
28,756
def schedule_notification() -> str:
    """Add news, COVID-19 statistics and weather items to the notifications column.
    A fresh news item is only inserted if it is not already present and there are fewer
    than six notifications; the COVID item is inserted if missing; the weather item is
    updated in place when it already exists."""
    #NEWS
    news_title, news_content = get_news()
    notif_exists = False
    for notif in notifications:
        if notif["title"] == news_title:
            notif_exists = True
            break
    if not notif_exists:
        if len(notifications) < 6:
            notifications.insert(0, {"title": news_title, "content": news_content})
    #COVID NEWS
    covid_news_title, covid_news_content = get_covid_news()
    notif_covid_exists = False
    for notif in notifications:
        if notif["title"] == "COVID-19 Statistics":
            notif_covid_exists = True
            break
    if not notif_covid_exists:
        notifications.insert(0, {"title": covid_news_title, "content": covid_news_content})
    #WEATHER
    weather_notif_exists = False
    notif_content = get_forecast("Exeter")
    for notif in notifications:
        if notif["title"] == "Weather":
            weather_notif_exists = True
            notif["content"] = notif_content
            break
    if not weather_notif_exists:
        notifications.insert(0, {"title": "Weather", "content": notif_content})
    return "Passed"
9aed44251170dc124f71b11bf482ef009ebe973e
28,757
def _parse_hexblob(blob: str) -> bytes: """ Binary conversions from hexstring are handled by bytes(hstr2bin()). :param blob: :return: """ return bytes(hstr2bin(blob))
e49348f7cb15bbba850dbf05c0a3625427d0ac2d
28,758
from typing import List
from typing import Dict


def _row_to_col_index_dict(headers: List[Cell]) -> Dict[str, int]:
    """Calculate a mapping of cell contents to column index.

    Returns:
        dict[str, int]: {MFP nutrient name: worksheet column index} mapping.
    """
    return {h.value: h.col - 1 for h in headers}
11f7a68cd211b216d2a27850be99291cc830d52f
28,759
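A tiny illustration of _row_to_col_index_dict above (FakeCell is a stand-in with the same .value/.col attributes as the worksheet cells, which are assumed to be 1-indexed):

from collections import namedtuple

FakeCell = namedtuple("FakeCell", ["value", "col"])
headers = [FakeCell("Calories", 1), FakeCell("Protein (g)", 2)]
print(_row_to_col_index_dict(headers))  # {'Calories': 0, 'Protein (g)': 1}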
def preprocess_cat_cols(X_train, y_train, cat_cols=[], X_test=None, one_hot_max_size=1, learning_task=LearningTask.CLASSIFICATION): """Preprocess categorial columns(cat_cols) in X_train and X_test(if specified) with cat-counting(the same as in catboost) or with one-hot-encoding, depends on number of unique labels(one_hot_max_size) Args: X_train (numpy.ndarray): train dataset y_train (numpy.ndarray): train labels cat_cols (list of columns indices): categorical columns X_test (None or numpy.ndarray): test dataset one_hot_max_size(int): max unique labels for one-hot-encoding learning_task (LearningTask): a type of learning task Returns: numpy.ndarray(, numpy.ndarray): transformed train and test datasets or only train, depends on X_test is None or not """ one_hot_cols = [col for col in cat_cols if len(np.unique(X_train[:, col])) <= one_hot_max_size] cat_count_cols = list(set(cat_cols) - set(one_hot_cols)) preprocess_counter_cols(X_train, y_train, cat_count_cols, X_test, learning_task=learning_task) X_train, X_test = preprocess_one_hot_cols(X_train, one_hot_cols, X_test) if X_test is None: return X_train else: return X_train, X_test
10402fe0fd534eb73598fa99a1202b970202f2c0
28,760
def get_start_time(period, time_zone=None):
    """Return the start timestamp ('%Y-%m-%d %H:%M:%S') for the given period.

    'thisyear' starts at January 1 of the current year, periods found in
    DAYS_MAPPER go back the mapped number of days, and anything else falls
    back to one day ago. Times are taken in `time_zone` (default
    'Europe/Stockholm').
    """
    today = pd.Timestamp.today(tz=time_zone or 'Europe/Stockholm')
    if period == 'thisyear':
        return pd.Timestamp(f'{today.year}0101').strftime('%Y-%m-%d %H:%M:%S')
    elif period in DAYS_MAPPER:
        return (today - pd.Timedelta(days=DAYS_MAPPER.get(period))
                ).strftime('%Y-%m-%d %H:%M:%S')
    else:
        # Return according to "day".
        return (today - pd.Timedelta(days=1)).strftime('%Y-%m-%d %H:%M:%S')
c5e9ab4543f813f7210bc278e83d9c4a554d242b
28,761
def setup_textbox(parent, font="monospace", width=70, height=12): """Setup for the textboxes, including scrollbars and Text widget.""" hsrl = ttk.Scrollbar(parent, orient="horizontal") hsrl.pack(side=tk.BOTTOM, fill=tk.X) vsrl = ttk.Scrollbar(parent) vsrl.pack(side=tk.RIGHT, fill=tk.Y) textbox = tk.Text(parent, xscrollcommand=hsrl.set, yscrollcommand=vsrl.set, font=font, width=width, height=height, wrap=tk.NONE) textbox.bind("<Tab>", focus_next_widget) textbox.pack(side="top", fill="both", expand=True) hsrl.config(command=textbox.xview) vsrl.config(command=textbox.yview) return textbox
674bc72eefacc16485a4b369535f1253187e5ded
28,762
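A hedged usage sketch for setup_textbox above (assumes the module-level imports tkinter as tk and ttk, plus the focus_next_widget callback that the function binds):

root = tk.Tk()
frame = ttk.Frame(root)
frame.pack(fill="both", expand=True)
textbox = setup_textbox(frame, width=80, height=20)
textbox.insert("1.0", "Hello, world")
root.mainloop()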
def generate_mock_statuses(naive_dt=True, datetime_fixtures=None):
    """
    A list of mock statuses with sequential ids.

    Useful for mocking an API response. These are useful in ``Timeline``
    class testing.

    May be set to have a utc timezone with a ``False`` value for the
    ``naive_dt`` argument.
    """
    mock_statuses = list()
    if datetime_fixtures is None:
        datetime_fixtures = generate_datetime_fixtures()
    for dt in datetime_fixtures:
        identifier = len(mock_statuses) + 1
        mock_status_text = 'Content for tweet mock status {0}'.format(identifier)
        mock_status = generate_mock_status(identifier, mock_status_text)
        mock_status.created_at = dt
        mock_statuses.append(mock_status)
    return mock_statuses
aca7bd235ef6fd404f8da894b6917636d7895dcb
28,763
import hashlib def hashlib_mapper(algo): """ :param algo: string :return: hashlib library for specified algorithm algorithms available in python3 but not in python2: sha3_224 sha3_256, sha3_384, blake2b, blake2s, sha3_512, shake_256, shake_128 """ algo = algo.lower() if algo == "md5": return hashlib.md5() elif algo == "sha1": return hashlib.sha1() elif algo == "sha224": return hashlib.sha224() elif algo == "sha256": return hashlib.sha256() elif algo == "sha384": return hashlib.sha384() elif algo == "sha3_224": return hashlib.sha3_224() elif algo == "sha3_256": return hashlib.sha3_256() elif algo == "sha3_384": return hashlib.sha3_384() elif algo == "sha3_512": return hashlib.sha3_512() elif algo == "sha512": return hashlib.sha512() elif algo == "blake2b": return hashlib.blake2b() elif algo == "blake2s": return hashlib.blake2s() elif algo == "shake_128": return hashlib.shake_128() elif algo == "shake_256": return hashlib.shake_256() else: raise Exception("Unsupported hashing algorithm: %s" % algo)
56830caccd0b3f88982bfe09a8789002af99c1e7
28,765
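Usage sketch for hashlib_mapper above (assuming the function is importable; note the shake variants additionally need a length argument when calling hexdigest):

h = hashlib_mapper("SHA256")   # case-insensitive thanks to algo.lower()
h.update(b"hello world")
print(h.hexdigest())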
def partition_cells(config, cells, edges): """ Partition a set of cells - cells -- A DataFrame of cells - edges -- a list of edge times delimiting boundaries between cells Returns a DataFrame of combined cells, with times and widths adjusted to account for missing cells """ # get indices of cell indexes just beyond each edge time ii = np.searchsorted(cells.t, edges) # Get the appropriate boundary times to apply to combined cells # this is complicated by missing cells, need to put boundary in gaps if ncessary ileft = ii[:-1] cleft = cells.iloc[ileft ] tleft = (cleft.t - cleft.tw/2).values iright = ii[1:]-1 cright = cells.iloc[iright ] tright = (cright.t+cright.tw/2).values betweens = 0.5*(tleft[1:] + tright[:-1]) tboundary = np.append(np.insert(betweens, 0, tleft[0]), tright[-1]) # now combine the cells, newcells = [] for k in range(len(ii)-1): a,b = ii[k:k+2] check = cells.iloc[a:b] subset = check[~pd.isna(check.n)] # ca, cb = subset.iloc[0], subset.iloc[-1] # newcell = dict(t= 0.5*(ca.t-ca.tw/2 + cb.t+cb.tw/2) ) tl, tr = tboundary[k:k+2] newcell = dict(t=0.5*(tl+tr), tw=tr-tl) for col in 'e n S B'.split(): newcell[col] = subset[col].sum() newcell['e'] /= len(subset) newcell['w'] = np.concatenate(list(subset.w.values)) #np.array(w, np.uint8) newcells.append(newcell) return pd.DataFrame(newcells)
c8532cbf148802b482380f8978dbc8d9d3b1b35f
28,766
from typing import List from typing import Union def timing_stats(results: List[Result]) -> List[str]: """Calculate and format lines with timings across completed results.""" def percentile(data: List[float], percent: int) -> Union[float, str]: if not data: return '-' data_sorted = sorted(data) pos = max(int(round(percent / 100 * len(data) + 0.5)), 2) return data_sorted[pos - 2] def format_line(name: str, *values: Union[float, int, str]) -> str: line = f'{name:<10s}' for value in values: if isinstance(value, float): line += f' {value:6.0f}' else: line += f' {value:>6}' return line total_times = [r.total_time * 1000 for r in results if r.total_time] ttfb_times = [r.ttfb_time * 1000 for r in results if r.ttfb_time] conn_times = [r.conn_time * 1000 for r in results if r.conn_time] percentiles = (50, 80, 95, 99) lines = [ format_line( '', 'Mean', 'Min', *(f'{p}%' for p in percentiles), 'Max', ), format_line( 'Connect:', mean(conn_times) if conn_times else '-', min(conn_times) if conn_times else '-', *(percentile(conn_times, p) for p in percentiles), max(conn_times) if conn_times else '-', ), format_line( 'TTFB:', mean(ttfb_times) if ttfb_times else '-', min(ttfb_times) if ttfb_times else '-', *(percentile(ttfb_times, p) for p in percentiles), max(ttfb_times) if ttfb_times else '-', ), format_line( 'Total:', mean(total_times) if total_times else '-', min(total_times) if total_times else '-', *(percentile(total_times, p) for p in percentiles), max(total_times) if total_times else '-', ), ] return lines
08d671b2866674924dc070dda2e7e85a4c56c064
28,767
import logging
def analyse_gamma(
    snps_object,
    output_summary_filename,
    output_logger,
    SWEEPS,
    TUNE,
    CHAINS,
    CORES,
    N_1kG,
    fix_intercept=False,
):
    """
    Bayesian hierarchical regression on the dataset with the gamma model.

    :param snps_object: snps instance
    :param output_summary_filename: output summary table
    :param output_logger: logger instance
    :param SWEEPS: samples for each chain
    :param TUNE: burn-in samples
    :param CHAINS: number of chains
    :param CORES: number of cores
    :param N_1kG: number of SNPs
    :param fix_intercept: if True the model fixes the intercept.
    """
    snp_dataset = snps_object.table.copy().reset_index(drop=True)
    n_patients = snps_object.n_patients
    nSNP = snps_object.n_snps
    # to run the regression as a mixed effect model, I need a vector (cat) to assign each SNP to its gene
    idx = 0
    cat = np.zeros(nSNP)
    Mg = []
    genes = []
    # g2 are all the SNPs inside the gene
    for k2, g2 in snp_dataset.groupby("gene"):
        cat[g2.index] = int(idx)
        idx += 1
        genes.append(k2)
        Mg.append(len(g2))
    cat = cat.astype(int)

    logging.info("Model Evaluation Started")
    logging.info("Average stats: %f" % np.mean(snps_object.table["stats"].values))

    with pm.Model() as model:
        e = pm.Normal("e", mu=1, sd=0.001)
        mi = pm.Beta("mi", 1, 1)
        beta = pm.Gamma("beta", alpha=mi, beta=N_1kG, shape=idx)
        diff = pm.Deterministic("diff", subtract(beta, mi / N_1kG))
        herTOT = pm.Deterministic("herTOT", tt.sum(beta * Mg))
        if fix_intercept:
            fixed_variable = pm.Normal(
                "fxd",
                mu=(n_patients) * beta[cat] * (snps_object.table["l"]) + 1,
                sd=np.sqrt(np.asarray(snps_object.table["l"])),
                observed=snps_object.table["stats"],
            )
        else:
            fixed_variable = pm.Normal(
                "fxd",
                mu=(n_patients) * beta[cat] * (snps_object.table["l"]) + e,
                sd=np.sqrt(np.asarray(snps_object.table["l"])),
                observed=snps_object.table["stats"],
            )

        # step = pm.Metropolis()
        trace = pm.sample(
            SWEEPS,
            tune=TUNE,
            chains=CHAINS,
            cores=CORES,
            nuts_kwargs=dict(target_accept=0.90),
        )

    if CHAINS > 1:
        logging.info("evaluating Gelman-Rubin")
        GR = pm.diagnostics.gelman_rubin(trace, varnames=["mi"])
        output_logger.info(
            "DIAGNOSTIC (gelman-rubin) "
            + str(GR)
            + "\n"
            + "(If this number is >> 1 the method has some convergence problem, \n try increasing the number of s and b)"
        )

    logging.info("Writing output")
    # save general stats to summary file
    su = pm.summary(
        trace,
        varnames=["mi", "herTOT", "e"],
        extend=True,
        stat_funcs=[trace_median, trace_quantiles],
    )
    su.to_csv(output_summary_filename, sep=",", mode="w")

    d = {}
    d["beta"] = N_1kG * trace["beta"]

    e_GW = np.mean(trace["e"])
    e_GW_sd = np.std(trace["e"])
    output_logger.info(" Intercept: " + str(e_GW) + " (sd= " + str(e_GW_sd) + ")\n")

    herTOT = np.median(trace["herTOT"])
    herTOT_sd = np.std(trace["herTOT"])
    output_logger.info(
        " heritability from genes: " + str(herTOT) + " (sd= " + str(herTOT_sd) + ")\n"
    )

    mi_mean = np.mean(trace["mi"], axis=0)
    mi_median = np.median(trace["mi"], axis=0)
    mi_std = np.std(trace["mi"], axis=0)
    mi_5perc = np.percentile(trace["mi"], 5, axis=0)
    mi_95perc = np.percentile(trace["mi"], 95, axis=0)
    output_logger.info(
        " Heritability: "
        + str(mi_mean)
        + " (std= "
        + str(mi_std)
        + ")\n"
        + "[ 5th perc= "
        + str(mi_5perc)
        + ","
        + " 95 perc= "
        + str(mi_95perc)
        + "]\n"
    )

    Prob = (np.sum(trace["diff"] > 0, axis=0) / len(trace["diff"]))[:, np.newaxis]
    data = np.hstack((np.asarray(genes)[:, np.newaxis], Prob))
    df = pd.DataFrame(data, columns=("name", "P"))
    df["bg_mean"] = np.mean(d["beta"], axis=0)[:, np.newaxis]
    df["bg_median"] = np.median(d["beta"], axis=0)[:, np.newaxis]
    df["bg_var"] = np.var(d["beta"], axis=0)[:, np.newaxis]
    df["bg_5perc"] = np.percentile(d["beta"], 5, axis=0)[:, np.newaxis]
    df["bg_95perc"] = np.percentile(d["beta"], 95, axis=0)[:, np.newaxis]
    df["mi_mean"] = mi_mean
    df["mi_median"] = mi_median

    return df
fac6111e4ad87d63d89d2942e5cfc28023950117
28,768
def dpc_variant_to_string(variant: _DV) -> str: """Convert a Basix DPCVariant enum to a string. Args: variant: The DPC variant Returns: The DPC variant as a string. """ return variant.name
2eb7eeff47eb36bea47714b9e233f3d286925d3b
28,769
import secrets
from datetime import datetime, timedelta


async def refresh_token(request: web.Request) -> web.Response:
    """ Refresh token endpoint """
    try:
        content = await request.json()
        if "token" not in content:
            return web.json_response({"error": "Wrong data. Provide token."}, status=400)
    except Exception:
        return web.json_response({"error": "Wrong data. Provide token."}, status=400)
    Session = sessionmaker(bind=request.app["db_engine"])
    s = Session()
    r = s.query(Token).filter(Token.token == content["token"]).first()
    if r is not None:
        token = secrets.token_hex(20)
        now = datetime.now()
        r.token = token
        r.expire = now + timedelta(days=1)
        r.updated_at = now
        s.commit()
        s.close()
        return web.json_response({"token": token})
    else:
        s.close()
        return web.json_response({"error": "Token not found. Provide correct token."}, status=400)
a8008a33793ccb7b34900724b62fb3add061fa30
28,770
def get_my_choices_projects(): """ Retrieves all projects in the system for the project management page """ proj_list = Project.objects.all() proj_tuple = [] counter = 1 for proj in proj_list: proj_tuple.append((counter, proj)) counter = counter + 1 return proj_tuple
f35563adb12aff32ac1b60152b3085c63dc839f0
28,771
import math def normalDistributionBand(collection, band, mean=None, std=None, name='normal_distribution'): """ Compute a normal distribution using a specified band, over an ImageCollection. For more see: https://en.wikipedia.org/wiki/Normal_distribution :param band: the name of the property to use :type band: str :param mean: the mean value. If None it will be computed from the source. defaults to None. :type mean: float :param std: the standard deviation value. If None it will be computed from the source. Defaults to None. :type std: float """ if mean is None: imean = ee.Image(collection.mean()) else: imean = ee.Image.constant(mean) if std is None: istd = ee.Image(collection.reduce(ee.Reducer.stdDev())) else: istd = ee.Image.constant(std) ipi = ee.Image.constant(math.pi) imax = ee.Image(1) \ .divide(istd.multiply(ee.Image.constant(2).multiply(ipi).sqrt())) return gaussFunctionBand(collection, band, mean=imean, output_max=imax, std=istd, name=name)
57b0d6beb590126253c4934e403487bd69c7c094
28,773
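For reference on normalDistributionBand above: imax is the usual peak of the normal density, and the per-pixel density that gaussFunctionBand presumably evaluates is the textbook expression

f(x) = \frac{1}{\sigma\sqrt{2\pi}} \exp\left(-\frac{(x-\mu)^{2}}{2\sigma^{2}}\right), with maximum f(\mu) = \frac{1}{\sigma\sqrt{2\pi}}.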
import torch def compute_ctrness_targets(reg_targets): """ :param reg_targets: :return: """ if len(reg_targets) == 0: return reg_targets.new_zeros(len(reg_targets)) left_right = reg_targets[:, [0, 2]] top_bottom = reg_targets[:, [1, 3]] ctrness = (left_right.min(dim=-1)[0] / left_right.max(dim=-1)[0]) * \ (top_bottom.min(dim=-1)[0] / top_bottom.max(dim=-1)[0]) return torch.sqrt(ctrness)
538a63b6adcd73fbd601d6e61eea5f27642746fa
28,774
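In LaTeX, compute_ctrness_targets above returns the centerness target from the FCOS detector, built from the left/right/top/bottom regression distances:

\text{centerness} = \sqrt{\frac{\min(l, r)}{\max(l, r)} \times \frac{\min(t, b)}{\max(t, b)}}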
import hashlib
import hmac
import base64
from typing import Callable


def create_hmac_signature(key: bytes, data_to_sign: str,
                          hashmech: Callable = hashlib.sha256) -> str:
    """ Creates an HMAC signature for the provided data string
    @param key: HMAC key as bytes
    @param data_to_sign: The data that needs to be signed
    @param hashmech: The hash constructor to use, defaults to hashlib.sha256
    @return: Base64 encoded signature
    """
    sig = hmac.new(key, data_to_sign.encode("utf-8"), hashmech).digest()
    return base64.b64encode(sig).decode("utf-8")
0c3f5b8bef6e3330e8c24fca62ce2707b0de5286
28,775
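Usage sketch for create_hmac_signature above (the key and payload are made up); verification should use a constant-time comparison:

import base64
import hashlib
import hmac

sig = create_hmac_signature(b"super-secret-key", '{"amount": 10}')

def verify_hmac_signature(key, data, signature, hashmech=hashlib.sha256):
    # Recompute the digest and compare in constant time.
    expected = hmac.new(key, data.encode("utf-8"), hashmech).digest()
    return hmac.compare_digest(expected, base64.b64decode(signature))

assert verify_hmac_signature(b"super-secret-key", '{"amount": 10}', sig)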
def CV_range( bit_depth: Integer = 10, is_legal: Boolean = False, is_int: Boolean = False ) -> NDArray: """ Returns the code value :math:`CV` range for given bit depth, range legality and representation. Parameters ---------- bit_depth Bit depth of the code value :math:`CV` range. is_legal Whether the code value :math:`CV` range is legal. is_int Whether the code value :math:`CV` range represents integer code values. Returns ------- :class:`numpy.ndarray` Code value :math:`CV` range. Examples -------- >>> CV_range(8, True, True) array([ 16, 235]) >>> CV_range(8, True, False) # doctest: +ELLIPSIS array([ 0.0627451..., 0.9215686...]) >>> CV_range(10, False, False) array([ 0., 1.]) """ if is_legal: ranges = np.array([16, 235]) ranges *= 2 ** (bit_depth - 8) else: ranges = np.array([0, 2 ** bit_depth - 1]) if not is_int: ranges = as_float_array(ranges) / (2 ** bit_depth - 1) return ranges
e1eb079e4e75cb7b8353d88e13bb7eb82d15428c
28,776
import torch def bw_transform(x): """Transform rgb separated balls to a single color_channel.""" x = x.sum(2) x = torch.clamp(x, 0, 1) x = torch.unsqueeze(x, 2) return x
3ecec3ada4b75486ff96c30890e8a3e173ca7d31
28,777
def fom(A, b, x0=None, maxiter=None, residuals=None, errs=None):
    """Full orthogonalization method

    Parameters
    ----------
    A : {array, matrix, sparse matrix, LinearOperator}
        n x n, linear system to solve
    b : {array, matrix}
        right hand side, shape is (n,) or (n,1)
    x0 : {array, matrix}
        initial guess, default is a vector of zeros
    maxiter : int
        maximum number of allowed iterations
    residuals : list
        residuals has the residual norm history, including the initial
        residual, appended to it
    errs : list
        errors returned through (Ax,x), so test the errors on Ax=0
    """
    n = len(b)
    if maxiter is None:
        maxiter = n
    if x0 is None:
        # default initial guess is a vector of zeros, as documented above;
        # keep x0 as an array so the updates x = x0 + z below stay valid
        x0 = np.zeros((n,))
    x = x0.copy()
    r = b - A * x
    beta = np.linalg.norm(r)
    if residuals is not None:
        residuals[:] = [beta]  # initial residual
    if errs is not None:
        errs[:] = [np.sqrt(np.dot(A * x, x))]
    V = np.zeros((n, maxiter))
    H = np.zeros((maxiter, maxiter))
    V[:, 0] = (1 / beta) * r
    for j in range(0, maxiter):
        w = A * V[:, j]
        for i in range(0, j + 1):
            H[i, j] = np.dot(w, V[:, i])
            w += -H[i, j] * V[:, i]
        newh = np.linalg.norm(w)
        if abs(newh) < 1e-13:
            break
        elif j < (maxiter - 1):
            H[j + 1, j] = newh
            V[:, j + 1] = (1 / newh) * w
        # do some work to check the residual
        if residuals is not None:
            e1 = np.zeros((j + 1, 1))
            e1[0] = beta
            y = np.linalg.solve(H[0:j + 1, 0:j + 1], e1)
            z = np.dot(V[:, 0:j + 1], y)
            x = x0 + z.ravel()
            residuals.append(abs(newh * y[j]))
        if errs is not None:
            e1 = np.zeros((j + 1, 1))
            e1[0] = beta
            y = np.linalg.solve(H[0:j + 1, 0:j + 1], e1)
            z = np.dot(V[:, 0:j + 1], y)
            x = x0 + z.ravel()
            errs.append(np.sqrt(np.dot(A * x, x)))
    e1 = np.zeros((j + 1, 1))
    e1[0] = beta
    y = np.linalg.solve(H[0:j + 1, 0:j + 1], e1)
    z = np.dot(V[:, 0:j + 1], y)
    x = x0 + z.ravel()
    return (x, newh)
b95ac8b383150e57ffd599fb2e77608dd7503d9d
28,778
def get_gprMax_materials(fname): """ Returns the soil permittivities. Fname is an .in file. """ materials = {'pec': 1.0, # Not defined, usually taken as 1. 'free_space': 1.000536} for mat in get_lines(fname, 'material'): props = mat.split() materials[props[-1]] = float(props[0]) return materials
f56e720c5c2209b67ca521b779ce9472665beb6a
28,779
import random def generate_utt_pairs(librispeech_md_file, utt_pairs, n_src): """Generate pairs of utterances for the mixtures.""" # Create a dict of speakers utt_dict = {} # Maps from speaker ID to list of all utterance indices in the metadata file speakers = list(librispeech_md_file["speaker_ID"].unique()) for speaker in speakers: utt_indices = librispeech_md_file.index[librispeech_md_file["speaker_ID"] == speaker] utt_dict[speaker] = list(utt_indices) while len(speakers) >= n_src: # Select random speakers selected = random.sample(speakers, n_src) # Select random utterance from each speaker utt_list = [] for speaker in selected: utt = random.choice(utt_dict[speaker]) utt_list.append(utt) utt_dict[speaker].remove(utt) if not utt_dict[speaker]: # no more utts for this speaker speakers.remove(speaker) utt_pairs.append(utt_list) return utt_pairs
9079fa35b961de053c86b08527085e8eb84609b8
28,780
def simpson(so, spl: str, attr: str, *, local=True, key_added=None, graph_key='knn', inplace=True) -> None: """Computes the Simpson Index on the observation or the sample level Args: so: SpatialOmics instance spl: Spl for which to compute the metric attr: Categorical feature in SpatialOmics.obs to use for the grouping local: Whether to compute the metric on the observation or the sample level key_added: Key added to either obs or spl depending on the choice of `local` graph_key: Specifies the graph representation to use in so.G[spl] if `local=True`. inplace: Whether to add the metric to the current SpatialOmics instance or to return a new one. Returns: """ if key_added is None: key_added = 'simpson' key_added = f'{key_added}_{attr}' if local: key_added += f'_{graph_key}' metric = _simpson kwargs_metric = {} return _compute_metric(so=so, spl=spl, attr=attr, key_added=key_added, graph_key=graph_key, metric=metric, kwargs_metric=kwargs_metric, local=local, inplace=inplace)
d10fd40305f384d75f8c33d391a87f6b5c8adcd5
28,781
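For reference on simpson above: the underlying _simpson metric is not shown here, but the classical Simpson index it presumably computes over the category proportions p_i of attr is D = \sum_i p_i^{2}, often reported as 1 - D (Gini-Simpson) or 1/D (inverse Simpson) when used as a diversity measure.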