Columns: content (string, 35–762k chars), sha1 (string, 40 chars), id (int64, 0–3.66M)
from types import ModuleType
from typing import List
from typing import Union
from typing import Optional
import importlib


def search_import(
        method: str, modules: List[Union[str, ModuleType]]
) -> Optional[object]:
    """
    Check whether any of `modules` contains a callable object named `method`
    and return the first one found.

    args:
        method - name of the object (method) we want to find
        modules - where to search. Given by str or module object.
    return:
        first found object (method), or None
    """
    for module in modules:
        try:
            if isinstance(module, ModuleType):
                mod = module
            elif isinstance(module, str):
                # get module by string name
                mod = importlib.import_module(module)
            else:
                raise TypeError('Must be list of strings or ModuleType')
            # get method from module by string name
            met = getattr(mod, method, None)
            if met:
                return met
        except ImportError:
            # import_module can fail
            continue
    return None
14cd9d6c02081915a1de394b041d80c7b6cc518b
35,606
def retrieve_context_connectivity_service_end_point_capacity_total_size_total_size(uuid, local_id):  # noqa: E501
    """Retrieve total-size

    Retrieve operation of resource: total-size # noqa: E501

    :param uuid: ID of uuid
    :type uuid: str
    :param local_id: ID of local_id
    :type local_id: str

    :rtype: CapacityValue
    """
    return 'do some magic!'
7b154007da10064cfd6ed72e4fa6c883697955df
35,607
def identity(n):
    """
    Creates a n x n identity matrix.
    """
    I = zeroes(n, n)
    for i in range(n):
        I.g[i][i] = 1.0
    return I
3c59a042f91dfe8778a9676436a26d14b8db1ed9
35,608
def mnist_test_labels_file():
    """
    Test labels of MNIST.

    :return: filepath
    :rtype: str
    """
    return data_file('mnist/test_labels', HDF5_EXT)
48bd7426fd89f4f2f8319c7c3cba4a62ab33813d
35,610
def is_eval_epoch(cur_epoch):
    """Determines if the model should be evaluated at the current epoch."""
    return cfg.TRAIN.EVAL_PERIOD != -1 and (
        (cur_epoch + 1) % cfg.TRAIN.EVAL_PERIOD == 0 or
        (cur_epoch + 1) == cfg.OPTIM.MAX_EPOCH
    )
41f5375aab3147371eaf7862dcc5b2919e47f46a
35,611
from datetime import datetime


def utc_now_to_file_str():
    """
    Format UTC now to _YYYYmmdd_HHMMSS

    :return:
    """
    return datetime.strftime(datetime.utcnow(), '_%Y%m%d_%H%M%S')
3f41612f871d6a5bd2b55156e9a418e5baee52cb
35,612
from typing import Union from typing import cast def intersect1d( pda1: groupable, pda2: groupable, assume_unique: bool = False ) -> Union[pdarray, groupable]: """ Find the intersection of two arrays. Return the sorted, unique values that are in both of the input arrays. Parameters ---------- pda1 : pdarray/Sequence[pdarray, Strings, Categorical] Input array/Sequence of groupable objects pda2 : pdarray/List Input array/sequence of groupable objects assume_unique : bool If True, the input arrays are both assumed to be unique, which can speed up the calculation. Default is False. Returns ------- pdarray/groupable Sorted 1D array/List of sorted pdarrays of common and unique elements. Raises ------ TypeError Raised if either pda1 or pda2 is not a pdarray RuntimeError Raised if the dtype of either pdarray is not supported See Also -------- unique, union1d Notes ----- ak.intersect1d is not supported for bool or float64 pdarrays Examples -------- # 1D Example >>> ak.intersect1d([1, 3, 4, 3], [3, 1, 2, 1]) array([1, 3]) # Multi-Array Example >>> a = ak.arange(5) >>> b = ak.array([1, 5, 3, 4, 2]) >>> c = ak.array([1, 4, 3, 2, 5]) >>> d = ak.array([1, 2, 3, 5, 4]) >>> multia = [a, a, a] >>> multib = [b, c, d] >>> ak.intersect1d(multia, multib) [array([1, 3]), array([1, 3]), array([1, 3])] """ if isinstance(pda1, pdarray) and isinstance(pda2, pdarray): if pda1.size == 0: return pda1 # nothing in the intersection if pda2.size == 0: return pda2 # nothing in the intersection if (pda1.dtype == int and pda2.dtype == int) or ( pda1.dtype == akuint64 and pda2.dtype == akuint64 ): repMsg = generic_msg( cmd="intersect1d", args="{} {} {}".format(pda1.name, pda2.name, assume_unique) ) return create_pdarray(cast(str, repMsg)) if not assume_unique: pda1 = cast(pdarray, unique(pda1)) pda2 = cast(pdarray, unique(pda2)) aux = concatenate((pda1, pda2), ordered=False) aux_sort_indices = argsort(aux) aux = aux[aux_sort_indices] mask = aux[1:] == aux[:-1] int1d = aux[:-1][mask] return int1d elif (isinstance(pda1, list) or isinstance(pda1, tuple)) and ( isinstance(pda2, list) or isinstance(pda2, tuple) ): multiarray_setop_validation(pda1, pda2) if not assume_unique: ag = GroupBy(pda1) ua = ag.unique_keys bg = GroupBy(pda2) ub = bg.unique_keys else: ua = pda1 ub = pda2 # Key for deinterleaving result isa = concatenate( (ones(ua[0].size, dtype=akbool), zeros(ub[0].size, dtype=akbool)), ordered=False ) c = [concatenate(x, ordered=False) for x in zip(ua, ub)] g = GroupBy(c) if assume_unique: # need to verify uniqueness, otherwise answer will be wrong if (g.sum(isa)[1] > 1).any(): raise ValueError("Called with assume_unique=True, but first argument is not unique") if (g.sum(~isa)[1] > 1).any(): raise ValueError("Called with assume_unique=True, but second argument is not unique") k, ct = g.count() in_union = ct == 2 return [x[in_union] for x in k] else: raise TypeError( f"Both pda1 and pda2 must be pdarray, List, or Tuple. Received {type(pda1)} and {type(pda2)}" )
a5408d3e738d88fb76e355ec9acf8a26e3c6b5aa
35,614
def less_or_equal(left: ValueOrExpression, right: ValueOrExpression) -> Expression:
    """
    Constructs a *less than or equal to* expression.
    """
    return Comparison(
        operators.ComparisonOperator.LE, ensure_expr(left), ensure_expr(right)
    )
f790a117516c1ecd90fdcc289e56050449a106c4
35,615
def build_birnn_multifeature_coattention_model( voca_dim, time_steps, num_feature_channels, num_features, feature_dim, output_dim, model_dim, atten_dim, mlp_dim, item_embedding=None, rnn_depth=1, mlp_depth=1, drop_out=0.5, rnn_drop_out=0., rnn_state_drop_out=0., trainable_embedding=False, gpu=False, return_customized_layers=False): """ Create A Bidirectional Attention Model. :param voca_dim: vocabulary dimension size. :param time_steps: the length of input :param output_dim: the output dimension size :param model_dim: rrn dimension size :param mlp_dim: the dimension size of fully connected layer :param item_embedding: integer, numpy 2D array, or None (default=None) If item_embedding is a integer, connect a randomly initialized embedding matrix to the input tensor. If item_embedding is a matrix, this matrix will be used as the embedding matrix. If item_embedding is None, then connect input tensor to RNN layer directly. :param rnn_depth: rnn depth :param mlp_depth: the depth of fully connected layers :param num_feature_channels: the number of attention channels, this can be used to mimic multi-head attention mechanism :param drop_out: dropout rate of fully connected layers :param rnn_drop_out: dropout rate of rnn layers :param rnn_state_drop_out: dropout rate of rnn state tensor :param trainable_embedding: boolean :param gpu: boolean, default=False If True, CuDNNLSTM is used instead of LSTM for RNN layer. :param return_customized_layers: boolean, default=False If True, return model and customized object dictionary, otherwise return model only :return: keras model """ if model_dim % 2 == 1: model_dim += 1 if item_embedding is not None: inputs = models.Input(shape=(time_steps,), dtype='int32', name='input0') x1 = inputs # item embedding if isinstance(item_embedding, np.ndarray): assert voca_dim == item_embedding.shape[0] x1 = layers.Embedding( voca_dim, item_embedding.shape[1], input_length=time_steps, weights=[item_embedding, ], trainable=trainable_embedding, mask_zero=False, name='embedding_layer0' )(x1) elif utils.is_integer(item_embedding): x1 = layers.Embedding( voca_dim, item_embedding, input_length=time_steps, trainable=trainable_embedding, mask_zero=False, name='embedding_layer0' )(x1) else: raise ValueError("item_embedding must be either integer or numpy matrix") else: inputs = models.Input(shape=(time_steps, voca_dim), dtype='float32', name='input0') x1 = inputs inputs1 = list() for fi in range(num_feature_channels): inputs1.append(models.Input(shape=(num_features, feature_dim), dtype='float32', name='input1' + str(fi))) feature_map_layer = layers.TimeDistributed( layers.Dense(model_dim, name="feature_map_layer", activation="sigmoid"), name="td_feature_map_layer" ) x2s = list(map( lambda input_: feature_map_layer(input_), inputs1 )) if gpu: # rnn encoding for i in range(rnn_depth): x1 = layers.Bidirectional( layers.CuDNNLSTM(int(model_dim / 2), return_sequences=True), name='bi_lstm_layer' + str(i))(x1) x1 = layers.BatchNormalization(name='rnn_batch_norm_layer' + str(i))(x1) x1 = layers.Dropout(rnn_drop_out, name="rnn_dropout_layer" + str(i))(x1) else: # rnn encoding for i in range(rnn_depth): x1 = layers.Bidirectional( layers.LSTM(int(model_dim / 2), return_sequences=True, dropout=rnn_drop_out, recurrent_dropout=rnn_state_drop_out), name='bi_lstm_layer' + str(i))(x1) x1 = layers.BatchNormalization(name='rnn_batch_norm_layer' + str(i))(x1) coatten_layer = clayers.CoAttentionWeight(name="coattention_weights_layer") featnorm_layer1 = 
clayers.FeatureNormalization(name="normalized_coattention_weights_layer1", axis=1) featnorm_layer2 = clayers.FeatureNormalization(name="normalized_coattention_weights_layer2", axis=2) focus_layer1 = layers.Dot((1, 1), name="focus_layer1") focus_layer2 = layers.Dot((2, 1), name="focus_layer2") pair_layer1 = layers.Concatenate(axis=-1, name="pair_layer1") pair_layer2 = layers.Concatenate(axis=-1, name="pair_layer2") compare_layer1 = layers.TimeDistributed(layers.Dense(model_dim, activation="relu"), name="compare_layer1") compare_layer2 = layers.TimeDistributed(layers.Dense(model_dim, activation="relu"), name="compare_layer2") flatten_layer = layers.Flatten(name="flatten_layer") xs = list() for x2_ in x2s: xs += _coatten_compare_aggregate( coatten_layer, featnorm_layer1, featnorm_layer2, focus_layer1, focus_layer2, pair_layer1, pair_layer2, compare_layer1, compare_layer2, flatten_layer, x1, x2_) x = layers.Concatenate(axis=1, name="concat_feature_layer")(xs) # MLP Layers for i in range(mlp_depth - 1): x = layers.Dense(mlp_dim, activation='selu', kernel_initializer='lecun_normal', name='selu_layer' + str(i))(x) x = layers.AlphaDropout(drop_out, name='alpha_layer' + str(i))(x) outputs = layers.Dense(output_dim, activation="softmax", name="softmax_layer0")(x) model = models.Model([inputs] + inputs1, outputs) if return_customized_layers: return model, {'CoAttentionWeight': clayers.CoAttentionWeight, "FeatureNormalization": clayers.FeatureNormalization} return model
f212f748323136ec994035d8b71d330f5e02b601
35,617
def splitext(value):
    """Return a filename sans extension. Alias to os.path.splitext."""
    return pathsplitext(value)[0]
b5a72e03895e32903ac912d8e1527d659b377bd9
35,618
def normspec(*specs, smooth=False, span=13, order=1):
    """ Normalize a series of 1D signals.

    **Parameters**\n
    *specs: list/2D array
        Collection of 1D signals.
    smooth: bool | False
        Option to smooth the signals before normalization.
    span, order: int, int | 13, 1
        Smoothing parameters of the LOESS method
        (see ``scipy.signal.savgol_filter()``).

    **Return**\n
    normalized_specs: 2D array
        The matrix assembled from a list of maximum-normalized signals.
    """
    nspec = len(specs)
    specnorm = []

    for i in range(nspec):
        spec = specs[i]

        if smooth:
            spec = savgol_filter(spec, span, order)

        if type(spec) in (list, tuple):
            nsp = spec / max(spec)
        else:
            nsp = spec / spec.max()

        specnorm.append(nsp)

    # Align 1D spectrum
    normalized_specs = np.asarray(specnorm)

    return normalized_specs
9fa751fe0cfada0114ba071135399a1ea8a461c5
35,619
import pickle


def load():
    """
    Load the bibmanager database of BibTeX entries.

    Returns
    -------
    List of Bib() entries.  Return an empty list if there is
    no database file.

    Examples
    --------
    >>> import bibmanager.bib_manager as bm
    >>> bibs = bm.load()
    """
    try:
        with open(u.BM_DATABASE, 'rb') as handle:
            return pickle.load(handle)
    except:
        # TBD: I think I'm not defaulting to this case anymore, I should
        # let it break if the input file does not exist
        return []
501adedfb1bb5a4203351ea5ab0acd3e9b495780
35,620
def repeat_n_m(v):
    """Repeat elements in a vector.

    Returns a vector with the elements of the vector *v* repeated *n*
    times, where *n* denotes the position of the element in *v*.
    The function can be used to order the coefficients in the vector
    according to the order of spherical harmonics. If *v* is a matrix,
    it is treated as a stack of vectors residing in the last index and
    broadcast accordingly.

    Parameters
    ----------
    v : (,N+1) numpy.ndarray
        Input vector or stack of input vectors.

    Returns
    -------
    : (,(N+1)**2) numpy.ndarray
        Vector or stack of vectors containing repeated values.
    """
    krlist = [np.tile(v, (2*i+1, 1)).T for i, v in enumerate(v.T.tolist())]
    return np.squeeze(np.concatenate(krlist, axis=-1))
e26cec345f4ed88a64d85452f518b5d497dc0f1e
35,621
def contingency(cont_table=None, alpha=0.05, precision=4):
    """
    Check RULE of FIVE before running the test

    >>> return chi2, p, dof, ex
    """
    chi2, p, dof, ex = stats.chi2_contingency(cont_table, correction=False)
    chi2_cv = stats.chi2.ppf(1 - alpha, dof)
    flag = False
    if p < alpha:
        flag = True
    result = f"""======= Tests of Independence: Contingency Table =======
chi2 Statistics = {chi2:.{precision}f}
chi2 critical value = {chi2_cv:.{precision}f}
Degree of freedom = {dof}
p-value = {p:.{precision}f} ({inter_p_value(p)})
Reject H_0 (dependent) → {flag}

Expected Frequency:
{ex}
"""
    print(result)
    return chi2, p, dof, ex
3006636096be34032de612b1272cd508ae312b42
35,622
def smp_dict():
    """Returns a dictionary containing typical options for a generic Sample object"""
    out = base_dict()
    out['mro']['current'] = ['Sample']
    out['name']['current'] = 'Sample'
    ao(out, 'idx', 'Integer', attr=['Hidden'])
    ao(out, 'ii', 'Integer', attr=['Hidden'])
    ao(out, 'initialDimension', 'Float', 0., name='Initial Dimension')
    return out
7ef58042c2826591f2fb5ba21b055f334528b157
35,623
def setup():
    """
    Connect to the Arango database and Elasticsearch.

    Returns
    -------
    Connection, Database, list
        1. The connection to Elasticsearch
        2. The ArangoDB database that holds the collection
        3. The list of fields that are taken over from ArangoDB to Elasticsearch

    Raises
    ------
    ConfigError
        If a required entry is missing in the config file.
    DBError
        If the connection to the ArangoDB server failed or the database or
        the collection can not be found.
    SearchError
        If the connection to the Elasticsearch server failed.
    """
    if "es_hosts" not in config:
        raise ConfigError("Setting missing in config file: 'es_hosts'.")
    conn = connections.create_connection(hosts=config["es_hosts"])
    if not conn.ping():
        raise SearchError("Connection to the Elasticsearch server failed.")
    try:
        arangoconn = connect(config)
        db = select_db(config, arangoconn)
    except (ConfigError, DBError) as e:
        logger.error(e)
        raise e
    return conn, db, config["fields"]
f140a72a1e05d34a810bb7ef8ea3ae855c0307f6
35,625
def test_timedelta_tests(): """These test cases are taken from CPython's Lib/test/datetimetester.py""" # Create compatibility functions so rest of test can be pasted with minimal # changes def eq(a, b): assert a == b def td(days=0, seconds=0, microseconds=0): return TimeDelta(days=days, seconds=seconds, microseconds=microseconds) a = td(7) # One week b = td(0, 60) # One minute c = td(0, 0, 1000) # One millisecond eq(a+b+c, td(7, 60, 1000)) eq(a-b, td(6, 24*3600 - 60)) eq(b.__rsub__(a), td(6, 24*3600 - 60)) eq(-a, td(-7)) eq(+a, td(7)) eq(-b, td(-1, 24*3600 - 60)) eq(-c, td(-1, 24*3600 - 1, 999000)) eq(abs(a), a) eq(abs(-a), a) eq(td(6, 24*3600), a) eq(td(0, 0, 60*1000000), b) eq(a*10, td(70)) eq(a*10, 10*a) eq(a*10, 10*a) eq(b*10, td(0, 600)) eq(10*b, td(0, 600)) eq(b*10, td(0, 600)) eq(c*10, td(0, 0, 10000)) eq(10*c, td(0, 0, 10000)) eq(c*10, td(0, 0, 10000)) eq(a*-1, -a) eq(b*-2, -b-b) eq(c*-2, -c+-c) eq(b*(60*24), (b*60)*24) eq(b*(60*24), (60*b)*24) eq(c*1000, td(0, 1)) eq(1000*c, td(0, 1)) eq(a//7, td(1)) eq(b//10, td(0, 6)) eq(c//1000, td(0, 0, 1)) eq(a//10, td(0, 7*24*360)) eq(a//3600000, td(0, 0, 7*24*1000)) eq(a/0.5, td(14)) eq(b/0.5, td(0, 120)) eq(a/7, td(1)) eq(b/10, td(0, 6)) eq(c/1000, td(0, 0, 1)) eq(a/10, td(0, 7*24*360)) eq(a/3600000, td(0, 0, 7*24*1000)) # Multiplication by float us = td(microseconds=1) eq((3*us) * 0.5, 2*us) eq((5*us) * 0.5, 2*us) eq(0.5 * (3*us), 2*us) eq(0.5 * (5*us), 2*us) eq((-3*us) * 0.5, -2*us) eq((-5*us) * 0.5, -2*us) # Issue #23521 # Note: TimeDelta differs in output here from timedelta because integer # number of microseconds is used. eq(td(seconds=1) * 0.123456, td(microseconds=123456)) eq(td(seconds=1) * 0.6112295, td(microseconds=611230)) # Division by int and float eq((3*us) / 2, 2*us) eq((5*us) / 2, 2*us) eq((-3*us) / 2.0, -2*us) eq((-5*us) / 2.0, -2*us) eq((3*us) / -2, -2*us) eq((5*us) / -2, -2*us) eq((3*us) / -2.0, -2*us) eq((5*us) / -2.0, -2*us) for i in range(-10, 10): eq((i*us/3)//us, round(i/3)) for i in range(-10, 10): eq((i*us/-3)//us, round(i/-3)) # Issue #23521 eq(td(seconds=1) / (1 / 0.6112295), td(microseconds=611230)) # Issue #11576 eq(td(999999999, 86399, 999999) - td(999999999, 86399, 999998), td(0, 0, 1)) eq(td(999999999, 1, 1) - td(999999999, 1, 0), td(0, 0, 1))
dc61cd77621a3e8a7b1ffd9999a147c84af1da59
35,626
def shorten_line(line: Line, intersections: list[Matchstick], gw: GameWindow) -> Line:
    """
    Shorten a line so that it fits nicely within the row and doesn't get too
    close to adjacent sticks when drawn

    :param line: the line to shorten
    :param intersections: the sticks that the line intersects with
    :param gw: the game window
    :return: the shortened line
    """
    # Get the smallest and largest x coordinates of the intersected sticks
    smallest_stick_x = get_min_x(intersections)
    largest_stick_x = get_max_x(intersections)

    # All the sticks are on the same row, so they all have the same y coordinates
    y_low = intersections[0].v_pos - gw.stick_length / 2
    y_high = intersections[0].v_pos + gw.stick_length / 2

    # Adjust the x and y coordinates
    new_line = chop_y(line, y_low, y_high)
    new_line = chop_x(new_line, smallest_stick_x - gw.h_spacing/3,
                      largest_stick_x + gw.h_spacing/3)
    return new_line
381c70da0e5ee740e1c563e033d851e920e62702
35,627
def uprev_overlays(overlays, build_targets=None, chroot=None, output_dir=None):
    """Uprev the given overlays.

    Args:
        overlays (list[str]): The list of overlay paths.
        build_targets (list[build_target_lib.BuildTarget]|None): The build targets
            to clean in |chroot|, if desired. No effect unless |chroot| is provided.
        chroot (chroot_lib.Chroot|None): The chroot to clean, if desired.
        output_dir (str|None): The path to optionally dump result files.

    Returns:
        list[str] - The paths to all of the modified ebuild files. This includes the
            new files that were added (i.e. the new versions) and all of the removed
            files (i.e. the old versions).
    """
    assert overlays

    manifest = git.ManifestCheckout.Cached(constants.SOURCE_ROOT)

    uprev_manager = uprev_lib.UprevOverlayManager(
        overlays,
        manifest,
        build_targets=build_targets,
        chroot=chroot,
        output_dir=output_dir)
    uprev_manager.uprev()

    return uprev_manager.modified_ebuilds
0190e28ff270596cd2829c7956044a4af69fb328
35,628
import math


def find_closest_pucker(phi, theta, pucker_dict):
    """
    Calculated based on chord length:
        delta_x = sin(phi2) * cos(theta2) - cos(phi1) * cos(theta1)
        delta_y = sin(phi2) * sin(theta2) - cos(phi1) * sin(theta1)
        delta_z = cos(phi2) - cos(phi1)
        chord_length = sqrt(delta_x^2 + delta_y^2 + delta_z^2)  (aka norm of the vectors)
        central angle (aka delta_sigma) = 2 * arcsin(chord / 2)
        d = r * delta_sigma

    :param phi: phi of the pucker to identify
    :param theta: second CP parameter of the pucker to identify
    :param pucker_dict: dictionary of names and CP params of IUPAC puckers
    :return: closest_pucker: (string) closest IUPAC pucker name
    """
    closest_pucker = None
    closest_dist = np.inf
    xyz = angles_to_xyz(phi, theta)
    for pucker in pucker_dict:
        chord = np.linalg.norm(xyz - pucker_dict[pucker][XYZ])
        current_dist = 2. * math.asin(chord / 2.)
        if current_dist < closest_dist:
            closest_dist = current_dist
            closest_pucker = pucker
    if closest_pucker is None:
        warning("Did not find a closest pucker. Check puckering dictionary.")
    return closest_pucker
224395dad4514062e6e065e8506c64e06832bb9d
35,629
def signal_ramp(k_start):
    """Signal generator for a ramp signal

    Parameters
    ----------
    k_start : SignalUserTemplate
        the sampling index as returned by counter() at which the ramp starts increasing.

    Returns
    -------
    SignalUserTemplate
        the output signal

    Details
    -------
        y[k] = { 0            for k < k_start
                { k - k_start  for k >= k_start
    """
    k = dy.counter()
    active = dy.int32(k_start) <= k
    linearRise = dy.convert((k - dy.int32(k_start)), dy.DataTypeFloat64(1))
    activation = dy.convert(active, dy.DataTypeFloat64(1))

    return activation * linearRise
045d1e8530993e47c9da2bbfb38105c86007ba37
35,630
def pooling_layer(inputs, pooling=constants.MAXPOOL, pool_size=2, strides=2, name=None):
    """
    Args:
        inputs: (4d tensor) input tensor of shape
            [batch_size, height, width, n_channels]
        pooling: (Optional, {AVGPOOL, MAXPOOL}, defaults to MAXPOOL) Type of
            pooling to be used, which is always of stride 2 and pool size 2.
        pool_size: (Optional, defaults to 2) pool size of pooling operation
        strides: (Optional, defaults to 2) strides of pooling operation
        name: (Optional, defaults to None) A name for the operation.
    """
    errors.check_valid_value(
        pooling, 'pooling', [constants.AVGPOOL, constants.MAXPOOL])

    if pooling == constants.AVGPOOL:
        outputs = tf.layers.average_pooling2d(
            inputs=inputs, pool_size=pool_size, strides=strides, name=name)
    else:  # MAXPOOL
        outputs = tf.layers.max_pooling2d(
            inputs=inputs, pool_size=pool_size, strides=strides, name=name)
    return outputs
856c4c9c378d11764f2885defe433d1b9ec85814
35,631
import types def test_equal_ImageDecoderSlice_ImageDecoder(): """ Comparing results of pipeline: (ImageDecoder -> Slice), with the same operation performed by fused operator """ batch_size =128 eii = ExternalInputIterator(128) pos_size_iter = iter(eii) class NonFusedPipeline(Pipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(NonFusedPipeline, self).__init__(batch_size, num_threads, device_id) self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus) self.input_crop_pos = ops.ExternalSource() self.input_crop_size = ops.ExternalSource() self.input_crop = ops.ExternalSource() self.decode = ops.ImageDecoder(device='mixed', output_type=types.RGB) self.slice = ops.Slice(device = 'gpu') def define_graph(self): jpegs, labels = self.input() self.crop_pos = self.input_crop_pos() self.crop_size = self.input_crop_size() images = self.decode(jpegs) slice = self.slice(images, self.crop_pos, self.crop_size) return (slice, labels) def iter_setup(self): (crop_pos, crop_size) = pos_size_iter.next() self.feed_input(self.crop_pos, crop_pos) self.feed_input(self.crop_size, crop_size) class FusedPipeline(Pipeline): def __init__(self, batch_size, num_threads, device_id, num_gpus): super(FusedPipeline, self).__init__(batch_size, num_threads, device_id) self.input = ops.CaffeReader(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus) self.input_crop_pos = ops.ExternalSource() self.input_crop_size = ops.ExternalSource() self.input_crop = ops.ExternalSource() self.decode = ops.ImageDecoderSlice(device = 'mixed', output_type = types.RGB) def define_graph(self): jpegs, labels = self.input() self.crop_pos = self.input_crop_pos() self.crop_size = self.input_crop_size() images = self.decode(jpegs, self.crop_pos, self.crop_size) return (images, labels) def iter_setup(self): (crop_pos, crop_size) = pos_size_iter.next() self.feed_input(self.crop_pos, crop_pos) self.feed_input(self.crop_size, crop_size) nonfused_pipe = NonFusedPipeline(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1) nonfused_pipe.build() nonfused_pipe_out = nonfused_pipe.run() fused_pipe = FusedPipeline(batch_size=batch_size, num_threads=1, device_id = 0, num_gpus = 1) fused_pipe.build() fused_pipe_out = fused_pipe.run() for i in range(batch_size): nonfused_pipe_out_cpu = nonfused_pipe_out[0].as_cpu() fused_pipe_out_cpu = fused_pipe_out[0].as_cpu() assert(np.sum(np.abs(nonfused_pipe_out_cpu.at(i)-fused_pipe_out_cpu.at(i)))==0)
6dc72175560dba39d54a03da9aaaca67e6cf16c9
35,632
from typing import Union


def count_tiles(reader_or_writer: Union[BioReader, BioWriter]) -> int:
    """ Returns the number of tiles in a BioReader/BioWriter. """
    tile_size = TILE_SIZE_2D if reader_or_writer.Z == 1 else TILE_SIZE_3D
    num_tiles = (
        len(range(0, reader_or_writer.Z, tile_size)) *
        len(range(0, reader_or_writer.Y, tile_size)) *
        len(range(0, reader_or_writer.X, tile_size))
    )
    return num_tiles
1e70208c4269dc9cbee3d6926283a78554fcac44
35,633
import re


def fn(groups, lsv_fn):
    """Regular expression did not contain a match"""
    field, pattern = groups
    route_regex = re.compile(pattern)
    return lambda data: route_regex.search(str(lsv_fn(data, field))) == None
65667e58af7e9e6bb4e38d28b267d60e9bc6bd46
35,634
import glob import random def airq_data_loader(normalize="none"): """Function to load the Air Quality dataset into TF dataset objects The data is loaded, normalized, padded, and a mask channel is generated to indicate missing observations The raw csv files can be downloaded from: https://archive.ics.uci.edu/ml/datasets/Beijing+Multi-Site+Air-Quality+Data Args: normalize: The type of data normalizatino to perform ["none", "mean_zero", "min_max"] """ all_files = glob.glob("./data/air_quality/*.csv") column_list = ["year", "month", "day", "hour", "PM2.5", "PM10", "SO2", "NO2", "CO", "O3", "TEMP", "PRES", "DEWP", "RAIN", "WSPM", "station"] feature_list = ["PM2.5", "PM10", "SO2", "NO2", "CO", "O3", "TEMP", "PRES", "DEWP", "WSPM"] sample_len = 24 *28 *1 # 2 months worth of data all_stations = [] for file_names in all_files: station_data = pd.read_csv(file_names)[column_list] all_stations.append(station_data) all_stations = pd.concat(all_stations, axis=0, ignore_index=True) df_sampled = all_stations[column_list].groupby(['year', 'month', 'station']) signals, signal_maps = [], [] inds, valid_inds, test_inds = [], [], [] z_ls, z_gs = [], [] for i, sample in enumerate(df_sampled): if len(sample[1]) < sample_len: continue # Determine training indices for different years if sample[0][0] in [2013, 2014, 2015, 2017]: inds.extend([i] ) elif sample[0][0] in [2016]: # data from 2016 is used for testing, because we have fewer recordings for the final year test_inds.extend([i]) x = sample[1][feature_list][:sample_len].astype('float32') sample_map = x.isna().astype('float32') z_l = sample[1][['day', 'RAIN']][:sample_len] x = x.fillna(0) z_g = np.array(sample[0]) signals.append(np.array(x)) signal_maps.append(np.array(sample_map)) z_ls.append(np.array(z_l)) z_gs.append(np.array(z_g)) signals_len = np.zeros((len(signals),)) + sample_len signals = np.stack(signals) signal_maps = np.stack(signal_maps) z_ls = np.stack(z_ls) z_gs = np.stack(z_gs) random.shuffle(inds) train_inds = inds[:int(len(inds)*0.85)] valid_inds = inds[int(len(inds)*0.85):] train_signals, valid_signals, test_signals, normalization_specs = normalize_signals(signals, signal_maps, (train_inds, valid_inds, test_inds), normalize) # plot a random sample ind = np.random.randint(0, len(train_inds)) f, axs = plt.subplots(nrows=train_signals.shape[-1], ncols=1, figsize=(18 ,14)) for i, ax in enumerate(axs): ax.plot(train_signals[ind, :, i]) ax.set_title(feature_list[i]) plt.tight_layout() plt.savefig('./data/air_quality/sample.pdf') trainset = tf.data.Dataset.from_tensor_slices((train_signals, signal_maps[train_inds], signals_len[train_inds], z_ls[train_inds], z_gs[train_inds])).shuffle(10).batch(10) validset = tf.data.Dataset.from_tensor_slices( (valid_signals, signal_maps[valid_inds], signals_len[valid_inds], z_ls[valid_inds], z_gs[valid_inds])).shuffle(10).batch(10) testset = tf.data.Dataset.from_tensor_slices( (test_signals, signal_maps[test_inds], signals_len[test_inds], z_ls[test_inds], z_gs[test_inds])).shuffle(10).batch(10) return trainset, validset, testset, normalization_specs
c70217ed045633d39b7758bfc00ede6aaa483c58
35,635
def get_living_neighbors(i, j, generation):
    """
    returns living neighbors around the cell
    """
    living_neighbors = 0  # count for living neighbors
    neighbors = [(i-1, j), (i+1, j),
                 (i, j-1), (i, j+1),
                 (i-1, j+1), (i-1, j-1),
                 (i+1, j+1), (i+1, j-1)]
    for k, l in neighbors:
        if 0 <= k < len(generation) and 0 <= l < len(generation[0]):
            if generation[k][l] == 1:
                living_neighbors += 1
    return living_neighbors
437229b8152c3b2ce5b90ef6ddef83daa5c24a85
35,636
def hook_makeOutline(VO, blines):
    """Return (tlines, bnodes, levels) for Body lines blines.
    blines is either Vim buffer object (Body) or list of buffer lines.
    """
    Z = len(blines)
    tlines, bnodes, levels = [], [], []
    tlines_add, bnodes_add, levels_add = tlines.append, bnodes.append, levels.append
    for i in xrange(Z):
        if not blines[i].startswith('+'):
            continue
        bline = blines[i]
        m = headline_match(bline)
        if not m:
            continue
        lev = len(m.group(1))
        head = bline[2+lev:].strip()
        tline = ' %s|%s' % ('. '*(lev-1), head)
        tlines_add(tline)
        bnodes_add(i+1)
        levels_add(lev)
    return (tlines, bnodes, levels)
b755b6580e983f5758bb301318b862bab083a240
35,637
def domain_min():
    """ Variable evaluator that represents the minimum value in the current domain
    of the variable chosen by the search.

    Returns:
        An evaluator of integer variable
    """
    return CpoFunctionCall(Oper_domain_min, Type_IntVarEval, ())
9dadab36bac75053e5b62012c27a9e1a1c484af7
35,639
def StrToList(val):
    """ Takes a string and makes it into a list of ints (<= 8 bits each)"""
    return [ord(c) for c in val]
79ee38dc4952b677896a77379c3cccca8f74eb2c
35,640
def our_completion_DFA(states, alphabet, transitions, initialState, finalStates):
    """
    For every transition (from, to, c) adds every transition (from, to, s')
    where s' represents c plus every other symbol
    """
    table = powerset_table(alphabet)
    new_alphabet = alphabet.union([str(i) for i in range(len(table))])
    new_transitions = []
    for from_s, to_s, c in transitions:
        for i, pairs in enumerate(table):
            if c in pairs and (from_s, to_s, str(i)) not in new_transitions:
                new_transitions.append((from_s, to_s, str(i)))
    return (states, new_alphabet, transitions + new_transitions, initialState, finalStates)
851b96bd218cda81197c636f942488404a11654e
35,641
def createCopy(source):
    """Link the source set to the destination

    If one does not find the value in the destination set, the search will go
    on to the source set to get the value. Values from source are
    copy-on-write, i.e. any attempt to modify one of them will end up putting
    the modified value in the destination set.
    """
    return source.createCopy()
0cd045dff53f0dcda2b9b3d86d595fb6d4b7bd25
35,642
import torch


def generate_data(network_list, data_set, Lb, Ub, total_data_num, NUM):
    """
    NUM is the number of samples to generate.
    """
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    device = "cpu"

    # run the networks
    total_predicts_data = []
    for k in range(len(data_set)):
        model = network_list[k]
        data_num = data_set[k].shape[0]
        T = torch.as_tensor(
            np.random.rand(int(NUM * data_num / total_data_num)).reshape(-1, 1) * 1.5 - 0.75,
            dtype=torch.float32).to(device)
        predicts_data_list = []
        predicts = model(T)
        count = 0
        for i in range(model.raw_dim):
            if i in model.outputParameter_index_list:
                predicts_data = predicts[:, [count]]
                count += 1
            else:
                predicts_data = torch.ones(T.shape[0], 1) * float(np.mean((data_set[k])[:, [i]]))
            predicts_data_list.append(predicts_data.cpu().detach().numpy())
        predicts_data_list = np.hstack(predicts_data_list)
        total_predicts_data.append(predicts_data_list)
    total_predicts_data = np.vstack(total_predicts_data)
    total_predicts_data = total_predicts_data * (Ub - Lb) + Lb
    return total_predicts_data
9e13ec3d722930883fdde7874731d7325f2f987a
35,644
from typing import Any def convert_audio( in_sound: ArrayLike, in_rate: int, *, out_rate: int, out_format: DTypeLike, out_channels: int ) -> NDArray[Any]: """Convert an audio sample into a format supported by this device. Returns the converted array. This might be a reference to the input array if no conversion was needed. Args: in_sound: The input ArrayLike sound sample. Input format and channels are derived from the array. in_rate: The samplerate of the input array. out_rate: The samplerate of the output array. out_format: The output format of the converted array. out_channels: The number of audio channels of the output array. .. versionadded:: 13.6 .. seealso:: :any:`AudioDevice.convert` """ in_array: NDArray[Any] = np.asarray(in_sound) if len(in_array.shape) == 1: in_array = in_array[:, np.newaxis] if not len(in_array.shape) == 2: raise TypeError(f"Expected a 1 or 2 ndim input, got {in_array.shape} instead.") cvt = ffi.new("SDL_AudioCVT*") in_channels = in_array.shape[1] in_format = _get_format(in_array.dtype) out_sdl_format = _get_format(out_format) if _check(lib.SDL_BuildAudioCVT(cvt, in_format, in_channels, in_rate, out_sdl_format, out_channels, out_rate)) == 0: return in_array # No conversion needed. # Upload to the SDL_AudioCVT buffer. cvt.len = in_array.itemsize * in_array.size out_buffer = cvt.buf = ffi.new("uint8_t[]", cvt.len * cvt.len_mult) np.frombuffer(ffi.buffer(out_buffer[0 : cvt.len]), dtype=in_array.dtype).reshape(in_array.shape)[:] = in_array _check(lib.SDL_ConvertAudio(cvt)) out_array: NDArray[Any] = ( np.frombuffer(ffi.buffer(out_buffer[0 : cvt.len_cvt]), dtype=out_format).reshape(-1, out_channels).copy() ) return out_array
5069db14d2cca3233ecc96d57874c6badf08b341
35,645
def normalize_tags(string):
    """Return a list of normalized tags from a string with comma separated tags"""
    tags = string.split(',')
    result = []
    for tag in tags:
        normalized = normalize(tag)
        if normalized and normalized not in result:
            result.append(normalized)
    return result
6a03e6681246709e33d4a34c169158d02b4d191c
35,646
def basemz(df):
    """
    The mz of the most abundant ion.
    """
    d = np.array(df.columns)[df.values.argmax(axis=1)]
    return Trace(d, df.index, name='basemz')
8843b0065e8de383743a5c0442246a01edd1de23
35,647
def M_matrix_old(s, c, m, l_max):
    """Legacy function.

    Same as :meth:`M_matrix` except trying to be cute with ufunc's,
    requiring scope capture with temp func inside
    :meth:`give_M_matrix_elem_ufunc`, which meant that numba could not
    speed up this method. Remains here for testing purposes.

    See documentation for :meth:`M_matrix` parameters and return value.
    """
    _ells = ells(s, m, l_max)
    uf = give_M_matrix_elem_ufunc(s, c, m)
    return uf.outer(_ells, _ells).astype(complex)
2d6576c56663d6cd7226d02b6d794305bb943ae7
35,648
def create_model(modelfunc, fname='', listw=[], outfname=''):
    """:modelfunc: is a function that takes a word and returns its splits.
    For the ngram model this function returns all the ngrams of a word, for
    PCFG it will return the split of the password.
    @modelfunc: func: string -> [list of strings]
    @fname: name of the file to read from
    @listw: list of passwords. Uses passwords from both the file and listw
            if provided.
    @outfname: the file to write down the model.
    """
    pws = []
    if fname:
        pws = helper.open_get_line(fname, limit=3e6)

    def join_iterators(_pws, listw):
        for p in _pws:
            yield p
        for p in listw:
            yield p

    big_dict = defaultdict(int)
    total_f, total_e = 0, 0
    for pw, c in join_iterators(pws, listw):
        for ng in modelfunc(pw):
            big_dict[ng] += c
            if len(big_dict) % 100000 == 0:
                print("Dictionary size: {}".format(len(big_dict)))
        total_f += c
        total_e += 1
    big_dict['__TOTAL__'] = total_e
    big_dict['__TOTALF__'] = total_f

    nDawg = dawg.IntCompletionDAWG(big_dict)
    if not outfname:
        outfname = 'tmpmodel.dawg'
    nDawg.save(outfname)
    return nDawg
7a6494b8017829e94fbfe364e07267b4f882a0b2
35,649
from typing import Collection


def iterable_to_wikitext(items: Collection[object]) -> str:
    """Convert iterable to wikitext."""
    if len(items) == 1:
        return f"{next(iter(items))}"
    text = ""
    for item in items:
        text += f"\n* {item}"
    return text
b4b082eb25e20deac738ae85d4141a0d3a98c349
35,650
def ElemMatch(q, *conditions):
    """
    The ElemMatch operator matches documents that contain an array field with
    at least one element that matches all the specified query criteria.
    """
    new_condition = {}
    for condition in conditions:
        if isinstance(condition, (Condition, Group)):
            condition = condition.to_dict()
        deep_merge(condition, new_condition)
    return Condition(q._path, new_condition, '$elemMatch')
cd45f06cc5bd19ecfc2539ec7de0f43d8e0bb05b
35,651
from typing import List


def get_links(client: SymphonyClient) -> List[Link]:
    """This function returns all existing links

    Returns:
        List[ `pyinventory.common.data_class.Link` ]

    Example:
        ```
        all_links = client.get_links()
        ```
    """
    links = LinksQuery.execute(client, first=PAGINATION_STEP)
    edges = links.edges if links else []
    while links is not None and links.pageInfo.hasNextPage:
        links = LinksQuery.execute(
            client, after=links.pageInfo.endCursor, first=PAGINATION_STEP
        )
        if links is not None:
            edges.extend(links.edges)

    result = []
    for edge in edges:
        node = edge.node
        if node is not None:
            result.append(
                Link(
                    id=node.id,
                    properties=node.properties,
                    service_ids=[s.id for s in node.services],
                )
            )
    return result
683f605239ade48e8938d9e7a23c3033a533abe6
35,652
def set(data, c):
    """
    Set Data to a Constant

    Parameters:

    * data  Array of spectral data.
    * c     Constant to set data to (may be complex)
    """
    data[..., :] = c
    return data
cff2592b3973bbd3f9a1a4dbaa6d6ba4b99260bc
35,654
def browse_announcements():
    """
    This function is used to browse Announcements Department-Wise
    made by different faculties and admin.

    @variables:
        cse_ann - Stores CSE Department Announcements
        ece_ann - Stores ECE Department Announcements
        me_ann - Stores ME Department Announcements
        sm_ann - Stores SM Department Announcements
        all_ann - Stores Announcements intended for all Departments
        context - Dictionary for storing all above data
    """
    cse_ann = Announcements.objects.filter(department="CSE")
    ece_ann = Announcements.objects.filter(department="ECE")
    me_ann = Announcements.objects.filter(department="ME")
    sm_ann = Announcements.objects.filter(department="SM")
    all_ann = Announcements.objects.filter(department="ALL")

    context = {
        "cse": cse_ann,
        "ece": ece_ann,
        "me": me_ann,
        "sm": sm_ann,
        "all": all_ann
    }
    return context
eaa72fbbd845c2b1b0771659fab2c0f772932d1e
35,655
def set_point_restraint(self, point, restraints):
    """
    params:
        point: str, name of point
        restraints: bool, list of 6 to set restraints
    return:
        status of success
    """
    try:
        assert len(restraints) == 6
        pt = self.session.query(Point).filter_by(name=point).first()
        if pt is None:
            raise Exception("Point doesn't exists.")
        res = self.session.query(PointRestraint).filter_by(point_name=point).first()
        if res is None:
            res = PointRestraint()
            res.point_name = point
            res.u1 = restraints[0]
            res.u2 = restraints[1]
            res.u3 = restraints[2]
            res.r1 = restraints[3]
            res.r2 = restraints[4]
            res.r3 = restraints[5]
            self.session.add(res)
        elif not (restraints[0] or restraints[1] or restraints[2] or
                  restraints[3] or restraints[4] or restraints[5]):
            self.session.delete(res)
        else:
            res.u1 = restraints[0]
            res.u2 = restraints[1]
            res.u3 = restraints[2]
            res.r1 = restraints[3]
            res.r2 = restraints[4]
            res.r3 = restraints[5]
            self.session.add(res)
        return True
    except Exception as e:
        logger.info(str(e))
        self.session.rollback()
        return False
1b512de2fc9dd1a9f57e85cab65eb029be1e036c
35,656
def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6) -> float: """Numpy implementation of the Frechet Distance. The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1) and X_2 ~ N(mu_2, C_2) is d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)). Stable version by Dougal J. Sutherland. Params: -- mu1 : Numpy array containing the activations of a layer of the inception net (like returned by the function 'get_predictions') for generated samples. -- mu2 : The sample mean over activations, precalculated on an representative data set. -- sigma1: The covariance matrix over activations for generated samples. -- sigma2: The covariance matrix over activations, precalculated on an representative data set. Returns: -- : The Frechet Distance. """ mu1 = np.atleast_1d(mu1) mu2 = np.atleast_1d(mu2) sigma1 = np.atleast_2d(sigma1) sigma2 = np.atleast_2d(sigma2) assert mu1.shape == mu2.shape, \ 'Training and test mean vectors have different lengths' assert sigma1.shape == sigma2.shape, \ 'Training and test covariances have different dimensions' diff = mu1 - mu2 # Product might be almost singular covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) if not np.isfinite(covmean).all(): msg = ('fid calculation produces singular product; ' 'adding %s to diagonal of cov estimates') % eps print(msg) offset = np.eye(sigma1.shape[0]) * eps covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) # Numerical error might give slight imaginary component if np.iscomplexobj(covmean): if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): m = np.max(np.abs(covmean.imag)) raise ValueError('Imaginary component {}'.format(m)) covmean = covmean.real tr_covmean = np.trace(covmean) return (diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean)
01a7434325192472add5030581df17d79a79b5e0
35,657
from typing import Any
from typing import Dict


def get_meta(instance: Any) -> Dict[str, Any]:
    """
    Returns object pjrpc metadata.
    """
    return getattr(instance, '__pjrpc_meta__', {})
1357cab8698297b8ba9c10423e4c0473690cb8f0
35,658
def initial_pop(size, password):
    """
    Generate a population consisting of random words, each with the same length
    as the password, and the population has the size specified.
    """
    return [word_generate(len(password)) for _ in range(size)]
08e797996928e94565a822c7b2e5075145568edd
35,659
from pathlib import Path


def getAllComicImagePaths(source="./comic_pages"):
    """Collect an ordered list of comic page images."""
    p = Path(source)
    files = [x for x in p.iterdir() if x.is_file()]
    return [f for f in sorted(files) if _isImage(f)]
1f9a560085ec090352d3a9e48c2d3521a2ee9bb5
35,660
import requests def login(): """[summary] Returns: [type]: [description] """ if current_user.is_authenticated: return flask.redirect(flask.url_for("bp.index")) if flask.request.method == "POST": if flask.request.form["submit_button"] == "GOOGLE LOGIN": # Find out what URL to hit for Google Login google_provider_cfg = requests.get(GOOGLE_DISCOVERY_URL).json() authorization_endpoint = google_provider_cfg["authorization_endpoint"] request_uri = client.prepare_request_uri( authorization_endpoint, redirect_uri=flask.request.base_url + "/callback", scope=["openid", "email", "profile"], ) return flask.redirect(request_uri) if flask.request.form["submit_button"] == "LOG IN HERE": email = flask.request.form.get("email") password = flask.request.form.get("password") if email == "" or password == "": flask.flash("Please enter email or password") return flask.render_template("login.html") my_user = User.query.filter_by(email=email).first() if not my_user or not check_password_hash(my_user.password, password): flask.flash("Please check your login details and try again") return redirect("/login") login_user(my_user) return flask.redirect(flask.url_for("bp.index")) return flask.render_template("login.html")
e25058f928b586b75a022b6438f882e24a596f1c
35,662
def settings_page():
    """
    The data web pages where you can download/delete the raw gnss data
    """
    return render_template("settings.html")
6c12dedb13ac88ebda7c67eabe25a73da54eaa21
35,663
def SaveNumResults(doc: NexDoc, fileName):
    """Saves the numerical results to a text file with the specified name."""
    return NexRun("SaveNumResults", locals())
9a1750d3bf92e2a5a459da5563705abe9e177bb0
35,664
def load_laplacian(n=0):
    """
    Laplacians have these normalizations (from Ashish Raj):
        n=0: L0 = diag(rowdegree) - C;
        n=1: L1 = eye(nroi) - diag(1./(rowdegree+eps)) * C;
        n=2: L2 = eye(nroi) - diag(1./(sqrt(rowdegree)+eps)) * C * diag(1./(sqrt(coldegree)+eps));
        n=3: L3 = eye(nroi) - diag(1./(sqrt(rowdegree.*coldegree)+eps)) * C;  % * diag(1./(sqrt(coldegree)+eps));
        n=4: L4 = eye(nroi) - diag(1./(sqrt(sqrt(rowdegree.*coldegree)+eps))) * C * diag(1./(sqrt(sqrt(rowdegree.*coldegree)+eps)));
        n=5: L5 = eye(nroi) - diag(1./(sqrt((rowdegree+coldegree)/2)+eps)) * C;  % * diag(1./(sqrt(coldegree)+eps));
    """
    laplacian_filepath = get_file_path('connectivity_matrices/laplacians.mat')
    laplacian = pd.DataFrame(loadmat(laplacian_filepath)['laplacians'][0][0][n])
    DK = load_atlas(atlas="DK", portion="LRRL")
    DK = DK.drop(
        [
            "Right-choroid-plexus",
            "Left-choroid-plexus",
            "Right-VentralDC",
            "Left-VentralDC",
        ],
        axis=0,
    )
    laplacian.columns = list(DK.index)
    laplacian.index = list(DK.index)
    return laplacian
188f5e3d920d4a7a660b4f2133c6675888efd7f8
35,665
def parse_host_port(address, default_port=None):
    """
    Parse an endpoint address given in the form "host:port".
    """
    if isinstance(address, tuple):
        return address

    def _fail():
        raise ValueError("invalid address %r" % (address,))

    def _default():
        if default_port is None:
            raise ValueError("missing port number in address %r" % (address,))
        return default_port

    if address.startswith('['):
        # IPv6 notation: '[addr]:port' or '[addr]'.
        # The address may contain multiple colons.
        host, sep, tail = address[1:].partition(']')
        if not sep:
            _fail()
        if not tail:
            port = _default()
        else:
            if not tail.startswith(':'):
                _fail()
            port = tail[1:]
    else:
        # Generic notation: 'addr:port' or 'addr'.
        host, sep, port = address.partition(':')
        if not sep:
            port = _default()
        elif ':' in host:
            _fail()

    return host, int(port)
883f09d67e6be048b98806f0f1bbb5e00472c47b
35,666
def free_residents(residents_prefs_dict, matched_dict):
    """
    Return a list of residents who have a non-empty preference list and are
    unmatched with any hospital.
    """
    fr = []
    for res in residents_prefs_dict:
        if residents_prefs_dict[res]:
            if not any(res in match for match in matched_dict.values()):
                fr.append(res)
    return fr
b07991f6286be3c0e4b163ca2f0991630f910b4c
35,667
def index(): """View of providing a feedback from the client side""" form = FeedbackForm() if form.validate_on_submit(): try: data = { 'email': form.email.data, 'title': form.title.data, 'content': form.content.data, } new_feedback = Feedback(**data) db.session.add(new_feedback) db.session.commit() committed_feedback = Feedback.query \ .filter_by(**data).first() token = committed_feedback.token send_email(data['email'], 'Thank you for your feedback.', 'main/mail/new_feedback', token=token) flash('Your Feedback is added Successfully') return render_template('/main/success.html', token=token) except SQLAlchemyError: flash('Failed to give a Feedback') return render_template('/main/index.html', form=form)
5974112cd5b87bfa204b73080346e50e63fb5ece
35,668
def predict_increment(big_n, obs_t, mu, basis_lag, coef):
    """
    This should return predicted increments between successive observations
    """
    rate_hat = predict_intensity(
        big_n,
        obs_t,
        mu,
        basis_lag,
        coef
    )
    increment_size = np.diff(obs_t)
    return rate_hat * increment_size
dbb17ac3538e25c1da4e426c7629f6c9c05737a1
35,669
def _dualprf_error_unwrap(data_ma, ref_ma, err_mask, pvel_arr, prf_arr): """ Finds the correction factor that minimises the difference between the gate velocity and the reference velocity Parameters ---------- data_ma : masked array Data ref_ma : masked array Reference data err_mask : bool array Mask for the identified outliers pvel_arr : array Primary (high/low PRF) velocity for each gate prf_arr : array PRF (high/low) of each gate Returns ------- nuw : int array Unwrap number (correction factor) for each gate """ # Convert non-outliers to zero ma_out = data_ma * err_mask th_arr_out = pvel_arr * err_mask ref_out = ref_ma * err_mask # Primary velocity and prf factor of low PRF gates prf_factor = np.unique(np.min(prf_arr))[0] th_l = th_arr_out.copy() th_l[prf_arr == prf_factor] = 0 dev = np.ma.abs(ma_out - ref_out) nuw = np.zeros(ma_out.shape) # Loop for possible correction factors for ni in range(-prf_factor, (prf_factor + 1)): # New velocity values for identified outliers if abs(ni) == prf_factor: v_corr_tmp = ma_out + 2 * ni * th_l else: v_corr_tmp = ma_out + 2 * ni * th_arr_out # New deviation for new velocity values dev_tmp = np.ma.abs(v_corr_tmp - ref_out) # Compare with previous deviation delta = dev - dev_tmp # Update unwrap number when deviation has decreased nuw[delta > 0] = ni # Update corrected velocity and deviation v_corr = ma_out + 2 * nuw * th_arr_out dev = np.ma.abs(v_corr - ref_out) return nuw.astype(int)
60230960aa37024f78d26e4df107ab22c91dafce
35,672
import pickle


def load(directory):
    """Loads pkl file from directory"""
    with open(directory, 'rb') as f:
        data = pickle.load(f)
    return data
d500c6f717535ee95f452abd435be4d8688a59a4
35,673
from typing import Callable
from typing import Iterable


def choose(
    chooser: Callable[[_TSource], Option[_TResult]]
) -> Callable[[Iterable[_TSource]], Iterable[_TResult]]:
    """Choose items from the sequence.

    Applies the given function to each element of the list. Returns
    the list comprised of the results x for each element where the
    function returns `Some(x)`.

    Args:
        chooser: The function to generate options from the elements.

    Returns:
        The list comprising the values selected from the chooser
        function.
    """

    def _choose(source: Iterable[_TSource]) -> Iterable[_TResult]:
        def mapper(x: _TSource) -> Iterable[_TResult]:
            return chooser(x).to_seq()

        return pipe(source, collect(mapper))

    return _choose
388a98d8c9a7b5cf34a19515a66a72216ba64817
35,674
from .src import connect_s_fast def connect_fast(ntwkA: Network, k: int, ntwkB: Network, l: int) -> Network: """ Connect two n-port networks together (using C-implementation) Specifically, connect ports `k` on `ntwkA` to ports `l` thru on `ntwkB`. The resultant network has `(ntwkA.nports + ntwkB.nports - 2)` ports. The port indices ('k','l') start from 0. Port impedances **are** taken into account. Parameters ---------- ntwkA : :class:`Network` network 'A' k : int starting port index on `ntwkA` ( port indices start from 0 ) ntwkB : :class:`Network` network 'B' l : int starting port index on `ntwkB` Returns ------- ntwkC : :class:`Network` new network of rank `(ntwkA.nports + ntwkB.nports - 2)` Note ---- The effect of mis-matched port impedances is handled by inserting a 2-port 'mismatch' network between the two connected ports. This mismatch Network is calculated with the :func:`impedance_mismatch` function. Examples -------- To implement a *cascade* of two networks >>> ntwkA = rf.Network('ntwkA.s2p') >>> ntwkB = rf.Network('ntwkB.s2p') >>> ntwkC = rf.connect(ntwkA, 1, ntwkB,0) """ num = 1 # some checking check_frequency_equal(ntwkA, ntwkB) # create output Network, from copy of input ntwkC = ntwkA.copy() # if networks' z0's are not identical, then connect a impedance # mismatch, which takes into account the effect of differing port # impedances. if assert_z0_at_ports_equal(ntwkA, k, ntwkB, l) == False: ntwkC.s = connect_s( ntwkA.s, k, impedance_mismatch(ntwkA.z0[:, k], ntwkB.z0[:, l]), 0) # the connect_s() put the mismatch's output port at the end of # ntwkC's ports. Fix the new port's impedance, then insert it # at position k where it belongs. ntwkC.z0[:, k:] = npy.hstack((ntwkC.z0[:, k + 1:], ntwkB.z0[:, [l]])) ntwkC.renumber(from_ports=[ntwkC.nports - 1] + range(k, ntwkC.nports - 1), to_ports=range(k, ntwkC.nports)) # call s-matrix connection function ntwkC.s = connect_s_fast(ntwkC.s, k, ntwkB.s, l) # combine z0 arrays and remove ports which were `connected` ntwkC.z0 = npy.hstack( (npy.delete(ntwkA.z0, range(k, k + num), 1), npy.delete(ntwkB.z0, range(l, l + num), 1))) return ntwkC
2e2fe2f57d5bc26ee3f6b1d11583f96d9f9c1ebc
35,675
import numpy


def interleave(left, right):
    """Convert two mono sources into one stereo source."""
    return numpy.ravel(numpy.vstack((left, right)), order='F')
29833d8b4516de2bdab9a33246cb165556d287bc
35,676
from typing import Tuple from typing import OrderedDict from operator import concat def DIN( item_seq_feat_group: EmbdFeatureGroup, other_feature_group: FeatureGroup, dnn_hidden_units: Tuple[int] = (64, 32, 1), dnn_activation: str = "dice", dnn_dropout: float = 0, dnn_bn: bool = False, l2_dnn: float = 0, lau_dnn_hidden_units: Tuple[int] = (32, 1), lau_dnn_activation: str = "dice", lau_dnn_dropout: float = 0, lau_dnn_bn: bool = False, lau_l2_dnn: float = 0, seed: int = 2022, ) -> Model: """Implementation of Deep Interest Network (DIN) model Parameters ---------- item_seq_feat_group : EmbdFeatureGroup Item sequence feature group. other_feature_group : FeatureGroup Feature group for other features. dnn_hidden_units : Tuple[int], optional DNN structure, by default ``(64, 32, 1)``. dnn_activation : str, optional DNN activation function, by default ``"dice"``. dnn_dropout : float, optional DNN dropout ratio, by default ``0``. dnn_bn : bool, optional Whether use batch normalization or not, by default ``False``. l2_dnn : float, optional DNN l2 regularization param, by default ``0``. lau_dnn_hidden_units : Tuple[int], optional DNN structure in local activation unit, by default ``(32, 1)``. lau_dnn_activation : str, optional DNN activation function in local activation unit, by default ``"dice"``. lau_dnn_dropout : float, optional DNN dropout ratio in local activation unit, by default ``0``. lau_dnn_bn : bool, optional Whether use batch normalization or not in local activation unit, by default ``False``. l2_dnn : float, optional DNN l2 regularization param in local activation unit, by default ``0``. seed : int, optional Random seed of dropout in local activation unit, by default ``2022``. Returns ------- Model A YouTubeDNN rank mdoel. References ---------- .. [1] Zhou, Guorui, et al. "Deep interest network for click-through rate prediction." Proceedings of the 24th ACM SIGKDD international conference on knowledge discovery & data mining. 2018. """ feature_pool = item_seq_feat_group.feat_pool other_dense, other_sparse = other_feature_group.embedding_lookup(pool_method="mean") embd_outputs = OrderedDict() id_input = None for feat in item_seq_feat_group.features: if id_input is None: id_input = feature_pool.init_input( feat.unit.name, {"name": feat.unit.name, "shape": (1,), "dtype": tf.int32}, ) sparse_embd = item_seq_feat_group.embd_layers[feat.unit.name] seq_input = item_seq_feat_group.input_layers[feat.name] lau = LocalActivationUnit( lau_dnn_hidden_units, lau_dnn_activation, lau_l2_dnn, lau_dnn_dropout, lau_dnn_bn, seed, ) embd_seq = sparse_embd(seq_input) # * (batch_size, seq_len, embd_dim) embd_seq = SqueezeMask()(embd_seq) # * att_score: (batch_size, 1, seq_len) query = sparse_embd(id_input) att_score = lau([query, embd_seq]) # * (batch_size, 1, embd_dim) embd_outputs[feat.name] = tf.matmul(att_score, embd_seq) local_activate_pool = list(embd_outputs.values()) # * concat input layers -> DNN dnn_input = concat(other_dense, other_sparse + local_activate_pool) dnn_output = DNN( hidden_units=tuple(list(dnn_hidden_units) + [1]), activation=dnn_activation, output_activation="sigmoid", l2_reg=l2_dnn, dropout_rate=dnn_dropout, use_bn=dnn_bn, seed=seed, )(dnn_input) # * Construct model inputs = list(feature_pool.input_layers.values()) model = Model(inputs=inputs, outputs=dnn_output) return model
da49559837584b53989e4d0989a09796f130fb51
35,677
def get_seconds(time_string):
    """ Convert e.g. 1m5.928s to seconds """
    minutes = float(time_string.split("m")[0])
    seconds = float(time_string.split("m")[1].split("s")[0])
    return minutes * 60.0 + seconds
5a729d24ab6c437fca536cae8ac3d34a45bb9054
35,678
import time


def generate_features_for_all_nodes(n_feature, features_filename):
    """
    generates node-list with features for RiWalk-NA
    """
    # generate node-features-df
    print("\tFeatures generation starts.")
    start_time = time.time()
    nodes_all_features_df = n_feature.gen_features_all_nodes()
    nodes_all_features_df.to_csv(features_filename, index=False)
    print("\tFeature generation lasted {} seconds.".format(time.time() - start_time))
    return nodes_all_features_df
e1f65c523a3140aa718ff867751657e04945fc38
35,679
def sharesnet34(**kwargs):
    """
    ShaResNet-34 model from 'ShaResNet: reducing residual network parameter number by
    sharing weights,' https://arxiv.org/abs/1702.08782.

    Parameters:
    ----------
    pretrained : bool, default False
        Whether to load the pretrained weights for model.
    root : str, default '~/.torch/models'
        Location for keeping the model parameters.
    """
    return get_sharesnet(blocks=34, model_name="sharesnet34", **kwargs)
50706761947f605fc0716740b07a661a6ad91277
35,680
import requests
from bs4 import BeautifulSoup


def query_spikeins(accession):
    """
    Query spike-in IDs from the ENCODE website.
    """
    query = f'https://www.encodeproject.org/experiments/{accession}/'
    page = requests.get(query)
    soup = BeautifulSoup(page.content, 'html.parser')
    for div in soup.find_all('div'):
        try:
            if div['data-test'] == 'spikeins':
                return div.find('a').get_text()
        except KeyError:
            continue
    return None
c05d9480bba0b052a44b4a683da4ef16fa01e6fc
35,682
import tqdm def pinnuts(f, M, Madapt, theta0, delta=0.6, epsilon=None): """ Implements the multinomial Euclidean Hamiltonian Monte Carlo sampler described in Betancourt (2016). Runs Madapt steps of burn-in, during which it adapts the step size parameter epsilon, then starts generating samples to return. Note the initial step size is tricky and not exactly the one from the initial paper. In fact the initial step size could be given by the user in order to avoid potential problems INPUTS ------ epsilon: float step size see nuts8 if you want to avoid tuning this parameter f: callable it should return the log probability and gradient evaluated at theta logp, grad = f(theta) M: int number of samples to generate. Madapt: int the number of steps of burn-in/how long to run the dual averaging algorithm to fit the step size epsilon. theta0: ndarray[float, ndim=1] initial guess of the parameters. KEYWORDS -------- delta: float targeted acceptance fraction OUTPUTS ------- samples: ndarray[float, ndim=2] M x D matrix of samples generated by NUTS. note: samples[0, :] = theta0 """ if len(np.shape(theta0)) > 1: raise ValueError('theta0 is expected to be a 1-D array') D = len(theta0) samples = np.empty((M + Madapt, D), dtype=float) lnprob = np.empty(M + Madapt, dtype=float) logp, grad = f(theta0) samples[0, :] = theta0 lnprob[0] = logp # Choose a reasonable first epsilon by a simple heuristic. if epsilon is None: epsilon = find_reasonable_epsilon(theta0, grad, logp, f) # Parameters to the dual averaging algorithm. gamma = 0.05 t0 = 10 kappa = 0.75 mu = log(10. * epsilon) # Initialize dual averaging algorithm. epsilonbar = 1 Hbar = 0 for m in tqdm.trange(1, M + Madapt): # Resample momenta. r0 = np.random.normal(0, 1, D) #joint lnp of theta and momentum r joint = logp - 0.5 * np.dot(r0, r0.T) # if all fails, the next sample will be the previous one samples[m, :] = samples[m - 1, :] lnprob[m] = lnprob[m - 1] alpha, nalpha, thetaprime, grad, logp = tree_sample(samples[m - 1, :], lnprob[m - 1], r0, grad, epsilon, f, joint, maxheight=10) samples[m, :] = thetaprime[:] lnprob[m] = logp # Do adaptation of epsilon if we're still doing burn-in. eta = 1. / float(m + t0) Hbar = (1. - eta) * Hbar + eta * (delta - alpha / float(nalpha)) if (m <= Madapt): epsilon = exp(mu - sqrt(m) / gamma * Hbar) eta = m ** -kappa epsilonbar = exp((1. - eta) * log(epsilonbar) + eta * log(epsilon)) else: epsilon = epsilonbar samples = samples[Madapt:, :] lnprob = lnprob[Madapt:] return samples, lnprob, epsilon
242c48e97137eb5ca843a3254c35b7e921d32703
35,685
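A sketch of how the sampler above might be called on a toy target, assuming numpy is available as np and that the module's helpers (find_reasonable_epsilon, tree_sample) are defined. The target is a standard 2-D Gaussian, for which the log density and its gradient have closed forms.

import numpy as np

def logp_gaussian(theta):
    # log density of a standard normal (up to an additive constant) and its gradient
    logp = -0.5 * np.dot(theta, theta)
    grad = -theta
    return logp, grad

theta0 = np.random.randn(2)
samples, lnprob, epsilon = pinnuts(logp_gaussian, M=500, Madapt=500, theta0=theta0, delta=0.6)
print(samples.shape)         # (500, 2): draws kept after burn-in
print(samples.mean(axis=0))  # should be close to [0, 0]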
from .source import importable, getname import tempfile def dump_source(object, **kwds): """write object source to a NamedTemporaryFile (instead of dill.dump) Loads with "import" or "dill.temp.load_source". Returns the filehandle. >>> f = lambda x: x**2 >>> pyfile = dill.temp.dump_source(f, alias='_f') >>> _f = dill.temp.load_source(pyfile) >>> _f(4) 16 >>> f = lambda x: x**2 >>> pyfile = dill.temp.dump_source(f, dir='.') >>> modulename = os.path.basename(pyfile.name).split('.py')[0] >>> exec('from %s import f as _f' % modulename) >>> _f(4) 16 Optional kwds: If 'alias' is specified, the object will be renamed to the given string. If 'prefix' is specified, the file name will begin with that prefix, otherwise a default prefix is used. If 'dir' is specified, the file will be created in that directory, otherwise a default directory is used. If 'text' is specified and true, the file is opened in text mode. Else (the default) the file is opened in binary mode. On some operating systems, this makes no difference. NOTE: Keep the return value for as long as you want your file to exist ! """ #XXX: write a "load_source"? kwds.pop('suffix', '') # this is *always* '.py' alias = kwds.pop('alias', '') #XXX: include an alias so a name is known name = str(alias) or getname(object) name = "\n#NAME: %s\n" % name #XXX: assumes kwds['dir'] is writable and on $PYTHONPATH file = tempfile.NamedTemporaryFile(suffix='.py', **kwds) file.write(b(''.join([importable(object, alias=alias),name]))) file.flush() return file
978ed048c875856a38c711a06fcbb063b8757023
35,686
async def remove_role_requirement(reaction_role: _ReactionRole, ctx: _Context, abort_text: str) -> _Tuple[bool, bool]: """ Returns: (success: bool, aborted: bool) """ role_requirement = await inquire_for_role_requirement_remove(ctx, reaction_role.role_requirements, abort_text) if role_requirement: role: _Role = ctx.guild.get_role(role_requirement.role_id) role_requirement_id = role_requirement.id await _utils.discord.reply(ctx, f'Removed role requirement (ID: {role_requirement_id}) for role \'{role.name}\' (ID: {role.id}).') return True, False return False, True
0daf6b243a685e993fc110e93cb1e4188208db93
35,687
def get_predicates(): """ I'm not quite sure how to best get at all the predicates and tag them as relations with id's """ """ results = GolrAssociationQuery( rows=0, facet_fields=['relation'] ).exec() facet_counts = results['facet_counts'] relations = facet_counts['relation'] return jsonify([{'id' : BiolinkTerm(c).curie(), 'name' : c, 'definition' : None} for key in relations]) """ # Not yet implemented... don't really know how return jsonify([])
2b0ec74aa91b099278c0fde7479dd90665897670
35,688
def study_dir(study_name: str) -> str: """(Deprecated) old name for storage directory Args: study_name: storage name Returns: Absolute path of storage directory Warnings: Deprecated in favor of :func:`storage_dir` """ return storage_dir(study_name)
7b0183fe16eea9b711fb023bd7c24c761f184885
35,689
import numpy as np


def to_adjacent_matrix(m, threshold=5e-3):
    """Given a numeric d x d matrix, convert it to a binary adjacency matrix.

    Args:
      m: d x d ndarray
      threshold: entries with absolute value above this are kept (Default value = 5e-3)

    Returns:
      d x d binary adjacency matrix
    """
    m_ = m.copy()
    # entries above the threshold become 1, everything else becomes 0
    m_[np.where(abs(m) > threshold)] = 1
    m_[np.where(abs(m) <= threshold)] = 0
    return m_
61349dddc057ce0f3f46157c2d89dc4d82dfdef2
35,690
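A small worked example of the thresholding above, assuming numpy as np.

import numpy as np

m = np.array([[0.0,   0.2,  0.001],
              [0.2,   0.0, -0.04 ],
              [0.001, -0.04, 0.0 ]])
print(to_adjacent_matrix(m, threshold=5e-3))
# [[0. 1. 0.]
#  [1. 0. 1.]
#  [0. 1. 0.]]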
import re def hour_min_to_sec(hm): """Convert string in format hh:mm to seconds""" h, m = re.match(r'(\d{1,2}):(\d{2})', hm).groups() return 3600 * (int(h) + float(m) / 60.)
4b186810c1c4fe6f9767b5e3914e8869168fffbf
35,691
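For illustration, a few calls to the helper above:

print(hour_min_to_sec("1:30"))   # 5400.0   (1 h 30 min)
print(hour_min_to_sec("00:45"))  # 2700.0
print(hour_min_to_sec("12:05"))  # 43500.0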
def create_target(hosts, name=None, comment=None): """ In short: Create a target. The client uses the create_target command to create a new target. """ if name is None: name = hosts root = etree.Element("create_target") tree_name = etree.SubElement(root, "name") tree_comment = etree.SubElement(root, "comment") tree_host = etree.SubElement(root, "hosts") tree_name.text = name if comment: tree_comment.text = comment tree_host.text = hosts return send_command_xml(root)
0f1fca078f9a13f9cf71e36b80dcb98211ae125b
35,693
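To show the payload create_target assembles before handing it to send_command_xml, here is a standalone sketch using the standard-library ElementTree (the original module may use lxml.etree instead; the hosts and comment values are placeholders).

from xml.etree import ElementTree as etree  # assumption: an ElementTree-compatible etree

root = etree.Element("create_target")
etree.SubElement(root, "name").text = "192.168.0.0/24"     # no explicit name: hosts doubles as the name
etree.SubElement(root, "comment").text = "lab scan"
etree.SubElement(root, "hosts").text = "192.168.0.0/24"
print(etree.tostring(root).decode())
# <create_target><name>192.168.0.0/24</name><comment>lab scan</comment><hosts>192.168.0.0/24</hosts></create_target>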
import json def get_slow_queries(**kwargs): """Simple function to construct and pass the args for a redis LRANGE query on the slow_queries log.""" pipe = redis.pipeline() resp = {} hash_key='SLOW_QUERIES' # minus 1 from the actual integer passed for row limit pipe.lrange(hash_key, 0, kwargs['row_limit']-1) data=pipe.execute()[0] resp['slow_queries'] = [json.loads(d) for d in data] return resp
27bbc3910da8055682de284e503dee6c0c6419bd
35,694
import numpy as np


def getEscInfo(esc_record):
  """Extracts ESC information from an ESC record.

  Args:
    esc_record: An ESC record (dict of schema |EscSensorRecord|).

  Returns:
    A tuple of:
      esc_point: A (longitude, latitude) tuple.
      esc_info: An |EscInformation| tuple.
  """
  esc_install_params = esc_record['installationParam']
  esc_point = (esc_install_params['longitude'], esc_install_params['latitude'])
  ant_pattern = esc_install_params['azimuthRadiationPattern']
  ant_pattern = sorted([(pat['angle'], pat['gain']) for pat in ant_pattern])
  angles, gains = zip(*ant_pattern)
  if angles != tuple(range(360)):
    raise ValueError('ESC pattern inconsistent')
  ant_gain_pattern = np.array(gains)
  # EscInformation is a namedtuple expected to be defined elsewhere in this module.
  esc_info = EscInformation(
      antenna_height=esc_install_params['height'],
      antenna_azimuth=esc_install_params['antennaAzimuth'],
      antenna_gain_pattern=ant_gain_pattern)
  return esc_point, esc_info
ad3e3adaf15ff9ba4cf7f34ec70afb24a9085f63
35,695
def crcremainder(data, key):
    """
    crcremainder Function

    Function to calculate the CRC remainder for a data bit-string and key.

    Contributing Author Credit:
    Shaurya Uppal
    Available from: geeksforgeeks.org

    Parameters
    ----------
    data:       string of bits
                The bit-string to be encoded.
    key:        string of bits
                Bit-string representing the CRC key (generator polynomial).

    Returns
    -------
    remainder:  string of bits
                Remainder of the modulo-2 division; appending it to `data`
                yields the encoded message.
    """
    # Define Sub-Functions
    def xor(a, b):
        # initialize result
        result = []

        # Traverse all bits, if bits are
        # same, then XOR is 0, else 1
        for i in range(1, len(b)):
            if a[i] == b[i]:
                result.append('0')
            else:
                result.append('1')

        return(''.join(result))

    # Performs Modulo-2 division
    def mod2div(dividend, divisor):
        # Number of bits to be XORed at a time.
        pick = len(divisor)

        # Slicing the dividend to appropriate
        # length for particular step
        tmp = dividend[0 : pick]

        while pick < len(dividend):
            if tmp[0] == '1':
                # replace the dividend by the result
                # of XOR and pull 1 bit down
                tmp = xor(divisor, tmp) + dividend[pick]
            else:
                # If the leftmost bit of the dividend (or the
                # part used in each step) is 0, the step cannot
                # use the regular divisor; we need to use an
                # all-0s divisor.
                tmp = xor('0'*pick, tmp) + dividend[pick]

            # increment pick to move further
            pick += 1

        # For the last n bits, we have to carry it out
        # normally as increased value of pick will cause
        # Index Out of Bounds.
        if tmp[0] == '1':
            tmp = xor(divisor, tmp)
        else:
            tmp = xor('0'*pick, tmp)

        checkword = tmp
        return(checkword)

    # Condition data
    data = str(data)
    # Condition Key
    key = str(key)
    l_key = len(key)

    # Appends n-1 zeroes at end of data
    appended_data = data + '0'*(l_key-1)
    remainder = mod2div(appended_data, key)
    return(remainder)
6c0e401014d1a26a80c14f26f6b43a126bc0c17e
35,696
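A worked example of the routine above, using the 4-bit key from the GeeksforGeeks write-up it credits:

data = "100100"
key = "1101"
r = crcremainder(data, key)
print(r)            # '001'
print(data + r)     # '100100001' -- the transmitted codeword
# At the receiver, dividing the codeword '100100001' by the key (without
# appending zeros) leaves remainder 0, which signals an error-free transmission.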
def test_method_nesting(server): """Test that we correctly nest namespaces""" def handler(message): return { "jsonrpc": "2.0", "result": True if message.params[0] == message.method else False, "id": 1, } server._handler = handler assert server.nest.testmethod("nest.testmethod") assert server.nest.testmethod.some.other.method( "nest.testmethod.some.other.method")
9c8264f357e0e958b94669ef82ee70b3efff7e8c
35,697
import numpy as np


def merge_nn_dicts(
    peaks, n_neighbors, peaks_in_chunk_idx_list, knn_indices_list, knn_distances_list
):
    """merge together peaks_in_chunk_idx_list and knn_indices_list to build final graph

    Args:
        peaks: array of peaks
        n_neighbors: number of neighbors per chunked kNN graph
        peaks_in_chunk_idx_list: list of per-chunk peak/spike index arrays
        knn_indices_list: list of per-chunk neighbor index arrays
        knn_distances_list: list of per-chunk neighbor distance arrays

    Returns:
        nn_index_array: (len(peaks), 2 * n_neighbors) array of neighbor indices
        nn_distance_array: (len(peaks), 2 * n_neighbors) array of neighbor distances
    """
    nn_index_array = np.zeros((len(peaks), n_neighbors * 2), dtype=int) - 1
    nn_distance_array = np.zeros((len(peaks), n_neighbors * 2), dtype=float)
    end_last = -1
    # for each nn graph
    for idxi, (peaks_in_chunk_idx, knn_indices, knn_distances) in enumerate(
        zip(peaks_in_chunk_idx_list, knn_indices_list, knn_distances_list)
    ):
        # put new neighbors in the first n_neighbors columns
        nn_index_array[
            peaks_in_chunk_idx[peaks_in_chunk_idx > end_last], :n_neighbors
        ] = knn_indices[peaks_in_chunk_idx > end_last]
        # put overlapping neighbors from the previous chunk in the last columns
        nn_index_array[
            peaks_in_chunk_idx[peaks_in_chunk_idx <= end_last], n_neighbors:
        ] = knn_indices[peaks_in_chunk_idx <= end_last]
        # repeat for distances
        nn_distance_array[
            peaks_in_chunk_idx[peaks_in_chunk_idx > end_last], :n_neighbors
        ] = knn_distances[peaks_in_chunk_idx > end_last]
        nn_distance_array[
            peaks_in_chunk_idx[peaks_in_chunk_idx <= end_last], n_neighbors:
        ] = knn_distances[peaks_in_chunk_idx <= end_last]

        # double up neighbors at the beginning, since we only sample these once
        if idxi == 0:
            nn_index_array[peaks_in_chunk_idx, n_neighbors:] = knn_indices
            nn_distance_array[peaks_in_chunk_idx, n_neighbors:] = knn_distances

        # double up neighbors at the end, since we only sample these once
        if idxi == len(peaks_in_chunk_idx_list) - 1:
            nn_index_array[
                peaks_in_chunk_idx[peaks_in_chunk_idx > end_last], n_neighbors:
            ] = knn_indices[peaks_in_chunk_idx > end_last]
            # repeat for distances
            nn_distance_array[
                peaks_in_chunk_idx[peaks_in_chunk_idx > end_last], n_neighbors:
            ] = knn_distances[peaks_in_chunk_idx > end_last]

        end_last = peaks_in_chunk_idx[-1]
    return nn_index_array, nn_distance_array
030ff04b74f532507945c2dfd6570836e6ece941
35,699
import io

from pydub import AudioSegment


def bytes_to_PCM_16bits(bytes, start=0, end=None):
    """
    Transform audio file to a format readable by scipy, i.e. uncompressed PCM 16-bits.
    Support transformation from any format supported by ffmpeg.
    """
    try:
        audiofile = AudioSegment.from_file(bytes)
    except Exception as e:
        raise Exception("Invalid audio file.") from e

    # Crop audio
    audiofile = audiofile[start*1000:]
    if end:
        audiofile = audiofile[:end*1000]

    # Apply desired preprocessing
    audiofile = audiofile.set_sample_width(2)  # set to 16-bits
    # audiofile.strip_silence

    # Return the results as bytes without writing to disk
    # ref: https://github.com/jiaaro/pydub/issues/270
    buf = io.BytesIO()
    audiofile.export(buf, format="wav")
    return buf.getvalue()
cac297a610b90a3459542def406cd62f58dd86bd
35,700
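A usage sketch for the converter above; it assumes pydub plus an ffmpeg install, and the filename is a placeholder.

import io

from scipy.io import wavfile

# "speech.mp3" is a placeholder path; any ffmpeg-readable file works.
with open("speech.mp3", "rb") as f:
    wav_bytes = bytes_to_PCM_16bits(f, start=0, end=30)   # keep the first 30 s

# The returned bytes are a 16-bit PCM WAV container, so scipy can read them directly.
rate, samples = wavfile.read(io.BytesIO(wav_bytes))
print(rate, samples.dtype, samples.shape)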
import keras from keras.datasets import cifar10 from keras.utils import np_utils def data_cifar10(train_start=0, train_end=50000, test_start=0, test_end=10000): """ Preprocess CIFAR10 dataset :return: """ global keras if keras is None: # These values are specific to CIFAR10 img_rows = 32 img_cols = 32 nb_classes = 10 # the data, shuffled and split between train and test sets (x_train, y_train), (x_test, y_test) = cifar10.load_data() if keras.backend.image_dim_ordering() == 'th': x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols) x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols) else: x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3) x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3) x_train = x_train.astype('float32') x_test = x_test.astype('float32') x_train /= 255 x_test /= 255 print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') # convert class vectors to binary class matrices y_train = np_utils.to_categorical(y_train, nb_classes) y_test = np_utils.to_categorical(y_test, nb_classes) x_train = x_train[train_start:train_end, :, :, :] y_train = y_train[train_start:train_end, :] x_test = x_test[test_start:test_end, :] y_test = y_test[test_start:test_end, :] return x_train, y_train, x_test, y_test
1a23e78aa1cb873f18b31df616f467cea8db3f5e
35,702
def create_log_group_arn(logs_client, hosted_zone_test_name): """Return ARN of a newly created CloudWatch log group.""" log_group_name = f"/aws/route53/{hosted_zone_test_name}" response = logs_client.create_log_group(logGroupName=log_group_name) assert response["ResponseMetadata"]["HTTPStatusCode"] == 200 log_group_arn = None response = logs_client.describe_log_groups() for entry in response["logGroups"]: if entry["logGroupName"] == log_group_name: log_group_arn = entry["arn"] break return log_group_arn
79113cc7c9ac844c4d38cd1780b38d14a112b40d
35,703
def _make_filetags(attributes, default_filetag = None): """Helper function for rendering RPM spec file tags, like ``` %attr(0755, root, root) %dir ``` """ template = "%attr({mode}, {user}, {group}) {supplied_filetag}" mode = attributes.get("mode", "-") user = attributes.get("user", "-") group = attributes.get("group", "-") supplied_filetag = attributes.get("rpm_filetag", default_filetag) return template.format( mode = mode, user = user, group = group, supplied_filetag = supplied_filetag or "", )
56898ec1fc974721150b7e1055be3ab3782754a2
35,704
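The helper above is Starlark (rules_pkg-style), but its syntax is Python-compatible, so the rendering can be illustrated directly; the attribute values below are placeholders.

print(_make_filetags({"mode": "0755", "user": "root", "group": "root"}, "%dir"))
# %attr(0755, root, root) %dir

print(_make_filetags({"user": "webadmin"}))
# "%attr(-, webadmin, -) "   (unset fields fall back to "-"; trailing space when no filetag is given)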
from typing import Union


def button_callback(update: Update, context: CallbackContext) -> Union[None, str]:
    """Callback query handler"""
    query = update.callback_query
    query.answer()
    message = query.data.split()
    return callbacks[message[0]](update=update, context=context, query=query, message=message)
0c9e04f29fd931288cdb0d4cab0c732b03fd8fa0
35,705
def has_banking_permission(data):
    """Checks to see if the user has the correct permission. Based on Castorr91's Gamble"""
    if not Parent.HasPermission(data.User, CESettings.BankingPermissions, CESettings.BankingPermissionInfo):
        message = CESettings.PermissionResp.format(data.UserName, CESettings.BankingPermission, CESettings.BankingPermissionInfo)
        SendResp(data, CESettings.Usage, message)
        return False
    return True
39dcb62c36e2f89a51503902db183a4c128128e6
35,706
from datetime import datetime


def calc_temps(start='start_date'):
    """min, max, avg temp for dates on or after the given start date (YYYY-MM-DD)"""
    # parse the start date supplied by the route instead of a hard-coded value
    start_date = datetime.strptime(start, '%Y-%m-%d').date()

    start_results = session.query(func.max(Measurements.tobs),
                                  func.min(Measurements.tobs),
                                  func.avg(Measurements.tobs)).\
        filter(Measurements.date >= start_date)

    start_tobs = []
    for tobs in start_results:
        tobs_dict = {}
        tobs_dict["TAVG"] = float(tobs[2])
        tobs_dict["TMAX"] = float(tobs[0])
        tobs_dict["TMIN"] = float(tobs[1])
        start_tobs.append(tobs_dict)

    return jsonify(start_tobs)
78aaa7721c7f0d83843d85af55d2262faad69cdc
35,707
def reindex(tensor, shape, displacement=.5): """ Re-color the given tensor, by sampling along one axis at a specified frequency. .. image:: images/reindex.jpg :width: 1024 :height: 256 :alt: Noisemaker example output (CC0) :param Tensor tensor: An image tensor. :param list[int] shape: :param float displacement: :return: Tensor """ height, width, channels = shape reference = value_map(tensor, shape) mod = min(height, width) x_offset = tf.cast((reference * displacement * mod + reference) % width, tf.int32) y_offset = tf.cast((reference * displacement * mod + reference) % height, tf.int32) tensor = tf.gather_nd(tensor, tf.stack([y_offset, x_offset], 2)) return tensor
a9aa0e323b7e2ee4f06e4f6a44fecb0c57e1acb7
35,708
def test_dal_getitem_access(): """ Verify `__getitem__` is identical to `__getattr__` """ expected = "foo" class SampleService(Service): def sample_method(self): return expected dm = _BaseDataManager() dm.register_services(sample=SampleService()) with dm.context() as ctx: assert ctx.dal["sample.sample_method"]() == expected
8202385241a8ec7f4c4634b644dd8f459f5642cf
35,709
import numpy as np


def seq_to_matrix(seq, letter_to_index=None):
    """Convert a list of characters to an N by 4 integer matrix

    Parameters
    ----------
    seq : list
        list of characters A,C,G,T,-,=; a heterozygous locus is separated by a semicolon.
        '-' is filler for heterozygous indels; '=' is homozygous deletions
    letter_to_index : dict or None
        dictionary mapping DNA/RNA/amino-acid letters to array index

    Returns
    -------
    seq_mat : numpy.array
        numpy matrix for the sequences
    """
    if letter_to_index is None:
        letter_to_index = {'A': 0, 'C': 1, 'G': 2, 'T': 3, 'U': 3}
    mat_len = len(seq)
    mat = np.zeros((mat_len, 4))
    n_letter = len(seq)
    for i in range(n_letter):
        try:
            letter = seq[i]
            if letter in letter_to_index:
                mat[i, letter_to_index[letter]] += 1
            elif letter == 'N':
                # 'N' (unknown base) spreads equal weight over all four columns
                mat[i, :] += 0.25
        except KeyError:
            print(i, letter, seq[i])
    return mat
f43e406bc8efecc0b00ec7a22b57ebee4e16efc2
35,710
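A quick illustration of the one-hot encoding above, assuming numpy as np:

print(seq_to_matrix(list("ACGTN")))
# [[1.   0.   0.   0.  ]
#  [0.   1.   0.   0.  ]
#  [0.   0.   1.   0.  ]
#  [0.   0.   0.   1.  ]
#  [0.25 0.25 0.25 0.25]]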
def plot( title: str, body_system: Body_System, x_start: float = -1, x_end: float = 1, y_start: float = -1, y_end: float = 1,) -> None: """ Utility function to plot how the given body-system evolves over time. No doctest provided since this function does not have a return value. """ INTERVAL = 20 DELTA_TIME = INTERVAL / 1000 fig = plt.figure() fig.canvas.set_window_title(title) ax = plt.axes( xlim = (x_start, x_end), ylim =(y_start, y_end) ) plt.gca().set_aspect("equal") patches = [ plt.Circle((body.position_x, body.position_y), body.size, fc = body.color) for body in body_system.bodies ] for patch in patches: ax.add_patch(patch) def update(frame: int) -> list[plt.Circle]: update_step(body_system, DELTA_TIME, patches) return patches """ anim = animation.FuncAnimation( fig, update, interval = INTERVAL, blit = True ) """ plt.show()
33cdeee3be9625c82643d3cefa75a5d554b0f54e
35,712
import numpy as np

from caffe2.python import workspace, core


def GetGPUMemoryUsageStats():
    """Get GPU memory usage stats from CUDAContext/HIPContext. This requires
    flag --caffe2_gpu_memory_tracking to be enabled"""
    workspace.RunOperatorOnce(
        core.CreateOperator(
            "GetGPUMemoryUsage",
            [],
            ["____mem____"],
            device_option=core.DeviceOption(workspace.GpuDeviceType, 0),
        ),
    )
    b = workspace.FetchBlob("____mem____")
    return {
        'total_by_gpu': b[0, :],
        'max_by_gpu': b[1, :],
        'total': np.sum(b[0, :]),
        'max_total': np.sum(b[1, :])
    }
84cd6d5b4786f666bfa756addf4d3010af6737f9
35,713
import typing def from_jsonable(return_type: typing.Any, obj: typing.Any) -> typing.Any: """Return an instance of the specified 'return_type' that has been constructed based on the specified 'obj', which is a composition of python objects as would result from JSON deserialization by the 'json' module. """ return gencodeutil.from_jsonable(return_type, obj, _name_mappings, _class_by_name)
6e7df09a53c116fe48cabb6e18587f297640e2cc
35,714
def _convert_symbol(op_name, inputs, attrs, identity_list=None, convert_map=None): """Convert from mxnet op to nnvm op. The converter must specify some conversions explicitly to support gluon format ops such as conv2d... Parameters ---------- op_name : str Operator name, such as Convolution, FullyConnected inputs : list of nnvm.Symbol List of input symbols. attrs : dict Dict of operator attributes identity_list : list List of operators that don't require conversion convert_map : dict Dict of name : callable, where name is the op's name that require conversion to nnvm, callable are functions which take attrs and return (new_op_name, new_attrs) Returns ------- sym : nnvm.Symbol Converted nnvm Symbol """ identity_list = identity_list if identity_list else _identity_list convert_map = convert_map if convert_map else _convert_map if op_name in identity_list: op = _get_nnvm_op(op_name) sym = op(*inputs, **attrs) elif op_name in convert_map: sym = convert_map[op_name](inputs, attrs) else: _raise_not_supported('Operator: ' + op_name) return sym
bf525a15b131369cb5f77d4681cab374afc1287c
35,715
from typing import List from typing import Union def non_parametric_double_ml_learner(df: pd.DataFrame, feature_columns: List[str], treatment_column: str, outcome_column: str, debias_model: Union[RegressorMixin, None] = None, debias_feature_columns: List[str] = None, denoise_model: Union[RegressorMixin, None] = None, denoise_feature_columns: List[str] = None, final_model: Union[RegressorMixin, None] = None, final_model_feature_columns: List[str] = None, prediction_column: str = "prediction", cv_splits: int = 2, encode_extra_cols: bool = True) -> LearnerReturnType: """ Fits an Non-Parametric Double/ML Meta Learner for Conditional Average Treatment Effect Estimation. It implements the following steps: 1) fits k instances of the debias model to predict the treatment from the features and get out-of-fold residuals t_res=t-t_hat; 2) fits k instances of the denoise model to predict the outcome from the features and get out-of-fold residuals y_res=y-y_hat; 3) fits a final ML model to predict y_res / t_res from the features using weighted regression with weights set to t_res^2. Trained like this, the final model will output treatment effect predictions. Parameters ---------- df : pandas.DataFrame A Pandas' DataFrame with features, treatment and target columns. The model will be trained to predict the target column from the features. feature_columns : list of str A list os column names that are used as features for the denoise, debias and final models in double-ml. All this names should be in `df`. treatment_column : str The name of the column in `df` that should be used as treatment for the double-ml model. It will learn the impact of this column with respect to the outcome column. outcome_column : str The name of the column in `df` that should be used as outcome for the double-ml model. It will learn the impact of the treatment column on this outcome column. debias_model : RegressorMixin (default None) The estimator for fitting the treatment from the features. Must implement fit and predict methods. It can be an scikit-learn regressor. When None, defaults to GradientBoostingRegressor. debias_feature_columns : list of str (default None) A list os column names to be used only for the debias model. If not None, it will replace feature_columns when fitting the debias model. denoise_model : RegressorMixin (default None) The estimator for fitting the outcome from the features. Must implement fit and predict methods. It can be an scikit-learn regressor. When None, defaults to GradientBoostingRegressor. denoise_feature_columns : list of str (default None) A list os column names to be used only for the denoise model. If not None, it will replace feature_columns when fitting the denoise model. final_model : RegressorMixin (default None) The estimator for fitting the outcome residuals from the treatment residuals. Must implement fit and predict methods. It can be an arbitrary scikit-learn regressor. The fit method must accept sample_weight as a keyword argument. When None, defaults to GradientBoostingRegressor. final_model_feature_columns : list of str (default None) A list os column names to be used only for the final model. If not None, it will replace feature_columns when fitting the final model. prediction_column : str (default "prediction") The name of the column with the treatment effect predictions from the final model. 
cv_splits : int (default 2) Number of folds to split the training data when fitting the debias and denoise models encode_extra_cols : bool (default: True) If True, treats all columns in `df` with name pattern fklearn_feat__col==val` as feature columns. """ features = feature_columns if not encode_extra_cols else expand_features_encoded(df, feature_columns) debias_model = GradientBoostingRegressor() if debias_model is None else clone(debias_model, safe=False) denoise_model = GradientBoostingRegressor() if denoise_model is None else clone(denoise_model, safe=False) final_model = GradientBoostingRegressor() if final_model is None else clone(final_model, safe=False) t_hat, mts = _cv_estimate(debias_model, df, features if debias_feature_columns is None else debias_feature_columns, treatment_column, cv_splits) y_hat, mys = _cv_estimate(denoise_model, df, features if denoise_feature_columns is None else denoise_feature_columns, outcome_column, cv_splits) y_res = df[outcome_column] - y_hat t_res = df[treatment_column] - t_hat final_target = y_res / t_res weights = t_res ** 2 final_model_x = features if final_model_feature_columns is None else final_model_feature_columns model_final_fitted = final_model.fit(X=df[final_model_x], y=final_target, sample_weight=weights) def p(new_df: pd.DataFrame) -> pd.DataFrame: return new_df.assign(**{prediction_column: model_final_fitted.predict(new_df[final_model_x].values)}) p.__doc__ = learner_pred_fn_docstring("non_parametric_double_ml_learner") log = {'non_parametric_double_ml_learner': { 'features': feature_columns, 'debias_feature_columns': debias_feature_columns, 'denoise_feature_columns': denoise_feature_columns, 'final_model_feature_columns': final_model_feature_columns, 'outcome_column': outcome_column, 'treatment_column': treatment_column, 'prediction_column': prediction_column, 'package': "sklearn", 'package_version': sk_version, 'feature_importance': None, 'training_samples': len(df)}, 'debias_models': mts, 'denoise_models': mys, 'cv_splits': cv_splits, 'object': model_final_fitted} return p, p(df), log
0a78944ecf81d5d40114a517dbae587bfc5bb13c
35,716
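An end-to-end sketch of calling the learner above; the column names and the synthetic data-generating process are illustrative only, and the defaults (GradientBoostingRegressor for all three models) are used.

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
n = 5000
x1, x2 = rng.normal(size=n), rng.normal(size=n)
t = x1 + rng.normal(size=n)                 # continuous treatment, confounded by x1
y = (1 + x2) * t + x1 + rng.normal(size=n)  # true CATE is 1 + x2

df = pd.DataFrame({"x1": x1, "x2": x2, "t": t, "y": y})

predict_fn, scored_train, logs = non_parametric_double_ml_learner(
    df,
    feature_columns=["x1", "x2"],
    treatment_column="t",
    outcome_column="y",
    cv_splits=3,
)

new_df = df.sample(5, random_state=1)
print(predict_fn(new_df)[["x2", "prediction"]])  # predictions should roughly track 1 + x2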
def convert_image_cv2_to_cv(image, depth=cv.IPL_DEPTH_8U, channels=1): """ Converts an OpenCV2 wrapper of an image to an OpenCV1 wrapper. Source: http://stackoverflow.com/a/17170855 Args: image: image used by OpenCV2 depth: depth of each channel of the image channels: number of channels Returns: OpenCV wrapper of the image passed as an argument """ img_ipl = cv.CreateImageHeader((image.shape[1], image.shape[0]), depth, channels) cv.SetData(img_ipl, image.tostring(), image.dtype.itemsize * channels * image.shape[1]) return img_ipl
9a803eedadbb1082d8614f823b6d4063772fdaa3
35,717
def split_game_path(path): """Split a game path into individual components.""" # filter out empty parts that are caused by double slashes return [p for p in path.split('/') if p]
9b939058aa7f8b3371d3e37b0252a5a01dba4e7b
35,719
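For example:

print(split_game_path("lobby//games/chess/"))   # ['lobby', 'games', 'chess']
print(split_game_path("/"))                     # []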
def create_complete_dag(workbench: Workbench) -> nx.DiGraph: """creates a complete graph out of the project workbench""" dag_graph = nx.DiGraph() for node_id, node in workbench.items(): dag_graph.add_node( node_id, name=node.label, key=node.key, version=node.version, inputs=node.inputs, run_hash=node.run_hash, outputs=node.outputs, state=node.state.current_status, ) for input_node_id in node.input_nodes: predecessor_node = workbench.get(str(input_node_id)) if predecessor_node: dag_graph.add_edge(str(input_node_id), node_id) return dag_graph
026cb6ab7e168f6c4f1d84e7ae574d4b62bbbfe2
35,721
import csv

import numpy as np
from PIL import Image


def read_single_disk(image_id):
    """ Reads a single image and its label from disk.
        Parameters:
        ---------------
        image_id    integer unique ID for image

        Returns:
        ----------
        image       image array, (32, 32, 3) read from disk
        label       associated meta data, int label
    """
    # disk_dir is a pathlib.Path pointing at the image store, defined at module level
    image = np.array(Image.open(disk_dir / f"{image_id}.png"))

    with open(disk_dir / f"{image_id}.csv", "r") as csvfile:
        reader = csv.reader(
            csvfile, delimiter=" ", quotechar="|", quoting=csv.QUOTE_MINIMAL
        )
        label = int(next(reader)[0])

    return image, label
094b8b98fba85a224d613efc7623ed497129bfc7
35,723
import click


def try_get_balance(agent_config: AgentConfig, wallet: Wallet, type_: str) -> int:
    """
    Try to get wallet balance.

    :param agent_config: agent config object.
    :param wallet: wallet object.
    :param type_: type of ledger API.
    :return: token balance.
    """
    try:
        if type_ not in DEFAULT_LEDGER_CONFIGS:  # pragma: no cover
            raise ValueError("No ledger api config for {} available.".format(type_))
        address = wallet.addresses[type_]
        balance = LedgerApis.get_balance(type_, address)
        if balance is None:  # pragma: no cover
            raise ValueError("No balance returned!")
        return balance
    except (AssertionError, ValueError) as e:  # pragma: no cover
        raise click.ClickException(str(e))
5cf32708c493b54a4950eee01144259be3be481d
35,724
def test_self_iat_hook_success(): """Test hook success in single(self) thread""" pythondll_mod = [m for m in windows.current_process.peb.modules if m.name.startswith("python") and m.name.endswith(".dll")][0] RegOpenKeyEx = [n for n in pythondll_mod.pe.imports['advapi32.dll'] if n.name == function_to_hook][0] hook_value = [] @callback_type def open_reg_hook(hKey, lpSubKey, ulOptions, samDesired, phkResult, real_function): hook_value.append((hKey, lpSubKey.value)) phkResult[0] = 12345678 return 0 x = RegOpenKeyEx.set_hook(open_reg_hook) open_args = (0x12345678, "MY_KEY_VALUE") k = winreg.OpenKey(*open_args) assert k.handle == 12345678 assert hook_value[0] == open_args # Remove the hook x.disable()
682a8c251d0dee3e923f92bdfe0b4402d69c6b69
35,725