Dataset columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, range 0 to 3.66M).
def _select_by_property(peak_properties, pmin, pmax): """ Evaluate where the generic property of peaks conforms to an interval. Parameters ---------- peak_properties : ndarray An array with properties for each peak. pmin : None or number or ndarray Lower interval boundary for `peak_properties`. ``None`` is interpreted as an open border. pmax : None or number or ndarray Upper interval boundary for `peak_properties`. ``None`` is interpreted as an open border. Returns ------- keep : bool A boolean mask evaluating to true where `peak_properties` conforms to the interval. See Also -------- find_peaks Notes ----- .. versionadded:: 1.1.0 """ keep = np.ones(peak_properties.size, dtype=bool) if pmin is not None: keep &= (pmin <= peak_properties) if pmax is not None: keep &= (peak_properties <= pmax) return keep
12fda9525334d8a2a50e1a4785587dbbb0a70f00
19,855
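The `_select_by_property` snippet above is SciPy-style peak filtering; here is a minimal, self-contained usage sketch. The prominence values are made up for illustration, and only NumPy is assumed.

```python
import numpy as np

def _select_by_property(peak_properties, pmin, pmax):
    # Boolean mask: True where the property lies inside the (optionally open) interval.
    keep = np.ones(peak_properties.size, dtype=bool)
    if pmin is not None:
        keep &= (pmin <= peak_properties)
    if pmax is not None:
        keep &= (peak_properties <= pmax)
    return keep

# Hypothetical peak prominences; keep only peaks with prominence in [1.0, 3.0].
prominences = np.array([0.5, 1.2, 2.7, 3.4])
print(_select_by_property(prominences, 1.0, 3.0))  # [False  True  True False]
```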
def from_period_type_name(period_type_name: str) -> PeriodType: """ Safely get Period Type from its name. :param period_type_name: Name of the period type. :return: Period type enum. """ period_type_values = [item.value for item in PeriodType] if period_type_name.lower() not in period_type_values: raise AttributeError(f"Non-existent period type {period_type_name}, supported types: {period_type_values}") else: return PeriodType(period_type_name.lower())
97feb3bd1f18c1752ba4510628411f23ea77acb1
19,856
import random import string def randomInt(length=4, seed=None): """ Returns random integer value with provided number of digits >>> random.seed(0) >>> randomInt(6) 874254 """ if seed is not None: _ = getCurrentThreadData().random _.seed(seed) choice = _.choice else: choice = random.choice return int("".join(choice(string.digits if _ != 0 else string.digits.replace('0', '')) for _ in xrange(0, length)))
3d37b6410337271c6798cb1b3542189fcdd04226
19,857
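The `randomInt` snippet above depends on sqlmap internals (`getCurrentThreadData`) and Python 2's `xrange`. Below is a rough standard-library-only sketch of the same idea, with hypothetical names of my own, generating an n-digit integer whose leading digit is never zero.

```python
import random
import string

def random_int(length=4, rng=random):
    # First digit drawn from 1-9 so the number keeps its full width; the rest from 0-9.
    digits = [rng.choice(string.digits.replace('0', ''))]
    digits += [rng.choice(string.digits) for _ in range(length - 1)]
    return int("".join(digits))

random.seed(0)
print(random_int(6))  # a 6-digit integer; the exact value depends on the RNG state
```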
import re def matchatleastone(text, regexes): """Returns a list of strings that match at least one of the regexes.""" finalregex = "|".join(regexes) result = re.findall(finalregex, text) return result
1e0775413189931fc48a3dc82c23f0ffe28b333e
19,858
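A short, self-contained demonstration of the regex-union approach in `matchatleastone` above; the sample patterns and text are hypothetical.

```python
import re

def matchatleastone(text, regexes):
    # Join the patterns with '|' so one findall pass returns matches for any of them.
    return re.findall("|".join(regexes), text)

log = "error 404 on /index, warning 301 on /home"
print(matchatleastone(log, [r"error \d+", r"warning \d+"]))
# ['error 404', 'warning 301']
```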
def safe_string_equals(a, b): """ Near-constant time string comparison. Used in order to avoid timing attacks on sensitive information such as secret keys during request verification (`rootLabs`_). .. _`rootLabs`: http://rdist.root.org/2010/01/07/timing-independent-array-comparison/ """ if len(a) != len(b): return False result = 0 for x, y in zip(a, b): result |= ord(x) ^ ord(y) return result == 0
6253b747061dfdc82a533b103009f0ab469a76ef
19,859
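A small usage sketch of the constant-time comparison in `safe_string_equals` above. It assumes both arguments are text strings; in production code the standard library's `hmac.compare_digest` serves the same purpose.

```python
def safe_string_equals(a, b):
    # XOR each character pair and OR the results: any mismatch leaves `result` non-zero,
    # but the loop always covers the full length, so timing does not reveal where it differs.
    if len(a) != len(b):
        return False
    result = 0
    for x, y in zip(a, b):
        result |= ord(x) ^ ord(y)
    return result == 0

print(safe_string_equals("s3cret", "s3cret"))  # True
print(safe_string_equals("s3cret", "s3cr3t"))  # False
```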
def DFS_complete(g): """Perform DFS for entire graph and return forest as a dictionary. forest maps each vertex v to the edge that was used to discover it. (Vertices that are roots of a DFS tree are mapped to None.) :param g: a Graph class object :type g: Graph :return: A tuple of dicts summarizing the clusters of the input graph. The second returned value, that which is of interest in this project, is a dict where a key is a discovery vertex of a cluster and its corresponding value is the list of vertices in its cluster. :rtype: tuple """ forest = {} clusters = {} for u in g.vertices(): if u not in forest: forest[u] = None # u will be the root of a tree cluster = [u] DFS(g, u, forest, cluster) clusters[u] = cluster return forest, clusters
7cf500d204b70cbcb9cedf33dda42cb2b717e162
19,860
def get_keys(mapping, *keys): """Return the values corresponding to the given keys, in order.""" return (mapping[k] for k in keys)
e3b8bdbdff47c428e4618bd4ca03c7179b9f4a2b
19,861
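The generator returned by `get_keys` above unpacks naturally into variables; a tiny illustration with a hypothetical mapping:

```python
def get_keys(mapping, *keys):
    # Lazily yield the values for the requested keys, in the order the keys were given.
    return (mapping[k] for k in keys)

config = {"host": "localhost", "port": 8080, "debug": True}
host, port = get_keys(config, "host", "port")
print(host, port)  # localhost 8080
```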
def accents_dewinize(text): """Replace Win1252 symbols with ASCII chars or sequences needed when copying code parts from MS Office, like Word... From the book "Fluent Python" by Luciano Ramalho (O'Reilly, 2015) >>> accents_dewinize('“Stupid word • error inside™ ”') '"Stupid word - error inside(TM) "' """ return sanitize.dewinize(text)
d34a4cd694b4d713ac7b207680e26d7c79f2b957
19,862
def spiral_trajectory(base_resolution, spiral_arms, field_of_view, max_grad_ampl, min_rise_time, dwell_time, views=1, phases=None, ordering='linear', angle_range='full', tiny_number=7, readout_os=2.0, gradient_delay=0.0, larmor_const=42.577478518, vd_inner_cutoff=1.0, vd_outer_cutoff=1.0, vd_outer_density=1.0, vd_type='linear'): """Calculate a spiral trajectory. Args: base_resolution: An `int`. The base resolution, or number of pixels in the readout dimension. spiral_arms: An `int`. The number of spiral arms that a fully sampled k-space should be divided into. field_of_view: A `float`. The field of view, in mm. max_grad_ampl: A `float`. The maximum allowed gradient amplitude, in mT/m. min_rise_time: A `float`. The minimum allowed rise time, in us/(mT/m). dwell_time: A `float`. The digitiser's real dwell time, in us. This does not include oversampling. The effective dwell time (with oversampling) is equal to `dwell_time * readout_os`. views: An `int`. The number of radial views per phase. phases: An `int`. The number of phases for cine acquisitions. If `None`, this is assumed to be a non-cine acquisition with no time dimension. ordering: A `string`. The ordering type. Must be one of: `{'linear', 'golden', 'tiny', 'sorted'}`. angle_range: A `string`. The range of the rotation angle. Must be one of: `{'full', 'half'}`. If `angle_range` is `'full'`, the full circle/sphere is included in the range. If `angle_range` is `'half'`, only a semicircle/hemisphere is included. tiny_number: An `int`. The tiny golden angle number. Only used if `ordering` is `'tiny'` or `'tiny_half'`. Must be >= 2. Defaults to 7. readout_os: A `float`. The readout oversampling factor. Defaults to 2.0. gradient_delay: A `float`. The system's gradient delay relative to the ADC, in us. Defaults to 0.0. larmor_const: A `float`. The Larmor constant of the imaging nucleus, in MHz/T. Defaults to 42.577478518 (the Larmor constant of the 1H nucleus). vd_inner_cutoff: Defines the inner, high-density portion of *k*-space. Must be between 0.0 and 1.0, where 0.0 is the center of *k*-space and 1.0 is the edge. Between 0.0 and `vd_inner_cutoff`, *k*-space will be sampled at the Nyquist rate. vd_outer_cutoff: Defines the outer, low-density portion of *k*-space. Must be between 0.0 and 1.0, where 0.0 is the center of *k*-space and 1.0 is the edge. Between `vd_outer_cutoff` and 1.0, *k*-space will be sampled at a rate `vd_outer_density` times the Nyquist rate. vd_outer_density: Defines the sampling density in the outer portion of *k*-space. Must be > 0.0. Higher means more densely sampled. Multiplies the Nyquist rate: 1.0 means sampling at the Nyquist rate, < 1.0 means undersampled and > 1.0 means oversampled. vd_type: Defines the rate of variation of the sampling density the variable-density portion of *k*-space, i.e., between `vd_inner_cutoff` and `vd_outer_cutoff`. Must be one of `'linear'`, `'quadratic'` or `'hanning'`. Returns: A `Tensor` of type `float32` and shape `[views, samples, 2]` if `phases` is `None`, or of shape `[phases, views, samples, 2]` if `phases` is not `None`. `samples` is equal to `base_resolution * readout_os`. The units are radians/voxel, ie, values are in the range `[-pi, pi]`. References: .. [1] Pipe, J.G. and Zwart, N.R. (2014), Spiral trajectory design: A flexible numerical algorithm and base analytical equations. Magn. Reson. Med, 71: 278-285. 
https://doi.org/10.1002/mrm.24675 """ return _kspace_trajectory('spiral', {'base_resolution': base_resolution, 'spiral_arms': spiral_arms, 'field_of_view': field_of_view, 'max_grad_ampl': max_grad_ampl, 'min_rise_time': min_rise_time, 'dwell_time': dwell_time, 'readout_os': readout_os, 'gradient_delay': gradient_delay, 'larmor_const': larmor_const, 'vd_inner_cutoff': vd_inner_cutoff, 'vd_outer_cutoff': vd_outer_cutoff, 'vd_outer_density': vd_outer_density, 'vd_type': vd_type}, views=views, phases=phases, ordering=ordering, angle_range=angle_range, tiny_number=tiny_number)
b7acf7a14835e63e1f2524a5ac7dd67d16a4eba7
19,863
def total_examples(X): """Counts the total number of examples of a sharded and sliced data object X.""" count = 0 for i in range(len(X)): for j in range(len(X[i])): count += len(X[i][j]) return count
faf42a940e4413405d97610858e13496eb848eae
19,864
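A minimal check of `total_examples` above on a small, hypothetical sharded-and-sliced object (a list of shards, each a list of slices, each a list of examples):

```python
def total_examples(X):
    # Sum the lengths of every slice in every shard.
    count = 0
    for i in range(len(X)):
        for j in range(len(X[i])):
            count += len(X[i][j])
    return count

X = [[[1, 2, 3], [4]], [[5, 6]]]  # 2 shards, 3 slices, 6 examples in total
print(total_examples(X))  # 6
```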
def convert_and_save(model, input_shape, weights=True, quiet=True, ignore_tests=False, input_range=None, filename=None, directory=None): """ Conversion between PyTorch and Keras, and automatic save (Conversions from Keras to PyTorch aren't implemented) Arguments: -model: A Keras or PyTorch model or layer to convert -input_shape: Input shape (list, tuple or int), without batch size. -weights (bool): Also convert weights. If set to false, only convert model architecture -quiet (bool): Whether a progress bar and some messages should appear -ignore_tests (bool): Whether tests should be ignored. If set to True, converted model will still be tested as a precaution. If models are not identical, it will only print a warning. If set to False, and models are not identical, RuntimeWarning will be raised If weights is False, tests are automatically ignored -input_range: Optional. A list of 2 elements containing max and min values to give as input to the model when performing the tests. If None, models will be tested on samples from the "standard normal" distribution. -filename: Filename to give to model's hdf5 file. If filename is not None and save is not False, then save will automatically be set to True -directory: Where to save model's hdf5 file. If directory is not None and save is not False, then save will automatically be set to True Returns: Name of created hdf5 file """ return convert(model=model, input_shape=input_shape, weights=weights, quiet=quiet, ignore_tests=ignore_tests, input_range=input_range, save=True, filename=filename, directory=directory)
00305f6d9a163b61a963e04e810a8d3808403d23
19,865
def no_afni(): """ Checks if AFNI is available """ if Info.version() is None: return True return False
fc10292bc69ca5996a76227c3bbbd5855eb2520e
19,866
def delta_eta_plot_projection_range_string(inclusive_analysis: "correlations.Correlations") -> str: """ Provides a string that describes the delta phi projection range for delta eta plots. """ # The limit is almost certainly a multiple of pi, so we try to express it more naturally # as a value like pi/2 or 3*pi/2 value = _find_pi_coefficient(value = inclusive_analysis.near_side_phi_region.max) return labels.make_valid_latex_string( fr"$|\Delta\varphi|<{value}$" )
3b36d65a3223ca2989ad9607fc2b15b298b1c709
19,867
def has_content_in(page, language): """Filter that returns ``True`` if the page has any content in a particular language. :param page: the current page :param language: the language you want to look at """ if page is None: return False return Content.objects.filter(page=page, language=language).count() > 0
6207583ad110aa098b5f556ad7a13b1b5218a1d3
19,871
def public_encrypt(key, data, oaep): """ public key encryption using rsa with oaep or pkcs1-v1.5 padding. returns the base64-encoded encrypted data data: the data to be encrypted, bytes key: pem-formatted key string or bytes oaep: whether to use oaep padding or not """ if isinstance(key, str): key = key.encode("ascii") pubkey = load_public_key(key) if oaep: encrypted = rsa_oaep_encrypt(pubkey, data) else: encrypted = rsa_pkcs1v15_encrypt(pubkey, data) return b64encode(encrypted).decode("ascii")
7310a0d408deff30efad2c961518261187a89dbf
19,872
def newton_method(f, x_init = 0, epsilon = 1e-10): """ Newton Raphson Optimizer ... Parameters --- f: Function to calculate root for x_init(optional) : initial value of x epsilon(optional): Adjustable precision Returns --- x: Value of root """ prev_value = x_init + 2 * epsilon value = x_init iterations = 0 while abs(prev_value - value) > epsilon: prev_value = value f_dash = derivative(f, value) value = value - f(value) / f_dash iterations += 1 print(f"Newton Method converged in {iterations} iterations") return value
5801d5f908e30551c321eaf0ec8dfbf42869e005
19,873
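The `newton_method` optimizer above relies on an external `derivative` helper. Below is a self-contained sketch that substitutes a simple central-difference derivative (the step size and helper name are my own choices) and finds the root of x**2 - 2.

```python
def derivative(f, x, h=1e-6):
    # Central finite difference; adequate for smooth functions in this illustration.
    return (f(x + h) - f(x - h)) / (2 * h)

def newton_method(f, x_init=0, epsilon=1e-10):
    prev_value = x_init + 2 * epsilon
    value = x_init
    while abs(prev_value - value) > epsilon:
        prev_value = value
        value = value - f(value) / derivative(f, value)
    return value

print(newton_method(lambda x: x ** 2 - 2, x_init=1.0))  # ~1.4142135623731 (sqrt(2))
```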
def create_preference_branch(this, args, callee): """Creates a preference branch, which can be used for testing composed preference names.""" if args: if args[0].is_literal: res = this.traverser.wrap().query_interface('nsIPrefBranch') res.hooks['preference_branch'] = args[0].as_str() return res
6e6cc013b9d6c645a6a94087fe63b3a186582003
19,874
def circle( gdf, radius=10, fill=True, fill_color=None, name="layer", width=950, height=550, location=None, color="blue", tooltip=None, zoom=7, tiles="OpenStreetMap", attr=None, style={}, ): """ Convert Geodataframe to geojson and plot it. Parameters ---------- gdf : GeoDataframe radius: radius of the circle fill: fill the circle fill_color: fill the circle with this color (column name or color) name : name of the geojson layer, optional, default "layer" width : width of the map, default 950 height : height of the map, default 550 location : center of the map rendered, default centroid of first geometry color : color of your geometries, default blue use random to randomize the colors (column name or color) tooltip : hover box on the map with geometry info, default all columns can be a list of column names zoom : zoom level of the map, default 7 tiles : basemap, default openstreetmap, options ['google','googlesatellite','googlehybrid'] or custom wms attr : Attribution to external basemaps being used, default None style : dict, additional style to geometries Returns ------- m : folium.map """ gpd_copy = _get_lat_lon(gdf.copy()) m = _folium_map( gpd_copy, width, height, location, tiles=tiles, attr=attr, zoom_start=zoom ) for index, row in gpd_copy.iterrows(): if tooltip is not None: tooltip_dict = {k: v for k, v in dict(row).items() if k in tooltip} tooltip = "".join( [ "<p><b>{}</b> {}</p>".format(keyvalue[0], keyvalue[1]) for keyvalue in list(tooltip_dict.items()) ] ) else: tooltip = _get_tooltip(tooltip, gdf) if fill_color in list(gpd_copy.columns): fill_color = row[fill_color] if color in list(gpd_copy.columns): color = row[color] folium.Circle( radius=radius, location=[row["latitude"], row["longitude"]], tooltip=tooltip, popup=tooltip, fill=fill, color=color, fill_color=fill_color, ).add_to(m) return m
d2f2e066c2f6988f950ffce6a7655b60c91c3cec
19,875
import traceback def no_recurse(f): """Wrapper function that forces a function to return True if it recurses.""" def func(*args, **kwargs): for i in traceback.extract_stack(): if i[2] == f.__name__: return True return f(*args, **kwargs) return func
cce02b5e8fff125040e457c66c7cc9c344e209cb
19,876
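A quick demonstration of the `no_recurse` decorator above: the wrapped function short-circuits to True when its own name already appears on the call stack. The recursive `ping` example is contrived.

```python
import traceback

def no_recurse(f):
    def func(*args, **kwargs):
        # Walk the current stack; if this function's name is already there, we are recursing.
        for frame in traceback.extract_stack():
            if frame[2] == f.__name__:
                return True
        return f(*args, **kwargs)
    return func

@no_recurse
def ping():
    # Without the guard this would recurse forever; with it, the inner call returns True.
    return ping()

print(ping())  # True
```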
from typing import List import pandas from typing import Tuple def get_tap_number(distSys: SystemClass, names: List[str]) -> pandas.DataFrame: """ Get the tap number of regulators. Args: distSys : An instance of [SystemClass][dssdata.SystemClass]. names : Regulators names Returns: The tap number of regulators. """ def get_one(reg_name: str) -> Tuple[str, int]: distSys.dss.RegControls.Name(reg_name) return (reg_name, int(distSys.dss.RegControls.TapNumber())) __check_elements(names, distSys.dss.RegControls.AllNames()) return pandas.DataFrame( data=tuple(map(get_one, names)), columns=["reg_name", "tap"] )
a780b151670f261656d938ec48a5ac684c8c9d6d
19,877
def status(app: str) -> dict: """ :param app: The name of the Heroku app whose status you want to check :type app: str :return: dictionary containing information about the app's status """ return Herokron(app).status()
f5251469c8388edf885ac9e4ae502549f0092703
19,878
def GetIPv4Interfaces(): """Returns a list of IPv4 interfaces.""" interfaces = sorted(netifaces.interfaces()) return [x for x in interfaces if not x.startswith('lo')]
01fc53160b01e3322af8d18175fde0011d87d127
19,879
def merge_dicts(source, destination): """ Recursively merges two dictionaries source and destination. The source dictionary will only be read, but the destination dictionary will be overwritten. """ for key, value in source.items(): if isinstance(value, dict): # get node or create one node = destination.setdefault(key, {}) merge_dicts(value, node) else: destination[key] = value return destination
dea2d01f2cdf42c38daee8589abcc69a3f82e5c8
19,880
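A brief illustration of the recursive `merge_dicts` above: nested keys from `source` are folded into `destination`, which is mutated and returned, with `source` values winning on conflicts. The sample dictionaries are hypothetical.

```python
def merge_dicts(source, destination):
    for key, value in source.items():
        if isinstance(value, dict):
            # Recurse into nested dicts, creating the node in destination if needed.
            node = destination.setdefault(key, {})
            merge_dicts(value, node)
        else:
            destination[key] = value
    return destination

defaults = {"db": {"host": "localhost", "port": 5432}, "debug": False}
overrides = {"db": {"port": 5433}, "debug": True}
print(merge_dicts(overrides, defaults))
# {'db': {'host': 'localhost', 'port': 5433}, 'debug': True}
```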
def create_hive_connection(): """ Create a connection for Hive using the module-level hive_jdbc_url, hive_username, hive_password and hive_jar_path settings. :return: jaydebeapi connection """ try: conn = jaydebeapi.connect('org.apache.hive.jdbc.HiveDriver', hive_jdbc_url, [hive_username, hive_password], hive_jar_path, '') return conn except Exception as e: raise Exception(e)
48ac2859c9ceec9129d377f722ff96786a1c9552
19,881
def main(): """ The main function to execute upon call. Returns ------- int returns integer 0 for safe executions. """ print('Hello World') return 0
077df89bc009a12889afc6567bfd97abdb173411
19,882
def brightness(image, magnitude, name=None): """Adjusts the `magnitude` of brightness of an `image`. Args: image: An int or float tensor of shape `[height, width, num_channels]`. magnitude: A 0-D float tensor or single floating point value above 0.0. name: An optional string for name of the operation. Returns: A tensor with same shape and type as that of `image`. """ _check_image_dtype(image) with tf.name_scope(name or "brightness"): dark = tf.zeros_like(image) bright_image = blend(dark, image, magnitude) return bright_image
52e3016dce51bd5435e2c4085aa0a4d50b9c3502
19,883
def sitemap_xml(): """Sitemap XML""" sitemap = render_template("core/sitemap.xml") return Response(sitemap, mimetype="text/xml")
12d954b7f3c88f10e694e0aa1998699322a5602b
19,884
def get_CM(): """Pertzの係数CMをndarrayとして取得する Args: Returns: CM(ndarray[float]):Pertzの係数CM """ # pythonは0オリジンのため全て-1 CM = [0.385230, 0.385230, 0.385230, 0.462880, 0.317440,#1_1 => 0_0 0.338390, 0.338390, 0.221270, 0.316730, 0.503650, 0.235680, 0.235680, 0.241280, 0.157830, 0.269440, 0.830130, 0.830130, 0.171970, 0.841070, 0.457370, 0.548010, 0.548010, 0.478000, 0.966880, 1.036370, 0.548010, 0.548010, 1.000000, 3.012370, 1.976540, 0.582690, 0.582690, 0.229720, 0.892710, 0.569950, 0.131280, 0.131280, 0.385460, 0.511070, 0.127940,#1_2 => 0_1 0.223710, 0.223710, 0.193560, 0.304560, 0.193940, 0.229970, 0.229970, 0.275020, 0.312730, 0.244610, 0.090100, 0.184580, 0.260500, 0.687480, 0.579440, 0.131530, 0.131530, 0.370190, 1.380350, 1.052270, 1.116250, 1.116250, 0.928030, 3.525490, 2.316920, 0.090100, 0.237000, 0.300040, 0.812470, 0.664970, 0.587510, 0.130000, 0.400000, 0.537210, 0.832490,#1_3 => 0_2 0.306210, 0.129830, 0.204460, 0.500000, 0.681640, 0.224020, 0.260620, 0.334080, 0.501040, 0.350470, 0.421540, 0.753970, 0.750660, 3.706840, 0.983790, 0.706680, 0.373530, 1.245670, 0.864860, 1.992630, 4.864400, 0.117390, 0.265180, 0.359180, 3.310820, 0.392080, 0.493290, 0.651560, 1.932780, 0.898730, 0.126970, 0.126970, 0.126970, 0.126970, 0.126970,#1_4 => 0_3 0.810820, 0.810820, 0.810820, 0.810820, 0.810820, 3.241680, 2.500000, 2.291440, 2.291440, 2.291440, 4.000000, 3.000000, 2.000000, 0.975430, 1.965570, 12.494170, 12.494170, 8.000000, 5.083520, 8.792390, 21.744240, 21.744240, 21.744240, 21.744240, 21.744240, 3.241680, 12.494170, 1.620760, 1.375250, 2.331620, 0.126970, 0.126970, 0.126970, 0.126970, 0.126970,#1_5 => 0_4 0.810820, 0.810820, 0.810820, 0.810820, 0.810820, 3.241680, 2.500000, 2.291440, 2.291440, 2.291440, 4.000000, 3.000000, 2.000000, 0.975430, 1.965570, 12.494170, 12.494170, 8.000000, 5.083520, 8.792390, 21.744240, 21.744240, 21.744240, 21.744240, 21.744240, 3.241680, 12.494170, 1.620760, 1.375250, 2.331620, 0.126970, 0.126970, 0.126970, 0.126970, 0.126970,#1_6 => 0_5 0.810820, 0.810820, 0.810820, 0.810820, 0.810820, 3.241680, 2.500000, 2.291440, 2.291440, 2.291440, 4.000000, 3.000000, 2.000000, 0.975430, 1.965570, 12.494170, 12.494170, 8.000000, 5.083520, 8.792390, 21.744240, 21.744240, 21.744240, 21.744240, 21.744240, 3.241680, 12.494170, 1.620760, 1.375250, 2.331620, 0.337440, 0.337440, 0.969110, 1.097190, 1.116080,#2_1 => 1_0 0.337440, 0.337440, 0.969110, 1.116030, 0.623900, 0.337440, 0.337440, 1.530590, 1.024420, 0.908480, 0.584040, 0.584040, 0.847250, 0.914940, 1.289300, 0.337440, 0.337440, 0.310240, 1.435020, 1.852830, 0.337440, 0.337440, 1.015010, 1.097190, 2.117230, 0.337440, 0.337440, 0.969110, 1.145730, 1.476400, 0.300000, 0.300000, 0.700000, 1.100000, 0.796940,#2_2 => 1_1 0.219870, 0.219870, 0.526530, 0.809610, 0.649300, 0.386650, 0.386650, 0.119320, 0.576120, 0.685460, 0.746730, 0.399830, 0.470970, 0.986530, 0.785370, 0.575420, 0.936700, 1.649200, 1.495840, 1.335590, 1.319670, 4.002570, 1.276390, 2.644550, 2.518670, 0.665190, 0.678910, 1.012360, 1.199940, 0.986580, 0.378870, 0.974060, 0.500000, 0.491880, 0.665290,#2_3 => 1_2 0.105210, 0.263470, 0.407040, 0.553460, 0.582590, 0.312900, 0.345240, 1.144180, 0.854790, 0.612280, 0.119070, 0.365120, 0.560520, 0.793720, 0.802600, 0.781610, 0.837390, 1.270420, 1.537980, 1.292950, 1.152290, 1.152290, 1.492080, 1.245370, 2.177100, 0.424660, 0.529550, 0.966910, 1.033460, 0.958730, 0.310590, 0.714410, 0.252450, 0.500000, 0.607600,#2_4 => 1_3 0.975190, 0.363420, 0.500000, 0.400000, 0.502800, 0.175580, 0.196250, 0.476360, 1.072470, 0.490510, 
0.719280, 0.698620, 0.657770, 1.190840, 0.681110, 0.426240, 1.464840, 0.678550, 1.157730, 0.978430, 2.501120, 1.789130, 1.387090, 2.394180, 2.394180, 0.491640, 0.677610, 0.685610, 1.082400, 0.735410, 0.597000, 0.500000, 0.300000, 0.310050, 0.413510,#2_5 => 1_4 0.314790, 0.336310, 0.400000, 0.400000, 0.442460, 0.166510, 0.460440, 0.552570, 1.000000, 0.461610, 0.401020, 0.559110, 0.403630, 1.016710, 0.671490, 0.400360, 0.750830, 0.842640, 1.802600, 1.023830, 3.315300, 1.510380, 2.443650, 1.638820, 2.133990, 0.530790, 0.745850, 0.693050, 1.458040, 0.804500, 0.597000, 0.500000, 0.300000, 0.310050, 0.800920,#2_6 => 1_5 0.314790, 0.336310, 0.400000, 0.400000, 0.237040, 0.166510, 0.460440, 0.552570, 1.000000, 0.581990, 0.401020, 0.559110, 0.403630, 1.016710, 0.898570, 0.400360, 0.750830, 0.842640, 1.802600, 3.400390, 3.315300, 1.510380, 2.443650, 1.638820, 2.508780, 0.204340, 1.157740, 2.003080, 2.622080, 1.409380, 1.242210, 1.242210, 1.242210, 1.242210, 1.242210,#3_1 => 2_0 0.056980, 0.056980, 0.656990, 0.656990, 0.925160, 0.089090, 0.089090, 1.040430, 1.232480, 1.205300, 1.053850, 1.053850, 1.399690, 1.084640, 1.233340, 1.151540, 1.151540, 1.118290, 1.531640, 1.411840, 1.494980, 1.494980, 1.700000, 1.800810, 1.671600, 1.018450, 1.018450, 1.153600, 1.321890, 1.294670, 0.700000, 0.700000, 1.023460, 0.700000, 0.945830,#3_2 => 2_1 0.886300, 0.886300, 1.333620, 0.800000, 1.066620, 0.902180, 0.902180, 0.954330, 1.126690, 1.097310, 1.095300, 1.075060, 1.176490, 1.139470, 1.096110, 1.201660, 1.201660, 1.438200, 1.256280, 1.198060, 1.525850, 1.525850, 1.869160, 1.985410, 1.911590, 1.288220, 1.082810, 1.286370, 1.166170, 1.119330, 0.600000, 1.029910, 0.859890, 0.550000, 0.813600,#3_3 => 2_2 0.604450, 1.029910, 0.859890, 0.656700, 0.928840, 0.455850, 0.750580, 0.804930, 0.823000, 0.911000, 0.526580, 0.932310, 0.908620, 0.983520, 0.988090, 1.036110, 1.100690, 0.848380, 1.035270, 1.042380, 1.048440, 1.652720, 0.900000, 2.350410, 1.082950, 0.817410, 0.976160, 0.861300, 0.974780, 1.004580, 0.782110, 0.564280, 0.600000, 0.600000, 0.665740,#3_4 => 2_3 0.894480, 0.680730, 0.541990, 0.800000, 0.669140, 0.487460, 0.818950, 0.841830, 0.872540, 0.709040, 0.709310, 0.872780, 0.908480, 0.953290, 0.844350, 0.863920, 0.947770, 0.876220, 1.078750, 0.936910, 1.280350, 0.866720, 0.769790, 1.078750, 0.975130, 0.725420, 0.869970, 0.868810, 0.951190, 0.829220, 0.791750, 0.654040, 0.483170, 0.409000, 0.597180,#3_5 => 2_4 0.566140, 0.948990, 0.971820, 0.653570, 0.718550, 0.648710, 0.637730, 0.870510, 0.860600, 0.694300, 0.637630, 0.767610, 0.925670, 0.990310, 0.847670, 0.736380, 0.946060, 1.117590, 1.029340, 0.947020, 1.180970, 0.850000, 1.050000, 0.950000, 0.888580, 0.700560, 0.801440, 0.961970, 0.906140, 0.823880, 0.500000, 0.500000, 0.586770, 0.470550, 0.629790,#3_6 => 2_5 0.500000, 0.500000, 1.056220, 1.260140, 0.658140, 0.500000, 0.500000, 0.631830, 0.842620, 0.582780, 0.554710, 0.734730, 0.985820, 0.915640, 0.898260, 0.712510, 1.205990, 0.909510, 1.078260, 0.885610, 1.899260, 1.559710, 1.000000, 1.150000, 1.120390, 0.653880, 0.793120, 0.903320, 0.944070, 0.796130, 1.000000, 1.000000, 1.050000, 1.170380, 1.178090,#4_1 => 3_0 0.960580, 0.960580, 1.059530, 1.179030, 1.131690, 0.871470, 0.871470, 0.995860, 1.141910, 1.114600, 1.201590, 1.201590, 0.993610, 1.109380, 1.126320, 1.065010, 1.065010, 0.828660, 0.939970, 1.017930, 1.065010, 1.065010, 0.623690, 1.119620, 1.132260, 1.071570, 1.071570, 0.958070, 1.114130, 1.127110, 0.950000, 0.973390, 0.852520, 1.092200, 1.096590,#4_2 => 3_1 0.804120, 0.913870, 0.980990, 1.094580, 
1.042420, 0.737540, 0.935970, 0.999940, 1.056490, 1.050060, 1.032980, 1.034540, 0.968460, 1.032080, 1.015780, 0.900000, 0.977210, 0.945960, 1.008840, 0.969960, 0.600000, 0.750000, 0.750000, 0.844710, 0.899100, 0.926800, 0.965030, 0.968520, 1.044910, 1.032310, 0.850000, 1.029710, 0.961100, 1.055670, 1.009700,#4_3 => 3_2 0.818530, 0.960010, 0.996450, 1.081970, 1.036470, 0.765380, 0.953500, 0.948260, 1.052110, 1.000140, 0.775610, 0.909610, 0.927800, 0.987800, 0.952100, 1.000990, 0.881880, 0.875950, 0.949100, 0.893690, 0.902370, 0.875960, 0.807990, 0.942410, 0.917920, 0.856580, 0.928270, 0.946820, 1.032260, 0.972990, 0.750000, 0.857930, 0.983800, 1.056540, 0.980240,#4_4 => 3_3 0.750000, 0.987010, 1.013730, 1.133780, 1.038250, 0.800000, 0.947380, 1.012380, 1.091270, 0.999840, 0.800000, 0.914550, 0.908570, 0.999190, 0.915230, 0.778540, 0.800590, 0.799070, 0.902180, 0.851560, 0.680190, 0.317410, 0.507680, 0.388910, 0.646710, 0.794920, 0.912780, 0.960830, 1.057110, 0.947950, 0.750000, 0.833890, 0.867530, 1.059890, 0.932840,#4_5 => 3_4 0.979700, 0.971470, 0.995510, 1.068490, 1.030150, 0.858850, 0.987920, 1.043220, 1.108700, 1.044900, 0.802400, 0.955110, 0.911660, 1.045070, 0.944470, 0.884890, 0.766210, 0.885390, 0.859070, 0.818190, 0.615680, 0.700000, 0.850000, 0.624620, 0.669300, 0.835570, 0.946150, 0.977090, 1.049350, 0.979970, 0.689220, 0.809600, 0.900000, 0.789500, 0.853990,#4_6 => 3_5 0.854660, 0.852840, 0.938200, 0.923110, 0.955010, 0.938600, 0.932980, 1.010390, 1.043950, 1.041640, 0.843620, 0.981300, 0.951590, 0.946100, 0.966330, 0.694740, 0.814690, 0.572650, 0.400000, 0.726830, 0.211370, 0.671780, 0.416340, 0.297290, 0.498050, 0.843540, 0.882330, 0.911760, 0.898420, 0.960210, 1.054880, 1.075210, 1.068460, 1.153370, 1.069220,#5_1 => 4_0 1.000000, 1.062220, 1.013470, 1.088170, 1.046200, 0.885090, 0.993530, 0.942590, 1.054990, 1.012740, 0.920000, 0.950000, 0.978720, 1.020280, 0.984440, 0.850000, 0.908500, 0.839940, 0.985570, 0.962180, 0.800000, 0.800000, 0.810080, 0.950000, 0.961550, 1.038590, 1.063200, 1.034440, 1.112780, 1.037800, 1.017610, 1.028360, 1.058960, 1.133180, 1.045620,#5_2 => 4_1 0.920000, 0.998970, 1.033590, 1.089030, 1.022060, 0.912370, 0.949930, 0.979770, 1.020420, 0.981770, 0.847160, 0.935300, 0.930540, 0.955050, 0.946560, 0.880260, 0.867110, 0.874130, 0.972650, 0.883420, 0.627150, 0.627150, 0.700000, 0.774070, 0.845130, 0.973700, 1.006240, 1.026190, 1.071960, 1.017240, 1.028710, 1.017570, 1.025900, 1.081790, 1.024240,#5_3 => 4_2 0.924980, 0.985500, 1.014100, 1.092210, 0.999610, 0.828570, 0.934920, 0.994950, 1.024590, 0.949710, 0.900810, 0.901330, 0.928830, 0.979570, 0.913100, 0.761030, 0.845150, 0.805360, 0.936790, 0.853460, 0.626400, 0.546750, 0.730500, 0.850000, 0.689050, 0.957630, 0.985480, 0.991790, 1.050220, 0.987900, 0.992730, 0.993880, 1.017150, 1.059120, 1.017450,#5_4 => 4_3 0.975610, 0.987160, 1.026820, 1.075440, 1.007250, 0.871090, 0.933190, 0.974690, 0.979840, 0.952730, 0.828750, 0.868090, 0.834920, 0.905510, 0.871530, 0.781540, 0.782470, 0.767910, 0.764140, 0.795890, 0.743460, 0.693390, 0.514870, 0.630150, 0.715660, 0.934760, 0.957870, 0.959640, 0.972510, 0.981640, 0.965840, 0.941240, 0.987100, 1.022540, 1.011160,#5_5 => 4_4 0.988630, 0.994770, 0.976590, 0.950000, 1.034840, 0.958200, 1.018080, 0.974480, 0.920000, 0.989870, 0.811720, 0.869090, 0.812020, 0.850000, 0.821050, 0.682030, 0.679480, 0.632450, 0.746580, 0.738550, 0.668290, 0.445860, 0.500000, 0.678920, 0.696510, 0.926940, 0.953350, 0.959050, 0.876210, 0.991490, 0.948940, 0.997760, 0.850000, 0.826520, 
0.998470,#5_6 => 4_5 1.017860, 0.970000, 0.850000, 0.700000, 0.988560, 1.000000, 0.950000, 0.850000, 0.606240, 0.947260, 1.000000, 0.746140, 0.751740, 0.598390, 0.725230, 0.922210, 0.500000, 0.376800, 0.517110, 0.548630, 0.500000, 0.450000, 0.429970, 0.404490, 0.539940, 0.960430, 0.881630, 0.775640, 0.596350, 0.937680, 1.030000, 1.040000, 1.000000, 1.000000, 1.049510,#6_1 => 5_0 1.050000, 0.990000, 0.990000, 0.950000, 0.996530, 1.050000, 0.990000, 0.990000, 0.820000, 0.971940, 1.050000, 0.790000, 0.880000, 0.820000, 0.951840, 1.000000, 0.530000, 0.440000, 0.710000, 0.928730, 0.540000, 0.470000, 0.500000, 0.550000, 0.773950, 1.038270, 0.920180, 0.910930, 0.821140, 1.034560, 1.041020, 0.997520, 0.961600, 1.000000, 1.035780,#6_2 => 5_1 0.948030, 0.980000, 0.900000, 0.950360, 0.977460, 0.950000, 0.977250, 0.869270, 0.800000, 0.951680, 0.951870, 0.850000, 0.748770, 0.700000, 0.883850, 0.900000, 0.823190, 0.727450, 0.600000, 0.839870, 0.850000, 0.805020, 0.692310, 0.500000, 0.788410, 1.010090, 0.895270, 0.773030, 0.816280, 1.011680, 1.022450, 1.004600, 0.983650, 1.000000, 1.032940,#6_3 => 5_2 0.943960, 0.999240, 0.983920, 0.905990, 0.978150, 0.936240, 0.946480, 0.850000, 0.850000, 0.930320, 0.816420, 0.885000, 0.644950, 0.817650, 0.865310, 0.742960, 0.765690, 0.561520, 0.700000, 0.827140, 0.643870, 0.596710, 0.474460, 0.600000, 0.651200, 0.971740, 0.940560, 0.714880, 0.864380, 1.001650, 0.995260, 0.977010, 1.000000, 1.000000, 1.035250,#6_4 => 5_3 0.939810, 0.975250, 0.939980, 0.950000, 0.982550, 0.876870, 0.879440, 0.850000, 0.900000, 0.917810, 0.873480, 0.873450, 0.751470, 0.850000, 0.863040, 0.761470, 0.702360, 0.638770, 0.750000, 0.783120, 0.734080, 0.650000, 0.600000, 0.650000, 0.715660, 0.942160, 0.919100, 0.770340, 0.731170, 0.995180, 0.952560, 0.916780, 0.920000, 0.900000, 1.005880,#6_5 => 5_4 0.928620, 0.994420, 0.900000, 0.900000, 0.983720, 0.913070, 0.850000, 0.850000, 0.800000, 0.924280, 0.868090, 0.807170, 0.823550, 0.600000, 0.844520, 0.769570, 0.719870, 0.650000, 0.550000, 0.733500, 0.580250, 0.650000, 0.600000, 0.500000, 0.628850, 0.904770, 0.852650, 0.708370, 0.493730, 0.949030, 0.911970, 0.800000, 0.800000, 0.800000, 0.956320,#6_6 => 5_5 0.912620, 0.682610, 0.750000, 0.700000, 0.950110, 0.653450, 0.659330, 0.700000, 0.600000, 0.856110, 0.648440, 0.600000, 0.641120, 0.500000, 0.695780, 0.570000, 0.550000, 0.598800, 0.40000 , 0.560150, 0.475230, 0.500000, 0.518640, 0.339970, 0.520230, 0.743440, 0.592190, 0.603060, 0.316930, 0.794390 ] return np.array(CM, dtype=float).reshape((6,6,7,5))
434bab68e2aa434a79a53dd91a4932e631a6367c
19,885
def remove_sleepEDF(mne_raw, CHANNELS): """Extracts CHANNELS channels from MNE_RAW data. Args: mne_raw - mne data structure of n recordings of t seconds each CHANNELS - channels to be extracted Returns: extracted - mne data structure with only specified channels """ extracted = mne_raw.pick_channels(CHANNELS) return extracted
7bb04810676a127742d391c518fc505bf7568aac
19,886
from datetime import datetime import pytz def save_email_schedule(request, action, schedule_item, op_payload): """ Function to handle the creation and edition of email items :param request: Http request being processed :param action: Action item related to the schedule :param schedule_item: Schedule item or None if it is new :param op_payload: dictionary to carry over the request to the next step :return: """ # Create the form to ask for the email subject and other information form = EmailScheduleForm( data=request.POST or None, action=action, instance=schedule_item, columns=action.workflow.columns.filter(is_key=True), confirm_items=op_payload.get('confirm_items', False)) # Check if the request is GET, or POST but not valid if request.method == 'GET' or not form.is_valid(): now = datetime.datetime.now(pytz.timezone(settings.TIME_ZONE)) # Render the form return render(request, 'scheduler/edit.html', {'action': action, 'form': form, 'now': now}) # Processing a valid POST request # Save the schedule item object s_item = form.save(commit=False) # Assign additional fields and save s_item.user = request.user s_item.action = action s_item.status = ScheduledAction.STATUS_CREATING s_item.payload = { 'subject': form.cleaned_data['subject'], 'cc_email': [x for x in form.cleaned_data['cc_email'].split(',') if x], 'bcc_email': [x for x in form.cleaned_data['bcc_email'].split(',') if x], 'send_confirmation': form.cleaned_data['send_confirmation'], 'track_read': form.cleaned_data['track_read'] } # Verify that that action does comply with the name uniqueness # property (only with respec to other actions) try: s_item.save() except IntegrityError as e: # There is an action with this name already form.add_error('name', _( 'A scheduled execution of this action with this name ' 'already exists')) return render(request, 'scheduler/edit.html', {'action': action, 'form': form, 'now': datetime.datetime.now(pytz.timezone( settings.TIME_ZONE))}) # Upload information to the op_payload op_payload['schedule_id'] = s_item.id op_payload['confirm_items'] = form.cleaned_data['confirm_items'] if op_payload['confirm_items']: # Update information to carry to the filtering stage op_payload['exclude_values'] = s_item.exclude_values op_payload['item_column'] = s_item.item_column.name op_payload['button_label'] = ugettext('Schedule') request.session[action_session_dictionary] = op_payload return redirect('action:item_filter') else: # If there is not item_column, the exclude values should be empty. s_item.exclude_values = [] s_item.save() # Go straight to the final step return finish_scheduling(request, s_item, op_payload)
3a14e11a195bcf96ac132d2876e61ce52f0b8ccd
19,887
def slice( _data: DataFrame, *rows: NumericOrIter, _preserve: bool = False, base0_: bool = None, ) -> DataFrame: """Index rows by their (integer) locations Original APIs https://dplyr.tidyverse.org/reference/slice.html Args: _data: The dataframe rows: The indexes Ranges can be specified as `f[1:3]` Note that the negatives mean differently than in dplyr. In dplyr, negative numbers meaning exclusive, but here negative numbers are negative indexes like how they act in python indexing. For exclusive indexes, you need to use inversion. For example: `slice(df, ~f[:3])` excludes first 3 rows. You can also do: `slice(df, ~c(f[:3], 6))` to exclude multiple set of rows. To exclude a single row, you can't do this directly: `slice(df, ~1)` since `~1` is directly compiled into a number. You can do this instead: `slice(df, ~c(1))` Exclusive and inclusive expressions are allowed to be mixed, unlike in `dplyr`. They are expanded in the order they are passed in. _preserve: Relevant when the _data input is grouped. If _preserve = FALSE (the default), the grouping structure is recalculated based on the resulting data, otherwise the grouping is kept as is. base0_: If rows are selected by indexes, whether they are 0-based. If not provided, `datar.base.get_option('index.base.0')` is used. Returns: The sliced dataframe """ if not rows: return _data rows = _sanitize_rows(rows, _data.shape[0], base0_) out = _data.iloc[rows, :] if isinstance(_data.index, RangeIndex): out.reset_index(drop=True, inplace=True) # copy_attrs(out, _data) # attrs carried return out
a58d2ae140d1e441100f7f71587588b93ecaa7b4
19,888
def get_random_color(): """ Get random color :return: np.array([r,g,b]) """ global _start_color, _color_step # rgb = np.random.uniform(0, 25, [3]) # rgb = np.asarray(np.floor(rgb) / 24 * 255, np.uint8) _start_color = (_start_color + _color_step) % np.array([256, 256, 256]) rgb = np.asarray(_start_color, np.uint8).tolist() return rgb
3c9596f264e75c064f76a56d71e06dbe55669936
19,889
def get_client_ip(request): """ Simple function to return IP address of client :param request: :return: """ x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR') if x_forwarded_for: ip = x_forwarded_for.split(',')[0] # pylint: disable=invalid-name else: ip = request.META.get('REMOTE_ADDR') # pylint: disable=invalid-name return ip
976755d296127a42de5b6d7c39bfc9a607b273ee
19,890
def entropy(wair,temp,pres,airf=None,dhum=None,dliq=None,chkvals=False, chktol=_CHKTOL,airf0=None,dhum0=None,dliq0=None,chkbnd=False, mathargs=None): """Calculate wet air entropy. Calculate the specific entropy of wet air. :arg float wair: Total dry air fraction in kg/kg. :arg float temp: Temperature in K. :arg float pres: Pressure in Pa. :arg airf: Dry air fraction in humid air in kg/kg. :type airf: float or None :arg dhum: Humid air density in kg/m3. If unknown, pass None (default) and it will be calculated. :type dhum: float or None :arg dliq: Liquid water density in kg/m3. If unknown, pass None (default) and it will be calculated. :type dliq: float or None :arg bool chkvals: If True (default False) and all values are given, this function will calculate the disequilibrium and raise a warning if the results are not within a given tolerance. :arg float chktol: Tolerance to use when checking values (default _CHKTOL). :arg airf0: Initial guess for the dry fraction in kg/kg. If None (default) then `iceair4a._approx_tp` is used. :type airf0: float or None :arg dhum0: Initial guess for the humid air density in kg/m3. If None (default) then `liqair4a._approx_tp` is used. :type dhum0: float or None :arg dliq0: Initial guess for the liquid water density in kg/m3. If None (default) then `liqair4a._approx_tp` is used. :type dliq0: float or None :arg bool chkbnd: If True then warnings are raised when the given values are valid but outside the recommended bounds (default False). :arg mathargs: Keyword arguments to the root-finder :func:`_newton <maths3.newton>` (e.g. maxiter, rtol). If None (default) then no arguments are passed and default parameters will be used. :returns: Entropy in J/kg/K. :raises RuntimeWarning: If the relative disequilibrium is more than chktol, if chkvals is True and all values are given. :raises RuntimeWarning: If air with the given parameters would be unsaturated. :Examples: >>> entropy(0.5,300.,1e5) 343.783393872 """ g_t = liqair_g(0,1,0,wair,temp,pres,airf=airf,dhum=dhum,dliq=dliq, chkvals=chkvals,chktol=chktol,airf0=airf0,dhum0=dhum0,dliq0=dliq0, chkbnd=chkbnd,mathargs=mathargs) s = -g_t return s
4cd0b53bdf549a0f53d2543e693f6b179a0f0915
19,891
def get_list_item(view, index): """ get item from listView by index version 1 :param view: :param index: :return: """ return var_cache['proxy'].get_list_item(view, index)
bcb1db741a87bc2c12686ade8e692449030eb9cf
19,892
def interquartile_range_checker(train_user: list) -> float: """ Optional method: interquartile range input : list of total user in float output : low limit of input in float this method can be used to check whether some data is an outlier or not >>> interquartile_range_checker([1,2,3,4,5,6,7,8,9,10]) 2.8 """ train_user.sort() q1 = np.percentile(train_user, 25) q3 = np.percentile(train_user, 75) iqr = q3 - q1 low_lim = q1 - (iqr * 0.1) return low_lim
29eba79cef5c91e491250780ed3fb7ca74d7cace
19,893
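The `interquartile_range_checker` above needs NumPy for the percentile calls; here is a self-contained version run on the doctest input. Note the 0.1 multiplier is unusually tight (1.5 x IQR is the conventional outlier fence).

```python
import numpy as np

def interquartile_range_checker(train_user):
    # Lower fence = Q1 - 0.1 * IQR (the original uses 0.1 rather than the usual 1.5).
    train_user.sort()
    q1 = np.percentile(train_user, 25)
    q3 = np.percentile(train_user, 75)
    iqr = q3 - q1
    return q1 - (iqr * 0.1)

print(interquartile_range_checker([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]))  # 2.8
```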
def make_linear_colorscale(colors): """ Makes a list of colors into a colorscale-acceptable form For documentation regarding to the form of the output, see https://plot.ly/python/reference/#mesh3d-colorscale """ scale = 1.0 / (len(colors) - 1) return [[i * scale, color] for i, color in enumerate(colors)]
dabd2a2a9d6bbf3acfcabcac52246048332fae73
19,894
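A tiny demonstration of the `make_linear_colorscale` construction above: three colors map to positions 0.0, 0.5 and 1.0 (the colors are chosen arbitrarily).

```python
def make_linear_colorscale(colors):
    # Space len(colors) entries evenly over [0, 1].
    scale = 1.0 / (len(colors) - 1)
    return [[i * scale, color] for i, color in enumerate(colors)]

print(make_linear_colorscale(["rgb(255,0,0)", "rgb(0,255,0)", "rgb(0,0,255)"]))
# [[0.0, 'rgb(255,0,0)'], [0.5, 'rgb(0,255,0)'], [1.0, 'rgb(0,0,255)']]
```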
def background(image_size: int, level: float=0, grad_i: float=0, grad_d: float=0) -> np.array: """ Return array representing image background of size `image_size`. The image may have an illumination gradient of intensity `grad_i` and direction `grad_d`. The `image_size` is in pixels. `grad_i` is expected to be between 0 and 1. `grad_d` is the gradient direction in radians. """ h = image_size // 2 background = np.ones((image_size,image_size)) * level ix,iy = np.meshgrid(np.arange(-h, h + 1), np.arange(-h, h + 1)) illumination_gradient = grad_i * ((ix * np.sin(grad_d)) + (iy * np.cos(grad_d))) / (np.sqrt(2) * image_size) return background + illumination_gradient
7fc6d34ec02752604024746e707f70a82ad61450
19,895
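A self-contained run of the `background` synthesizer above (NumPy only; the size, level and gradient values are arbitrary). Note the meshgrid only matches the `(image_size, image_size)` array when `image_size` is odd.

```python
import numpy as np

def background(image_size, level=0, grad_i=0, grad_d=0):
    # Flat background at `level` plus a linear illumination gradient of strength
    # grad_i oriented along direction grad_d (radians).
    h = image_size // 2
    bg = np.ones((image_size, image_size)) * level
    ix, iy = np.meshgrid(np.arange(-h, h + 1), np.arange(-h, h + 1))
    gradient = grad_i * ((ix * np.sin(grad_d)) + (iy * np.cos(grad_d))) / (np.sqrt(2) * image_size)
    return bg + gradient

img = background(5, level=0.5, grad_i=0.2, grad_d=np.pi / 2)
print(img.shape)                                 # (5, 5)
print(round(img.min(), 3), round(img.max(), 3))  # ~0.443 0.557: intensity tilts along x
```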
def mapTypeCategoriesToSubnetName(nodetypecategory, acceptedtypecategory): """This function returns a name of the subnet that accepts nodetypecategory as child type and can be created in a container whose child type is acceptedtypecategory. Returns None if these two categories are the same (ie, no need for a subnet to accommodate nodetypecategory). Also returns None if the mapping has not been defined yet. """ return ''
c9a31c571807cd2592340ce685b1f130f99da156
19,896
def sorted_unique(series): """Return the unique values of *series*, correctly sorted.""" # This handles Categorical data types, which sorted(series.unique()) fails # on. series.drop_duplicates() is slower than Series(series.unique()). return list(pd.Series(series.unique()).sort_values())
3da88962171acd15f3af020ff056afb66d284425
19,897
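A short check of `sorted_unique` above, including the Categorical case it is written to handle (pandas required; the data is made up). Per the original comment, `sorted(series.unique())` fails on Categorical data, which is why the values are wrapped back into a Series before sorting.

```python
import pandas as pd

def sorted_unique(series):
    # Series.unique() keeps the Categorical dtype; re-wrapping in a Series lets
    # sort_values() order the values by their categories.
    return list(pd.Series(series.unique()).sort_values())

s = pd.Series(["b", "a", "c", "a"], dtype="category")
print(sorted_unique(s))  # ['a', 'b', 'c']
```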
import json def create_query_from_request(p, request): """ Create JSON object representing the query from request received from Dashboard. :param request: :return: """ query_json = {'process_type': DVAPQL.QUERY} count = request.POST.get('count') generate_tags = request.POST.get('generate_tags') selected_indexers = json.loads(request.POST.get('selected_indexers',"[]")) selected_detectors = json.loads(request.POST.get('selected_detectors',"[]")) query_json['image_data_b64'] = request.POST.get('image_url')[22:] query_json['tasks'] = [] indexer_tasks = defaultdict(list) if generate_tags and generate_tags != 'false': query_json['tasks'].append({'operation': 'perform_analysis', 'arguments': {'analyzer': 'tagger','target': 'query',} }) if selected_indexers: for k in selected_indexers: indexer_pk, retriever_pk = k.split('_') indexer_tasks[int(indexer_pk)].append(int(retriever_pk)) for i in indexer_tasks: di = TrainedModel.objects.get(pk=i,model_type=TrainedModel.INDEXER) rtasks = [] for r in indexer_tasks[i]: rtasks.append({'operation': 'perform_retrieval', 'arguments': {'count': int(count), 'retriever_pk': r}}) query_json['tasks'].append( { 'operation': 'perform_indexing', 'arguments': { 'index': di.name, 'target': 'query', 'map': rtasks } } ) if selected_detectors: for d in selected_detectors: dd = TrainedModel.objects.get(pk=int(d),model_type=TrainedModel.DETECTOR) if dd.name == 'textbox': query_json['tasks'].append({'operation': 'perform_detection', 'arguments': {'detector_pk': int(d), 'target': 'query', 'map': [{ 'operation': 'perform_analysis', 'arguments': {'target': 'query_regions', 'analyzer': 'crnn', 'filters': {'event_id': '__parent_event__'} } }] } }) elif dd.name == 'face': dr = Retriever.objects.get(name='facenet',algorithm=Retriever.EXACT) query_json['tasks'].append({'operation': 'perform_detection', 'arguments': {'detector_pk': int(d), 'target': 'query', 'map': [{ 'operation': 'perform_indexing', 'arguments': {'target': 'query_regions', 'index': 'facenet', 'filters': {'event_id': '__parent_event__'}, 'map':[{ 'operation':'perform_retrieval', 'arguments':{'retriever_pk':dr.pk, 'filters':{'event_id': '__parent_event__'}, 'target':'query_region_index_vectors', 'count':10} }]} }] } }) else: query_json['tasks'].append({'operation': 'perform_detection', 'arguments': {'detector_pk': int(d), 'target': 'query', }}) user = request.user if request.user.is_authenticated else None p.create_from_json(query_json, user) return p.process
4936376d6d900ca20d2ea9339634d0a7d90ebc2e
19,898
from typing import List from typing import Any def create_cxr_transforms_from_config(config: CfgNode, apply_augmentations: bool) -> ImageTransformationPipeline: """ Defines the image transformations pipeline used in Chest-Xray datasets. Can be used for other types of images data, type of augmentations to use and strength are expected to be defined in the config. :param config: config yaml file fixing strength and type of augmentation to apply :param apply_augmentations: if True return transformation pipeline with augmentations. Else, disable augmentations i.e. only resize and center crop the image. """ transforms: List[Any] = [ExpandChannels()] if apply_augmentations: if config.augmentation.use_random_affine: transforms.append(RandomAffine( degrees=config.augmentation.random_affine.max_angle, translate=(config.augmentation.random_affine.max_horizontal_shift, config.augmentation.random_affine.max_vertical_shift), shear=config.augmentation.random_affine.max_shear )) if config.augmentation.use_random_crop: transforms.append(RandomResizedCrop( scale=config.augmentation.random_crop.scale, size=config.preprocess.resize )) else: transforms.append(Resize(size=config.preprocess.resize)) if config.augmentation.use_random_horizontal_flip: transforms.append(RandomHorizontalFlip(p=config.augmentation.random_horizontal_flip.prob)) if config.augmentation.use_gamma_transform: transforms.append(RandomGamma(scale=config.augmentation.gamma.scale)) if config.augmentation.use_random_color: transforms.append(ColorJitter( brightness=config.augmentation.random_color.brightness, contrast=config.augmentation.random_color.contrast, saturation=config.augmentation.random_color.saturation )) if config.augmentation.use_elastic_transform: transforms.append(ElasticTransform( alpha=config.augmentation.elastic_transform.alpha, sigma=config.augmentation.elastic_transform.sigma, p_apply=config.augmentation.elastic_transform.p_apply )) transforms.append(CenterCrop(config.preprocess.center_crop_size)) if config.augmentation.use_random_erasing: transforms.append(RandomErasing( scale=config.augmentation.random_erasing.scale, ratio=config.augmentation.random_erasing.ratio )) if config.augmentation.add_gaussian_noise: transforms.append(AddGaussianNoise( p_apply=config.augmentation.gaussian_noise.p_apply, std=config.augmentation.gaussian_noise.std )) else: transforms += [Resize(size=config.preprocess.resize), CenterCrop(config.preprocess.center_crop_size)] pipeline = ImageTransformationPipeline(transforms) return pipeline
9d16e844291a69d4b82681c4cdcc48f5a1b3d67f
19,899
def is_compiled_with_npu(): """ Whether paddle was built with WITH_ASCEND_CL=ON to support Ascend NPU. Returns (bool): `True` if NPU is supported, otherwise `False`. Examples: .. code-block:: python import paddle support_npu = paddle.device.is_compiled_with_npu() """ return core.is_compiled_with_npu()
54bf625843a098bfee93d8c1ac5b79bd562602fe
19,900
def odd_occurrence_parity_set(arr): """ A similar implementation to the XOR idea above, but more naive. As we iterate over the passed list, a working set keeps track of the numbers that have occurred an odd number of times. At the end, the set will only contain one number. Though the worst-case time complexity is the same as the hashmap method implemented below, this will probably be significantly faster as dictionaries have much longer lookup times than sets. Space complexity: $O(n)$; Time complexity: $O(n)$. Parameters ---------- arr : list of int Returns ------- integer """ seen_odd_times = set() for num in arr: if num in seen_odd_times: seen_odd_times.remove(num) else: seen_odd_times.add(num) return list(seen_odd_times)[0]
57f9362e05786724a1061bef07e49635b1b2b142
19,901
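A quick run of the parity-set approach in `odd_occurrence_parity_set` above, on a list where every value but one appears an even number of times (the input is hypothetical).

```python
def odd_occurrence_parity_set(arr):
    # Toggle membership: values seen an even number of times cancel out of the set.
    seen_odd_times = set()
    for num in arr:
        if num in seen_odd_times:
            seen_odd_times.remove(num)
        else:
            seen_odd_times.add(num)
    return list(seen_odd_times)[0]

print(odd_occurrence_parity_set([4, 7, 4, 9, 9, 7, 7]))  # 7
```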
import time def larmor_step_search(step_search_center=cfg.LARMOR_FREQ, steps=200, step_bw_MHz=5e-3, plot=False, shim_x=cfg.SHIM_X, shim_y=cfg.SHIM_Y, shim_z=cfg.SHIM_Z, delay_s=1, gui_test=False): """ Run a stepped search through a range of frequencies to find the highest signal response Used to find a starting point, not for precision Args: step_search_center (float): [MHz] Center for search, defaults to config LARMOR_FREQ steps (int): Number of search steps step_bw_MHz (float): [MHz] Distance in MHz between each step plot (bool): Default False, plot final data shim_x, shim_y, shim_z (float): Shim value, defaults to config SHIM_ values, must be less than 1 magnitude delay_s (float): Delay between readings in seconds gui_test (bool): Default False, takes dummy data instead of actual data for GUI testing away from scanner Returns: float: Estimated larmor frequency in MHz dict: Dictionary of data """ # Pick out the frequencies to run through swept_freqs = np.linspace(step_search_center - ((steps-1)/2 * step_bw_MHz), step_search_center + ((steps-1)/2 * step_bw_MHz), num=steps) larmor_freq = swept_freqs[0] # Set the sequence file for a single spin echo seq_file = cfg.MGH_PATH + 'cal_seq_files/se_1.seq' # Run the experiment once to prep array rxd, rx_t = scr.run_pulseq(seq_file, rf_center=larmor_freq, tx_t=1, grad_t=10, tx_warmup=100, shim_x=shim_x, shim_y=shim_y, shim_z=shim_z, grad_cal=False, save_np=False, save_mat=False, save_msgs=False, gui_test=gui_test) # Create array for storing data rx_arr = np.zeros((rxd.shape[0], steps), dtype=np.cdouble) rx_arr[:,0] = rxd # Pause for spin recovery time.sleep(delay_s) # Repeat for each frequency after the first for i in range(1, steps): print(f'{swept_freqs[i]:.4f} MHz') rx_arr[:,i], _ = scr.run_pulseq(seq_file, rf_center=swept_freqs[i], tx_t=1, grad_t=10, tx_warmup=100, shim_x=shim_x, shim_y=shim_y, shim_z=shim_z, grad_cal=False, save_np=False, save_mat=False, save_msgs=False, gui_test=gui_test) time.sleep(delay_s) # Find the frequency data with the largest maximum absolute value max_ind = np.argmax(np.max(np.abs(rx_arr), axis=0, keepdims=False)) max_freq = swept_freqs[max_ind] print(f'Max frequency: {max_freq:.4f} MHz') # Plot figure if plot: fig, axs = plt.subplots(2, 1, constrained_layout=True) fig.suptitle(f'{steps}-step search around {step_search_center:.4f} MHz') axs[0].plot(np.real(rx_arr)) axs[0].legend([f'{freq:.4f} MHz' for freq in swept_freqs]) axs[0].set_title('Concatenated signal -- Real') axs[1].plot(np.abs(rx_arr)) axs[1].set_title('Concatenated signal -- Magnitude') plt.show() # Output of useful data for visualization data_dict = {'rx_arr': rx_arr, 'rx_t': rx_t, 'larmor_freq': larmor_freq } # Return the frequency that worked the best return max_freq, data_dict
647d67a491cf787dbc092a621c9ba5ad8097b21e
19,902
from typing import Optional def get_role_tempalte(context: Optional[str] = None, name: Optional[str] = None, opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetRoleTempalteResult: """ Use this data source to access information about an existing resource. """ pulumi.log.warn("""get_role_tempalte is deprecated: rancher2.getRoleTempalte has been deprecated in favor of rancher2.getRoleTemplate""") __args__ = dict() __args__['context'] = context __args__['name'] = name if opts is None: opts = pulumi.InvokeOptions() if opts.version is None: opts.version = _utilities.get_version() __ret__ = pulumi.runtime.invoke('rancher2:index/getRoleTempalte:getRoleTempalte', __args__, opts=opts, typ=GetRoleTempalteResult).value return AwaitableGetRoleTempalteResult( administrative=__ret__.administrative, annotations=__ret__.annotations, builtin=__ret__.builtin, context=__ret__.context, default_role=__ret__.default_role, description=__ret__.description, external=__ret__.external, hidden=__ret__.hidden, id=__ret__.id, labels=__ret__.labels, locked=__ret__.locked, name=__ret__.name, role_template_ids=__ret__.role_template_ids, rules=__ret__.rules)
edc2bdaba9f287995f6c4323a4acb45935be02e4
19,903
from sklearn.metrics import roc_curve, roc_auc_score def threshold_xr_via_auc(ds, df, res_factor=3, if_nodata='any'): """ Takes a xarray dataset/array of gdv likelihood values and thresholds them according to a pandas dataframe (df) of field occurrence points. Scipy roc curve and auc is generated to perform thresholding. Pandas dataframe must include absences along with presences or the roc curve cannot be performed. Parameters ---------- ds : xarray dataset/array A dataset with x, y and time dims with likelihood values. df : pandas dataframe A dataframe of field occurrences with x, y values and presence, absence column. res_factors : int Controls the tolerance of occurence points intersection with nearest pixels. In other words, number of pixels that a occurrence point can be 'out'. if_nodata : str Whether to exclude a point from the auc threshold method if any or all values are nan. Default is any. Returns ---------- ds_thresh : xarray dataset or array. """ # imports check try: except: raise ImportError('Could not import sklearn.') # notify print('Thresholding dataset via occurrence records and AUC.') # check xr type, dims, num time if not isinstance(ds, (xr.Dataset, xr.DataArray)): raise TypeError('Dataset not an xarray type.') elif 'x' not in list(ds.dims) or 'y' not in list(ds.dims): raise ValueError('No x or y dimensions in dataset.') # we need a dataset, try and convert from array was_da = False if isinstance(ds, xr.DataArray): try: was_da = True ds = ds.to_dataset(dim='variable') except: raise TypeError('Failed to convert xarray DataArray to Dataset.') # check if pandas type, columns, actual field if not isinstance(df, pd.DataFrame): raise TypeError('Occurrence records is not a pandas type.') elif 'x' not in df or 'y' not in df: raise ValueError('No x, y fields in occurrence records.') elif 'actual' not in df: raise ValueError('No actual field in occurrence records.') # check if nodatavals is in dataset if not hasattr(ds, 'nodatavals') or ds.nodatavals == 'unknown': raise AttributeError('Dataset does not have a nodatavalue attribute.') # check if res factor and if_nodata valid if not isinstance(res_factor, int) and res_factor < 1: raise TypeError('Resolution factor must be an integer of 1 or greater.') elif if_nodata not in ['any', 'all']: raise TypeError('If nodata policy must be either any or all.') # split ds into arrays depending on dims da_list = [ds] if 'time' in ds.dims: da_list = [ds.sel(time=dt) for dt in ds['time']] # loop each slice, threshold to auc thresh_list = [] for da in da_list: # take a copy da = da.copy(deep=True) # intersect points with current da df_data = df[['x', 'y', 'actual']].copy() df_data = tools.intersect_records_with_xr(ds=da, df_records=df_data, extract=True, res_factor=res_factor, if_nodata=if_nodata) # remove no data df_data = tools.remove_nodata_records(df_data, nodata_value=ds.nodatavals) # check if dataframe has 1s and 0s only unq = df_data['actual'].unique() if not np.any(unq == 1) or not np.any(unq == 0): raise ValueError('Occurrence records do not contain 1s and/or 0s.') elif len(unq) != 2: raise ValueError('Occurrence records contain more than just 1s and/or 0s.') # rename column, add column of actuals (1s) df_data = df_data.rename(columns={'like': 'predicted'}) # get fpr, tpr, thresh, auc and optimal threshold fpr, tpr, thresholds = roc_curve(df_data['actual'], df_data['predicted']) auc = roc_auc_score(df_data['actual'], df_data['predicted']) cut_off = thresholds[np.argmax(tpr - fpr)] # threshold da to cutoff and append da = da.where(da > 
cut_off) thresh_list.append(da) # notify if 'time' in ds.dims: print('AUC: {0} for time: {1}.'.format(round(auc, 3), da['time'].values)) else: print('AUC: {0} for whole dataset.'.format(round(auc, 3))) for e in fpr: print(e) print('\n') for e in tpr: print(e) print('\n') print(auc) print('\n') print(cut_off) # show print('- ' * 30) plt.show() print('- ' * 30) print('') # concat array back together if len(thresh_list) > 1: ds_thresh = xr.concat(thresh_list, dim='time').sortby('time') else: ds_thresh = thresh_list[0] if was_da: ds_thresh = ds_thresh.to_array() # notify and return print('Thresholded dataset successfully.') return ds_thresh
6415b7aa7298c7d2bf6488d5c9f0834facbd4300
19,904
def kd_or_scan(func=None, array=None, extra_data=None): """Decorator to allow functions to be call with a scan number or kd object """ if func is None: return partial(kd_or_scan, array=array, extra_data=extra_data) @wraps(func) def wrapper(scan, *args, **kwargs): # If scan number given, read the scan into the object and pass it to function if isinstance(scan, (int, np.int, np.int64)): scan = read_scan(scan, array=array, extra_data=extra_data) return func(scan, *args, **kwargs) return wrapper
8eda0c54717293f57cd817f20a6f008abae6b825
19,905
def matching_intervals(original: DomainNode, approx: DomainNode, conf: float) -> bool: """ Checks if 2 intervals match with respect to a confidence interval.""" # out_of_bounds = (not matching_bounds(original.domains[v], approx.domains[v], conf) for v in original.variables) # return not any(out_of_bounds) vars_in_bounds = (matching_bounds(original.domains[var], approx.domains[var], conf) for var in original.variables) return all(vars_in_bounds)
bb2540872d0406b88551ec5b3a9ef28fbc39d366
19,906
def _make_label_sigmoid_cross_entropy_loss(logits, present_labels, split): """ Helper function to create label loss Parameters ---------- logits: tensor of shape [batch_size, num_verts, num_labels] present_labels: tensor of shape [batch_size, num_verts, num_labels]; labels of labelled verts split: tensor of shape [batch_size, num_verts], 0 if censored, 1 if not censored Returns ------- The cross-entropy loss corresponding to the label. """ if len(logits.shape) == 3: batch_size = tf.cast(tf.shape(input=logits)[0], dtype=tf.float32) else: batch_size = 1 label_pred_losses = tf.compat.v1.losses.sigmoid_cross_entropy( present_labels, logits=logits, weights=tf.expand_dims(split, -1), reduction=tf.compat.v1.losses.Reduction.NONE) # sum rather than (tf default of) mean because ¯\_(ツ)_/¯ label_pred_loss = tf.reduce_sum(input_tensor=label_pred_losses) return label_pred_loss / batch_size
290364255222f20ef864636ef2ac8df51599a587
19,907
import copy


def _merge_meta(base, child):
    """Merge the base and the child meta attributes.

    List entries, such as ``indexes`` are concatenated.
    ``abstract`` value is set to ``True`` only if defined as such in the child class.

    Args:
        base (dict):
            ``meta`` attribute from the base class.
        child (dict):
            ``meta`` attribute from the child class.

    Returns:
        dict:
            Merged metadata.
    """
    base = copy.deepcopy(base)
    child.setdefault('abstract', False)
    for key, value in child.items():
        if isinstance(value, list):
            base.setdefault(key, []).extend(value)
        else:
            base[key] = value

    return base
ba219b8091244a60658bee826fbef5003d3f7883
19,908
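Illustrative usage of _merge_meta above (invented values): list entries are concatenated, scalar child values win, and 'abstract' defaults to False.
base = {'indexes': ['a'], 'abstract': True}
child = {'indexes': ['b'], 'table': 'users'}
_merge_meta(base, child)
# -> {'indexes': ['a', 'b'], 'abstract': False, 'table': 'users'}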
from typing import Dict
from typing import Any


def _parse_quotes(quotes_dict: Dict[str, Dict[str, Dict[str, Any]]]) -> "RegionalQuotes":
    """
    Parse quote data for a :class:`~.DetailedProduct`.

    :param quotes_dict:
    """
    quotes: RegionalQuotes = RegionalQuotes()
    for gsp, payment_methods in quotes_dict.items():
        quotes[gsp] = {}
        for method, fuels in payment_methods.items():
            quotes[gsp][method] = {}
            for fuel, quote in fuels.items():
                quotes[gsp][method][fuel] = Quote(**quote)
    return quotes
82ea906391b5e3d23a40619eefc19eaa353e18bc
19,909
def train_validation(train_df, valid_df, epochs=100, batch_size=512, plot=False, nn_args={}):
    """
    Wrapper for training on the complete training data and evaluating the
    performance on the hold-out set.

    Parameter:
    -------------------
    train_df: df, train df with features and
    valid_df: df, validation df with features

    Returns:
    -------------------
    res_df: metrics
    nnmodel: neural network model
    """
    # format the dataframe for ML
    X_train_full, Seqs_train_full, y_train_full = process_df(train_df)
    X_valid_full, Seqs_valid_full, y_valid_full = process_df(valid_df)

    # encode class values as integers
    encoder = LabelEncoder()
    encoder.fit(y_train_full)

    # output dims depending on the number of fractions
    output_dims = len(np.unique(train_df.Fraction))
    input_dims = X_train_full.shape[1]

    nnmodel = models.SAX_Model(output_dim=output_dims, input_dim=input_dims, **nn_args)
    print(nnmodel.summary())

    history = nnmodel.fit(np.array(X_train_full),
                          np_utils.to_categorical(encoder.transform(y_train_full)),
                          epochs=epochs, batch_size=batch_size)

    # fit the model to the complete training data
    yhat_train_prob = nnmodel.predict(np.array(X_train_full))
    yhat_train_disc = yhat_train_prob.argmax(axis=1) + 1

    yhat_val_prob = nnmodel.predict(np.array(X_valid_full))
    yhat_val_disc = yhat_val_prob.argmax(axis=1) + 1

    # evaluate
    res_train = pd.DataFrame(eval_predictions_complex(y_train_full, yhat_train_disc, "keras_Train"))
    res_valid = pd.DataFrame(eval_predictions_complex(y_valid_full, yhat_val_disc, "keras_Valid"))
    res_df = pd.concat([res_train.transpose(), res_valid.transpose()])
    res_df.columns = eval_predictions_complex(None, None, None, True)

    if plot:
        x = np.arange(-4, 30, 1)
        ax1 = sns.jointplot(x=y_valid_full, y=yhat_val_disc, kind="kde",
                            xlim=(-4, 30), ylim=(-4, 30))
        ax1.set_axis_labels(xlabel="True Fraction", ylabel="Prediction")
        ax1.ax_joint.plot(x, x, '-k')

    print("Results on the validation data:")
    print(res_df)
    return (res_df, nnmodel, history)
7dffa50d427c0e74fe4f6e6a8ff1e0198304de2a
19,910
import warnings
import inspect


def bootstrap_compute(
    hind,
    verif,
    hist=None,
    alignment="same_verifs",
    metric="pearson_r",
    comparison="m2e",
    dim="init",
    reference=["uninitialized", "persistence"],
    resample_dim="member",
    sig=95,
    iterations=500,
    pers_sig=None,
    compute=compute_hindcast,
    resample_uninit=bootstrap_uninitialized_ensemble,
    reference_compute=compute_persistence,
    **metric_kwargs,
):
    """Bootstrap compute with replacement.

    Args:
        hind (xr.Dataset): prediction ensemble.
        verif (xr.Dataset): Verification data.
        hist (xr.Dataset): historical/uninitialized simulation.
        metric (str): `metric`. Defaults to 'pearson_r'.
        comparison (str): `comparison`. Defaults to 'm2e'.
        dim (str or list): dimension(s) to apply metric over. default: 'init'.
        reference (str, list of str): Type of reference forecasts with which to verify.
            One or more of ['persistence', 'uninitialized'].
            If None or empty, returns no p value.
        resample_dim (str): dimension to resample from. default: 'member'::

            - 'member': select a different set of members from hind
            - 'init': select a different set of initializations from hind

        sig (int): Significance level for uninitialized and initialized skill.
            Defaults to 95.
        pers_sig (int): Significance level for persistence skill confidence levels.
            Defaults to sig.
        iterations (int): number of resampling iterations (bootstrap with replacement).
            Defaults to 500.
        compute (func): function to compute skill. Choose from
            [:py:func:`climpred.prediction.compute_perfect_model`,
            :py:func:`climpred.prediction.compute_hindcast`].
        resample_uninit (func): function to create an uninitialized ensemble from a
            control simulation or uninitialized large ensemble. Choose from:
            [:py:func:`bootstrap_uninitialized_ensemble`,
            :py:func:`bootstrap_uninit_pm_ensemble_from_control`].
        reference_compute (func): function to compute a reference forecast skill with.
            Default: :py:func:`climpred.prediction.compute_persistence`.
        ** metric_kwargs (dict): additional keywords to be passed to metric
            (see the arguments required for a given metric in :ref:`Metrics`).

    Returns:
        results: (xr.Dataset): bootstrapped results for the three different skills:

            - `initialized` for the initialized hindcast `hind` and describes skill due
              to initialization and external forcing
            - `uninitialized` for the uninitialized/historical and approximates skill
              from external forcing
            - `persistence` for the persistence forecast computed by
              `compute_persistence`

            the different results:

            - `verify skill`: skill values
            - `p`: p value
            - `low_ci` and `high_ci`: high and low ends of confidence intervals based
              on significance threshold `sig`

    Reference:
        * Goddard, L., A. Kumar, A. Solomon, D. Smith, G. Boer, P. Gonzalez,
          V. Kharin, et al. “A Verification Framework for
          Interannual-to-Decadal Predictions Experiments.” Climate Dynamics 40,
          no. 1–2 (January 1, 2013): 245–72. https://doi.org/10/f4jjvf.

    See also:
        * climpred.bootstrap.bootstrap_hindcast
        * climpred.bootstrap.bootstrap_perfect_model
    """
    warn_if_chunking_would_increase_performance(hind, crit_size_in_MB=5)
    if pers_sig is None:
        pers_sig = sig
    if isinstance(dim, str):
        dim = [dim]
    if isinstance(reference, str):
        reference = [reference]
    if reference is None:
        reference = []

    p = (100 - sig) / 100
    ci_low = p / 2
    ci_high = 1 - p / 2
    p_pers = (100 - pers_sig) / 100
    ci_low_pers = p_pers / 2
    ci_high_pers = 1 - p_pers / 2

    # get metric/comparison function name, not the alias
    metric = METRIC_ALIASES.get(metric, metric)
    comparison = COMPARISON_ALIASES.get(comparison, comparison)

    # get class Metric(metric)
    metric = get_metric_class(metric, ALL_METRICS)
    # get comparison function
    comparison = get_comparison_class(comparison, ALL_COMPARISONS)

    # Perfect Model requires `same_inits` setup
    isHindcast = True if comparison.name in HINDCAST_COMPARISONS else False
    reference_alignment = alignment if isHindcast else "same_inits"
    chunking_dims = [d for d in hind.dims if d not in CLIMPRED_DIMS]

    # carry alignment for compute_reference separately
    metric_kwargs_reference = metric_kwargs.copy()
    metric_kwargs_reference["alignment"] = reference_alignment
    # carry alignment in metric_kwargs
    if isHindcast:
        metric_kwargs["alignment"] = alignment

    if hist is None:  # PM path, use verif = control
        hist = verif

    # slower path for hindcast and resample_dim init
    if resample_dim == "init" and isHindcast:
        warnings.warn("resample_dim=`init` will be slower than resample_dim=`member`.")
        (
            bootstrapped_init_skill,
            bootstrapped_uninit_skill,
            bootstrapped_pers_skill,
        ) = _bootstrap_hindcast_over_init_dim(
            hind,
            hist,
            verif,
            dim,
            reference,
            resample_dim,
            iterations,
            metric,
            comparison,
            compute,
            reference_compute,
            resample_uninit,
            **metric_kwargs,
        )
    else:  # faster: first _resample_iterations_idx, then compute skill
        resample_func = _get_resample_func(hind)
        if not isHindcast:
            if "uninitialized" in reference:
                # create more members than needed in PM to make the uninitialized
                # distribution more robust
                members_to_sample_from = 50
                repeat = members_to_sample_from // hind.member.size + 1
                uninit_hind = xr.concat(
                    [resample_uninit(hind, hist) for i in range(repeat)],
                    dim="member",
                    **CONCAT_KWARGS,
                )
                uninit_hind["member"] = np.arange(1, 1 + uninit_hind.member.size)
                if dask.is_dask_collection(uninit_hind):
                    # too minimize tasks: ensure uninit_hind get pre-computed
                    # alternativly .chunk({'member':-1})
                    uninit_hind = uninit_hind.compute().chunk()
                # resample uninit always over member and select only hind.member.size
                bootstrapped_uninit = resample_func(
                    uninit_hind,
                    iterations,
                    "member",
                    replace=False,
                    dim_max=hind["member"].size,
                )
                bootstrapped_uninit["lead"] = hind["lead"]
                # effectively only when _resample_iteration_idx which doesnt use dim_max
                bootstrapped_uninit = bootstrapped_uninit.isel(
                    member=slice(None, hind.member.size)
                )
                if dask.is_dask_collection(bootstrapped_uninit):
                    bootstrapped_uninit = bootstrapped_uninit.chunk({"member": -1})
                    bootstrapped_uninit = _maybe_auto_chunk(
                        bootstrapped_uninit, ["iteration"] + chunking_dims
                    )
        else:  # hindcast
            if "uninitialized" in reference:
                uninit_hind = resample_uninit(hind, hist)
                if dask.is_dask_collection(uninit_hind):
                    # too minimize tasks: ensure uninit_hind get pre-computed
                    # maybe not needed
                    uninit_hind = uninit_hind.compute().chunk()
                bootstrapped_uninit = resample_func(uninit_hind, iterations, resample_dim)
                bootstrapped_uninit = bootstrapped_uninit.isel(
                    member=slice(None, hind.member.size)
                )
                bootstrapped_uninit["lead"] = hind["lead"]
                if dask.is_dask_collection(bootstrapped_uninit):
                    bootstrapped_uninit = _maybe_auto_chunk(
                        bootstrapped_uninit.chunk({"lead": 1}),
                        ["iteration"] + chunking_dims,
                    )

        if "uninitialized" in reference:
            bootstrapped_uninit_skill = compute(
                bootstrapped_uninit,
                verif,
                metric=metric,
                comparison="m2o" if isHindcast else comparison,
                dim=dim,
                add_attrs=False,
                **metric_kwargs,
            )
            # take mean if 'm2o' comparison forced before
            if isHindcast and comparison != __m2o:
                bootstrapped_uninit_skill = bootstrapped_uninit_skill.mean("member")

        bootstrapped_hind = resample_func(hind, iterations, resample_dim)
        if dask.is_dask_collection(bootstrapped_hind):
            bootstrapped_hind = bootstrapped_hind.chunk({"member": -1})

        bootstrapped_init_skill = compute(
            bootstrapped_hind,
            verif,
            metric=metric,
            comparison=comparison,
            add_attrs=False,
            dim=dim,
            **metric_kwargs,
        )

        if "persistence" in reference:
            if not metric.probabilistic:
                pers_skill = reference_compute(
                    hind,
                    verif,
                    metric=metric,
                    dim=dim,
                    **metric_kwargs_reference,
                )
                # bootstrap pers
                if resample_dim == "init":
                    bootstrapped_pers_skill = reference_compute(
                        bootstrapped_hind,
                        verif,
                        metric=metric,
                        **metric_kwargs_reference,
                    )
                else:  # member
                    _, bootstrapped_pers_skill = xr.broadcast(
                        bootstrapped_init_skill, pers_skill, exclude=CLIMPRED_DIMS
                    )
            else:
                bootstrapped_pers_skill = bootstrapped_init_skill.isnull()

    # calc mean skill without any resampling
    init_skill = compute(
        hind,
        verif,
        metric=metric,
        comparison=comparison,
        dim=dim,
        **metric_kwargs,
    )

    if "uninitialized" in reference:
        # uninit skill as mean resampled uninit skill
        uninit_skill = bootstrapped_uninit_skill.mean("iteration")
    if "persistence" in reference:
        if not metric.probabilistic:
            pers_skill = reference_compute(
                hind, verif, metric=metric, dim=dim, **metric_kwargs_reference
            )
        else:
            pers_skill = init_skill.isnull()
        # align to prepare for concat
        if set(bootstrapped_pers_skill.coords) != set(bootstrapped_init_skill.coords):
            if (
                "time" in bootstrapped_pers_skill.dims
                and "init" in bootstrapped_init_skill.dims
            ):
                bootstrapped_pers_skill = bootstrapped_pers_skill.rename(
                    {"time": "init"}
                )
            # allow member to be broadcasted
            bootstrapped_init_skill, bootstrapped_pers_skill = xr.broadcast(
                bootstrapped_init_skill,
                bootstrapped_pers_skill,
                exclude=("init", "lead", "time"),
            )

    # get confidence intervals CI
    init_ci = _distribution_to_ci(bootstrapped_init_skill, ci_low, ci_high)
    if "uninitialized" in reference:
        uninit_ci = _distribution_to_ci(bootstrapped_uninit_skill, ci_low, ci_high)

    # probabilistic metrics wont have persistence forecast
    # therefore only get CI if persistence was computed
    if "persistence" in reference:
        if "iteration" in bootstrapped_pers_skill.dims:
            pers_ci = _distribution_to_ci(
                bootstrapped_pers_skill, ci_low_pers, ci_high_pers
            )
        else:
            # otherwise set all persistence outputs to false
            pers_ci = init_ci == -999

    # pvalue whether uninit or pers better than init forecast
    if "uninitialized" in reference:
        p_uninit_over_init = _pvalue_from_distributions(
            bootstrapped_uninit_skill, bootstrapped_init_skill, metric=metric
        )
    if "persistence" in reference:
        p_pers_over_init = _pvalue_from_distributions(
            bootstrapped_pers_skill, bootstrapped_init_skill, metric=metric
        )

    # wrap results together in one xr object
    if reference == []:
        results = xr.concat(
            [
                init_skill,
                init_ci.isel(quantile=0, drop=True),
                init_ci.isel(quantile=1, drop=True),
            ],
            dim="results",
        )
        results["results"] = ["verify skill", "low_ci", "high_ci"]
        results["skill"] = ["initialized"]
        results = results.squeeze()

    elif reference == ["persistence"]:
        skill = xr.concat([init_skill, pers_skill], dim="skill", **CONCAT_KWARGS)
        skill["skill"] = ["initialized", "persistence"]

        # ci for each skill
        ci = xr.concat([init_ci, pers_ci], "skill", coords="minimal").rename(
            {"quantile": "results"}
        )
        ci["skill"] = ["initialized", "persistence"]

        results = xr.concat([skill, p_pers_over_init], dim="results", **CONCAT_KWARGS)
        results["results"] = ["verify skill", "p"]
        if set(results.coords) != set(ci.coords):
            res_drop = [c for c in results.coords if c not in ci.coords]
            ci_drop = [c for c in ci.coords if c not in results.coords]
            results = results.drop_vars(res_drop)
            ci = ci.drop_vars(ci_drop)
        results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS)
        results["results"] = ["verify skill", "p", "low_ci", "high_ci"]

    elif reference == ["uninitialized"]:
        skill = xr.concat([init_skill, uninit_skill], dim="skill", **CONCAT_KWARGS)
        skill["skill"] = ["initialized", "uninitialized"]

        # ci for each skill
        ci = xr.concat([init_ci, uninit_ci], "skill", coords="minimal").rename(
            {"quantile": "results"}
        )
        ci["skill"] = ["initialized", "uninitialized"]

        results = xr.concat([skill, p_uninit_over_init], dim="results", **CONCAT_KWARGS)
        results["results"] = ["verify skill", "p"]
        if set(results.coords) != set(ci.coords):
            res_drop = [c for c in results.coords if c not in ci.coords]
            ci_drop = [c for c in ci.coords if c not in results.coords]
            results = results.drop_vars(res_drop)
            ci = ci.drop_vars(ci_drop)
        results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS)
        results["results"] = ["verify skill", "p", "low_ci", "high_ci"]

    elif set(reference) == set(["uninitialized", "persistence"]):
        skill = xr.concat(
            [init_skill, uninit_skill, pers_skill], dim="skill", **CONCAT_KWARGS
        )
        skill["skill"] = ["initialized", "uninitialized", "persistence"]

        # probability that i beats init
        p = xr.concat(
            [p_uninit_over_init, p_pers_over_init], dim="skill", **CONCAT_KWARGS
        )
        p["skill"] = ["uninitialized", "persistence"]

        # ci for each skill
        ci = xr.concat([init_ci, uninit_ci, pers_ci], "skill", coords="minimal").rename(
            {"quantile": "results"}
        )
        ci["skill"] = ["initialized", "uninitialized", "persistence"]

        results = xr.concat([skill, p], dim="results", **CONCAT_KWARGS)
        results["results"] = ["verify skill", "p"]
        if set(results.coords) != set(ci.coords):
            res_drop = [c for c in results.coords if c not in ci.coords]
            ci_drop = [c for c in ci.coords if c not in results.coords]
            results = results.drop_vars(res_drop)
            ci = ci.drop_vars(ci_drop)
        results = xr.concat([results, ci], dim="results", **CONCAT_KWARGS)
        results["results"] = ["verify skill", "p", "low_ci", "high_ci"]
    else:
        raise ValueError("results not created")

    # Attach climpred compute information to skill
    metadata_dict = {
        "confidence_interval_levels": f"{ci_high}-{ci_low}",
        "bootstrap_iterations": iterations,
        "reference": reference,
    }
    if reference is not None:
        metadata_dict[
            "p"
        ] = "probability that reference performs better than initialized"
    metadata_dict.update(metric_kwargs)
    results = assign_attrs(
        results,
        hind,
        alignment=alignment,
        metric=metric,
        comparison=comparison,
        dim=dim,
        function_name=inspect.stack()[0][3],  # take function.__name__
        metadata_dict=metadata_dict,
    )

    # Ensure that the lead units get carried along for the calculation. The attribute
    # tends to get dropped along the way due to ``xarray`` functionality.
    results["lead"] = hind["lead"]
    if "units" in hind["lead"].attrs and "units" not in results["lead"].attrs:
        results["lead"].attrs["units"] = hind["lead"].attrs["units"]
    return results
1a419d129419d15276f21f6f09bbd613a8e662da
19,912
def description_for_number(numobj, lang, script=None, region=None):
    """Return a text description of a PhoneNumber object for the given language.

    The description might consist of the name of the country where the phone
    number is from and/or the name of the geographical area the phone number
    is from. This function explicitly checks the validity of the number passed in

    Arguments:
    numobj -- The PhoneNumber object for which we want to get a text description.
    lang -- A 2-letter lowercase ISO 639-1 language code for the language in
              which the description should be returned (e.g. "en")
    script -- A 4-letter titlecase (first letter uppercase, rest lowercase) ISO
              script code as defined in ISO 15924, separated by an underscore
              (e.g. "Hant")
    region -- A 2-letter uppercase ISO 3166-1 country code (e.g. "GB")

    Returns a text description in the given language code, for the given phone
    number, or an empty string if no description is available."""
    ntype = number_type(numobj)
    if ntype == PhoneNumberType.UNKNOWN:
        return ""
    elif not is_number_type_geographical(ntype, numobj.country_code):
        return country_name_for_number(numobj, lang, script, region)
    return description_for_valid_number(numobj, lang, script, region)
d67d53528c99c8b3ce6323c7e4eb5170603660c1
19,913
def _construct_new_particles(samples, old_particles):
    """Construct new array of particles given the drawing results over the old particles.

    Args:
    + *samples* (np.ndarray): NxM array that contains the drawing results, where N
      is number of observations and M number of particles.
    + *old_particles* (np.ndarray): 3xNxM array that stores old particles.

    Returns:
    + new particles (np.ndarray): 3xNxM array of newly assembled particles (for each
      observation, there will be repeated particles).
    """
    N, M = samples.shape
    ret_arr = 5*np.ones((3,N,M))
    m_outer = np.zeros(N)
    while 0 < np.amax(samples):
        indices = np.nonzero(samples)
        last_n = -1
        for i, n in enumerate(indices[0]):
            if last_n < n:
                if last_n >= 0:
                    m_outer[last_n] += m_inner
                m_inner = 0
            ret_arr[:,n,int(m_outer[n]+m_inner)] = old_particles[:,n, indices[1][i]]
            m_inner += 1
            last_n = n
        m_outer[last_n] += m_inner
        samples[indices] -= 1
    return ret_arr
ec511554074f637466d47d24c449eda8a263100e
19,914
def trunc(x, y, w, h):
    """Truncates x and y coordinates to live in the (0, 0) to (w, h)

    Args:
        x: the x-coordinate of a point
        y: the y-coordinate of a point
        w: the width of the truncation box
        h: the height of the truncation box.
    """
    return min(max(x, 0), w - 1), min(max(y, 0), h - 1)
3edecdfbd9baf24f8b4f3f71b9e35a222c6be1ea
19,915
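Illustrative usage of trunc above (invented box size):
trunc(700, -5, 640, 480)   # -> (639, 0): both coordinates clamped into the box
trunc(10, 20, 640, 480)    # -> (10, 20): already inside, returned unchanged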
def exact_match(true_labels, predicts):
    """ exact_match

    This is the most strict metric for the multi label setting. It's defined
    as the percentage of samples that have all their labels correctly classified.

    Parameters
    ----------
    true_labels: numpy.ndarray of shape (n_samples, n_target_tasks)
        A matrix with the true labels for all the classification tasks and for n_samples.

    predicts: numpy.ndarray of shape (n_samples, n_target_tasks)
        A matrix with the predictions for all the classification tasks and for n_samples.

    Returns
    -------
    float
        The exact match percentage between the given sets.

    Examples
    --------
    >>> from skmultiflow.evaluation.metrics.metrics import exact_match
    >>> true_labels = [[0,1,0,1],[0,0,0,1],[1,1,0,1],[1,1,1,1]]
    >>> predictions = [[0,1,0,1],[0,1,1,0],[0,1,0,1],[1,1,1,1]]
    >>> exact_match(true_labels, predictions)
    0.5
    """
    if not hasattr(true_labels, 'shape'):
        true_labels = np.asarray(true_labels)
    if not hasattr(predicts, 'shape'):
        predicts = np.asarray(predicts)
    N, L = true_labels.shape
    return np.sum(np.sum((true_labels == predicts) * 1, axis=1) == L) * 1. / N
ebcc1d6ce96ff8b5933e16ce69f5e143e371bf28
19,917
def dimred3(dat):
    """convenience function to dimensionally reduce input data, each row being
    an element in some vector space, to dimension 3 using PCA calculated by the SVD"""
    return dimred(dat, 3)
5151ee8bb0e8bcfe6dbb1633d95e9b355714ae35
19,918
def render_orchestrator_registrations(
    driver: Driver = None,
    collab_id: str = None,
    project_id: str = None
):
    """ Renders out retrieved registration metadata in a custom form

    Args:
        driver (Driver): A connected Synergos driver to communicate with the
            selected orchestrator.
        collab_id (str): ID of selected collaboration to be rendered
        project_id (str): ID of selected project to be rendered
    """
    # Type 1 view: Orchestrator's Perspective
    if driver and collab_id and project_id:
        registry_data = driver.registrations.read_all(
            collab_id=collab_id,
            project_id=project_id
        ).get('data', [])
        participant_ids = [reg['key']['participant_id'] for reg in registry_data]

    # Type 2 view: Insufficient keys -> Render nothing
    else:
        registry_data = []
        participant_ids = []

    selected_participant_id = st.selectbox(
        label="Participant ID:",
        options=participant_ids,
        help="""Select an participant to view."""
    )

    if registry_data:
        selected_registry = [
            reg for reg in registry_data
            if reg['key']['participant_id'] == selected_participant_id
        ].pop()
    else:
        selected_registry = {}

    with st.beta_container():
        render_participant(driver=driver, participant_id=selected_participant_id)

    with st.beta_expander("Registration Details"):
        reg_renderer.display(selected_registry)

    with st.beta_expander("Tag Details"):
        tags = selected_registry.get('relations', {}).get('Tag', [])
        tag_details = tags.pop() if tags else {}
        tag_renderer.display(tag_details)

    with st.beta_expander("Alignment Details"):
        alignments = selected_registry.get('relations', {}).get('Alignment', [])
        alignment_details = alignments.pop() if alignments else {}
        align_renderer.display(alignment_details)

    return selected_participant_id
14a84029ff20a09d2c8c6e41007827f96fb35f60
19,919
def check_nan(data, new_data):
    """checks if nan values are conserved """
    old = np.isnan(data)
    new = np.isnan(new_data)
    if np.all(new == old):
        return True
    else:
        return False
d1dafaadd6e37848aa147b6714cce74f6097e074
19,920
def Var(poly, dist=None, **kws):
    """
    Element by element 2nd order statistics.

    Args:
        poly (chaospy.poly.ndpoly, Dist):
            Input to take variance on.
        dist (Dist):
            Defines the space the variance is taken on. It is ignored if
            ``poly`` is a distribution.

    Returns:
        (numpy.ndarray):
            Element for element variance along ``poly``, where
            ``variation.shape == poly.shape``.

    Examples:
        >>> dist = chaospy.J(chaospy.Gamma(1, 1), chaospy.Normal(0, 2))
        >>> chaospy.Var(dist)
        array([1., 4.])
        >>> x, y = chaospy.variable(2)
        >>> poly = chaospy.polynomial([1, x, y, 10*x*y])
        >>> chaospy.Var(poly, dist)
        array([ 0., 1., 4., 800.])
    """
    if dist is None:
        dist, poly = poly, polynomials.variable(len(poly))
    poly = polynomials.setdim(poly, len(dist))
    if not poly.isconstant:
        return poly.tonumpy()**2
    poly = poly - E(poly, dist, **kws)
    poly = polynomials.square(poly)
    return E(poly, dist, **kws)
6ec5e867ed7287f90584e0c134d29b8daf4f9b9c
19,921
def op_par_loop_parse(text):
    """Parsing for op_par_loop calls"""

    loop_args = []

    search = "op_par_loop"
    i = text.find(search)
    while i > -1:
        arg_string = text[text.find('(', i) + 1:text.find(';', i + 11)]

        # parse arguments in par loop
        temp_args = []
        num_args = 0

        # parse each op_arg_dat
        search2 = "op_arg_dat"
        search3 = "op_arg_gbl"
        search4 = "op_opt_arg_dat"
        j = arg_string.find(search2)
        k = arg_string.find(search3)
        l = arg_string.find(search4)

        while j > -1 or k > -1 or l > -1:
            index = min(j if (j > -1) else sys.maxint,
                        k if (k > -1) else sys.maxint,
                        l if (l > -1) else sys.maxint)

            if index == j:
                temp_dat = get_arg_dat(arg_string, j)
                # append this struct to a temporary list/array
                temp_args.append(temp_dat)
                num_args = num_args + 1
                j = arg_string.find(search2, j + 11)
            elif index == k:
                temp_gbl = get_arg_gbl(arg_string, k)
                # append this struct to a temporary list/array
                temp_args.append(temp_gbl)
                num_args = num_args + 1
                k = arg_string.find(search3, k + 11)
            elif index == l:
                temp_dat = get_opt_arg_dat(arg_string, l)
                # append this struct to a temporary list/array
                temp_args.append(temp_dat)
                num_args = num_args + 1
                l = arg_string.find(search4, l + 15)

        temp = {'loc': i,
                'name1': arg_string.split(',')[0].strip(),
                'name2': arg_string.split(',')[1].strip(),
                'set': arg_string.split(',')[2].strip(),
                'args': temp_args,
                'nargs': num_args}

        loop_args.append(temp)
        i = text.find(search, i + 10)

    print '\n\n'
    return (loop_args)
826bb5cd58e4b34846419fc47977caa73fd5573c
19,922
def bin_to_hex(bin_str: str) -> str:
    """Convert a binary string to a hex string.

    The returned hex string will contain the prefix '0x' only if given a
    binary string with the prefix '0b'.

    Args:
        bin_str (str): Binary string (e.g. '0b1001')

    Returns:
        str: Hexadecimal string zero-padded to len(bin_str) // 4

    Example:
        >>> bin_str = '0b1010101111001101'
        >>> bin_to_hex(bin_str)
        '0xabcd'
        >>> bin_to_hex(bin_str[2:])  # remove '0b'
        'abcd'
    """
    if not isinstance(bin_str, str):
        raise TypeError(f'Expecting type str. given {bin_str.__class__.__name__}.')
    literal = '0x' if bin_str[:2].lower() == '0b' else ''
    num_nibbles = len(bin_str) // BITS_PER_NIBBLE
    bin_str = bin_str[:num_nibbles * BITS_PER_NIBBLE]  # truncate to whole number of nibbles
    return literal + hex(int(bin_str, 2))[2:].zfill(num_nibbles)
0f44311a600a7b5eac52d3716db4b116302c97ac
19,923
def exp_value_interpolate_bp(prod_inst, util_opti,
                             b_ssv_sd, k_ssv_sd, epsilon_ssv_sd,
                             b_ssv, k_ssv, epsilon_ssv,
                             b_ssv_zr, k_ssv_zr, epsilon_ssv_zr,
                             states_vfi_dim, shocks_vfi_dim):
    """interpolate value function and expected value function.

    Need three matrix here:
    1. state matrix x shock matrix where optimal choices were solved at
        - previously, shock for this = 0, but now shock vector might not be zero
    2. state matrix x shock matrix where shocks are drawn monte carlo way to allow
       for averaging, integrating over shocks for each x row
    3. state matrix alone, shock = 0, each of the x row in matrix x
    """

    'A Get States to Integrate over'
    k_alpha_ae_sd, b_ssv_sd, \
        k_alpha_ae, b_ssv, \
        k_alpha_ae_zr, b_ssv_zr = \
        inter_states_bp(prod_inst, util_opti,
                        b_ssv_sd, k_ssv_sd, epsilon_ssv_sd,
                        b_ssv, k_ssv, epsilon_ssv,
                        b_ssv_zr, k_ssv_zr, epsilon_ssv_zr,
                        states_vfi_dim, shocks_vfi_dim)

    'B. invoke'
    util_emax = \
        exp_value_interpolate_main(u1=util_opti,
                                   x1=k_alpha_ae_sd, y1=b_ssv_sd,
                                   x2=k_alpha_ae, y2=b_ssv,
                                   x2_noshk=k_alpha_ae_zr, y2_noshk=b_ssv_zr,
                                   states_dim=states_vfi_dim,
                                   shocks_dim=shocks_vfi_dim,
                                   return_uxy=False)

    'C. collect'
    interpolant_exp_v = {'evu': util_emax,
                         'kae': k_alpha_ae_zr,
                         'b': b_ssv_zr}

    return interpolant_exp_v
e8d698834186efa779bbd81b042e9cf4caa1276a
19,924
from typing import Union
from typing import List


def remove_non_protein(
    molecule: oechem.OEGraphMol,
    exceptions: Union[None, List[str]] = None,
    remove_water: bool = False,
) -> oechem.OEGraphMol:
    """
    Remove non-protein atoms from an OpenEye molecule.

    Parameters
    ----------
    molecule: oechem.OEGraphMol
        An OpenEye molecule holding a molecular structure.
    exceptions: None or list of str
        Exceptions that should not be removed.
    remove_water: bool
        If water should be removed.

    Returns
    -------
    selection: oechem.OEGraphMol
        An OpenEye molecule holding the filtered structure.
    """
    if exceptions is None:
        exceptions = []
    if remove_water is False:
        exceptions.append("HOH")

    # do not change input mol
    selection = molecule.CreateCopy()

    for atom in selection.GetAtoms():
        residue = oechem.OEAtomGetResidue(atom)
        if residue.IsHetAtom():
            if residue.GetName() not in exceptions:
                selection.DeleteAtom(atom)

    return selection
6afa4df25cbcf504b2ac06325a3e89291e9a0e4f
19,925
import json


def configure_connection(instance, name='eventstreams', credentials=None):
    """Configures IBM Streams for a certain connection.

    Creates an application configuration object containing the required
    properties with connection information.

    Example for creating a configuration for a Streams instance with
    connection details::

        from icpd_core import icpd_util
        from streamsx.rest_primitives import Instance
        import streamsx.eventstreams as es

        cfg = icpd_util.get_service_instance_details(name='your-streams-instance')
        cfg[streamsx.topology.context.ConfigParams.SSL_VERIFY] = False
        instance = Instance.of_service(cfg)
        app_cfg = es.configure_connection(instance, credentials='my_crdentials_json')

    Args:
        instance(streamsx.rest_primitives.Instance): IBM Streams instance object.
        name(str): Name of the application configuration, default name is 'eventstreams'.
        credentials(str|dict): The service credentials for Eventstreams.

    Returns:
        Name of the application configuration.

    .. warning:: The function can be used only in IBM Cloud Pak for Data.

    .. versionadded:: 1.1
    """
    description = 'Eventstreams credentials'
    properties = {}
    if credentials is None:
        raise TypeError(credentials)

    if isinstance(credentials, dict):
        properties['eventstreams.creds'] = json.dumps(credentials)
    else:
        properties['eventstreams.creds'] = credentials

    # check if application configuration exists
    app_config = instance.get_application_configurations(name=name)
    if app_config:
        print('update application configuration: ' + name)
        app_config[0].update(properties)
    else:
        print('create application configuration: ' + name)
        instance.create_application_configuration(name, properties, description)
    return name
5f263af94590e7237e27dc90f2e502b952d010fc
19,926
def setup(app):
    """
    Any time a python class is referenced, make it a pretty link that doesn't
    include the full package path. This makes the base classes much prettier.
    """
    app.add_role_to_domain("py", "class", truncate_class_role)
    return {"parallel_read_safe": True}
69660fd86216dfe0a5642b0885dbdb0704ce8ffc
19,927
def transects_to_gdf(transects):
    """
    Saves the shore-normal transects as a gpd.GeoDataFrame

    KV WRL 2018

    Arguments:
    -----------
    transects: dict
        contains the coordinates of the transects

    Returns:
    -----------
    gdf_all: gpd.GeoDataFrame
    """
    # loop through the mapped shorelines
    for i, key in enumerate(list(transects.keys())):
        # save the geometry + attributes
        geom = geometry.LineString(transects[key])
        gdf = gpd.GeoDataFrame(geometry=gpd.GeoSeries(geom))
        gdf.index = [i]
        gdf.loc[i, 'name'] = key
        # store into geodataframe
        if i == 0:
            gdf_all = gdf
        else:
            gdf_all = gdf_all.append(gdf)

    return gdf_all
a2e1c517a7d4d86618a08da07459686fa947d597
19,928
def deduce_final_configuration(fetched_config):
    """
    Fills some variables in configuration based on those already extracted.

    Args:
        fetched_config (dict): Configuration variables extracted from a living environment.

    Returns:
        dict: Final configuration from live environment.
    """
    final_config = fetched_config.copy()
    final_config[THRIFT_SERVER_URL] = _get_thrift_server_url(final_config)
    final_config[HIVE_SERVER_URL] = _get_hive_server_url(final_config)
    return final_config
30d21a8eb0bd1d282dbd127551e55fc7061e82ed
19,929
def total_benchmark_return_nb(benchmark_value: tp.Array2d) -> tp.Array1d:
    """Get total market return per column/group."""
    out = np.empty(benchmark_value.shape[1], dtype=np.float_)
    for col in range(benchmark_value.shape[1]):
        out[col] = returns_nb.get_return_nb(benchmark_value[0, col], benchmark_value[-1, col])
    return out
74d2924031dc4f0251b346555bc473d8d225453d
19,930
def young_modulus(data):
    """
    Given a stress-strain dataset, returns Young's Modulus.
    """
    yielding = yield_stress(data)[0]

    """Finds the yield index"""
    yield_index = 0
    for index, point in enumerate(data):
        if (point == yielding).all():
            yield_index = index
            break

    """Finds data in elastic region"""
    elastic = data[:yield_index+1]

    """
    Finds the upper yield point (lower yield point is the *yielding* variable).
    We're taking the first element ([0]) because it returns the first element
    that meets the criteria in parentheses. It's a two-dimensional array so we
    have to do this twice.
    """
    upperyieldpoint_index = np.where(elastic == max(elastic[:,1]))[0][0]
    upperyieldpoint = elastic[upperyieldpoint_index]

    """We estimate the region until the first upper yield point with a linear model"""
    lin_elastic_region = elastic[:upperyieldpoint_index+1]

    """The slope of this region is Young's Modulus"""
    return (lin_elastic_region[-1,1]-lin_elastic_region[0,1])/(lin_elastic_region[-1,0]-lin_elastic_region[0,0])
f41d4c358ae58760055d72e0364a3f79b7258512
19,931
def generateODTableDf(database: pd.DataFrame, save: bool = True) -> pd.DataFrame:
    """Generate the dataset for the OD (origin-destination) table of each section.

    Args:
        database (pd.DataFrame): The initialized raw dataset.
        save (bool, optional): Whether to additionally save the result as a csv file. Defaults to True.

    Returns:
        pd.DataFrame: The dataset for the OD table of each section.
    """
    table4OD: np.ndarray = fetchTable4OD(database, originStations)
    df4OD: pd.DataFrame = pd.DataFrame(
        table4OD, columns=originStations, index=originStations
    )
    if save:
        df4OD.to_csv(SEPERATOR.join([".", "result", "raw", "OD表.csv"]))
    return df4OD
9660d1c604e0f3514bb9a168ef91b3f29d7ba8b9
19,932
import typing


def check_datatype(many: bool):
    """Checks if data/filter to be inserted is a dictionary"""
    def wrapper(func):
        def inner_wrapper(self, _filter={}, _data=None, **kwargs):
            if _data is None:
                # statements without two args - find, insert etc
                if many:
                    # statements that expect a list of dictionaries: insert_many
                    if isinstance(_filter, typing.Sequence):
                        return func(self, _filter, **kwargs)
                    else:
                        raise TypeError("Unexpected Datatype.")
                if isinstance(_filter, dict):
                    return func(self, _filter, **kwargs)
                else:
                    raise TypeError("Unexpected Datatype.")
            else:
                # update statements
                if isinstance(_filter, dict) and isinstance(_data, dict):
                    return func(self, _filter, _data, **kwargs)
                else:
                    raise TypeError("Unexpected Datatype.")
        return inner_wrapper
    return wrapper
c5300507936db04b2ae5e4190421cc354f6ac2d4
19,933
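Illustrative usage of the check_datatype decorator above (hypothetical DAO-style class; method bodies omitted):
class Collection:
    @check_datatype(False)
    def find(self, _filter={}, **kwargs):
        ...                      # expects a single dict filter

    @check_datatype(True)
    def insert_many(self, _filter=[], **kwargs):
        ...                      # expects a sequence of documents

    @check_datatype(False)
    def update(self, _filter={}, _data=None, **kwargs):
        ...                      # expects two dicts: filter and new data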
def login():
    """Login Page"""
    if request.cookies.get('user_id') and request.cookies.get('username'):
        session['user_id'] = request.cookies.get('user_id')
        session['username'] = request.cookies.get('username')
        update_last_login(session['user_id'])
        return render_template('main/index.html', username=session['username'])

    login_form = LoginForm()
    if login_form.validate_on_submit():
        username = request.form['username']
        password = (request.form['password'])
        user_id = check_user_exist(username, password)
        if user_id:
            response = login_user(user_id, username)
            return response
        else:
            flash('Username/Password Incorrect!')
    return render_template('auth/login.html', form=login_form)
e8f02d520c5913e8d8d2c99d1a98b9c546a9a220
19,934
def _get_index_train_test_path(split_num, train=True):
    """
    Method to generate the path containing the training/test split for the given
    split number (generally from 1 to 20).

    @param split_num      Split number for which the data has to be generated
    @param train          Is true if the data is training data. Else false.

    @return path          Path of the file containing the required data
    """
    if train:
        return _DATA_DIRECTORY_PATH + "index_train_" + str(split_num) + ".txt"
    else:
        return _DATA_DIRECTORY_PATH + "index_test_" + str(split_num) + ".txt"
201ac816085211b1f6500e2b84d5e9b293dd8c2e
19,935
def mock_socket() -> MagicMock:
    """A mock websocket."""
    return MagicMock(spec=WebSocket)
a3b8e53d2c929566e2bc9419cfb1d56ca3f25032
19,936
def gen_device(dtype, ip, mac, desc, cloud):
    """Convenience function that generates devices based on their type."""
    devices = {
        # sp1: [0],
        sp2: [
            0x2711,  # SP2
            0x2719, 0x7919, 0x271A, 0x791A,  # Honeywell SP2
            0x2720,  # SPMini
            0x753E,  # SP3
            0x7D00,  # OEM branded SP3
            0x947A, 0x9479,  # SP3S
            0x2728,  # SPMini2
            0x2733, 0x273E,  # OEM branded SPMini
            0x7530, 0x7546, 0x7918,  # OEM branded SPMini2
            0x7D0D,  # TMall OEM SPMini3
            0x2736,  # SPMiniPlus
        ],
        rm: [
            0x2712,  # RM2
            0x2737,  # RM Mini
            0x273D,  # RM Pro Phicomm
            0x2783,  # RM2 Home Plus
            0x277C,  # RM2 Home Plus GDT
            0x278F,  # RM Mini Shate
            0x27C2,  # RM Mini 3
            0x27D1,  # new RM Mini3
            0x27DE,  # RM Mini 3 (C)
        ],
        rm4: [
            0x51DA,  # RM4 Mini
            0x5F36,  # RM Mini 3
            0x6070,  # RM4c Mini
            0x610E,  # RM4 Mini
            0x610F,  # RM4c
            0x62BC,  # RM4 Mini
            0x62BE,  # RM4c
            0x6364,  # RM4S
            0x648D,  # RM4 mini
            0x6539,  # RM4c Mini
            0x653A,  # RM4 mini
        ],
        rmp: [
            0x272A,  # RM2 Pro Plus
            0x2787,  # RM2 Pro Plus2
            0x279D,  # RM2 Pro Plus3
            0x27A9,  # RM2 Pro Plus_300
            0x278B,  # RM2 Pro Plus BL
            0x2797,  # RM2 Pro Plus HYC
            0x27A1,  # RM2 Pro Plus R1
            0x27A6,  # RM2 Pro PP
        ],
        rm4p: [
            0x6026,  # RM4 Pro
            0x61A2,  # RM4 pro
            0x649B,  # RM4 pro
            0x653C,  # RM4 pro
        ],
        a1: [0x2714],  # A1
        mp1: [
            0x4EB5,  # MP1
            0x4EF7,  # Honyar oem mp1
            0x4F1B,  # MP1-1K3S2U
            0x4F65,  # MP1-1K3S2U
        ],
        # hysen: [0x4EAD],  # Hysen controller
        # S1C: [0x2722],  # S1 (SmartOne Alarm Kit)
        # dooya: [0x4E4D]  # Dooya DT360E (DOOYA_CURTAIN_V2)
    }

    # Look for the class associated to devtype in devices
    [device_class] = [dev for dev in devices if dtype in devices[dev]] or [None]
    if device_class is None:
        print("Unknow device type 0x%x" % dtype)
        return BroadlinkDevice(dtype, name=desc, cloud=cloud)
    return device_class(ip=ip, mac=mac, devtype=dtype, name=desc, cloud=cloud)
07c9ff4ee594bf0c94aa95efc05f63306811e996
19,938
def parse_multi_id_graph(graph, ids):
    """
    Parse a graph with 1 to 3 ids and return
    individual graphs with their own braced IDs.
    """
    new_graphs = ''
    LEVEL_STATE.next_token = ids[0]
    pid1 = LEVEL_STATE.next_id()
    split1 = graph.partition('({})'.format(ids[1]))
    text1 = combine_bolds(split1[0])
    pid2_marker = split1[1]
    remainder = bold_first_italics(split1[2])
    new_graphs += "\n{" + pid1 + "}\n"
    new_graphs += text1 + '\n'
    LEVEL_STATE.next_token = ids[1]
    pid2 = LEVEL_STATE.next_id()
    new_graphs += "\n{" + pid2 + "}\n"
    if len(ids) == 2:
        text2 = combine_bolds(" ".join([pid2_marker, remainder]))
        new_graphs += text2 + '\n'
        return new_graphs
    else:
        split2 = remainder.partition('({})'.format(ids[2]))
        pid3_marker = split2[1]
        remainder2 = bold_first_italics(split2[2])
        text2 = combine_bolds(" ".join([pid2_marker, split2[0]]))
        new_graphs += text2 + '\n'
        LEVEL_STATE.next_token = ids[2]
        pid3 = LEVEL_STATE.next_id()
        new_graphs += "\n{" + pid3 + "}\n"
        text3 = combine_bolds(" ".join([pid3_marker, remainder2]))
        new_graphs += text3 + '\n'
        return new_graphs
3dc693e359573ec1a2e71400856e0383653a5533
19,941
def param_to_secopt(param):
    """Convert a parameter name to INI section and option.
    Split on the first dot. If no dot exists, return name
    as option, and None for section."""
    sep = '.'
    sep_loc = param.find(sep)
    if sep_loc == -1:
        # no dot in name, skip it
        section = None
        option = param
    else:
        section = param[0:sep_loc]
        option = param[sep_loc+1:]
    return (section, option)
7d7e2b03cb67ed26d184f85f0328236674fa6497
19,945
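Illustrative usage of param_to_secopt above:
param_to_secopt('database.host')   # -> ('database', 'host')
param_to_secopt('debug')           # -> (None, 'debug')
param_to_secopt('a.b.c')           # -> ('a', 'b.c')  (split on the first dot only)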
from typing import List
from typing import Dict
import json


def load_contracts(
    web3: web3.Web3, contracts_file: str, contracts_names: List[str]
) -> Dict[str, web3.contract.Contract]:
    """
    Given a list of contract names, returns a dict of contract names and contracts.
    """
    res = {}
    with open(contracts_file) as infile:
        source_json = json.load(infile)
    for contract_name in contracts_names:
        try:
            res[contract_name] = web3.eth.contract(
                address=source_json[contract_name]["address"],
                abi=source_json[contract_name]["abi"]
            )
        except (KeyError, InvalidAddress) as ex:
            raise ex
    return res
6f6c47c5742de0c61eddacfd9358b6d86eefb525
19,946
import logging


def removecandidate(_id=''):
    """ Remove a candidate from the candidate list
        Use with the lexicon's identifiers
        /removecandidate?identifier=katt..nn.1
    """
    lexicon = request.args.get('lexicon', C.config['default'])
    lexconf = lexconfig.get_lexiconconf(lexicon)
    try:
        identifier = request.args.get('identifier', '')
        # ask karp for the identifier
        q = 'extended||and|%s.search|equals|%s' % ('identifier', identifier)
        res = helpers.karp_query('query', query={'q': q},
                                 mode=lexconf['candidateMode'],
                                 resource=lexconf['candidatelexiconName'])
        _id = helpers.es_first_id(res)
    except Exception as e1:
        logging.error(e1)
        raise e.MflException("Could not find candidate %s" % identifier,
                             code="unknown_candidate")
    # delete it
    ans = helpers.karp_delete(_id, lexconf['candidatelexiconName'])
    return jsonify({"deleted": ans})
55d9cfede364a35e44cf44b597653de598867d55
19,947
def svn_log_entry_dup(*args):
    """svn_log_entry_dup(svn_log_entry_t log_entry, apr_pool_t pool) -> svn_log_entry_t"""
    return _core.svn_log_entry_dup(*args)
223e7aa1dbf890eae3c5aa86a08cac287ce796c8
19,949
from typing import IO
from io import StringIO


def input_stream() -> IO:
    """Input stream fixture."""
    return StringIO(
        """mask = XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X
mem[8] = 11
mem[7] = 101
mem[8] = 0"""
    )
6e9e65754478b0ff69d220f683140675d3f6efdc
19,950
def get_lr_scheduler(optimizer: Optimizer, cfg: CfgNode, start_epoch: int = 0):
    """Returns LR scheduler module"""
    # Get mode
    if cfg.TRAIN.LOSS.TYPE in ["categorical_crossentropy", "focal_loss"]:
        mode = "min"
    else:
        raise NotImplementedError

    if cfg.TRAIN.SCHEDULER.TYPE == "ReduceLROnPlateau":
        scheduler = ReduceLROnPlateau(
            optimizer,
            mode,
            factor=cfg.TRAIN.SCHEDULER.FACTOR,
            patience=cfg.TRAIN.SCHEDULER.PATIENCE,
            verbose=True,
        )
    elif cfg.TRAIN.SCHEDULER.TYPE == "StepLR":
        scheduler = StepLR(
            optimizer,
            step_size=cfg.TRAIN.SCHEDULER.PATIENCE,
            gamma=cfg.TRAIN.SCHEDULER.FACTOR,
            last_epoch=start_epoch - 1,
        )
    elif cfg.TRAIN.SCHEDULER.TYPE == "None":
        scheduler = None
    else:
        raise NotImplementedError

    logger.info(f"Used scheduler: {scheduler}")
    return scheduler
59bbb672ac74fcc0331e5cba5bd722ce41049a5d
19,951
async def create_and_open_pool(pool_name, pool_genesis_txn_file):
    """
    Creates a new local pool ledger configuration.
    Then open that pool and return the pool handle that can be used later
    to connect pool nodes.

    :param pool_name: Name of the pool ledger configuration.
    :param pool_genesis_txn_file: Pool configuration json. if NULL, then
                                  default config will be used.
    :return: The pool handle was created.
    """
    utils.print_header("\nCreate Ledger\n")
    await create_pool_ledger_config(pool_name, pool_genesis_txn_file)

    utils.print_header("\nOpen pool ledger\n")
    pool_handle = await pool.open_pool_ledger(pool_name, None)

    return pool_handle
eb05893870ff1b8928391c0e748d21b9eb8aef66
19,952
def rotate_char(c, n):
    """Rotate a single character n places in the alphabet

    n is an integer
    """
    # alpha_number and new_alpha_number will represent the
    # place in the alphabet (as distinct from the ASCII code)
    # So alpha_number('a')==0
    # alpha_base is the ASCII code for the first letter of the
    # alphabet (different for upper and lower case)
    if c.islower():
        alpha_base = ord('a')
    elif c.isupper():
        alpha_base = ord('A')
    else:
        # Don't rotate character if it's not a letter
        return c
    # Position in alphabet, starting with a=0
    alpha_number = ord(c) - alpha_base
    # New position in alphabet after shifting
    # The % 26 at the end is for modulo 26, so if we shift it
    # past z (or a to the left) it'll wrap around
    new_alpha_number = (alpha_number + n) % 26

    # Add the new position in the alphabet to the base ASCII code for
    # 'a' or 'A' to get the new ASCII code, and use chr() to convert
    # that code back to a letter
    return chr(alpha_base + new_alpha_number)
b1259722c7fb2a60bd943e86d87163866432539f
19,953
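Illustrative usage of rotate_char above:
rotate_char('a', 2)    # -> 'c'
rotate_char('Z', 1)    # -> 'A'  (wraps around the alphabet)
rotate_char('y', 3)    # -> 'b'
rotate_char('!', 5)    # -> '!'  (non-letters pass through unchanged)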
def subset_language(vocabulary, vectors, wordlist, N=32768):
    """
    Subset the vocabulary/vectors to those in a wordlist.
    The wordlist is a list arranged in order of 'preference'.
    Note: we hope the vocabulary is contained in the wordlist, but it might not be.
    N is the number of words we require.
    If the wordlist contains fewer than N words, (but the vocabulary has >= N),
    we supplement the result from the vocabulary randomly.
    Also, we want to make sure the order of vocabulary is random (because some
    structure could negatively influence the optimisation procedure later).
    """
    keep_indices = []       # indices of vocabulary/vectors to keep
    added = 0
    if type(wordlist) == str:
        # load from path
        print 'Loading wordlist from', wordlist
        wordlist = np.loadtxt(wordlist, dtype=str)
    else:
        assert type(wordlist) == list or type(wordlist) == np.ndarray
    print 'Subsetting vocabulary.'
    for word in wordlist:
        print word
        if added == N:
            break
        try:
            word_index = vocabulary.index(word)
            keep_indices.append(word_index)
            added += 1
        except ValueError:
            continue
    print 'Acquired', len(keep_indices), 'words.'
    miss = N - len(keep_indices)
    if miss > 0:
        print 'Supplementing with', miss, 'random words.'
        for i in xrange(miss):
            random_index = np.random.choice(len(vocabulary), 1)
            while random_index in keep_indices:
                random_index = np.random.choice(len(vocabulary), 1)
            keep_indices.append(random_index)
    print 'Shuffling.'
    # shuffle
    np.random.shuffle(keep_indices)
    # populate new arrays
    print 'Populating subsetted arrays.'
    vectors_subset = np.array([vectors[i] for i in keep_indices])
    vocabulary_subset = [vocabulary[i] for i in keep_indices]
    return vocabulary_subset, vectors_subset
17c5718134f25f1ef7b6ed8fb0086fc1f45d058b
19,954
def compile_subject(*, subject_id, date_of_birth, sex):
    """Compiles the NWB Subject object."""
    return Subject(subject_id=subject_id, date_of_birth=date_of_birth, sex=sex)
86fc69318cfac98f44b11fa4f4c2a47423da317d
19,955
from typing import Any


def circulation(**kwargs: Any) -> str:
    """Url to get :class:`~pymultimatic.model.component.Circulation` details."""
    return _CIRCULATION.format(**kwargs)
021d28b92cfac9a69723796d2ee29f53dc16039d
19,956
def split_parentheses(info):
    """
    make all strings inside parentheses a list

    :param s: a list of strings (called info)
    :return: info list without parentheses
    """
    # if we see the "(" sign, then we start adding stuff to a temp list
    # in case of ")" sign, we append the temp list to the new_info list
    # otherwise, just add the string to the new_info list
    new_info = []
    make_list = False
    current_list = []
    for idx in range(len(info)):
        if info[idx] == "(":
            make_list = True
        elif info[idx] == ")":
            make_list = False
            new_info.append(current_list)
            current_list = []
        else:
            if make_list:
                current_list.append(info[idx])
            else:
                new_info.append(info[idx])
    return new_info
37006936d52abe31e6d5e5d264440ab4950d874b
19,957
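Illustrative usage of split_parentheses above (invented token list):
split_parentheses(['a', '(', 'b', 'c', ')', 'd'])
# -> ['a', ['b', 'c'], 'd']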
from typing import Optional
from typing import Callable
import inspect


def event(
    name: Optional[str] = None, *, handler: bool = False
) -> Callable[[EventCallable], EventCallable]:
    """Create a new event using the signature of a decorated function.

    Events must be defined before handlers can be registered using before_event,
    on_event, after_event, or event_handler.

    :param handler: When True, the decorated function implementation is registered
        as an on event handler.
    """
    def decorator(fn: EventCallable) -> EventCallable:
        event_name = name if name else fn.__name__
        module = inspect.currentframe().f_back.f_locals.get("__module__", None)

        if handler:
            # If the method body is a handler, pass the signature directly into `create_event`
            # as we are going to pass the method body into `on_event`
            signature = inspect.Signature.from_callable(fn)
            create_event(event_name, signature, module=module)
        else:
            create_event(event_name, fn, module=module)

        if handler:
            decorator = on_event(event_name)
            return decorator(fn)
        else:
            return fn

    return decorator
ce7821bbe67c3c776f8dfa4b69a4bed25ab814e3
19,958
import re


def add_target_to_anchors(string_to_fix, target="_blank"):
    """Given arbitrary string, find <a> tags and add target attributes"""
    pattern = re.compile("<a(?P<attributes>.*?)>")

    def repl_func(matchobj):
        pattern = re.compile("target=['\"].+?['\"]")
        attributes = matchobj.group("attributes")
        if pattern.search(attributes):
            return "<a%s>" % re.sub(pattern, "target='%s'" % target, attributes)
        else:
            return "<a%s target='%s'>" % (attributes, target)

    return re.sub(pattern, repl_func, string_to_fix)
4650dcf933e9b6e153646c6b7f3535881e4db1f8
19,960
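Illustrative usage of add_target_to_anchors above (invented HTML):
add_target_to_anchors('<a href="/docs">docs</a>')
# -> '<a href="/docs" target=\'_blank\'>docs</a>'
add_target_to_anchors('<a href="/x" target="_self">x</a>', target="_top")
# -> '<a href="/x" target=\'_top\'>x</a>'  (existing target is rewritten)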
def calcInvariants(S, R, gradT, with_tensor_basis=False, reduced=True):
    """
    This function calculates the invariant basis at one point.

    Arguments:
    S -- symmetric part of local velocity gradient (numpy array shape (3,3))
    R -- anti-symmetric part of local velocity gradient (numpy array shape (3,3))
    gradT -- array with local temperature gradient (numpy array shape (3,))
    with_tensor_basis -- optional, a flag that determines whether to also calculate
                         tensor basis. By default, it is false (so only invariants
                         are returned)
    reduced -- optional argument, a boolean flag that determines whether the features
               that depend on a vector (lambda 7 thru lambda 13) should be calculated.
               If reduced==True, extra features are NOT calculated. Default value is True.

    Returns:
    invariants -- array of shape (n_features-2,) that contains the invariant basis
                  from the gradient tensors that are used by the ML model to make a
                  prediction at the current point.
    tensor_basis -- array of shape (n_basis,3,3) that contains the form invariant
                    tensor basis that are used by the TBNN to construct the tensorial
                    diffusivity at the current point.

    # Taken from the paper of Zheng, 1994, "Theory of representations for tensor
    # functions - A unified invariant approach to constitutive equations"
    """

    # For speed, pre-calculate these
    S2 = np.linalg.multi_dot([S, S])
    R2 = np.linalg.multi_dot([R, R])
    S_R2 = np.linalg.multi_dot([S, R2])

    ### Fill basis 0-12
    if reduced:
        num_features = constants.NUM_FEATURES_F2 - 2
    else:
        num_features = constants.NUM_FEATURES_F1 - 2
    invariants = np.empty(num_features)

    # Velocity gradient only (0-5)
    invariants[0] = np.trace(S2)
    invariants[1] = np.trace(np.linalg.multi_dot([S2, S]))
    invariants[2] = np.trace(R2)
    invariants[3] = np.trace(S_R2)
    invariants[4] = np.trace(np.linalg.multi_dot([S2, R2]))
    invariants[5] = np.trace(np.linalg.multi_dot([S2, R2, S, R]))

    # Velocity + temperature gradients (6-12)
    if not reduced:
        invariants[6] = np.linalg.multi_dot([gradT, gradT])
        invariants[7] = np.linalg.multi_dot([gradT, S, gradT])
        invariants[8] = np.linalg.multi_dot([gradT, S2, gradT])
        invariants[9] = np.linalg.multi_dot([gradT, R2, gradT])
        invariants[10] = np.linalg.multi_dot([gradT, S, R, gradT])
        invariants[11] = np.linalg.multi_dot([gradT, S2, R, gradT])
        invariants[12] = np.linalg.multi_dot([gradT, R, S_R2, gradT])

    # Also calculate the tensor basis
    if with_tensor_basis:
        tensor_basis = np.empty((constants.N_BASIS, 3, 3))
        tensor_basis[0,:,:] = np.eye(3)
        tensor_basis[1,:,:] = S
        tensor_basis[2,:,:] = R
        tensor_basis[3,:,:] = S2
        tensor_basis[4,:,:] = R2
        tensor_basis[5,:,:] = np.linalg.multi_dot([S, R]) + np.linalg.multi_dot([R, S])
        return invariants, tensor_basis

    return invariants
2ce8407843947c4f7c9779d061971822707f147e
19,961
def with_uproot(histo_path: str) -> bh.Histogram:
    """Reads a histogram with uproot and returns it.

    Args:
        histo_path (str): path to histogram, use a colon to distinguish between path
            to file and path to histogram within file (example: ``file.root:h1``)

    Returns:
        bh.Histogram: histogram containing data
    """
    hist = uproot.open(histo_path).to_boost()
    return hist
c03e7c7054a550769c23c904892c0c327b2bcafa
19,962
def slide5x5(xss):
    """Slide five artists at a time."""
    return slidingwindow(5, 5, xss)
56374e53384d2012d2e6352efcd0e972ff3d04bf
19,963
def compute_consensus_rule(
    profile,
    committeesize,
    algorithm="fastest",
    resolute=True,
    max_num_of_committees=MAX_NUM_OF_COMMITTEES_DEFAULT,
):
    """
    Compute winning committees with the Consensus rule.

    Based on Perpetual Consensus from
    Martin Lackner Perpetual Voting: Fairness in Long-Term Decision Making
    In Proceedings of the 34th AAAI Conference on Artificial Intelligence (AAAI 2020)

    Parameters
    ----------
    profile : abcvoting.preferences.Profile
        A profile.

    committeesize : int
        The desired committee size.

    algorithm : str, optional
        The algorithm to be used.

        The following algorithms are available for the Consensus rule:

        .. doctest::

            >>> Rule("consensus-rule").algorithms
            ('float-fractions', 'gmpy2-fractions', 'standard-fractions')

    resolute : bool, optional
        Return only one winning committee.

        If `resolute=False`, all winning committees are computed
        (subject to `max_num_of_committees`).

    max_num_of_committees : int, optional
        At most `max_num_of_committees` winning committees are computed.

        If `max_num_of_committees=None`, the number of winning committees is not
        restricted. The default value of `max_num_of_committees` can be modified
        via the constant `MAX_NUM_OF_COMMITTEES_DEFAULT`.

    Returns
    -------
    list of CandidateSet
        A list of winning committees.
    """
    rule_id = "consensus-rule"
    rule = Rule(rule_id)
    if algorithm == "fastest":
        algorithm = rule.fastest_available_algorithm()
    rule.verify_compute_parameters(
        profile=profile,
        committeesize=committeesize,
        algorithm=algorithm,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
    )

    committees, detailed_info = _consensus_rule_algorithm(
        profile=profile,
        committeesize=committeesize,
        algorithm=algorithm,
        resolute=resolute,
        max_num_of_committees=max_num_of_committees,
    )

    # optional output
    output.info(header(rule.longname), wrap=False)
    if not resolute:
        output.info("Computing all possible winning committees for any tiebreaking order")
        output.info(" (aka parallel universes tiebreaking) (resolute=False)\n")
    output.details(f"Algorithm: {ALGORITHM_NAMES[algorithm]}\n")
    output.info(
        str_committees_with_header(committees, cand_names=profile.cand_names, winning=True)
    )
    # end of optional output

    return committees
0dd12aa8faab485a62cdeccfaf87385df85b0b7f
19,964
def addcron():
    """
    {
        "uid": "张三",
        "mission_name": "定时服务名字",
        "pid": "c3009c8e62544a23ba894fe5519a6b64",
        "EnvId": "9d289cf07b244c91b81ce6bb54f2d627",
        "SuiteIdList": ["75cc456d9c4d41f6980e02f46d611a5c"],
        "runDate": 1239863854,
        "interval": 60,
        "alwaysSendMail": true,
        "alarmMailGroupList": "['4dc0e648e61846a4aca01421aa1202e2', '2222222222222']",
        "triggerType": "interval"
    }
    """
    try:
        require_items = get_post_items(request, CronJob.REQUIRE_ITEMS, throwable=True)
        option_items = get_post_items(request, CronJob.OPTIONAL_ITEMS)
        require_items.update(option_items)
        require_items.update({"uid": g.user_object_id})
        mission_name = get_models_filter(CronJob, CronJob.mission_name == require_items["mission_name"])
        if mission_name != []:
            return jsonify({'status': 'failed', 'data': '名字已存在'})
        temp = require_items.get("alarmMailGroupList")
        require_items["alarmMailGroupList"] = str(temp)
        times = Run_Times(**require_items)
        if times == True:
            _model = create_model(CronJob, **require_items)
            cron_manager.add_cron(
                **{
                    "mission_name": require_items.get("mission_name"),
                    "mode": require_items.get("triggerType"),
                    "seconds": require_items.get("interval"),
                    "run_Date": require_items.get("runDate"),
                    "task_Job": require_items,
                    "object_id": _model.object_id,
                })
            return jsonify({'status': 'ok', 'object_id': _model.object_id})
        else:
            return jsonify(times)
    except BaseException as e:
        return jsonify({'status': 'failed', 'data': '新建失败 %s' % e})
87aca95b6486bbbd9abd9277aa3e2eb39b7bbdad
19,965
def dict_expand(d, prefix=None):
    """
    Recursively expand subdictionaries returning dictionary

    dict_expand({1:{2:3}, 4:5}) = {(1,2):3, 4:5}
    """
    result = {}
    for k, v in d.items():
        if isinstance(v, dict):
            result.update(dict_expand(v, prefix=k))
        else:
            result[k] = v
    if prefix is not None:
        result = {make_tuple(prefix) + make_tuple(k): v
                  for k, v in result.items()}
    return result
842503eaffca7574f127b731216b5f5b10ddf86f
19,966
def parse_config_list(config_list):
    """
    Parse a list of configuration properties separated by '='
    """
    if config_list is None:
        return {}
    else:
        mapping = {}
        for pair in config_list:
            if (constants.CONFIG_SEPARATOR not in pair) or (pair.count(constants.CONFIG_SEPARATOR) != 1):
                raise ValueError("configs must be passed as two strings separted by a %s",
                                 constants.CONFIG_SEPARATOR)
            (config, value) = pair.split(constants.CONFIG_SEPARATOR)
            mapping[config] = value
        return mapping
12ab7dc51420196a60ef027ea606a837da3b1b59
19,967