Columns: content (string, lengths 35–762k) · sha1 (string, length 40) · id (int64, 0–3.66M)
import numpy as np
import xarray as xr


def bell_sigmoid(ds, a=None, bc=None, d=None, inplace=True):
    """
    Apply a fuzzy membership function to data using a bell-shaped sigmoidal
    function. Requires a lower left inflection (a), a mid-point (bc), and a
    lower right inflection (d) point to set the bounds in which to rescale
    all values. Values at or closer to the bc inflection point will be
    boosted, whereas values on the right and left sides will be reduced.
    The output dataset will have values rescaled to 0-1.

    Parameters
    ----------
    ds: xarray dataset/array
        A dataset with x, y dims.
    a : int
        Lower left slope inflection point.
    bc : int
        Mid slope inflection point.
    d : int
        Lower right slope inflection point.

    Returns
    ----------
    ds : xarray dataset or array.
    """

    # check inputs
    if a is None or bc is None or d is None:
        raise ValueError('Must provide values for a, bc and d.')
    elif a > bc or a > d:
        raise ValueError('Value for \'a\' must be less than value for \'bc\' and \'d\'.')
    elif bc > d:
        raise ValueError('Value for \'bc\' must be less than value for \'d\'.')

    # create copy
    if not inplace:
        ds = ds.copy(deep=True)

    # create masks to handle out of bound values
    mask_lt_bc = xr.where((ds >= a) & (ds <= bc), True, False)
    mask_gt_bc = xr.where((ds > bc) & (ds <= d), True, False)

    # perform inc sigmoidal (left side of bell curve)
    left = np.cos((1 - ((ds - a) / (bc - a))) * (np.pi / 2))**2
    left = left.where(mask_lt_bc, 0.0)

    # perform dec sigmoidal (right side of bell curve)
    right = np.cos(((ds - bc) / (d - bc)) * (np.pi / 2))**2
    right = right.where(mask_gt_bc, 0.0)

    # sum
    ds = left + right

    return ds
3e0476a4df3d2c63646aedbc4a64e8ee3656bc43
30,651
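A minimal usage sketch for the bell_sigmoid row above (not part of the dataset row), using synthetic data and the numpy/xarray imports from the snippet:

da = xr.DataArray(np.linspace(0.0, 100.0, 11), dims="x")
# values near bc=50 are boosted towards 1; they taper to 0 at a=20 and d=80
scaled = bell_sigmoid(da, a=20, bc=50, d=80, inplace=False)
print(scaled.values)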
def get_composite(name, error=DontCatchError, error_message=None,
                  identifier=None, component_category='unknown'):
    """
    Gets a Composite Singleton

    :param:
     - `name`: name to register singleton (clients that want same singleton, use same name)
     - `error`: exception to catch (``DontCatchError`` default)
     - `error_message`: message to log on catching the error
     - `identifier`: an identifier for the component (for logging, etc.)
     - `component_category`: classifier for Composite.components

    :return: Composite singleton
    """
    if SingletonEnum.composite not in singletons:
        singletons[SingletonEnum.composite] = {}
    if name not in singletons[SingletonEnum.composite]:
        if error_message is None:
            error_message = "{0} component error".format(name)
        if identifier is None:
            identifier = name
        singletons[SingletonEnum.composite][name] = Composite(
            error=error,
            error_message=error_message,
            identifier=identifier,
            component_category=component_category)
    return singletons[SingletonEnum.composite][name]
39bfe67a1482c7c157e655c3f1accb308fa211b0
30,653
def is_list_of_float(value):
    """
    Check if an object is a list of floats

    :param value:
    :return:
    """
    return bool(value) and isinstance(value, list) and all(
        isinstance(elem, float) for elem in value)
35ec9531bcddc33166e0f17d3fc59a08341d2d95
30,654
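Quick sanity checks for is_list_of_float above: empty lists and int elements are both rejected.

assert is_list_of_float([1.0, 2.5])
assert not is_list_of_float([])          # bool([]) is False
assert not is_list_of_float([1, 2.0])    # ints are not floats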
from PIL import Image


def get_example_params(example_index):
    """
    Gets used variables for almost all visualizations, like the image, model etc.

    Args:
        example_index (int): Image id to use from examples

    returns:
        original_image (numpy arr): Original image read from the file
        prep_img (numpy_arr): Processed image
        target_class (int): Target class for the image
        pretrained_model(Pytorch model): Model to use for the operations
    """
    # Pick one of the examples. NOTE: `example_list` (a module-level
    # (path, target_class) table) and `preprocess_image` are assumed to be
    # defined in the original source; this row was cut from its module.
    img_path, target_class = example_list[example_index]
    # Read image
    original_image = Image.open(img_path).convert('RGB')
    # Process image
    prep_img = preprocess_image(original_image)
    # Define model
    # pretrained_model = model
    return (original_image, prep_img, target_class)
6e2760e4d91d888ce9f1443787b9bfa864fe7118
30,655
import numpy as np

from geometrylab.geometry import Polyline


def bezier_curve(points, nTimes=500, is_poly=False, is_crv=False):
    """
    Given a set of control points, return the bezier curve defined by the
    control points.

    points should be a list of lists, or list of tuples, of 3D points
    such as [[1, 1, 0], [2, 3, 0], [4, 5, 0], ... [Xn, Yn, Zn]]

    nTimes is the number of time steps, defaults to 500

    See http://processingjs.nihongoresources.com/bezierinfo/

    Hui Note: given points, the returned crv points run in the reverse
    direction: Q[0] == points[-1]; Q[-1] == points[0]
    """
    if is_poly:
        return Polyline(points, closed=False)
    else:
        nPoints = len(points)
        xPoints = np.array([p[0] for p in points])
        yPoints = np.array([p[1] for p in points])
        zPoints = np.array([p[2] for p in points])
        t = np.linspace(0.0, 1.0, nTimes)
        polynomial_array = np.array([
            bernstein_poly(i, nPoints - 1, t) for i in range(0, nPoints)])
        xvals = np.dot(xPoints, polynomial_array)
        yvals = np.dot(yPoints, polynomial_array)
        zvals = np.dot(zPoints, polynomial_array)
        Q = np.flip(np.c_[xvals, yvals, zvals], axis=0)
        if is_crv:
            crv = Polyline(Q, closed=False)
            return crv
        else:
            return Q
0b60554a2b8697c665822ecf408a089b89df7107
30,656
import functools


def decorator_with_keywords(func=None, **dkws):
    # NOTE: ONLY ACCEPTS KW ARGS
    """
    A decorator that can handle optional keyword arguments.

    When the decorator is called with no optional arguments like this:

        @decorator
        def function ...

    The function is passed as the first argument and decorate returns the
    decorated function, as expected.

    If the decorator is called with one or more optional arguments like this:

        @decorator(optional_argument1='some value')
        def function ....

    Then decorator is called with the function argument with value None, so a
    function that decorates is returned, as expected.
    """

    # print('WHOOP', func, dkws)
    def _decorate(func):
        @functools.wraps(func)
        def wrapped_function(*args, **kws):
            # print('!!')
            return func(*args, **kws)

        return wrapped_function

    if func:
        return _decorate(func)
    return _decorate
64c4ddd26cc04a43cbf559600652113db81b79ae
30,657
from datetime import datetime


def parse_line(line):
    """
    Extract all the data we want from each line.

    :param line: A line from our log files.
    :return: The data we have extracted.
    """
    time = line.split()[0].strip()
    response = line.split(' :')
    message = response[len(response) - 1].strip('\n')
    channel = response[1].split('#')
    username = channel[0].split('!')
    username = username[0]
    channel = channel[1]
    time = datetime.strptime(time, '%Y-%m-%d_%H:%M:%S')
    return time, channel, username, message
72b4362b7628d31996075941be00e4ddcbd5edbc
30,658
def semi_lagrangian(field: GridType, velocity: Field, dt: float, integrator=euler) -> GridType:
    """
    Semi-Lagrangian advection with simple backward lookup.

    This method samples the `velocity` at the grid points of `field` to
    determine the lookup location for each grid point by walking backwards
    along the velocity vectors. The new values are then determined by
    sampling `field` at these lookup locations.

    Args:
        field: quantity to be advected, stored on a grid (CenteredGrid or StaggeredGrid)
        velocity: vector field, need not be compatible with `field`.
        dt: time increment
        integrator: ODE integrator for solving the movement.

    Returns:
        Field with same sample points as `field`
    """
    lookup = integrator(field.elements, velocity, -dt)
    interpolated = reduce_sample(field, lookup)
    return field.with_values(interpolated)
b265e660100a9855a99e03f3c03cbd4bad0f79c8
30,659
def pptx_to_bbox(left, top, width, height):
    """
    Convert matplotlib bounding box format to pptx format

    Parameters
    ----------
    left : float
    top : float
    width : float
    height : float

    Returns
    -------
    bottom, left, width, height
    """
    return top - height, left, width, height
3cdc186301d7e6e97ea44923ca6859e2e51f0774
30,660
from collections import Counter


def reindex(labels):
    """
    Given a list of labels, reindex them as integers from 1 to n_labels.
    Also orders them in nonincreasing order of prevalence.
    """
    old2new = {}
    j = 1
    for i, _ in Counter(labels).most_common():
        old2new[i] = j
        j += 1
    return [old2new[a] for a in labels]
c12afd3b6431f10ccc43cce858e71bc504088a6e
30,661
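Worked example for reindex above: 'b' is the most common label, so it maps to 1.

labels = ["b", "a", "b", "c", "b", "a"]
print(reindex(labels))  # -> [1, 2, 1, 3, 1, 2]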
def _validate_vg(module, vg):
    """
    Check the current state of volume group.

    :param module: Ansible module argument spec.
    :param vg: Volume Group name.
    :return: True (VG in varyon state) or False (VG in varyoff state) or
             None (VG does not exist), message.
    """
    lsvg_cmd = module.get_bin_path('lsvg', True)
    rc, current_active_vgs, err = module.run_command("%s -o" % lsvg_cmd)
    if rc != 0:
        module.fail_json(msg="Failed executing %s command." % lsvg_cmd)

    rc, current_all_vgs, err = module.run_command("%s" % lsvg_cmd)
    if rc != 0:
        module.fail_json(msg="Failed executing %s command." % lsvg_cmd)

    if vg in current_all_vgs and vg not in current_active_vgs:
        msg = "Volume group %s is in varyoff state." % vg
        return False, msg
    elif vg in current_active_vgs:
        msg = "Volume group %s is in varyon state." % vg
        return True, msg
    else:
        msg = "Volume group %s does not exist." % vg
        return None, msg
c5d68f69243f1ca24140f09c7047269b7012ed6c
30,662
from typing import List


def news_items(news_index_page) -> List[News]:
    """Fixture providing 10 News objects attached to news_index_page"""
    rv = []
    for i in range(10):
        p = _create_news_page(f"Test News Page {i}", news_index_page)
        rv.append(p)
    return rv
e7e8f417cefd713b9d79e6e28b654df9dd0ca0da
30,663
def is_annotated(procedure):
    """Return True if procedure is annotated."""
    procedure = annotatable(procedure)
    try:
        ann = procedure.func_annotations
        return ann.are_for(procedure) and bool(ann)
    except AttributeError:
        return False
70eccace122462584e3c536fafe272b5397ac659
30,664
def _enable_disable_pim_config(tgen, topo, input_dict, router, build=False):
    """
    Helper API to enable or disable pim on interfaces

    Parameters
    ----------
    * `tgen` : Topogen object
    * `topo` : json file data
    * `input_dict` : Input dict data, required when configuring from testcase
    * `router` : router id to be configured.
    * `build` : Only for initial setup phase this is set as True.

    Returns
    -------
    list of config
    """
    config_data = []

    # Enable pim on interfaces
    for destRouterLink, data in sorted(topo[router]["links"].items()):
        if "pim" in data and data["pim"] == "enable":
            # Loopback interfaces
            if "type" in data and data["type"] == "loopback":
                interface_name = destRouterLink
            else:
                interface_name = data["interface"]
            cmd = "interface {}".format(interface_name)
            config_data.append(cmd)
            config_data.append("ip pim")

    # pim global config
    if "pim" in input_dict[router]:
        pim_data = input_dict[router]["pim"]
        del_action = pim_data.setdefault("delete", False)
        for t in [
            "join-prune-interval",
            "keep-alive-timer",
            "register-suppress-time",
        ]:
            if t in pim_data:
                cmd = "ip pim {} {}".format(t, pim_data[t])
                if del_action:
                    cmd = "no {}".format(cmd)
                config_data.append(cmd)

    return config_data
4408ac212126895ba161f834e7e076a0c14d864f
30,666
from typing import Union

import numpy as np


def linear_interpolation_formula(
    left: Union[float, np.ndarray],
    right: Union[float, np.ndarray],
    gamma: Union[float, np.ndarray],
) -> Union[float, np.ndarray]:
    """
    Compute the linear interpolation weighted by gamma on each point of two
    arrays of the same shape.
    """
    return gamma * right + (1 - gamma) * left
cdcb915f6bfc60db2f3754044ab5b67432d66370
30,667
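Quick check for linear_interpolation_formula above: gamma=0 yields left, gamma=1 yields right.

left, right = np.array([0.0, 10.0]), np.array([4.0, 20.0])
print(linear_interpolation_formula(left, right, 0.25))  # -> [ 1.  12.5]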
import numpy as np


def estimate_visib_mask_est(d_test, d_est, visib_gt, delta, visib_mode='bop19'):
    """Estimates a mask of the visible object surface in the estimated pose.

    For an explanation of why the visibility mask is calculated differently
    for the estimated and the ground-truth pose, see equation (14) and
    related text in Hodan et al., On Evaluation of 6D Object Pose Estimation,
    ECCVW'16.

    :param d_test: Distance image of a scene in which the visibility is estimated.
    :param d_est: Rendered distance image of the object model in the est. pose.
    :param visib_gt: Visibility mask of the object model in the GT pose (from
      function estimate_visib_mask_gt).
    :param delta: Tolerance used in the visibility test.
    :param visib_mode: See _estimate_visib_mask.
    :return: Visibility mask.
    """
    visib_est = _estimate_visib_mask(d_test, d_est, delta, visib_mode)
    visib_est = np.logical_or(visib_est, np.logical_and(visib_gt, d_est > 0))
    return visib_est
90f2de0a4e489207e128668510ba8b08a0bd361f
30,668
import itertools


def combine_assertions(input_filename, output_filename):
    """
    Take in a tab-separated, sorted "CSV" file, indicated by
    `input_filename`, whose lines should be grouped together into assertions.
    Output a msgpack stream of assertions to the file indicated by
    `output_filename`.

    The input file should be made from multiple sources of assertions by
    concatenating and sorting them.

    The combined assertions will all have the dataset of the first edge that
    produces them, and the license of the strongest license being combined.

    This process requires its input to be a sorted CSV so that all edges for
    the same assertion will appear consecutively.
    """

    def group_func(line):
        "Group lines by their URI (their first column)."
        return line.split('\t', 1)[0]

    out = MsgpackStreamWriter(output_filename)
    out_bad = MsgpackStreamWriter(output_filename + '.reject')
    with open(input_filename, encoding='utf-8') as stream:
        for key, line_group in itertools.groupby(stream, group_func):
            assertion = make_assertion(line_group)
            if assertion is None:
                continue
            if assertion['weight'] > 0:
                destination = out
            else:
                destination = out_bad
            destination.write(assertion)
    out.close()
    out_bad.close()
87e2e7df2484dcff7f315da91ef39472991c2351
30,669
def sanitize_mobile_number(number):
    """Add country code and strip leading zeroes from the phone number."""
    return "254" + str(number).lstrip("0")
944e6e5baef92ee7c59249714a9ba3463ff5981f
30,671
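Usage sketch for sanitize_mobile_number above; the hard-coded 254 prefix suggests Kenyan numbers are assumed.

print(sanitize_mobile_number("0712345678"))  # -> '254712345678'
print(sanitize_mobile_number(712345678))     # non-strings are coerced first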
def fakebaraxis(ticks, painter=fakebarpainter(), *args, **kwargs):
    """Return a PyX linear axis that can be used to make fake bar plots.
    Use "keyticks" to create the ticks expected by this function."""
    return axis.linear(
        min=-0.75, max=len(ticks) - 0.25,
        parter=None, manualticks=ticks,
        painter=painter, *args, **kwargs
    )
99b30a9b76b9da8e4c8e1c937431aa509b47ab16
30,672
import csv


def get_score_sent_pairs_from_tsv(tsv_filepath, encoding="ISO-8859-1"):
    """expects tokenized sentences in tsv file!"""
    with open(tsv_filepath, encoding=encoding) as tsvfile:
        reader = csv.reader(tsvfile, delimiter='\t')
        score_sent_pairs = [[float(row[0]), row[1]] for row in reader]
    return score_sent_pairs
44f5c150d40b407b50a93cd0ad968658fd5ef431
30,673
def test_timings_trie(port, individual_test_timings):
    """Breaks a test name into chunks by directory and puts the test time as
    a value in the lowest part, e.g.

        foo/bar/baz.html: 1ms
        foo/bar/baz1.html: 3ms

    becomes

        foo: {
            bar: {
                baz.html: 1,
                baz1.html: 3
            }
        }
    """
    trie = {}
    for test_result in individual_test_timings:
        test = test_result.test_name
        add_path_to_trie(test, int(1000 * test_result.test_run_time), trie)
    return trie
dfca4a92715063620b3a110df3ea29b2de3bb0b6
30,674
import numpy as np
import proper


def prop_end(wf, **kwargs):
    """Set variables needed to properly conclude a propagation run.

    Parameters
    ----------
    wf : obj
        The current WaveFront class object

    Returns
    -------
    wf.wfarr : numpy ndarray
        Wavefront array
    sampling : float
        Sampling in meters

    Other Parameters
    ----------------
    EXTRACT : int
        Returns the dx by dx pixel central portion of the wavefront.
    NOABS : bool
        If set, the complex-valued wavefront field is returned. By default,
        the intensity (modulus squared) of the field is returned.
    """
    sampling = proper.prop_get_sampling(wf)
    if "NOABS" in kwargs and kwargs["NOABS"]:
        wf.wfarr = proper.prop_shift_center(wf.wfarr)
    else:
        wf.wfarr = proper.prop_shift_center(np.abs(wf.wfarr)**2)

    if "EXTRACT" in kwargs:
        EXTRACT = kwargs["EXTRACT"]
        ny, nx = wf.wfarr.shape
        # integer division so the slice indices stay ints under Python 3
        wf.wfarr = wf.wfarr[ny//2-EXTRACT//2:ny//2+EXTRACT//2,
                            nx//2-EXTRACT//2:nx//2+EXTRACT//2]

    return (wf.wfarr, sampling)
d294939f5e26df7672611ae6b58ac7039e8d22c0
30,675
def add_others_ta(df, close, fillna=False):
    """Add others analysis features to dataframe.

    Args:
        df (pandas.core.frame.DataFrame): Dataframe base.
        close (str): Name of 'close' column.
        fillna (bool): if True, fill nan values.

    Returns:
        pandas.core.frame.DataFrame: Dataframe with new features.
    """
    df['others1'] = daily_return(df[close], fillna=fillna)
    df['others2'] = cumulative_return(df[close], fillna=fillna)
    return df
97185202663cb83ed1dc5f4bd02320b0ce02c4aa
30,676
def _fixture_union(caller_module, name, fixtures, idstyle, scope="function",
                   ids=fixture_alternative_to_str, unpack_into=None,
                   autouse=False, **kwargs):
    """
    Internal implementation for fixture_union

    :param caller_module:
    :param name:
    :param fixtures:
    :param idstyle:
    :param scope:
    :param ids:
    :param unpack_into:
    :param autouse:
    :param kwargs:
    :return:
    """
    # test the `fixtures` argument to avoid common mistakes
    if not isinstance(fixtures, (tuple, set, list)):
        raise TypeError("fixture_union: the `fixtures` argument should be a tuple, set or list")

    # validate the idstyle
    idstyle = IdStyle(idstyle)

    # first get all required fixture names
    f_names = []
    for f in fixtures:
        # possibly get the fixture name if the fixture symbol was provided
        f_names.append(get_fixture_name(f) if not isinstance(f, str) else f)

    if len(f_names) < 1:
        raise ValueError("Empty fixture unions are not permitted")

    # then generate the body of our union fixture. It will require all of its dependent fixtures
    # and receive as a parameter the name of the fixture to use
    @with_signature("(%s, request)" % ', '.join(f_names))
    def _new_fixture(request, **all_fixtures):
        if not is_used_request(request):
            return NOT_USED
        else:
            alternative = request.param
            if isinstance(alternative, UnionFixtureAlternative):
                fixture_to_use = alternative.fixture_name
                return all_fixtures[fixture_to_use]
            else:
                raise TypeError("Union Fixture %s received invalid parameter type: %s. Please report this issue."
                                "" % (name, alternative.__class__))

    _new_fixture.__name__ = name

    # finally create the fixture per se.
    # WARNING we do not use pytest.fixture but pytest_fixture_plus so that NOT_USED is discarded
    f_decorator = pytest_fixture_plus(
        scope=scope,
        params=[UnionFixtureAlternative(_name, idstyle) for _name in f_names],
        autouse=autouse, ids=ids, **kwargs)
    fix = f_decorator(_new_fixture)

    # Dynamically add fixture to caller's module as explained in
    # https://github.com/pytest-dev/pytest/issues/2424
    check_name_available(caller_module, name, if_name_exists=WARN, caller=param_fixture)
    setattr(caller_module, name, fix)

    # if unpacking is requested, do it here
    if unpack_into is not None:
        _unpack_fixture(caller_module, argnames=unpack_into, fixture=name)

    return fix
7063ab888b99cc0aa10890de9f4575f0ce758017
30,677
def service_class(cls):
    """
    A class decorator enabling the instances of the class to be used as a
    ``services``-provider in `JSONRpc Objects`_ and `BSONRpc Objects`_.

    Use decorators ``request``, ``notification``, ``rpc_request`` and
    ``rpc_notification`` to expose methods for the RPC peer node.
    """
    cls._request_handlers = {}
    cls._notification_handlers = {}
    for name, method in cls.__dict__.items():
        if hasattr(method, '_request_handler'):
            cls._request_handlers[name] = method
        if hasattr(method, '_notification_handler'):
            cls._notification_handlers[name] = method
    return cls
7c146b1d04415cd494e62fb9ee310364c345c217
30,678
def lightcurveplain(request, tcs_transient_objects_id):
    """lightcurveplain.

    Args:
        request:
        tcs_transient_objects_id:
    """
    transient = get_object_or_404(TcsTransientObjects, pk=tcs_transient_objects_id)
    mjdLimit = 55347.0  # Hard wired to 31st May 2010

    # 2012-07-18 KWS Changed this code to call the custom query from a
    # dedicated file full of custom queries for lightcurves.
    recurrences = lightcurvePlainQuery(
        transient.id, mjdLimit=mjdLimit,
        djangoRawObject=CustomAllObjectOcurrencesPresentation)

    return render(request, 'psdb/lightcurve.txt',
                  {'transient': transient, 'table': recurrences},
                  content_type="text/plain")
0db9b5ccdb5df9c65fab971fe72d5cec6da84676
30,679
def locate_address(ip_list, ip_attack):
    """
    For each line in the file pointer, define the ip ranges and country
    codes; if the attacking ip is in between the range then return the
    country code.

    :param ip_list - list of ip address ranges and country code:
    :param ip_attack - attacking ip as an integer:
    :return country_code - country code as a string:
    """
    for line in ip_list:
        start_ip = line[0]
        end_ip = line[1]
        country_code = line[2]
        if start_ip <= ip_attack <= end_ip:
            return country_code
    return None
82a8f9ed0cf79a2ba39d21348779687c1f8c19a8
30,680
import numpy as np
import scipy.interpolate


def invertnd(f, x, *other_vars, kind='linear', vectorized=False):
    """
    Invert a multivariate function numerically

    Args:
        f: Function to invert
        x: Domain to invert the function on (range of inverted function)
        *other_vars: Domain to invert the function on (parameters of inverted function)
        kind: Specifies the kind of interpolation as a string ('linear',
            'nearest', 'cubic') (cubic only available for 1 or 2 variables)
        vectorized: Specifies if the input function is vectorized

    Returns:
        Inverted function where the first argument corresponds to the output
        of the original function
    """
    n = len(x)
    reshape_dim = np.ones(len(other_vars) + 1, dtype=int)
    reshape_dim[0] = n
    x_reshape = np.reshape(x, reshape_dim)
    reshape_dim[0] = 1
    if not np.issubdtype(x_reshape.dtype, np.number):
        raise ValueError('Input domain is not numeric')
    dim = [1, *(len(v) for v in other_vars)]
    x_arr = np.tile(x_reshape, dim)
    dim[0] = n

    v_arrs = []
    for i, v in enumerate(other_vars):
        reshape_dim[i + 1] = len(v)
        v_reshape = np.reshape(v, reshape_dim)
        reshape_dim[i + 1] = 1
        if not np.issubdtype(v_reshape.dtype, np.number):
            raise ValueError('Input domain is not numeric')
        dim[i + 1] = 1
        v_arrs.append(np.tile(v_reshape, dim))
        dim[i + 1] = len(v)

    if vectorized:
        y = f(x_arr, *v_arrs)
    else:
        def recursive_f(x_in, *v_in):
            if hasattr(x_in, '__iter__'):
                return [recursive_f(x_n, *v_n) for x_n, v_n in zip(x_in, zip(*v_in))]
            return f(x_in, *v_in)

        y = np.array(recursive_f(x_arr, *v_arrs))
    if not np.issubdtype(y.dtype, np.number):
        raise ValueError('Input function is not numeric')

    points = np.array(list(zip(y.flat, *(v.flat for v in v_arrs))))
    values = np.array(x_arr.flat)

    def f_inverse(x_new, *v_new):
        return scipy.interpolate.griddata(points, values, (x_new, *v_new), method=kind)

    return f_inverse
bc2798e382a700755a1d6a5d59743b969d96a02d
30,681
def most_mentioned(msgs, limit=20):
    """Top mentions by '@' references"""
    mentions = {}
    for m in msgs:
        for at in preproc.extract_ats_from_text(m['text']):
            mentions[at] = mentions[at] + 1 if at in mentions else 1
    return sorted(mentions.items(), key=lambda x: x[1], reverse=True)[:limit]
10aa70248d33325d585fb19875a13965f67896b5
30,682
import string


def is_valid_matlab_field_label(label):
    """ Check that passed string is a valid MATLAB field label """
    if not label.startswith(tuple(string.ascii_letters)):
        return False
    VALID_CHARS = set(string.ascii_letters + string.digits + "_")
    return set(label).issubset(VALID_CHARS)
ea1358e94f4fc936cb12b9cad5d7285ee39dba55
30,683
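Examples for is_valid_matlab_field_label above.

assert is_valid_matlab_field_label("speed_2")      # letter first, then word chars
assert not is_valid_matlab_field_label("2speed")   # must start with a letter
assert not is_valid_matlab_field_label("speed-2")  # '-' is not allowed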
def identity(n, dtype=DEFAULT_FLOAT_DTYPE):
    """
    Returns the identity tensor.

    Args:
        n (int): Number of rows and columns in the output, must be larger
            than 0.
        dtype (Union[mstype.dtype, str], optional): Designated tensor dtype,
            can be in format of np.float32, or `float32`. Default is
            mstype.float32.

    Returns:
        result (Tensor): A tensor of shape (n, n). A tensor where all
        elements are equal to zero, except for the diagonal, whose values
        are equal to one.

    Supported Platforms:
        ``Ascend`` ``GPU`` ``CPU``

    Examples:
        >>> import mindspore.numpy as np
        >>> print(np.identity(2))
        [[1. 0.]
         [0. 1.]]
    """
    dtype = _check_dtype(dtype)
    return eye(n, dtype=dtype)
7ad0025b5fb5bc02b8f07039b8beb15e8c402c11
30,684
from collections import defaultdict

import numpy as np


def _chain_connectivity(edges, chains):
    """Returns chain connectivity treated as clustered entities represented by nodes"""
    chain_connectivity = np.empty((len(edges), 2), dtype=np.int64)

    starts = defaultdict(list)
    for section_index, chain in enumerate(chains):
        starts[chain[0][0]].append(section_index)

    i_connection = 0
    for section_index, chain in enumerate(chains):
        section_end = chain[-1][1]
        for child_section in starts[section_end]:
            if section_index != child_section:
                chain_connectivity[i_connection, 0] = section_index
                chain_connectivity[i_connection, 1] = child_section
                i_connection += 1

    return chain_connectivity[:i_connection]
5687b433a1c47a75641442aef1b8cff4c8cb4e17
30,685
def reading2celsius(self, reading):
    """ Converts sensor reading to celsius """
    celsius = reading / 50 - 273.15
    return celsius
72e6933002c9725165145451e10bbf98c162b625
30,686
import sqlite3
from contextlib import closing


def evaluate_csp(website_id, test_weights):
    """
    Checks:
    no fallback to default:
        base-uri
        form-action
        frame-ancestors
        report-to/uri
        sandbox
        upgrade-insecure-requests
    src:
        child-src
        connect-src
        default-src
        font-src
        frame-src - falls back to child-src which falls back to default
        img-src
        manifest-src
        media-src
        object-src
        style-src
        script-src
            strict-dynamic
            unsafe-hashes
        worker-src

    If a check is to be done on script-src, for example, and it's not
    explicitly defined but default-src is, use the score from default-src
    instead.
    """
    score_dict = {'default-src': 0, 'child-src': 0, 'connect-src': 0, 'font-src': 0,
                  'frame-src': 0, 'img-src': 0, 'manifest-src': 0, 'media-src': 0,
                  'object-src': 0, 'script-src': 0, 'style-src': 0, 'worker-src': 0,
                  'report-to/uri': 0, 'base-uri': 0, 'form-action': 0,
                  'frame-ancestors': 0, 'sandbox': 0, 'upgrade-insecure-requests': 0}
    csp_data = None
    with closing(sqlite3.connect("results.db")) as connection:
        with closing(connection.cursor()) as cursor:
            csp_src_directives = ["default-src", "child-src", "connect-src", "font-src",
                                  "frame-src", "img-src", "manifest-src", "media-src",
                                  "object-src", "script-src", "style-src", "worker-src"]
            csp_default_directive_score = 0
            csp_child_src_directive_score = 0
            cursor.execute("SELECT scheme FROM website WHERE id = ?", (website_id,))
            redirected_scheme = cursor.fetchone()
            if redirected_scheme is not None:
                redirected_scheme = redirected_scheme[0]
            else:
                # Assume http
                redirected_scheme = "http"
            for directive in csp_src_directives:
                cursor.execute("SELECT csp_data FROM csp WHERE website_id = ? AND csp_type = ?",
                               (website_id, directive))
                csp_data = cursor.fetchall()
                if len(csp_data) > 0:
                    result = csp_src_check(csp_data, redirected_scheme)
                    if directive == "default-src":
                        csp_default_directive_score = result
                    elif directive == "child-src":
                        csp_child_src_directive_score = result
                    score_dict[directive] = round(result * test_weights[directive], 4)
                elif directive == "frame-src":
                    score_dict[directive] = round(csp_child_src_directive_score * test_weights[directive], 4)
                elif directive == "child-src":
                    score_dict[directive] = round(csp_default_directive_score * test_weights[directive], 4)
                    csp_child_src_directive_score = csp_default_directive_score
                elif directive != "default-src":
                    score_dict[directive] = round(csp_default_directive_score * test_weights[directive], 4)

            csp_directives = ["base-uri", "form-action", "frame-ancestors", "report-to",
                              "report-uri", "sandbox", "upgrade-insecure-requests"]
            for directive in csp_directives:
                cursor.execute("SELECT csp_data FROM csp WHERE website_id = ? AND csp_type = ?",
                               (website_id, directive))
                csp_data = cursor.fetchall()
                if len(csp_data) > 0:
                    result = 0
                    if directive == 'base-uri' or directive == 'form-action':
                        result = csp_src_check(csp_data, redirected_scheme)
                    elif directive == 'frame-ancestors':
                        result = csp_frame_ancestors_check(csp_data, redirected_scheme)
                    elif directive in ('report-to', 'report-uri', 'sandbox', 'upgrade-insecure-requests'):
                        result = 1
                    if directive == 'report-to' or directive == 'report-uri':
                        score_dict['report-to/uri'] = round(result * test_weights['report-to/uri'], 4)
                    else:
                        score_dict[directive] = round(result * test_weights[directive], 4)
    return score_dict
a5c24968ad98790eb3361db8310416b977e4adc7
30,687
def get_analysis_alias_from_metadata(eload_cfg):
    """
    Returns the analysis alias only if we find a metadata spreadsheet and it
    has exactly one analysis. Otherwise logs an error message and raises an
    error.
    """
    metadata_spreadsheet = eload_cfg.query('submission', 'metadata_spreadsheet')
    if metadata_spreadsheet:
        reader = EvaXlsxReader(metadata_spreadsheet)
        if len(reader.analysis) == 1:
            return reader.analysis[0].get('Analysis Alias')
        if len(reader.analysis) > 1:
            logger.error("Can't assign analysis alias: multiple analyses found in metadata!")
        else:
            logger.error("Can't assign analysis alias: no analyses found in metadata!")
    else:
        logger.error("Can't assign analysis alias: no metadata found!")
    logger.error("Try running upgrade_config and passing an analysis alias explicitly.")
    raise ValueError("Can't find an analysis alias for config upgrade.")
ac3ecc7aa14f37fa2a25f9b7995923013c68a5c3
30,688
from imcsdk.mometa.bios.BiosProfileManagement import BiosProfileManagement, \
    BiosProfileManagementConsts


def bios_profile_backup_running(handle, server_id=1, **kwargs):
    """
    Backs up the running configuration of various bios tokens to create a
    'cisco_backup_profile'. Will overwrite the existing backup profile if it
    exists.

    Args:
        handle (ImcHandle)
        server_id (int): Id of the server to perform this operation on C3260
            platforms
        kwargs: Key-Value paired arguments for future use

    Returns:
        BiosProfile object corresponding to the backup profile created

    Raises:
        ImcOperationError if the backup profile is not created

    Examples:
        bios_profile_backup_running(handle, server_id=1)
    """
    mo = BiosProfileManagement(parent_mo_or_dn=_get_bios_dn(handle, server_id))
    mo.admin_action = BiosProfileManagementConsts.ADMIN_ACTION_BACKUP
    mo.set_prop_multiple(**kwargs)
    handle.set_mo(mo)
    return _get_bios_profile(handle, name='cisco_backup_profile')
e1c1a7b498df6af5238914522eae56e666df328f
30,689
def variantCombinations(items):
    """
    Calculates variant combinations for a given list of options. Each item in
    the items list represents a unique value with its variants.

    :param list items: list of values to be combined

    >>> c = variantCombinations([["1.1", "1.2"], ["2.1", "2.2"], ["3.1", "3.2"]])
    >>> len(c)
    8
    >>> for combination in c: print(combination)
    ['1.1', '2.1', '3.1']
    ['1.1', '2.1', '3.2']
    ['1.1', '2.2', '3.1']
    ['1.1', '2.2', '3.2']
    ['1.2', '2.1', '3.1']
    ['1.2', '2.1', '3.2']
    ['1.2', '2.2', '3.1']
    ['1.2', '2.2', '3.2']
    """
    assert isinstance(items, list) and items
    if len(items) == 1:
        result = items[0]
    else:
        result = []
        subItems = variantCombinations(items[1:])
        for masterItem in items[0]:
            for subItem in subItems:
                if isinstance(subItem, list):
                    item = [masterItem]
                    item.extend(subItem)
                    result.append(item)
                else:
                    result.append([masterItem, subItem])
    return result
72bfdb19db3cf692e4260a5f75d10324e562f20e
30,690
import regex


def bm_regex(regex_string):
    """Compile best multiline regex."""
    return regex.compile(regex_string, regex.B | regex.M)
9c6507708b1d04ef91783bfd04f4949a9dfc6b76
30,691
def test_enable_8021q_1(monkeypatch):
    """Verify that the enable_8021q function raises an exception when 802.1q
    is not supported by the current OS.
    """
    def mockreturn(command):
        return CmdStatus("", "", 0)

    # monkeypatch.setattr(CLISSHNetNS, 'exec_command', mockreturn)
    lh = GenericLinuxHost(LH_CFG, OPTS)
    monkeypatch.setattr(lh.ssh, 'exec_command', mockreturn)
    with pytest.raises(Exception) as excepinfo:
        lh.enable_8021q()
    result = "Current OS doesn't support 802.1q."
    assert result == str(excepinfo.value)
4c3261ef788b369d185c4caff0f02a67818c5cc8
30,692
def qlearning_dataset(env, dataset=None, terminate_on_end=False, **kwargs):
    """
    Returns datasets formatted for use by standard Q-learning algorithms,
    with observations, actions, next_observations, rewards, and a terminal
    flag.

    Args:
        env: An OfflineEnv object.
        dataset: An optional dataset to pass in for processing. If None,
            the dataset will default to env.get_dataset()
        terminate_on_end (bool): Set done=True on the last timestep
            in a trajectory. Default is False, and will discard the
            last timestep in each trajectory.
        **kwargs: Arguments to pass to env.get_dataset().

    Returns:
        A dictionary containing keys:
            observations: An N x dim_obs array of observations.
            actions: An N x dim_action array of actions.
            next_observations: An N x dim_obs array of next observations.
            rewards: An N-dim float array of rewards.
            terminals: An N-dim boolean array of "done" or episode termination flags.
    """
    if dataset is None:
        dataset = env.get_dataset(**kwargs)

    N = dataset['rewards'].shape[0]
    obs_ = []
    next_obs_ = []
    action_ = []
    reward_ = []
    done_ = []

    # The newer version of the dataset adds an explicit
    # timeouts field. Keep old method for backwards compatibility.
    use_timeouts = False
    if 'timeouts' in dataset:
        use_timeouts = True

    episode_step = 0
    for i in range(N - 1):
        obs = dataset['observations'][i].astype(np.float32)
        new_obs = dataset['observations'][i + 1].astype(np.float32)
        action = dataset['actions'][i].astype(np.float32)
        reward = dataset['rewards'][i].astype(np.float32)
        done_bool = bool(dataset['terminals'][i])

        if use_timeouts:
            final_timestep = dataset['timeouts'][i]
        else:
            final_timestep = (episode_step == env._max_episode_steps - 1)
        if (not terminate_on_end) and final_timestep:
            # Skip this transition and don't apply terminals on the last step of an episode
            episode_step = 0
            continue
        if done_bool or final_timestep:
            episode_step = 0

        obs_.append(obs)
        next_obs_.append(new_obs)
        action_.append(action)
        reward_.append(reward)
        done_.append(done_bool)
        episode_step += 1

    return {
        'observations': np.array(obs_),
        'actions': np.array(action_),
        'next_observations': np.array(next_obs_),
        'rewards': np.array(reward_),
        'terminals': np.array(done_),
    }
bcc59e159ada77d2b3acaed530f190d3fcf8a706
30,693
import torch


def build_save_dataset(corpus_type, fields, opt):  # corpus_type: train or valid
    """ Building and saving the dataset """
    assert corpus_type in ["train", "valid"]

    # Judging whether it is train or valid
    if corpus_type == "train":
        src_corpus = opt.train_src  # paths for the source, target and structure data
        tgt_corpus = opt.train_tgt
        structure_corpus = opt.train_structure
        mask_corpus = opt.train_mask
        relation_lst_corpus = opt.train_relation_lst
        relation_mat_corpus = opt.train_relation_mat
        align_corpus = opt.train_align
    else:
        src_corpus = opt.valid_src
        tgt_corpus = opt.valid_tgt
        structure_corpus = opt.valid_structure
        mask_corpus = opt.valid_mask
        relation_lst_corpus = opt.valid_relation_lst
        relation_mat_corpus = opt.valid_relation_mat
        align_corpus = opt.valid_align

    if opt.shard_size > 0:
        return build_save_in_shards_using_shards_size(
            src_corpus, tgt_corpus, structure_corpus, mask_corpus,
            relation_lst_corpus, relation_mat_corpus, align_corpus,
            fields, corpus_type, opt,
        )

    # We only build a monolithic dataset.
    # But since the interfaces are uniform, it would be not hard to do this
    # should users need this feature.
    src_iter = make_text_iterator_from_file(src_corpus)
    tgt_iter = make_text_iterator_from_file(tgt_corpus)
    structure_iter = make_text_iterator_from_file(structure_corpus)
    mask_iter = make_text_iterator_from_file(mask_corpus)
    relation_iter = make_text_iterator_from_file(relation_lst_corpus)
    relation_iter_2 = make_text_iterator_from_file(relation_mat_corpus)
    align_iter = make_text_iterator_from_file(align_corpus)

    dataset = build_dataset(
        fields, src_iter, tgt_iter, structure_iter, mask_iter,
        relation_iter, relation_iter_2, align_iter,
        src_seq_length=opt.src_seq_length,
        tgt_seq_length=opt.tgt_seq_length,
        src_seq_length_trunc=opt.src_seq_length_trunc,
        tgt_seq_length_trunc=opt.tgt_seq_length_trunc,
    )

    # We save fields in vocab.pt separately, so make it empty.
    dataset.fields = []

    pt_file = "{:s}_{:s}.pt".format(opt.save_data, corpus_type)
    logger.info(" * saving %s dataset to %s." % (corpus_type, pt_file))
    torch.save(dataset, pt_file)

    return [pt_file]
85594737b15ff356da3dcb431bab9c648122f57a
30,694
import numpy as np


def gaussian_filter(image, sigma):
    """Returns image filtered with a gaussian function of variance sigma**2"""
    i, j = np.meshgrid(np.arange(image.shape[0]),
                       np.arange(image.shape[1]),
                       indexing='ij')
    mu = (int(image.shape[0]/2.0), int(image.shape[1]/2.0))
    gaussian = 1.0/(2.0*np.pi*sigma*sigma)*np.exp(-0.5*(((i-mu[0])/sigma)**2 +
                                                        ((j-mu[1])/sigma)**2))
    gaussian = np.roll(gaussian, (-mu[0], -mu[1]), axis=(0, 1))
    image_fft = np.fft.rfft2(image)
    gaussian_fft = np.fft.rfft2(gaussian)
    image = np.fft.irfft2(image_fft*gaussian_fft)
    return image
18f8d59ebe82fbeb5cc6090c3c01460923cbbf08
30,695
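Usage sketch for gaussian_filter above on synthetic noise; smoothing shrinks the standard deviation.

rng = np.random.default_rng(0)
noisy = rng.standard_normal((64, 64))
smoothed = gaussian_filter(noisy, sigma=3.0)
print(noisy.std(), smoothed.std())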
def get_lattice_points(strand):
    """
    Get lattice point information.

    @param strand: the strand's pair of lattice points
    @return: the start and end lattice points of the strand
    """
    strand_list = eval(strand)
    strand_from = strand_list[0]
    strand_to = strand_list[1]
    return strand_from, strand_to
a69902c15b9d8ce9f518891f4dea55d9aca186cf
30,696
def while_(condition):
    """
    A while loop that can be used in a workchain outline.

    Use as::

        while_(cls.conditional)(
            cls.step1,
            cls.step2
        )

    Each step can, of course, also be any valid workchain step, e.g. conditional.

    :param condition: The workchain method that will return True or False
    """
    return _While(condition)
6594c6da24d6a27d674ddb18713a0e521f0dc2dd
30,697
def new(init=None):
    """Return a new Whirlpool object. An optional string argument may be
    provided; if present, this string will be automatically hashed."""
    return Whirlpool(init)
2d6bc8ce41009d642c78d92b022b44a23f67c496
30,698
def extract_bcr(tab, rep_col='CDR3_aa'):
    """
    Extract BCR repertoire for each patient

    Args:
        tab: data table from TRUST BCR outputs
        rep_col: 'CDR3_aa' or 'complete_CDR3_sequences' or a list of keys

    Output:
        a Series vector containing lists of BCR CDR3 sequences
    """
    tab['patient'] = tab.TCGA_id.str.slice(0, 12)
    tab['Sample_Type'] = tab.TCGA_id.str.slice(13, 15)
    ## https://gdc.cancer.gov/resources-tcga-users/tcga-code-tables/sample-type-codes
    ## Code Definition Short Letter Code
    ## 01 Primary Solid Tumor TP
    ## 02 Recurrent Solid Tumor TR
    ## 03 Primary Blood Derived Cancer - Peripheral Blood TB
    ## 04 Recurrent Blood Derived Cancer - Bone Marrow TRBM
    ## 05 Additional - New Primary TAP
    ## 06 Metastatic TM
    ## 07 Additional Metastatic TAM
    ## 08 Human Tumor Original Cells THOC
    ## 09 Primary Blood Derived Cancer - Bone Marrow TBM
    ## 10 Blood Derived Normal NB
    ## 11 Solid Tissue Normal NT
    ## 12 Buccal Cell Normal NBC
    ## 40 Recurrent Blood Derived Cancer - Peripheral Blood TRB
    ## 50 Cell Lines CELL
    ## 60 Primary Xenograft Tissue XP
    ## 61 Cell Line Derived Xenograft Tissue XCL
    tumor = tab[tab.Sample_Type.isin(['01', '02', '06', '07'])]  ## select tumor samples
    normal = tab[tab.Sample_Type.isin(['10', '11'])]  ## select normal samples
    normal['patient'] = 'Normal_' + normal['patient']  ## rename Normal sample Ids
    print('Tumor data of shape', tumor.shape)
    print('Normal data of shape', normal.shape)
    # group by patient and collect each group's sequences via the to_list function
    out = [
        tumor.groupby('patient')[rep_col].apply(to_list),
        normal.groupby('patient')[rep_col].apply(to_list)
    ]
    return pd.concat(out)
998a3cfd6619b2fa3ae791e523f258a5a82e584b
30,699
def filter_labels(a, min_size, max_size=None):
    """
    Remove (set to 0) labeled connected components that are too small or too large.
    Note: Operates in-place.
    """
    if min_size == 0 and (max_size is None or max_size > np.prod(a.shape)):
        # shortcut for efficiency
        return a

    try:
        component_sizes = np.bincount(a.ravel())
    except TypeError:
        # On 32-bit systems, must explicitly convert from uint32 to int
        # (This fix is just for VM testing.)
        component_sizes = np.bincount(np.asarray(a.ravel(), dtype=int))

    bad_sizes = component_sizes < min_size
    if max_size is not None:
        np.logical_or(bad_sizes, component_sizes > max_size, out=bad_sizes)

    bad_locations = bad_sizes[a]
    a[bad_locations] = 0
    return a
5754959bd2f404fa0189aee406c08745f236c294
30,701
def mask(bigtiff, profile, out_mask):
    """
    Mask a 1- or 4-band image, band by band, for memory saving.
    """
    # process one band at a time so only a single band is resident in memory
    n_bands = 4 if profile['count'] == 4 else 1
    for band in range(n_bands):
        data = bigtiff[band, :, :]
        data[out_mask == 1] = profile['nodata']
        bigtiff[band, :, :] = data
        del data
    return bigtiff
0e31612da8d80f5fb4d8f35a0664708294e98312
30,702
def quadratic_sum(n: int) -> int:
    """Calculate the sum of squares from 1 to n."""
    total = 0
    for i in range(1, n + 1):
        total += i ** 2
    return total
e47a3ee49888c85cc06c72c428d983885ed7009f
30,703
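Cross-check for quadratic_sum above against the closed form n(n+1)(2n+1)/6.

n = 10
assert quadratic_sum(n) == n * (n + 1) * (2 * n + 1) // 6 == 385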
def dag_rules(rules, required_keys):
    """
    Serializing dag parameters from a Variable, checking for required fields
    using required_keys.

    :return dict dag_rules. An example of how they should look is in
        ..._settings.json, "airflow_settings"
    """
    is_exists_rule_keys = all(key in rules.keys() for key in required_keys)
    if not is_exists_rule_keys:
        raise SettingFieldMissingError(
            f"Some of the required fields {','.join(required_keys)} are missing in Variable, "
            f"get: {','.join(rules.keys())}")
    try:
        rules["start_date"] = parse(rules["start_date"])
    except Exception as err:
        raise SettingFieldTypeError(f"Error in start date parser: {err}")
    return rules
319096f113ff82c783266d83d8e194badeb7ec7d
30,704
import socket


def ip_address(value):
    """Get IPAddress"""
    return write_tv(ASN1_IPADDRESS, socket.inet_aton(value))
beb56177436f7a67abbd2627ebf681eef3ed6352
30,705
def _GetUpdatedMilestoneDict(master_bot_pairs, tests):
    """Gets the milestone_dict with the newest rev.

    Checks to see which milestone_dict to use (Clank/Chromium), and updates
    the 'None' to be the newest revision for one of the specified tests.
    """
    masters = set([m.split('/')[0] for m in master_bot_pairs])
    if 'ClankInternal' in masters:
        milestone_dict = CLANK_MILESTONES.copy()
    else:
        milestone_dict = CHROMIUM_MILESTONES.copy()
    # If we might access the end of the milestone_dict, update it to
    # be the newest revision instead of 'None'.
    _UpdateNewestRevInMilestoneDict(master_bot_pairs, tests, milestone_dict)
    return milestone_dict
d3f8f78bb6aed29d2a7a932b288a84adf22e323b
30,706
import numpy as np


def _hrv_nonlinear_poincare_hra(rri, out):
    """Heart Rate Asymmetry Indices.

    - Asymmetry of Poincaré plot (or termed as heart rate asymmetry, HRA) - Yan (2017)
    - Asymmetric properties of long-term and total heart rate variability - Piskorski (2011)
    """
    N = len(rri) - 1
    x = rri[:-1]  # rri_n, x-axis
    y = rri[1:]   # rri_plus, y-axis

    diff = y - x
    decelerate_indices = np.where(diff > 0)[0]  # set of points above IL where y > x
    accelerate_indices = np.where(diff < 0)[0]  # set of points below IL where y < x
    nochange_indices = np.where(diff == 0)[0]

    # Distances to centroid line l2
    centroid_x = np.mean(x)
    centroid_y = np.mean(y)
    dist_l2_all = abs((x - centroid_x) + (y - centroid_y)) / np.sqrt(2)

    # Distances to LI
    dist_all = abs(y - x) / np.sqrt(2)

    # Calculate the angles
    theta_all = abs(np.arctan(1) - np.arctan(y / x))  # phase angle LI - phase angle of i-th point

    # Calculate the radius
    r = np.sqrt(x ** 2 + y ** 2)

    # Sector areas
    S_all = 1 / 2 * theta_all * r ** 2

    # Guzik's Index (GI)
    den_GI = np.sum(dist_all)
    num_GI = np.sum(dist_all[decelerate_indices])
    out["GI"] = (num_GI / den_GI) * 100

    # Slope Index (SI)
    den_SI = np.sum(theta_all)
    num_SI = np.sum(theta_all[decelerate_indices])
    out["SI"] = (num_SI / den_SI) * 100

    # Area Index (AI)
    den_AI = np.sum(S_all)
    num_AI = np.sum(S_all[decelerate_indices])
    out["AI"] = (num_AI / den_AI) * 100

    # Porta's Index (PI)
    m = N - len(nochange_indices)  # all points except those on LI
    b = len(accelerate_indices)    # number of points below LI
    out["PI"] = (b / m) * 100

    # Short-term asymmetry (SD1)
    sd1d = np.sqrt(np.sum(dist_all[decelerate_indices] ** 2) / (N - 1))
    sd1a = np.sqrt(np.sum(dist_all[accelerate_indices] ** 2) / (N - 1))
    sd1I = np.sqrt(sd1d ** 2 + sd1a ** 2)
    out["C1d"] = (sd1d / sd1I) ** 2
    out["C1a"] = (sd1a / sd1I) ** 2
    out["SD1d"] = sd1d  # SD1 deceleration
    out["SD1a"] = sd1a  # SD1 acceleration
    # out["SD1I"] = sd1I  # SD1 based on LI, whereas SD1 is based on centroid line l1

    # Long-term asymmetry (SD2)
    longterm_dec = np.sum(dist_l2_all[decelerate_indices] ** 2) / (N - 1)
    longterm_acc = np.sum(dist_l2_all[accelerate_indices] ** 2) / (N - 1)
    longterm_nodiff = np.sum(dist_l2_all[nochange_indices] ** 2) / (N - 1)

    sd2d = np.sqrt(longterm_dec + 0.5 * longterm_nodiff)
    sd2a = np.sqrt(longterm_acc + 0.5 * longterm_nodiff)
    sd2I = np.sqrt(sd2d ** 2 + sd2a ** 2)

    out["C2d"] = (sd2d / sd2I) ** 2
    out["C2a"] = (sd2a / sd2I) ** 2
    out["SD2d"] = sd2d  # SD2 deceleration
    out["SD2a"] = sd2a  # SD2 acceleration
    # out["SD2I"] = sd2I  # identical with SD2

    # Total asymmetry (SDNN)
    sdnnd = np.sqrt(0.5 * (sd1d ** 2 + sd2d ** 2))  # SDNN deceleration
    sdnna = np.sqrt(0.5 * (sd1a ** 2 + sd2a ** 2))  # SDNN acceleration
    sdnn = np.sqrt(sdnnd ** 2 + sdnna ** 2)  # should be similar to sdnn in hrv_time
    out["Cd"] = (sdnnd / sdnn) ** 2
    out["Ca"] = (sdnna / sdnn) ** 2
    out["SDNNd"] = sdnnd
    out["SDNNa"] = sdnna

    return out
fff7f5c071b64fb44f7e8155a5ddf350a4586517
30,707
def headers(sheet):
    """Returns the values of the sheet's header row (i.e., the first row)."""
    return [
        stringify_value(h)
        for h in truncate_row(next(sheet.iter_rows(values_only=True)))
    ]
26160064a3f0509d343140e3018e96fb1f7b91b4
30,708
def internal_server_error(message='Internal server error'):
    """500 Internal server error response"""
    errors = {
        '_internal': message
    }
    return error(500, errors)
d4ed017f6720ae3e62e5d6e66eb4d2cabd2a0775
30,709
from collections import namedtuple

# nn/F are assumed to be the NNabla modules used in the original source
# (nn.Variable, F.softmax_cross_entropy); `model` is the project's model module.
import nnabla as nn
import nnabla.functions as F


def get_model(args, test=False):
    """
    Create computation graph and variables.
    """
    nn_in_size = 513
    image = nn.Variable([args.batch_size, 3, nn_in_size, nn_in_size])
    label = nn.Variable([args.batch_size, 1, nn_in_size, nn_in_size])
    mask = nn.Variable([args.batch_size, 1, nn_in_size, nn_in_size])
    pred = model.deeplabv3plus_model(
        image, args.output_stride, args.num_class, test=test, fix_params=False)

    # Initializing moving variance by 1
    params = nn.get_parameters()
    for key, val in params.items():
        if 'bn/var' in key:
            val.d.fill(1)

    loss = F.sum(F.softmax_cross_entropy(pred, label, axis=1) * mask) / F.sum(mask)
    Model = namedtuple('Model', ['image', 'label', 'mask', 'pred', 'loss'])
    return Model(image, label, mask, pred, loss)
9199034f0776e6d291185d130aadd78d7bf11ea0
30,710
import random


def post_config(opt):
    """post_config"""
    # init fixed parameters
    opt.noise_amp_init = opt.noise_amp
    opt.nfc_init = opt.nfc
    opt.min_nfc_init = opt.min_nfc
    opt.scale_factor_init = opt.scale_factor
    opt.out_ = 'TrainedModels/%s/scale_factor=%f/' % (opt.input_name[:-4], opt.scale_factor)
    if opt.manualSeed is None:
        opt.manualSeed = random.randint(0, 10000)
    print("Random Seed: ", opt.manualSeed)
    random.seed(opt.manualSeed)
    set_seed(opt.manualSeed)
    return opt
b7b7127e560b24a0d8242dfb9912852e2cd33c7d
30,711
import collections
from itertools import chain


def select_device_with_aspects(required_aspects, excluded_aspects=[]):
    """Selects the root :class:`dpctl.SyclDevice` that has the highest
    default selector score among devices that have all aspects in the
    `required_aspects` list, and do not have any aspects in
    `excluded_aspects` list.

    The list of SYCL device aspects can be found in SYCL 2020 specs:

    https://www.khronos.org/registry/SYCL/specs/sycl-2020/html/sycl-2020.html#sec:device-aspects

    :Example:
        .. code-block:: python

            import dpctl

            # select a GPU that supports double precision
            dpctl.select_device_with_aspects(['fp64', 'gpu'])

            # select non-custom device with USM shared allocations
            dpctl.select_device_with_aspects(
                ['usm_shared_allocations'], excluded_aspects=['custom'])
    """
    if isinstance(required_aspects, str):
        required_aspects = [required_aspects]
    if isinstance(excluded_aspects, str):
        excluded_aspects = [excluded_aspects]
    seq = collections.abc.Sequence
    input_types_ok = isinstance(required_aspects, seq) and isinstance(
        excluded_aspects, seq
    )
    if not input_types_ok:
        raise TypeError(
            "Aspects are expected to be Python sequences, "
            "e.g. lists, of strings"
        )
    for asp in chain(required_aspects, excluded_aspects):
        if not isinstance(asp, str):
            raise TypeError("The list objects must be of a string type")
        if not hasattr(SyclDevice, "has_aspect_" + asp):
            raise AttributeError(f"The {asp} aspect is not supported in dpctl")
    devs = get_devices()
    max_score = 0
    selected_dev = None
    for dev in devs:
        aspect_status = all(
            getattr(dev, "has_aspect_" + asp) is True
            for asp in required_aspects
        )
        aspect_status = aspect_status and not any(
            getattr(dev, "has_aspect_" + asp) is True
            for asp in excluded_aspects
        )
        if aspect_status and dev.default_selector_score > max_score:
            max_score = dev.default_selector_score
            selected_dev = dev
    if selected_dev is None:
        raise SyclDeviceCreationError(
            f"Requested device is unavailable: "
            f"required_aspects={required_aspects}, "
            f"excluded_aspects={excluded_aspects}"
        )
    return selected_dev
3f34baff8aab39ef88d20c610b203723712823af
30,712
def plus(a: int, b: int) -> int:
    """
    plus operation

    :param a: first number
    :param b: second number
    :return: a + b
    """
    return a + b
f54224b8f8c0b599b7cd799aba0d291f11c4c16f
30,714
import tensorflow as tf  # TF1.x-style API assumed (tf.logging, tf.nn.conv3d_transpose)


def linear_upsample_3d(inputs, strides=(2, 2, 2), use_bias=False,
                       trainable=False, name='linear_upsample_3d'):
    """Linear upsampling layer in 3D using strided transpose convolutions.

    The upsampling kernel size will be automatically computed to avoid
    information loss.

    Args:
        inputs (tf.Tensor): Input tensor to be upsampled
        strides (tuple, optional): The strides determine the upsampling
            factor in each dimension.
        use_bias (bool, optional): Flag to train an additional bias.
        trainable (bool, optional): Flag to set the variables to be trainable
            or not.
        name (str, optional): Name of the layer.

    Returns:
        tf.Tensor: Upsampled Tensor
    """
    static_inp_shape = tuple(inputs.get_shape().as_list())
    dyn_inp_shape = tf.shape(inputs)
    rank = len(static_inp_shape)
    num_filters = static_inp_shape[-1]
    strides_5d = [1, ] + list(strides) + [1, ]
    kernel_size = [2 * s if s > 1 else 1 for s in strides]

    kernel = get_linear_upsampling_kernel(
        kernel_spatial_shape=kernel_size,
        out_filters=num_filters,
        in_filters=num_filters,
        trainable=trainable)

    dyn_out_shape = [dyn_inp_shape[i] * strides_5d[i] for i in range(rank)]
    dyn_out_shape[-1] = num_filters

    static_out_shape = [static_inp_shape[i] * strides_5d[i]
                        if isinstance(static_inp_shape[i], int)
                        else None for i in range(rank)]
    static_out_shape[-1] = num_filters
    tf.logging.info('Upsampling from {} to {}'.format(
        static_inp_shape, static_out_shape))

    upsampled = tf.nn.conv3d_transpose(
        value=inputs,
        filter=kernel,
        output_shape=dyn_out_shape,
        strides=strides_5d,
        padding='SAME',
        name='upsample')
    upsampled.set_shape(static_out_shape)

    return upsampled
42613b0aa245f53cc381a1c0b29ee5de67441c5c
30,715
def sample_ingredient(user: User, name: str = "Cinnamon") -> Ingredient:
    """Create a sample ingredient"""
    return Ingredient.objects.create(user=user, name=name)
c3ca73ece2c015608f54dd372749364c6d63b595
30,716
def naive_sample_frequency_spectrum(ts, sample_sets, windows=None, mode="site"):
    """
    Naive definition of the generalised site frequency spectrum.
    """
    method_map = {
        # "site": naive_site_sample_frequency_spectrum,
        "branch": naive_branch_sample_frequency_spectrum}
    return method_map[mode](ts, sample_sets, windows=windows)
0aeb561437a521757fe1c884a063592b8ca2eb2e
30,717
import json

import requests
import six


def make_server_request(request, payload, endpoint, auth=None, method='post'):
    """
    makes a json request to channelstream server endpoint, signing the
    request and sending the payload

    :param request:
    :param payload:
    :param endpoint:
    :param auth:
    :return:
    """
    server_port = request.registry.settings["port"]
    signer = TimestampSigner(request.registry.settings["secret"])
    sig_for_server = signer.sign("channelstream")
    if not six.PY2:
        sig_for_server = sig_for_server.decode("utf8")
    secret_headers = {
        "x-channelstream-secret": sig_for_server,
        "Content-Type": "application/json",
    }
    url = "http://127.0.0.1:%s%s" % (server_port, endpoint)
    response = getattr(requests, method)(
        url, data=json.dumps(payload), headers=secret_headers, auth=auth
    )
    if response.status_code >= 400:
        log.error(response.text)
        response.raise_for_status()
    return response
99c1bac6c3f010692f6e4b94b93ea77b4b655fde
30,719
import numpy as np


def nan_helpfcn(myarray):
    """
    Helper function to return the locations of NaN values as a boolean array,
    plus a function to return the indices of the array.

    Code inspired by:
    http://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array

    Input:
        - myarray, 1d numpy array with possible NaNs,
          e.g. np.array([1, 2, NaN, 4, NaN, 6])
    Output:
        - nans, logical indices of NaNs, e.g. for the example above
          [False, False, True, False, True, False]
        - indf, a lambda function that gives us the indices of the array,
          e.g. indf(nans) where nans was [False, False, True, False, True, False]
          gives [2, 4]. indf(~nans) is functionally equivalent to
          np.array(range(len(myarray)))[~nans].

    Example:
        >>> myarray = np.array([1, 2, np.nan, 4, np.nan, 6])
        >>> nanlocs, indf = nan_helpfcn(myarray)
        >>> nanlocs
        array([False, False,  True, False,  True, False])
        >>> indf(nanlocs)
        array([2, 4])
        >>> indf(~nanlocs)
        array([0, 1, 3, 5])
    """
    return np.isnan(myarray), lambda z: z.nonzero()[0]
b5770e6bdfda85bc71fd954aacc4c31dbbd47f13
30,720
def _get_norm_layer(normalization_type='no_norm', name=None):
    """Get normalization layer.

    Args:
        normalization_type: String. The type of normalization_type, only
            'no_norm' and 'layer_norm' are supported.
        name: Name for the norm layer.

    Returns:
        layer norm class.
    """
    if normalization_type == 'no_norm':
        layer = NoNorm(name=name)
    elif normalization_type == 'layer_norm':
        layer = tf.keras.layers.LayerNormalization(
            name=name, axis=-1, epsilon=1e-12, dtype=tf.float32)
    else:
        raise NotImplementedError('Only "no_norm" and "layer_norm" are supported.')
    return layer
8aa307db8c1ea93905cc5adddcec4a04f8718195
30,721
def run_single_camera(cam):
    """
    This function acts as the body of the example; please see NodeMapInfo
    example for more in-depth comments on setting up cameras.

    :param cam: Camera to setup and run on.
    :type cam: CameraPtr
    :return: True if successful, False otherwise.
    :rtype: bool
    """
    try:
        result = True

        # Retrieve TL device nodemap and print device information
        nodemap_tldevice = cam.GetTLDeviceNodeMap()
        result &= print_device_info(nodemap_tldevice)

        # Initialize camera
        cam.Init()

        # Retrieve GenICam nodemap
        nodemap = cam.GetNodeMap()

        # Configure callbacks
        err, callback_height, callback_gain = configure_callbacks(nodemap)
        if not err:
            return err

        # Change height and gain to trigger callbacks
        result &= change_height_and_gain(nodemap)

        # Reset callbacks
        result &= reset_callbacks(nodemap, callback_height, callback_gain)

        # Deinitialize camera
        cam.DeInit()
    except PySpin.SpinnakerException as ex:
        print('Error: %s' % ex)
        return False

    return result
b7a3df5fb0e44ac4ce293d6ff89d4955d662f482
30,722
import six


def all_strs_text(obj):
    """
    PyYAML refuses to load strings as 'unicode' on Python 2 - recurse all
    over obj and convert every string.
    """
    if isinstance(obj, six.binary_type):
        return obj.decode('utf-8')
    elif isinstance(obj, list):
        return [all_strs_text(x) for x in obj]
    elif isinstance(obj, tuple):
        return tuple(all_strs_text(x) for x in obj)
    elif isinstance(obj, dict):
        return {six.text_type(k): all_strs_text(v) for k, v in six.iteritems(obj)}
    else:
        return obj
20b27cf809ed7fbf12b30a357d6aecfeeed88461
30,723
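Worked example for all_strs_text above: bytes anywhere in nested containers decode to text, other values pass through.

print(all_strs_text({"key": [b"a", (b"b", 3)]}))
# -> {'key': ['a', ('b', 3)]}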
async def login_user(credentials: OAuth2PasswordRequestForm = Depends()):
    """Endpoint for logging user in."""
    user = services.authenticate_user(email=credentials.username,
                                      password=credentials.password)
    if not user:
        raise HTTPException(status_code=401, detail="Invalid Credentials")
    return services.create_token(user)
d8e304b7cf718afce7a61c74ab38769d5695e7f5
30,724
def fahrenheit2celsius(f: float) -> float:
    """Utility function to convert from Fahrenheit to Celsius."""
    return (f - 32) * 5 / 9
5161b29998553ad6ff497e698058f330433d90b3
30,727
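Sanity checks for fahrenheit2celsius above at the freezing and boiling points.

assert fahrenheit2celsius(32) == 0.0
assert fahrenheit2celsius(212) == 100.0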
def loadFireTurnMap():
    """load in hard-coded 11x11 fire turn map, then flip so that access is
    [x][y] to match Board access"""
    boardSize = 11
    fireMapFile = open("fireTurnMap.txt", "r")
    data = [[int(n) for n in line.split()] for line in fireMapFile]
    fireMapFile.close()
    rotated = [[None for j in range(boardSize)] for i in range(boardSize)]
    for i, row in enumerate(data):
        for j, value in enumerate(row):
            rotated[j][i] = value
    return rotated
a22350cf8ab488d719cdbaa0e3900c446b59b6f3
30,728
import itertools


def get_param_list(params, mode='grid', n_iter=25):
    """
    Get a list with all the parameter combinations that will be tested for
    optimization.

    Parameters
    ----------
    params: dictionary
        Each key corresponds to a parameter. The values correspond to a list
        of parameters to be explored. In the case of 'grid', all possible
        parameter combinations will be explored. In the case of 'random', a
        determined number of random combinations of parameters.
    mode: string
        Possible values are: 'grid' (default), 'random'
    n_iter: int (optional)
        Number of parameter settings that will be sampled. Only valid for
        'random' mode; otherwise, ignored. Default value is 25. Notice there
        is a trade-off between runtime and quality of the solution.

    Returns
    -------
    param_list: list
        List of dictionaries. Each dictionary has a parameter combination to try.
    """
    # Generating a list of dictionaries with all parameter combinations to try.
    if mode == 'grid':
        # In this case, we generate a list of dictionaries of ALL
        # possible parameter value combinations.
        # Trick from https://stackoverflow.com/a/61335465/948768
        keys, values = zip(*params.items())
        param_list = [dict(zip(keys, v)) for v in itertools.product(*values)]
    elif mode == 'random':
        # In this case, we generate a list of dictionaries with random
        # combinations of parameter values.
        param_list = []
        for hh in range(n_iter):
            # Initialization.
            param_dict = {}
            for key, value in zip(params.keys(), params.values()):
                param_dict[key] = get_value_from_distribution(key, value)
            # Append the generated dictionary to the list.
            param_list.append(param_dict)
    else:
        raise ValueError("Invalid parameter optimization mode. Possible values are 'grid' and 'random'.")
    return param_list
ed99bdff2a27df05e81f04e0495b60fc845ec5c3
30,729
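# Usage sketch for get_param_list (added for illustration; 'grid' mode only,
# since 'random' relies on a get_value_from_distribution helper defined
# elsewhere in the module):
params = {'learning_rate': [0.1, 0.01], 'max_depth': [3, 5]}
for combo in get_param_list(params, mode='grid'):
    print(combo)
# {'learning_rate': 0.1, 'max_depth': 3}
# {'learning_rate': 0.1, 'max_depth': 5}
# ... 4 combinations in total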
def _apple_universal_binary_rule_transition_impl(settings, attr): """Rule transition for `apple_universal_binary` supporting forced CPUs.""" forced_cpus = attr.forced_cpus platform_type = attr.platform_type new_settings = dict(settings) # If forced CPUs were given, first we overwrite the existing CPU settings # for the target's platform type with those CPUs. We do this before applying # the base rule transition in case it wants to read that setting. if forced_cpus: new_settings[_platform_specific_cpu_setting_name(platform_type)] = forced_cpus # Next, apply the base transition and get its output settings. new_settings = _apple_rule_base_transition_impl(new_settings, attr) # The output settings from applying the base transition won't have the # platform-specific CPU flags, so we need to re-apply those before returning # our result. For the target's platform type, use the forced CPUs if they # were given or use the original value otherwise. For every other platform # type, re-propagate the original input. # # Note that even if we don't have `forced_cpus`, we must provide values for # all of the platform-specific CPU flags because they are declared outputs # of the transition; the build will fail at analysis time if any are # missing. for other_type, flag in _PLATFORM_TYPE_TO_CPU_FLAG.items(): if forced_cpus and platform_type == other_type: new_settings[flag] = forced_cpus else: new_settings[flag] = settings[flag] return new_settings
e672473db5b117102a2147445c0416f3a22b2b2d
30,730
from urllib.parse import unquote


def xml_get_text(_node):
    """Helper function to get character data from an XML tree"""
    rc = list()
    for node in _node.childNodes:
        if node.nodeType == node.TEXT_NODE:
            rc.append(node.data)
    return unquote(''.join(rc))
0b611c0a95707b4220a114c7fe76c4fefd9d1615
30,731
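# Minimal usage example for xml_get_text (added for illustration), using the
# standard-library minidom parser; percent-encoded character data is decoded.
from xml.dom.minidom import parseString

doc = parseString('<greeting>hello%20world</greeting>')
print(xml_get_text(doc.documentElement))  # -> 'hello world'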
def volume_get_all(context, marker, limit, sort_keys=None, sort_dirs=None, filters=None, offset=None): """Retrieves all volumes. If no sort parameters are specified then the returned volumes are sorted first by the 'created_at' key and then by the 'id' key in descending order. :param context: context to query under :param marker: the last item of the previous page, used to determine the next page of results to return :param limit: maximum number of items to return :param sort_keys: list of attributes by which results should be sorted, paired with corresponding item in sort_dirs :param sort_dirs: list of directions in which results should be sorted, paired with corresponding item in sort_keys :param filters: dictionary of filters; values that are in lists, tuples, or sets cause an 'IN' operation, while exact matching is used for other values, see _process_volume_filters function for more information :returns: list of matching volumes """ session = get_session() with session.begin(): # Generate the query query = _generate_paginate_query(context, session, marker, limit, sort_keys, sort_dirs, filters, offset) # No volumes would match, return empty list if query is None: return [] return query.all()
664807482b6e26c6e96f8ec697d7d70a5c53d087
30,732
def merge_multirnn_lstm_state(states, w, b):
    """ Given a list of multirnn lstm states, merge them into a new state of the
        same shape, merged by concatenation and then projection
    Args:
        states: the states to merge, each of shape (s1, s2, s3, ...), where each s
            is an LSTMStateTuple(c, h) and h, c are of shape (batch_size, hidden_size)
        w: the projection weight, of shape (hidden_size * len(states), hidden_size)
        b: the projection bias, of shape (hidden_size,)
    Returns:
        the merged state
    """
    new_state = []
    for i in range(len(states[0])):
        new_state.append(merge_lstm_states([s[i] for s in states], w, b))
    return tuple(new_state)
7ccc6666fafd1e1b6e117dc257598d88777c3e40
30,733
def gen_iocs(indicator_list):
    """
    Generates a list of IOCs from a list of Anomali indicators
    :param indicator_list: list of Anomali indicators, types ip_address, url, or domain
    :return: list of IOC objects
    """
    ioc_list = list()
    for i in indicator_list:
        pp = process_pattern(i.get('pattern'))
        if pp:
            t, v = pp.popitem()
        else:
            t, v = None, None
        # Only build an IOC when both the type and the value were extracted.
        if t is not None and v is not None:
            ioc_list.append(
                indicators.IOC(
                    v,
                    t,
                    [i.get('labels')],
                    None,
                    process_severity(i.get('labels')),
                    i.get('description'),
                    None,
                    None
                )
            )
    return ioc_list
48bb07cf726f9052abbfc7de59e78f49f4b111d3
30,734
import csv


def parse_input_specifications() -> dict[str, InputFile]:
    """
    Ingest the input specs file and return a dictionary of the data.
    """
    with open(PATH_INPUT_SPECS, 'r') as csvfile:
        reader = csv.reader(csvfile)
        next(reader)
        input_files = {}
        input_file = None
        for row in reader:
            if not any(row):
                continue
            filename, column, datatype, example, default, notes, source = (c.strip() for c in row)
            if filename:
                input_file = InputFile(filename, source, notes)
                input_files[filename] = input_file
            else:
                c = InputFileColumn(column, datatype, example, default, notes)
                input_file.add_column(c)
        return input_files
fddc4b3ff1e9a45f09a62981fc12e7da1fa4e25c
30,736
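# InputFile and InputFileColumn are not defined in this snippet; a minimal
# sketch of what they might look like (assumptions, not the original classes):
class InputFileColumn:
    def __init__(self, column, datatype, example, default, notes):
        self.column = column
        self.datatype = datatype
        self.example = example
        self.default = default
        self.notes = notes


class InputFile:
    def __init__(self, filename, source, notes):
        self.filename = filename
        self.source = source
        self.notes = notes
        self.columns = []

    def add_column(self, column):
        self.columns.append(column)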
import base64 def image_base64(img): """Return image as base64.""" if isinstance(img, str): img = get_thumbnail(img) with BytesIO() as buffer: img.save(buffer, "jpeg") return base64.b64encode(buffer.getvalue()).decode()
9a7cbbf9fd973831875ea0643547fd5abff2aa69
30,737
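# Usage sketch for image_base64 (added for illustration): build a small image
# in memory with Pillow and embed the result in an HTML data URI.
from PIL import Image

img = Image.new('RGB', (8, 8), 'red')
b64 = image_base64(img)
html = f'<img src="data:image/jpeg;base64,{b64}">'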
def read_file(file):
    """This function reads the raw data file, gets the scanrate and stepsize
    and then reads the lines according to cycle number. Once it reads the
    data for one cycle, it calls the read_cycle function to generate a
    dataframe. It does the same thing for all the cycles and finally returns
    a dictionary, the keys of which are the cycle numbers and the values are
    the corresponding dataframes.

    Parameters
    __________
    file: raw data file

    Returns:
    ________
    df_dict : dict
        dictionary of dataframes with keys as cycle numbers and values as
        dataframes for each cycle
    n_cycle: int
        number of cycles in the raw file
    voltam_parameters: dict
        dictionary containing the experimental parameters used for the
        cyclic voltammetry scan
    """
    voltam_parameters = {}
    df_dict = {}
    data = {}
    param = 0
    n_cycle = 0
    with open(file, 'r') as f:
        # print(file + ' Opened')
        for line in f:
            if param != 6:
                if line.startswith('SCANRATE'):
                    voltam_parameters['scan_rate(mV/s)'] = \
                        float(line.split()[2])
                    param = param + 1
                if line.startswith('STEPSIZE'):
                    voltam_parameters['step_size(V)'] = \
                        float(line.split()[2]) * 0.001
                    param = param + 1
                if line.startswith('VINIT'):
                    voltam_parameters['vinit(V)'] = float(line.split()[2])
                    param = param + 1
                if line.startswith('VLIMIT1'):
                    voltam_parameters['vlimit_1(V)'] = float(line.split()[2])
                    param = param + 1
                if line.startswith('VLIMIT2'):
                    voltam_parameters['vlimit_2(V)'] = float(line.split()[2])
                    param = param + 1
                if line.startswith('VFINAL'):
                    voltam_parameters['vfinal(V)'] = float(line.split()[2])
                    param = param + 1
            if line.startswith('CURVE'):
                n_cycle += 1
                data['cycle_' + str(n_cycle)] = []
            if n_cycle:
                data['cycle_' + str(n_cycle)].append(line)
    for i in range(len(data)):
        df_dict['cycle_' + str(i + 1)] = read_cycle(data['cycle_' + str(i + 1)])
    return df_dict, n_cycle, voltam_parameters
8eb59ad8f8b700a0d0c26386644be97aa2417bb7
30,738
import pandas as pd


def calculate_performance(data):
    """Calculates swarm performance using a performance function"""
    df = pd.DataFrame(data)
    prev_column = None
    V = 0
    G = 0
    C = 0
    vcount = 0
    gcount = 0
    ccount = 0
    for column in df:
        v = calculate_max_speed(df, column, prev_column)
        g = calculate_vertical_mse(df, column)
        c = calculate_distance_sd(df, column)
        if v is not None:
            V += v
            vcount += 1
        if g is not None:
            G += g
            gcount += 1
        if c is not None:
            C += c
            ccount += 1
        prev_column = column
    V /= vcount
    G /= gcount
    C /= ccount
    print(f'V: {round(V, 2)} | C: {round(C, 2)} | G: {round(G, 2)}')
    return round((V * 1000) / (C * G), 2)
dbf060501991b5408f8d102f35dbe60b34fae0a9
30,740
def ipn(request):
    """
    Webhook handling for Coinbase Commerce
    """
    if request.method == 'POST':
        request_sig = request.META.get('HTTP_X_CC_WEBHOOK_SIGNATURE', None)

        '''
        # this was done in flask:
        # request_data = request.data.decode('utf-8')
        try:
            # signature verification and event object construction
            event = Webhook.construct_event(
                json.dumps(request.data), request_sig, settings.ICO_COINBASE_WEBHOOK_SECRET)
        except Exception as e:
            return Response(
                {'message': 'Signature verification failed'},
                status=status.HTTP_401_UNAUTHORIZED
            )
        '''

        event = request.data['event']
        user = User.objects.get(pk=event['data']['metadata']['user_id'])
        code = event['data']['code']
        amount = float(event['data']['local']['amount'])
        purchased = helpers.calculate_bought(amount)
        # Don't shadow the rest_framework `status` module, which is needed
        # below for status.HTTP_200_OK.
        charge_status = event['type'].split(':')[1]

        if charge_status == 'pending':
            Transaction.objects.create(
                user=user,
                code=code,
                amount=amount,
                currency='USD',
                description=f'[{settings.ICO_STAGE}] Purchased {purchased} {settings.ICO_TOKEN_SYMBOL.upper()}',
                status=charge_status
            )
        elif charge_status == 'confirmed':
            tx = Transaction.objects.get(user=user, code=code)
            tx.status = charge_status
            tx.save()

            Transaction.objects.create(
                user=user,
                code=helpers.transfer_tokens(user, purchased),
                amount=purchased,
                currency=settings.ICO_TOKEN_SYMBOL.upper(),
                description=f'[{settings.ICO_STAGE}] Received {purchased} {settings.ICO_TOKEN_SYMBOL.upper()}',
                status=charge_status
            )

        return Response({'message': 'success'}, status=status.HTTP_200_OK)
774ad9ebe0f1be9b65c73e90627640f66fe16d4c
30,741
def get_decomposed_entries(structure_type, species):
    """
    Get decomposed entries for mix types
    Args:
        structure_type(str): "garnet" or "perovskite"
        species (dict): species in dictionary.

    Returns:
        decompose entries(list): list of entries prepared from unmixed
            garnets/perovskites decomposed from the input mixed
            garnet/perovskite
    """
    def decomposed(specie_complex):
        """Decompose those have sub-dict to individual dict objects."""
        for site, specie in specie_complex.items():
            spe_copy = specie_complex.copy()
            if len(specie) > 1:
                for spe, amt in specie.items():
                    spe_copy[site] = {spe: 1}
                    yield spe_copy

    decompose_entries = []
    model, scaler = load_model_and_scaler(structure_type, "unmix")
    std_formula = STD_FORMULA[structure_type]
    for unmix_species in decomposed(species):
        charge = sum([spe.oxi_state * amt * SITE_INFO[structure_type][site]["num_atoms"]
                      for site in SITE_INFO[structure_type].keys()
                      for spe, amt in unmix_species[site].items()])
        if not abs(charge - 2 * std_formula['O']) < 0.1:
            continue

        formula = spe2form(structure_type, unmix_species)
        composition = Composition(formula)
        elements = [el.name for el in composition]
        chemsy = '-'.join(sorted(elements))
        calc_entries = []
        if CALC_ENTRIES[structure_type].get(chemsy):
            calc_entries = [entry for entry in CALC_ENTRIES[structure_type][chemsy] if
                            entry.name == Composition(formula).reduced_formula]

        if calc_entries:
            decompose_entries.extend(calc_entries)

        else:
            cn_specific = structure_type == 'garnet'
            descriptors = get_descriptor(structure_type, unmix_species,
                                         cn_specific=cn_specific)
            form_e = get_form_e(descriptors, model, scaler)
            # tot_e = get_tote(form_e * std_formula.num_atoms, unmix_species)
            tot_e = get_tote(structure_type, form_e * std_formula.num_atoms,
                             unmix_species)
            entry = prepare_entry(structure_type, tot_e, unmix_species)
            compat = MaterialsProjectCompatibility()
            entry = compat.process_entry(entry)
            decompose_entries.append(entry)

    return decompose_entries
0db24d7be2cacc2c0aed180cf5d31ccde057e358
30,742
def closest_point(p1, p2, s): """closest point on line segment (p1,p2) to s; could be an endpoint or midspan""" #if the line is a single point, the closest point is the only point if p1==p2: return (0,p1) seg_vector = vector_diff(p2,p1) seg_mag = mag(seg_vector) #print( "seg_vector, length", seg_vector, seg_mag ) seg_unit = vector_div( seg_vector, seg_mag ) stop_vector = vector_diff(s,p1) #print( "stop_vector", stop_vector ) #scalar projection of A onto B = (A dot B)/|B| = A dot unit(B) sp = dot_product( stop_vector, seg_unit ) #print( "scalar projection", sp ) if sp < 0: #closest point is startpoint #print( "startpoint" ) return (0, p1) elif sp > seg_mag: #closest point is endpoint #print( "endpoint" ) return (1, p2) else: #closest point is midspan #print( "midpoint" ) return (sp/seg_mag, vector_sum(p1,vector_mult( seg_unit, sp )))
5fbce0ac5b2d87f15b6dd5a146e77b23dba3d743
30,744
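# closest_point relies on small vector helpers that are defined elsewhere in
# the module; plausible 2-D tuple implementations (assumptions, not the
# originals) plus a worked example follow.
import math


def vector_diff(a, b):
    return (a[0] - b[0], a[1] - b[1])


def vector_sum(a, b):
    return (a[0] + b[0], a[1] + b[1])


def vector_mult(v, k):
    return (v[0] * k, v[1] * k)


def vector_div(v, k):
    return (v[0] / k, v[1] / k)


def mag(v):
    return math.hypot(v[0], v[1])


def dot_product(a, b):
    return a[0] * b[0] + a[1] * b[1]


# The stop at (3, 5) projects onto the segment (0,0)-(10,0) at t = 0.3:
print(closest_point((0, 0), (10, 0), (3, 5)))  # -> (0.3, (3.0, 0.0))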
def new_name(): """ Returns a new legal identifier in C each time it's called Note: Not thread-safe in its current incarnation >>> name1 = new_name() >>> name2 = new_name() >>> name1 != name2 True """ global _num_names _num_names += 1 return '_id_{}'.format(_num_names)
bd72bfdedc7ccd00e973d9677e116cf5afe07314
30,745
def pollard_brent_f(c, n, x): """Return f(x) = (x^2 + c)%n. Assume c < n. """ x1 = (x * x) % n + c if x1 >= n: x1 -= n assert x1 >= 0 and x1 < n return x1
5037b3feac2f131645fbe6ceb00f0d18417a7c04
30,746
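# pollard_brent_f is the iteration map x -> (x^2 + c) mod n used inside
# Pollard's rho / Brent factorisation; a short demonstration of the orbit
# (added for illustration):
n, c, x = 8051, 1, 2
orbit = []
for _ in range(6):
    x = pollard_brent_f(c, n, x)
    orbit.append(x)
print(orbit)  # successive values of x_{i+1} = (x_i^2 + 1) mod 8051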
import cv2


def morphological_transformation(input_dir):
    """
    Performs advanced morphological transformations.

    Args:
        input_dir: path to the input picture.

    Returns:
        The raw image and the image after Gaussian blur, thresholding and
        morphological close/open processing.
    """
    raw_image = cv2.imread(input_dir)
    gray_image = cv2.cvtColor(raw_image, cv2.COLOR_BGR2GRAY)
    # Gauss Fuzzy De-noising (Setting the Size of Convolution Kernel Affects
    # the Effect).
    blur_image = cv2.GaussianBlur(gray_image, (9, 9), 0)
    # Setting threshold 165 (Threshold affects open-close operation effect).
    _, threshold = cv2.threshold(blur_image, 165, 255, cv2.THRESH_BINARY)
    # Define rectangular structural elements.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    # Closed operation (link blocks).
    closed = cv2.morphologyEx(threshold, cv2.MORPH_CLOSE, kernel)
    # Open operation (de-noising points).
    image = cv2.morphologyEx(closed, cv2.MORPH_OPEN, kernel)
    return raw_image, image
b5db242cd39a71aea570d3f61b4c7a217565aa5d
30,747
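# Usage sketch for morphological_transformation (added for illustration):
# synthesise a test image on disk, then run the pipeline on it. Assumes
# OpenCV and NumPy are installed.
import os
import tempfile

import cv2
import numpy as np

canvas = np.zeros((100, 100, 3), dtype=np.uint8)
cv2.rectangle(canvas, (20, 20), (80, 80), (255, 255, 255), -1)
path = os.path.join(tempfile.gettempdir(), 'morph_demo.png')
cv2.imwrite(path, canvas)

raw, processed = morphological_transformation(path)
print(raw.shape, processed.shape)  # (100, 100, 3) (100, 100)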
import contextlib
import importlib
import io

import this


def import_this(**kwargs):
    """Print the Zen of Python"""
    # https://stackoverflow.com/a/23794519
    zen = io.StringIO()
    with contextlib.redirect_stdout(zen):
        # Re-importing the module re-runs its side effect of printing the
        # Zen, which redirect_stdout captures into the StringIO buffer.
        importlib.reload(this)
    text = f"```{zen.getvalue()}```"
    return text
1a80b384154f4cfa8b71eeb57807b8e9e3fce322
30,749
def getCurrentPane(): """Retrieve the current pane index as an int.""" return int(tget("display-message -p '#P'"))
f7439d407ef618c7d516ad9eed5c925c49639533
30,750
import networkx as nx


def get_lcc_size(G, seed_nodes):
    """
    return the lcc size
    """
    # getting subgraph that only consists of the black_nodes
    g = nx.subgraph(G, list(seed_nodes))

    if g.number_of_nodes() != 0:
        # get the largest connected component; connected_component_subgraphs
        # was removed in NetworkX 2.4, connected_components returns node sets
        max_CC = max(nx.connected_components(g), key=len)
        return len(max_CC)  # size of largest connected component
    else:
        return 0
6582ab76a5b7a178d22592305d134529b327a2ec
30,751
def IsParalogLink(link, cds1, cds2):
    """sort out ortholog relationships between
    transcripts of orthologous genes.
    """

    map_a2b = alignlib_lite.makeAlignmentVector()
    alignlib_lite.AlignmentFormatEmissions(
        link.mQueryFrom, link.mQueryAli,
        link.mSbjctFrom, link.mSbjctAli).copy(map_a2b)

    if link.mQueryLength < (map_a2b.getRowTo() - map_a2b.getRowFrom() + 1) or \
            link.mSbjctLength < (map_a2b.getColTo() - map_a2b.getColFrom() + 1):
        print("ERRONEOUS LINK: %s" % str(link))
        raise ValueError("length discrepancy")

    coverage_a = 100.0 * \
        (map_a2b.getRowTo() - map_a2b.getRowFrom() + 1) / link.mQueryLength
    coverage_b = 100.0 * \
        (map_a2b.getColTo() - map_a2b.getColFrom() + 1) / link.mSbjctLength

    # check exon boundaries, look at starts, skip first exon
    def MyMap(a, x):
        if x < a.getRowFrom():
            return 0
        while x <= a.getRowTo():
            c = a.mapRowToCol(x)
            if c:
                return c
            x += 1
        else:
            return 0

    # integer division: convert peptide coordinates to codon positions
    mapped_boundaries = UniquifyList(
        [MyMap(map_a2b, x.mPeptideFrom // 3 + 1) for x in cds1[1:]])
    reference_boundaries = UniquifyList(
        [x.mPeptideFrom // 3 + 1 for x in cds2[1:]])

    nmissed = 0
    nfound = 0
    nmin = min(len(mapped_boundaries), len(reference_boundaries))
    nmax = max(len(mapped_boundaries), len(reference_boundaries))
    both_single_exon = len(cds1) == 1 and len(cds2) == 1
    one_single_exon = len(cds1) == 1 or len(cds2) == 1
    if len(mapped_boundaries) < len(reference_boundaries):
        mless = mapped_boundaries
        mmore = reference_boundaries
    else:
        mmore = mapped_boundaries
        mless = reference_boundaries

    # check if exon boundaries are ok
    for x in mless:
        is_ok = 0
        for c in mmore:
            if abs(x - c) < param_boundaries_max_slippage:
                is_ok = 1
                break
        if is_ok:
            nfound += 1
        else:
            nmissed += 1

    # set is_ok depending on exon boundaries
    # in single exon cases, require a check of coverage
    is_ok = False
    check_coverage = False
    if both_single_exon or one_single_exon:
        is_ok = True
        check_coverage = True
    else:
        if nmin == 1:
            is_ok = nmissed == 0
        elif nmin == 2:
            is_ok = nmissed <= 1
        elif nmin > 2:
            is_ok = nfound >= 2

    cc = min(coverage_a, coverage_b)

    if param_loglevel >= 3:
        print("# nquery=", len(cds1), "nsbjct=", len(cds2), "nmin=", nmin,
              "nmissed=", nmissed, "nfound=", nfound,
              "is_ok=", is_ok, "check_cov=", check_coverage,
              "min_cov=", cc, coverage_a, coverage_b,
              "mapped=", mapped_boundaries,
              "reference=", reference_boundaries)

    if not is_ok:
        return True, "different exon boundaries"

    if check_coverage and cc < param_min_coverage:
        return True, "low coverage"

    return False, None
012ad1a195c42127a39cabee9f7380c7cb8f6f9b
30,752
from numpy import array, vstack from scipy.spatial import Voronoi def segments(points): """ Return the bounded segments of the Voronoi diagram of the given points. INPUT: - ``points`` -- a list of complex points OUTPUT: A list of pairs ``(p1, p2)``, where ``p1`` and ``p2`` are the endpoints of the segments in the Voronoi diagram. EXAMPLES:: sage: from sage.schemes.curves.zariski_vankampen import discrim, segments # optional - sirocco sage: R.<x,y> = QQ[] sage: f = y^3 + x^3 - 1 sage: disc = discrim(f) # optional - sirocco sage: segments(disc) # optional - sirocco # abs tol 1e-15 [(-2.84740787203333 - 2.84740787203333*I, -2.14285714285714 + 1.11022302462516e-16*I), (-2.84740787203333 + 2.84740787203333*I, -2.14285714285714 + 1.11022302462516e-16*I), (2.50000000000000 + 2.50000000000000*I, 1.26513881334184 + 2.19128470333546*I), (2.50000000000000 + 2.50000000000000*I, 2.50000000000000 - 2.50000000000000*I), (1.26513881334184 + 2.19128470333546*I, 0.000000000000000), (0.000000000000000, 1.26513881334184 - 2.19128470333546*I), (2.50000000000000 - 2.50000000000000*I, 1.26513881334184 - 2.19128470333546*I), (-2.84740787203333 + 2.84740787203333*I, 1.26513881334184 + 2.19128470333546*I), (-2.14285714285714 + 1.11022302462516e-16*I, 0.000000000000000), (-2.84740787203333 - 2.84740787203333*I, 1.26513881334184 - 2.19128470333546*I)] """ discpoints = array([(CC(a).real(), CC(a).imag()) for a in points]) added_points = 3 * abs(discpoints).max() + 1.0 configuration = vstack([discpoints, array([[added_points, 0], [-added_points, 0], [0, added_points], [0, -added_points]])]) V = Voronoi(configuration) res = [] for rv in V.ridge_vertices: if not -1 in rv: p1 = CC(list(V.vertices[rv[0]])) p2 = CC(list(V.vertices[rv[1]])) res.append((p1, p2)) return res
5d4c62455a605dfb09c1009b44e12ecd726e4c84
30,753
def tpu_ordinal_fn(shard_index_in_host, replicas_per_worker): """Return the TPU ordinal associated with a shard.""" return shard_index_in_host % replicas_per_worker
773313750ce78cf5d32776752cb75201450416ba
30,754
def replace_example_chapter(path_to_documentation, chapter_lines):
    """func(path_to_doc._tx, [new_chapter]) -> [regenerated_documentation]

    Opens the documentation and searches for the text section separated
    through the global marks START_MARK/END_MARK. Returns the opened file
    with that section replaced by chapter_lines.
    """
    lines = []
    state = 0
    with open(path_to_documentation, "rt") as doc_file:
        for line in doc_file.readlines():
            if state == 0:
                lines.append(line)
                if line.find(START_MARK) > 0:
                    state = state + 1
            elif state == 1:
                if line.find(END_MARK) > 0:
                    lines.extend(chapter_lines)
                    lines.append(line)
                    state = state + 1
            else:
                lines.append(line)
    return lines
c4bb2285a55b0235d44a2550a3bad5a9d83583ad
30,755
def terminal(board):
    """
    Returns True if game is over, False otherwise.
    """
    if winner(board):
        return True

    for i in range(3):
        for j in range(3):
            if board[i][j] == EMPTY:
                return False

    return True
0dd194c8281539977596779209d59533022ad16c
30,756
from collections import OrderedDict


def get_network(layers, phase):
    """Get structure of the network.

    Parameters
    ----------
    layers : list
        list of layers parsed from network parameters
    phase : int
        0 : train
        1 : test
    """
    num_layers = len(layers)
    network = OrderedDict()

    for i in range(num_layers):
        layer = layers[i]
        if check_phase(layer, phase):
            layer_id = "trans_layer_" + str(i)
            if layer_id not in network:
                network[layer_id] = []
            prev_blobs = [str(b) for b in layer.bottom]
            next_blobs = [str(b) for b in layer.top]
            for blob in prev_blobs + next_blobs:
                if blob not in network:
                    network[blob] = []
            for blob in prev_blobs:
                network[blob].append(layer_id)
            network[layer_id].extend(next_blobs)
    network = remove_loops(network)
    network = remove_blobs(network)
    return network
cfbbcc99195a4e81503a89ce80a6d2314c2deb30
30,757
def create_category_hiearchy(cats, categoryType):
    """A function that creates a dict of the root and subroot categories"""
    dict_out = {}
    for key in cats.keys():
        name = cats[key]['name']
        parent_name = cats[key]['parent']['name']
        cat_type = cats[key]['categoryType']
        if cat_type == categoryType:
            # Check if parent name is Root and should be the key
            if parent_name == 'Root':
                # Add the key with an empty list if it does not exist yet
                if name not in dict_out:
                    dict_out[name] = []
            else:
                # Check if parent_name is already a key
                if parent_name not in dict_out:
                    # If not, add the key and empty list
                    dict_out[parent_name] = []
                # Add the subcategory
                dict_out[parent_name].append(name)
    return dict_out
f0b19f2a6f56e49855a019a18d9357a31cfaeb2a
30,758
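# Usage sketch for create_category_hiearchy (added for illustration), with a
# hypothetical cats mapping shaped the way the function expects:
cats = {
    1: {'name': 'Food', 'parent': {'name': 'Root'}, 'categoryType': 'EXPENSE'},
    2: {'name': 'Groceries', 'parent': {'name': 'Food'}, 'categoryType': 'EXPENSE'},
    3: {'name': 'Salary', 'parent': {'name': 'Root'}, 'categoryType': 'INCOME'},
}
print(create_category_hiearchy(cats, 'EXPENSE'))  # -> {'Food': ['Groceries']}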
import time def date(): """ Returns the current time formated with HTTP format. @return: `str` """ return time.strftime('%a, %d %b %Y %H:%M:%S GMT')
909f1f31c6c7f0ed03fe0b30785ff454f541a5fc
30,760
def _estimate_gaussian_covariances_tied(resp, X, nk, means, reg_covar): """Estimate the tied covariance matrix. Parameters ---------- resp : array-like of shape (n_samples, n_components) X : array-like of shape (n_samples, n_features) nk : array-like of shape (n_components,) means : array-like of shape (n_components, n_features) reg_covar : float Returns ------- covariance : array, shape (n_features, n_features) The tied covariance matrix of the components. """ avg_X2 = np.dot(X.T, X) avg_means2 = np.dot(nk * means.T, means) covariance = avg_X2 - avg_means2 covariance /= nk.sum() covariance.flat[:: len(covariance) + 1] += reg_covar return covariance
3bf510982698643afd9377e64d8fe569d7626452
30,761
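# Numeric sketch for _estimate_gaussian_covariances_tied (added for
# illustration): with uniform responsibilities over two components, the tied
# covariance reduces to the pooled covariance of the data.
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 2))
resp = np.full((200, 2), 0.5)
nk = resp.sum(axis=0)
means = np.dot(resp.T, X) / nk[:, np.newaxis]
cov = _estimate_gaussian_covariances_tied(resp, X, nk, means, 1e-6)
print(cov.shape)  # (2, 2)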
from typing import TypeVar

from hypothesis.strategies import SearchStrategy, builds

from pfun import either

A = TypeVar('A')


def rights(value_strategy: SearchStrategy[A]
           ) -> SearchStrategy[either.Right[A]]:
    """
    Create a search strategy that produces `pfun.either.Right` values

    Args:
        value_strategy: search strategy to draw values from
    Example:
        >>> rights(integers()).example()
        Right(0)
    Return:
        search strategy that produces `pfun.either.Right` values
    """
    return builds(either.Right, value_strategy)
867db62f02955bf226109bf1cb8f04d4fb3f578c
30,762
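# Usage sketch for rights with a Hypothesis property test (added for
# illustration; assumes the hypothesis and pfun packages are installed and
# that pfun's Right exposes the wrapped value as `.get`):
from hypothesis import given
from hypothesis.strategies import integers


@given(rights(integers()))
def test_right_wraps_an_int(right_value):
    assert isinstance(right_value.get, int)


test_right_wraps_an_int()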
def lpad(col, len, pad): """ Left-pad the string column to width `len` with `pad`. >>> df = spark.createDataFrame([('abcd',)], ['s',]) >>> df.select(lpad(df.s, 6, '#').alias('s')).collect() [Row(s=u'##abcd')] """ sc = SparkContext._active_spark_context return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
b2a8b01b06166b4fd4ec09b76b634c8b2e231d86
30,763