Dataset columns:
    content : string (lengths 35 to 762k)
    sha1    : string (length 40)
    id      : int64 (0 to 3.66M)
import typing
import random
import itertools


def get_word(count: typing.Union[int, typing.Tuple[int]] = 1,  # pylint: disable=dangerous-default-value
             sep: str = ' ',
             func: typing.Optional[typing.Union[str, typing.Callable[[str], str]]] = None,
             args: typing.Tuple[str] = (),
             kwargs: typing.Dict[str, str] = {}) -> str:
    """Return random words.

    .. code:: python

        >>> get_word(count=3)
        'anim voluptate non'
        >>> get_word(count=3, func='capitalize')
        'Non Labore Ut'
        >>> get_word(count=3, func=lambda s: s.upper())
        'NISI TEMPOR CILLUM'

    Args:
        count (:obj:`Union[int, Tuple[int]]`): Number of random words. To generate a random
            number of words, supply a 2-element tuple of :obj:`int`; the function will use
            :func:`random.randint` to choose a random integer as the number of random words.
        sep (str): Separator between each word.
        func (:obj:`Optional[Union[str, Callable[[str], str]]]`): Filter function. It can be
            a function name of :obj:`str`, or a customised function that takes the original
            :obj:`str` and returns the modified :obj:`str`.
        args (:obj:`Tuple[str]`): Additional positional arguments for ``func``.
        kwargs (:obj:`Dict[str, Any]`): Additional keyword arguments for ``func``.

    Returns:
        :obj:`str`: Random words.

    """
    if isinstance(count, tuple):
        count = random.randint(*count)
    return sep.join(itertools.islice(word(count, func, args, kwargs), count))
3d5e1f82a4f32eae88016c0a89e9295b80d382e5
24,933
def chrom_exp_cusp(toas, freqs, log10_Amp=-7, sign_param=-1.0,
                   t0=54000, log10_tau=1.7, idx=2):
    """
    Chromatic exponential-cusp delay term in TOAs.

    :param t0: time of exponential minimum [MJD]
    :param tau: 1/e time of exponential [s]
    :param log10_Amp: amplitude of cusp
    :param sign_param: sign of waveform
    :param idx: index of chromatic dependence

    :return wf: delay time-series [s]
    """
    t0 *= const.day
    tau = 10**log10_tau * const.day
    wf = (10**log10_Amp * np.heaviside(toas - t0, 1) *
          np.exp(- (toas - t0) / tau)) + \
         (10**log10_Amp * (1 - np.heaviside(toas - t0, 1)) *
          np.exp(- (t0 - toas) / tau))
    return np.sign(sign_param) * wf * (1400 / freqs) ** idx
4075901c5dcbe10ad8554835c20a0a22d29f1af7
24,936
def powerspectrum_t(flist, mMax=30, rbins=50, paramname=None, parallel=True,
                    spacing='linear'):
    """
    Calculates the power spectrum along the angular direction for a whole
    simulation (see powerspectrum).  Loops through snapshots in a simulation,
    in parallel.  Uses the same radial and angular bins for every timestep.

    Parameters
    ----------
    flist : list
        A list of filenames OR of SimSnaps for a simulation
    mMax : int
        Maximum fourier mode to calculate
    rbins : int or array
        Number of radial bins or the bin edges to use
    paramname : str
        Filename of .param file.  Used for loading if flist is a list of
        filenames
    parallel : bool
        Flag to perform this in parallel or not
    spacing : str
        If rbins is an int, this defines whether to use 'log' or 'linear'
        bin spacing

    Returns
    -------
    m : array
        Number of the fourier modes
    power : SimArray
        Power spectrum vs time along the angular direction
    """
    # Set up radial bins (use the same ones at all time steps)
    f = flist[0]
    if isinstance(f, str):
        f = pynbody.load(f, paramname=paramname)
    r = f.g['rxy']
    rbins = setupbins(r, rbins, spacing)

    # Prepare arguments (materialized as a list so the serial branch can
    # iterate over it too)
    nFiles = len(flist)
    mMax = [mMax] * nFiles
    rbins = [rbins] * nFiles
    paramname = [paramname] * nFiles
    args = list(zip(flist, mMax, rbins, paramname))

    # Calculate power
    if parallel:
        pool = Pool(cpu_count())
        try:
            results = pool.map(_powerspectrum, args, chunksize=1)
        finally:
            pool.close()
            pool.join()
    else:
        # Run serially on the same per-snapshot argument tuples
        results = [_powerspectrum(arg) for arg in args]

    # Format returns
    m = results[0][0]
    power_units = results[0][1].units
    nr = len(results[0][1])
    power = SimArray(np.zeros([nFiles, nr]), power_units)
    for i, result in enumerate(results):
        power[i] = result[1]

    return m, power
e2dd0ab1fa06a111530350e2ab2da119607dc9eb
24,937
def score(scores, main_channel, whiten_filter):
    """
    Whiten scores using whitening filter

    Parameters
    ----------
    scores: np.array (n_data, n_features, n_neigh)
        n_data is the number of spikes
        n_features is the number of features
        n_neigh is the number of neighboring channels considered

    main_channel: np.array (n_data,)
        The main channel information for each spike

    whiten_filter: np.array (n_channels, n_neigh, n_neigh)
        whitening filter as described above

    Returns
    -------
    whiten_scores: np.array (n_data, n_features, n_neigh)
        scores whitened after applying whitening filter
    """
    # get necessary parameters
    n_data, n_features, n_neigh = scores.shape
    n_channels = whiten_filter.shape[0]

    # apply whitening filter
    whitened_scores = np.zeros(scores.shape)
    for c in range(n_channels):
        # index of spikes with main channel as c
        idx = main_channel == c
        whitened_scores_c = np.matmul(
            np.reshape(scores[idx], [-1, n_neigh]), whiten_filter[c])
        whitened_scores[idx] = np.reshape(whitened_scores_c,
                                          [-1, n_features, n_neigh])

    return whitened_scores
b416bd38a874f6c8ee3b26b8fec35a19b0604de0
24,938
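A minimal usage sketch for the whitening routine above, assuming only numpy; the scores, main-channel assignments, and per-channel filters below are synthetic stand-ins, not data from the original spike-sorting pipeline:

import numpy as np

n_data, n_features, n_neigh, n_channels = 6, 3, 2, 4
scores = np.random.randn(n_data, n_features, n_neigh)
main_channel = np.random.randint(0, n_channels, size=n_data)
# identity filters leave the scores unchanged -- a handy sanity check
whiten_filter = np.stack([np.eye(n_neigh)] * n_channels)
whitened = score(scores, main_channel, whiten_filter)
assert np.allclose(whitened, scores)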
def make_title(raw_input):
    """Capitalize and strip"""
    return raw_input.title().strip()
517977638d72a8e5c8026147246739231be6258f
24,939
def perform(target, write_function=None):
    """
    Perform an HTTP request against a given target gathering some basic
    timing and content size values.
    """
    fnc = write_function or (lambda x: None)
    assert target
    connection = pycurl.Curl()
    connection.setopt(pycurl.URL, target)
    connection.setopt(pycurl.FOLLOWLOCATION, True)
    connection.setopt(pycurl.WRITEFUNCTION, fnc)
    connection.perform()
    result = {
        'response': connection.getinfo(pycurl.RESPONSE_CODE),
        'rtt': round(connection.getinfo(pycurl.CONNECT_TIME), 5),
        'response_time': round(connection.getinfo(pycurl.TOTAL_TIME), 5),
        'content_size': (
            int(connection.getinfo(pycurl.SIZE_DOWNLOAD)) +
            int(connection.getinfo(pycurl.HEADER_SIZE))
        ) * 8
    }
    try:
        result['bps'] = round(
            result['content_size'] / result['response_time'], 5
        )
    except ZeroDivisionError:
        result['bps'] = 0
    return result
0cecdb6bc43acd80ebca701b4607b2013b612d93
24,940
import typing


def merge_property_into_method(
    l: Signature, r: typing.Tuple[Metadata, OutputType]
) -> Signature:
    """
    Merges a property into a method by just using the method
    """
    return l
ae626fece9dbd36567f0b8c79cddfbe58c0a2cb4
24,942
def _serve_archive(content_hash, file_name, mime_type):
    """Serve a file from the archive or by generating an external URL."""
    url = archive.generate_url(content_hash,
                               file_name=file_name,
                               mime_type=mime_type)
    if url is not None:
        return redirect(url)

    try:
        local_path = archive.load_file(content_hash, file_name=file_name)
        if local_path is None:
            return Response(status=404)
        return send_file(local_path,
                         as_attachment=True,
                         conditional=True,
                         attachment_filename=file_name,
                         mimetype=mime_type)
    finally:
        archive.cleanup_file(content_hash)
be30f5585efd229518671b99c1560d44510db2c6
24,944
def preprocess(path, scale=3):
    """
    This method prepares labels and a downscaled image given the path of an
    image and a scale.  Modcrop is used on the image label to ensure the
    length and width of the image are divisible by scale.

    Inputs:
        path: the image directory path
        scale: scale to downscale

    Outputs:
        input_: downscaled version of image
        label_: label after applying modcrop
    """
    img = imread(path)

    # Crop image to ensure length and width of img are divisible by
    # scale for resizing by scale
    label_ = modcrop(img, scale)

    # Resize by scaling factor
    input_ = cv2.resize(label_, None, fx=1.0 / scale, fy=1.0 / scale,
                        interpolation=cv2.INTER_CUBIC)

    # kernel_size = (7, 7)
    # sigma = 3.0
    # input_ = cv2.GaussianBlur(input_, kernel_size, sigma)
    # checkimage(input_)

    return input_, label_
6b02b81dca775ad9e614b8d285d3784aec433ca2
24,945
import math


def get_goal_sample_rate(start, goal):
    """Adjusts the probability of sampling the goal directly as the next
    point, based on the distance between the start and the goal.  Useful
    for precision and performance."""
    try:
        dx = goal[0] - start[0]
        dy = goal[1] - start[1]
        d = math.sqrt(dx * dx + dy * dy)
    except TypeError:
        goal_sample_rate = 5
        return goal_sample_rate

    if d < 600:
        goal_sample_rate = (10 - d / 140) ** 2
    else:
        goal_sample_rate = 30
    return goal_sample_rate
a48ad7adba534455a149142cfeae9c47e3a25677
24,946
import warnings


def sample_cov(prices, returns_data=False, frequency=252, log_returns=False, **kwargs):
    """
    Calculate the annualised sample covariance matrix of (daily) asset returns.

    :param prices: adjusted closing prices of the asset, each row is a date
                   and each column is a ticker/id.
    :type prices: pd.DataFrame
    :param returns_data: if true, the first argument is returns instead of prices.
    :type returns_data: bool, defaults to False.
    :param frequency: number of time periods in a year, defaults to 252 (the number
                      of trading days in a year)
    :type frequency: int, optional
    :param log_returns: whether to compute using log returns
    :type log_returns: bool, defaults to False
    :return: annualised sample covariance matrix
    :rtype: pd.DataFrame
    """
    if not isinstance(prices, pd.DataFrame):
        warnings.warn("data is not in a dataframe", RuntimeWarning)
        prices = pd.DataFrame(prices)
    if returns_data:
        returns = prices
    else:
        returns = returns_from_prices(prices, log_returns)
    return fix_nonpositive_semidefinite(
        returns.cov() * frequency, kwargs.get("fix_method", "spectral")
    )
3e60ef20b976bf35d9ee818c27dfb7b877fe1f3f
24,947
from typing import Dict


def read_prev_timings(junit_report_path: str) -> Dict[str, float]:
    """Read the JUnit XML report in `junit_report_path` and return its
    timings grouped by class name.
    """
    tree = ET.parse(junit_report_path)
    if tree is None:
        pytest.exit(f"Could not find timings in JUnit XML {junit_report_path}")
    assert isinstance(tree, ET.ElementTree)
    return group_prev_timings(tree.getroot())
8c796845289fc08ffb815d649c732fdd1ae626b3
24,949
def update_model():
    """
    Updates a model
    """
    data = request.get_json()
    params = data.get('params', {'model_id': 1})
    entry = Model.objects(model_id=params['model_id']).first()
    if not entry:
        return {'error': ModelNotFoundError()}
    entry.update(**params)
    return entry.to_json()
8ba13df354c21e72045d319b3c47d8ef4e291182
24,950
def SmartConnect(protocol='https', host='localhost', port=443,
                 user='root', pwd='', service="hostd", path="/sdk",
                 preferredApiVersions=None, keyFile=None, certFile=None,
                 thumbprint=None, sslContext=None, httpConnectionTimeout=None,
                 connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC,
                 token=None, disableSslCertValidation=False,
                 customHeaders=None):
    """
    Determine the most preferred API version supported by the specified server,
    then connect to the specified server using that API version, login and
    return the service instance object.

    Throws any exception back to caller. The service instance object is also
    saved in the library for easy access.

    Clients should modify the service parameter only when connecting to a
    VMOMI server other than hostd/vpxd. For both of the latter, the default
    value is fine.

    @param protocol: What protocol to use for the connection (e.g. https or http).
    @type  protocol: string
    @param host: Which host to connect to.
    @type  host: string
    @param port: Port
    @type  port: int
    @param user: User
    @type  user: string
    @param pwd: Password
    @type  pwd: string
    @param service: Service
    @type  service: string
    @param path: Path
    @type  path: string
    @param preferredApiVersions: Acceptable API version(s) (e.g. vim.version.version9).
                                 If a list of versions is specified the versions should
                                 be ordered from most to least preferred.  If None is
                                 specified, the list of versions supported by pyVmomi
                                 will be used.
    @type  preferredApiVersions: string or string list
    @param keyFile: ssl key file path
    @type  keyFile: string
    @param certFile: ssl cert file path
    @type  certFile: string
    @param thumbprint: host cert thumbprint
    @type  thumbprint: string
    @param sslContext: SSL Context describing the various SSL options. It is
                       only supported in Python 2.7.9 or higher.
    @type  sslContext: SSL.Context
    @param httpConnectionTimeout: Timeout in secs for http requests.
    @type  httpConnectionTimeout: int
    @param connectionPoolTimeout: Timeout in secs for idle connections to close,
                                  specify negative numbers for never closing the
                                  connections
    @type  connectionPoolTimeout: int
    @type  token: string
    @param token: Authentication and Authorization token to use for the
                  connection. The presence of this token overrides the user
                  and pwd parameters.
    @type  disableSslCertValidation: bool
    @param disableSslCertValidation: Creates an unverified SSL context when True.
    """
    if preferredApiVersions is None:
        preferredApiVersions = GetServiceVersions('vim25')

    sslContext = getSslContext(host, sslContext, disableSslCertValidation)

    supportedVersion = __FindSupportedVersion(protocol, host, port, path,
                                              preferredApiVersions, sslContext)
    if supportedVersion is None:
        raise Exception("%s:%s is not a VIM server" % (host, port))

    portNumber = protocol == "http" and -int(port) or int(port)

    return Connect(host=host,
                   port=portNumber,
                   user=user,
                   pwd=pwd,
                   service=service,
                   adapter='SOAP',
                   version=supportedVersion,
                   path=path,
                   keyFile=keyFile,
                   certFile=certFile,
                   thumbprint=thumbprint,
                   sslContext=sslContext,
                   httpConnectionTimeout=httpConnectionTimeout,
                   connectionPoolTimeout=connectionPoolTimeout,
                   token=token,
                   disableSslCertValidation=disableSslCertValidation,
                   customHeaders=customHeaders)
19fafca3767b221d0b35c8377c0b367c097a2c8c
24,951
def _pseudoArrayFromScalars(scalarvalues, type):
    """Wrap a scalar in a buffer so it can be used as an array"""
    arr = _bufferPool.getBuffer()
    arr._check_overflow = 1
    newtype = type  # _numtypedict[type]
    arr._strides = (newtype.bytes,)
    arr._type = newtype
    arr._itemsize = newtype.bytes
    arr._strides = None
    if isinstance(scalarvalues, (list, tuple)):
        arr._shape = (len(scalarvalues),)
        for i in xrange(len(scalarvalues)):
            arr[i] = scalarvalues[i]
    else:
        arr._shape = ()
        arr[()] = scalarvalues
    # Modify block buffer attributes to look like vector/vector setup.
    return arr
ac2953eeff6b6549ef633de2728d72220e61bd76
24,952
def triangulate_ellipse(corners, num_segments=100):
    """Determines the triangulation of a path.

    The resulting `offsets` can be multiplied by a `width` scalar and added
    to the resulting `centers` to generate the vertices of the triangles for
    the triangulation, i.e. `vertices = centers + width*offsets`. Using the
    `centers` and `offsets` representation thus allows for the computed
    triangulation to be independent of the line width.

    Parameters
    ----------
    corners : np.ndarray
        4xD array of four bounding corners of the ellipse. The ellipse will
        still be computed properly even if the rectangle determined by the
        corners is not axis aligned. D in {2,3}
    num_segments : int
        Integer determining the number of segments to use when triangulating
        the ellipse

    Returns
    -------
    vertices : np.ndarray
        Mx2/Mx3 array coordinates of vertices for triangulating an ellipse.
        Includes the center vertex of the ellipse, followed by `num_segments`
        vertices around the boundary of the ellipse (M = `num_segments`+1)
    triangles : np.ndarray
        Px3 array of the indices of the vertices for the triangles of the
        triangulation. Has length (P) given by `num_segments`,
        (P = M-1 = num_segments)

    Notes
    -----
    Despite its name the ellipse will have num_segments-1 segments on its
    outline. That is to say num_segments=7 will lead to ellipses looking like
    hexagons.

    The behavior of this function is not well defined if the ellipse is
    degenerate in the current plane/volume you are currently observing.
    """
    if not corners.shape[0] == 4:
        raise ValueError(
            trans._(
                "Data shape does not match expected `[4, D]` shape specifying corners for the ellipse",
                deferred=True,
            )
        )
    assert corners.shape in {(4, 2), (4, 3)}
    center = corners.mean(axis=0)
    adjusted = corners - center

    # Take two consecutive corner differences,
    # which give us the 1/2 minor and major axes.
    ax1 = (adjusted[1] - adjusted[0]) / 2
    ax2 = (adjusted[2] - adjusted[1]) / 2

    # Compute the transformation matrix from the unit circle to our current
    # ellipse.  It's easy: just the 1/2 minor/major axes for the two columns.
    # Note that our transform shape depends on whether we are 2D -> 2D
    # (matrix, 2 by 2) or 2D -> 3D (matrix 2 by 3).
    transform = np.stack((ax1, ax2))
    if corners.shape == (4, 2):
        assert transform.shape == (2, 2)
    else:
        assert transform.shape == (2, 3)

    # we discretize the unit circle always in 2D.
    v2d = np.zeros((num_segments + 1, 2), dtype=np.float32)
    theta = np.linspace(0, np.deg2rad(360), num_segments)
    v2d[1:, 0] = np.cos(theta)
    v2d[1:, 1] = np.sin(theta)

    # ! vertices shape can be 2,M or 3,M depending on the transform.
    vertices = np.matmul(v2d, transform)

    # Shift back to center
    vertices = vertices + center

    triangles = (
        np.arange(num_segments) + np.array([[0], [1], [2]])
    ).T * np.array([0, 1, 1])
    triangles[-1, 2] = 1

    return vertices, triangles
feae8b79020c612185dcdcbd9f3d3b9bd897b11b
24,953
def is_dbenv_loaded():
    """
    Return True if the dbenv was already loaded (with a call to load_dbenv),
    False otherwise.
    """
    return settings.LOAD_DBENV_CALLED
a4dc5c6b69e457aedf31f7729dd6bab0a75aaa07
24,954
def parse_python_settings_for_dmlab2d(
        lab2d_settings: config_dict.ConfigDict) -> Settings:
    """Flatten lab2d_settings into Lua-friendly properties."""
    # Since config_dicts disallow "." in keys, we must use a different
    # character, "$", in our config and then convert it to "." here. This is
    # particularly important for levels with config keys like
    # 'player.%default' in DMLab2D.
    lab2d_settings = _config_dict_to_dict(lab2d_settings)
    lab2d_settings = settings_helper.flatten_args(lab2d_settings)
    lab2d_settings_dict = {}
    for key, value in lab2d_settings.items():
        converted_key = key.replace("$", ".")
        lab2d_settings_dict[converted_key] = str(value)
    return lab2d_settings_dict
5cdf1d1a9a6b82e23f0dffc022bbc0a352f65766
24,955
import IPython
import re


def get_new_name(x: str) -> str:
    """
    Obtains a new name for the given site.

    Args:
        x: The original name.

    Returns:
        The new name.
    """
    y = x.lower()
    if y == "cervical":
        return "cervical_spine"
    m = re.match(r"^([lr])\s+(.+)$", y)
    if not m:
        raise ValueError("cannot parse site name: {!r}".format(x))
    side = "left" if m.group(1) == "l" else "right"
    site = m.group(2)
    if site == "ip":
        site = "pip1"
    elif site == "si":
        site = "sacroiliac"
    elif re.match(r"tm[jl]", site):
        site = "tmj"
    m_toe = re.match(r"^toe(\d)$", site)
    if m_toe:
        site = "toe_ip{}".format(m_toe.group(1))
    return "{}_{}".format(site, side)
    # leftover debugging hooks, unreachable after the return above
    IPython.embed()
    raise Exception()
0481c54c43ddb58758513bd654b7d5b1a7539761
24,956
from typing import Dict
from typing import Any


def u2f_from_dict(data: Dict[str, Any]) -> U2F:
    """
    Create a U2F instance from a dict.

    :param data: Credential parameters from database
    """
    return U2F.from_dict(data)
53d90b86fc0a7fd44938b7eb2f9d9f178161642f
24,957
def filterForDoxygen(contents):
    """
    filterForDoxygen(contents) -> contents

    Massage the content of a python file to better suit Doxygen's
    expectations.
    """
    contents = filterContents(contents)
    contents = filterDocStrings(contents)
    return contents
c216328bed4d9af656dfd61477add1a15ee34bd6
24,959
from typing import List
from typing import Callable


def calculate_quantum_volume(
        *,
        num_qubits: int,
        depth: int,
        num_circuits: int,
        seed: int,
        device: cirq.google.xmon_device.XmonDevice,
        samplers: List[cirq.Sampler],
        compiler: Callable[[cirq.Circuit], cirq.Circuit] = None,
        repetitions=10_000,
) -> List[QuantumVolumeResult]:
    """Run the quantum volume algorithm.

    This algorithm should compute the same values as Algorithm 1 in
    https://arxiv.org/abs/1811.12926. To summarize, we generate a random model
    circuit, compute its heavy set, then transpile an implementation onto our
    architecture. This implementation is run a series of times and if the
    percentage of outputs that are in the heavy set is greater than 2/3, we
    consider the quantum volume test passed for that size.

    Args:
        num_qubits: The number of qubits for the circuit.
        depth: The number of gate layers to generate.
        num_circuits: The number of random circuits to run.
        seed: A seed to pass into the RandomState.
        device: The device to run the compiled circuit on.
        samplers: The samplers to run the algorithm on.
        compiler: An optional function to compile the model circuit's gates
            down to the target device's gate set and then optimize it.
        repetitions: The number of bitstrings to sample per circuit.

    Returns:
        A list of QuantumVolumeResults that contains all of the information
        for running the algorithm and its results.

    """
    random_state = np.random.RandomState(seed)
    circuits = prepare_circuits(num_qubits=num_qubits,
                                depth=depth,
                                num_circuits=num_circuits,
                                random_state=random_state)
    return execute_circuits(
        circuits=circuits,
        device=device,
        compiler=compiler,
        samplers=samplers,
        repetitions=repetitions,
    )
da1f2eb072f5d91ec99be8fa6a447c2661aabb6d
24,960
def get_key(item, key_length):
    """
    key + value = item
    number of words of key = key_length
    function returns key
    """
    word = item.strip().split()
    if key_length == 0:  # fix
        return item
    elif len(word) == key_length:
        return item
    else:
        return ' '.join(word[0:key_length])
6407d98d62a4d83bf577e82be696b6aee1f6d2e8
24,962
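A small illustration of get_key on a hypothetical whitespace-separated record (the sample string is invented for demonstration):

item = "  alpha beta 42  "
print(get_key(item, 0))   # '  alpha beta 42  ' (whole item returned as-is)
print(get_key(item, 2))   # 'alpha beta'
print(get_key(item, 3))   # '  alpha beta 42  ' (key length equals word count)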
from typing import Tuple


def identity(shape: Tuple[int, ...], gain: float = 1) -> JaxArray:
    """Returns the identity matrix. This initializer was proposed in
    `A Simple Way to Initialize Recurrent Networks of Rectified Linear Units
    <https://arxiv.org/abs/1504.00941>`_.

    Args:
        shape: Shape of the tensor. It should have exactly rank 2.
        gain: optional scaling factor.

    Returns:
        Tensor initialized to the identity matrix.
    """
    assert len(shape) == 2
    return gain * jn.eye(*shape)
59fb436485a04b5861bfdcfbe9bf36f4084aeb3d
24,963
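A quick check of the initializer above, assuming jn is jax.numpy (as in the objax-style source this appears to come from); identity((3, 3), gain=0.5) reduces to the expression below:

import jax.numpy as jn

w = 0.5 * jn.eye(3, 3)
print(w)
# [[0.5 0.  0. ]
#  [0.  0.5 0. ]
#  [0.  0.  0.5]]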
from typing import Optional
from typing import Dict


def pyreq_nlu_trytrain(httpreq_handler: HTTPRequestHandler, project_id: int,
                       locale: str) -> Optional[Dict]:
    """
    Get try-annotation on utterance with latest run-time NLU model for a Mix
    project and locale, by sending requests to Mix API endpoint with Python
    'requests' package.

    API endpoint:
        POST /nlu/api/v1/nlu/<PROJ_ID>/annotations/train?sources=nuance_custom_data&locale=<LOCALE>
    Request payload: None

    :param httpreq_handler: HTTPRequestHandler to process requests and responses
    :param project_id: Mix project ID
    :param locale: Mix project NLU locale
    :return: JSON response payload from API endpoint
    """
    api_endpoint = f'/nlu/api/v1/nlu/{project_id}/annotations/train?sources=nuance_custom_data&locale={locale}'
    resp = httpreq_handler.request(url=api_endpoint, method=POST_METHOD,
                                   data='{}', default_headers=True,
                                   json_resp=True)
    return resp
7103fae177535f5c7d37ce183e9d828ebdca7b7a
24,964
from typing import Tuple


def month_boundaries(month: int,
                     year: int) -> Tuple[datetime_.datetime, datetime_.datetime]:
    """
    Return the boundary datetimes of a given month.
    """
    start_date = datetime_.date(year, month, 1)
    end_date = start_date + relativedelta(months=1)
    return (midnight(start_date), midnight(end_date))
f727ae0d8f28bd75f0a326305d172ea45ec29982
24,965
def calculate_Hubble_flow_velocity_from_cMpc(cMpc, cosmology="Planck15"):
    """
    Calculates the Hubble flow recession velocity from comoving distance

    Parameters
    ----------
    cMpc : array-like, shape (N, )
        The distance in units of comoving megaparsecs.
        Must be 1D or scalar.

    cosmology : string or astropy.cosmology.core.FLRW
        The cosmology to assume whilst calculating distance.
        Default: Planck15.

    Returns
    -------
    velocity : array-like, shape (N, )
        The Hubble flow recession velocity.
    """
    cosmo = get_cosmology_from_name(cosmology)
    H0 = cosmo.H0
    scale_factor = calculate_scale_factor_from_cMpc(cMpc, cosmology=cosmology)
    proper_dist = cMpc * apu.Mpc / scale_factor
    velocity = proper_dist * H0
    return velocity
994722494de5ae918c3f1855b1b58fec21849f7e
24,966
def join_items(
    *items, separator="\n", description_mode=None,
    start="", end="", newlines=1
):
    """
    joins items using separator, ending with end and newlines

    Args:
        *items - the things to join
        separator - what separates items
        description_mode - what mode to use for description
        start - what to start the string with
        end - what to end the string with
        newlines - how many newlines to add after end

    Returns a string
    """
    output_list = []
    if description_mode:
        for item in items:
            output_list.append(description(
                *(item if item else ""), mode=description_mode, newlines=0
            ))
    else:
        output_list = convert_items(list(items), type_=str)
    output_list = [item.strip() for item in output_list]
    output_text = separator.join(output_list).strip()
    output_text += "" if output_text.endswith(end) else end
    output_text = start + newline(output_text, newlines)
    return output_text
0df5b55f10d73600ea2f55b0e9df86c17622e779
24,967
def JacobianSpace(Slist, thetalist):
    """Computes the space Jacobian for an open chain robot

    :param Slist: The joint screw axes in the space frame when the
                  manipulator is at the home position, in the format of a
                  matrix with axes as the columns
    :param thetalist: A list of joint coordinates
    :return: The space Jacobian corresponding to the inputs (6xn real
             numbers)

    Example Input:
        Slist = np.array([[0, 0, 1,   0, 0.2, 0.2],
                          [1, 0, 0,   2,   0,   3],
                          [0, 1, 0,   0,   2,   1],
                          [1, 0, 0, 0.2, 0.3, 0.4]]).T
        thetalist = np.array([0.2, 1.1, 0.1, 1.2])
    Output:
        np.array([[  0, 0.98006658, -0.09011564,  0.95749426]
                  [  0, 0.19866933,  0.4445544,   0.28487557]
                  [  1,          0,  0.89120736, -0.04528405]
                  [  0, 1.95218638, -2.21635216, -0.51161537]
                  [0.2, 0.43654132, -2.43712573,  2.77535713]
                  [0.2, 2.96026613,  3.23573065,  2.22512443]])
    """
    Js = Slist.copy()
    T = eye(4)
    for i in range(1, len(thetalist)):
        T = T * MatrixExp6(VecTose3(Slist[:, i - 1] * thetalist[i - 1]))
        Js[:, i] = Adjoint(T) * Slist[:, i]
    return Js
e0f3fba57b2d1595a59b708a452fd2b57c6011e7
24,968
async def delete_bank(org_id: str, bank_id: str,
                      user: users_schemas.User = Depends(is_authenticated),
                      db: Session = Depends(get_db)):
    """delete a given bank of id bank_id.

    Args:
        bank_id: a unique identifier of the bank object.
        user: authenticates that the user is a logged in user.
        db (Session): The database for storing the article object.

    Returns:
        HTTP_200_OK (success response)

    Raises:
        HTTP_424_FAILED_DEPENDENCY: failed to delete bank details
        HTTP_404_NOT_FOUND: Bank does not exist.
    """
    bank = await fetch_bank(user=user, id=bank_id, db=db)
    db.delete(bank)
    db.commit()
    return JSONResponse({"detail": f"bank details with {bank_id} successfully deleted"},
                        status_code=status.HTTP_200_OK)
537159c6c19c6bb1dde02eec13f5c55932f9d6ee
24,969
from typing import Union
from typing import Iterable
from typing import Any


def label_encode(
    df: pd.DataFrame, column_names: Union[str, Iterable[str], Any]
) -> pd.DataFrame:
    """
    Convert labels into numerical data.

    This method will create a new column with the string "_enc" appended
    after the original column's name. Consider this to be syntactic sugar.

    This method behaves differently from `encode_categorical`. This method
    creates a new column of numeric data. `encode_categorical` replaces the
    dtype of the original column with a "categorical" dtype.

    This method mutates the original DataFrame.

    Functional usage example:

    .. code-block:: python

        label_encode(df, column_names="my_categorical_column")  # one way

    Method chaining example:

    .. code-block:: python

        import pandas as pd
        import janitor
        categorical_cols = ['col1', 'col2', 'col4']
        df = pd.DataFrame(...).label_encode(column_names=categorical_cols)

    :param df: The pandas DataFrame object.
    :param str/iterable column_names: A column name or an iterable (list or
        tuple) of column names.
    :returns: A pandas DataFrame.
    """
    le = LabelEncoder()
    if isinstance(column_names, list) or isinstance(column_names, tuple):
        for col in column_names:
            if col not in df.columns:
                raise JanitorError(f"{col} missing from column_names")
            df[f"{col}_enc"] = le.fit_transform(df[col])
    elif isinstance(column_names, str):
        if column_names not in df.columns:
            raise JanitorError(f"{column_names} missing from column_names")
        df[f"{column_names}_enc"] = le.fit_transform(df[column_names])
    else:
        raise JanitorError(
            "kwarg `column_names` must be a string or iterable!"
        )
    return df
62d937dc8bb02db8a099a5647bf0673005489605
24,970
def get_server_now_with_delta_str(timedelta):
    """Get the server now date string with delta"""
    server_now_with_delta = get_server_now_with_delta(timedelta)
    result = server_now_with_delta.strftime(DATE_FORMAT_NAMEX_SEARCH)
    return result
f555ed28ec98f9edfa62d7f52627ea06cad9513b
24,972
def GetPrimaryKeyFromURI(uri):
    """
    example:

        GetPrimaryKeyFromURI(u'mujin:/\u691c\u8a3c\u52d5\u4f5c1_121122.mujin.dae')
        returns u'%E6%A4%9C%E8%A8%BC%E5%8B%95%E4%BD%9C1_121122'
    """
    return uriutils.GetPrimaryKeyFromURI(uri,
                                         fragmentSeparator=uriutils.FRAGMENT_SEPARATOR_AT,
                                         primaryKeySeparator=uriutils.PRIMARY_KEY_SEPARATOR_AT)
49a489d02af3195ea7ed0a7f41b9fbb19bb16407
24,973
import math


def normalize(score, alpha=15):
    """
    Normalize the score to be between -1 and 1 using an alpha that
    approximates the max expected value
    """
    norm_score = score / math.sqrt((score * score) + alpha)
    if norm_score < -1.0:
        return -1.0
    elif norm_score > 1.0:
        return 1.0
    else:
        return norm_score
ec158416a4199d17948986dfb3f8d659d82e07b7
24,974
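A brief sketch of the squashing curve above (the same alpha-normalization that appears in VADER-style sentiment scorers); outputs shown are rounded:

for s in (0, 1, 5, 50):
    print(s, round(normalize(s), 4))
# 0 0.0
# 1 0.25
# 5 0.7906
# 50 0.997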
from datetime import datetime

import pytz


def get_timestamp(request):
    """
    hhs_oauth_server.request_logging.RequestTimeLoggingMiddleware adds
    request._logging_start_dt; we grab it, or set a timestamp, and return it.
    """
    if not hasattr(request, '_logging_start_dt'):
        return datetime.now(pytz.utc).isoformat()
    else:
        return request._logging_start_dt
f3117a66ebfde0b1dc48591e0665c3d7120826fd
24,975
from typing import Optional
from typing import List


def human_size(bytes: int | float, units: Optional[List[str]] = None) -> str:
    """
    Convert bytes into a more human-friendly format

    :param bytes: int
        Number of bytes
    :param units: Optional[List[str]]
        units used
    :return: str
        Return size in human friendly format: <number> <size_unit>
    """
    if units is None:
        units = ['bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB']
    return f"{round(bytes, 2)} " + units[0] if bytes < 1024 else human_size(bytes / 1024, units[1:])
9b652f0a09024c22dcefa5909c17f7b14d0183f4
24,976
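Example calls for human_size; note the recursion consumes one entry of the units list per factor of 1024:

print(human_size(512))          # 512 bytes
print(human_size(2048))         # 2.0 KB
print(human_size(5 * 1024**3))  # 5.0 GB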
def ray_casting_2d(p: Point, poly: Poly) -> bool:
    """Implements ray-casting algorithm to check if a point p is inside a
    (closed) polygon poly"""
    intersections = [int(rayintersectseg(p, edge)) for edge in poly.edges]
    return _odd(sum(intersections))
149f797bdcecce483cf87ed012fe7a21370c2ab6
24,977
def average_price(offers):
    """Returns the average price of a set of items.

    The first item is ignored as this is hopefully underpriced. The last
    item is ignored as it is often greatly overpriced.

    IMPORTANT: It is important to only trade items which are represented on
    the market in great numbers. This is due to the fact that with lower
    competition between sellers, the prices are often non-competitive.

    Keyword arguments:
    offers -- A list of offers from which to find the average price.
    """
    if len(offers) > 1:
        remove_last_item = (True if (len(offers) > 3) else False)
        cheapest_item = offers[0]['price']
        if remove_last_item:
            sum_ = sum(x['price'] for x in offers[1:-1])
        else:
            sum_ = sum(x['price'] for x in offers[1:])
        return sum_ / (len(offers) - (2 if remove_last_item else 1))
4849996d13e4c00d845f5fb6a5a150397c9b84f0
24,978
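A hedged usage sketch with made-up offers, sorted cheapest-first as the docstring implies; with more than three offers, both the first and last are dropped:

offers = [{'price': 90}, {'price': 100}, {'price': 110}, {'price': 500}]
print(average_price(offers))  # (100 + 110) / 2 = 105.0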
def get_train_data():
    """get all the train data from some paths

    Returns:
        X: Input data
        Y_: Compare data
    """
    TrainExamples = [8, 9, 10, 11, 12, 14]  # from path set_22 to set_35
    path = PATH_SIMPLE + str(5) + '/'
    X, Y_ = generate(path, isNormalize=True)
    maxvalue = (get_image(path, isInput=True)[0].max() / 10000)
    for train in TrainExamples:
        path = PATH + str(train) + '/'
        temp_X, temp_Y = generate(path, isNormalize=True)
        X = np.append(X, temp_X, axis=0)
        Y_ = np.append(Y_, temp_Y, axis=0)
    print("Finish generating all the train data!")
    return X, Y_, maxvalue
01517db3cb4b5987b895c2b435771c745837bf65
24,980
def effective_dimension_vector(emb, normalize=False, is_cov=False):
    """Effective dimensionality of a set of points in space.

    Effective dimensionality is the number of orthogonal dimensions needed to
    capture the overall correlational structure of data. See Del Giudice, M.
    (2020). Effective Dimensionality: A Tutorial. _Multivariate Behavioral
    Research, 0(0), 1-16. https://doi.org/10.1080/00273171.2020.1743631.

    :param emb: embedding vectors
    :type emb: numpy.ndarray (num_entities, dim)
    :param normalize: Set True to center data. For spherical or
        quasi-spherical data (such as the embedding by word2vec),
        normalize=False is recommended, defaults to False
    :type normalize: bool, optional
    :param is_cov: Set True if `emb` is the covariance matrix, defaults to
        False
    :type is_cov: bool, optional
    :return: eigenvectors and the normalized eigenvalue distribution of the
        covariance matrix
    :rtype: tuple

    .. highlight:: python
    .. code-block:: python

        >>> import emlens
        >>> import numpy as np
        >>> emb = np.random.randn(100, 20)
        >>> ed = emlens.effective_dimension(emb)
    """
    if is_cov:
        Cov = emb
    else:
        if normalize:
            emb = StandardScaler().fit_transform(emb)
        Cov = (emb.T @ emb) / emb.shape[0]
    lam, v = linalg.eig(Cov)
    order = np.argsort(lam)[::-1]
    lam = lam[order]
    v = v[:, order]
    lam = np.real(lam)
    lam = np.maximum(lam, 1e-10)
    p = lam / np.sum(lam)
    p = p[p > 0]
    return v, p
5d4247b92216bc9e77eabbd35746c06fec22161c
24,981
def getProductMinInventory(db, productID):
    """
    Gives back the minimum inventory for a given product

    :param db: database pointer
    :param productID: int
    :return: int
    """
    # make the query and receive a single tuple (first() allows us to do this)
    result = db.session.query(Product).filter(Product.id == productID).first()
    # grab the name in the keyed tuple received
    return result.min_inventory
032c95685e1c578f9251d269899f4ee04d93e326
24,982
import re


def read_config6(section, option, filename='', verbosity=None):
    # format result: {aaa:[bbb, ccc], ddd:[eee, fff], ggg:[hhh, qqq], xxx:[yyy:zzz]}
    """
    option: section, option, filename=''
    format result: {aaa:bbb, ccc:ddd, eee:fff, ggg:hhh, qqq:xxx, yyy:zzz}
    """
    filename = get_config_file(filename, verbosity)
    data = {}
    cfg = ConfigParser.RawConfigParser(allow_no_value=True,
                                       dict_type=MultiOrderedDict)
    cfg.read(filename)
    cfg = cfg.get(section, option)
    for i in cfg:
        if ":" in i:
            d1 = str(i).split(":")
            d2 = int(str(d1[0]).strip())
            for j in d1[1]:
                d3 = re.split("['|','|']", d1[1])
                d4 = str(d3[1]).strip()
                d5 = str(d3[-2]).strip()
                data.update({d2: [d4, d5]})
        else:
            pass
    return data
e80c80b2033b10c03c7ee0b2a5e25c5739777f3f
24,983
def bboxes_iou(boxes1, boxes2):
    """
    boxes: [xmin, ymin, xmax, ymax] format coordinates.
    """
    boxes1 = np.array(boxes1)
    boxes2 = np.array(boxes2)

    boxes1_area = (boxes1[..., 2] - boxes1[..., 0]) * (boxes1[..., 3] - boxes1[..., 1])
    boxes2_area = (boxes2[..., 2] - boxes2[..., 0]) * (boxes2[..., 3] - boxes2[..., 1])

    left_up = np.maximum(boxes1[..., :2], boxes2[..., :2])
    right_down = np.minimum(boxes1[..., 2:], boxes2[..., 2:])

    inter_section = np.maximum(right_down - left_up, 0.0)
    inter_area = inter_section[..., 0] * inter_section[..., 1]
    union_area = boxes1_area + boxes2_area - inter_area
    ious = np.maximum(1.0 * inter_area / union_area, 0.0)

    return ious
ca2083dd0138a6bd1ee741fc58739340dc1bac61
24,984
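A quick worked example for the IoU helper; the boxes are arbitrary:

a = [0, 0, 10, 10]
b = [5, 5, 15, 15]
print(bboxes_iou(a, b))  # intersection 25, union 175 -> ~0.142857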
import torch
import collections


def predict_all_task(trained_model_path, config, subject_specific):
    """Predict labels for all subjects and trials.

    Parameters
    ----------
    trained_model_path : str
        Path to the trained model.
    config : dict
        A dictionary of hyper-parameters used in the network.
    subject_specific : bool
        Whether to use a subject-specific data iterator.

    Returns
    -------
    dict
        Predicted labels from the model, keyed by subject and trial.
    """
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    trained_model = torch.load(trained_model_path, map_location=device)
    labels = collections.defaultdict(dict)
    for subject in config['subjects']:
        for trial in config['trials']:
            if subject_specific:
                data_iterator = subject_specific_data_iterator(
                    subject, trial, config)
                labels[subject][trial] = calculate_predictions(
                    trained_model, data_iterator, config)
            else:
                data_iterator = collective_data_iterator(config)
                labels[subject][trial] = calculate_predictions(
                    trained_model, data_iterator, config)
    return labels
ae179d41cff63b52c9836f7bbb25675618e1aa86
24,986
def hjorth(X):
    """ Compute Hjorth mobility and complexity of a time series.

    Notes
    -----
    To speed up, it is recommended to compute D before calling this function
    because D may also be used by other functions whereas computing it here
    again will slow down.

    Parameters
    ----------
    X : array_like, shape(N,)
        a 1-D real time series.

    Returns
    -------
    HM : float
        Hjorth mobility
    Comp : float
        Hjorth complexity

    References
    ----------
    .. [1] B. Hjorth, "EEG analysis based on time domain properties,"
       Electroencephalography and Clinical Neurophysiology, vol. 29,
       pp. 306-310, 1970.
    """
    # Compute the first order difference
    D = np.diff(X)
    # pad the first difference
    D = np.hstack([X[0], D])

    n = X.shape[0]
    # use the builtin float; np.float is deprecated in modern numpy
    M2 = float(np.sum(D ** 2)) / n
    TP = np.sum(X ** 2)
    M4 = np.sum((D[1:] - D[:D.shape[0] - 1]) ** 2) / n

    # Hjorth Mobility and Complexity
    HM = np.sqrt(M2 / TP)
    Comp = np.sqrt(float(M4) * TP / M2 / M2)
    return HM, Comp
8ca56b45e2c5af0d28d34c181cc1b6bc915e507d
24,987
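A sanity-check sketch for the Hjorth parameters on a noisy sine, assuming only numpy; the signal is synthetic:

import numpy as np

t = np.linspace(0, 1, 500)
x = np.sin(2 * np.pi * 5 * t) + 0.1 * np.random.randn(500)
mobility, complexity = hjorth(x)
print(mobility, complexity)  # two small positive floats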
def check_and_set_owner(func):
    """
    Decorator that applies to functions expecting the "owner" name as a
    second argument.

    It will check if a user exists with this name and if so add to the
    request instance a member variable called owner_user pointing to the
    User instance corresponding to the owner.

    If the owner doesn't exist, the visitor is redirected to 404.
    """
    def _check_and_set_owner(request, owner_name, *args, **kwargs):
        try:
            owner_user = User.objects.get(username=owner_name)
        except User.DoesNotExist:
            return HttpResponseNotFound()
        else:
            request.owner_user = owner_user
            return func(request, owner_name, *args, **kwargs)
    return _check_and_set_owner
9c5ba9d0b0bb1058ddf830da5fc59df3724b2c0e
24,988
def schedule(self: Client) -> ScheduleProxy:
    """Delegates to a
    :py:class:`mcipc.rcon.je.commands.schedule.ScheduleProxy`
    """
    return ScheduleProxy(self, 'schedule')
a8402088fa9fa697e988b2bdb8f185b14f012873
24,989
def check_permission(permission):
    """Returns true if the user has the given permission."""
    if 'permissions' not in flask.session:
        return False
    # Admins always have access to everything.
    if Permissions.ADMIN in flask.session['permissions']:
        return True
    # Otherwise check if the permission is present in their permission list.
    return permission in flask.session['permissions']
b4c45b15a68a07140c70b3f46001f0ca6a737ea5
24,990
def Energy_value(x):
    """ Energy of an input signal """
    y = np.sum(x**2)
    return y
cf2c650c20f2a7dac8bf35db21f25b8af428404e
24,993
import re


def get_int():
    """Read a line of text from standard input and return the equivalent int."""
    while True:
        s = get_string()
        if s is None:
            return None
        if re.search(r"^[+-]?\d+$", s):
            try:
                i = int(s, 10)
                if type(i) is int:  # could become long in Python 2
                    return i
            except ValueError:
                pass
        print("Retry: ", end="")
e6f4e1c49f4b4bc0306af50283728f016db524d7
24,994
def create_save_featvec_homogenous_time(yourpath, times, intensities,
                                        filelabel, version=0, save=True):
    """Produces the feature vectors for each light curve and saves them all
    into a single fits file. Requires all light curves to be on the same
    time axis.

    parameters:
        * yourpath = folder you want the file saved into
        * times = a single time axis for all
        * intensities = array of all light curves (NOT normalized)
        * sector, camera, ccd = integers
        * version = what version of feature vector to calculate for all.
          default is 0
        * save = whether or not to save into a fits file

    returns: list of feature vectors + fits file containing all feature
    vectors

    requires: featvec()

    modified: [lcg 08212020]
    """
    fname_features = yourpath + "/" + filelabel + "_features_v" + str(version) + ".fits"
    feature_list = []
    if version == 0:
        # median normalize for the v0 features
        intensities = normalize(intensities)
    elif version == 1:
        # mean normalize the intensity so it goes to 1
        intensities = mean_norm(intensities)

    print("Beginning Feature Vector Creation Now")
    for n in range(len(intensities)):
        feature_vector = featvec(times, intensities[n], v=version)
        feature_list.append(feature_vector)
        if n % 25 == 0:
            print(str(n) + " completed")
    feature_list = np.asarray(feature_list)

    if save == True:
        hdr = fits.Header()
        hdr["VERSION"] = version
        hdu = fits.PrimaryHDU(feature_list, header=hdr)
        hdu.writeto(fname_features)
    else:
        print("Not saving feature vectors to fits")

    return feature_list
29250aad4cfa1aa5bbd89b880f93a7a70a775dfb
24,995
import asyncio


async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
    """Set up Eaton xComfort Bridge from a config entry."""
    ip_address = entry.data.get(CONF_IP_ADDRESS)
    auth_key = entry.data.get("authkey")
    bridge = Bridge(ip_address, auth_key)
    # bridge.logger = lambda x: _LOGGER.warning(x)
    # hass.async_create_task(bridge.run())
    asyncio.create_task(bridge.run())
    if DOMAIN not in hass.data:
        hass.data[DOMAIN] = {}
    hass.data[DOMAIN][entry.entry_id] = bridge

    for component in PLATFORMS:
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(entry, component)
        )
    return True
b24b78054c1e8236b5a29fed7287db4150f9deb0
24,996
def _cpu_string(platform_type, settings):
    """Generates a <platform>_<arch> string for the current target based on the given parameters."""
    if platform_type == "ios":
        ios_cpus = settings["//command_line_option:ios_multi_cpus"]
        if ios_cpus:
            return "ios_{}".format(ios_cpus[0])
        cpu_value = settings["//command_line_option:cpu"]
        if cpu_value.startswith("ios_"):
            return cpu_value
        return "ios_x86_64"
    if platform_type == "macos":
        macos_cpus = settings["//command_line_option:macos_cpus"]
        if macos_cpus:
            return "darwin_{}".format(macos_cpus[0])
        return "darwin_x86_64"
    if platform_type == "tvos":
        tvos_cpus = settings["//command_line_option:tvos_cpus"]
        if tvos_cpus:
            return "tvos_{}".format(tvos_cpus[0])
        return "tvos_x86_64"
    if platform_type == "watchos":
        watchos_cpus = settings["//command_line_option:watchos_cpus"]
        if watchos_cpus:
            return "watchos_{}".format(watchos_cpus[0])
        return "watchos_i386"
    fail("ERROR: Unknown platform type: {}".format(platform_type))
7cf483c45bb209a9e3e0775538934945324281a7
24,997
def Check2DBounds(atomMatch, mol, pcophore):
    """ checks to see if a particular mapping of features onto a molecule
    satisfies a pharmacophore's 2D restrictions

    >>> activeFeats = [ChemicalFeatures.FreeChemicalFeature('Acceptor', Geometry.Point3D(0.0, 0.0, 0.0)),
    ...                ChemicalFeatures.FreeChemicalFeature('Donor', Geometry.Point3D(0.0, 0.0, 0.0))]
    >>> pcophore = Pharmacophore.Pharmacophore(activeFeats)
    >>> pcophore.setUpperBound2D(0, 1, 3)
    >>> m = Chem.MolFromSmiles('FCC(N)CN')
    >>> Check2DBounds(((0,), (3,)), m, pcophore)
    True
    >>> Check2DBounds(((0,), (5,)), m, pcophore)
    False

    """
    dm = Chem.GetDistanceMatrix(mol, False, False, False)
    nFeats = len(atomMatch)
    for i in range(nFeats):
        for j in range(i + 1, nFeats):
            lowerB = pcophore._boundsMat2D[j, i]  # lowerB = pcophore.getLowerBound2D(i,j)
            upperB = pcophore._boundsMat2D[i, j]  # upperB = pcophore.getUpperBound2D(i,j)
            dij = 10000
            for atomI in atomMatch[i]:
                for atomJ in atomMatch[j]:
                    try:
                        dij = min(dij, dm[atomI, atomJ])
                    except IndexError:
                        print('bad indices:', atomI, atomJ)
                        print('  shape:', dm.shape)
                        print('  match:', atomMatch)
                        print('  mol:')
                        print(Chem.MolToMolBlock(mol))
                        raise IndexError
            if dij < lowerB or dij > upperB:
                return False
    return True
d119645de037eeaf536e290766d4dacf9c2e2f08
24,998
def get_sdk_dir(fips_dir):
    """return the platform-specific SDK dir"""
    return util.get_workspace_dir(fips_dir) + '/fips-sdks/' + util.get_host_platform()
f3fcf05a8dd1ae0f14431a84ae570c56dd900c69
24,999
from typing import Any


def suggested_params(**kwargs: Any) -> transaction.SuggestedParams:
    """Return the suggested params from the algod client.

    Set the provided attributes in ``kwargs`` in the suggested parameters.
    """
    params = _algod_client().suggested_params()
    for key, value in kwargs.items():
        setattr(params, key, value)
    return params
678f207bdd1152f6a8cb98e9c4964dd43b9dd761
25,000
def norm_rl(df):
    """ Normalizes read length dependent features """
    rl_feat = ["US_r", "US_a", "DS_a", "DS_r", "UXO_r", "UXO_a", "DXO_r",
               "DXO_a", "UMO_r", "UMO_a", "DMO_r", "DMO_a", "MO_r", "MO_a",
               "XO_r", "XO_a"]
    rl = df['MO_r'].max()
    df[rl_feat] = df[rl_feat] / rl
    return df
0f7c36a447f04bc647773e99a83f59f3789849d4
25,001
import json


def sendRequest(self, channel, params=None):
    """Send a request."""
    # Build the request
    d = {}
    d['event'] = 'addChannel'
    d['channel'] = channel

    # If there are parameters, add the api_key and signature fields to the
    # parameter dict
    if params is not None:
        params['api_key'] = apiKey
        params['sign'] = buildMySign(params, secreteKey)
        d['parameters'] = params

    # Serialize with json and send
    j = json.dumps(d)

    # Reconnect if an exception is raised
    try:
        self.ws.send(j)
        return True
    except websocket.WebSocketConnectionClosedException:
        self.reconnect()
        return False
ddaae8800e0fbcf69ce1e15abba4725ef70eadfa
25,002
def altCase(text: str):
    """
    Returns an Alternate Casing of the Text
    """
    return "".join(
        [
            words.upper() if index % 2 else words.lower()
            for index, words in enumerate(text)
        ]
    )
1d8c25f9b81e360c254ac10ce105f99ca890a87c
25,003
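altCase lowercases characters at even indices and uppercases those at odd ones, so:

print(altCase("hello world"))  # hElLo wOrLd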
def makeObjectArray(elem, graph, num, tag=sobject_array):
    """
    Create an object array of num objects based upon elem, which becomes the
    first child of the new object array.

    This function also can create a delay when passed a different tag.
    """
    p = elem.getparent()
    objarray = etree.Element(etree.QName(p, tag))
    ref = getNewRef()
    objarray.set(sreference, ref)
    Xref.update(ref, objarray, p.get(sreference),
                makeNewName(tag, objarray), graph, tag=tag)
    objarray.set(scount, str(num))
    p.append(objarray)
    p.remove(elem)
    objarray.append(elem)
    Xref.update(elem.get(sreference), tagref=ref)
    return changeDelayOrArrayCount(objarray, graph)
5ef896b6514dc0d5ce00bbf0322c9e775fb4a152
25,004
def convert_mg_l_to_mymol_kg(o2, rho_0=1025):
    """Convert oxygen concentrations in mg/l to mymol/kg."""
    converted = o2 * 1 / 32000 * rho_0 / 1000 * 1e6
    converted.attrs["units"] = "$\mu mol/kg$"
    return converted
5925cf1f5629a0875bdc777bc3f142b9a664a144
25,006
import xml
from typing import List


def parse_defines(root: xml.etree.ElementTree.Element, component_id: str) -> List[str]:
    """Parse pre-processor definitions for a component.

    Schema:
        <defines>
          <define name="EXAMPLE" value="1"/>
          <define name="OTHER"/>
        </defines>

    Args:
        root: root of element tree.
        component_id: id of component to return.

    Returns:
        list of str NAME=VALUE or NAME for the component.
    """
    xpath = f'./components/component[@id="{component_id}"]/defines/define'
    return list(_parse_define(define) for define in root.findall(xpath))
0f2b06581d89f9be3ff4d733e1db9b56e951cc89
25,007
def make_f_beta(beta):
    """Create a f beta function

    Parameters
    ----------
    beta : float
        The beta to use where a beta of 1 is the f1-score or F-measure

    Returns
    -------
    function
        A function to compute the f_beta score
    """
    beta_2 = beta ** 2
    coeff = (1 + beta_2)

    def f(global_, local_, node):
        """Compute the f-measure

        Parameters
        ----------
        global_ : np.array
            All of the scores for a given query
        local_ : np.array
            The scores for the query at the current node
        node : skbio.TreeNode
            The current node being evaluated
        """
        p = len(global_) / len(local_)
        r = len(local_) / node.ntips
        return coeff * (p * r) / ((beta_2 * p) + r)

    return f
f0e6993ac956171c58415e1605706c453d3e6d61
25,008
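A minimal sketch of the closure returned by make_f_beta; the node stub below stands in for the skbio.TreeNode the docstring expects:

from types import SimpleNamespace

f1 = make_f_beta(1)
node = SimpleNamespace(ntips=8)  # hypothetical tree node
score_ = f1([1] * 4, [1] * 2, node)
# p = 4/2 = 2.0, r = 2/8 = 0.25 -> f1 = 2 * (2 * 0.25) / (2 + 0.25)
print(score_)  # ~0.4444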
def _autohint_code(f, script):
    """Return 'not-hinted' if we don't hint this, else return the ttfautohint
    code, which might be None if ttfautohint doesn't support the script.

    Note that LGC and MONO return None."""
    if script == 'no-script':
        return script
    if not script:
        script = noto_fonts.script_key_to_primary_script(_get_font_info(f).script)
    return noto_data.HINTED_SCRIPTS.get(script, 'not-hinted')
4341098cbd9581ef989a65d352493fe28c7ddbd7
25,009
def infostring(message=""):
    """Info log-string. I normally use this at the end of tasks.

    Args:
        message(str): A custom message to add.

    Returns:
        (str)
    """
    message = message.rstrip().replace("\n", " ")
    return tstamp() + "\t## INFO ## " + message + "\n"
14e3012ad9c6e4c7cd10ea885098e31a3eef3ead
25,010
def geo_point_n(arg, n):
    """Return the Nth point in a single linestring in the geometry.
    Negative values are counted backwards from the end of the LineString,
    so that -1 is the last point. Returns NULL if there is no linestring
    in the geometry.

    Parameters
    ----------
    arg : geometry
    n : integer

    Returns
    -------
    PointN : geometry scalar
    """
    op = ops.GeoPointN(arg, n)
    return op.to_expr()
6da6e0f362fac5e63093e0b93b5cf75b4f05bb5f
25,012
def replace_pasture_scrubland_with_shrubland(df, start_col, end_col):
    """Merge pasture and scrubland state transitions into 'shrubland'.

    1. Remove transitions /between/ scrubland and pasture and vice versa.
    2. Check there are no duplicate transitions which would be caused by an
       identical set of conditions leading from or to both pasture and
       scrubland being merged.
    3. Rename all instances of either 'scrubland' or 'pasture' to 'shrubland'.
    4. Check for duplicates again.
    """
    df = remove_transitions_bw_pasture_and_scrubland(df, start_col, end_col)

    duplicates_start = duplicates_start_with_pasture_or_scrubland(df, start_col, end_col)
    assert len(duplicates_start.index) == 0, "No duplicates expected."

    duplicates_end = duplicates_end_with_pasture_or_scrubland(df, start_col, end_col)
    assert len(duplicates_end.index) == 0, "No duplicates expected."

    for col in [start_col, end_col]:
        for lct in [MLct.SCRUBLAND.alias, MLct.PASTURE.alias]:
            df.loc[:, col] = df[col].replace(lct, AsLct.SHRUBLAND.alias)

    cond_cols = ["succession", "aspect", "pine", "oak", "deciduous", "water"]
    cond_cols += [start_col, end_col]
    assert len(df[df.duplicated(cond_cols)].index) == 0, "There should be " \
        + "no duplicated rows."

    return df
9f3102a157e8fbaad1cca3631a117ab45470bae3
25,013
from typing import List


def get_storage_backend_descriptions() -> List[dict]:
    """
    Returns:
        A metadata dict for each supported storage backend.
    """
    result = list()
    for backend in SUPPORTED_STORAGE_BACKENDS:
        result.append(get_storage_backend(backend).metadata)
    return result
2ec097a7c70788da270849332f845316435ac746
25,014
from typing import Tuple


def get_business_with_most_location() -> Tuple:
    """
    Fetches LA API and returns the business with most locations from first page

    :return Tuple: business name and number of locations
    """
    response = _fetch_businesses_from_la_api()
    business_to_number_of_location = dict()
    if response.status_code == 200:
        businesses_list = response.json()
        for active_business in businesses_list:
            business_name = active_business["business_name"]
            if business_name not in business_to_number_of_location:
                business_to_number_of_location[business_name] = 1
            else:
                business_to_number_of_location[business_name] += 1
        (
            business_name_from_max,
            number_of_locations,
        ) = _get_max_business_occurrence(business_to_number_of_location)
    else:
        raise ServiceUnavailable()
    return business_name_from_max, number_of_locations
ee0d7f432387e4587f615bd294cc8c8276d5baf1
25,015
from typing import AbstractSet


def degrees_to_polynomial(degrees: AbstractSet[int]) -> Poly:
    """
    For each degree in a set, create the polynomial with those terms having
    coefficient 1 (and all other terms zero), e.g.:

        {0, 2, 5} -> x**5 + x**2 + 1
    """
    degrees_dict = dict.fromkeys(degrees, 1)
    return Poly.from_dict(degrees_dict, x)
6c6a27b499f766fae20c2a9cf97b7ae0352e7dc5
25,016
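A short check of degrees_to_polynomial, assuming the module-level Poly and x come from sympy (as the signature suggests):

from sympy import Poly, symbols

x = symbols('x')
print(degrees_to_polynomial({0, 2, 5}))  # Poly(x**5 + x**2 + 1, x, domain='ZZ')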
def validate_set_member_filter(filter_vals, vals_type, valid_vals=None):
    """
    Validate filter values that must be of a certain type or found among a
    set of known values.

    Args:
        filter_vals (obj or Set[obj]): Value or values to filter records by.
        vals_type (type or Tuple[type]): Type(s) of which all ``filter_vals``
            must be instances.
        valid_vals (Set[obj]): Set of valid values in which all
            ``filter_vals`` must be found.

    Return:
        Set[obj]: Validated and standardized filter values.

    Raises:
        TypeError
        ValueError
    """
    filter_vals = to_collection(filter_vals, vals_type, set)
    if valid_vals is not None:
        if not all(filter_val in valid_vals for filter_val in filter_vals):
            raise ValueError(
                "not all values in filter are valid: {}".format(
                    filter_vals.difference(valid_vals)
                )
            )
    return filter_vals
a48639473ed0ac303776d50fb4fb09fa45a74d8e
25,017
def update_many(token, checkids, fields, customerid=None):
    """ Updates a field(s) in multiple existing NodePing checks.

    Accepts a token, a dict of checkids, and fields to be updated in a
    NodePing check. Updates the specified fields for each of the given
    checks. To update a single check, use update.

    :type token: string
    :param token: Your NodePing API token
    :type checkids: dict
    :param checkids: CheckIDs with their check type to update
    :type fields: dict
    :param fields: Fields in check that will be updated
    :type customerid: string
    :param customerid: subaccount ID
    :rtype: dict
    :return: Return information from NodePing query
    """
    updated_checks = []

    for checkid, checktype in checkids.items():
        url = "{0}/{1}".format(API_URL, checkid)
        url = _utils.create_url(token, url, customerid)
        send_fields = fields.copy()
        send_fields.update({"type": checktype.upper()})
        updated_checks.append(_query_nodeping_api.put(url, send_fields))

    return updated_checks
b96d3e29c6335b6bbec357e42a008faa654c72ab
25,018
def main(start, end, csv_name, verbose):
    """Run script conditioned on user-input."""
    print("Collecting Pomological Watercolors {s} through {e}".format(s=start, e=end))
    return get_pomological_data(start=start, end=end, csv_name=csv_name,
                                verbose=verbose)
fd0c619f8e24929e705285bc9330ef1d21825d8b
25,019
import hashlib


def _sub_fetch_file(url, md5sum=None):
    """
    Sub-routine of _fetch_file

    :raises: :exc:`DownloadFailed`
    """
    contents = ''
    try:
        fh = urlopen(url)
        contents = fh.read()
        if md5sum is not None:
            filehash = hashlib.md5(contents).hexdigest()
            if md5sum and filehash != md5sum:
                raise DownloadFailed("md5sum didn't match for %s. Expected %s got %s" % (url, md5sum, filehash))
    except URLError as ex:
        raise DownloadFailed(str(ex))
    return contents
0a92aaa55661469686338631913020a99aab0d8c
25,020
def get_path_to_config(config_name: str) -> str:
    """Returns path to config dir"""
    return join(get_run_configs_dir(), config_name)
3a4092c66ea18929d001e7bb4e8b5b90b8a38439
25,021
def scan_db_and_save_table_info(data_source_id, db_connection, schema, table):
    """Scan the database for table info."""
    table_info = get_table_info(
        {}, schema, table, from_db_conn=True, db_conn=db_connection
    )
    old_table_info = fetch_table_info(data_source_id, schema, table, as_obj=True)
    data_source_metadata = DataSourceMetadata(
        data_source_id=data_source_id,
        metadata_type="table_info",
        metadata_param=get_metadata_param_str([schema, table]),
        metadata_info=table_info,
    )
    data_source_metadata.save(commit=True)
    if old_table_info:
        old_table_info.delete(commit=True)
    return table_info
050183b68891ff0ab0f45435d29206a5800b704c
25,023
def _get_non_heavy_neighbor_residues(df0, df1, cutoff):
    """Get neighboring residues for non-heavy atom-based distance."""
    non_heavy0 = df0[df0['element'] != 'H']
    non_heavy1 = df1[df1['element'] != 'H']

    dist = spa.distance.cdist(non_heavy0[['x', 'y', 'z']],
                              non_heavy1[['x', 'y', 'z']])
    pairs = np.array(np.where(dist < cutoff)).T
    if len(pairs) == 0:
        return [], []

    # Use the found pairs to find unique pairings of residues.
    res0 = non_heavy0.iloc[pairs[:, 0]][['pdb_name', 'model', 'chain', 'residue']]
    res1 = non_heavy1.iloc[pairs[:, 1]][['pdb_name', 'model', 'chain', 'residue']]
    res0 = res0.reset_index(drop=True)
    res1 = res1.reset_index(drop=True)
    # We concatenate so that we can find unique _pairs_.
    res = pd.concat((res0, res1), axis=1)
    res = res.drop_duplicates()
    # Split back out now that we have found duplicates.
    res0 = res.iloc[:, range(4)]
    res1 = res.iloc[:, range(4, 8)]
    res0 = res0.reset_index(drop=True)
    res1 = res1.reset_index(drop=True)
    return res0, res1
b27a341cb1e5e5dd74c881036d7002a107270cd5
25,024
def j0(ctx, x):
    """Computes the Bessel function `J_0(x)`. See :func:`besselj`."""
    return ctx.besselj(0, x)
c2defd50be3feb3791f5be5709e5312d1e232590
25,025
def mysql2df(host, user, password, db_name, tb_name):
    """
    Return mysql table data as a pandas DataFrame.

    :param host: host name
    :param user: user name
    :param password: password
    :param db_name: name of the database from where data will be exported
    :param tb_name: name of the table from where data will be exported
    """
    # Create a connection object
    # dialect+driver://username:password@host:port/database
    connect_string = "mysql+pymysql://{}:{}@{}/{}".format(user, password, host, db_name)
    engine = db.create_engine(connect_string, encoding='latin1', echo=True,
                              pool_pre_ping=True)
    connection = engine.connect()
    session = sessionmaker(bind=engine)()
    metadata = db.MetaData()

    try:
        # print the table column names
        tb = db.Table(tb_name, metadata, autoload=True, autoload_with=engine)
        print(tb.columns.keys())

        # Retrieve table data: 'SELECT * FROM table'
        sql_query = 'SELECT * FROM {}'.format(tb_name)
        df = pd.read_sql(sql_query, connection)
        return df
    except Exception as e:
        print('Error: {}'.format(str(e)))
    finally:
        engine.dispose()
        session.close()
a4ea75b9fa13e6cb48650e69f5d8216f24fdaf07
25,026
def is_int(number):
    """
    Check if a variable can be cast as an int.

    @param number: The number to check
    """
    try:
        int(number)
        return True
    except (TypeError, ValueError):
        return False
e8e8956942d96956cb34b424b34fb028620f8be1
25,027
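Example behavior of is_int across a few edge cases:

print(is_int("42"))   # True
print(is_int("4.2"))  # False -- int("4.2") raises ValueError
print(is_int(3.9))    # True  -- floats cast cleanly via truncation
print(is_int(None))   # False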
import pathlib


def get_versions(api_type=DEFAULT_TYPE):
    """Search for API object module files of api_type.

    Args:
        api_type (:obj:`str`, optional): Type of object module to load,
            must be one of :data:`API_TYPES`.
            Defaults to: :data:`DEFAULT_TYPE`.

    Raises:
        :exc:`exceptions.NoVersionFoundError`: If no API module files
            matching :data:`PATTERN` are found.

    Returns:
        :obj:`list` of :obj:`dict`
    """
    path = pathlib.Path(__file__).absolute().parent
    pattern = PATTERN.format(api_type=api_type)

    matches = [p for p in path.glob(pattern)]
    if not matches:
        error = "Unable to find any object modules matching pattern {r!r} in {p!r}"
        error = error.format(p=format(path), r=pattern)
        raise exceptions.NoVersionFoundError(error)

    versions = []
    for match in matches:
        name = match.stem
        vparts = name.split("_")
        vtype = vparts.pop(0)
        vparts = utils.versions.split_ver(vparts)
        vstr = utils.versions.join_ver(vparts)
        versions.append(
            {
                "ver_str": vstr,
                "ver_parts": vparts,
                "api_type": vtype,
                "module_file": name,
                "module_path": match,
            }
        )

    versions = sorted(versions, key=lambda x: x["ver_parts"], reverse=True)
    return versions
58b2df442901b080db12951ab48991371689e955
25,028
def model_flux(parameters_dict, xfibre, yfibre, wavelength, model_name): """Return n_fibre X n_wavelength array of model flux values.""" parameters_array = parameters_dict_to_array(parameters_dict, wavelength, model_name) return moffat_flux(parameters_array, xfibre, yfibre)
c3cf75fab6b8b4965aefeebf82d40378bcd1de19
25,029
# Assumes standalone Keras 2.x on the TensorFlow backend, as implied by the
# K.tensorflow_backend call below.
from keras import backend as K
from keras.layers import LSTM, CuDNNLSTM, Bidirectional
def new_rnn_layer(cfg, num_layer):
    """Creates a new RNN layer for each parameter depending on whether it is a bidirectional
    LSTM or not. Uses the fast LSTM implementation backed by CuDNN if a GPU is available.
    Note: The normal LSTMs use sigmoid recurrent activations so as to retain compatibility
    with CuDNNLSTM; see the following github issue for more details:
    https://github.com/keras-team/keras/issues/8860

    :param cfg: configuration of CharGen instance
    :param num_layer: ordinal number of the rnn layer being built
    :return: 3D tensor if return sequence is True
    """
    gpu_no = len(K.tensorflow_backend._get_available_gpus())
    if gpu_no > 0:
        print('GPU is available...')
        if cfg['bidirectional']:
            return Bidirectional(CuDNNLSTM(cfg['rnn_size'], return_sequences=True),
                                 name='rnn_{}'.format(num_layer))

        return CuDNNLSTM(cfg['rnn_size'], return_sequences=True, name='rnn_{}'.format(num_layer))
    else:
        print('No GPU available...')
        if cfg['bidirectional']:
            return Bidirectional(LSTM(cfg['rnn_size'], return_sequences=True,
                                      recurrent_activation='sigmoid'),
                                 name='rnn_{}'.format(num_layer))

        return LSTM(cfg['rnn_size'], return_sequences=True, recurrent_activation='sigmoid',
                    name='rnn_{}'.format(num_layer))
341ca96549f40c8607e44b9ef353313107a8fb0a
25,030
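A minimal sketch of how this factory might be called when stacking layers; the cfg keys shown are the two the function actually reads, everything else about the surrounding model is assumed:

cfg = {'rnn_size': 128, 'bidirectional': True}
layers = [new_rnn_layer(cfg, num_layer=i + 1) for i in range(3)]
# each entry is a (Bidirectional-wrapped) LSTM/CuDNNLSTM named 'rnn_1' ... 'rnn_3'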
import numpy as np
def firfreqz(h, omegas):
    """Evaluate frequency response of an FIR filter at discrete frequencies.

    Parameters
    ----------
    h : array_like
        FIR filter coefficient array for the numerator polynomial,
        e.g. H(z) = 1 + a*z^-1 + b*z^-2 gives h = [1, a, b].
    omegas : ndarray
        Normalized angular frequencies (radians/sample) at which to
        evaluate the response.

    Returns
    -------
    ndarray
        Complex frequency response H(e^{j*omega}) at each frequency.
    """
    hh = np.zeros(omegas.shape, dtype='complex128')
    for ii, aa in enumerate(h):
        hh += aa * np.exp(-1j * omegas * ii)
    return hh
4463b1dcd73090d2dedbdd0e78066e4d26d19655
25,031
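A quick numeric check: for h = [1, -0.5] the response at omega = 0 is 1 - 0.5 = 0.5 and at omega = pi it is 1 + 0.5 = 1.5, and the values should agree with scipy.signal.freqz evaluated at the same frequencies:

import numpy as np

h = [1.0, -0.5]                    # H(z) = 1 - 0.5 z^-1
omegas = np.linspace(0, np.pi, 5)
H = firfreqz(h, omegas)
print(np.abs(H))                   # rises from 0.5 at DC to 1.5 at Nyquist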
import pickle
def write_np2pickle(output_fp: str, array, timestamps: list) -> bool:
    """
    Convert and save Heimann HTPA NumPy array shaped [frames, height, width] to a pickle file.

    Parameters
    ----------
    output_fp : str
        Filepath to destination file, including the file name.
    array : np.array
        Temperature distribution sequence, shaped [frames, height, width].
    timestamps : list
        List of timestamps of corresponding array frames.

    Returns
    -------
    bool
        True on success.
    """
    ensure_parent_exists(output_fp)
    with open(output_fp, "wb") as f:
        pickle.dump((array, timestamps), f)
    return True
96956829a41f3955440693f0d754b013a218e941
25,032
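A round-trip sketch (the output path is hypothetical); reading back with pickle.load recovers the (array, timestamps) tuple in the order it was dumped:

import pickle
import numpy as np

frames = np.zeros((10, 32, 32))        # 10 dummy 32x32 thermal frames
stamps = [0.1 * i for i in range(10)]
write_np2pickle("out/seq.pkl", frames, stamps)

with open("out/seq.pkl", "rb") as f:
    array, timestamps = pickle.load(f)
assert array.shape == (10, 32, 32)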
import logging
import webbrowser
from http.server import HTTPServer, BaseHTTPRequestHandler

def run(
    client_id_: str,
    client_secret_: str,
    server_class=HTTPServer,
    # NOTE: the original default (`S` imported from `re`, a regex flag) was a
    # broken import; callers should pass a request handler that implements the
    # OAuth GET endpoint (see the sketch below this record)
    handler_class=BaseHTTPRequestHandler,
    port=8080
) -> str:
    """
    Generates a Mapillary OAuth url, prints it to screen, and opens it
    automatically in a browser. Declares some global variables to pull data from
    the HTTP server through the GET endpoint.
    """
    # These global variables are defined so that we can pass data to / get data from the GET endpoint
    global client_id
    global client_secret
    global access_token
    client_id = client_id_
    client_secret = client_secret_

    server_address = ("localhost", port)
    httpd = server_class(server_address, handler_class)
    logging.info("Starting httpd and opening Mapillary to authenticate...")
    try:
        # Print the OAuth link to console and also try to open it directly in the browser
        auth_url = AUTH_URL.format(client_id)
        logging.info(
            "Please authenticate (if browser didn't automatically open): {}".format(auth_url)
        )
        webbrowser.open_new_tab(auth_url)
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
    logging.info("Stopping httpd...")

    return access_token
152e4c0ae5c20b8e39693478ff5d06c1cc5fa8a5
25,033
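A hypothetical sketch of a handler that would actually work with run(): it captures the ?code=... query parameter from the OAuth redirect. The real project's handler presumably also performs the code-for-token exchange and stops the server, which is only hinted at in comments here:

from http.server import BaseHTTPRequestHandler
from urllib.parse import parse_qs, urlparse

class OAuthCallbackHandler(BaseHTTPRequestHandler):
    """Hypothetical callback handler: captures ?code=... from the redirect."""

    def do_GET(self):
        params = parse_qs(urlparse(self.path).query)
        code = params.get("code", [None])[0]
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b"Authentication received, you may close this tab.")
        # a real handler would now exchange `code` for the global
        # `access_token` using `client_id` / `client_secret`, then shut
        # the server down so run() can return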
def sort_by_rank_change(val):
    """
    Sort key by rank change

    :param val: node
    :return: the node's absolute rank change as a float
    """
    return abs(float(val["rank_change"]))
ff5730e7cc765949dcfcfd4a3da32947ce3a411a
25,034
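As the name suggests, this is meant as a sort key, e.g. to rank nodes by the magnitude of their movement regardless of direction:

nodes = [{"rank_change": "-3.5"}, {"rank_change": "1.0"}, {"rank_change": "2"}]
nodes.sort(key=sort_by_rank_change, reverse=True)
print([n["rank_change"] for n in nodes])  # ['-3.5', '2', '1.0']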
import flask
def ping():
    """Always return 200."""
    status = 200
    return flask.Response(response='\n', status=status, mimetype='application/json')
8407d4ef4188badbeff5ba34868d530b06dd5158
25,035
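A sketch of wiring this health check into an app; the '/ping' route name is an assumption, not taken from the source:

import flask

app = flask.Flask(__name__)
app.add_url_rule("/ping", view_func=ping)  # GET /ping -> 200 with an empty JSON body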
def add_gtid_ranges_to_executed_set(existing_set, *new_ranges): """Takes in a dict like {"uuid1": [[1, 4], [7, 12]], "uuid2": [[1, 100]]} (as returned by e.g. parse_gtid_range_string) and any number of lists of type [{"server_uuid": "uuid", "start": 1, "end": 3}, ...]. Adds all the ranges in the lists to the ranges in the dict and returns a new dict that contains minimal representation with both the old and new ranges.""" all_ranges = [] for server_uuid, ranges in existing_set.items(): for rng in ranges: all_ranges.append({ "end": rng[1], "server_uuid": server_uuid, "start": rng[0], }) for rng in new_ranges: all_ranges.extend(rng) return partition_sort_and_combine_gtid_ranges(all_ranges)
47a71f2a55054d83092ffbb2119bcab7760f28a8
25,037
import cv2
def fetch_rgb(img):
    """For outputting RGB values from click events to the terminal.

    :param img: input image
    :type img: cv2 image
    :return: the rgb list
    :rtype: list
    """
    rgb_list = []

    def click_event(event, x, y, flags, param):
        if event == cv2.EVENT_LBUTTONDOWN:
            # OpenCV stores pixels as BGR, so channel 2 is red and channel 0 is blue
            red = img[y, x, 2]
            blue = img[y, x, 0]
            green = img[y, x, 1]
            print(red, green, blue)  # prints to command line
            rgb_list.append([red, green, blue])
            cv2.imshow('original', img)

    cv2.imshow('original', img)
    cv2.setMouseCallback("original", click_event)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    return rgb_list
16ff0359d47eb31a4f9c529740a9813680937e22
25,038
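Interactive usage sketch (the image path is a placeholder); each left click prints and records one pixel, and any key press ends the session:

import cv2

img = cv2.imread("photo.jpg")  # hypothetical input image
clicked = fetch_rgb(img)       # left-click pixels, then press any key
print(clicked)                 # e.g. [[128, 64, 32], ...]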
import functools
def _get_date_filter_consumer(field):
    """date.{lt, lte, gt, gte}=<ISO DATE>"""
    date_filter = make_date_filter(functools.partial(django_date_filter, field_name=field))

    def _date_consumer(key, value):
        if '.' in key and key.split(".")[0] == field:
            _prefix, qualifier = key.split(".", maxsplit=1)
            try:
                return date_filter(qualifier, value)
            except ValueError as e:
                raise InvalidFilterError(str(e))

        return {}

    return _date_consumer
37b7938ef5cebd29d487ec1e53cfc86d13a726d3
25,039
from os.path import dirname, join, realpath
def data_path(fname):
    """
    Gets a path for a given filename. This ensures that relative filenames
    to data files can be used from all modules.

    model.json -> .../src/data/model.json
    """
    return join(dirname(realpath(__file__)), fname)
294c91a041227fd9da6d1c9c8063de283281e85e
25,040
import sympy as sp
from sympy.logic.boolalg import BooleanAtom
def _parse_special_functions(sym: sp.Expr, toplevel: bool = True) -> sp.Expr:
    """
    Recursively checks the symbolic expression for functions which have to
    be parsed in a special way, such as piecewise functions

    :param sym:
        symbolic expression
    :param toplevel:
        as this is called recursively, are we in the top level expression?
    """
    args = tuple(arg if arg.__class__.__name__ == 'piecewise'
                 and sym.__class__.__name__ == 'piecewise'
                 else _parse_special_functions(arg, False)
                 for arg in sym.args)

    fun_mappings = {
        'times': sp.Mul,
        'xor': sp.Xor,
        'abs': sp.Abs,
        'min': sp.Min,
        'max': sp.Max,
        'ceil': sp.functions.ceiling,
        'floor': sp.functions.floor,
        'factorial': sp.functions.factorial,
        'arcsin': sp.functions.asin,
        'arccos': sp.functions.acos,
        'arctan': sp.functions.atan,
        'arccot': sp.functions.acot,
        'arcsec': sp.functions.asec,
        'arccsc': sp.functions.acsc,
        'arcsinh': sp.functions.asinh,
        'arccosh': sp.functions.acosh,
        'arctanh': sp.functions.atanh,
        'arccoth': sp.functions.acoth,
        'arcsech': sp.functions.asech,
        'arccsch': sp.functions.acsch,
    }

    if sym.__class__.__name__ in fun_mappings:
        return fun_mappings[sym.__class__.__name__](*args)

    elif sym.__class__.__name__ == 'piecewise' \
            or isinstance(sym, sp.Piecewise):
        if isinstance(sym, sp.Piecewise):
            # this is sympy piecewise, can't be nested
            denested_args = args
        else:
            # this is sbml piecewise, can be nested
            denested_args = _denest_piecewise(args)
        return _parse_piecewise_to_heaviside(denested_args)

    if sym.__class__.__name__ == 'plus' and not sym.args:
        return sp.Float(0.0)

    if isinstance(sym, (sp.Function, sp.Mul, sp.Add, sp.Pow)):
        # replace the children in place with their parsed versions
        sym._args = args

    elif toplevel and isinstance(sym, BooleanAtom):
        # Replace boolean constants by numbers so they can be differentiated.
        # We must not replace inside a Piecewise function, therefore we only
        # replace if the complete expression consists of a single Boolean value.
        sym = sp.Float(int(bool(sym)))

    return sym
b560521ceee7cb4db16b808e44b1e538e236c00e
25,041
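A small demonstration of the name mapping, assuming non-piecewise input so the module-level piecewise helpers are never reached; an SBML-style 'arcsin' call is rewritten to SymPy's asin:

import sympy as sp

x = sp.Symbol('x')
raw = sp.Function('arcsin')(x)        # class name 'arcsin', as SBML emits it
print(_parse_special_functions(raw))  # -> asin(x)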
def get_airflow_config(version, timestamp, major, minor, patch, date, rc):
    """Return a dict of the configuration for the Pipeline."""
    config = dict(AIRFLOW_CONFIG)
    if version is not None:
        config['VERSION'] = version
    else:
        config['VERSION'] = config['VERSION'].format(
            major=major, minor=minor, patch=patch, date=date, rc=rc)
    config['MFEST_COMMIT'] = config['MFEST_COMMIT'].format(timestamp=timestamp)
    # This works because str.format ignores keyword args that aren't present.
    for k, v in config.items():
        if k not in ['VERSION', 'MFEST_COMMIT']:
            config[k] = v.format(VERSION=config['VERSION'])
    return config
87c76949dba717b801a8d526306d0274eb193cc5
25,044
import os
def find_duplicates(treeroot, tbl=None):
    """ Find duplicate files in a directory. """
    dup = {}
    if tbl is None:
        tbl = {}
    # os.path.walk exists only on Python 2; on Python 3 use os.walk instead
    # (a self-contained variant is sketched below).
    os.path.walk(treeroot, file_walker, tbl)
    for k, v in tbl.items():
        if len(v) > 1:
            dup[k] = v
    return dup
0a959e443b7a4f5c67e57b8fc7bf597fee96065a
25,045
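Since os.path.walk (and the module-level file_walker callback) is Python 2 only, here is a self-contained Python 3 sketch of the same idea, assuming the table keys files by a hash of their contents:

import hashlib
import os

def find_duplicates_py3(treeroot):
    """Map SHA-1 of file contents -> list of paths, keep only collisions."""
    tbl = {}
    for dirpath, _dirnames, filenames in os.walk(treeroot):
        for name in filenames:
            path = os.path.join(dirpath, name)
            with open(path, 'rb') as f:
                digest = hashlib.sha1(f.read()).hexdigest()
            tbl.setdefault(digest, []).append(path)
    return {k: v for k, v in tbl.items() if len(v) > 1}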
from rdkit import Chem
from rdkit.Chem import rdchem, rdmolops
def attach_capping(mol1, mol2):
    """Connects all N-terminals of mol1 with the desired capping mol2.

    Arguments:
        mol1 {rdKit mol object} -- first molecule to be connected
        mol2 {rdKit mol object} -- second molecule to be connected - chosen N-capping

    Returns:
        rdKit mol object -- mol1 updated (connected with mol2, one or more times)
    """
    count = 0

    # detect all the N-terminals in mol1
    for atom in mol1.GetAtoms():
        atom.SetProp('Cterm', 'False')
        if atom.GetSmarts() == '[N:2]' or atom.GetSmarts() == '[NH2:2]' or atom.GetSmarts() == '[NH:2]':
            count += 1
            atom.SetProp('Nterm', 'True')
        else:
            atom.SetProp('Nterm', 'False')

    # detect all the C-terminals in mol2 (there should be exactly one)
    for atom in mol2.GetAtoms():
        atom.SetProp('Nterm', 'False')
        if atom.GetSmarts() == '[C:1]' or atom.GetSmarts() == '[CH:1]':
            atom.SetProp('Cterm', 'True')
        else:
            atom.SetProp('Cterm', 'False')

    # mol2 is added to every N-terminal of mol1
    for i in range(count):
        combo = rdmolops.CombineMols(mol1, mol2)
        Nterm = []
        Cterm = []

        # save the indices of the atoms to be connected in two separate lists
        for atom in combo.GetAtoms():
            if atom.GetProp('Nterm') == 'True':
                Nterm.append(atom.GetIdx())
            if atom.GetProp('Cterm') == 'True':
                Cterm.append(atom.GetIdx())

        # create the amide bond
        edcombo = rdchem.EditableMol(combo)
        edcombo.AddBond(Nterm[0], Cterm[0], order=Chem.rdchem.BondType.SINGLE)
        clippedMol = edcombo.GetMol()

        # remove tags and labels from the atoms that reacted
        clippedMol.GetAtomWithIdx(Nterm[0]).SetProp('Nterm', 'False')
        clippedMol.GetAtomWithIdx(Cterm[0]).SetProp('Cterm', 'False')
        clippedMol.GetAtomWithIdx(Nterm[0]).SetAtomMapNum(0)
        clippedMol.GetAtomWithIdx(Cterm[0]).SetAtomMapNum(0)

        # update the 'core' molecule
        mol1 = clippedMol
    return mol1
24a80efd94c4a5d4e0ddba478240d7c1b82ad52b
25,046
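A hedged end-to-end sketch: the mapped SMILES below are hypothetical but follow the tags the function looks for ([NH:2] on the free N-terminus, [C:1] on the cap's carbonyl carbon), with hydrogen counts chosen so the new amide bond leaves valid valences; sanitizing after the edit lets RDKit verify the result:

from rdkit import Chem

peptide = Chem.MolFromSmiles('[NH:2]CC(=O)NCC(=O)O')  # glycylglycine, tagged N-terminus
cap = Chem.MolFromSmiles('C[C:1]=O')                  # acetyl cap, tagged carbonyl C
capped = attach_capping(peptide, cap)
Chem.SanitizeMol(capped)
print(Chem.MolToSmiles(capped))  # expected: CC(=O)NCC(=O)NCC(=O)O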
def gather_point(input, index):
    """
    **Gather Point Layer**

    Output is obtained by gathering entries of X indexed by `index`
    and concatenating them together.

    .. math::

        Out = X[Index]

    .. code-block:: text

        Given:

        X = [[1, 2, 3],
             [3, 4, 5],
             [5, 6, 7]]

        Index = [[1, 2]]

        Then:

        Out = [[3, 4, 5],
               [5, 6, 7]]

    Args:
        input (Variable): The source input with rank >= 1. This is a 3-D
            tensor with shape of [B, N, 3].
        index (Variable): The index input with shape of [B, M].

    Returns:
        output (Variable): The output is a tensor of gathered points with
            shape [B, M, 3].

    Examples:

        .. code-block:: python

            import paddle.fluid as fluid
            x = fluid.data(name='x', shape=[None, 5, 3], dtype='float32')
            index = fluid.data(name='index', shape=[None, 1], dtype='int32')
            output = fluid.layers.gather_point(x, index)

    """

    helper = LayerHelper('gather_point', **locals())
    dtype = helper.input_dtype()
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(
        type="gather_point",
        inputs={"X": input,
                "Index": index},
        outputs={"Output": out})
    return out
dc4298ccf7df084abfc7d63f88ae7edb03af4010
25,047
def _apply_size_dependent_ordering(input_feature, feature_level, block_level, expansion_size, use_explicit_padding, use_native_resize_op): """Applies Size-Dependent-Ordering when resizing feature maps. See https://arxiv.org/abs/1912.01106 Args: input_feature: input feature map to be resized. feature_level: the level of the input feature. block_level: the desired output level for the block. expansion_size: the expansion size for the block. use_explicit_padding: Whether to use explicit padding. use_native_resize_op: Whether to use native resize op. Returns: A transformed feature at the desired resolution and expansion size. """ padding = 'VALID' if use_explicit_padding else 'SAME' if feature_level >= block_level: # Perform 1x1 then upsampling. node = slim.conv2d( input_feature, expansion_size, [1, 1], activation_fn=None, normalizer_fn=slim.batch_norm, padding=padding, scope='Conv1x1') if feature_level == block_level: return node scale = 2**(feature_level - block_level) if use_native_resize_op: input_shape = shape_utils.combined_static_and_dynamic_shape(node) node = tf.image.resize_nearest_neighbor( node, [input_shape[1] * scale, input_shape[2] * scale]) else: node = ops.nearest_neighbor_upsampling(node, scale=scale) else: # Perform downsampling then 1x1. stride = 2**(block_level - feature_level) node = slim.max_pool2d( _maybe_pad(input_feature, use_explicit_padding), [3, 3], stride=[stride, stride], padding=padding, scope='Downsample') node = slim.conv2d( node, expansion_size, [1, 1], activation_fn=None, normalizer_fn=slim.batch_norm, padding=padding, scope='Conv1x1') return node
c44206246102bbddc706be2cb0644676650c4675
25,048
def distance(s1, s2):
    """Return the Levenshtein distance between strings s1 and s2."""
    if len(s1) < len(s2):
        return distance(s2, s1)

    # len(s1) >= len(s2)
    if len(s2) == 0:
        return len(s1)

    previous_row = range(len(s2) + 1)
    for i, c1 in enumerate(s1):
        current_row = [i + 1]
        for j, c2 in enumerate(s2):
            # j+1 instead of j since previous_row and current_row are one
            # character longer than s2
            insertions = previous_row[j + 1] + 1
            deletions = current_row[j] + 1
            substitutions = previous_row[j] + (c1 != c2)
            current_row.append(min(insertions, deletions, substitutions))
        previous_row = current_row

    return previous_row[-1]
d7bb6e7a374349fd65bde621a29ee110402d18aa
25,049
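Classic sanity checks; the textbook kitten/sitting pair needs exactly three edits:

assert distance("kitten", "sitting") == 3  # two substitutions, one insertion
assert distance("flaw", "lawn") == 2       # one deletion, one insertion
assert distance("", "abc") == 3            # empty string: all insertions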
def check_diversity(group, L):
    """Check if a group satisfies l-diversity, i.e. whether it contains at
    least L distinct sensitive-attribute (SA) values.
    """
    SA_values = set()
    for index in group:
        str_value = list_to_str(gl_data[index][-1], cmp)
        SA_values.add(str_value)
    if len(SA_values) >= L:
        return True
    return False
7e87f96a80651608688d86c9c9e921d793fb6a9e
25,050