Columns: content (string, lengths 35 to 762k) · sha1 (string, length 40) · id (int64, 0 to 3.66M)
import torch
import torch.nn.functional as F


def compute_cosine_distance(
    features,
    others=None,
    cuda=False,
):
    """Computes cosine distance.

    Args:
        features (torch.Tensor): 2-D feature matrix.
        others (torch.Tensor, optional): 2-D feature matrix. If None, the
            distance of `features` against itself is computed.
        cuda (bool): whether to move the tensors to the GPU first.

    Returns:
        torch.Tensor: distance matrix.
    """
    if others is None:
        if cuda:
            features = features.cuda()
        features = F.normalize(features, p=2, dim=1)
        dist_m = 1 - torch.mm(features, features.t())
    else:
        if cuda:
            features = features.cuda()
            others = others.cuda()
        features = F.normalize(features, p=2, dim=1)
        others = F.normalize(others, p=2, dim=1)
        dist_m = 1 - torch.mm(features, others.t())
    return dist_m.cpu().numpy()
702fe068f99efc1b8dda7f03d361dcceb62c7426
5,465
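A minimal usage sketch for compute_cosine_distance above; the feature shapes are illustrative:

import torch

# Two batches of 2-D features; rows are samples, columns are dimensions.
queries = torch.randn(4, 128)
gallery = torch.randn(6, 128)

# Pairwise cosine distances, shape (4, 6), returned as a numpy array.
dist = compute_cosine_distance(queries, gallery, cuda=False)
print(dist.shape)  # (4, 6)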
from typing import Any


def convert_dict_keys_case(obj: Any, case_style: str = CaseStyle.CAMEL):
    """
    This function recursively changes the case of all the keys in the obj
    argument
    """
    case_style = process_case_style(case_style=case_style)

    if isinstance(obj, (tuple, list)):
        return type(obj)(
            [convert_dict_keys_case(item, case_style) for item in obj]
        )
    elif isinstance(obj, dict):
        return {
            convert_string_case(key, case_style): convert_dict_keys_case(
                value, case_style
            )
            for key, value in obj.items()
            if key
        }
    else:
        return obj
2b09a7d8ace030ba2543f9bdb74e7201e07243e1
5,466
def chain_exception(new_exc, old_exc):
    """Set the __cause__ attribute on *new_exc* for explicit exception
    chaining.  Returns the inplace modified *new_exc*.
    """
    if DEVELOPER_MODE:
        new_exc.__cause__ = old_exc
    return new_exc
ce19e735a26fb03d170f74d6590fb256cd70d70a
5,467
def make_input_fn(x_out, prob_choice):
    """Use py_func to yield elements from the given generator."""
    inp = {"inputs": np.array(x_out).astype(np.int32),
           "problem_choice": prob_choice}
    flattened = tf.contrib.framework.nest.flatten(inp)
    types = [t.dtype for t in flattened]
    shapes = [[None] * len(t.shape) for t in flattened]
    first_ex_list = [inp]

    def py_func():
        if first_ex_list:
            example = first_ex_list.pop()
        else:
            example = inp
        return tf.contrib.framework.nest.flatten(example)

    def input_fn():
        flat_example = tf.py_func(py_func, [], types)
        _ = [t.set_shape(shape) for t, shape in zip(flat_example, shapes)]
        example = tf.contrib.framework.nest.pack_sequence_as(inp, flat_example)
        return example

    return input_fn
1e5e8717cb348a9114dd15ddce50cb33af50b75c
5,468
import re


def url_validate(url):
    """
    URL validation, used for the redirect URL passed at login.
    """
    regex = r'^\?next=((/\w+)*)'
    if isinstance(url, str) and re.match(regex, url):
        return url.split('?next=')[-1]
    return '/'
7a5aa5866018d1bf16c0f4ede527a770da760e17
5,469
from typing import Tuple

import numpy as np


def mie_harmonics(x: np.ndarray, L: int) -> Tuple[np.ndarray]:
    """Calculates the spherical harmonics of the mie field.

    The harmonics are calculated up to order L using the iterative method.

    Parameters
    ----------
    x : ndarray
        The cosine of the angle defined by the line passing through origo
        parallel to the propagation direction and the evaluation point,
        with the corner at origo.
    L : int
        The order up to which to evaluate the harmonics.

    Returns
    -------
    ndarray, ndarray
        Tuple of ndarray of shape (L, *x.shape)
    """
    PI = np.zeros((L, *x.shape))
    TAU = np.zeros((L, *x.shape))

    PI[0, :] = 1
    PI[1, :] = 3 * x
    TAU[0, :] = x
    TAU[1, :] = 6 * x * x - 3

    for i in range(3, L + 1):
        PI[i - 1] = (2 * i - 1) / (i - 1) * x * PI[i - 2] - i / (i - 1) * PI[i - 3]
        TAU[i - 1] = i * x * PI[i - 1] - (i + 1) * PI[i - 2]

    return PI, TAU
3998b065737db276142a3a25ee30c866ab52fbbd
5,470
def is_version_dir(vdir):
    """Check whether the given directory contains an esky app version.

    Currently it need only contain the "esky-files/bootstrap-manifest.txt" file.
    """
    if exists(pathjoin(vdir, ESKY_CONTROL_DIR, "bootstrap-manifest.txt")):
        return True
    return False
931d46c96523bd63d1087cb612a73d98d6338ae2
5,471
def session_decrypt_raw(encrypted_message, destination_key):
    """
    Decrypts the message from a random session key, encrypted with the
    destination key.

    Superior alternative when the destination key is slow (ex RSA).
    """
    block_size = destination_key.block_size
    encrypted_session_key = encrypted_message[:block_size]
    message = encrypted_message[block_size:]

    session_key = AesKey(destination_key.decrypt_raw(encrypted_session_key))
    return session_key.decrypt_raw(message)
7b13ea5d689e050aba5f5a4c6de9c0ca5346bb76
5,472
def gef_pybytes(x):
    """Returns an immutable bytes object from the string given as input."""
    return bytes(str(x), encoding="utf-8")
8e8cff61e035ac2ef9f6a2cf462a545a05c0ede8
5,473
import matplotlib
import matplotlib.pyplot as plt
import numpy as np


def plot_2d_morphing_basis(
    morpher,
    xlabel=r"$\theta_0$",
    ylabel=r"$\theta_1$",
    xrange=(-1.0, 1.0),
    yrange=(-1.0, 1.0),
    crange=(1.0, 100.0),
    resolution=100,
):
    r"""
    Visualizes a morphing basis and morphing errors for problems with a
    two-dimensional parameter space.

    Parameters
    ----------
    morpher : PhysicsMorpher
        PhysicsMorpher instance with defined basis.

    xlabel : str, optional
        Label for the x axis. Default value: r'$\theta_0$'.

    ylabel : str, optional
        Label for the y axis. Default value: r'$\theta_1$'.

    xrange : tuple of float, optional
        Range `(min, max)` for the x axis. Default value: (-1., 1.).

    yrange : tuple of float, optional
        Range `(min, max)` for the y axis. Default value: (-1., 1.).

    crange : tuple of float, optional
        Range `(min, max)` for the color map. Default value: (1., 100.).

    resolution : int, optional
        Number of points per axis for the rendering of the squared morphing
        weights. Default value: 100.

    Returns
    -------
    figure : Figure
        Plot as Matplotlib Figure instance.
    """
    basis = morpher.basis

    assert basis is not None, "No basis defined"
    assert basis.shape[1] == 2, "Only 2d problems can be plotted with this function"

    xi, yi = (np.linspace(xrange[0], xrange[1], resolution),
              np.linspace(yrange[0], yrange[1], resolution))
    xx, yy = np.meshgrid(xi, yi)
    xx, yy = xx.reshape((-1, 1)), yy.reshape((-1, 1))
    theta_test = np.hstack([xx, yy])

    squared_weights = []
    for theta in theta_test:
        wi = morpher.calculate_morphing_weights(theta, None)
        squared_weights.append(np.sum(wi * wi) ** 0.5)
    squared_weights = np.array(squared_weights).reshape((resolution, resolution))

    fig = plt.figure(figsize=(6.5, 5))
    ax = plt.gca()

    pcm = ax.pcolormesh(
        xi, yi, squared_weights,
        norm=matplotlib.colors.LogNorm(vmin=crange[0], vmax=crange[1]),
        cmap="viridis_r",
    )
    cbar = fig.colorbar(pcm, ax=ax, extend="both")

    plt.scatter(basis[:, 0], basis[:, 1], s=50.0, c="black")

    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    cbar.set_label(r"$\sqrt{\sum w_i^2}$")
    plt.xlim(xrange[0], xrange[1])
    plt.ylim(yrange[0], yrange[1])

    plt.tight_layout()

    return fig
a99fc0a710d42557ec35be646171a25ce640c01e
5,474
import numpy


def single_lut_conversion(lookup_table):
    """
    This constructs the function to convert data using a single lookup table.

    Parameters
    ----------
    lookup_table : numpy.ndarray

    Returns
    -------
    callable
    """
    _validate_lookup(lookup_table)

    def converter(data):
        if not isinstance(data, numpy.ndarray):
            raise ValueError('requires a numpy.ndarray, got {}'.format(type(data)))
        if data.dtype.name not in ['uint8', 'uint16']:
            raise ValueError('requires a numpy.ndarray of uint8 or uint16 dtype, '
                             'got {}'.format(data.dtype.name))
        if len(data.shape) == 3 and data.shape[2] != 1:
            raise ValueError('Requires a three-dimensional numpy.ndarray, '
                             'with single band in the last dimension. Got shape {}'.format(data.shape))
        return lookup_table[data[:, :, 0]]

    return converter
6d01aa69053d964933bf330d8cf2340ea3f13eba
5,475
def signal_average(cov, bin_edges=None, bin_width=40, kind=3, lmin=None,
                   dlspace=True, return_bins=False, **kwargs):
    """
    dcov = cov * ellfact
    bin dcov in annuli
    interpolate back on to ell
    cov = dcov / ellfact
    where ellfact = ell**2 if dlspace else 1
    """
    modlmap = cov.modlmap()
    assert np.all(np.isfinite(cov))
    dcov = cov * modlmap**2. if dlspace else cov.copy()
    if lmin is None:
        minell = maps.minimum_ell(dcov.shape, dcov.wcs)
    else:
        minell = modlmap[modlmap <= lmin].max()
    if bin_edges is None:
        bin_edges = np.append([2], np.arange(minell, modlmap.max(), bin_width))
    binner = stats.bin2D(modlmap, bin_edges)
    cents, c1d = binner.bin(dcov)
    outcov = enmap.enmap(
        maps.interp(cents, c1d, kind=kind, fill_value=c1d[-1], **kwargs)(modlmap),
        dcov.wcs)
    with np.errstate(invalid='ignore'):
        outcov = outcov / modlmap**2. if dlspace else outcov
    outcov[modlmap < 2] = 0
    assert np.all(np.isfinite(outcov))
    if return_bins:
        return cents, c1d, outcov
    else:
        return outcov
3075cdef1a7063c295a60c06b368bd337870c883
5,476
from collections import deque


def find_tree_diameter(g):
    """
    Standard awesome problem
    So for each node, I want to find the maximum distance to another node
    :param n:
    :param g:
    :return:
    """
    # First find the arbitrary node that is at maximum distance from the root
    # DFS - first time
    q = deque()
    q.append((1, 0))
    arbitrary_node = None
    visited = set()
    curr_max_length = 0
    while q:
        node, length = q.pop()
        visited.add(node)
        if length > curr_max_length:
            curr_max_length = length
            arbitrary_node = node
        for nei in g[node]:
            if nei not in visited:
                q.append((nei, length + 1))

    # Now keep this arbitrary node as root, and find the node that is at
    # maximum depth from it. That is the diameter of the tree.
    # DFS - second time
    q2 = deque()
    q2.append((arbitrary_node, 0))
    diameter_of_tree = 0
    visited2 = set()
    while q2:
        node, length = q2.pop()
        visited2.add(node)
        if length >= diameter_of_tree:
            diameter_of_tree = length
        for nei in g[node]:
            if nei not in visited2:
                q2.append((nei, length + 1))

    return diameter_of_tree
393e6f9b95316c005a3a056bdd291047f96853ec
5,477
def edge_list_to_adjacency(edges):
    """
    Create adjacency dictionary based on a list of edges

    :param edges: edges to create adjacency for
    :type edges:  :py:list

    :rtype: :py:dict
    """
    adjacency = dict([(n, []) for n in edge_list_to_nodes(edges)])
    for edge in edges:
        adjacency[edge[0]].append(edge[1])

    return adjacency
c756baf0cb1182ab79df0846afd97296a0b42679
5,478
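A short usage sketch for edge_list_to_adjacency above. It relies on an external edge_list_to_nodes helper from the same module; the stand-in below is a hypothetical version that returns the unique nodes of the edge list:

# Hypothetical stand-in for the module's edge_list_to_nodes helper.
def edge_list_to_nodes(edges):
    return {n for edge in edges for n in edge}

edges = [(1, 2), (1, 3), (2, 3)]
print(edge_list_to_adjacency(edges))
# {1: [2, 3], 2: [3], 3: []}  (key order may vary)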
def __renormalized_likelihood_above_threshold_lnlikelihood(
        data, thr=__thr, alpha=models.__alpha, beta=models.__beta,
        num_mc=models.__num_mc, **kwargs):
    """
    only include data that is above thr, treats them all as signals, and
    renormalizes the likelihood so that it only covers "detectable data"
    """
    truth = data[:, 0] >= thr
    if np.any(truth):
        # normalization of likelihood for data above threshold
        norm = 1 - np.exp(models.signalData_lncdf(thr, alpha=alpha, beta=beta,
                                                  num_mc=num_mc))
        return np.sum(models.signalData_lnpdf(data[truth][:, 0], alpha=alpha,
                                              beta=beta, num_mc=num_mc)
                      - np.log(norm))
    else:
        return 0
47d8d75ef136eb809fa62f77226734ad5201d49e
5,479
def _build_tags(model_uri, model_python_version=None, user_tags=None):
    """
    :param model_uri: URI to the MLflow model.
    :param model_python_version: The version of Python that was used to train
                                 the model, if the model was trained in Python.
    :param user_tags: A collection of user-specified tags to append to the
                      set of default tags.
    """
    tags = dict(user_tags) if user_tags is not None else {}
    tags["model_uri"] = model_uri
    if model_python_version is not None:
        tags["python_version"] = model_python_version
    return tags
8807967b3e9d89dbb7a24542d2709bc9293992df
5,480
def test_token(current_user: usermodels.User = Depends(get_current_user)):
    """
    Test access token
    """
    return current_user
b74580436bba2d02c14a0840fcc0a139e637abd2
5,481
def student_list_prof_request():
    """Return a JSON containing adding instructor requests, or raise 401
    if not authorized."""
    role_student = Role.query.filter_by(name='student').first()
    if current_user.is_authenticated and has_membership(current_user.id, role_student):
        list_approved = request.args.get('approved', type=int) or 0
        list_pending = request.args.get('pending', type=int) or 0
        list_declined = request.args.get('declined', type=int) or 0
        prof_requests = []
        if list_approved:
            prof_requests.extend(AddProfRequest.query.filter_by(
                user_id=current_user.id,
                approved=ApprovalType.APPROVED,
            ).all())
        if list_pending:
            prof_requests.extend(AddProfRequest.query.filter_by(
                user_id=current_user.id,
                approved=ApprovalType.PENDING,
            ).all())
        if list_declined:
            prof_requests.extend(AddProfRequest.query.filter_by(
                user_id=current_user.id,
                approved=ApprovalType.DECLINED,
            ).all())
        ret = []
        for prof_request in prof_requests:
            ret.append({
                'id': prof_request.id,
                'name': prof_request.name,
                'department_id': prof_request.department.id,
                'course_id': prof_request.course.id,
                'term_id': prof_request.term.id,
                'approved': prof_request.approved.value,
            })
        return jsonify(ret)
    else:
        abort(401)
488a5b342cdb8a83ff94f0c234f5a0996c0c0203
5,482
def set_complete(request, id):
    """
    Mark a ticket as complete.

    :param request:
    :param id:
    """
    ticket = Tickets.objects.get(pk=id)
    ticket.complete = 1
    ticket.save()
    return redirect('/ticket/id=%s' % id)
a37e24751e6899c1cc9f413c6d0a356825b1c79f
5,483
import pathlib


def parse_version_from_path(path):
    """Get version parts from a path name."""
    path = pathlib.Path(path).absolute()
    version = path.name
    try:
        parts = version.split("_")
        ret = {}
        ret["major"] = try_int(parts[0])
        ret["minor"] = try_int(parts[1])
        ret["protocol"] = try_int(parts[2])
        ret["build"] = try_int(parts[3])
        ret["string"] = version.replace("_", ".")
        ret["file"] = version
    except Exception:
        error = "Bad API version in '{p}', must look like: '7_2_314_3181'"
        error = error.format(p=path)
        raise Exception(error)
    return ret
56abb214d6c3b6033a77b10c4d1a1d836ce0f8bd
5,484
from typing import Tuple


def unsorted_array(arr: list) -> Tuple[list, int, Tuple[int, int]]:
    """
    Time Complexity: O(n)
    """
    start, end = 0, len(arr) - 1
    while start < end and arr[start] < arr[start + 1]:
        start += 1
    while start < end and arr[end] > arr[end - 1]:
        end -= 1

    for el in arr[start:end + 1]:
        # another way of implementing this part would be to find the min and
        # max of the subarray and keep on decrementing start/incrementing end
        while el < arr[start]:
            start -= 1
        while el > arr[end]:
            end += 1

    if start + 1 < end - 1:
        return arr[start + 1:end], end - start - 1, (start + 1, end - 1)
    return [], 0, (-1, -1)
c3370a3e76009ef26ae3e1086e773463c312c6bb
5,486
def get_tol_values(places):
    # type: (float) -> list
    """List of tolerances to test

    Returns:
        list[tuple[float, float]] -- [(abs_tol, rel_tol)]
    """
    abs_tol = 1.1 / pow(10, places)
    return [(None, None), (abs_tol, None)]
5af82438abbc0889374d62181ca7f0b7ee3c0fbe
5,487
def index(atom: Atom) -> int:
    """Index within the parent molecule (int)."""
    return atom.GetIdx()
64d385588f683a048dfa9d54ea25d85c14f04cb7
5,488
def specification_config() -> GeneratorConfig:
    """A spec cache of r4"""
    return load_config("python_pydantic")
ca4936fac7499cf986fb7ae201a07b77d6b7d917
5,489
def _wait_for_stack_ready(stack_name, region, proxy_config):
    """
    Verify if the Stack is in one of the final states.

    :param stack_name: Stack to query for
    :param region: AWS region
    :param proxy_config: Proxy configuration
    :return: true if the stack is in one of the *_COMPLETE or *_FAILED statuses
    """
    log.info("Waiting for stack %s to be ready", stack_name)
    cfn_client = boto3.client("cloudformation", region_name=region, config=proxy_config)
    stacks = cfn_client.describe_stacks(StackName=stack_name)
    stack_status = stacks["Stacks"][0]["StackStatus"]
    log.info("Stack %s is in status: %s", stack_name, stack_status)
    return stack_status in [
        "CREATE_COMPLETE",
        "UPDATE_COMPLETE",
        "UPDATE_ROLLBACK_COMPLETE",
        "CREATE_FAILED",
        "UPDATE_FAILED",
    ]
b9ed5cd161b2baef23da40bc0b67b8b7dfc2f2ea
5,490
from typing import Optional


def create_order_number_sequence(
    shop_id: ShopID, prefix: str, *, value: Optional[int] = None
) -> OrderNumberSequence:
    """Create an order number sequence."""
    sequence = DbOrderNumberSequence(shop_id, prefix, value=value)

    db.session.add(sequence)

    try:
        db.session.commit()
    except IntegrityError as exc:
        db.session.rollback()
        raise OrderNumberSequenceCreationFailed(
            f'Could not create order number sequence with prefix "{prefix}"'
        ) from exc

    return _db_entity_to_order_number_sequence(sequence)
49484a1145e0d2c0dde9fdf2935428b5f68cd190
5,491
def calcMFCC(signal, sample_rate=16000, win_length=0.025, win_step=0.01,
             filters_num=26, NFFT=512, low_freq=0, high_freq=None,
             pre_emphasis_coeff=0.97, cep_lifter=22, append_energy=True,
             append_delta=False):
    """Calculate MFCC Features.

    Arguments:
        signal: 1-D numpy array.
        sample_rate: Sampling rate. Defaulted to 16KHz.
        win_length: Window length. Defaulted to 0.025, which is 25ms/frame.
        win_step: Interval between the start points of adjacent frames.
            Defaulted to 0.01, which is 10ms.
        filters_num: Numbers of filters. Defaulted to 26.
        NFFT: Size of FFT. Defaulted to 512.
        low_freq: Lowest frequency.
        high_freq: Highest frequency.
        pre_emphasis_coeff: Coefficient for pre-emphasis. Pre-emphasis
            increases the energy of the signal at higher frequencies.
            Defaulted to 0.97.
        cep_lifter: Numbers of lifter for cepstral. Defaulted to 22.
        append_energy: Whether to append energy. Defaulted to True.
        append_delta: Whether to append delta to feature. Defaulted to False.

    Returns:
        2-D numpy array with shape (NUMFRAMES, features). Each frame
        containing filters_num of features.
    """
    (feat, energy) = _fbank(signal, sample_rate, win_length, win_step,
                            filters_num, NFFT, low_freq, high_freq,
                            pre_emphasis_coeff)
    feat = np.log(feat)
    feat = dct(feat, type=2, axis=1, norm='ortho')
    feat = _lifter(feat, cep_lifter)
    if append_energy:
        feat[:, 0] = np.log(energy)
    if append_delta:
        feat_delta = _delta(feat)
        feat_delta_delta = _delta(feat_delta)
        feat = np.concatenate((feat, feat_delta, feat_delta_delta), axis=1)
    return feat
b10429bec859af3f5e6302e7f2f185bf64178922
5,494
def get_user(domain_id=None, enabled=None, idp_id=None, name=None,
             password_expires_at=None, protocol_id=None, region=None,
             unique_id=None):
    """
    Use this data source to get the ID of an OpenStack user.
    """
    __args__ = dict()

    __args__['domainId'] = domain_id
    __args__['enabled'] = enabled
    __args__['idpId'] = idp_id
    __args__['name'] = name
    __args__['passwordExpiresAt'] = password_expires_at
    __args__['protocolId'] = protocol_id
    __args__['region'] = region
    __args__['uniqueId'] = unique_id
    __ret__ = pulumi.runtime.invoke('openstack:identity/getUser:getUser', __args__)

    return GetUserResult(
        default_project_id=__ret__.get('defaultProjectId'),
        domain_id=__ret__.get('domainId'),
        region=__ret__.get('region'),
        id=__ret__.get('id'))
e7f6a874816673ebab1fe2f5e807def02fe232d1
5,495
def use(workflow_id, version, client=None):
    """
    Use like ``import``: load the proxy object of a published `Workflow` version.

    Parameters
    ----------
    workflow_id: str
        ID of the `Workflow` to retrieve
    version: str
        Version of the workflow to retrieve
    client: `.workflows.client.Client`, optional
        Allows you to use a specific client instance with non-default
        auth and parameters

    Returns
    -------
    obj: Proxytype
        Proxy object of the `Workflow` version.

    Example
    -------
    >>> import descarteslabs.workflows as wf
    >>> @wf.publish("[email protected]:ndvi", "0.0.1") # doctest: +SKIP
    ... def ndvi(img: wf.Image) -> wf.Image:
    ...     nir, red = img.unpack_bands("nir red")
    ...     return (nir - red) / (nir + red)
    >>> same_function = wf.use("[email protected]:ndvi", "0.0.1") # doctest: +SKIP
    >>> same_function # doctest: +SKIP
    <descarteslabs.workflows.types.function.function.Function[Image, {}, Image] object at 0x...>
    >>> img = wf.Image.from_id("sentinel-2:L1C:2019-05-04_13SDV_99_S2B_v1")
    >>> same_function(img).compute(geoctx) # geoctx is an arbitrary geocontext for 'img' # doctest: +SKIP
    ImageResult:
    ...
    """
    return VersionedGraft.get(workflow_id, version, client=client).object
d8f34e521af161e8840e11c8c888fd959e8584c9
5,496
def logout():
    """
    Route for logout page.
    """
    logout_user()
    return redirect(url_for('index'))
4e59dd9a6b59639e24053be072f4ade4fb23d922
5,497
def ipv4_size_check(ipv4_long):
    """Size check for an IPv4 address in decimal form.

    Args:
        ipv4_long (int): ipv4 decimal

    Returns:
        bool: valid: True
    """
    if type(ipv4_long) is not int:
        return False
    elif 0 <= ipv4_long <= 4294967295:
        return True
    else:
        return False
97c5d5c7472fb81e280f91275b5a88b032ee7927
5,498
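A quick check of ipv4_size_check above; 4294967295 is the largest 32-bit value (255.255.255.255):

print(ipv4_size_check(4294967295))  # True
print(ipv4_size_check(4294967296))  # False (out of 32-bit range)
print(ipv4_size_check("42"))        # False (not an int)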
def generate_gate_hadamard_mat() -> np.ndarray:
    """Return the Hilbert-Schmidt representation matrix for a Hadamard (H)
    gate with respect to the orthonormal Hermitian matrix basis with the
    normalized identity matrix as the 0th element.

    The result is a 4 x 4 real matrix.

    Parameters
    ----------

    Returns
    ----------
    np.ndarray
        The real Hilbert-Schmidt representation matrix for the gate.
    """
    l = [[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 0]]
    mat = np.array(l, dtype=np.float64)
    return mat
8dd36adb0cef79cbc758267fb7adf371f7b698b0
5,499
def display_ordinal_value(glyph: str):
    """Displays the integer value of the given glyph

    Examples:
        >>> display_ordinal_value('🐍')
        128013
        >>> display_ordinal_value('G')
        71
        >>> display_ordinal_value('g')
        103
    """
    return ord(glyph)
7daa53180023bfec2968308d463ac615a83a4e55
5,501
def mixture_HPX(gases, Xs):
    """
    Given a mixture of gases and their mole fractions,
    this method returns the enthalpy, pressure, and composition
    string needed to initialize the mixture gas in Cantera.

    NOTE: The method of setting enthalpy usually fails, b/c Cantera
    uses a Newton iterator to find the temperature that yields the
    specified enthalpy, and it isn't very robust. Instead, approximate
    constant Cp's and find T_mix manually, as with the mixture_TPX()
    method above.
    """
    # --------------
    # X

    mixture_d = {}
    for gas, wx_i in zip(gases, Xs):
        for sp in gas.species_names:
            if sp in mixture_d:
                mixture_d[sp] += wx_i * gas.mole_fraction(sp)
            elif gas.mole_fraction(sp) != 0.0:
                mixture_d[sp] = wx_i * gas.mole_fraction(sp)
            else:
                pass

    mixture_s = convert_composition_dict_to_string(mixture_d)

    # --------------
    # H

    # Compute Tmix with molar heat capacities
    #
    # Define:
    # h_mix = sum_i n_i h_i
    #
    # where h is molar enthalpy

    # compute H_mix
    H_mix = 0
    for gas, wx_i in zip(gases, Xs):
        H_mix += wx_i * gas.enthalpy_mole

    # --------------
    # P

    press = 0.0
    for gas, wx_i in zip(gases, Xs):
        press += wx_i * gas.P

    # -------------------
    # Return HPX

    return H_mix, press, mixture_s
a04a2bdcd1a58eaf26facf2f542d2c1aaba6e611
5,502
import json

import gspread
from oauth2client.service_account import ServiceAccountCredentials


def authorize(config):
    """Authorize in GSheets."""
    # `scope` is assumed to be defined at module level (the Sheets/Drive API
    # scopes used by the service account).
    json_credential = json.loads(config['credentials']['gspread']['credential'])
    credentials = ServiceAccountCredentials.from_json_keyfile_dict(json_credential, scope)
    return gspread.authorize(credentials)
fd54e0df5a71d2896f925dbd9d4e7536659906f9
5,503
def _without_command(results):
    """A helper to tune up results so that they lack 'command'
    which is guaranteed to differ between different cmd types
    """
    out = []
    for r in results:
        r = r.copy()
        r.pop('command')
        out.append(r)
    return out
67927cf56884e0e3b22d0daf37e6c02eaef3849b
5,504
def b(k, a):
    """
    Optimal discretisation of TBSS to minimise error, p. 9.
    """
    return ((k**(a+1) - (k-1)**(a+1)) / (a+1))**(1/a)
d563a39710aec05334f38af704371db1dc7f94fc
5,505
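A quick numeric check of b above, with an illustrative a = 0.5:

print(b(1, 0.5))  # ≈ 0.444
print(b(2, 0.5))  # ≈ 1.486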
def get_network_interfaces(properties):
    """ Get the configuration that connects the instance to an existing network
        and assigns to it an ephemeral public IP if specified.
    """
    network_interfaces = []

    networks = properties.get('networks', [])
    if len(networks) == 0 and properties.get('network'):
        network = {
            "network": properties.get('network'),
            "subnetwork": properties.get('subnetwork'),
            "networkIP": properties.get('networkIP'),
        }
        networks.append(network)
        if properties.get('hasExternalIp'):
            network['accessConfigs'] = [{
                "type": "ONE_TO_ONE_NAT",
            }]
            if properties.get('natIP'):
                network['accessConfigs'][0]["natIp"] = properties.get('natIP')

    for network in networks:
        if not '.' in network['network'] and not '/' in network['network']:
            network_name = 'global/networks/{}'.format(network['network'])
        else:
            network_name = network['network']

        network_interface = {
            'network': network_name,
        }

        netif_optional_props = ['subnetwork', 'networkIP', 'aliasIpRanges',
                                'accessConfigs']
        for prop in netif_optional_props:
            if network.get(prop):
                network_interface[prop] = network[prop]
        network_interfaces.append(network_interface)

    return network_interfaces
0f8db05c0c8b95f8bde7752a9e9766e479db098f
5,507
def MATCH(*args) -> Function:
    """
    Returns the relative position of an item in a range that matches a
    specified value.

    Learn more: https://support.google.com/docs/answer/3093378
    """
    return Function("MATCH", args)
aa03f558e0948fac023622b6569bb6f504e92cba
5,508
def set_dict_to_zero_with_list(dictionary, key_list):
    """ Set the value of every dictionary key that is not in the given list
    to zero

    Args:
        dictionary (dict): dictionary to filter
        key_list (list): keys whose values are kept in the filtered dictionary

    Returns:
        dictionary (dict): the filtered dictionary with the values of all
        keys outside the input list turned to zero
    """
    # Generate list of unwanted keys
    unwanted = (set(dictionary.keys()) - set(key_list))

    # Set the values of the unwanted keys to zero
    for unwanted_key in unwanted:
        dictionary[unwanted_key] = 0

    return dictionary
498c0c4a7444c0bbb33168c2f17bfcf2bd8e805e
5,509
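A quick usage sketch for set_dict_to_zero_with_list above: values for keys outside the list are zeroed in place.

d = {"a": 1, "b": 2, "c": 3}
print(set_dict_to_zero_with_list(d, ["a"]))
# {'a': 1, 'b': 0, 'c': 0}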
def terminal_condition_for_minitaur_extended_env(env):
    """Returns a bool indicating that the extended env is terminated.

    This predicate checks whether 1) the legs are bent inward too much or
    2) the body is tilted too much.

    Args:
        env: An instance of MinitaurGymEnv
    """
    motor_angles = env.robot.motor_angles
    leg_pose = minitaur_pose_utils.motor_angles_to_leg_pose(motor_angles)
    swing_threshold = np.radians(35.0)
    if (leg_pose[0] > swing_threshold or leg_pose[2] > swing_threshold or  # Front
            leg_pose[1] < -swing_threshold or leg_pose[3] < -swing_threshold):  # Rear
        return True
    roll, _, _ = env.robot.base_roll_pitch_yaw
    if abs(roll) > np.radians(30.0):
        return True
    return False
be80901777bc7d5c03b152e3c9af9a30c3526d1e
5,510
def print_tree(tree, level=0, current=False):
    """Pretty-print a dictionary configuration `tree`"""
    pre = ' ' * level
    msg = ''
    for k, v in tree.items():
        if k == 'self':
            msg += print_tree(v, level)
            continue
        # Detect subdevice
        if isinstance(v, dict) and 'self' in v:
            msg += pre + '|++> ' + k + '\n'
            msg += print_tree(v, level + 1)
            continue
        if not current:
            continue
        v = repr(v['current'])
        if len(v) > 50:
            v = v[:46] + ' ...'
        msg += '{}|: {} = {}\n'.format(pre, k, v)
    return msg
f9697b506e9254b4982a037bdfbeb8a1d27f35bb
5,513
def chaine_polynome(poly):
    """Return the string representation of the polynomial _poly_ (simple version)"""
    tab_str = [str(coef) + "*X^" + str(i) if i != 0 else str(coef)
               for i, coef in enumerate(poly)]
    return " + ".join(tab_str[::-1])
79fd59afe84c1bd12e3417b9195514664d1bce20
5,514
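For instance, with coefficients ordered from the constant term up:

print(chaine_polynome([1, 2, 3]))
# 3*X^2 + 2*X^1 + 1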
def get_opr_from_dict(dict_of_opr_vals):
    """Takes in a dictionary where the keys are temperatures and values are
    optical rotation values. The dictionary is for all the temperatures and
    optical rotation values extracted for one molecule. This function
    determines which of the values in the dictionary to keep.

    Args:
        dict_of_opr_vals ([dict]): Keys are temperature and values are
            optical rotation vals.

    Returns:
        [String]: Final optical rotation value for a molecule
    """
    if len(dict_of_opr_vals) > 0:
        dict_keys = list(dict_of_opr_vals.keys())
        if dict_keys.count("") == len(dict_keys):
            return dict_of_opr_vals[""]
        if "" in dict_keys:
            dict_keys.remove("")
        if dict_keys.count("X") == len(dict_keys):
            return dict_of_opr_vals["X"]
        else:
            try:
                dict_keys.remove("X")
            except:
                pass
        return dict_of_opr_vals[dict_keys[abs_distance(dict_keys)]]
    else:
        return dict_of_opr_vals[0]
c0c688835ffb38fe4fb1a88fd91f8374d854d75a
5,515
import re


def tokens(s):
    """Return a list of strings containing individual words from string s.

    This function splits on whitespace transitions, and captures apostrophes
    (for contractions).

    >>> tokens("I'm fine, how are you?")
    ["I'm", 'fine', 'how', 'are', 'you']
    """
    words = re.findall(r"\b[\w']+\b", s)
    return words
aee0b6fad2f9107c893496f1f3807e80c9d2e44b
5,516
def get_variable_value(schema, definition_ast, input):
    """Given a variable definition, and any value of input, return a value
    which adheres to the variable definition, or throw an error."""
    type = type_from_ast(schema, definition_ast.type)
    if not type or not is_input_type(type):
        raise GraphQLError(
            'Variable ${} expected value of type {} which cannot be used as '
            'an input type.'.format(
                definition_ast.variable.name.value,
                print_ast(definition_ast.type),
            ),
            [definition_ast]
        )
    if is_valid_value(type, input):
        if is_nullish(input):
            default_value = definition_ast.default_value
            if default_value:
                return coerce_value_ast(type, default_value, None)
        return coerce_value(type, input)
    raise GraphQLError(
        'Variable ${} expected value of type {} but got: {}'.format(
            definition_ast.variable.name.value,
            print_ast(definition_ast.type),
            repr(input)
        ),
        [definition_ast]
    )
09c3fa10dcb25704c6323f78d244b27605a393ed
5,517
def _convert_3d_crop_window_to_2d(crop_window):
    """Converts a 3D crop window to a 2D crop window.

    Extracts just the spatial parameters of the crop window and assumes that
    those apply uniformly across all channels.

    Args:
        crop_window: A 3D crop window, expressed as a Tensor in the format
            [offset_height, offset_width, offset_channel, crop_height,
            crop_width, crop_channels].

    Returns:
        A 2D crop window as a Tensor in the format
        [offset_height, offset_width, crop_height, crop_width].
    """
    with tf.name_scope('3d_crop_window_to_2d'):
        return tf.gather(crop_window, [0, 1, 3, 4])
e5eb7d97c55c0ab18caf135728bb1daa6e5b2d8c
5,518
def apply_along_axis(func1d, mat, axis):
    """Numba utility to apply reduction to a given axis."""
    assert mat.ndim == 2
    assert axis in [0, 1]
    if axis == 0:
        result = np.empty(mat.shape[1], mat.dtype)
        for i in range(len(result)):
            result[i] = func1d(mat[:, i])
    else:
        result = np.empty(mat.shape[0], mat.dtype)
        for i in range(len(result)):
            result[i] = func1d(mat[i, :])
    return result
87f1dcd3ed04e8626a59aaff1caabba6c52ce8d3
5,519
def get_all_article():
    """
    Get all articles
    ---
    tags:
      - Article API
    responses:
      200:
        description: Articles retrieved successfully
      404:
        description: Resource does not exist
      500:
        description: Server error
    """
    articles = ArticleLibrary.get_all()
    return jsonify(articles)
7304e862351730ace03ad8e784665cf844d1c94f
5,520
def cut_fedora_prefix(uri):
    """
    Cut the Fedora URI prefix from a URI.
    """
    return uri[len(FEDORA_URI_PREFIX):]
617b00bc34f4ad69b82858496ecc19bc2a5e6fd2
5,521
def get_database_login_connection(user, password, host, database):
    """ Return database connection object based on user and database details provided """
    connection = psycopg2.connect(user=user,
                                  password=password,
                                  host=host,
                                  port="5432",
                                  database=database,
                                  sslmode="prefer")
    set_auto_commit(connection)
    return connection
55b8cd2fb7e9e2acc00ce76c660f709920d59eb8
5,522
def getaddrinfo(host, port, family=0, socktype=socket.SOCK_STREAM, proto=0,
                allow_cname=True):
    """Resolve host and port into addrinfo struct.

    Does the same thing as socket.getaddrinfo, but using `pyxmpp.resolver`.
    This makes it possible to reuse data (A records from the additional
    section of DNS reply) returned with SRV records lookup done using this
    module.

    :Parameters:
        - `host`: service domain name.
        - `port`: service port number or name.
        - `family`: address family.
        - `socktype`: socket type.
        - `proto`: protocol number or name.
        - `allow_cname`: when False CNAME responses are not allowed.
    :Types:
        - `host`: `unicode` or `str`
        - `port`: `int` or `str`
        - `family`: `int`
        - `socktype`: `int`
        - `proto`: `int` or `str`
        - `allow_cname`: `bool`

    :return: list of (family, socktype, proto, canonname, sockaddr).
    :returntype: `list` of (`int`, `int`, `int`, `str`, (`str`, `int`))
    """
    ret = []
    if proto == 0:
        proto = socket.getprotobyname("tcp")
    elif type(proto) != int:
        proto = socket.getprotobyname(proto)
    if type(port) != int:
        port = socket.getservbyname(port, proto)
    if family not in (0, socket.AF_INET):
        raise NotImplementedError("Protocol family other than AF_INET not supported, yet")
    if ip_re.match(host):
        return [(socket.AF_INET, socktype, proto, host, (host, port))]
    host = idna.ToASCII(host)
    try:
        r = dns.resolver.query(host, 'A')
    except dns.exception.DNSException:
        r = dns.resolver.query(host + ".", 'A')
    if not allow_cname and r.rrset.name != dns.name.from_text(host):
        raise ValueError("Unexpected CNAME record found for %s" % (host,))
    if r:
        for rr in r:
            ret.append((socket.AF_INET, socktype, proto, r.rrset.name,
                        (rr.to_text(), port)))
    return ret
1e65eb69a2d23dd93b0676be5e739545674aa021
5,523
import copy
import json


def get_layout_for_dashboard(available_pages_list):
    """
    Makes the dictionary that determines the dashboard layout page.
    Displays the graphic title to represent the graphic.

    :param available_pages_list:
    :return:
    """
    available_pages_list_copy = copy.deepcopy(available_pages_list)
    for available_page_dict in available_pages_list_copy:
        graphic_list = available_page_dict[GRAPHIC_CONFIG_FILES]
        for graphic_index, graphic_path in enumerate(graphic_list):
            graphic_json = json.loads(load_graphic_config_dict(graphic_path))
            graphic_list[graphic_index] = {
                GRAPHIC_PATH: graphic_path,
                GRAPHIC_TITLE: graphic_json[GRAPHIC_TITLE],
            }
    return available_pages_list_copy
a391a93a70c0fc755657a6b93ef90bd4811b6d4c
5,524
def median(list_in):
    """
    Calculates the median of the data

    :param list_in: A list
    :return: float
    """
    list_in.sort()
    half = int(len(list_in) / 2)

    if len(list_in) % 2 != 0:
        return float(list_in[half])
    elif len(list_in) % 2 == 0:
        value = (list_in[half - 1] + list_in[half]) / 2
        return float(value)
261487551098b80986cbfb8e4cd28279649ac456
5,525
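A quick check of median above (note that the input list is sorted in place):

print(median([7, 1, 3]))     # 3.0 (odd count: middle element)
print(median([4, 1, 3, 2]))  # 2.5 (even count: mean of the two middle elements)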
def search_file(expr, path=None, abspath=False, follow_links=False):
    """
    Given a search path, recursively descend to find files that match a
    regular expression.

    Can specify the following options:
       path - The directory that is searched recursively
       abspath - If True, return absolute paths
       follow_links - If True, resolve symbolic links to their targets
    """
    ans = []
    pattern = re.compile(expr)
    if path is None or path == ".":
        path = os.getcwd()
    elif not os.path.exists(path):
        raise IOError("Unknown directory '" + path + "'")
    for root, dirs, files in os.walk(path, topdown=True):
        for name in files:
            if pattern.match(name):
                name = os.path.join(root, name)
                if follow_links and os.path.islink(name):
                    ans.append(os.path.abspath(os.readlink(name)))
                elif abspath:
                    ans.append(os.path.abspath(name))
                else:
                    ans.append(name)
    return ans
f3d2501f535865455646168ecf81a4a12e66fcfa
5,526
import logging


def delete_gwlbe(gwlbe_ids):
    """
    Deletes VPC Endpoint (GWLB-E).

    Accepts:
    - gwlbe_ids (list of str): ['vpce-xxxx', 'vpce-yyyy']

    Usage:
    - delete_gwlbe(['vpce-xxxx', 'vpce-yyyy'])
    """
    logging.info("Deleting VPC Endpoints:")
    try:
        response = ec2.delete_vpc_endpoints(
            VpcEndpointIds=gwlbe_ids
        )
        return response
    except ClientError as e:
        logging.error(e)
        return None
854b9991dda8198de87895ddf7dbc65fbb6746e8
5,527
def subdivide_loop(surface: SurfaceData,
                   number_of_iterations: int = 1) -> SurfaceData:
    """Make a mesh more detailed by subdividing in a loop.

    If iterations are high, this can take very long.

    Parameters
    ----------
    surface: napari.types.SurfaceData
    number_of_iterations: int

    See Also
    --------
    ..[0] http://www.open3d.org/docs/0.12.0/tutorial/geometry/mesh.html#Mesh-subdivision
    """
    mesh_in = to_mesh(surface)
    mesh_out = mesh_in.subdivide_loop(number_of_iterations=number_of_iterations)
    return to_surface(mesh_out)
85fda9f2626f3fdd48c0a2eecbc4d3dffc49919a
5,528
def register():
    """Register a new user.

    Validates that the username is not already taken. Hashes the
    password for security.
    """
    if request.method == 'POST':
        username = request.form['username']
        password = request.form['password']
        phone = request.form['full_phone']
        channel = request.form['channel']
        db = get_db()
        error = None

        if not username:
            error = 'Username is required.'
        elif not phone:
            error = 'Phone number is required'
        elif not password:
            error = 'Password is required.'
        elif db.execute(
            'SELECT id FROM user WHERE username = ?', (username,)
        ).fetchone() is not None:
            error = 'User {0} is already registered.'.format(username)

        if error is None:
            session['phone'] = phone
            vsid = start_verification(phone, channel)
            if vsid is not None:
                # the verification was sent to the user and the username is
                # valid, redirect to verification check
                db.execute(
                    'INSERT INTO user (username, password, phone_number) VALUES (?, ?, ?)',
                    (username, generate_password_hash(password), phone)
                )
                db.commit()
                return redirect(url_for('auth.verify'))

        flash(error)

    return render_template('auth/register.html')
9e1b2c86a20710d56cf5cf737ab1a35d67970179
5,529
def Read_FImage(Object, Channel, iFlags=0):
    """
    Read_FImage(Object, Channel, iFlags=0) -> bool
    Read_FImage(Object, Channel) -> bool
    """
    return _Channel.Read_FImage(Object, Channel, iFlags)
06af43adbbfaf94e9f26b1ad41d6ba6f7ae5cfe7
5,532
import json

import yaml


def Export(message, stream=None, schema_path=None):
    """Writes a message as YAML to a stream.

    Args:
        message: Message to write.
        stream: Output stream, None for writing to a string and returning it.
        schema_path: JSON schema file path. If None then all message fields
            are written, otherwise only fields in the schema are written.

    Returns:
        Returns the return value of yaml.dump(). If stream is None then the
        return value is the YAML data as a string.
    """
    result = _ProtoJsonApiTools.Get().encode_message(message)
    message_dict = json.loads(
        encoding_helper._IncludeFields(result, message, None))
    if schema_path:
        _FilterYAML(message_dict, schema_path)
    return yaml.dump(message_dict, stream=stream)
53f74ff11dfe46eab0549ea466c5ea80c6876bd7
5,533
def user_locale_get(handle, user_name, name, caller="user_locale_get"):
    """
    gets locale for the user

    Args:
        handle (UcsHandle)
        user_name (string): username
        name (string): locale name

    Returns:
        AaaUserLocale: managed object

    Raises:
        UcsOperationError: if AaaUserLocale is not present

    Example:
        user_locale_get(handle, user_name="test", name="testlocale")
    """
    user_dn = _base_dn + "/user-" + user_name
    dn = user_dn + "/locale-" + name
    mo = handle.query_dn(dn)
    if mo is None:
        raise UcsOperationError(caller, "User locale '%s' does not exist" % dn)
    return mo
a748d8fd2e349c43dfabd07108943005be95729e
5,534
from typing import Dict
from typing import List
from typing import Set
from typing import Any
from functools import reduce


def get_set_from_dict_from_dict(
    instance: Dict[str, Dict[str, List]], field: str
) -> Set[Any]:
    """
    Format of template field within payload

    Function gets field from instance-dict, which is a dict again. The
    values of these dicts have to be joined in a set.
    """
    cml = instance.get(field)
    if cml:
        return reduce(lambda i1, i2: i1 | i2, [set(values) for values in cml.values()])
    else:
        return set()
75ee6f4d46a4f57012e76b0f02fb20f629b6bf60
5,535
def initiate_os_session(unscoped: str, project: str) -> keystoneauth1.session.Session:
    """
    Create a new openstack session with the unscoped token and project id.

    Params:
        unscoped: str
        project: str
    Returns:
        A usable keystone session object for OS client connections
    Return type:
        object(keystoneauth1.session.Session)
    """
    os_auth = v3.Token(
        auth_url=setd["auth_endpoint_url"], token=unscoped, project_id=project
    )

    return keystoneauth1.session.Session(
        auth=os_auth,
        verify=True,
    )
ab96af612721a5043c60e9a76c512301b0b1de6f
5,536
def delete_topic_collection_items(request_ctx, collection_item_id, topic_id,
                                  **request_kwargs):
    """
    Deletes the discussion topic. This will also delete the assignment, if
    it's an assignment discussion.

    :param request_ctx: The request context
    :type request_ctx: :class:RequestContext
    :param collection_item_id: (required) ID
    :type collection_item_id: string
    :param topic_id: (required) ID
    :type topic_id: string
    :return: Delete a topic
    :rtype: requests.Response (with void data)
    """
    path = '/v1/collection_items/{collection_item_id}/discussion_topics/{topic_id}'
    url = request_ctx.base_api_url + path.format(
        collection_item_id=collection_item_id, topic_id=topic_id)
    response = client.delete(request_ctx, url, **request_kwargs)

    return response
3c45e9f0b65e731480c8a81163be01b5cd5fbd83
5,537
from typing import List


def xml_section_extract_elsevier(section_root, element_list=None) -> List[ArticleElement]:
    """ Depth-first search of the text in the sections """
    if element_list is None:
        element_list = list()
    for child in section_root:
        if 'label' in child.tag or 'section-title' in child.tag or 'para' in child.tag:
            target_txt = get_xml_text_iter(child)
            element_type = None
            if 'label' in child.tag:
                element_type = ArticleElementType.SECTION_ID
            elif 'section-title' in child.tag:
                element_type = ArticleElementType.SECTION_TITLE
            elif 'para' in child.tag:
                element_type = ArticleElementType.PARAGRAPH
            element = ArticleElement(type=element_type, content=target_txt)
            element_list.append(element)
        elif 'section' in child.tag:
            xml_section_extract_elsevier(section_root=child, element_list=element_list)

    return element_list
919e1bb7f1ae96b857776f6c81c3e032cfbba4a9
5,538
def get_data_from_string(string, data_type, key=None):
    """
    Get data from a string; the type can be int, float, or str.

    Key is the prefix that the wanted string starts with. It is needed only
    when we parse strings from execution-file output (not from test.txt).
    """
    data = []

    if data_type in ("int", "float"):
        data = Text.get_numbers(string, type_=data_type)
    elif data_type == "str":
        if key is None:
            data = Text.get_strings_from_tests(string)
        else:
            data = Text.get_strings_from_exec(string, key)

    return data
10135d96bd0cdb37d38268a795f92d80be294adc
5,539
import requests


def greenline(apikey, stop):
    """
    Return processed green line data for a stop.
    """
    # Only green line trips
    filter_route = "Green-B,Green-C,Green-D,Green-E"

    # Include vehicle and trip data
    include = "vehicle,trip"

    # API request
    p = {"filter[route]": filter_route, "include": include, "filter[stop]": stop}
    result = requests.get("https://api-v3.mbta.com/predictions", params=p).json()
    return processGreenlinePredictions(result)
254a7cb43cf0789b1437b6ee3ea2262b4d22b4ca
5,540
def Get_Unread_Messages(service, userId):
    """Retrieves all unread messages with attachments, returns list of message ids.

    Args:
        service: Authorized Gmail API service instance.
        userId: User's email address. The special value "me" can be used to
            indicate the authenticated user.
    """
    message_list = []
    message_ids = service.users().messages().list(userId=userId,
                                                  labelIds='INBOX',
                                                  alt="json",
                                                  q='is:unread has:attachment').execute()

    if message_ids['resultSizeEstimate'] > 0:
        for message in message_ids['messages']:
            message_list.append(message['id'])

    return message_list
2aa28ff1aa093754bd293a831be2dada0e629801
5,541
from math import sqrt

from sklearn.metrics import mean_squared_error


def rmse(y_true, y_pred):
    """
    Computes the root-mean-square error (RMSE).
    """
    return sqrt(mean_squared_error(y_true, y_pred))
377849b692190ae880221676eb898bbe84e466e5
5,542
from pathlib import Path
from typing import Type


def read_model_json(path: Path, model: Type[ModelT]) -> ModelT:
    """
    Reading routine. Only keeps Model data
    """
    return model.parse_file(path=path)
0b0fb327efdc1acaff2adce3e5b738a1cabbf30a
5,543
def details(request, id=None):
    """
    Show details about alert

    :param request:
    :param id: alert ID
    :return:
    """
    alert = get_object_or_404(Alert, id=id)

    context = {
        "user": request.user,
        "alert": alert,
    }

    return render(request, "alerts/details.html", context)
9522a69fc69eb80da301541073bfc320e991fae8
5,544
def get_class_id_map():
    """Get mapping between class_id and class_name"""
    sql = """
        SELECT class_id
             , class_name
        FROM classes
    """
    cur.execute(f"{sql};")
    result = [dict(x) for x in cur.fetchall()]

    class_map = {}
    for r in result:
        class_map[r["class_id"]] = r["class_name"]

    return class_map
d72df95f3f27cbfb04fe32b09d672ea1cff3cbc6
5,545
def potatoes(p0, w0, p1):
    """
    - p1/100 = water1 / (water1 + (1 - p0/100) * w0)
      => water1 = w0 * p1/100 * (1 - p0/100) / (1 - p1/100)
    - dry = w0 * (1 - p0/100)
    - w1 = water1 + dry = w0 * (100 - p0) / (100 - p1)

    Example:
        98/100 = water1 / (water1 + (1 - 99/100) * 100)
        water1 = 49
        w1 = 49 + 1 = 50
    """
    w1 = w0 * (100 - p0) / (100 - p1)
    return int(w1)
f2955a58db3a48c64b6acc4980e663f33332aeea
5,547
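The docstring's worked example, run through the function:

# 100 kg of potatoes at 99% water; drying them to 98% water halves the weight.
print(potatoes(99, 100, 98))  # 50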
def calc_Cinv_CCGT(CC_size_W, CCGT_cost_data):
    """
    Annualized investment costs for the Combined cycle

    :type CC_size_W : float
    :param CC_size_W: Electrical size of the CC

    :rtype: tuple of float
    :returns (Capex_a_CCGT_USD, Opex_fixed_CCGT_USD, Capex_CCGT_USD):
        annualized investment costs, fixed operation costs, and total
        investment costs in CHF

    ..[C. Weber, 2008] C.Weber, Multi-objective design and optimization of
    district energy systems including polygeneration energy conversion
    technologies., PhD Thesis, EPFL
    """
    # if the Q_design is below the lowest capacity available for the
    # technology, then it is replaced by the least capacity for the
    # corresponding technology from the database
    if CC_size_W < CCGT_cost_data['cap_min'][0]:
        CC_size_W = CCGT_cost_data['cap_min'][0]
    CCGT_cost_data = CCGT_cost_data[
        (CCGT_cost_data['cap_min'] <= CC_size_W) & (CCGT_cost_data['cap_max'] > CC_size_W)]

    # costs of connection
    connection_costs = ngas.calc_Cinv_gas(CC_size_W)

    Inv_a = CCGT_cost_data.iloc[0]['a']
    Inv_b = CCGT_cost_data.iloc[0]['b']
    Inv_c = CCGT_cost_data.iloc[0]['c']
    Inv_d = CCGT_cost_data.iloc[0]['d']
    Inv_e = CCGT_cost_data.iloc[0]['e']
    Inv_IR = (CCGT_cost_data.iloc[0]['IR_%']) / 100
    Inv_LT = CCGT_cost_data.iloc[0]['LT_yr']
    Inv_OM = CCGT_cost_data.iloc[0]['O&M_%'] / 100

    InvC = Inv_a + Inv_b * (CC_size_W) ** Inv_c + (Inv_d + Inv_e * CC_size_W) * log(CC_size_W)

    Capex_a_CCGT_USD = (InvC + connection_costs) * (Inv_IR) * (1 + Inv_IR) ** Inv_LT / ((1 + Inv_IR) ** Inv_LT - 1)
    Opex_fixed_CCGT_USD = InvC * Inv_OM
    Capex_CCGT_USD = InvC

    return Capex_a_CCGT_USD, Opex_fixed_CCGT_USD, Capex_CCGT_USD
92ea26dcfc66996dd564da9df73a117a57b308bd
5,548
def get_middle_slice_tiles(data, slice_direction):
    """Create a strip of intensity-normalized, square middle slices.
    """
    slicer = {"ax": 0, "cor": 1, "sag": 2}
    all_data_slicer = [slice(None), slice(None), slice(None)]
    num_slices = data.shape[slicer[slice_direction]]
    slice_num = int(num_slices / 2)
    all_data_slicer[slicer[slice_direction]] = slice_num
    middle_slices = data[tuple(all_data_slicer)]
    num_slices = middle_slices.shape[2]
    slice_tiles = [square_and_normalize_slice(middle_slices[..., mid_slice])
                   for mid_slice in range(num_slices)]

    return slice_tiles
7ab60139c38fd79a866ed14f065a3333c532162a
5,549
def example_two():
    """Serve example two page."""
    return render_template('public/examples/two.j2')
759721686f0411d1ee5ad75f76ed5a0158067bae
5,551
def omegaTurn(r_min, w_row, rows):
    """Determines a path (set of points) representing an omega turn.

    The resulting path starts at 0,0 with an angle of 0 deg. (pose = 0,0,0).
    It will turn left or right depending on if rows is positive (right turn)
    or negative (left turn). Path should be translated and rotated to its
    proper position in the field by the calling function.

    Parameters
    ----------
    r_min : float
        Turning radius of the vehicle.
    w_row : float
        The width of a row in the field.
    rows : int
        The number of rows between the current row and the target row
        e.g. Vehicle is turning from the mid-point of row i into the
        mid-point of row i+N

    Returns
    ----------
    path : np.array
        [[x1, x2, x3,...]
         [y1, y2, y3,...]]
        The path that the vehicle is to follow. It is defined by a set of
        x,y points.
    distance : float
        The length of the path that accomplishes the requested omega turn.
    """
    # First check if an omega turn is possible
    d = rows * w_row  # distance from start path to end path
    if rows * w_row > 2 * r_min:
        path = np.zeros((0, 0))  # Turn is not possible. Path is empty
        distance = np.nan  # Distance cannot be calculated
        return (path, distance)

    if d > 0:  # Turn to the right
        # Create the starting arc for leaving the path (60 points+endpoint)
        # Arc starts at pi/2 and rotates up/back toward 0, angle will be alpha
        alpha = np.arccos((r_min + d / 2) / (2 * r_min))
        a = np.linspace(np.pi / 2, np.pi / 2 - alpha, 61)
        x_start = 0 + r_min * np.cos(a)
        y_start = r_min - r_min * np.sin(a)

        # Create the final arc for entering the path (60 points+endpoint)
        a = np.linspace(-1 * np.pi / 2 + alpha, -1 * np.pi / 2, 61)
        x_end = 0 + r_min * np.cos(a)
        y_end = -1 * d - r_min - r_min * np.sin(a)

        # Create bulb section
        bulb_center_x = 2 * r_min * np.sqrt(
            1 - np.float_power((r_min + d / 2) / (2 * r_min), 2))
        bulb_center_y = -1 * d / 2
        a = np.linspace(-1 * np.pi / 2 - alpha, np.pi / 2 + alpha, 61)
        x_bulb = bulb_center_x + r_min * np.cos(a)
        y_bulb = bulb_center_y - r_min * np.sin(a)
    else:
        # Create the starting arc for leaving the path (60 points+endpoint)
        d = d * -1
        # Arc starts at pi/2 and rotates up/back toward 0, angle will be alpha
        alpha = np.arccos((r_min + d / 2) / (2 * r_min))
        a = np.linspace(-1 * np.pi / 2, -1 * np.pi / 2 + alpha, 61)
        x_start = 0 + r_min * np.cos(a)
        y_start = -1 * r_min - r_min * np.sin(a)

        # Create the final arc for entering the path (60 points+endpoint)
        a = np.linspace(np.pi / 2 - alpha, np.pi / 2, 61)
        x_end = 0 + r_min * np.cos(a)
        y_end = d + r_min - r_min * np.sin(a)

        # Create bulb section
        bulb_center_x = 2 * r_min * np.sqrt(
            1 - np.float_power((r_min + d / 2) / (2 * r_min), 2))
        bulb_center_y = d / 2
        a = np.linspace(np.pi / 2 + alpha, -1 * np.pi / 2 - alpha, 61)
        x_bulb = bulb_center_x + r_min * np.cos(a)
        y_bulb = bulb_center_y - r_min * np.sin(a)

    # Connect segments. Each segment repeats the start and end.
    x = np.hstack((x_start, x_bulb[1:], x_end[1:]))
    y = np.hstack((y_start, y_bulb[1:], y_end[1:]))

    path = np.array((x, y))
    distance = (4 * alpha + np.pi) * r_min

    return path, distance
39d3203d26199c585371e0208228c8b2839a8cd0
5,552
def sparse_ones(indices, dense_shape, dtype=tf.float32, name="sparse_ones"):
    """ Creates a new `SparseTensor` with the given indices having value 1

    Args:
        indices (`Tensor`): a rank 2 tensor with the `(row,column)` indices
            for the resulting sparse tensor
        dense_shape (`Tensor` or `TensorShape`): the output dense shape
        dtype (`tf.DType`): the tensor type for the values
        name (`str`): sparse_ones op

    Returns:
        sp_tensor (`SparseTensor`): a new sparse tensor with values set to 1
    """
    with tf.name_scope(name=name):
        indices = as_tensor(indices, tf.int64)
        dense_shape = as_tensor(dense_shape, tf.int64)
        indices_shape = indices.shape
        values = tf.ones([indices_shape[0]], dtype)
        return tf.SparseTensor(indices, values, dense_shape)
1dad9ce8d1f1ab1950f744fbfa084884732ea8de
5,553
from typing import Any


def ask(*args: Any, **kwargs: Any) -> Any:
    """Ask a modular question in the statusbar (blocking).

    Args:
        message: The message to display to the user.
        mode: A PromptMode.
        default: The default value to display.
        text: Additional text to show
        option: The option for always/never question answers.
            Only available with PromptMode.yesno.
        abort_on: A list of signals which abort the question if emitted.

    Return:
        The answer the user gave or None if the prompt was cancelled.
    """
    question = _build_question(*args, **kwargs)  # pylint: disable=missing-kwoa
    global_bridge.ask(question, blocking=True)
    answer = question.answer
    question.deleteLater()
    return answer
f5c65a4cdc83b5c22c4de97e41ed8a740f94ec3d
5,554
import re


def get_sale(this_line, cattle, category):
    """Convert the input into a dictionary, with keys matching
    the CSV column headers in the scrape_util module.
    """
    cattle = cattle.replace("MARKET", "")
    cattle = cattle.replace(":", "")
    cattle = cattle.strip().title()
    sale = {'cattle_cattle': cattle}
    if bool(re.search("TOWN", str(category))):
        for idx, title in enumerate(category):
            if title == "TOWN":
                sale['consignor_city'] = this_line[idx].strip().title()
            if title == "HEAD":
                head = this_line[idx]
                if '-' in head:
                    head = head.split('-')[0]
                if '/' in head:
                    head = head.split('/')[0]
                sale['cattle_head'] = head
            if title == "KIND":
                cattle = cattle + ' ' + this_line[idx].title()
                sale['cattle_cattle'] = cattle
            if title == "WEIGHT":
                sale['cattle_avg_weight'] = this_line[idx].replace(",", "")
            if title == "PRICE":
                price = this_line[idx].replace("$", "")
                price = price.replace(",", "")
                if bool(re.search("Pairs", cattle)):
                    sale['cattle_price'] = price
                else:
                    sale['cattle_price_cwt'] = price
    else:
        sale = {}
    sale = {k: v.strip() for k, v in sale.items() if v}
    return sale
f75e949558c9938a44f64ccce11bacce8d116e9f
5,555
def eig_min(a, eps=1e-7, kmax=1e3, log=False):
    """
    :param a: matrix to find min eigenvalue of
    :param eps: desired precision
    :param kmax: max number of iterations allowed
    :param log: whether to log the iterations
    """
    mu_1 = eig_max_abs(a, eps, kmax, log)
    return mu_1 - eig_max_abs(mu_1 * np.eye(a.shape[0]) - a, eps, kmax, log)
0c990207fe2b3a77aba636918bf78d9a138b718d
5,557
def relacao(lista):
    """Function that receives a list of real numbers and returns another
    list of size 3 in which (i) the first element is the count of numbers
    greater than zero, (ii) the second element is the count of numbers
    less than zero, and (iii) the last element is the count of zeros in
    the initial list.

    Args:
        lista (list): list received to be processed by the function

    Returns:
        list: list of size three in the order (greater than, less than,
        equal to zero)
    """
    maior = menor = igual = 0
    for i in lista:
        if i > 0:
            maior += 1
        elif i < 0:
            menor += 1
        else:
            igual += 1
    # Return a list, as documented, rather than a formatted string.
    return [maior, menor, igual]
39e45d8221d5d5b7322ebec5aa3f761d9e2ef413
5,558
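A quick check of relacao above:

print(relacao([3.5, -1, 0, 2, 0]))
# [2, 1, 2]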
def _input_to_dictionary(input_):
    """Convert.

    Args:
        input_: GraphQL "data" dictionary structure from mutation

    Returns:
        result: Dict of inputs
    """
    # 'column' is a dict of DB model 'non string' column names and their types
    column = {
        'idx_user': DATA_INT,
        'enabled': DATA_INT
    }
    result = utils.input_to_dictionary(input_, column=column)
    return result
263eb2449e8d272ef6c7e147ca7286f70e5cdbf9
5,559
def validate(request):
    """Validate an authentication request."""
    email_token = request.GET.get('a')
    client_token = request.GET.get('b')
    user = authenticate(email_token=email_token, counter_token=client_token)
    if user:
        login(request, user)
        return redirect(request.GET.get('success', '/'))
    else:
        return HttpResponseForbidden()
5a6fbf9d67a048f973126248c3a5dfcf596e5370
5,560
def strip_chr(bt):
    """Strip 'chr' from chromosomes for BedTool object

    Parameters
    ----------
    bt : pybedtools.BedTool
        BedTool to strip 'chr' from.

    Returns
    -------
    out : pybedtools.BedTool
        New BedTool with 'chr' stripped from chromosome names.
    """
    try:
        df = pd.read_table(bt.fn, header=None, dtype=str)
    # If the try fails, I assume that's because the file has a trackline.
    # Note that I don't preserve the trackline (I'm not sure how pybedtools
    # keeps track of it anyway).
    except pd.parser.CParserError:
        df = pd.read_table(bt.fn, header=None, skiprows=1, dtype=str)
    df[0] = df[0].apply(lambda x: x[3:])
    s = '\n'.join(df.astype(str).apply(lambda x: '\t'.join(x), axis=1)) + '\n'
    out = pbt.BedTool(s, from_string=True)
    return out
1382a71799f6de081c3ff3092792012ebac25f01
5,561
def fit_slice(fitter, sliceid, lbda_range=[5000, 8000], nslices=5, **kwargs):
    """Fit the given slice over the given wavelength range using metaslices."""
    fitvalues = fitter.fit_slice(lbda_ranges=lbda_range, metaslices=nslices,
                                 sliceid=sliceid, **kwargs)
    return fitvalues
2d2b4b91b0ba3b0dca908d56e8b5184e5ae36b9e
5,562
import functools


def execute_sync(function, sync_type):
    """
    Synchronize with the disassembler for safe database access.

    Modified from https://github.com/vrtadmin/FIRST-plugin-ida
    """

    @functools.wraps(function)
    def wrapper(*args, **kwargs):
        output = [None]

        #
        # this inline function definition is technically what will execute
        # in the context of the main thread. we use this thunk to capture
        # any output the function may want to return to the user.
        #

        def thunk():
            output[0] = function(*args, **kwargs)
            return 1

        if is_mainthread():
            thunk()
        else:
            idaapi.execute_sync(thunk, sync_type)

        # return the output of the synchronized execution
        return output[0]

    return wrapper
54034aa9853c1b04e7bfc2416a34019b87556518
5,563
def mk_living_arrangements(data_id, data):  # measurement group 11
    """
    transforms a f-living-arrangements.json form into the triples used by
    insertMeasurementGroup to store each measurement that is in the form

    :param data_id: unique id from the json form
    :param data: data array from the json form
    :return: The list of (typeid, valType, value) triples that are used by
        insertMeasurementGroup to add the measurements
    """
    return [(220, 2, data_id),
            (95, 6, lwh.mk_category(data['alone'], ['Alone', 'With someone'])),
            (96, 5, lwh.mk_category(data['arrange'],
                                    ['House', 'Apartment',
                                     'Independent living unit', 'Other'])),
            (97, 2, data['othertext'])]
d4a327c3fc22facf3c4e21fe0b9fd3ce600beebc
5,564
def ids_in(table):
    """Returns the ids in the given dataframe, either as a list of ints or
    a single int."""
    entity, id_colname = get_entity_and_id_colname(table)

    # Series.to_list() converts to a list of Python int rather than numpy.int64
    # Conversion to the list type and the int type are both necessary for the
    # shared functions
    ids = table[id_colname].to_list()
    ids = process_singleton_ids(ids, entity)
    return ids
5bb4a912c88bc7fc7e47cd14be5520c8cce32faf
5,565
def transformer_ae_base_tpu():
    """Base config adjusted for TPU."""
    hparams = transformer_ae_base()
    transformer.update_hparams_for_tpu(hparams)
    hparams.batch_size = 512
    return hparams
a71bb88b10400c867e0ac8fd35c7c3e79a95a119
5,566
def attribute_volume(tree, altitudes, area=None):
    r"""
    Volume of each node of the given tree.

    The volume :math:`V(n)` of a node :math:`n` is defined recursively as:

    .. math::

        V(n) = area(n) * | altitude(n) - altitude(parent(n)) | +
        \sum_{c \in children(n)} V(c)

    :param tree: input tree
    :param altitudes: node altitudes of the input tree
    :param area: area of the nodes of the input hierarchy (provided by
        :func:`~higra.attribute_area` on `tree`)
    :return: a 1d array
    """
    if area is None:
        area = hg.attribute_area(tree)

    height = np.abs(altitudes[tree.parents()] - altitudes)
    height = height * area
    volume_leaves = np.zeros(tree.num_leaves(), dtype=np.float64)
    return hg.accumulate_and_add_sequential(tree, height, volume_leaves,
                                            hg.Accumulators.sum)
91c884bcdcd4fde616870258f5d3f1582c420868
5,567
def populate_canary(canary_id, protocol, domain, dns, filename, rdir, settings):
    """Create actual canary URI / URL."""
    if protocol not in ['unc', 'http', 'https']:
        raise ValidationError('Unknown protocol specified')

    if dns:
        domain = f"{canary_id}.{domain}"
    else:
        domain = f"{settings.nginx_domain}.{domain}"

    if protocol == 'unc':
        if not rdir:
            canary = f"\\\\{domain}\\templates\\{filename}"
        else:
            canary = f"\\\\{domain}\\templates\\{rdir}\\{filename}"
    else:
        if not rdir:
            canary = f"{protocol}://{domain}/images/{filename}"
        else:
            canary = f"{protocol}://{domain}/images/{rdir}/{filename}"

    return canary
48a4a75cd65cd4d555a14d6c06363e46e0ced3f5
5,569
import pkg_resources

import pandas as pd


def get_wastewater_location_data():
    """Read in data of wastewater facility location data.

    :return: dataframe of wastewater location values
    """
    data = pkg_resources.resource_filename('interflow', 'input_data/WW_Facility_Loc.csv')

    # return dataframe
    return pd.read_csv(data, dtype={'CWNS_NUMBER': str})
23f0c425eccdf173e8c8563c8d80e5e7b6a9ead1
5,570
def generate_accounts(seeds):
    """Create private keys and addresses for all seeds.
    """
    return {
        seed: {
            'privatekey': encode_hex(sha3(seed)),
            'address': encode_hex(privatekey_to_address(sha3(seed))),
        }
        for seed in seeds
    }
b10b9616b6d4826262c9296bfe389f001e098939
5,571
def get_annotation_df(
    state: State,
    piece: Piece,
    root_type: PitchType,
    tonic_type: PitchType,
) -> pd.DataFrame:
    """
    Get a df containing the labels of the given state.

    Parameters
    ----------
    state : State
        The state containing harmony annotations.
    piece : Piece
        The piece which was used as input when creating the given state.
    root_type : PitchType
        The pitch type to use for chord root labels.
    tonic_type : PitchType
        The pitch type to use for key tonic annotations.

    Returns
    -------
    annotation_df : pd.DataFrame
        A DataFrame containing the harmony annotations from the given state.
    """
    labels_list = []

    chords, changes = state.get_chords()
    estimated_chord_labels = np.zeros(len(piece.get_inputs()), dtype=int)
    for chord, start, end in zip(chords, changes[:-1], changes[1:]):
        estimated_chord_labels[start:end] = chord

    keys, changes = state.get_keys()
    estimated_key_labels = np.zeros(len(piece.get_inputs()), dtype=int)
    for key, start, end in zip(keys, changes[:-1], changes[1:]):
        estimated_key_labels[start:end] = key

    chord_label_list = hu.get_chord_label_list(root_type, use_inversions=True)
    key_label_list = hu.get_key_label_list(tonic_type)

    prev_est_key_string = None
    prev_est_chord_string = None

    for duration, note, est_chord_label, est_key_label in zip(
        piece.get_duration_cache(),
        piece.get_inputs(),
        estimated_chord_labels,
        estimated_key_labels,
    ):
        if duration == 0:
            continue

        est_chord_string = chord_label_list[est_chord_label]
        est_key_string = key_label_list[est_key_label]

        # No change in labels
        if est_chord_string == prev_est_chord_string and est_key_string == prev_est_key_string:
            continue

        if est_key_string != prev_est_key_string:
            labels_list.append(
                {
                    "label": est_key_string,
                    "mc": note.onset[0],
                    "mc_onset": note.mc_onset,
                    "mn_onset": note.onset[1],
                }
            )

        if est_chord_string != prev_est_chord_string:
            labels_list.append(
                {
                    "label": est_chord_string,
                    "mc": note.onset[0],
                    "mc_onset": note.mc_onset,
                    "mn_onset": note.onset[1],
                }
            )

        prev_est_key_string = est_key_string
        prev_est_chord_string = est_chord_string

    return pd.DataFrame(labels_list)
19cf82dc77708099dc5c21695d30fd1c5d63ceb4
5,572
def prettify(elem):
    """Return a pretty-printed XML string for the Element."""
    rough_string = ET.tostring(elem, "utf-8")
    reparsed = minidom.parseString(rough_string)
    return reparsed.toprettyxml(indent=" ")
4469a4e5683dd3196ae188bd09517406ca8276bc
5,573
def parse_new_multipart_upload(data):
    """
    Parser for new multipart upload response.

    :param data: Response data for new multipart upload.
    :return: Returns an upload id.
    """
    root = S3Element.fromstring('InitiateMultipartUploadResult', data)
    return root.get_child_text('UploadId')
02c83634a02ec94de698735b41424e9e53a2576f
5,574
def mech_name_for_species(mech1_csv_str, mech2_csv_str, ich):
    """ build dictionaries to get the name for a given InCHI string """
    mech1_inchi_dct = mechparser.mechanism.species_inchi_name_dct(
        mech1_csv_str)
    mech2_inchi_dct = mechparser.mechanism.species_inchi_name_dct(
        mech2_csv_str)

    if ich in mech1_inchi_dct:
        mech1_name = mech1_inchi_dct[ich]
    else:
        mech1_name = 'Not in Mechanism'

    if ich in mech2_inchi_dct:
        mech2_name = mech2_inchi_dct[ich]
    else:
        mech2_name = 'Not in Mechanism'

    return mech1_name, mech2_name
fe173853dd7b9460a016b370c60fbc6f4eeaac93
5,575
def get_api(api, cors_handler, marshal=None, resp_model=None,
            parser=None, json_resp=True):
    """Returns default API decorator for GET request.

    :param api: Flask rest_plus API
    :param cors_handler: CORS handler
    :param marshal: The API marshaller, e.g. api.marshal_list_with
    :param resp_model: The API response model
    """
    funcs = [
        cors_handler,
        no_cache,
        log_header(),
    ]
    if json_resp:
        funcs.append(as_json)
    funcs.append(
        api.doc(responses={
            403: 'Not Authorized',
            404: 'Resource does not exist',
        }),
    )
    if parser:
        funcs.insert(-1, api.doc(parser=parser))
    if marshal and resp_model:
        funcs.insert(-1, marshal(resp_model))
    return utils.compose(*funcs)
d4774ec394a7365418b60cc0ef7665e702c0da28
5,576
import math


def fetch_total_n_items(num_items, uniform_distribution=False):
    """Get num_items files from internet archive in our dirty categories list"""
    logger.info(f"Fetching info for {num_items} internetarchive items...")
    categories_weights = CATEGORIES_WEIGHTS
    if uniform_distribution:
        categories_weights = [1 / len(DIRTY_CATEGORIES)
                              for x in range(len(DIRTY_CATEGORIES))]
    how_many_of_each_cat = [math.ceil(w * num_items) for w in categories_weights]
    logger.info(" ".join([f"{cat}:{quant}" for cat, quant
                          in zip(DIRTY_CATEGORIES, how_many_of_each_cat)]))
    total_items = []
    for amount, category in zip(how_many_of_each_cat, DIRTY_CATEGORIES):
        query = make_category_query(category)
        try:
            total_items.extend(fetch_items_in_query(query, amount))
        except Exception as e:
            logger.error(f"Failed to fetch info for \"{query}\" from internetarchive")
    return total_items
6b661c4c83c6d7766cb0a57a7f20eaa03ce44ed9
5,577