Dataset schema:
  content : string (length 35 – 762k)
  sha1    : string (length 40)
  id      : int64 (0 – 3.66M)
import numpy as np

def linear_forward(A, W, b):
    """
    Implement the linear part of a layer's forward propagation.

    Arguments:
    A -- activations from previous layer (or input data): (size of previous layer, number of examples)
    W -- weights matrix: numpy array of shape (size of current layer, size of previous layer)
    b -- bias vector, numpy array of shape (size of the current layer, 1)

    Returns:
    Z -- the input of the activation function, also called pre-activation parameter
    cache -- a python tuple containing "A", "W" and "b"; stored for computing the backward pass efficiently
    """
    Z = np.dot(W, A) + b
    assert Z.shape == (W.shape[0], A.shape[1])

    cache = (A, W, b)
    return Z, cache
db9120f983b20ea67e9806c71b32463f47fe2839
26,568
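A quick sanity check of the record above — a minimal sketch with made-up layer sizes, assuming numpy and the function as defined:

import numpy as np

# Hypothetical shapes: 3 units in the previous layer, 2 in the current, 4 examples.
A = np.random.randn(3, 4)
W = np.random.randn(2, 3)
b = np.zeros((2, 1))

Z, cache = linear_forward(A, W, b)
print(Z.shape)  # (2, 4) -- one pre-activation per unit per example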
from typing import Sequence
from typing import Match

import glob

async def mp_force(p: 'Player', m: 'Match', msg: Sequence[str]) -> str:
    """Force a player into the current match by name."""
    if len(msg) != 1:
        return 'Invalid syntax: !mp force <name>'

    if not (t := await glob.players.get(name=' '.join(msg))):
        return 'Could not find a user by that name.'

    await t.join_match(m, m.passwd)
    return 'Welcome.'
58f8d0c8ad0e623973b49792253344cba430b8d5
26,570
def draw_concat(Gs, Zs, reals, NoiseAmp, in_s, mode, opt):
    """get image at previous scale"""
    G_z = in_s
    if Gs:
        if mode == 'rand':
            count = 0
            for G, Z_opt, real_curr, real_next, noise_amp in zip(Gs, Zs, reals, reals[1:], NoiseAmp):
                if count == 0:
                    z = functions.generate_noise([1, Z_opt.shape[2], Z_opt.shape[3]])
                    z = Tensor(np.broadcast_to(z, (1, 3, z.shape[2], z.shape[3])))
                else:
                    z = Tensor(functions.generate_noise([opt.nc_z, Z_opt.shape[2], Z_opt.shape[3]]))
                G_z = G_z[:, :, 0:real_curr.shape[2], 0:real_curr.shape[3]]
                z_in = noise_amp * z + G_z
                G_z = G(z_in, G_z)
                G_z = Tensor(imresize(G_z.asnumpy(), 1 / opt.scale_factor, opt))
                G_z = G_z[:, :, 0:real_next.shape[2], 0:real_next.shape[3]]
                count += 1
        if mode == 'rec':
            count = 0
            for G, Z_opt, real_curr, real_next, noise_amp in zip(Gs, Zs, reals, reals[1:], NoiseAmp):
                G_z = G_z[:, :, 0:real_curr.shape[2], 0:real_curr.shape[3]]
                z_in = noise_amp * Z_opt + G_z
                G_z = G(z_in, G_z)
                G_z = Tensor(imresize(G_z.asnumpy(), 1 / opt.scale_factor, opt))
                G_z = G_z[:, :, 0:real_next.shape[2], 0:real_next.shape[3]]
                count += 1
    return G_z
a52db339e6e657148dcf74ef815dd265929f545f
26,571
def read(filename):
    """
    Read TOUGHREACT chemical input file.

    Parameters
    ----------
    filename : str
        Input file name.
    """
    with open_file(filename, "r") as f:
        out = read_buffer(f)

    return out
acf88a707048bed3f0c59aab3c1f328a5f0d8bf5
26,573
def visit_bottomup(f, d):
    """Visits and rewrites a nested dict ``d`` from the bottom to the top,
    applying the transform function ``f`` to every value."""
    if isinstance(d, dict):
        return f({k: visit_bottomup(f, v) for (k, v) in d.items()})
    else:
        return f(d)
9fb4884f1280afe06a1819a44e3055c1173284b1
26,574
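A small illustration of the visit_bottomup record above, using a hypothetical transform that doubles integer leaves and passes everything else through:

# Double every leaf integer, leaving dict structure intact.
double_leaves = lambda v: v * 2 if isinstance(v, int) else v

nested = {"a": 1, "b": {"c": 2, "d": 3}}
print(visit_bottomup(double_leaves, nested))
# {'a': 2, 'b': {'c': 4, 'd': 6}}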
def processNonTerminal(nt):
    """
    Finds the rule expansion for a nonterminal and returns its expansion.
    """
    return processRHS(grammar.getRHS(nt))
c938ee877e8b66d9ed9aad6cf3708b5d2241279d
26,576
def get_attributes_as_highlighted_html(rid):
    """Get column descriptions for a given CSV file.

    The columns are described in EML attribute elements.
    """
    return {
        k: dex.util.get_etree_as_highlighted_html(v)
        for k, v in get_attributes_as_etree(rid).items()
    }
0e19ccca6d09b6cdaac9361a02c5e38e84ae9190
26,577
import timeit
import multiprocessing
import itertools

import numpy as np

def bound_update(unary, X, kernel, bound_lambda, bound_iteration=20, batch=False,
                 manual_parallel=False):
    """
    Here in this code, Q refers to Z in our paper.
    """
    start_time = timeit.default_timer()
    print("Inside Bound Update . . .")
    N, K = unary.shape
    oldE = float('inf')

    # Initialize the unary and Normalize
    if manual_parallel == False:
        # print 'Parallel is FALSE'
        Q = normalize(-unary)
        for i in range(bound_iteration):
            printProgressBar(i + 1, bound_iteration, length=12)
            additive = -unary
            mul_kernel = kernel.dot(Q)
            Q = -bound_lambda * mul_kernel
            additive = additive - Q
            Q = normalize(additive)
            E = entropy_energy(Q, unary, kernel, bound_lambda, batch)
            # print('entropy_energy is ' + repr(E) + ' at iteration ', i)
            report_E = E
            if (i > 1 and (abs(E - oldE) <= 1e-5 * abs(oldE))):
                print('Converged')
                break
            else:
                oldE = E.copy()
                oldQ = Q.copy()
                report_E = E
    else:
        print('Manual Parallel is TRUE')
        Q = normalize(-unary)
        init(kernel_s_data=n2m(kernel.data))
        init(kernel_s_indices=n2m(kernel.indices))
        init(kernel_s_indptr=n2m(kernel.indptr))
        init(kernel_s_shape=n2m(kernel.shape))
        irange = range(N)
        krange = range(K)
        for i in range(bound_iteration):
            printProgressBar(i + 1, bound_iteration, length=15)
            additive = -unary
            init(Q_s=n2m(Q))
            pool = multiprocessing.Pool(processes=5, initializer=init,
                                        initargs=list(SHARED_VARS.items()))
            pool.map(mpassing, itertools.product(irange, krange))
            _, Q = get_shared_arrays('kernel_s_indptr', 'Q_s')
            Q = -bound_lambda * Q
            # assert (Q.all() == SHARED_array['Q_s'].all())
            additive -= Q
            Q = normalize(additive)
            pool.close()
            pool.join()
            pool.terminate()
            E = entropy_energy(Q, unary, kernel, bound_lambda, batch)
            # print('entropy_energy is ' + repr(E) + ' at iteration ', i)
            if (i > 1 and (abs(E - oldE) <= 1e-4 * abs(oldE))):
                print('Converged')
                break
            else:
                oldE = E.copy()
                oldQ = Q.copy()
                report_E = E

    elapsed = timeit.default_timer() - start_time
    print('\n Elapsed Time in bound_update', elapsed)
    l = np.argmax(Q, axis=1)
    ind = np.argmax(Q, axis=0)
    C = X[ind, :]
    return l, C, ind, Q, report_E
c5b1be8fb881d2b2bb8596843747441c5e6ca99a
26,578
from typing import List

# Assumed Django imports for the model and field types used below.
from django.db.models import Field, ManyToManyField, Model
from django.db.models.fields.reverse_related import ManyToOneRel

def get_walkable_field_names(model: Model, field_types: List[Field] = None) -> List[str]:
    """Get a list with names of all fields that can be walked"""
    if field_types is None:
        field_types = [ManyToManyField, ManyToOneRel]

    fields_to_walk = []
    fields = getattr(model, '_meta').get_fields()
    for field in fields:
        if isinstance(field, tuple(field_types)):
            fields_to_walk.append(field.name)

    return fields_to_walk
0ef53ce31072913861e6e78a22eb387b2e185ca8
26,579
# Assumed import: the email-validator package provides these names.
from email_validator import validate_email, EmailNotValidError

def normalize(email):
    """
    Returns a NormalizedEmail with the appropriate contents
    """
    try:
        v = validate_email(email)
        return NormalizedEmail(v['email'])
    except EmailNotValidError as e:
        return NormalizedEmail(str(e), error=True)
4379966f68810b22c06b81597b706454f46a8247
26,580
def valid_flavor_list():
    """
    This includes at least 'BIAS', 'LIGHT' based on forDK.tar.gz samples.
    Capitalization is inconsistent in the forDK.tar.gz samples.
    Need to keep an eye out for additional valid flavors to add.
    """
    # not sure how to deal with reduced image flavors that I've invented:
    #     REDUCED, INVVAR, BITMASK
    valid_flavors = ['BIAS', 'LIGHT', 'FLAT', 'MASK']

    return valid_flavors
b12451ff4725f5fcea3592373ef6e53cbe04b23c
26,581
from pathlib import Path

async def show_tile_with_body(
    request: Request,
    response: Response,
    body: TileRequest,
    path: Path = Depends(imagepath_parameter),
    extension: OutputExtension = Depends(extension_path_parameter),
    headers: ImageRequestHeaders = Depends(),
    config: Settings = Depends(get_settings)
):
    """
    **`GET with body` - when a GET with URL encoded query parameters is not
    possible due to URL size limits, a POST with body content must be used.**

    Get an 8-bit normalized tile optimized for visualisation, with given
    channels, focal planes and timepoints.

    If multiple channels are given (slice or selection), they are merged.
    If multiple focal planes or timepoints are given (slice or selection),
    a reduction function must be provided.

    **By default**, all image channels are used and when the image is
    multidimensional, the tile is extracted from the median focal plane at
    first timepoint.
    """
    return await _show_tile(
        request, response,
        path, **body.dict(),
        normalized=True,
        extension=extension,
        headers=headers,
        config=config
    )
6f6767f4831496796e2cf622a0362fab11dee844
26,583
import bpy

def create_mesh(ob_name, coords, edges=[], faces=[]):
    """Create point cloud object based on given coordinates and name.

    Keyword arguments:
    ob_name -- new object name
    coords -- float triplets eg: [(-1.0, 1.0, 0.0), (-1.0, -1.0, 0.0)]
    """
    # Create new mesh and a new object
    me = bpy.data.meshes.new(ob_name + "Mesh")
    ob = bpy.data.objects.new(ob_name, me)

    # Make a mesh from a list of vertices/edges/faces
    me.from_pydata(coords, edges, faces)

    # Display name and update the mesh
    ob.show_name = True
    me.update(calc_edges=True)

    return ob
b57baf08c2a2b07516e36e741b2aa55054883bf8
26,584
import numpy as np

def get_time_from_datetime(data):
    """
    Returns a timedelta64 series in the form "hh:mm:ss" from ``data``,
    a datetime64 series.
    """
    time = data.apply(lambda x: np.timedelta64(x.hour, 'h')
                      + np.timedelta64(x.minute, 'm')
                      + np.timedelta64(x.second, 's'))
    return time
d7af50beadbb426d7b1d0671cabf16546707a1b6
26,585
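For illustration, the record above can be exercised with a hypothetical pandas series (pandas is an assumption here; the function only requires an object with an ``.apply`` method):

import pandas as pd

stamps = pd.Series(pd.to_datetime(["2021-01-01 08:30:15", "2021-01-01 17:05:00"]))
print(get_time_from_datetime(stamps))
# 0   0 days 08:30:15
# 1   0 days 17:05:00
# dtype: timedelta64[ns]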
# Assumed imports for the NLTK helpers used below.
from collections import defaultdict

from nltk import pos_tag, word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

def tokenize(text):
    """Converts text to tokens.

    Case-folds, removes stop words, lemmatises text. This is the same
    tokenization as is done on the training data for the model.
    """
    # Set up dict for lemmatisation
    tag_map = defaultdict(lambda: "n")  # by default, assume nouns
    tag_map['J'] = "a"  # adjectives
    tag_map['V'] = "v"  # verbs
    tag_map['R'] = "r"  # adverbs

    # Get stopword list
    stops = stopwords.words("english")

    # Create lemmatizer object
    lemma = WordNetLemmatizer()

    # Case fold
    tokens = word_tokenize(text.lower())

    # Tag tokens with parts of speech
    tokens = [(token[0], tag_map[token[1][0]]) for token in pos_tag(tokens)]

    # Lemmatise text
    tokens = [lemma.lemmatize(word=w[0], pos=w[1]) for w in tokens]

    # Remove stop & short words
    tokens = [w for w in tokens if w not in stops and len(w) > 2]

    return tokens
d7c6a124d13bd2d0360a22edc24a4d079bb9d93b
26,587
def do_command(client, command, indices, params=None):
    """
    Do the command.
    """
    if command == "alias":
        return alias(client, indices, alias=params['name'], remove=params['remove'])
    if command == "allocation":
        return allocation(client, indices, rule=params['rule'])
    if command == "bloom":
        return bloom(client, indices, delay=params['delay'])
    if command == "close":
        return close(client, indices)
    if command == "delete":
        return delete(client, indices)
    if command == "open":
        return opener(client, indices)
    if command == "optimize":
        return optimize(
            client, indices, max_num_segments=params['max_num_segments'],
            delay=params['delay'], request_timeout=params['request_timeout']
        )
    if command == "replicas":
        return replicas(client, indices, replicas=params['count'])
    if command == "snapshot":
        return create_snapshot(
            client, indices=indices, name=params['name'],
            prefix=params['prefix'], repository=params['repository'],
            ignore_unavailable=params['ignore_unavailable'],
            include_global_state=params['include_global_state'],
            partial=params['partial'],
            wait_for_completion=params['wait_for_completion'],
            request_timeout=params['request_timeout'],
        )
e33325a941ee4ed7e50c90cc8fb1067abe377d52
26,588
def filetostr(filename):
    """Read a file and return its raw contents (bytes), or None if it
    cannot be read."""
    try:
        with open(filename, "rb") as stream:
            return stream.read()
    except OSError:
        return None
52683ada0008fb22e1c48301adf3fd7c48c21d06
26,589
from datetime import timedelta

def valid_for(days):
    """Return a text saying for how many days the certificate is valid,
    or in years if it spans more than one year."""
    delta = timedelta(days=days)
    value = ''
    if delta.days / 365 > 1:
        value += '%d years' % (delta.days / 365)
    else:
        value += '%d days' % delta.days

    return value
fe26213d17477602a8c9dc60ed3fe62297df5390
26,590
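Sample behavior of the valid_for record above (note the integer truncation in '%d': exactly one year still reads as days):

print(valid_for(90))    # '90 days'
print(valid_for(365))   # '365 days'
print(valid_for(1095))  # '3 years'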
def vcr_config() -> dict:
    """VCR config that adds a custom before_record_request callback."""
    nessie_test_config.cleanup = False
    return {
        "before_record_request": before_record_cb,
    }
8b61e1c825e1470571445e60da8880fa8dbaa14b
26,592
import mimetypes
import binascii

def main():  # pylint: disable=R0914,R0912,R0915
    """The main method"""
    images = {}
    materials = {}
    meshes = {}
    objects = {}
    groups = {}
    texts = {}

    # Set up our FileDescription
    fileid = FileId(filename=bpy.path.abspath(bpy.data.filepath))
    file_descr = FileDescription(file=fileid)
    file_descr.assets = []

    def get_file_id(obj):
        """If the object is in a library, return a file for it,
        else return this fileid."""
        if obj.library:
            return FileId(filename=relpath(obj.library.filepath))
        return fileid

    """
    Images: Can be packed, so expose them with 'x-blender.image' mimetype.
    """
    for image in bpy.data.images:
        image_fileid = get_file_id(image)
        image_mimetype = mimetypes.guess_type(image.filepath)[0]
        if image.source == 'FILE' and not image.packed_file:
            image_fileid = FileId(filename=relpath(
                image.filepath,
                start=image.library.filepath if image.library else None))
        if image.packed_file:
            image_mimetype = 'application/x-blender.image'

        asset_descr = AssetDescription(asset=AssetId(subname=image.name,
                                                     mimetype=image_mimetype,
                                                     file=image_fileid))
        if image.packed_file:
            asset_descr.metadata = metadata.MetaDataBlenderImage.extract({'image': image})
        file_descr.assets.append(asset_descr)
        images[image.name] = asset_descr

    """
    Materials: Have images as dependencies.
    """
    for material in bpy.data.materials:
        asset_descr = AssetDescription(asset=AssetId(subname=material.name,
                                                     mimetype='application/x-blender.material',
                                                     file=get_file_id(material)))
        asset_descr.dependencies = []
        image_names = {}
        for slot in material.texture_slots:
            if slot and slot.texture and slot.texture.type == 'IMAGE' and slot.texture.image:
                image_names[slot.texture.image.name] = None
        for name in image_names:
            if name in images:
                dep = images[name].asset
                asset_descr.dependencies.append(dep)
        file_descr.assets.append(asset_descr)
        materials[material.name] = asset_descr

    """
    Meshes: Have materials as dependencies, and the images assigned to
    their faces.
    """
    for mesh in bpy.data.meshes:
        mesh.update(calc_tessface=True)
        asset_descr = AssetDescription(asset=AssetId(subname=mesh.name,
                                                     mimetype='application/x-blender.mesh',
                                                     file=get_file_id(mesh)))
        asset_descr.dependencies = []
        # Collect materials from the mesh
        for material in mesh.materials:
            if material:
                if material.name in materials:
                    dep = materials[material.name].asset
                    asset_descr.dependencies.append(dep)
        # Collect images from the faces
        image_names = {}
        for face in mesh.uv_textures:
            for data in face.data:
                if data.image:
                    image_names[data.image.name] = None
        for name in image_names:
            if name in images:
                dep = images[name].asset
                asset_descr.dependencies.append(dep)
        asset_descr.metadata = metadata.MetaDataBlenderMesh.extract({'mesh': mesh})
        file_descr.assets.append(asset_descr)
        meshes[mesh.name] = asset_descr

    """
    Objects: Have a Mesh as a dependency, and materials assigned to the object.
    """
    for obj in bpy.data.objects:
        if obj.type == 'MESH':
            object_type = ''
        else:
            object_type = '-' + str(obj.type).lower()
        type = obj.type.lower()  # (unused in the original)
        asset_descr = AssetDescription(asset=AssetId(subname=obj.name,
                                                     mimetype='application/x-blender.object' + object_type,
                                                     file=get_file_id(obj)))
        asset_descr.dependencies = []
        # Add the mesh as dependency
        if obj.data and obj.data.name in meshes:
            dep = meshes[obj.data.name].asset
            asset_descr.dependencies.append(dep)
        # Now the materials
        for slot in obj.material_slots:
            if slot and slot.material and slot.link == 'OBJECT':
                if slot.material.name in materials:
                    dep = materials[slot.material.name].asset
                    asset_descr.dependencies.append(dep)
        asset_descr.metadata = metadata.MetaDataBlenderObject.extract({'object': obj})
        file_descr.assets.append(asset_descr)
        objects[obj.name] = asset_descr

    """
    Groups: Have their objects as dependencies.
    """
    for group in bpy.data.groups:
        asset_descr = AssetDescription(asset=AssetId(subname=group.name,
                                                     mimetype='application/x-blender.group',
                                                     file=get_file_id(group)))
        asset_descr.dependencies = []
        # Add the objects as dependencies
        for obj in group.objects:
            dep = objects[obj.name].asset
            asset_descr.dependencies.append(dep)
        file_descr.assets.append(asset_descr)
        groups[group.name] = asset_descr

    """
    Texts: Can be packed, so expose them with 'x-blender.text' mimetype.
    """
    for text in bpy.data.texts:
        text_fileid = get_file_id(text)
        text_mimetype = 'application/x-blender.text'
        if not text.is_in_memory:
            path = text.filepath if text.filepath else 'UNKNOWN'
            text_fileid = FileId(filename=relpath(
                path,
                start=text.library.filepath if text.library else None))
            text_mimetype = mimetypes.guess_type(path)[0]
            if not text_mimetype:
                text_mimetype = 'text/plain'
        asset_descr = AssetDescription(asset=AssetId(subname=text.name,
                                                     mimetype=text_mimetype,
                                                     file=text_fileid))
        file_descr.assets.append(asset_descr)
        texts[text.name] = asset_descr

    data = SerializeThriftMsg(file_descr)
    print('-**-')
    print(binascii.hexlify(data))
    print('-**-')
66494f7904c956f67614cb159c346ad3b0e5283b
26,593
import random

def get_best(get_fitness, optimalFitness, geneSet, display, show_ion, target,
             parent_candidates, seed=None, hull=None, simplex=None, verbose=0,
             hull_bounds=[0, 1], inner_search=True, parent_cap=25,
             mutation_cap=1000):
    """
    the primary public function of the engine

    Parameters
    ----------
    get_fitness : function
        the fitness function. Usually based on a molecular property.
        An example can be found in the salt_generator module
    optimalFitness : float 0-1
        the user specifies how close the engine should get to the target
        (1 = exact)
    geneSet : object
        consists of atomtypes (by periodic number), rdkit molecular fragments
        and custom fragments (that are currently hard coded into the engine).
        These are the building blocks that the engine can use to mutate the
        molecular candidate via the _mutate() function
    display : function
        for printing results to the screen. Display is called for every
        accepted mutation
    show_ion : function
        for printing results to the screen. show_ion is called when a
        candidate has achieved the desired fitness score and is returned
        by the engine
    target : array, float, or int
        the desired property value to be achieved by the engine. If an array,
        a model containing multi-output targets must be supplied to the engine
    parent_candidates : array
        an array of smiles strings that the engine uses to choose a starting
        atomic configuration
    seed : int, optional
        optional randint seed for unittest consistency
    hull : pandas DataFrame, optional
        nxm pandas DataFrame to use convex hull search strategy. hull columns
        should be the same properties used in the genetic algorithm fitness
        test
    simplex : array, optional
        array to access boundary datapoints in the convex hull. This is used
        during target resampling defined by the convex hull/simplex
    verbose : int, optional, default 0
        0 : most verbose. Best child, parent/target resampling,
            sanitization failure
        1 : parent/target resampling, solution metadata, sanitization failure
        2 : solution metadata, sanitization failure
        3 : target resampling, csv-formatted solution metadata
        4 : csv-formatted solution metadata
    hull_bounds : array, optional
        if hull and simplex are not none, hull_bounds describes the proximity
        convex_search should be to the simplex
    inner_search : bool, optional
        if hull and simplex are not none, inner_search specifies if
        convex_search should return values only within the convex hull

    Returns
    ----------
    child : Chromosome object
        the accepted molecular configuration. See Chromosome class for details
    """
    mutation_attempts = 0
    attempts_since_last_adoption = 0
    parents_sampled = 0
    targets_sampled = 0
    if seed:
        random.seed(seed)
    bestParent = _generate_parent(parent_candidates, get_fitness, target)
    if verbose == 0:
        display(bestParent, "starting structure", target)
    if bestParent.Fitness >= optimalFitness:
        return bestParent
    while True:
        with suppress_rdkit_sanity():
            child, mutation = _mutate(bestParent, geneSet, get_fitness, target)
        mutation_attempts += 1
        attempts_since_last_adoption += 1
        if attempts_since_last_adoption > mutation_cap:
            if hull is not None and parents_sampled > parent_cap:
                target = convex_search(hull, simplex, hull_bounds, inner_search)
                targets_sampled += 1
                if verbose in [0, 1, 3]:
                    print("assigning new target: {}".format(target))
            child = _generate_parent(parent_candidates, get_fitness, target)
            parents_sampled += 1
            if verbose in [0, 1]:
                print("starting from new parent")
        elif bestParent.Fitness >= child.Fitness:
            continue
        if verbose == 0:
            display(child, mutation, target)
        attempts_since_last_adoption = 0
        if child.Fitness >= optimalFitness:
            sim_score, sim_index = molecular_similarity(child, parent_candidates)
            molecular_relative = parent_candidates[sim_index]
            if verbose in [0, 1, 2]:
                show_ion(child.Genes, target, mutation_attempts, sim_score,
                         molecular_relative)
            return child
        bestParent = child
5ebc9757b4518eabf2968bda9dc6cabd10e5cdb1
26,594
from datetime import datetime

def generate_info_endpoint_reply(request):
    """
    This just returns a hardcoded introspection string.
    """
    available_api_versions = {}
    for ver in optimade_supported_versions:
        available_api_versions[optimade_supported_versions[ver]] = request['baseurl'] + ver

    response = {
        "links": {
            "base_url": request['baseurl']
        },
        "data": [
            {
                "type": "info",
                "id": "/",
                "attributes": {
                    "api_version": 'v' + request['version'],
                    "available_api_versions": available_api_versions,
                    "formats": ["json"],
                    "entry_types_by_format": {
                        "json": ["structure", "calculation"],
                    },
                    "available_endpoints": ["entry", "all", "info"]
                }
            }
        ],
        "meta": {
            "query": {
                "representation": request['representation']
            },
            "api_version": request['version'],
            # `datetime` is the class imported above, so call `datetime.now()`
            # directly (the original `datetime.datetime.now()` would raise
            # AttributeError).
            "time_stamp": datetime.now().isoformat(),
            "data_returned": 0,
            "more_data_available": False,
        }
    }
    return response
93e0b818c95742d0b7db679e0b3d3ade034cc0e8
26,595
import numpy as np

def M(s, o):
    """
    M -- mean absolute relative error between predictions and observations.

    s -- array of predictions (or simulated)
    o -- array of observations (or targets)
    """
    s, o = np.array(s), np.array(o)
    return np.nansum(np.abs(np.divide(s - o, o))) / len(o)
5c53c8b02c88a37204f478d71fb74f2536848af0
26,596
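A worked check of the M record above:

sim = [1.1, 2.0, 2.7]
obs = [1.0, 2.0, 3.0]
print(M(sim, obs))  # (0.1 + 0.0 + 0.1) / 3 = 0.0666...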
def check_link_exist(destination: str):
    """Check if a link already exists; return the short link if found,
    otherwise return None."""
    session: Session = db_session.create_session()
    # Use a bound parameter instead of string interpolation, addressing
    # the SQL injection the original TODO comment warned about.
    result = session.execute(
        "SELECT short_link FROM links WHERE destination = :destination",
        {"destination": destination},
    )
    for row in result:
        return row[0]
    else:
        return None
57f27da14c4ee551f6eb1e9424cd2b446658ae2f
26,597
from communities.models import Community

def smart_404(request):
    """Returns a 404 message that tries to help the user."""
    base_url = settings.HOST_URL
    not_found = {
        'type': None,
        'redirect_url': base_url
    }
    path_arguments = request.path.split('/')[1:]
    if path_arguments and path_arguments[0].isdigit():
        try:
            c = Community.objects.get(pk=path_arguments[0])
            # Length guards added: the original indexed path_arguments[1]
            # and [2] unconditionally, which raised IndexError on short paths.
            if len(path_arguments) > 1 and path_arguments[1] == 'issues':
                not_found['type'] = 'no_issue'
                not_found['redirect_url'] = base_url + '/' + str(c.pk) + '/issues/'
                if len(path_arguments) > 2 and path_arguments[2] == 'procedures':
                    not_found['type'] = 'no_procedure'
                    not_found['redirect_url'] = base_url + '/' + str(c.pk) + '/issues/procedures/'
            else:
                not_found['type'] = 'no_community_route'
                not_found['redirect_url'] = base_url + '/' + str(c.pk) + '/'
        except Community.DoesNotExist:
            not_found['type'] = 'no_community'
    return {'not_found': not_found}
62252efb658054b88933109ce7b162cd43962186
26,598
def get_basemap():
    """
    Use cached copy of basemap from the script's parent folder,
    otherwise download basemap from imgur
    """
    url = "https://i.imgur.com/yIVogZH.png"
    image = "basemap.png"
    location = PATH + image
    print(location)
    try:
        basemap = cbook.get_sample_data(location)
    except BaseException:
        download_img(url, image)
        basemap = cbook.get_sample_data(location)
    return basemap
b1d8267da582206c60006268b5a576cabbe30d28
26,599
def pairs_to_annotations(annotation_pairs):
    """
    Convert an array of annotation pairs to an annotation array.

    :param annotation_pairs: list(AnnotationPair) - annotations
    :return: list(Annotation)
    """
    annotations = []
    for ap in annotation_pairs:
        if ap.ann1 is not None:
            annotations.append(ap.ann1)
        if ap.ann2 is not None:
            annotations.append(ap.ann2)
    return annotations
b0e08889f541b14d596616d08b366f59b7f8ddd3
26,600
# Assumed import: `string_types` comes from the six compatibility library.
from six import string_types

def sanitize_param(value, valid_characters=valid_chars, character_map=mapped_chars,
                   invalid_character='X'):
    """Clean incoming parameters (strings or lists)"""
    if isinstance(value, string_types):
        return sanitize_text(value, valid_characters=valid_characters,
                             character_map=character_map,
                             invalid_character=invalid_character)
    elif isinstance(value, list):
        return [sanitize_text(x, valid_characters=valid_characters,
                              character_map=character_map,
                              invalid_character=invalid_character)
                for x in value]
    else:
        raise Exception('Unknown parameter type (%s)' % (type(value)))
e3ed0d1a62bdbff0c2b3a204836d4d3afe467ced
26,601
def default_meta(inherit=True):
    """Initialize default meta for particular plugin.

    Default Meta is inherited by all children, unlike Meta, which is unique
    per plugin.

    :param inherit: Whether to copy the parent's default meta
    """
    def decorator(plugin):
        plugin._default_meta_init(inherit)
        return plugin

    return decorator
174b37f389160c007e7a609a78b5071031970004
26,602
def get_default_database_name():
    """
    gets default database name.

    :rtype: str
    """
    return get_component(DatabasePackage.COMPONENT_NAME).get_default_database_name()
23bf228a284b5880a5155beced606ef4d6f81d16
26,603
def npm_local_packages():
    """
    Get list of local packages

    :return: a tuple of dicts
    """
    local_dependencies = {}
    local_dev_dependencies = {}
    package_json = get_package_json()

    for name, version in package_json.get("dependencies", {}).items():
        match = LOCAL_PACKAGE.match(version)
        if match:
            [local_dependencies[name]] = match.groups()

    for name, version in package_json.get("devDependencies", {}).items():
        match = LOCAL_PACKAGE.match(version)
        if match:
            [local_dev_dependencies[name]] = match.groups()

    return local_dependencies, local_dev_dependencies
cb9f52bb97f402b00e3dac0c6a69332f2199ccd5
26,604
import numpy as np

def lagrangian_descriptor(u, v, p_value=0.5):
    """
    Vector field equation for Lagrangian descriptor.

    Parameters
    ----------
    u : ndarray, shape(n,2)
        Points at which the vector field is evaluated (documented here for
        completeness; only its shape and second column are used).
    v : ndarray, shape(n,2)
        Vector field at given point.
    p_value : float, optional
        Exponent in Lagrangian descriptor definition.
        0 is the action-based LD,
        0 < p_value < 1 is the Lp quasinorm,
        1 <= p_value < 2 is the Lp norm LD,
        2 is the arclength LD.
        The default is 0.5.

    Returns
    -------
    LD : ndarray, shape(n,1)
        Vector field for Lagrangian descriptor dimension.
    """
    if p_value == 0:
        LD = np.abs(u[:, 1] * v[:, 0])
    elif p_value > 0:
        LD = np.sum(np.abs(v) ** p_value, axis=1)
    else:
        LD = np.zeros(len(u[:, 0]))
    return LD
ddd6bb7fb8538b6d44f2507e7b065cbe70338c39
26,605
import numpy as np
# The referenced gist imports scipy's cKDTree under the alias KDT.
from scipy.spatial import cKDTree as KDT

def xymatch(x1, y1, x2, y2, tol=None, nnearest=1):
    """Fast cross-matching of xy coordinates:
    from https://gist.github.com/eteq/4599814"""
    x1 = np.array(x1, copy=False)
    y1 = np.array(y1, copy=False)
    x2 = np.array(x2, copy=False)
    y2 = np.array(y2, copy=False)

    if x1.shape != y1.shape:
        raise ValueError('x1 and y1 do not match!')
    if x2.shape != y2.shape:
        raise ValueError('x2 and y2 do not match!')

    # this is equivalent to, but faster than just doing np.array([x1, y1])
    coords1 = np.empty((x1.size, 2))
    coords1[:, 0] = x1
    coords1[:, 1] = y1

    # this is equivalent to, but faster than just doing np.array([x2, y2])
    coords2 = np.empty((x2.size, 2))
    coords2[:, 0] = x2
    coords2[:, 1] = y2

    kdt = KDT(coords2)
    if nnearest == 1:
        ds, idxs2 = kdt.query(coords1)
    elif nnearest > 1:
        retval = kdt.query(coords1, nnearest)
        ds = retval[0]
        idxs2 = retval[1][:, -1]
    else:
        raise ValueError('invalid nnearest ' + str(nnearest))

    idxs1 = np.arange(x1.size)

    if tol is not None:
        msk = ds < tol
        idxs1 = idxs1[msk]
        idxs2 = idxs2[msk]
        ds = ds[msk]

    return idxs1, idxs2, ds
0c81add24308fdbe90144776fb4b72e9801ddd11
26,606
def get_infection_probas_mean_field(probas, transmissions):
    """
    - probas[i,s] = P_s^i(t)
    - transmissions = csr sparse matrix of i, j, lambda_ij(t)
    - infection_probas[i] = sum_j lambda_ij P_I^j(t)
    """
    infection_probas = transmissions.dot(probas[:, 1])
    return infection_probas
70d5585b405bdff54f65bced166dead6ae45d26b
26,607
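A sketch exercising the mean-field record above with a hypothetical 3-node contact network (scipy is an assumption):

import numpy as np
from scipy.sparse import csr_matrix

# lambda_ij on the edges of a small contact network.
transmissions = csr_matrix(np.array([[0.0, 0.5, 0.0],
                                     [0.5, 0.0, 0.2],
                                     [0.0, 0.2, 0.0]]))
# probas columns: [P_S, P_I, P_R] per node.
probas = np.array([[0.9, 0.1, 0.0],
                   [0.5, 0.5, 0.0],
                   [1.0, 0.0, 0.0]])

print(get_infection_probas_mean_field(probas, transmissions))
# [0.25 0.05 0.1 ]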
import time
import ast

import numpy as np

def eval_task(algo, specific_testsets, measures, head_items, crossfold_index,
              save_path=None, load_path=None, uid_plus_iid_to_row=None):
    """
    Evaluate on specific testsets.
    This function exists to make testset evaluation easier to parallelize.
    """
    ret = []
    if load_path and uid_plus_iid_to_row is None:
        tic = time.time()
        load_from = '{}_seed0_fold{}_all_predictions.txt'.format(load_path, crossfold_index)
        print('load_from', load_from)
        with open(load_from, 'r') as file_handler:
            content = ['[' + x.strip('\n') + ']' for x in file_handler.readlines()]
            assert(content[0] == '[uid,iid,r_ui,est,details,crossfold_index]')
            all_predictions = [Prediction(*ast.literal_eval(line)[:-1]) for line in content[1:]]
        uid_plus_iid_to_row = {}
        for prediction in all_predictions:
            uid_plus_iid = str(prediction[0]) + '_' + str(prediction[1])
            uid_plus_iid_to_row[uid_plus_iid] = prediction
        print('Loading predictions within eval_task took {}'.format(time.time() - tic))

    for key, specific_testset in specific_testsets.items():
        start_specific_testset = time.time()
        if uid_plus_iid_to_row:
            # if this dict is populated we should use it.
            # if it is empty we can't use it, need to run algo.test
            predictions = []
            tic = time.time()
            if isinstance(specific_testset, np.ndarray):
                iterate_on = specific_testset.tolist()
            else:
                iterate_on = specific_testset
            for prediction in iterate_on:
                uid_plus_iid = str(prediction[0]) + '_' + str(prediction[1])
                predictions.append(uid_plus_iid_to_row[uid_plus_iid])
            # print('Took {} seconds to load {} predictions from uid_plus_iid_to_row'.format(
            #     time.time() - tic, len(predictions)))
        else:
            predictions = algo.test(specific_testset)
            if save_path and load_path is None and uid_plus_iid_to_row is None:
                # if you just loaded the predictions, don't save them again,
                # waste of time...
                with open('{}_seed0_fold{}_{}_predictions.txt'.format(
                        save_path, crossfold_index, key), 'w') as file_handler:
                    file_handler.write('uid,iid,r_ui,est,details,crossfold_index\n')
                    for prediction in predictions:
                        file_handler.write(','.join(
                            [str(x) for x in prediction] + [str(crossfold_index)]) + '\n')

        if not predictions:
            ret.append([key, {}, 0, 0])
            continue

        test_measures = {}
        for m in measures:
            tic = time.time()
            eval_func = getattr(accuracy, m.lower())
            result = eval_func(predictions, verbose=0)
            # NMV 10/26: rewriting this whole chunk b/c we refactored accuracy.py.
            # if 'ndcg' in m:
            if m == 'list_metrics':
                tail_result = eval_func(predictions, verbose=0, head_items=head_items)
                for metric_name in result.keys():
                    mean_val, frac_of_users = result[metric_name]
                    tail_mean_val, tail_frac = tail_result[metric_name]
                    test_measures[metric_name] = mean_val
                    test_measures[metric_name + '_frac'] = frac_of_users
                    test_measures['tail' + metric_name] = tail_mean_val
                    test_measures['tail' + metric_name + '_frac'] = tail_frac
                # sub_measures = m.split('_')
                # for i_sm, sub_measure in enumerate(sub_measures):
                #     mean_val, frac_of_users = result[i_sm]
                #     tail_mean_val, _ = tail_result[i_sm]
                #     test_measures[sub_measure] = mean_val
                #     test_measures[sub_measure + '_frac'] = frac_of_users
                #     test_measures['tail' + sub_measure] = tail_mean_val
            else:
                test_measures[m] = result

        test_time = time.time() - start_specific_testset
        ret.append([key, test_measures, test_time, len(specific_testset)])

    return ret
4f5171ea4473505237b2c353e164ba4b78d07357
26,608
# Assumed imports: `np` and `la` for numpy and numpy.linalg.
import numpy as np
import numpy.linalg as la

def newton(RJ, x0, verbose=False, rtol=1.0e-6, atol=1.0e-10, miter=50,
           linesearch='none', bt_tau=0.5, bt_c=1.0e-4):
    """
    Manually-coded Newton-Raphson so that I can output convergence info,
    if requested.

    Parameters:
      RJ          function returning the residual + jacobian
      x0          initial guess

    Optional:
      verbose     verbose output
      rtol        relative tolerance
      atol        absolute tolerance
      miter       maximum iterations
      linesearch  available options: "none" and "backtracking"
      bt_tau      tau factor for backtracking line search
      bt_c        c factor for backtracking line search
    """
    R, J = RJ(x0)
    nR = la.norm(R)
    nR0 = nR
    x = np.copy(x0)

    i = 0
    if verbose:
        print("Iter.\tnR\t\tnR/nR0\t\tcond\t\tlinesearch")
        print("%i\t%e\t%e\t" % (i, nR, nR / nR0))

    while (nR > rtol * nR0) and (nR > atol):
        a = la.solve(J, R)
        if linesearch == 'none':
            f = 1.0
        elif linesearch == 'backtracking':
            f = backtrack(RJ, R, J, x, -a, tau=bt_tau, c=bt_c, verbose=verbose)
        else:
            raise ValueError("Unknown linesearch type.")
        x -= (a * f)

        R, J = RJ(x)
        nR = la.norm(R)
        i += 1
        if verbose:
            print("%i\t%e\t%e\t%e\t%f" % (i, nR, nR / nR0, la.cond(J), f))

        if i > miter:
            if verbose:
                print("")
            raise MaximumIterations()

    if verbose:
        print("")
    return x
b6baa3288c6f417ca4ec7284237ea35d4f2442dd
26,609
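A minimal sketch of the newton record above on f(x) = x^2 - 2, with the residual and Jacobian packed the way RJ expects (the 1-D system is hypothetical):

import numpy as np

def rj(x):
    # Residual and Jacobian for f(x) = x^2 - 2 = 0.
    return np.array([x[0]**2 - 2.0]), np.array([[2.0 * x[0]]])

root = newton(rj, np.array([1.0]))
print(root)  # ~[1.41421356]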
from typing import List
from pathlib import Path

def gen_oltp_trace(tpcc_weight: str, tpcc_rates: List[int],
                   pattern_iter: int) -> bool:
    """
    Generates the trace by running OLTP TPCC benchmark on the built database

    :param tpcc_weight: Weight for the TPCC workload
    :param tpcc_rates: Arrival rates for each phase in a pattern
    :param pattern_iter: Number of patterns
    :return: True when data generation succeeds
    """
    # Remove the old query_trace/query_text.csv
    Path(DEFAULT_QUERY_TRACE_FILE).unlink(missing_ok=True)

    # Server is running when this returns
    oltp_server = TestOLTPBench(DEFAULT_OLTP_SERVER_ARGS)
    db_server = oltp_server.db_instance
    db_server.run_db()

    # Download the OLTP repo and build it
    oltp_server.run_pre_suite()

    # Load the workload pattern - based on the tpcc.json in
    # testing/oltpbench/config
    test_case_config = DEFAULT_OLTP_TEST_CASE
    test_case_config["weights"] = tpcc_weight
    test_case = TestCaseOLTPBench(test_case_config)

    # Prep the test case build the result dir
    test_case.run_pre_test()

    rates = tpcc_rates * pattern_iter
    config_forecast_data(test_case.xml_config, rates)

    # Turn on query trace metrics tracing
    db_server.execute("SET query_trace_metrics_enable='true'", expect_result=False)

    # Run the actual test
    ret_val, _, stderr = run_command(test_case.test_command,
                                     test_case.test_error_msg,
                                     cwd=test_case.test_command_cwd)
    if ret_val != ErrorCode.SUCCESS:
        LOG.error(stderr)
        return False

    # Clean up, disconnect the DB
    db_server.stop_db()
    db_server.delete_wal()

    if not Path(DEFAULT_QUERY_TRACE_FILE).exists():
        LOG.error(
            f"Missing {DEFAULT_QUERY_TRACE_FILE} at CWD after running OLTP TPCC")
        return False

    return True
8ac09fd8f85d7c83944759829775c3dbb1b0741e
26,610
def plot_hmesh(mesh, box=None, proj='pc', figsize=[9, 4.5], title=None,
               do_save=None, do_lsmask='fesom', color_lsmask=[0.6, 0.6, 0.6],
               linecolor='k', linewidth=0.2, linealpha=0.75, pos_extend=None):
    """
    ---> plot FESOM2 horizontal mesh:
    ___INPUT:___________________________________________________________________
    mesh        : fesom2 mesh object, with all mesh information
    box         : None, list (default: None) regional limitation of plot
                  [lonmin, lonmax, latmin, latmax]
    proj        : str, (default: 'pc') which projection should be used,
                  'pc'=ccrs.PlateCarree(), 'merc'=ccrs.Mercator(),
                  'nps'=ccrs.NorthPolarStereo(), 'sps'=ccrs.SouthPolarStereo(),
                  'rob'=ccrs.Robinson()
    figsize     : list (default: [9, 4.5]) list with figure width and figure
                  height [w, h]
    title       : None, str, (default: None) give every plot panel a title string
                  IF: None       ... no title is plotted
                      'descript' ... use data 'descript' attribute for title string
                      'string'   ... use given string as title
    do_save     : None, str (default: None) if None figure will not be saved,
                  if string figure will be saved, strings must give directory
                  and filename where to save.
    do_lsmask   : None, str (default: 'fesom') plot land-sea mask.
                  If: None         ... no land sea mask is used,
                      'fesom'      ... overlay fesom shapefile land-sea mask
                                       using color color_lsmask
                      'stock'      ... use cartopy stock image as land sea mask
                      'bluemarble' ... use bluemarble image as land sea mask
                      'etopo'      ... use etopo image as land sea mask
    color_lsmask: list, (default: [0.6, 0.6, 0.6]) RGB facecolor value for
                  fesom shapefile land-sea mask patch
    linecolor   : str, list, (default: 'k') either color string or RGB list
    linewidth   : float, (default: 0.2) linewidth of mesh
    linealpha   : float, (default: 0.75) alpha value of mesh
    ___RETURNS:_________________________________________________________________
    fig         : returns figure handle
    ax          : returns list with axes handle
    ____________________________________________________________________________
    """
    fontsize = 12
    str_rescale = None
    n_rc = [1, 1]
    pos_fac = 1.0
    pos_gap = [0.02, 0.02]

    #___________________________________________________________________________
    # make matrix with row column index to know where to put labels
    rowlist = np.zeros((n_rc[0], n_rc[1]))
    collist = np.zeros((n_rc[0], n_rc[1]))
    for ii in range(0, n_rc[0]):
        rowlist[ii, :] = ii
    for ii in range(0, n_rc[1]):
        collist[:, ii] = ii
    rowlist = rowlist.flatten()
    collist = collist.flatten()

    #___________________________________________________________________________
    # create box if not exist
    if box is None:
        box = [-180 + mesh.focus, 180 + mesh.focus, -90, 90]

    #___________________________________________________________________________
    # Create projection
    if proj == 'pc':
        which_proj = ccrs.PlateCarree()
        which_transf = None
    elif proj == 'merc':
        which_proj = ccrs.Mercator()
        which_transf = ccrs.PlateCarree()
    elif proj == 'nps':
        which_proj = ccrs.NorthPolarStereo()
        which_transf = ccrs.PlateCarree()
    elif proj == 'sps':
        which_proj = ccrs.SouthPolarStereo()
        which_transf = ccrs.PlateCarree()
    elif proj == 'rob':
        which_proj = ccrs.Robinson()
        which_transf = ccrs.PlateCarree()

    #___________________________________________________________________________
    # create lon, lat ticks
    xticks, yticks = do_ticksteps(mesh, box)

    #___________________________________________________________________________
    # create figure and axes
    fig, ax = plt.subplots(n_rc[0], n_rc[1], figsize=figsize,
                           subplot_kw=dict(projection=which_proj),
                           gridspec_kw=dict(left=0.06, bottom=0.05, right=0.95,
                                            top=0.95, wspace=0.05, hspace=0.05),
                           constrained_layout=False)

    #___________________________________________________________________________
    # flatten axes if there are more than 1
    if isinstance(ax, np.ndarray):
        ax = ax.flatten()
    else:
        ax = [ax]
    nax = len(ax)

    #___________________________________________________________________________
    # create mesh triangulation
    tri = Triangulation(np.hstack((mesh.n_x, mesh.n_xa)),
                        np.hstack((mesh.n_y, mesh.n_ya)),
                        np.vstack((mesh.e_i[mesh.e_pbnd_0, :], mesh.e_ia)))

    # Limit points to projection box. Note: the original condition
    # `proj=='nps' or proj=='sps' or 'pc'` was always True because the bare
    # string 'pc' is truthy; the intended comparison is spelled out here.
    if proj == 'nps' or proj == 'sps' or proj == 'pc':
        e_idxbox = grid_cutbox_e(tri.x, tri.y, tri.triangles, box, which='hard')
    else:
        points = which_transf.transform_points(which_proj,
                                               tri.x[tri.triangles].sum(axis=1) / 3,
                                               tri.y[tri.triangles].sum(axis=1) / 3)
        xpts, ypts = points[:, 0].flatten().tolist(), points[:, 1].flatten().tolist()
        crs_pts = list(zip(xpts, ypts))
        fig_pts = ax[0].transData.transform(crs_pts)
        ax_pts = ax[0].transAxes.inverted().transform(fig_pts)
        x, y = ax_pts[:, 0], ax_pts[:, 1]
        e_idxbox = (x >= -0.05) & (x <= 1.05) & (y >= -0.05) & (y <= 1.05)
    tri.triangles = tri.triangles[e_idxbox, :]

    #___________________________________________________________________________
    # loop over axes
    for ii in range(0, nax):
        #_______________________________________________________________________
        # set axes extent
        ax[ii].set_extent(box, crs=ccrs.PlateCarree())

        #_______________________________________________________________________
        # add grid mesh on top
        ax[ii].triplot(tri.x, tri.y, tri.triangles, color=linecolor,
                       linewidth=linewidth, alpha=linealpha)

        #_______________________________________________________________________
        # add mesh land-sea mask
        ax[ii] = do_plotlsmask(ax[ii], mesh, do_lsmask, box, which_proj,
                               color_lsmask=color_lsmask, edgecolor=linecolor,
                               linewidth=0.5)

        #_______________________________________________________________________
        # add gridlines
        ax[ii] = do_add_gridlines(ax[ii], rowlist[ii], collist[ii],
                                  xticks, yticks, proj, which_proj)

        #_______________________________________________________________________
        # set title and axes labels
        if title is not None:
            # is title string:
            if isinstance(title, str):
                ax[ii].set_title(title, fontsize=fontsize + 2)
            # is title list of strings
            elif isinstance(title, list):
                ax[ii].set_title(title[ii], fontsize=fontsize + 2)

        nax_fin = ii + 1

    #___________________________________________________________________________
    # delete axes that are not needed
    for jj in range(nax_fin, nax):
        fig.delaxes(ax[jj])

    #___________________________________________________________________________
    # repositioning of axes and colorbar
    ax, cbar = do_reposition_ax_cbar(ax, None, rowlist, collist, pos_fac,
                                     pos_gap, title=title, proj=proj,
                                     extend=pos_extend)

    #___________________________________________________________________________
    # save figure based on do_save contains either None or pathname
    do_savefigure(do_save, fig)

    #___________________________________________________________________________
    return (fig, ax)
9a921224440f359c33686822411b928ebd939550
26,611
def _get_feature_proportion(features_percentage: int,
                            indices_number: int) -> int:
    """
    Computes a number of features based on the given percentage.
    """
    assert (isinstance(features_percentage, int)
            and 0 <= features_percentage <= 100
            and isinstance(indices_number, int))

    feature_proportion = int((features_percentage / 100) * indices_number)
    if feature_proportion:
        features_number = feature_proportion
    else:
        logger.warning(
            'Since the number of features to be extracted was not given '
            '%d%% of features will be used. This percentage translates to '
            '0 features, therefore the number of features to be used is '
            'overwritten to 1. To prevent this from happening, you should '
            'either explicitly set the number of features via the '
            'features_number parameter or increase the value of the '
            'features_percentage parameter.', features_percentage)
        features_number = feature_proportion + 1

    return features_number
78a5d5515b479b20fcfbbf25cdd2339f0bc8b99f
26,612
import numpy as np

def orthoProjectionMatrix(left, right, bottom, top, nearClip=0.01, farClip=100.,
                          out=None, dtype=None):
    """Compute an orthographic projection matrix with provided frustum
    parameters.

    Parameters
    ----------
    left : float
        Left clipping plane coordinate.
    right : float
        Right clipping plane coordinate.
    bottom : float
        Bottom clipping plane coordinate.
    top : float
        Top clipping plane coordinate.
    nearClip : float
        Near clipping plane distance from viewer.
    farClip : float
        Far clipping plane distance from viewer.
    out : ndarray, optional
        Optional output array. Must be same `shape` and `dtype` as the
        expected output if `out` was not specified.
    dtype : dtype or str, optional
        Data type for arrays, can either be 'float32' or 'float64'. If `None`
        is specified, the data type is inferred by `out`. If `out` is not
        provided, the default is 'float64'.

    Returns
    -------
    ndarray
        4x4 projection matrix

    See Also
    --------
    perspectiveProjectionMatrix : Compute a perspective projection matrix.

    Notes
    -----
    * The returned matrix is row-major. Values are floats with 32 or 64 bits
      of precision (depending on `dtype`) stored as a contiguous (C-order)
      array.

    """
    if out is None:
        dtype = np.float64 if dtype is None else np.dtype(dtype).type
    else:
        dtype = np.dtype(out.dtype).type

    projMat = np.zeros((4, 4,), dtype=dtype) if out is None else out

    if out is not None:
        projMat.fill(0.0)

    u = dtype(2.0)

    projMat[0, 0] = u / (right - left)
    projMat[1, 1] = u / (top - bottom)
    projMat[2, 2] = -u / (farClip - nearClip)
    projMat[0, 3] = -((right + left) / (right - left))
    projMat[1, 3] = -((top + bottom) / (top - bottom))
    projMat[2, 3] = -((farClip + nearClip) / (farClip - nearClip))
    projMat[3, 3] = 1.0

    return projMat
f1b80b8eeda514ff02142ffe6dcdd761cd789e73
26,613
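Illustrative call to the orthoProjectionMatrix record above, with made-up frustum values:

import numpy as np

# Symmetric frustum centered on the origin.
P = orthoProjectionMatrix(-1.0, 1.0, -1.0, 1.0, nearClip=0.1, farClip=10.0)
print(P.dtype)        # float64 by default
print(np.round(P, 3)) # diagonal scales plus the depth-mapping column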
# Assumed import: dnspython provides dns.resolver.
import dns.resolver

def get_nsnames(zone):
    """Get list of nameserver names to query"""
    if Prefs.NO_NSSET:
        if not Prefs.ADDITIONAL:
            print("ERROR: -n requires specifying -a")
            usage()
        return Prefs.ADDITIONAL

    answers = dns.resolver.resolve(zone, 'NS', 'IN')
    return Prefs.ADDITIONAL + sorted([str(x.target) for x in answers.rrset])
1c5da972922afc0724144545a57bc1d01012dd11
26,614
def pbmcs_10x_cite_seq(
    save_path: str = "data/",
    protein_join: str = "inner",
    run_setup_anndata: bool = True,
) -> anndata.AnnData:
    """
    Filtered PBMCs from 10x Genomics profiled with RNA and protein.

    Datasets were filtered for doublets and other outliers as in
    https://github.com/YosefLab/totalVI_reproducibility/blob/master/data/data_filtering_scripts/pbmc_10k/pbmc_10k.py

    Parameters
    ----------
    save_path
        Location to use when saving/loading the data.
    protein_join
        Whether to take an inner join or outer join of proteins
    run_setup_anndata
        If true, runs setup_anndata() on dataset before returning

    Returns
    -------
    AnnData with batch info (``.obs['batch']``), and protein expression
    (``.obsm["protein_expression"]``)

    Missing protein values are zero, when ``protein_join == "outer"`` and are
    identified during ``AnnData`` setup.

    Examples
    --------
    >>> import scvi
    >>> adata = scvi.data.pbmcs_10x_cite_seq()
    """
    return _load_pbmcs_10x_cite_seq(
        save_path=save_path,
        protein_join=protein_join,
        run_setup_anndata=run_setup_anndata,
    )
eccb235496b6c466ffd2e234ab6b20487c7cf233
26,615
def binom(n, k):
    """Binomial coefficients for :math:`n` choose :math:`k`

    :param n,k: non-negative integers
    :complexity: O(k)
    """
    prod = 1
    for i in range(k):
        prod = (prod * (n - i)) // (i + 1)
    return prod
73e06e4c312f6634d9a97914f330ade845a9ce00
26,616
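The floor division in the binom record above stays exact: after multiplying by (n - i), the running product is i+1 consecutive-term numerator of a binomial coefficient and is always divisible by (i + 1). A quick check:

print(binom(5, 2))   # 10
print(binom(52, 5))  # 2598960 -- number of 5-card poker hands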
def get_not_found_swagger_schema():
    """Return a Schema class describing the body of a 404 response."""
    class NotFoundResponseModel(Schema):
        """Schema with a single string ``message`` property."""
        type = "object"
        properties = {
            "message": {
                "type": "string",
            }
        }

    return NotFoundResponseModel
c1ac8c85224c2e885ade68593a1d250af09a465b
26,618
def find_project(testrun_url):
    """
    Find a project name from this Polarion testrun URL.

    :param testrun_url: Polarion test run URL
    :returns: project name eg "CEPH" or "ContainerNativeStorage"
    """
    # Skip the fixed-length URL prefix; the project name is the next
    # path segment, terminated by '/'.
    url_suffix = testrun_url[59:]
    index = url_suffix.index('/')
    return url_suffix[:index]
a19019846fa084398a4967cb99417e7aebc90499
26,619
def c2ip(c2, uname):
    """ return complete ip address for c2 with substituted username """
    return c2['ip_address'].replace('USER', uname)
c6f79b2330e78c8ebc85a3fb99ce1c5be407f158
26,620
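Hypothetical usage of the c2ip record above, assuming the c2 dict carries a 'USER' placeholder in its ip_address value:

c2 = {'ip_address': '192.168.1.USER'}
print(c2ip(c2, '42'))  # '192.168.1.42'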
def beacon(config):
    """
    Watch the configured directories

    Example Config

    .. code-block:: yaml

        beacons:
          watchdog:
            - directories:
                /path/to/dir:
                  mask:
                    - create
                    - modify
                    - delete
                    - move

    The mask list can contain the following events (the default mask is
    create, modify, delete, and move):

    * create - File or directory is created in watched directory
    * modify - The watched directory is modified
    * delete - File or directory is deleted from watched directory
    * move   - File or directory is moved or renamed in the watched directory
    """
    _config = {}
    list(map(_config.update, config))

    queue = _get_queue(_config)

    ret = []
    while queue:
        ret.append(to_salt_event(queue.popleft()))

    return ret
5981f150276c2f9b9512c33864de02b0ce37094e
26,621
import json

def scenario(request):
    """
    Retrieve the parameters and nodes for a scenario

    Parameters:
    model_uuid (uuid): required
    scenario_id (int): required

    Returns: HttpResponse

    Example:
    GET: /component/scenario/
    """
    model_uuid = request.GET['model_uuid']
    scenario_id = request.GET['scenario_id']
    request.session['scenario_id'] = scenario_id

    model = Model.by_uuid(model_uuid)
    can_edit = model.handle_view_access(request.user)

    # Scenario Parameters
    colors = model.color_lookup
    parameters = Scenario_Param.objects.filter(
        model_id=model.id, scenario_id=scenario_id,
        run_parameter__user_visibility=True)

    # All Loc Techs
    loc_techs = []
    lts = model.loc_techs
    lts = lts.values('id', 'technology_id',
                     'technology__pretty_name',
                     'technology__pretty_tag',
                     'technology__abstract_tech__icon',
                     'location_1__pretty_name',
                     'location_2__pretty_name')
    for lt in lts:
        tech_id = lt["technology_id"]
        color = colors[tech_id] if tech_id in colors.keys() else "#000"
        loc_techs.append({
            "id": lt['id'],
            "technology_id": lt['technology_id'],
            "tag": lt["technology__pretty_tag"],
            "technology": lt["technology__pretty_name"],
            "location_1": lt["location_1__pretty_name"],
            "location_2": lt["location_2__pretty_name"],
            "color": color,
            "icon": lt["technology__abstract_tech__icon"]})

    # Active Loc Techs
    active_lts = Scenario_Loc_Tech.objects.filter(scenario_id=scenario_id)
    active_lt_ids = list(active_lts.values_list("loc_tech_id", flat=True))

    # Filters Data
    unique_techs = [v['technology'] for v in loc_techs]
    unique_tags = [v['tag'] for v in loc_techs]
    locations = [(v['location_1'], v['location_2']) for v in loc_techs]
    unique_locations = [item for sublist in locations for item in sublist]

    context = {
        "model": model,
        "parameters": parameters,
        "can_edit": can_edit}
    scenario_settings = list(render(request, 'scenario_settings.html', context))[0]

    context = {
        "model": model,
        "colors": colors,
        "carrier_ins": model.carrier_lookup(True),
        "carrier_outs": model.carrier_lookup(False),
        "active_lt_ids": active_lt_ids,
        "loc_techs": loc_techs,
        "scenario_id": scenario_id,
        "unique_techs": sorted(filter(None, set(unique_techs))),
        "unique_tags": sorted(filter(None, set(unique_tags))),
        "unique_locations": sorted(filter(None, set(unique_locations))),
        "can_edit": can_edit}
    scenario_configuration = list(render(request, 'scenario_configuration.html', context))[0]

    payload = {
        'model_id': model.id,
        'scenario_id': scenario_id,
        'loc_techs': loc_techs,
        'active_lt_ids': active_lt_ids,
        'scenario_settings': scenario_settings.decode('utf-8'),
        'scenario_configuration': scenario_configuration.decode('utf-8')}

    return HttpResponse(json.dumps(payload, indent=4),
                        content_type="application/json")
795bad706c97c20b566d9fcc999b7e01b0b79194
26,622
import dataset

def already_voted(replied: str, user_id: str, db: dataset.Database) -> bool:
    """Search the database for an existing vote by the user on the replied
    message.

    Args:
        replied: id of the message to which the vote is a reply
        user_id: id of the user who's voting

    Returns:
        True if the user already voted on the message, False otherwise.
    """
    table = db['messages']
    row = table.find_one(replied=replied, user_id=user_id)
    return row is not None
89ec426df156776ab4a494f0dab0079881b45db2
26,623
# Assumed import; this code targets the TensorFlow 1.x API.
import tensorflow as tf

def create_bi_sequence_embedding(inputs, seq_lengths, repr_dim, vocab_size,
                                 emb_name, rnn_scope, reuse_scope=False):
    """
    Bidirectional encoding

    :param inputs: tensor [d1, ..., dn] of int32 symbols
    :param seq_lengths: [s1, ..., sn] lengths of instances in the batch
    :param repr_dim: dimension of embeddings
    :param vocab_size: number of symbols
    :return: return outputs_fw, last_state_fw, outputs_bw, last_state_bw
    """
    # use a shared embedding matrix for now, test if this outperforms
    # separate matrices later
    embedding_matrix = tf.Variable(tf.random_uniform([vocab_size, repr_dim],
                                                     -0.1, 0.1,
                                                     dtype=_FLOAT_TYPE),
                                   name=emb_name, trainable=True,
                                   dtype=_FLOAT_TYPE)

    # [batch_size, max_seq_length, input_size]
    embedded_inputs = tf.nn.embedding_lookup(embedding_matrix, inputs)

    # dummy test to see if the embedding lookup is working
    # Reduce along dimension 1 (`n_input`) to get a single vector (row)
    # per input example
    # embedding_aggregated = tf.reduce_sum(embedded_inputs, [1])

    ### first FW LSTM ###
    with tf.variable_scope(rnn_scope + "_FW") as scope:
        if reuse_scope == True:
            scope.reuse_variables()
        cell_fw = tf.nn.rnn_cell.LSTMCell(repr_dim, state_is_tuple=True)
        # cell_fw = tf.contrib.rnn.AttentionCellWrapper(cell_fw, 3, state_is_tuple=True)  # not working
        cell_fw = tf.nn.rnn_cell.DropoutWrapper(cell=cell_fw, output_keep_prob=0.9)

        # outputs shape: [batch_size, max_time, cell.output_size]
        # last_states shape: [batch_size, cell.state_size]
        outputs_fw, last_state_fw = tf.nn.dynamic_rnn(
            cell=cell_fw,
            dtype=_FLOAT_TYPE,
            sequence_length=seq_lengths,
            inputs=embedded_inputs)

    embedded_inputs_rev = tf.reverse(embedded_inputs, [False, True, False])  # reverse the sequence

    ### first BW LSTM ###
    with tf.variable_scope(rnn_scope + "_BW") as scope:
        if reuse_scope == True:
            scope.reuse_variables()
        cell_bw = tf.nn.rnn_cell.LSTMCell(repr_dim, state_is_tuple=True)
        cell_bw = tf.nn.rnn_cell.DropoutWrapper(cell=cell_bw, output_keep_prob=0.9)

        # outputs shape: [batch_size, max_time, cell.output_size]
        # last_states shape: [batch_size, cell.state_size]
        outputs_bw, last_state_bw = tf.nn.dynamic_rnn(
            cell=cell_bw,
            dtype=_FLOAT_TYPE,
            sequence_length=seq_lengths,
            inputs=embedded_inputs_rev)

    return outputs_fw, last_state_fw, outputs_bw, last_state_bw, embedding_matrix
1f160100745801ac4baf3d82d8ee7b76900c0547
26,624
import mmap

async def input_checker(user_guess: str) -> bool:
    """Check if the user's input is actually a word.

    Method for checking if input is in text file:
    https://stackoverflow.com/a/4944929"""
    if len(user_guess) != 5:
        valid = False
    else:
        with open(wordfile_path, encoding='utf-8', errors='ignore') as f, \
                mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as s:
            if s.find(str.encode(user_guess)) != -1:
                valid = True
            else:
                valid = False
    return valid
60ffad6529b1a7b6d68309cc9804ff6e39e2e539
26,625
import numpy as np

def computeBasisFunctionsReferenceElement(edge_orientation, face_orientation,
                                          Nord, points):
    """Compute the basis functions for the reference element.

    :param ndarray edge_orientation: orientation for edges
    :param ndarray face_orientation: orientation for faces
    :param int Nord: polynomial order of nedelec basis functions
    :param ndarray points: spatial points at which basis functions will be computed
    :return: basis functions on reference element
    :rtype: ndarray
    """
    # Get number of points
    if points.ndim == 1:
        num_points = 1
        points = points.reshape((1, 3))
    else:
        num_points = points.shape[0]

    # Compute number of dofs for element
    # (plain int/float used: the np.int and np.float aliases are removed
    # in modern numpy)
    num_dof_in_element = int(Nord * (Nord + 2) * (Nord + 3) / 2)

    # Allocate
    basis = np.zeros((3, num_dof_in_element, num_points), dtype=float)

    for i in np.arange(num_points):
        # Get gauss point coordinates
        X = points[i, :]

        # Polynomial order (6 edges, 4 faces, 1 volume)
        Nord_vector = np.ones(11, dtype=int) * Nord

        # Edge orientation (6 edges)
        NoriE = edge_orientation

        # Face orientation (4 faces)
        NoriF = face_orientation

        # Compute basis for iPoint
        NrdofE, ShapE, CurlE = shape3DETet(X, Nord_vector, NoriE, NoriF)

        # Verify consistency of number of dofs for this point
        if (NrdofE != num_dof_in_element):
            Print.master('     Number of DOFs is not consistent')
            exit(-1)

        # Niref = Ni in reference element
        Niref = ShapE[0:3, 0:NrdofE]

        # Store basis functions for i
        basis[:, :, i] = Niref

    return basis
10579b5b6af4d5d270faf043186197c345a593d2
26,627
def t_returns(inv, pfl, prices, date):
    """
    Computes the total return of a portfolio.

    Parameters:
    - `inv` : :class:`list` investment session `db` row
    - `pfl` : :class:`string` name of the portfolio
    - `prices` : :class:`dict` latest investment's ticker prices
    - `date` : :class:`string` date of the purchase

    Compares the sum of the shares when the investment was made to the sum of
    the shares now. The absolute change and returns are calculated with the
    same formulas as in :py:func:`check.returns`

    Returns a :class:`dict` containing the total initial price, the new price,
    the absolute change, the returns and the date of the purchase.
    """
    t_old = sum(map(lambda key: inv[pfl][key] * inv['prc'][key], inv[pfl].keys()))
    t_old = round(t_old, 1)
    t_new = sum(map(lambda key: inv[pfl][key] * prices[key], inv[pfl].keys()))
    t_new = round(t_new, 1)
    abs = round(t_new - t_old, 1)
    rel = round(((t_new - t_old) / t_old) * 100, 2)

    return {'abs': abs, 'rel': rel, 'old': t_old, 'new': t_new,
            'qty': 'NA', 'date': date}
8a928e0806b0e87d2a0539ff905112ad0d3d66ae
26,630
import numpy as np

def center_crop_pad(img, buffer=0, min_mean=10):
    """dynamically center crop image, cropping away black space left and right"""
    g = np.array(img).mean(-1)
    h, w = g.shape
    zeros = g.mean(0)
    zero_inds = np.where(zeros < min_mean)[0]
    lo, hi = zero_inds[zero_inds < w // 2].max(), zero_inds[zero_inds > w // 2].min()
    return expand2square(img.crop((lo - buffer, 0, hi + buffer, h)))
97326539464826441f283303e21a17b6ae2954d6
26,631
import numpy as np

def DiagPart(a):
    """
    Diag op that returns only the diagonal elements.
    """
    # Trailing comma: ops return their results as a tuple.
    return np.diagonal(a),
4993f7034042303926f94f3dae28d7d8f8dc5058
26,633
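Usage of the DiagPart record above; note the one-element tuple:

import numpy as np

m = np.arange(9).reshape(3, 3)
print(DiagPart(m))  # (array([0, 4, 8]),)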
def _ensure_webhook_access(func):
    """Decorate WS function to ensure user owns the webhook ID."""
    @callback
    @wraps(func)
    def with_webhook_access(hass, connection, msg):
        # Validate that the webhook ID is registered to the user of
        # the websocket connection
        config_entry = hass.data[DOMAIN][DATA_CONFIG_ENTRIES].get(msg["webhook_id"])

        if config_entry is None:
            connection.send_error(
                msg["id"], websocket_api.ERR_NOT_FOUND, "Webhook ID not found"
            )
            return

        if config_entry.data[CONF_USER_ID] != connection.user.id:
            connection.send_error(
                msg["id"],
                websocket_api.ERR_UNAUTHORIZED,
                "User not linked to this webhook ID",
            )
            return

        func(hass, connection, msg)

    return with_webhook_access
c1b64e5f435f79e52e8c69788b4354481d2a6f5b
26,635
import copy

def episode_to_examples(episode, histsz):
    """Converts an episode (list of Parleys) into self-feeding compatible examples

    WARNING: we no longer require a histsz when making a self-feeding file.
    Shortening of the history is typically done in the teacher file or in
    interactive mode.
    """
    examples = []
    history = []
    for parley in episode:
        # Update memories and history
        # memories.extend(parley.memories)
        history.append(parley.context)

        # Concatenate history and add speaker tokens as necessary
        # if history_size == 1, the bot (p2) only sees the immediately
        # preceding utterance (the prompt from the human, p1).
        if histsz < 0:
            utterances = history
            context = add_person_tokens(utterances, last_speaker=1)
        elif histsz == 0:
            context = '__null__'
        else:
            utterances = history[-histsz:]
            context = add_person_tokens(utterances, last_speaker=1)

        example = Parley(
            context,
            parley.response,
            parley.reward,
            copy.deepcopy(parley.candidates),
            # copy.deepcopy(memories),
        )
        examples.append(example)

        # Add current turn's response to the history
        history.append(parley.response)

    return examples
a95abd0183dc70e195312d82117b16720d2c4353
26,636
import torch

def camera_from_polyhedron(polyhedronFcn, camera_distance=1, to_spherical=False,
                           device='cuda:0'):
    """
    Returns the positions of a camera lying on the vertices of a given polyhedron

    Parameters
    ----------
    polyhedronFcn : callable
        the polyhedron creation function
    camera_distance : float (optional)
        the camera distance from the origin (default is 1)
    to_spherical : bool (optional)
        if True, converts the coordinates into spherical (default is False)
    device : str or torch.device
        the device the tensors will be stored to (default is 'cuda:0')

    Returns
    -------
    (Tensor, LongTensor)
        the positions and the edge tensor of the camera views
    """
    P, T = polyhedronFcn(device=device)[0:2]
    theta = PI / 100
    R = torch.tensor([[1, 0, 0],
                      [0, cos(theta), -sin(theta)],
                      [0, sin(theta), cos(theta)]],
                     dtype=torch.float, device=device)
    P = torch.mul(torch.mm(normr(P), torch.t(R)), camera_distance)
    if to_spherical:
        P = cart2sph(P)
    return P, poly2edge(T)[0]
30c782b616299c101cc7130703563fae1327d364
26,637
def cifar10(args, dataset_paths): """ Loads the CIFAR-10 dataset. Returns: train/valid/test set split dataloaders. """ transf = { 'train': transforms.Compose([ transforms.RandomHorizontalFlip(0.5), transforms.RandomCrop((args.crop_dim, args.crop_dim), padding=args.padding), transforms.ToTensor(), # Standardize()]), transforms.Normalize((0.49139968, 0.48215841, 0.44653091), (0.24703223, 0.24348513, 0.26158784))]), 'test': transforms.Compose([ transforms.ToTensor(), # Standardize()])} transforms.Normalize((0.49139968, 0.48215841, 0.44653091), (0.24703223, 0.24348513, 0.26158784))]) } config = {'train': True, 'test': False} datasets = {i: CIFAR10(root=dataset_paths[i], transform=transf[i], train=config[i], download=True) for i in config.keys()} # weighted sampler weights for full(f) training set f_s_weights = sample_weights(datasets['train'].targets) # return data, labels dicts for new train set and class-balanced valid set data, labels = random_split(data=datasets['train'].data, labels=datasets['train'].targets, n_classes=10, n_samples_per_class=np.repeat(500, 10).reshape(-1)) # define transforms for train set (without valid data) transf['train_'] = transforms.Compose([ transforms.ToPILImage(), transforms.RandomHorizontalFlip(0.5), transforms.RandomCrop((args.crop_dim, args.crop_dim), padding=args.padding), transforms.ToTensor(), # Standardize()]) transforms.Normalize((0.49139968, 0.48215841, 0.44653091), (0.24703223, 0.24348513, 0.26158784))]) # define transforms for class-balanced valid set transf['valid'] = transforms.Compose([ transforms.ToPILImage(), transforms.ToTensor(), # Standardize()]) transforms.Normalize((0.49139968, 0.48215841, 0.44653091), (0.24703223, 0.24348513, 0.26158784))]) # save original full training set datasets['train_valid'] = datasets['train'] # make new training set without validation samples datasets['train'] = CustomDataset(data=data['train'], labels=labels['train'], transform=transf['train_']) # make class balanced validation set datasets['valid'] = CustomDataset(data=data['valid'], labels=labels['valid'], transform=transf['valid']) # weighted sampler weights for new training set s_weights = sample_weights(datasets['train'].labels) config = { 'train': WeightedRandomSampler(s_weights, num_samples=len(s_weights), replacement=True), 'train_valid': WeightedRandomSampler(f_s_weights, num_samples=len(f_s_weights), replacement=True), 'valid': None, 'test': None } if args.distributed: config = {'train': DistributedSampler(datasets['train']), 'train_valid': DistributedSampler(datasets['train_valid']), 'valid': None, 'test': None} dataloaders = {i: DataLoader(datasets[i], sampler=config[i], num_workers=8, pin_memory=True, drop_last=True, batch_size=args.batch_size) for i in config.keys()} return dataloaders
867d3a6e7ff4ed72c02583c2eafab2885218c0ad
26,638
def read_words(file="words.txt"): """ Reads a list of words from a file. There needs to be one word per line, for this to work properly. Args: file: the file to read from Returns: An array of all the words in the file """ with open(file, "r") as f: return f.read().lower().splitlines()
d3d82c4f9afc7db73b4f82f4715cab9b2e99973c
26,640
def get_intersphinx_label(is_map, cur_project_dir):
    """
    The top set of keys in the intersphinx map are shortname labels that
    intersphinx uses to identify different projects.

    A sub-tuple in the dict (here invdata[1]) is a list of possible locations
    for the project's objects.inv file.

    This utility checks all the locations (only filepath ones) to see if the
    current project dir name is in the filepath. If a match is found, this
    immediately returns the shortname label, which can be used to locate the
    current project data in the intersphinx map.

    This is a 'good guess' to determine which intersphinx entry relates to
    the current project.
    """
    for shortname, invdata in is_map.items():
        for invpath in invdata[1]:
            if invpath and not invpath.startswith("http"):
                if cur_project_dir in invpath:
                    return shortname
    return None
87115f45c966b838566d6909d3a66af5359a2a1d
26,641
import sqlalchemy.exc

async def patch_user(user: User):
    """update a `user` in the list of users"""
    try:
        session = Session()
        selected_user = session.query(
            UserTable
        ).filter(
            UserTable.key == user.key
        ).first()
        selected_user.firstname = user.firstname
        selected_user.lastname = user.lastname
        selected_user.classname = user.classname
        session.commit()
    except sqlalchemy.exc.IntegrityError:
        session.rollback()  # leave the session clean after the failed commit
        return {"status": PatchUserResponseStatus.fail}
    return {"status": PatchUserResponseStatus.success}
911b2c3f5f5e5c2ec7aa7be7595b5106ccf17b0d
26,642
import torchvision

def get_test_dataloader(mean, std, batch_size=16, num_workers=2, shuffle=True, task="cifar100", train=False):
    """ return test dataloader
    Args:
        mean: mean of the cifar10/cifar100 test dataset
        std: std of the cifar10/cifar100 test dataset
        batch_size: dataloader batchsize
        num_workers: dataloader num_workers
        shuffle: whether to shuffle
        task: which dataset to load, "cifar100" or "cifar10"
        train: whether to load the training split instead of the test split
    Returns: cifar100_test_loader: torch dataloader object
    """

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std)
    ])
    #cifar100_test = CIFAR100Test(path, transform=transform_test)
    if task == "cifar100":
        cifar100_test = torchvision.datasets.CIFAR100(root='./data', train=train, download=True, transform=transform_test)
    elif task == "cifar10":
        cifar100_test = torchvision.datasets.CIFAR10(root='./data', train=train, download=True, transform=transform_test)
    cifar100_test_loader = DataLoader(
        cifar100_test, shuffle=shuffle, num_workers=num_workers, batch_size=batch_size)

    return cifar100_test_loader
402e119430a3d260e0e15238e6f55b91f929848a
26,643
from typing import Set def get(tags: Set[str]): """ get options marked by `tags` Options tagged by wildcard '*' are always returned """ # use specifically tagged options + those tagged with wildcard * return (o for tag in ('*',) + tuple(tags) for o in _options[tag])
164e808c5dcd76febad488b8fb5bf0b76835ec2a
26,644
def jitter_boxes(boxes, noise_scale=0.025):
    """Jitter the box coordinates by some noise distribution.

    Args:
      boxes: a tensor whose last dimension is 4 representing the coordinates of
        boxes in ymin, xmin, ymax, xmax order.
      noise_scale: a python float which specifies the magnitude of noise. The
        rule of thumb is to set this between (0, 0.1]. The default value is
        found to mimic the noisy detections best empirically.

    Returns:
      jittered_boxes: a tensor whose shape is the same as `boxes` representing
        the jittered boxes.

    Raises:
      ValueError: If the last dimension of boxes is not 4.
    """
    if boxes.shape[-1] != 4:
        raise ValueError(
            'boxes.shape[-1] is {:d}, but must be 4.'.format(boxes.shape[-1]))

    with tf.name_scope('jitter_boxes'):
        bbox_jitters = tf.random_normal([4], stddev=noise_scale)
        ymin = boxes[..., 0:1]
        xmin = boxes[..., 1:2]
        ymax = boxes[..., 2:3]
        xmax = boxes[..., 3:4]
        width = xmax - xmin
        height = ymax - ymin
        new_center_x = (xmin + xmax) / 2.0 + bbox_jitters[0] * width
        new_center_y = (ymin + ymax) / 2.0 + bbox_jitters[1] * height
        new_width = width * tf.exp(bbox_jitters[2])
        new_height = height * tf.exp(bbox_jitters[3])
        jittered_boxes = tf.concat([
            new_center_y - new_height * 0.5, new_center_x - new_width * 0.5,
            new_center_y + new_height * 0.5, new_center_x + new_width * 0.5],
            axis=-1)

    return jittered_boxes
e0ac4b003b77190390f397f3ef80a915ca5214d3
26,645
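A hedged usage sketch for jitter_boxes above. The function relies on TF1-style ops (tf.random_normal), so under TF2 it would need the v1 compatibility shim; the box values here are arbitrary:

import tensorflow.compat.v1 as tf  # assumption: TF2 with the v1 compat shim

boxes = tf.constant([[10.0, 20.0, 50.0, 80.0]])    # ymin, xmin, ymax, xmax
jittered = jitter_boxes(boxes, noise_scale=0.025)  # same shape, randomly perturbed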
def soil_props(soil_type, depth): """ Parameters c, Ks, n, Beta, s_h, s_w, s_bal, s_fc, bulk_d: Laio et al., 2001, Plants in water-controlled ecosystems: active role in hydrologic processes and response to water stress: II. Probabilistic soil moisture dynamic Parameters p1 through p5: Ezlit et al., 2013, Modification of the McNeal Clay Swelling Model Improves Prediction of Saturated Hydraulic Conductivity as a Function of Applied Water Quality """ # 5.7% clay class_1 = {'c':4.8, 'Ks':1000.0, 'n':0.42, 'Beta':12.7, 's_h':0.08, 's_w':0.11, 's_bal':0.31, 's_fc':0.52, 'bulk_d':1.5, 'p1':0.649, 'p2':0.003, 'p3':8.837, 'p4':4.046, 'p7':0.008, 'p6':6.356, 'p5':30.818, 'CEC': 50} # 16.2% clay class_2 = {'c':6.5, 'Ks':800.0, 'n':0.43, 'Beta':13.8, 's_h':0.14, 's_w':0.18, 's_bal':0.46, 's_fc':0.56, 'bulk_d':1.5, 'p1':1.00, 'p2':0.912, 'p3':1.438, 'p4':7.29, 'p7':0.204, 'p6':4.105, 'p5':-5.054, 'CEC': 150} # 48.5% clay class_3 = {'c':9.8, 'Ks':200.0, 'n':0.45, 'Beta':14.8, 's_h':0.19, 's_w':0.24, 's_bal':0.57, 's_fc':0.65, 'bulk_d':1.2, 'p1':0.449, 'p2':1.005, 'p3':0.846, 'p4':10.968, 'p7':0.53, 'p6':4.0799, 'p5':-11.15, 'CEC': 300} if soil_type == "class_1": soil_dict = {**class_1} elif soil_type == "class_2": soil_dict = {**class_2} elif soil_type == "class_3": soil_dict = {**class_3} gapon = 0.01475 mass = soil_dict['bulk_d']*depth soil_dict.update(Kg=gapon, Zr=depth, Msoil = mass) return soil_dict
a6d421d5606d4a00e6a621a513939af8ce2ad62c
26,646
from typing import List

def DoMeshesBelongToSameMainMesh(list_mesh_identifiers: List[str]) -> bool:
    """checks whether all meshes, given a list of mesh identifiers, belong to the same main mesh
    Throws if a mesh identifier does not belong to a mesh
    """
    main_mesh_identifiers = []
    for mesh_identifier in list_mesh_identifiers:
        mesh_obj = salome_utilities.GetSalomeObject(mesh_identifier)
        if IsMeshProxy(mesh_obj):
            main_mesh_identifiers.append(mesh_identifier)
        elif IsSubMeshProxy(mesh_obj) or IsMeshGroup(mesh_obj):
            main_mesh_identifiers.append(salome_utilities.GetSalomeID(mesh_obj.GetMesh()))
        else:
            obj_type = type(mesh_obj)
            obj_name = salome_utilities.GetObjectName(mesh_identifier)
            raise Exception('Object with identifier "{}" is not a mesh! Name: "{}" , Type: "{}"'.format(mesh_identifier, obj_name, obj_type))

    return len(set(main_mesh_identifiers)) <= 1
2e8e47c0b5bf4e6d67adf5a0a46a35bacba42bce
26,647
def active_roles(account, days_back): """ Returns query for finding active roles (since days_back value). """ query_string = f"""SELECT DISTINCT useridentity.sessioncontext.sessionissuer.arn FROM behold WHERE account = '{account}' AND useridentity.type = 'AssumedRole' AND from_iso8601_timestamp(eventtime) > date_add('day', -{days_back}, now());""" return (query_string, f"athena_results/active_roles/{account}")
e6842696aa40d4f0b30f17d0d53afdcc5d1d0de9
26,650
import asyncio

async def run_command(*args, **kwargs):
    """Shortcut for asynchronous running of a command"""
    fn = asyncio.subprocess.create_subprocess_exec
    if kwargs.pop("shell", False):
        fn = asyncio.subprocess.create_subprocess_shell
    check = kwargs.pop("check", False)
    process = await fn(*args, **kwargs)
    stdout, stderr = await process.communicate()
    if check:
        if process.returncode != 0:
            raise Exception("Command failed: %s" % args)
    return process.returncode, stdout, stderr
948ccb127afb8cf1c2a1731a5198bc493a1e9fe4
26,652
import torch

def scaled_dot_product_attention(q, k, v, mask=None):
    """Computes the attention weights.

    q, k, v must have matching leading dimensions, and depth_q == depth_k.
    k and v must have a matching second-to-last dimension, i.e. seq_len_k == seq_len_v.
    Although the mask has different shapes depending on its type (padding or
    look-ahead), it must be broadcastable for the addition.

    Args:
      q: queries, shape == (..., seq_len_q, depth)
      k: keys,    shape == (..., seq_len_k, depth)
      v: values,  shape == (..., seq_len_v, depth_v), with seq_len_k == seq_len_v
      mask: float tensor broadcastable to (..., seq_len_q, seq_len_k). Defaults to None.

    Returns:
      output, attention weights
    """
    # matmul(a, b): the last two dims of a and b must be compatible for matrix
    # multiplication (a's last dim == b's second-to-last dim); all other dims
    # must be equal or 1 (broadcast when 1)
    matmul_qk = torch.matmul(q, k.transpose(-1, -2))  # => [..., seq_len_q, seq_len_k]

    # scale matmul_qk
    dk = torch.tensor(k.shape[-1], dtype=torch.float32)  # depth of k, a.k.a. depth_k
    scaled_attention_logits = matmul_qk / torch.sqrt(dk)  # [..., seq_len_q, seq_len_k]

    # add the mask to the scaled tensor (important!)
    if mask is not None:  # mask: [b, 1, 1, seq_len]
        # positions with mask == 1 are padding; multiplying by -1e9 (-1*10^9)
        # drives them toward -inf, so they go to ~0 after the softmax
        scaled_attention_logits += (mask * -1e9)

    # softmax normalizes over the last axis (seq_len_k)
    attention_weights = torch.nn.functional.softmax(scaled_attention_logits, dim=-1)  # [..., seq_len_q, seq_len_k]
    output = torch.matmul(attention_weights, v)  # => [..., seq_len_q, depth_v]
    return output, attention_weights
3d51de38ca553c3b769bd1ba4936159034cd68e0
26,653
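A short shape check for scaled_dot_product_attention above (assuming PyTorch is installed); with 2 sequences, 8 heads, query length 10 and key/value length 12:

import torch

q = torch.rand(2, 8, 10, 64)
k = torch.rand(2, 8, 12, 64)
v = torch.rand(2, 8, 12, 64)
out, attn = scaled_dot_product_attention(q, k, v, mask=None)
print(out.shape, attn.shape)  # torch.Size([2, 8, 10, 64]) torch.Size([2, 8, 10, 12])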
def _get_parent_entity(entities, entity_id): """ Gets the parent entity from the collection, or throws ParentDoesNotExist. """ try: return entities[entity_id] except KeyError: raise ParentDoesNotExist(object_type='Entity', key=entity_id)
d898252058f191a2685803fc6d4495eb75ce56eb
26,654
def tstop(f): """ Dust stopping time """ units = sutil.get_all_units(f) grainSize = f['u_dustGrainSize'] grainDensity = SimArray(sutil.get_snap_param(f, 'dDustGrainDensity'), units['rho_unit']) if sutil.is_isothermal(f): gamma = 1. else: gamma = sutil.get_snap_gamma(f) t = ((grainSize*grainDensity)/(f['rho'] * f['cs'])) * np.sqrt(np.pi*gamma/8.) return t.in_units(units['t_unit'])
e1f13c3b87104d366dd0c5239dd5d24de954897c
26,655
def solve_2d_discrete_observations_continuous_modelling( cond_xy0s_list: tp.List[tp.Tuple[float, float]], cond_xytGammas_list: tp.List[tp.Tuple[float, float, float]], cond_f0s_list: tp.List[float], cond_fGammas_list: tp.List[float], a: float, b: float, c: float, d: float, T: float, f: tp.Callable[[float, float, float], float], g: tp.Callable[[float, float, float], float], ) -> tp.Callable[[float, float, float], float]: """ :param cond_xy0s_list: list of space points for initial conditions: u(cond_x0_i, cond_y0_i, 0) = cond_f0_i :param cond_xytGammas_list: list of space-time for boundary conditions: u(cond_xGamma_i, cond_yGamma_i, cond_tGamma_i) = cond_fGamma_i :param cond_f0s_list: list of real values for initial conditions: cond_f0_i = u(cond_x0_i, cond_y0_i, 0) :param cond_fGammas_list: list of real values for boundary conditions: cond_fGamma_i = u(cond_xGamma_i, cond_yGamma_i, cond_tGamma_i) :param a: lower bound of the x-domains of g and u :param b: upper bound of the x-domains of g and u :param c: lower bound of the y-domains of g and u :param d: upper bound of the y-domains of g and u :param T: end time :param f: real-valued function of space and time, represents external perturbations in the system. :param g: Green's function of the linear differential operator L :return: real-valued function u of space and time, least squares solution to L u(x, y, t) = f(x, y, t) under initial conditions u(cond_x0_i, cond_y0_i, 0) = cond_f0_i, and boundary conditions u(cond_xGamma_i, cond_yGamma_i, cond_tGamma_i) = cond_fGamma_i. """ def u_infty(x: float, y: float, t: float) -> float: return integrate.tplquad(lambda t_, x_, y_: g(x - x_, y - y_, t - t_) * f(x_, y_, t_), c, d, a, b, 0, T)[0] vec_u0 = np.array([[ cond_f0_i - u_infty(cond_xy0_i[0], cond_xy0_i[1], 0.0) ] for cond_f0_i, cond_xy0_i in zip(cond_f0s_list, cond_xy0s_list)]) vec_uGamma = np.array([[ cond_fGamma_i - u_infty(cond_xytGamma_i[0], cond_xytGamma_i[1], cond_xytGamma_i[2]) ] for cond_fGamma_i, cond_xytGamma_i in zip(cond_fGammas_list, cond_xytGammas_list)]) vec_u = np.vstack((vec_u0, vec_uGamma)) def A11(x: float, y: float) -> np.array: return np.array([[g( cond_x0_i - x, cond_y0_i - y, 0.0 - 0.0, )] for cond_x0_i, cond_y0_i in cond_xy0s_list]) def A12(x: float, y: float, t: float) -> np.array: return np.array([[g( cond_x0_i - x, cond_y0_i - y, 0.0 - t, )] for cond_x0_i, cond_y0_i in cond_xy0s_list]) def A21(x: float, y: float) -> np.array: return np.array([[g( cond_xGamma_i - x, cond_yGamma_i - y, cond_tGamma_i - 0.0, )] for cond_xGamma_i, cond_yGamma_i, cond_tGamma_i in cond_xytGammas_list]) def A22(x: float, y: float, t: float) -> np.array: return np.array([[g( cond_xGamma_i - x, cond_yGamma_i - y, cond_tGamma_i - t, )] for cond_xGamma_i, cond_yGamma_i, cond_tGamma_i in cond_xytGammas_list]) def A(x: float, y: float, t: float) -> np.matrix: return np.vstack(( np.hstack((A11(x, y), A12(x, y, t))), np.hstack((A21(x, y), A22(x, y, t))), )) len0, lenGamma = len(cond_xy0s_list), len(cond_xytGammas_list) P11 = np.matrix([[( integrate.dblquad(lambda x, y: A11(x, y)[i] * A11(x, y)[j], c, d, a, b)[0] + integrate.dblquad(lambda t, y: A12(a, y, t)[i] * A12(a, y, t)[j], c, d, 0, T)[0] + integrate.dblquad(lambda t, y: A12(b, y, t)[i] * A12(b, y, t)[j], c, d, 0, T)[0] + integrate.dblquad(lambda t, x: A12(x, c, t)[i] * A12(x, c, t)[j], a, b, 0, T)[0] + integrate.dblquad(lambda t, x: A12(x, d, t)[i] * A12(x, d, t)[j], a, b, 0, T)[0] ) for j in range(len0)] for i in range(len0)]) P12 = np.matrix([[( integrate.dblquad(lambda x, y: A11(x, y)[i] * 
A21(x, y)[j], c, d, a, b)[0] + integrate.dblquad(lambda t, y: A12(a, y, t)[i] * A22(a, y, t)[j], c, d, 0, T)[0] + integrate.dblquad(lambda t, y: A12(b, y, t)[i] * A22(b, y, t)[j], c, d, 0, T)[0] + integrate.dblquad(lambda t, x: A12(x, c, t)[i] * A22(x, c, t)[j], a, b, 0, T)[0] + integrate.dblquad(lambda t, x: A12(x, d, t)[i] * A22(x, d, t)[j], a, b, 0, T)[0] ) for j in range(lenGamma)] for i in range(len0)]) P21 = np.matrix([[( integrate.dblquad(lambda x, y: A21(x, y)[i] * A11(x, y)[j], c, d, a, b)[0] + integrate.dblquad(lambda t, y: A22(a, y, t)[i] * A12(a, y, t)[j], c, d, 0, T)[0] + integrate.dblquad(lambda t, y: A22(b, y, t)[i] * A12(b, y, t)[j], c, d, 0, T)[0] + integrate.dblquad(lambda t, x: A22(x, c, t)[i] * A12(x, c, t)[j], a, b, 0, T)[0] + integrate.dblquad(lambda t, x: A22(x, d, t)[i] * A12(x, d, t)[j], a, b, 0, T)[0] ) for j in range(len0)] for i in range(lenGamma)]) P22 = np.matrix([[( integrate.dblquad(lambda x, y: A21(x, y)[i] * A21(x, y)[j], c, d, a, b)[0] + integrate.dblquad(lambda t, y: A22(a, y, t)[i] * A22(a, y, t)[j], c, d, 0, T)[0] + integrate.dblquad(lambda t, y: A22(b, y, t)[i] * A22(b, y, t)[j], c, d, 0, T)[0] + integrate.dblquad(lambda t, x: A22(x, c, t)[i] * A22(x, c, t)[j], a, b, 0, T)[0] + integrate.dblquad(lambda t, x: A22(x, d, t)[i] * A22(x, d, t)[j], a, b, 0, T)[0] ) for j in range(lenGamma)] for i in range(lenGamma)]) P = np.vstack(( np.hstack((P11, P12)), np.hstack((P21, P22)), )) def vec_f(x: float, y: float, t: float) -> np.array: return A(x, y, t).T * np.linalg.pinv(P) * vec_u def vec_f0(x: float, y: float, t: float) -> float: return vec_f(x, y, t)[0] def vec_fGamma(x: float, y: float, t: float) -> float: return vec_f(x, y, t)[1] def u_0(x: float, y: float, t: float) -> float: return integrate.dblquad(lambda x_, y_: g(x - x_, y - y_, t - 0.0) * vec_f0(x_, y_, 0.0), c, d, a, b)[0] def u_Gamma(x: float, y: float, t: float) -> float: return integrate.dblquad(lambda t_, y_: g(x - a, y - y_, t - t_) * vec_fGamma(a, y_, t_), c, d, 0, T)[0] + \ integrate.dblquad(lambda t_, y_: g(x - b, y - y_, t - t_) * vec_fGamma(b, y_, t_), c, d, 0, T)[0] + \ integrate.dblquad(lambda t_, x_: g(x - x_, y - c, t - t_) * vec_fGamma(x_, c, t_), a, b, 0, T)[0] + \ integrate.dblquad(lambda t_, x_: g(x - x_, y - d, t - t_) * vec_fGamma(x_, d, t_), a, b, 0, T)[0] def u(x: float, y: float, t: float) -> float: return u_infty(x, y, t) + u_0(x, y, t) + u_Gamma(x, y, t) return u
a8139c014c292b44aee1cf4533a7576413a7e685
26,656
import logging from typing import Callable from typing import Any def log_calls_on_exception( logger: logging.Logger, log_exception: bool = True ) -> GenericDecorator: """ Log calls to the decorated function, when exceptions are raised. Can also decorate classes to log calls to all its methods. :param logger: object to log to :param log_exception: True, to log stacktrace and exception """ def log_function( target: Callable[..., TargetReturnT], # TargetFunctionT, *args: Any, **kwargs: Any ) -> TargetReturnT: try: result = target(*args, **kwargs) except BaseException: if log_exception: logger.exception("Exception") else: logger.info(f"{target.__name__} args: {args!r} {kwargs!r}") raise return result decorator = GenericDecorator(log_function) return decorator
98a186d116547c2929c010b66b9395ba5d5c8603
26,657
def convert_2d_list_to_string(data): """Utility function.""" s = '' for row in data: c = '{' for e in row: c += str(e) + ',' s += c[:-1] + '},\n' return s[:-2]
a6ac2c05f481a339c68ffc3543baba1f1d0d5e8e
26,659
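A minimal usage sketch for convert_2d_list_to_string above; each row becomes a brace-wrapped, comma-separated group and the trailing ',\n' is stripped:

print(convert_2d_list_to_string([[1, 2, 3], [4, 5, 6]]))
# {1,2,3},
# {4,5,6}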
def get_info(sheet, row_num, percentage, sheet_name, mandatory_tables):
    """
    Function is used to create a dictionary that contains
    the number of flawed records for a particular site.

    :param
    sheet (dataframe): pandas dataframe to traverse. Represents a
        sheet with numbers indicating data quality.

    row_num (int): row (0-index) with all of the information for
        the specified site's data quality

    percentage (boolean): used to determine whether or not the
        number is a simple record count (e.g. duplicates)
        versus the percentage of records (e.g. the success rate
        for each of the tables)

    sheet_name (str): name for the sheet for use in the error
        message

    mandatory_tables (lst): contains the tables that should be
        documented for every site and at every date.

    :return:
    err_dictionary (dictionary): key:value pairs represent the
        column name:number that represents the quality of the data

    NOTE: This function was modified from the e-mail generator. This
    function, however, logs ALL of the information in the returned
    error dictionary. This includes 0 values if the data is wholly
    complete.
    """
    if row_num is not None:
        data_info = sheet.iloc[row_num, :]  # series, row labels and values
    else:  # row in future sheets but not current sheet
        data_info = sheet.iloc[1, :]  # just to get the columns
        column_names = data_info.keys()
        null_list = [None] * len(column_names)
        data_info = pd.Series(null_list, column_names)

    err_dictionary = {}

    for col_label, number in data_info.iteritems():
        if col_label in mandatory_tables:
            if number is None or number == 'No Data':  # row does not exist
                err_dictionary[col_label] = float('NaN')
            else:
                try:
                    number = float(number)
                except ValueError:
                    pass
                else:
                    if number < 0:  # just in case
                        raise ValueError("Negative number detected in sheet "
                                         "{} for column {}".format(
                                             sheet_name, col_label))
                    elif percentage and number > 100:
                        raise ValueError("Percentage value > 100 detected in "
                                         "sheet {} for column {}".format(
                                             sheet_name, col_label))
                    # `target_low` is expected as a module-level flag: True
                    # when lower numbers indicate better quality
                    elif percentage and target_low:  # proportion w/ errors
                        err_dictionary[col_label] = round(100 - number, 1)
                    elif percentage and not target_low:  # effective
                        err_dictionary[col_label] = round(number, 1)
                    elif not percentage and number > -1:
                        err_dictionary[col_label] = int(number)
        else:
            pass  # do nothing; do not want to document the column

    # adding all the tables; maintaining consistency for versatility
    for table in mandatory_tables:
        if table not in err_dictionary.keys():
            err_dictionary[table] = float('NaN')

    return err_dictionary
76552ee6cd366642d29c945a289b69efda28ba37
26,661
def get_structural_topology_reactions(filename, dset_path="readdy/config/structural_topology_reactions"): """ Construct a dictionary where the keys are reaction ids and value is corresponding name. :param filename: the file name :param dset_path: path to the dataset :return: dictionary of reactions """ result = dict() with h5py.File(filename, "r") as f: if dset_path in f: structural_reactions = f[dset_path] for r in structural_reactions: result[r["id"]] = r["name"] return result
a3bb4f75740b540c8428d760c087df4dea782a4e
26,662
def PyMapping_Keys(space, w_obj): """On success, return a list of the keys in object o. On failure, return NULL. This is equivalent to the Python expression o.keys().""" return space.call_function(space.w_list, space.call_method(w_obj, "keys"))
452b384a421fd675a53ff20d868b8f7353eb3d79
26,663
import aiohttp import json import asyncio async def safebooru(ctx, tag, page='1'): """Searches safebooru. Usage: safebooru [tags]""" async with aiohttp.ClientSession() as session: invoker = ctx.message.author post = await fetch(session, "https://safebooru.org/index.php?page=dapi&s=post&q=index&limit=1&tags={}&pid={}&json=1".format(tag, page)) obj = json.loads(post) try: directory = obj[0]['directory'] file_name = obj[0]['image'] url = 'https://safebooru.org/images/{}/{}'.format(directory, file_name) tags_raw = obj[0]['tags'] rating = obj[0]['rating'] tags = tags_raw[:200] + (tags_raw[200:] and '..') except Exception as e: await ctx.send('Tag not found.', delete_after=5) return 1 embed = discord.Embed( title="Safebooru", url='https://safebooru.org', color=0x00fff) embed.add_field(name='Rating', value=rating, inline=True) embed.add_field(name='Tags', value=tags, inline=True) embed.set_image(url=url) msg = await ctx.send(embed=embed) if int(page) > 1: await msg.add_reaction('\U00002B05') await msg.add_reaction('\U000027A1') await msg.add_reaction('\U0001F1FD') def check(reaction, user): return user == invoker and reaction.message.id == msg.id and (str(reaction.emoji) == '\U00002B05' or str(reaction.emoji) == '\U000027A1' or str(reaction.emoji) == '\U0001F1FD') try: reaction, user = await bot.wait_for('reaction_add', timeout=120.0, check=check) except asyncio.TimeoutError: return 1 else: if str(reaction.emoji) == '\U00002B05': page = int(page) page -= 1 page = str(page) await msg.delete() await ctx.invoke(safebooru, tag, page) elif str(reaction.emoji) == '\U000027A1': page = int(page) page += 1 page = str(page) await msg.delete() await ctx.invoke(safebooru, tag, page) elif str(reaction.emoji) == '\U0001F1FD': await msg.delete() return
782000d62de1d36abc4b81e0bb4b025707e79940
26,664
import re

def is_arabicrange(text):
    """ Checks whether all characters are in the Arabic Unicode blocks
    @param text: input text
    @type text: unicode
    @return: True if all characters are in the Arabic blocks
    @rtype: Boolean
    """
    if re.search(u"([^\u0600-\u06ff\ufb50-\ufdff\ufe70-\ufeff\u0750-\u077f])", text):
        return False
    return True
70862e901236eb94fec95ac6f7eb673729397e49
26,665
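A quick usage sketch for is_arabicrange above (re is already imported with the function):

print(is_arabicrange(u"سلام"))   # True: every character is inside the Arabic blocks
print(is_arabicrange(u"hello"))  # False: Latin letters fall outside the ranges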
def del_pool(batch_client, config, pool_id=None): # type: (azure.batch.batch_service_client.BatchServiceClient, dict, # str) -> bool """Delete a pool :param batch_client: The batch client to use. :type batch_client: `azure.batch.batch_service_client.BatchServiceClient` :param dict config: configuration dict :param str pool_id: pool id :rtype: bool :return: if pool was deleted """ if util.is_none_or_empty(pool_id): pool_id = settings.pool_id(config) if not util.confirm_action( config, 'delete {} pool'.format(pool_id)): return False logger.info('Deleting pool: {}'.format(pool_id)) batch_client.pool.delete(pool_id) return True
fad5a672920a98305f12e9a7e7c6d665ff874f0a
26,666
def str_repeat(space, s, repeat): """Repeat a string.""" return space.newstr(s * repeat)
3e947da1fa3bf403b0836bd4e7ae0052d310636e
26,667
def alarm(duration=250):
    """
    Red alarm; flashing bright red to dark red.

    :param int duration: The duration between hi/lo brightness, in milliseconds.
    :returns: An infinite Flow consisting of 2 transitions.
    :rtype: Flow
    """
    return Flow(count=0, action=Action.recover, transitions=transitions.alarm(duration))
a501c6a85c78cd37eadba200ca327660945dd4d7
26,668
def mkvc(x, numDims=1):
    """Creates a vector with the number of dimensions specified

    e.g.::

        a = np.array([1, 2, 3])

        mkvc(a, 1).shape
            > (3, )

        mkvc(a, 2).shape
            > (3, 1)

        mkvc(a, 3).shape
            > (3, 1, 1)

    """
    if type(x) == np.matrix:
        x = np.array(x)

    if hasattr(x, 'tovec'):
        x = x.tovec()

    if isinstance(x, Zero):
        return x

    assert isinstance(x, np.ndarray), "Vector must be a numpy array"

    if numDims == 1:
        return x.flatten(order='F')
    elif numDims == 2:
        return x.flatten(order='F')[:, np.newaxis]
    elif numDims == 3:
        return x.flatten(order='F')[:, np.newaxis, np.newaxis]
    raise ValueError("numDims must be 1, 2 or 3, got {}".format(numDims))
e749e0feadcdf69625355477fd22e2f9d363768f
26,669
def location_distance_meters(a: Location, b: Location) -> float: """Calculates the distance between two points. Returns: A number of meters between two points. """ return location_distance_kilometers(a, b).m
91179bc0fc2647d502a290ecc1df28eda8b149f5
26,670
import json

def file_to_dict(file: str):
    """Load a JSON file into a dictionary"""
    try:
        with open(file) as json_file:
            return json.load(json_file)
    except json.decoder.JSONDecodeError:
        print(f'File {file} is not a valid json file. Returning empty dict')
        return {}
    except FileNotFoundError:
        print(f'File {file} does not exist. Returning empty dict')
        return {}
2265f2ad5e10931e93a08bafd8e8a7e20c91ae93
26,671
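A usage sketch for file_to_dict above: write a small JSON file, then load it back; the file names are only for illustration:

import json

with open('settings.json', 'w') as f:
    json.dump({'debug': True}, f)

print(file_to_dict('settings.json'))  # {'debug': True}
print(file_to_dict('missing.json'))   # {} (file does not exist)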
def fieldtype(field): """Return classname""" return field.__class__.__name__
afda2f7a13a2d0be991eadf31ac591762c519f05
26,672
from typing import Dict

def basic_extractor(
    data: Dict,
) -> list:
    """
    Returns a list of the total received tokens, the total sent tokens,
    and the number of transactions the wallet participated in.
    """
    return [data["total_received"], data["total_sent"], data["n_tx"]]
946611423cf98c6104fa49e0ccb82308d741f900
26,673
def expand_stages_cfg(stage_cfgs): """ For a list of stages """ assert isinstance(stage_cfgs, list) ret = [] for x in stage_cfgs: ret.append(expand_stage_cfg(x)) return ret
bb562da9ca5a547fc1c442e3ba8d73b7f7d0768e
26,674
def find_team(): """find a team by using filters from request arguments""" # partial -> allow skipping of required fields ts = TeamSchema( partial=True, only=( "name", "event_id", "team_identifier", "payment_status", "single", "page", ), ) try: _filter = ts.load(request.args) except ValidationError as err: raise FieldValidationFailed(error=err.messages) team_paginated = TeamModel.find(_filter) # returns pagination obj pagination_response = PaginatedResponse( team_paginated, TeamSchema( partial=True, many=True, exclude=("team_identifier", "event_id", "payment.transaction_no"), ), ) return pagination_response.dump()
98aa1a67450aa9c117c5d9d7c158c169c2f7969c
26,675
def clusterbased_permutation_1d_1samp_1sided(results, level=0, p_threshold=0.05, clusterp_threshold=0.05,
                                             n_threshold=2, iter=1000):
    """
    1-sample & 1-sided cluster-based permutation test for 2-D results

    Parameters
    ----------
    results : array
        A result matrix.
        The shape of results should be [n_subs, x]. n_subs represents the number of subjects.
    level : float. Default is 0.
        An expected value in null hypothesis. (Here, results > level)
    p_threshold : float. Default is 0.05.
        The threshold of p-values.
    clusterp_threshold : float. Default is 0.05.
        The threshold of cluster-defining p-values.
    n_threshold : int. Default is 2.
        The threshold of number of values in one cluster (number of values per cluster > n_threshold).
    iter : int. Default is 1000.
        The times for iteration.

    Returns
    -------
    ps : array
        The permutation test results, p-values.
        The shape of ps is [x]. The values in ps should be 0 or 1, which represent not significant point or
        significant point after cluster-based permutation test, respectively.
    """

    nsubs, x = np.shape(results)
    ps = np.zeros([x])
    ts = np.zeros([x])
    for t in range(x):
        ts[t], p = ttest_1samp(results[:, t], level, alternative='greater')
        if p < p_threshold and ts[t] > 0:
            ps[t] = 1
        else:
            ps[t] = 0
    cluster_index, cluster_n = get_cluster_index_1d_1sided(ps)
    if cluster_n != 0:
        cluster_ts = np.zeros([cluster_n])
        for i in range(cluster_n):
            for t in range(x):
                if cluster_index[t] == i + 1:
                    cluster_ts[i] = cluster_ts[i] + ts[t]
        permu_ts = np.zeros([iter])
        chance = np.full([nsubs], level)
        print("\nPermutation test")
        for i in range(iter):
            permu_cluster_ts = np.zeros([cluster_n])
            for j in range(cluster_n):
                for t in range(x):
                    if cluster_index[t] == j + 1:
                        v = np.hstack((results[:, t], chance))
                        vshuffle = np.random.permutation(v)
                        v1 = vshuffle[:nsubs]
                        v2 = vshuffle[nsubs:]
                        permu_cluster_ts[j] = permu_cluster_ts[j] + ttest_rel(v1, v2, alternative="greater")[0]
            permu_ts[i] = np.max(permu_cluster_ts)
            show_progressbar("Calculating", (i+1)*100/iter)
            if i == (iter - 1):
                print("\nCluster-based permutation test finished!\n")
        for i in range(cluster_n):
            index = 0
            for j in range(iter):
                if cluster_ts[i] > permu_ts[j]:
                    index = index + 1
            if index < iter * (1-clusterp_threshold):
                for t in range(x):
                    if cluster_index[t] == i + 1:
                        ps[t] = 0
    newps = np.zeros([x + 2])
    newps[1:x + 1] = ps
    for i in range(x):
        if newps[i + 1] == 1 and newps[i] != 1:
            index = 0
            while newps[i + 1 + index] == 1:
                index = index + 1
            if index < n_threshold:
                newps[i + 1:i + 1 + index] = 0
    ps = newps[1:x + 1]

    return ps
ade205fdd4c256567e0f1ce908f7a711caad2f5d
26,676
def getObjectsContainers(mQueryObject = []):
    """ Return a list of containers that the passed in objects reside in.

    @param [] mQueryObject: list of objects you are wanting to know, in which container they exist.
    @return: key = container name, value = container MObject.
    @rtype: {}
    """
    containerDict = {}
    nodeFn = om2.MFnContainerNode()
    selNodeFn = om2.MFnDependencyNode()
    containerObjs = getAllDagContainers()
    for selObj in mQueryObject:
        for obj in containerObjs:
            nodeFn.setObject(obj)
            if selObj in nodeFn.getMembers():
                selNodeFn.setObject(selObj)
                containerName = str(nodeFn.name())
                # Adds the object to a dictionary, using the container as the key
                if containerName in containerDict:  # dict.has_key() was removed in Python 3
                    containerDict[containerName].append(selNodeFn.object())
                else:
                    containerDict[containerName] = [selNodeFn.object()]
    return containerDict
83da454e85067a2d74f2f251f255a74f3bba41ee
26,677
from webdnn.backend.webgl.attributes.texture_shape import TextureShape from webdnn.backend.webgl.attributes.channel_mode import ChannelMode from typing import Optional def dump_dot(graph: Graph, name: Optional[str] = None) -> str: # pragma: no cover """ Dumps graph into dot language for visualization. Args: graph: Target graph name: Returns: source code of dot language. """ dot_source = "" dot_source += "digraph webdnn_ir {\n" # graph setting dot_source += "graph [\n" if name: dot_source += f"label=\"{name}\"\n" dot_source += "];\n" added_variables = set() def visualize_variable(var: Variable) -> str: if var in added_variables: return "" node_attrs = {} node_attrs["label"] = f"\"{var.name}\n{var.shape}\nOrder={var.order}" if var.has_attribute(TextureShape): node_attrs["label"] += f"\nTextureShape={TextureShape.get(var)}" if var.has_attribute(ChannelMode): node_attrs["label"] += f"\nChannelMode={ChannelMode.get(var).name}" node_attrs["label"] += "\"" if isinstance(var, ConstantVariable): node_attrs["shape"] = "doubleoctagon" else: node_attrs["shape"] = "octagon" if var in graph.inputs: node_attrs["style"] = "\"dashed\"" if var in graph.outputs: node_attrs["style"] = "\"bold\"" dot_source_var = "" dot_source_var += f"var_{id(var)} [\n" dot_source_var += ",".join(f"{attr_key}={attr_value}" for attr_key, attr_value in node_attrs.items()) dot_source_var += "];\n" added_variables.add(var) return dot_source_var for op in listup_operators(graph): op_params = getattr(op, "parameters", {}) op_params_str = "\n".join(f"{k}={v}" for k, v in op_params.items()) dot_source += f"op_{op.name} [label=\"{op.name}\n{op.__class__.__name__}\n{op_params_str}\", shape=box];\n" for connection_name, var in op.inputs.items(): dot_source += visualize_variable(var) dot_source += f"var_{id(var)} -> op_{op.name} [label=\"{connection_name}\"];\n" for connection_name, var in op.outputs.items(): dot_source += visualize_variable(var) dot_source += f"op_{op.name} -> var_{id(var)} [label=\"{connection_name}\"];\n" dot_source += "}" return dot_source
61e993c7383e939109463fd872501a6d7bda3d0d
26,678
def RPL_LUSERCLIENT(sender, recipient, message):
    """ Reply Code 251 """
    return "<" + sender + ">: " + message
4863c4d6945378f315932fadbf8f2615f020c611
26,679
def multiply_inv_gaussians_batch(mus, lambdas):
    """Multiplies a series of Gaussians that is given as a list of mean vectors and a list of precision matrices.

    mus: list of means with shape [..., d]
    lambdas: list of precision matrices with shape [..., d, d]

    Note that an identity matrix is added to the summed precisions, so the
    product implicitly includes an extra standard-normal factor.

    Returns the mean vector, covariance matrix, and precision matrix of the product.
    """
    assert len(mus) == len(lambdas)
    batch_size = mus[0].shape.as_list()[:-1]
    d_z = lambdas[0].shape.as_list()[-1]
    identity_matrix = tf.tile(tf.expand_dims(tf.expand_dims(tf.eye(d_z), axis=0), axis=0), batch_size+[1,1])
    lambda_new = tf.reduce_sum(lambdas, axis=0) + identity_matrix
    mus_summed = tf.reduce_sum([tf.einsum("bcij, bcj -> bci", lamb, mu) for lamb, mu in zip(lambdas, mus)], axis=0)
    sigma_new = tf.linalg.inv(lambda_new)
    mu_new = tf.einsum("bcij, bcj -> bci", sigma_new, mus_summed)
    return mu_new, sigma_new, lambda_new
3239ee6c472506c0b0fcc90c8543deeca0edb02e
26,680
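A NumPy sanity check of the closed form multiply_inv_gaussians_batch implements, shown for a hypothetical two-Gaussian, 2-D case: Lambda_new = sum_i Lambda_i + I and mu_new = Lambda_new^-1 * sum_i Lambda_i mu_i:

import numpy as np

mus = [np.array([1.0, 0.0]), np.array([0.0, 2.0])]
lambdas = [2.0 * np.eye(2), 3.0 * np.eye(2)]
lam_new = sum(lambdas) + np.eye(2)  # the added identity is the standard-normal factor
mu_new = np.linalg.solve(lam_new, sum(l @ m for l, m in zip(lambdas, mus)))
print(mu_new)  # [0.33333333 1.        ]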
def replace_if_present_else_append(
        objlist, obj, cmp=lambda a, b: a == b, rename=None):
    """
    Add an object to a list of objects, if that obj does not already
    exist. If it does exist (`cmp(A, B) == True`), then replace the
    property in the property_list. The names are compared in a
    case-insensitive way.

    Input
    =====
    :objlist, list: list of objects.
    :obj, object: object to add

    Options
    =======
    :cmp, (bool) cmp (A, B): compares A to B. If True, then the objects
        are the same and B should replace A. If False, then B should be
        appended to `objlist`.
    :param rename: Should A be renamed instead of overwritten? If not
        None, then rename should be a unary function that changes the
        name of A.
    :type rename: None or unary function

    Output
    ======
    List is modified in place. A reference to the list is returned.
    """
    for i in range(len(objlist)):
        # was a matching object found in the list?
        if cmp(objlist[i], obj):
            # if so, should the old object be renamed?
            if rename is not None:
                newA = rename(objlist[i])
                # is the renamed object distinct from the object
                # (`obj`) that is to be added to the list?
                if cmp(newA, obj):
                    msg = '`rename` does not make {} unique.'.format(
                        str(objlist[i])[:32])
                    raise ValueError(msg)
                # now that we have newA, replace the original
                # object in the list with `obj`...
                objlist[i] = obj
                #... and replace_if_present_else_append newA.
                replace_if_present_else_append(
                    objlist, newA, cmp=cmp, rename=rename)
            # if the existing object should not be renamed,
            # simply replace.
            else:
                objlist[i] = obj
            # short circuit to exit the for loop and the function.
            return objlist
    # if we get here, then the property was not found. Append.
    objlist.append(obj)
    return objlist
f76b3a76fe973ef91176f8ff4afd34d52ce89317
26,681
def rating_value(value): """Check that given value is integer and between 1 and 5.""" if 1 <= int(value) <= 5: return int(value) raise ValueError("Expected rating between 1 and 5, but got %s" % value)
cadb45a131a423940e1b3a763935f5e40d84285b
26,682
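A usage sketch for rating_value above: in-range strings coerce to int, anything else raises:

print(rating_value('4'))  # 4
try:
    rating_value('7')
except ValueError as err:
    print(err)  # Expected rating between 1 and 5, but got 7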
def hsc_to_hs(ctx): """Process all hsc files into Haskell source files. Args: ctx: Rule context. Returns: list of File: New Haskell source files to use. """ ghc_defs_dump = _make_ghc_defs_dump(ctx) sources = [] for f in ctx.files.srcs: if f.extension == "hsc": sources.append(_process_hsc_file(ctx, ghc_defs_dump, f)) else: sources.append(f) return sources
7672ad9b3679fc663b461f4bcb9eb421293dc185
26,683
def calculate_iou_box(pts1, pts2):
    """
    Measure the IoU of two boxes given as lists of points
    :param pts1: ann.geo coordinates
    :param pts2: ann.geo coordinates
    :return: `float` Intersection over Union of the two shapes
    """
    try:
        from shapely.geometry import Polygon
    except (ImportError, ModuleNotFoundError) as err:
        raise RuntimeError('dtlpy depends on external package. Please install shapely') from err

    if len(pts1) == 2:
        # regular box annotation (2 pts)
        pt1_left_top = [pts1[0][0], pts1[0][1]]
        pt1_right_top = [pts1[0][0], pts1[1][1]]
        pt1_right_bottom = [pts1[1][0], pts1[1][1]]
        pt1_left_bottom = [pts1[1][0], pts1[0][1]]
    else:
        # rotated box annotation (4 pts)
        pt1_left_top = pts1[0]
        pt1_right_top = pts1[3]
        pt1_left_bottom = pts1[1]
        pt1_right_bottom = pts1[2]

    poly_1 = Polygon([pt1_left_top, pt1_right_top, pt1_right_bottom, pt1_left_bottom])

    if len(pts2) == 2:
        # regular box annotation (2 pts)
        pt2_left_top = [pts2[0][0], pts2[0][1]]
        pt2_right_top = [pts2[0][0], pts2[1][1]]
        pt2_right_bottom = [pts2[1][0], pts2[1][1]]
        pt2_left_bottom = [pts2[1][0], pts2[0][1]]
    else:
        # rotated box annotation (4 pts)
        pt2_left_top = pts2[0]
        pt2_right_top = pts2[3]
        pt2_left_bottom = pts2[1]
        pt2_right_bottom = pts2[2]

    poly_2 = Polygon([pt2_left_top, pt2_right_top, pt2_right_bottom, pt2_left_bottom])

    iou = poly_1.intersection(poly_2).area / poly_1.union(poly_2).area
    return iou
fe915dc952852e28214ce1a16f781c55600fc1ec
26,684
def nextafter(x, direction, dtype, itemsize): """Return the next representable neighbor of x in the appropriate direction.""" assert direction in [-1, 0, +1] assert dtype.kind == "S" or type(x) in (bool, float, int) if direction == 0: return x if dtype.kind == "S": return string_next_after(x, direction, itemsize) if dtype.kind in ['b']: return bool_type_next_after(x, direction, itemsize) elif dtype.kind in ['i', 'u']: return int_type_next_after(x, direction, itemsize) elif dtype.kind == "f": if direction < 0: return np.nextafter(x, x - 1) else: return np.nextafter(x, x + 1) # elif dtype.name == "float32": # if direction < 0: # return PyNextAfterF(x,x-1) # else: # return PyNextAfterF(x,x + 1) # elif dtype.name == "float64": # if direction < 0: # return PyNextAfter(x,x-1) # else: # return PyNextAfter(x,x + 1) raise TypeError("data type ``%s`` is not supported" % dtype)
c14f6695eb4285afe3001ac6db019af26a95c78c
26,685
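For the float branch of nextafter above, the behavior reduces to np.nextafter; a quick illustration of the neighboring float64 values around 1.0:

import numpy as np

print(np.nextafter(1.0, 0.0))  # 0.9999999999999999 (next representable float toward -inf)
print(np.nextafter(1.0, 2.0))  # 1.0000000000000002 (next representable float toward +inf)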