content
stringlengths
35
762k
sha1
stringlengths
40
40
id
int64
0
3.66M
import torch import colorsys def getAfinityCenter(width, height, point, center, radius=7, img_affinity=None): """ Function to create the affinity maps, e.g., vector maps pointing toward the object center. Args: width: image wight height: image height point: (x,y) center: (x,y) radius: pixel radius img_affinity: tensor to add to return: return a tensor """ tensor = torch.zeros(2,height,width).float() # Create the canvas for the afinity output imgAffinity = Image.new("RGB", (width,height), "black") totensor = transforms.Compose([transforms.ToTensor()]) draw = ImageDraw.Draw(imgAffinity) r1 = radius p = point draw.ellipse((p[0]-r1,p[1]-r1,p[0]+r1,p[1]+r1),(255,255,255)) del draw # Compute the array to add the afinity array = (np.array(imgAffinity)/255)[:,:,0] angle_vector = np.array(center) - np.array(point) angle_vector = normalize(angle_vector) affinity = np.concatenate([[array*angle_vector[0]],[array*angle_vector[1]]]) # print (tensor) if not img_affinity is None: # Find the angle vector # print (angle_vector) if length(angle_vector) >0: angle=py_ang(angle_vector) else: angle = 0 # print(angle) c = np.array(colorsys.hsv_to_rgb(angle/360,1,1)) * 255 draw = ImageDraw.Draw(img_affinity) draw.ellipse((p[0]-r1,p[1]-r1,p[0]+r1,p[1]+r1),fill=(int(c[0]),int(c[1]),int(c[2]))) del draw re = torch.from_numpy(affinity).float() + tensor return re, img_affinity
5c25274809820f6318b9756680d2967ba8f08a5d
16,400
def unwrap_phase_iterative_fft(mat, iteration=4, win_for=None, win_back=None, weight_map=None): """ Unwrap a phase image using an iterative FFT-based method as described in Ref. [1]. Parameters ---------- mat : array_like 2D array. Wrapped phase-image in the range of [-Pi; Pi]. iteration : int Number of iteration. win_for : array_like 2D array. FFT-window for the forward transform. Generated if None. win_back : array_like 2D array. FFT-window for the backward transform. Making sure there are no zero-values. Generated if None. weight_map : array_like 2D array. Using a weight map if provided. Returns ------- array_like 2D array. Unwrapped phase-image. References ---------- .. [1] https://doi.org/10.1364/AO.56.007079 """ height, width = mat.shape if win_for is None: win_for = _make_window(2 * height, 2 * width, direction="forward") if win_back is None: win_back = _make_window(2 * height, 2 * width, direction="backward") if weight_map is None: weight_map = np.ones_like(mat) mat_unwrap = unwrap_phase_based_fft(mat * weight_map, win_for, win_back) for i in range(iteration): mat_wrap = _wrap_to_pi(mat_unwrap) mat_diff = mat - mat_wrap nmean = np.mean(mat_diff) mat_diff = _wrap_to_pi(mat_diff - nmean) phase_diff = unwrap_phase_based_fft(mat_diff * weight_map, win_for, win_back) mat_unwrap = mat_unwrap + phase_diff return mat_unwrap
e79bb72441379531f70d32d07c4e6e9299a39062
16,401
def wallunderground(idf, bsdobject, deletebsd=True, setto000=False): """return a wall:underground if bsdobject (buildingsurface:detailed) is an underground wall""" # ('WALL:UNDERGROUND', Wall, s.startswith('Ground')) # test if it is an underground wall if bsdobject.Surface_Type.upper() == 'WALL': # Surface_Type == wall if bsdobject.Outside_Boundary_Condition.upper().startswith('GROUND'): # Outside_Boundary_Condition startswith 'ground' simpleobject = idf.newidfobject('WALL:UNDERGROUND') simpleobject.Name = bsdobject.Name simpleobject.Construction_Name = bsdobject.Construction_Name simpleobject.Zone_Name = bsdobject.Zone_Name simpleobject.Azimuth_Angle = bsdobject.azimuth simpleobject.Tilt_Angle = bsdobject.tilt surforigin = bsdorigin(bsdobject, setto000=setto000) simpleobject.Starting_X_Coordinate = surforigin[0] simpleobject.Starting_Y_Coordinate = surforigin[1] simpleobject.Starting_Z_Coordinate = surforigin[2] simpleobject.Length = bsdobject.width simpleobject.Height = bsdobject.height if deletebsd: idf.removeidfobject(bsdobject) return simpleobject return None
739ca431088f049323b64e80649e6b23930d1318
16,402
def canonical_order(match): """ It does not make sense to define a separate bond between atoms 1 and 2, and between atoms 2 and 1. This function will swap the atoms in the bond if the first atom > second atom. """ # match[0][0:2] contains the ID numbers for the 2 atoms in the match atom0 = match[0][0] atom1 = match[0][1] # match[1][0:1] contains the ID numbers for the 1 bond bond0 = match[1][0] if atom0 < atom1: # return ((atom0, atom1), (bond0)) same thing as: return match else: return ((atom1, atom0), (bond0))
ea268fedaa365e0fad3ea49944cc1d1bb5fa7a51
16,403
def grant_db_access_to_role(role, db): # pylint: disable=invalid-name """Grant the role 'database_name', returns grant permission.""" return grant_obj_permission_to_role(role, db, 'database_access')
5adb5f8f06473b20d6e7f386acb889631e042dde
16,404
import json def execute_compute_job(): """Call the execution of a workflow. --- tags: - services consumes: - application/json parameters: - name: consumerAddress in: query description: The consumer address. required: true type: string - name: serviceAgreementId in: query description: The ID of the service agreement. required: true type: string - name: signature in: query description: Signature of the documentId to verify that the consumer has rights to download the asset. type: string - name: workflowDID in: query description: DID of the workflow that is going to start to be executed. type: string responses: 200: description: Call to the operator-service was successful. 400: description: One of the required attributes is missing. 401: description: Invalid asset data. 500: description: Error """ data = request.args required_attributes = [ 'serviceAgreementId', 'consumerAddress', 'signature', 'workflowDID' ] msg, status = check_required_attributes(required_attributes, data, 'consume') if msg: return msg, status if not (data.get('signature')): return f'`signature is required in the call to "consume".', 400 try: agreement_id = data.get('serviceAgreementId') consumer_address = data.get('consumerAddress') asset_id = keeper_instance().agreement_manager.get_agreement(agreement_id).did did = id_to_did(asset_id) if not was_compute_triggered(agreement_id, did, consumer_address, keeper_instance()): msg = ( 'Checking if the compute was triggered failed. Either consumer address does not ' 'have permission to executre this workflow or consumer address and/or service ' 'agreement id is invalid.') logger.warning(msg) return msg, 401 workflow = DIDResolver(keeper_instance().did_registry).resolve(data.get('workflowDID')) body = {"serviceAgreementId": agreement_id, "workflow": workflow.as_dictionary()} response = requests_session.post( get_config().operator_service_url + '/api/v1/operator/init', data=json.dumps(body), headers={'content-type': 'application/json'}) return jsonify({"workflowId": response.content.decode('utf-8')}) except Exception as e: logger.error(f'Error- {str(e)}', exc_info=1) return f'Error : {str(e)}', 500
3601cc1f9001d23d07ded604f9fa241fe11cebd3
16,405
from typing import List from pathlib import Path def files_filter_ext(files: List[Path], ext: str) -> List[Path]: """Filter files from a list matching a extension. Args: files: List of files. ext: Extension to filter. Returns: List of files that have the extension. """ return [f for f in files if f.suffix == ext]
0ed134583f9fa4868111d1475b8be4d67ba4feb7
16,406
import os import click def update(locale_dir, pot_dir, languages, line_width=76): """ Update specified language's po files from pot. :param unicode locale_dir: path for locale directory :param unicode pot_dir: path for pot directory :param tuple languages: languages to update po files :param number line_width: maximum line wdith of po files :return: {'create': 0, 'update': 0, 'notchanged': 0} :rtype: dict """ status = { 'create': 0, 'update': 0, 'notchanged': 0, } for dirpath, dirnames, filenames in os.walk(pot_dir): for filename in filenames: pot_file = os.path.join(dirpath, filename) base, ext = os.path.splitext(pot_file) if ext != ".pot": continue basename = relpath(base, pot_dir) for lang in languages: po_dir = os.path.join(locale_dir, lang, 'LC_MESSAGES') po_file = os.path.join(po_dir, basename + ".po") cat_pot = c.load_po(pot_file) if os.path.exists(po_file): cat = c.load_po(po_file) msgids = set([m.id for m in cat if m.id]) c.update_with_fuzzy(cat, cat_pot) new_msgids = set([m.id for m in cat if m.id]) if msgids != new_msgids: added = new_msgids - msgids deleted = msgids - new_msgids status['update'] += 1 click.echo('Update: {0} +{1}, -{2}'.format( po_file, len(added), len(deleted))) c.dump_po(po_file, cat, line_width) else: status['notchanged'] += 1 click.echo('Not Changed: {0}'.format(po_file)) else: # new po file status['create'] += 1 click.echo('Create: {0}'.format(po_file)) c.dump_po(po_file, cat_pot, line_width) return status
8c86444999323b0cbf63dbdfcf04781b4d893b12
16,407
import six def interp_to_grid(tran,v,expand_x=True,expand_y=True): """ Return dense matrix for X,Y and V (from v, or tran[v] if v is str) expand_x: defaults to 1 more value in the X dimension than in V, suitable for passing to pcolormesh. expand_y: defaults to 1 more value in the Y dimension than in V, for pcolormesh """ if isinstance(v,six.string_types): v=tran[v] x,y,scal,dz=xr.broadcast(get_d_sample(tran),tran.z_ctr,v,get_z_dz(tran)) # important to use .values, as xarray will otherwise muck with # the indexing # coll_u=plot_utils.pad_pcolormesh(x.values,y.values,scal.values,ax=ax) # But we have some additional information on how to pad Y, so do that # here. # Move to numpy land X=x.values Y=y.values Dz=dz.values if expand_y: # Expands the vertical coordinate in the vertical Ybot=Y-0.5*Dz Yexpand=np.concatenate( (Ybot,Ybot[:,-1:]), axis=1) Yexpand[:,-1]=np.nan Yexpand[:,1:]=np.where( np.isfinite(Yexpand[:,1:]), Yexpand[:,1:], Y+0.5*Dz) # Expands the horizontal coordinate in the vertical Xexpand=np.concatenate( (X,X[:,-1:]), axis=1) else: Yexpand=Y Xexpand=X # And expand in the horizontal def safe_midpnt(a,b): ab=0.5*(a+b) invalid=np.isnan(ab) ab[invalid]=a[invalid] invalid=np.isnan(ab) ab[invalid]=b[invalid] return ab if expand_x: dx=utils.center_to_interval(X[:,0]) Xexpand2=np.concatenate( (Xexpand-0.5*dx[:,None], Xexpand[-1:,:]+0.5*dx[-1:,None]), axis=0) Yexpand2=np.concatenate( (Yexpand[:1,:], safe_midpnt(Yexpand[:-1],Yexpand[1:]), Yexpand[-1:,:]), axis=0) else: Xexpand2=Xexpand Yexpand2=Yexpand return Xexpand2,Yexpand2,scal.values
324e42329588860d6fd45cfa06988f49e56ca504
16,408
def simulation(G, tau, gamma, rho, max_time, number_infected_before_release, release_number, background_inmate_turnover, stop_inflow_at_intervention, p, death_rate, percent_infected, percent_recovered, social_distance, social_distance_tau, initial_infected_list): """Runs a simulation on SIR model. Args: G: Networkx graph tau: transmission rate gamma: recovery rate rho: percent of inmates that are initially infected max_time: # of time steps to run simulation number_infected_before_release: number of infected at which to perform release on next integer time release_number: # of inmates to release at release intervention background_inmate_turnover: background # of inmates added/released at each time step stop_inflow_at_intervention: should we stop the background inflow of inmates at intervention time? p: probability of contact between inmate and other inmates death_rate: percent of recovered inmates that die percent_infected: percent of general population that is infected percent_recovered: percent of general population that is recovered social_distance: boolean flag, if we lower transmission rate after major release social_distance_tau: new transmission rate after major release initial_infected_list: sets node numbers of initial infected (default is 0, this parameter is arbitrary) Returns: t: array of times at which events occur S: # of susceptible inmates at each time I: # of infected inmates at each time R: # of recovered inmates at each time D: # of dead inmates at each time step """ print('Starting simulation...') release_occurred = False background_release_number = background_inmate_turnover data_list = [] recovered_list = [] delta_recovered_list = [] # Check we are using initial_infected_list if initial_infected_list is not None: print('Using initial infected list to set initial infected.') infected_list = initial_infected_list.copy() else: # Choose random initial infections based on rho print('Using rho to set initial infected.') infected_list = list(np.random.choice(list(G.nodes), int(np.ceil(rho * len(G.nodes))), replace=False)) # Loop over time for i in range(max_time): # Run 1 time unit of simulation data = EoN.fast_SIR(G, tau, gamma, initial_infecteds=infected_list, initial_recovereds=recovered_list, tmin=i, tmax=i + 1, return_full_data=True) data_list.append(data) # Update infected and recovered inmate lists infected_list, recovered_list = get_infected(data, i + 1), get_recovered(data, i + 1) # Check if release condition has been met if not release_occurred and len(infected_list) >= number_infected_before_release: background_inmate_turnover, r_n, tau = enact_interventions(background_inmate_turnover, background_release_number, i + 1, infected_list, release_number, social_distance, social_distance_tau, stop_inflow_at_intervention, tau) release_occurred = True else: # If not, use background release rate r_n = background_release_number # Add and release inmates G, infected_list, recovered_list, delta_recovered = recalibrate_graph(G, infected_list, recovered_list, background_inmate_turnover, r_n, p, percent_infected, percent_recovered, death_rate) # Track the number of recovered inmates added or released at each time step delta_recovered_list.append(delta_recovered) # Process raw data into t, S, I, R, D arrays t, S, I, R, D = process_data(data_list, delta_recovered_list, death_rate) print('Simulation completed.\n') return t, S, I, R, D
1f9ab389ce2f301266ce6d796c303cfbb5ab4b44
16,409
def relative(link : str): """Convert relative link to absolute""" return f"#{document.URL.split('#')[1]}/{link}"
5f00da06f5277b4a85512b49e9348d8d22949058
16,410
from typing import Tuple from typing import List def _get_axes_names(ndim: int) -> Tuple[List[str], List[str]]: """Get needed axes names given the number of dimensions. Parameters ---------- ndim : int Number of dimensions. Returns ------- axes : List[str] Axes names. coords : List[str] Coordinates names. """ if ndim == 2: axes = [axis for axis in AXES if axis != Axes.ZPLANE.value] coords = [coord for coord in COORDS if coord != Coordinates.Z.value] elif ndim == 3: axes = AXES coords = COORDS else: raise TypeError('expected 2- or 3-D image') return axes, coords
4f9dc40131443520a2f43c287b7d0ab1428a878f
16,411
def multi_replace(text, replace_dict): """Perform multiple replacements in one go using the replace dictionary in format: { 'search' : 'replace' } :param text: Text to replace :type text: `str` :param replace_dict: The replacement strings in a dict :type replace_dict: `dict` :return: `str` :rtype: """ new_text = text for search, replace in list(replace_dict.items()): new_text = new_text.replace(search, str(replace)) return new_text
dc902c988fa57cd9a3d7f4def6089b78d36664c8
16,412
def function_expr(fn: str, args_expr: str = "") -> str: """ DEPRECATED. Please do not add anything else here. In order to manipulate the query, create a QueryProcessor and register it into your dataset. Generate an expression for a given function name and an already-evaluated args expression. This is a place to define convenience functions that evaluate to more complex expressions. """ if fn.startswith("apdex("): match = APDEX_FUNCTION_RE.match(fn) if match: return "(countIf({col} <= {satisfied}) + (countIf(({col} > {satisfied}) AND ({col} <= {tolerated})) / 2)) / count()".format( col=escape_identifier(match.group(1)), satisfied=match.group(2), tolerated=int(match.group(2)) * 4, ) raise ValueError("Invalid format for apdex()") elif fn.startswith("impact("): match = IMPACT_FUNCTION_RE.match(fn) if match: apdex = "(countIf({col} <= {satisfied}) + (countIf(({col} > {satisfied}) AND ({col} <= {tolerated})) / 2)) / count()".format( col=escape_identifier(match.group(1)), satisfied=match.group(2), tolerated=int(match.group(2)) * 4, ) return "(1 - {apdex}) + ((1 - (1 / sqrt(uniq({user_col})))) * 3)".format( apdex=apdex, user_col=escape_identifier(match.group(3)), ) raise ValueError("Invalid format for impact()") elif fn.startswith("failure_rate("): match = FAILURE_RATE_FUNCTION_RE.match(fn) if match: return "countIf(notIn(transaction_status, tuple({ok}, {cancelled}, {unknown}))) / count()".format( ok=SPAN_STATUS_NAME_TO_CODE["ok"], cancelled=SPAN_STATUS_NAME_TO_CODE["cancelled"], unknown=SPAN_STATUS_NAME_TO_CODE["unknown"], ) raise ValueError("Invalid format for failure_rate()") # For functions with no args, (or static args) we allow them to already # include them as part of the function name, eg, "count()" or "sleep(1)" if not args_expr and fn.endswith(")"): return fn # Convenience topK function eg "top10", "top3" etc. topk = TOPK_FUNCTION_RE.match(fn) if topk: return "topK({})({})".format(topk.group(1), args_expr) # turn uniq() into ifNull(uniq(), 0) so it doesn't return null where # a number was expected. if fn == "uniq": return "ifNull({}({}), 0)".format(fn, args_expr) # emptyIfNull(col) is a simple pseudo function supported by Snuba that expands # to the actual clickhouse function ifNull(col, '') Until we figure out the best # way to disambiguate column names from string literals in complex functions. if fn == "emptyIfNull" and args_expr: return "ifNull({}, '')".format(args_expr) # Workaround for https://github.com/ClickHouse/ClickHouse/issues/11622 # Some distributed queries fail when arrays are passed as array(1,2,3) # and work when they are passed as [1, 2, 3] if get_config("format_clickhouse_arrays", 1) and fn == "array": return f"[{args_expr}]" # default: just return fn(args_expr) return "{}({})".format(fn, args_expr)
81fc9dc55c7602722303c2623d20aa88ce12f532
16,413
import subprocess def many_to_one(nrows=[1000], N=5, percentages=[0.025], p=0.5): """ """ def update_joinable_relation_rows(table1, nrows, selecivity_percentage, \ N, relation_from_percentage=-1, \ relation_to_percentage=-1): """ Sample rows for percentage and update the sampled rows return: updated table 1, table2 """ prows = nrows * selecivity_percentage tbl1_sample = sample(table1, int(prows)) rpercentage = nrows if relation_to_percentage > 0: rpercentage = nrows * relation_to_percentage NumOfP1s = rpercentage / N # print(NumOfP1s, prows, rpercentage) tbl1_sample = tbl1_sample.reset_index(drop=True) P1ForJoin = sample(tbl1_sample, int(NumOfP1s+0.7)) values = list(set([row[1]['P1'] for row in P1ForJoin.iterrows()])) values = values * N if len(values) > nrows: values = values[:nrows] table2 = generate_table(nrows, P1start=nrows+1) tbl2_sample = sample(table2, len(values)) # print(len(values), len(list(set((tbl2_sample.index))))) for i, j in zip(values, list(tbl2_sample.index)): table2.loc[j, 'P1'] = i return table1, table2 # Number of rows per table # nrows = [1000, 3000, 10000, 50000, 100000] # value of N (relation size) # N = [5, 10, 15] # 50 % selectivity - percentage of rows, overall, involvd in join from table1 to table2 # p = 0.5 # percentage of rows that are involved in 1-N relation # percentages = [0.25 , 0.5] for nrow in nrows: subprocess.check_call('mkdir -p ../data/relation_type/N-one/'+ str(int(nrow/1000)) + 'k_rows', shell=True) table1 = generate_table(nrow) table1.to_csv('../data/relation_type/N-one/'+ str(int(nrow/1000)) + 'k_rows/table2.csv', index=False ) for rp in percentages: for n in N: table1, table2 = update_joinable_relation_rows(table1, nrow, p, n, -1, rp) table2.to_csv('../data/relation_type/N-one/'+ str(int(nrow/1000)) + 'k_rows/table1_' + \ str(int(100*p)) + "_" + str(n) + "_" + str(int(100*rp)) + '_percent.csv', index=False )
782a1e60d67c3afaaa06871c68c2989502102d65
16,414
from typing import Sequence def distribute(tensor: np.ndarray, grid_shape: Sequence[int], pmap: bool = True) -> pxla.ShardedDeviceArray: """ Convert a numpy array into a ShardedDeviceArray (distributed according to `grid_shape`). It is assumed that the dimensions of `tensor` are evenly divided by `grid`. Args: tensor: A distributed array to be converted into a local numpy tensor. grid_shape: The shape of the processor grid according to which `tensor` is distributed. Returns: ShardedDeviceArray: The distributed tensor Raises: ValueError: If `tensor.shape` is not evenly divisible by `grid_shape` """ if not np.all([s % p == 0 for s, p in zip(tensor.shape, grid_shape)]): raise ValueError(f"tensor.shape = {tensor.shape} not evenly divisible " f"by grid_shape = {grid_shape}.") ndim = tensor.ndim pshape = np.asarray(grid_shape) shape = misc.flatten( [p, s] for s, p in zip(np.array(tensor.shape) // pshape, pshape)) perm = list(range(0, 2 * ndim, 2)) + list(range(1, 2 * ndim, 2)) reshaped = tensor.reshape(shape).transpose(perm) final_shape = (np.prod(reshaped.shape[:ndim]), *reshaped.shape[ndim:]) A = reshaped.reshape(final_shape) if not pmap: return A return jax.pmap(lambda x: x, devices=jax.local_devices())(A)
5e0ca59a23f1cde027769334a938e5855b17bf62
16,415
import os def input_files_exist(paths): """Ensure all the input files actually exist. Args: paths (list): List of paths. Returns: bool: True if they all exist, False otherwise. """ for path in paths: if not os.path.isfile(path): return False return True
7feba57335cdf435950bef1e01b9ab09c1b5f9c1
16,416
def predict(w, X): """ Returns a vector of predictions. """ return expit(X.dot(w))
c3bcb56cdd700ddf96124792b3f356644680e356
16,417
import argparse def get_argument_parser(): """ Parse CLI arguments and return a map of options. """ parser = argparse.ArgumentParser( description="DC/OS Install and Configuration Utility") mutual_exc = parser.add_mutually_exclusive_group() mutual_exc.add_argument( '--hash-password', action=ArgsAction, dest='password', metavar='password', nargs='?', help='Hash a password and print the results to copy into a config.yaml.' ) mutual_exc.add_argument( '--generate-node-upgrade-script', action=ArgsAction, metavar='installed_cluster_version', dest='installed_cluster_version', nargs='?', help='Generate a script that upgrades DC/OS nodes running installed_cluster_version' ) mutual_exc.add_argument( '--generate-node-upgrade-win-script', action=ArgsAction, metavar='installed_cluster_version', dest='installed_cluster_version', nargs='?', help='Generate a powershell script that upgrades Windows nodes running installed_cluster_version' ) parser.add_argument( '-v', '--verbose', action='store_true', help='Verbose log output (DEBUG).') parser.add_argument( '-p', '--port', type=int, default=9000, help=argparse.SUPPRESS) def add_mode(name, help_msg): mutual_exc.add_argument( '--{}'.format(name), action='store_const', const=name, dest='action', help=help_msg) # Add all arg modes for name, value in dispatch_dict_simple.items(): add_mode(name, value[2]) parser.set_defaults(action='genconf') return parser
8c2a635461c60b2d51ae8edeb0e102c15b91481a
16,418
def maskrgb_to_class(mask, class_map): """ decode rgb mask to classes using class map""" h, w, channels = mask.shape[0], mask.shape[1], mask.shape[2] mask_out = -1 * np.ones((h, w), dtype=int) for k in class_map: matches = np.zeros((h, w, channels), dtype=bool) for c in range(channels): matches[:, :, c] = mask[:, :, c] == k[c] matches_total = np.sum(matches, axis=2) valid_idx = matches_total == channels mask_out[valid_idx] = class_map[k] return mask_out
0af4d42fc2dfba4d56bf990df222895b94b3002d
16,419
def translate_error_code(error_code): """ Return the related Cloud error code for a given device error code """ return (CLOUD_ERROR_CODES.get(error_code) if error_code in CLOUD_ERROR_CODES else error_code)
f6cc38b296b330811e932d3e7227d201ed09fe80
16,420
def generate_oi_quads(): """Return a list of quads representing a single OI, OLDInstance. """ old_instance, err = domain.construct_old_instance( slug='oka', name='Okanagan OLD', url='http://127.0.0.1:5679/oka', leader='', state=domain.NOT_SYNCED_STATE, is_auto_syncing=False) old_instance_quads = aol_mod.instance_to_quads( old_instance, domain.OLD_INSTANCE_TYPE) aol = [] for quad in old_instance_quads: aol = aol_mod.append_to_aol(aol, quad) return aol
6b3466e81014d14f88f17e855d726a608afda946
16,421
import torch def graph_intersection(pred_graph, truth_graph): """ Use sparse representation to compare the predicted graph and the truth graph so as to label the edges in the predicted graph to be 1 as true and 0 as false. """ array_size = max(pred_graph.max().item(), truth_graph.max().item()) + 1 l1 = pred_graph.cpu().numpy() l2 = truth_graph.cpu().numpy() e_1 = sp.sparse.coo_matrix((np.ones(l1.shape[1]), l1), shape=(array_size, array_size)).tocsr() e_2 = sp.sparse.coo_matrix((np.ones(l2.shape[1]), l2), shape=(array_size, array_size)).tocsr() e_intersection = (e_1.multiply(e_2) - ((e_1 - e_2)>0)).tocoo() new_pred_graph = torch.from_numpy(np.vstack([e_intersection.row, e_intersection.col])).long().to(device) y = e_intersection.data > 0 return new_pred_graph, y
c63ae9cb52c9a55d54bfb5237c43b1998c51c482
16,422
import requests def get_qid_for_title(title): """ Gets the best Wikidata candidate from the title of the paper. """ api_call = f"https://www.wikidata.org/w/api.php?action=wbsearchentities&search={title}&language=en&format=json" api_result = requests.get(api_call).json() if api_result["success"] == 1: return(api_result["search"][0]["id"])
663db71c7a1bbf1617941ba81c5fa3b7d359e00b
16,423
def experiment(L, T, dL, dT, dLsystm = 0): """ Performs a g-measurement experiment Args: L: A vector of length measurements of the pendulum T: A vector of period measurements of the pendulum dL: The error in length measurement dT: The error in period measurement dLsystm: Systematic error of length measurement, default value 0 Returns: A dictionary with the mean values of g, the g-error values and the measured period values, for each length """ L = L + dLsystm # Add systematic error, if it exists g = np.power(2*np.pi, 2) * L / np.power(T, 2) # Indirect g measurement from # length and period dg = gError(L, T, dL, dT) # g measurement error gMean = np.sum(g)/g.size # Mean value of g measurements dgMean = np.sqrt(np.sum(dg*dg))/dg.size # Error of mean value of g return {'g':gMean, 'dg':dgMean}
cdf7384518fb92295675eb1b15bec883b50a450f
16,424
def find_jobs(schedd=None, attr_list=None, **constraints): """Query the condor queue for jobs matching the constraints Parameters ---------- schedd : `htcondor.Schedd`, optional open scheduler connection attr_list : `list` of `str` list of attributes to return for each job, defaults to all all other keyword arguments should be ClassAd == value constraints to apply to the scheduler query Returns ------- jobs : `list` of `classad.ClassAd` the job listing for each job found """ if schedd is None: schedd = htcondor.Schedd() qstr = ' && '.join(['%s == %r' % (k, v) for k, v in constraints.items()]).replace("'", '"') if not attr_list: attr_list = [] return list(schedd.query(qstr, attr_list))
9a6a32002a945d186ea40c534dbc28805458cec2
16,425
def create_vocab(sequences, min_count, counts): """Generate character-to-idx mapping from list of sequences.""" vocab = {const.SOS: const.SOS_IDX, const.EOS: const.EOS_IDX, const.SEP: const.SEP_IDX} for seq in sequences: for token in seq: for char in token: if char not in vocab and counts[char] >= min_count: vocab[char] = len(vocab) vocab[const.UNK] = len(vocab) return vocab
40ca7b3ed88d4134c2949223ec93ef871a18a8fb
16,426
def get_ind_sphere(mesh, ind_active, origin, radius): """Retreives the indices of a sphere object coordintes in a mesh.""" return ( (mesh.gridCC[ind_active, 0] <= origin[0] + radius) & (mesh.gridCC[ind_active, 0] >= origin[0] - radius) & (mesh.gridCC[ind_active, 1] <= origin[1] + radius) & (mesh.gridCC[ind_active, 1] >= origin[1] - radius) & (mesh.gridCC[ind_active, 2] <= origin[2] + radius) & (mesh.gridCC[ind_active, 2] >= origin[2] - radius) )
9e246c3c0d3d7750a668476f0d0d90b28c46fc27
16,427
def find_frame_times(eegFile, signal_idx=-1, min_interval=40, every_n=1): """Find imaging frame times in LFP data using the pockels blanking signal. Due to inconsistencies in the fame signal, we look for local maxima. This avoids an arbitrary threshold that misses small spikes or includes two nearby time points that are part of the same frame pulse. Parameters ---------- eegFile : str Path to eeg data file signal_idx : int Index of the pockels signal, e.g. eeg[signal_idx, :], default -1 min_interval : int Minimum radius around local maxima to enforce, default 40 every_n : int Return every nth frame time, useful for multiplane data, default 1 Returns ------- frame times : array, shape (n_frame_times, ) """ pc_signal = loadEEG(eegFile.replace('.eeg', ''))['EEG'][signal_idx, :] # break ties for local maxima by increasing first point by 1 same_idx = np.where(np.diff(pc_signal) == 0)[0] pc_signal[same_idx] += 1 pc_signal = np.abs(np.diff(pc_signal)) frame_times = argrelextrema(pc_signal, np.greater, order=min_interval)[0] return frame_times[::every_n]
7ed6a6a5b3d873132575ed5af1d9132d22e3898b
16,428
def _interpolate_signals(signals, sampling_times, verbose=False): """ Interpolate signals at given sampling times. """ # Reshape all signals to one-dimensional array object (e.g. AnalogSignal) for i, signal in enumerate(signals): if signal.ndim == 2: signals[i] = signal.flatten() elif signal.ndim > 2: raise ValueError('elements in fir_rates must have 2 dimensions') if verbose: print('create time slices of the rates...') # Interpolate in the time bins interpolated_signal = np.vstack([_analog_signal_step_interp( signal, sampling_times).rescale('Hz').magnitude for signal in signals]) * pq.Hz return interpolated_signal
b72d8b5bbd55fb70107e36c551cb558953baed50
16,429
def mean_standard_error_residuals(A, b): """ Mean squared error of the residuals. The sum of squared residuals divided by the residual degrees of freedom. """ n, k = A.shape ssr = sum_of_squared_residuals(A, b) return ssr / (n - k)
6860ea11b2f2af29c9b519ef692ee990d2aef149
16,430
def cel2gal(ra, dec): """ Convert celestial coordinates (J2000) to Galactic coordinates. (Much faster than astropy for small arrays.) Parameters ---------- ra : `numpy.array` dec : `numpy.array` Celestical Coordinates (in degrees) Returns ------- glon : `numpy.array` glat : `numpy.array` Galactic Coordinates (in degrees) """ dec = np.radians(dec) sin_dec = np.sin(dec) cos_dec = np.cos(dec) ra = np.radians(ra) ra_gp = np.radians(192.85948) de_gp = np.radians(27.12825) sin_ra_gp = np.sin(ra - ra_gp) cos_ra_gp = np.cos(ra - ra_gp) lcp = np.radians(122.932) sin_b = (np.sin(de_gp) * sin_dec) + (np.cos(de_gp) * cos_dec * cos_ra_gp) lcpml = np.arctan2(cos_dec * sin_ra_gp, (np.cos(de_gp) * sin_dec) - (np.sin(de_gp) * cos_dec * cos_ra_gp)) glat = np.arcsin(sin_b) glon = (lcp - lcpml + (2. * np.pi)) % (2. * np.pi) return np.degrees(glon), np.degrees(glat)
b1185ce199c0f929c3395c452e619b93e2ee66a9
16,431
def index(request): """Renders main website with welcome message""" return render(request, 'mapper/welcome.html', {})
37194ef3ccc415c6db39f664bc819d7df1b9665a
16,432
def tidy_output(differences): """Format the output given by other functions properly.""" out = [] if differences: out.append("--ACLS--") out.append("User Path Port Protocol") for item in differences: #if item[2] != None: #En algunos casos salían procesos con puerto None out.append("%s %s %s %s" % item) # En item queda un elemento que es el protocolo # no se usa en la salida normal return out
2a7007ae16e91b111f556ea95eedc466a8606494
16,433
from typing import Dict from typing import Collection def get_issues_overview_for(db_user: User, app_url: str) -> Dict[str, Collection]: """ Returns dictionary with keywords 'user' and 'others', which got lists with dicts with infos IMPORTANT: URL's are generated for the frontend! :param db_user: User :param app_url: current applications url :return: dict """ if not db_user or db_user.nickname == nick_of_anonymous_user: return { 'user': [], 'other': [] } if db_user.is_admin(): db_issues_other_users = DBDiscussionSession.query(Issue).filter(Issue.author != db_user).all() else: db_issues_other_users = [issue for issue in db_user.accessible_issues if issue.author != db_user] db_issues_of_user = DBDiscussionSession.query(Issue).filter_by(author=db_user).order_by( Issue.uid.asc()).all() return { 'user': [__create_issue_dict(issue, app_url) for issue in db_issues_of_user], 'other': [__create_issue_dict(issue, app_url) for issue in db_issues_other_users] }
02ab5314a961a7fa398df2d43792fed1321939c6
16,434
def load_file(file): """Returns an AdblockRules object using the rules specified in file.""" with open(file) as f: rules = f.readlines() return AdblockRules(rules)
a9783ec4e8a195af688456af1949e33fd17d3cb7
16,435
def isSV0_QSO(gflux=None, rflux=None, zflux=None, w1flux=None, w2flux=None, gsnr=None, rsnr=None, zsnr=None, w1snr=None, w2snr=None, dchisq=None, maskbits=None, objtype=None, primary=None): """Target Definition of an SV0-like QSO. Returns a boolean array. Parameters ---------- See :func:`~desitarget.cuts.set_target_bits`. Returns ------- :class:`array_like` or :class:`float` ``True`` if and only if the object is an SV-like QSO target. If `floats` are passed, a `float` is returned. Notes ----- - Current version (10/14/19) is version 112 on `the SV wiki`_. - Hardcoded for south=False. - Combines all QSO-like SV classes into one bit. """ if primary is None: primary = np.ones_like(rflux, dtype='?') qsocolor_north = isQSO_cuts( primary=primary, zflux=zflux, rflux=rflux, gflux=gflux, w1flux=w1flux, w2flux=w2flux, dchisq=dchisq, maskbits=maskbits, objtype=objtype, w1snr=w1snr, w2snr=w2snr, south=False ) qsorf_north = isQSO_randomforest( primary=primary, zflux=zflux, rflux=rflux, gflux=gflux, w1flux=w1flux, w2flux=w2flux, dchisq=dchisq, maskbits=maskbits, objtype=objtype, south=False ) qsohizf_north = isQSO_highz_faint( primary=primary, zflux=zflux, rflux=rflux, gflux=gflux, w1flux=w1flux, w2flux=w2flux, dchisq=dchisq, maskbits=maskbits, objtype=objtype, south=False ) qsocolor_high_z_north = isQSO_color_high_z( gflux=gflux, rflux=rflux, zflux=zflux, w1flux=w1flux, w2flux=w2flux, south=False ) qsoz5_north = isQSOz5_cuts( primary=primary, gflux=gflux, rflux=rflux, zflux=zflux, gsnr=gsnr, rsnr=rsnr, zsnr=zsnr, w1flux=w1flux, w2flux=w2flux, w1snr=w1snr, w2snr=w2snr, dchisq=dchisq, maskbits=maskbits, objtype=objtype, south=False ) qsocolor_highz_north = (qsocolor_north & qsocolor_high_z_north) qsorf_highz_north = (qsorf_north & qsocolor_high_z_north) qsocolor_lowz_north = (qsocolor_north & ~qsocolor_high_z_north) qsorf_lowz_north = (qsorf_north & ~qsocolor_high_z_north) qso_north = (qsocolor_lowz_north | qsorf_lowz_north | qsocolor_highz_north | qsorf_highz_north | qsohizf_north | qsoz5_north) # ADM The individual routines return arrays, so we need # ADM a check to preserve the single-object case. if _is_row(rflux): return qso_north[0] return qso_north
f8be10f2d5d52ed0afd06a23cf7a9f1a98af1f25
16,436
def show_df_nans(df, columns=None, plot_width=10, plot_height=8): """ Input: df (pandas dataframe), collist (list) Output: seaborn heatmap plot Description: Create a data frame for features which may be nan. Set NaN values be 1 and numeric values to 0. Plots a heat map where dark squares/lines show where data is missing. The columns to plot can be specified with an input param. Otherwise all columns will be plotted -- which appear crowded. """ if not columns: plot_cols = df.columns else: plot_cols = columns df_viznan = pd.DataFrame(data=1, index=df.index, columns=plot_cols) df_viznan[~pd.isnull(df[plot_cols])] = 0 plt.figure(figsize=(plot_width, plot_height)) plt.title('Dark values are nans') return sns.heatmap(df_viznan.astype(float))
5f93f78eee905c81c7178a2a6ed7167597d4964c
16,437
def nocheck(): """Test client for an app that ignores the IP and signature.""" app = flask.Flask(__name__) app.config['DEBUG'] = True app.config['VALIDATE_IP'] = False app.config['VALIDATE_SIGNATURE'] = False return app.test_client()
c2894b6e47e35ee548dd1ab229037f3fbc7c9efd
16,438
def evaluate_accuracy(file1, file2): """ evaluate accuracy """ count = 0 same_count = 0 f1 = open(file1, 'r') f2 = open(file2, 'r') while 1: line1 = f1.readline().strip('\n') line2 = f2.readline().strip('\n') if (not line1) or (not line2): break count += 1 if int(float(line1)) == int(1 if float(line2) > 0.5 else 0): same_count += 1 logger.info("evaluate accuracy: ") logger.info(float(same_count) / count) return float(same_count) / count
52fdb8054de07fe53f77dd74317f18ed1dfbbb36
16,439
import os def find_files(top, exts): """Return a list of file paths with one of the given extensions. Args: top (str): The top level directory to search in. exts (tuple): a tuple of extensions to search for. Returns: a list of matching file paths. """ return [os.path.join(dirpath, name) for dirpath, dirnames, filenames in os.walk(top) for name in filenames if name.endswith(exts)]
bb13d91b234b7411fd51e66fda96d5622ec11a1d
16,440
def add_hp_label(merged_annotations_column, label_type): """Adds prefix to annotation labels that identify the annotation as belonging to the provided label_type (e.g. 'h@' for host proteins). Parameters ---------- merged_annotations_column : array-like (pandas Series)) An array containing sets of annotations that need to be labeled. e.g. 0 {GO:0010008, GO:0070062, IPR036865, GO:0048471... 1 {GO:0006351, GO:0070062, GO:0007623, GO:004851... 2 {GO:0019888, GO:0006470, GO:0001754, GO:009024... label_type : str The prefix to be appended (without the "@" separator). Returns ------- labeled_annotations : array-like (pandas Series) A new pandas Series where all annotations have received a prefix. """ labeled_annotations = merged_annotations_column.map( lambda x: set([label_type + '@' + i for i in x])) return labeled_annotations
648f548931a1fae5d19291d81f2355a0a00877c3
16,441
import sys import json def JsonObj(data): """ Returns json object from data """ try: if sys.version >= '3.0': return json.loads(str(data)) else: return compat_json(json.loads(str(data), object_hook=compat_json), ignore_dicts=True) except Exception as e: # noqa FIXME(sneak) try: return data.__str__() except: # noqa FIXME(sneak) raise ValueError('JsonObj could not parse %s:\n%s' % (type(data).__name__, data.__class__))
12929f620779057d46605c2e9fc67018675c1303
16,442
def _serialize_value( target_expr: str, value_expr: str, a_type: mapry.Type, auto_id: mapry.py.generate.AutoID, py: mapry.Py) -> str: """ Generate the code to serialize a value. The code serializes the ``value_expr`` into the ``target_expr``. :param target_expr: Python expression of the JSONable to be set :param value_expr: Python expression of the value to be serialized :param a_type: the mapry type of the value :param auto_id: generator of unique identifiers :param py: Python settings :return: generated serialization code """ result = '' serialization_expr = _serialization_expr( value_expr=value_expr, a_type=a_type, py=py) if serialization_expr is not None: result = '{} = {}'.format(target_expr, serialization_expr) elif isinstance(a_type, mapry.Array): result = _serialize_array( target_expr=target_expr, value_expr=value_expr, a_type=a_type, auto_id=auto_id, py=py) elif isinstance(a_type, mapry.Map): result = _serialize_map( target_expr=target_expr, value_expr=value_expr, a_type=a_type, auto_id=auto_id, py=py) else: raise NotImplementedError( "Unhandled serialization of type: {}".format(a_type)) return result
6ec6051715ca34771bf32582bb86280d58af27d3
16,443
def return_union_close(): """union of statements, close statement""" return " return __result"
c1a1b6b6b1164a641a7f9e598eec346af13f2aa7
16,444
from typing import Union from typing import List from typing import Tuple def parse_unchanged(value: Union[str, List[str]]) -> Tuple[bool, Union[str, List[str]]]: """Determine if a value is 'unchanged'. Args: value: value supplied by user """ unchanges = [ SETTING_UNCHANGED, str(SETTING_UNCHANGED), SETTING_UNCHANGED[0], str(SETTING_UNCHANGED[0]), ] if value in unchanges: return True, SETTING_UNCHANGED return False, value
b4f1e155064c053fa1df65d242e920ae4ecf2fe5
16,445
def get_val(tup): """Get the value from an index-value pair""" return tup[1]
5966bbbb28006c46eaf11afaef152573aaaa8d2a
16,446
import os def get_slurm_job_nodes(): """Query the SLURM job environment for the number of nodes""" nodes = os.environ.get('SLURM_JOB_NUM_NODES') if nodes is None: nodes = os.environ.get('SLURM_NNODES') if nodes: return int(nodes) print("Warning: could not determine the number of nodes in this SLURM job (%d). Only using 1" % (get_job_id())) return 1
e7b53ee550eaf03363aa8f377f7f71f458cf3fd7
16,447
import os import subprocess def repoinit(testconfig, profiler=None): """Determines revision and sets up the repo. If given the profiler optional argument, wil init the profiler repo instead of the default one.""" revision = '' #Update the repo if profiler == "gnu-profiler": if testconfig.repo_prof is not None: os.chdir(testconfig.repo_prof) else: raise ValueError('Profiling repo is not defined') elif profiler == "google-profiler": if testconfig.repo_gprof is not None: os.chdir(testconfig.repo_gprof) else: raise ValueError('Profiling repo is not defined') else: os.chdir(testconfig.repo) #Checkout specific branch, else maintain main branch if testconfig.branch != 'master': subprocess.call(['git', 'checkout', testconfig.branch]) rev, _ = subprocess.Popen(['git', 'rev-parse', 'HEAD'],\ stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() revision = str(rev).replace("\\n'", '').replace("b'", '') else: subprocess.call(['git checkout master'], shell=True) #Check a specific revision. Else checkout master. if testconfig.revision: subprocess.call(['git', 'checkout', testconfig.revision]) revision = testconfig.revision elif testconfig.branch == 'master': subprocess.call(['git pull'], shell=True) rev, _ = subprocess.Popen(['git rev-parse HEAD'], stdout=subprocess.PIPE,\ stderr=subprocess.PIPE, shell=True).communicate() revision = str(rev).replace("\\n'", '').replace("b'", '') return revision
b2348bb6ba6eb8284119a9b7bcbe162702338946
16,448
def lnprior_d(d,L=default_L): """ Expotentially declining prior. d, L in kpc (default L=0.5) """ if d < 0: return -np.inf return -np.log(2) - 3*np.log(L) + 2*np.log(d) - d/L
fd7cf591c5095fe8129662794b5c05235eda8941
16,449
def textile(text, **args): """This is Textile. Generates XHTML from a simple markup developed by Dean Allen. This function should be called like this: textile(text, head_offset=0, validate=0, sanitize=0, encoding='latin-1', output='ASCII') """ return Textiler(text).process(**args)
051f2aa254c2c24e80640b0779712d2154a1b67d
16,450
def binary_info_gain(df, feature, y): """ :param df: input dataframe :param feature: column to investigate :param y: column to predict :return: information gain from binary feature column """ return float(sum(np.logical_and(df[feature], df[y])))/len(df[feature])
8aa4bbb6997b913001074e15fcdefb5f6047cab3
16,451
def get_all_instances(region): """ Returns a list of all the type of instances, and their instances, managed by the scheduler """ ec2 = boto3.resource('ec2', region_name=region) rds = boto3.client('rds', region_name=region) return { 'EC2': [EC2Schedulable(ec2, i) for i in ec2.instances.all()], 'RDS': [RDSSchedulable(rds, i) for i in rds.describe_db_instances()['DBInstances']] }
daf6c4cc71f19c7b94f625a283eeb59f4fcae10f
16,452
import warnings import asyncio def futures_navigating(urls: list, amap: bool = True) -> dict: """ 异步 基于 drive url list 通过请求高德接口 获得 路径规划结果 :param urls: :param amap: 开关 :return: """ data_collections = [None] * len(urls) pack_data_result = {} all_tasks = [] # 准备 with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) try: event_loop = asyncio.get_event_loop() except Exception as _: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) event_loop = asyncio.get_event_loop() # 线程池 # for idx in range(len(urls)): # all_tasks.append(event_loop.run_in_executor(register.pool, request_navigating, urls[idx], idx, data_collections)) # event_loop.run_until_complete(asyncio.wait(all_tasks)) # 异步io if amap: event_loop.run_until_complete(async_request_navigating(urls, data_collections)) # 获取结果,只获取 ['route']['paths'][0] ,也即只获取第一种策略的数据 for idx in range(len(urls)): # 如果新url请求失败 if not data_collections[idx]: if amap: register.logger.error(f"futures_navigating request failed,new url:{urls[idx]},url_idx:{idx}") data_collections[idx] = default_data_with_navigating_url(urls[idx], idx, data_collections) api_data_result = data_collections[idx] if not pack_data_result: pack_data_result = api_data_result pack_data_result['route']['paths'] = [pack_data_result['route']['paths'][0]] else: pack_data_result['route']['destination'] = api_data_result['route']['destination'] pack_data_result['route']['taxi_cost'] = str( float(pack_data_result['route']['taxi_cost']) + float(api_data_result['route']['taxi_cost'])) pack_data_result['route']['paths'][0]['distance'] = str( float(pack_data_result['route']['paths'][0]['distance']) + float(api_data_result['route']['paths'][0]['distance'])) pack_data_result['route']['paths'][0]['duration'] = str( float(pack_data_result['route']['paths'][0]['duration']) + float(api_data_result['route']['paths'][0]['duration'])) pack_data_result['route']['paths'][0]['tolls'] = str( float(pack_data_result['route']['paths'][0]['tolls']) + float(api_data_result['route']['paths'][0]['tolls'])) pack_data_result['route']['paths'][0]['toll_distance'] = str( float(pack_data_result['route']['paths'][0]['toll_distance']) + float( api_data_result['route']['paths'][0]['toll_distance'])) pack_data_result['route']['paths'][0]['steps'].extend(api_data_result['route']['paths'][0]['steps']) return pack_data_result
4b65488f2c3ba7ac7ecc46a2da949c15bffbbf9b
16,453
def getdim(s): """If s is a representation of a vector, return the dimension.""" if len(s) > 4 and s[0] == "[" and s[-1] == "]": return len(splitargs(s[1:-1], ["(", "["], [")", "]"])) else: return 0
7ad62245ad2ebf7a262f6442ff209b819b1fa36e
16,454
import matplotlib import scipy from scipy import interpolate def unstruct2grid(coordinates, quantity, cellsize, k_nearest_neighbors=3, boundary=None, crop=True): """Convert unstructured model outputs into gridded arrays. Interpolates model variables (e.g. depth, velocity) from an unstructured grid onto a Cartesian grid using inverse-distance-weighted interpolation. Assumes projected (i.e. "flat") geographic coordinates. Accepts coordinates in meters or decimal degrees. Extent of output rasters are based on extent of coordinates. (Function modeled after ANUGA plot_utils code) **Inputs** : coordinates : `list` List [] of (x,y) pairs or tuples of coordinates at which the interpolation quantities are located (e.g. centroids or vertices of an unstructured hydrodynamic model). quantity : `list` List [] of data to be interpolated with indices matching each (x,y) location given in coordinates. If quantity is depth, list would be formatted as [d1, d2, ... , dn]. cellsize : `float or int` Length along one square cell face. k_nearest_neighbors : `int`, optional Number of nearest neighbors to use in the interpolation. If k>1, inverse-distance-weighted interpolation is used. boundary : `list`, optional List [] of (x,y) coordinates used to delineate the boundary of interpolation. Points outside the polygon will be assigned as nan. Format needs to match requirements of matplotlib.path.Path() crop : `bool`, optional If a boundary is specified, setting crop to True will eliminate any all-NaN borders from the interpolated rasters. **Outputs** : interp_func : `function` Nearest-neighbor interpolation function for gridding additional quantities. Quicker to use this output function on additional variables (e.g. later time-steps of an unsteady model) than to make additional function calls to unstruct2grid. Function assumes data have the same coordinates. It is used as follows: "new_gridded_quantity = interp_func(new_quantity)". gridded_quantity : `numpy.ndarray` Array of quantity after interpolation. 
""" cellsize = float(cellsize) # Make sure all input values are floats x = [float(i) for i, j in coordinates] y = [float(j) for i, j in coordinates] quantity = np.array([float(i) for i in quantity]) if len(quantity) != len(x): raise ValueError("Coordinate and quantity arrays must be equal length") # Get some dimensions and make x,y grid nx = int(np.ceil((max(x)-min(x))/cellsize)+1) xvect = np.linspace(min(x), min(x)+cellsize*(nx-1), nx) ny = int(np.ceil((max(y)-min(y))/cellsize)+1) yvect = np.linspace(min(y), min(y)+cellsize*(ny-1), ny) gridX, gridY = np.meshgrid(xvect, yvect) inputXY = np.array([x[:], y[:]]).transpose() gridXY_array = np.array([np.concatenate(gridX), np.concatenate(gridY)]).transpose() gridXY_array = np.ascontiguousarray(gridXY_array) # If a boundary has been specified, create array to index outside it if boundary is not None: path = matplotlib.path.Path(boundary) outside = ~path.contains_points(gridXY_array) # Create Interpolation function if k_nearest_neighbors == 1: # Only use nearest neighbor index_qFun = interpolate.NearestNDInterpolator(inputXY, np.arange(len(x), dtype='int64').transpose()) gridqInd = index_qFun(gridXY_array) # Function to do the interpolation def interp_func(data): if isinstance(data, list): data = np.array(data) gridded_data = data[gridqInd].astype(float) if boundary is not None: gridded_data[outside] = np.nan # Crop to bounds gridded_data.shape = (len(yvect), len(xvect)) gridded_data = np.flipud(gridded_data) if boundary is not None and crop is True: mask = ~np.isnan(gridded_data) # Delete all-nan border gridded_data = gridded_data[np.ix_(mask.any(1), mask.any(0))] return gridded_data else: # Inverse-distance interpolation index_qFun = scipy.spatial.cKDTree(inputXY) NNInfo = index_qFun.query(gridXY_array, k=k_nearest_neighbors) # Weights for interpolation nn_wts = 1./(NNInfo[0]+1.0e-100) nn_inds = NNInfo[1] def interp_func(data): if isinstance(data, list): data = np.array(data) denom = 0. num = 0. for i in list(range(k_nearest_neighbors)): denom += nn_wts[:, i] num += data[nn_inds[:, i]].astype(float)*nn_wts[:, i] gridded_data = (num/denom) if boundary is not None: gridded_data[outside] = np.nan # Crop to bounds gridded_data.shape = (len(yvect), len(xvect)) gridded_data = np.flipud(gridded_data) if boundary is not None and crop is True: mask = ~np.isnan(gridded_data) # Delete all-nan border gridded_data = gridded_data[np.ix_(mask.any(1), mask.any(0))] return gridded_data # Finally, call the interpolation function to create array: gridded_quantity = interp_func(quantity) return interp_func, gridded_quantity
18953cce9808ba73513bbf3d1b277bc7e8913192
16,455
import csv import sys import json def users_bulk_update(file, set_fields, jump_to_index, jump_to_user, limit, workers): """ Bulk-update users from a CSV or Excel (.xlsx) file The CSV file *must* contain a "profile.login" OR an "id" column. All columns which do not contain a dot (".") are ignored. You can only update fields of sub-structures, not top level fields in okta (e.g. you *can* update "profile.site", but you *cannot* update "id"). """ def excel_reader(): wb = load_workbook(filename=file) rows = wb.active.rows # Get the header values as keys and move the iterator to the next item keys = [c.value for c in next(rows)] num_keys = len(keys) for row in rows: values = [c.value for c in row] rv = dict(zip(keys, values[:num_keys])) if any(rv.values()): yield rv def csv_reader(): with open(file, "r", encoding="utf-8") as infile: dialect = csv.Sniffer().sniff(infile.read(4096)) infile.seek(0) dr = csv.DictReader(infile, dialect=dialect) for row in dr: if any(row.values()): yield row def file_reader(): dr = excel_reader() \ if splitext(file)[1].lower() == ".xlsx" else csv_reader() if jump_to_user: tmp = next(dr) while jump_to_user not in ( tmp.get("profile.login", ""), tmp.get("id", "")): tmp = next(dr) elif jump_to_index: # prevent both being used at the same time :) for _ in range(jump_to_index): next(dr) _cnt = 0 for row in dr: if limit and _cnt == limit: break yield row _cnt += 1 def update_user_parallel(_row, index): user_id = None # this is a closure, let's use the outer scope's variables # Set preference to "id" first for field in ("id", "profile.login"): if field in _row and user_id is None: user_id = _row.pop(field) # you can't set top-level fields. pop all of them. _row = {k: v for k, v in _row.items() if k.find(".") > -1} # fields_dict - from outer scope. final_dict = _dict_flat_to_nested(_row, defaults=fields_dict) # user_id check if user_id is None: upd_err.append(( index + jump_to_index, final_dict, "missing user_id column (id or profile.login)" )) return try: upd_ok.append(okta_manager.update_user(user_id, final_dict)) except RequestsHTTPError as e: upd_err.append((index + jump_to_index, final_dict, str(e))) print("Bulk update might take a while. Please be patient.", flush=True) upd_ok = [] upd_err = [] fields_dict = {k: v for k, v in map(lambda x: x.split("="), set_fields)} dr = file_reader() with ThreadPoolExecutor(max_workers=workers) as ex: runs = {idx: ex.submit(update_user_parallel, row, idx) for idx, row in enumerate(dr)} for job in as_completed(runs.values()): pass print(f"{len(runs)} - done.", file=sys.stderr) tmp = {"ok": upd_ok, "errors": upd_err} timestamp_str = dt.now().strftime("%Y%m%d_%H%M%S") rv = "" for name, results in tmp.items(): if len(results): file_name = f"okta-bulk-update-{timestamp_str}-{name}.json" with open(file_name, "w") as outfile: outfile.write(json.dumps(results, indent=2, sort_keys=True)) rv += f"{len(results):>4} {name:6} - {file_name}\n" else: rv += f"{len(results):>4} {name:6}\n" return rv + f"{len(upd_ok) + len(upd_err)} total"
e2b172a1f7db08a9a52d334c3d5c706c5e410871
16,456
def numToString(num: int) -> str: """Write a number in base 36 and return it as a string :param num: number to encode :return: number encoded as a base-36 string """ base36 = '' while num: num, i = divmod(num, 36) base36 = BASE36_ALPHABET[i] + base36 return base36 or BASE36_ALPHABET[0]
419759cdbe7b4e0dcf38c3f79f0de6dec4f84131
16,457
import math def cir(request): """ Return current cirplot """ config={ "markerSizeFactor": float(request.GET.get('marker-size-factor', 1)), "markerColorMap": request.GET.get('marker-color-map', 'winter'), "xAxisLabels": [ (math.radians(10), 'Kontra'), (math.radians(90), 'Unentschieden'), (math.radians(170), 'Pro') ] } data = get_data(request) compass = Compass(data=data, config=config) xml = compass.plotAsXml() # XML POST-MODIFICATIONS # following nodes are renderes as last (for on-top/vertical ordering.) selectedNodeIds = [60] selectedNodeEls = [] # set 100% size root = ET.fromstring(xml) # Convert to XML Element Templat root.attrib['width'] = '100%' root.attrib.pop('height') # Add interactivity to dot-nodes # TODO: do more efficient xpath... scatgrid = root.find('.//*[@id="scatgrid"]') if scatgrid: nodes = scatgrid.findall('.//*[@clip-path]/*') for i in range(len(nodes)): node = nodes[i] node.attrib['id'] = "dot%s" % i # Temporary node.attrib['value'] = "%s" % round(compass.dots[i].value) # Temporary node.attrib['pos'] = "%s" % i # Original Position in List (used for z-index reordering) node.attrib['onclick'] = "dmclick(this, %s);" % i node.attrib['onmouseover'] = "dmover(this, %s);" % i if i in selectedNodeIds: selectedNodeEls.append(node) for sel in selectedNodeEls: g = sel.getparent() scatgrid.append(g) # test_list.insert(0, test_list.pop()) pass # Ad new element # ET.SubElement(root,"use", id='placeholder') # Append Background to XML Image # z = compass.config['zoomFactor']/2 # x, y, r = compass._matplotlib_get_polar_chart_position() # bgEl = ET.fromstring("""<g id="bgpattern"> # <defs> # <path id="meab67247b1" d="M 0 7.284288 C 1.931816 7.284288 3.784769 6.516769 5.150769 5.150769 C 6.516769 3.784769 7.284288 1.931816 7.284288 0 C 7.284288 -1.931816 6.516769 -3.784769 5.150769 -5.150769 C 3.784769 -6.516769 1.931816 -7.284288 0 -7.284288 C -1.931816 -7.284288 -3.784769 -6.516769 -5.150769 -5.150769 C -6.516769 -3.784769 -7.284288 -1.931816 -7.284288 0 C -7.284288 1.931816 -6.516769 3.784769 -5.150769 5.150769 C -3.784769 6.516769 -1.931816 7.284288 0 7.284288 z " style="stroke: #1f77b4; stroke-opacity: 0.75"/> # <linearGradient id="myGradient" > # <stop offset="0%%" stop-color="gold" /> # <stop offset="100%%" stop-color="blue" /> # </linearGradient> # </defs> # <circle cx="%s" cy="%s" r="%s" fill="url('#myGradient')" /> # </g>""" % (x*z, y*z, r*z)) # axes1El = root.find('.//*[@id="axes_1"]') # axes1El.insert(1, bgEl) # export XML content = ET.tostring(root, xml_declaration=True, encoding="UTF-8") return content.decode("utf-8")
6483521c929de9cf1fdfc3da2c32584cab83b6aa
16,458
def parse_enumeration(enumeration_bytes): """Parse enumeration_bytes into a list of test_ids.""" # If subunit v2 is available, use it. if bytestream_to_streamresult is not None: return _v2(enumeration_bytes) else: return _v1(enumeration_bytes)
ce7104f30eda416a59cb1397736886422af866fd
16,459
import re def register_user(username, passwd, email): # type: (str, str, str) -> Optional[str] """Returns an error message or None on success.""" if passwd == "": return "The password can't be empty!" if email: # validate the email only if it is provided result = validate_email_address(email) if result: return result username = username.strip() if not re.match(config.get('nick_regex'), username): return "Invalid username!" crypted_pw = encrypt_pw(passwd) with crawl_db(config.get('password_db')) as db: db.c.execute("select username from dglusers where username=? collate nocase", (username,)) result = db.c.fetchone() if result: return "User already exists!" with crawl_db(config.get('password_db')) as db: query = """ INSERT INTO dglusers (username, email, password, flags, env) VALUES (?, ?, ?, 0, '') """ db.c.execute(query, (username, email, crypted_pw)) db.conn.commit() return None
d7960dc9c66ee584787b9a2b25f367fcbc7455e8
16,460
def _train_n_hmm(data: _Array, m_states: int, n_trails: int):
    """Trains ``n_trails`` HMMs, each initialized with a random tpm.

    Args:
        data:       Possibly unprocessed input data set.
        m_states:   Number of states.
        n_trails:   Number of training restarts.

    Returns:
        Best model with respect to log-likelihood.
    """
    feat = data.round().astype(int)
    trails = []
    for i in range(n_trails):
        hmm = PoissonHmm(feat, m_states, init_gamma='softmax')
        hmm.fit(feat)
        if hmm.success:
            trails.append(hmm)

    if len(trails) == 0:
        return None
    return min(trails, key=lambda hmm: abs(hmm.quality.nll))
db5864ca45ac6fb939a0d1fd63fcb0b61e0ce6b9
16,461
def get_metric_function(name):
    """
    Get a metric from the supported_sklearn_metric_functions dictionary.

    Parameters
    ----------
    name : str
        The name of the metric to get.

    Returns
    -------
    metric : function
        The metric function.
    """
    if name in supported_sklearn_metric_functions:
        return supported_sklearn_metric_functions[name]
    raise ValueError(
        "The metric {} is not supported. Supported metrics are: {}".format(
            name, list(supported_sklearn_metric_functions)
        )
    )
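# Illustrative registry for get_metric_function; the names and contents of the
# real supported_sklearn_metric_functions mapping are an assumption here.
from sklearn.metrics import accuracy_score, f1_score

supported_sklearn_metric_functions = {
    "accuracy": accuracy_score,
    "f1": f1_score,
}

metric = get_metric_function("accuracy")
print(metric([0, 1, 1], [0, 1, 0]))  # 0.666...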
ba490650f55fd5d9a480fc9b9b94c5e71fefe23c
16,462
def ping(enode, count, destination, interval=None, quiet=False, shell=None): """ Perform a ping and parse the result. :param enode: Engine node to communicate with. :type enode: topology.platforms.base.BaseNode :param int count: Number of packets to send. :param str destination: The destination host. :param float interval: The wait interval in seconds between each packet. :param str shell: Shell name to execute commands. If ``None``, use the Engine Node default shell. :rtype: dict :return: The parsed result of the ping command in a dictionary of the form: :: { 'transmitted': 0, 'received': 0, 'errors': 0, 'loss_pc': 0, 'time_ms': 0 } """ assert count > 0 assert destination addr = ip_address(destination) cmd = 'ping' if addr.version == 6: cmd = 'ping6' cmd = [cmd, '-c', str(count), destination] if interval is not None: assert interval > 0 cmd.append('-i') cmd.append(str(interval)) if quiet: cmd.append('-q') ping_raw = enode(' '.join(cmd), shell=shell) assert ping_raw for line in ping_raw.splitlines(): m = match(PING_RE, line) if m: return { k: (int(v) if v is not None else 0) for k, v in m.groupdict().items() } raise Exception('Could not parse ping result')
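# Minimal sketch of a PING_RE that satisfies the parser above; the real pattern
# ships with the topology framework, so this regex is only an assumption.
from re import match

PING_RE = (
    r'^(?P<transmitted>\d+) packets transmitted, (?P<received>\d+) received,'
    r'( \+(?P<errors>\d+) errors,)?'
    r' (?P<loss_pc>\d+)% packet loss, time (?P<time_ms>\d+)ms$'
)

line = '2 packets transmitted, 2 received, 0% packet loss, time 1001ms'
m = match(PING_RE, line)
print({k: int(v) if v is not None else 0 for k, v in m.groupdict().items()})
# {'transmitted': 2, 'received': 2, 'errors': 0, 'loss_pc': 0, 'time_ms': 1001}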
e9fce1819ea21ad801468653e534350e863e123b
16,463
def calculate_EHF_severity( T, T_p95_file=None, EHF_p85_file=None, T_p95_period=None, T_p95_dim=None, EHF_p85_period=None, EHF_p85_dim=None, rolling_dim="time", T_name="t_ref", ): """ Calculate the severity of the Excess Heat Factor index, defined as: EHF_severity = EHF / EHF_p85 where "_p85" denotes the 85th percentile of all positive values using all days in the year and the Excess Heat Factor (EHF) is defined as: EHF = max(0, EHI_sig) * max(1, EHI_accl) with EHI_sig = (T_i + T_i+1 + T_i+2) / 3 – T_p95 EHI_accl = (T_i + T_i+1 + T_i+2) / 3 – (T_i–1 + ... + T_i–30) / 30 T is the daily mean temperature (commonly calculated as the mean of the min and max daily temperatures, usually with daily maximum typically preceding the daily minimum, and the two observations relate to the same 9am-to-9am 24-h period) and T_p95 is the 95th percentile of T using all days in the year. Parameters ---------- T : xarray DataArray Array of daily mean temperature T_p95_file : xarray DataArray, optional Path to a file with the 95th percentiles of T using all days in the year. This should be relative to the project directory. If not provided, T_p95_period and T_p95_dim must be provided EHF_p85_file : xarray DataArray, optional Path to a file with the 85th percentiles of positive EHF using all days in the year. This should be relative to the project directory. If not provided, EHF_p85_period and EHF_p85_dim must be provided T_p95_period : list of str, optional Size 2 iterable containing strings indicating the start and end dates of the period over which to calculate T_p95. Only used if T_p95 is None T_p95_dim : str or list of str, optional The dimension(s) over which to calculate T_p95. Only used if T_p95 is None EHF_p85_period : list of str, optional Size 2 iterable containing strings indicating the start and end dates of the period over which to calculate EHF_p85. Only used if EHF_p85 is None EHF_p85_dim : str or list of str, optional The dimension(s) over which to calculate EHF_p85. Only used if EHF_p85 is None rolling_dim : str, optional The dimension over which to compute the rolling averages in the definition of EHF T_name : str, optional The name of the temperature variable in T References ---------- Nairn et al. 2015: https://doi.org/10.3390/ijerph120100227 """ if EHF_p85_file is None: if (EHF_p85_period is not None) & (EHF_p85_dim is not None): calculate_EHF_p85 = True else: raise ValueError( ( "Must provide either thresholds of the 85th percentile of EHF (E_p85) or details " "of the climatological period and dimensions to use to calculate these thresholds " "(EHF_p85_period and EHF_p85_dim)" ) ) else: EHF_p85_file = PROJECT_DIR / EHF_p85_file EHF_p85 = xr.open_zarr(EHF_p85_file) calculate_EHF_p85 = False EHF = calculate_EHF(T, T_p95_file, T_p95_period, T_p95_dim, rolling_dim, T_name) if calculate_EHF_p85: EHF_p85 = calculate_percentile_thresholds( EHF.where(EHF > 0), 0.85, EHF_p85_period, EHF_p85_dim, frequency=None ) EHF_sev = EHF / EHF_p85 EHF_sev = EHF_sev.rename({"ehf": "ehf_severity"}) EHF_sev["ehf_severity"].attrs["long_name"] = "Severity of the Excess Heat Factor" EHF_sev["ehf_severity"].attrs["standard_name"] = "excess_heat_factor_severity" EHF_sev["ehf_severity"].attrs["units"] = "-" return EHF_sev
e82d54f0bd67c5cd4c938dbdd335aed70fe3c521
16,464
import argparse
def parse_args():
    """Parse input arguments

    Return: parsed arguments structure
    """
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest='type')
    subparsers.required = True

    parser_file = subparsers.add_parser('file')
    parser_file.add_argument(
        "-i", "--input", help="Input file name.", required=True)
    parser_file.add_argument(
        "-d", "--database", help="Token database.", required=True)
    parser_file.add_argument(
        "-o", "--output", help="Output file name.", required=True)

    parser_file = subparsers.add_parser('serial')
    parser_file.add_argument(
        "-i", "--input", help="Input serial port name.", required=True)
    parser_file.add_argument(
        "-d", "--database", help="Token database.", required=True)
    parser_file.add_argument(
        "-o", "--output",
        help="Output file name. Write to stdout and to file.")

    return parser.parse_args()
bb2fe92206bd786492dda2668ffa9c2d0d54004e
16,465
from typing import Dict from typing import List from typing import Tuple def arrange_train_data(keypoints: Dict, beg_end_times: List[Tuple], fps: float, MAX_PERSONS: int) -> Dict: """ Arrange data into frames. Add gestures present or not based on time ranges. Generate each frame and also, add dummy when necessary. """ data = {} for key in keypoints.keys(): persons = list(keypoints[key].keys()) persons.remove("start_frame") persons.remove("end_frame") count_persons = len(persons) gestures_xy = [] start_frame, end_frame = keypoints[key]["start_frame"], keypoints[key]["end_frame"] start_time_ms = start_frame/fps*1000 end_time_ms = end_frame/fps*1000 for per_ind in range(1, count_persons+1): per_str = str(per_ind) gestures_xy.append(keypoints[key][per_str]["person_keypoints"]) # dummy to always have MAX_PERSONS (training to be done in matrices (Required_keypoints x Max_persons x window)) dummy = generate_dummy_keypoints() dummy_frames_list = [] for _ in range(start_frame, end_frame+1): dummy_frames_list.append(dummy) for i in range(MAX_PERSONS - count_persons): gestures_xy.append(dummy_frames_list) frame_division_gestures = list(zip(*gestures_xy)) frames_dict = {} for i, frame in enumerate(frame_division_gestures): frames_dict[str(start_frame + i)] = { "frames": frame, "gesture": False } data[key] = frames_dict for be_time in beg_end_times: if be_time[0] > end_time_ms or be_time[1] < start_time_ms: continue elif be_time[0] < start_time_ms and be_time[1] < end_time_ms: bt = start_time_ms et = be_time[1] elif be_time[0] > start_time_ms and be_time[1] < end_time_ms: bt = be_time[0] et = be_time[1] elif be_time[0] < start_time_ms and be_time[1] > end_time_ms: bt = start_time_ms et = end_time_ms elif be_time[0] > start_time_ms and be_time[1] > end_time_ms: bt = be_time[0] et = end_time_ms # Now using bt and et, find the frame indices with gesture begin_at_frame_ind = int(bt*fps/1000+0.5) no_of_frames = int((et-bt)*fps/1000+0.5) end_at_frame_ind = begin_at_frame_ind + no_of_frames if end_at_frame_ind > int((list(data[key].keys()))[-1]): end_at_frame_ind = int((list(data[key].keys()))[-1]) for frame_no in range(begin_at_frame_ind, end_at_frame_ind+1): data[key][str(frame_no)]["gesture"] = True return data
ef433809c5e59d2b870a84c9cfce9e31faa4659a
16,466
def length(list): """Return the number of items in the list.""" if list == (): return 0 else: _, tail = list return 1 + length(tail)
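# Worked example for length: lists here are encoded as nested (head, tail)
# pairs terminated by the empty tuple.
nested = ('a', ('b', ('c', ())))
assert length(()) == 0
assert length(nested) == 3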
35864cd8cdd065463592d3737077a4d06b38aad1
16,467
from numbers import Number


def buzz(x):
    """
    Takes an input `x` and checks to see if x is a number, and if so, also
    a multiple of 5. If it is both, return 'Buzz'. Otherwise, return the input.
    """
    return 'Buzz' if isinstance(x, Number) and x % 5 == 0 else x
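# Quick illustrative checks: only numeric multiples of 5 are replaced.
assert buzz(10) == 'Buzz'
assert buzz(7) == 7
assert buzz(2.5) == 2.5      # a number, but not a multiple of 5
assert buzz('10') == '10'    # non-numbers pass through unchanged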
b24a37816d218a6cc1d960bfd767cb1a2052067d
16,468
from typing import Iterable from typing import Any from typing import Tuple def _tuple_of_big_endian_int(bit_groups: Iterable[Any]) -> Tuple[int, ...]: """Returns the big-endian integers specified by groups of bits. Args: bit_groups: Groups of descending bits, each specifying a big endian integer with the 1s bit at the end. Returns: A tuple containing the integer for each group. """ return tuple(value.big_endian_bits_to_int(bits) for bits in bit_groups)
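# Usage sketch, assuming `value` above is cirq's value module (which provides
# big_endian_bits_to_int); each bit group is read with the 1s bit last.
from cirq import value

print(_tuple_of_big_endian_int([[1, 0], [1, 1], [0, 0, 1]]))  # (2, 3, 1)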
78d3b739a8d8a3f724dca2b226e976cde93426dd
16,469
def make_simple_server(service, handler, host="localhost", port=9090): """Return a server of type TSimple Server. Based on thriftpy's make_server(), but return TSimpleServer instead of TThreadedServer. Since TSimpleServer's constructor doesn't accept kwargs, some arguments of make_server can't be used here. By default: client_timeout: None protocol: TBinaryProtocolFactory transport: TBufferedTransportFactory """ processor = TProcessor(service, handler) if host and port: server_socket = TServerSocket( host=host, port=port, client_timeout=None) else: raise ValueError("Either host/port or unix_socket must be provided.") server = TSimpleServer(processor, server_socket) return server
42fc9f0bdcfbe4a509d5a682821ea3e71386f699
16,470
def morph(clm1, clm2, t, lmax):
    """Interpolate linearly between the two sets of spherical harmonic coefficients."""
    clm = (1 - t) * clm1 + t * clm2
    grid_reco = clm.expand(lmax=lmax)  # cut "high frequency" components
    agrid_reco = grid_reco.to_array()
    pts = []
    for i, longs in enumerate(agrid_reco):
        ilat = grid_reco.lats()[i]
        for j, value in enumerate(longs):
            ilong = grid_reco.lons()[j]
            th = np.deg2rad(90 - ilat)
            ph = np.deg2rad(ilong)
            r = value + rbias
            p = np.array([sin(th) * cos(ph), sin(th) * sin(ph), cos(th)]) * r
            pts.append(p)
    return pts
a6e6ca0070cc38b54f2bfd41b0fe69e2a5bb21f8
16,471
import re from typing import OrderedDict def read_avg_residuemap(infile): """ Read sequence definition from PSN avg file, returning sequence Map :param infile: File handle pointing to WORDOM avgpsn output file :return: Returns an internal.map.Map object mapping the .pdb residues to WORDOM id's from "Seq" section of the avgpsn-file """ m_start = re.compile("^\*\*\* Seq \*\*\*") m_end = re.compile("^============") m_entry = re.compile("^\s*\d+\s+.:.\d+\s+\d+\.\d+\s*$") residuemap = OrderedDict() reading = False for line in infile: if reading: # Stop reading if end of interaction strength section if m_end.search(line): break else: if m_entry.search(line): [num, resname, normfact] = line.split() residuemap[resname] = int(num) # Start reading when header found elif m_start.search(line): reading = True return residuemap
92c4cbe53edcd3d894a038d7cb9308c653e37146
16,472
import argparse import sys def get_arguments(): """Run argparse and return arguments.""" try: # Use argparse to handle devices as arguments description = 'htop like application for PostgreSQL replication ' + \ 'activity monitoring.' parser = argparse.ArgumentParser(description=description) # -c / --connectstring parser.add_argument( '-c', '--connectstring', dest='connstr', default='', help='Connectstring (default: "").', metavar='CONNECTSTRING') # -r / --role parser.add_argument( '-r', '--role', dest='role', default=None, help='Role (default: "").', metavar='ROLE') # -C / --no-color parser.add_argument( '-C', '--no-color', dest='nocolor', action='store_true', help="Disable color usage.",) # --debug parser.add_argument( '-x', '--debug', dest='debug', action='store_true', help="Enable debug mode for traceback tracking.") args = parser.parse_args() except (argparse.ArgumentError, argparse.ArgumentTypeError) as err: print('pg_activity: error: %s' % str(err)) print('Try "pg_activity --help" for more information.') sys.exit(1) return args
819981583720294a13a1babeb6aa60a9d4c536b1
16,473
def schedule_remove(retval=None): """ schedule(retval=stackless.current) -- switch to the next runnable tasklet. The return value for this call is retval, with the current tasklet as default. schedule_remove(retval=stackless.current) -- ditto, and remove self. """ _scheduler_remove(getcurrent()) r = schedule(retval) return r
8ba10819d4de5cc676583e7b05036be49e6958cf
16,474
def check_region(read, pair, region): """ determine whether or not reads map to specific region of scaffold """ if region is False: return True for mapping in read, pair: if mapping is False: continue start, length = int(mapping[3]), len(mapping[9]) r = [start, start + length - 1] if get_overlap(r, region) > 0: return True return False
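# Minimal sketch of the get_overlap helper assumed by check_region (the real
# helper lives elsewhere in the module): the number of shared positions
# between two inclusive ranges.
def get_overlap(a, b):
    return max(0, min(a[1], b[1]) - max(a[0], b[0]) + 1)

# e.g. a 50 bp read starting at position 90 overlaps the region [100, 200]
# (SAM-style fields: index 3 is the start, index 9 the sequence):
read = [None, None, None, '90', None, None, None, None, None, 'A' * 50]
print(check_region(read, False, [100, 200]))  # True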
c71378d9ee674a117635634e3153f3cb7650fab3
16,475
import sys def create_template_AL_AR(phi, diff_coef, adv_coef, bc_top_type, bc_bot_type, dt, dx, N): """ creates 2 matrices for transport equation AL and AR Args: phi (TYPE): vector of porosity(phi) or 1-phi diff_coef (float): diffusion coefficient adv_coef (float): advection coefficient bc_top_type (string): type of boundary condition bc_bot_type (string): type of boundary condition dt (float): time step dx (float): spatial step N (int): size of mesh Returns: array: AL and AR matrices """ # TODO: error source somewhere in non constant # porosity profile. Maybe we also need d phi/dx s = phi * diff_coef * dt / dx / dx q = phi * adv_coef * dt / dx AL = spdiags( [-s / 2 - q / 4, phi + s, -s / 2 + q / 4], [-1, 0, 1], N, N, format='csr') # .toarray() AR = spdiags( [s / 2 + q / 4, phi - s, s / 2 - q / 4], [-1, 0, 1], N, N, format='csr') # .toarray() if bc_top_type in ['dirichlet', 'constant']: AL[0, 0] = phi[0] AL[0, 1] = 0 AR[0, 0] = phi[0] AR[0, 1] = 0 elif bc_top_type in ['neumann', 'flux']: AL[0,0] = phi[0] + s[0] # + adv_coef * s[0] * dx / diff_coef] - q[0] * adv_coef * dx / diff_coef] / 2 AL[0, 1] = -s[0] AR[0,0] = phi[0] - s[0] # - adv_coef * s[0] * dx / diff_coef] + q[0] * adv_coef * dx / diff_coef] / 2 AR[0, 1] = s[0] else: print('\nABORT!!!: Not correct top boundary condition type...') sys.exit() if bc_bot_type in ['dirichlet', 'constant']: AL[-1, -1] = phi[-1] AL[-1, -2] = 0 AR[-1, -1] = phi[-1] AR[-1, -2] = 0 elif bc_bot_type in ['neumann', 'flux']: AL[-1, -1] = phi[-1] + s[-1] AL[-1, -2] = -s[-1] # / 2 - s[-1] / 2 AR[-1, -1] = phi[-1] - s[-1] AR[-1, -2] = s[-1] # / 2 + s[-1] / 2 else: print('\nABORT!!!: Not correct bottom boundary condition type...') sys.exit() return AL, AR
51b835ab2464189c8d0f10fae25da923f9fb6a07
16,476
def millisecond_to_clocktime(value): """Convert a millisecond time to internal GStreamer time.""" return value * Gst.MSECOND
8359b65a015febedba8bb6b68d310d70b1b8e1a6
16,477
def SE2_exp(v): """ SE2 matrix exponential """ theta, x, y = v if np.abs(theta) < 1e-6: A = 1 - theta**2/6 + theta**4/120 B = theta/2 - theta**3/24 + theta**5/720 else: A = np.sin(theta)/theta B = (1 - np.cos(theta))/theta V = np.array([[A, -B], [B, A]]) R = np.array([ [np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]) u = np.array([[x, y]]).T return np.block([ [R, V.dot(u)], [0, 0, 1]])
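# Illustrative sanity check: for a pure translation (theta = 0) the small-angle
# branch gives A = 1, B = 0, so V = I and the translation is passed through.
import numpy as np

T = SE2_exp([0.0, 1.0, 2.0])
assert np.allclose(T, np.array([[1.0, 0.0, 1.0],
                                [0.0, 1.0, 2.0],
                                [0.0, 0.0, 1.0]]))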
f94454bf4b134fac7b45d89d4c3798b1d6c201fa
16,478
def skip_on_hw(func): """Test decorator for skipping tests which should not be run on HW.""" def decorator(f): def decorated(self, *args, **kwargs): if has_ci_ipus(): self.skipTest("Skipping test on HW") return f(self, *args, **kwargs) return decorated return decorator(func)
6cbde5ce9b70c9d9f71330470f5f2ae913f56021
16,479
def rxns4tag(tag, rdict=None, ver='1.7', wd=None):
    """
    Get a list of all reactions with a given p/l tag

    Notes
    -----
     - This function is useful, but the update to GEOS-Chem flexchem (in >v11)
     will make it redundant, and therefore it is not being maintained.
    """
    # --- get reaction dictionary
    if isinstance(rdict, type(None)):
        rdict = rxn_dict_from_smvlog(wd, ver=ver)

    # --- Caveats -
    # to adapt for long line errors in fortran written output
    errs = ['LO3_36']  # + ['LO3_87']
    cerrs = ['RD95']  # + ['LR48']
    # To account for reaction where not all channels result in Ox loss
    errs += ['RD48']
    cerrs += ['LO3_87']
    if any([(tag == i) for i in errs]):
        tag = cerrs[errs.index(tag)]

    # -- loop reactions, if tag in reaction return reaction
    rxns = []
    for n, rxn in enumerate(rdict.values()):
        expanded_rxn_str = [i.split('+') for i in rxn]
        expanded_rxn_str = [
            item for sublist in expanded_rxn_str for item in sublist]
        # (Issue) Why endswith? Restore to use if it contains any tag
        # if any( [ (i.endswith(tag) ) for i in rxn]):
        # This is because otherwise 'LR10' would be read as 'LR100'
        # if any( [tag in i for i in rxn]):  # <= This will lead to false +ve
        # However, the fortran print statement errs for (LO3_87)
        if any([i.endswith(tag) for i in expanded_rxn_str]):
            rxns.append([list(rdict.keys())[n]] + rxn)

    return rxns
32243bcdb66c9320679d580da2b6f9ee086179d2
16,480
def DateTime_GetCurrentYear(*args, **kwargs): """DateTime_GetCurrentYear(int cal=Gregorian) -> int""" return _misc_.DateTime_GetCurrentYear(*args, **kwargs)
5f25e4387e72497673ea49d6f67f06e9894e29af
16,481
def exp_lr_scheduler(optimizer, epoch, init_lr=0.01, lr_decay_epoch=10):
    """Decay the learning rate by a factor of 0.8 every lr_decay_epoch epochs."""
    # model_out_path = "./model/W_epoch_{}.pth".format(epoch)
    # torch.save(model_W, model_out_path)
    lr = init_lr * (0.8**(epoch // lr_decay_epoch))
    print('LR is set to {}'.format(lr))

    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

    return optimizer
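# Worked example (illustrative): with the defaults, epoch 25 sits in the third
# decay window, so lr = 0.01 * 0.8 ** (25 // 10) = 0.01 * 0.64 = 0.0064.
import torch

model = torch.nn.Linear(4, 2)
opt = torch.optim.SGD(model.parameters(), lr=0.01)
opt = exp_lr_scheduler(opt, epoch=25)
assert abs(opt.param_groups[0]['lr'] - 0.0064) < 1e-9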
2095eda8493e0e53bca5e6f1b9149b544c34da61
16,482
def wait(duration): """ Waits the duration, in seconds, you specify. Args: duration (:any:`DoubleValue`): time, in seconds, this function waits. You may specify fractions of seconds. Returns: float: actual seconds waited. This wait is non-blocking, so other tasks will run while this wait executes. """ init_time = DoubleValue(0) init_time.value = seqtime() while seqtime() - init_time.value < duration.value: nivs_yield() init_time.value = seqtime() return init_time.value
0a40028bbe88d290cc309dc94ef7be48522ded02
16,483
from datetime import timedelta


def yesterday_handler(update: Update, context: CallbackContext):
    """
    Diary content upload handler.
    Uploads incoming messages to db as a note for yesterday.
    """
    # get user timezone
    user_timezone = Dao.get_user_timezone(update.effective_user)
    # calculate time at user's
    user_datetime = update.effective_message.date.astimezone(user_timezone)
    # get yesterday
    user_yesterday = user_datetime - timedelta(days=1)
    # save message content
    save_message_content_by_date(update, context, user_yesterday)
    return ConversationHandler.END
7b37d1157fc40ec01703aac92a5b176d14ab2f27
16,484
def get_lens_pos(sequence): """ Calculate positions of lenses. Returns ------- List of tuples with index and position of OPE in sequence. """ d = 0.0 d_ = [] for idx, ope in enumerate(sequence): if ope.is_lens(): d_.append((idx, d)) else: d += ope.get_travel_length() return d_
f1f1ebb4212406e78e48613584f67f5e2b6f2265
16,485
def disorientation(orientation_matrix, orientation_matrix1, crystal_structure=None):
    """Compute the disorientation between two crystal orientations.

    Considering all the possible crystal symmetries, the disorientation
    is defined as the combination of the minimum misorientation angle
    and the misorientation axis lying in the fundamental zone, which
    can be used to bring the two lattices into coincidence.

    .. note::

     Both orientations are supposed to have the same symmetry. This is not
     necessarily the case in multi-phase materials.

    :param orientation_matrix: the first crystal orientation matrix.
    :param orientation_matrix1: the other crystal orientation matrix from
        which to compute the misorientation angle.
    :param crystal_structure: an instance of the `Symmetry` class describing
        the crystal symmetry, triclinic (no symmetry) by default.
    :returns tuple: the misorientation angle in radians, the axis as a numpy
        vector (crystal coordinates), the axis as a numpy vector (sample
        coordinates).
    """
    the_angle = np.pi
    symmetries = crystal_structure.symmetry_operators()
    (gA, gB) = (orientation_matrix, orientation_matrix1)  # nicknames
    for (g1, g2) in [(gA, gB), (gB, gA)]:
        for j in range(symmetries.shape[0]):
            sym_j = symmetries[j]
            oj = np.dot(sym_j, g1)  # the crystal symmetry operator is left applied
            for i in range(symmetries.shape[0]):
                sym_i = symmetries[i]
                oi = np.dot(sym_i, g2)
                delta = np.dot(oi, oj.T)
                mis_angle = misorientation_angle_from_delta(delta)
                if mis_angle < the_angle:
                    # now compute the misorientation axis, should check if it lies in the fundamental zone
                    mis_axis = misorientation_axis_from_delta(delta)
                    the_angle = mis_angle
                    the_axis = mis_axis
                    the_axis_xyz = np.dot(oi.T, the_axis)
    return the_angle, the_axis, the_axis_xyz
eefca78d7736de073646c97190f736bedb302136
16,486
# DEBUG is assumed to be the application's debug setting (e.g. taken from the
# surrounding app configuration), not the `re` module's regex DEBUG flag.
DEBUG = False


def response_minify(response):
    """
    minify html response to decrease site traffic
    """
    if not DEBUG and response.content_type == u'text/html; charset=utf-8':
        response.set_data(
            minify(response.get_data(as_text=True))
        )

        return response
    return response
f766a6afe2bd7113ea630db739cab21a0ce9b1f8
16,487
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
    """Loads a data file into a list of `InputBatch`s."""

    label_map = {label: i for i, label in enumerate(label_list, 1)}

    features = []
    for (ex_index, example) in enumerate(examples):  # example : InputExample obj
        text_spc_tokens = example.text_a
        aspect_tokens = example.text_b
        sentence_label = example.sentence_label
        aspect_label = example.aspect_label
        polaritiylist = example.polarity
        # mark aspect vs. non-aspect tokens
        tokens = []
        labels = []
        polarities = []
        valid = []
        label_mask = []
        text_spc_tokens.extend(['[SEP]'])
        text_spc_tokens.extend(aspect_tokens)
        enum_tokens = text_spc_tokens
        # text_spc_tokens : sentence tokens + [SEP] + aspect tokens; note this is not yet the canonical BERT-SPC format
        sentence_label.extend(['[SEP]'])
        # sentence_label.extend(['O'])
        sentence_label.extend(aspect_label)
        label_lists = sentence_label
        # if len(enum_tokens) != len(label_lists):
        #     print(enum_tokens)
        #     print(label_lists)
        for i, word in enumerate(enum_tokens):
            # SPC tokens; enum_tokens is not the standard BERT-SPC format here, the special tokens added below make it conform
            token = tokenizer.tokenize(word)  # word-piece tokenization with the BERT tokenizer
            tokens.extend(token)
            label_1 = label_lists[i]
            polarity_1 = polaritiylist[i]
            for m in range(len(token)):
                if m == 0:
                    labels.append(label_1)
                    polarities.append(polarity_1)
                    valid.append(1)
                    label_mask.append(1)
                else:
                    # reached when BERT splits the word into several sub-tokens
                    valid.append(0)
        if len(tokens) >= max_seq_length - 1:  # why trim only the last 2 positions and not more?
            tokens = tokens[0:(max_seq_length - 2)]
            polarities = polarities[0:(max_seq_length - 2)]
            labels = labels[0:(max_seq_length - 2)]
            valid = valid[0:(max_seq_length - 2)]
            label_mask = label_mask[0:(max_seq_length - 2)]
        ntokens = []
        segment_ids = []
        label_ids = []
        ntokens.append("[CLS]")
        segment_ids.append(0)
        valid.insert(0, 1)
        label_mask.insert(0, 1)
        label_ids.append(label_map["[CLS]"])
        # label_ids.append(label_map["O"])
        for i, token in enumerate(tokens):
            ntokens.append(token)
            segment_ids.append(0)
            if len(labels) > i:  # this check seems redundant
                label_ids.append(label_map[labels[i]])
        ntokens.append("[SEP]")  # now in the standard BERT-SPC format
        segment_ids.append(0)
        valid.append(1)
        label_mask.append(1)
        label_ids.append(label_map["[SEP]"])
        # label_ids.append(label_map["O"])
        input_ids_spc = tokenizer.convert_tokens_to_ids(ntokens)
        input_mask = [1] * len(input_ids_spc)
        label_mask = [1] * len(label_ids)
        # import numpy as np
        while len(input_ids_spc) < max_seq_length:  # pad
            input_ids_spc.append(0)
            input_mask.append(0)
            segment_ids.append(0)
            label_ids.append(0)
            valid.append(1)
            label_mask.append(0)
        while len(label_ids) < max_seq_length:
            label_ids.append(0)
            label_mask.append(0)
        while len(polarities) < max_seq_length:
            polarities.append(-1)
        assert len(input_ids_spc) == max_seq_length
        assert len(input_mask) == max_seq_length
        assert len(segment_ids) == max_seq_length
        assert len(label_ids) == max_seq_length
        assert len(valid) == max_seq_length
        assert len(label_mask) == max_seq_length

        # if ex_index < 5:
        #     print("*** Example ***")
        #     print("guid: %s" % (example.guid))
        #     print("tokens: %s" % " ".join(
        #         [str(x) for x in ntokens]))
        #     print("input_ids: %s" % " ".join([str(x) for x in input_ids_spc]))
        #     print("input_mask: %s" % " ".join([str(x) for x in input_mask]))
        #     print("segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
        #     # print("label: %s (id = %d)" % (example.label, label_ids))

        # input_ids_spc = np.array(input_ids_spc)
        # label_ids = np.array(label_ids)
        # labels = np.array(labels)
        # valid = np.array(valid)
        features.append(
            InputFeatures(input_ids_spc=input_ids_spc,
                          input_mask=input_mask,      # attention mask over the non-padded SPC tokens
                          segment_ids=segment_ids,    # all zeros; BERT token_type_ids
                          label_id=label_ids,         # labels for aspect extraction
                          polarities=polarities,      # per-token sentiment polarity; non-aspect positions are marked -1
                          valid_ids=valid,
                          label_mask=label_mask))     # label_mask is effectively the same as input_mask here
    return features
5d844b9d88fa7bbd5532547b772fef9c1811e039
16,488
def to_json_compatible_object(obj): """ This function returns a representation of a UAVCAN structure (message, request, or response), or a DSDL entity (array or primitive), or a UAVCAN transfer, as a structure easily able to be transformed into json or json-like serialization Args: obj: Object to convert. Returns: structure which can easily be transformed into a json-like serialization """ if not isinstance(obj, CompoundValue) and hasattr(obj, 'transfer'): output = dict() if hasattr(obj, 'message'): payload = obj.message output['transfer_type'] = 'Message' elif hasattr(obj, 'request'): payload = obj.request output['transfer_type'] = 'Request' elif hasattr(obj, 'response'): payload = obj.response output['transfer_type'] = 'Response' else: raise ValueError('Cannot generate JSON-compatible object representation for %r' % type(obj)) output['source_node_id'] = obj.transfer.source_node_id output['dest_node_id'] = obj.transfer.dest_node_id output['ts_monotonic'] = obj.transfer.ts_monotonic output['ts_real'] = obj.transfer.ts_real output['transfer_priority'] = obj.transfer.transfer_priority output['datatype'] = '{}'.format(payload._type) output['fields'] = _to_json_compatible_object_impl(payload) return output else: return _to_json_compatible_object_impl(obj)
131fa3a43abf55fd0f51b1e3160ba2f1486d2e25
16,489
import numpy as np
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise
from scipy.linalg import block_diag


def rot_box_kalman_filter(initial_state, Q_std, R_std):
    """
    Tracks a 2D rectangular object (e.g. a bounding box) whose state includes
    position, centroid velocity, dimensions, and rotation angle.

    Parameters
    ----------
    initial_state : sequence of floats
        [x, vx, y, vy, w, h, phi]
    Q_std : float
        Standard deviation to use for process noise covariance matrix
    R_std : float
        Standard deviation to use for measurement noise covariance matrix

    Returns
    -------
    kf : filterpy.kalman.KalmanFilter instance
    """
    kf = KalmanFilter(dim_x=7, dim_z=5)
    dt = 1.0   # time step

    # state mean and covariance
    kf.x = np.array([initial_state]).T
    kf.P = np.eye(kf.dim_x) * 500.

    # no control inputs
    kf.u = 0.

    # state transition matrix
    kf.F = np.eye(kf.dim_x)
    kf.F[0, 1] = kf.F[2, 3] = dt

    # measurement matrix - maps from state space to observation space, so
    # shape is dim_z x dim_x.
    kf.H = np.zeros([kf.dim_z, kf.dim_x])

    # z = Hx. H has nonzero coefficients for the following components of kf.x:
    #   x       y       w       h       phi
    kf.H[0, 0] = kf.H[1, 2] = kf.H[2, 4] = kf.H[3, 5] = kf.H[4, 6] = 1.0

    # measurement noise covariance
    kf.R = np.eye(kf.dim_z) * R_std**2

    # process noise covariance for x-vx or y-vy pairs
    q = Q_discrete_white_noise(dim=2, dt=dt, var=Q_std**2)

    # diagonal process noise sub-matrix for width, height, and phi
    qq = Q_std**2*np.eye(3)

    # process noise covariance matrix for full state
    kf.Q = block_diag(q, q, qq)

    return kf
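# Usage sketch (illustrative): feed noisy [x, y, w, h, phi] measurements of a
# slowly drifting box through filterpy's predict/update loop.
import numpy as np

kf = rot_box_kalman_filter([0., 1., 0., 0.5, 20., 10., 0.1], Q_std=0.1, R_std=1.0)
for t in range(1, 4):
    z = np.array([[1.0 * t], [0.5 * t], [20.0], [10.0], [0.1]])  # dim_z x 1
    kf.predict()
    kf.update(z)
print(kf.x[[0, 2], 0])  # filtered centroid estimate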
ac0bca07b6d7b08c3b27439855fac93bddffcb91
16,490
def validate_schema(path, schema_type): """Validate a single file against its schema""" if schema_type not in _VALID_SCHEMA_TYPES.keys(): raise ValueError(f"No validation schema found for '{schema_type}'") return globals()["validate_" + schema_type](path)
8883226eff948de2b05d442157818ab0b3904e47
16,491
def import_vote_internal(vote, principal, file, mimetype): """ Tries to import the given csv, xls or xlsx file. This is the format used by onegov.ballot.Vote.export(). This function is typically called automatically every few minutes during an election day - we use bulk inserts to speed up the import. :return: A list containing errors. """ csv, error = load_csv( file, mimetype, expected_headers=INTERNAL_VOTE_HEADERS, dialect='excel' ) if error: return [error] ballot_results = {} errors = [] added_entity_ids = {} ballot_types = set() status = 'unknown' entities = principal.entities[vote.date.year] for line in csv.lines: line_errors = [] status = line.status or 'unknown' if status not in STATI: line_errors.append(_("Invalid status")) ballot_type = line.type if ballot_type not in BALLOT_TYPES: line_errors.append(_("Invalid ballot type")) ballot_types.add(ballot_type) added_entity_ids.setdefault(ballot_type, set()) ballot_results.setdefault(ballot_type, []) # the id of the entity entity_id = None try: entity_id = validate_integer(line, 'entity_id') except ValueError as e: line_errors.append(e.args[0]) else: if entity_id not in entities and entity_id in EXPATS: entity_id = 0 if entity_id in added_entity_ids[ballot_type]: line_errors.append( _("${name} was found twice", mapping={ 'name': entity_id })) if entity_id and entity_id not in entities: line_errors.append( _("${name} is unknown", mapping={ 'name': entity_id })) else: added_entity_ids[ballot_type].add(entity_id) # Skip expats if not enabled if entity_id == 0 and not vote.expats: continue # Counted counted = line.counted.strip().lower() == 'true' # the yeas try: yeas = validate_integer(line, 'yeas') except ValueError as e: line_errors.append(e.args[0]) # the nays try: nays = validate_integer(line, 'nays') except ValueError as e: line_errors.append(e.args[0]) # the eligible voters try: eligible_voters = validate_integer(line, 'eligible_voters') except ValueError as e: line_errors.append(e.args[0]) # the empty votes try: empty = validate_integer(line, 'empty') except ValueError as e: line_errors.append(e.args[0]) # the invalid votes try: invalid = validate_integer(line, 'invalid') except ValueError as e: line_errors.append(e.args[0]) # now let's do some sanity checks try: if not eligible_voters: line_errors.append(_("No eligible voters")) if (yeas + nays + empty + invalid) > eligible_voters: line_errors.append(_("More cast votes than eligible voters")) except UnboundLocalError: pass # pass the errors if line_errors: errors.extend( FileImportError(error=err, line=line.rownumber) for err in line_errors ) continue # all went well (only keep doing this as long as there are no errors) if not errors: entity = entities.get(entity_id, {}) ballot_results[ballot_type].append( dict( name=entity.get('name', ''), district=entity.get('district', ''), counted=counted, yeas=yeas, nays=nays, eligible_voters=eligible_voters, entity_id=entity_id, empty=empty, invalid=invalid ) ) if errors: return errors if not any((len(results) for results in ballot_results.values())): return [FileImportError(_("No data found"))] # Add the missing entities for ballot_type in ballot_types: remaining = set(entities.keys()) if vote.expats: remaining.add(0) remaining -= added_entity_ids[ballot_type] for entity_id in remaining: entity = entities.get(entity_id, {}) ballot_results[ballot_type].append( dict( name=entity.get('name', ''), district=entity.get('district', ''), counted=False, entity_id=entity_id ) ) # Add the results to the DB vote.clear_results() vote.status = status 
ballot_ids = {b: vote.ballot(b, create=True).id for b in ballot_types} session = object_session(vote) session.flush() session.bulk_insert_mappings( BallotResult, ( dict(**result, ballot_id=ballot_ids[ballot_type]) for ballot_type in ballot_types for result in ballot_results[ballot_type] ) ) return []
03eacf90418fd68bcf24c0a731f2d1216beb786b
16,492
def get_mail_count(imap, mailbox_list): """ Gets the total number of emails on specified account. Args: imap <imaplib.IMAP4_SSL>: the account to check mailbox_list [<str>]: a list of mailboxes Must be surrounded by double quotes Returns: <int>: total emails """ total = 0 num_mailboxes = len(mailbox_list) for idx, mailbox in enumerate(mailbox_list): print("Counting mail: %d (Mailbox %d of %d) " \ % (total, idx+1, num_mailboxes), end='\r') total += int(imap.select(mailbox)[1][0]) imap.close() print("Counting mail: %d (Mailbox %d of %d) " \ % (total, idx+1, num_mailboxes)) return total
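# Usage sketch (illustrative; substitute your own host, credentials and
# mailboxes). Note the mailbox names are passed already wrapped in double quotes.
import imaplib

imap = imaplib.IMAP4_SSL("imap.example.com")
imap.login("user@example.com", "app-password")
total = get_mail_count(imap, ['"INBOX"', '"Sent"'])
imap.logout()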
8c8fd2d6849d58860f3bd6c20335e7a399bee99d
16,493
def get_bdb_path_by_shoulder_model(shoulder_model, root_path=None): """Get the path to a BerkeleyDB minter file in a minter directory hierarchy. The path may or may not exist. The caller may be obtaining the path in which to create a new minter, so the path is not checked. Args: shoulder_model (Shoulder): The Django ORM model for the shoulder to use for the minting. The model may be a legacy record for N2T based minting, or a record from a minter created in EZID. root_path (str, optional): Path to the root of the minter directory hierarchy. If not provided, the default for EZID is used. Returns: pathlib2.Path """ m = shoulder_model minter_uri = m.minter.strip() if not minter_uri: raise nog.exc.MinterNotSpecified( 'A minter has not been specified (minter field in the database is empty)' ) return pathlib2.Path( _get_bdb_root(root_path), '/'.join(minter_uri.split('/')[-2:]), 'nog.bdb', ).resolve()
c694306d18ed940cf229a46dae6fb72d2207418e
16,494
def getDefuzzificationMethod(name): """Get an instance of a defuzzification method with given name. Normally looks into the fuzzy.defuzzify package for a suitable class. """ m = __import__("fuzzy.defuzzify."+name, fromlist=[name]) c = m.__dict__[name] return c()
c3306ba9fc4ce21eae9adb1bde1b04505dd6b24f
16,495
def celestial(func):
    """
    Transform a point x from cartesian coordinates to celestial coordinates
    and return the function evaluated at the transformed point y
    """
    def f_transf(ref, x, *args, **kwargs):
        y = cartesian_to_celestial(x)
        return func(ref, y, *args)

    return f_transf
e6980abfbc0833639b9d1eb716633d1c6d6dcda2
16,496
def _rescale_to_width( img: Image, target_width: int): """Helper function to rescale image to `target_width`. Parameters ---------- img : PIL.Image Input image object to be rescaled. target_width : int Target width (in pixels) for rescaling. Returns ------- PIL.Image Rescaled image object """ w, h = img.size rescaled_img = img.resize(_scale_wh_by_target_width(w, h, target_width)) return rescaled_img
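# _scale_wh_by_target_width is not shown above; a minimal sketch that preserves
# the aspect ratio (an assumption about its behaviour) would be:
def _scale_wh_by_target_width(w, h, target_width):
    return target_width, max(1, round(h * target_width / w))

# e.g. a 400x300 image rescaled to width 200 becomes 200x150:
from PIL import Image

img = Image.new("RGB", (400, 300))
print(_rescale_to_width(img, 200).size)  # (200, 150)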
38efe7bbbd1681abfd2d48bcf4765b817a28fa27
16,497
def compute_exact_R_P(final_patterns, centones_tab):
    """
    Compute recall and precision using exact matches.
    """
    true = 0
    for tab in final_patterns:
        for centon in centones_tab[tab]:
            check = False
            for p in final_patterns[tab]:
                if centon == p:
                    check = True
            if check:
                true += 1

    all_centones = len([x for y in centones_tab.values() for x in y])
    all_ours = len([x for y in final_patterns.values() for x in y])

    overall_recall = true / all_centones
    overall_precision = true / all_ours

    return overall_recall, overall_precision
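# Worked example: 2 of the 3 annotated centones are recovered exactly and
# 2 of the 4 extracted patterns are correct, so recall = 2/3, precision = 1/2.
centones_tab = {"tab1": ["abc", "def", "ghi"]}
final_patterns = {"tab1": ["abc", "def", "xyz", "uvw"]}
print(compute_exact_R_P(final_patterns, centones_tab))  # (0.666..., 0.5)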
d82e6fccacdc09bb078d6e954150c143994df1ca
16,498
def build_embedding(embedding_matrix, max_len, name):
    """
    Build an embedding layer initialized from a (e.g. LDA-derived) weight matrix.
    :param embedding_matrix: initial embedding weights, shape (vocab_size, embedding_dim)
    :param max_len: input sequence length
    :param name: layer name
    :return: a Keras Embedding layer
    """
    # build embedding with initial weights
    topic_emmd = Embedding(embedding_matrix.shape[0],
                           embedding_matrix.shape[1],
                           weights=[embedding_matrix],
                           input_length=max_len,
                           trainable=True,
                           name=name)
    return topic_emmd
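# Usage sketch, assuming the (tf.)Keras 2 Embedding layer that the original
# module imports: wrap a random 1000x50 weight matrix and embed a padded batch.
import numpy as np
from tensorflow.keras.layers import Embedding

emb = build_embedding(np.random.rand(1000, 50), max_len=30, name="topic_embedding")
out = emb(np.zeros((2, 30), dtype="int32"))
print(out.shape)  # (2, 30, 50)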
87a89462bfb2eee285099353f78e151394c7c74a
16,499