Columns: content (string, length 35 to 762k), sha1 (string, length 40), id (int64, 0 to 3.66M)
def mean_IoU(Y_true, Y_pred):
    """
    Calculate the mean IoU score between two lists of labeled masks.
    :param Y_true: a list of labeled masks (numpy arrays) - the ground truth
    :param Y_pred: a list of labeled predicted masks (numpy arrays) for images with the original dimensions
    :return: mean IoU score for corresponding images
    """
    image_precs = []
    for y_true, y_pred in zip(Y_true, Y_pred):
        true_objects = len(np.unique(y_true))
        pred_objects = len(np.unique(y_pred))

        # Compute intersection between all objects
        intersection = np.histogram2d(y_true.flatten(), y_pred.flatten(),
                                      bins=(true_objects, pred_objects))[0]

        # Compute areas (needed for finding the union between all objects)
        area_true = np.histogram(y_true, bins=true_objects)[0]
        area_pred = np.histogram(y_pred, bins=pred_objects)[0]
        area_true = np.expand_dims(area_true, -1)
        area_pred = np.expand_dims(area_pred, 0)

        # Compute union
        union = area_true + area_pred - intersection

        # Exclude background from the analysis
        intersection = intersection[1:, 1:]
        union = union[1:, 1:]
        union[union == 0] = 1e-9

        # Compute the intersection over union
        iou = intersection / union

        # Precision helper function
        def precision_at(threshold, iou):
            matches = iou > threshold
            true_positives = np.sum(matches, axis=1) == 1   # Correctly matched objects
            false_positives = np.sum(matches, axis=0) == 0  # Extra predicted objects
            false_negatives = np.sum(matches, axis=1) == 0  # Missed ground-truth objects
            tp, fp, fn = np.sum(true_positives), np.sum(false_positives), np.sum(false_negatives)
            return tp, fp, fn

        # Loop over IoU thresholds
        prec = []
        for t in np.arange(0.5, 1.0, 0.05):
            tp, fp, fn = precision_at(t, iou)
            p = tp / (tp + fp + fn)
            prec.append(p)
        image_precs.append(prec)

    return [np.mean(image_precs), image_precs]
79581b1015512653f428a93c0e61cd5d451f031e
32,829
def get_matrix_header(filename):
    """
    Returns the entries, rows, and cols of a matrix market file.
    """
    with open(filename) as f:
        entries = 0
        rows = 0
        cols = 0
        for line in f.readlines():
            if line.startswith('%'):
                continue
            line = line.split()
            entries = int(line[0])
            rows = int(line[1])
            cols = int(line[2])
            return entries, rows, cols
66200661715cb9a67522ced7b13d4140a3905c28
32,830
def get_slot_names(slotted_instance):
    """Get all slot names in a class with slots."""
    # thanks: https://stackoverflow.com/a/6720815/782170
    return slotted_instance.__slots__
bd0f5b58964444396ceae7facb916012a4fb7c8a
32,831
def make_standard_fisher_regularizer(make_logits, scope, should_regularize, perturbation, differentiate_probability): """Creates per-example logits and the per-example standard Fisher-Rao norm. This function assumes the model of a categorical distribution generated by a softmax function. The standard Fisher-Rao norm uses the model distribution computed from the logits by the softmax function to estimate the Fisher information matrix. The empirical training distribution is used for the input values. Args: make_logits: Function, returns `Tensor` representing the per-example logits. The expected shape of the tensor is such that the number of categories is the last dimension. scope: String, name of `VariableScope` to use for the `Variable` objects that represent the regularized parameter. should_regularize: Function, takes a variable name as String and returns Boolean that decides whether the variable should be regularized. The passed variable name includes the name of the scope. perturbation: Float, finite difference perturbation constant. The choice of perturbation constant represents a tradeoff between rounding and approximation error and should depend on floating point precision and parameter norm. differentiate_probability: Boolean, determines whether the label probability distribution should be differentiated. Returns: A tuple of `Tensor` objects representing the per-example logits and the scalar standard Fisher-Rao norm regularization loss. Raises: ValueError: if the last dimension of the logits shape is not statically inferrable. """ collector = VariableCollector() with tf.variable_scope(scope, custom_getter=collector.collector_getter): logits = make_logits() if logits.shape[-1].value is None: raise ValueError("The size of the last dimension of the logits vector must" " be statically inferrable.") with tf.variable_scope( scope, custom_getter= make_perturbation_getter(should_regularize, collector, perturbation)): perturbed_logits = make_logits() log_probs = tf.nn.log_softmax(logits, axis=-1) perturbed_log_probs = tf.nn.log_softmax(perturbed_logits, axis=-1) stop_probs = tf.stop_gradient(tf.exp(log_probs)) log_prob_derivative = (tf.square((perturbed_log_probs - log_probs) / perturbation)) if differentiate_probability: prob_regularizer_loss = (log_prob_derivative * stop_probs + tf.stop_gradient(log_prob_derivative) * log_probs * stop_probs - tf.stop_gradient(log_prob_derivative * log_probs * stop_probs)) else: prob_regularizer_loss = log_prob_derivative * stop_probs regularizer = logits.shape[-1].value * tf.reduce_mean(prob_regularizer_loss) return (logits, regularizer)
b8edc41ccce39511e147fdfeb919c30ad65e8a85
32,832
def objective(y_objective, sim_param, curve_param):
    """
    Calculates the objective function (RMS-VIF) given the control point y-values of a given particle

    :param y_objective: control point y-values of the particle
    :param sim_param: Instance of sim_param
    :param curve_param: Instance of curve_param
    :return: RMS-VIF
    """
    curve = initialize_NURBS(curve_param, sim_param, y_objective)  # builds NURBS from the control point y-values
    R = calculate_R_from_curve(curve, sim_param)  # builds the PDE-FIND system matrix from the NURBS IC
    rms_vif = calculate_rms_vif(R)  # calculate RMS-VIF from the matrix R
    return rms_vif
8191aa0cc346ea596c88d3cdff86c3a3abc3ccca
32,833
def shortest_first_name(names):
    """Returns the shortest first name (str)"""
    names = dedup_and_title_case_names(names)
    name_dict = []
    for i in names:
        i = i.split()
        name_dict.append({'name': i[0], 'surname': i[1]})
    short_name_sort = sorted(name_dict, key=lambda k: len(k['name']))
    return short_name_sort[0]['name']
868d5d977d4ef3aa4264fa1644ca5f142920bbe4
32,835
def timezone_validator(self, response):
    """Match timezone code in libraries/timezone.

    Arguments
    ---------
    response: "String containing current answer"

    Raises
    ------
    ValidationError: "Display a short description with available formats"

    Returns
    -------
    boolean: True
    """
    timezone_list = open('libraries/timezone').read()
    if ('{response}\n'.format(response=response) not in timezone_list) or \
            (response == ''):
        raise ValidationError('', reason=self.trad(
            'Invalid timezone: {response} (e.g., Europe/Paris)').format(
                response=response))
    return True
080e56256a72a254e1c5940d16a5a89b693a3ad6
32,836
def reset_clb_srv(req):
    """
    Service callback that resets the pose of the state planner.
    :param req:
    :return:
    """
    global reset_pose, pose_msgs, goal_sub, global_offset
    rospy.loginfo_once("reset pose")
    resp = TriggerResponse()
    resp.success = True
    resp.message = "Reset pose: True"
    reset_pose = pose_msgs.pose.pose.position
    print("reset position:", reset_pose)
    global_offset.linear.x = 0.
    global_offset.linear.y = 0.
    global_offset.linear.z = 0.
    return resp
14deae77fd35fa256760164978eacf7cee1421ad
32,837
import tkinter def Calcola(): """Calcolate point of task!""" esci=False while esci is False: try: check=False while check is False: DeadLine=input("inserisci la deadline in giorni ") try: DeadLine = int(DeadLine) check=True except ValueError: root = tkinter.Tk() root.withdraw() mb.showerror("errore","errore non è stato inserito un numero") root.destroy() check=False while check is False: livello=input("Livello(da 1-10 secondo la gravità del problema) ") try: livello = int(livello) if livello<1 or livello>10: root = tkinter.Tk() root.withdraw() mb.showerror("errore","errore il valore inserito non è valido") root.destroy() else: check=True except ValueError: root = tkinter.Tk() root.withdraw() mb.showerror("errore","errore non è stato inserito un numero") root.destroy() check=False while check is False: difficolta=input("inserire il livello di difficolta stimato da 1-10 ") try: difficolta = int(difficolta) if difficolta<1 or difficolta>10: root = tkinter.Tk() root.withdraw() mb.showerror("errore","errore il valore inserito non è valido") root.destroy() else: check=True esci=True except ValueError: root = tkinter.Tk() root.withdraw() mb.showerror("errore","errore non è stato inserito un numero") root.destroy() except KeyboardInterrupt: root = tkinter.Tk() root.withdraw() mb.showerror("errore","non puoi uscire") root.destroy() punteggio=250-DeadLine-livello-difficolta return punteggio
7f9d03a2c45a3dd06172368213000c38ffa6532d
32,838
import six def disabled(name): """ Ensure an Apache module is disabled. .. versionadded:: 2016.3.0 name Name of the Apache module """ ret = {"name": name, "result": True, "comment": "", "changes": {}} is_enabled = __salt__["apache.check_mod_enabled"](name) if is_enabled: if __opts__["test"]: msg = "Apache module {0} is set to be disabled.".format(name) ret["comment"] = msg ret["changes"]["old"] = name ret["changes"]["new"] = None ret["result"] = None return ret status = __salt__["apache.a2dismod"](name)["Status"] if isinstance(status, six.string_types) and "disabled" in status: ret["result"] = True ret["changes"]["old"] = name ret["changes"]["new"] = None else: ret["result"] = False ret["comment"] = "Failed to disable {0} Apache module".format(name) if isinstance(status, six.string_types): ret["comment"] = ret["comment"] + " ({0})".format(status) return ret else: ret["comment"] = "{0} already disabled.".format(name) return ret
edc69d3ad8b03c739a01e28d50aacbf00db54d9c
32,839
def get_stops_in_polygon(feed, polygon, geo_stops=None): """ Return the slice of ``feed.stops`` that contains all stops that lie within the given Shapely Polygon object that is specified in WGS84 coordinates. Parameters ---------- feed : Feed polygon : Shapely Polygon Specified in WGS84 coordinates geo_stops : Geopandas GeoDataFrame A geographic version of ``feed.stops`` which will be computed if not given. Specify this parameter in batch jobs to avoid unnecessary computation. Returns ------- DataFrame Subset of ``feed.stops`` Notes ----- - Requires GeoPandas - Assume the following feed attributes are not ``None``: * ``feed.stops``, if ``geo_stops`` is not given """ if geo_stops is not None: f = geo_stops.copy() else: f = geometrize_stops(feed.stops) cols = f.columns f['hit'] = f['geometry'].within(polygon) f = f[f['hit']][cols] return ungeometrize_stops(f)
cf42652a1a00f9f70f51d5bc9597733ca9d89cf6
32,840
def define_stkvar(*args): """ define_stkvar(pfn, name, off, flags, ti, nbytes) -> bool Define/redefine a stack variable. @param pfn: pointer to function (C++: func_t *) @param name: variable name, NULL means autogenerate a name (C++: const char *) @param off: offset of the stack variable in the frame. negative values denote local variables, positive - function arguments. (C++: sval_t) @param flags: variable type flags ( byte_flag() for a byte variable, for example) (C++: flags_t) @param ti: additional type information (like offsets, structs, etc) (C++: const opinfo_t *) @param nbytes: number of bytes occupied by the variable (C++: asize_t) @return: success """ return _ida_frame.define_stkvar(*args)
bbd52e35a92dcd84afe990e0519a2a5e26abe5b8
32,841
def spherical_polar_area(r, lon, lat):
    """Calculates the area bounding an array of latitude and longitude points.

    Parameters
    ----------
    r : float
        Radius of sphere.
    lon : 1d array
        Longitude points. [Degrees]
    lat : 1d array
        Latitude points. [Degrees]

    Returns
    -------
    areas : 2d array
    """
    mid_dlon = (lon[2:] - lon[:-2]) / 2.0
    s_dlon = lon[1] - lon[0]
    e_dlon = lon[-1] - lon[-2]
    dlon = np.hstack((s_dlon, mid_dlon, e_dlon))

    mid_dlat = (lat[2:] - lat[:-2]) / 2.0
    s_dlat = lat[1] - lat[0]
    e_dlat = lat[-1] - lat[-2]
    dlat = np.hstack((s_dlat, mid_dlat, e_dlat))

    dlon, dlat = np.deg2rad(dlon), np.deg2rad(dlat)
    gdlon, gdlat = np.meshgrid(dlon, dlat)
    solid_angle = gdlon.T * gdlat.T * np.cos(np.deg2rad(lat))
    return solid_angle.T * r ** 2
a07b73f6e04ee64b06d1e663dfff7ff971d00bf5
32,842
def row_plays(hand, row): """Return the set of legal plays in the specified row. A row play is a (start, 'WORD') pair, """ results = set() # for each anchor and for each legal prefix, add all legal suffixes and save any valid words in results for (i, square) in enumerate(row[1: -1], start=1): if isinstance(square, Anchor): prefix, max_size = legal_prefix(i, row) # there are already letters in the board, to the left of this anchor if prefix: start = i - len(prefix) add_suffixes(hand, prefix, start, row, results, anchored=False) # the board is empty to the left of this anchor else: for prefix in find_prefixes(hand): if len(prefix) <= max_size: start = i - len(prefix) add_suffixes(removed(hand, prefix), prefix, start, row, results, anchored=False) return results
5b98f30fb8a932f31bc9bc0b22f21480880c3302
32,843
def nearest_pillar(grid, xy, ref_k0=0, kp=0):
    """Returns the (j0, i0) indices of the primary pillar with point closest in x,y plane to point xy."""
    # note: currently works with unmasked data and using primary pillars only
    pe_i = grid.extent_kji[2] + 1
    sum_dxy2 = grid.pillar_distances_sqr(xy, ref_k0=ref_k0, kp=kp)
    ji = np.nanargmin(sum_dxy2)
    j, i = divmod(ji, pe_i)
    return (j, i)
2338698963cb6bddca8d9702e000e5be125e6e86
32,844
def read_state(file, statename):
    """
    Read and select a state from a shapefile.

    Args:
        file (str): path to state shapefile
        statename (str): name of state (i.e. California)
    """
    states = gpd.read_file(file)
    state = states[states['STATE_NAME'] == statename]
    return state
0732d01dfb466ac00622964dfd3a9d0655367fbf
32,845
def get_random_state(seed):
    """
    Get a random number from the whole range of large integer values.
    """
    np.random.seed(seed)
    return np.random.randint(MAX_INT)
abf99c5547d146bfc9e6e1d33e7970d7090ba6d2
32,846
def get_start_and_end_time(file_or_file_object): """ Returns the start and end time of a MiniSEED file or file-like object. :type file_or_file_object: str or file :param file_or_file_object: MiniSEED file name or open file-like object containing a MiniSEED record. :return: tuple (start time of first record, end time of last record) This method will return the start time of the first record and the end time of the last record. Keep in mind that it will not return the correct result if the records in the MiniSEED file do not have a chronological ordering. The returned end time is the time of the last data sample and not the time that the last sample covers. .. rubric:: Example >>> from obspy.core.util import get_example_file >>> filename = get_example_file( ... "BW.BGLD.__.EHE.D.2008.001.first_10_records") >>> get_start_and_end_time(filename) # doctest: +NORMALIZE_WHITESPACE (UTCDateTime(2007, 12, 31, 23, 59, 59, 915000), UTCDateTime(2008, 1, 1, 0, 0, 20, 510000)) It also works with an open file pointer. The file pointer itself will not be changed. >>> f = open(filename, 'rb') >>> get_start_and_end_time(f) # doctest: +NORMALIZE_WHITESPACE (UTCDateTime(2007, 12, 31, 23, 59, 59, 915000), UTCDateTime(2008, 1, 1, 0, 0, 20, 510000)) And also with a MiniSEED file stored in a BytesIO >>> import io >>> file_object = io.BytesIO(f.read()) >>> get_start_and_end_time(file_object) # doctest: +NORMALIZE_WHITESPACE (UTCDateTime(2007, 12, 31, 23, 59, 59, 915000), UTCDateTime(2008, 1, 1, 0, 0, 20, 510000)) >>> file_object.close() If the file pointer does not point to the first record, the start time will refer to the record it points to. >>> _ = f.seek(512) >>> get_start_and_end_time(f) # doctest: +NORMALIZE_WHITESPACE (UTCDateTime(2008, 1, 1, 0, 0, 1, 975000), UTCDateTime(2008, 1, 1, 0, 0, 20, 510000)) The same is valid for a file-like object. >>> file_object = io.BytesIO(f.read()) >>> get_start_and_end_time(file_object) # doctest: +NORMALIZE_WHITESPACE (UTCDateTime(2008, 1, 1, 0, 0, 1, 975000), UTCDateTime(2008, 1, 1, 0, 0, 20, 510000)) >>> f.close() """ # Get the starttime of the first record. info = get_record_information(file_or_file_object) starttime = info['starttime'] # Get the end time of the last record. info = get_record_information( file_or_file_object, (info['number_of_records'] - 1) * info['record_length']) endtime = info['endtime'] return starttime, endtime
4c094a8fb1d9e186a0bbcbff25b7af8ac5461131
32,847
from typing import Optional import json import asyncio async def send_message(msg: str) -> Optional[str]: """ Send a message to the websocket and return the response Args: msg: The message to be sent Returns: The response message or None if it was not defined """ if not (WS and WS.open): print('Reconnecting websocket') await connect_ws() print(f'Sending message {msg}') data = { 'text': msg } json_data = json.dumps(data) await WS.send(json_data) try: resp = json.loads(await asyncio.wait_for(WS.recv(), 3)) except asyncio.TimeoutError: return new_message = resp['text'] return new_message
d6395c3f74baf1900d99f605a08b77990637be8e
32,848
def _getname(storefile):
    """returns the filename"""
    if storefile is None:
        raise ValueError("This method cannot magically produce a filename when given None as input.")
    if not isinstance(storefile, basestring):
        if not hasattr(storefile, "name"):
            storefilename = _getdummyname(storefile)
        else:
            storefilename = storefile.name
    else:
        storefilename = storefile
    return storefilename
fa423102a3ff8355af4784eaa39b2a065da80ec4
32,849
def flow_lines(sol, nlines, time_length, scale=0.5): """ compute the flow lines of the solution Parameters ---------- sol : :py:class:`Simulation<pylbm.simulation.Simulation>` the solution given by pylbm nlines : int (number of flow lines) time_length : double (time during which we follow the lines) scale : double (velocity scale (default 0.5)) Returns ------- list lines """ u_x = sol.m[QX] / sol.m[RHO] u_y = sol.m[QY] / sol.m[RHO] # if scale is None: # scale = max(np.linalg.norm(u_x, np.inf), np.linalg.norm(u_y, np.inf)) lines = [] xmin, xmax = sol.domain.geom.bounds[0] ymin, ymax = sol.domain.geom.bounds[1] dx = sol.domain.dx nx, ny = sol.domain.shape_in for _ in range(nlines): # begin a new line cont = True # boolean to continue the line x = xmin + (xmax-xmin) * np.random.rand() y = ymin + (ymax-ymin) * np.random.rand() line_x, line_y = [x], [y] t = 0 while cont: i, j = int((x-xmin)/(xmax-xmin)*nx), int((y-ymin)/(ymax-ymin)*ny) uxij, uyij = u_x[i, j], u_y[i, j] if uxij == 0 and uyij == 0: cont = False else: dt = dx*scale / np.sqrt(uxij**2+uyij**2) x += uxij*dt y += uyij*dt t += dt if x < xmin or x >= xmax or y < ymin or y >= ymax: cont = False else: line_x.append(x) line_y.append(y) if t >= time_length: cont = False lines.append([np.array(line_x), np.array(line_y)]) return lines
fea6667aa8b3012918a66ae3f6e94b9b0a4439ad
32,850
def minOperations(n):
    """Finds the minimum number of operations needed to reach a string of n characters."""
    if type(n) != int or n <= 1:
        return 0
    res = 0
    i = 2
    while i <= n + 1:
        if n % i == 0:
            res += i
            n //= i
        else:
            i += 1
    return res
c26cbd71c6e675adea79938b6e7248a4c093e63f
32,851
def matrix2dictionary(matrix):
    """
    convert matrix to dictionary of comparisons
    """
    pw = {}
    for line in matrix:
        line = line.strip().split('\t')
        if line[0].startswith('#'):
            names = line[1:]
            continue
        a = line[0]
        for i, pident in enumerate(line[1:]):
            b = names[i]
            if a not in pw:
                pw[a] = {}
            if b not in pw:
                pw[b] = {}
            if pident != '-':
                pident = float(pident)
                pw[a][b] = pident
                pw[b][a] = pident
    return pw
fd53dc4f80ff45d4eb41939af54be1d712ee2fa4
32,852
def combine_fastq_output_files(files_to_combine, out_prefix, remove_temp_output): """ Combines fastq output created by BMTagger/bowtie2 on multiple databases and returns a list of output files. Also updates the log file with read counts for the input and output files. """ # print out the reads for all files utilities.log_read_count_for_files(files_to_combine,"Total reads after removing those found in reference database") # create lists of all of the output files for pair 1 and for pair 2 files_for_pair1 = [f[0] for f in files_to_combine] try: files_for_pair2 = [f[1] for f in files_to_combine] except IndexError: files_for_pair2 = [] # select an output prefix based on if the outputs are paired or not output_file = out_prefix + "_1" + config.fastq_file_extension if not files_for_pair2: output_file = out_prefix + config.fastq_file_extension # create intersect file from all output files for pair 1 intersect_fastq(files_for_pair1, output_file, remove_temp_output) output_files=[output_file] # create an intersect file from all output files for pair 2 if files_for_pair2: output_file = out_prefix + "_2" + config.fastq_file_extension intersect_fastq(files_for_pair2, output_file, remove_temp_output) output_files.append(output_file) # Get the read counts for the newly merged files utilities.log_read_count_for_files(output_files,"Total reads after merging results from multiple databases") # remove temp files if set if remove_temp_output: for group in [files_for_pair1, files_for_pair2]: for filename in group: utilities.remove_file(filename) return output_files
1421878edf7e44b46b386d7d4465090cc22acfa4
32,853
import json


def deliver_dap():
    """
    Endpoint for submissions only intended for DAP.
    POST request requires the submission JSON to be uploaded as "submission"
    and the filename passed in the query parameters.
    """
    logger.info('Processing DAP submission')
    filename = request.args.get("filename")
    meta = MetaWrapper(filename)

    files = request.files
    submission_bytes = files[SUBMISSION_FILE].read()
    survey_dict = json.loads(submission_bytes.decode())
    data_bytes = submission_bytes

    meta.set_dap(survey_dict, data_bytes)
    return process(meta, data_bytes)
e7c2753319f512eaa1328fcb3cc808a17a5933b8
32,854
def api_run_delete(run_id):
    """Delete the given run and corresponding entities."""
    data = current_app.config["data"]  # type: DataStorage
    RunFacade(data).delete_run(run_id)
    return "DELETED run %s" % run_id
c2a07caa95d9177eb8fe4b6f27caf368d1a9fbdd
32,855
import numpy def gfalternate_createdataandstatsdict(ldt_tower,data_tower,attr_tower,alternate_info): """ Purpose: Creates the data_dict and stat_dict to hold data and statistics during gap filling from alternate data sources. Usage: Side effects: Called by: Calls: Author: PRI Date: May 2015 """ data_dict = {} stat_dict = {} label_tower = alternate_info["label_tower"] label_composite = alternate_info["label_composite"] data_dict["DateTime"] = {"data":ldt_tower} data_dict[label_tower] = {"attr":attr_tower, "output_list":[label_tower,label_composite], "data":data_tower} data_dict[label_composite] = {"data":numpy.ma.masked_all_like(data_tower), "fitcorr":numpy.ma.masked_all_like(data_tower), "attr":attr_tower} stat_dict[label_tower] = {"startdate":alternate_info["startdate"],"enddate":alternate_info["enddate"]} stat_dict[label_composite] = {"startdate":alternate_info["startdate"],"enddate":alternate_info["enddate"]} return data_dict,stat_dict
a1690fb9e53abcd6b23e33046d82c10a2ca7abc0
32,856
import re def doGeneMapping(model): """ Function that maps enzymes and genes to reactions This function works only if the GPR associations are defined as follows: (g1 and g2 and g6) or ((g3 or g10) and g12) - *model* Pysces model - *GPRdict* dictionary with (iso)enzymes mapped to reactions - *SubUdict* dictionary with genes mapped to isoenzymes """ def unique_list(seq): """Function to remove duplicates""" seen = set() seen_add = seen.add return [x for x in seq if not (x in seen or seen_add(x))] # Get GPR associations for reactions (strings) and # and split according to keywords 'and', 'or' GPRdict = {} model.createGeneAssociationsFromAnnotations() reactions = model.getReactionIds() SubUdict = {} no_associations = 0 for r_ in reactions: try: ass = model.getReaction(r_).getAnnotations() if 'GENE ASSOCIATION' in ass: g_ = ass['GENE ASSOCIATION'] elif 'GENE_ASSOCIATION' in ass: g_ = ass['GENE_ASSOCIATION'] elif 'gene_association' in ass: g_ = ass['gene_association'] elif 'gene association' in ass: g_ = ass['gene association'] if g_ != 'None' and g_ != '' : # Enzymes # g_ = g_.split(') or (') g_ = re.split(r'\)\s+or\s+\(|\)\s+or\s+|\s+or\s+\(',g_) S_list = [] for enzyme in g_: enzyme = enzyme.replace(')','') enzyme = enzyme.replace('(','') # Isoenzymes enzyme = enzyme.replace(' or ','_or_') # Subunits subunits = enzyme.split(' and ') subunits_mod = [] for s in subunits: # remove extra space tmp = s.replace(' ','') # replace possible dashes tmp = tmp.replace('-','_') # add gene prefix tmp = 'GENE_' + tmp subunits_mod.append(tmp) S_list.append(subunits_mod) # Dictionary for isoenzymes for enzymes in S_list: for gene in enzymes: gene = gene.replace(' ','') if 'or' in gene: # SubUdict[gene] = gene.split('_or_') SubUdict[gene] = unique_list(gene.split('_or_')) # GPRdict[r_] = S_list GPRdict[r_] = [unique_list(s) for s in S_list] except: no_associations+=1 print '{} of {} reactions have no GPR Associations' .format(no_associations,len(reactions)) # print GPRdict # print SubUdict # raw_input() return GPRdict, SubUdict
d2e12f7161aca69afa28f7bb0d528d9b922b25b4
32,857
def delay_to_midnight():
    """Calculates the delay between the current time and midnight"""
    current_time = get_current_time()
    delay = time_conversions.hhmm_to_seconds("24:00") - time_conversions.hhmm_to_seconds(current_time)
    return delay
14b33591e58975cd5a4f95d3602e6a1494131267
32,858
def predict(model, imgs):
    """
    Predict the labels of a set of images using the VGG16 model.

    Args:
        model            : The VGG16 model used to make the predictions.
        imgs (ndarray)   : An array of N images (size: N x width x height x channels).

    Returns:
        preds (np.array) : Highest confidence value of the predictions for each image.
        idxs (np.ndarray): Class index of the predictions with the max confidence.
        classes (list)   : Class labels of the predictions with the max confidence.
    """
    # predict probability of each class for each image
    all_preds = model.predict(imgs)
    # for each image get the index of the class with max probability
    idxs = np.argmax(all_preds, axis=1)
    # get the values of the highest probability for each image
    preds = [all_preds[i, idxs[i]] for i in range(len(idxs))]
    # get the label of the class with the highest probability for each image
    classes = [model.classes[idx] for idx in idxs]
    return preds, idxs, classes
0f992412a9067608e99a6976c6ef65b466ef7572
32,859
def _endmsg(rd) -> str:
    """
    Returns an end message with elapsed time
    """
    msg = ""
    s = ""
    if rd.hours > 0:
        if rd.hours > 1:
            s = "s"
        msg += colors.bold(str(rd.hours)) + " hour" + s + " "
    s = ""
    if rd.minutes > 0:
        if rd.minutes > 1:
            s = "s"
        msg += colors.bold(str(rd.minutes)) + " minute" + s + " "
    # if rd.seconds > 0:
    #     msg += str(rd.seconds)
    # else:
    #     msg += "0."
    milliseconds = int(rd.microseconds / 1000)
    if milliseconds > 0:
        msg += colors.bold(str(rd.seconds) + "." + str(milliseconds))
        msg += " seconds"
    return msg
635792c4ebf772926f492e5976ee4ac7caf92cca
32,861
import bz2
import zlib
import lzma


def decompress(fcn):
    """Decorator that decompresses returned data.

    libmagic is used to identify the MIME type of the data and the function
    will keep decompressing until no supported compression format is identified.
    """
    def wrapper(cls, raw=False, *args, **kw):
        data = fcn(cls)
        if raw:
            # return raw data without decompressing
            return data
        mime_type, mime_subtype = magic.from_buffer(data, mime=True).split('/')
        while mime_subtype in ('x-bzip2', 'x-bzip', 'bzip', 'x-gzip', 'gzip', 'x-xz'):
            if mime_subtype in ('x-bzip2', 'x-bzip', 'bzip'):
                data = bz2.decompress(data)
            elif mime_subtype in ('x-gzip', 'gzip'):
                data = zlib.decompress(data, 16 + zlib.MAX_WBITS)
            elif mime_subtype in ('x-xz',):
                data = lzma.decompress(data)
            mime_type, mime_subtype = magic.from_buffer(data, mime=True).split('/')
        return data
    return wrapper
ba5d1540da70c4f92604888f5fd10b879bd62371
32,862
def get_satellite_params(platform=None): """ Helper function to generate Landsat or Sentinel query information for quick use during NRT cube creation or sync only. Parameters ---------- platform: str Name of a satellite platform, Landsat or Sentinel only. params """ # check platform name if platform is None: raise ValueError('Must provide a platform name.') elif platform.lower() not in ['landsat', 'sentinel']: raise ValueError('Platform must be Landsat or Sentinel.') # set up dict params = {} # get porams depending on platform if platform.lower() == 'landsat': # get collections collections = [ 'ga_ls5t_ard_3', 'ga_ls7e_ard_3', 'ga_ls8c_ard_3'] # get bands bands = [ 'nbart_red', 'nbart_green', 'nbart_blue', 'nbart_nir', 'nbart_swir_1', 'nbart_swir_2', 'oa_fmask'] # get resolution resolution = 30 # build dict params = { 'collections': collections, 'bands': bands, 'resolution': resolution} else: # get collections collections = [ 's2a_ard_granule', 's2b_ard_granule'] # get bands bands = [ 'nbart_red', 'nbart_green', 'nbart_blue', 'nbart_nir_1', 'nbart_swir_2', 'nbart_swir_3', 'fmask'] # get resolution resolution = 10 # build dict params = { 'collections': collections, 'bands': bands, 'resolution': resolution} return params
2298c100eed431a48a9531bc3038c5ab8565025d
32,863
import random


def generate_random_token(length=64):
    """
    Generates a random token of specified length.
    """
    lrange = 16 ** length
    hexval = "%0{}x".format(length)
    return hexval % (random.randrange(lrange))
5140dc2a07cb336387fd3e71b3b1edc746cccb44
32,864
from typing import Optional from typing import Union from typing import Tuple def permute_sse_metric( name: str, ref: np.ndarray, est: np.ndarray, compute_permutation: bool = False, fs: Optional[int] = None) -> Union[float, Tuple[float, list]]: """ Computation of SiSNR/PESQ/STOI in permutation/non-permutation mode Args: name: metric name ref: array, reference signal (N x S or S, ground truth) est: array, enhanced/separated signal (N x S or S) compute_permutation: return permutation order or not fs: sample rate of the audio """ if name == "sisnr": return _permute_eval(aps_sisnr, ref, est, compute_permutation=compute_permutation, fs=fs) elif name == "pesq": return _permute_eval(aps_pesq, ref, est, compute_permutation=compute_permutation, fs=fs) elif name == "stoi": return _permute_eval(aps_stoi, ref, est, compute_permutation=compute_permutation, fs=fs) elif name == "sdr": if ref.ndim == 1: ref, est = ref[None, :], est[None, :] sdr, _, _, _, ali = bss_eval_images(ref[..., None], est[..., None], compute_permutation=True) if compute_permutation: return sdr.mean(), ali[:, 0].tolist() else: return sdr[0, 0] else: raise ValueError(f"Unknown name of the metric: {name}")
4e6398852231fa74b8999158dc5f20833f53643b
32,866
from typing import Literal


def test_if() -> None:
    """if-elif-else."""
    PositiveOrNegative = Literal[-1, 0, 1]

    def positive_negative(number: int) -> PositiveOrNegative:
        """Return -1 for negative numbers, 1 for positive numbers, and 0 for 0."""
        result: PositiveOrNegative
        if number < 0:
            result = -1
        elif number == 0:
            result = 0
        else:
            result = 1
        return result

    assert positive_negative(100) == 1
    assert positive_negative(0) == 0
    assert positive_negative(-99) == -1
771c5e5375b161d5eed0efc00db1094a7996169a
32,870
def ba2str(ba):
    """Convert Bluetooth address to string"""
    string = []
    for b in ba.b:
        string.append('{:02X}'.format(b))
    string.reverse()
    return ':'.join(string).upper()
765fc9dbbea5afdd32c6d09c18f428e3693e20bf
32,871
import requests


def delete_user(client: Client, user_id: str) -> bool:
    """Deletes disabled user account via the `/users/{user_id}` endpoint.

    :param client: Client object
    :param user_id: The ID of the user account
    :return: `True` if succeeded, `False` otherwise
    """
    params = {'version': get_user_info(client, user_id)['version']}
    response = requests.delete(
        f'{client.base_url}/api/2/users/{user_id}',
        headers=client.auth_header,
        params=params,
    )
    handle_error_response(response)
    return response.status_code == 204
f385ca6e1108f95c00e28dbd99ffa640fefed761
32,873
import requests


def get_token(corp_id: str, corp_secret: str):
    """Get an access_token.

    https://open.work.weixin.qq.com/api/doc/90000/90135/91039
    """
    req = requests.get(
        f'https://qyapi.weixin.qq.com/cgi-bin/gettoken?corpid={corp_id}&corpsecret={corp_secret}'
    )
    return req.json().get('access_token')
9a9c3fcdb74312b5d2d7c62588aea3cf78796ec9
32,874
def _update_run_op(beta1, beta2, eps, global_step, lr, weight_decay, param, m, v, gradient, decay_flag, optim_filter): """ Update parameters. Args: beta1 (Tensor): The exponential decay rate for the 1st moment estimations. Should be in range (0.0, 1.0). beta2 (Tensor): The exponential decay rate for the 2nd moment estimations. Should be in range (0.0, 1.0). eps (Tensor): Term added to the denominator to improve numerical stability. Should be greater than 0. lr (Tensor): Learning rate. weight_decay (Number): Weight decay. Should be equal to or greater than 0. global_step (Tensor): Global step. param (Tensor): Parameters. m (Tensor): m value of parameters. v (Tensor): v value of parameters. gradient (Tensor): Gradient of parameters. decay_flag (bool): Specifies whether param update with weight decay. optim_filter(bool): Applies parameter update or not. Returns: Tensor, the new value of v after updating. """ if optim_filter: op_mul = P.Mul() op_sqrt = P.Sqrt() op_rsqrt = P.Rsqrt() op_square = P.Square() op_cast = P.Cast() op_reshape = P.Reshape() op_shape = P.Shape() op_pow = P.Pow() op_norm = layer.Norm() op_select = P.Select() op_greater = P.Greater() op_fill = P.Fill() op_dtype = P.DType() param_fp32 = op_cast(param, mstype.float32) m_fp32 = op_cast(m, mstype.float32) v_fp32 = op_cast(v, mstype.float32) gradient_fp32 = op_cast(gradient, mstype.float32) next_m = op_mul(beta1, m_fp32) + op_mul(op_cast(num_one, mstype.float32) - beta1, gradient_fp32) next_v = op_mul(beta2, v_fp32) + op_mul(op_cast(num_one, mstype.float32) - beta2, op_square(gradient_fp32)) next_mm = next_m / (op_cast(num_one, mstype.float32) - op_pow(beta1, op_cast(global_step + num_one, mstype.float32))) next_vv = next_v / (op_cast(num_one, mstype.float32) - op_pow(beta2, op_cast(global_step + num_one, mstype.float32))) w_norm = op_norm(param_fp32) g_norm = op_norm(gradient_fp32) g_norm_hat = op_norm(op_mul(next_mm, op_rsqrt(next_vv + eps)) + weight_decay * param_fp32) zeros = F.zeros_like(w_norm) ones = op_fill(op_dtype(w_norm), op_shape(w_norm), 1.0) trust_ratio = op_select( op_greater(w_norm, zeros), op_select(op_greater(g_norm, zeros), w_norm / g_norm_hat, ones), ones) tens = op_fill(op_dtype(trust_ratio), op_shape(trust_ratio), 10.0) trust_ratio = C.clip_by_value(trust_ratio, zeros, tens) update = next_mm / (op_sqrt(next_vv) + eps) if decay_flag: update = update + op_mul(weight_decay, param_fp32) update_with_lr = op_mul(op_mul(trust_ratio, lr), update) next_param = param_fp32 - op_reshape(update_with_lr, op_shape(param_fp32)) next_param = F.depend(next_param, F.assign(param, op_cast(next_param, F.dtype(param)))) next_param = F.depend(next_param, F.assign(m, op_cast(next_m, F.dtype(m)))) next_param = F.depend(next_param, F.assign(v, op_cast(next_v, F.dtype(v)))) return op_cast(next_param, F.dtype(param)) return gradient
292f493ff83aba5e95a7b4ddce6b454ce4600e2c
32,875
def make_initial_ledger(toodir=None): """Set up the initial ToO ledger with one ersatz observation. Parameters ---------- toodir : :class:`str`, optional, defaults to ``None`` The directory to treat as the Targets of Opportunity I/O directory. If ``None`` then look up from the $TOO_DIR environment variable. Returns ------- :class:`~astropy.table.Table` A Table of the initial, example values for the ToO ledger. The initial (.ecsv) ledger is also written to toodir or $TOO_DIR. """ # ADM get the ToO directory (or check it exists). tdir = get_too_dir(toodir) # ADM retrieve the file name to which to write. fn = get_filename(tdir) # ADM make a single line of the ledger with some indicative values. data = np.zeros(3, dtype=indatamodel.dtype) data["RA"] = 359.999999, 101.000001, 201.5 data["DEC"] = -89.999999, -89.999999, -89.999999 data["PMRA"] = 13.554634, 4.364553, 12.734214 data["PMDEC"] = 10.763842, -10.763842, -10.763842 data["REF_EPOCH"] = 2015.5, 2015.5, 2015.5 data["CHECKER"] = "ADM", "AM", "ADM" data["TOO_TYPE"] = "TILE", "FIBER", "TILE" data["TOO_PRIO"] = "HI", "LO", "HI" data["MJD_BEGIN"] = 40811.04166667, 41811.14166667, 42811.14 data["MJD_END"] = 40811.95833333, 41811.85833333, 42811.85 data["OCLAYER"] = "BRIGHT", "DARK", "DARK" # ADM write out the results. _write_too_files(fn, data, ecsv=True) return data
9f377c54b65973bd26b868a3a7ca11dd96a7e1e6
32,876
def trace_sqrt_product_tf(cov1, cov2):
    """
    This function calculates trace(sqrt(cov1 * cov2))

    This code is inspired from:
    https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/contrib/gan/python/eval/python/classifier_metrics_impl.py

    :param cov1:
    :param cov2:
    :return:
    """
    sqrt_cov1 = sqrt_sym_mat_tf(cov1)
    cov_121 = tf.matmul(tf.matmul(sqrt_cov1, cov2), sqrt_cov1)
    return tf.trace(sqrt_sym_mat_tf(cov_121))
f685080ce644a889aff633fb44cccf452746c5e9
32,877
from typing import Any
from typing import Optional
import json


def opennem_serialize(obj: Any, indent: Optional[int] = None) -> str:
    """Use custom OpenNEM serializer which supports custom types and GeoJSON"""
    return json.dumps(obj, cls=OpenNEMGeoJSONEncoder, indent=indent)
77bcb41130b5d8d95f5460cc45f4e78b9ef8bdf5
32,878
def min_vector(first_atom, second_atom, cell=None):
    """Helper to find minimum image criterion distance."""
    if cell is None:
        cell = first_atom._parent.cell.cell
    return min_vect(first_atom.pos, first_atom.fractional,
                    second_atom.pos, second_atom.fractional, cell)
aa159ff5379b8087f05c8b4c88e3ee71a5d2765f
32,879
def dice_coeff_2label(pred, target):
    """This definition generalizes to real valued pred and target vectors. This should be differentiable.
    pred: tensor with first dimension as batch
    target: tensor with first dimension as batch
    """
    target = target.data.cpu()
    # pred = torch.sigmoid(pred)
    # pred = pred.data.cpu()
    # pred[pred > 0.75] = 1
    # pred[pred <= 0.75] = 0
    if len(pred.shape) == 3:
        return (dice_coefficient_numpy(pred[0, ...], target[0, ...]),
                dice_coefficient_numpy(pred[1, ...], target[1, ...]))
    else:
        dice_cup = []
        dice_disc = []
        for i in range(pred.shape[0]):
            cup = dice_coefficient_numpy(pred[i, 0, ...], target[i, 0, ...])
            disc = dice_coefficient_numpy(pred[i, 1, ...], target[i, 1, ...])
            dice_cup.append(cup)
            dice_disc.append(disc)
        return sum(dice_cup) / len(dice_cup), sum(dice_disc) / len(dice_disc)
553f3cdc76f4061512dea27a482f177948f87063
32,880
def cost_arrhenius(p, T, rate):
    """
    Sum of absolute deviations of obs and arrhenius function.

    Parameters
    ----------
    p : iterable of floats
        `p[0]` is activation energy [J]
    T : float or array_like of floats
        independent variable
    rate : float or array_like of floats
        dependent variable, observations

    Returns
    -------
    float
        sum of absolute deviations
    """
    return np.sum(np.abs(rate - arrhenius_p(T, p)))
f69a98e06e79774e2fa7eef2709f0bdd6adbe3e1
32,881
def get_ou_accounts_by_ou_name(ou_name, accounts_list=None, parent=None):
    """
    Returns the accounts of an OU by its name

    Args:
        ou_name: name of the OU
        accounts_list: list of accounts from a previous call due to the recursion
        parent: ID of the parent OU to search under; defaults to the organization root

    Returns:
        list of dict() with the information of the accounts in the OU
    """
    if accounts_list is None:
        accounts_list = []
    if parent is None:
        parent = get_root()['Id']
    try:
        ou_info = get_ou_by_name(ou_name, parent)
        parent = ou_info['Id']
    except Exception:
        raise ValueError(f'Failed to retrieve the organization unit of name {ou_name}')
    return get_ou_accounts(parent)
6a8d638b18de08937208de45665fd3586dff8c76
32,882
def split_result_of_axis_func_pandas(axis, num_splits, result, length_list=None): """Split the Pandas result evenly based on the provided number of splits. Args: axis: The axis to split across. num_splits: The number of even splits to create. result: The result of the computation. This should be a Pandas DataFrame. length_list: The list of lengths to split this DataFrame into. This is used to return the DataFrame to its original partitioning schema. Returns: A list of Pandas DataFrames. """ if num_splits == 1: return result if length_list is not None: length_list.insert(0, 0) sums = np.cumsum(length_list) if axis == 0: return [result.iloc[sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] else: return [result.iloc[:, sums[i] : sums[i + 1]] for i in range(len(sums) - 1)] # We do this to restore block partitioning chunksize = compute_chunksize(result, num_splits, axis=axis) if axis == 0: return [ result.iloc[chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ] else: return [ result.iloc[:, chunksize * i : chunksize * (i + 1)] for i in range(num_splits) ]
bd136350147db12ed165129310fd5ee22f55b40e
32,883
def is_icypaw_scalar_type_annotation(obj):
    """Return if the object is usable as an icypaw scalar type annotation."""
    if isinstance(obj, type) and issubclass(obj, IcypawScalarType):
        return True
    return False
7c3095ff03183a1dce33062b18dac94ee2528170
32,884
def pair_is_inward(read, regionlength):
    """Determine if pair is pointing inward"""
    return read_is_inward(read, regionlength) and mate_is_inward(read, regionlength)
4d67b7824093df1cd5d2e020d88894a0877d5d71
32,886
def sort_by_game(game_walker, from_locale, pack): """Sort a pack by the order in which strings appears in the game files. This is one of the slowest sorting method. If the pack contains strings that are not present in the game, they are sorted alphabetically at the end and a message is logged.""" def get_file_path_tuple(file_dict_path_str): return tuple(common.unserialize_dict_path(file_dict_path_str)[0]) def get_packs_by_file(pack): """Return a dict from file_path_tuple to a pack for that file path""" packs_by_file = {} for file_dict_path_str, result in pack.items(): file_path_tuple = get_file_path_tuple(file_dict_path_str) pack = packs_by_file.setdefault(file_path_tuple, {}) pack[file_dict_path_str] = result return packs_by_file packs_by_file = get_packs_by_file(pack) known_files = frozenset(packs_by_file.keys()) game_walker.set_file_path_filter(lambda f_p: tuple(f_p) in known_files) def iterate_game_and_pick_translations(packs_by_file, game_walker): """Iterate with game_walker and drain packs_by_file Return a sorted single pack with elements in the same order as returned by game_walker with translations from packs_by_file. This will drain packs_by_file in the process, so only stale strings will remain there.""" output = {} iterator = game_walker.walk(from_locale, False) current_file = None strings_for_file = None def add_stale_for_current_file(): """Add strings remaining in strings_for_file to stale translations Called after iterating for all strings in one game file.""" if strings_for_file: print("note: sorting", len(strings_for_file), "stale nonexisting strings for", "/".join(current_file)) output.update(common.sort_dict(strings_for_file)) strings_for_file.clear() for file_dict_path_str, _, _, _ in iterator: file_path = get_file_path_tuple(file_dict_path_str) if current_file != file_path: add_stale_for_current_file() current_file = file_path strings_for_file = packs_by_file.pop(file_path, {}) result = strings_for_file.pop(file_dict_path_str, None) if result is not None: output[file_dict_path_str] = result # sort remains of the last file add_stale_for_current_file() return output output = iterate_game_and_pick_translations(packs_by_file, game_walker) # sort the remaining stales file_path, and add them for file_path, stale_pack in common.sort_dict(packs_by_file).items(): print("note: sorting", len(stale_pack), "strings for nonexisting", "/".join(file_path)) output.update(common.sort_dict(stale_pack)) return output
97cebe8db744aef876c7dd0018c49593e7c22888
32,889
from ..base.util.worker_thread import stop_all_threads from typing import Sequence import threading def main(args: Sequence[str]) -> int: """ Entry for the program. """ try: user_args = parse_args(args) bus = bootstrap_petronia(user_args) return run_petronia(bus, user_args) except BaseException as err: # pylint: disable=broad-except if isinstance(err, SystemExit): if err.code == 0: return 0 # Otherwise, treat as a normal error. print_exception(err.__class__, err, err.__traceback__) stop_all_threads() for thread in threading.enumerate(): if thread != threading.current_thread() and thread.isAlive(): print("Thread {0} still alive".format(thread.name)) print("Exiting Petronia with error.") return 1
099c0ab97569028a6ca8af3bc97103c1490c54ff
32,890
def to_query_str(params):
    """Converts a dict of params to a query string.

    Args:
        params (dict): A dictionary of parameters, where each key is
            a parameter name, and each value is either a string or
            something that can be converted into a string. If `params`
            is a list, it will be converted to a comma-delimited string
            of values (e.g., "thing=1,2,3")

    Returns:
        str: A URI query string including the "?" prefix, or an empty string
            if no params are given (the dict is empty).
    """
    if not params:
        return ''

    # PERF: This is faster than a list comprehension and join, mainly
    # because it allows us to inline the value transform.
    query_str = '?'
    for k, v in params.items():
        if v is True:
            v = 'true'
        elif v is False:
            v = 'false'
        elif isinstance(v, list):
            v = ','.join(map(str, v))
        else:
            v = str(v)
        query_str += k + '=' + v + '&'

    return query_str[:-1]
11b27e17525cf05dabf0d36e1709be749e829264
32,891
def histogram(backend, qureg): """ Make a measurement outcome probability histogram for the given qubits. Args: backend (BasicEngine): A ProjectQ backend qureg (list of qubits and/or quregs): The qubits, for which to make the histogram Returns: A tuple (fig, axes, probabilities), where: fig: The histogram as figure axes: The axes of the histogram probabilities (dict): A dictionary mapping outcomes as string to their probabilities Note: Don't forget to call eng.flush() before using this function. """ qubit_list = [] for qb in qureg: if isinstance(qb, list): qubit_list.extend(qb) else: qubit_list.append(qb) if len(qubit_list) > 5: print('Warning: For {0} qubits there are 2^{0} different outcomes'.format(len(qubit_list))) print("The resulting histogram may look bad and/or take too long.") print("Consider calling histogram() with a sublist of the qubits.") if hasattr(backend, 'get_probabilities'): probabilities = backend.get_probabilities(qureg) elif isinstance(backend, Simulator): outcome = [0] * len(qubit_list) n_outcomes = 1 << len(qubit_list) probabilities = {} for i in range(n_outcomes): for pos in range(len(qubit_list)): if (1 << pos) & i: outcome[pos] = 1 else: outcome[pos] = 0 probabilities[''.join([str(bit) for bit in outcome])] = backend.get_probability(outcome, qubit_list) else: raise RuntimeError('Unable to retrieve probabilities from backend') # Empirical figure size for up to 5 qubits fig, axes = plt.subplots(figsize=(min(21.2, 2 + 0.6 * (1 << len(qubit_list))), 7)) names = list(probabilities.keys()) values = list(probabilities.values()) axes.bar(names, values) fig.suptitle('Measurement Probabilities') return (fig, axes, probabilities)
77227d1db0f90420134c18737248989481d384c1
32,892
import bisect def crop(sequence, minimum, maximum, key=None, extend=False): """ Calculates crop indices for given sequence and range. Optionally the range can be extended by adding additional adjacent points to each side. Such extension might be useful to display zoomed lines etc. Note that this method assumes that given sequence is sorted ascendantly. Args: sequence: list or tuple Collection of items ordered by searched value. minimum: float Crop range minimum. maximum: float Crop range maximum. key: callable or None Function to be used to get specific value from item. extend: bool If set to True additional adjacent point is added to each side. Returns: (int, int) Cropping indexes. """ # get indices left_idx = bisect(sequence, minimum, key, 'left') right_idx = bisect(sequence, maximum, key, 'right') # extend range by adjacent values if extend and left_idx > 0: left_idx = bisect(sequence[:left_idx], sequence[left_idx-1], key, 'left') if extend and right_idx < len(sequence): right_idx += bisect(sequence[right_idx:], sequence[right_idx], key, 'right') return left_idx, right_idx
af761dbdbcb40270a4aaf7c55921497e1872f8c1
32,893
def reverse_dict(dict_obj):
    """Reverse a dict, so each value in it maps to a sorted list of its keys.

    Parameters
    ----------
    dict_obj : dict
        A key-value dict.

    Returns
    -------
    dict
        A dict where each value maps to a sorted list of all the unique keys
        that mapped to it.

    Example
    -------
    >>> dicti = {'a': 1, 'b': 3, 'c': 1}
    >>> reverse_dict(dicti)
    {1: ['a', 'c'], 3: ['b']}
    """
    new_dict = {}
    for key in dict_obj:
        add_to_dict_val_set(dict_obj=new_dict, key=dict_obj[key], val=key)
    for key in new_dict:
        new_dict[key] = sorted(new_dict[key], reverse=False)
    return new_dict
94ff638e67de94a37754cfae7fd9d2605835b946
32,894
def remote_error_known():
    """Return a remote "error" code."""
    return {"errorType": 1}
bd848143531a9f8e997af8ef64f2d1ee4ad3670b
32,895
import re


def get_throttling_plan(js: str):
    """Extract the "throttling plan".

    The "throttling plan" is a list of tuples used for calling functions
    in the c array. The first element of the tuple is the index of the
    function to call, and any remaining elements of the tuple are arguments
    to pass to that function.

    :param str js:
        The contents of the base.js asset file.
    :returns:
        The full function code for computing the throttling parameter.
    """
    raw_code = get_throttling_function_code(js)

    transform_start = r"try{"
    plan_regex = re.compile(transform_start)
    match = plan_regex.search(raw_code)

    transform_plan_raw = find_object_from_startpoint(raw_code, match.span()[1] - 1)

    # Steps are either c[x](c[y]) or c[x](c[y],c[z])
    step_start = r"c\[(\d+)\]\(c\[(\d+)\](,c(\[(\d+)\]))?\)"
    step_regex = re.compile(step_start)
    matches = step_regex.findall(transform_plan_raw)

    transform_steps = []
    for match in matches:
        if match[4] != '':
            transform_steps.append((match[0], match[1], match[4]))
        else:
            transform_steps.append((match[0], match[1]))

    return transform_steps
c53eba9d018a6e3308f07031c4c8f26101f853dd
32,896
def list_agg(object_list, func):
    """Aggregation function for a list of objects."""
    ret = []
    for elm in object_list:
        ret.append(func(elm))
    return ret
b2d8eef9c795e4700d111a3949922df940435809
32,897
from typing import Callable


def job_metadata_api(**kwargs: dict) -> Callable[[dict], str]:
    """
    Job Metadata API route.

    Arguments:
        kwargs: required keyword arguments

    Returns:
        JSON response
    """
    return job_resource.job_metadata_api(**kwargs)
4641930a6c18066eac3dbaaaed99bfbd59560722
32,898
def parse_word(word: str) -> str:
    """Compile a word of uppercase letters as numeric digits.

    Non-uppercase letter words are returned unchanged."""
    if not word.isupper():
        return word
    compiled_word = " + ".join([letter + "*" + str(10 ** index)
                                for index, letter in enumerate(word[::-1])])
    return "(" + compiled_word + ")"
aa246c7d5e92035f14476327f5b2b694b383f7e1
32,899
def rayleigh(gamma, M0, TtR):
    """
    Function that takes in an input (output) Mach number and a stagnation temperature ratio
    and yields an output (input) Mach number, according to the Rayleigh flow equation.
    The function also outputs the stagnation pressure ratio.

    Inputs:
    gamma   [dimensionless]
    M0      [dimensionless]
    TtR     [dimensionless]

    Outputs:
    M1      [dimensionless]
    Ptr     [dimensionless]
    """
    func = lambda M1: (((1. + gamma * M0 * M0) ** 2. * M1 * M1 * (1. + (gamma - 1.) / 2. * M1 * M1)) /
                       ((1. + gamma * M1 * M1) ** 2. * M0 * M0 * (1. + (gamma - 1.) / 2. * M0 * M0)) - TtR)

    # Initializing the array
    M1_guess = np.ones_like(M0)

    # Separating supersonic and subsonic solutions
    i_low = M0 <= 1.0
    i_high = M0 > 1.0

    # -- Subsonic solution guess
    M1_guess[i_low] = .01
    # -- Supersonic solution guess
    M1_guess[i_high] = 1.1

    # Find Mach number
    M1 = fsolve(func, M1_guess, factor=0.1)

    # Calculate stagnation pressure ratio
    Ptr = ((1. + gamma * M0 * M0) / (1. + gamma * M1 * M1) *
           ((1. + (gamma - 1.) / 2. * M1 * M1) / (1. + (gamma - 1.) / 2. * M0 * M0)) ** (gamma / (gamma - 1.)))

    return M1, Ptr
f1432158136bee529ec592f7ef539f2aac19e5f5
32,900
def attack(N, e, c, oracle):
    """
    Recovers the plaintext from the ciphertext using the LSB oracle attack.
    :param N: the modulus
    :param e: the public exponent
    :param c: the encrypted message
    :param oracle: a function which returns the last bit of a plaintext for a given ciphertext
    :return: the plaintext
    """
    left = ZZ(0)
    right = ZZ(N)
    while right - left > 1:
        c = (c * pow(2, e, N)) % N
        if oracle(c) == 0:
            right = (right + left) / 2
        else:
            left = (right + left) / 2
    return int(right)
3fe99894909c07da0dc42fc1101bb45853e3366f
32,901
from typing import Optional from typing import Union from typing import Dict from typing import Any def setup_wandb_logging( trainer: Engine, optimizers: Optional[Union[Optimizer, Dict[str, Optimizer]]] = None, evaluators: Optional[Union[Engine, Dict[str, Engine]]] = None, log_every_iters: int = 100, **kwargs: Any ): """Method to setup WandB logging on trainer and a list of evaluators. Logged metrics are: - Training metrics, e.g. running average loss values - Learning rate(s) - Evaluation metrics Args: trainer (Engine): trainer engine optimizers (torch.optim.Optimizer or dict of torch.optim.Optimizer, optional): single or dictionary of torch optimizers. If a dictionary, keys are used as tags arguments for logging. evaluators (Engine or dict of Engine, optional): single or dictionary of evaluators. If a dictionary, keys are used as tags arguments for logging. log_every_iters (int, optional): interval for loggers attached to iteration events. To log every iteration, value can be set to 1 or None. **kwargs: optional keyword args to be passed to construct the logger. Returns: :class:`~ignite.contrib.handlers.wandb_logger.WandBLogger` """ logger = WandBLogger(**kwargs) _setup_logging(logger, trainer, optimizers, evaluators, log_every_iters) return logger
caf67149d8aa8d2b85f07f96b4d55677183c594b
32,902
def _norm(X, y):
    """Scales data to [0..1] interval"""
    X = X.astype(np.float32) / (X.max() - X.min())
    return X, y
87189d4c885d77654793373416c0d5c4be498fba
32,903
async def delete_layer(location_id, layer_id): """ Delete layer --- delete: summary: Delete layer tags: - layers parameters: - name: id in: path required: true description: ID of the object to be deleted responses: 200: description: The object which was deleted content: application/json: schema: Layer """ location = g.active_incident.Location.find_by_id(location_id) if location is None: raise exceptions.NotFound(description="Location {} was not found".format(location_id)) layer = location.Layer.find_by_id(layer_id) if layer is None: raise exceptions.NotFound(description="Layer {} was not found".format(layer_id)) layer.delete() return jsonify(layer), HTTPStatus.OK
537ea3907fef2998ca8ae960a05a2e5204b4ab7e
32,906
def fit_and_save_model(params, data, targets):
    """Fit xgb classifier pipeline with params parameters and save it to disk"""
    pipe = make_pipeline(StandardScaler(),
                         XGBClassifier(learning_rate=params['learning_rate'],
                                       max_depth=int(params['max_depth'])))
    pipe.fit(data, targets)
    # Persist the pipeline to disk
    dump(pipe, 'ADXL345_xgb_gesture.joblib')
    print('Done saving')
    return pipe
589dd94f0a258f8eabcbd47b2341a71303c5d6b7
32,907
def relu_backward(dout, cache):
    """
    Backward pass for the ReLU function layer.

    Arguments:
        dout: numpy array of gradient of output passed from next layer with any shape
        cache: tuple (x)

    Output:
        dx: numpy array of gradient for the input, with the same shape as dout
    """
    x = cache
    dx = dout * (x >= 0)
    return dx
3384ddf789ed2a31e25a4343456340a60e5a6e11
32,908
def run_decopath(request): """Process file submission page.""" # Get current user current_user = request.user user_email = current_user.email # Check if user submits pathway analysis results if 'submit_results' in request.POST: # Populate form with data from the request results_form = UploadResultsForm(request.POST, request.FILES) results_form_fc = UploadFoldChangesForm(request.POST, request.FILES) # If form is not valid, return HttpResponseBadRequest if not results_form.is_valid(): err = results_form.errors return HttpResponseBadRequest(f'{err} {INVALID_RESULTS_FORM_MSG}') # If form is not valid, return HttpResponseBadRequest if not results_form_fc.is_valid(): return HttpResponseBadRequest(INVALID_FOLD_CHANGES_MSG) # If form is valid, process file data in request.FILES and throw HTTPBadRequest if improper submission results_file_val = process_user_results(current_user, user_email, results_form, results_form_fc) # Check if file cannot be read and throw error if isinstance(results_file_val, str): return HttpResponseBadRequest(results_file_val) # Check if user submits form to run ORA elif 'run_ora' in request.POST: db_form = ORADatabaseSelectionForm(request.POST, request.FILES) parameters_form = ORAParametersForm(request.POST, request.FILES) form = ORAOptions(request.POST, request.FILES) # dict # Check if form is valid if not db_form.is_valid(): return HttpResponseBadRequest(DB_SELECTION_MSG) # Check if parameters form is valid if not parameters_form.is_valid(): return HttpResponseBadRequest(MAX_MIN_GENES_MSG) # Check if form is valid if not form.is_valid(): return HttpResponseBadRequest(FORM_COMPLETION_MSG) # If form is valid, process file data in request.FILES and throw HTTPBadRequest if improper submission run_ora_val = process_files_run_ora(current_user, user_email, form, db_form, parameters_form) if isinstance(run_ora_val, str): return HttpResponseBadRequest(run_ora_val) elif isinstance(run_ora_val, bool): messages.error(request, DB_SELECTION_MSG) return redirect("/") else: _update_job_user(request, current_user, run_ora_val[0], run_ora_val[1]) # Check if user submits form to run GSEA else: db_form = GSEADatabaseSelectionForm(request.POST, request.FILES) form = UploadGSEAForm(request.POST, request.FILES) fc_form = UploadFoldChangesFormGSEA(request.POST, request.FILES) # Check if form is valid if not db_form.is_valid(): err = db_form.errors return HttpResponseBadRequest(f'{err} {DB_SELECTION_MSG}') if not form.is_valid(): err = form.errors return HttpResponseBadRequest(f'{err} {FORM_COMPLETION_MSG}') if not fc_form.is_valid(): err = fc_form.errors return HttpResponseBadRequest(f'{err} {INVALID_FOLD_CHANGES_MSG}') # If form is valid, process file data in request.FILES and throw HTTPBadRequest if improper submission run_gsea_val = process_files_run_gsea(current_user, user_email, db_form, form, fc_form) if isinstance(run_gsea_val, str): return HttpResponseBadRequest(run_gsea_val) elif isinstance(run_gsea_val, bool): messages.error(request, DB_SELECTION_MSG) return redirect("/") else: _update_job_user(request, current_user, run_gsea_val[0], run_gsea_val[1]) return redirect("/experiments")
d86d21a31004f971e2f77b11040e88dcd2a26ee4
32,909
from tqdm import tqdm
from transformers import AutoTokenizer


def load_abs_pos_sighan_plus(dataset=None, path_head=""):
    """
    Temporary deprecation! For abs pos BERT.
    """
    print("Loading Expanded Abs_Pos Bert SigHan Dataset ...")
    train_pkg, valid_pkg, test_pkg = load_raw_lattice(path_head=path_head)

    tokenizer_model_name_path = "hfl/chinese-roberta-wwm-ext"
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_model_name_path)

    train_dataset, valid_dataset, test_dataset = (
        get_lattice_and_pos_plus(train_pkg, tokenizer),
        get_lattice_and_pos(valid_pkg, tokenizer),
        get_lattice_and_pos(test_pkg, tokenizer),
    )

    def transpose(inputs):
        features = []
        for i in tqdm(range(len(inputs["input_ids"]))):  # ugly fix for encoder model (keep the same length)
            features.append({key: inputs[key][i] for key in inputs.keys()})  # truncation handled here
        return features

    return transpose(train_dataset), transpose(valid_dataset), transpose(test_dataset)
8da548c4586f42c8a7421482395895b56aa31a10
32,910
def _train_on_tpu_system(model_fn_wrapper, dequeue_fn):
    """Executes `model_fn_wrapper` multiple times on all TPU shards."""
    config = model_fn_wrapper.config.tpu_config
    iterations_per_loop = config.iterations_per_loop
    num_shards = config.num_shards

    single_tpu_train_step = model_fn_wrapper.convert_to_single_tpu_train_step(
        dequeue_fn)

    multi_tpu_train_steps_on_single_shard = (lambda: training_loop.repeat(  # pylint: disable=g-long-lambda
        iterations_per_loop,
        single_tpu_train_step,
        [_INITIAL_LOSS],
        name=b'loop'))

    (loss,) = tpu.shard(multi_tpu_train_steps_on_single_shard,
                        inputs=[],
                        num_shards=num_shards,
                        outputs_from_all_shards=False)
    return loss
faad0d857b2741b5177f348a6f2b7a54f9470135
32,911
def get_docs_url(model):
    """
    Return the documentation URL for the specified model.
    """
    return f'{settings.STATIC_URL}docs/models/{model._meta.app_label}/{model._meta.model_name}/'
613cb815ff01fa13c6c957f47c0b5f3f7edcff8f
32,912
import random

import nltk


def _fetch_random_words(n=1000):
    """Generate a random list of words."""
    # Ensure the same words each run
    random.seed(42)
    # Download the corpus if not present
    nltk.download('words')
    word_list = nltk.corpus.words.words()
    random.shuffle(word_list)
    random_words = word_list[:n]
    return random_words
aaf257e3b6202555b29bdf34fd9342794a5acf6f
32,913
import json


def cache_pdf(pdf, document_number, metadata_url):
    """Update submission metadata and cache comment PDF."""
    url = SignedUrl.generate()
    content_disposition = generate_content_disposition(document_number, draft=False)
    s3_client.put_object(
        Body=json.dumps({'pdfUrl': metadata_url.url}),
        Bucket=settings.ATTACHMENT_BUCKET,
        Key=metadata_url.key,
    )
    s3_client.put_object(
        Body=pdf,
        ContentType='application/pdf',
        ContentDisposition=content_disposition,
        Bucket=settings.ATTACHMENT_BUCKET,
        Key=url.key,
    )
    return url
44ffd6841380b9454143f4ac8c71ef6ea560030a
32,914
import mercantile
from shapely import geometry


def get_tile_list(geom, zoom=17):
    """Generate the tile list for the tasking list.

    Parameters
    ----------
    geom : shapely geometry
        Shapely geometry of the area.
    zoom : int
        Zoom level(s) for the tiles; one or more zoom levels.

    Returns
    -------
    list
        Tiles that intersect with the geometry.
    """
    west, south, east, north = geom.bounds
    tiles = mercantile.tiles(west, south, east, north, zooms=zoom)
    tile_list = []
    for tile in tiles:
        tile_geom = geometry.shape(mercantile.feature(tile)['geometry'])
        if tile_geom.intersects(geom):
            tile_list.append(tile)
    return tile_list
dcceb93b13ce2bbd9e95f664c12929dee10a1e63
32,915
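A hedged usage sketch for the `get_tile_list` entry above; the bounding-box coordinates are made-up illustrative values and the sketch assumes `mercantile` and `shapely` are installed:

from shapely.geometry import box

# Roughly a 1 km x 1 km lon/lat box; at zoom 17 this intersects a few dozen tiles.
aoi = box(-0.13, 51.50, -0.12, 51.51)
tiles = get_tile_list(aoi, zoom=17)
print(len(tiles), tiles[0])   # each element is a mercantile.Tile(x=..., y=..., z=17)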
def findTolerableError(log, file='data/psf4x.fits', oversample=4.0, psfs=10000, iterations=7, sigma=0.75):
    """
    Calculate ellipticity and size for PSFs of different scaling when there is a residual
    pixel-to-pixel variation.
    """
    # read in the PSF and renormalize it
    data = pf.getdata(file)
    data /= np.max(data)

    # PSF scalings for the peak pixel, in electrons
    scales = np.random.random_integers(1e2, 2e5, psfs)

    # set the scale for shape measurement
    settings = dict(sampling=1.0/oversample, itereations=iterations, sigma=sigma)

    # residual from a perfect no pixel-to-pixel non-uniformity
    residuals = np.logspace(-7, -1.6, 9)[::-1]  # largest first
    tot = residuals.size

    res = {}
    for i, residual in enumerate(residuals):
        print('%i / %i' % (i + 1, tot))

        R2 = []
        e1 = []
        e2 = []
        e = []

        # loop over the PSFs
        for scale in scales:
            # random residual pixel-to-pixel variations
            if oversample < 1.1:
                residualSurface = np.random.normal(loc=1.0, scale=residual, size=data.shape)
            elif oversample == 4.0:
                tmp = np.random.normal(loc=1.0, scale=residual, size=(170, 170))
                residualSurface = zoom(tmp, 4.013, order=0)
            else:
                sys.exit('ERROR when trying to generate a blocky pixel-to-pixel non-uniformity map...')

            # make a copy of the PSF and scale it with the given scaling
            # and then multiply with a residual pixel-to-pixel variation
            tmp = data.copy() * scale * residualSurface

            # measure e and R2 from the postage stamp image
            sh = shape.shapeMeasurement(tmp.copy(), log, **settings)
            results = sh.measureRefinedEllipticity()

            # save values
            e1.append(results['e1'])
            e2.append(results['e2'])
            e.append(results['ellipticity'])
            R2.append(results['R2'])

        out = dict(e1=np.asarray(e1), e2=np.asarray(e2), e=np.asarray(e), R2=np.asarray(R2))
        res[residual] = out

    return res
02b1771e4a363a74a202dd8d5b559efd68064f4d
32,916
import numpy as np
from skimage import measure


def squeeze_labels(labels):
    """Relabel objects so the label ids form the contiguous range 1..n_objects."""
    # regionprops only reports foreground regions, so background 0 is already excluded
    label_ids = np.unique([r.label for r in measure.regionprops(labels)])
    for new_label, label_id in zip(range(1, label_ids.size + 1), label_ids):
        labels[labels == label_id] = new_label
    return labels
9c78f5e103fa83f891c11477d4cea6fdac6e416d
32,917
import itertools


def orient_edges_gs2(edge_dict, Mb, data, alpha):
    """
    Similar algorithm as above, but slightly modified for speed?
    Need to test.
    """
    d_edge_dict = dict([(rv, []) for rv in edge_dict])
    for X in edge_dict.keys():
        for Y in edge_dict[X]:
            nxy = set(edge_dict[X]) - set(edge_dict[Y]) - {Y}
            for Z in nxy:
                if Y not in d_edge_dict[X]:
                    d_edge_dict[X].append(Y)  # SET Y -> X
                B = min(set(Mb[Y]) - {X} - {Z}, set(Mb[Z]) - {X} - {Y})
                for i in range(len(B)):
                    for S in itertools.combinations(B, i):
                        cols = (Y, Z, X) + tuple(S)
                        pval = mi_test(data[:, cols])
                        if pval < alpha and X in d_edge_dict[Y]:
                            # Y IS independent of Z given S+X
                            d_edge_dict[Y].remove(X)
                    if X in d_edge_dict[Y]:
                        break
    return d_edge_dict
272bdd74ed5503851bd4eb5519c505d8583e3141
32,918
from numpy import ceil, linspace


def _divide_evenly(start, end, max_width):
    """
    Evenly divides the interval between ``start`` and ``end`` into intervals
    that are at most ``max_width`` wide.

    Arguments
    ---------
    start : float
        Start of the interval
    end : float
        End of the interval
    max_width : float
        Maximum width of the divisions

    Returns
    -------
    divisions : ndarray
        Resulting array of division points
    """
    num_partitions = int(ceil((end - start) / max_width))
    return linspace(start, end, num_partitions + 1)
08647cc55eca35447a08fd4ad3959db56dffc565
32,919
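A small worked example for the `_divide_evenly` entry above: splitting [0, 10] with a maximum width of 3 needs ceil(10 / 3) = 4 partitions, so linspace returns the 5 edge points shown in the comment.

import numpy as np

edges = _divide_evenly(0.0, 10.0, 3.0)
print(edges)                             # [ 0.   2.5  5.   7.5 10. ]
assert np.all(np.diff(edges) <= 3.0)     # every division is at most max_width wide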
def uncompress_pubkey(pubkey):
    """Convert a compressed public key to an uncompressed public key.

    Args:
        pubkey (str): Hex encoded 33 byte compressed public key

    Return:
        str: Hex encoded uncompressed 65 byte public key (4 + x + y).
    """
    public_pair = encoding.sec_to_public_pair(h2b(pubkey))
    return b2h(encoding.public_pair_to_sec(public_pair, compressed=False))
672f89482e5338f1e23cbe21823b9ee6625c792f
32,920
from celery import Celery


def make_celery(main_flask_app):
    """Generates the celery object and ties it to the main Flask app object."""
    celery = Celery(main_flask_app.import_name, include=["feed.celery_periodic.tasks"])
    celery.config_from_object(envs.get(main_flask_app.config.get("ENV"), "config.DevConfig"))

    task_base = celery.Task

    class ContextTask(task_base):
        abstract = True

        def __call__(self, *args, **kwargs):
            # Run every task inside the Flask application context
            with main_flask_app.app_context():
                return task_base.__call__(self, *args, **kwargs)

    celery.Task = ContextTask
    return celery
57fc0d7917b409cb36b4f50442dd357d384b3852
32,921
from datetime import timedelta


def adjustForWeekdays(dateIn):
    """
    Returns a date based on whether or not the input date is on a weekend.
    If the input date falls on a Saturday or Sunday, the return is the date
    of the following Monday. Otherwise it returns the original date.
    """
    # If Saturday, return the following Monday.
    if dateIn.weekday() == 5:
        print("Projected End Date falls on a Saturday, correcting "
              "to fall on a Monday.")
        return dateIn + timedelta(days=2)
    # If Sunday, return the following Monday.
    elif dateIn.weekday() == 6:
        print("Projected End Date falls on a Sunday, correcting "
              "to fall on a Monday.")
        return dateIn + timedelta(days=1)
    # On any other weekday, return the date unchanged.
    else:
        return dateIn
9db5c3fadbcb8aeb77bfc1498333d6b0f44fd716
32,922
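A quick usage sketch for the `adjustForWeekdays` entry above; the dates are arbitrary examples (2024-06-08 is a Saturday, 2024-06-12 a Wednesday):

from datetime import date

print(adjustForWeekdays(date(2024, 6, 8)))    # corrected to Monday 2024-06-10 (with a message printed)
print(adjustForWeekdays(date(2024, 6, 12)))   # returned unchanged: 2024-06-12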
from datetime import datetime


def get_user_or_add_user(spotify_id, display_name, display_image=None, token=None):
    """Fetch an existing user or create a user."""
    user = User.query.filter(User.spotify_id == spotify_id).first()

    if user is None:
        spotify_display_name = display_name
        spotify_image_url = display_image
        created_at = datetime.now()
        access_token = None
        refresh_token = token.refresh_token if token else None

        user = create_user(
            spotify_id,
            spotify_display_name,
            spotify_image_url,
            created_at,
            access_token,
            refresh_token)
    elif token:
        user.refresh_token = token.refresh_token
        user.spotify_image_url = display_image
        db.session.commit()

    return user
27f0fffcaf10e4060860c39f1df54afe21814250
32,925
from urllib.request import urlopen

from bs4 import BeautifulSoup


def get_daily_data(y, m, d, icao):
    """
    Grab daily weather data for an airport from wunderground.com.

    Parameters
    ----------
    y: year
    m: month
    d: day
    icao: ICAO identification number for an airport

    Returns
    -------
    A dictionary containing
        "Min Temperature": daily minimum temperature
        "Max Temperature": daily maximum temperature
        "Precipitation": daily precipitation
        "Maximum Humidity": daily maximum humidity
        "Minimum Humidity": daily minimum humidity
    """
    # construct url from (y, m, d)
    url = ("http://www.wunderground.com/history/airport/" + icao + '/' +
           str(y) + "/" + str(m) + "/" + str(d) + "/DailyHistory.html")
    page = urlopen(url)

    # parse html
    soup = BeautifulSoup(page, 'html5lib')

    # return dictionary
    daily_data = {'Min Temperature': 'nan',
                  'Max Temperature': 'nan',
                  'Precipitation': 'nan',
                  'Maximum Humidity': 'nan',
                  'Minimum Humidity': 'nan'}

    # find rows in the main table
    all_rows = soup.find(id="historyTable").find_all('tr')
    for row in all_rows:
        # attempt to find item name
        try:
            item_name = row.findAll('td', class_='indent')[0].get_text()
        except Exception:
            # if we run into an error, skip this row
            continue

        # temperature and precipitation
        if item_name in ('Min Temperature', 'Max Temperature', 'Precipitation'):
            try:
                val = row.find_all('span', class_='wx-value')[0].get_text()
            except Exception:
                continue
            if is_number(val):
                daily_data[item_name] = val

        if item_name in ('Maximum Humidity', 'Minimum Humidity'):
            try:
                val = row.find_all('td')[1].get_text()
            except Exception:
                continue
            if is_number(val):
                daily_data[item_name] = val

    return daily_data
35abfda3ed6f80c213099149d5a3009c03be1d48
32,926
def image_tag_create(context, image_id, value, session=None):
    """Create an image tag."""
    session = session or get_session()
    tag_ref = models.ImageTag(image_id=image_id, value=value)
    tag_ref.save(session=session)
    return tag_ref['value']
5aaa684912ae18fe98beb1d62d1c219239c013c6
32,928
def test_branch_same_shape():
    """
    Feature: control flow function.
    Description: Two branches must return the same shape.
    Expectation: Null.
    """
    class Net(Cell):
        def __init__(self):
            super().__init__()
            self.a = 1

        def construct(self, x, y):
            for k in range(1):
                if x != 1:
                    for _ in range(1):
                        y = k * x
                    y = self.a + y
                if x > 5:
                    break
                if x == 5:
                    for _ in range(1):
                        y = self.a - y
                    if x == y:
                        continue
            return x + y

    x = np.array([-1], np.float32)
    y = np.array([2], np.float32)
    net = Net()
    grad_net = F.grad(net, grad_position=(1, 1))
    context.set_context(mode=context.GRAPH_MODE)
    fgrad = grad_net(Tensor(x), Tensor(y))
    print(fgrad)
57326097cb0da2c3982aea2cfeee5be19923b4cf
32,929
def potential(__func__=None, **kwds):
    """
    Decorator function instantiating potentials.

    Usage:

        @potential
        def B(parent_name = ., ...)
            return baz(parent_name, ...)

    where baz returns the deterministic B's value conditional on its parents.

    :SeeAlso:
      Deterministic, deterministic, Stochastic, Potential, stochastic, data, Model
    """
    def instantiate_pot(__func__):
        junk, parents = _extract(
            __func__, kwds, keys, 'Potential', probe=False)
        return Potential(parents=parents, **kwds)

    keys = ['logp']
    instantiate_pot.kwds = kwds
    if __func__:
        return instantiate_pot(__func__)
    return instantiate_pot
5023755aee2d887eb0077cb202c01013dda456e8
32,930
def numBytes(qimage):
    """Compatibility function btw. PyQt4 and PyQt5."""
    try:
        return qimage.numBytes()
    except AttributeError:
        return qimage.byteCount()
a2e5bfb28ef679858f0cdb2fb8065ad09b87c037
32,931
from matplotlib.dates import date2num


def _dt_to_decimal_time(datetime):
    """Convert a datetime.datetime object into a fraction-of-a-day float.

    Take the decimal part of the date converted to a number of days and
    return it. It gives the fraction of the way through the day: the time.
    """
    datetime_decimal = date2num(datetime)
    time_decimal = datetime_decimal - int(datetime_decimal)
    return time_decimal
febcaa0779cbd24340cc1da297e338f1c4d63385
32,932
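A worked example for the `_dt_to_decimal_time` entry above, assuming `date2num` comes from `matplotlib.dates`: noon is halfway through the day, so the fractional part is 0.5.

from datetime import datetime

print(_dt_to_decimal_time(datetime(2021, 3, 1, 12, 0)))   # 0.5
print(_dt_to_decimal_time(datetime(2021, 3, 1, 18, 0)))   # 0.75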
def poll_create(event, context):
    """
    Return true if the resource has been created and false otherwise so
    CloudFormation polls again.
    """
    endpoint_name = get_endpoint_name(event)
    logger.info("Polling for update of endpoint: %s", endpoint_name)
    return is_endpoint_ready(endpoint_name)
3ac7dd8a4142912035c48ff41c343e5d56caeba3
32,933
import numpy as np
import pandas as pd


def outsatstats_all(percent, Reads_per_CB, counts, inputbcs):
    """Take input from downsampled bam stats and return a df of genes, UMIs and reads for each bc.

    Args:
        percent (int): The percent the bamfile was downsampled.
        Reads_per_CB (file path): Space delimited file of the barcodes and # of reads.
        counts (file path): Tab delimited count matrix made from the downsampled bam.
        inputbcs (file path): File containing the list of barcodes used.

    Returns:
        reads (df): A pandas df with columns reads, genes, and UMIs for each bc.
    """
    UMIs = pd.read_csv(
        counts, delimiter='\t', index_col='gene', compression='gzip')

    # fill in an empty column for any barcodes that
    # have no UMIs at this read depth
    for i in inputbcs['Barcode'].tolist():
        if i not in UMIs.columns:
            UMIs[i] = UMIs.shape[0] * [0]

    # remove barcodes not in the list
    UMIs = UMIs[inputbcs['Barcode']]

    # make reads df
    reads = pd.read_csv(Reads_per_CB, delimiter=r'\s', header=None)
    reads[1] = reads[1].str.split(':', expand=True)[2]
    reads.columns = ["Reads", "Barcodes"]
    reads.index = reads['Barcodes']
    reads = reads[['Reads']]
    reads = reads[reads.index.isin(inputbcs['Barcode'])]
    for i in inputbcs['Barcode'].tolist():
        if i not in reads.index:
            reads.loc[i] = 0
    reads = reads.reindex(inputbcs['Barcode'], copy=False)

    # count number of genes for each barcode and UMIs per barcode
    reads['Genes'] = np.count_nonzero(UMIs, axis=0)
    reads['UMI'] = UMIs.sum(axis=0)

    return reads
f6d320e10c3171c543afdc6bdf70d83f5bcfb030
32,934
import itertools
import random
from typing import List, Optional


def generate_sums(
    n: int, min_terms: int, max_terms: int, *, seed=12345, fold=False, choose_from=None
) -> Optional[List[ExprWithEnv]]:
    """
    Generate the specified number of example expressions (with no duplicates).

    The constants used in the expressions will normally have a maximum value of 2,
    but this is increased if the number of examples requested is very large.

    If choose_from is not None then the expressions will be randomly sampled from a
    larger set of expressions, where the larger set consists of the expressions that
    would be generated for n=choose_from.
    """
    assert min_terms <= max_terms
    if choose_from is None:
        choose_from = n
    else:
        assert choose_from >= n
    rng = random.Random(seed)
    # Crude approximation of the total number of examples that it is possible to
    # generate with constants at most 2.
    approx_different_sums = sum(
        i * i * i for i in range(min_terms, max_terms + 1)
    )
    sums = set()
    sums_list = []
    for num_attempts in itertools.count():
        num_terms = num_attempts % (max_terms - min_terms + 1) + min_terms
        max_constant_term = num_attempts // approx_different_sums + 2
        new_sum = generate_sum(num_terms, rng, max_constant_term, fold=fold)
        if new_sum not in sums:
            sums.add(new_sum)
            sums_list.append(new_sum)
            if len(sums_list) >= choose_from:
                return sums_list if choose_from == n else rng.sample(sums_list, n)
    return None
26dbd81ec62fe15ff6279356bb5f41f894d033d2
32,937
import cv2
import numpy as np
import numpy.typing as npt
from PIL import Image


def arr2pil(frame: npt.NDArray[np.uint8]) -> Image.Image:
    """Convert from ``frame`` (BGR ``npt.NDArray``) to ``image`` (RGB ``Image.Image``).

    Args:
        frame (npt.NDArray[np.uint8]): A BGR ``npt.NDArray``.

    Returns:
        Image.Image: An RGB ``Image.Image``.
    """
    return Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
5878d983055d75653a54d2912473eacac3b7501d
32,939
def sophos_firewall_app_category_update_command(client: Client, params: dict) -> CommandResults:
    """Update an existing object.

    Args:
        client (Client): Sophos XG Firewall Client
        params (dict): params to update the object with

    Returns:
        CommandResults: Command results object
    """
    return generic_save_and_get(client, APP_CATEGORY['endpoint_tag'], params,
                                app_category_builder, APP_CATEGORY['table_headers'])
1ba77c9b2ad172e2d9c508c833a7d4b08fb5b876
32,940
import collections


def find_identities(l):
    """
    Takes in a list and returns a dictionary with seqs as keys and the
    positions of identical elements in the list as values.

    Args:
        l = list, e.g. mat[:, x]
    """
    # the number of items in the list will be the number of unique types
    uniq = [item for item, count in collections.Counter(l).items()]

    # Initialise a dictionary that will hold the results
    identDict = {}
    for item in uniq:
        identDict[item] = [x for x in range(len(l)) if l[x] == item]
    return identDict
db7b64cc430ab149de7d14e4f4a88abafbadbe34
32,941
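A tiny usage example for the `find_identities` entry above; any hashable items work, the characters below just mimic a sequence column:

vals = ['A', 'T', 'A', 'G', 'T', 'A']
print(find_identities(vals))   # {'A': [0, 2, 5], 'T': [1, 4], 'G': [3]}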
def decode_event_to_internal2(event):
    """ Enforce the binary encoding of address for internal usage. """
    data = event.event_data

    # Note: All addresses inside the event_data must be decoded.
    if data['event'] == EVENT_TOKEN_ADDED2:
        data['token_network_address'] = to_canonical_address(data['args']['token_network_address'])
        data['token_address'] = to_canonical_address(data['args']['token_address'])
    elif data['event'] == EVENT_CHANNEL_NEW2:
        data['participant1'] = to_canonical_address(data['args']['participant1'])
        data['participant2'] = to_canonical_address(data['args']['participant2'])
    elif data['event'] == EVENT_CHANNEL_NEW_BALANCE2:
        data['participant'] = to_canonical_address(data['args']['participant'])
    elif data['event'] == EVENT_CHANNEL_WITHDRAW:
        data['participant'] = to_canonical_address(data['args']['participant'])
    elif data['event'] == EVENT_CHANNEL_UNLOCK:
        data['participant'] = to_canonical_address(data['args']['participant'])
    elif data['event'] == EVENT_BALANCE_PROOF_UPDATED:
        data['closing_participant'] = to_canonical_address(data['args']['closing_participant'])
    elif data['event'] == EVENT_CHANNEL_CLOSED:
        data['closing_participant'] = to_canonical_address(data['args']['closing_participant'])
    elif data['event'] == EVENT_SECRET_REVEALED:
        data['secrethash'] = data['args']['secrethash']
        data['secret'] = data['args']['secret']

    return event
fdacba3f496f5aa3715b8f9c4f26c54a21ca3472
32,942
import numpy as np


def _repeated_features(n, n_informative, X):
    """Randomly select and copy n features from X, from the
    col range [0 ... n_informative].
    """
    Xrep = np.zeros((X.shape[0], n))
    for jj in range(n):
        # pick a random informative column (np.random.randint is the
        # non-deprecated equivalent of random_integers(0, n_informative - 1))
        rand_info_col = np.random.randint(0, n_informative)
        Xrep[:, jj] = X[:, rand_info_col]
    return Xrep
f15811a34bcc94fff77812a57a2f68178f7a8802
32,943
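A short sketch of how the `_repeated_features` entry above might be called; the array is a toy input where the first 3 columns are treated as informative:

import numpy as np

X = np.arange(12, dtype=float).reshape(4, 3)
Xrep = _repeated_features(2, 3, X)
print(Xrep.shape)   # (4, 2)
# every generated column is an exact copy of one of X's columns
print(all(any(np.array_equal(Xrep[:, j], X[:, k]) for k in range(3)) for j in range(2)))   # True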
def create_auto_edge_set(graph, transport_guid):
    """Set up an automatic MultiEdgeSet for the intersite graph

    From within MS-ADTS 6.2.2.3.4.4

    :param graph: the intersite graph object
    :param transport_guid: a transport type GUID
    :return: a MultiEdgeSet
    """
    e_set = MultiEdgeSet()
    # use a NULL guid, not associated with a SiteLinkBridge object
    e_set.guid = misc.GUID()

    for site_link in graph.edges:
        if site_link.con_type == transport_guid:
            e_set.edges.append(site_link)

    return e_set
5f6832506d0f31795dd82f92416bb532cc7237fe
32,944
def jaccard(box_a, box_b):
    """Compute the jaccard overlap of two sets of boxes.  The jaccard overlap
    is simply the intersection over union of two boxes.  Here we operate on
    ground truth boxes and default boxes.
    E.g.:
        A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B)
    Args:
        box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4]
        box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4]
    Return:
        jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)]
    """
    inter = intersect(box_a, box_b)
    area_a = ((box_a[:, 2]-box_a[:, 0]) *
              (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter)  # [A,B]
    area_b = ((box_b[:, 2]-box_b[:, 0]) *
              (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter)  # [A,B]
    union = area_a + area_b - inter
    return inter / union
fc72ebcaa47b7f0f1f27a0618cbe7592ada5ad70
32,945
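The `jaccard` entry above relies on an `intersect` helper that is not part of the snippet; the sketch below supplies the usual SSD-style pairwise-intersection helper as an assumption so the function can be exercised on a toy pair of boxes (intersection 1, union 4 + 4 - 1 = 7, so IoU ≈ 0.143):

import torch

def intersect(box_a, box_b):
    # Pairwise intersection areas between every box in box_a and every box in box_b.
    A, B = box_a.size(0), box_b.size(0)
    max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2),
                       box_b[:, 2:].unsqueeze(0).expand(A, B, 2))
    min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2),
                       box_b[:, :2].unsqueeze(0).expand(A, B, 2))
    inter = torch.clamp(max_xy - min_xy, min=0)
    return inter[:, :, 0] * inter[:, :, 1]

box_a = torch.tensor([[0., 0., 2., 2.]])
box_b = torch.tensor([[1., 1., 3., 3.]])
print(jaccard(box_a, box_b))   # tensor([[0.1429]])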
def concat(input, axis, main_program=None, startup_program=None):
    """
    This function concats the input along the axis mentioned
    and returns that as the output.
    """
    helper = LayerHelper('concat', **locals())
    out = helper.create_tmp_variable(dtype=helper.input_dtype())
    helper.append_op(
        type='concat',
        inputs={'X': input},
        outputs={'Out': [out]},
        attrs={'axis': axis})
    return out
55a6e8704141a45a135402dac10d6793f2ae6a28
32,946