Dataset schema: content — string, lengths 35 to 762k; sha1 — string, length 40; id — int64, 0 to 3.66M
def templateSummary():
    """ """
    # Load Model
    tablename = "survey_template"
    s3db[tablename]
    s3db.survey_complete
    crud_strings = s3.crud_strings[tablename]

    def postp(r, output):
        if r.interactive:
            if len(get_vars) > 0:
                dummy, template_id = get_vars.viewing.split(".")
            else:
                template_id = r.id
            form = s3db.survey_build_template_summary(template_id)
            output["items"] = form
            output["sortby"] = [[0, "asc"]]
            output["title"] = crud_strings.title_analysis_summary
            output["subtitle"] = crud_strings.subtitle_analysis_summary
        return output
    s3.postp = postp

    # remove CRUD generated buttons in the tabs
    s3db.configure(tablename,
                   listadd=False,
                   deletable=False,
                   )

    output = s3_rest_controller("survey", "template",
                                method="list",
                                rheader=s3.survey_template_rheader)
    s3.actions = None
    return output
6151e040b8e1c2491b3e282d0bb90fe278bf6dd8
32,710
def get_main_image():
    """Rendering the scatter chart"""
    yearly_temp = []
    yearly_hum = []
    for city in data:
        yearly_temp.append(sum(get_city_temperature(city)) / 12)
        yearly_hum.append(sum(get_city_humidity(city)) / 12)
    plt.clf()
    plt.scatter(yearly_hum, yearly_temp, alpha=0.5)
    plt.title('Yearly Average Temperature/Humidity')
    plt.xlim(70, 95)
    plt.ylabel('Yearly Average Temperature')
    plt.xlabel('Yearly Average Relative Humidity')
    for i, txt in enumerate(CITIES):
        plt.annotate(txt, (yearly_hum[i], yearly_temp[i]))
    img = BytesIO()
    plt.savefig(img)
    img.seek(0)
    return img
b9845a44b868353e878b53beb6faf9c17bdf07d6
32,712
def get_PhotoImage(path, scale=1.0):
    """Generate a TKinter-compatible photo image, given a path, and a
    scaling factor.

    Parameters
    ----------
    path : str
        Path to the image file.
    scale : float, default: 1.0
        Scaling factor.

    Returns
    -------
    img : `PIL.ImageTk.PhotoImage <https://pillow.readthedocs.io/en/4.2.x/\
reference/ImageTk.html#PIL.ImageTk.PhotoImage>`_
        Tkinter-compatible image. This can be incorporated into a GUI using
        tk.Label(parent, image=img)
    """
    image = Image.open(path).convert('RGBA')
    [w, h] = image.size
    new_w = int(w * scale)
    new_h = int(h * scale)
    image = image.resize((new_w, new_h), Image.ANTIALIAS)
    return ImageTk.PhotoImage(image)
02402574e0641a1caced9fe0b07434db5c84dee5
32,713
import gc def MCLA(hdf5_file_name, cluster_runs, verbose = False, N_clusters_max = None): """Meta-CLustering Algorithm for a consensus function. Parameters ---------- hdf5_file_name : file handle or string cluster_runs : array of shape (n_partitions, n_samples) verbose : bool, optional (default = False) N_clusters_max : int, optional (default = None) Returns ------- A vector specifying the cluster label to which each sample has been assigned by the MCLA approximation algorithm for consensus clustering. Reference --------- A. Strehl and J. Ghosh, "Cluster Ensembles - A Knowledge Reuse Framework for Combining Multiple Partitions". In: Journal of Machine Learning Research, 3, pp. 583-617. 2002 """ print('\n*****') print('INFO: Cluster_Ensembles: MCLA: consensus clustering using MCLA.') if N_clusters_max == None: N_clusters_max = int(np.nanmax(cluster_runs)) + 1 N_runs = cluster_runs.shape[0] N_samples = cluster_runs.shape[1] print("INFO: Cluster_Ensembles: MCLA: preparing graph for meta-clustering.") hypergraph_adjacency = load_hypergraph_adjacency(hdf5_file_name) w = hypergraph_adjacency.sum(axis = 1) N_rows = hypergraph_adjacency.shape[0] print("INFO: Cluster_Ensembles: MCLA: done filling hypergraph adjacency matrix. " "Starting computation of Jaccard similarity matrix.") # Next, obtain a matrix of pairwise Jaccard similarity scores between the rows of the hypergraph adjacency matrix. with tables.open_file(hdf5_file_name, 'r+') as fileh: FILTERS = get_compression_filter(4 * (N_rows ** 2)) similarities_MCLA = fileh.create_carray(fileh.root.consensus_group, 'similarities_MCLA', tables.Float32Atom(), (N_rows, N_rows), "Matrix of pairwise Jaccard " "similarity scores", filters = FILTERS) scale_factor = 100.0 print("INFO: Cluster_Ensembles: MCLA: " "starting computation of Jaccard similarity matrix.") squared_MCLA = hypergraph_adjacency.dot(hypergraph_adjacency.transpose()) squared_sums = hypergraph_adjacency.sum(axis = 1) squared_sums = np.squeeze(np.asarray(squared_sums)) chunks_size = get_chunk_size(N_rows, 7) for i in range(0, N_rows, chunks_size): n_dim = min(chunks_size, N_rows - i) temp = squared_MCLA[i:min(i+chunks_size, N_rows), :].todense() temp = np.squeeze(np.asarray(temp)) x = squared_sums[i:min(i+chunks_size, N_rows)] x = x.reshape(-1, 1) x = np.dot(x, np.ones((1, squared_sums.size))) y = np.dot(np.ones((n_dim, 1)), squared_sums.reshape(1, -1)) temp = np.divide(temp, x + y - temp) temp *= scale_factor Jaccard_matrix = np.rint(temp) similarities_MCLA[i:min(i+chunks_size, N_rows)] = Jaccard_matrix del Jaccard_matrix, temp, x, y gc.collect() # Done computing the matrix of pairwise Jaccard similarity scores. print("INFO: Cluster_Ensembles: MCLA: done computing the matrix of " "pairwise Jaccard similarity scores.") cluster_labels = cmetis(hdf5_file_name, N_clusters_max, w) cluster_labels = one_to_max(cluster_labels) # After 'cmetis' returns, we are done with clustering hyper-edges # We are now ready to start the procedure meant to collapse meta-clusters. 
N_consensus = np.amax(cluster_labels) + 1 fileh = tables.open_file(hdf5_file_name, 'r+') FILTERS = get_compression_filter(4 * N_consensus * N_samples) clb_cum = fileh.create_carray(fileh.root.consensus_group, 'clb_cum', tables.Float32Atom(), (N_consensus, N_samples), 'Matrix of mean memberships, forming meta-clusters', filters = FILTERS) chunks_size = get_chunk_size(N_samples, 7) for i in range(0, N_consensus, chunks_size): x = min(chunks_size, N_consensus - i) matched_clusters = np.where(cluster_labels == np.reshape(np.arange(i, min(i + chunks_size, N_consensus)), newshape = (x, 1))) M = np.zeros((x, N_samples)) for j in range(x): coord = np.where(matched_clusters[0] == j)[0] M[j] = np.asarray(hypergraph_adjacency[matched_clusters[1][coord], :].mean(axis = 0)) clb_cum[i:min(i+chunks_size, N_consensus)] = M # Done with collapsing the hyper-edges into a single meta-hyper-edge, # for each of the (N_consensus - 1) meta-clusters. del hypergraph_adjacency gc.collect() # Each object will now be assigned to its most associated meta-cluster. chunks_size = get_chunk_size(N_consensus, 4) N_chunks, remainder = divmod(N_samples, chunks_size) if N_chunks == 0: null_columns = np.where(clb_cum[:].sum(axis = 0) == 0)[0] else: szumsz = np.zeros(0) for i in range(N_chunks): M = clb_cum[:, i*chunks_size:(i+1)*chunks_size] szumsz = np.append(szumsz, M.sum(axis = 0)) if remainder != 0: M = clb_cum[:, N_chunks*chunks_size:N_samples] szumsz = np.append(szumsz, M.sum(axis = 0)) null_columns = np.where(szumsz == 0)[0] if null_columns.size != 0: print("INFO: Cluster_Ensembles: MCLA: {} objects with all zero associations " "in 'clb_cum' matrix of meta-clusters.".format(null_columns.size)) clb_cum[:, null_columns] = np.random.rand(N_consensus, null_columns.size) random_state = np.random.RandomState() tmp = fileh.create_carray(fileh.root.consensus_group, 'tmp', tables.Float32Atom(), (N_consensus, N_samples), "Temporary matrix to help with " "collapsing to meta-hyper-edges", filters = FILTERS) chunks_size = get_chunk_size(N_samples, 2) N_chunks, remainder = divmod(N_consensus, chunks_size) if N_chunks == 0: tmp[:] = random_state.rand(N_consensus, N_samples) else: for i in range(N_chunks): tmp[i*chunks_size:(i+1)*chunks_size] = random_state.rand(chunks_size, N_samples) if remainder !=0: tmp[N_chunks*chunks_size:N_consensus] = random_state.rand(remainder, N_samples) expr = tables.Expr("clb_cum + (tmp / 10000)") expr.set_output(clb_cum) expr.eval() expr = tables.Expr("abs(tmp)") expr.set_output(tmp) expr.eval() chunks_size = get_chunk_size(N_consensus, 2) N_chunks, remainder = divmod(N_samples, chunks_size) if N_chunks == 0: sum_diag = tmp[:].sum(axis = 0) else: sum_diag = np.empty(0) for i in range(N_chunks): M = tmp[:, i*chunks_size:(i+1)*chunks_size] sum_diag = np.append(sum_diag, M.sum(axis = 0)) if remainder != 0: M = tmp[:, N_chunks*chunks_size:N_samples] sum_diag = np.append(sum_diag, M.sum(axis = 0)) fileh.remove_node(fileh.root.consensus_group, "tmp") # The corresponding disk space will be freed after a call to 'fileh.close()'. 
inv_sum_diag = np.reciprocal(sum_diag.astype(float)) if N_chunks == 0: clb_cum *= inv_sum_diag max_entries = np.amax(clb_cum, axis = 0) else: max_entries = np.zeros(N_samples) for i in range(N_chunks): clb_cum[:, i*chunks_size:(i+1)*chunks_size] *= inv_sum_diag[i*chunks_size:(i+1)*chunks_size] max_entries[i*chunks_size:(i+1)*chunks_size] = np.amax(clb_cum[:, i*chunks_size:(i+1)*chunks_size], axis = 0) if remainder != 0: clb_cum[:, N_chunks*chunks_size:N_samples] *= inv_sum_diag[N_chunks*chunks_size:N_samples] max_entries[N_chunks*chunks_size:N_samples] = np.amax(clb_cum[:, N_chunks*chunks_size:N_samples], axis = 0) cluster_labels = np.zeros(N_samples, dtype = int) winner_probabilities = np.zeros(N_samples) chunks_size = get_chunk_size(N_samples, 2) for i in reversed(range(0, N_consensus, chunks_size)): ind = np.where(np.tile(max_entries, (min(chunks_size, N_consensus - i), 1)) == clb_cum[i:min(i+chunks_size, N_consensus)]) cluster_labels[ind[1]] = i + ind[0] winner_probabilities[ind[1]] = clb_cum[(ind[0] + i, ind[1])] # Done with competing for objects. cluster_labels = one_to_max(cluster_labels) print("INFO: Cluster_Ensembles: MCLA: delivering " "{} clusters.".format(np.unique(cluster_labels).size)) print("INFO: Cluster_Ensembles: MCLA: average posterior " "probability is {}".format(np.mean(winner_probabilities))) if cluster_labels.size <= 7: print("INFO: Cluster_Ensembles: MCLA: the winning posterior probabilities are:") print(winner_probabilities) print("'INFO: Cluster_Ensembles: MCLA: the full posterior probabilities are:") print(clb_cum) fileh.remove_node(fileh.root.consensus_group, "clb_cum") fileh.close() return cluster_labels
f7059c0afd6f346d82ec36eae90f6c3fa8459dad
32,714
from datetime import datetime


def iso_date(iso_string):
    """Convert an ISO string YYYY-MM-DD to a python datetime.date.

    Note: if only the year is supplied, we assume month=1 and day=1.
    This function is no longer used; dates from lists are always strings.
    """
    if len(iso_string) == 4:
        iso_string = iso_string + '-01-01'
    d = datetime.strptime(iso_string, '%Y-%m-%d')
    return d.date()
7f29b22744d384187e293c546d4c28790c211e99
32,716
def summary_dist_xdec(res, df1, df2):
    """
    res is dictionary of summary-results DataFrames.
    df1 contains results variables for baseline policy.
    df2 contains results variables for reform policy.
    returns augmented dictionary of summary-results DataFrames.
    """
    # create distribution tables grouped by xdec
    res['dist1_xdec'] = \
        create_distribution_table(df1, 'weighted_deciles', 'expanded_income')
    df2['expanded_income_baseline'] = df1['expanded_income']
    res['dist2_xdec'] = \
        create_distribution_table(df2, 'weighted_deciles',
                                  'expanded_income_baseline')
    del df2['expanded_income_baseline']
    # return res dictionary
    return res
408bf1c8916d5338dbc01f41acb57dcc37e009e9
32,717
def moore_to_basu(moore, rr, lam): """Returns the coordinates, speeds, and accelerations in BasuMandal2007's convention. Parameters ---------- moore : dictionary A dictionary containg values for the q's, u's and u dots. rr : float Rear wheel radius. lam : float Steer axis tilt. Returns ------- basu : dictionary A dictionary containing the coordinates, speeds and accelerations. """ m = moore basu = {} s3 = sin(m['q3']) c3 = cos(m['q3']) s4 = sin(m['q4']) c4 = cos(m['q4']) basu['x'] = rr * s3 * s4 - m['q1'] basu['y'] = rr * c3 * s4 + m['q2'] basu['z'] = rr * c4 basu['theta'] = -m['q3'] basu['psi'] = pi / 2.0 - m['q4'] basu['phi'] = pi + lam - m['q5'] basu['betar'] = -m['q6'] basu['psif'] = -m['q7'] basu['betaf'] = -m['q8'] basu['xd'] = rr * (c3 * s4 * m['u3'] + s3 * c4 * m['u4']) - m['u1'] basu['yd'] = rr * (-s3 * s4 * m['u3'] + c3 * c4 * m['u4']) + m['u2'] basu['zd'] = -rr * m['u4'] * s4 basu['thetad'] = -m['u3'] basu['psid'] = -m['u4'] basu['phid'] = -m['u5'] basu['betard'] = -m['u6'] basu['psifd'] = -m['u7'] basu['betafd'] = -m['u8'] basu['xdd'] = (rr * (-s3 * s4 * m['u3']**2 + c3 * c4 * m['u3'] * m['u4'] + c3 * s4 * m['u3p'] + c3 * c4 * m['u3'] * m['u4'] - s3 * s4 * m['u4']**2 + s3 * c4 * m['u4p']) - m['u1p']) basu['ydd'] = (m['u2p'] - rr * c3 * s4 * m['u3']**2 - rr * s3 * c4 * m['u3'] * m['u4'] - rr * s3 * s4 * m['u3p'] - rr * s3 * c4 * m['u3'] * m['u4'] - rr * c3 * s4 * m['u4']**2 + rr * c3 * c4 * m['u4p']) basu['zdd'] = -rr * (m['u4p'] * s4 + m['u4']**2 * c4) basu['thetadd'] = -m['u3p'] basu['psidd'] = -m['u4p'] basu['phidd'] = -m['u5p'] basu['betardd'] = -m['u6p'] basu['psifdd'] = -m['u7p'] basu['betafdd'] = -m['u8p'] return basu
f599c2f5226dc4a12de73e1ddc360b6176d915ed
32,719
def get_username(sciper):
    """ return username of user """
    attribute = 'uid'
    response = LDAP_search(
        pattern_search='(uniqueIdentifier=' + sciper + ')',
        attribute=attribute
    )
    return response[0]['attributes'][attribute][0]
9da92bb2f1b0b733a137ed0bf62d8817c9c13ed8
32,720
def number_to_string(s, number):
    """
    :param s: word user input
    :param number: string of int which represent possible anagram
    :return: word of alphabet
    """
    word = ''
    for i in number:
        word += s[int(i)]
    return word
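A minimal usage sketch for number_to_string above; the word and index string are made-up illustrative values, not taken from the dataset:

# Hypothetical example: indices "210" pick s[2], s[1], s[0] of "abc", giving "cba".
print(number_to_string("abc", "210"))  # -> cba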
7997b20264d0750e2b671a04aacb56c2a0559d8c
32,721
def mean(num_lst):
    """
    Calculates the mean of a list of numbers

    Parameters
    ----------
    num_lst : list
        List of numbers to calculate the average of

    Returns
    -------
    The average/mean of num_lst

    Examples
    --------
    >>> mean([1,2,3,4,5])
    3.0
    """
    return sum(num_lst) / len(num_lst)
bc6f86fc793bad165afc8f319a3094f3fae91361
32,722
import pandas def development_create_database(df_literature, df_inorganics, df_predictions, inp): """ Create mass transition database. Create mass transition database based on literature, inorganic and prediction data. Parameters ---------- df_literature : dataframe Dataframe with parsed literature data. df_inorganics : dataframe Dataframe with parsed inorganics data. df_predictions : dataframe Dataframe with parsed prediction data. inp : dict Input dictionary. Returns ------- df_database : dataframe Dataframe with joined mass transitions. """ # Preallocate database df_database = pandas.DataFrame() # Append database with input df_database = df_database.append(df_literature, sort = False) df_database = df_database.append(df_inorganics, sort = False) df_database = df_database.append(df_predictions, sort = False) # Fill database with standard parameter df_database['EP [V]'].fillna(value = 10, inplace = True) df_database['CXP [V]'].fillna(value = 4, inplace = True) df_database['DP [V]'].fillna(value = 0, inplace = True) df_database = df_database.reset_index(drop = True) # Regression for DP inp = development_create_database_linear_declustering_potential(df_literature, inp) DP_slope = inp['DP_slope'] DP_intercept = inp['DP_intercept'] # Cycle analytes for index, row in df_database.iterrows(): # Set declustering potential if row['DP [V]'] == 0: df_database.at[index,'DP [V]'] = round(DP_slope*row['Q1 [m/z]'] + DP_intercept) elif row['DP [V]'] >= 200: df_database.at[index,'DP [V]'] = 200 else: None # Set minimal and maximal collision energy if row['CE [V]'] < 5: df_database.at[index,'CE [V]'] = 5 elif row['CE [V]'] > 130: df_database.at[index,'CE [V]'] = 130 else: None # Set entrance potential if row['EP [V]'] > 15: df_database.at[index,'EP [V]'] = 15 # Set collision cell exit potential if row['CXP [V]'] > 55: df_database.at[index, 'CXP [V]'] = 55 # Prioritize mass transitions of inhouse and literature data before predictions df_database = df_database.drop_duplicates(subset=['compound_id','mode','Q1 [m/z]','Q3 [m/z]'], keep = 'first') return df_database
58b52d84ca98d770d3ace4a0b4dae4b369883284
32,723
import requests
from typing import IO
import hashlib


def _stream_to_file(
    r: requests.Response,
    file: IO[bytes],
    chunk_size: int = 2**14,
    progress_bar_min_bytes: int = 2**25,
) -> str:
    """Stream the response to the file, returning the checksum.

    :param progress_bar_min_bytes: Minimum number of bytes to display a
        progress bar for. Default is 32MB
    """
    # check header to get content length, in bytes
    total_length = int(r.headers.get("content-length", 0))
    md5 = hashlib.md5()
    streamer = r.iter_content(chunk_size=chunk_size)
    display_progress = total_length > progress_bar_min_bytes
    if display_progress:
        progress.start()
        task_id = progress.add_task("Downloading", total=total_length)
    for chunk in streamer:  # 16k
        file.write(chunk)
        md5.update(chunk)
        if display_progress:
            progress.update(task_id, advance=len(chunk))
    if display_progress:
        progress.stop()
    return md5.hexdigest()
44d0529a5fdb0a14ac4dcddbfecf23442678a75a
32,724
def get_user(key: str, user: int, type_return: str = 'dict', **kwargs):
    """Retrieve general user information."""
    params = {
        'k': key,
        'u': user,
        'm': kwargs['mode'] if 'mode' in kwargs else 0,
        'type': kwargs['type_'] if 'type_' in kwargs else None,
        'event_days': kwargs['event_days'] if 'event_days' in kwargs else 1}
    r = req.get(urls['user'], params=params)
    return from_json(r.text, type_return)
1b7a5c144267c012a69aff2f02f771d848e8883a
32,725
def infer_schema(example, binary_features=[]): """Given a tf.train.Example, infer the Spark DataFrame schema (StructFields). Note: TensorFlow represents both strings and binary types as tf.train.BytesList, and we need to disambiguate these types for Spark DataFrames DTypes (StringType and BinaryType), so we require a "hint" from the caller in the ``binary_features`` argument. Args: :example: a tf.train.Example :binary_features: a list of tf.train.Example features which are expected to be binary/bytearrays. Returns: A DataFrame StructType schema """ def _infer_sql_type(k, v): # special handling for binary features if k in binary_features: return BinaryType() if v.int64_list.value: result = v.int64_list.value sql_type = LongType() elif v.float_list.value: result = v.float_list.value sql_type = DoubleType() else: result = v.bytes_list.value sql_type = StringType() if len(result) > 1: # represent multi-item tensors as Spark SQL ArrayType() of base types return ArrayType(sql_type) else: # represent everything else as base types (and empty tensors as StringType()) return sql_type return StructType([StructField(k, _infer_sql_type(k, v), True) for k, v in sorted(example.features.feature.items())])
bd952c278fafa809342b27755e2208b72bd25964
32,726
import collections def create_batches_of_sentence_ids(sentences, batch_equal_size, max_batch_size): """ Groups together sentences into batches If max_batch_size is positive, this value determines the maximum number of sentences in each batch. If max_batch_size has a negative value, the function dynamically creates the batches such that each batch contains abs(max_batch_size) words. Returns a list of lists with sentences ids. """ batches_of_sentence_ids = [] if batch_equal_size == True: sentence_ids_by_length = collections.OrderedDict() sentence_length_sum = 0.0 for i in range(len(sentences)): length = len(sentences[i]) if length not in sentence_ids_by_length: sentence_ids_by_length[length] = [] sentence_ids_by_length[length].append(i) for sentence_length in sentence_ids_by_length: if max_batch_size > 0: batch_size = max_batch_size else: batch_size = int((-1 * max_batch_size) / sentence_length) for i in range(0, len(sentence_ids_by_length[sentence_length]), batch_size): batches_of_sentence_ids.append(sentence_ids_by_length[sentence_length][i:i + batch_size]) else: current_batch = [] max_sentence_length = 0 for i in range(len(sentences)): current_batch.append(i) if len(sentences[i]) > max_sentence_length: max_sentence_length = len(sentences[i]) if (max_batch_size > 0 and len(current_batch) >= max_batch_size) \ or (max_batch_size <= 0 and len(current_batch)*max_sentence_length >= (-1 * max_batch_size)): batches_of_sentence_ids.append(current_batch) current_batch = [] max_sentence_length = 0 if len(current_batch) > 0: batches_of_sentence_ids.append(current_batch) return batches_of_sentence_ids
8db116e73e791d7eb72f080b408bb52d60481db7
32,728
def com_com_distances_axis(universe, mda_selection_pairs, fstart=0, fend=-1, fstep=1, axis='z'): """Center of mass to Center of mass distance in one dimension (along an axis). This function computes the distance between the centers of mass between pairs of MDAnalysis atoms selections across the the MD trajectory, but only uses the 1d coordinate of the specified axis. Args: universe (MDAnalysis.Universe): The MDAnalysis universe object to run the analysis on. mda_selection_pairs (list): A list of 2 element lists or tuples containing pairs of MDAnalsysis atom selection objects to compute the distance between. fstart (int): Optional, the first frame to include in the analysis. Default: 0 (or the first frame) fend (int): Optional, the last frame to include in the analysis. Default: -1 (or the last frame) fstep (int): Optional, the interval between frames in the analysis when looping from fstart to fend. Default: 1 (or every frame) axis (str): Optional, the 1d axis to compute the distance in. Default: 'z' (or the z axis) Returns: (np.array), (list): Returns two outputs. The first is an Numpy array with the timeseries simulation times corresponding to the frames in the analysis. The second is list of Numpy arrays with the distances; the order in the list corresponds to the atom selection pairs in the mda_selection_pairs input. """ dir_ind = 2 if axis is 'x': dir_ind = 0 elif axis is 'y': dir_ind = 1 #indices = mda_selection.indices fstart, fend = _adjust_frame_range_for_slicing(fstart, fend, len(universe.trajectory)) times = [] pair_dists = [] for pair in mda_selection_pairs: pair_dists.append([]) for frame in universe.trajectory[fstart:fend:fstep]: times.append(frame.time) i = 0 for pair in mda_selection_pairs: sel_1 = pair[0] sel_2 = pair[1] com_1 = sel_1.atoms.center_of_mass() com_2 = sel_2.atoms.center_of_mass() norm_val_1 = com_1[dir_ind] norm_val_2 = com_2[dir_ind] dist = np.abs(norm_val_2 - norm_val_1) pair_dists[i].append(dist) i+=1 times = np.array(times) i=0 for vals in pair_dists: pair_dists[i] = np.array(vals) i+=1 return times, pair_dists
7005d13e1f18597865ca5c5dc14ec09efd7c63e1
32,729
def min_vertex_cover(G, sampler=None, **sampler_args): """Returns an approximate minimum vertex cover. Defines a QUBO with ground states corresponding to a minimum vertex cover and uses the sampler to sample from it. A vertex cover is a set of vertices such that each edge of the graph is incident with at least one vertex in the set. A minimum vertex cover is the vertex cover of smallest size. Parameters ---------- G : NetworkX graph The graph on which to find a minimum vertex cover. sampler A binary quadratic model sampler. A sampler is a process that samples from low energy states in models defined by an Ising equation or a Quadratic Unconstrained Binary Optimization Problem (QUBO). A sampler is expected to have a 'sample_qubo' and 'sample_ising' method. A sampler is expected to return an iterable of samples, in order of increasing energy. If no sampler is provided, one must be provided using the `set_default_sampler` function. sampler_args Additional keyword parameters are passed to the sampler. Returns ------- vertex_cover : list List of nodes that form a minimum vertex cover, as determined by the given sampler. Examples -------- This example uses a sampler from `dimod <https://github.com/dwavesystems/dimod>`_ to find a minimum vertex cover for a Chimera unit cell. Both the horizontal (vertices 0,1,2,3) and vertical (vertices 4,5,6,7) tiles connect to all 16 edges, so repeated executions can return either set. >>> import dwave_networkx as dnx >>> import dimod >>> sampler = dimod.ExactSolver() # small testing sampler >>> G = dnx.chimera_graph(1, 1, 4) >>> G.remove_node(7) # to give a unique solution >>> dnx.min_vertex_cover(G, sampler) [4, 5, 6] Notes ----- Samplers by their nature may not return the optimal solution. This function does not attempt to confirm the quality of the returned sample. References ---------- https://en.wikipedia.org/wiki/Vertex_cover https://en.wikipedia.org/wiki/Quadratic_unconstrained_binary_optimization .. [AL] Lucas, A. (2014). Ising formulations of many NP problems. Frontiers in Physics, Volume 2, Article 5. """ return min_weighted_vertex_cover(G, None, sampler, **sampler_args)
b8681077d0bbb8504cdf5c96250e668bbcfe6d4e
32,730
def quantile_bin_array(data, bins=6):
    """Returns symbolified array with equal-quantile binning.

    Parameters
    ----------
    data : array
        Data array of shape (time, variables).
    bins : int, optional (default: 6)
        Number of bins.

    Returns
    -------
    symb_array : array
        Converted data of integer type.
    """
    T, N = data.shape

    # get the bin quantile steps
    bin_edge = int(np.ceil(T / float(bins)))

    symb_array = np.zeros((T, N), dtype='int32')

    # get the lower edges of the bins for every time series
    edges = np.sort(data, axis=0)[::bin_edge, :].T
    bins = edges.shape[1]

    # This gives the symbolic time series
    symb_array = (data.reshape(T, N, 1) >= edges.reshape(1, N, bins)).sum(
        axis=2) - 1

    return symb_array.astype('int32')
87d8c64a30581b700d1a4674e4527882be99444f
32,732
import _socket


def wrap_socket(sock: _socket.socket) -> AsyncSocket:
    """
    Wraps a standard socket into an async socket
    """
    return AsyncSocket(sock)
70a6829bdf9048514ffe5bd5b831952f1ecd8e89
32,733
def _calculate_outer_product_steps(signed_steps, n_steps, dim_x):
    """Calculate array of outer product of steps.

    Args:
        signed_steps (np.ndarray): Square array with either pos or neg steps returned
            by :func:`~estimagic.differentiation.generate_steps.generate_steps` function
        n_steps (int): Number of steps needed. For central methods, this is the number
            of steps per direction. It is 1 if no Richardson extrapolation is used.
        dim_x (int): Dimension of input vector x.

    Returns:
        outer_product_steps (np.ndarray): Array with outer product of steps.
            Has dimension (n_steps, 1, dim_x, dim_x).
    """
    outer_product_steps = np.array(
        [np.outer(signed_steps[j], signed_steps[j]) for j in range(n_steps)]
    ).reshape(n_steps, 1, dim_x, dim_x)
    return outer_product_steps
18aeadc5cb7866e6b99b5da9a2b9e6bc6ebb7c44
32,734
def compute_lima_on_off_image(n_on, n_off, a_on, a_off, kernel): """Compute Li & Ma significance and flux images for on-off observations. Parameters ---------- n_on : `~gammapy.maps.WcsNDMap` Counts image n_off : `~gammapy.maps.WcsNDMap` Off counts image a_on : `~gammapy.maps.WcsNDMap` Relative background efficiency in the on region a_off : `~gammapy.maps.WcsNDMap` Relative background efficiency in the off region kernel : `astropy.convolution.Kernel2D` Convolution kernel Returns ------- images : dict Dictionary containing result maps Keys are: significance, n_on, background, excess, alpha See also -------- gammapy.stats.significance_on_off """ # Kernel is modified later make a copy here kernel = deepcopy(kernel) kernel.normalize("peak") n_on_conv = n_on.convolve(kernel.array).data a_on_conv = a_on.convolve(kernel.array).data alpha_conv = a_on_conv / a_off.data significance_conv = significance_on_off( n_on_conv, n_off.data, alpha_conv, method="lima" ) with np.errstate(invalid="ignore"): background_conv = alpha_conv * n_off.data excess_conv = n_on_conv - background_conv return { "significance": n_on.copy(data=significance_conv), "n_on": n_on.copy(data=n_on_conv), "background": n_on.copy(data=background_conv), "excess": n_on.copy(data=excess_conv), "alpha": n_on.copy(data=alpha_conv), }
a9bde10722cbed4dab79f157ee478c9b5ba35d86
32,735
def data_preprocess(ex, mode='uniform', z_size=20):
    """
    Convert image dtype and scale image into the range [-1, 1]
    :param z_size:
    :param ex:
    :param mode:
    :return:
    """
    image = ex['image']
    image = tf.image.convert_image_dtype(image, tf.float32)
    image = tf.reshape(image, [-1])
    image = image * 2 - 1.0
    if mode == 'uniform':
        input_z = tf.random.uniform(
            shape=(z_size,), minval=-1.0, maxval=1.0
        )
    elif mode == 'normal':
        input_z = tf.random.normal(shape=(z_size,))
    return input_z, image
95131b5e03afbc0a3c797570a48d49ca93f15116
32,736
def p2wpkh(pubkey: PubKey, network: str = 'mainnet') -> bytes:
    """Return the p2wpkh (bech32 native) SegWit address."""
    network_index = _NETWORKS.index(network)
    ec = _CURVES[network_index]
    pubkey = to_pubkey_bytes(pubkey, True, ec)
    h160 = hash160(pubkey)
    return b32address_from_witness(0, h160, network)
ecb4c60871e0dc362d3576d2f02d8e07cd47614e
32,737
import re


def is_not_from_subdomain(response, site_dict):
    """
    Ensures the response's url isn't from a subdomain.

    :param obj response: The scrapy response
    :param dict site_dict: The site object from the JSON-File
    :return bool: Determines if the response's url is from a subdomain
    """
    root_url = re.sub(re_url_root, '', site_dict["url"])
    return get_allowed_domain(response.url) == root_url
d3fa99cc8a91942de5f3ec9cb8249c62c7488821
32,738
import random


def get_affiliation():
    """Return a school/organization affiliation."""
    return random.choice(AFFILIATIONS)
41356c95447352b9ab96db5783efb5ce511e0d00
32,739
def update_item_feature(train, num_item, user_features, lambda_item, nz_users_indices, robust=False): """ Update item feature matrix :param train: training data, sparse matrix of shape (num_item, num_user) :param num_item: number of items :param user_features: factorized user features, dense matrix of shape (num_feature, num_user) :param lambda_item: ridge regularization parameter :param nz_users_indices: list of arrays, contains the non-zero indices of each row (item) in train :param robust: True to enable robustsness against singular matrices :return: item_features: updated factorized item features, dense matrix of shape (num_feature, num_item) """ num_features = user_features.shape[0] item_features = np.zeros((num_features, num_item)) for item in range(num_item): y = train[item, nz_users_indices[item]].todense().T # non-zero elements of line n° item of train x = user_features[:, nz_users_indices[item]] # corresponding columns of user_features nnz = nz_users_indices[item].shape[0] # Solution to ridge problem min(|X.T @ w - y|^2 + lambda * |w|^2) wy = x.dot(y) if not robust: w = np.linalg.solve(x.dot(x.T) + lambda_item * nnz * np.identity(num_features), wy) else: w = np.linalg.lstsq(x.dot(x.T) + lambda_item * nnz * np.identity(num_features), wy)[0] item_features[:, item] = w.ravel() return item_features
95b56754830cdcd8ddbfcabc25adcad1698903af
32,740
def generate_mutation(model, mutation_type): """ Generate a model mutation. Create the mutation class Parameters: model (dict): the model dictionary from settings mutation_type (str): the mutation type (create, delete, update) Returns: graphene.Mutation.Field: the mutation field """ mutation_class_name = "{}{}".format( model['name'].title(), mutation_type.title()) model_class = import_string(model['path']) arguments = get_arguments(model_class, mutation_type) mutate = get_mutate(model, mutation_type, model_class, mutation_class_name) # create the mutation class globals()[mutation_class_name] = type(mutation_class_name, (graphene.Mutation,), { '__module__': __name__, "Arguments": type("Arguments", (), arguments), "message": graphene.String(), "ingredient": graphene.Field(globals()["{}Type".format(model['name'].title())]), "mutate": mutate }) return globals()[mutation_class_name].Field()
1e1c39a76508c8f33179087786c08203af57c036
32,741
def _wf_to_char(string):
    """Wordfast &'XX; escapes -> Char"""
    if string:
        for code, char in WF_ESCAPE_MAP:
            string = string.replace(code, char.encode('utf-8'))
        string = string.replace("\\n", "\n").replace("\\t", "\t")
    return string
9270f4ff5a03265956d006bd08d04e417a0c5a14
32,743
import numpy as np
import pandas as pd
from datetime import time


def agg_15_min_load_profile(load_profile_df):
    """
    Aggregates 1-Hz load profile by taking average demand over 15-min increments.
    """
    s_in_15min = 15 * 60

    # prepare idx slices
    start_idxs = np.arange(0, len(load_profile_df), s_in_15min)
    end_idxs = np.arange(s_in_15min, len(load_profile_df) + s_in_15min, s_in_15min)

    # generate list of avg kw over 15-min increments
    avg_15min_kw = []  # init
    for s_idx, e_idx in zip(start_idxs, end_idxs):
        avg_15min_kw.append(load_profile_df['power_kW'][s_idx:e_idx].mean())

    # build the 15-min time labels (datetime.time objects rendered as strings)
    times = []  # init
    for hour in range(24):
        for minute in range(0, 60, 15):
            times.append(str(time(hour, minute, 0)))

    # create pd.DataFrame
    agg_15min_load_profile_df = pd.DataFrame({'time': times,
                                              'avg_power_kw': avg_15min_kw})

    return agg_15min_load_profile_df
6a92abf10b6f976d4b48bc7e6bfb4c0e44b1f4c5
32,744
def get_gitbuilder_hash(project=None, branch=None, flavor=None,
                        machine_type=None, distro=None, distro_version=None):
    """
    Find the hash representing the head of the project's repository via
    querying a gitbuilder repo.

    Will return None in the case of a 404 or any other HTTP error.
    """
    # Alternate method for github-hosted projects - left here for informational
    # purposes
    # resp = requests.get(
    #     'https://api.github.com/repos/ceph/ceph/git/refs/heads/master')
    # hash = .json()['object']['sha']
    (arch, release, _os) = get_distro_defaults(distro, machine_type)
    if distro is None:
        distro = _os.name
    bp = get_builder_project()(
        project,
        dict(
            branch=branch,
            flavor=flavor,
            os_type=distro,
            os_version=distro_version,
            arch=arch,
        ),
    )
    return bp.sha1
980abab1d3ff8bf1acdd0aec43f6ce5d5a2b6c45
32,746
def get_b16_add_conv_config(): """Returns the ViT-B/16 configuration.""" config = ml_collections.ConfigDict() config.patches = ml_collections.ConfigDict({'size': (16, 16)}) config.split = 'non-overlap' config.slide_step = 12 config.hidden_size = 768 config.transformer = ml_collections.ConfigDict() config.transformer.mlp_dim = 3072 config.transformer.num_heads = 12 config.transformer.num_layers = 12 config.transformer.attention_dropout_rate = 0.0 config.transformer.dropout_rate = 0.1 config.classifier = 'token' config.representation_size = None config.in_planes = 64 config.n_conv_layers = 2 config.kernel_size = 7 config.stride = max(1, (7 // 2) - 1) config.padding = max(1, (7 // 2)) config.activation = nn.ReLU config.conv_bias = False config.pooling_kernel_size = 3 config.pooling_stride = 2 config.pooling_padding = 1 config.max_pool = True return config
eae5f7f33acaf5931b11c7ed2f6d1b554c8a5254
32,747
def real_proto(request) -> programl_pb2.ProgramGraph:
    """A test fixture which enumerates one of 100 "real" protos."""
    return request.param
84f604626a1545e370aa92ab509329cc23e26aa5
32,748
def flat(arr):
    """Return arr flattened except for last axis."""
    shape = arr.shape[:-1]
    n_features = arr.shape[-1]
    return arr.reshape(np.product(shape), n_features)
8b9dd1b92c4fffe087345fa74fbc535e2ee41fbf
32,749
from datetime import datetime
import time


def wait_while(f_logic, timeout, warning_timeout=None, warning_text=None,
               delay_between_attempts=0.5):
    """
    The inner loop keeps running while evaluating `f_logic()` is treated as `True`.
    """
    warning_flag = False
    start_time = datetime.now()
    while True:
        try:
            result = f_logic()
        except Exception:
            pass
        else:
            if not result:
                return True
        elaps_time = (datetime.now() - start_time).total_seconds()
        if warning_timeout is not None and elaps_time > warning_timeout and not warning_flag:
            text_addon = '. {}'.format(warning_text) if warning_text else ''
            logger.warning("Waiting time exceeded {}{}".format(warning_timeout, text_addon))
            warning_flag = True
        if timeout is not None and elaps_time > timeout:
            return False
        time.sleep(delay_between_attempts)
0261083b54b1572833ea146663862fce5fe690a5
32,751
def acute_lymphocytic_leukemia1():
    """Human Acute Lymphocytic Leukemia dataset (Patient 1).

    This dataset was introduced in :cite:`Gawad_2014` and was used in:

    * :cite:`B-SCITE` Figure 5.
    * :cite:`infSCITE` Figure S16.

    The size is n_cells × n_muts = 111 × 20

    Returns
    -------
    :class:`anndata.AnnData`
        An anndata in which `.X` is the input noisy.
    """
    adata = scp.io.read(
        scp.ul.get_file("scphylo.datasets/real/acute_lymphocytic_leukemia1.h5ad")
    )
    return adata
76f15e7a19da71fe5e4451104698dfaffdbf1799
32,752
def extract_module(start_queue, g, locality="top", max_to_crawl=100, max_depth=10): """ ([rdflib.URI], rdflib.Graph) -> rdflib.Graph resource (rdflib.URI): resource for which we extract module g (rdflib.Graph): RDF graph """ ontomodule = Graph() ontomodule.namespace_manager = g.namespace_manager visited = [] to_crawl = start_queue depth = 0 # crawl until the queue is not empty while to_crawl: print("size of to_crawl: {}, size of visited: {}, depth: {}".format( len(to_crawl), len(visited), depth)) next_node = to_crawl.pop() # control the depth depth = depth + 1 if depth > max_depth: break assert not any(isinstance(x, BNode) for x in to_crawl), "Caught BNodes" if next_node not in visited: # mark nodes which we have already visited visited = visited + [next_node] successor_objs = get_successors(next_node, g, locality=locality) for successor_obj in successor_objs: if len(to_crawl) <= max_to_crawl: to_crawl = to_crawl + successor_obj["uris"] # add all triples for triple in successor_obj["triples"]: ontomodule.add(triple) return ontomodule
78e60670a8c8ed53471062380d5a9cf0aab70848
32,753
import torch


def softmax(x):
    """Softmax activation function

    Parameters
    ----------
    x : torch.tensor
    """
    return torch.exp(x) / torch.sum(torch.exp(x), dim=1).view(-1, 1)
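A small sanity-check sketch for the softmax above, assuming PyTorch is available; the input tensor is an illustrative value only:

import torch

# Each row of the output should sum to (approximately) 1.
logits = torch.tensor([[1.0, 2.0, 3.0],
                       [0.5, 0.5, 0.5]])
probs = softmax(logits)
print(probs.sum(dim=1))  # ~ tensor([1., 1.])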
739219efe04174fe7a2b21fb8aa98816679f8389
32,754
def comp_raw_bool_eqs(eq1str, eq2str):
    """
    Will compare two boolean equations to see if they are the same.
    The equations can be written using the characters '&', '+' and '!'
    for 'and', 'or' and 'not' respectively.
    """
    (eqn1, eqn1vars) = OqeFuncUtils.get_vars_bool_eqn(eq1str)
    (eqn2, eqn2vars) = OqeFuncUtils.get_vars_bool_eqn(eq2str)
    if eqn1 == '' or eqn2 == '':
        return -1
    varlist = []
    for i in eqn1vars:
        if i not in varlist:
            varlist.append(i)
    for i in eqn2vars:
        if i not in varlist:
            varlist.append(i)
    return OqeFuncUtils.comp_bool_eqs(eqn1, eqn2, varlist)
ce7af289add4294bf8a2a7835414cfb51358bc92
32,755
def get_integrated_scene(glm_files, start_scene=None):
    """Get an integrated scene.

    Given a set of GLM files, get a scene where quantities are summed
    or averaged or so.
    """
    ms = satpy.MultiScene.from_files(
        glm_files, "glm_l2", time_threshold=10, group_keys=["start_time"])
    ms.load(["flash_extent_density"])
    with xarray.set_options(keep_attrs=True):
        sc = ms.blend(sum, scene=start_scene)
    return sc
71744bc23f961e630013ace0a85b1788f92924ff
32,756
def _pt_to_test_name(what, pt, view):
    """Helper used to convert Sublime point to a test/bench function name."""
    fn_names = []
    pat = TEST_PATTERN.format(WHAT=what, **globals())
    regions = view.find_all(pat, 0, r'\1', fn_names)
    if not regions:
        sublime.error_message('Could not find a Rust %s function.' % what)
        return None
    # Assuming regions are in ascending order.
    indices = [i for (i, r) in enumerate(regions) if r.a <= pt]
    if not indices:
        sublime.error_message('No %s functions found above the current point.' % what)
        return None
    return fn_names[indices[-1]]
580cacda4b31dff3fd05f4218733f5f0c6388ddb
32,757
def load_log_weights(log_weights_root, iw_mode):
    """Loads the log_weights from the disk.

    It assumes a file structure of <log_weights_root>/<iw_mode>/*.npy with
    multiple npy files. This function loads all the weights into a single
    numpy array, concatenating all npy files. Finally, it caches the result
    in a file stored at <log_weights_root>/<iw_mode>.npy
    In further calls, it reuses the cached file.

    Args:
        log_weights_root (str or pathlib.Path)
        iw_mode (str)

    Returns:
        np.ndarray: log importance weights
    """
    agg_weights_file = log_weights_root / f"{iw_mode}.npy"
    agg_weights_dir = log_weights_root / iw_mode
    assert agg_weights_dir.exists() or agg_weights_file.exists()
    if not agg_weights_file.exists():
        log_weights = np.concatenate(
            [np.load(weight_file) for weight_file in agg_weights_dir.glob("*.npy")])
        np.save(agg_weights_file, log_weights)
    else:
        log_weights = np.load(agg_weights_file)
    print(f"{log_weights_root} / {iw_mode} has {len(log_weights):,} traces")
    return log_weights
78f633d55e1d3eedc31851a315294e6a15d381a0
32,758
import requests
import logging


def pipelines_is_ready():
    """
    Used to show the "pipelines is loading..." message
    """
    url = f"{API_ENDPOINT}/{STATUS}"
    try:
        if requests.get(url).status_code < 400:
            return True
    except Exception as e:
        logging.exception(e)
        sleep(1)  # To avoid spamming a non-existing endpoint at startup
    return False
219b0935092d09311a05816cf0c4345f9abee9f6
32,759
import cmath

import numpy as np


def gamma_from_RLGC(freq, R, L, G, C):
    """Get propagation constant gamma from RLGC transmission line parameters"""
    w = 2 * np.pi * freq
    return cmath.sqrt((R + 1j * w * L) * (G + 1j * w * C))
9e4f09dc233f87b3fa52b9c7488b7fb65791289d
32,761
from typing import Optional
from typing import List
from typing import Union
from pathlib import Path


def get_abs_paths(paths: Optional[List[Union[str, Path]]]) -> List[Union[str, Path]]:
    """Extract the absolute path from the given sources (if any).

    :param paths: list of source paths; if empty, this function does nothing.
    """
    if paths is None:
        return []
    paths_abs = []
    for path in paths:
        paths_abs.append(as_tcl_value(str(Path(path).absolute())))
    return paths_abs
93a785fbf679664b96c5228a9cf008cba7793765
32,762
async def async_setup_gateway_entry(hass: core.HomeAssistant, entry: config_entries.ConfigEntry) -> bool: """Set up the Gateway component from a config entry.""" host = entry.data[CONF_HOST] euid = entry.data[CONF_TOKEN] # Connect to gateway gateway = IT600Gateway(host=host, euid=euid) try: await gateway.connect() await gateway.poll_status() except IT600ConnectionError as ce: _LOGGER.error("Connection error: check if you have specified gateway's HOST correctly.") return False except IT600AuthenticationError as ae: _LOGGER.error("Authentication error: check if you have specified gateway's EUID correctly.") return False hass.data[DOMAIN][entry.entry_id] = gateway gateway_info = gateway.get_gateway_device() device_registry = await dr.async_get_registry(hass) device_registry.async_get_or_create( config_entry_id=entry.entry_id, connections={(dr.CONNECTION_NETWORK_MAC, gateway_info.unique_id)}, identifiers={(DOMAIN, gateway_info.unique_id)}, manufacturer=gateway_info.manufacturer, name=gateway_info.name, model=gateway_info.model, sw_version=gateway_info.sw_version, ) for component in GATEWAY_PLATFORMS: hass.async_create_task( hass.config_entries.async_forward_entry_setup(entry, component) ) return True
d70644b2c4423798007272777bb9c081e79fc778
32,763
import requests def import_from_github(username, repo, commit_hash): """Import a GitHub project into the exegesis database. Returns True on success, False on failure. """ url = 'https://api.github.com/repos/{}/{}/git/trees/{}?recursive=1'.format( username, repo, commit_hash) headers = {'Accept': 'application/vnd.github.v3+json'} r = requests.get(url, headers=headers) if r.status_code == 200: response = r.json() project = Project(name=(username + ':' + repo), source=Project.GITHUB) project.save() # Create the root directory. Directory.objects.create(project=project, fullpath='', dirpath='', name='') for entry in response['tree']: fullpath = entry['path'] last_slash = fullpath.rfind('/') if last_slash != -1: dirpath = fullpath[:last_slash+1] name = fullpath[last_slash+1:] else: dirpath = '' name = fullpath if entry['type'] == 'tree': Directory.objects.create(project=project, fullpath=fullpath, dirpath=dirpath, name=name) else: Snippet.objects.create(project=project, fullpath=fullpath, dirpath=dirpath, name=name, downloaded=False, download_source=entry['url']) return True else: return False
a685fc79ad374ab499823696102d22b5d49201ed
32,764
def substructure_matching_bonds(mol: dm.Mol, query: dm.Mol, **kwargs): """Perform a substructure match using `GetSubstructMatches` but instead of returning only the atom indices also return the bond indices. Args: mol: A molecule. query: A molecule used as a query to match against. kwargs: Any other arguments to pass to `mol.GetSubstructMatches()`. Returns: atom_matches: A list of lists of atom indices. bond_matches: A list of lists of bond indices. """ # NOTE(hadim): If more substructure functions are added here, consider moving it to # a dedicated `substructure` module. # Set default arguments kwargs.setdefault("uniquify", True) # Get the matching atom indices atom_matches = list(mol.GetSubstructMatches(query, **kwargs)) # Get the bond to highligh from the query query_bond_indices = [ (bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()) for bond in query.GetBonds() ] # Retrieve the atom indices query_atom_indices = [atom.GetIdx() for i, atom in enumerate(query.GetAtoms())] bond_matches = [] for match in atom_matches: # Map the atom of the query to the atom of the mol matching the query atom_map = dict(zip(query_atom_indices, match)) # For this match atoms we now, we use the map to retrieve the matching bonds # in the mol. mol_bond_indices = [(atom_map[a1], atom_map[a2]) for a1, a2 in query_bond_indices] # Convert the bond atom indices to bond indices mol_bond_indices = [mol.GetBondBetweenAtoms(a1, a2).GetIdx() for a1, a2 in mol_bond_indices] bond_matches.append(mol_bond_indices) return atom_matches, bond_matches
1b6f4f7e17defae555ea750941be5ec71047cc87
32,767
def remove_element(nums, val):
    """
    :type nums: List[int]
    :type val: int
    :rtype: int
    """
    sz = len(nums)
    while sz > 0 and nums[sz - 1] == val:
        sz -= 1
    i = 0
    while i < sz:
        if nums[i] == val:
            nums[i], nums[sz - 1] = nums[sz - 1], nums[i]
            sz -= 1
            while sz > 0 and nums[sz - 1] == val:
                sz -= 1
        i += 1
    return sz
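A short usage sketch for remove_element above; the input list is a made-up example:

# Remove all 3s in place; the first k slots hold the kept values.
nums = [3, 2, 2, 3]
k = remove_element(nums, 3)
print(k, nums[:k])  # 2 [2, 2]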
4d29e8a8d43f191fe83ab0683f5dff005db799ec
32,768
def get_fm_file(file_name):
    """Read facilitymatcher file into dataframe. If not present,
    generate the file via script"""
    file_meta = set_facilitymatcher_meta(file_name, category='')
    df = load_preprocessed_output(file_meta, paths)
    if df is None:
        log.info('%s not found in %s, writing facility matches to file',
                 file_name, output_dir)
        if file_name == 'FacilityMatchList_forStEWI':
            write_fm.write_facility_matches()
        elif file_name == 'FRS_NAICSforStEWI':
            write_naics.write_NAICS_matches()
        df = load_preprocessed_output(file_meta, paths)
    col_dict = {"FRS_ID": "str",
                "FacilityID": "str",
                "NAICS": "str"}
    for k, v in col_dict.items():
        if k in df:
            df[k] = df[k].astype(v)
    return df
b83743b8f56376148b12fc13c3731471cd24b6a5
32,769
def call_dot(instr):
    """Call dot, returning stdout and stderr"""
    dot = Popen('dot -T png'.split(), stdout=PIPE, stderr=PIPE, stdin=PIPE)
    return dot.communicate(instr)
f77c9f340f3fcbebb101c5f59d57c92b56147a11
32,770
def add_bank_member_signal(
    banks_table: BanksTable,
    bank_id: str,
    bank_member_id: str,
    signal_type: t.Type[SignalType],
    signal_value: str,
) -> BankMemberSignal:
    """
    Add a bank member signal. Will deduplicate a signal_value + signal_type
    tuple before writing to the database.

    Calling this API also makes the signal (new or existing) available to
    process into matching indices.
    """
    return banks_table.add_bank_member_signal(
        bank_id=bank_id,
        bank_member_id=bank_member_id,
        signal_type=signal_type,
        signal_value=signal_value,
    )
214f064f152648c78d7ee5c6b56fb81c62cb4164
32,771
from typing import List


def map_zones(full_system) -> List[Zone]:
    """Map *zones*."""
    zones = []
    if full_system:
        for raw_zone in full_system.get("body", dict()).get("zones", list()):
            zone = map_zone(raw_zone)
            if zone:
                zones.append(zone)
    return zones
e5996460bc66a2882ac1cabee79fdff6e4da71cd
32,774
def betternn(x, keep_prob): """ Builds a network that learns to recognize digits :param x: input tensor of shape (N_examples, 784) as standard MNIST image is 28x28=7845 :param keep_prob: probability for dropout layer :return: y - a tensor of shape (N_examples, 10) with values equal to probabilities of example being given digit """ # input image is stored as 784 pixels, reshape it to (28,28,1) as it's greyscale # -1 is special value that indicates that this dimension should be inferred to keep # constant size net = Network(tf.reshape(x, [-1, 28, 28, 1])) net.add_layer( # take 5x5 features and create 32 feature maps ConvLayer([5, 5, 1, 32], [32], tf.nn.relu) ).add_layer( # reduce size by factor of 2 PoolLayer() ).add_layer( # this time create 64 feature maps ConvLayer([5, 5, 32, 64], [64], tf.nn.relu) ).add_layer( # reduce size again PoolLayer() ).reshape_output( # reduced size twice (so image is [28,28] -> [7,7]) and created 64 feature maps # so flatten previous output [-1, 7 * 7 * 64] ).add_layer( # create 1024 features FullyConnectedLayer([7 * 7 * 64, 1024], [1024], tf.nn.relu) ).add_layer( # reduce complexity DropoutLayer(keep_prob) ).add_layer( # Map 1024 features to 10 classes representing digits FullyConnectedLayer([1024, 10], [10], tf.nn.softmax) ) return net.output
83b1155daa564b257fc1379811ddbde72d18ec4f
32,775
def get_valid_scsi_ids(devices, reserved_ids):
    """
    Takes a list of dicts devices, and list of ints reserved_ids.
    Returns:
    - list of ints valid_ids, which are the SCSI ids that are not reserved
    - int recommended_id, which is the id that the Web UI should default to recommend
    """
    occupied_ids = []
    for d in devices:
        occupied_ids.append(d["id"])

    unoccupied_ids = [i for i in list(range(8)) if i not in reserved_ids + occupied_ids]
    unoccupied_ids.sort()

    valid_ids = [i for i in list(range(8)) if i not in reserved_ids]
    valid_ids.sort(reverse=True)

    if len(unoccupied_ids) > 0:
        recommended_id = unoccupied_ids[-1]
    else:
        recommended_id = occupied_ids.pop(0)

    return valid_ids, recommended_id
a5b4341fbee75e7d555c917587678dc5ea918b9f
32,776
def check_password_and_delete(target: dict, password) -> dict: """ :param target: :param password: :return: """ if "password" in target: if md5(str(password).encode()).hexdigest() == target["password"]: target = dell(target["password"]) Bash().delete({ "bash_id": target["bash_id"] }) # the bash have been found with the correct password result = { "code": "200", "result": "The bash have been deleted successfully", } else: # incorrect password result = { "code": "400", "reason": "The password for this bash is incorrect, please try again !", } else: # successfully retrieve a public bash result = { "code": "403", "reason": "This is a public bash, you can't delete it, even if you're the author", } return result
c06f5064d8a85065c3312d09bda9b9602b772ae1
32,777
def lingodoc_trigger_to_BIO(doc):
    """
    :type doc: nlplingo.text.text_theory.Document
    """
    ret = []
    for sentence in doc.sentences:
        token_labels = []
        for token_index, token in enumerate(sentence.tokens):
            token_labels.append(
                EventTriggerFeatureGenerator.get_event_type_of_token(token, sentence))
        bio_labels = transform_sentence_labels_to_BIO(token_labels)

        token_bio = []
        for k, v in zip(sentence.tokens, bio_labels):
            token_bio.append('{} {}'.format(k.text, v))
        ret.append('\n'.join(token_bio))
    return ret
7237650ef6e9649b3d0e14867d907c9f6aa5b71a
32,778
def build_coiled_coil_model():
    """Generates and returns a coiled-coil model."""
    model_and_info = build_and_record_model(
        request, model_building.HelixType.ALPHA)
    return jsonify(model_and_info)
9eeca3d1de581559129d3a4fe529c4e13727bd71
32,779
def get_input_fn(config, is_training, num_cpu_threads=4): """Creates an `input_fn` closure to be passed to TPUEstimator.""" input_files = [] for input_pattern in config.pretrain_tfrecords.split(","): input_files.extend(tf.io.gfile.glob(input_pattern)) def input_fn(params): """The actual input function.""" batch_size = params["batch_size"] name_to_features = { "input_ori_ids": tf.io.FixedLenFeature([config.max_seq_length], tf.int64), # "input_mask": tf.io.FixedLenFeature([config.max_seq_length], tf.int64), "segment_ids": tf.io.FixedLenFeature([config.max_seq_length], tf.int64), } d = tf.data.Dataset.from_tensor_slices(tf.constant(input_files)) d = d.repeat() d = d.shuffle(buffer_size=len(input_files)) # `cycle_length` is the number of parallel files that get read. cycle_length = min(num_cpu_threads, len(input_files)) # `sloppy` mode means that the interleaving is not exact. This adds # even more randomness to the training pipeline. d = d.apply( tf.data.experimental.parallel_interleave( tf.data.TFRecordDataset, sloppy=is_training, cycle_length=cycle_length)) d = d.shuffle(buffer_size=100) # We must `drop_remainder` on training because the TPU requires fixed # size dimensions. For eval, we assume we are evaluating on the CPU or GPU # and we *don"t* want to drop the remainder, otherwise we wont cover # every sample. d = d.apply( tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, num_parallel_batches=num_cpu_threads, drop_remainder=True)) d = d.apply(tf.data.experimental.ignore_errors()) return d return input_fn
23992d8d4fd1bd09f12aa4c28c695a47edca420e
32,780
def control_modes_available(): """API to call the GetCtrlModesCountSrv service to get the list of available modes in ctrl_pkg (autonomous/manual/calibration). Returns: dict: Execution status if the API call was successful, list of available modes and error reason if call fails. """ webserver_node = webserver_publisher_node.get_webserver_node() webserver_node.get_logger().info("Providing the number of available modes") try: get_ctrl_modes_req = GetCtrlModesSrv.Request() get_ctrl_modes_res = call_service_sync(webserver_node.get_ctrl_modes_cli, get_ctrl_modes_req) control_modes_available = list() for mode in get_ctrl_modes_res.modes: control_modes_available.append(constants.MODE_DICT[mode]) data = { "control_modes_available": control_modes_available, "success": True } return jsonify(data) except Exception as ex: webserver_node.get_logger().error(f"Unable to reach get ctrl modes service: {ex}") return jsonify(success=False, reason="Error")
eec46b860791d305cce14659a633b461c73143f0
32,781
def reproject_bbox(source_epsg=4326, dest_epsg=None, bbox=None):
    """
    Basic function to reproject given coordinate bounding box (in WGS84).
    """
    # checks

    # reproject bounding box
    l, b, r, t = bbox
    return transform_bounds(src_crs=source_epsg,
                            dst_crs=dest_epsg,
                            left=l, bottom=b, right=r, top=t)
449a1cb793cb2239ed9d46449d03f77500fab031
32,782
from bs4 import BeautifulSoup import re def parse_cluster_card_info(soup: BeautifulSoup): """ App lists from GET requests follow a redirect to the /cluster page, which contains different HTML and selectors. :param soup: A BeautifulSoup object of an app's card :return: A dictionary of available basic app info """ icon = soup.select_one("img") details_soup = soup.select_one("div.RZEgze") relative_url = details_soup.select_one("div.p63iDd > a") url = relative_url.attrs.get("href") if relative_url else None app_id = None if url: app_id = extract_id_query(url) title = details_soup.select_one("div.WsMG1c.nnK0zc") developer_soup = details_soup.select_one("a.mnKHRc") developer = None developer_id = None if developer_soup: developer = developer_soup.select_one("div.KoLSrc") developer_url = developer_soup.attrs.get("href") if developer else None developer_id = extract_id_query(developer_url) description = details_soup.select_one("div.b8cIId.f5NCO") score_soup = details_soup.select_one("div.pf5lIe div") score = None if score_soup: matches = re.search(r"([0-9]\.[0-9]) star", score_soup.text) score = matches.groups()[0] if matches else None price = None price_button = details_soup.select_one("button span.VfPpfd") if price_button: price = price_button.text full_price = None full_price_button = details_soup.select_one("button span.SUZt4c") if full_price_button: full_price = full_price_button.text free = price is None if free is True: price = "0" full_price = "0" return { "app_id": app_id, "url": url, "icon": icon.attrs.get("data-src") if icon else None, "title": title.text if title else None, "developer": developer.text if developer else None, "developer_id": developer_id, "description": description.text if description else None, "score": score, "full_price": full_price, "price": price, "free": free, }
0d4d0ba75a4e29b4d33e1f1a4e40239adcd80626
32,784
def get_joint_occurrence_df(df, row_column, col_column, top_k=10): """ Form a DataFrame where: - index is composed of top_k top values in row_column. - columns are composed of top_k top values in col_column. - cell values are the number of times that the index and column values occur together in the given DataFrame. Note: Index of the DataFrame must be unique. """ df_s = df[[row_column, col_column]].copy() # Get top row and column values. top_rows = df[row_column].value_counts().iloc[:top_k] top_cols = df[col_column].value_counts().iloc[:top_k] # Drop rows that don't have a genre and style in the top list. filter_lambda = lambda x: \ x[row_column] in top_rows and x[col_column] in top_cols df_s = df_s[df_s.apply(filter_lambda, axis=1)] fname = 'get_joint_occurrence_df' print("{}: looking at co-occurrence of {} and {}".format( fname, row_column, col_column)) print(" dropped {}/{} rows that don't have both vals in top-{}.".format( df_s.shape[0] - df_s.shape[0], df_s.shape[0], top_k)) # Construct joint occurence matrix JM = np.zeros((top_k, top_k)) for i, row in enumerate(top_rows.index): for j, col in enumerate(top_cols.index): JM[i, j] = ( (df_s[row_column] == row) & (df_s[col_column] == col) ).sum() df_m = pd.DataFrame(JM, columns=top_cols.index, index=top_rows.index) return df_m
19701a0a355733c1eb8d3aa3046fc6e00daed120
32,785
def get_total_obs_num_samples(obs_length=None,
                              num_blocks=None,
                              length_mode='obs_length',
                              num_antennas=1,
                              sample_rate=3e9,
                              block_size=134217728,
                              num_bits=8,
                              num_pols=2,
                              num_branches=1024,
                              num_chans=64):
    """
    Calculate number of required real voltage time samples for a given `obs_length`
    or `num_blocks`, without directly using a `RawVoltageBackend` object.

    Parameters
    ----------
    obs_length : float, optional
        Length of observation in seconds, if in `obs_length` mode
    num_blocks : int, optional
        Number of data blocks to record, if in `num_blocks` mode
    length_mode : str, optional
        Mode for specifying length of observation, either `obs_length` in seconds
        or `num_blocks` in data blocks
    num_antennas : int
        Number of antennas
    sample_rate : float
        Sample rate in Hz
    block_size : int
        Block size used in recording GUPPI RAW files
    num_bits : int
        Number of bits in requantized data (for saving into file). Can be 8 or 4.
    num_pols : int
        Number of polarizations recorded
    num_branches : int
        Number of branches in polyphase filterbank
    num_chans : int
        Number of coarse channels written to file

    Returns
    -------
    num_samples : int
        Number of samples
    """
    tbin = num_branches / sample_rate
    chan_bw = 1 / tbin
    bytes_per_sample = 2 * num_pols * num_bits / 8

    if length_mode == 'obs_length':
        if obs_length is None:
            raise ValueError("Value not given for 'obs_length'.")
        num_blocks = int(obs_length * chan_bw * num_antennas * num_chans * bytes_per_sample / block_size)
    elif length_mode == 'num_blocks':
        if num_blocks is None:
            raise ValueError("Value not given for 'num_blocks'.")
        pass
    else:
        raise ValueError("Invalid option given for 'length_mode'.")

    return num_blocks * int(block_size / (num_antennas * num_chans * bytes_per_sample)) * num_branches
0d5c3de03723c79d31c7f77ece29226daaf4f442
32,786
def number(
    x: Scalar,
    c: str = 'csl',
    w: int = 5,
) -> str:
    """
    Return a notation of the number x in context c.

    Input:
        x (Scalar): number
        c (str): context
        w (int): width of the output string

    Output:
        s (str): notation of x
    """
    S = 0 if x>=0 else 1  # 0 for + / 1 for -
    m, e = f"{x:e}".split('e')
    m, e = eval(m), int(e)  # mantissa and exponent
    A, B = f"{x:f}".rstrip('0').strip('-').split('.')
    a, b = len(A), len(B)  # number of digits before / after the comma
    if isinstance(x, int) or x%1==0:
        if a+S<=w or a<=3:
            if c == 'stm':
                return f"{S*'-'+A.zfill(w-S)}"
            if c == 'ttl':
                return f"$ {S*'-'+A} $"
            if c == 'csl':
                return format(S*'-'+A, f'>{w}')
    else:
        if a+S<=w-2 and np.abs(x)>0.1:
            if c == 'ttl':
                return f"$ {S*'-'}{A}.{B[:w-S-1]} $"
            if c == 'csl':
                return format(f"{S*'-'}{A}.{B[:w-S-1-a]}", f'>{w}')
    u = len(str(e))
    if c == 'stm':
        z = 0
        q = len(str(m).strip('0').strip('.'))-2-S
        while z+1<=q and w>=S+1+(z+1)+1+len(str(e-(z+1))):
            z += 1
        return f"{m*10**z:1.0f}e{e-z}"
    if c == 'ttl':
        return fr"$ {format(m, f'1.{max(0, w-S-3-u)}f')} \times 10^{{ {e} }} $"
    if c == 'csl':
        return format(f"{format(m, f'1.{max(0, w-S-3-u)}f')}e{e}", f'>{w}')
    raise ValueError(f"unknown context: {c}")
3a36d66f9166e82bb51e39e483f9366f83f72ff8
32,788
from subprocess import run, PIPE


def remove_helm_repo(repos_array):
    """
    Execute 'helm repo remove' command on input values
    repos_array is an array of strings as: ['stable', 'local', 'nalkinscloud', ...]

    :param repos_array: array of strings
    :return: return code and value from execution command as dict
    """
    status = 0
    value = 'no errors found'
    for repo in repos_array:
        completed_process_object = run(["helm", "repo", "remove", repo],
                                       stdout=PIPE, stderr=PIPE)
        # In case of a non 0 return code, update return from last iteration
        if completed_process_object.returncode != 0:
            status = completed_process_object.returncode
            value = completed_process_object.stderr.decode('utf-8') + " *** Additional errors may occurred"

    return {'status': status, 'value': value}
4b2a778122caaabf1b7cca971d2d9f2b57dbf84e
32,789
def evaluate_fio(baselines: dict, results: dict, test_name: str, failures: int, tolerance: int) -> int:
    """
    Evaluate the fio test results against the baseline.

    Determine if the fio test results meet the expected threshold and display
    the outcome with appropriate units.

    Parameters
    ----------
    baselines : dict
        A ``dictionary`` of the baseline to compare results against.
    results : dict
        A ``dictionary`` of the parsed results.
    test_name : str
        A ``string`` of the name of the test being parsed.
    failures : int
        An ``integer`` of the number of results that have not met the threshold.
    tolerance : int
        An ``int`` of the percentage below the threshold to still mark as passing.

    Returns
    -------
    int
        Returns an ``integer`` of the number of results that have not met the
        threshold.
    """
    for test, value in baselines.items():
        if test_name not in results.keys():
            continue
        if test_name == 'bandwidth':
            unit = '(GB/s)'
            expected = value / 1000000000
            got = round(results[test_name][test] / 1000000000, 3)
        elif test_name == 'iops':
            unit = '(k IOPS)'
            expected = value / 1000
            got = round(results[test_name][test] / 1000, 3)
        print(f' {TEST_MAPPING[test_name]} {test.title()} {unit}')
        text = f' Expected: {expected}, Got: {got}'
        result = metric_passes(expected, got, tolerance)
        output, failures = result_text(result, failures)
        text += f', Result: {output}'
        print(text)
    return failures
d5ad4ca319163409a526fe9d8b43be13de49680a
32,790
def f1_3D(x1, x2, x3):
    """ x1 dependent from example 2.1 in iterative methods """
    return -0.2*x2 - 0.2*x3 + 0.8
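# Quick check (values are illustrative): with x2 = x3 = 0 the fixed-point
# update for x1 evaluates to 0.8.
print(f1_3D(0.0, 0.0, 0.0))   # -> 0.8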
09e5a337f5fa62a4c3cddd9ae0dd701867c52b22
32,791
import typing

import rsa


def encrypt(message: typing.Union[str, bytes], n: int, e: int) -> bytes:
    """
    Encrypt MESSAGE with public key specified by N and E
    """
    pub_key = rsa.PublicKey(n, e)

    if isinstance(message, str):
        message = message.encode("utf-8")
    elif isinstance(message, bytes):
        pass
    else:
        raise Exception("Please format your message to binary or string")

    message = rsa.encrypt(message, pub_key)
    return message
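# Usage sketch with the python-rsa package (the 512-bit key size is illustrative):
# rsa.newkeys() returns a (PublicKey, PrivateKey) pair whose n and e fields match
# the parameters expected by encrypt() above.
pub, priv = rsa.newkeys(512)
ciphertext = encrypt("hello", pub.n, pub.e)
plaintext = rsa.decrypt(ciphertext, priv)   # b"hello"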
fb89606b0d3263c5d10479970868c5336d14679b
32,792
def _safe_div(numerator, denominator):
    """Divides two tensors element-wise, returning 0 if the denominator is <= 0.

    Args:
        numerator: A real `Tensor`.
        denominator: A real `Tensor`, with dtype matching `numerator`.

    Returns:
        0 if `denominator` <= 0, else `numerator` / `denominator`
    """
    t = tf.truediv(numerator, denominator)
    zero = tf.zeros_like(t, dtype=denominator.dtype)
    condition = tf.greater(denominator, zero)
    zero = tf.cast(zero, t.dtype)
    return tf.where(condition, t, zero)
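# Usage sketch (assumes TensorFlow 2.x imported as tf, as the function above requires):
import tensorflow as tf

num = tf.constant([1.0, 2.0, 3.0])
den = tf.constant([2.0, 0.0, -1.0])
print(_safe_div(num, den).numpy())   # [0.5, 0.0, 0.0]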
04e9856b1283bf83cd63bb56c65f1d1e2667bcc6
32,793
def build_embedding_model():
    """
    Build model by stacking up a preprocessing layer and an encoding layer.

    Returns
    -------
    tf.keras.Model
        The embedding model, taking a list of strings as input, and outputting
        embeddings for each token of the input strings
    """
    # Links for the pre-trained TensorFlow Hub preprocessing
    # and encoding layers
    tfhub_preprocessing = 'https://tfhub.dev/tensorflow/' \
                          'bert_en_uncased_preprocess/3'
    tfhub_encoder = 'https://tfhub.dev/tensorflow/small_bert/' \
                    'bert_en_uncased_L-2_H-128_A-2/1'

    # Define model input type and name
    inputs = tf.keras.layers.Input(shape=(), dtype=tf.string, name='snippet')
    # Define preprocessing layer
    preprocessing_layer = hub.KerasLayer(tfhub_preprocessing,
                                         name='preprocessing')
    # Define encoding layer
    encoder = hub.KerasLayer(tfhub_encoder, trainable=True,
                             name='BERT_encoder')
    # Stack up the three layers
    outputs = encoder(preprocessing_layer(inputs))
    # Retrieve token embeddings i.e. the 'sequence_output' values
    model_outputs = outputs['sequence_output']
    # Return model
    return tf.keras.Model(inputs, model_outputs)
ecf835f8543d9815c3c88b5b596c0c14d9524b66
32,794
def event_date_row(event_names_and_dates):
    """
    Returns the third row of the attendance csv. This is just a list of event dates.

    :param list[(str, datetime)] event_names_and_dates: A list of names and dates
        for each event that should appear on the csv
    :returns: the row to be printed
    :rtype: [str]
    """
    # =" " has to be added around the dates to make sure it isn't auto-formatted by Excel
    event_dates = ['="' + str(dates) + '"' for _, dates in event_names_and_dates]
    return ['', '', '', '', '', '', ''] + event_dates
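# Usage sketch (event names and dates are illustrative):
from datetime import date

row = event_date_row([("Kickoff", date(2020, 1, 15)), ("Demo", date(2020, 2, 1))])
# ['', '', '', '', '', '', '', '="2020-01-15"', '="2020-02-01"']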
5b51eaef8cde99040a1aff9a0c6abaaef5e52896
32,795
def moving_tajima_d(ac, size, start=0, stop=None, step=None, min_sites=3):
    """Calculate the value of Tajima's D in moving windows of `size` variants.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    size : int
        The window size (number of variants).
    start : int, optional
        The index at which to start.
    stop : int, optional
        The index at which to stop.
    step : int, optional
        The number of variants between start positions of windows. If not
        given, defaults to the window size, i.e., non-overlapping windows.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value. If
        there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    d : ndarray, float, shape (n_windows,)
        Tajima's D.

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> D = allel.moving_tajima_d(ac, size=4, step=2)
    >>> D
    array([0.1676558 , 2.01186954, 5.70029703])

    """
    d = moving_statistic(values=ac, statistic=tajima_d, size=size, start=start,
                         stop=stop, step=step, min_sites=min_sites)
    return d
df5217180cf5b25ccb09ee88974d82290bff43ce
32,796
def sample_member(user, name='Attila'):
    """
    Create and return a sample member
    :param user:
    :param name:
    :return:
    """
    return Member.objects.create(user=user, name=name)
ac171a5da2495436596bd6e597b0b9ea498c8bcf
32,797
import uuid


def _create_feed(client, customer_id):
    """Creates a page feed with URLs

    Args:
        client: an initialized GoogleAdsClient instance.
        customer_id: a client customer ID str.

    Returns:
        The resource name str of the newly created feed.
    """
    # Retrieve a new feed operation object.
    feed_operation = client.get_type("FeedOperation")
    # Create a new feed.
    feed = feed_operation.create
    feed.name = f"DSA Feed #{uuid.uuid4()}"
    feed.origin = client.enums.FeedOriginEnum.USER

    feed_attribute_type_enum = client.enums.FeedAttributeTypeEnum

    # Create the feed's attributes.
    feed_attribute_url = client.get_type("FeedAttribute")
    feed_attribute_url.type_ = feed_attribute_type_enum.URL_LIST
    feed_attribute_url.name = "Page URL"

    feed_attribute_label = client.get_type("FeedAttribute")
    feed_attribute_label.type_ = feed_attribute_type_enum.STRING_LIST
    feed_attribute_label.name = "Label"

    feed.attributes.extend([feed_attribute_url, feed_attribute_label])

    # Retrieve the feed service.
    feed_service = client.get_service("FeedService")
    # Send the feed operation and add the feed.
    response = feed_service.mutate_feeds(
        customer_id=customer_id, operations=[feed_operation]
    )

    return response.results[0].resource_name
738e940abd1a7ec90382c4011b26d757c8a916a4
32,798
def cal_pj_task_ind(_st_date, _ed_date):
    """
    Compute the indicator of product R&D centre resources spent on non-product tasks.
    :param _st_date: start date
    :param _ed_date: end date
    :return: the computed statistics
    """
    global extTask

    _pj_info = handler.get_project_info("project_t")
    # logging.log(logging.WARN, ">>> cal_pj_task_ind( %s, %s )" % (_st_date, _ed_date))
    _pj_sum = 0
    _npj_sum = 0
    _project = {}
    for _issue in extTask:
        if _issue["updated"] is None:
            continue
        _issue_updated_date = _issue["updated"].split('T')[0]
        """Check whether the task falls within the specified period"""
        if handler.is_date_bef(_issue_updated_date, _st_date) or handler.is_date_aft(_issue_updated_date, _ed_date):
            continue
        _group = _issue['issue'].split('-')[0]
        _month = int(_issue_updated_date.split('-')[1])
        if _issue['project_alias'] is not None:
            _pj_name = scan_project_name(_pj_info, _issue['project_alias'])
            if _pj_name is None:
                _pj_name = _issue['project_alias']
            if _pj_name not in _project:
                _project[_pj_name] = {_group: _issue['spent_time'],
                                      1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0,
                                      7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, 13: 0,
                                      "member": []
                                      }
                _project[_pj_name][_month] = _issue['spent_time']
                _project[_pj_name][13] = _issue['spent_time']
            else:
                if _group not in _project[_pj_name]:
                    _project[_pj_name][_group] = _issue['spent_time']
                else:
                    _project[_pj_name][_group] += _issue['spent_time']
                _project[_pj_name][_month] += _issue['spent_time']
                _project[_pj_name][13] += _issue['spent_time']
            _pj_sum += _issue['spent_time']
            _project[_pj_name]["member"].append(
                {
                    "member": _issue["users"],
                    "date": _issue["updated"],
                    "summary": _issue["summary"],
                    "spent_time": _issue["spent_time"]
                }
            )
        else:
            """Try to find project information in the summary"""
            _pj_name = scan_project_name(_pj_info, _issue['summary'])
            if _pj_name is not None:
                """Project information found"""
                if _pj_name not in _project:
                    _project[_pj_name] = {_group: _issue['spent_time'],
                                          1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0,
                                          7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, 13: 0,
                                          "member": []
                                          }
                    _project[_pj_name][_month] = _issue['spent_time']
                    _project[_pj_name][13] = _issue['spent_time']
                else:
                    if _group not in _project[_pj_name]:
                        _project[_pj_name][_group] = _issue['spent_time']
                    else:
                        _project[_pj_name][_group] += _issue['spent_time']
                    _project[_pj_name][_month] += _issue['spent_time']
                    _project[_pj_name][13] += _issue['spent_time']
                _pj_sum += _issue['spent_time']
                _project[_pj_name]["member"].append(
                    {
                        "member": _issue["users"],
                        "date": _issue["updated"],
                        "summary": _issue["summary"],
                        "spent_time": _issue["spent_time"]
                    }
                )
            else:
                """No project information"""
                print(u">>> %s" % _issue['summary'])
                if u'其它' not in _project:
                    _project[u'其它'] = {_group: _issue['spent_time'],
                                       1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0,
                                       7: 0, 8: 0, 9: 0, 10: 0, 11: 0, 12: 0, 13: 0,
                                       "member": []
                                       }
                    _project[u'其它'][_month] = _issue['spent_time']
                    _project[u'其它'][13] = _issue['spent_time']
                else:
                    if _group not in _project[u'其它']:
                        _project[u'其它'][_group] = _issue['spent_time']
                    else:
                        _project[u'其它'][_group] += _issue['spent_time']
                    _project[u'其它'][_month] += _issue['spent_time']
                    _project[u'其它'][13] += _issue['spent_time']
                _npj_sum += _issue['spent_time']
                _project[u'其它']["member"].append(
                    {
                        "member": _issue["users"],
                        "date": _issue["updated"],
                        "summary": _issue["summary"],
                        "spent_time": _issue["spent_time"]
                    }
                )
    # logging.log(logging.WARN, ">>> return %s, %d, %d" % (_project, _pj_sum, _npj_sum))
    return _project, _pj_sum, _npj_sum
7a998a01a87abbc1f80147cf067b231429124001
32,799
def get_page_index(obj, amongst_live_pages=True):
    """
    Get page's index (a number) within its siblings.
    :param obj: Wagtail page object
    :param amongst_live_pages:
        Get index amongst live pages if True or all pages if False.
    :return: Index of a page if found or None if page doesn't have an index.
    """
    qs = obj.__class__.objects.filter(depth=obj.depth).values_list('pk', flat=True)
    if amongst_live_pages:
        qs = qs.live()
    if obj.depth > 1:
        # making sure the non-root nodes share a parent
        parentpath = obj._get_basepath(obj.path, obj.depth - 1)
        qs = qs.filter(
            path__range=obj._get_children_path_interval(parentpath))
    try:
        index = list(qs).index(obj.pk)
        return index
    except ValueError:
        return None
fd1950533a398019ab0d3e208a1587f86b134a13
32,801
import requests


def contact_ocsp_server(certs):
    """Sends an OCSP request to the responding server for a certificate chain"""
    chain = convert_to_oscrypto(certs)
    req = create_ocsp_request(chain[0], chain[1])
    URI = extract_ocsp_uri(certs[0])
    data = requests.post(URI, data=req, stream=True,
                         headers={'Content-Type': 'application/ocsp-request'})
    response = ocsp.OCSPResponse.load(data.raw.data)
    parsed = parse_ocsp(response)
    return parsed
15c32e72d41e6db275f5ef1d91ee7ccf05eb8cde
32,802
import warnings


def reset_index_inplace(df: pd.DataFrame, *args, **kwargs) -> pd.DataFrame:
    """
    Return the dataframe with an inplace resetting of the index.

    This method mutates the original DataFrame.

    Compared to non-inplace resetting, this avoids data copying, thus
    providing a potential speedup.

    In Pandas, `reset_index()`, when used in place, does not return a
    `DataFrame`, preventing this option's usage in the function-chaining
    scheme. `reset_index_inplace()` provides one the ability to save
    computation time and memory while still being able to use the chaining
    syntax core to pyjanitor. This function, therefore, is the chaining
    equivalent of:

    .. code-block:: python

        data = {"class": ["bird", "bird", "bird", "mammal", "mammal"],
                "max_speed": [389, 389, 24, 80, 21],
                "index": ["falcon", "falcon", "parrot", "Lion", "Monkey"]}

        df = (
            pd.DataFrame(data).set_index("index")
            .drop_duplicates()
        )
        df.reset_index(inplace=True)

    instead, being called simply as:

    .. code-block:: python

        df = (
            pd.DataFrame(data).set_index("index")
            .drop_duplicates()
            .reset_index_inplace()
        )

    All supplied parameters are sent directly to `DataFrame.reset_index()`.

    :param df: A pandas DataFrame.
    :param args: Arguments supplied to `DataFrame.reset_index()`
    :param kwargs: Arguments supplied to `DataFrame.reset_index()`
    :returns: A pandas DataFrame with reset indexes.
    """
    # Deprecation Warning
    warnings.warn(
        "reset_index_inplace will be deprecated in the "
        "upcoming 0.18 release. Use .reset_index() instead",
        DeprecationWarning,
    )
    kwargs.update(inplace=True)

    df.reset_index(*args, **kwargs)
    return df
6c10d67ffdaf195c0a9eea3ae473bb0e93e6e9ef
32,803
def get_current_user():
    """Get the current logged in user, or None."""
    if environment.is_local_development():
        return User('user@localhost')

    current_request = request_cache.get_current_request()
    if local_config.AuthConfig().get('enable_loas'):
        loas_user = current_request.headers.get('X-AppEngine-LOAS-Peer-Username')
        if loas_user:
            return User(loas_user + '@google.com')

    iap_email = get_iap_email(current_request)
    if iap_email:
        return User(iap_email)

    cache_backing = request_cache.get_cache_backing()
    oauth_email = getattr(cache_backing, '_oauth_email', None)
    if oauth_email:
        return User(oauth_email)

    cached_email = getattr(cache_backing, '_cached_email', None)
    if cached_email:
        return User(cached_email)

    session_cookie = get_session_cookie()
    if not session_cookie:
        return None

    try:
        decoded_claims = decode_claims(get_session_cookie())
    except AuthError:
        logs.log_warn('Invalid session cookie.')
        return None

    if not decoded_claims.get('email_verified'):
        return None

    email = decoded_claims.get('email')
    if not email:
        return None

    # We cache the email for this request if we've validated the user to make
    # subsequent get_current_user() calls fast.
    setattr(cache_backing, '_cached_email', email)
    return User(email)
3b80285c87358dc33595ca97ecee3cf38cf96034
32,805
import calendar
import time


def genericGetCreationDate(page):
    """ Go to each date position and attempt to get date """
    randSleep()
    allDates = []
    signaturePositions = findSignatures(page)

    for p in signaturePositions:
        timestamp = getTimestampFromSERP(p, page)
        # print('timestamp/locationOfSignature:', timestamp)
        try:
            epoch = calendar.timegm(
                time.strptime(timestamp, '%b %d, %Y'))
            date = time.strftime('%Y-%m-%dT%H:%M:%S', time.gmtime(epoch))
            allDates.append(date)
        except Exception:
            pass

    return getLowest(allDates)
3d3f6e9a0b1ce9b49f887ba4d1d99493b75d2f9e
32,806
def depth(data):
    """
    For each event, it finds the deepest layer in which the shower has
    deposited some E.
    """
    maxdepth = 2 * (data[2].sum(axis=(1, 2)) != 0)
    maxdepth[maxdepth == 0] = 1 * (
        data[1][maxdepth == 0].sum(axis=(1, 2)) != 0
    )
    return maxdepth
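# Toy check (shapes and values are illustrative; assumes numpy imported as np and
# `data` indexed as data[layer][event, x, y], which matches the indexing above):
import numpy as np

data = [np.zeros((2, 3, 3)) for _ in range(3)]   # 3 layers, 2 events, 3x3 cells
data[1][0, 1, 1] = 5.0   # event 0 deposits only up to layer 1
data[2][1, 0, 0] = 2.0   # event 1 reaches layer 2
print(depth(data))       # [1 2]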
aa48c88c516382aebe2a8b761b21f650afea82b1
32,807
def transform_user_extensions(user_extension_json):
    """
    Transforms the raw extensions JSON from the API into a list of extensions mapped to users

    :param user_extension_json: The JSON text blob returned from the CRXcavator API
    :return: Tuple containing unique users list, unique extension list, and extension mapping for ingestion
    """
    user_extensions = user_extension_json.items()
    users_set = set()
    extensions = []
    extensions_by_user = []
    for extension in user_extensions:
        for details in extension[1].items():
            extension_id = extension[0]
            version = details[0]
            extensions.append({
                'extension_id': extension_id,
                'version': version,
                'name': details[1]['name'],
            })
            for user in details[1]['users']:
                if user is None:
                    logger.info(f'bad user for {extension_id}{version}')
                    continue
                users_set.add(user)
                extensions_by_user.append({
                    'id': f"{extension_id}|{version}",
                    'user': user,
                })

    if len(users_set) == 0:
        raise ValueError('No users returned from CRXcavator')
    if len(extensions) == 0:
        raise ValueError('No extensions information returned from CRXcavator')
    if len(extensions_by_user) == 0:
        raise ValueError('No user->extension mapping returned from CRXcavator')

    return list(users_set), extensions, extensions_by_user
89d91781028eb4335fff2c9bca446f50b5e91a3f
32,809
def shower_array_rot(shower_array, alt, az):
    """
    Given a series of points on the Z axis, perform a rotation of alt around Y
    and az around Z

    Parameters
    ----------
    shower_array: numpy array of shape (N,3) giving N points coordinates
    alt: altitude shower direction - float
    az: azimuth shower direction - float

    Returns
    -------
    Numpy array of shape (N,3) giving N points coordinates
    """
    rotated_shower_array = geo.rotation_matrix_z(az) * geo.rotation_matrix_y(pi / 2. - alt) * shower_array.T
    return np.array(rotated_shower_array.T)
d98a6a4589b8945e8dff1272608307915f499fd6
32,810
from typing import Optional


def get_dscp_configuration(dscp_configuration_name: Optional[str] = None,
                           resource_group_name: Optional[str] = None,
                           opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDscpConfigurationResult:
    """
    Use this data source to access information about an existing resource.

    :param str dscp_configuration_name: The name of the resource.
    :param str resource_group_name: The name of the resource group.
    """
    __args__ = dict()
    __args__['dscpConfigurationName'] = dscp_configuration_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20200601:getDscpConfiguration', __args__, opts=opts, typ=GetDscpConfigurationResult).value

    return AwaitableGetDscpConfigurationResult(
        associated_network_interfaces=__ret__.associated_network_interfaces,
        destination_ip_ranges=__ret__.destination_ip_ranges,
        destination_port_ranges=__ret__.destination_port_ranges,
        etag=__ret__.etag,
        location=__ret__.location,
        markings=__ret__.markings,
        name=__ret__.name,
        protocol=__ret__.protocol,
        provisioning_state=__ret__.provisioning_state,
        qos_collection_id=__ret__.qos_collection_id,
        resource_guid=__ret__.resource_guid,
        source_ip_ranges=__ret__.source_ip_ranges,
        source_port_ranges=__ret__.source_port_ranges,
        tags=__ret__.tags,
        type=__ret__.type)
80e0a388581cd7028c7a8351737808e578778611
32,811
def aggregate_returns(df_daily_rets, convert_to):
    """
    Aggregates returns by week, month, or year.

    Parameters
    ----------
    df_daily_rets : pd.Series
        Daily returns of the strategy, noncumulative.
        - See full explanation in tears.create_full_tear_sheet (returns).
    convert_to : str
        Can be 'weekly', 'monthly', or 'yearly'.

    Returns
    -------
    pd.Series
        Aggregated returns.
    """

    def cumulate_returns(x):
        return cum_returns(x)[-1]

    if convert_to == WEEKLY:
        return df_daily_rets.groupby(
            [lambda x: x.year,
             lambda x: x.isocalendar()[1]]).apply(cumulate_returns)
    elif convert_to == MONTHLY:
        return df_daily_rets.groupby(
            [lambda x: x.year, lambda x: x.month]).apply(cumulate_returns)
    elif convert_to == YEARLY:
        return df_daily_rets.groupby(
            [lambda x: x.year]).apply(cumulate_returns)
    else:
        raise ValueError(
            'convert_to must be {}, {} or {}'.format(WEEKLY, MONTHLY, YEARLY)
        )
7ccf2763420df055a59f15b679ba2c8265744a41
32,812
import random


def impute(x, nu):
    """
    Impute to missing values:
    for each row of x this function finds the nearest row in euclidean distance
    in a sample of nu rows of x and replaces the missing value of the former row
    with the corresponding values of the latter row
    """
    remember = x[:, 22]
    N, D = x.shape
    idx = get_jet_masks(x)
    x, x = missing_values(x, x)
    x, _, _ = standardize(x)
    cols = set(range(D))

    # class 1
    col1 = set([4, 5, 6, 12, 26, 27, 28])
    col1n = cols - col1
    idx23 = np.array(idx[2]) + np.array(idx[3])
    x1 = x[idx[1], :]
    x23 = x[idx23, :]
    for j in col1:
        for i in range(x[idx[1]].shape[0]):
            key = random.sample(range(x23.shape[0]), nu)
            k = np.argmin(abs((x23[key, :][:, list(col1n)] - x[i, list(col1n)])).sum(axis=1))
            x1[i, j] = x23[key, :][k, j]
    x[idx[1], :] = x1

    # class 0
    col0 = set([23, 24, 25, 29]).union(col1)
    col0n = cols - col0
    idx123 = np.array(idx[1]) + np.array(idx[2]) + np.array(idx[3])
    x0 = x[idx[0], :]
    x123 = x[idx123, :]
    for j in col0:
        for i in range(x[idx[1]].shape[0]):
            key = random.sample(range(x123.shape[0]), nu)
            k = np.argmin(abs((x123[key, :][:, list(col0n)] - x[i, list(col0n)])).sum(axis=1))
            x0[i, j] = x123[key, :][k, j]
    x[idx[0], :] = x0

    x[:, 22] = remember
    return x
e13562e28eaafcfaa7848eae9cca7ac9d435d1f4
32,814
def get_data_splits(comment_type_str=None, ignore_ast=False):
    """Retrieves train/validation/test sets for the given comment_type_str.

    comment_type_str -- Return, Param, Summary, or None (if None, uses all comment types)
    ignore_ast -- Skip loading ASTs (they take a long time)"""
    dataset, high_level_details = load_processed_data(comment_type_str, ignore_ast)
    train_examples = dataset['train']
    valid_examples = dataset['valid']
    test_examples = dataset['test']

    return train_examples, valid_examples, test_examples, high_level_details
b9451c5539c7ce235b7bb7f3251e3caa8e4b77c7
32,815
from typing import Tuple


def get_slide_roi_masks(slide_path, halo_roi_path, annotation_name,
                        slide_id: str = None, output_dir: str = None) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
    """get roi masks from slides

    Given a slide, halo annotation xml file, generate labels from xml polygons,
    then crop both the image and mask to ROI (rectangle) region

    Optionally: save the RGB image, downsampled image sample, and integer label mask as a tiff

    Args:
        slide_path (str): path to input slide
        halo_roi_path (str): path to halo annotation file
        annotation_name (str): name of annotation
        slide_id (Optional, str): slide id
        output_dir (Optional, str): destination to save RGB image, thumbnail and mask

    Returns:
        Tuple[np.ndarray, np.ndarray, np.ndarray]: the cropped image as RGB numpy array,
        a downsampled array (as sample for stains), and mask array as single channel
    """
    slide = openslide.OpenSlide(slide_path)

    # Annotation file has flipped dimensions w.r.t openslide conventions
    wsi_shape = slide.dimensions[1], slide.dimensions[0]

    annotation_mask = convert_xml_to_mask(halo_roi_path, wsi_shape, annotation_name)

    x_roi, y_roi = convert_halo_xml_to_roi(halo_roi_path)

    print (x_roi, y_roi)
    # print ((min(x_roi), min(y_roi)), 0, (abs(x_roi[1] - x_roi[0]), abs(y_roi[1] - y_roi[1])))

    slide_image_cropped = slide.read_region((min(x_roi), min(y_roi)), 0,
                                            (abs(x_roi[1] - x_roi[0]), abs(y_roi[1] - y_roi[0]))).convert('RGB')

    print (slide_image_cropped)

    slide_array = np.array(slide_image_cropped, dtype=np.uint8)
    sample_array = np.array(slide_image_cropped.resize(
        (slide_image_cropped.size[0] // 80, slide_image_cropped.size[1] // 80)
    ), dtype=np.uint8)
    mask_array = annotation_mask[min(y_roi):max(y_roi), min(x_roi):max(x_roi)].astype(np.uint8)

    if slide_id is not None and output_dir is not None:
        with tifffile.TiffWriter(f'{output_dir}/{slide_id}/{slide_id}_slideImage_roi_inRGB.tiff', bigtiff=True) as tiff:
            tiff.save(slide_array)
        with tifffile.TiffWriter(f'{output_dir}/{slide_id}/{slide_id}_slideSample_roi_inRGB.tiff', bigtiff=True) as tiff:
            tiff.save(sample_array)
        with tifffile.TiffWriter(f'{output_dir}/{slide_id}/{slide_id}_annotMask_roi_uint8.tiff', bigtiff=True) as tiff:
            tiff.save(mask_array)

    return slide_array, sample_array, mask_array
b79e9b8d93416c404110b56582b4ae1e9030bd7c
32,816
def generate_stop_enex_nb(entries: tp.Array2d,
                          ts: tp.Array,
                          stop: tp.MaybeArray[float],
                          trailing: tp.MaybeArray[bool],
                          entry_wait: int,
                          exit_wait: int,
                          pick_first: bool,
                          flex_2d: bool) -> tp.Tuple[tp.Array2d, tp.Array2d]:
    """Generate one after another using `generate_enex_nb` and `stop_choice_nb`.

    Returns two arrays: new entries and exits.

    !!! note
        Has the same logic as calling `generate_stop_ex_nb` with `skip_until_exit=True`,
        but removes all entries that come before the next exit."""
    temp_idx_arr = np.empty((entries.shape[0],), dtype=np.int_)
    return generate_enex_nb(
        entries.shape,
        entry_wait,
        exit_wait,
        True,
        pick_first,
        first_choice_nb, (entries,),
        stop_choice_nb, (ts, stop, trailing, exit_wait, pick_first, temp_idx_arr, flex_2d)
    )
a34108bd324498e70155f5dcc8c8c4364982c43f
32,817
def mangle_type(typ):
    """
    Mangle Numba type
    """
    if typ in N2C:
        typename = N2C[typ]
    else:
        typename = str(typ)
    return mangle_type_c(typename)
944952e184d1d33f9424c9e1118920f69f757e86
32,818
def lorentz(sample_len=1000, sigma=10, rho=28, beta=8 / 3, step=0.01):
    """This function generates a Lorentz time series of length sample_len,
    with standard parameters sigma, rho and beta.
    """
    x = np.zeros([sample_len])
    y = np.zeros([sample_len])
    z = np.zeros([sample_len])

    # Initial conditions taken from 'Chaos and Time Series Analysis', J. Sprott
    x[0] = 0
    y[0] = -0.01
    z[0] = 9

    for t in range(sample_len - 1):
        x[t + 1] = x[t] + sigma * (y[t] - x[t]) * step
        y[t + 1] = y[t] + (x[t] * (rho - z[t]) - y[t]) * step
        z[t + 1] = z[t] + (x[t] * y[t] - beta * z[t]) * step

    x.shape += (1,)
    y.shape += (1,)
    z.shape += (1,)

    return np.concatenate((x, y, z), axis=1)
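# Usage sketch (assumes numpy imported as np, as the function above requires):
# generate 5000 Euler steps of the attractor and inspect the output shape.
trajectory = lorentz(sample_len=5000)
print(trajectory.shape)   # (5000, 3) -> columns are x, y, z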
c8dc9de84dde15453fe99ed8fb55eddcdd628648
32,819
def draw_lane_lines_on_all_images(images, cols=2, rows=3, figsize=(15, 13)):
    """
    This method calls the draw_windows_and_fitted_lines Fn for each image
    and then shows the grid of output images.
    """
    no_of_images = len(images)
    fig, axes = plt.subplots(rows, cols, figsize=figsize)
    indexes = range(cols * rows)
    image_path_with_fitted_parameters = []

    for ax, index in zip(axes.flat, indexes):
        if index < no_of_images:
            image_path, image = images[index]
            left_fit, right_fit, left_fit_m, right_fit_m = draw_windows_and_fitted_lines(image, ax)
            ax.set_title(image_path)
            ax.axis('off')
            image_path_with_fitted_parameters.append((image_path, left_fit, right_fit,
                                                      left_fit_m, right_fit_m))

    fig.show()

    return image_path_with_fitted_parameters
27975a95836b784fece0547375b4d79868bbd3b6
32,820
import re


def get_field_order(address, latin=False):
    """
    Returns expected order of address form fields as a list of lists.

    Example for PL:

    >>> get_field_order({'country_code': 'PL'})
    [[u'name'], [u'company_name'], [u'street_address'], [u'postal_code', u'city']]
    """
    rules = get_validation_rules(address)
    address_format = (
        rules.address_latin_format if latin else rules.address_format)
    address_lines = address_format.split('%n')
    replacements = {'%%%s' % code: field_name
                    for code, field_name in FIELD_MAPPING.items()}
    all_lines = []
    for line in address_lines:
        fields = re.split('(%.)', line)
        single_line = [replacements.get(field) for field in fields]
        single_line = list(filter(None, single_line))
        all_lines.append(single_line)
    return all_lines
d86c36ab1026fdad6e0b66288708786d8d2cb906
32,821
def env_start():
    """ returns numpy array """
    global maze, current_position
    current_position = 500

    return current_position
ed377adedc48159607a4bb08ea6e3624575ec723
32,822
def bootstrap_acceleration(d):
    """ Bootstrap (BCA) acceleration term.

    Args:
        d : Jackknife differences
    Returns:
        a : Acceleration
    """
    return np.sum(d**3) / np.sum(d**2)**(3.0/2.0) / 6.0
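# Usage sketch (assumes numpy imported as np; the leave-one-out estimates are
# illustrative): d holds the jackknife differences theta_mean - theta_i, and the
# returned acceleration a would feed a BCa confidence interval.
import numpy as np

theta_i = np.array([1.2, 0.9, 1.1, 1.0, 0.8])   # leave-one-out estimates
d = theta_i.mean() - theta_i
a = bootstrap_acceleration(d)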
fbd9a05934d4c822863df0ba0b138db840f34955
32,823
def normalize_map(x):
    """
    normalize map input
    :param x: map input (H, W, ch)
    :return np.ndarray: normalized map (H, W, ch)
    """
    # rescale to [0, 2], later zero padding will produce equivalent obstacle
    return x * (2.0/255.0)
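# Quick check (assumes numpy imported as np; the all-255 grid is illustrative):
# a uint8 map full of 255 rescales to 2.0.
import numpy as np

grid = np.full((4, 4, 1), 255, dtype=np.uint8)
print(normalize_map(grid).max())   # 2.0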
f750df26c8e6f39553ada82e247e21b2e3d6aabd
32,824
def today():
    """Get today's date as an int date

    :return: int date of today
    """
    the_day = date.today()
    return to_int_date(the_day)
81c497cdf33050b6e8de31b0098d10acfb555444
32,825
from datetime import datetime


def contact(request):
    """Renders the contact page."""
    assert isinstance(request, HttpRequest)
    return render(
        request,
        'app/contact.html',
        {
            'title': '联系我们',
            'message': '你可以通过以下方式和我们取得联系',
            'year': datetime.now().year,
        }
    )
65056534556a8503d897b38468b43da968d4223d
32,826
import math


def plagdet_score(rec, prec, gran):
    """Combines recall, precision, and granularity to allow for ranking."""
    if (rec == 0 and prec == 0) or prec < 0 or rec < 0 or gran < 1:
        return 0
    return ((2 * rec * prec) / (rec + prec)) / math.log(1 + gran, 2)
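# Worked example (numbers are illustrative): with recall 0.8, precision 0.6 and
# granularity 1, the score reduces to the F1 value, since log2(1 + 1) == 1.
print(plagdet_score(0.8, 0.6, 1))   # 2*0.8*0.6/1.4 ≈ 0.686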
f8debf876d55296c3945d0d41c7701588a1869b6
32,827
def continuum(spec, bin=50, perc=60, norder=4):
    """ Derive the continuum of a spectrum."""
    nx = len(spec)
    x = np.arange(nx)

    # Loop over bins and find the maximum
    nbins = nx//bin
    xbin1 = np.zeros(nbins, float)
    ybin1 = np.zeros(nbins, float)
    for i in range(nbins):
        xbin1[i] = np.mean(x[i*bin:i*bin+bin])
        ybin1[i] = np.percentile(spec[i*bin:i*bin+bin], perc)

    # Fit polynomial to the binned values
    coef1 = np.polyfit(xbin1, ybin1, norder)
    cont1 = np.poly1d(coef1)(x)

    # Now remove large negative outliers and refit
    gdmask = np.zeros(nx, bool)
    gdmask[(spec/cont1) > 0.8] = True
    xbin = np.zeros(nbins, float)
    ybin = np.zeros(nbins, float)
    for i in range(nbins):
        xbin[i] = np.mean(x[i*bin:i*bin+bin][gdmask[i*bin:i*bin+bin]])
        ybin[i] = np.percentile(spec[i*bin:i*bin+bin][gdmask[i*bin:i*bin+bin]], perc)

    # Fit polynomial to the binned values
    coef = np.polyfit(xbin, ybin, norder)
    cont = np.poly1d(coef)(x)

    return cont, coef
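# Usage sketch (assumes numpy imported as np; the flat noisy "spectrum" is
# synthetic and illustrative): the fitted continuum should hover around 1.
import numpy as np

rng = np.random.default_rng(0)
spec = 1.0 + 0.01 * rng.standard_normal(500)
cont, coef = continuum(spec, bin=50, perc=60, norder=4)
print(cont.mean())   # close to 1.0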
42709c9361707ef8b614030906e9db5ea38087b3
32,828