Dataset columns: signature (8 – 3.44k chars) · body (0 – 1.41M chars) · docstring (1 – 122k chars) · id (5 – 17 chars).
def tctc(data, tau, epsilon, sigma, kappa=0, largedataset=False, rule='flock', noise=None, raw_signal='amplitude', output='array', tempdir=None, njobs=1, largestonly=False):
# assumes: import numpy as np; import networkx as nx; from scipy.signal import hilbert
# option strings ('flock'/'convoy', 'amplitude'/'phase', 'array'/'df') restored from the docstring
if largedataset:
    raise NotImplementedError('TCTC does not have a large dataset (HDF5) version implemented yet.')
else:
    N_data = data.shape[1]
    if noise is not None:
        if len(noise.shape) == 1:
            noise = np.array(noise, ndmin=2).transpose()
        N_data = data.shape[1]
        data = np.hstack([data, noise])
    N = data.shape[1]
    if raw_signal == 'amplitude':
        d = np.array([np.abs(data[:, n] - data[:, m])
                      for n in range(data.shape[-1]) for m in range(data.shape[-1])])
        d = np.reshape(d, [data.shape[-1], data.shape[-1], data.shape[0]])
    elif raw_signal == 'phase':
        analytic_signal = hilbert(data.transpose())
        instantaneous_phase = np.angle(analytic_signal)
        d = np.zeros([data.shape[1], data.shape[1], data.shape[0]])
        for n in range(data.shape[1]):
            for m in range(data.shape[1]):
                d[n, m, :] = np.remainder(
                    np.abs(instantaneous_phase[n, :] - instantaneous_phase[m, :]), np.pi)
    dat_shape = [int(d.shape[-1]), int(d.shape[0])]
    # tctc_mat is a (node, node, time) binary matrix of node pairs within epsilon
    tctc_mat = np.zeros([dat_shape[1], dat_shape[1], dat_shape[0]])
    tctc_mat[:, :, :][d <= epsilon] = 1
    t1 = 1
    t2 = 2
    # iterate until the community matrix stops changing
    while t1 != t2:
        t1 = tctc_mat.sum()
        cliques = []
        if tctc_mat.sum() > 0:
            # apply the size (sigma) rule and drop communities that contain noise nodes
            if rule == 'flock':
                cliques = [list(filter(lambda x: (len(x) >= sigma) and (len(set(x).intersection(np.arange(N_data, N + 1))) == 0),
                                       nx.find_cliques(nx.Graph(tctc_mat[:, :, t]))))
                           for t in range(tctc_mat.shape[-1])]
            elif rule == 'convoy':
                cliques = [list(map(list, filter(lambda x: (len(x) >= sigma) and (len(set(x).intersection(np.arange(N_data, N + 1))) == 0),
                                                 nx.connected_components(nx.Graph(tctc_mat[:, :, t])))))
                           for t in range(tctc_mat.shape[-1])]
            tctc_mat = np.zeros([dat_shape[1], dat_shape[1], dat_shape[0]])
            for t in range(dat_shape[0]):
                for c in cliques[t]:
                    cv = [[i] for i in c]
                    tctc_mat[cv, c, t] = 1
        if tctc_mat.sum() > 0:
            # pad in time, then apply the tau (duration) and kappa (tolerated gap) rules
            tctc_mat = np.dstack([np.zeros([dat_shape[1], dat_shape[1], 1]), tctc_mat,
                                  np.zeros([dat_shape[1], dat_shape[1], tau + kappa])])
            tctc_mat_community = np.array(tctc_mat.flatten())
            tctc_mat_dif = np.append(tctc_mat_community, 0)
            tctc_mat_dif = np.diff(tctc_mat_dif)
            start_ones = np.where(tctc_mat_dif == 1)[0]
            end_ones = np.where(tctc_mat_dif == -1)[0]
            skip_ind = np.where(start_ones[1:] - end_ones[:-1] <= kappa)[0]
            start_ones = np.delete(start_ones, skip_ind + 1)
            end_ones = np.delete(end_ones, skip_ind)
            traj_len = end_ones - start_ones
            ind = start_ones[traj_len >= tau] + 1
            l2 = traj_len[traj_len >= tau]
            tctc_mat = np.zeros(tctc_mat_community.shape)
            for i in range(len(ind)):
                tctc_mat[ind[i]:ind[i] + l2[i]] = 1
            tctc_mat = tctc_mat.reshape(dat_shape[1], dat_shape[1], dat_shape[0] + kappa + tau + 1)
            tctc_mat = tctc_mat[:, :, 1:dat_shape[0] + 1]
        t2 = tctc_mat.sum()
    tctc_mat = tctc_mat[:N_data, :N_data]
    if output == 'array':
        return tctc_mat
    elif output == 'df':
        if np.sum(tctc_mat) != 0:
            df = partition_inference(tctc_mat, cliques, tau, sigma, kappa)
            return df
        else:
            return []
r""" Runs TCTC community detection Parameters ---------- data : array Multiariate series with dimensions: "time, node" that belong to a network. tau : int tau specifies the minimum number of time-points of each temporal community must last. epsilon : float epsilon specifies the distance points in a community can be away from each other. sigma : int sigma specifies the minimum number of nodes that must be in a community. kappa : int kappa specifies the number of consecutive time-points that can break the distance or size rules. largedataset : bool If true, runs with HDF5 (beta) rule : str Can be 'convoy' or 'flock'. - flock entials all nodes are max epsilon apart in a communiy. - convoy entails that there is at least one node that is epsilon apart. noise : array (defauly None) Timeseries of dimensions "time, N" where N is the number of noise time series added. Any community that contains this time series is excluded. raw_signal : str Can be amplitude or phase output : str Can be array or df or None tempdir : str Specify where the temporary directory is if largedataset is True njobs : int number of jobs (not implemented yet) largestonly : bool (default False) If True only considers largest communities in rule application (should generally be false) Returns ----------- tctc : array, df
f1957:m1
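Example usage (a minimal sketch: the restored option strings above are docstring-derived assumptions, and numpy/networkx/scipy are assumed to be imported at module level):

import numpy as np
data = np.random.randn(120, 6)                         # (time, node)
communities = tctc(data, tau=5, epsilon=0.5, sigma=2, output='array')
print(communities.shape)                               # (6, 6, 120) binary community matrix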
def temporal_louvain(tnet, resolution=1, intersliceweight=1, n_iter=100, negativeedge='ignore', randomseed=None, consensus_threshold=0.5, temporal_consensus=True, njobs=1):
# assumes: import numpy as np; import networkx as nx;
# from concurrent.futures import ProcessPoolExecutor, as_completed
# format codes and the 'weight' column name restored from this module's other functions
tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
resolution = resolution / tnet.T
supranet = create_supraadjacency_matrix(tnet, intersliceweight=intersliceweight)
if negativeedge == 'ignore':
    supranet = supranet[supranet['weight'] > 0]
nxsupra = tnet_to_nx(supranet)
np.random.seed(randomseed)
while True:
    comtmp = []
    with ProcessPoolExecutor(max_workers=njobs) as executor:
        job = {executor.submit(_run_louvain, nxsupra, resolution, tnet.N, tnet.T)
               for n in range(n_iter)}
        for j in as_completed(job):
            comtmp.append(j.result())
    comtmp = np.stack(comtmp)
    comtmp = comtmp.transpose()
    comtmp = np.reshape(comtmp, [tnet.N, tnet.T, n_iter], order='F')
    if n_iter == 1:
        break
    nxsupra_old = nxsupra
    nxsupra = make_consensus_matrix(comtmp, consensus_threshold)
    # stop when no consensus network can be made, or the consensus has converged
    if nxsupra is None:
        break
    if (nx.to_numpy_array(nxsupra, nodelist=np.arange(tnet.N * tnet.T)) ==
            nx.to_numpy_array(nxsupra_old, nodelist=np.arange(tnet.N * tnet.T))).all():
        break
communities = comtmp[:, :, 0]
if temporal_consensus:
    communities = make_temporal_consensus(communities)
return communities
r""" Louvain clustering for a temporal network. Parameters ----------- tnet : array, dict, TemporalNetwork Input network resolution : int resolution of Louvain clustering ($\gamma$) intersliceweight : int interslice weight of multilayer clustering ($\omega$). Must be positive. n_iter : int Number of iterations to run louvain for randomseed : int Set for reproduceability negativeedge : str If there are negative edges, what should be done with them. Options: 'ignore' (i.e. set to 0). More options to be added. consensus : float (0.5 default) When creating consensus matrix to average over number of iterations, keep values when the consensus is this amount. Returns ------- communities : array (node,time) node,time array of community assignment Notes ------- References ----------
f1958:m0
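Example usage (a hedged sketch: the TemporalNetwork constructor call is an assumption based on how the class is used elsewhere in this module, and the module's _run_louvain helper is assumed available):

import numpy as np
G = (np.random.rand(8, 8, 20) > 0.8).astype(float)
G = np.maximum(G, G.transpose(1, 0, 2))            # undirected binary snapshots
tnet = TemporalNetwork(from_array=G)               # assumed constructor
communities = temporal_louvain(tnet, n_iter=10, randomseed=42)
print(communities.shape)                           # (8, 20): one label per node per time point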
def make_consensus_matrix(com_membership, th=0.5):
# column names 'i', 'j', 't', 'weight' restored from their use elsewhere in this module
com_membership = np.array(com_membership)
D = []
for i in range(com_membership.shape[0]):
    for j in range(i + 1, com_membership.shape[0]):
        # fraction of iterations in which nodes i and j share a community, per time point
        con = np.sum((com_membership[i, :] - com_membership[j, :]) == 0,
                     axis=-1) / com_membership.shape[-1]
        twhere = np.where(con > th)[0]
        D += list(zip(*[np.repeat(i, len(twhere)).tolist(),
                        np.repeat(j, len(twhere)).tolist(),
                        twhere.tolist(), con[twhere].tolist()]))
if len(D) > 0:
    D = pd.DataFrame(D, columns=['i', 'j', 't', 'weight'])
    D = TemporalNetwork(from_df=D)
    D = create_supraadjacency_matrix(D, intersliceweight=0)
    Dnx = tnet_to_nx(D)
else:
    Dnx = None
return Dnx
r""" Makes the consensus matrix . Parameters ---------- com_membership : array Shape should be node, time, iteration. th : float threshold to cancel noisey edges Returns ------- D : array consensus matrix
f1958:m2
def make_temporal_consensus(com_membership):
# assumes jaccard is scipy.spatial.distance.jaccard
com_membership = np.array(com_membership)
com_membership[:, 0] = clean_community_indexes(com_membership[:, 0])
for t in range(1, com_membership.shape[1]):
    # communities at time t, largest first
    ct, counts_t = np.unique(com_membership[:, t], return_counts=True)
    ct = ct[np.argsort(counts_t)[::-1]]
    c1back = np.unique(com_membership[:, t - 1])
    new_index = np.zeros(com_membership.shape[0])
    for n in ct:
        if len(c1back) > 0:
            # find the community at t-1 with the smallest Jaccard distance
            d = np.ones(int(c1back.max()) + 1)
            for m in c1back:
                v1 = np.zeros(com_membership.shape[0])
                v2 = np.zeros(com_membership.shape[0])
                v1[com_membership[:, t] == n] = 1
                v2[com_membership[:, t - 1] == m] = 1
                d[int(m)] = jaccard(v1, v2)
            bestval = np.argmin(d)
        else:
            bestval = new_index.max() + 1
        new_index[com_membership[:, t] == n] = bestval
        c1back = np.array(np.delete(c1back, np.where(c1back == bestval)))
    com_membership[:, t] = new_index
return com_membership
r""" Matches community labels accross time-points Jaccard matching is in a greedy fashiong. Matching the largest community at t with the community at t-1. Parameters ---------- com_membership : array Shape should be node, time. Returns ------- D : array temporal consensus matrix using Jaccard distance
f1958:m3
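A small worked example (assuming an integer label array): the partition at t=1 is identical to t=0 but with flipped labels, and the matching renames them.

import numpy as np
com = np.array([[0, 1],
                [0, 1],
                [1, 0]])                 # (node, time); labels flipped at t=1
print(make_temporal_consensus(com))
# [[0 0]
#  [0 0]
#  [1 1]]  -- the t=1 labels are renamed to agree with t=0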
def drop_bids_suffix(fname):
if '/' in fname:
    split = fname.split('/')
    dirnames = '/'.join(split[:-1]) + '/'
    fname = split[-1]
else:
    dirnames = ''
# keep only the BIDS key-value tags (parts containing '-')
tags = [tag for tag in fname.split('_') if '-' in tag]
fname_head = '_'.join(tags)
fileformat = '.' + '.'.join(fname.split('.')[1:])
return dirnames + fname_head, fileformat
Given a filename such as sub-01_run-01_preproc.nii.gz, returns ('sub-01_run-01', '.nii.gz').

Parameters
----------
fname : str
    BIDS filename with suffix. A leading directory path is allowed and is preserved in the output.

Returns
-------
fname_head : str
    BIDS filename with the suffix and file extension dropped.
fileformat : str
    The file format (text after the suffix).

Note
----
This assumes that there are no periods in the filename other than the file extension.
f1959:m2
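Usage, taken directly from the docstring's example:

fname_head, fileformat = drop_bids_suffix('sub-01_run-01_preproc.nii.gz')
print(fname_head)    # 'sub-01_run-01'
print(fileformat)    # '.nii.gz'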
def load_tabular_file(fname, return_meta=False, header=True, index_col=True):
# the '.tsv' / '.json' endings are assumed from the docstring (input must be tsv;
# metadata sits in an accompanying json file)
if index_col:
    index_col = 0
else:
    index_col = None
if header:
    header = 0
else:
    header = None
df = pd.read_csv(fname, header=header, index_col=index_col, sep='\t')
if return_meta:
    json_fname = fname.replace('.tsv', '.json')
    meta = pd.read_json(json_fname)
    return df, meta
else:
    return df
Loads a tab-separated file as a pandas dataframe.

Parameters
----------
fname : str
    File name and path. Must be a tsv file.
return_meta : bool (default False)
    If True, also load and return metadata from the accompanying json file.
header : bool (default True)
    If True, the first row in the tsv file is used as the header.
index_col : bool (default True)
    If True, the first column in the tsv file is used as the index.

Returns
-------
df : pandas
    The loaded file.
info : pandas, if return_meta=True
    Meta information from the json file (if specified).
f1959:m4
def get_sidecar(fname, allowedfileformats='default'):
# the default formats and the 'filestatus' keys are assumptions, based on how the
# sidecar is used for file exclusion elsewhere in teneto
if allowedfileformats == 'default':
    allowedfileformats = ['.tsv', '.nii.gz']
for f in allowedfileformats:
    fname = fname.split(f)[0]
fname += '.json'
if os.path.exists(fname):
    with open(fname) as fs:
        sidecar = json.load(fs)
else:
    sidecar = {}
if 'filestatus' not in sidecar:
    sidecar['filestatus'] = {}
    sidecar['filestatus']['reject'] = False
    sidecar['filestatus']['reason'] = []
return sidecar
Loads the json sidecar for a file, creating a default one if none exists.
f1959:m5
def process_exclusion_criteria(exclusion_criteria):
relfun = []
threshold = []
for ec in exclusion_criteria:
    if ec[0:2] == '>=':
        relfun.append(np.greater_equal)
        threshold.append(float(ec[2:]))
    elif ec[0:2] == '<=':
        relfun.append(np.less_equal)
        threshold.append(float(ec[2:]))
    elif ec[0] == '>':
        relfun.append(np.greater)
        threshold.append(float(ec[1:]))
    elif ec[0] == '<':
        relfun.append(np.less)
        threshold.append(float(ec[1:]))
    else:
        raise ValueError('exclusion criteria must begin with >, <, >= or <=')
return relfun, threshold
Parses an exclusion criteria string to get the function and threshold.

Parameters
----------
exclusion_criteria : list
    List of strings where each string is of the format [relation][threshold], e.g. '<0.5' or '>=1'.

Returns
-------
relfun : list
    List of numpy functions for the exclusion criteria.
threshold : list
    List of floats, one threshold for each relfun.
f1959:m7
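For example, each returned numpy comparison function can be applied directly against its threshold:

relfun, threshold = process_exclusion_criteria(['>=0.5', '<2'])
print(relfun[0](0.7, threshold[0]))    # True  (0.7 >= 0.5)
print(relfun[1](3.0, threshold[1]))    # False (3.0 is not < 2)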
def graphlet2contact(G, params=None):
# parameter keys ('nodelabels', 't0', 'nettype', 'Fs', 'timeunit', 'diagonal') and output
# keys ('contacts', 'netshape', 'dimord', 'timetype', 'values') restored from the docstring;
# error message texts are paraphrases
if params is None:
    params = {}
if G.shape[0] != G.shape[1]:
    raise ValueError('Input G must have dimensions (node, node) or (node, node, time)')
if len(G.shape) == 2:
    G = np.atleast_3d(G)
if len(G.shape) != 3:
    raise ValueError('Input G must have dimensions (node, node) or (node, node, time)')
if 'nodelabels' in params.keys():
    if params['nodelabels']:
        if len(params['nodelabels']) != G.shape[0]:
            raise ValueError('nodelabels must be the same length as the number of nodes')
if 't0' in params.keys():
    params['t0'] = np.atleast_1d(np.array(params['t0']))
    if len(params['t0']) != 1:
        raise ValueError('t0 must be a single value')
    params['t0'] = np.squeeze(params['t0'])
if 'nettype' not in params.keys() or params['nettype'] == 'auto':
    params['nettype'] = gen_nettype(G, 1)
if params['nettype'] not in {'bd', 'bu', 'wd', 'wu', 'auto'}:
    raise ValueError('nettype must be bd, bu, wd, wu or auto')
if 'Fs' not in params.keys():
    params['Fs'] = 1
if 'timeunit' not in params.keys():
    params['timeunit'] = ''
if 'diagonal' not in params.keys():
    params['diagonal'] = 0
if 'nodelabels' not in params.keys():
    params['nodelabels'] = ''
else:
    params['nodelabels'] = list(params['nodelabels'])
if 't0' not in params.keys():
    params['t0'] = 1
nt = params['nettype']
G = set_diagonal(G, 0)
if nt[1] == 'u':
    # for undirected networks, keep only the upper triangle of each snapshot
    G = [np.triu(G[:, :, t], k=1) for t in range(0, G.shape[2])]
    G = np.transpose(G, [1, 2, 0])
edg = np.where(np.abs(G) > 0)
sortTime = np.argsort(edg[2])
contacts = np.array([tuple([edg[0][i], edg[1][i], edg[2][i]]) for i in sortTime])
if nt[0] == 'w':
    values = list(G[edg[0][sortTime], edg[1][sortTime], edg[2][sortTime]])
C = params
C['contacts'] = contacts
C['netshape'] = G.shape
C['dimord'] = 'node,node,time'
C['timetype'] = 'discrete'
if nt[0] == 'w':
    C['values'] = values
return C
Converts a graphlet (snapshot) representation of a temporal network to a contact representation.

A contact representation contains all non-zero edges; it is more memory efficient and includes metadata that makes plotting easier.

Parameters
----------
G : array_like
    Temporal network.
params : dict, optional
    Dictionary of parameters for the contact representation.

    *Fs* : int, default=1
        Sampling rate.
    *timeunit* : str, default=''
        Units of the sampling rate (e.g. seconds, minutes, years).
    *nettype* : str, default='auto'
        Network type. Can be: 'auto' (detects automatically), 'wd' (weighted, directed), 'bd' (binary, directed), 'wu' (weighted, undirected), 'bu' (binary, undirected).
    *diagonal* : int, default=0
        What the diagonal should be. (Note: this could be expanded to take a vector of unique diagonal values in the future, but that is not implemented now.)
    *timetype* : str, default='discrete'
        Type of time units.
    *nodelabels* : list
        Set node labels.
    *t0* : int
        Time label at the first index.

    The params dict becomes the foundation of 'C'; any other information in params will be added to C.

Returns
-------
C : dict
    Contact representation of the temporal network. Includes 'contacts', 'values' (if nettype[0]='w'), 'nettype', 'netshape', 'Fs', 'dimord', 'timeunit' and 'timetype'.
f1961:m0
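A minimal example (the dict key names are the docstring-derived restorations above):

import numpy as np
G = np.zeros((3, 3, 2))
G[0, 1, 0] = 1
G[1, 2, 1] = 1
G = G + G.transpose(1, 0, 2)       # undirected binary network
C = graphlet2contact(G)
print(C['nettype'])                # 'bu'
print(C['contacts'])               # [[0 1 0], [1 2 1]]: (i, j, t) sorted by time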
def contact2graphlet(C):
# required keys restored from the docstring; error message texts are paraphrases
if 'dimord' not in C.keys():
    raise ValueError('C must contain a "dimord" key')
if C['dimord'] != 'node,node,time':
    raise ValueError('dimord must be "node,node,time"')
if 'nettype' not in C.keys():
    raise ValueError('C must contain a "nettype" key (bu, bd, wu or wd)')
if C['nettype'] not in {'bu', 'bd', 'wu', 'wd'}:
    raise ValueError('nettype must be bu, bd, wu or wd')
if 'netshape' not in C.keys():
    raise ValueError('C must contain a "netshape" key (tuple of network dimensions)')
if not isinstance(C['netshape'], tuple):
    raise ValueError('netshape must be a tuple')
if len(C['netshape']) != 3:
    raise ValueError('netshape must be a tuple of length 3')
if C['nettype'][0] == 'w' and 'values' not in C.keys():
    raise ValueError('weighted networks must contain a "values" key')
if 'contacts' not in C.keys():
    raise ValueError('C must contain a "contacts" key')
if C['timetype'] != 'discrete':
    print('Warning: timetype is not discrete; each index is treated as one time unit')
nt = C['nettype']
G = np.zeros(C['netshape'])
idx = np.array(list(map(list, C['contacts'])))
if nt[0] == 'b':
    G[idx[:, 0], idx[:, 1], idx[:, 2]] = 1
    if nt[1] == 'u':
        G[idx[:, 1], idx[:, 0], idx[:, 2]] = 1
elif nt[0] == 'w':
    G[idx[:, 0], idx[:, 1], idx[:, 2]] = C['values']
    if nt[1] == 'u':
        G[idx[:, 1], idx[:, 0], idx[:, 2]] = C['values']
if C['diagonal'] != 0:
    G = set_diagonal(G, C['diagonal'])
return G
Converts a contact representation to a graphlet (snapshot) representation.

The graphlet representation discards all meta information in the contact representation.

Parameters
----------
C : dict
    A contact representation. Must include the keys: 'dimord', 'netshape', 'nettype', 'contacts' and, if weighted, 'values'.

Returns
-------
G : array
    Graphlet representation of the temporal network.

Note
----
Returned elements of G will be float, even for binary graphs.
f1961:m1
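Round-trip check, reusing graphlet2contact from above (which sets 'netshape' to the required tuple):

import numpy as np
G = np.zeros((3, 3, 2))
G[0, 1, 0] = 1
G = G + G.transpose(1, 0, 2)
C = graphlet2contact(G)
G2 = contact2graphlet(C)
print(np.array_equal(G, G2))   # True for this binary undirected example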
def binarize_percent(netin, level, sign='pos', axis='time'):
netin, netinfo = process_input(netin, ['C', 'G', 'TN'])
netin = set_diagonal(netin, 0)
if axis == 'graphlet' and netinfo['nettype'][-1] == 'u':
    triu = np.triu_indices(netinfo['netshape'][0], k=1)
    netin = netin[triu[0], triu[1], :]
    netin = netin.transpose()
if sign == 'both':
    net_sorted = np.argsort(np.abs(netin), axis=-1)
elif sign == 'pos':
    net_sorted = np.argsort(netin, axis=-1)
elif sign == 'neg':
    net_sorted = np.argsort(-1 * netin, axis=-1)
else:
    raise ValueError('sign must be pos, neg or both')
netout = np.zeros(netinfo['netshape'])
if axis == 'time':
    # keep the top 'level' fraction of each edge time series
    for i in range(netinfo['netshape'][0]):
        for j in range(netinfo['netshape'][1]):
            netout[i, j, net_sorted[i, j, -int(round(net_sorted.shape[-1] * level)):]] = 1
elif axis == 'graphlet':
    # keep the top 'level' fraction of edges within each graphlet
    netout_tmp = np.zeros(netin.shape)
    for i in range(netout_tmp.shape[0]):
        netout_tmp[i, net_sorted[i, -int(round(net_sorted.shape[-1] * level)):]] = 1
    netout_tmp = netout_tmp.transpose()
    netout[triu[0], triu[1], :] = netout_tmp
    netout[triu[1], triu[0], :] = netout_tmp
netout = set_diagonal(netout, 0)
if netinfo['inputtype'] == 'C':
    netinfo['nettype'] = 'b' + netinfo['nettype'][1]
    netout = graphlet2contact(netout, netinfo)
    netout.pop('inputtype')
    netout.pop('values')
    netout['diagonal'] = 0
return netout
Binarizes a network proportionally.

Parameters
----------
netin : array or dict
    Network (graphlet or contact representation).
level : float
    Fraction to keep (expressed as a decimal, e.g. 0.1 = top 10%).
sign : str, default='pos'
    Sign of the thresholding. Can be 'pos', 'neg' or 'both'. If 'neg', only negative values are thresholded and vice versa.
axis : str, default='time'
    Dimension that thresholding is applied along. Can be 'time' (takes the top % of each edge time series) or 'graphlet' (takes the top % of each graphlet).

Returns
-------
netout : array or dict (depending on input)
    Binarized network.
f1961:m2
def binarize_rdp(netin, level, sign='pos', axis='time'):
# the rdp trajectory dict keys ('index', 'trajectory', 'trajectory_points') are assumptions
# based on how the compression output is indexed here
netin, netinfo = process_input(netin, ['C', 'G', 'TN'])
trajectory = rdp(netin, level)
contacts = []
# keep the trajectory points above/below zero, depending on sign
for n in range(trajectory['index'].shape[0]):
    if sign == 'pos':
        sel = trajectory['trajectory_points'][n][trajectory['trajectory']
            [n][trajectory['trajectory_points'][n]] > 0]
    elif sign == 'neg':
        sel = trajectory['trajectory_points'][n][trajectory['trajectory']
            [n][trajectory['trajectory_points'][n]] < 0]
    else:
        sel = trajectory['trajectory_points'][n]
    i_ind = np.repeat(trajectory['index'][n, 0], len(sel))
    j_ind = np.repeat(trajectory['index'][n, 1], len(sel))
    contacts.append(np.array([i_ind, j_ind, sel]).transpose())
contacts = np.concatenate(contacts)
netout = dict(netinfo)
netout['contacts'] = contacts
netout['nettype'] = 'b' + netout['nettype'][1]
netout['dimord'] = 'node,node,time'
netout['timetype'] = 'discrete'
netout['diagonal'] = 0
if netinfo['inputtype'] == 'G':
    netout = contact2graphlet(netout)
else:
    netout.pop('inputtype')
return netout
Binarizes a network based on RDP compression.

Parameters
----------
netin : array or dict
    Network (graphlet or contact representation).
level : float
    Delta parameter, i.e. the tolerated error in the RDP compression.
sign : str, default='pos'
    Sign of the thresholding. Can be 'pos', 'neg' or 'both'. If 'neg', only negative values are thresholded and vice versa.

Returns
-------
netout : array or dict (depending on input)
    Binarized network.
f1961:m3
def binarize_magnitude(netin, level, sign='pos'):
netin, netinfo = process_input(netin, ['C', 'G', 'TN'])
netout = np.zeros(netinfo['netshape'])
if sign == 'pos' or sign == 'both':
    netout[netin > level] = 1
if sign == 'neg' or sign == 'both':
    netout[netin < level] = 1
netout = set_diagonal(netout, 0)
if netinfo['inputtype'] == 'C':
    netinfo['nettype'] = 'b' + netinfo['nettype'][1]
    netout = graphlet2contact(netout, netinfo)
    netout.pop('inputtype')
    netout.pop('values')
    netout['diagonal'] = 0
return netout
Binarizes a network based on the magnitude of the weights.

Parameters
----------
netin : array or dict
    Network (graphlet or contact representation).
level : float
    Magnitude level to threshold at.
sign : str, default='pos'
    Sign of the thresholding. Can be 'pos', 'neg' or 'both'. If 'neg', only negative values are thresholded and vice versa.

Returns
-------
netout : array or dict (depending on input)
    Binarized network.
f1961:m4
def binarize(netin, threshold_type, threshold_level, sign='pos', axis='time'):
if threshold_type == 'percent':
    netout = binarize_percent(netin, threshold_level, sign, axis)
elif threshold_type == 'magnitude':
    netout = binarize_magnitude(netin, threshold_level, sign)
elif threshold_type == 'rdp':
    netout = binarize_rdp(netin, threshold_level, sign, axis)
else:
    raise ValueError('Unknown threshold_type: must be percent, magnitude or rdp')
return netout
Binarizes a network, returning the binarized network. General wrapper function for the different binarization functions.

Parameters
----------
netin : array or dict
    Network (graphlet or contact representation).
threshold_type : str
    What type of threshold to binarize with. Options: 'rdp', 'percent', 'magnitude'.
threshold_level : float
    Parameter dependent on threshold type. If 'rdp', it is the delta (i.e. the error allowed in compression). If 'percent', it is the fraction to keep (e.g. 0.1 means keep the top 10% of the signal). If 'magnitude', it is the amplitude of signal to keep.
sign : str, default='pos'
    Sign of the thresholding. Can be 'pos', 'neg' or 'both'. If 'neg', only negative values are thresholded and vice versa.
axis : str
    Threshold over the specified axis. Valid for percent and rdp. Can be 'time' or 'graphlet'.

Returns
-------
netout : array or dict (depending on input)
    Binarized network.
f1961:m5
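Example usage of the wrapper (the option name 'percent' is the docstring-derived restoration above):

import numpy as np
np.random.seed(1)
G = np.random.rand(4, 4, 10)
G = (G + G.transpose(1, 0, 2)) / 2          # symmetric weighted network
Gbin = binarize(G, 'percent', 0.1)          # keep the top 10% of each edge time series
print(Gbin.shape, Gbin.max())               # (4, 4, 10) 1.0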
def set_diagonal(G, val=0):
for t in range(0, G.shape[2]):
    np.fill_diagonal(G[:, :, t], val)
return G
Sets the diagonal of a temporal network across all time points. Generally the diagonal is set to 0.

Parameters
----------
G : array
    Temporal network (graphlet).
val : int or float
    Value to set the diagonal to (default 0).

Returns
-------
G : array
    Graphlet representation with the new diagonal.
f1961:m6
def gen_nettype(G, printWarning=0):
# binary if only 0s and 1s are present, weighted otherwise
if set(np.unique(G)) == set([0, 1]):
    weights = 'b'
else:
    weights = 'w'
# undirected if every snapshot is symmetric
if np.allclose(G.transpose(1, 0, 2), G):
    direction = 'u'
else:
    direction = 'd'
nettype = weights + direction
return nettype
Attempts to identify the nettype of the input graphlet G. The diagonal is ignored.

Parameters
----------
G : array
    Temporal network (graphlet).
printWarning : int, default=0
    Currently unused in the function body.

Returns
-------
nettype : str
    'wu', 'bu', 'wd', or 'bd'.
f1961:m7
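For example:

import numpy as np
G = np.zeros((3, 3, 2))
G[0, 1, :] = 0.5
print(gen_nettype(G))   # 'wd': weighted (values other than 0/1), directed (asymmetric)
G[1, 0, :] = 0.5
print(gen_nettype(G))   # 'wu': now symmetric, hence undirected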
def checkInput(netIn, raiseIfU=1, conMat=0):
# the contact-dict key names and the return codes ('G', 'C', 'M', 'TN', 'U') are restored
# from this module's other functions; treat the key checks as assumptions
inputIs = 'U'
if isinstance(netIn, np.ndarray):
    netShape = netIn.shape
    if len(netShape) == 3 and netShape[0] == netShape[1]:
        inputIs = 'G'
    elif netShape[0] == netShape[1] and conMat == 1:
        inputIs = 'M'
elif isinstance(netIn, dict):
    if 'nettype' in netIn and 'dimord' in netIn and 'timetype' in netIn and 'contacts' in netIn:
        if netIn['nettype'] in {'bu', 'bd', 'wu', 'wd'} and netIn['dimord'] == 'node,node,time' and netIn['timetype'] == 'discrete':
            inputIs = 'C'
elif isinstance(netIn, object):
    if hasattr(netIn, 'network'):
        inputIs = 'TN'
if raiseIfU == 1 and inputIs == 'U':
    raise ValueError('Input cannot be identified as a graphlet or contact representation')
return inputIs
This function checks whether the netIn input is a graphlet (G) or contact (C) representation.

Parameters
----------
netIn : array or dict
    Temporal network (graphlet or contact).
raiseIfU : int, default=1
    Options: 1 or 0. If 1, an error is raised if the input is not identified as G or C.
conMat : int, default=0
    Options: 1 or 0. If 1, the input is allowed to be a 2-dimensional connectivity matrix, and the output may be 'M'.

Returns
-------
inputtype : str
    String indicating the input type: 'G', 'C', 'M', 'TN' or 'U' (unknown). 'M' is a special case only allowed when conMat=1 for a 2D connectivity matrix.
f1961:m8
def getDistanceFunction(requested_metric):
# metric names mirror the scipy.spatial.distance functions they map to
distance_options = {
    'braycurtis': distance.braycurtis,
    'canberra': distance.canberra,
    'chebyshev': distance.chebyshev,
    'cityblock': distance.cityblock,
    'correlation': distance.correlation,
    'cosine': distance.cosine,
    'euclidean': distance.euclidean,
    'sqeuclidean': distance.sqeuclidean,
    'dice': distance.dice,
    'hamming': distance.hamming,
    'jaccard': distance.jaccard,
    'kulsinski': distance.kulsinski,
    'matching': distance.matching,
    'rogerstanimoto': distance.rogerstanimoto,
    'russellrao': distance.russellrao,
    'sokalmichener': distance.sokalmichener,
    'sokalsneath': distance.sokalsneath,
    'yule': distance.yule,
}
if requested_metric in distance_options:
    return distance_options[requested_metric]
else:
    raise ValueError('Distance function cannot be found.')
This function returns a specified distance function.

Parameters
----------
requested_metric : str
    Distance function name. Can be any function listed in https://docs.scipy.org/doc/scipy/reference/spatial.distance.html.

Returns
-------
requested_metric : function
    The matching scipy distance function.
f1961:m9
def process_input(netIn, allowedformats, outputformat='G'):
# format codes ('G', 'C', 'TN') and key names restored from the docstring and checkInput
inputtype = checkInput(netIn)
if inputtype == 'TN' and 'TN' in allowedformats and outputformat != 'TN':
    G = netIn.df_to_array()
    netInfo = {'nettype': netIn.nettype, 'netshape': netIn.netshape}
elif inputtype == 'TN' and 'TN' in allowedformats and outputformat == 'TN':
    TN = netIn
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'G':
    G = contact2graphlet(netIn)
    netInfo = dict(netIn)
    netInfo.pop('contacts')
elif inputtype == 'C' and 'C' in allowedformats and outputformat == 'TN':
    TN = TemporalNetwork(from_dict=netIn)
elif inputtype == 'G' and 'G' in allowedformats and outputformat == 'TN':
    TN = TemporalNetwork(from_array=netIn)
elif inputtype == 'G' and 'G' in allowedformats:
    netInfo = {}
    netInfo['netshape'] = netIn.shape
    netInfo['nettype'] = gen_nettype(netIn)
    G = netIn
elif inputtype == 'C' and outputformat == 'C':
    pass
else:
    raise ValueError('Input invalid.')
if outputformat == 'TN' and not isinstance(TN.network, str):
    TN.network['i'] = TN.network['i'].astype(int)
    TN.network['j'] = TN.network['j'].astype(int)
    TN.network['t'] = TN.network['t'].astype(int)
if outputformat == 'C' or outputformat == 'G':
    netInfo['inputtype'] = inputtype
if inputtype != 'C' and outputformat == 'C':
    C = graphlet2contact(G, netInfo)
if outputformat == 'G':
    return G, netInfo
elif outputformat == 'C':
    return C
elif outputformat == 'TN':
    return TN
Takes an input network, checks what the input is, and converts it to the requested output format.

Parameters
----------
netIn : array, dict, or TemporalNetwork
    Network (graphlet, contact or object).
allowedformats : list
    Which network formats are allowed. Options: 'C', 'TN', 'G'.
outputformat : str, default='G'
    Target output format. Options: 'C', 'G' or 'TN'.

Returns
-------
C : dict
    OR
G : array
    Graphlet representation.
netInfo : dict
    Meta information about the network.
    OR
tnet : object
    Object of the TemporalNetwork class.
f1961:m10
def clean_community_indexes(communityID):
communityID = np.array(communityID)
cid_shape = communityID.shape
if len(cid_shape) > 1:
    communityID = communityID.flatten()
new_communityID = np.zeros(len(communityID))
for i, n in enumerate(np.unique(communityID)):
    new_communityID[communityID == n] = i
if len(cid_shape) > 1:
    new_communityID = new_communityID.reshape(cid_shape)
return new_communityID
Takes an input of community assignments and returns the community assignments reindexed with the smallest numbers possible.

Parameters
----------
communityID : array-like
    List or array of integers. Output from community detection algorithms.

Returns
-------
new_communityID : array
    Cleaned labels running from 0 to len(np.unique(communityID)) - 1.

Note
----
The lowest community integer in communityID will receive the lowest integer in new_communityID.
f1961:m11
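For example:

print(clean_community_indexes([4, 4, 9, 2]))   # [1. 1. 2. 0.]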
def multiple_contacts_get_values(C):
# an OrderedDict keeps the first-seen order of contacts while counting duplicates
d = collections.OrderedDict()
for c in C['contacts']:
    ct = tuple(c)
    if ct in d:
        d[ct] += 1
    else:
        d[ct] = 1
new_contacts = []
new_values = []
for (key, value) in d.items():
    new_values.append(value)
    new_contacts.append(key)
C_out = C
C_out['contacts'] = new_contacts
C_out['values'] = new_values
return C_out
Given a contact representation with repeated contacts, this function removes duplicates and counts them as values.

Parameters
----------
C : dict
    Contact representation with multiple repeated contacts.

Returns
-------
C_out : dict
    Contact representation with duplicate contacts removed; the number of duplicates is now in the 'values' field.
f1961:m12
def df_to_array(df, netshape, nettype):
if len(df) > 0:
    idx = np.array(list(map(list, df.values)))
    G = np.zeros([netshape[0], netshape[0], netshape[1]])
    if idx.shape[1] == 3:
        # binary edge list (i, j, t); mirror the edges if undirected
        if nettype[-1] == 'u':
            idx = np.vstack([idx, idx[:, [1, 0, 2]]])
        idx = idx.astype(int)
        G[idx[:, 0], idx[:, 1], idx[:, 2]] = 1
    elif idx.shape[1] == 4:
        # weighted edge list (i, j, t, weight)
        if nettype[-1] == 'u':
            idx = np.vstack([idx, idx[:, [1, 0, 2, 3]]])
        weights = idx[:, 3]
        idx = np.array(idx[:, :3], dtype=int)
        G[idx[:, 0], idx[:, 1], idx[:, 2]] = weights
else:
    G = np.zeros([netshape[0], netshape[0], netshape[1]])
return G
Returns a numpy array (snapshot representation) from a dataframe contact list.

Parameters
----------
df : pandas df
    Pandas dataframe with columns i, j, t (and optionally weight).
netshape : tuple
    Network shape, format: (node, time).
nettype : str
    'wu', 'wd', 'bu' or 'bd'.

Returns
-------
G : array
    (node, node, time) array for the network.
f1961:m13
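For example, a two-edge undirected contact list:

import numpy as np
import pandas as pd
df = pd.DataFrame({'i': [0, 1], 'j': [1, 2], 't': [0, 1]})
G = df_to_array(df, netshape=(3, 2), nettype='bu')
print(G[0, 1, 0], G[1, 0, 0])   # 1.0 1.0 (undirected edges are mirrored)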
def check_distance_funciton_input(distance_func_name, netinfo):
# the default metric names ('hamming' for binary, 'euclidean' for weighted) are assumptions
# consistent with getDistanceFunction above
if distance_func_name == 'default' and netinfo['nettype'][0] == 'b':
    print('Default distance function specified. As the network is binary, using Hamming.')
    distance_func_name = 'hamming'
elif distance_func_name == 'default' and netinfo['nettype'][0] == 'w':
    distance_func_name = 'euclidean'
    print('Default distance function specified. '
          'As the network is weighted, using Euclidean.')
return distance_func_name
Checks distance_func_name and, if it is specified as 'default', selects a default distance function given the type of the network.

Parameters
----------
distance_func_name : str
    Distance function name.
netinfo : dict
    The output of utils.process_input.

Returns
-------
distance_func_name : str
    Distance function name.
f1961:m14
def load_parcellation_coords(parcellation_name):
# the path fragments around the parcellation name are masked ('<STR_LIT>') in the source dump
path = tenetopath[0] + '<STR_LIT>' + parcellation_name + '<STR_LIT>'
parc = np.loadtxt(path, skiprows=1, delimiter=',', usecols=[1, 2, 3])
return parc
Loads the coordinates of the included parcellations.

Parameters
----------
parcellation_name : str
    Options: 'gordon2014_333', 'power2012_264', 'shen2013_278'.

Returns
-------
parc : array
    Parcellation coordinates.
f1961:m15
def make_parcellation(data_path, parcellation, parc_type=None, parc_params=None):
# the '+OH' / '+SUIT' suffixes and the 'sphere' / 'region' parc_type options come from the
# docstring; remaining '<STR_LIT>' placeholders are masked paths/keys in the source dump
if isinstance(parcellation, str):
    parcin = ''
    if '+' in parcellation:
        parcin = parcellation
        parcellation = parcellation.split('+')[0]
    if '+OH' in parcin:
        subcortical = True
    else:
        subcortical = None
    if '+SUIT' in parcin:
        cerebellar = True
    else:
        cerebellar = None
    if not parc_type or not parc_params:
        path = tenetopath[0] + '<STR_LIT>'
        with open(path) as data_file:
            defaults = json.load(data_file)
    if not parc_type:
        parc_type = defaults[parcellation]['type']
        print('Using the default parc_type for this parcellation.')
    if not parc_params:
        parc_params = defaults[parcellation]['<STR_LIT>']
        print('Using the default parc_params for this parcellation.')
if parc_type == 'sphere':
    parcellation = load_parcellation_coords(parcellation)
    seed = NiftiSpheresMasker(np.array(parcellation), **parc_params)
    data = seed.fit_transform(data_path)
elif parc_type == 'region':
    path = tenetopath[0] + '<STR_LIT>' + parcellation + '<STR_LIT>'
    region = NiftiLabelsMasker(path, **parc_params)
    data = region.fit_transform(data_path)
else:
    raise ValueError('parc_type must be sphere or region')
if subcortical:
    subatlas = fetch_atlas_harvard_oxford('<STR_LIT>')['maps']
    region = NiftiLabelsMasker(subatlas, **parc_params)
    data_sub = region.fit_transform(data_path)
    data = np.hstack([data, data_sub])
if cerebellar:
    path = tenetopath[0] + '<STR_LIT>'
    region = NiftiLabelsMasker(path, **parc_params)
    data_cerebellar = region.fit_transform(data_path)
    data = np.hstack([data, data_cerebellar])
return data
Performs a parcellation which reduces voxel space to regions of interest (brain data).

Parameters
----------
data_path : str
    Path to .nii image.
parcellation : str
    Specify which parcellation to use. For MNI: 'gordon2014_333', 'power2012_264'. For TAL: 'shen2013_278'. It is possible to add the OH subcortical atlas on top of a cortical atlas (e.g. gordon) by appending '+OH' (Oxford-Harvard subcortical atlas), and '+SUIT' for the SUIT cerebellar atlas, e.g. 'gordon2014_333+OH+SUIT'.
parc_type : str
    Can be 'sphere' or 'region'. If nothing is specified, the default for that parcellation is used.
parc_params : dict
    **kwargs for the nilearn masker functions.

Returns
-------
data : array
    Data after the parcellation.

NOTE
----
These functions make use of nilearn. Please cite nilearn if used in a publication.
f1961:m16
def create_traj_ranges(start, stop, N):
steps = (1.0 / (N - 1)) * (stop - start)
if np.isscalar(steps):
    return steps * np.arange(N) + start
else:
    return steps[:, None] * np.arange(N) + start[:, None]
Fills in the trajectory range: N evenly spaced values from start to stop, vectorized over arrays of start/stop values. Adapted from https://stackoverflow.com/a/40624614
f1961:m17
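For example, with a scalar and a vector of start/stop pairs:

import numpy as np
print(create_traj_ranges(0, 4, 5))       # [0. 1. 2. 3. 4.]
print(create_traj_ranges(np.array([0, 10]), np.array([4, 18]), 5))
# [[ 0.  1.  2.  3.  4.]
#  [10. 12. 14. 16. 18.]]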
def get_dimord(measure, calc=None, community=None):
# most measure names and dimord strings are masked ('<STR_LIT>') in the source dump;
# only the structure and the final 'time' entry survive
if not calc:
    calc = ''
else:
    calc = '_' + calc
if not community:
    community = ''
else:
    community = '<STR_LIT>'
if '<STR_LIT>' in calc and '<STR_LIT>' in community:
    community = '<STR_LIT>'
if calc == '<STR_LIT>' or calc == '<STR_LIT>':
    community = '<STR_LIT>'
dimord_dict = {
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': '<STR_LIT>',
    '<STR_LIT>': 'time',
}
if measure + calc + community in dimord_dict:
    return dimord_dict[measure + calc + community]
else:
    print('Warning: dimord not found for this measure')
    return '<STR_LIT>'
Gets the dimension order of a network measure.

Parameters
----------
measure : str
    Name of a function in teneto.networkmeasures.
calc : str, default=None
    The calc parameter for the function.
community : bool, default=None
    If not None, the measure is assumed to be calculated per community.

Returns
-------
dimord : str
    Dimension order. For example, "node,node,time" defines the dimensions of the network measure.
f1961:m18
def get_network_when(tnet, i=None, j=None, t=None, ij=None, logic='and', copy=False, asarray=False):
# the hdf5 'where' query fragments and the 'and'/'or' logic strings are reconstructed to
# mirror the pandas branch below; treat the exact query strings as assumptions
if isinstance(tnet, pd.DataFrame):
    network = tnet
    hdf5 = False
elif isinstance(tnet, object):
    network = tnet.network
    hdf5 = tnet.hdf5
if ij is not None and (i is not None or j is not None):
    raise ValueError('ij cannot be specified along with i or j')
if i is not None and not isinstance(i, list):
    i = [i]
if j is not None and not isinstance(j, list):
    j = [j]
if t is not None and not isinstance(t, list):
    t = [t]
if ij is not None and not isinstance(ij, list):
    ij = [ij]
if hdf5:
    if i is not None and j is not None and t is not None and logic == 'and':
        isinstr = 'i in ' + str(i) + ' & ' + 'j in ' + str(j) + ' & ' + 't in ' + str(t)
    elif ij is not None and t is not None and logic == 'and':
        isinstr = 'i in ' + str(ij) + ' | ' + 'j in ' + str(ij) + ' & ' + 't in ' + str(t)
    elif ij is not None and t is not None and logic == 'or':
        isinstr = 'i in ' + str(ij) + ' | ' + 'j in ' + str(ij) + ' | ' + 't in ' + str(t)
    elif i is not None and j is not None and logic == 'and':
        isinstr = 'i in ' + str(i) + ' & ' + 'j in ' + str(j)
    elif i is not None and t is not None and logic == 'and':
        isinstr = 'i in ' + str(i) + ' & ' + 't in ' + str(t)
    elif j is not None and t is not None and logic == 'and':
        isinstr = 'j in ' + str(j) + ' & ' + 't in ' + str(t)
    elif i is not None and j is not None and t is not None and logic == 'or':
        isinstr = 'i in ' + str(i) + ' | ' + 'j in ' + str(j) + ' | ' + 't in ' + str(t)
    elif i is not None and j is not None and logic == 'or':
        isinstr = 'i in ' + str(i) + ' | ' + 'j in ' + str(j)
    elif i is not None and t is not None and logic == 'or':
        isinstr = 'i in ' + str(i) + ' | ' + 't in ' + str(t)
    elif j is not None and t is not None and logic == 'or':
        isinstr = 'j in ' + str(j) + ' | ' + 't in ' + str(t)
    elif i is not None:
        isinstr = 'i in ' + str(i)
    elif j is not None:
        isinstr = 'j in ' + str(j)
    elif t is not None:
        isinstr = 't in ' + str(t)
    elif ij is not None:
        isinstr = 'i in ' + str(ij) + ' | ' + 'j in ' + str(ij)
    df = pd.read_hdf(network, where=isinstr)
else:
    if i is not None and j is not None and t is not None and logic == 'and':
        df = network[(network['i'].isin(i)) &
                     (network['j'].isin(j)) & (network['t'].isin(t))]
    elif ij is not None and t is not None and logic == 'and':
        df = network[((network['i'].isin(ij)) |
                      (network['j'].isin(ij))) & (network['t'].isin(t))]
    elif ij is not None and t is not None and logic == 'or':
        df = network[((network['i'].isin(ij)) |
                      (network['j'].isin(ij))) | (network['t'].isin(t))]
    elif i is not None and j is not None and logic == 'and':
        df = network[(network['i'].isin(i)) & (network['j'].isin(j))]
    elif i is not None and t is not None and logic == 'and':
        df = network[(network['i'].isin(i)) & (network['t'].isin(t))]
    elif j is not None and t is not None and logic == 'and':
        df = network[(network['j'].isin(j)) & (network['t'].isin(t))]
    elif i is not None and j is not None and t is not None and logic == 'or':
        df = network[(network['i'].isin(i)) |
                     (network['j'].isin(j)) | (network['t'].isin(t))]
    elif i is not None and j is not None and logic == 'or':
        df = network[(network['i'].isin(i)) | (network['j'].isin(j))]
    elif i is not None and t is not None and logic == 'or':
        df = network[(network['i'].isin(i)) | (network['t'].isin(t))]
    elif j is not None and t is not None and logic == 'or':
        df = network[(network['j'].isin(j)) | (network['t'].isin(t))]
    elif i is not None:
        df = network[network['i'].isin(i)]
    elif j is not None:
        df = network[network['j'].isin(j)]
    elif t is not None:
        df = network[network['t'].isin(t)]
    elif ij is not None:
        df = network[(network['i'].isin(ij)) | (network['j'].isin(ij))]
    if copy:
        df = df.copy()
if asarray:
    df = df.values
return df
Returns the subset of a temporal network edge list that matches the given indices.

Parameters
----------
tnet : df or TemporalNetwork
    TemporalNetwork object or pandas dataframe edge list.
i : list or int
    Get nodes in column i (source nodes in directed networks).
j : list or int
    Get nodes in column j (target nodes in directed networks).
t : list or int
    Get edges at these time points.
ij : list or int
    Get nodes in column i or j (the 'and' logic can still apply to t). Cannot be specified along with i or j.
logic : str
    Options: 'and' or 'or'. If 'and', the function returns rows that match all of the i, j, t arguments. If 'or', rows only have to match one of them.
copy : bool, default False
    If True, returns a copy of the dataframe. Only relevant for hdf5 data.
asarray : bool, default False
    If True, returns the list of edges as an array.

Returns
-------
df : pandas dataframe
    Unless asarray is set to True.
f1961:m19
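Example usage on a plain edge-list dataframe (the default logic='and' is a restored assumption):

import pandas as pd
net = pd.DataFrame({'i': [0, 0, 1], 'j': [1, 2, 2], 't': [0, 1, 1]})
print(get_network_when(net, i=0, t=1))    # the (0, 2) edge at t=1
print(get_network_when(net, ij=2))        # all edges where node 2 is either endpoint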
def create_supraadjacency_matrix(tnet, intersliceweight=<NUM_LIT:1>):
# the supra-node index of node n at time t is n + N*t
newnetwork = tnet.network.copy()
newnetwork['i'] = (tnet.network['i']) + ((tnet.netshape[0]) * (tnet.network['t']))
newnetwork['j'] = (tnet.network['j']) + ((tnet.netshape[0]) * (tnet.network['t']))
if 'weight' not in newnetwork.columns:
    newnetwork['weight'] = 1
newnetwork.drop('t', axis=1, inplace=True)
# add interslice links connecting each node to itself at the next time point
timepointconns = pd.DataFrame()
timepointconns['i'] = np.arange(0, (tnet.N * tnet.T) - tnet.N)
timepointconns['j'] = np.arange(tnet.N, (tnet.N * tnet.T))
timepointconns['weight'] = intersliceweight
supranet = pd.concat([newnetwork, timepointconns]).reset_index(drop=True)
return supranet
Returns a supra-adjacency matrix from a temporal network structure.

Parameters
----------
tnet : TemporalNetwork
    Temporal network (any network type).
intersliceweight : int
    Weight that links the same node between adjacent time points.

Returns
-------
supranet : dataframe
    Supra-adjacency matrix as an edge list.
f1961:m20
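A hedged sketch (the TemporalNetwork constructor is an assumption, as above): with N=3 nodes and T=2 time points, node n at time t becomes supra-node n + 3*t, and interslice links (0,3), (1,4), (2,5) couple each node to itself at the next time point.

import numpy as np
G = np.zeros((3, 3, 2))
G[0, 1, 0] = 1
G[1, 2, 1] = 1
tnet = TemporalNetwork(from_array=G)      # assumed constructor
supra = create_supraadjacency_matrix(tnet, intersliceweight=1)
print(supra)                              # re-indexed edges plus the interslice self-links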
def tnet_to_nx(df, t=None):
if t is not None:
    df = get_network_when(df, t=t)
if 'weight' in df.columns:
    nxobj = nx.from_pandas_edgelist(df, source='i', target='j', edge_attr='weight')
else:
    nxobj = nx.from_pandas_edgelist(df, source='i', target='j')
return nxobj
Creates an undirected networkx Graph from an edge-list dataframe with columns i, j (and optionally weight), optionally restricted to time point t.
f1962:m0
def gen_report(report, sdir='<STR_LIT>', report_name='<STR_LIT>'):
# the html snippets and report key names are masked ('<STR_LIT>') in the source dump;
# only the control flow is restored here
if not os.path.exists(sdir):
    os.makedirs(sdir)
if sdir[-1] != '/':
    sdir += '/'
report_html = '<STR_LIT>'
if '<STR_LIT>' in report.keys():
    report_html += "<STR_LIT>" + report['<STR_LIT>'] + "<STR_LIT>"
    for i in report[report['<STR_LIT>']]:
        if i == '<STR_LIT>':
            # plot the weights used by the derivation method
            fig, ax = plt.subplots(1)
            ax.plot(report[report['<STR_LIT>']]['<STR_LIT>'],
                    report[report['<STR_LIT>']]['<STR_LIT>'])
            ax.set_xlabel('<STR_LIT>')
            ax.set_title('<STR_LIT>' + report[report['<STR_LIT>']]['<STR_LIT>'] + '<STR_LIT>')
            fig.savefig(sdir + '<STR_LIT>')
            report_html += "<STR_LIT>" + "<STR_LIT>"
        else:
            report_html += "<STR_LIT>" + i + "<STR_LIT>" + str(report[report['<STR_LIT>']][i]) + "<STR_LIT>"
if '<STR_LIT>' in report.keys():
    report_html += "<STR_LIT>"
    report_html += "<STR_LIT>"
    for i in report['<STR_LIT>']:
        report_html += " " + i + ","
    for i in report['<STR_LIT>']:
        report_html += "<STR_LIT>" + i + "<STR_LIT>"
        for j in report[i]:
            if j == '<STR_LIT>':
                # histogram of the box-cox lambda values
                report_html += "<STR_LIT>" + j + "<STR_LIT>" + "<STR_LIT>"
                lambda_val = np.array(report['<STR_LIT>']['<STR_LIT>'])
                fig, ax = plt.subplots(1)
                ax.hist(lambda_val[:, -1])
                ax.set_xlabel('<STR_LIT>')
                ax.set_ylabel('<STR_LIT>')
                ax.set_title('<STR_LIT>')
                fig.savefig(sdir + '<STR_LIT>')
                report_html += "<STR_LIT>" + "<STR_LIT>"
                report_html += "<STR_LIT>" + sdir + "<STR_LIT>"
                np.savetxt(sdir + "<STR_LIT>", lambda_val, delimiter=",")
            else:
                report_html += "<STR_LIT>" + j + "<STR_LIT>" + str(report[i][j]) + "<STR_LIT>"
report_html += '<STR_LIT>'
with open(sdir + report_name, 'w') as file:
    file.write(report_html)
Generates an HTML report of the derivation and postprocess steps in teneto.derive, saving it (plus figures) in sdir.
f1964:m0
def postpro_fisher(data, report=None):
# the report key names ('fisher', 'performed') are assumptions; the clipping bound is masked
if not report:
    report = {}
# values beyond the (masked) bound are clamped to +/-1
data[data < -<NUM_LIT>] = -1
data[data > <NUM_LIT>] = 1
fisher_data = 0.5 * np.log((1 + data) / (1 - data))
report['fisher'] = {}
report['fisher']['performed'] = 'yes'
return fisher_data, report
Performs a Fisher transform on everything in data. If a report variable is passed, information about the transform is added to the report.
f1965:m0
def postpro_boxcox(data, report=None):
# assumes: import scipy as sp; the report key names ('boxcox', 'performed', 'lambda', ...)
# are assumptions based on their use in gen_report, and the failure messages are paraphrases
if not report:
    report = {}
# shift the data so its minimum is 1 (box-cox requires positive data)
mindata = 1 - np.nanmin(data)
data = data + mindata
ind = np.triu_indices(data.shape[0], k=1)
boxcox_list = np.array([sp.stats.boxcox(np.squeeze(
    data[ind[0][n], ind[1][n], :])) for n in range(0, len(ind[0]))])
boxcox_data = np.zeros(data.shape)
boxcox_data[ind[0], ind[1], :] = np.vstack(boxcox_list[:, 0])
boxcox_data[ind[1], ind[0], :] = np.vstack(boxcox_list[:, 0])
bccheck = np.array(np.transpose(boxcox_data, [2, 0, 1]))
bccheck = (bccheck - bccheck.mean(axis=0)) / bccheck.std(axis=0)
bccheck = np.squeeze(np.mean(bccheck, axis=0))
np.fill_diagonal(bccheck, 0)
report['boxcox'] = {}
report['boxcox']['performed'] = 'yes'
report['boxcox']['lambda'] = [
    tuple([ind[0][n], ind[1][n], boxcox_list[n, -1]]) for n in range(0, len(ind[0]))]
report['boxcox']['shift'] = mindata
report['boxcox']['shift_constant'] = 1
if np.sum(np.isnan(bccheck)) > 0:
    # transform failed for at least one edge: revert and flag it in the report
    report['boxcox'] = {}
    report['boxcox']['performed'] = 'failed'
    report['boxcox']['failure_reason'] = (
        'Box-cox transform returned NaNs for at least one edge, '
        'so the transform was not applied.')
    report['boxcox']['failure_consequence'] = (
        'The box-cox step was skipped and the unshifted data was restored.')
    boxcox_data = data - mindata
    error_msg = ('TENETO WARNING: the box-cox transform failed and was skipped. '
                 'See the report for more information.')
    print(error_msg)
return boxcox_data, report
Performs a Box-Cox transform on everything in data. If a report variable is passed, information about the transform is added to the report.
f1965:m1
def postpro_standardize(data, report=None):
# the report key names ('standardize', 'performed', 'method') are assumptions
if not report:
    report = {}
data = np.transpose(data, [2, 0, 1])
standardized_data = (data - data.mean(axis=0)) / data.std(axis=0)
standardized_data = np.transpose(standardized_data, [1, 2, 0])
report['standardize'] = {}
report['standardize']['performed'] = 'yes'
report['standardize']['method'] = '<STR_LIT>'
data = set_diagonal(data, 1)
return standardized_data, report
Standardizes everything in data (along axis -1). If a report variable is passed, information about the standardization is added to the report.
f1965:m2
def postpro_pipeline(data, pipeline, report=None):
# 'fisher', 'boxcox' and 'standardize' come from the docstring;
# each step's output is fed into the next step
postpro_functions = {
    'fisher': postpro_fisher,
    'boxcox': postpro_boxcox,
    'standardize': postpro_standardize
}
if not report:
    report = {}
if isinstance(pipeline, str):
    pipeline = pipeline.split('+')
report['pipeline'] = []
for postpro_step in pipeline:
    report['pipeline'].append(postpro_step)
    data, report = postpro_functions[postpro_step](data, report)
return data, report
PARAMETERS ----------- data : array Pearson correlation values in temporal matrix form (node,node,time) pipeline : list or str (if string, each step is separated by a + sign). :options: 'fisher','boxcox','standardize' Each of the above 3 can be specified. If fisher is used, it must be before boxcox. If standardize is used, it must be after boxcox and fisher. report : bool If true, appended to report. OUTPUT ------- postpro_data : array postprocessed data postprocessing_info : dict Information about postprocessing
f1965:m3
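A hypothetical end-to-end call; the step names are masked in the body above but, per the docstring, are 'fisher', 'boxcox' and 'standardize', chained with '+':

import numpy as np

R = np.random.uniform(-0.9, 0.9, (10, 10, 50))   # node x node x time correlations
post, info = postpro_pipeline(R, 'fisher+boxcox+standardize')
print(info)                                      # records which steps ran, in order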
def derive_temporalnetwork(data, params):
report = {}<EOL>if '<STR_LIT>' not in params.keys():<EOL><INDENT>params['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT>' not in params.keys():<EOL><INDENT>params['<STR_LIT>'] = False<EOL><DEDENT>if '<STR_LIT>' not in params.keys():<EOL><INDENT>params['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT>' not in params.keys():<EOL><INDENT>params['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if params['<STR_LIT>'] == '<STR_LIT:yes>' or params['<STR_LIT>'] == True:<EOL><INDENT>if '<STR_LIT>' not in params.keys():<EOL><INDENT>params['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT>if '<STR_LIT>' not in params.keys():<EOL><INDENT>params['<STR_LIT>'] = '<STR_LIT>' + params['<STR_LIT>']<EOL><DEDENT>if '<STR_LIT>' not in params.keys():<EOL><INDENT>params['<STR_LIT>'] = '<STR_LIT>'<EOL><DEDENT><DEDENT>if params['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>data = data.transpose()<EOL><DEDENT>if isinstance(params['<STR_LIT>'], str):<EOL><INDENT>if params['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>weights, report = _weightfun_jackknife(data.shape[<NUM_LIT:0>], report)<EOL>relation = '<STR_LIT>'<EOL><DEDENT>elif params['<STR_LIT>'] == '<STR_LIT>' or params['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>weights, report = _weightfun_sliding_window(<EOL>data.shape[<NUM_LIT:0>], params, report)<EOL>relation = '<STR_LIT>'<EOL><DEDENT>elif params['<STR_LIT>'] == '<STR_LIT>' or params['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>weights, report = _weightfun_tapered_sliding_window(<EOL>data.shape[<NUM_LIT:0>], params, report)<EOL>relation = '<STR_LIT>'<EOL><DEDENT>elif params['<STR_LIT>'] == '<STR_LIT>' or params['<STR_LIT>'] == "<STR_LIT>" or params['<STR_LIT>'] == "<STR_LIT>" or params['<STR_LIT>'] == "<STR_LIT>" or params['<STR_LIT>'] == "<STR_LIT>":<EOL><INDENT>weights, report = _weightfun_spatial_distance(data, params, report)<EOL>relation = '<STR_LIT>'<EOL><DEDENT>elif params['<STR_LIT>'] == '<STR_LIT>' or params['<STR_LIT>'] == '<STR_LIT>' or params['<STR_LIT>'] == '<STR_LIT>' or params['<STR_LIT>'] == '<STR_LIT>' or params['<STR_LIT>'] == "<STR_LIT>":<EOL><INDENT>R, report = _temporal_derivative(data, params, report)<EOL>relation = '<STR_LIT>'<EOL><DEDENT>else:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>try:<EOL><INDENT>weights = np.array(params['<STR_LIT>'])<EOL>relation = '<STR_LIT>'<EOL><DEDENT>except:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>')<EOL><DEDENT>if weights.shape[<NUM_LIT:0>] != weights.shape[<NUM_LIT:1>]:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>if weights.shape[<NUM_LIT:0>] != data.shape[<NUM_LIT:0>]:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT><DEDENT>if relation == '<STR_LIT>':<EOL><INDENT>R = np.array(<EOL>[DescrStatsW(data, weights[i, :]).corrcoef for i in range(<NUM_LIT:0>, weights.shape[<NUM_LIT:0>])])<EOL>R = R.transpose([<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:0>])<EOL><DEDENT>if params['<STR_LIT>'] == '<STR_LIT>':<EOL><INDENT>R = R * -<NUM_LIT:1><EOL>jc_z = <NUM_LIT:0><EOL>if '<STR_LIT>' in params.keys():<EOL><INDENT>R = np.transpose(R, [<NUM_LIT:2>, <NUM_LIT:0>, <NUM_LIT:1>])<EOL>R = (R - R.mean(axis=<NUM_LIT:0>)) / R.std(axis=<NUM_LIT:0>)<EOL>jc_z = <NUM_LIT:1><EOL>R = R * params['<STR_LIT>']<EOL>R = R.transpose([<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:0>])<EOL><DEDENT>if '<STR_LIT>' in params.keys():<EOL><INDENT>R = np.transpose(R, [<NUM_LIT:2>, <NUM_LIT:0>, <NUM_LIT:1>])<EOL>if jc_z == <NUM_LIT:0>:<EOL><INDENT>R = (R - R.mean(axis=<NUM_LIT:0>)) / R.std(axis=<NUM_LIT:0>)<EOL><DEDENT>R = R + params['<STR_LIT>']<EOL>R = np.transpose(R, 
[<NUM_LIT:1>, <NUM_LIT:2>, <NUM_LIT:0>])<EOL><DEDENT>R = set_diagonal(R, <NUM_LIT:1>)<EOL><DEDENT>if params['<STR_LIT>'] != '<STR_LIT>':<EOL><INDENT>R, report = postpro_pipeline(<EOL>R, params['<STR_LIT>'], report)<EOL>R = set_diagonal(R, <NUM_LIT:1>)<EOL><DEDENT>if params['<STR_LIT>'] == '<STR_LIT:yes>' or params['<STR_LIT>'] == True:<EOL><INDENT>gen_report(report, params['<STR_LIT>'], params['<STR_LIT>'])<EOL><DEDENT>return R<EOL>
Derives connectivity from the data. A lot of data is inherently built with edges (e.g. communication between two individuals). However, other networks are derived from the covariance of time series (e.g. brain networks between two regions). Deriving time-resolved networks from covariance-based metrics can be done in multiple ways, and there are other methods apart from covariance-based ones. The approach here derives a weight vector for each time point and then computes the correlation coefficient for each time point. Parameters ---------- data : array Time series data to perform connectivity derivation on. Default dimensions are (time as rows, nodes as columns). Change params['dimord'] if you want it the other way (see below). params : dict Parameters for each method (see below). Necessary parameters =================== method : str method: "distance","slidingwindow", "taperedslidingwindow", "jackknife", "multiplytemporalderivative". Alternatively, method can be a weight matrix of size time x time. **Different methods have method-specific parameters (see below)** Params for all methods (optional) ================================= postpro : "no" (default). Other alternatives are: "fisher", "boxcox", "standardize" and any combination separated by a + (e.g. "fisher+boxcox"). See postpro_pipeline for more information. dimord : str Dimension order: 'node,time' (default) or 'time,node'. People like to represent their data differently and this is an easy way to be sure that you are inputting the data in the correct way. analysis_id : str or int Added to identify a specific analysis. The generated report will be placed in './report/' + analysis_id + '/derivation_report.html' report : bool False by default. If True (or "yes"), a report is saved in ./report/[analysis_id]/derivation_report.html report_path : str String where the report is saved. Default is ./report/[analysis_id]/derivation_report.html Method-specific parameters =========================== method == "distance" ~~~~~~~~~~~~~~~~~~~ Calculates 1/distance-metric weights, and scales between 0 and 1. W[t,t] is excluded from the scaling and then set to 1. params['distance']: str Distance metric (e.g. 'euclidean'). See teneto.utils.getDistanceFunction for more info When method == "slidingwindow" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ params['windowsize'] : int Size of window. When method == "taperedslidingwindow" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ params['windowsize'] : int Size of window. params['distribution'] : str Scipy distribution (e.g. 'norm','expon'). Any distribution here: https://docs.scipy.org/doc/scipy/reference/stats.html params['distribution_params'] : list Each parameter, excluding the data "x" (in their scipy function order) to generate the pdf. NOTE: the data x should be considered to be centered at 0 and have a length of window size (i.e. a window size of 5 entails x is [-2, -1, 0, 1, 2]; a window size of 6 entails [-2.5, -1.5, -0.5, 0.5, 1.5, 2.5]). Given x, params['distribution_params'] contains the remaining parameters. E.g. the normal distribution requires pdf(x, loc, scale) where loc=mean and scale=std. This means that the mean and std have to be provided in distribution_params. Say we have a gaussian distribution, a window size of 21 and params['distribution_params'] is [0,5]. This will lead to a gaussian with its peak in the middle of each window and a standard deviation of 5. Instead, if we set params['distribution_params'] to [10,5] this will lead to a half gaussian with its peak at the final time point and a standard deviation of 5.
When method == "temporalderivative" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ params['windowsize'] : int Size of window. When method == "jackknife" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ No parameters are necessary. Optional parameters: params['weight-var'] : array, (optional) NxN array to weight the JC estimates (standardized-JC*W). If weightby is selected, do not standardize in postpro. params['weight-mean'] : array, (optional) NxN array to weight the JC estimates (standardized-JC+W). If weightby is selected, do not standardize in postpro. Returns ------- G : array Connectivity estimates (nodes x nodes x time) READ MORE --------- About the general weighted Pearson approach used for most methods, see: Thompson & Fransson (2019) A common framework for the problem of deriving estimates of dynamic functional brain connectivity. Neuroimage. (https://doi.org/10.1016/j.neuroimage.2017.12.057) SEE ALSO -------- *postpro_pipeline*, *gen_report*
f1966:m0
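A sketch of a call, under the assumption that the masked dictionary keys match the names in the docstring ('method', 'windowsize', 'dimord', 'postpro'):

import numpy as np

data = np.random.randn(120, 10)          # time x node
params = {
    'method': 'slidingwindow',           # key names assumed from the docstring
    'windowsize': 20,
    'dimord': 'time,node',
    'postpro': 'fisher+standardize',
}
G = derive_temporalnetwork(data, params)
print(G.shape)                           # node x node x windows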
def _weightfun_jackknife(T, report):
weights = np.ones([T, T])<EOL>np.fill_diagonal(weights, <NUM_LIT:0>)<EOL>report['<STR_LIT>'] = '<STR_LIT>'<EOL>report['<STR_LIT>'] = '<STR_LIT>'<EOL>return weights, report<EOL>
Creates the weights for the jackknife method. See func: teneto.derive.derive.
f1966:m1
def _weightfun_sliding_window(T, params, report):
weightat0 = np.zeros(T)<EOL>weightat0[<NUM_LIT:0>:params['<STR_LIT>']] = np.ones(params['<STR_LIT>'])<EOL>weights = np.array([np.roll(weightat0, i)<EOL>for i in range(<NUM_LIT:0>, T + <NUM_LIT:1> - params['<STR_LIT>'])])<EOL>report['<STR_LIT>'] = '<STR_LIT>'<EOL>report['<STR_LIT>'] = params<EOL>report['<STR_LIT>']['<STR_LIT>'] = '<STR_LIT>'<EOL>return weights, report<EOL>
Creates the weights for the sliding window method. See func: teneto.derive.derive.
f1966:m2
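The weight matrix this builds is just a stack of shifted boxcars, one row per window:

import numpy as np

T, windowsize = 8, 3
weightat0 = np.zeros(T)
weightat0[:windowsize] = 1
weights = np.array([np.roll(weightat0, i) for i in range(T + 1 - windowsize)])
print(weights)   # (T - windowsize + 1) x T: each row selects one window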
def _weightfun_tapered_sliding_window(T, params, report):
x = np.arange(-(params['<STR_LIT>'] - <NUM_LIT:1>) / <NUM_LIT:2>, (params['<STR_LIT>']) / <NUM_LIT:2>)<EOL>distribution_parameters = '<STR_LIT:U+002C>'.join(map(str, params['<STR_LIT>']))<EOL>taper = eval('<STR_LIT>' + params['<STR_LIT>'] +<EOL>'<STR_LIT>' + distribution_parameters + '<STR_LIT:)>')<EOL>weightat0 = np.zeros(T)<EOL>weightat0[<NUM_LIT:0>:params['<STR_LIT>']] = taper<EOL>weights = np.array([np.roll(weightat0, i)<EOL>for i in range(<NUM_LIT:0>, T + <NUM_LIT:1> - params['<STR_LIT>'])])<EOL>report['<STR_LIT>'] = '<STR_LIT>'<EOL>report['<STR_LIT>'] = params<EOL>report['<STR_LIT>']['<STR_LIT>'] = taper<EOL>report['<STR_LIT>']['<STR_LIT>'] = x<EOL>return weights, report<EOL>
Creates the weights for the tapered method. See func: teneto.derive.derive.
f1966:m3
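Instead of the eval-based construction above, the taper for a 'norm' distribution can be built directly; here loc=0 and scale=5 are the two distribution_params, giving a gaussian peaking mid-window:

import numpy as np
import scipy.stats

windowsize = 21
x = np.arange(-(windowsize - 1) / 2, windowsize / 2)  # centered support, as in the body
taper = scipy.stats.norm.pdf(x, 0, 5)                 # pdf(x, loc, scale)
print(taper.argmax())                                 # 10, the window midpoint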
def _weightfun_spatial_distance(data, params, report):
distance = getDistanceFunction(params['<STR_LIT>'])<EOL>weights = np.array([distance(data[n, :], data[t, :]) for n in np.arange(<EOL><NUM_LIT:0>, data.shape[<NUM_LIT:0>]) for t in np.arange(<NUM_LIT:0>, data.shape[<NUM_LIT:0>])])<EOL>weights = np.reshape(weights, [data.shape[<NUM_LIT:0>], data.shape[<NUM_LIT:0>]])<EOL>np.fill_diagonal(weights, np.nan)<EOL>weights = <NUM_LIT:1> / weights<EOL>weights = (weights - np.nanmin(weights)) /(np.nanmax(weights) - np.nanmin(weights))<EOL>np.fill_diagonal(weights, <NUM_LIT:1>)<EOL>return weights, report<EOL>
Creates the weights for the spatial distance method. See func: teneto.derive.derive.
f1966:m4
def _temporal_derivative(data, params, report):
<EOL>report = {}<EOL>tdat = data[<NUM_LIT:1>:, :] - data[:-<NUM_LIT:1>, :]<EOL>tdat = tdat / np.std(tdat, axis=<NUM_LIT:0>)<EOL>coupling = np.array([tdat[:, i] * tdat[:, j] for i in np.arange(<NUM_LIT:0>,<EOL>tdat.shape[<NUM_LIT:1>]) for j in np.arange(<NUM_LIT:0>, tdat.shape[<NUM_LIT:1>])])<EOL>coupling = np.reshape(<EOL>coupling, [tdat.shape[<NUM_LIT:1>], tdat.shape[<NUM_LIT:1>], tdat.shape[<NUM_LIT:0>]])<EOL>shape = coupling.shape[:-<NUM_LIT:1>] + (coupling.shape[-<NUM_LIT:1>] -<EOL>params['<STR_LIT>'] + <NUM_LIT:1>, params['<STR_LIT>'])<EOL>strides = coupling.strides + (coupling.strides[-<NUM_LIT:1>],)<EOL>coupling_windowed = np.mean(np.lib.stride_tricks.as_strided(<EOL>coupling, shape=shape, strides=strides), -<NUM_LIT:1>)<EOL>report = {}<EOL>report['<STR_LIT>'] = '<STR_LIT>'<EOL>report['<STR_LIT>'] = {}<EOL>report['<STR_LIT>']['<STR_LIT>'] = params['<STR_LIT>']<EOL>return coupling_windowed, report<EOL>
Performs mtd method. See func: teneto.derive.derive.
f1966:m5
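The multiply-temporal-derivatives (MTD) core for a single node pair, without the strided windowing trick used above:

import numpy as np

ts = np.random.randn(100, 2)                  # time x 2 nodes
d = np.diff(ts, axis=0)                       # temporal derivatives
d = d / d.std(axis=0)                         # standardize each derivative series
coupling = d[:, 0] * d[:, 1]                  # pointwise product = raw MTD
w = 7
mtd = np.convolve(coupling, np.ones(w) / w, mode='valid')  # windowed average
print(mtd.shape)                              # (99 - 7 + 1,) = (93,)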
def make_dash_table(df):
table = []<EOL>for index, row in df.iterrows():<EOL><INDENT>html_row = []<EOL>for i in range(len(row)):<EOL><INDENT>html_row.append(html.Td([row[i]]))<EOL><DEDENT>table.append(html.Tr(html_row))<EOL><DEDENT>return table<EOL>
Return a Dash definition of an HTML table for a Pandas dataframe.
f1984:m0
def _r():
return '<STR_LIT>' % (random.randint(<NUM_LIT:0>, <NUM_LIT:255>), random.randint(<NUM_LIT:0>, <NUM_LIT:255>), random.randint(<NUM_LIT:0>, <NUM_LIT:255>))<EOL>
generate random color
f1996:m0
def align_yaxis_np(axes):
axes = np.array(axes)<EOL>extrema = np.array([ax.get_ylim() for ax in axes])<EOL>for i in range(len(extrema)):<EOL><INDENT>if np.isclose(extrema[i, <NUM_LIT:0>], <NUM_LIT:0.0>):<EOL><INDENT>extrema[i, <NUM_LIT:0>] = -<NUM_LIT:1><EOL><DEDENT>if np.isclose(extrema[i, <NUM_LIT:1>], <NUM_LIT:0.0>):<EOL><INDENT>extrema[i, <NUM_LIT:1>] = <NUM_LIT:1><EOL><DEDENT><DEDENT>lowers = extrema[:, <NUM_LIT:0>]<EOL>uppers = extrema[:, <NUM_LIT:1>]<EOL>all_positive = False<EOL>all_negative = False<EOL>if lowers.min() > <NUM_LIT:0.0>:<EOL><INDENT>all_positive = True<EOL><DEDENT>if uppers.max() < <NUM_LIT:0.0>:<EOL><INDENT>all_negative = True<EOL><DEDENT>if all_negative or all_positive:<EOL><INDENT>return<EOL><DEDENT>res = abs(uppers+lowers)<EOL>min_index = np.argmin(res)<EOL>multiplier1 = abs(uppers[min_index]/lowers[min_index])<EOL>multiplier2 = abs(lowers[min_index]/uppers[min_index])<EOL>for i in range(len(extrema)):<EOL><INDENT>if i != min_index:<EOL><INDENT>lower_change = extrema[i, <NUM_LIT:1>] * -<NUM_LIT:1>*multiplier2<EOL>upper_change = extrema[i, <NUM_LIT:0>] * -<NUM_LIT:1>*multiplier1<EOL>if upper_change < extrema[i, <NUM_LIT:1>]:<EOL><INDENT>extrema[i, <NUM_LIT:0>] = lower_change<EOL><DEDENT>else:<EOL><INDENT>extrema[i, <NUM_LIT:1>] = upper_change<EOL><DEDENT><DEDENT>extrema[i, <NUM_LIT:0>] *= <NUM_LIT><EOL>extrema[i, <NUM_LIT:1>] *= <NUM_LIT><EOL><DEDENT>[axes[i].set_ylim(*extrema[i]) for i in range(len(extrema))]<EOL>
Align the zeros of the given axes, zooming them out by the same ratio.
f1996:m4
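Typical usage with a twin axis, where the two y ranges differ but the zeros should line up:

import numpy as np
import matplotlib.pyplot as plt

fig, ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(np.linspace(-1, 3, 50))
ax2.plot(np.linspace(-40, 10, 50), color='red')
align_yaxis_np([ax1, ax2])   # zeros of both axes now sit at the same height
plt.show()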
def list_to_cells(lst):
cells = '<STR_LIT>'<EOL>for cell in lst:<EOL><INDENT>to_add = '<STR_LIT>' + '<STR_LIT>'.join(cell) + '<STR_LIT>'<EOL>cells += to_add<EOL><DEDENT>cells = cells[:-<NUM_LIT:1>] + '<STR_LIT>'<EOL>nb = '<STR_LIT:{>' + cells + '<STR_LIT>'<EOL>return nbformat.writes(nbformat.reads(nb, as_version=<NUM_LIT:4>)).encode('<STR_LIT:utf-8>')<EOL>
Convert a list of cells to notebook form. The list should be of the form: [[list of strings representing Python code for a cell]]
f2007:m0
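Hypothetical usage (the JSON cell templates are masked above, so this only shows the calling convention):

cells = [["import numpy as np", "print(np.pi)"],
         ["print('second cell')"]]
nb_bytes = list_to_cells(cells)    # UTF-8 bytes of a v4 notebook
with open('generated.ipynb', 'wb') as f:
    f.write(nb_bytes)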
def set_var(var, set_='<STR_LIT>'):
if isinstance(set_, str):<EOL><INDENT>to_set = json.dumps(set_)<EOL><DEDENT>elif isinstance(set_, dict) or isinstance(set_, list):<EOL><INDENT>try:<EOL><INDENT>to_set = json.dumps(set_)<EOL><DEDENT>except (TypeError, ValueError):<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT><DEDENT>else:<EOL><INDENT>raise Exception('<STR_LIT>')<EOL><DEDENT>os.environ['<STR_LIT>' + var] = to_set<EOL>
set var outside notebook
f2007:m1
def get_var(var, default='<STR_LIT>'):
ret = os.environ.get('<STR_LIT>' + var)<EOL>if ret is None:<EOL><INDENT>return default<EOL><DEDENT>return json.loads(ret)<EOL>
get var inside notebook
f2007:m2
def scrub_output_pre_save(model, **kwargs):
<EOL>if model['<STR_LIT:type>'] != '<STR_LIT>':<EOL><INDENT>return<EOL><DEDENT>if model['<STR_LIT:content>']['<STR_LIT>'] != <NUM_LIT:4>:<EOL><INDENT>return<EOL><DEDENT>for cell in model['<STR_LIT:content>']['<STR_LIT>']:<EOL><INDENT>if cell['<STR_LIT>'] != '<STR_LIT:code>':<EOL><INDENT>continue<EOL><DEDENT>cell['<STR_LIT>'] = []<EOL>cell['<STR_LIT>'] = None<EOL><DEDENT>
scrub output before saving notebooks
f2012:m0
def script_post_save(model, os_path, contents_manager, **kwargs):
from nbconvert.exporters.script import ScriptExporter<EOL>if model['<STR_LIT:type>'] != '<STR_LIT>':<EOL><INDENT>return<EOL><DEDENT>global _script_exporter<EOL>if _script_exporter is None:<EOL><INDENT>_script_exporter = ScriptExporter(parent=contents_manager)<EOL><DEDENT>log = contents_manager.log<EOL>base, ext = os.path.splitext(os_path)<EOL>script, resources = _script_exporter.from_filename(os_path)<EOL>script_fname = base + resources.get('<STR_LIT>', '<STR_LIT>')<EOL>log.info("<STR_LIT>", to_api_path(script_fname, contents_manager.root_dir))<EOL>with io.open(script_fname, '<STR_LIT:w>', encoding='<STR_LIT:utf-8>') as f:<EOL><INDENT>f.write(script)<EOL><DEDENT>
Convert notebooks to a Python script after save, using nbconvert. Replaces `ipython notebook --script`.
f2013:m0
def close(self):
if not self.closed:<EOL><INDENT>self._ipython.events.unregister('<STR_LIT>', self._fill)<EOL>self._box.close()<EOL>self.closed = True<EOL><DEDENT>
Close and remove hooks.
f2015:c0:m1
def _fill(self):
types_to_exclude = ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>',<EOL>'<STR_LIT>', '<STR_LIT>', '<STR_LIT:type>', '<STR_LIT>']<EOL>values = self.namespace.who_ls()<EOL>def eval(expr):<EOL><INDENT>return self.namespace.shell.ev(expr)<EOL><DEDENT>var = [(v,<EOL>type(eval(v)).__name__,<EOL>str(_getsizeof(eval(v))),<EOL>str(_getshapeof(eval(v))) if _getshapeof(eval(v)) else '<STR_LIT>',<EOL>str(eval(v))[:<NUM_LIT:200>])<EOL>for v in values if (v not in ['<STR_LIT>', '<STR_LIT>', '<STR_LIT>', '<STR_LIT>']) & (type(eval(v)).__name__ not in types_to_exclude)]<EOL>self._table.value = '<STR_LIT>' +'<STR_LIT>'.join(['<STR_LIT>'.format(v1, v2, v3, v4, v5) for v1, v2, v3, v4, v5 in var]) +'<STR_LIT>'<EOL>
Fill self with variable information.
f2015:c0:m2
def _ipython_display_(self):
with self._sc:<EOL><INDENT>self._box._ipython_display_()<EOL><DEDENT>
Called when display() or pyout is used to display the Variable Inspector.
f2015:c0:m3
def pivot_pandas_to_excel(soup, show_intermediate_breakdown=False, show_total_breakdown=False):
tables = soup.findAll('<STR_LIT>')<EOL>for table in tables:<EOL><INDENT>table.thead.findChildren('<STR_LIT>')[<NUM_LIT:1>].decompose()<EOL>new_body = Tag(name='<STR_LIT>')<EOL>bc = <NUM_LIT:0><EOL>num_columns_max = max(len(row.findAll()) for row in table.tbody.findAll('<STR_LIT>'))<EOL>num_headers_max = max(len(row.findAll('<STR_LIT>')) for row in table.tbody.findAll('<STR_LIT>'))<EOL>last = False<EOL>for row in table.tbody.findChildren('<STR_LIT>'):<EOL><INDENT>headers = list(row.findChildren('<STR_LIT>'))<EOL>data = list(row.findChildren('<STR_LIT>'))<EOL>if len(headers) > <NUM_LIT:1>:<EOL><INDENT>if '<STR_LIT>' in headers[<NUM_LIT:0>].contents:<EOL><INDENT>last = True<EOL><DEDENT>indent = <NUM_LIT:0><EOL>first_header = (len(headers) == num_headers_max)<EOL>if not last:<EOL><INDENT>for header in headers[:-<NUM_LIT:1>]:<EOL><INDENT>new_row = Tag(name='<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>'}) if first_header else Tag(name='<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>'})<EOL>if not header.contents:<EOL><INDENT>continue<EOL><DEDENT>new_header = Tag(name='<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>' + str(<NUM_LIT:10>*(num_headers_max - len(headers) + indent)) + '<STR_LIT>'}) if first_header else Tag(name='<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>' + str(<NUM_LIT:10>*(num_headers_max - len(headers) + indent)) + '<STR_LIT>'})<EOL>new_header.contents = header.contents<EOL>new_row.insert(<NUM_LIT:0>, new_header)<EOL>for j in range(num_columns_max-<NUM_LIT:1>):<EOL><INDENT>new_row.insert(j+<NUM_LIT:1>, Tag(name='<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>'})) if first_header else new_row.insert(j+<NUM_LIT:1>, Tag(name='<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>'}))<EOL><DEDENT>new_body.insert(bc, new_row)<EOL>bc += <NUM_LIT:1><EOL>indent += <NUM_LIT:1><EOL>first_header = False<EOL><DEDENT><DEDENT><DEDENT>new_row = Tag(name='<STR_LIT>')<EOL>new_header = Tag(name='<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>',<EOL>'<STR_LIT>': '<STR_LIT>' + str(<NUM_LIT:10>*(num_headers_max-<NUM_LIT:1>)) + '<STR_LIT>'})<EOL>new_header.contents = headers[-<NUM_LIT:1>].contents<EOL>if '<STR_LIT>' in headers[-<NUM_LIT:1>].contents:<EOL><INDENT>if not show_intermediate_breakdown and not last:<EOL><INDENT>continue<EOL><DEDENT>elif not show_intermediate_breakdown:<EOL><INDENT>last = False<EOL>new_row = Tag(name='<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>'})<EOL>new_header = Tag(name='<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>'})<EOL>new_header.contents = [NavigableString('<STR_LIT>')]<EOL><DEDENT>elif not last:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>if last:<EOL><INDENT>continue<EOL><DEDENT>new_row.insert(<NUM_LIT:0>, new_header)<EOL>cc = <NUM_LIT:1><EOL>for _ in range(num_columns_max - len(data) - <NUM_LIT:1>):<EOL><INDENT>new_header = Tag(name='<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>'}) if '<STR_LIT>' in headers[-<NUM_LIT:1>].contents else Tag(name='<STR_LIT>')<EOL>new_row.insert(cc, new_header)<EOL>cc += <NUM_LIT:1><EOL><DEDENT>for dat in data:<EOL><INDENT>new_data = Tag(name='<STR_LIT>') if '<STR_LIT>' not in headers[-<NUM_LIT:1>].contents else Tag(name='<STR_LIT>', attrs={'<STR_LIT:class>': '<STR_LIT>'})<EOL>new_data.contents = dat.contents<EOL>new_row.insert(cc, new_data)<EOL>cc += <NUM_LIT:1><EOL><DEDENT>new_body.insert(bc, new_row)<EOL>bc += <NUM_LIT:1><EOL><DEDENT>table.tbody.replaceWith(new_body)<EOL><DEDENT>return soup<EOL>
Pandas-style pivot to Excel-style pivot formatting for Outlook/HTML. This function is meant to be provided to the email functionality as a postprocessor. It expects a Jupyter- or pandas-exported HTML table of a dataframe with the following index: example: # a single pivot pt1 = pd.pivot_table(data, value=['col1', 'col2', 'col3'], index=['index'], columns=['col4'], aggfunc='sum', margins=True).stack('col4') # here we reindex the table to have the appropriate row ordering pt1 = pt1.reindex( pd.MultiIndex( levels=[['COL1', 'COL2', 'All'], ['ROW1', 'ROW2', 'ALL']], labels=[[0, 0, 0, 1, 1, 1, 2], # This is the key, changing the label order (2-ALL) [2, 0, 1, 2, 0, 1, 2]], names=['', ''], sortorder=0) ).fillna(0) show_intermediate_breakdown --> should intermediate summations be shown? show_total_breakdown --> should total summations be shown?
f2025:m0
def parse(self, kv):
key, val = kv.split(self.kv_sep, <NUM_LIT:1>)<EOL>keys = key.split(self.keys_sep)<EOL>for k in reversed(keys):<EOL><INDENT>val = {k: val}<EOL><DEDENT>return val<EOL>
Parses a key-value string into a dict. Examples: >>> parser.parse('test1.test2=value') {'test1': {'test2': 'value'}} >>> parser.parse('test=value') {'test': 'value'}
f2032:c0:m1
def read_requirements(filename):
contents = read_file(filename).strip('<STR_LIT:\n>')<EOL>return contents.split('<STR_LIT:\n>') if contents else []<EOL>
Open a requirements file and return list of its lines.
f2033:m0
def read_file(filename):
path = os.path.join(os.path.dirname(__file__), filename)<EOL>with open(path) as f:<EOL><INDENT>return f.read()<EOL><DEDENT>
Open a file, read it, and return its contents.
f2033:m1
def get_metadata(init_file):
return dict(re.findall("<STR_LIT>", init_file))<EOL>
Read metadata from a given file and return it as a dictionary.
f2033:m2
@abstractmethod <EOL><INDENT>def send(self, **kwargs):<DEDENT>
Main method, which should parse parameters and call the transport's send method
f2038:c0:m1
def get_api_params(self):
result = self.params<EOL>if type(result) != dict:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'.format(<EOL>self.__class__.__name__<EOL>)<EOL>)<EOL><DEDENT>return result<EOL>
Dictionary with available API parameters :raises ValueError: If value of __class__.params is not a dictionary :return: Should return a dict with available Pushalot API methods :rtype: dict
f2038:c0:m2
def get_api_required_params(self):
result = self.required_params<EOL>if type(result) != list:<EOL><INDENT>raise ValueError(<EOL>'<STR_LIT>'.format(<EOL>self.__class__.__name__<EOL>)<EOL>)<EOL><DEDENT>return result<EOL>
List with required params :return: List with required API parameters :raises ValueError: If value of __class__.required_params is not a list :rtype: list
f2038:c0:m3
@abstractproperty <EOL><INDENT>def params(self):<DEDENT>
Return dictionary with available API params Example of dictionary: { 'token': { 'param': 'AuthorizationToken', 'type': str, 'max_len': 32, }, 'is_important': { 'param': 'IsImportant', 'type': bool, }, 'ttl': { 'param': 'TimeToLive', 'type': int, } } Key of dictionary used as argument passed to function. param: in subdictionary is actual param which should be sent to API. type: type of the argument. Can be used for basic validation. Supported types now are boolean and string. All other types ignored, and used as is. max_len: Optional argument for string type. Check that length of the passed argument not exceed the given maximum. :return: Dictionary with available API params :rtype: dict
f2038:c0:m4
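A hypothetical concrete subclass following the dictionary format described above; the base-class name, attribute names, and API-side parameter names here are assumptions for illustration. Note that required_params must list the API-side names, since _build_params_from_kwargs validates the already-translated dictionary:

from abc import ABC

class BaseAPI(ABC):  # stand-in for the abstract class above; its real name is masked
    def __init__(self, token, transport):
        self._token = token          # attribute names assumed from the concrete class below
        self._transport = transport

class MinimalAPI(BaseAPI):
    # 'param' values are the API-side names used in the outgoing request
    params = {
        'token': {'param': 'AuthorizationToken', 'type': str, 'max_len': 32},
        'title': {'param': 'Title', 'type': str, 'max_len': 250},
        'body': {'param': 'Body', 'type': str, 'max_len': 32768},
    }
    required_params = ['AuthorizationToken', 'Title', 'Body']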
@abstractproperty <EOL><INDENT>def required_params(self):<DEDENT>
Return list with required API methods :return: List with required API methods
f2038:c0:m5
def _build_params_from_kwargs(self, **kwargs):
api_methods = self.get_api_params()<EOL>required_methods = self.get_api_required_params()<EOL>ret_kwargs = {}<EOL>for key, val in kwargs.items():<EOL><INDENT>if key not in api_methods:<EOL><INDENT>warnings.warn(<EOL>'<STR_LIT>'.format(key),<EOL>Warning<EOL>)<EOL>continue<EOL><DEDENT>if key not in required_methods and val is None:<EOL><INDENT>continue<EOL><DEDENT>if type(val) != api_methods[key]['<STR_LIT:type>']:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>".format(key)<EOL>)<EOL><DEDENT>if '<STR_LIT>' in api_methods[key]:<EOL><INDENT>if len(val) > api_methods[key]['<STR_LIT>']:<EOL><INDENT>raise ValueError(<EOL>"<STR_LIT>"<EOL>"<STR_LIT>".format(key)<EOL>)<EOL><DEDENT><DEDENT>ret_kwargs[api_methods[key]['<STR_LIT>']] = val<EOL><DEDENT>for item in required_methods:<EOL><INDENT>if item not in ret_kwargs:<EOL><INDENT>raise pushalot.exc.PushalotException(<EOL>"<STR_LIT>".format(item)<EOL>)<EOL><DEDENT><DEDENT>return ret_kwargs<EOL>
Builds parameters from passed arguments. Searches passed parameters in available methods, prepends the specified API key, and returns a dictionary which can be sent directly to the API server. :param kwargs: :type kwargs: dict :raises ValueError: If the type of a specified parameter doesn't match the expected type. Also raised if some basic validation of a passed parameter fails. :raises PushalotException: If a required parameter is not set. :return: Dictionary with params which can be sent to the API server :rtype: dict
f2038:c0:m6
def send(self, title, body,<EOL>link_title=None, link=None, is_important=False,<EOL>is_silent=False, image=None, source=None, ttl=None, **kwargs):
params = self._build_params_from_kwargs(<EOL>token=self._token,<EOL>title=title,<EOL>body=body,<EOL>link_title=link_title,<EOL>link=link,<EOL>is_important=is_important,<EOL>is_silent=is_silent,<EOL>image=image,<EOL>source=source,<EOL>ttl=ttl,<EOL>**kwargs<EOL>)<EOL>return self._transport.send(**params)<EOL>
:param token: Service authorization token :type token: str :param title: Message title, up to 250 characters :type title: str :param body: Message body, up to 32768 characters :type body: str :param link_title: Title of the link, up to 100 characters :type link: str :param link: Link URI, up to 1000 characters :type link: str :param is_important: Determines, is message important :type is_important: bool :param is_silent: Prevents toast notifications on devices :type is_silent: bool :param image: Image URL link, up to 250 characters :type image: str :param source: Notifications source name, up to 25 characters :type source: str :param ttl: Message time to live in minutes (0 .. 43200) :type ttl: int :return: True on success
f2038:c1:m0
def send_message(self, title, body):
return self.send(title=title, body=body)<EOL>
Send message :param title: Message title :type title: str :param body: Message body :type body: str :return: True on success :rtype: bool
f2038:c1:m1
def send_silent_message(self, title, body):
return self.send(title=title, body=body, is_silent=True)<EOL>
Send silent message :param title: Message title :type title: str :param body: Message body :type body: str :return: True on success :rtype: bool
f2038:c1:m2
def send_important_message(self, title, body):
return self.send(title=title, body=body, is_important=True)<EOL>
Send important message :param title: Message title :type title: str :param body: Message body :type body: str :return: True on success :rtype: bool
f2038:c1:m3
def send_with_expiry(self, title, body, ttl):
return self.send(title=title, body=body, ttl=ttl)<EOL>
Send message with time to live :param title: Message title :type title: str :param body: Message body :type body: str :param ttl: Time to live in minutes :type ttl: int :return: True on success :rtype: bool
f2038:c1:m4
def send_with_link(self, title, body, link, link_title=None):
link_title = link_title or link<EOL>return self.send(<EOL>title=title,<EOL>body=body,<EOL>link=link,<EOL>link_title=link_title<EOL>)<EOL>
Send message with link If no link title specified, URL used as title. :param title: Message title :type title: str :param body: Message body :type body: str :param link: URL :type link: str :param link_title: URL title :type link_title: str :return: True on success :rtype: bool
f2038:c1:m5
def send_with_image(self, title, body, image):
return self.send(title=title, body=body, image=image)<EOL>
Send message with image Image thumbnail URL link, has to be properly formatted in absolute form with protocol etc. Recommended image size is 72x72 pixels. Larger images will be scaled down while maintaining aspect ratio. In order to save mobile device data plan, we download images from specified URL on server side and scale it there. This means client apps will never download big images directly by mistake. :param title: Message title :type title: str :param body: Message body :type body: str :param image: URL to image :type image: str :return: True on success :rtype: bool
f2038:c1:m6
@abstractmethod <EOL><INDENT>def send(self, **kwargs):<DEDENT>
Send request to API Only this method required. Method receives dictionary with api requests params, and send request. Should return True if request successfully sent, or throw exception on failure. :raises PushalotBadRequestException: Bad parameters sent to API :raises PushalotNotAcceptableException: API message throttle limit hit :raises: PushalotGoneException: Invalid or blocked authorization token :raises PushalotInternalErrorException: API server error :raises PushalotUnavailableException: API server unavailable :param kwargs: Dictionary with API request parameters :type kwargs: dict :return: True on success :rtype: bool
f2040:c0:m0
def lazy_module(modname, error_strings=None, lazy_mod_class=LazyModule,<EOL>level='<STR_LIT>'):
if error_strings is None:<EOL><INDENT>error_strings = {}<EOL><DEDENT>_set_default_errornames(modname, error_strings)<EOL>mod = _lazy_module(modname, error_strings, lazy_mod_class)<EOL>if level == '<STR_LIT>':<EOL><INDENT>return sys.modules[module_basename(modname)]<EOL><DEDENT>elif level == '<STR_LIT>':<EOL><INDENT>return mod<EOL><DEDENT>else:<EOL><INDENT>raise ValueError("<STR_LIT>")<EOL><DEDENT>
Function allowing lazy importing of a module into the namespace. A lazy module object is created, registered in `sys.modules`, and returned. This is a hollow module; actual loading, and `ImportErrors` if not found, are delayed until an attempt is made to access attributes of the lazy module. A handy application is to use :func:`lazy_module` early in your own code (say, in `__init__.py`) to register all modulenames you want to be lazy. Because of registration in `sys.modules`, later invocations of `import modulename` will also return the lazy object. This means that after initial registration the rest of your code can use regular Python import statements and retain the laziness of the modules. Parameters ---------- modname : str The module to import. error_strings : dict, optional A dictionary of strings to use when module-loading fails. Key 'msg' sets the message to use (defaults to :attr:`lazy_import._MSG`). The message is formatted using the remaining dictionary keys. The default message informs the user of which module is missing (key 'module'), what code loaded the module as lazy (key 'caller'), and which package should be installed to solve the dependency (key 'install_name'). None of the keys is mandatory and all are given smart names by default. lazy_mod_class: type, optional Which class to use when instantiating the lazy module, to allow deep customization. The default is :class:`LazyModule` and custom alternatives **must** be a subclass thereof. level : str, optional Which submodule reference to return. Either a reference to the 'leaf' module (the default) or to the 'base' module. This is useful if you'll be using the module functionality in the same place you're calling :func:`lazy_module` from, since then you don't need to run `import` again. Setting *level* does not affect which names/modules get registered in `sys.modules`. For *level* set to 'base' and *modulename* 'aaa.bbb.ccc':: aaa = lazy_import.lazy_module("aaa.bbb.ccc", level='base') # 'aaa' becomes defined in the current namespace, with # (sub)attributes 'aaa.bbb' and 'aaa.bbb.ccc'. # It's the lazy equivalent to: import aaa.bbb.ccc For *level* set to 'leaf':: ccc = lazy_import.lazy_module("aaa.bbb.ccc", level='leaf') # Only 'ccc' becomes set in the current namespace. # Lazy equivalent to: from aaa.bbb import ccc Returns ------- module The module specified by *modname*, or its base, depending on *level*. The module isn't immediately imported. Instead, an instance of *lazy_mod_class* is returned. Upon access to any of its attributes, the module is finally loaded. Examples -------- >>> import lazy_import, sys >>> np = lazy_import.lazy_module("numpy") >>> np Lazily-loaded module numpy >>> np is sys.modules['numpy'] True >>> np.pi # This causes the full loading of the module ... 3.141592653589793 >>> np # ... and the module is changed in place. <module 'numpy' from '/usr/local/lib/python/site-packages/numpy/__init__.py'> >>> import lazy_import, sys >>> # The following succeeds even when asking for a module that's not available >>> missing = lazy_import.lazy_module("missing_module") >>> missing Lazily-loaded module missing_module >>> missing is sys.modules['missing_module'] True >>> missing.some_attr # This causes the full loading of the module, which now fails. ImportError: __main__ attempted to use a functionality that requires module missing_module, but it couldn't be loaded. Please install missing_module and retry. See Also -------- :func:`lazy_callable` :class:`LazyModule`
f2043:m1
def lazy_callable(modname, *names, **kwargs):
if not names:<EOL><INDENT>modname, _, name = modname.rpartition("<STR_LIT:.>")<EOL><DEDENT>lazy_mod_class = _setdef(kwargs, '<STR_LIT>', LazyModule)<EOL>lazy_call_class = _setdef(kwargs, '<STR_LIT>', LazyCallable)<EOL>error_strings = _setdef(kwargs, '<STR_LIT>', {})<EOL>_set_default_errornames(modname, error_strings, call=True)<EOL>if not names:<EOL><INDENT>return _lazy_callable(modname, name, error_strings.copy(),<EOL>lazy_mod_class, lazy_call_class)<EOL><DEDENT>return tuple(_lazy_callable(modname, cname, error_strings.copy(),<EOL>lazy_mod_class, lazy_call_class) for cname in names)<EOL>
Performs lazy importing of one or more callables. :func:`lazy_callable` creates functions that are thin wrappers that pass any and all arguments straight to the target module's callables. These can be functions or classes. The full loading of that module is only actually triggered when the returned lazy function itself is called. This lazy import of the target module uses the same mechanism as :func:`lazy_module`. If, however, the target module has already been fully imported prior to invocation of :func:`lazy_callable`, then the target callables themselves are returned and no lazy imports are made. :func:`lazy_function` and :func:`lazy_class` are aliases of :func:`lazy_callable`. Parameters ---------- modname : str The base module from where to import the callable(s) in *names*, or a full 'module_name.callable_name' string. names : str (optional) The callable name(s) to import from the module specified by *modname*. If left empty, *modname* is assumed to also include the callable name to import. error_strings : dict, optional A dictionary of strings to use when reporting loading errors (either a missing module, or a missing callable name in the loaded module). *error_string* follows the same usage as described under :func:`lazy_module`, with the exceptions that 1) a further key, 'msg_callable', can be supplied to be used as the error when a module is successfully loaded but the target callable can't be found therein (defaulting to :attr:`lazy_import._MSG_CALLABLE`); 2) a key 'callable' is always added with the callable name being loaded. lazy_mod_class : type, optional See definition under :func:`lazy_module`. lazy_call_class : type, optional Analogously to *lazy_mod_class*, allows setting a custom class to handle lazy callables, other than the default :class:`LazyCallable`. Returns ------- wrapper function or tuple of wrapper functions If *names* is passed, returns a tuple of wrapper functions, one for each element in *names*. If only *modname* is passed it is assumed to be a full 'module_name.callable_name' string, in which case the wrapper for the imported callable is returned directly, and not in a tuple. Notes ----- Unlike :func:`lazy_module`, which returns a lazy module that eventually mutates into the fully-functional version, :func:`lazy_callable` only returns thin wrappers that never change. This means that the returned wrapper object never truly becomes the one under the module's namespace, even after successful loading of the module in *modname*. This is fine for most practical use cases, but may break code that relies on the usage of the returned objects other than calling them. One such example is the lazy import of a class: it's fine to use the returned wrapper to instantiate an object, but it can't be used, for instance, to subclass from. Examples -------- >>> import lazy_import, sys >>> fn = lazy_import.lazy_callable("numpy.arange") >>> sys.modules['numpy'] Lazily-loaded module numpy >>> fn(10) array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) >>> sys.modules['numpy'] <module 'numpy' from '/usr/local/lib/python3.5/site-packages/numpy/__init__.py'> >>> import lazy_import, sys >>> cl = lazy_import.lazy_callable("numpy.ndarray") # a class >>> obj = cl([1, 2]) # This works OK (and also triggers the loading of numpy) >>> class MySubclass(cl): # This fails because cl is just a wrapper, >>> pass # not an actual class. See Also -------- :func:`lazy_module` :class:`LazyCallable` :class:`LazyModule`
f2043:m3
def _load_module(module):
modclass = type(module)<EOL>if not issubclass(modclass, LazyModule):<EOL><INDENT>raise TypeError("<STR_LIT>")<EOL><DEDENT>with _ImportLockContext():<EOL><INDENT>parent, _, modname = module.__name__.rpartition('<STR_LIT:.>')<EOL>logger.debug("<STR_LIT>".format(modname))<EOL>if not hasattr(modclass, '<STR_LIT>'):<EOL><INDENT>return<EOL><DEDENT>modclass._LOADING = True<EOL>try:<EOL><INDENT>if parent:<EOL><INDENT>logger.debug("<STR_LIT>".format(parent))<EOL>setattr(sys.modules[parent], modname, module)<EOL><DEDENT>if not hasattr(modclass, '<STR_LIT>'):<EOL><INDENT>logger.debug("<STR_LIT>"<EOL>.format(modname))<EOL>return<EOL><DEDENT>cached_data = _clean_lazymodule(module)<EOL>try:<EOL><INDENT>reload_module(module) <EOL><DEDENT>except:<EOL><INDENT>logger.debug("<STR_LIT>"<EOL>.format(modname))<EOL>_reset_lazymodule(module, cached_data)<EOL>raise<EOL><DEDENT>else:<EOL><INDENT>logger.debug("<STR_LIT>".format(modname))<EOL>delattr(modclass, '<STR_LIT>')<EOL>_reset_lazy_submod_refs(module)<EOL><DEDENT><DEDENT>except (AttributeError, ImportError) as err:<EOL><INDENT>logger.debug("<STR_LIT>"<EOL>.format(modname, err.__class__.__name__, err))<EOL>logger.lazy_trace()<EOL>if ((six.PY3 and isinstance(err, AttributeError)) and not<EOL>err.args[<NUM_LIT:0>] == "<STR_LIT>"):<EOL><INDENT>raise<EOL><DEDENT>msg = modclass._lazy_import_error_msgs['<STR_LIT>']<EOL>raise_from(ImportError(<EOL>msg.format(**modclass._lazy_import_error_strings)), None)<EOL><DEDENT><DEDENT>
Ensures that a module, and its parents, are properly loaded
f2043:m5
def _setdef(argdict, name, defaultvalue):
if not name in argdict or argdict[name] is None:<EOL><INDENT>argdict[name] = defaultvalue<EOL><DEDENT>return argdict[name]<EOL>
Like dict.setdefault but sets the default value also if None is present.
f2043:m6
def _caller_name(depth=<NUM_LIT:2>, default='<STR_LIT>'):
<EOL>try:<EOL><INDENT>return sys._getframe(depth).f_globals['<STR_LIT>']<EOL><DEDENT>except AttributeError:<EOL><INDENT>return default<EOL><DEDENT>
Returns the name of the calling namespace.
f2043:m9
def _clean_lazymodule(module):
modclass = type(module)<EOL>_clean_lazy_submod_refs(module)<EOL>modclass.__getattribute__ = ModuleType.__getattribute__<EOL>modclass.__setattr__ = ModuleType.__setattr__<EOL>cls_attrs = {}<EOL>for cls_attr in _CLS_ATTRS:<EOL><INDENT>try:<EOL><INDENT>cls_attrs[cls_attr] = getattr(modclass, cls_attr)<EOL>delattr(modclass, cls_attr)<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>return cls_attrs<EOL>
Removes all lazy behavior from a module's class, for loading. Also removes all module attributes listed under the module's class deletion dictionaries. Deletion dictionaries are class attributes with names specified in `_DELETION_DICT`. Parameters ---------- module: LazyModule Returns ------- dict A dictionary of deleted class attributes, that can be used to reset the lazy state using :func:`_reset_lazymodule`.
f2043:m10
def _reset_lazymodule(module, cls_attrs):
modclass = type(module)<EOL>del modclass.__getattribute__<EOL>del modclass.__setattr__<EOL>try:<EOL><INDENT>del modclass._LOADING<EOL><DEDENT>except AttributeError:<EOL><INDENT>pass<EOL><DEDENT>for cls_attr in _CLS_ATTRS:<EOL><INDENT>try:<EOL><INDENT>setattr(modclass, cls_attr, cls_attrs[cls_attr])<EOL><DEDENT>except KeyError:<EOL><INDENT>pass<EOL><DEDENT><DEDENT>_reset_lazy_submod_refs(module)<EOL>
Resets a module's lazy state from cached data.
f2043:m12
def get_deep_search_results(self, address, zipcode):
url = '<STR_LIT>'<EOL>params = {<EOL>'<STR_LIT:address>': address,<EOL>'<STR_LIT>': zipcode,<EOL>'<STR_LIT>': self.api_key<EOL>}<EOL>return self.get_data(url, params)<EOL>
GetDeepSearchResults API
f2049:c0:m1
def get_updated_property_details(self, zpid):
url = '<STR_LIT>'<EOL>params = {<EOL>'<STR_LIT>': zpid,<EOL>'<STR_LIT>': self.api_key<EOL>}<EOL>return self.get_data(url, params)<EOL>
GetUpdatedPropertyDetails API
f2049:c0:m2
@property<EOL><INDENT>def area_unit(self):<DEDENT>
return u'<STR_LIT>'<EOL>
lotSizeSqFt
f2049:c1:m2
@property<EOL><INDENT>def last_sold_price_currency(self):<DEDENT>
return self.data.find(<EOL>self.attribute_mapping['<STR_LIT>']).attrib["<STR_LIT>"]<EOL>
lastSoldPrice currency
f2049:c1:m3
def __init__(self, data, *args, **kwargs):
self.data = data.findall('<STR_LIT>')[<NUM_LIT:0>]<EOL>for attr in self.attribute_mapping.__iter__():<EOL><INDENT>try:<EOL><INDENT>self.__setattr__(attr, self.get_attr(attr))<EOL><DEDENT>except AttributeError:<EOL><INDENT>print ('<STR_LIT>' % attr)<EOL><DEDENT><DEDENT>
Creates instance of GeocoderResult from the provided XML data array
f2049:c2:m0
def __init__(self, data, *args, **kwargs):
self.data = data.findall('<STR_LIT>')[<NUM_LIT:0>]<EOL>for attr in self.attribute_mapping.__iter__():<EOL><INDENT>try:<EOL><INDENT>self.__setattr__(attr, self.get_attr(attr))<EOL><DEDENT>except AttributeError:<EOL><INDENT>print ('<STR_LIT>' % attr)<EOL><DEDENT><DEDENT>
Creates instance of GeocoderResult from the provided XML data array
f2049:c3:m0
def register_images(im0, im1, *, rmMean=True, correctScale=True):
<EOL>im0 = np.asarray(im0, dtype=np.float32)<EOL>im1 = np.asarray(im1, dtype=np.float32)<EOL>if rmMean:<EOL><INDENT>im0 = im0 - im0.mean()<EOL>im1 = im1 - im1.mean()<EOL><DEDENT>f0, f1 = dft_optsize_same(im0, im1)<EOL>angle, scale = find_rotation_scale(f0, f1, isccs=True)<EOL>if not correctScale:<EOL><INDENT>if np.abs(<NUM_LIT:1> - scale) > <NUM_LIT>:<EOL><INDENT>warnings.warn("<STR_LIT>")<EOL><DEDENT>scale = <NUM_LIT:1><EOL><DEDENT>im2 = rotate_scale(im1, angle, scale)<EOL>f2 = dft_optsize(im2, shape=f0.shape)<EOL>y, x = find_shift_dft(f0, f2, isccs=True)<EOL>return angle, scale, [y, x], im2<EOL>
Finds the rotation, scaling and translation of im1 relative to im0 Parameters ---------- im0: First image im1: Second image rmMean: Set to True to remove the mean (default) correctScale: Set to False to skip the scale correction (a warning is raised if the detected scale differs noticeably from 1) Returns ------- angle: The angle difference scale: The scale difference [y, x]: The offset im2: The rotated and scaled (but not yet shifted) second image Notes ----- The algorithm uses gaussian fit for subpixel precision. The best case would be to have two square images of the same size. The algorithm is faster if the size is a power of 2.
f2052:m0
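A round-trip sanity check, applying a known transform with rotate_scale_shift (defined further below) and recovering it:

import numpy as np

ref = np.random.rand(256, 256)
moved = rotate_scale_shift(ref, 0.05, 1.02, (3.0, -5.0))  # known angle/scale/shift
angle, scale, (y, x), im2 = register_images(ref, moved)
# angle/scale/(y, x) should roughly undo the applied transform (up to sign
# conventions); im2 is `moved` after rotation and scaling only
print(angle, scale, y, x)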
def find_rotation_scale(im0, im1, isccs=False):
<EOL>im0 = np.asarray(im0, dtype=np.float32)<EOL>im1 = np.asarray(im1, dtype=np.float32)<EOL>truesize = None<EOL>if isccs:<EOL><INDENT>truesize = im0.shape<EOL>im0 = centered_mag_sq_ccs(im0)<EOL>im1 = centered_mag_sq_ccs(im1)<EOL><DEDENT>lp1, log_base = polar_fft(im1, logpolar=True, isshiftdft=isccs,<EOL>logoutput=True, truesize=truesize)<EOL>lp0, log_base = polar_fft(im0, logpolar=True, isshiftdft=isccs,<EOL>logoutput=True, truesize=truesize,<EOL>nangle=lp1.shape[<NUM_LIT:0>], radiimax=lp1.shape[<NUM_LIT:1>])<EOL>angle, scale = find_shift_dft(lp0, lp1)<EOL>angle *= np.pi / lp1.shape[<NUM_LIT:0>]<EOL>scale = log_base ** (scale)<EOL>return angle, scale<EOL>
Compares the images and returns the best guess for the rotation angle and scale difference. Parameters ---------- im0: 2d array First image im1: 2d array Second image isccs: boolean, default False Set to True if the images are already DFTs and in CCS representation Returns ------- angle: number The angle difference scale: number The scale difference Notes ----- Uses find_shift_dft
f2052:m1
def find_shift_dft(im0, im1, isccs=False, subpix=True):
<EOL>im0 = np.asarray(im0, dtype=np.float32)<EOL>im1 = np.asarray(im1, dtype=np.float32)<EOL>if not isccs:<EOL><INDENT>im0, im1 = dft_optsize_same(im0, im1)<EOL><DEDENT>else:<EOL><INDENT>assert(im0.shape == im1.shape)<EOL><DEDENT>mulSpec = cv2.mulSpectrums(im0, im1, flags=<NUM_LIT:0>, conjB=True)<EOL>normccs = cv2.sqrt(cv2.mulSpectrums(im0, im0, flags=<NUM_LIT:0>, conjB=True) *<EOL>cv2.mulSpectrums(im1, im1, flags=<NUM_LIT:0>, conjB=True))<EOL>xc = cv2.dft(ccs_normalize(mulSpec, normccs),<EOL>flags=cv2.DFT_REAL_OUTPUT | cv2.DFT_INVERSE)<EOL>blurRadii = <NUM_LIT:2><EOL>xc = cv2.copyMakeBorder(xc, blurRadii, blurRadii, blurRadii, blurRadii,<EOL>borderType=cv2.BORDER_WRAP)<EOL>xc = cv2.GaussianBlur(xc, (<NUM_LIT:2> * blurRadii + <NUM_LIT:1>, <NUM_LIT:2> * blurRadii + <NUM_LIT:1>), <NUM_LIT>)<EOL>xc = xc[blurRadii:-blurRadii, blurRadii:-blurRadii]<EOL>shape = np.asarray(xc.shape)<EOL>idx = np.asarray(np.unravel_index(np.argmax(xc), shape))<EOL>"""<STR_LIT>"""<EOL><INDENT>if toremove:<EOL><INDENT>plt.figure(<NUM_LIT:1>)<EOL>l=len(xc[:,<NUM_LIT:0>])<EOL>plt.plot(np.arange(l)/l,xc[:,<NUM_LIT:0>])<EOL>print(l,xc[-<NUM_LIT:1>,<NUM_LIT:0>])<EOL>plt.figure(<NUM_LIT:2>)<EOL><DEDENT><DEDENT>if subpix:<EOL><INDENT>idx = np.asarray([get_peak_pos(xc[:, idx[<NUM_LIT:1>]], wrap=True),<EOL>get_peak_pos(xc[idx[<NUM_LIT:0>], :], wrap=True)])<EOL><DEDENT>else:<EOL><INDENT>idx[idx > shape // <NUM_LIT:2>] -= shape[idx > shape // <NUM_LIT:2>]<EOL><DEDENT>return idx<EOL>
Find the shift between two images using the DFT method Parameters ---------- im0: 2d array First image im1: 2d array Second image isccs: Boolean, default False Set to True if the images are already DFTs and in CCS representation subpix: boolean, default True Set to True (default) if you want subpixel precision Returns ------- [y, x]: 2 numbers The offset Notes ----- This algorithm detects a shift using the global phase difference of the DFTs. If the images are already DFTs and in the CCS format, set isccs to True. In that case the images should have the same size. If subpix is True, a gaussian fit is used for subpix precision
f2052:m2
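The same phase-correlation idea in plain numpy, at integer-pixel precision and without the CCS spectrum handling or gaussian-fit refinement used above:

import numpy as np

def shift_phase_corr(im0, im1):
    # Normalized cross-power spectrum: only the phase difference survives
    f0, f1 = np.fft.fft2(im0), np.fft.fft2(im1)
    cross = f0 * np.conj(f1)
    xc = np.fft.ifft2(cross / (np.abs(cross) + 1e-12)).real
    idx = np.array(np.unravel_index(np.argmax(xc), xc.shape))
    shape = np.array(xc.shape)
    idx[idx > shape // 2] -= shape[idx > shape // 2]  # map peak to signed offset
    return idx  # [y, x] offset of im0 relative to im1 under this conjugation convention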
def find_shift_cc(im0, im1, ylim=None, xlim=None, subpix=True):
<EOL>im0 = np.asarray(im0, dtype=np.float32)<EOL>im1 = np.asarray(im1, dtype=np.float32)<EOL>im0 = im0 - np.nanmean(im0)<EOL>im1 = im1 - np.nanmean(im1)<EOL>shape0 = np.asarray(im0.shape)<EOL>shape1 = np.asarray(im1.shape)<EOL>offset = <NUM_LIT:1> - shape1<EOL>pad = np.lib.pad(-offset, (<NUM_LIT:1>, <NUM_LIT:1>), mode='<STR_LIT>')<EOL>if ylim is not None:<EOL><INDENT>pad[<NUM_LIT:0>] = -ylim[<NUM_LIT:0>]<EOL>pad[<NUM_LIT:1>] = ylim[<NUM_LIT:1>] + (shape1 - shape0)[<NUM_LIT:0>]<EOL><DEDENT>if xlim is not None:<EOL><INDENT>pad[<NUM_LIT:2>] = -xlim[<NUM_LIT:0>]<EOL>pad[<NUM_LIT:3>] = xlim[<NUM_LIT:1>] + (shape1 - shape0)[<NUM_LIT:1>]<EOL><DEDENT>im0, offset = pad_img(im0, pad)<EOL>xc = cv2.matchTemplate(im0, im1, cv2.TM_CCORR)<EOL>idx = np.asarray(np.unravel_index(np.argmax(xc), xc.shape))<EOL>if subpix:<EOL><INDENT>idx = np.asarray([get_peak_pos(xc[:, idx[<NUM_LIT:1>]], wrap=False),<EOL>get_peak_pos(xc[idx[<NUM_LIT:0>], :], wrap=False)])<EOL><DEDENT>else:<EOL><INDENT>idx[idx > shape // <NUM_LIT:2>] -= shape[idx > shape // <NUM_LIT:2>]<EOL><DEDENT>return idx + offset<EOL>
Finds the best shift between im0 and im1 using cross correlation Parameters ---------- im0: 2d array First image im1: 2d array Second image ylim: 2 numbers, optional The y limits of the search (if None full range is searched) xlim: 2 numbers, optional Ibidem with x subpix: boolean, default True Set to True for subpixel precision Returns ------- [y, x]: 2 numbers The offset Notes ----- The origin of im1 in the im0 referential is returned ylim and xlim limit the possible output.
f2052:m3
def combine_images(imgs, register=True):
imgs = np.asarray(imgs, dtype="<STR_LIT:float>")<EOL>if register:<EOL><INDENT>for i in range(<NUM_LIT:1>, imgs.shape[<NUM_LIT:0>]):<EOL><INDENT>ret = register_images(imgs[<NUM_LIT:0>, :, :], imgs[i, :, :])<EOL>imgs[i, :, :] = rotate_scale_shift(imgs[i, :, :], *ret[:<NUM_LIT:3>], np.nan)<EOL><DEDENT><DEDENT>return np.mean(imgs, <NUM_LIT:0>)<EOL>
Combine similar images into one to reduce the noise Parameters ---------- imgs: list of 2d array Series of images register: Boolean, default True True if the images should be registered before combination Returns ------- im: 2d array The result image Notes ----- This is an example of the usage of the library
f2052:m4
def orientation_angle(im, approxangle=None, *, isshiftdft=False, truesize=None,<EOL>rotateAngle=None):
im = np.asarray(im)<EOL>if rotateAngle is not None and not isshiftdft:<EOL><INDENT>scale = np.sqrt(<NUM_LIT> * (<NUM_LIT:1> + (np.tan(rotateAngle) - <NUM_LIT:1>)**<NUM_LIT:2> /<EOL>(np.tan(rotateAngle) + <NUM_LIT:1>)**<NUM_LIT:2>))<EOL>im = rotate_scale(im, rotateAngle, scale)<EOL><DEDENT>lp = polar_fft(im, isshiftdft=isshiftdft,<EOL>logoutput=False, interpolation='<STR_LIT>',<EOL>truesize=truesize)<EOL>adis = lp.sum(-<NUM_LIT:1>)<EOL>if approxangle is not None:<EOL><INDENT>amin = clamp_angle(approxangle - np.pi / <NUM_LIT:4> - np.pi / <NUM_LIT:2>)<EOL>amax = clamp_angle(approxangle + np.pi / <NUM_LIT:4> - np.pi / <NUM_LIT:2>)<EOL>angles = np.linspace(-np.pi / <NUM_LIT:2>, np.pi / <NUM_LIT:2>,<EOL>lp.shape[<NUM_LIT:0>], endpoint=False)<EOL>if amin > amax:<EOL><INDENT>adis[np.logical_and(angles > amax, angles < amin)] = adis.min()<EOL><DEDENT>else:<EOL><INDENT>adis[np.logical_or(angles > amax, angles < amin)] = adis.min()<EOL><DEDENT><DEDENT>ret = get_peak_pos(adis, wrap=True)<EOL>anglestep = np.pi / lp.shape[<NUM_LIT:0>]<EOL>"""<STR_LIT>"""<EOL>ret = clamp_angle(ret * anglestep)<EOL>if rotateAngle is not None:<EOL><INDENT>ret = clamp_angle(ret - rotateAngle)<EOL><DEDENT>return ret<EOL>
Give the highest contribution to the orientation Parameters ---------- im: 2d array The image approxangle: number, optional The approximate angle (None if unknown) isshiftdft: Boolean, default False True if the image has been processed (DFT, fftshift) truesize: 2 numbers, optional Truesize of the image if isshiftdft is True rotateAngle: number, optional The diagonals are more sensitive than the axes. Rotate the image to avoid pixel orientation (flat or diagonal) Returns ------- angle: number The orientation of the image Notes ----- if approxangle is specified, search only within +- pi/4
f2052:m5
def orientation_angle_2(im, nangle=None, isshiftdft=False, rotateAngle=None):
im = np.asarray(im, dtype=np.float32)<EOL>if rotateAngle is not None and not isshiftdft:<EOL><INDENT>scale = np.sqrt(<NUM_LIT> * (<NUM_LIT:1> + (np.tan(rotateAngle) - <NUM_LIT:1>)**<NUM_LIT:2> /<EOL>(np.tan(rotateAngle) + <NUM_LIT:1>)**<NUM_LIT:2>))<EOL>im = rotate_scale(im, rotateAngle, scale)<EOL><DEDENT>else:<EOL><INDENT>rotateAngle = <NUM_LIT:0><EOL><DEDENT>if not isshiftdft:<EOL><INDENT>im = centered_mag_sq_ccs(dft_optsize(im))<EOL><DEDENT>qshape = np.asarray([im.shape[<NUM_LIT:0>] // <NUM_LIT:2>, im.shape[<NUM_LIT:1>]])<EOL>center = np.asarray([qshape[<NUM_LIT:0>], <NUM_LIT:0>])<EOL>im[qshape[<NUM_LIT:0>], <NUM_LIT:0>] = <NUM_LIT:0><EOL>if nangle is None:<EOL><INDENT>nangle = np.min(im.shape) <EOL><DEDENT>theta = np.linspace(-np.pi / <NUM_LIT:2>, np.pi / <NUM_LIT:2>, nangle,<EOL>endpoint=False, dtype=np.float32)<EOL>YG, XG = np.ogrid[-center[<NUM_LIT:0>]:im.shape[<NUM_LIT:0>] - center[<NUM_LIT:0>],<EOL>-center[<NUM_LIT:1>]:im.shape[<NUM_LIT:1>] - center[<NUM_LIT:1>]]<EOL>YG = -YG<EOL>values = np.empty(theta.shape)<EOL>ylim = center[<NUM_LIT:0>] + <NUM_LIT:1><EOL>upper = np.arctan((YG + <NUM_LIT>) / (XG + <NUM_LIT>))<EOL>lower = upper.copy()<EOL>upper[:ylim, <NUM_LIT:1>:] = upper[:ylim, :-<NUM_LIT:1>]<EOL>upper[:ylim, <NUM_LIT:0>] = np.pi<EOL>lower[:-<NUM_LIT:1>, :] = lower[<NUM_LIT:1>:, :]<EOL>lower[-<NUM_LIT:1>, :] = np.arctan((YG[-<NUM_LIT:1>, :] - <NUM_LIT>) / (XG[-<NUM_LIT:1>, :] + <NUM_LIT>))<EOL>lower[ylim:, <NUM_LIT:1>:] = lower[ylim:, :-<NUM_LIT:1>]<EOL>lower[ylim:, <NUM_LIT:0>] = -np.pi<EOL>for i, t in enumerate(theta.flat):<EOL><INDENT>cond = np.logical_and(t > lower,<EOL>t < upper)<EOL>values[i] = np.sum(im[cond])<EOL><DEDENT>return clamp_angle(theta[np.argmax(values)] + np.pi / <NUM_LIT:2> + rotateAngle)<EOL>
Give the highest contribution to the orientation Parameters ---------- im: 2d array The image nangle: number, optional The number of angles to check isshiftdft: Boolean, default False True if the image has been processed (DFT, fftshift) rotateAngle: number, optional The diagonals are more sensitive than the axes. Rotate the image to avoid pixel orientation (flat or diagonal) Returns ------- angle: number The orientation of the image
f2052:m6
def dft_optsize(im, shape=None):
im = np.asarray(im)<EOL>initshape = im.shape<EOL>if shape is None:<EOL><INDENT>ys = cv2.getOptimalDFTSize(initshape[<NUM_LIT:0>])<EOL>xs = cv2.getOptimalDFTSize(initshape[<NUM_LIT:1>])<EOL>shape = [ys, xs]<EOL><DEDENT>im = cv2.copyMakeBorder(im, <NUM_LIT:0>, shape[<NUM_LIT:0>] - initshape[<NUM_LIT:0>],<EOL><NUM_LIT:0>, shape[<NUM_LIT:1>] - initshape[<NUM_LIT:1>],<EOL>borderType=cv2.BORDER_CONSTANT, value=<NUM_LIT:0>)<EOL>f = cv2.dft(im, nonzeroRows=initshape[<NUM_LIT:0>])<EOL>return f<EOL>
Resizes the image for an optimal DFT and computes it Parameters ---------- im: 2d array The image shape: 2 numbers, optional The shape of the output image (None will optimize the shape) Returns ------- dft: 2d array The dft in CCS representation Notes ----- The shape should be a product of powers of 2, 3, and 5
f2052:m7
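Why the padding matters: cv2.getOptimalDFTSize rounds up to a product of small primes (2, 3, 5), for which the DFT is much faster than for awkward sizes:

import cv2

for n in (509, 510, 511, 512):
    print(n, '->', cv2.getOptimalDFTSize(n))  # all map to 512 here (2**9)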
def dft_optsize_same(im0, im1):
im0 = np.asarray(im0)<EOL>im1 = np.asarray(im1)<EOL>shape0 = im0.shape<EOL>shape1 = im1.shape<EOL>ys = max(cv2.getOptimalDFTSize(shape0[<NUM_LIT:0>]),<EOL>cv2.getOptimalDFTSize(shape1[<NUM_LIT:0>]))<EOL>xs = max(cv2.getOptimalDFTSize(shape0[<NUM_LIT:1>]),<EOL>cv2.getOptimalDFTSize(shape1[<NUM_LIT:1>]))<EOL>shape = [ys, xs]<EOL>f0 = dft_optsize(im0, shape=shape)<EOL>f1 = dft_optsize(im1, shape=shape)<EOL>return f0, f1<EOL>
Resize 2 image same size for optimal DFT and computes it Parameters ---------- im0: 2d array First image im1: 2d array Second image Returns ------- dft0: 2d array The dft of the first image dft1: 2d array The dft of the second image Notes ----- dft0 and dft1 will have the same size
f2052:m8
def rotate_scale(im, angle, scale, borderValue=<NUM_LIT:0>, interp=cv2.INTER_CUBIC):
im = np.asarray(im, dtype=np.float32)<EOL>rows, cols = im.shape<EOL>M = cv2.getRotationMatrix2D(<EOL>(cols / <NUM_LIT:2>, rows / <NUM_LIT:2>), -angle * <NUM_LIT> / np.pi, <NUM_LIT:1> / scale)<EOL>im = cv2.warpAffine(im, M, (cols, rows),<EOL>borderMode=cv2.BORDER_CONSTANT,<EOL>flags=interp,<EOL>borderValue=borderValue) <EOL>return im<EOL>
Rotates and scales the image Parameters ---------- im: 2d array The image angle: number The angle, in radians, to rotate scale: positive number The scale factor borderValue: number, default 0 The value for the pixels outside the border Returns ------- im: 2d array The rotated and scaled image Notes ----- The output image has the same size as the input. Therefore the image may be cropped in the process.
f2052:m9
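An illustrative call; the bar image is made up for the example. Note the warp matrix is built with 1/scale, so scale > 1 shrinks features in the output.

import numpy as np

im = np.zeros((100, 100), dtype=np.float32)
im[45:55, 20:80] = 1.0                  # a horizontal bar
rot = rotate_scale(im, np.pi / 6, 1.0)  # rotate by 30 degrees about the centre
shrunk = rotate_scale(im, 0.0, 2.0)     # no rotation; the bar shrinks by 2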
def shift_image(im, shift, borderValue=<NUM_LIT:0>):
im = np.asarray(im, dtype=np.float32)<EOL>rows, cols = im.shape<EOL>M = np.asarray([[<NUM_LIT:1>, <NUM_LIT:0>, shift[<NUM_LIT:1>]], [<NUM_LIT:0>, <NUM_LIT:1>, shift[<NUM_LIT:0>]]], dtype=np.float32)<EOL>return cv2.warpAffine(im, M, (cols, rows),<EOL>borderMode=cv2.BORDER_CONSTANT,<EOL>flags=cv2.INTER_CUBIC,<EOL>borderValue=borderValue)<EOL>
Shift the image Parameters ---------- im: 2d array The image shift: 2 numbers (y, x) The shift in the y and x directions borderValue: number, default 0 The value for the pixels outside the border Returns ------- im: 2d array The shifted image Notes ----- The output image has the same size as the input. Therefore part of the image will be cropped in the process.
f2052:m10
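A small sketch; a single bright pixel makes the (y, x) convention visible.

import numpy as np

im = np.zeros((50, 50), dtype=np.float32)
im[25, 25] = 1.0
out = shift_image(im, (5, -3))
# the bright pixel should land near row 30, column 22 (y first, then x)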
def rotate_scale_shift(im, angle, scale, shift, borderValue=<NUM_LIT:0>):
im = np.asarray(im, dtype=np.float32)<EOL>rows, cols = im.shape<EOL>M = cv2.getRotationMatrix2D(<EOL>(cols / <NUM_LIT:2>, rows / <NUM_LIT:2>), -angle * <NUM_LIT> / np.pi, <NUM_LIT:1> / scale)<EOL>M[<NUM_LIT:0>, <NUM_LIT:2>] += shift[<NUM_LIT:1>]<EOL>M[<NUM_LIT:1>, <NUM_LIT:2>] += shift[<NUM_LIT:0>]<EOL>im = cv2.warpAffine(im, M, (cols, rows),<EOL>borderMode=cv2.BORDER_CONSTANT,<EOL>flags=cv2.INTER_CUBIC,<EOL>borderValue=borderValue) <EOL>return im<EOL>
Rotates, scales, and shifts the image Parameters ---------- im: 2d array The image angle: number The angle, in radians, to rotate scale: positive number The scale factor shift: 2 numbers (y, x) The shift in the y and x directions borderValue: number, default 0 The value for the pixels outside the border Returns ------- im: 2d array The rotated, scaled, and shifted image Notes ----- The output image has the same size as the input. Therefore the image may be cropped in the process.
f2052:m11
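Folding the shift into the rotation matrix resamples the image once instead of twice, which avoids compounding interpolation blur. A sketch of the equivalence:

import numpy as np

im = np.random.rand(64, 64).astype(np.float32)
a = rotate_scale_shift(im, np.pi / 4, 1.5, (2.0, -1.0))
b = shift_image(rotate_scale(im, np.pi / 4, 1.5), (2.0, -1.0))
# a and b should agree up to the extra interpolation pass in b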
def polar_fft(im, nangle=None, radiimax=None, *, isshiftdft=False,<EOL>truesize=None, logpolar=False, logoutput=False,<EOL>interpolation='<STR_LIT>'):
im = np.asarray(im, dtype=np.float32)<EOL>if not isshiftdft:<EOL><INDENT>truesize = im.shape<EOL>im = im - im.mean()<EOL>im = centered_mag_sq_ccs(dft_optsize(im))<EOL><DEDENT>assert truesize is not None<EOL>qshape = np.asarray([im.shape[<NUM_LIT:0>] // <NUM_LIT:2>, im.shape[<NUM_LIT:1>]])<EOL>center = np.asarray([qshape[<NUM_LIT:0>], <NUM_LIT:0>])<EOL>if nangle is None:<EOL><INDENT>nangle = np.min(truesize)<EOL>nangle -= <NUM_LIT:2><EOL><DEDENT>theta = np.linspace(-np.pi / <NUM_LIT:2>, np.pi / <NUM_LIT:2>, nangle,<EOL>endpoint=False, dtype=np.float32)<EOL>if radiimax is None:<EOL><INDENT>radiimax = qshape.min()<EOL><DEDENT>if logpolar:<EOL><INDENT>log_base = np.exp(np.log(radiimax) / radiimax)<EOL>radius = ((log_base ** np.arange(<NUM_LIT:0>, radiimax, dtype=np.float32))<EOL>/ radiimax)<EOL><DEDENT>else:<EOL><INDENT>radius = np.linspace(<NUM_LIT:0>, <NUM_LIT:1>, radiimax, endpoint=False,<EOL>dtype=np.float32)<EOL><DEDENT># sampling coordinates: y = r*sin(theta), x = r*cos(theta) about the centre<EOL>y = cv2.gemm(np.sin(theta), radius, qshape[<NUM_LIT:0>], <NUM_LIT:0>, <NUM_LIT:0>,<EOL>flags=cv2.GEMM_2_T) + center[<NUM_LIT:0>]<EOL>x = cv2.gemm(np.cos(theta), radius, qshape[<NUM_LIT:1>], <NUM_LIT:0>, <NUM_LIT:0>,<EOL>flags=cv2.GEMM_2_T) + center[<NUM_LIT:1>]<EOL>interp = cv2.INTER_LINEAR<EOL>if interpolation == '<STR_LIT>':<EOL><INDENT>interp = cv2.INTER_CUBIC<EOL><DEDENT>if interpolation == '<STR_LIT>':<EOL><INDENT>interp = cv2.INTER_NEAREST<EOL><DEDENT>output = cv2.remap(im, x, y, interp)<EOL>if logoutput:<EOL><INDENT>output = cv2.log(output)<EOL><DEDENT>if logpolar:<EOL><INDENT>return output, log_base<EOL><DEDENT>else:<EOL><INDENT>return output<EOL><DEDENT>
Return the DFT in polar (or log-polar) coordinates Parameters ---------- im: 2d array The image nangle: number, optional The number of angles in the polar representation radiimax: number, optional The number of radii in the polar representation isshiftdft: boolean, default False True if the image is preprocessed (DFT + fftshift) truesize: 2 numbers, required if isshiftdft is True The true size of the image logpolar: boolean, default False True for the log-polar representation instead of polar logoutput: boolean, default False True to take the log of the output interpolation: string, default 'bilinear' ('bicubic', 'bilinear', 'nearest') The interpolation technique (for now, avoid bicubic) Returns ------- im: 2d array The (log-)polar representation of the input image log_base: number, only if logpolar is True The base of the log for the log-polar representation Notes ----- radiimax is the maximal radius (log of radius if logpolar is True). If not provided, it is deduced from the image size. To get log-polar, set logpolar to True. log_base is deduced from radiimax, so two images that will be compared should have the same radiimax.
f2052:m12
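A usage sketch; note the return changes from an array to an (array, log_base) tuple when logpolar is True.

import numpy as np

im = np.random.rand(128, 128).astype(np.float32)
pol = polar_fft(im)                              # polar magnitude spectrum
logpol, log_base = polar_fft(im, logpolar=True)  # log-polar plus its log base

In log-polar coordinates a rotation and a scaling of the input become translations, which is why two images that are compared must share the same radiimax (and hence the same log base).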
def pad_img(im, pad):
im = np.asarray(im)<EOL>pad = np.asarray(pad)<EOL>shape = im.shape<EOL>offset = -pad[::<NUM_LIT:2>]<EOL>cut = pad < <NUM_LIT:0><EOL>if cut.any():<EOL><INDENT>cut *= pad<EOL>cut[::<NUM_LIT:2>] *= -<NUM_LIT:1><EOL>cut[<NUM_LIT:1>::<NUM_LIT:2>] += (cut[<NUM_LIT:1>::<NUM_LIT:2>] == <NUM_LIT:0>) * shape<EOL>im = im[cut[<NUM_LIT:0>]:cut[<NUM_LIT:1>], cut[<NUM_LIT:2>]:cut[<NUM_LIT:3>]]<EOL><DEDENT>ppad = pad > <NUM_LIT:0><EOL>if ppad.any():<EOL><INDENT>pad = pad * ppad<EOL>ypad = (pad[<NUM_LIT:0>], pad[<NUM_LIT:1>])<EOL>xpad = (pad[<NUM_LIT:2>], pad[<NUM_LIT:3>])<EOL>im = np.lib.pad(im, (ypad, xpad), mode='<STR_LIT>')<EOL><DEDENT>return im, offset<EOL>
Pad the image with zeros where pad is positive, or crop it where pad is negative Parameters ---------- im: 2d array The image pad: 4 numbers (ytop, ybottom, xleft, xright) or (imin, imax, jmin, jmax) The amount to pad (positive) or cut (negative) on each side Returns ------- im: 2d array The padded (or cropped) image offset: 2 numbers The offset relative to the input image Notes ----- This changes the size of the image
f2052:m13
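A sketch of both signs, assuming the masked pad mode is 'constant' (zeros):

import numpy as np

im = np.arange(25).reshape(5, 5)
grown, off0 = pad_img(im, (1, 1, 0, 0))   # one zero row above and below -> (7, 5)
cut, off1 = pad_img(im, (-1, -1, 0, 0))   # crop one row top and bottom -> (3, 5)
print(off0, off1)                          # offsets: [-1 0] and [1 0]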
def clamp_angle(angle):
return (angle + np.pi / <NUM_LIT:2>) % np.pi - np.pi / <NUM_LIT:2><EOL>
Return the angle clamped between -pi/2 and pi/2 (in the Fourier plane, a rotation by pi gives the same spectrum) Parameters ---------- angle: number The angle to be clamped Returns ------- angle: number The clamped angle
f2052:m14
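The wrap-around in one line each:

import numpy as np

clamp_angle(3 * np.pi / 4)  # -> -pi/4: rotating by pi gives the same spectrum
clamp_angle(-np.pi / 2)     # -> -pi/2, already in range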
def ccs_normalize(compIM, ccsnorm):
compIM = np.asarray(compIM)<EOL>ccsnorm = np.asarray(ccsnorm)<EOL>ys = ccsnorm.shape[<NUM_LIT:0>]<EOL>xs = ccsnorm.shape[<NUM_LIT:1>]<EOL># duplicate the norm values so both CCS components are divided by the same magnitude<EOL>ccsnorm[<NUM_LIT:2>::<NUM_LIT:2>, <NUM_LIT:0>] = ccsnorm[<NUM_LIT:1>:ys - <NUM_LIT:1>:<NUM_LIT:2>, <NUM_LIT:0>]<EOL>ccsnorm[:, <NUM_LIT:2>::<NUM_LIT:2>] = ccsnorm[:, <NUM_LIT:1>:xs - <NUM_LIT:1>:<NUM_LIT:2>]<EOL>if xs % <NUM_LIT:2> == <NUM_LIT:0>:<EOL><INDENT>ccsnorm[<NUM_LIT:2>::<NUM_LIT:2>, xs - <NUM_LIT:1>] = ccsnorm[<NUM_LIT:1>:ys - <NUM_LIT:1>:<NUM_LIT:2>, xs - <NUM_LIT:1>]<EOL><DEDENT># avoid division by zero<EOL>ccsnorm[ccsnorm == <NUM_LIT:0>] = np.nextafter(<NUM_LIT:0.>, <NUM_LIT:1.>, dtype=ccsnorm.dtype)<EOL>res = compIM / ccsnorm<EOL>return res<EOL>
Normalize the CCS representation Parameters ---------- compIM: 2d array The image in CCS representation ccsnorm: 2d array The normalization matrix in CCS representation Returns ------- compIM: 2d array The normalized CCS image Notes ----- (Basically an element-wise division for CCS representations.) Should probably not be used from outside
f2052:m15
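One plausible context, sketched under assumptions: this is the kind of helper phase correlation needs, where a CCS cross-power spectrum is divided by its element-wise magnitude. The pipeline below is an illustration, not the package's documented API (cv2.mulSpectrums and cv2.idft are real OpenCV calls, but their combination here is a guess):

import numpy as np
import cv2

im0 = np.random.rand(64, 64).astype(np.float32)
im1 = np.roll(im0, 3, axis=0)
f0, f1 = dft_optsize_same(im0, im1)
cross = cv2.mulSpectrums(f0, f1, flags=0, conjB=True)  # F0 * conj(F1), CCS packed
norm = np.sqrt(cv2.mulSpectrums(f0, f0, flags=0, conjB=True) *
               cv2.mulSpectrums(f1, f1, flags=0, conjB=True))
xc = cv2.idft(ccs_normalize(cross, norm), flags=cv2.DFT_REAL_OUTPUT)
# the location of the peak of xc encodes the translation between im0 and im1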