Python
def query(self, query_data, **kwargs):
    '''Queries the History Playground for the given terms in each of the
    datasets specified.
    @param query_data: n-gram query terms and the datasets to search against
    @return: List of dictionaries containing the time series
    '''
    if self.__auth_token is not None:
        session = requests.Session()
        headers = {
            'Authorization': 'Bearer ' + self.__auth_token,
            'Content-Type': 'application/json; charset=UTF-8'
        }
        payload = {
            'corpora': query_data[1],
            'terms': self.__annotate_terms(query_data),
            'display': [kwargs.pop('display', 'rank')],
            'lang': self.__expand(['english'], query_data[1]),
            'dateFormat': self.__expand(['YYYY'], query_data[1]),
            'interval': self.__expand(['1'], query_data[1]),
            'resolution': self.__expand(['years'], query_data[1]),
            'minDate': kwargs.pop('minDate', ''),
            'maxDate': kwargs.pop('maxDate', ''),
            'smooth': kwargs.pop('smooth', False),
            'confidence': kwargs.pop('confidence', False),
            'bestFit': kwargs.pop('bestfit', False),
            'detrend': kwargs.pop('detrend', False),
            'diff': kwargs.pop('diff', False),
            'zscore': kwargs.pop('standardize', False),
            'multiterm': kwargs.pop('multiterm', False),
            'changepoints': kwargs.pop('changepoints', False)
        }
        data = session.post(self._base_url + 'ngram', headers=headers,
                            data=json.dumps(payload))
        return data.json()
    else:
        print('Please login first.')
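A hypothetical calling sequence for the method above. It assumes a Playground class that wraps this query() method and a login() step that obtains the bearer token; the (terms, datasets) shape of query_data is only inferred from how it is indexed in the method body, so treat this strictly as a sketch, not the library's documented API.

# Hypothetical usage sketch -- Playground, login(), and the (terms, datasets)
# structure of query_data are assumptions, not taken from the source.
pg = Playground()
pg.login('user@example.com', 'password')   # obtains the bearer token used by query()
series = pg.query((['influenza', 'cholera'], ['bna', 'caa']),
                  display='rank', smooth=True, minDate='1800', maxDate='1900')
print(len(series))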
Python
def available_datasets(self):
    '''Returns a list of the available datasets that the user can query
    against to compute time series.
    @return: list of datasets which can be queried
    '''
    return ['bna', 'caa']
Python
def describe_dataset(self, dataset):
    '''Returns a human-readable description of the dataset
    @return: String describing the dataset
    '''
    switch = {
        'bna': 'British Newspaper Archive (bna).',
        'caa': 'Chronicling America Archive (caa).'
    }
    return switch.get(dataset, 'Invalid dataset')
Python
def plot_series(series, **kwargs):
    '''Plot a single series returned from the History Playground,
    e.g. plot_series(series[0]).'''
    if series is not None:
        display = kwargs.pop('display', 'rank')
        ylabel = __get_ylabel(display)
        dates = series[display].keys()
        values = series[display].values()
        fig = plt.plot(dates, values,
                       label=series['term'] + ' (' + series['corpus'] + ')')
        plt.xticks(rotation=45)
        plt.legend(loc='upper left')
        plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        plt.gca().set_xticklabels(__every_nth(dates, 10))
        plt.setp(fig, linewidth=2.0)
        plt.xlabel('Year')
        plt.ylabel(ylabel)
        plt.show()
Python
def plot_all_series(series, **kwargs):
    '''Plot all series returned from the History Playground.'''
    if series is not None:
        display = kwargs.pop('display', 'rank')
        ylabel = __get_ylabel(display)
        for s in series:
            dates = s[display].keys()
            values = s[display].values()
            fig = plt.plot(dates, values,
                           label=s['term'] + ' (' + s['corpus'] + ')')
            plt.setp(fig, linewidth=2.0)
        plt.xticks(rotation=45)
        plt.legend(loc='upper left')
        plt.ticklabel_format(style='sci', axis='y', scilimits=(0, 0))
        plt.gca().set_xticklabels(__every_nth(dates, 10))
        plt.xlabel('Year')
        plt.ylabel(ylabel)
        plt.show()
Python
def __get_ylabel(display):
    '''Set the y-axis label depending on the time series display type.'''
    ylabel = 'Work Rank Score'
    if display != 'rank':
        ylabel = 'Relative Frequency'
    return ylabel
Python
def send_message(self, chat_id, text, reply_markup=None, emojize=True):
    """Send message to telegram bot."""
    if emojize:
        text = emoji.emojize(text, use_aliases=True)
    self.bot.send_message(chat_id, text, reply_markup=reply_markup)
Python
import subprocess


def run_script(script, stdin=None):
    """Returns (stdout, stderr); raises ScriptException on a non-zero return code."""
    # Note: by using a list here (['bash', ...]) you avoid quoting issues, as the
    # arguments are passed in exactly this order (spaces, quotes, and newlines won't
    # cause problems):
    proc = subprocess.Popen(['bash', '-c', script],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            stdin=subprocess.PIPE)
    stdout, stderr = proc.communicate(stdin)   # forward the optional stdin payload
    if proc.returncode:
        print("script error")
        print(script)
        print(proc.returncode)
        raise ScriptException(proc.returncode, stdout, stderr, script)
    return stdout, stderr
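A quick, runnable check of run_script on a POSIX system with bash available; the script string itself is arbitrary.

# Example: run a short bash script and capture its output (POSIX system with bash assumed).
out, err = run_script('echo "hello from bash"; uname -s')
print(out.decode().strip())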
Python
import numpy as np
from collections import OrderedDict


def return_embeddings(embedding: str, vocabulary_size: int, embedding_dim: int,
                      worddicts: OrderedDict) -> np.ndarray:
    """Create an array of word embeddings from a whitespace-separated embedding file."""
    word_embeddings = np.zeros((vocabulary_size, embedding_dim))
    with open(embedding, 'r') as f:
        for line in f:
            words = line.split()
            word = words[0]
            vector = words[1:]
            len_vec = len(vector)
            if len_vec > embedding_dim:
                # The token itself contains whitespace; rejoin the leading fields
                # so that only the last embedding_dim fields are kept as the vector.
                diff = len_vec - embedding_dim
                word = ' '.join(words[:diff + 1])
                vector = vector[diff:]
            if word in worddicts and worddicts[word] < vocabulary_size:
                word_embeddings[worddicts[word], :] = [float(x) for x in vector]
    return word_embeddings
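A toy round-trip for the loader above: it writes a two-word, three-dimensional embedding file and reads it back. The file name and vocabulary are made up for illustration, and it assumes the version of the function that uses embedding_dim consistently.

# Toy example: tiny embedding file with two 3-d vectors (file name is arbitrary).
with open('toy_embeddings.txt', 'w') as f:
    f.write('cat 0.1 0.2 0.3\n')
    f.write('dog 0.4 0.5 0.6\n')

worddicts = OrderedDict([('cat', 0), ('dog', 1)])
emb = return_embeddings('toy_embeddings.txt', vocabulary_size=2,
                        embedding_dim=3, worddicts=worddicts)
print(emb.shape)               # (2, 3)
print(emb[worddicts['dog']])   # [0.4 0.5 0.6]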
Python
def checkpoint_valid(epoch: int):
    """Create a checkpoint for a validation run."""
    # 'model' and 'logger' are module-level globals in the original script
    model_out_path = "model_valid_{}.pth".format(epoch)
    torch.save(model, model_out_path)
    logger.debug("Checkpoint saved to {}".format(model_out_path))
Python
def export_mrsa_inputfile(export=False):
    """Select the top and bottom 10% of genes per component and export a csv file
    to run clusterProfiler in R."""
    tensor, matrix, _ = form_tensor()
    rna = import_rna()
    d = perform_CMTF(tensor, matrix)
    d = pd.DataFrame(d.mFactor)
    d.columns = range(1, 10)
    d.loc[:, 'ID'] = rna.index
    d = pd.melt(frame=d, id_vars='ID', value_vars=d.columns[:-1],
                var_name="Components", value_name="Weights")

    dfs = []
    for ii in set(d["Components"]):
        c = d[d["Components"] == ii]
        up = np.percentile(c["Weights"], 90)
        low = np.percentile(c["Weights"], 10)
        c = c[(c["Weights"] >= up) | (c["Weights"] <= low)]
        dfs.append(c)

    out = pd.concat(dfs)
    if export:
        out.to_csv(path + "MRSA_gsea_input_ENSEMBL.csv")
    return out
Python
def translate_geneIDs(toID="entrezgene", export=False):
    """Translate gene accessions. In this case to ENTREZID by default."""
    d = export_mrsa_inputfile()
    mg = mygene.MyGeneInfo()
    gg = mg.getgenes(d["ID"], fields=toID, as_dataframe=True)
    d[str(toID)] = gg[toID].values
    out = d.dropna()
    if export:
        out.to_csv(path + "MRSA_gsea_input_ENTREZ.csv")
    return out[[toID, "Components"]]
Python
def export_gmt_files(files):
    """To convert the output .csv files to .gmt:
    1. Manually remove the index column and the column labels.
    2. Save the file as .txt.
    3. Change the suffix to .gmt.
    """
    path = "tfac/data/gsea_libraries/"
    for f in files:
        translate_gene_sets(pd.read_csv(path + f + ".csv", header=None), path, f)
Python
def plot_results(weights):
    """Plots model coefficients for each component in CMTF factorization.

    Parameters:
        weights (numpy.array): model weights associated with each component

    Returns:
        fig (matplotlib.Figure): bar plot depicting model coefficients for
            each CMTF component
    """
    fig_size = (4, 4)
    layout = {
        'nrows': 1,
        'ncols': 1
    }
    axs, fig, _ = getSetup(fig_size, layout)

    axs[0].bar(range(1, len(weights) + 1), weights)
    axs[0].set_xlabel('Component', fontsize=12)
    axs[0].set_ylabel('Model Coefficient', fontsize=12)
    axs[0].set_xticks(np.arange(1, len(weights) + 1))
    axs[0].set_xticklabels(np.arange(1, len(weights) + 1))

    return fig
Python
def plot_results(accuracies):
    """Plots model accuracy relative to different data sources.

    Parameters:
        accuracies (pandas.Series): model accuracy w/r to data types

    Returns:
        fig (matplotlib.Figure): bar plot depicting model accuracy w/r to data types
    """
    fig_size = (4, 4)
    layout = {
        'ncols': 1,
        'nrows': 1
    }
    axs, fig, _ = getSetup(fig_size, layout)

    labels = [re.sub(r'\d', '/', d_type[1:]) for d_type in accuracies.index]
    axs[0].bar(range(len(accuracies)), accuracies)
    axs[0].set_xticks(range(len(accuracies)))
    axs[0].set_xticklabels(labels, rotation=45, ha='right', fontsize=10)
    axs[0].set_ylabel('Mean Accuracy', fontsize=12)
    axs[0].set_xlabel('Datatypes Available', fontsize=12)

    return fig
Python
def makeFigure():
    """Get a list of the axis objects and create a figure."""
    # Get list of axis objects
    fig_size = (9, 3)
    layout = {
        'ncols': 3,
        'nrows': 1
    }
    ax, f, _ = getSetup(fig_size, layout)
    rep = 10
    comps = np.arange(1, 11)

    try:
        chords_df = pd.read_csv('tfac/data/fig3_chords_df.csv')
    except FileNotFoundError:
        print("Building chords...")
        # Imputing chords dataframe
        chords_df = pd.concat(
            [pd.DataFrame({'Components': comps,
                           'R2X': evaluate_missing(comps, 15, chords=True)[0]})
             for _ in range(rep)],
            axis=0
        )
        chords_df.to_csv('tfac/data/fig3_chords_df.csv', index=False)
    chords_df = chords_df.groupby('Components').agg({'R2X': ['mean', 'sem']})

    try:
        single_df = pd.read_csv('tfac/data/fig3_single_df.csv')
    except FileNotFoundError:
        print("Building singles...")
        # Single imputations dataframe
        single_df = pd.concat(
            [pd.DataFrame(np.vstack((evaluate_missing(comps, 15, chords=False)[0:2],
                                     comps)).T,
                          columns=['CMTF', 'PCA', 'Components'])
             for _ in range(rep)],
            axis=0
        )
        single_df.to_csv('tfac/data/fig3_single_df.csv', index=False)
    single_df = single_df.groupby(['Components']).agg(['mean', 'sem'])

    Q2Xchord = chords_df['R2X']['mean']
    Q2Xerrors = chords_df['R2X']['sem']
    ax[0].scatter(comps, Q2Xchord, s=10)
    ax[0].errorbar(comps, Q2Xchord, yerr=Q2Xerrors, fmt='none')
    ax[0].set_ylabel("Q2X of Imputation")
    ax[0].set_xlabel("Number of Components")
    ax[0].set_xticks([x for x in comps])
    ax[0].set_xticklabels([x for x in comps])
    ax[0].set_ylim(0, 1)

    CMTFR2X = single_df['CMTF']['mean']
    CMTFErr = single_df['CMTF']['sem']
    PCAR2X = single_df['PCA']['mean']
    PCAErr = single_df['PCA']['sem']
    ax[1].plot(comps - 0.1, CMTFR2X, ".", label="CMTF")
    ax[1].plot(comps + 0.1, PCAR2X, ".", label="PCA")
    ax[1].errorbar(comps - 0.1, CMTFR2X, yerr=CMTFErr, fmt='none', ecolor='b')
    ax[1].errorbar(comps + 0.1, PCAR2X, yerr=PCAErr, fmt='none', ecolor='darkorange')
    ax[1].set_ylabel("Q2X of Imputation")
    ax[1].set_xlabel("Number of Components")
    ax[1].set_xticks([x for x in comps])
    ax[1].set_xticklabels([x for x in comps])
    ax[1].set_ylim(0, 1)
    ax[1].legend(loc=4)

    # Add subplot labels
    subplotLabel(ax)

    return f
Python
def makeFigure():
    """Get a list of the axis objects and create a figure."""
    # Get list of axis objects
    fig_size = (9, 3)
    layout = {
        'ncols': 3,
        'nrows': 1
    }
    ax, f, _ = getSetup(fig_size, layout)

    comps = np.arange(1, 12)
    CMTFR2X = np.zeros(comps.shape)
    PCAR2X = np.zeros(comps.shape)
    sizeTfac = np.zeros(comps.shape)

    tOrig, mOrig, _ = form_tensor()
    tMat = flatten_to_mat(tOrig, mOrig)
    sizePCA = comps * np.sum(tMat.shape)

    for i, cc in enumerate(comps):
        outt = PCA(tMat, ncomp=cc, missing="fill-em", standardize=False,
                   demean=False, normalize=False)
        recon = outt.scores @ outt.loadings.T
        PCAR2X[i] = calcR2X(recon, mIn=tMat)
        tFac = perform_CMTF(tOrig, mOrig, r=cc)
        CMTFR2X[i] = tFac.R2X
        sizeTfac[i] = tensor_degFreedom(tFac)

    ax[0].scatter(comps, CMTFR2X, s=10)
    ax[0].set_ylabel("CMTF R2X")
    ax[0].set_xlabel("Number of Components")
    ax[0].set_xticks([x for x in comps])
    ax[0].set_xticklabels([x for x in comps])
    ax[0].set_ylim(0, 1)
    ax[0].set_xlim(0.5, np.amax(comps) + 0.5)

    ax[1].set_xscale("log", base=2)
    ax[1].plot(sizeTfac, 1.0 - CMTFR2X, ".", label="CMTF")
    ax[1].plot(sizePCA, 1.0 - PCAR2X, ".", label="PCA")
    ax[1].set_ylabel("Normalized Unexplained Variance")
    ax[1].set_xlabel("Size of Reduced Data")
    ax[1].set_ylim(bottom=0.0)
    ax[1].set_xlim(2 ** 8, 2 ** 12)
    ax[1].xaxis.set_major_formatter(ScalarFormatter())
    ax[1].legend()

    # Add subplot labels
    subplotLabel(ax)

    return f
Python
def bootstrap_weights():
    """Bootstraps model coefficients associated with each CMTF component.

    Parameters:
        None

    Returns:
        weights (pandas.DataFrame): mean and StD of component weights w/r to
            prediction targets
    """
    tensor, matrix, patient_data = form_tensor()
    patient_data = patient_data.reset_index(drop=True)
    patient_data = patient_data.loc[patient_data['status'] != 'Unknown']

    components = perform_CMTF(tensor, matrix)
    components = components[1][0]
    components = components[patient_data.index, :]

    stats = ['Mean', 'StD']
    index = pd.MultiIndex.from_product([TARGETS, stats])
    weights = pd.DataFrame(
        index=index,
        columns=list(range(1, components.shape[1] + 1))
    )

    for target in TARGETS:
        coef = []
        for sample in range(N_BOOTSTRAP):
            data, labels = resample(components, patient_data.loc[:, target])
            if target == 'age':
                _, _coef = predict_regression(data, labels, return_coef=True)
            else:
                _, _, _coef = run_model(data, labels, return_coef=True)
            coef.append(_coef)

        coef = scale(coef, axis=1)
        weights.loc[(target, 'Mean'), :] = np.mean(coef, axis=0)
        weights.loc[(target, 'StD'), :] = np.std(coef, axis=0, ddof=1)

    return weights
Python
def tfac_setup():
    """Import cytokine data and correlate tfac components to cytokines and
    data sources.

    Parameters:
        None

    Returns:
        subjects (pandas.DataFrame): patient correlations to tfac components
        cytos (pandas.DataFrame): cytokine correlations to tfac components
        source (pandas.DataFrame): cytokine source correlations to tfac components
        pat_info (pandas.DataFrame): patient meta-data
    """
    tensor, matrix, pat_info = form_tensor()
    plasma, _ = import_cytokines()
    cytokines = plasma.index

    pat_info.loc[:, 'sorted'] = range(pat_info.shape[0])
    pat_info = pat_info.sort_values(['cohort', 'type', 'status'])
    sort_idx = pat_info.loc[:, 'sorted']
    pat_info = pat_info.drop('sorted', axis=1)
    pat_info = pat_info.T

    factors = perform_CMTF(tensor, matrix)
    col_names = [f"Cmp. {i}" for i in np.arange(1, factors.rank + 1)]
    subjects = pd.DataFrame(
        factors.factors[0][sort_idx, :],
        columns=col_names,
        index=[str(x) for x in pat_info.columns]
    )
    cytos = pd.DataFrame(
        factors.factors[1],
        columns=col_names,
        index=cytokines
    )
    source = pd.DataFrame(
        factors.factors[2],
        columns=col_names,
        index=["Serum", "Plasma"]
    )

    return subjects, cytos, source, pat_info
Python
def export_results(train_samples, validation_samples):
    """Reformats prediction DataFrames and saves them as CSV-formatted .txt files.

    Parameters:
        train_samples (pandas.Series): predictions for training samples
        validation_samples (pandas.Series): predictions for validation samples

    Returns:
        None
    """
    validation_samples = validation_samples.astype(str)
    train_samples = train_samples.astype(str)

    validation_samples = validation_samples.replace('0', 'ARMB')
    validation_samples = validation_samples.replace('1', 'APMB')
    train_samples = train_samples.replace('0', 'ARMB')
    train_samples = train_samples.replace('1', 'APMB')

    validation_samples.to_csv(
        join(PATH_HERE, '..', 'output', 'validation_predictions.txt')
    )
    train_samples.to_csv(
        join(PATH_HERE, '..', 'output', 'train_predictions.txt')
    )
Python
def run_unknown(data_types, patient_data):
    """Predicts samples with unknown outcomes.

    Parameters:
        data_types (list[tuple]): data sources to predict
        patient_data (pandas.DataFrame): patient metadata

    Returns:
        predictions (pandas.DataFrame): predictions for each data source
    """
    predictions = pd.DataFrame(
        index=patient_data.index
    )
    predictions = predictions.loc[patient_data['status'] == 'Unknown']

    for data_type in data_types:
        source = data_type[0]
        data = data_type[1]
        labels = patient_data.loc[data.index, 'status']
        _predictions = predict_validation(data, labels)
        predictions.loc[_predictions.index, source] = _predictions

    return predictions
Python
def run_cv(data_types, patient_data):
    """Predicts samples with known outcomes via cross-validation.

    Parameters:
        data_types (list[tuple]): data sources to predict
        patient_data (pandas.DataFrame): patient metadata

    Returns:
        predictions (pandas.DataFrame): predictions for each data source
    """
    predictions = pd.DataFrame(
        index=patient_data.index
    )

    for data_type in data_types:
        source = data_type[0]
        data = data_type[1]
        labels = patient_data.loc[data.index, 'status']
        _predictions = predict_known(data, labels)
        predictions.loc[_predictions.index, source] = _predictions

    predictions.loc[:, 'Actual'] = patient_data.loc[:, 'status']
    return predictions
Python
def predict_validation(data, labels, predict_proba=False, return_coef=False):
    """Trains a LogisticRegressionCV model using samples with known outcomes,
    then predicts samples with unknown outcomes.

    Parameters:
        data (pandas.DataFrame): data to classify
        labels (pandas.Series): labels for samples in data
        predict_proba (bool, default: False): predict probability of positive case
        return_coef (bool, default: False): return model coefficients

    Returns:
        predictions (pandas.Series): predictions for samples with unknown outcomes
    """
    validation_data = import_validation_patient_metadata()
    # Use a list rather than a set: pandas indexers do not accept sets
    validation_samples = list(set(validation_data.index) & set(labels.index))

    train_labels = labels.drop(validation_samples)
    test_labels = labels.loc[validation_samples]

    if isinstance(data, pd.Series):
        train_data = data.loc[train_labels.index]
        test_data = data.loc[test_labels.index]
    else:
        train_data = data.loc[train_labels.index, :]
        test_data = data.loc[test_labels.index, :]

    _, model = run_model(train_data, train_labels)

    if isinstance(data, pd.Series):
        train_data = train_data.values.reshape(-1, 1)
        test_data = test_data.values.reshape(-1, 1)

    model.fit(train_data, train_labels)

    if predict_proba:
        predicted = model.predict_proba(test_data)
        predicted = predicted[:, -1]
    else:
        predicted = model.predict(test_data)

    predictions = pd.Series(predicted)
    predictions.index = test_labels.index

    if return_coef:
        return predictions, model.coef_[0]
    else:
        return predictions
Python
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import StratifiedKFold, cross_val_predict


def predict_regression(data, labels, return_coef=False):
    """Predicts value for all samples in data via cross-validation.

    Parameters:
        data (pandas.DataFrame): data to classify
        labels (pandas.Series): labels for samples in data
        return_coef (bool, default: False): return model coefficients

    Returns:
        predictions (pandas.Series): predictions for samples
    """
    model = LinearRegression()
    skf = StratifiedKFold(
        n_splits=5,
        shuffle=True,
        random_state=42
    )

    if isinstance(data, pd.Series):
        data = data.values.reshape(-1, 1)

    predictions = cross_val_predict(
        model,
        data,
        labels,
        cv=skf,
        n_jobs=-1
    )
    if len(predictions.shape) > 1:
        predictions = predictions[:, -1]

    predictions = pd.Series(
        predictions,
        index=labels.index
    )

    if return_coef:
        model.fit(data, labels)
        return predictions, model.coef_
    else:
        return predictions
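A self-contained smoke test for predict_regression with synthetic data. The ages are restricted to three repeated values so the StratifiedKFold splitter used inside has enough members per class; feature and label names are invented.

# Synthetic check: 30 samples, 3 features, ages drawn from {25, 35, 45}.
import numpy as np
import pandas as pd

rng = np.random.default_rng(0)
data = pd.DataFrame(rng.normal(size=(30, 3)), columns=['c1', 'c2', 'c3'])
labels = pd.Series(np.tile([25, 35, 45], 10), index=data.index, name='age')

preds = predict_regression(data, labels)
print(preds.head())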
Python
def run_model(data, labels, return_coef=False):
    """Fits a LogisticRegressionCV model with the provided data and labels.

    Parameters:
        data (pandas.DataFrame): DataFrame of CMTF components
        labels (pandas.Series): Labels for provided data
        return_coef (bool, default: False): return model coefficients

    Returns:
        score (float): Accuracy for best-performing model (considers l1-ratio and C)
        model (sklearn.LogisticRegression): unfitted model configured with the
            best-performing hyperparameters
    """
    skf = RepeatedStratifiedKFold(
        n_splits=10,
        n_repeats=15
    )

    if isinstance(labels, pd.Series):
        labels = labels.reset_index(drop=True)
    else:
        labels = pd.Series(labels)

    labels = labels[labels != 'Unknown']
    if isinstance(data, pd.Series):
        data = data.iloc[labels.index]
        data = data.values.reshape(-1, 1)
    elif isinstance(data, pd.DataFrame):
        data = data.iloc[labels.index, :]
    else:
        data = data[labels.index, :]

    model = LogisticRegressionCV(
        l1_ratios=[0.8],
        solver="saga",
        penalty="elasticnet",
        n_jobs=-1,
        cv=skf,
        max_iter=100000,
        scoring='balanced_accuracy',
        multi_class='ovr'
    )
    model.fit(data, labels)

    coef = None
    if return_coef:
        coef = model.coef_[0]

    scores = np.mean(list(model.scores_.values())[0], axis=0)
    model = LogisticRegression(
        C=model.C_[0],
        l1_ratio=model.l1_ratio_[0],
        solver="saga",
        penalty="elasticnet",
        n_jobs=-1,
        max_iter=100000,
    )

    if return_coef:
        return np.max(scores), model, coef
    else:
        return np.max(scores), model
Python
def evaluate_accuracy(data):
    """Evaluates the model's accuracy for a given subject factors matrix.

    Parameters:
        data: subject factors matrix

    Returns:
        score (float): model accuracy
    """
    _, _, patient_data = form_tensor()
    labels = patient_data.loc[:, 'status']

    score, _ = run_model(data, labels)
    return score
Python
import numpy as np


def flatten_to_mat(tensor, matrix=None):
    """Flatten a tensor (and, optionally, a side matrix) into a single matrix,
    dropping flattened columns that are entirely NaN."""
    n = tensor.shape[0]
    tMat = np.reshape(tensor, (n, -1))
    tMat = tMat[:, ~np.all(np.isnan(tMat), axis=0)]
    if matrix is not None:
        tMat = np.hstack((tMat, matrix))
    return tMat
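A small numeric check for the helper above, using a random tensor with one deliberately all-NaN flattened column and a side matrix.

# A (4, 3, 2) tensor flattens to 6 columns; one all-NaN column is dropped,
# then a (4, 5) matrix is appended -> (4, 10).
rng = np.random.default_rng(1)
tensor = rng.random((4, 3, 2))
tensor[:, 0, 0] = np.nan
matrix = rng.random((4, 5))

flat = flatten_to_mat(tensor, matrix)
print(flat.shape)   # (4, 10)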
Python
def gen_missing(cube, missing_num, emin=6):
    """Generate a cube with missing values."""
    choose_cube = np.isfinite(cube)
    fill_cube = np.zeros_like(cube, dtype=int)

    # Generate a bare minimum cube:
    # fill each individual with emin elements
    for ii in range(cube.shape[0]):
        idxs = np.argwhere(choose_cube[ii, :])
        if len(idxs) <= 0:
            continue
        jk = idxs[np.random.choice(idxs.shape[0], emin, replace=False)]
        if cube.ndim == 3:
            fill_cube[ii, jk[:, 0], jk[:, 1]] = 1
            choose_cube[ii, jk[:, 0], jk[:, 1]] = 0
        elif cube.ndim == 2:
            fill_cube[ii, jk[:, 0]] = 1
            choose_cube[ii, jk[:, 0]] = 0

    # fill each non-empty chord with emin elements
    fill_feat = np.any(np.isfinite(cube), axis=0) * emin - np.sum(fill_cube, axis=0)
    for jk in np.argwhere(fill_feat > 0):
        idxs = np.argwhere(choose_cube[:, jk[0], jk[1]]) if cube.ndim == 3 \
            else np.argwhere(choose_cube[:, jk[0]])
        if len(idxs) <= 0:
            continue
        iis = idxs[np.random.choice(idxs.shape[0], fill_feat[jk[0], jk[1]],
                                    replace=False)]
        if cube.ndim == 3:
            fill_cube[iis, jk[0], jk[1]] = 1
            choose_cube[iis, jk[0], jk[1]] = 0
        elif cube.ndim == 2:
            fill_cube[iis, jk[0]] = 1
            choose_cube[iis, jk[0]] = 0
    assert np.all((np.sum(fill_cube, axis=0) >= emin) == np.any(np.isfinite(cube), axis=0))

    # fill up the rest to the missing nums
    to_fill = np.sum(np.isfinite(cube)) - missing_num - np.sum(fill_cube)
    assert to_fill <= np.sum(choose_cube)
    assert to_fill > 0
    idxs = np.argwhere(choose_cube)
    ijk = idxs[np.random.choice(idxs.shape[0], to_fill, replace=False)]
    if cube.ndim == 3:
        fill_cube[ijk[:, 0], ijk[:, 1], ijk[:, 2]] = 1
    elif cube.ndim == 2:
        fill_cube[ijk[:, 0], ijk[:, 1]] = 1

    gen_cube = np.copy(cube)
    gen_cube[fill_cube == 0] = np.nan
    return gen_cube
Python
def evaluate_missing(comps, numSample=15, chords=True):
    """Wrapper for chord loss or individual loss."""
    cube, glyCube, _ = form_tensor()

    if chords:
        missingCube = np.copy(cube)
        for _ in range(numSample):
            idxs = np.argwhere(np.isfinite(missingCube))
            i, j, k = idxs[np.random.choice(idxs.shape[0], 1)][0]
            missingCube[:, j, k] = np.nan
    else:
        missingCube = gen_missing(np.copy(cube), numSample)

    return impute_accuracy(missingCube, glyCube, comps, PCAcompare=(not chords))
Python
def Q2X(tImp):
    """Calculate Q2X. For average imputation purposes only.
    Relies on tIn, mIn, and impute_glyCube from the enclosing scope."""
    tMask = np.isfinite(tIn)
    mMask = np.isfinite(mIn)
    vTop = np.sum(np.square(tImp * tMask - np.nan_to_num(tIn))) + \
        np.sum(np.square(impute_glyCube * mMask - np.nan_to_num(mIn)))
    vBottom = np.sum(np.square(np.nan_to_num(tIn))) + np.sum(np.square(np.nan_to_num(mIn)))
    return 1.0 - vTop / vBottom
Python
def import_patient_metadata():
    """Returns patient meta data, including cohort and outcome.

    Returns:
        patient_data (pandas.DataFrame): Patient outcomes and cohorts
    """
    patient_data = pd.read_csv(
        join(PATH_HERE, 'tfac', 'data', 'mrsa', 'patient_metadata.txt'),
        delimiter=',',
        index_col=0
    )
    return patient_data
Python
def import_validation_patient_metadata():
    """Returns validation patient meta data, including cohort and outcome.

    Returns:
        patient_data (pandas.DataFrame): Validation patient outcomes and cohorts
    """
    patient_data = pd.read_csv(
        join(PATH_HERE, 'tfac', 'data', 'mrsa', 'validation_patient_metadata.txt'),
        delimiter=',',
        index_col=0
    )
    return patient_data
Python
def import_cytokines(scale_cyto=True):
    """Return plasma and serum cytokine data.

    Parameters:
        scale_cyto (bool, default: True): scale cytokine values

    Returns:
        plasma_cyto (pandas.DataFrame): plasma cytokine data
        serum_cyto (pandas.DataFrame): serum cytokine data
    """
    plasma_cyto = pd.read_csv(
        join(PATH_HERE, 'tfac', 'data', 'mrsa', 'plasma_cytokines.txt'),
        delimiter=',',
        index_col=0
    )
    serum_cyto = pd.read_csv(
        join(PATH_HERE, 'tfac', 'data', 'mrsa', 'serum_cytokines.txt'),
        delimiter=',',
        index_col=0
    )

    # Clip IL-12(p70) to a floor of 1.0 before any log-scaling
    plasma_cyto['IL-12(p70)'] = np.clip(plasma_cyto['IL-12(p70)'], 1.0, np.inf)
    serum_cyto['IL-12(p70)'] = np.clip(serum_cyto['IL-12(p70)'], 1.0, np.inf)

    if scale_cyto:
        plasma_cyto = scale_cytokines(plasma_cyto)
        serum_cyto = scale_cytokines(serum_cyto)

    return plasma_cyto.T, serum_cyto.T
Python
def scale_cytokines(cyto):
    """Scales provided cytokine data: performs a log-transform, then zero-mean
    centers each cytokine.

    Parameters:
        cyto (pandas.DataFrame): cytokine data

    Returns:
        cyto (pandas.DataFrame): scaled cytokine data
    """
    cyto = cyto.transform(np.log)
    cyto -= cyto.mean(axis=0)
    return cyto
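A toy demonstration of the scaling step; the cytokine names and values are made up, and all values are strictly positive since np.log is applied element-wise.

# After scaling, each cytokine column has (numerically) zero mean.
import numpy as np
import pandas as pd

cyto = pd.DataFrame({'IL-6': [1.0, 10.0, 100.0], 'TNF-a': [2.0, 4.0, 8.0]})
scaled = scale_cytokines(cyto)
print(scaled.mean(axis=0))   # ~0 for both columns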
Python
def add_missing_columns(data, patients):
    """Adds patients that do not appear in data as empty columns (all NaNs);
    removes any patients in data not present in patients.

    Parameters:
        data (pandas.DataFrame): cytokine/RNA data
        patients (iterable): patients that must appear in data

    Returns:
        data (pandas.DataFrame): cytokine/RNA data with missing columns added;
            sorted by patient numbers
    """
    # Remove patients who are missing outcome labels
    # (use lists, not sets: pandas indexers do not accept sets)
    shared = set(data.columns) & set(patients)
    data = data.loc[:, list(shared)]

    missing = set(patients).difference(data.columns)
    data = pd.concat(
        [
            data,
            pd.DataFrame(
                data=np.nan,
                index=data.index,
                columns=list(missing)
            )
        ],
        axis=1
    )

    data = data.sort_index(axis=1)
    return data
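A worked miniature of the column-completion step; the patient IDs are invented. 'P9' is dropped because it is not in the patient list, while 'P2' and 'P4' are added as all-NaN columns.

import numpy as np
import pandas as pd

data = pd.DataFrame(
    {'P1': [1.0, 2.0], 'P3': [3.0, 4.0], 'P9': [5.0, 6.0]},
    index=['IL-6', 'TNF-a']
)
patients = {'P1', 'P2', 'P3', 'P4'}

filled = add_missing_columns(data, patients)
print(list(filled.columns))        # ['P1', 'P2', 'P3', 'P4']
print(filled['P2'].isna().all())   # True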
Python
def form_tensor(variance_scaling: float = OPTIMAL_SCALING):
    """Forms a tensor of cytokine data and a matrix of RNA expression data for
    CMTF decomposition.

    Parameters:
        variance_scaling (float, default: OPTIMAL_SCALING): RNA/cytokine variance scaling

    Returns:
        tensor (numpy.array): tensor of cytokine data
        matrix (numpy.array): matrix of RNA expression data
        patient_data (pandas.DataFrame): patient data, including status, data types,
            and cohort
    """
    plasma_cyto, serum_cyto = import_cytokines()
    rna = import_rna()
    patient_data = import_patient_metadata()
    patients = set(patient_data.index)

    serum_cyto = add_missing_columns(serum_cyto, patients).to_numpy(dtype=float)
    plasma_cyto = add_missing_columns(plasma_cyto, patients).to_numpy(dtype=float)
    rna = add_missing_columns(rna, patients).to_numpy(dtype=float)

    tensor = np.stack(
        (serum_cyto, plasma_cyto)
    ).T

    # Normalize each data block by its overall (nan-aware) variance
    tensor /= np.nanvar(tensor)
    rna /= np.nanvar(rna)

    return np.copy(tensor * variance_scaling), np.copy(rna.T), patient_data
Python
def run_cv(data_types, patient_data):
    """Predicts samples with known outcomes via cross-validation.

    Parameters:
        data_types (list[tuple]): data sources to predict
        patient_data (pandas.DataFrame): patient metadata

    Returns:
        predictions (pandas.DataFrame): status predictions for each data source
        probabilities (pandas.DataFrame): predicted probabilities of persistence
        sex_predictions (pandas.DataFrame): sex predictions for each data source
        race_predictions (pandas.DataFrame): race predictions for each data source
    """
    predictions = pd.DataFrame(
        index=patient_data.index
    )
    sex_predictions = pd.DataFrame(
        index=patient_data.index
    )
    race_predictions = pd.DataFrame(
        index=patient_data.index
    )
    probabilities = predictions.copy()

    prediction_types = [
        predictions,
        sex_predictions,
        race_predictions
    ]
    columns = [
        'status',
        'gender',
        'race'
    ]

    for column, df in zip(columns, prediction_types):
        for data_type in data_types:
            source = data_type[0]
            data = data_type[1]
            labels = patient_data.loc[data.index, column]

            _predictions = predict_known(data, labels)
            if column == 'status':
                _probabilities = predict_known(
                    data,
                    labels,
                    method='predict_proba'
                )
                probabilities.loc[_probabilities.index, source] = _probabilities

            df.loc[_predictions.index, source] = _predictions
        df.loc[:, 'Actual'] = patient_data.loc[:, column]

    return predictions, probabilities, sex_predictions, race_predictions
Python
def run_age_regression(data, patient_data):
    """Predicts patient age from provided data.

    Parameters:
        data (pandas.DataFrame): data to predict age
        patient_data (pandas.DataFrame): patient metadata, including age

    Returns:
        age_predictions (pandas.DataFrame): predicted and actual ages
    """
    labels = patient_data.loc[:, 'age']
    age_predictions = predict_regression(data, labels)
    age_predictions.name = 'CMTF'
    age_predictions = pd.DataFrame(age_predictions)
    age_predictions.loc[:, 'Actual'] = labels.loc[age_predictions.index]

    return age_predictions
Python
def export_results(train_samples, train_probabilities, validation_samples,
                   validation_probabilities, sex_predictions, race_predictions,
                   age_predictions):
    """Reformats prediction DataFrames and saves as .txt.

    Parameters:
        train_samples (pandas.DataFrame): predictions for training samples
        train_probabilities (pandas.DataFrame): probabilities of persistence for
            training samples
        validation_samples (pandas.DataFrame): predictions for validation samples
        validation_probabilities (pandas.DataFrame): probabilities of persistence
            for validation samples
        sex_predictions (pandas.DataFrame): sex predictions for samples with known
            outcomes and sex
        race_predictions (pandas.DataFrame): race predictions for samples with known
            outcomes and race
        age_predictions (pandas.DataFrame): age predictions for samples with known
            outcomes and age

    Returns:
        None
    """
    train_samples = train_samples.astype(str)
    train_probabilities = train_probabilities.astype(str)
    train_samples = train_samples.replace('0', 'ARMB')
    train_samples = train_samples.replace('1', 'APMB')

    train_samples.to_csv(
        join(PATH_HERE, '..', 'output', 'train_predictions.txt')
    )
    train_probabilities.to_csv(
        join(PATH_HERE, '..', 'output', 'train_probabilities.txt')
    )
    validation_samples.to_csv(
        join(PATH_HERE, '..', 'output', 'validation_predictions.txt')
    )
    validation_probabilities.to_csv(
        join(PATH_HERE, '..', 'output', 'validation_probabilities.txt')
    )
    sex_predictions.to_csv(
        join(PATH_HERE, '..', 'output', 'sex_predictions.txt')
    )
    race_predictions.to_csv(
        join(PATH_HERE, '..', 'output', 'race_predictions.txt')
    )
    age_predictions.to_csv(
        join(PATH_HERE, '..', 'output', 'age_predictions.txt')
    )
Python
def baseline(status, oldState, newState): """ Baseline reward function, only assign 1 on GOAL """ info = {} reward = 0 kickable = newState[0][12] info['kickable'] = True if kickable == 1 else False if status == GOAL: reward = 1 return reward, info
Python
def goal_dist(status, oldState, newState, r):
    """ baseline (1 for GOAL) + a reward proportional to the distance-to-goal feature, scaled by r """
    info = {}
    reward = 0
    goal_dist = newState[0][15]
    kickable = newState[0][12]
    info['kickable'] = True if kickable == 1 else False
    if status == GOAL:
        reward += 1
    elif status == IN_GAME:
        # goal dist only affects the reward when the agent has the ball
        if kickable == 1:
            reward += goal_dist * r
    return reward, info
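For context, a minimal way to exercise the two reward functions above, assuming (as a labelled guess) that GOAL and IN_GAME are integer status constants from the HFO environment and that states are 1x68 low-level feature arrays:

import numpy as np

# Hypothetical stand-ins for the HFO status constants referenced above.
IN_GAME, GOAL = 0, 1

state = np.zeros((1, 68))
state[0, 12] = 1      # ball is kickable
state[0, 15] = 0.4    # distance-to-goal feature

print(baseline(GOAL, None, state))            # (1, {'kickable': True})
print(goal_dist(IN_GAME, None, state, 0.1))   # shaped reward while the agent holds the ball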
Python
def preprocessState(self, state):
    """As a baseline we don't do any preprocessing; just return the 68-d
    low-level feature vector reshaped to (1, -1).
    """
    # state at current timestep
    return np.reshape(state, (1, -1))
Python
def computePrediction(state, action, valueNetwork, device):
    """ A wrapper around the forward method of ValueNetwork.
    Returns: tensor holding the predicted value of the given action
    """
    assert len(state.shape) == 2  # state is a 2-D tensor
    assert action in (0, 1, 2, 3)  # action is an int in [0, 1, 2, 3]
    action_values = valueNetwork(state)
    return action_values[:, action]
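As a quick sanity check, the wrapper can be driven with any module that maps a (batch, state_dim) tensor to four action values; the nn.Linear below is a hypothetical stand-in for the project's ValueNetwork:

import torch
import torch.nn as nn

# Hypothetical stand-in for ValueNetwork: maps a (batch, 68) state to 4 action values.
value_network = nn.Linear(68, 4)

state = torch.randn(1, 68)                       # one preprocessed state vector
q_sa = computePrediction(state, 2, value_network, torch.device('cpu'))
print(q_sa.shape)                                # torch.Size([1])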
Python
def saveLog(log_data, filename):
    """ Save the log data to the given filename """
    with open(filename, 'wb') as f:
        pickle.dump(log_data, f)
Python
def ctc_beam_search(self, probs: torch.Tensor, beam_size: int = 100) -> List[Tuple[str, float]]:
    """
    Performs beam search and returns a list of pairs
    (hypothesis, hypothesis log-probability).
    """
    assert len(probs.shape) == 2
    char_length, voc_size = probs.shape
    assert voc_size == len(self.ind2char)
    if isinstance(probs, torch.Tensor):
        probs = probs.numpy()
    beams = self.ctc_decoder.decode_beams(probs, beam_width=beam_size, token_min_logp=-15.)
    hypos = []
    for beam in beams:
        hypos.append((
            beam[0],   # hypothesis text
            beam[-1]   # log score
        ))
    return hypos
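The decode_beams call above matches pyctcdecode's BeamSearchDecoderCTC API; a minimal standalone sketch of that underlying call, assuming (as a labelled guess) that self.ctc_decoder is such a decoder built over the same vocabulary:

import numpy as np
from pyctcdecode import build_ctcdecoder  # assumed decoder backing self.ctc_decoder

labels = ["", "a", "b", "c", " "]         # index 0 is the CTC blank
decoder = build_ctcdecoder(labels)

# (time, vocab) log-probabilities; uniform here just to have valid input.
log_probs = np.log(np.full((20, len(labels)), 1.0 / len(labels)))
beams = decoder.decode_beams(log_probs, beam_width=16)
print(beams[0][0], beams[0][-1])          # best hypothesis text and its score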
Python
def collate_fn(dataset_items: List[dict]): """ Collate and pad fields in dataset items """ result_batch = {} for key in ["duration", "text", "audio_path"]: result_batch[key] = [item[key] for item in dataset_items] for key in ["audio", "spectrogram", "text_encoded"]: vals = [] for item in dataset_items: val = item[key] if key == "spectrogram": val = val.transpose(1, 2) val = val.squeeze(0) vals.append(val) result_batch[key] = pad_sequence(vals, batch_first=True, padding_value=-1) result_batch[f"{key}_length"] = torch.IntTensor([item.size(0) for item in vals]) return result_batch
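To see the padding behaviour, the collate function can be fed toy items with the shapes it expects: audio (1, n_samples), spectrogram (1, n_mels, n_frames) and text_encoded (1, n_tokens). The imports below are the ones collate_fn itself relies on; the item values are illustrative.

import torch
from torch.nn.utils.rnn import pad_sequence  # used inside collate_fn above

items = [
    {"duration": 1.0, "text": "hi", "audio_path": "a.wav",
     "audio": torch.randn(1, 16000),
     "spectrogram": torch.randn(1, 80, 101),
     "text_encoded": torch.randint(0, 28, (1, 5))},
    {"duration": 0.5, "text": "yo", "audio_path": "b.wav",
     "audio": torch.randn(1, 8000),
     "spectrogram": torch.randn(1, 80, 51),
     "text_encoded": torch.randint(0, 28, (1, 3))},
]

batch = collate_fn(items)
print(batch["spectrogram"].shape)     # torch.Size([2, 101, 80]) after padding
print(batch["spectrogram_length"])    # tensor([101,  51], dtype=torch.int32)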
Python
def allow_request(self, request, view): """ Implement the check to see if the request should be throttled. On success calls `throttle_success`. On failure calls `throttle_failure`. """ if self.rate is None: return True self.key = self.get_cache_key(request, view) self.history = cache.get(self.key, []) self.now = self.timer() # Drop any requests from the history which have now passed the # throttle duration while self.history and self.history[-1] <= self.now - self.duration: self.history.pop() if len(self.history) >= self.num_requests: return self.throttle_failure(request, view) return self.throttle_success(request, view)
Python
def throttle_success(self, request, view): """ Inserts the current request's timestamp along with the key into the cache. """ self.history.insert(0, self.now) cache.set(self.key, self.history, self.duration) return True
Python
def throttle_failure(self, request, view): """ Called when a request to the API has failed due to throttling. """ if self.settings.THROTTLE_LOG: logger.warning("Request throttled.", extra={'request': request}) return False
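The three methods above implement a sliding-window rate limiter (newest timestamps at the front of the history, oldest dropped from the back). A self-contained sketch of the same idea, with a plain dict standing in for Django's cache; the class and names here are illustrative, not part of the codebase above:

import time

class SimpleRateLimiter:
    """Sliding-window limiter sketch; an in-memory dict replaces the cache backend."""

    def __init__(self, num_requests, duration):
        self.num_requests = num_requests      # e.g. 3 requests ...
        self.duration = duration              # ... per 60 seconds
        self._store = {}

    def allow_request(self, key):
        now = time.time()
        history = self._store.get(key, [])
        # Drop timestamps that have aged out of the window (oldest are last).
        while history and history[-1] <= now - self.duration:
            history.pop()
        if len(history) >= self.num_requests:
            return False                      # throttled
        history.insert(0, now)                # newest first, as in the class above
        self._store[key] = history
        return True

limiter = SimpleRateLimiter(num_requests=3, duration=60)
print([limiter.allow_request("1.2.3.4") for _ in range(5)])  # [True, True, True, False, False]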
Python
def allow_request(self, request, view): """Determine whether this is a whitelisted request.""" if app_settings.THROTTLE_APIKEY_LIST: key = request.META.get(app_settings.THROTTLE_APIKEY_HEADER.upper().replace('-', '_')) if not key: key = request.GET.get(app_settings.THROTTLE_APIKEY_PARAM) if key and key in app_settings.THROTTLE_APIKEY_LIST: return True self.ident = request.META.get( self.settings.THROTTLE_IP_HEADER, None) if self.ident in app_settings.THROTTLE_IP_WHITELIST: return True # Not whitelisted; continue checking by IP return super(AnonRateThrottle, self).allow_request(request, view)
Python
def polygon_to_multipolygon(geom):
    """
    Convert polygons to multipolygons so all features are
    homogeneous in the database.
    """
    if geom.__class__.__name__ == 'Polygon':
        g = OGRGeometry(OGRGeomType('MultiPolygon'))
        g.add(geom)
        return g
    elif geom.__class__.__name__ == 'MultiPolygon':
        return geom
    else:
        raise ValueError('Geom is neither Polygon nor MultiPolygon.')
Python
def temp_shapefile_from_zip(zip_path):
    """Given a path to a ZIP file, unpack it into a temp dir and return the path
       to the shapefile that was in there. Doesn't clean up after itself unless
       there was an error.

       If you want to clean up later, you can derive the temp dir from this path.
    """
    try:
        zf = ZipFile(zip_path)
    except BadZipfile as e:
        raise BadZipfile(str(e) + ": " + zip_path)

    tempdir = mkdtemp()
    shape_path = None
    # Copy the zipped files to a temporary directory, preserving names.
    for name in zf.namelist():
        if name.endswith("/"):
            continue
        data = zf.read(name)
        outfile = os.path.join(tempdir, os.path.basename(name))
        if name.endswith('.shp'):
            shape_path = outfile
        # zf.read() returns bytes, so the file must be written in binary mode.
        with open(outfile, 'wb') as f:
            f.write(data)

    return tempdir, shape_path
Python
async def delete_recipe_category( category: str, session: Session = Depends(generate_session) ): """Removes a recipe category from the database. Deleting a category does not impact a recipe. The category will be removed from any recipes that contain it""" db.categories.delete(session, category) return SnackResponse.error(f"Category Deleted: {category}")
Python
def import_nextcloud_directory( type: str, file_name: str, session: Session = Depends(generate_session) ): """ Imports all the recipes in a given directory """ file_path = MIGRATION_DIR.joinpath(type, file_name) if type == "nextcloud": return nextcloud_migrate(session, file_path) elif type == "chowdown": return chowdow_migrate(session, file_path) else: return SnackResponse.error("Incorrect Migration Type Selected")
Python
def delete_migration_data(type: str, file_name: str):
    """ Removes migration data from the file system """

    remove_path = MIGRATION_DIR.joinpath(type, file_name)

    if remove_path.is_file():
        remove_path.unlink()
    elif remove_path.is_dir():
        shutil.rmtree(remove_path)
    else:
        return SnackResponse.error("File/Folder not found.")

    return SnackResponse.error(f"Migration Data Removed: {remove_path.absolute()}")
Python
def upload_nextcloud_zipfile(type: str, archive: UploadFile = File(...)):
    """ Upload a .zip File to later be imported into Mealie """
    dir = MIGRATION_DIR.joinpath(type)
    dir.mkdir(parents=True, exist_ok=True)
    dest = dir.joinpath(archive.filename)

    with dest.open("wb") as buffer:
        shutil.copyfileobj(archive.file, buffer)

    if dest.is_file():
        return SnackResponse.success("Migration data uploaded")
    else:
        return SnackResponse.error("Failure uploading file")
Python
def delete(session: Session, recipe_slug: str) -> str: """ Removes the recipe from the database by slug """ delete_image(recipe_slug) db.recipes.delete(session, recipe_slug) return "Document Deleted"
Python
def update_image(session: Session, slug: str, extension: str = None) -> str: """A helper function to pass the new image name and extension into the database. Args: slug (str): The current recipe slug extension (str): the file extension of the new image """ return db.recipes.update_image(session, slug, extension)
Python
async def delete_recipe_tag(tag: str, session: Session = Depends(generate_session)): """Removes a recipe tag from the database. Deleting a tag does not impact a recipe. The tag will be removed from any recipes that contain it""" db.tags.delete(session, tag) return SnackResponse.error(f"Tag Deleted: {tag}")
Python
def update_webhook_schedule(): """ A scheduled background job that runs every 15 minutes to poll the database for changes and reschedule the webhook time """ session = create_session() settings = db.settings.get(session, "main") settings = SiteSettings(**settings) time = cron_parser(settings.webhooks.webhookTime) job = JOB_STORE.get("webhooks") scheduler.reschedule_job( job.scheduled_task.id, trigger="cron", hour=time.hours, minute=time.minutes, ) session.close() logger.info(scheduler.print_jobs())
Python
def create_theme(data: SiteTheme, session: Session = Depends(generate_session)): """ Creates a site color theme database entry """ db.themes.create(session, data.dict()) return SnackResponse.success("Theme Saved")
Python
def today(session: Session) -> str: """ Returns the meal slug for Today """ meal_plan = db.meals.get_all(session, limit=1, order_by="startDate") meal_docs = [Meal(**meal) for meal in meal_plan["meals"]] for meal in meal_docs: if meal.date == date.today(): return meal.slug return "No Meal Today"
Python
def filter_by_category(categories: list, session: Session = Depends(generate_session)): """ pass a list of categories and get a list of recipes associated with those categories """ #! This should be refactored into a single database call, but I couldn't figure it out in_category = [ db.categories.get(session, slugify(cat), limit=1) for cat in categories ] in_category = [cat.get("recipes") for cat in in_category] in_category = [item for sublist in in_category for item in sublist] return in_category
Python
async def filter_by_tags(tags: list, session: Session = Depends(generate_session)): """ pass a list of tags and get a list of recipes associated with those tags""" #! This should be refactored into a single database call, but I couldn't figure it out in_tags = [db.tags.get(session, slugify(tag), limit=1) for tag in tags] in_tags = [tag.get("recipes") for tag in in_tags] in_tags = [item for sublist in in_tags for item in sublist] return in_tags
Python
def clean(recipe_data: dict, url=None) -> dict:
    """Main entrypoint to clean a recipe extracted from the web
    and format the data into an acceptable format for the database

    Args:
        recipe_data (dict): raw recipe dictionary

    Returns:
        dict: cleaned recipe dictionary
    """
    recipe_data["totalTime"] = Cleaner.time(recipe_data.get("totalTime"))
    recipe_data["description"] = Cleaner.html(recipe_data.get("description", ""))
    recipe_data["prepTime"] = Cleaner.time(recipe_data.get("prepTime"))
    recipe_data["performTime"] = Cleaner.time(recipe_data.get("performTime"))
    recipe_data["recipeYield"] = Cleaner.yield_amount(
        recipe_data.get("recipeYield")
    )
    recipe_data["recipeIngredient"] = Cleaner.ingredient(
        recipe_data.get("recipeIngredient")
    )
    recipe_data["recipeInstructions"] = Cleaner.instructions(
        recipe_data["recipeInstructions"]
    )
    recipe_data["image"] = Cleaner.image(recipe_data["image"])
    recipe_data["slug"] = slugify(recipe_data["name"])
    recipe_data["orgURL"] = url

    return recipe_data
Python
def read_chowdown_file(recipe_file: Path) -> Recipe:
    """Parse through the yaml file to try and pull out the relevant information.
    Some issues occur when ":" are used in the text. I have not put a lot of effort
    into this, so there may be better ways of going about it. Currently, I get about
    80-90% of recipes from repos I've tried.

    Args:
        recipe_file (Path): Path to the .yml file

    Returns:
        Recipe: Recipe class object
    """
    with open(recipe_file, "r") as stream:
        recipe_description: str = ""
        recipe_data: dict = {}
        try:
            for x, item in enumerate(yaml.load_all(stream, Loader=Loader)):
                if x == 0:
                    recipe_data = item
                elif x == 1:
                    recipe_description = str(item)

        except yaml.YAMLError:
            return

        reformat_data = {
            "name": recipe_data.get("title"),
            "description": recipe_description,
            "image": recipe_data.get("image", ""),
            "recipeIngredient": recipe_data.get("ingredients"),
            "recipeInstructions": recipe_data.get("directions"),
            "tags": recipe_data.get("tags").split(","),
        }

        new_recipe = Recipe(**reformat_data)
        reformated_list = []

        for instruction in new_recipe.recipeInstructions:
            reformated_list.append({"text": instruction})

        new_recipe.recipeInstructions = reformated_list

        return new_recipe
Python
def update(
    self,
    session,
    name: str = None,
    description: str = None,
    image: str = None,
    recipeYield: str = None,
    recipeIngredient: List[str] = None,
    recipeInstructions: List[dict] = None,
    totalTime: str = None,
    prepTime: str = None,
    performTime: str = None,
    slug: str = None,
    categories: List[str] = None,
    tags: List[str] = None,
    dateAdded: datetime.date = None,
    notes: List[dict] = None,
    rating: int = None,
    orgURL: str = None,
    extras: dict = None,
):
    """Updates a database entry by removing nested rows and rebuilding the row
    through the __init__ function"""
    list_of_tables = [RecipeIngredient, RecipeInstruction, ApiExtras]
    RecipeModel._sql_remove_list(session, list_of_tables, self.id)

    self.__init__(
        session=session,
        name=name,
        description=description,
        image=image,
        recipeYield=recipeYield,
        recipeIngredient=recipeIngredient,
        recipeInstructions=recipeInstructions,
        totalTime=totalTime,
        prepTime=prepTime,
        performTime=performTime,
        slug=slug,
        categories=categories,
        tags=tags,
        dateAdded=dateAdded,
        notes=notes,
        rating=rating,
        orgURL=orgURL,
        extras=extras,
    )
Python
def create_from_json(data: Recipe, db: Session = Depends(generate_session)) -> str: """ Takes in a JSON string and loads data into the database as a new entry""" new_recipe_slug = data.save_to_db(db) return new_recipe_slug
Python
def parse_recipe_url(url: RecipeURLIn, db: Session = Depends(generate_session)): """ Takes in a URL and attempts to scrape data and load it into the database """ recipe = create_from_url(url.url) recipe.save_to_db(db) return recipe.slug
Python
def update_recipe( recipe_slug: str, data: Recipe, db: Session = Depends(generate_session) ): """ Updates a recipe by existing slug and data. """ new_slug = data.update(db, recipe_slug) return new_slug
Python
def update_recipe_image( recipe_slug: str, image: bytes = File(...), extension: str = Form(...), session: Session = Depends(generate_session), ): """ Removes an existing image and replaces it with the incoming file. """ response = write_image(recipe_slug, image, extension) Recipe.update_image(session, recipe_slug, extension) return response
Python
def _query_one(
    self, session: Session, match_value: str, match_key: str = None
) -> SqlAlchemyBase:
    """Query the sql database for one item and return the SQLAlchemy model
    object. If no match key is provided the primary_key attribute will be used.

    Args: \n
        match_value (str): The value to use in the query
        match_key (str, optional): the key/property to match against. Defaults to None.

    Returns:
        SqlAlchemyBase: the matching model instance
    """
    if match_key is None:
        match_key = self.primary_key

    result = (
        session.query(self.sql_model).filter_by(**{match_key: match_value}).one()
    )
    return result
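The dynamic filter_by(**{key: value}) pattern used above can be shown in isolation with a throwaway in-memory SQLite model; the Theme class and values below are hypothetical, not part of the codebase above:

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import Session, declarative_base

Base = declarative_base()

class Theme(Base):
    __tablename__ = "themes"
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True)

engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

with Session(engine) as session:
    session.add(Theme(name="dark"))
    session.commit()

    match_key, match_value = "name", "dark"
    result = session.query(Theme).filter_by(**{match_key: match_value}).one()
    print(result.id, result.name)    # 1 dark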
Python
def create(self, session: Session, document: dict) -> dict: """Creates a new database entry for the given SQL Alchemy Model. Args: \n session (Session): A Database Session document (dict): A python dictionary representing the data structure Returns: dict: A dictionary representation of the database entry """ new_document = self.sql_model(session=session, **document) session.add(new_document) return_data = new_document.dict() session.commit() return return_data
Python
def update_meal_plan( plan_id: str, meal_plan: MealPlan, session: Session = Depends(generate_session) ): """ Updates a meal plan based off ID """ meal_plan.process_meals(session) meal_plan.update(session, plan_id) return SnackResponse.info("Mealplan Updated")
Python
def delete_meal_plan(plan_id, session: Session = Depends(generate_session)): """ Removes a meal plan from the database """ MealPlan.delete(session, plan_id) return SnackResponse.error("Mealplan Deleted")
Python
def available_imports():
    """Returns a list of available .zip files for import into Mealie."""
    imports = []
    templates = []
    for archive in BACKUP_DIR.glob("*.zip"):
        backup = LocalBackup(name=archive.name, date=archive.stat().st_ctime)
        imports.append(backup)

    for template in TEMPLATE_DIR.glob("*.*"):
        templates.append(template.name)

    imports.sort(key=operator.attrgetter("date"), reverse=True)

    return Imports(imports=imports, templates=templates)
Python
def export_database(data: BackupJob, session: Session = Depends(generate_session)):
    """Generates a backup of the recipe database in json format."""
    try:
        export_path = backup_all(
            session=session,
            tag=data.tag,
            templates=data.templates,
            export_recipes=data.options.recipes,
            export_settings=data.options.settings,
            export_themes=data.options.themes,
        )
        return SnackResponse.success("Backup Created at " + export_path)
    except Exception:
        raise HTTPException(
            status_code=400,
            detail=SnackResponse.error("Error Creating Backup. See Log File"),
        )
Python
def upload_backup_zipfile(archive: UploadFile = File(...)):
    """ Upload a .zip File to later be imported into Mealie """
    dest = BACKUP_DIR.joinpath(archive.filename)

    with dest.open("wb") as buffer:
        shutil.copyfileobj(archive.file, buffer)

    if dest.is_file():
        return SnackResponse.success("Backup uploaded")
    else:
        return SnackResponse.error("Failure uploading file")
Python
async def upload_nextcloud_zipfile(file_name: str):
    """ Returns the requested backup .zip file so it can be downloaded """
    file = BACKUP_DIR.joinpath(file_name)

    if file.is_file():
        return FileResponse(
            file, media_type="application/octet-stream", filename=file_name
        )
    else:
        return SnackResponse.error("No File Found")
Python
def import_database( file_name: str, import_data: ImportJob, session: Session = Depends(generate_session) ): """ Import a database backup file generated from Mealie. """ import_db = ImportDatabase( session=session, zip_archive=import_data.name, import_recipes=import_data.recipes, force_import=import_data.force, rebase=import_data.rebase, import_settings=import_data.settings, import_themes=import_data.themes, ) imported = import_db.run() return imported
Python
def delete_backup(file_name: str):
    """ Removes a database backup from the file system """

    try:
        BACKUP_DIR.joinpath(file_name).unlink()
    except Exception:
        raise HTTPException(
            status_code=400,
            detail=SnackResponse.error("Unable to Delete Backup. See Log File"),
        )

    return SnackResponse.error(f"{file_name} Deleted")
Python
def load_feed_as_graph(feed: ptg.gtfs.feed,
                       start_time: int,
                       end_time: int,
                       name: str=None,
                       existing_graph: nx.MultiDiGraph=None,
                       connection_threshold: float=50.0,
                       walk_speed_kmph: float=4.5,
                       fallback_stop_cost: float=FALLBACK_STOP_COST_DEFAULT,
                       interpolate_times: bool=True,
                       impute_walk_transfers: bool=False,
                       use_multiprocessing: bool=False):
    """
    Convert a feed object into a NetworkX Graph, or connect to an existing
    NetworkX graph if one is supplied.

    Parameters
    ----------
    feed : partridge.feed
        A feed object from Partridge holding a representation of the
        desired schedule ids and their related schedule data from an
        operator GTFS
    start_time : int
        Represented in seconds after midnight; indicates the start time
        with which to take the subset of the target feed schedule
        to be used to measure impedance between stops along
        the route, as well as cost (wait time) to board at each stop
    end_time : int
        Represented in seconds after midnight; indicates the end time
        with which to take the subset of the target feed schedule
        to be used to measure impedance between stops along
        the route, as well as cost (wait time) to board at each stop
    name : str
        Name of the operator, which is used to create a unique ID for each
        of the stops, routes, etc. in the feed being supplied
    existing_graph : networkx.Graph
        An existing graph containing other operator or schedule data
    connection_threshold : float
        Threshold by which to create a connection with an existing stop
        in the existing_graph graph, measured in meters
    walk_speed_kmph : float
        Walk speed in km/h, that is used to determine the cost in time when
        walking between two nodes that get an internal connection created
    fallback_stop_cost : float
        Cost in seconds to board a line at a stop if no other data is able
        to be calculated from schedule data for that stop to determine
        what wait time is. Example of this situation would be when
        there is only one scheduled stop time found for the stop id.
    interpolate_times : bool
        A boolean flag to indicate whether or not to infill intermediary
        stops that do not have all intermediary stop arrival times specified
        in the GTFS schedule.
    impute_walk_transfers : bool
        A flag to indicate whether to add in walk connections between nodes
        that are close enough, as measured using connection_threshold
    use_multiprocessing : bool
        A flag to indicate whether or not to leverage multiprocessing where
        available to attempt to speed up trivially parallelizable operations.

    Returns
    -------
    G
        networkx.Graph, the loaded, combined representation of the schedule
        data from the feed subset by the time parameters provided
    """

    # Generate a random name for name if it is None
    if not name:
        name = generate_random_name()

    # Some sanity checking, to make sure only positive values are provided
    if (start_time < 0) or (end_time < 0):
        raise InvalidTimeBracket('Invalid start or end target times provided.')

    if end_time <= start_time:
        raise InvalidTimeBracket('Invalid ordering: Start time '
                                 'is greater than end time.')

    (summary_edge_costs,
     wait_times_by_stop) = generate_summary_graph_elements(feed,
                                                           start_time,
                                                           end_time,
                                                           fallback_stop_cost,
                                                           interpolate_times,
                                                           use_multiprocessing)

    # This is a flag used to check if we need to run any additional steps
    # after the feed is returned to ensure that new nodes and edges can connect
    # with existing ones (if they exist/a graph is passed in)
    existing_graph_supplied = bool(existing_graph)

    # G is either a new MultiDiGraph or one passed in from before
    if existing_graph_supplied:
        # TODO: If passed from before we should run some checks to ensure
        #       it is valid as well as set a flag to create join points with
        #       other feeds so that they can be linked when the next is added.
        G = existing_graph
    else:
        G = generate_empty_md_graph(name)

    return populate_graph(G,
                          name,
                          feed,
                          wait_times_by_stop,
                          summary_edge_costs,
                          connection_threshold,
                          walk_speed_kmph,
                          impute_walk_transfers)
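For context, a typical invocation through the public peartree API; the GTFS path and time window below are illustrative:

import peartree as pt

path = 'data/agency_gtfs.zip'                 # hypothetical GTFS archive
feed = pt.get_representative_feed(path)

start = 7 * 60 * 60                           # 7:00 AM, in seconds after midnight
end = 10 * 60 * 60                            # 10:00 AM
G = pt.load_feed_as_graph(feed, start, end)
print(len(G.nodes()), len(G.edges()))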
Python
def load_synthetic_network_as_graph(
        reference_geojson: Dict,
        name: str=None,
        existing_graph: nx.MultiDiGraph=None,
        connection_threshold: float=50.0,
        walk_speed_kmph: float=4.5,
        impute_walk_transfers: bool=True):
    """
    Convert a formatted transit FeatureCollection into a directed network graph.

    Utilizing a correctly formatted transit FeatureCollection, generate a
    directed network graph (or add to an existing one), based on the features
    included in the reference_geojson parameter.
    """

    # Generate a random name for name if it is None
    if not name:
        name = generate_random_name()

    # This is a flag used to check if we need to run any additional steps
    # after the feed is returned to ensure that new nodes and edges can connect
    # with existing ones (if they exist/a graph is passed in)
    existing_graph_supplied = bool(existing_graph)

    # G is either a new MultiDiGraph or one passed in from before
    if existing_graph_supplied:
        # TODO: If passed from before we should run some checks to ensure
        #       it is valid as well as set a flag to create join points with
        #       other feeds so that they can be linked when the next is added.
        G = existing_graph
    else:
        G = generate_empty_md_graph(name)

    # TODO: Refactor reference_geojson to become a class that includes
    #       validation on instantiation
    return make_synthetic_system_network(
        G,
        name,
        reference_geojson,
        connection_threshold,
        walk_speed_kmph,
        impute_walk_transfers)
Python
def save_df(df, path, compression='snappy', use_dictionary=True): """ Save a pandas DataFrame to a parquet file """ try: df.to_parquet(path, compression=compression, use_dictionary=use_dictionary) except Exception as e: print(e)
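A quick illustrative call; writing parquet requires an engine such as pyarrow to be installed, and the column names are made up for the example:

import pandas as pd

df = pd.DataFrame({"stop_id": ["a", "b"], "wait_s": [30.0, 45.0]})
save_df(df, "waits.parquet")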
Python
def random_walk_prevalence(mu,sigma,initial_prevalence=0.01,shape=None):
    '''Create a random walk prevalence process with drift mu and standard deviation sigma

    Parameters
    ----------
    mu : float
        drift of random walk in invlogit space
    sigma : float
        standard deviation of random walk in invlogit space
    initial_prevalence : float
        starting prevalence used to anchor the walk (default 0.01)
    shape : list
        shape of random walk in format (n_samples,n_months)

    Returns
    -------
    numpy array
        Samples of random walk process with shape (n_samples,n_months)
    '''
    # time-points and samples
    n_samples,n_months = shape

    # sample errors
    es = np.random.normal(loc=mu,scale=sigma,size=(n_samples,n_months))

    # create random walks with starting point equivalent to the initial prevalence
    ws = logit(initial_prevalence) + np.cumsum(es,axis=1)

    # convert random walk into probability
    ps = expit(ws)

    return ps
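A minimal usage sketch; the imports below are the helpers the function relies on (numpy plus scipy's logit/expit):

import numpy as np
from scipy.special import logit, expit

np.random.seed(0)
ps = random_walk_prevalence(mu=1.0, sigma=0.1, initial_prevalence=0.01, shape=(5, 12))
print(ps.shape)             # (5, 12): five sampled 12-month prevalence trajectories
print(ps.min(), ps.max())   # all values stay inside (0, 1)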
Python
def generate_data(N_mean = 10000, N_sd = 1000,
                  p0 = 0.01, mu_w = 1., sigma_w = 0.1,
                  kappa_F = 0.25, kappa_N = 0.02,
                  kappa_THN = 0.87, p_d = 0.1):
    '''
    Generate sample data for fentanyl adulterant model with Take-home naloxone kits

    Parameters
    ----------
    N_mean : float
        mean of population
    N_sd : float
        sd of population
    p0 : float
        initial probability of fentanyl in supply
    mu_w : float
        drift of fentanyl (logit space)
    sigma_w : float
        scale of fentanyl (logit space)
    kappa_F : float
        probability of overdose on fentanyl
    kappa_N : float
        probability of overdose not on fentanyl
    kappa_THN : float
        scaling factor converting kit coverage (kits distributed per person)
        into the probability that an overdose is reversed by a THN kit
    p_d : float
        probability of death following an overdose without intervention

    Returns
    -------
    Dictionary of observables for a single sample
    '''

    # simulate fentanyl in illicit drug supply
    pF = random_walk_prevalence(mu_w,sigma_w,initial_prevalence=p0,shape=(1,12))

    # calculate overdose rate
    od_rate = kappa_F*pF + kappa_N*(1-pF)

    # simulate a population size
    N = np.random.normal(loc=N_mean,scale=N_sd)

    # simulate kits distributed
    kits_distributed = np.cumsum(np.random.gamma(0.5,scale=300,size=(1,12)).round())

    # simulate kits used
    p_THN = kappa_THN*kits_distributed/N

    # death rate modified due to use of THN kits
    death_rate = (1. - p_THN)*p_d

    # simulate overdoses
    f_overdoses = np.random.binomial(N,kappa_F*pF)
    nf_overdoses = np.random.binomial(N,kappa_N*(1.-pF))
    overdoses = f_overdoses + nf_overdoses

    # simulate deaths
    f_deaths = np.random.binomial(f_overdoses,death_rate)
    nf_deaths = np.random.binomial(nf_overdoses,death_rate)
    deaths = f_deaths + nf_deaths

    # kits used
    kits_used = np.random.binomial(overdoses,p_THN)

    return {'overdoses': overdoses.flatten(),'probability fentanyl': pF.flatten(),
            'deaths': deaths.flatten(),'fentanyl deaths':f_deaths.flatten(),
            'kits distributed': kits_distributed.flatten(),
            'kits used':kits_used.flatten()}
Python
def save_generate_data(data,filename='./data/data_sample.csv'): ''' Save data to CSV file from output of generate_data Parameters ---------- data : dict output of method generate_data filename : str filename to save to Returns ------- None ''' #add time n_months=12 data['month'] = np.arange(1,n_months+1) df = pd.DataFrame(data) df[['month','overdoses','fentanyl deaths','deaths','kits distributed','kits used']].to_csv(filename,index=False)
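A hypothetical end-to-end use of the two helpers above; the './data' directory is assumed to exist:

sample = generate_data(N_mean=10000, p0=0.01, kappa_THN=0.87)
print(sample['deaths'])                       # 12 simulated monthly death counts
save_generate_data(sample, filename='./data/data_sample.csv')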
Python
def merge_mesh(x1, ngroups1, conn1, mat_ids1, x2, ngroups2, conn2, mat_ids2, cmap): """ Merge two meshes in common coordinates found in x1, x2. Notes ----- Assumes the same number and kind of element groups in both meshes! """ n1 = x1.shape[0] n2 = x2.shape[0] err = nm.sum(nm.sum(nm.abs(x1[cmap[:,0],:-1] - x2[cmap[:,1],:-1]))) if abs(err) > (10.0 * eps): raise ValueError('nonmatching meshes! (error: %e)' % err) mask = nm.ones((n2,), dtype=nm.int32) mask[cmap[:,1]] = 0 remap = nm.cumsum(mask) + n1 - 1 remap[cmap[:,1]] = cmap[:,0] i2 = nm.setdiff1d(nm.arange( n2, dtype=nm.int32), cmap[:,1]) xx = nm.r_[x1, x2[i2]] ngroups = nm.r_[ngroups1, ngroups2[i2]] conn = nm.vstack((conn1, remap[conn2])) mat_ids = None if (mat_ids1 is not None) and (mat_ids2 is not None): mat_ids = nm.concatenate((mat_ids1, mat_ids2)) return xx, ngroups, conn, mat_ids
Python
def fix_double_nodes(coor, ngroups, conns): """ Detect and attempt fixing double nodes in a mesh. The double nodes are nodes having the same coordinates w.r.t. precision given by `eps`. """ n_nod, dim = coor.shape cmap = find_map(coor, nm.zeros((0,dim)), allow_double=True) if cmap.size: output('double nodes in input mesh!') output('trying to fix...') while cmap.size: # Just like in Variable.equation_mapping()... ii = nm.argsort(cmap[:,1]) scmap = cmap[ii] eq = nm.arange(n_nod) eq[scmap[:,1]] = -1 eqi = eq[eq >= 0] eq[eqi] = nm.arange(eqi.shape[0]) remap = eq.copy() remap[scmap[:,1]] = eq[scmap[:,0]] output(coor.shape) coor = coor[eqi] ngroups = ngroups[eqi] output(coor.shape) ccs = [] for conn in conns: ccs.append(remap[conn]) conns = ccs cmap = find_map(coor, nm.zeros((0,dim)), allow_double=True) output('...done') return coor, ngroups, conns
Python
def make_mesh(coor, ngroups, conns, mesh_in): """Create a mesh reusing mat_ids and descs of mesh_in.""" mat_ids = [] for ii, conn in enumerate(conns): mat_id = nm.empty((conn.shape[0],), dtype=nm.int32) mat_id.fill(mesh_in.mat_ids[ii][0]) mat_ids.append(mat_id) mesh_out = Mesh.from_data('merged mesh', coor, ngroups, conns, mat_ids, mesh_in.descs) return mesh_out
Python
def from_file(filename=None, io='auto', prefix_dir=None, omit_facets=False):
    """
    Read a mesh from a file.

    Parameters
    ----------
    filename : string or function or MeshIO instance or Mesh instance
        The name of the file to read the mesh from. For convenience, a mesh
        creation function or a MeshIO instance or directly a Mesh instance
        can be passed in place of the file name.
    io : MeshIO instance or 'auto'
        Passing a MeshIO instance takes precedence over filename.
    prefix_dir : str
        If not None, the filename is relative to that directory.
    omit_facets : bool
        If True, do not read cells of lower dimension than the space
        dimension (faces and/or edges). Only some MeshIO subclasses
        support this!
    """
    if isinstance(filename, Mesh):
        return filename

    if io == 'auto':
        if filename is None:
            output('filename or io must be specified!')
            raise ValueError
        else:
            io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir)

    cell_types = ', '.join(supported_cell_types[io.format])
    output('reading mesh [%s] (%s)...' % (cell_types, io.filename))
    tt = time.perf_counter()  # time.clock() was removed in Python 3.8

    trunk = io.get_filename_trunk()
    mesh = Mesh(trunk)
    mesh = io.read(mesh, omit_facets=omit_facets)

    output('...done in %.2f s' % (time.perf_counter() - tt))

    mesh._set_shape_info()

    return mesh
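Typical usage, assuming this is the sfepy Mesh.from_file() classmethod; the import path and the mesh file name below are assumptions, not taken from the snippet itself.

from sfepy.discrete.fem import Mesh   # assumed import path

mesh = Mesh.from_file('meshes/3d/cylinder.mesh')   # assumed example file
print(mesh.n_nod, mesh.descs)   # node count and element type descriptors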
Python
def from_data(name, coors, ngroups, conns, mat_ids, descs, nodal_bcs=None): """ Create a mesh from mesh IO data. """ mesh = Mesh(name) mesh._set_io_data(coors=coors, ngroups=ngroups, conns=conns, mat_ids=mat_ids, descs=descs, nodal_bcs=nodal_bcs) mesh._set_shape_info() return mesh
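A small sketch of building a mesh directly from arrays, here a single 2D triangle; the import path and the '2_3' element descriptor are assumptions based on common sfepy conventions.

import numpy as nm
from sfepy.discrete.fem import Mesh   # assumed import path

coors = nm.array([[0.0, 0.0], [1.0, 0.0], [0.0, 1.0]])
conns = [nm.array([[0, 1, 2]], dtype=nm.int32)]   # one triangle
mat_ids = [nm.array([1], dtype=nm.int32)]
mesh = Mesh.from_data('tri', coors, None, conns, mat_ids, ['2_3'])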
Python
def _set_io_data(self, coors, ngroups, conns, mat_ids, descs, nodal_bcs=None):
    """
    Set mesh data.

    Parameters
    ----------
    coors : array
        Coordinates of mesh nodes.
    ngroups : array
        Node groups.
    conns : list of arrays
        The array of mesh elements (connectivities) for each element
        group.
    mat_ids : list of arrays
        The array of material ids for each element group.
    descs : list of strings
        The element type for each element group.
    nodal_bcs : dict of arrays, optional
        The nodes defining regions for boundary conditions referred
        to by the dict keys in problem description files.
    """
    ac = nm.ascontiguousarray
    coors = ac(coors, dtype=nm.float64)

    if ngroups is None:
        ngroups = nm.zeros((coors.shape[0],), dtype=nm.int32)

    self.descs = descs
    self.nodal_bcs = get_default(nodal_bcs, {})

    from sfepy.discrete.common.extmods.cmesh import CMesh
    self.cmesh = CMesh.from_data(coors, ac(ngroups),
                                 [ac(conn, dtype=nm.int32)
                                  for conn in conns],
                                 ac(nm.concatenate(mat_ids)),
                                 descs)
Python
def create_conn_graph(self, verbose=True):
    """
    Create a graph of mesh connectivity.

    Returns
    -------
    graph : csr_matrix
        The mesh connectivity graph as a SciPy CSR matrix.
    """
    from sfepy.discrete.common.extmods.cmesh import create_mesh_graph

    shape = (self.n_nod, self.n_nod)
    output('graph shape:', shape, verbose=verbose)
    if nm.prod(shape) == 0:
        output('no graph (zero size)!', verbose=verbose)
        return None

    output('assembling mesh graph...', verbose=verbose)
    tt = time.perf_counter()  # time.clock() was removed in Python 3.8

    conn = self.get_conn(self.descs[0])
    nnz, prow, icol = create_mesh_graph(shape[0], shape[1], 1, [conn], [conn])

    output('...done in %.2f s' % (time.perf_counter() - tt), verbose=verbose)
    output('graph nonzeros: %d (%.2e%% fill)'
           % (nnz, float(nnz) / nm.prod(shape)), verbose=verbose)

    data = nm.ones((nnz,), dtype=bool)  # the nm.bool alias was removed in NumPy >= 1.24
    graph = sp.csr_matrix((data, icol, prow), shape)

    return graph
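A short usage sketch, assuming mesh is a Mesh instance with a single element group (as required by self.descs[0] above), for instance the triangle mesh built in the from_data sketch.

graph = mesh.create_conn_graph(verbose=False)
if graph is not None:
    print(graph.shape, graph.nnz)   # node-to-node adjacency structure
    print(graph[0].indices)         # nodes connected to node 0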
Python
def process_options(options): """ Application options setup. Sets default values for missing non-compulsory options. """ get = options.get volume = get('volume', None) volumes = get('volumes', None) if volume is None and volumes is None: raise ValueError('missing "volume" in options!') return Struct(print_digits=get('print_digits', 3), float_format=get('float_format', '%8.3e'), coefs_filename=get('coefs_filename', 'coefs'), tex_names=get('tex_names', None), coefs=get('coefs', None, 'missing "coefs" in options!'), requirements=get('requirements', None, 'missing "requirements" in options!'), return_all=get('return_all', False), macro_deformation=get('macro_deformation', None), mesh_update_variable=get('mesh_update_variable', None), mesh_update_corrector=get('mesh_update_corrector', None), multiprocessing=get('multiprocessing', True), use_mpi=get('use_mpi', False), store_micro_idxs=get('store_micro_idxs', []), volume=volume, volumes=volumes)
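For orientation, a sketch of the kind of options dict this could consume in a homogenization problem description file; the key names mirror the defaults handled above, while the concrete values (the volume expression in particular) are illustrative only.

options = {
    'coefs': 'coefs',                 # name of the coefficients dict (assumed)
    'requirements': 'requirements',   # name of the requirements dict (assumed)
    'volume': {'expression': 'd_volume.5.Y(u)'},   # illustrative expression
    'float_format': '%.4e',
    'multiprocessing': False,
}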
Python
def update_micro_coors(self, ret_val=False):
    """
    Update the coordinates of the microstructures according to the
    macroscopic deformation gradient and the corrector functions.
    """
    dim = self.macro_deformation.shape[1]
    mtx_e = self.macro_deformation - nm.eye(dim)

    ncoors = self.micro_coors
    ncoors += la.dot_sequences(ncoors, mtx_e, 'ABT')

    if self.updating_corrs is not None:
        upd_var = self.app_options.mesh_update_variable
        for ii, corr in enumerate(self.updating_corrs):
            update_corr = nm.array(
                [corr.states[jj][upd_var] for jj in corr.components]).T
            gg = mtx_e[ii, ...].reshape((dim**2, 1))
            ncoors[ii] += nm.dot(update_corr, gg).reshape(ncoors[ii].shape)

    if ret_val:
        return ncoors
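A toy check of the kinematic update behind the first step above, x <- x + x (F - I)^T, for a single microstructure with plain NumPy; the deformation gradient and coordinates are made up, and la.dot_sequences performs this product in batch over all microstructures.

import numpy as np

F = np.array([[1.1, 0.0],
              [0.0, 0.9]])           # macroscopic deformation gradient (made up)
x = np.array([[0.0, 0.0],
              [1.0, 0.0],
              [0.0, 1.0]])           # microstructure node coordinates (made up)

x_new = x + x @ (F - np.eye(2)).T    # same role as dot_sequences(ncoors, mtx_e, 'ABT')
print(x_new)                         # -> [[0. 0.], [1.1 0.], [0. 0.9]]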
Python
def insert(self, word: str) -> None: """ Inserts a word into the trie. """ curr = self.root for letter in word: if letter not in curr.children: curr.children[letter] = TrieNode(letter) curr = curr.children[letter] curr.isWord = True
Python
def search(self, word: str) -> bool:
    """
    Returns True if the word is in the trie, False otherwise.
    """
    curr = self.root
    for letter in word:
        if letter not in curr.children:
            return False
        curr = curr.children[letter]
    return curr.isWord
Python
def startsWith(self, prefix: str) -> bool:
    """
    Returns True if there is any word in the trie that starts with the
    given prefix.
    """
    curr = self.root
    for letter in prefix:
        if letter not in curr.children:
            return False
        curr = curr.children[letter]
    # The prefix path exists; every node in this trie lies on the path of
    # at least one inserted word, so this check always succeeds here.
    if curr.children or curr.isWord:
        return True
    return False
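The three methods above reference a TrieNode class and self.root that are not shown. Below is a self-contained sketch of the assumed node layout (a children dict plus an isWord flag) together with a short usage run; the _walk helper is an addition of this sketch, not part of the original code.

class TrieNode:
    def __init__(self, letter=''):
        self.letter = letter
        self.children = {}      # letter -> TrieNode
        self.isWord = False     # True if an inserted word ends at this node

class Trie:
    def __init__(self):
        self.root = TrieNode()

    def _walk(self, s):
        """Return the node reached by following s, or None if the path breaks."""
        curr = self.root
        for letter in s:
            if letter not in curr.children:
                return None
            curr = curr.children[letter]
        return curr

    def insert(self, word):
        curr = self.root
        for letter in word:
            curr = curr.children.setdefault(letter, TrieNode(letter))
        curr.isWord = True

    def search(self, word):
        node = self._walk(word)
        return node is not None and node.isWord

    def startsWith(self, prefix):
        return self._walk(prefix) is not None

trie = Trie()
trie.insert("apple")
print(trie.search("apple"), trie.search("app"), trie.startsWith("app"))
# -> True False True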
Python
from typing import List   # needed for the List[int] annotations below

def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
    """
    Do not return anything, modify nums1 in-place instead.
    """
    # Algo - bubble the largest remaining element to the end of nums1 on
    # every iteration, filling it from the back.
    aindex = len(nums1) - len(nums2) - 1   # index of the last real element in nums1 (== m - 1)
    bindex = len(nums2) - 1                # index of the last element in nums2 (== n - 1)
    merge_index = len(nums1) - 1           # index where the next largest element is written

    while aindex >= 0 and bindex >= 0:
        if nums1[aindex] >= nums2[bindex]:
            nums1[merge_index] = nums1[aindex]
            aindex -= 1
        else:
            nums1[merge_index] = nums2[bindex]
            bindex -= 1
        merge_index -= 1

    # Copy whatever remains of nums2; leftovers of nums1 are already in place.
    while bindex >= 0:
        nums1[merge_index] = nums2[bindex]
        merge_index -= 1
        bindex -= 1
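A quick check of the in-place merge, assuming the method above is available at module level as written (self is unused, so None can be passed for it) and that the typing import for the annotations is in scope; the inputs are the classic LeetCode 88 example.

nums1 = [1, 2, 3, 0, 0, 0]   # last n slots are reserved for nums2
nums2 = [2, 5, 6]

merge(None, nums1, 3, nums2, 3)
print(nums1)   # -> [1, 2, 2, 3, 5, 6]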