Dataset columns (string lengths given as min–max):

- repository_name: string, length 5–67
- func_path_in_repository: string, length 4–234
- func_name: string, length 0–314
- whole_func_string: string, length 52–3.87M
- language: string, 6 classes
- func_code_string: string, length 52–3.87M
- func_documentation_string: string, length 1–47.2k
- func_code_url: string, length 85–339
gagneurlab/concise
concise/utils/plot.py
add_letter_to_axis
```python
def add_letter_to_axis(ax, let, col, x, y, height):
    """Add 'let' with position x,y and height height to matplotlib axis 'ax'."""
    if len(let) == 2:
        colors = [col, "white"]
    elif len(let) == 1:
        colors = [col]
    else:
        raise ValueError("3 or more Polygons are not supported")
    for polygon, color in zip(let, colors):
        new_polygon = affinity.scale(
            polygon, yfact=height, origin=(0, 0, 0))
        new_polygon = affinity.translate(
            new_polygon, xoff=x, yoff=y)
        patch = PolygonPatch(
            new_polygon, edgecolor=color, facecolor=color)
        ax.add_patch(patch)
    return
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/plot.py#L174-L192
gagneurlab/concise
concise/utils/plot.py
seqlogo
```python
def seqlogo(letter_heights, vocab="DNA", ax=None):
    """Make a logo plot

    # Arguments
        letter_heights: "motif length" x "vocabulary size" numpy array.
            Can also contain negative values.
        vocab: str, Vocabulary name. Can be: DNA, RNA, AA, RNAStruct.
        ax: matplotlib axis
    """
    ax = ax or plt.gca()
    assert letter_heights.shape[1] == len(VOCABS[vocab])
    x_range = [1, letter_heights.shape[0]]
    pos_heights = np.copy(letter_heights)
    pos_heights[letter_heights < 0] = 0
    neg_heights = np.copy(letter_heights)
    neg_heights[letter_heights > 0] = 0

    for x_pos, heights in enumerate(letter_heights):
        letters_and_heights = sorted(zip(heights, list(VOCABS[vocab].keys())))
        y_pos_pos = 0.0
        y_neg_pos = 0.0
        for height, letter in letters_and_heights:
            color = VOCABS[vocab][letter]
            polygons = letter_polygons[letter]
            if height > 0:
                add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_pos_pos, height)
                y_pos_pos += height
            else:
                add_letter_to_axis(ax, polygons, color, 0.5 + x_pos, y_neg_pos, height)
                y_neg_pos += height
    # if add_hline:
    #     ax.axhline(color="black", linewidth=1)
    ax.set_xlim(x_range[0] - 1, x_range[1] + 1)
    ax.grid(False)
    ax.set_xticks(list(range(*x_range)) + [x_range[-1]])
    ax.set_aspect(aspect='auto', adjustable='box')
    ax.autoscale_view()
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/plot.py#L196-L234
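A minimal usage sketch (the random heights are illustrative only; `seqlogo` relies on the module-level `VOCABS` and `letter_polygons`):

```python
import numpy as np
import matplotlib.pyplot as plt
from concise.utils.plot import seqlogo

# 8 positions x 4-letter DNA vocabulary; values may be negative
letter_heights = np.random.uniform(-0.5, 1.0, size=(8, 4))

fig, ax = plt.subplots()
seqlogo(letter_heights, vocab="DNA", ax=ax)
plt.show()
```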
gagneurlab/concise
concise/legacy/analyze.py
get_cv_accuracy
```python
def get_cv_accuracy(res):
    """Extract the cv accuracy from the model"""
    ac_list = [(accuracy["train_acc_final"],
                accuracy["test_acc_final"])
               for accuracy, weights in res]
    ac = np.array(ac_list)
    perf = {
        "mean_train_acc": np.mean(ac[:, 0]),
        "std_train_acc": np.std(ac[:, 0]),
        "mean_test_acc": np.mean(ac[:, 1]),
        "std_test_acc": np.std(ac[:, 1]),
    }
    return perf
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/analyze.py#L9-L26
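For reference, a sketch of the `res` structure this function expects — one `(accuracy, weights)` tuple per cross-validation fold (the concrete numbers are made up):

```python
# each fold contributes an accuracy dict; the weights element is unpacked but unused
res = [
    ({"train_acc_final": 0.91, "test_acc_final": 0.85}, None),
    ({"train_acc_final": 0.93, "test_acc_final": 0.82}, None),
]
perf = get_cv_accuracy(res)
print(perf["mean_test_acc"])  # 0.835
```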
gagneurlab/concise
concise/preprocessing/sequence.py
one_hot2string
```python
def one_hot2string(arr, vocab):
    """Convert a one-hot encoded array back to string"""
    tokens = one_hot2token(arr)
    indexToLetter = _get_index_dict(vocab)
    return [''.join([indexToLetter[x] for x in row]) for row in tokens]
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L32-L38
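A round-trip sketch with `encodeDNA` (defined later in this file), assuming `_get_index_dict` maps each index back to its vocabulary letter:

```python
from concise.preprocessing.sequence import encodeDNA, one_hot2string

arr = encodeDNA(["ACGT", "TTGA"])  # shape (2, 4, 4)
print(one_hot2string(arr, ["A", "C", "G", "T"]))
# ['ACGT', 'TTGA']
```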
gagneurlab/concise
concise/preprocessing/sequence.py
tokenize
```python
def tokenize(seq, vocab, neutral_vocab=[]):
    """Convert sequence to integers

    # Arguments
       seq: Sequence to encode
       vocab: Vocabulary to use
       neutral_vocab: Neutral vocabulary -> assign those values to -1

    # Returns
       List of length `len(seq)` with integers from `-1` to `len(vocab) - 1`
    """
    # Req: all vocabs have the same length
    if isinstance(neutral_vocab, str):
        neutral_vocab = [neutral_vocab]

    nchar = len(vocab[0])
    for l in vocab + neutral_vocab:
        assert len(l) == nchar
    assert len(seq) % nchar == 0  # since we are using striding

    vocab_dict = _get_vocab_dict(vocab)
    for l in neutral_vocab:
        vocab_dict[l] = -1

    # current performance bottleneck
    return [vocab_dict[seq[(i * nchar):((i + 1) * nchar)]]
            for i in range(len(seq) // nchar)]
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L41-L66
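A small worked example, assuming `_get_vocab_dict` maps each vocabulary element to its list index; neutral characters map to -1:

```python
vocab = ["A", "C", "G", "T"]
print(tokenize("ACGTN", vocab, neutral_vocab=["N"]))
# [0, 1, 2, 3, -1]
```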
gagneurlab/concise
concise/preprocessing/sequence.py
token2one_hot
```python
def token2one_hot(tvec, vocab_size):
    """
    Note: everything out of the vocabulary is transformed into `np.zeros(vocab_size)`
    """
    arr = np.zeros((len(tvec), vocab_size))
    tvec_range = np.arange(len(tvec))
    tvec = np.asarray(tvec)
    arr[tvec_range[tvec >= 0], tvec[tvec >= 0]] = 1
    return arr
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L82-L91
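A short example; the -1 token (out-of-vocabulary) becomes an all-zero row:

```python
import numpy as np

print(token2one_hot([0, 2, -1], vocab_size=4))
# [[1. 0. 0. 0.]
#  [0. 0. 1. 0.]
#  [0. 0. 0. 0.]]
```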
gagneurlab/concise
concise/preprocessing/sequence.py
encodeSequence
```python
def encodeSequence(seq_vec, vocab, neutral_vocab, maxlen=None,
                   seq_align="start", pad_value="N", encode_type="one_hot"):
    """Convert a list of genetic sequences into one-hot-encoded array.

    # Arguments
       seq_vec: list of strings (genetic sequences)
       vocab: list of chars: List of "words" to use as the vocabulary.
           Can be strings of length > 0, but all need to have the same length.
           For DNA, this is: ["A", "C", "G", "T"].
       neutral_vocab: list of chars: Values used to pad the sequence or
           represent unknown values. For DNA, this is: ["N"].
       maxlen: int or None,
           Should we trim (subset) the resulting sequence. If None don't trim.
           Note that trims wrt the align parameter.
           It should be smaller than the longest sequence.
       seq_align: character; 'end' or 'start'
           To which end should we align sequences?
       encode_type: "one_hot" or "token". "token" represents each vocab
           element as a positive integer from 1 to len(vocab) + 1.
           neutral_vocab is represented with 0.

    # Returns
        Array with shape for encode_type:
            - "one_hot": `(len(seq_vec), maxlen, len(vocab))`
            - "token": `(len(seq_vec), maxlen)`
        If `maxlen=None`, it gets the value of the longest sequence length from `seq_vec`.
    """
    if isinstance(neutral_vocab, str):
        neutral_vocab = [neutral_vocab]
    if isinstance(seq_vec, str):
        raise ValueError("seq_vec should be an iterable returning " +
                         "strings not a string itself")
    assert len(vocab[0]) == len(pad_value)
    assert pad_value in neutral_vocab
    assert encode_type in ["one_hot", "token"]

    seq_vec = pad_sequences(seq_vec, maxlen=maxlen,
                            align=seq_align, value=pad_value)

    if encode_type == "one_hot":
        arr_list = [token2one_hot(tokenize(seq, vocab, neutral_vocab), len(vocab))
                    for i, seq in enumerate(seq_vec)]
    elif encode_type == "token":
        arr_list = [1 + np.array(tokenize(seq, vocab, neutral_vocab))
                    for seq in seq_vec]
        # we add 1 to be compatible with keras: https://keras.io/layers/embeddings/
        # indexes > 0, 0 = padding element
    return np.stack(arr_list)
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L94-L141
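A usage sketch with the DNA vocabulary (this is essentially what `encodeDNA` below wraps):

```python
X = encodeSequence(["ACGT", "AC"],
                   vocab=["A", "C", "G", "T"],
                   neutral_vocab=["N"],
                   maxlen=4,
                   seq_align="start",
                   pad_value="N",
                   encode_type="one_hot")
print(X.shape)  # (2, 4, 4); "AC" is padded with N -> all-zero rows
```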
gagneurlab/concise
concise/preprocessing/sequence.py
encodeDNA
````python
def encodeDNA(seq_vec, maxlen=None, seq_align="start"):
    """Convert the DNA sequence into 1-hot-encoding numpy array

    # Arguments
        seq_vec: list of chars
            List of sequences that can have different lengths
        maxlen: int or None,
            Should we trim (subset) the resulting sequence. If None don't trim.
            Note that trims wrt the align parameter.
            It should be smaller than the longest sequence.
        seq_align: character; 'end' or 'start'
            To which end should we align sequences?

    # Returns
        3D numpy array of shape (len(seq_vec), trim_seq_len (or maximal sequence length if None), 4)

    # Example

        ```python
        >>> sequence_vec = ['CTTACTCAGA', 'TCTTTA']
        >>> X_seq = encodeDNA(sequence_vec, seq_align="end", maxlen=8)
        >>> X_seq.shape
        (2, 8, 4)

        >>> print(X_seq)
        [[[0 0 0 1]
          [1 0 0 0]
          [0 1 0 0]
          [0 0 0 1]
          [0 1 0 0]
          [1 0 0 0]
          [0 0 1 0]
          [1 0 0 0]]

         [[0 0 0 0]
          [0 0 0 0]
          [0 0 0 1]
          [0 1 0 0]
          [0 0 0 1]
          [0 0 0 1]
          [0 0 0 1]
          [1 0 0 0]]]
        ```
    """
    return encodeSequence(seq_vec,
                          vocab=DNA,
                          neutral_vocab="N",
                          maxlen=maxlen,
                          seq_align=seq_align,
                          pad_value="N",
                          encode_type="one_hot")
````
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L144-L196
gagneurlab/concise
concise/preprocessing/sequence.py
encodeRNA
```python
def encodeRNA(seq_vec, maxlen=None, seq_align="start"):
    """Convert the RNA sequence into 1-hot-encoding numpy array as for encodeDNA"""
    return encodeSequence(seq_vec,
                          vocab=RNA,
                          neutral_vocab="N",
                          maxlen=maxlen,
                          seq_align=seq_align,
                          pad_value="N",
                          encode_type="one_hot")
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L199-L208
gagneurlab/concise
concise/preprocessing/sequence.py
encodeCodon
```python
def encodeCodon(seq_vec, ignore_stop_codons=True, maxlen=None,
                seq_align="start", encode_type="one_hot"):
    """Convert the Codon sequence into 1-hot-encoding numpy array

    # Arguments
        seq_vec: List of strings/DNA sequences
        ignore_stop_codons: boolean; if True, STOP_CODONS are omitted from one-hot encoding.
        maxlen: Maximum sequence length. See `pad_sequences` for more detail
        seq_align: How to align the sequences of variable lengths. See
            `pad_sequences` for more detail
        encode_type: can be `"one_hot"` or `"token"` for token encoding of codons
            (incremental integers)

    # Returns
        numpy.ndarray of shape `(len(seq_vec), maxlen / 3, 61 if ignore_stop_codons else 64)`
    """
    if ignore_stop_codons:
        vocab = CODONS
        neutral_vocab = STOP_CODONS + ["NNN"]
    else:
        vocab = CODONS + STOP_CODONS
        neutral_vocab = ["NNN"]

    # replace all U's with T's
    seq_vec = [str(seq).replace("U", "T") for seq in seq_vec]
    return encodeSequence(seq_vec,
                          vocab=vocab,
                          neutral_vocab=neutral_vocab,
                          maxlen=maxlen,
                          seq_align=seq_align,
                          pad_value="NNN",
                          encode_type=encode_type)
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L211-L240
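A sketch; `"TAG"` is a stop codon, so with `ignore_stop_codons=True` it falls into the neutral vocabulary and one-hot encodes to an all-zero row:

```python
seqs = ["ATGTAG"]  # ATG + TAG (stop)
X = encodeCodon(seqs, ignore_stop_codons=True)
print(X.shape)  # (1, 2, 61): 2 codons, 61 non-stop codons
```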
gagneurlab/concise
concise/preprocessing/sequence.py
encodeAA
```python
def encodeAA(seq_vec, maxlen=None, seq_align="start", encode_type="one_hot"):
    """Convert the Amino-acid sequence into 1-hot-encoding numpy array

    # Arguments
        seq_vec: List of strings/amino-acid sequences
        maxlen: Maximum sequence length. See `pad_sequences` for more detail
        seq_align: How to align the sequences of variable lengths. See
            `pad_sequences` for more detail
        encode_type: can be `"one_hot"` or `"token"` for token encoding of
            amino acids (incremental integers)

    # Returns
        numpy.ndarray of shape `(len(seq_vec), maxlen, 22)`
    """
    return encodeSequence(seq_vec,
                          vocab=AMINO_ACIDS,
                          neutral_vocab="_",
                          maxlen=maxlen,
                          seq_align=seq_align,
                          pad_value="_",
                          encode_type=encode_type)
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L243-L261
gagneurlab/concise
concise/preprocessing/sequence.py
pad_sequences
````python
def pad_sequences(sequence_vec, maxlen=None, align="end", value="N"):
    """Pad and/or trim a list of sequences to have common length.

    Procedure:
        1. Pad the sequence with N's or any other string or list element (`value`)
        2. Subset the sequence

    # Note
        See also: https://keras.io/preprocessing/sequence/
        Applicable also for lists of characters

    # Arguments
        sequence_vec: list of chars or lists
            List of sequences that can have various lengths
        value: Neutral element to pad the sequence with. Can be `str` or `list`.
        maxlen: int or None; Final length of sequences.
             If None, maxlen is set to the longest sequence length.
        align: character; 'start', 'end' or 'center'
            To which end to align the sequences when trimming/padding.
            See examples below.

    # Returns
        List of sequences of the same class as sequence_vec

    # Example

        ```python
        >>> sequence_vec = ['CTTACTCAGA', 'TCTTTA']
        >>> pad_sequences(sequence_vec, 10, align="start", value="N")
        ['CTTACTCAGA', 'TCTTTANNNN']
        >>> pad_sequences(sequence_vec, 10, align="end", value="N")
        ['CTTACTCAGA', 'NNNNTCTTTA']
        >>> pad_sequences(sequence_vec, 4, align="center", value="N")
        ['ACTC', 'CTTT']
        ```
    """
    # neutral element type checking
    assert isinstance(value, list) or isinstance(value, str)
    assert isinstance(value, type(sequence_vec[0]))
    assert not isinstance(sequence_vec, str)
    assert isinstance(sequence_vec[0], list) or isinstance(sequence_vec[0], str)

    max_seq_len = max([len(seq) for seq in sequence_vec])

    if maxlen is None:
        maxlen = max_seq_len
    else:
        maxlen = int(maxlen)

    if max_seq_len < maxlen:
        print("WARNING: Maximum sequence length (%s) is less than maxlen (%s)" %
              (max_seq_len, maxlen))
        max_seq_len = maxlen

    # check the case when len > 1
    for seq in sequence_vec:
        if not len(seq) % len(value) == 0:
            raise ValueError("All sequences need to be divisible by len(value)")
    if not maxlen % len(value) == 0:
        raise ValueError("maxlen needs to be divisible by len(value)")

    # pad and subset
    def pad(seq, max_seq_len, value="N", align="end"):
        seq_len = len(seq)
        assert max_seq_len >= seq_len
        if align == "end":
            n_left = max_seq_len - seq_len
            n_right = 0
        elif align == "start":
            n_right = max_seq_len - seq_len
            n_left = 0
        elif align == "center":
            n_left = (max_seq_len - seq_len) // 2 + (max_seq_len - seq_len) % 2
            n_right = (max_seq_len - seq_len) // 2
        else:
            raise ValueError("align can be of: end, start or center")

        # normalize for the length
        n_left = n_left // len(value)
        n_right = n_right // len(value)

        return value * n_left + seq + value * n_right

    def trim(seq, maxlen, align="end"):
        seq_len = len(seq)
        assert maxlen <= seq_len
        if align == "end":
            return seq[-maxlen:]
        elif align == "start":
            return seq[0:maxlen]
        elif align == "center":
            dl = seq_len - maxlen
            n_left = dl // 2 + dl % 2
            n_right = seq_len - dl // 2
            return seq[n_left:n_right]
        else:
            raise ValueError("align can be of: end, start or center")

    padded_sequence_vec = [pad(seq, max(max_seq_len, maxlen),
                               value=value, align=align) for seq in sequence_vec]
    padded_sequence_vec = [trim(seq, maxlen, align=align)
                           for seq in padded_sequence_vec]

    return padded_sequence_vec
````
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/sequence.py#L264-L365
gagneurlab/concise
concise/utils/position.py
extract_landmarks
```python
def extract_landmarks(gtf, landmarks=ALL_LANDMARKS):
    """Given a gene annotation GFF/GTF file, extract the landmark positions.

    # Arguments
        gtf: File path or a loaded `pd.DataFrame` with columns:
            seqname, feature, start, end, strand
        landmarks: list or a dictionary of landmark extractors (function or name)

    # Note
        When landmark extractor names are used, they have to be implemented
        in the module `concise.preprocessing.position`

    # Returns
        Dictionary of pd.DataFrames with landmark positions
        (columns: seqname, position, strand)
    """
    if isinstance(gtf, str):
        _logger.info("Reading gtf file..")
        gtf = read_gtf(gtf)
        _logger.info("Done")
    _logger.info("Running landmark extractors..")
    # landmarks to a dictionary with a function
    assert isinstance(landmarks, (list, tuple, set, dict))
    if isinstance(landmarks, dict):
        landmarks = {k: _get_fun(v) for k, v in landmarks.items()}
    else:
        landmarks = {_to_string(fn_str): _get_fun(fn_str)
                     for fn_str in landmarks}
    r = {k: _validate_pos(v(gtf)) for k, v in landmarks.items()}
    _logger.info("Done!")
    return r
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/position.py#L16-L48
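A hedged usage sketch (the GTF path is a placeholder; with the default `landmarks=ALL_LANDMARKS` every implemented extractor is run):

```python
landmark_positions = extract_landmarks("annotation.gtf")
for name, df in landmark_positions.items():
    # each df has columns: seqname, position, strand
    print(name, df.shape)
```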
gagneurlab/concise
concise/utils/position.py
_validate_pos
```python
def _validate_pos(df):
    """Validates the returned positional object"""
    assert isinstance(df, pd.DataFrame)
    assert ["seqname", "position", "strand"] == df.columns.tolist()
    assert df.position.dtype == np.dtype("int64")
    assert df.strand.dtype == np.dtype("O")
    assert df.seqname.dtype == np.dtype("O")
    return df
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/position.py#L131-L139
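An example of a frame that passes validation — note the exact column order and dtypes:

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({"seqname": ["chr1", "chr2"],
                   "position": np.array([100, 250], dtype="int64"),
                   "strand": ["+", "-"]},
                  columns=["seqname", "position", "strand"])
df = _validate_pos(df)  # passes: correct columns, int64 position, object strand/seqname
```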
gagneurlab/concise
concise/utils/tf_helper.py
huber_loss
```python
def huber_loss(tensor, k=1, scope=None):
    """Define a huber loss https://en.wikipedia.org/wiki/Huber_loss

      tensor: tensor to regularize.
      k: value of k in the huber loss
      scope: Optional scope for op_scope.

    Huber loss:
      f(x) = if |x| <= k: 0.5 * x^2
             else: k * |x| - 0.5 * k^2

    Returns:
      the Huber loss op.
    """
    # assert k >= 0
    # NOTE: tf.select is the pre-TF-1.0 API; tf.where is its modern replacement
    with tf.name_scope(scope, 'L1Loss', [tensor]):
        loss = tf.reduce_mean(tf.select(tf.abs(tensor) < k,
                                        0.5 * tf.square(tensor),
                                        k * tf.abs(tensor) - 0.5 * k**2))
    return loss
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/tf_helper.py#L26-L47
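For reference, a NumPy sketch of the same piecewise definition (useful for sanity-checking the `0.5 * k**2` term; this helper is not part of the original module):

```python
import numpy as np

def huber_loss_np(x, k=1.0):
    # f(x) = 0.5 * x^2           if |x| <= k
    #        k * |x| - 0.5 * k^2 otherwise
    ax = np.abs(x)
    return np.mean(np.where(ax <= k, 0.5 * x**2, k * ax - 0.5 * k**2))

print(huber_loss_np(np.array([0.5, 2.0])))  # mean(0.125, 1.5) = 0.8125
```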
gagneurlab/concise
concise/data/attract.py
get_metadata
```python
def get_metadata():
    """Get pandas.DataFrame with metadata about the Attract PWM's. Columns:

    - PWM_id (id of the PWM - pass to get_pwm_list() for getting the pwm)
    - Gene_name
    - Gene_id
    - Mutated (if the target gene is mutated)
    - Organism
    - Motif (consensus motif)
    - Len (length of the motif)
    - Experiment_description (when available)
    - Database (database from which the motifs were extracted;
      PDB: Protein data bank, C: Cisbp-RNA, R: RBPDB, S: Spliceaid-F, AEDB: ASD)
    - Pubmed (pubmed ID)
    - Experiment (type of experiment; short description)
    - Family (domain)
    - Score (Qscore; refer to the paper)
    """
    dt = pd.read_table(ATTRACT_METADTA)
    # rename Matrix_id to PWM_id
    dt.rename(columns={"Matrix_id": "PWM_id"}, inplace=True)
    # put PWM_id to first place
    cols = ['PWM_id'] + [col for col in dt if col != 'PWM_id']
    return dt[cols]
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/data/attract.py#L11-L35
gagneurlab/concise
concise/data/attract.py
get_pwm_list
```python
def get_pwm_list(pwm_id_list, pseudocountProb=0.0001):
    """Get a list of Attract PWM's.

    # Arguments
        pwm_id_list: List of id's from the `PWM_id` column in `get_metadata()` table
        pseudocountProb: Added pseudocount probabilities to the PWM

    # Returns
        List of `concise.utils.pwm.PWM` instances.
    """
    l = load_motif_db(ATTRACT_PWM)
    l = {k.split()[0]: v for k, v in l.items()}
    pwm_list = [PWM(l[str(m)] + pseudocountProb, name=m) for m in pwm_id_list]
    return pwm_list
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/data/attract.py#L38-L51
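A usage sketch chaining the two functions in this file (assuming the `PWM` instances keep the `name` passed to the constructor above):

```python
from concise.data.attract import get_metadata, get_pwm_list

dt = get_metadata()
pwms = get_pwm_list(dt["PWM_id"].unique()[:5])
for pwm in pwms:
    print(pwm.name)
```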
gagneurlab/concise
concise/losses.py
mask_loss
````python
def mask_loss(loss, mask_value=MASK_VALUE):
    """Generates a new loss function that ignores values where `y_true == mask_value`.

    # Arguments
        loss: str; name of the keras loss function from `keras.losses`
        mask_value: int; which values should be masked

    # Returns
        function; Masked version of the `loss`

    # Example
        ```python
        categorical_crossentropy_masked = mask_loss("categorical_crossentropy")
        ```
    """
    loss_fn = kloss.deserialize(loss)

    def masked_loss_fn(y_true, y_pred):
        # currently not supported with NA's:
        # - there is no K.is_nan implementation in keras.backend
        # - https://github.com/fchollet/keras/issues/1628
        mask = K.cast(K.not_equal(y_true, mask_value), K.floatx())

        # we divide by the mean to correct for the number of performed loss evaluations
        return loss_fn(y_true * mask, y_pred * mask) / K.mean(mask)

    masked_loss_fn.__name__ = loss + "_masked"
    return masked_loss_fn
````
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/losses.py#L9-L36
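A usage sketch with Keras (the tiny model is illustrative only):

```python
from keras.models import Sequential
from keras.layers import Dense

masked_mse = mask_loss("mean_squared_error", mask_value=-1)

model = Sequential([Dense(1, input_shape=(10,))])
model.compile(optimizer="adam", loss=masked_mse)
# entries of y equal to -1 are now ignored by the loss
```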
gagneurlab/concise
concise/effects/gradient.py
gradient_pred
```python
def gradient_pred(model, ref, ref_rc, alt, alt_rc, mutation_positions,
                  out_annotation_all_outputs, output_filter_mask=None,
                  out_annotation=None):
    """Gradient-based (saliency) variant effect prediction

    Based on the idea of [saliency maps](https://arxiv.org/pdf/1312.6034.pdf)
    the gradient-based prediction of variant effects uses the `gradient`
    function of the Keras backend to estimate the importance of a variant
    for a given output. This value is then multiplied by the input, as
    recommended by [Shrikumar et al., 2017](https://arxiv.org/pdf/1605.01713.pdf).

    # Arguments
        model: Keras model
        ref: Input sequence with the reference genotype in the mutation position
        ref_rc: Reverse complement of the 'ref' argument
        alt: Input sequence with the alternative genotype in the mutation position
        alt_rc: Reverse complement of the 'alt' argument
        mutation_positions: Position on which the mutation was placed in the
            forward sequences
        out_annotation_all_outputs: Output labels of the model.
        output_filter_mask: Mask of boolean values indicating which model
            outputs should be used. Use this or 'out_annotation'
        out_annotation: List of output labels for which of the outputs (in case
            of a multi-task model) the predictions should be calculated.

    # Returns
        Dictionary with three different entries:

        - ref: Gradient * input at the mutation position using the reference
          sequence. The forward or reverse-complement sequence is chosen based
          on which sequence direction caused the bigger absolute difference ('diff')
        - alt: Gradient * input at the mutation position using the alternative
          sequence. The forward or reverse-complement sequence is chosen based
          on which sequence direction caused the bigger absolute difference ('diff')
        - diff: 'alt' - 'ref'. The forward or reverse-complement sequence is
          chosen based on which sequence direction caused the bigger absolute
          difference.
    """
    seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc}
    for k in seqs:
        if not isinstance(seqs[k], (list, tuple, np.ndarray)):
            raise Exception("At the moment only models with list, tuple or "
                            "np.ndarray inputs are supported.")
    assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k]))
                   for k in seqs.keys() if k != "ref"])
    assert get_seq_len(ref)[0] == mutation_positions.shape[0]
    assert len(mutation_positions.shape) == 1

    # determine which outputs should be selected
    if output_filter_mask is None:
        if out_annotation is None:
            output_filter_mask = np.arange(out_annotation_all_outputs.shape[0])
        else:
            output_filter_mask = np.where(np.in1d(out_annotation_all_outputs,
                                                  out_annotation))[0]

    # make sure the labels are assigned correctly
    out_annotation = out_annotation_all_outputs[output_filter_mask]

    # Generate the necessary gradient functions
    sal_funcs = __generate_direct_saliency_functions__(model,
                                                       out_annotation_all_outputs,
                                                       out_annotation)

    # ANALOGOUS TO ISM:
    # predict
    preds = {}
    for k in seqs:
        preds[k] = {}
        if "_rc" in k:
            mutated_positions_here = get_seq_len(ref)[1] - 1 - mutation_positions
        else:
            mutated_positions_here = mutation_positions
        for l in out_annotation:
            preds[k][l] = predict_vals(input_data=seqs[k],
                                       apply_function=__get_direct_saliencies__,
                                       score_func=sal_funcs[l],
                                       mutated_positions=mutated_positions_here,
                                       model=model)

    diff_ret_dGrad = {}
    pred_out = {"ref": {}, "alt": {}}
    for k in preds["ref"]:
        # TODO make list (and dict)-ready
        diff_fwd = general_diff(preds["alt"][k]["dGrad"], preds["ref"][k]["dGrad"])
        diff_rc = general_diff(preds["alt_rc"][k]["dGrad"], preds["ref_rc"][k]["dGrad"])
        sel = general_sel(diff_fwd, diff_rc)
        replace_by_sel(diff_fwd, diff_rc, sel)
        diff_ret_dGrad[k] = diff_fwd
        # Overwrite the fwd values with rc values if rc was selected
        replace_by_sel(preds["ref"][k]["dGrad"], preds["ref_rc"][k]["dGrad"], sel)
        replace_by_sel(preds["alt"][k]["dGrad"], preds["alt_rc"][k]["dGrad"], sel)
        pred_out["ref"][k] = preds["ref"][k]["dGrad"]
        pred_out["alt"][k] = preds["alt"][k]["dGrad"]

    return {"diff": pd.DataFrame(diff_ret_dGrad),
            "ref": pd.DataFrame(pred_out["ref"]),
            "alt": pd.DataFrame(pred_out["alt"])}
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/effects/gradient.py#L230-L319
gagneurlab/concise
concise/data/hocomoco.py
get_pwm_list
```python
def get_pwm_list(pwm_id_list, pseudocountProb=0.0001):
    """Get a list of HOCOMOCO PWM's.

    # Arguments
        pwm_id_list: List of id's from the `PWM_id` column in `get_metadata()` table
        pseudocountProb: Added pseudocount probabilities to the PWM

    # Returns
        List of `concise.utils.pwm.PWM` instances.
    """
    l = load_motif_db(HOCOMOCO_PWM)
    l = {k.split()[0]: v for k, v in l.items()}
    pwm_list = [PWM(_normalize_pwm(l[m]) + pseudocountProb, name=m)
                for m in pwm_id_list]
    return pwm_list
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/data/hocomoco.py#L43-L56
gagneurlab/concise
concise/legacy/concise.py
Concise.get_weights
```python
def get_weights(self):
    """
    Returns:
        dict: Model's trained weights.
    """
    if self.is_trained() is False:
        # print("Model not fitted yet. Use object.fit() to fit the model.")
        return None

    var_res = self._var_res
    weights = self._var_res_to_weights(var_res)

    # save to the side
    weights["final_bias_fit"] = weights["final_bias"]
    weights["feature_weights_fit"] = weights["feature_weights"]
    return weights
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L412-L427
gagneurlab/concise
concise/legacy/concise.py
Concise._var_res_to_weights
```python
def _var_res_to_weights(self, var_res):
    """Get model weights"""
    # transform the weights into our form
    motif_base_weights_raw = var_res["motif_base_weights"][0]
    motif_base_weights = np.swapaxes(motif_base_weights_raw, 0, 2)

    # get weights
    motif_weights = var_res["motif_weights"]
    motif_bias = var_res["motif_bias"]
    final_bias = var_res["final_bias"]
    feature_weights = var_res["feature_weights"]

    # get the GAM prediction:
    spline_pred = None
    spline_weights = None
    if self._param["n_splines"] is not None:
        spline_pred = self._splines["X_spline"].dot(var_res["spline_weights"])
        if self._param["spline_exp"] is True:
            spline_pred = np.exp(spline_pred)
        else:
            spline_pred = (spline_pred + 1)
        # NOTE: reshape returns a new array; the result is not assigned here
        spline_pred.reshape([-1])
        spline_weights = var_res["spline_weights"]

    weights = {"motif_base_weights": motif_base_weights,
               "motif_weights": motif_weights,
               "motif_bias": motif_bias,
               "final_bias": final_bias,
               "feature_weights": feature_weights,
               "spline_pred": spline_pred,
               "spline_weights": spline_weights}
    return weights
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L436-L472
gagneurlab/concise
concise/legacy/concise.py
Concise._get_var_res
```python
def _get_var_res(self, graph, var, other_var):
    """Get the weights from our graph"""
    with tf.Session(graph=graph) as sess:
        sess.run(other_var["init"])
        # all_vars = tf.all_variables()
        # print("All variable names")
        # print([var.name for var in all_vars])
        # print("All variable values")
        # print(sess.run(all_vars))
        var_res = self._get_var_res_sess(sess, var)

    return var_res
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L519-L532
gagneurlab/concise
concise/legacy/concise.py
Concise._convert_to_var
```python
def _convert_to_var(self, graph, var_res):
    """Create tf.Variables from a dictionary of numpy arrays

    var_res: dictionary of numpy arrays with the key names corresponding to var
    """
    with graph.as_default():
        var = {}
        for key, value in var_res.items():
            if value is not None:
                var[key] = tf.Variable(value, name="tf_%s" % key)
            else:
                var[key] = None
    return var
```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L534-L547
gagneurlab/concise
concise/legacy/concise.py
Concise.train
def train(self, X_feat, X_seq, y,
          X_feat_valid=None, X_seq_valid=None, y_valid=None,
          n_cores=3):
    """Train the CONCISE model

    :py:attr:`X_feat`, :py:attr:`X_seq`, :py:attr:`y` are preferably returned by the :py:func:`concise.prepare_data` function.

    Args:
        X_feat: Numpy (float) array of shape :code:`(N, D)`. Feature design matrix storing :code:`N` training samples and :code:`D` features
        X_seq: Numpy (float) array of shape :code:`(N, 1, N_seq, 4)`. It represents 1-hot encoding of the DNA/RNA sequence. (:code:`N` sequences of length :code:`N_seq`)
        y: Numpy (float) array of shape :code:`(N, 1)`. Response variable.
        X_feat_valid: :py:attr:`X_feat` used for model validation.
        X_seq_valid: :py:attr:`X_seq` used for model validation.
        y_valid: :py:attr:`y` used for model validation.
        n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored.
    """
    if X_feat_valid is None and X_seq_valid is None and y_valid is None:
        X_feat_valid = X_feat
        X_seq_valid = X_seq
        y_valid = y
        print("Using training samples also for validation ")

    # insert one dimension - back-compatibility
    X_seq = np.expand_dims(X_seq, axis=1)
    X_seq_valid = np.expand_dims(X_seq_valid, axis=1)

    # TODO: implement the re-training feature
    if self.is_trained() is True:
        print("Model already fitted. Re-training feature not implemented yet")
        return

    # input check
    assert X_seq.shape[0] == X_feat.shape[0] == y.shape[0]
    assert y.shape == (X_feat.shape[0], self._num_tasks)

    # extract data-specific parameters
    self._param["seq_length"] = X_seq.shape[2]
    self._param["n_add_features"] = X_feat.shape[1]

    # more input check
    if not self._param["seq_length"] == X_seq_valid.shape[2]:
        raise Exception("sequence lengths don't match")

    # setup splines
    if self._param["n_splines"] is not None:
        padd_loss = self._param["motif_length"] - 1  # how much shorter is our sequence, since we don't use padding
        X_spline, S, _ = splines.get_gam_splines(start=0,
                                                 end=self._param["seq_length"] - padd_loss - 1,  # -1 due to zero-indexing
                                                 n_bases=self._param["n_splines"],
                                                 spline_order=3,
                                                 add_intercept=False)
        self._splines = {"X_spline": X_spline,
                         "S": S
                         }

    # setup graph and variables
    self._graph = tf.Graph()
    self._var = self._get_var_initialization(self._graph, X_feat_train=X_feat, y_train=y)
    self._other_var = self._build_graph(self._graph, self._var)

    # TODO: save the initialized parameters
    var_res_init = self._get_var_res(self._graph, self._var, self._other_var)
    self.init_weights = self._var_res_to_weights(var_res=var_res_init)

    # finally train the model
    # - it saves the accuracy
    if self._param["optimizer"] == "adam":
        _train = self._train_adam
    elif self._param["optimizer"] == "lbfgs":
        _train = self._train_lbfgs
    else:
        raise Exception("Optimizer {} not implemented".format(self._param["optimizer"]))

    self._var_res = _train(X_feat, X_seq, y,
                           X_feat_valid, X_seq_valid, y_valid,
                           graph=self._graph, var=self._var, other_var=self._other_var,
                           early_stop_patience=self._param["early_stop_patience"],
                           n_cores=n_cores)

    self._model_fitted = True

    # TODO: maybe:
    # - add y_train_accuracy
    # - y_train
    return True
python
def train(self, X_feat, X_seq, y,
          X_feat_valid=None, X_seq_valid=None, y_valid=None,
          n_cores=3):
    """Train the CONCISE model

    :py:attr:`X_feat`, :py:attr:`X_seq`, :py:attr:`y` are preferably returned by the :py:func:`concise.prepare_data` function.

    Args:
        X_feat: Numpy (float) array of shape :code:`(N, D)`. Feature design matrix storing :code:`N` training samples and :code:`D` features
        X_seq: Numpy (float) array of shape :code:`(N, 1, N_seq, 4)`. It represents 1-hot encoding of the DNA/RNA sequence. (:code:`N` sequences of length :code:`N_seq`)
        y: Numpy (float) array of shape :code:`(N, 1)`. Response variable.
        X_feat_valid: :py:attr:`X_feat` used for model validation.
        X_seq_valid: :py:attr:`X_seq` used for model validation.
        y_valid: :py:attr:`y` used for model validation.
        n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored.
    """
    if X_feat_valid is None and X_seq_valid is None and y_valid is None:
        X_feat_valid = X_feat
        X_seq_valid = X_seq
        y_valid = y
        print("Using training samples also for validation ")

    # insert one dimension - back-compatibility
    X_seq = np.expand_dims(X_seq, axis=1)
    X_seq_valid = np.expand_dims(X_seq_valid, axis=1)

    # TODO: implement the re-training feature
    if self.is_trained() is True:
        print("Model already fitted. Re-training feature not implemented yet")
        return

    # input check
    assert X_seq.shape[0] == X_feat.shape[0] == y.shape[0]
    assert y.shape == (X_feat.shape[0], self._num_tasks)

    # extract data-specific parameters
    self._param["seq_length"] = X_seq.shape[2]
    self._param["n_add_features"] = X_feat.shape[1]

    # more input check
    if not self._param["seq_length"] == X_seq_valid.shape[2]:
        raise Exception("sequence lengths don't match")

    # setup splines
    if self._param["n_splines"] is not None:
        padd_loss = self._param["motif_length"] - 1  # how much shorter is our sequence, since we don't use padding
        X_spline, S, _ = splines.get_gam_splines(start=0,
                                                 end=self._param["seq_length"] - padd_loss - 1,  # -1 due to zero-indexing
                                                 n_bases=self._param["n_splines"],
                                                 spline_order=3,
                                                 add_intercept=False)
        self._splines = {"X_spline": X_spline,
                         "S": S
                         }

    # setup graph and variables
    self._graph = tf.Graph()
    self._var = self._get_var_initialization(self._graph, X_feat_train=X_feat, y_train=y)
    self._other_var = self._build_graph(self._graph, self._var)

    # TODO: save the initialized parameters
    var_res_init = self._get_var_res(self._graph, self._var, self._other_var)
    self.init_weights = self._var_res_to_weights(var_res=var_res_init)

    # finally train the model
    # - it saves the accuracy
    if self._param["optimizer"] == "adam":
        _train = self._train_adam
    elif self._param["optimizer"] == "lbfgs":
        _train = self._train_lbfgs
    else:
        raise Exception("Optimizer {} not implemented".format(self._param["optimizer"]))

    self._var_res = _train(X_feat, X_seq, y,
                           X_feat_valid, X_seq_valid, y_valid,
                           graph=self._graph, var=self._var, other_var=self._other_var,
                           early_stop_patience=self._param["early_stop_patience"],
                           n_cores=n_cores)

    self._model_fitted = True

    # TODO: maybe:
    # - add y_train_accuracy
    # - y_train
    return True
Train the CONCISE model :py:attr:`X_feat`, :py:attr:`X_seq`, :py:attr:`y` are preferably returned by the :py:func:`concise.prepare_data` function. Args: X_feat: Numpy (float) array of shape :code:`(N, D)`. Feature design matrix storing :code:`N` training samples and :code:`D` features X_seq: Numpy (float) array of shape :code:`(N, 1, N_seq, 4)`. It represents 1-hot encoding of the DNA/RNA sequence. (:code:`N` sequences of length :code:`N_seq`) y: Numpy (float) array of shape :code:`(N, 1)`. Response variable. X_feat_valid: :py:attr:`X_feat` used for model validation. X_seq_valid: :py:attr:`X_seq` used for model validation. y_valid: :py:attr:`y` used for model validation. n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L554-L640
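A minimal usage sketch of this legacy training API on random toy data. The array shapes follow the docstring above (the method inserts the extra sequence dimension itself); the constructor argument `n_epochs` is a placeholder assumed from the `self._param` usage in the code, not a documented signature.

```python
import numpy as np
from concise.legacy.concise import Concise  # import path taken from this record

# Toy data: N=100 samples, D=2 extra features, sequence length 50 (all illustrative)
N, D, seq_len = 100, 2, 50
X_feat = np.random.rand(N, D).astype(np.float32)
X_seq = np.eye(4)[np.random.randint(4, size=(N, seq_len))]  # one-hot, shape (N, seq_len, 4)
y = np.random.rand(N, 1).astype(np.float32)

co = Concise(n_epochs=50)              # hyper-parameter name assumed, see lead-in
co.train(X_feat, X_seq, y, n_cores=2)  # without validation data, the training set is reused
y_pred = co.predict(X_feat, X_seq)
```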
gagneurlab/concise
concise/legacy/concise.py
Concise._predict_in_session
def _predict_in_session(self, sess, other_var, X_feat, X_seq, variable="y_pred"):
    """
    Predict y (or any other variable) from inside the tf session.

    Variable has to be in other_var
    """
    # other_var["tf_X_seq"]: X_seq, tf_y: y,
    feed_dict = {other_var["tf_X_feat"]: X_feat, other_var["tf_X_seq"]: X_seq}
    y_pred = sess.run(other_var[variable], feed_dict=feed_dict)

    return y_pred
python
def _predict_in_session(self, sess, other_var, X_feat, X_seq, variable="y_pred"):
    """
    Predict y (or any other variable) from inside the tf session.

    Variable has to be in other_var
    """
    # other_var["tf_X_seq"]: X_seq, tf_y: y,
    feed_dict = {other_var["tf_X_feat"]: X_feat, other_var["tf_X_seq"]: X_seq}
    y_pred = sess.run(other_var[variable], feed_dict=feed_dict)

    return y_pred
Predict y (or any other variable) from inside the tf session. Variable has to be in other_var
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L642-L651
gagneurlab/concise
concise/legacy/concise.py
Concise._accuracy_in_session
def _accuracy_in_session(self, sess, other_var, X_feat, X_seq, y):
    """
    Compute the accuracy from inside the tf session
    """
    y_pred = self._predict_in_session(sess, other_var, X_feat, X_seq)
    return ce.mse(y_pred, y)
python
def _accuracy_in_session(self, sess, other_var, X_feat, X_seq, y):
    """
    Compute the accuracy from inside the tf session
    """
    y_pred = self._predict_in_session(sess, other_var, X_feat, X_seq)
    return ce.mse(y_pred, y)
Compute the accuracy from inside the tf session
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L653-L658
gagneurlab/concise
concise/legacy/concise.py
Concise._train_lbfgs
def _train_lbfgs(self, X_feat_train, X_seq_train, y_train,
                 X_feat_valid, X_seq_valid, y_valid,
                 graph, var, other_var,
                 early_stop_patience=None,
                 n_cores=3):
    """
    Train the actual model

    Updates weights / variables, computes and returns the training and validation accuracy
    """
    tic = time.time()

    # take out the parameters for convenience
    n_epochs = self._param["n_epochs"]
    print_every = self._param["print_every"]
    step_size = self._param["step_size"]
    num_steps = n_epochs
    print('Number of epochs:', n_epochs)
    # print("Number of steps per epoch:", num_steps)
    # print("Number of total steps:", num_steps * n_epochs)

    # move into the graph and start the model
    loss_history = []
    train_acc_vec = []
    valid_acc_vec = []
    step_history = []

    with tf.Session(graph=graph, config=tf.ConfigProto(
            use_per_session_threads=True,
            inter_op_parallelism_threads=n_cores,
            intra_op_parallelism_threads=n_cores)) as sess:
        sess.run(other_var["init"])

        best_performance = None
        best_performance_epoch = 0
        for step in range(n_epochs):
            # run the model (sess.run)
            # compute the optimizer, loss and train_prediction in the graph
            # save the last two as l and predictions

            # put the data into TF form:
            feed_dict = {other_var["tf_X_seq"]: X_seq_train,
                         other_var["tf_y"]: y_train,
                         other_var["tf_X_feat"]: X_feat_train,
                         other_var["tf_step_size"]: step_size}

            # run the optimizer
            # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/opt/python/training/external_optimizer.py#L115
            other_var["optimizer"].minimize(sess, feed_dict=feed_dict)
            l = sess.run(other_var["loss"], feed_dict=feed_dict)
            loss_history.append(l)  # keep storing the full loss history

            # sometimes print the actual training prediction (l)
            if (step % print_every == 0):
                train_accuracy = self._accuracy_in_session(sess, other_var, X_feat_train, X_seq_train, y_train)
                valid_accuracy = self._accuracy_in_session(sess, other_var, X_feat_valid, X_seq_valid, y_valid)

                # append the prediction accuracies
                train_acc_vec.append(train_accuracy)
                valid_acc_vec.append(valid_accuracy)
                step_history.append(step / num_steps)
                print('Step %4d: loss %f, train mse: %f, validation mse: %f' % (step, l, train_accuracy, valid_accuracy))

                # check if this is the best accuracy
                if best_performance is None or valid_accuracy <= best_performance:
                    best_performance = valid_accuracy
                    best_performance_epoch = step

                if early_stop_patience is not None and step > best_performance_epoch + early_stop_patience:
                    print("Early stopping. \nbest_performance_epoch: %d, best_performance: %f" % (best_performance_epoch, best_performance))
                    break

        # get the test accuracies
        train_accuracy_final = self._accuracy_in_session(sess, other_var, X_feat_train, X_seq_train, y_train)
        valid_accuracy_final = self._accuracy_in_session(sess, other_var, X_feat_valid, X_seq_valid, y_valid)
        print('Validation accuracy final: %f' % valid_accuracy_final)

        # store the fitted weights
        var_res = self._get_var_res_sess(sess, var)

        # store also the quasi splines fit
        if self._param["n_splines"] is not None:
            self._splines["quasi_X"] = [self._predict_in_session(sess, other_var,
                                                                 X_feat_train[i:(i + 1)], X_seq_train[i:(i + 1)],
                                                                 variable="spline_quasi_X")
                                        for i in range(X_feat_train.shape[0])]
            # transform into the appropriate form
            self._splines["quasi_X"] = np.concatenate([x[0][np.newaxis] for x in self._splines["quasi_X"]])

    accuracy = {
        "loss_history": np.array(loss_history),
        "step_history": np.array(step_history),
        "train_acc_history": np.array(train_acc_vec),
        "val_acc_history": np.array(valid_acc_vec),
        "train_acc_final": train_accuracy_final,
        "val_acc_final": valid_accuracy_final,
        "best_val_acc": best_performance,
        "best_val_acc_epoch": best_performance_epoch,
        "test_acc_final": None,     # test_accuracy_final,
        "y_test": None,             # y_test,
        "y_test_prediction": None,  # test_prediction.eval(),
        "id_vec_test": None         # id_vec_test
    }
    self._accuracy = accuracy

    toc = time.time()
    exec_time = toc - tic
    self._exec_time = exec_time
    print('That took %fs' % exec_time)

    # weights = {"motif_base_weights": motif_base_weights,
    #            "motif_weights": motif_weights,
    #            "motif_bias": motif_bias,
    #            "final_bias": final_bias,
    #            "feature_weights": feature_weights,
    #            "spline_pred": spline_pred
    #            }
    return var_res
python
def _train_lbfgs(self, X_feat_train, X_seq_train, y_train,
                 X_feat_valid, X_seq_valid, y_valid,
                 graph, var, other_var,
                 early_stop_patience=None,
                 n_cores=3):
    """
    Train the actual model

    Updates weights / variables, computes and returns the training and validation accuracy
    """
    tic = time.time()

    # take out the parameters for convenience
    n_epochs = self._param["n_epochs"]
    print_every = self._param["print_every"]
    step_size = self._param["step_size"]
    num_steps = n_epochs
    print('Number of epochs:', n_epochs)
    # print("Number of steps per epoch:", num_steps)
    # print("Number of total steps:", num_steps * n_epochs)

    # move into the graph and start the model
    loss_history = []
    train_acc_vec = []
    valid_acc_vec = []
    step_history = []

    with tf.Session(graph=graph, config=tf.ConfigProto(
            use_per_session_threads=True,
            inter_op_parallelism_threads=n_cores,
            intra_op_parallelism_threads=n_cores)) as sess:
        sess.run(other_var["init"])

        best_performance = None
        best_performance_epoch = 0
        for step in range(n_epochs):
            # run the model (sess.run)
            # compute the optimizer, loss and train_prediction in the graph
            # save the last two as l and predictions

            # put the data into TF form:
            feed_dict = {other_var["tf_X_seq"]: X_seq_train,
                         other_var["tf_y"]: y_train,
                         other_var["tf_X_feat"]: X_feat_train,
                         other_var["tf_step_size"]: step_size}

            # run the optimizer
            # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/contrib/opt/python/training/external_optimizer.py#L115
            other_var["optimizer"].minimize(sess, feed_dict=feed_dict)
            l = sess.run(other_var["loss"], feed_dict=feed_dict)
            loss_history.append(l)  # keep storing the full loss history

            # sometimes print the actual training prediction (l)
            if (step % print_every == 0):
                train_accuracy = self._accuracy_in_session(sess, other_var, X_feat_train, X_seq_train, y_train)
                valid_accuracy = self._accuracy_in_session(sess, other_var, X_feat_valid, X_seq_valid, y_valid)

                # append the prediction accuracies
                train_acc_vec.append(train_accuracy)
                valid_acc_vec.append(valid_accuracy)
                step_history.append(step / num_steps)
                print('Step %4d: loss %f, train mse: %f, validation mse: %f' % (step, l, train_accuracy, valid_accuracy))

                # check if this is the best accuracy
                if best_performance is None or valid_accuracy <= best_performance:
                    best_performance = valid_accuracy
                    best_performance_epoch = step

                if early_stop_patience is not None and step > best_performance_epoch + early_stop_patience:
                    print("Early stopping. \nbest_performance_epoch: %d, best_performance: %f" % (best_performance_epoch, best_performance))
                    break

        # get the test accuracies
        train_accuracy_final = self._accuracy_in_session(sess, other_var, X_feat_train, X_seq_train, y_train)
        valid_accuracy_final = self._accuracy_in_session(sess, other_var, X_feat_valid, X_seq_valid, y_valid)
        print('Validation accuracy final: %f' % valid_accuracy_final)

        # store the fitted weights
        var_res = self._get_var_res_sess(sess, var)

        # store also the quasi splines fit
        if self._param["n_splines"] is not None:
            self._splines["quasi_X"] = [self._predict_in_session(sess, other_var,
                                                                 X_feat_train[i:(i + 1)], X_seq_train[i:(i + 1)],
                                                                 variable="spline_quasi_X")
                                        for i in range(X_feat_train.shape[0])]
            # transform into the appropriate form
            self._splines["quasi_X"] = np.concatenate([x[0][np.newaxis] for x in self._splines["quasi_X"]])

    accuracy = {
        "loss_history": np.array(loss_history),
        "step_history": np.array(step_history),
        "train_acc_history": np.array(train_acc_vec),
        "val_acc_history": np.array(valid_acc_vec),
        "train_acc_final": train_accuracy_final,
        "val_acc_final": valid_accuracy_final,
        "best_val_acc": best_performance,
        "best_val_acc_epoch": best_performance_epoch,
        "test_acc_final": None,     # test_accuracy_final,
        "y_test": None,             # y_test,
        "y_test_prediction": None,  # test_prediction.eval(),
        "id_vec_test": None         # id_vec_test
    }
    self._accuracy = accuracy

    toc = time.time()
    exec_time = toc - tic
    self._exec_time = exec_time
    print('That took %fs' % exec_time)

    # weights = {"motif_base_weights": motif_base_weights,
    #            "motif_weights": motif_weights,
    #            "motif_bias": motif_bias,
    #            "final_bias": final_bias,
    #            "feature_weights": feature_weights,
    #            "spline_pred": spline_pred
    #            }
    return var_res
Train the actual model. Updates weights / variables, computes and returns the training and validation accuracy
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L660-L782
gagneurlab/concise
concise/legacy/concise.py
Concise.predict
def predict(self, X_feat, X_seq):
    """
    Predict the response variable :py:attr:`y` for new input data (:py:attr:`X_feat`, :py:attr:`X_seq`).

    Args:
        X_feat: Feature design matrix. Same format as :py:attr:`X_feat` in :py:meth:`train`
        X_seq: Sequence design matrix. Same format as :py:attr:`X_seq` in :py:meth:`train`
    """
    # insert one dimension - back-compatibility
    X_seq = np.expand_dims(X_seq, axis=1)
    return self._get_other_var(X_feat, X_seq, variable="y_pred")
python
def predict(self, X_feat, X_seq):
    """
    Predict the response variable :py:attr:`y` for new input data (:py:attr:`X_feat`, :py:attr:`X_seq`).

    Args:
        X_feat: Feature design matrix. Same format as :py:attr:`X_feat` in :py:meth:`train`
        X_seq: Sequence design matrix. Same format as :py:attr:`X_seq` in :py:meth:`train`
    """
    # insert one dimension - back-compatibility
    X_seq = np.expand_dims(X_seq, axis=1)
    return self._get_other_var(X_feat, X_seq, variable="y_pred")
Predict the response variable :py:attr:`y` for new input data (:py:attr:`X_feat`, :py:attr:`X_seq`). Args: X_feat: Feature design matrix. Same format as :py:attr:`X_feat` in :py:meth:`train` X_seq: Sequence design matrix. Same format as :py:attr:`X_seq` in :py:meth:`train`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L922-L934
gagneurlab/concise
concise/legacy/concise.py
Concise._get_other_var
def _get_other_var(self, X_feat, X_seq, variable="y_pred"):
    """
    Get the value of a variable from other_vars (from a tf-graph)
    """
    if self.is_trained() is False:
        print("Model not fitted yet. Use object.fit() to fit the model.")
        return

    # input check:
    assert X_seq.shape[0] == X_feat.shape[0]
    # TODO - check this
    # sequence can be wider or thinner?
    # assert self._param["seq_length"] == X_seq.shape[2]
    assert self._param["n_add_features"] == X_feat.shape[1]

    # setup graph and variables
    graph = tf.Graph()
    var = self._convert_to_var(graph, self._var_res)
    other_var = self._build_graph(graph, var)

    with tf.Session(graph=graph) as sess:
        sess.run(other_var["init"])
        # predict
        y = self._predict_in_session(sess, other_var, X_feat, X_seq, variable)

    return y
python
def _get_other_var(self, X_feat, X_seq, variable="y_pred"):
    """
    Get the value of a variable from other_vars (from a tf-graph)
    """
    if self.is_trained() is False:
        print("Model not fitted yet. Use object.fit() to fit the model.")
        return

    # input check:
    assert X_seq.shape[0] == X_feat.shape[0]
    # TODO - check this
    # sequence can be wider or thinner?
    # assert self._param["seq_length"] == X_seq.shape[2]
    assert self._param["n_add_features"] == X_feat.shape[1]

    # setup graph and variables
    graph = tf.Graph()
    var = self._convert_to_var(graph, self._var_res)
    other_var = self._build_graph(graph, var)

    with tf.Session(graph=graph) as sess:
        sess.run(other_var["init"])
        # predict
        y = self._predict_in_session(sess, other_var, X_feat, X_seq, variable)

    return y
Get the value of a variable from other_vars (from a tf-graph)
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L936-L961
gagneurlab/concise
concise/legacy/concise.py
Concise.to_dict
def to_dict(self):
    """
    Returns:
        dict: Concise represented as a dictionary.
    """
    final_res = {
        "param": self._param,
        "unused_param": self.unused_param,
        "execution_time": self._exec_time,
        "output": {"accuracy": self.get_accuracy(),
                   "weights": self.get_weights(),
                   "splines": self._splines
                   }
    }
    return final_res
python
def to_dict(self):
    """
    Returns:
        dict: Concise represented as a dictionary.
    """
    final_res = {
        "param": self._param,
        "unused_param": self.unused_param,
        "execution_time": self._exec_time,
        "output": {"accuracy": self.get_accuracy(),
                   "weights": self.get_weights(),
                   "splines": self._splines
                   }
    }
    return final_res
Returns: dict: Concise represented as a dictionary.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1003-L1017
gagneurlab/concise
concise/legacy/concise.py
Concise._set_var_res
def _set_var_res(self, weights):
    """
    Transform the weights to var_res
    """
    if weights is None:
        return

    # layer 1
    motif_base_weights_raw = np.swapaxes(weights["motif_base_weights"], 2, 0)
    motif_base_weights = motif_base_weights_raw[np.newaxis]

    motif_bias = weights["motif_bias"]
    feature_weights = weights["feature_weights"]
    spline_weights = weights["spline_weights"]

    # filter
    motif_weights = weights["motif_weights"]
    final_bias = weights["final_bias"]

    var_res = {
        "motif_base_weights": motif_base_weights,
        "motif_bias": motif_bias,
        "spline_weights": spline_weights,
        "feature_weights": feature_weights,
        "motif_weights": motif_weights,
        "final_bias": final_bias
    }

    # cast everything to float32
    var_res = {key: value.astype(np.float32) if value is not None else None
               for key, value in var_res.items()}

    self._var_res = var_res
python
def _set_var_res(self, weights):
    """
    Transform the weights to var_res
    """
    if weights is None:
        return

    # layer 1
    motif_base_weights_raw = np.swapaxes(weights["motif_base_weights"], 2, 0)
    motif_base_weights = motif_base_weights_raw[np.newaxis]

    motif_bias = weights["motif_bias"]
    feature_weights = weights["feature_weights"]
    spline_weights = weights["spline_weights"]

    # filter
    motif_weights = weights["motif_weights"]
    final_bias = weights["final_bias"]

    var_res = {
        "motif_base_weights": motif_base_weights,
        "motif_bias": motif_bias,
        "spline_weights": spline_weights,
        "feature_weights": feature_weights,
        "motif_weights": motif_weights,
        "final_bias": final_bias
    }

    # cast everything to float32
    var_res = {key: value.astype(np.float32) if value is not None else None
               for key, value in var_res.items()}

    self._var_res = var_res
Transform the weights to var_res
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1028-L1059
gagneurlab/concise
concise/legacy/concise.py
Concise.from_dict
def from_dict(cls, obj_dict):
    """
    Load the object from a dictionary (produced with :py:func:`Concise.to_dict`)

    Returns:
        Concise: Loaded Concise object.
    """
    # convert the output into a proper form
    obj_dict['output'] = helper.rec_dict_to_numpy_dict(obj_dict["output"])
    helper.dict_to_numpy_dict(obj_dict['output'])

    if "trained_global_model" in obj_dict.keys():
        raise Exception("Found trained_global_model feature in dictionary. Use ConciseCV.load to load this file.")
    dc = Concise(**obj_dict["param"])
    # touch the hidden arguments
    dc._param = obj_dict["param"]
    if obj_dict["output"]["weights"] is None:
        dc._model_fitted = False
    else:
        dc._model_fitted = True
    dc._exec_time = obj_dict["execution_time"]
    dc.unused_param = obj_dict["unused_param"]
    dc._accuracy = obj_dict["output"]["accuracy"]
    dc._splines = obj_dict["output"]["splines"]

    weights = obj_dict["output"]["weights"]
    if weights is not None:
        # fix the dimensionality of X_feat in case it was 0 dimensional
        if weights["feature_weights"].shape == (0,):
            weights["feature_weights"].shape = (0, obj_dict["param"]["num_tasks"])
    dc._set_var_res(weights)
    return dc
python
def from_dict(cls, obj_dict):
    """
    Load the object from a dictionary (produced with :py:func:`Concise.to_dict`)

    Returns:
        Concise: Loaded Concise object.
    """
    # convert the output into a proper form
    obj_dict['output'] = helper.rec_dict_to_numpy_dict(obj_dict["output"])
    helper.dict_to_numpy_dict(obj_dict['output'])

    if "trained_global_model" in obj_dict.keys():
        raise Exception("Found trained_global_model feature in dictionary. Use ConciseCV.load to load this file.")
    dc = Concise(**obj_dict["param"])
    # touch the hidden arguments
    dc._param = obj_dict["param"]
    if obj_dict["output"]["weights"] is None:
        dc._model_fitted = False
    else:
        dc._model_fitted = True
    dc._exec_time = obj_dict["execution_time"]
    dc.unused_param = obj_dict["unused_param"]
    dc._accuracy = obj_dict["output"]["accuracy"]
    dc._splines = obj_dict["output"]["splines"]

    weights = obj_dict["output"]["weights"]
    if weights is not None:
        # fix the dimensionality of X_feat in case it was 0 dimensional
        if weights["feature_weights"].shape == (0,):
            weights["feature_weights"].shape = (0, obj_dict["param"]["num_tasks"])
    dc._set_var_res(weights)
    return dc
Load the object from a dictionary (produced with :py:func:`Concise.to_dict`) Returns: Concise: Loaded Concise object.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1062-L1098
gagneurlab/concise
concise/legacy/concise.py
Concise.load
def load(cls, file_path):
    """
    Load the object from a JSON file (saved with :py:func:`Concise.save`).

    Returns:
        Concise: Loaded Concise object.
    """
    # convert back to numpy
    data = helper.read_json(file_path)
    return Concise.from_dict(data)
python
def load(cls, file_path):
    """
    Load the object from a JSON file (saved with :py:func:`Concise.save`).

    Returns:
        Concise: Loaded Concise object.
    """
    # convert back to numpy
    data = helper.read_json(file_path)
    return Concise.from_dict(data)
Load the object from a JSON file (saved with :py:func:`Concise.save`). Returns: Concise: Loaded Concise object.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1101-L1111
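Since `load` references a :py:func:`Concise.save` counterpart, a persistence round trip presumably looks like the sketch below; the `save(file_path)` signature and the file name are assumptions.

```python
co.save("concise_model.json")  # assumed signature, mirroring Concise.load
co_restored = Concise.load("concise_model.json")
# the restored model should reproduce the original predictions
np.allclose(co.predict(X_feat, X_seq), co_restored.predict(X_feat, X_seq))
```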
gagneurlab/concise
concise/legacy/concise.py
ConciseCV._get_folds
def _get_folds(n_rows, n_folds, use_stored):
    """
    Get the used CV folds
    """
    # n_folds = self._n_folds
    # use_stored = self._use_stored_folds
    # n_rows = self._n_rows
    if use_stored is not None:
        # path = '~/concise/data-offline/lw-pombe/cv_folds_5.json'
        with open(os.path.expanduser(use_stored)) as json_file:
            json_data = json.load(json_file)

        # check if we have the same number of rows and folds:
        if json_data['N_rows'] != n_rows:
            raise Exception("N_rows from folds doesn't match the number of rows of X_seq, X_feat, y")
        if json_data['N_folds'] != n_folds:
            raise Exception("n_folds don't match", json_data['N_folds'], n_folds)

        kf = [(np.array(train), np.array(test)) for (train, test) in json_data['folds']]
    else:
        kf = KFold(n_splits=n_folds).split(np.zeros((n_rows, 1)))

    # store in a list
    i = 1
    folds = []
    for train, test in kf:
        fold = "fold_" + str(i)
        folds.append((fold, train, test))
        i = i + 1
    return folds
python
def _get_folds(n_rows, n_folds, use_stored):
    """
    Get the used CV folds
    """
    # n_folds = self._n_folds
    # use_stored = self._use_stored_folds
    # n_rows = self._n_rows
    if use_stored is not None:
        # path = '~/concise/data-offline/lw-pombe/cv_folds_5.json'
        with open(os.path.expanduser(use_stored)) as json_file:
            json_data = json.load(json_file)

        # check if we have the same number of rows and folds:
        if json_data['N_rows'] != n_rows:
            raise Exception("N_rows from folds doesn't match the number of rows of X_seq, X_feat, y")
        if json_data['N_folds'] != n_folds:
            raise Exception("n_folds don't match", json_data['N_folds'], n_folds)

        kf = [(np.array(train), np.array(test)) for (train, test) in json_data['folds']]
    else:
        kf = KFold(n_splits=n_folds).split(np.zeros((n_rows, 1)))

    # store in a list
    i = 1
    folds = []
    for train, test in kf:
        fold = "fold_" + str(i)
        folds.append((fold, train, test))
        i = i + 1
    return folds
Get the used CV folds
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1149-L1180
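The schema of a stored-folds file can be inferred from the checks in `_get_folds` (keys `N_rows`, `N_folds`, `folds`); the concrete indices and file name below are illustrative.

```python
import json

folds_json = {
    "N_rows": 6,
    "N_folds": 3,
    "folds": [[[0, 1, 2, 3], [4, 5]],  # [train_indices, test_indices] per fold
              [[0, 1, 4, 5], [2, 3]],
              [[2, 3, 4, 5], [0, 1]]]
}
with open("cv_folds_3.json", "w") as f:
    json.dump(folds_json, f)
# ConciseCV.train(..., use_stored_folds="cv_folds_3.json") would then reuse these folds
```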
gagneurlab/concise
concise/legacy/concise.py
ConciseCV.train
def train(self, X_feat, X_seq, y, id_vec=None, n_folds=10,
          use_stored_folds=None, n_cores=1, train_global_model=False):
    """Train the Concise model in cross-validation.

    Args:
        X_feat: See :py:func:`concise.Concise.train`
        X_seq: See :py:func:`concise.Concise.train`
        y: See :py:func:`concise.Concise.train`
        id_vec: List of character id's used to differentiate the training samples. Returned by :py:func:`concise.prepare_data`.
        n_folds (int): Number of CV-folds to use.
        use_stored_folds (chr or None): File path to a .json file containing the fold information (as returned by :py:func:`concise.ConciseCV.get_folds`). If None, the folds are generated.
        n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored.
        train_global_model (bool): In addition to training the model in cross-validation, should the global model be fitted (using all the samples from :code:`(X_feat, X_seq, y)`).
    """
    # TODO: input check - dimensions
    self._use_stored_folds = use_stored_folds
    self._n_folds = n_folds
    self._n_rows = X_feat.shape[0]

    # TODO: - fix the get_cv_accuracy
    # save:
    # - each model
    # - each model's performance
    # - each model's predictions
    # - globally:
    #   - mean performance
    #   - sd performance
    #   - predictions
    self._kf = self._get_folds(self._n_rows, self._n_folds, self._use_stored_folds)
    cv_obj = {}

    if id_vec is None:
        id_vec = np.arange(1, self._n_rows + 1)

    best_val_acc_epoch_l = []
    for fold, train, test in self._kf:
        X_feat_train = X_feat[train]
        X_seq_train = X_seq[train]
        y_train = y[train]

        X_feat_test = X_feat[test]
        X_seq_test = X_seq[test]
        y_test = y[test]
        id_vec_test = id_vec[test]
        print(fold, "/", n_folds)

        # copy the object
        dc = copy.deepcopy(self._concise_model)
        dc.train(X_feat_train, X_seq_train, y_train,
                 X_feat_test, X_seq_test, y_test,
                 n_cores=n_cores
                 )
        dc._test(X_feat_test, X_seq_test, y_test, id_vec_test)
        cv_obj[fold] = dc
        best_val_acc_epoch_l.append(dc.get_accuracy()["best_val_acc_epoch"])

    self._cv_model = cv_obj

    # additionally train the global model
    if train_global_model:
        dc = copy.deepcopy(self._concise_model)
        # overwrite n_epochs with the best average number of best epochs
        dc._param["n_epochs"] = int(np.array(best_val_acc_epoch_l).mean())
        print("training global model with n_epochs = " + str(dc._param["n_epochs"]))
        dc.train(X_feat, X_seq, y,
                 n_cores=n_cores
                 )
        dc._test(X_feat, X_seq, y, id_vec)
        self._concise_global_model = dc
python
def train(self, X_feat, X_seq, y, id_vec=None, n_folds=10,
          use_stored_folds=None, n_cores=1, train_global_model=False):
    """Train the Concise model in cross-validation.

    Args:
        X_feat: See :py:func:`concise.Concise.train`
        X_seq: See :py:func:`concise.Concise.train`
        y: See :py:func:`concise.Concise.train`
        id_vec: List of character id's used to differentiate the training samples. Returned by :py:func:`concise.prepare_data`.
        n_folds (int): Number of CV-folds to use.
        use_stored_folds (chr or None): File path to a .json file containing the fold information (as returned by :py:func:`concise.ConciseCV.get_folds`). If None, the folds are generated.
        n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored.
        train_global_model (bool): In addition to training the model in cross-validation, should the global model be fitted (using all the samples from :code:`(X_feat, X_seq, y)`).
    """
    # TODO: input check - dimensions
    self._use_stored_folds = use_stored_folds
    self._n_folds = n_folds
    self._n_rows = X_feat.shape[0]

    # TODO: - fix the get_cv_accuracy
    # save:
    # - each model
    # - each model's performance
    # - each model's predictions
    # - globally:
    #   - mean performance
    #   - sd performance
    #   - predictions
    self._kf = self._get_folds(self._n_rows, self._n_folds, self._use_stored_folds)
    cv_obj = {}

    if id_vec is None:
        id_vec = np.arange(1, self._n_rows + 1)

    best_val_acc_epoch_l = []
    for fold, train, test in self._kf:
        X_feat_train = X_feat[train]
        X_seq_train = X_seq[train]
        y_train = y[train]

        X_feat_test = X_feat[test]
        X_seq_test = X_seq[test]
        y_test = y[test]
        id_vec_test = id_vec[test]
        print(fold, "/", n_folds)

        # copy the object
        dc = copy.deepcopy(self._concise_model)
        dc.train(X_feat_train, X_seq_train, y_train,
                 X_feat_test, X_seq_test, y_test,
                 n_cores=n_cores
                 )
        dc._test(X_feat_test, X_seq_test, y_test, id_vec_test)
        cv_obj[fold] = dc
        best_val_acc_epoch_l.append(dc.get_accuracy()["best_val_acc_epoch"])

    self._cv_model = cv_obj

    # additionally train the global model
    if train_global_model:
        dc = copy.deepcopy(self._concise_model)
        # overwrite n_epochs with the best average number of best epochs
        dc._param["n_epochs"] = int(np.array(best_val_acc_epoch_l).mean())
        print("training global model with n_epochs = " + str(dc._param["n_epochs"]))
        dc.train(X_feat, X_seq, y,
                 n_cores=n_cores
                 )
        dc._test(X_feat, X_seq, y, id_vec)
        self._concise_global_model = dc
Train the Concise model in cross-validation. Args: X_feat: See :py:func:`concise.Concise.train` X_seq: See :py:func:`concise.Concise.train` y: See :py:func:`concise.Concise.train` id_vec: List of character id's used to differentiate the training samples. Returned by :py:func:`concise.prepare_data`. n_folds (int): Number of CV-folds to use. use_stored_folds (chr or None): File path to a .json file containing the fold information (as returned by :py:func:`concise.ConciseCV.get_folds`). If None, the folds are generated. n_cores (int): Number of CPU cores used for training. If available, GPU is used for training and this argument is ignored. train_global_model (bool): In addition to training the model in cross-validation, should the global model be fitted (using all the samples from :code:`(X_feat, X_seq, y)`).
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1193-L1265
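A cross-validation workflow sketch assembled from this method and the accessors below; the constructor hyper-parameters are placeholders, and the wrapping pattern `ConciseCV(model)` is taken from `ConciseCV.from_dict` further down.

```python
cv = ConciseCV(Concise(n_epochs=50))  # wrap a template Concise model
cv.train(X_feat, X_seq, y, n_folds=5, n_cores=2, train_global_model=True)
y_oof = cv.get_CV_prediction()        # out-of-fold predictions, shape (N, num_tasks)
fold_acc = cv.get_CV_accuracy()       # {"fold_1": ..., ..., "fold_5": ...}
```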
gagneurlab/concise
concise/legacy/concise.py
ConciseCV.get_CV_prediction
def get_CV_prediction(self):
    """
    Returns:
        np.ndarray: Predictions on the hold-out folds (unseen data, corresponds to :py:attr:`y`).
    """
    # TODO: get it from the test_prediction ...
    # test_id, prediction
    # sort by test_id
    predict_vec = np.zeros((self._n_rows, self._concise_model._num_tasks))
    for fold, train, test in self._kf:
        acc = self._cv_model[fold].get_accuracy()
        predict_vec[test, :] = acc["y_test_prediction"]
    return predict_vec
python
def get_CV_prediction(self):
    """
    Returns:
        np.ndarray: Predictions on the hold-out folds (unseen data, corresponds to :py:attr:`y`).
    """
    # TODO: get it from the test_prediction ...
    # test_id, prediction
    # sort by test_id
    predict_vec = np.zeros((self._n_rows, self._concise_model._num_tasks))
    for fold, train, test in self._kf:
        acc = self._cv_model[fold].get_accuracy()
        predict_vec[test, :] = acc["y_test_prediction"]
    return predict_vec
Returns: np.ndarray: Predictions on the hold-out folds (unseen data, corresponds to :py:attr:`y`).
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1267-L1279
gagneurlab/concise
concise/legacy/concise.py
ConciseCV.get_CV_accuracy
def get_CV_accuracy(self):
    """
    Returns:
        dict: Prediction accuracy for each CV fold.
    """
    accuracy = {}
    for fold, train, test in self._kf:
        acc = self._cv_model[fold].get_accuracy()
        accuracy[fold] = acc["test_acc_final"]
    return accuracy
python
def get_CV_accuracy(self):
    """
    Returns:
        dict: Prediction accuracy for each CV fold.
    """
    accuracy = {}
    for fold, train, test in self._kf:
        acc = self._cv_model[fold].get_accuracy()
        accuracy[fold] = acc["test_acc_final"]
    return accuracy
Returns: dict: Prediction accuracy for each CV fold.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1281-L1290
gagneurlab/concise
concise/legacy/concise.py
ConciseCV.to_dict
def to_dict(self):
    """
    Returns:
        dict: ConciseCV represented as a dictionary.
    """
    param = {
        "n_folds": self._n_folds,
        "n_rows": self._n_rows,
        "use_stored_folds": self._use_stored_folds
    }
    if self._concise_global_model is None:
        trained_global_model = None
    else:
        trained_global_model = self._concise_global_model.to_dict()

    obj_dict = {"param": param,
                "folds": self._kf,
                "init_model": self._concise_model.to_dict(),
                "trained_global_model": trained_global_model,
                "output": {fold: model.to_dict() for fold, model in self.get_CV_models().items()}
                }
    return obj_dict
python
def to_dict(self):
    """
    Returns:
        dict: ConciseCV represented as a dictionary.
    """
    param = {
        "n_folds": self._n_folds,
        "n_rows": self._n_rows,
        "use_stored_folds": self._use_stored_folds
    }
    if self._concise_global_model is None:
        trained_global_model = None
    else:
        trained_global_model = self._concise_global_model.to_dict()

    obj_dict = {"param": param,
                "folds": self._kf,
                "init_model": self._concise_model.to_dict(),
                "trained_global_model": trained_global_model,
                "output": {fold: model.to_dict() for fold, model in self.get_CV_models().items()}
                }
    return obj_dict
Returns: dict: ConciseCV represented as a dictionary.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1306-L1328
gagneurlab/concise
concise/legacy/concise.py
ConciseCV.from_dict
def from_dict(cls, obj_dict):
    """
    Load the object from a dictionary (produced with :py:func:`ConciseCV.to_dict`)

    Returns:
        ConciseCV: Loaded ConciseCV object.
    """
    default_model = Concise()
    cvdc = ConciseCV(default_model)
    cvdc._from_dict(obj_dict)
    return cvdc
python
def from_dict(cls, obj_dict):
    """
    Load the object from a dictionary (produced with :py:func:`ConciseCV.to_dict`)

    Returns:
        ConciseCV: Loaded ConciseCV object.
    """
    default_model = Concise()
    cvdc = ConciseCV(default_model)
    cvdc._from_dict(obj_dict)
    return cvdc
Load the object from a dictionary (produced with :py:func:`ConciseCV.to_dict`) Returns: ConciseCV: Loaded ConciseCV object.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1331-L1340
gagneurlab/concise
concise/legacy/concise.py
ConciseCV._from_dict
def _from_dict(self, obj_dict):
    """
    Initialize a model from the dictionary
    """
    self._n_folds = obj_dict["param"]["n_folds"]
    self._n_rows = obj_dict["param"]["n_rows"]
    self._use_stored_folds = obj_dict["param"]["use_stored_folds"]

    self._concise_model = Concise.from_dict(obj_dict["init_model"])
    if obj_dict["trained_global_model"] is None:
        self._concise_global_model = None
    else:
        self._concise_global_model = Concise.from_dict(obj_dict["trained_global_model"])

    self._kf = [(fold, np.asarray(train), np.asarray(test))
                for fold, train, test in obj_dict["folds"]]
    self._cv_model = {fold: Concise.from_dict(model_dict)
                      for fold, model_dict in obj_dict["output"].items()}
python
def _from_dict(self, obj_dict):
    """
    Initialize a model from the dictionary
    """
    self._n_folds = obj_dict["param"]["n_folds"]
    self._n_rows = obj_dict["param"]["n_rows"]
    self._use_stored_folds = obj_dict["param"]["use_stored_folds"]

    self._concise_model = Concise.from_dict(obj_dict["init_model"])
    if obj_dict["trained_global_model"] is None:
        self._concise_global_model = None
    else:
        self._concise_global_model = Concise.from_dict(obj_dict["trained_global_model"])

    self._kf = [(fold, np.asarray(train), np.asarray(test))
                for fold, train, test in obj_dict["folds"]]
    self._cv_model = {fold: Concise.from_dict(model_dict)
                      for fold, model_dict in obj_dict["output"].items()}
Initialize a model from the dictionary
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1342-L1358
gagneurlab/concise
concise/legacy/concise.py
ConciseCV.load
def load(cls, file_path):
    """
    Load the object from a JSON file (saved with :py:func:`ConciseCV.save`)

    Returns:
        ConciseCV: Loaded ConciseCV object.
    """
    data = helper.read_json(file_path)
    return ConciseCV.from_dict(data)
python
def load(cls, file_path):
    """
    Load the object from a JSON file (saved with :py:func:`ConciseCV.save`)

    Returns:
        ConciseCV: Loaded ConciseCV object.
    """
    data = helper.read_json(file_path)
    return ConciseCV.from_dict(data)
Load the object from a JSON file (saved with :py:func:`ConciseCV.save`) Returns: ConciseCV: Loaded ConciseCV object.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/legacy/concise.py#L1370-L1378
gagneurlab/concise
concise/utils/pwm.py
pwm_array2pssm_array
def pwm_array2pssm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND):
    """Convert pwm array to pssm array
    """
    b = background_probs2array(background_probs)
    b = b.reshape([1, 4, 1])
    return np.log(arr / b).astype(arr.dtype)
python
def pwm_array2pssm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND):
    """Convert pwm array to pssm array
    """
    b = background_probs2array(background_probs)
    b = b.reshape([1, 4, 1])
    return np.log(arr / b).astype(arr.dtype)
Convert pwm array to pssm array
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/pwm.py#L239-L244
gagneurlab/concise
concise/utils/pwm.py
pssm_array2pwm_array
def pssm_array2pwm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND):
    """Convert pssm array to pwm array
    """
    b = background_probs2array(background_probs)
    b = b.reshape([1, 4, 1])
    return (np.exp(arr) * b).astype(arr.dtype)
python
def pssm_array2pwm_array(arr, background_probs=DEFAULT_BASE_BACKGROUND):
    """Convert pssm array to pwm array
    """
    b = background_probs2array(background_probs)
    b = b.reshape([1, 4, 1])
    return (np.exp(arr) * b).astype(arr.dtype)
Convert pssm array to pwm array
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/pwm.py#L247-L252
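The two conversions above are element-wise log-odds transforms and exact inverses of each other. A worked check in plain numpy, assuming a uniform 0.25 base background (the shape `(1, 4, 1)` matches the reshape used in both functions):

```python
import numpy as np

pwm = np.array([[[0.7], [0.1], [0.1], [0.1]]])  # shape (1, 4, 1): one motif position
b = np.full((1, 4, 1), 0.25)                    # uniform background

pssm = np.log(pwm / b)        # log(0.7 / 0.25) ≈ 1.03 for the first base
pwm_back = np.exp(pssm) * b   # inverse transform
assert np.allclose(pwm, pwm_back)
```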
gagneurlab/concise
concise/utils/pwm.py
load_motif_db
def load_motif_db(filename, skipn_matrix=0):
    """Read the motif file in the following format

    ```
    >motif_name
    <skip n>0.1<delim>0.2<delim>0.5<delim>0.6
    ...
    >motif_name2
    ....
    ```

    Delim can be anything supported by np.loadtxt

    # Arguments
        filename: str, file path
        skipn_matrix: integer, number of characters to skip when reading the numeric matrix (for Encode = 2)

    # Returns
        Dictionary of numpy arrays
    """
    # read-lines
    if filename.endswith(".gz"):
        f = gzip.open(filename, 'rt', encoding='utf-8')
    else:
        f = open(filename, 'r')
    lines = f.readlines()
    f.close()

    motifs_dict = {}

    motif_lines = ""
    motif_name = None

    def lines2matrix(lines):
        return np.loadtxt(StringIO(lines))

    for line in lines:
        if line.startswith(">"):
            if motif_lines:
                # lines -> matrix
                motifs_dict[motif_name] = lines2matrix(motif_lines)
            motif_name = line[1:].strip()
            motif_lines = ""
        else:
            motif_lines += line[skipn_matrix:]

    if motif_lines and motif_name is not None:
        motifs_dict[motif_name] = lines2matrix(motif_lines)

    return motifs_dict
python
def load_motif_db(filename, skipn_matrix=0):
    """Read the motif file in the following format

    ```
    >motif_name
    <skip n>0.1<delim>0.2<delim>0.5<delim>0.6
    ...
    >motif_name2
    ....
    ```

    Delim can be anything supported by np.loadtxt

    # Arguments
        filename: str, file path
        skipn_matrix: integer, number of characters to skip when reading the numeric matrix (for Encode = 2)

    # Returns
        Dictionary of numpy arrays
    """
    # read-lines
    if filename.endswith(".gz"):
        f = gzip.open(filename, 'rt', encoding='utf-8')
    else:
        f = open(filename, 'r')
    lines = f.readlines()
    f.close()

    motifs_dict = {}

    motif_lines = ""
    motif_name = None

    def lines2matrix(lines):
        return np.loadtxt(StringIO(lines))

    for line in lines:
        if line.startswith(">"):
            if motif_lines:
                # lines -> matrix
                motifs_dict[motif_name] = lines2matrix(motif_lines)
            motif_name = line[1:].strip()
            motif_lines = ""
        else:
            motif_lines += line[skipn_matrix:]

    if motif_lines and motif_name is not None:
        motifs_dict[motif_name] = lines2matrix(motif_lines)

    return motifs_dict
Read the motif file in the following format ``` >motif_name <skip n>0.1<delim>0.2<delim>0.5<delim>0.6 ... >motif_name2 .... ``` Delim can be anything supported by np.loadtxt # Arguments filename: str, file path skipn_matrix: integer, number of characters to skip when reading the numeric matrix (for Encode = 2) # Returns Dictionary of numpy arrays
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/pwm.py#L255-L306
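A usage sketch with a two-position motif in the format described by the docstring; the file name is illustrative and the import path is taken from this record.

```python
from concise.utils.pwm import load_motif_db

with open("motifs.txt", "w") as f:
    f.write(">motif_A\n"
            "0.1 0.2 0.5 0.2\n"
            "0.7 0.1 0.1 0.1\n")

motifs = load_motif_db("motifs.txt")
print(motifs["motif_A"].shape)  # (2, 4): one row per motif position
```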
gagneurlab/concise
concise/effects/dropout.py
dropout_pred
def dropout_pred(model, ref, ref_rc, alt, alt_rc, mutation_positions, out_annotation_all_outputs,
                 output_filter_mask=None, out_annotation=None, dropout_iterations=30):
    """Dropout-based variant effect prediction

    This method is based on the ideas in [Gal et al.](https://arxiv.org/pdf/1506.02142.pdf) where dropout layers are also activated in the model prediction phase in order to estimate model uncertainty. The advantage of this method is that instead of a point estimate of the model output the distribution of the model output is estimated.

    # Arguments
        model: Keras model
        ref: Input sequence with the reference genotype in the mutation position
        ref_rc: Reverse complement of the 'ref' argument
        alt: Input sequence with the alternative genotype in the mutation position
        alt_rc: Reverse complement of the 'alt' argument
        mutation_positions: Position on which the mutation was placed in the forward sequences
        out_annotation_all_outputs: Output labels of the model.
        output_filter_mask: Mask of boolean values indicating which model outputs should be used. Use this or 'out_annotation'
        out_annotation: List of output labels for which of the outputs (in case of a multi-task model) the predictions should be calculated.
        dropout_iterations: Number of prediction iterations to be performed in order to estimate the output distribution. Values greater than 30 are recommended to get a reliable p-value.

    # Returns
        Dictionary with a set of measures of the model uncertainty in the variant position. The ones of interest are:

        - do_{ref, alt}_mean: Mean of the model predictions given the respective input sequence and dropout. Forward or reverse-complement sequences are chosen as for 'do_pv'.
        - do_{ref, alt}_var: Variance of the model predictions given the respective input sequence and dropout. Forward or reverse-complement sequences are chosen as for 'do_pv'.
        - do_diff: 'do_alt_mean' - 'do_ref_mean', which is an estimate similar to ISM using diff_type "diff".
        - do_pv: P-value of a t-test comparing the predictions of ref with the ones of alt. Forward or reverse-complement sequences are chosen based on which pair has the lower p-value.
    """
    prefix = "do"

    seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc}

    assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k])) for k in seqs.keys() if k != "ref"])
    assert get_seq_len(ref)[0] == mutation_positions.shape[0]
    assert len(mutation_positions.shape) == 1

    # determine which outputs should be selected
    if output_filter_mask is None:
        if out_annotation is None:
            output_filter_mask = np.arange(out_annotation_all_outputs.shape[0])
        else:
            output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]

    # make sure the labels are assigned correctly
    out_annotation = out_annotation_all_outputs[output_filter_mask]

    # Instead of loading the model from a json file I will transfer the model architecture + weights in memory
    model_config = model._updated_config()
    alt_config = replace_dict_values(model_config, u"Dropout", u"BiDropout")

    # Custom objects have to be added before correctly!
    alt_model = keras.layers.deserialize(alt_config)

    # Transfer weights and biases
    alt_model.set_weights(model.get_weights())

    # ANALOGOUS TO ISM:
    # predict
    preds = {}
    for k in seqs:
        preds[k] = pred_do(alt_model, seqs[k], output_filter_mask=output_filter_mask,
                           dropout_iterations=dropout_iterations)

    t, prob = ttest_ind(preds["ref"], preds["alt"], axis=0)
    t_rc, prob_rc = ttest_ind(preds["ref_rc"], preds["alt_rc"], axis=0)

    logit_prob = None
    logit_prob_rc = None

    pred_range = get_range(preds)

    # In case the predictions are bound to [0,1] it might make sense to use logit on the data, as the model output
    # could be probabilities
    if np.all([(pred_range[k] >= 0) and (pred_range[k] <= 1) for k in pred_range]):
        logit_preds = apply_over_single(preds, logit)
        logit_prob = apply_over_double(logit_preds["ref"], logit_preds["alt"], apply_func=ttest_ind,
                                       select_return_elm=1, axis=0)
        logit_prob_rc = apply_over_double(logit_preds["ref_rc"], logit_preds["alt_rc"], apply_func=ttest_ind,
                                          select_return_elm=1, axis=0)

    # fwd and rc are independent here... so this can be done differently here...
    sel = (np.abs(prob) > np.abs(prob_rc)).astype(np.int)  # Select the LOWER p-value among fwd and rc

    out_dict = {}
    out_dict["%s_pv" % prefix] = pd.DataFrame(overwite_by(prob, prob_rc, sel), columns=out_annotation)

    if logit_prob is not None:
        logit_sel = (np.abs(logit_prob) > np.abs(logit_prob_rc)).astype(np.int)
        out_dict["%s_logit_pv" % prefix] = pd.DataFrame(overwite_by(logit_prob, logit_prob_rc, logit_sel),
                                                        columns=out_annotation)

    pred_means = {}
    pred_vars = {}
    pred_cvar2 = {}
    for k in preds:
        pred_means[k] = np.mean(preds[k], axis=0)
        pred_vars[k] = np.var(preds[k], axis=0)
        pred_cvar2[k] = pred_vars[k] / (pred_means[k] ** 2)

    mean_cvar = np.sqrt((pred_cvar2["ref"] + pred_cvar2["alt"]) / 2)
    mean_cvar_rc = np.sqrt((pred_cvar2["ref_rc"] + pred_cvar2["alt_rc"]) / 2)

    mean_cvar = overwite_by(mean_cvar, mean_cvar_rc, sel)
    ref_mean = overwite_by(pred_means["ref"], pred_means["ref_rc"], sel)
    alt_mean = overwite_by(pred_means["alt"], pred_means["alt_rc"], sel)
    ref_var = overwite_by(pred_vars["ref"], pred_vars["ref_rc"], sel)
    alt_var = overwite_by(pred_vars["alt"], pred_vars["alt_rc"], sel)

    out_dict["%s_ref_mean" % prefix] = pd.DataFrame(ref_mean, columns=out_annotation)
    out_dict["%s_alt_mean" % prefix] = pd.DataFrame(alt_mean, columns=out_annotation)
    out_dict["%s_ref_var" % prefix] = pd.DataFrame(ref_var, columns=out_annotation)
    out_dict["%s_alt_var" % prefix] = pd.DataFrame(alt_var, columns=out_annotation)
    out_dict["%s_cvar" % prefix] = pd.DataFrame(mean_cvar, columns=out_annotation)
    out_dict["%s_diff" % prefix] = out_dict["%s_alt_mean" % prefix] - out_dict["%s_ref_mean" % prefix]

    return out_dict
python
def dropout_pred(model, ref, ref_rc, alt, alt_rc, mutation_positions, out_annotation_all_outputs,
                 output_filter_mask=None, out_annotation=None, dropout_iterations=30):
    """Dropout-based variant effect prediction

    This method is based on the ideas in [Gal et al.](https://arxiv.org/pdf/1506.02142.pdf) where dropout layers are also activated in the model prediction phase in order to estimate model uncertainty. The advantage of this method is that instead of a point estimate of the model output the distribution of the model output is estimated.

    # Arguments
        model: Keras model
        ref: Input sequence with the reference genotype in the mutation position
        ref_rc: Reverse complement of the 'ref' argument
        alt: Input sequence with the alternative genotype in the mutation position
        alt_rc: Reverse complement of the 'alt' argument
        mutation_positions: Position on which the mutation was placed in the forward sequences
        out_annotation_all_outputs: Output labels of the model.
        output_filter_mask: Mask of boolean values indicating which model outputs should be used. Use this or 'out_annotation'
        out_annotation: List of output labels for which of the outputs (in case of a multi-task model) the predictions should be calculated.
        dropout_iterations: Number of prediction iterations to be performed in order to estimate the output distribution. Values greater than 30 are recommended to get a reliable p-value.

    # Returns
        Dictionary with a set of measures of the model uncertainty in the variant position. The ones of interest are:

        - do_{ref, alt}_mean: Mean of the model predictions given the respective input sequence and dropout. Forward or reverse-complement sequences are chosen as for 'do_pv'.
        - do_{ref, alt}_var: Variance of the model predictions given the respective input sequence and dropout. Forward or reverse-complement sequences are chosen as for 'do_pv'.
        - do_diff: 'do_alt_mean' - 'do_ref_mean', which is an estimate similar to ISM using diff_type "diff".
        - do_pv: P-value of a t-test comparing the predictions of ref with the ones of alt. Forward or reverse-complement sequences are chosen based on which pair has the lower p-value.
    """
    prefix = "do"

    seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc}

    assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k])) for k in seqs.keys() if k != "ref"])
    assert get_seq_len(ref)[0] == mutation_positions.shape[0]
    assert len(mutation_positions.shape) == 1

    # determine which outputs should be selected
    if output_filter_mask is None:
        if out_annotation is None:
            output_filter_mask = np.arange(out_annotation_all_outputs.shape[0])
        else:
            output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]

    # make sure the labels are assigned correctly
    out_annotation = out_annotation_all_outputs[output_filter_mask]

    # Instead of loading the model from a json file I will transfer the model architecture + weights in memory
    model_config = model._updated_config()
    alt_config = replace_dict_values(model_config, u"Dropout", u"BiDropout")

    # Custom objects have to be added before correctly!
    alt_model = keras.layers.deserialize(alt_config)

    # Transfer weights and biases
    alt_model.set_weights(model.get_weights())

    # ANALOGOUS TO ISM:
    # predict
    preds = {}
    for k in seqs:
        preds[k] = pred_do(alt_model, seqs[k], output_filter_mask=output_filter_mask,
                           dropout_iterations=dropout_iterations)

    t, prob = ttest_ind(preds["ref"], preds["alt"], axis=0)
    t_rc, prob_rc = ttest_ind(preds["ref_rc"], preds["alt_rc"], axis=0)

    logit_prob = None
    logit_prob_rc = None

    pred_range = get_range(preds)

    # In case the predictions are bound to [0,1] it might make sense to use logit on the data, as the model output
    # could be probabilities
    if np.all([(pred_range[k] >= 0) and (pred_range[k] <= 1) for k in pred_range]):
        logit_preds = apply_over_single(preds, logit)
        logit_prob = apply_over_double(logit_preds["ref"], logit_preds["alt"], apply_func=ttest_ind,
                                       select_return_elm=1, axis=0)
        logit_prob_rc = apply_over_double(logit_preds["ref_rc"], logit_preds["alt_rc"], apply_func=ttest_ind,
                                          select_return_elm=1, axis=0)

    # fwd and rc are independent here... so this can be done differently here...
    sel = (np.abs(prob) > np.abs(prob_rc)).astype(np.int)  # Select the LOWER p-value among fwd and rc

    out_dict = {}
    out_dict["%s_pv" % prefix] = pd.DataFrame(overwite_by(prob, prob_rc, sel), columns=out_annotation)

    if logit_prob is not None:
        logit_sel = (np.abs(logit_prob) > np.abs(logit_prob_rc)).astype(np.int)
        out_dict["%s_logit_pv" % prefix] = pd.DataFrame(overwite_by(logit_prob, logit_prob_rc, logit_sel),
                                                        columns=out_annotation)

    pred_means = {}
    pred_vars = {}
    pred_cvar2 = {}
    for k in preds:
        pred_means[k] = np.mean(preds[k], axis=0)
        pred_vars[k] = np.var(preds[k], axis=0)
        pred_cvar2[k] = pred_vars[k] / (pred_means[k] ** 2)

    mean_cvar = np.sqrt((pred_cvar2["ref"] + pred_cvar2["alt"]) / 2)
    mean_cvar_rc = np.sqrt((pred_cvar2["ref_rc"] + pred_cvar2["alt_rc"]) / 2)

    mean_cvar = overwite_by(mean_cvar, mean_cvar_rc, sel)
    ref_mean = overwite_by(pred_means["ref"], pred_means["ref_rc"], sel)
    alt_mean = overwite_by(pred_means["alt"], pred_means["alt_rc"], sel)
    ref_var = overwite_by(pred_vars["ref"], pred_vars["ref_rc"], sel)
    alt_var = overwite_by(pred_vars["alt"], pred_vars["alt_rc"], sel)

    out_dict["%s_ref_mean" % prefix] = pd.DataFrame(ref_mean, columns=out_annotation)
    out_dict["%s_alt_mean" % prefix] = pd.DataFrame(alt_mean, columns=out_annotation)
    out_dict["%s_ref_var" % prefix] = pd.DataFrame(ref_var, columns=out_annotation)
    out_dict["%s_alt_var" % prefix] = pd.DataFrame(alt_var, columns=out_annotation)
    out_dict["%s_cvar" % prefix] = pd.DataFrame(mean_cvar, columns=out_annotation)
    out_dict["%s_diff" % prefix] = out_dict["%s_alt_mean" % prefix] - out_dict["%s_ref_mean" % prefix]

    return out_dict
Dropout-based variant effect prediction This method is based on the ideas in [Gal et al.](https://arxiv.org/pdf/1506.02142.pdf) where dropout layers are also activated in the model prediction phase in order to estimate model uncertainty. The advantage of this method is that instead of a point estimate of the model output the distribution of the model output is estimated. # Arguments model: Keras model ref: Input sequence with the reference genotype in the mutation position ref_rc: Reverse complement of the 'ref' argument alt: Input sequence with the alternative genotype in the mutation position alt_rc: Reverse complement of the 'alt' argument mutation_positions: Position on which the mutation was placed in the forward sequences out_annotation_all_outputs: Output labels of the model. output_filter_mask: Mask of boolean values indicating which model outputs should be used. Use this or 'out_annotation' out_annotation: List of output labels for which of the outputs (in case of a multi-task model) the predictions should be calculated. dropout_iterations: Number of prediction iterations to be performed in order to estimate the output distribution. Values greater than 30 are recommended to get a reliable p-value. # Returns Dictionary with a set of measures of the model uncertainty in the variant position. The ones of interest are: - do_{ref, alt}_mean: Mean of the model predictions given the respective input sequence and dropout. - Forward or reverse-complement sequences are chosen as for 'do_pv'. - do_{ref, alt}_var: Variance of the model predictions given the respective input sequence and dropout. - Forward or reverse-complement sequences are chosen as for 'do_pv'. - do_diff: 'do_alt_mean' - 'do_ref_mean', which is an estimate similar to ISM using diff_type "diff". - do_pv: P-value of a t-test comparing the predictions of ref with the ones of alt. Forward or - reverse-complement sequences are chosen based on which pair has the lower p-value.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/effects/dropout.py#L164-L287
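The tail of the dropout-based predictor above collects the MC-dropout summary statistics into `out_dict`. Below is a minimal usage sketch, assuming the public entry point of `concise.effects.dropout` is named `dropout_pred` (verify the exact name in the module); the toy model architecture and the random "one-hot" arrays are illustrative assumptions, not part of the library:

```python
import numpy as np
import keras
# Assumed entry point; check concise.effects.dropout for the exact name.
from concise.effects.dropout import dropout_pred

# Toy single-task model; the Dropout layer is what MC-dropout samples over.
model = keras.models.Sequential([
    keras.layers.Conv1D(8, 11, activation="relu", input_shape=(100, 4)),
    keras.layers.GlobalMaxPooling1D(),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(1, activation="sigmoid"),
])

n = 4
ref = np.random.rand(n, 100, 4)
alt = ref.copy()
alt[:, 50, :] = alt[:, 50, ::-1]                  # fake a variant at position 50
ref_rc, alt_rc = ref[:, ::-1, ::-1], alt[:, ::-1, ::-1]

res = dropout_pred(model, ref, ref_rc, alt, alt_rc,
                   mutation_positions=np.repeat(50, n),
                   out_annotation_all_outputs=np.array(["task"]),
                   dropout_iterations=30)         # >= 30 recommended for stable p-values
print(res["do_pv"])                               # t-test p-values (fwd/rc with the lower p-value)
print(res["do_diff"])                             # do_alt_mean - do_ref_mean
```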
gagneurlab/concise
concise/utils/fasta.py
iter_fasta
def iter_fasta(file_path):
    """Returns an iterator over the fasta file

    Given a fasta file, yield tuples of (header, sequence).

    Code modified from Brent Pedersen's:
    "Correct Way To Parse A Fasta File In Python"

    # Example
        ```python
        fasta = iter_fasta("hg19.fa")
        for header, seq in fasta:
            print(header)
        ```
    """
    fh = open(file_path)

    # ditch the boolean (x[0]) and just keep the header or sequence since
    # we know they alternate.
    faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))

    for header in faiter:
        # drop the ">"
        headerStr = header.__next__()[1:].strip()

        # join all sequence lines to one.
        seq = "".join(s.strip() for s in faiter.__next__())

        yield (headerStr, seq)
python
def iter_fasta(file_path):
    """Returns an iterator over the fasta file

    Given a fasta file, yield tuples of (header, sequence).

    Code modified from Brent Pedersen's:
    "Correct Way To Parse A Fasta File In Python"

    # Example
        ```python
        fasta = iter_fasta("hg19.fa")
        for header, seq in fasta:
            print(header)
        ```
    """
    fh = open(file_path)

    # ditch the boolean (x[0]) and just keep the header or sequence since
    # we know they alternate.
    faiter = (x[1] for x in groupby(fh, lambda line: line[0] == ">"))

    for header in faiter:
        # drop the ">"
        headerStr = header.__next__()[1:].strip()

        # join all sequence lines to one.
        seq = "".join(s.strip() for s in faiter.__next__())

        yield (headerStr, seq)
Returns an iterator over the fasta file

Given a fasta file, yield tuples of (header, sequence).

Code modified from Brent Pedersen's:
"Correct Way To Parse A Fasta File In Python"

# Example
    ```python
    fasta = iter_fasta("hg19.fa")
    for header, seq in fasta:
        print(header)
    ```
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/fasta.py#L11-L39
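A short usage sketch for the iterator above; `example.fa` is a placeholder path:

```python
from concise.utils.fasta import iter_fasta

# Stream (header, sequence) tuples without loading the whole file into memory.
for header, seq in iter_fasta("example.fa"):
    print(header, len(seq))
```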
gagneurlab/concise
concise/utils/fasta.py
write_fasta
def write_fasta(file_path, seq_list, name_list=None):
    """Write a fasta file

    # Arguments
        file_path: file path
        seq_list: List of strings
        name_list: List of names corresponding to the sequences.
            If None, sequences are numbered consecutively. If not None,
            it should have the same length as `seq_list`.
    """
    if name_list is None:
        name_list = [str(i) for i in range(len(seq_list))]
    with open(file_path, "w") as f:
        for i in range(len(seq_list)):
            f.write(">" + name_list[i] + "\n" + seq_list[i] + "\n")
python
def write_fasta(file_path, seq_list, name_list=None):
    """Write a fasta file

    # Arguments
        file_path: file path
        seq_list: List of strings
        name_list: List of names corresponding to the sequences.
            If None, sequences are numbered consecutively. If not None,
            it should have the same length as `seq_list`.
    """
    if name_list is None:
        name_list = [str(i) for i in range(len(seq_list))]
    with open(file_path, "w") as f:
        for i in range(len(seq_list)):
            f.write(">" + name_list[i] + "\n" + seq_list[i] + "\n")
Write a fasta file

# Arguments
    file_path: file path
    seq_list: List of strings
    name_list: List of names corresponding to the sequences.
        If None, sequences are numbered consecutively. If not None,
        it should have the same length as `seq_list`.
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/utils/fasta.py#L42-L57
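Writing and re-reading a file round-trips cleanly through the two helpers; a quick sketch using a throw-away path:

```python
from concise.utils.fasta import write_fasta, iter_fasta

seqs = ["ACGT", "GGGGCCCC"]
write_fasta("/tmp/demo.fa", seqs, name_list=["seq_a", "seq_b"])

# Read the records back with the iterator shown above.
assert [s for _, s in iter_fasta("/tmp/demo.fa")] == seqs
```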
gagneurlab/concise
concise/preprocessing/structure.py
run_RNAplfold
def run_RNAplfold(input_fasta, tmpdir, W=240, L=160, U=1):
    """
    # Arguments
        W: Int; span - window length
        L: Int; maximum span
        U: Int; size of the unpaired region
    """
    profiles = RNAplfold_PROFILES_EXECUTE

    for i, P in enumerate(profiles):
        print("running {P}_RNAplfold... ({i}/{N})".format(P=P, i=i + 1, N=len(profiles)))
        command = "{bin}/{P}_RNAplfold".format(bin=RNAplfold_BIN_DIR, P=P)
        file_out = "{tmp}/{P}_profile.fa".format(tmp=tmpdir, P=P)
        args = " -W {W} -L {L} -u {U} < {fa} > {file_out}".format(W=W, L=L, U=U,
                                                                  fa=input_fasta,
                                                                  file_out=file_out)
        os.system(command + args)

        # check if the file is empty
        if os.path.getsize(file_out) == 0:
            raise Exception("command wrote an empty file: {0}".format(file_out))
    print("done!")
python
def run_RNAplfold(input_fasta, tmpdir, W=240, L=160, U=1):
    """
    # Arguments
        W: Int; span - window length
        L: Int; maximum span
        U: Int; size of the unpaired region
    """
    profiles = RNAplfold_PROFILES_EXECUTE

    for i, P in enumerate(profiles):
        print("running {P}_RNAplfold... ({i}/{N})".format(P=P, i=i + 1, N=len(profiles)))
        command = "{bin}/{P}_RNAplfold".format(bin=RNAplfold_BIN_DIR, P=P)
        file_out = "{tmp}/{P}_profile.fa".format(tmp=tmpdir, P=P)
        args = " -W {W} -L {L} -u {U} < {fa} > {file_out}".format(W=W, L=L, U=U,
                                                                  fa=input_fasta,
                                                                  file_out=file_out)
        os.system(command + args)

        # check if the file is empty
        if os.path.getsize(file_out) == 0:
            raise Exception("command wrote an empty file: {0}".format(file_out))
    print("done!")
# Arguments
    W: Int; span - window length
    L: Int; maximum span
    U: Int; size of the unpaired region
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/structure.py#L18-L39
gagneurlab/concise
concise/preprocessing/structure.py
read_RNAplfold
def read_RNAplfold(tmpdir, maxlen=None, seq_align="start", pad_with="E"):
    """
    pad_with: with which secondary-structure state should the sequence be padded?
    """
    assert pad_with in {"P", "H", "I", "M", "E"}

    def read_profile(tmpdir, P):
        return [values.strip().split("\t")
                for seq_name, values in iter_fasta("{tmp}/{P}_profile.fa".format(tmp=tmpdir, P=P))]

    def nelem(P):
        """get the right neutral element
        """
        return 1 if P == pad_with else 0

    arr_hime = np.array([pad_sequences(read_profile(tmpdir, P),
                                       value=[nelem(P)],
                                       align=seq_align,
                                       maxlen=maxlen)
                         for P in RNAplfold_PROFILES_EXECUTE], dtype="float32")

    # add the pairedness column
    arr_p = 1 - arr_hime.sum(axis=0)[np.newaxis]
    arr = np.concatenate((arr_p, arr_hime))

    # reshape to: (seq, seq_length, num_channels)
    arr = np.moveaxis(arr, 0, 2)
    return arr
python
def read_RNAplfold(tmpdir, maxlen=None, seq_align="start", pad_with="E"):
    """
    pad_with: with which secondary-structure state should the sequence be padded?
    """
    assert pad_with in {"P", "H", "I", "M", "E"}

    def read_profile(tmpdir, P):
        return [values.strip().split("\t")
                for seq_name, values in iter_fasta("{tmp}/{P}_profile.fa".format(tmp=tmpdir, P=P))]

    def nelem(P):
        """get the right neutral element
        """
        return 1 if P == pad_with else 0

    arr_hime = np.array([pad_sequences(read_profile(tmpdir, P),
                                       value=[nelem(P)],
                                       align=seq_align,
                                       maxlen=maxlen)
                         for P in RNAplfold_PROFILES_EXECUTE], dtype="float32")

    # add the pairedness column
    arr_p = 1 - arr_hime.sum(axis=0)[np.newaxis]
    arr = np.concatenate((arr_p, arr_hime))

    # reshape to: (seq, seq_length, num_channels)
    arr = np.moveaxis(arr, 0, 2)
    return arr
pad_with: with which secondary-structure state should the sequence be padded?
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/structure.py#L42-L69
gagneurlab/concise
concise/preprocessing/structure.py
encodeRNAStructure
def encodeRNAStructure(seq_vec, maxlen=None, seq_align="start",
                       W=240, L=160, U=1,
                       tmpdir="/tmp/RNAplfold/"):
    """Compute RNA secondary structure with RNAplfold implemented in
    Kazan et al 2010, [doi](https://doi.org/10.1371/journal.pcbi.1000832).

    # Note
        Secondary structure is represented as the probability to be in the following states:
        - `["Pairedness", "Hairpin loop", "Internal loop", "Multi loop", "External region"]`

        See Kazan et al 2010, [doi](https://doi.org/10.1371/journal.pcbi.1000832)
        for more information.

    # Arguments
        seq_vec: list of DNA/RNA sequences
        maxlen: Maximum sequence length. See `concise.preprocessing.pad_sequences` for more detail
        seq_align: How to align the sequences of variable lengths. See
            `concise.preprocessing.pad_sequences` for more detail
        W: Int; span - window length
        L: Int; maximum span
        U: Int; size of the unpaired region
        tmpdir: Where to store the intermediary files of RNAplfold.

    # Note
        Recommended parameters:

        - for human, mouse use W, L, U: 240, 160, 1
        - for fly, yeast use W, L, U: 80, 40, 1

    # Returns
        np.ndarray of shape `(len(seq_vec), maxlen, 5)`
    """
    # extend the tmpdir with a uuid string to allow for parallel execution
    tmpdir = tmpdir + "/" + str(uuid4()) + "/"
    if not isinstance(seq_vec, list):
        seq_vec = seq_vec.tolist()
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)
    fasta_path = tmpdir + "/input.fasta"
    write_fasta(fasta_path, seq_vec)
    run_RNAplfold(fasta_path, tmpdir, W=W, L=L, U=U)

    # 1. split the fasta into pieces
    # 2. run_RNAplfold for each of them
    # 3. read the results
    return read_RNAplfold(tmpdir, maxlen, seq_align=seq_align, pad_with="E")
python
def encodeRNAStructure(seq_vec, maxlen=None, seq_align="start",
                       W=240, L=160, U=1,
                       tmpdir="/tmp/RNAplfold/"):
    """Compute RNA secondary structure with RNAplfold implemented in
    Kazan et al 2010, [doi](https://doi.org/10.1371/journal.pcbi.1000832).

    # Note
        Secondary structure is represented as the probability to be in the following states:
        - `["Pairedness", "Hairpin loop", "Internal loop", "Multi loop", "External region"]`

        See Kazan et al 2010, [doi](https://doi.org/10.1371/journal.pcbi.1000832)
        for more information.

    # Arguments
        seq_vec: list of DNA/RNA sequences
        maxlen: Maximum sequence length. See `concise.preprocessing.pad_sequences` for more detail
        seq_align: How to align the sequences of variable lengths. See
            `concise.preprocessing.pad_sequences` for more detail
        W: Int; span - window length
        L: Int; maximum span
        U: Int; size of the unpaired region
        tmpdir: Where to store the intermediary files of RNAplfold.

    # Note
        Recommended parameters:

        - for human, mouse use W, L, U: 240, 160, 1
        - for fly, yeast use W, L, U: 80, 40, 1

    # Returns
        np.ndarray of shape `(len(seq_vec), maxlen, 5)`
    """
    # extend the tmpdir with a uuid string to allow for parallel execution
    tmpdir = tmpdir + "/" + str(uuid4()) + "/"
    if not isinstance(seq_vec, list):
        seq_vec = seq_vec.tolist()
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)
    fasta_path = tmpdir + "/input.fasta"
    write_fasta(fasta_path, seq_vec)
    run_RNAplfold(fasta_path, tmpdir, W=W, L=L, U=U)

    # 1. split the fasta into pieces
    # 2. run_RNAplfold for each of them
    # 3. read the results
    return read_RNAplfold(tmpdir, maxlen, seq_align=seq_align, pad_with="E")
Compute RNA secondary structure with RNAplfold implemented in
Kazan et al 2010, [doi](https://doi.org/10.1371/journal.pcbi.1000832).

# Note
    Secondary structure is represented as the probability to be in the following states:
    - `["Pairedness", "Hairpin loop", "Internal loop", "Multi loop", "External region"]`

    See Kazan et al 2010, [doi](https://doi.org/10.1371/journal.pcbi.1000832)
    for more information.

# Arguments
    seq_vec: list of DNA/RNA sequences
    maxlen: Maximum sequence length. See `concise.preprocessing.pad_sequences` for more detail
    seq_align: How to align the sequences of variable lengths. See
        `concise.preprocessing.pad_sequences` for more detail
    W: Int; span - window length
    L: Int; maximum span
    U: Int; size of the unpaired region
    tmpdir: Where to store the intermediary files of RNAplfold.

# Note
    Recommended parameters:

    - for human, mouse use W, L, U: 240, 160, 1
    - for fly, yeast use W, L, U: 80, 40, 1

# Returns
    np.ndarray of shape `(len(seq_vec), maxlen, 5)`
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/preprocessing/structure.py#L92-L138
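A usage sketch of the full pipeline above (write the FASTA, run RNAplfold, read the profiles back). It assumes the RNAplfold binaries bundled with concise are available on the machine; the sequences and the small W/L values are illustrative:

```python
from concise.preprocessing.structure import encodeRNAStructure

seqs = ["ACGUACGUACGUACGUACGU", "GGGAAAUCCC"]
# Smaller W/L as recommended for short sequences; writes temp files under tmpdir.
arr = encodeRNAStructure(seqs, maxlen=20, W=80, L=40, U=1)
print(arr.shape)  # (2, 20, 5): paired, hairpin, internal, multi, external region
```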
gagneurlab/concise
concise/effects/ism.py
ism
def ism(model, ref, ref_rc, alt, alt_rc, mutation_positions,
        out_annotation_all_outputs, output_filter_mask=None,
        out_annotation=None, diff_type="log_odds", rc_handling="maximum"):
    """In-silico mutagenesis

    Using ISM with diff_type 'log_odds' and rc_handling 'maximum' will produce predictions as used
    in [DeepSEA](http://www.nature.com/nmeth/journal/v12/n10/full/nmeth.3547.html). ISM offers two
    ways to calculate the difference between the outputs created by reference and alternative
    sequence and two different methods to select whether to use the output generated from the forward
    or from the reverse-complement sequences. To calculate "e-values" as mentioned in DeepSEA, the
    same ISM prediction has to be performed on a randomised set of 1 million 1000genomes, MAF-matched
    variants to get a background of predicted effects of random SNPs.

    # Arguments
        model: Keras model
        ref: Input sequence with the reference genotype in the mutation position
        ref_rc: Reverse complement of the 'ref' argument
        alt: Input sequence with the alternative genotype in the mutation position
        alt_rc: Reverse complement of the 'alt' argument
        mutation_positions: Position on which the mutation was placed in the forward sequences
        out_annotation_all_outputs: Output labels of the model.
        output_filter_mask: Mask of boolean values indicating which model outputs should be used.
            Use this or 'out_annotation'
        out_annotation: List of output labels for which of the outputs (in case of a multi-task
            model) the predictions should be calculated.
        diff_type: "log_odds" or "diff". When set to 'log_odds', scores are calculated based on
            log odds, which assumes the model output is a probability. When set to 'diff', the model
            output for 'ref' is subtracted from 'alt'. When 'log_odds' is used with outputs that are
            not in the range [0,1], NaN will be returned.
        rc_handling: "average" or "maximum". Either average over the predictions derived from forward
            and reverse-complement predictions ('average') or pick the prediction with the bigger
            absolute value ('maximum').

    # Returns
        Dictionary with the key `ism`, which contains a pandas DataFrame containing the calculated
        values for each (selected) model output and input sequence
    """
    seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc}
    assert diff_type in ["log_odds", "diff"]
    assert rc_handling in ["average", "maximum"]
    assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k]))
                   for k in seqs.keys() if k != "ref"])
    assert get_seq_len(ref)[0] == mutation_positions.shape[0]
    assert len(mutation_positions.shape) == 1

    # determine which outputs should be selected
    if output_filter_mask is None:
        if out_annotation is None:
            output_filter_mask = np.arange(out_annotation_all_outputs.shape[0])
        else:
            output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]

    # make sure the labels are assigned correctly
    out_annotation = out_annotation_all_outputs[output_filter_mask]

    preds = {}
    for k in seqs:
        preds[k] = np.array(model.predict(seqs[k])[..., output_filter_mask])

    if diff_type == "log_odds":
        if np.any([(preds[k].min() < 0 or preds[k].max() > 1) for k in preds]):
            warnings.warn("Using log_odds on model outputs that are not bound [0,1]")
        diffs = np.log(preds["alt"] / (1 - preds["alt"])) - np.log(preds["ref"] / (1 - preds["ref"]))
        diffs_rc = np.log(preds["alt_rc"] / (1 - preds["alt_rc"])) - np.log(preds["ref_rc"] / (1 - preds["ref_rc"]))
    elif diff_type == "diff":
        diffs = preds["alt"] - preds["ref"]
        diffs_rc = preds["alt_rc"] - preds["ref_rc"]

    if rc_handling == "average":
        diffs = np.mean([diffs, diffs_rc], axis=0)
    elif rc_handling == "maximum":
        replace_filt = np.abs(diffs) < np.abs(diffs_rc)
        diffs[replace_filt] = diffs_rc[replace_filt]

    diffs = pd.DataFrame(diffs, columns=out_annotation)
    return {"ism": diffs}
python
def ism(model, ref, ref_rc, alt, alt_rc, mutation_positions,
        out_annotation_all_outputs, output_filter_mask=None,
        out_annotation=None, diff_type="log_odds", rc_handling="maximum"):
    """In-silico mutagenesis

    Using ISM with diff_type 'log_odds' and rc_handling 'maximum' will produce predictions as used
    in [DeepSEA](http://www.nature.com/nmeth/journal/v12/n10/full/nmeth.3547.html). ISM offers two
    ways to calculate the difference between the outputs created by reference and alternative
    sequence and two different methods to select whether to use the output generated from the forward
    or from the reverse-complement sequences. To calculate "e-values" as mentioned in DeepSEA, the
    same ISM prediction has to be performed on a randomised set of 1 million 1000genomes, MAF-matched
    variants to get a background of predicted effects of random SNPs.

    # Arguments
        model: Keras model
        ref: Input sequence with the reference genotype in the mutation position
        ref_rc: Reverse complement of the 'ref' argument
        alt: Input sequence with the alternative genotype in the mutation position
        alt_rc: Reverse complement of the 'alt' argument
        mutation_positions: Position on which the mutation was placed in the forward sequences
        out_annotation_all_outputs: Output labels of the model.
        output_filter_mask: Mask of boolean values indicating which model outputs should be used.
            Use this or 'out_annotation'
        out_annotation: List of output labels for which of the outputs (in case of a multi-task
            model) the predictions should be calculated.
        diff_type: "log_odds" or "diff". When set to 'log_odds', scores are calculated based on
            log odds, which assumes the model output is a probability. When set to 'diff', the model
            output for 'ref' is subtracted from 'alt'. When 'log_odds' is used with outputs that are
            not in the range [0,1], NaN will be returned.
        rc_handling: "average" or "maximum". Either average over the predictions derived from forward
            and reverse-complement predictions ('average') or pick the prediction with the bigger
            absolute value ('maximum').

    # Returns
        Dictionary with the key `ism`, which contains a pandas DataFrame containing the calculated
        values for each (selected) model output and input sequence
    """
    seqs = {"ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc}
    assert diff_type in ["log_odds", "diff"]
    assert rc_handling in ["average", "maximum"]
    assert np.all([np.array(get_seq_len(ref)) == np.array(get_seq_len(seqs[k]))
                   for k in seqs.keys() if k != "ref"])
    assert get_seq_len(ref)[0] == mutation_positions.shape[0]
    assert len(mutation_positions.shape) == 1

    # determine which outputs should be selected
    if output_filter_mask is None:
        if out_annotation is None:
            output_filter_mask = np.arange(out_annotation_all_outputs.shape[0])
        else:
            output_filter_mask = np.where(np.in1d(out_annotation_all_outputs, out_annotation))[0]

    # make sure the labels are assigned correctly
    out_annotation = out_annotation_all_outputs[output_filter_mask]

    preds = {}
    for k in seqs:
        preds[k] = np.array(model.predict(seqs[k])[..., output_filter_mask])

    if diff_type == "log_odds":
        if np.any([(preds[k].min() < 0 or preds[k].max() > 1) for k in preds]):
            warnings.warn("Using log_odds on model outputs that are not bound [0,1]")
        diffs = np.log(preds["alt"] / (1 - preds["alt"])) - np.log(preds["ref"] / (1 - preds["ref"]))
        diffs_rc = np.log(preds["alt_rc"] / (1 - preds["alt_rc"])) - np.log(preds["ref_rc"] / (1 - preds["ref_rc"]))
    elif diff_type == "diff":
        diffs = preds["alt"] - preds["ref"]
        diffs_rc = preds["alt_rc"] - preds["ref_rc"]

    if rc_handling == "average":
        diffs = np.mean([diffs, diffs_rc], axis=0)
    elif rc_handling == "maximum":
        replace_filt = np.abs(diffs) < np.abs(diffs_rc)
        diffs[replace_filt] = diffs_rc[replace_filt]

    diffs = pd.DataFrame(diffs, columns=out_annotation)
    return {"ism": diffs}
In-silico mutagenesis

Using ISM with diff_type 'log_odds' and rc_handling 'maximum' will produce predictions as used
in [DeepSEA](http://www.nature.com/nmeth/journal/v12/n10/full/nmeth.3547.html). ISM offers two
ways to calculate the difference between the outputs created by reference and alternative
sequence and two different methods to select whether to use the output generated from the forward
or from the reverse-complement sequences. To calculate "e-values" as mentioned in DeepSEA, the
same ISM prediction has to be performed on a randomised set of 1 million 1000genomes, MAF-matched
variants to get a background of predicted effects of random SNPs.

# Arguments
    model: Keras model
    ref: Input sequence with the reference genotype in the mutation position
    ref_rc: Reverse complement of the 'ref' argument
    alt: Input sequence with the alternative genotype in the mutation position
    alt_rc: Reverse complement of the 'alt' argument
    mutation_positions: Position on which the mutation was placed in the forward sequences
    out_annotation_all_outputs: Output labels of the model.
    output_filter_mask: Mask of boolean values indicating which model outputs should be used.
        Use this or 'out_annotation'
    out_annotation: List of output labels for which of the outputs (in case of a multi-task
        model) the predictions should be calculated.
    diff_type: "log_odds" or "diff". When set to 'log_odds', scores are calculated based on
        log odds, which assumes the model output is a probability. When set to 'diff', the model
        output for 'ref' is subtracted from 'alt'. When 'log_odds' is used with outputs that are
        not in the range [0,1], NaN will be returned.
    rc_handling: "average" or "maximum". Either average over the predictions derived from forward
        and reverse-complement predictions ('average') or pick the prediction with the bigger
        absolute value ('maximum').

# Returns
    Dictionary with the key `ism`, which contains a pandas DataFrame containing the calculated
    values for each (selected) model output and input sequence
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/effects/ism.py#L9-L84
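A usage sketch for `ism`, reusing the toy model and the `ref`/`alt` arrays (and their reverse complements) from the dropout sketch earlier in this section; only the scoring method changes:

```python
from concise.effects.ism import ism

# model, ref, ref_rc, alt, alt_rc, n as in the dropout sketch above.
res = ism(model, ref, ref_rc, alt, alt_rc,
          mutation_positions=np.repeat(50, n),
          out_annotation_all_outputs=np.array(["task"]),
          diff_type="log_odds", rc_handling="maximum")
print(res["ism"])  # one row per sequence, one column per selected output
```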
gagneurlab/concise
concise/hyopt.py
_train_and_eval_single
def _train_and_eval_single(train, valid, model,
                           batch_size=32, epochs=300, use_weight=False,
                           callbacks=[], eval_best=False, add_eval_metrics={}):
    """Fit and evaluate a keras model

    eval_best: if True, load the checkpointed model for evaluation
    """
    def _format_keras_history(history):
        """nicely format keras history
        """
        return {"params": history.params,
                "loss": merge_dicts({"epoch": history.epoch}, history.history),
                }

    if use_weight:
        sample_weight = train[2]
    else:
        sample_weight = None

    # train the model
    logger.info("Fit...")
    history = History()
    model.fit(train[0], train[1],
              batch_size=batch_size,
              validation_data=valid[:2],
              epochs=epochs,
              sample_weight=sample_weight,
              verbose=2,
              callbacks=[history] + callbacks)

    # get history
    hist = _format_keras_history(history)

    # load and eval the best model
    if eval_best:
        mcp = [x for x in callbacks if isinstance(x, ModelCheckpoint)]
        assert len(mcp) == 1
        model = load_model(mcp[0].filepath)

    return eval_model(model, valid, add_eval_metrics), hist
python
def _train_and_eval_single(train, valid, model,
                           batch_size=32, epochs=300, use_weight=False,
                           callbacks=[], eval_best=False, add_eval_metrics={}):
    """Fit and evaluate a keras model

    eval_best: if True, load the checkpointed model for evaluation
    """
    def _format_keras_history(history):
        """nicely format keras history
        """
        return {"params": history.params,
                "loss": merge_dicts({"epoch": history.epoch}, history.history),
                }

    if use_weight:
        sample_weight = train[2]
    else:
        sample_weight = None

    # train the model
    logger.info("Fit...")
    history = History()
    model.fit(train[0], train[1],
              batch_size=batch_size,
              validation_data=valid[:2],
              epochs=epochs,
              sample_weight=sample_weight,
              verbose=2,
              callbacks=[history] + callbacks)

    # get history
    hist = _format_keras_history(history)

    # load and eval the best model
    if eval_best:
        mcp = [x for x in callbacks if isinstance(x, ModelCheckpoint)]
        assert len(mcp) == 1
        model = load_model(mcp[0].filepath)

    return eval_model(model, valid, add_eval_metrics), hist
Fit and evaluate a keras model

eval_best: if True, load the checkpointed model for evaluation
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L315-L351
gagneurlab/concise
concise/hyopt.py
eval_model
def eval_model(model, test, add_eval_metrics={}):
    """Evaluate model's performance on the test-set.

    # Arguments
        model: Keras model
        test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`.
        add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions
            accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from
            the `concise.eval_metrics` module.

    # Returns
        dictionary with evaluation metrics
    """
    # evaluate the model
    logger.info("Evaluate...")
    # - model_metrics
    model_metrics_values = model.evaluate(test[0], test[1], verbose=0,
                                          batch_size=test[1].shape[0])
    # evaluation is done in a single pass to have more precise metrics
    model_metrics = dict(zip(_listify(model.metrics_names),
                             _listify(model_metrics_values)))
    # - eval_metrics
    y_true = test[1]
    y_pred = model.predict(test[0], verbose=0)
    eval_metrics = {k: v(y_true, y_pred) for k, v in add_eval_metrics.items()}

    # handle the case where the two metric name sets intersect
    # - omit duplicates from eval_metrics
    intersected_keys = set(model_metrics).intersection(set(eval_metrics))
    if len(intersected_keys) > 0:
        logger.warning("Some metric names intersect: {0}. Ignoring the add_eval_metrics ones".
                       format(intersected_keys))
        # _delete_keys expects a list
        eval_metrics = _delete_keys(eval_metrics, list(intersected_keys))

    return merge_dicts(model_metrics, eval_metrics)
python
def eval_model(model, test, add_eval_metrics={}):
    """Evaluate model's performance on the test-set.

    # Arguments
        model: Keras model
        test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`.
        add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions
            accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from
            the `concise.eval_metrics` module.

    # Returns
        dictionary with evaluation metrics
    """
    # evaluate the model
    logger.info("Evaluate...")
    # - model_metrics
    model_metrics_values = model.evaluate(test[0], test[1], verbose=0,
                                          batch_size=test[1].shape[0])
    # evaluation is done in a single pass to have more precise metrics
    model_metrics = dict(zip(_listify(model.metrics_names),
                             _listify(model_metrics_values)))
    # - eval_metrics
    y_true = test[1]
    y_pred = model.predict(test[0], verbose=0)
    eval_metrics = {k: v(y_true, y_pred) for k, v in add_eval_metrics.items()}

    # handle the case where the two metric name sets intersect
    # - omit duplicates from eval_metrics
    intersected_keys = set(model_metrics).intersection(set(eval_metrics))
    if len(intersected_keys) > 0:
        logger.warning("Some metric names intersect: {0}. Ignoring the add_eval_metrics ones".
                       format(intersected_keys))
        # _delete_keys expects a list
        eval_metrics = _delete_keys(eval_metrics, list(intersected_keys))

    return merge_dicts(model_metrics, eval_metrics)
Evaluate model's performance on the test-set.

# Arguments
    model: Keras model
    test: test-dataset. Tuple of inputs `x` and target `y` - `(x, y)`.
    add_eval_metrics: Additional evaluation metrics to use. Can be a dictionary or a list of functions
        accepting arguments: `y_true`, `y_predicted`. Alternatively, you can provide names of functions from
        the `concise.eval_metrics` module.

# Returns
    dictionary with evaluation metrics
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L354-L389
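A usage sketch for `eval_model`; `model`, `x_test`, and `y_test` are placeholders for a compiled Keras model and a held-out dataset, and the `auprc` metric is assumed to exist in `concise.eval_metrics` (check the module for the available names):

```python
import concise.eval_metrics as cem
from concise.hyopt import eval_model

metrics = eval_model(model, (x_test, y_test),
                     add_eval_metrics={"auprc": cem.auprc})
print(metrics)  # compiled metrics (e.g. 'loss', 'acc') merged with 'auprc'
```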
gagneurlab/concise
concise/hyopt.py
get_model
def get_model(model_fn, train_data, param):
    """Feed model_fn with train_data and param
    """
    model_param = merge_dicts({"train_data": train_data}, param["model"],
                              param.get("shared", {}))
    return model_fn(**model_param)
python
def get_model(model_fn, train_data, param):
    """Feed model_fn with train_data and param
    """
    model_param = merge_dicts({"train_data": train_data}, param["model"],
                              param.get("shared", {}))
    return model_fn(**model_param)
Feed model_fn with train_data and param
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L392-L396
gagneurlab/concise
concise/hyopt.py
_delete_keys
def _delete_keys(dct, keys):
    """Return a copy of `dct` with the entries in `keys` removed
    """
    c = deepcopy(dct)
    assert isinstance(keys, list)
    for k in keys:
        c.pop(k)
    return c
python
def _delete_keys(dct, keys):
    """Return a copy of `dct` with the entries in `keys` removed
    """
    c = deepcopy(dct)
    assert isinstance(keys, list)
    for k in keys:
        c.pop(k)
    return c
Return a copy of `dct` with the entries in `keys` removed
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L701-L708
gagneurlab/concise
concise/hyopt.py
_mean_dict
def _mean_dict(dict_list):
    """Compute the mean value across a list of dictionaries
    """
    return {k: np.array([d[k] for d in dict_list]).mean()
            for k in dict_list[0].keys()}
python
def _mean_dict(dict_list):
    """Compute the mean value across a list of dictionaries
    """
    return {k: np.array([d[k] for d in dict_list]).mean()
            for k in dict_list[0].keys()}
Compute the mean value across a list of dictionaries
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L711-L715
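A tiny worked example of the helper above (underscore-prefixed, so internal API):

```python
from concise.hyopt import _mean_dict

folds = [{"loss": 0.30, "acc": 0.91},
         {"loss": 0.20, "acc": 0.95}]
print(_mean_dict(folds))  # -> {'loss': 0.25, 'acc': 0.93} (up to float rounding)
```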
gagneurlab/concise
concise/hyopt.py
CMongoTrials.get_trial
def get_trial(self, tid):
    """Retrieve trial by tid
    """
    lid = np.where(np.array(self.tids) == tid)[0][0]
    return self.trials[lid]
python
def get_trial(self, tid):
    """Retrieve trial by tid
    """
    lid = np.where(np.array(self.tids) == tid)[0][0]
    return self.trials[lid]
Retrieve trial by tid
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L116-L120
gagneurlab/concise
concise/hyopt.py
CMongoTrials.count_by_state_unsynced
def count_by_state_unsynced(self, arg):
    """Extends the original object in order to inject checking
    for stalled jobs and killing them if they are running for too long
    """
    if self.kill_timeout is not None:
        self.delete_running(self.kill_timeout)
    return super(CMongoTrials, self).count_by_state_unsynced(arg)
python
def count_by_state_unsynced(self, arg):
    """Extends the original object in order to inject checking
    for stalled jobs and killing them if they are running for too long
    """
    if self.kill_timeout is not None:
        self.delete_running(self.kill_timeout)
    return super(CMongoTrials, self).count_by_state_unsynced(arg)
Extends the original object in order to inject checking for stalled jobs and killing them if they are running for too long
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L166-L172
gagneurlab/concise
concise/hyopt.py
CMongoTrials.delete_running
def delete_running(self, timeout_last_refresh=0, dry_run=False):
    """Delete jobs stalled in the running state for too long

    timeout_last_refresh, int: number of seconds
    """
    running_all = self.handle.jobs_running()
    running_timeout = [job for job in running_all
                       if coarse_utcnow() > job["refresh_time"] +
                       timedelta(seconds=timeout_last_refresh)]

    if len(running_timeout) == 0:
        # Nothing to stop
        self.refresh_tids(None)
        return None

    if dry_run:
        logger.warning("Dry run. Not removing anything.")
    logger.info("Removing {0}/{1} running jobs. # all jobs: {2} ".
                format(len(running_timeout), len(running_all), len(self)))
    now = coarse_utcnow()
    logger.info("Current utc time: {0}".format(now))
    logger.info("Time horizon: {0}".format(now - timedelta(seconds=timeout_last_refresh)))
    for job in running_timeout:
        logger.info("Removing job: ")
        pjob = job.to_dict()
        del pjob["misc"]  # ignore misc when printing
        logger.info(pprint.pformat(pjob))
        if not dry_run:
            self.handle.delete(job)
            logger.info("Job deleted")
    self.refresh_tids(None)
python
def delete_running(self, timeout_last_refresh=0, dry_run=False):
    """Delete jobs stalled in the running state for too long

    timeout_last_refresh, int: number of seconds
    """
    running_all = self.handle.jobs_running()
    running_timeout = [job for job in running_all
                       if coarse_utcnow() > job["refresh_time"] +
                       timedelta(seconds=timeout_last_refresh)]

    if len(running_timeout) == 0:
        # Nothing to stop
        self.refresh_tids(None)
        return None

    if dry_run:
        logger.warning("Dry run. Not removing anything.")
    logger.info("Removing {0}/{1} running jobs. # all jobs: {2} ".
                format(len(running_timeout), len(running_all), len(self)))
    now = coarse_utcnow()
    logger.info("Current utc time: {0}".format(now))
    logger.info("Time horizon: {0}".format(now - timedelta(seconds=timeout_last_refresh)))
    for job in running_timeout:
        logger.info("Removing job: ")
        pjob = job.to_dict()
        del pjob["misc"]  # ignore misc when printing
        logger.info(pprint.pformat(pjob))
        if not dry_run:
            self.handle.delete(job)
            logger.info("Job deleted")
    self.refresh_tids(None)
Delete jobs stalled in the running state for too long

timeout_last_refresh, int: number of seconds
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L174-L205
gagneurlab/concise
concise/hyopt.py
CMongoTrials.train_history
def train_history(self, tid=None):
    """Get train history as pd.DataFrame
    """
    def result2history(result):
        if isinstance(result["history"], list):
            return pd.concat([pd.DataFrame(hist["loss"]).assign(fold=i)
                              for i, hist in enumerate(result["history"])])
        else:
            return pd.DataFrame(result["history"]["loss"])

    # use all
    if tid is None:
        tid = self.valid_tid()

    res = [result2history(t["result"]).assign(tid=t["tid"])
           for t in self.trials
           if t["tid"] in _listify(tid)]
    df = pd.concat(res)

    # reorder columns
    fold_name = ["fold"] if "fold" in df else []
    df = _put_first(df, ["tid"] + fold_name + ["epoch"])
    return df
python
def train_history(self, tid=None):
    """Get train history as pd.DataFrame
    """
    def result2history(result):
        if isinstance(result["history"], list):
            return pd.concat([pd.DataFrame(hist["loss"]).assign(fold=i)
                              for i, hist in enumerate(result["history"])])
        else:
            return pd.DataFrame(result["history"]["loss"])

    # use all
    if tid is None:
        tid = self.valid_tid()

    res = [result2history(t["result"]).assign(tid=t["tid"])
           for t in self.trials
           if t["tid"] in _listify(tid)]
    df = pd.concat(res)

    # reorder columns
    fold_name = ["fold"] if "fold" in df else []
    df = _put_first(df, ["tid"] + fold_name + ["epoch"])
    return df
Get train history as pd.DataFrame
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L216-L238
gagneurlab/concise
concise/hyopt.py
CMongoTrials.as_df
def as_df(self, ignore_vals=["history"], separator=".", verbose=True):
    """Return a pd.DataFrame view of the whole experiment
    """
    def add_eval(res):
        if "eval" not in res:
            if isinstance(res["history"], list):
                # take the average across all folds
                eval_names = list(res["history"][0]["loss"].keys())
                eval_metrics = np.array([[v[-1] for k, v in hist["loss"].items()]
                                         for hist in res["history"]]).mean(axis=0).tolist()
                res["eval"] = {eval_names[i]: eval_metrics[i] for i in range(len(eval_metrics))}
            else:
                res["eval"] = {k: v[-1] for k, v in res["history"]["loss"].items()}
        return res

    def add_n_epoch(df):
        df_epoch = self.train_history().groupby("tid")["epoch"].max().reset_index()
        df_epoch.rename(columns={"epoch": "n_epoch"}, inplace=True)
        return pd.merge(df, df_epoch, on="tid", how="left")

    results = self.get_ok_results(verbose=verbose)
    rp = [_flatten_dict(_delete_keys(add_eval(x), ignore_vals), separator) for x in results]
    df = pd.DataFrame.from_records(rp)

    df = add_n_epoch(df)

    first = ["tid", "loss", "status"]
    return _put_first(df, first)
python
def as_df(self, ignore_vals=["history"], separator=".", verbose=True):
    """Return a pd.DataFrame view of the whole experiment
    """
    def add_eval(res):
        if "eval" not in res:
            if isinstance(res["history"], list):
                # take the average across all folds
                eval_names = list(res["history"][0]["loss"].keys())
                eval_metrics = np.array([[v[-1] for k, v in hist["loss"].items()]
                                         for hist in res["history"]]).mean(axis=0).tolist()
                res["eval"] = {eval_names[i]: eval_metrics[i] for i in range(len(eval_metrics))}
            else:
                res["eval"] = {k: v[-1] for k, v in res["history"]["loss"].items()}
        return res

    def add_n_epoch(df):
        df_epoch = self.train_history().groupby("tid")["epoch"].max().reset_index()
        df_epoch.rename(columns={"epoch": "n_epoch"}, inplace=True)
        return pd.merge(df, df_epoch, on="tid", how="left")

    results = self.get_ok_results(verbose=verbose)
    rp = [_flatten_dict(_delete_keys(add_eval(x), ignore_vals), separator) for x in results]
    df = pd.DataFrame.from_records(rp)

    df = add_n_epoch(df)

    first = ["tid", "loss", "status"]
    return _put_first(df, first)
Return a pd.DataFrame view of the whole experiment
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/hyopt.py#L283-L311
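Given a connected `CMongoTrials` instance (constructor arguments depend on the MongoDB setup, so they are elided here), the whole experiment can be inspected as a single table; a sketch:

```python
# trials = CMongoTrials(...)   # connect to the hyperopt MongoDB of the experiment
df = trials.as_df()            # one row per OK trial, nested dicts flattened with "."
print(df.sort_values("loss")[["tid", "loss", "n_epoch"]].head())
```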
gagneurlab/concise
concise/effects/snp_effects.py
effect_from_model
def effect_from_model(model, ref, ref_rc, alt, alt_rc, methods, mutation_positions,
                      out_annotation_all_outputs, extra_args=None, **argv):
    """Convenience function to execute multiple effect predictions in one call

    # Arguments
        model: Keras model
        ref: Input sequence with the reference genotype in the mutation position
        ref_rc: Reverse complement of the 'ref' argument
        alt: Input sequence with the alternative genotype in the mutation position
        alt_rc: Reverse complement of the 'alt' argument
        methods: A list of prediction functions to be executed, e.g.: from concise.effects.ism.ism. Using the
            same function more often than once (even with different parameters) will overwrite the results of
            the previous calculation of that function.
        mutation_positions: Position on which the mutation was placed in the forward sequences
        out_annotation_all_outputs: Output labels of the model.
        extra_args: None or a list of the same length as 'methods'. The elements of the list are dictionaries
            with additional arguments that should be passed on to the respective functions in 'methods'.
            Arguments defined here will overwrite arguments that are passed to all methods.
        **argv: Additional arguments to be passed on to all methods, e.g.: out_annotation.

    # Returns
        Dictionary containing the results of the individual calculations; the keys are the names of the
        executed functions
    """
    assert isinstance(methods, list)
    if isinstance(extra_args, list):
        assert(len(extra_args) == len(methods))
    else:
        extra_args = [None] * len(methods)

    main_args = {"model": model, "ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc,
                 "mutation_positions": mutation_positions,
                 "out_annotation_all_outputs": out_annotation_all_outputs}

    pred_results = {}
    for method, xargs in zip(methods, extra_args):
        if xargs is not None:
            if isinstance(xargs, dict):
                for k in argv:
                    if k not in xargs:
                        xargs[k] = argv[k]
        else:
            xargs = argv
        for k in main_args:
            xargs[k] = main_args[k]
        res = method(**xargs)
        pred_results[method.__name__] = res

    return pred_results
python
def effect_from_model(model, ref, ref_rc, alt, alt_rc, methods, mutation_positions,
                      out_annotation_all_outputs, extra_args=None, **argv):
    """Convenience function to execute multiple effect predictions in one call

    # Arguments
        model: Keras model
        ref: Input sequence with the reference genotype in the mutation position
        ref_rc: Reverse complement of the 'ref' argument
        alt: Input sequence with the alternative genotype in the mutation position
        alt_rc: Reverse complement of the 'alt' argument
        methods: A list of prediction functions to be executed, e.g.: from concise.effects.ism.ism. Using the
            same function more often than once (even with different parameters) will overwrite the results of
            the previous calculation of that function.
        mutation_positions: Position on which the mutation was placed in the forward sequences
        out_annotation_all_outputs: Output labels of the model.
        extra_args: None or a list of the same length as 'methods'. The elements of the list are dictionaries
            with additional arguments that should be passed on to the respective functions in 'methods'.
            Arguments defined here will overwrite arguments that are passed to all methods.
        **argv: Additional arguments to be passed on to all methods, e.g.: out_annotation.

    # Returns
        Dictionary containing the results of the individual calculations; the keys are the names of the
        executed functions
    """
    assert isinstance(methods, list)
    if isinstance(extra_args, list):
        assert(len(extra_args) == len(methods))
    else:
        extra_args = [None] * len(methods)

    main_args = {"model": model, "ref": ref, "ref_rc": ref_rc, "alt": alt, "alt_rc": alt_rc,
                 "mutation_positions": mutation_positions,
                 "out_annotation_all_outputs": out_annotation_all_outputs}

    pred_results = {}
    for method, xargs in zip(methods, extra_args):
        if xargs is not None:
            if isinstance(xargs, dict):
                for k in argv:
                    if k not in xargs:
                        xargs[k] = argv[k]
        else:
            xargs = argv
        for k in main_args:
            xargs[k] = main_args[k]
        res = method(**xargs)
        pred_results[method.__name__] = res

    return pred_results
Convenience function to execute multiple effect predictions in one call

# Arguments
    model: Keras model
    ref: Input sequence with the reference genotype in the mutation position
    ref_rc: Reverse complement of the 'ref' argument
    alt: Input sequence with the alternative genotype in the mutation position
    alt_rc: Reverse complement of the 'alt' argument
    methods: A list of prediction functions to be executed, e.g.: from concise.effects.ism.ism. Using the
        same function more often than once (even with different parameters) will overwrite the results of
        the previous calculation of that function.
    mutation_positions: Position on which the mutation was placed in the forward sequences
    out_annotation_all_outputs: Output labels of the model.
    extra_args: None or a list of the same length as 'methods'. The elements of the list are dictionaries
        with additional arguments that should be passed on to the respective functions in 'methods'.
        Arguments defined here will overwrite arguments that are passed to all methods.
    **argv: Additional arguments to be passed on to all methods, e.g.: out_annotation.

# Returns
    Dictionary containing the results of the individual calculations; the keys are the names of the
    executed functions
https://github.com/gagneurlab/concise/blob/d15262eb1e590008bc96ba31e93bfbdbfa1a9fd4/concise/effects/snp_effects.py#L5-L53
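A sketch combining `effect_from_model` with the `ism` method shown earlier; `model`, the sequence arrays, and `n` are the placeholders from the toy setup above. Note that listing the same function twice would overwrite its earlier result, since results are keyed by function name:

```python
from concise.effects.ism import ism
from concise.effects.snp_effects import effect_from_model

res = effect_from_model(model, ref, ref_rc, alt, alt_rc,
                        methods=[ism],
                        mutation_positions=np.repeat(50, n),
                        out_annotation_all_outputs=np.array(["task"]),
                        extra_args=[{"diff_type": "diff"}])
print(res["ism"]["ism"])  # keyed by function name, then by the method's own keys
```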
bitshares/uptick
uptick/markets.py
trades
def trades(ctx, market, limit, start, stop):
    """ List trades in a market
    """
    market = Market(market, bitshares_instance=ctx.bitshares)
    t = [["time", "quote", "base", "price"]]
    for trade in market.trades(limit, start=start, stop=stop):
        t.append(
            [
                str(trade["time"]),
                str(trade["quote"]),
                str(trade["base"]),
                "{:f} {}/{}".format(
                    trade["price"],
                    trade["base"]["asset"]["symbol"],
                    trade["quote"]["asset"]["symbol"],
                ),
            ]
        )
    print_table(t)
python
def trades(ctx, market, limit, start, stop):
    """ List trades in a market
    """
    market = Market(market, bitshares_instance=ctx.bitshares)
    t = [["time", "quote", "base", "price"]]
    for trade in market.trades(limit, start=start, stop=stop):
        t.append(
            [
                str(trade["time"]),
                str(trade["quote"]),
                str(trade["base"]),
                "{:f} {}/{}".format(
                    trade["price"],
                    trade["base"]["asset"]["symbol"],
                    trade["quote"]["asset"]["symbol"],
                ),
            ]
        )
    print_table(t)
List trades in a market
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L31-L49
bitshares/uptick
uptick/markets.py
ticker
def ticker(ctx, market):
    """ Show ticker of a market
    """
    market = Market(market, bitshares_instance=ctx.bitshares)
    ticker = market.ticker()
    t = [["key", "value"]]
    for key in ticker:
        t.append([key, str(ticker[key])])
    print_table(t)
python
def ticker(ctx, market):
    """ Show ticker of a market
    """
    market = Market(market, bitshares_instance=ctx.bitshares)
    ticker = market.ticker()
    t = [["key", "value"]]
    for key in ticker:
        t.append([key, str(ticker[key])])
    print_table(t)
Show ticker of a market
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L56-L64
bitshares/uptick
uptick/markets.py
cancel
def cancel(ctx, orders, account):
    """ Cancel one or multiple orders
    """
    print_tx(ctx.bitshares.cancel(orders, account=account))
python
def cancel(ctx, orders, account):
    """ Cancel one or multiple orders
    """
    print_tx(ctx.bitshares.cancel(orders, account=account))
Cancel one or multiple orders
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L78-L81
bitshares/uptick
uptick/markets.py
orderbook
def orderbook(ctx, market):
    """ Show the orderbook of a particular market
    """
    market = Market(market, bitshares_instance=ctx.bitshares)
    orderbook = market.orderbook()
    ta = {}
    ta["bids"] = [["quote", "sum quote", "base", "sum base", "price"]]
    cumsumquote = Amount(0, market["quote"])
    cumsumbase = Amount(0, market["base"])
    for order in orderbook["bids"]:
        cumsumbase += order["base"]
        cumsumquote += order["quote"]
        ta["bids"].append(
            [
                str(order["quote"]),
                str(cumsumquote),
                str(order["base"]),
                str(cumsumbase),
                "{:f} {}/{}".format(
                    order["price"],
                    order["base"]["asset"]["symbol"],
                    order["quote"]["asset"]["symbol"],
                ),
            ]
        )

    ta["asks"] = [["price", "base", "sum base", "quote", "sum quote"]]
    cumsumquote = Amount(0, market["quote"])
    cumsumbase = Amount(0, market["base"])
    for order in orderbook["asks"]:
        cumsumbase += order["base"]
        cumsumquote += order["quote"]
        ta["asks"].append(
            [
                "{:f} {}/{}".format(
                    order["price"],
                    order["base"]["asset"]["symbol"],
                    order["quote"]["asset"]["symbol"],
                ),
                str(order["base"]),
                str(cumsumbase),
                str(order["quote"]),
                str(cumsumquote),
            ]
        )
    t = [["bids", "asks"]]
    t.append([format_table(ta["bids"]), format_table(ta["asks"])])
    print_table(t)
python
def orderbook(ctx, market):
    """ Show the orderbook of a particular market
    """
    market = Market(market, bitshares_instance=ctx.bitshares)
    orderbook = market.orderbook()
    ta = {}
    ta["bids"] = [["quote", "sum quote", "base", "sum base", "price"]]
    cumsumquote = Amount(0, market["quote"])
    cumsumbase = Amount(0, market["base"])
    for order in orderbook["bids"]:
        cumsumbase += order["base"]
        cumsumquote += order["quote"]
        ta["bids"].append(
            [
                str(order["quote"]),
                str(cumsumquote),
                str(order["base"]),
                str(cumsumbase),
                "{:f} {}/{}".format(
                    order["price"],
                    order["base"]["asset"]["symbol"],
                    order["quote"]["asset"]["symbol"],
                ),
            ]
        )

    ta["asks"] = [["price", "base", "sum base", "quote", "sum quote"]]
    cumsumquote = Amount(0, market["quote"])
    cumsumbase = Amount(0, market["base"])
    for order in orderbook["asks"]:
        cumsumbase += order["base"]
        cumsumquote += order["quote"]
        ta["asks"].append(
            [
                "{:f} {}/{}".format(
                    order["price"],
                    order["base"]["asset"]["symbol"],
                    order["quote"]["asset"]["symbol"],
                ),
                str(order["base"]),
                str(cumsumbase),
                str(order["quote"]),
                str(cumsumquote),
            ]
        )
    t = [["bids", "asks"]]
    t.append([format_table(ta["bids"]), format_table(ta["asks"])])
    print_table(t)
Show the orderbook of a particular market
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L88-L135
bitshares/uptick
uptick/markets.py
buy
def buy(ctx, buy_amount, buy_asset, price, sell_asset, order_expiration, account):
    """ Buy a specific asset at a certain rate against a base asset
    """
    amount = Amount(buy_amount, buy_asset)
    price = Price(
        price, base=sell_asset, quote=buy_asset, bitshares_instance=ctx.bitshares
    )
    print_tx(
        price.market.buy(price, amount, account=account, expiration=order_expiration)
    )
python
def buy(ctx, buy_amount, buy_asset, price, sell_asset, order_expiration, account):
    """ Buy a specific asset at a certain rate against a base asset
    """
    amount = Amount(buy_amount, buy_asset)
    price = Price(
        price, base=sell_asset, quote=buy_asset, bitshares_instance=ctx.bitshares
    )
    print_tx(
        price.market.buy(price, amount, account=account, expiration=order_expiration)
    )
Buy a specific asset at a certain rate against a base asset
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L153-L162
bitshares/uptick
uptick/markets.py
openorders
def openorders(ctx, account):
    """ List open orders of an account
    """
    account = Account(
        account or config["default_account"], bitshares_instance=ctx.bitshares
    )
    t = [["Price", "Quote", "Base", "ID"]]
    for o in account.openorders:
        t.append(
            [
                "{:f} {}/{}".format(
                    o["price"],
                    o["base"]["asset"]["symbol"],
                    o["quote"]["asset"]["symbol"],
                ),
                str(o["quote"]),
                str(o["base"]),
                o["id"],
            ]
        )
    print_table(t)
python
def openorders(ctx, account):
    """ List open orders of an account
    """
    account = Account(
        account or config["default_account"], bitshares_instance=ctx.bitshares
    )
    t = [["Price", "Quote", "Base", "ID"]]
    for o in account.openorders:
        t.append(
            [
                "{:f} {}/{}".format(
                    o["price"],
                    o["base"]["asset"]["symbol"],
                    o["quote"]["asset"]["symbol"],
                ),
                str(o["quote"]),
                str(o["base"]),
                o["id"],
            ]
        )
    print_table(t)
List open orders of an account
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L196-L216
bitshares/uptick
uptick/markets.py
cancelall
def cancelall(ctx, market, account):
    """ Cancel all orders of an account in a market
    """
    market = Market(market)
    ctx.bitshares.bundle = True
    market.cancel([x["id"] for x in market.accountopenorders(account)], account=account)
    print_tx(ctx.bitshares.txbuffer.broadcast())
python
def cancelall(ctx, market, account):
    """ Cancel all orders of an account in a market
    """
    market = Market(market)
    ctx.bitshares.bundle = True
    market.cancel([x["id"] for x in market.accountopenorders(account)], account=account)
    print_tx(ctx.bitshares.txbuffer.broadcast())
Cancel all orders of an account in a market
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L225-L231
bitshares/uptick
uptick/markets.py
spread
def spread(ctx, market, side, min, max, num, total, order_expiration, account):
    """ Place multiple orders

    \b
    :param str market: Market pair quote:base (e.g. USD:BTS)
    :param str side: ``buy`` or ``sell`` quote
    :param float min: minimum price to place order at
    :param float max: maximum price to place order at
    :param int num: Number of orders to place
    :param float total: Total amount of quote to use for all orders
    :param int order_expiration: Number of seconds until the order expires from the books
    """
    from tqdm import tqdm
    from numpy import linspace

    market = Market(market)
    ctx.bitshares.bundle = True
    if min < max:
        space = linspace(min, max, num)
    else:
        space = linspace(max, min, num)
    func = getattr(market, side)
    for p in tqdm(space):
        func(p, total / float(num), account=account, expiration=order_expiration)
    print_tx(ctx.bitshares.txbuffer.broadcast())
python
def spread(ctx, market, side, min, max, num, total, order_expiration, account):
    """ Place multiple orders

    \b
    :param str market: Market pair quote:base (e.g. USD:BTS)
    :param str side: ``buy`` or ``sell`` quote
    :param float min: minimum price to place order at
    :param float max: maximum price to place order at
    :param int num: Number of orders to place
    :param float total: Total amount of quote to use for all orders
    :param int order_expiration: Number of seconds until the order expires from the books
    """
    from tqdm import tqdm
    from numpy import linspace

    market = Market(market)
    ctx.bitshares.bundle = True
    if min < max:
        space = linspace(min, max, num)
    else:
        space = linspace(max, min, num)
    func = getattr(market, side)
    for p in tqdm(space):
        func(p, total / float(num), account=account, expiration=order_expiration)
    print_tx(ctx.bitshares.txbuffer.broadcast())
Place multiple orders

\b
:param str market: Market pair quote:base (e.g. USD:BTS)
:param str side: ``buy`` or ``sell`` quote
:param float min: minimum price to place order at
:param float max: maximum price to place order at
:param int num: Number of orders to place
:param float total: Total amount of quote to use for all orders
:param int order_expiration: Number of seconds until the order expires from the books
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L246-L273
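The order grid that `spread` places is just a linear interpolation between `min` and `max`, with `total` split evenly across the orders. A pure-numpy illustration of the numbers it would produce (no chain connection needed):

```python
from numpy import linspace

min_p, max_p, num, total = 0.90, 1.10, 5, 100.0
prices = linspace(min_p, max_p, num)   # [0.9, 0.95, 1.0, 1.05, 1.1]
amount = total / float(num)            # 20.0 quote per order
for p in prices:
    print("order: {:.2f} quote @ {:.2f}".format(amount, p))
```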
bitshares/uptick
uptick/markets.py
borrow
def borrow(ctx, amount, symbol, ratio, account):
    """ Borrow a bitasset/market-pegged asset
    """
    from bitshares.dex import Dex

    dex = Dex(bitshares_instance=ctx.bitshares)
    print_tx(
        dex.borrow(Amount(amount, symbol), collateral_ratio=ratio, account=account)
    )
python
def borrow(ctx, amount, symbol, ratio, account):
    """ Borrow a bitasset/market-pegged asset
    """
    from bitshares.dex import Dex

    dex = Dex(bitshares_instance=ctx.bitshares)
    print_tx(
        dex.borrow(Amount(amount, symbol), collateral_ratio=ratio, account=account)
    )
Borrow a bitasset/market-pegged asset
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L289-L297
bitshares/uptick
uptick/markets.py
updateratio
def updateratio(ctx, symbol, ratio, account):
    """ Update the collateral ratio of a call position
    """
    from bitshares.dex import Dex

    dex = Dex(bitshares_instance=ctx.bitshares)
    print_tx(dex.adjust_collateral_ratio(symbol, ratio, account=account))
python
def updateratio(ctx, symbol, ratio, account):
    """ Update the collateral ratio of a call position
    """
    from bitshares.dex import Dex

    dex = Dex(bitshares_instance=ctx.bitshares)
    print_tx(dex.adjust_collateral_ratio(symbol, ratio, account=account))
Update the collateral ratio of a call position
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L312-L318
bitshares/uptick
uptick/markets.py
fundfeepool
def fundfeepool(ctx, symbol, amount, account):
    """ Fund the fee pool of an asset
    """
    print_tx(ctx.bitshares.fund_fee_pool(symbol, amount, account=account))
python
def fundfeepool(ctx, symbol, amount, account):
    """ Fund the fee pool of an asset
    """
    print_tx(ctx.bitshares.fund_fee_pool(symbol, amount, account=account))
Fund the fee pool of an asset
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L333-L336
bitshares/uptick
uptick/markets.py
bidcollateral
def bidcollateral(
    ctx, collateral_symbol, collateral_amount, debt_symbol, debt_amount, account
):
    """ Bid for collateral in the settlement fund
    """
    print_tx(
        ctx.bitshares.bid_collateral(
            Amount(collateral_amount, collateral_symbol),
            Amount(debt_amount, debt_symbol),
            account=account,
        )
    )
python
def bidcollateral(
    ctx, collateral_symbol, collateral_amount, debt_symbol, debt_amount, account
):
    """ Bid for collateral in the settlement fund
    """
    print_tx(
        ctx.bitshares.bid_collateral(
            Amount(collateral_amount, collateral_symbol),
            Amount(debt_amount, debt_symbol),
            account=account,
        )
    )
Bid for collateral in the settlement fund
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L353-L364
bitshares/uptick
uptick/markets.py
settle
def settle(ctx, symbol, amount, account):
    """ Settle the given amount of a bitasset/market-pegged asset
    """
    print_tx(ctx.bitshares.asset_settle(Amount(amount, symbol), account=account))
python
def settle(ctx, symbol, amount, account):
    """ Settle the given amount of a bitasset/market-pegged asset
    """
    print_tx(ctx.bitshares.asset_settle(Amount(amount, symbol), account=account))
Settle the given amount of a bitasset/market-pegged asset
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/markets.py#L379-L382
bitshares/uptick
uptick/votes.py
votes
def votes(ctx, account, type):
    """ List an account's votes
    """
    if not isinstance(type, (list, tuple)):
        type = [type]
    account = Account(account, full=True)
    ret = {key: list() for key in Vote.types()}
    for vote in account["votes"]:
        t = Vote.vote_type_from_id(vote["id"])
        ret[t].append(vote)

    if "committee" in type:
        t = [["id", "url", "account", "votes"]]
        for vote in ret["committee"]:
            t.append(
                [
                    vote["id"],
                    vote["url"],
                    Account(vote["committee_member_account"])["name"],
                    str(Amount({"amount": vote["total_votes"], "asset_id": "1.3.0"})),
                ]
            )
        print_table(t)

    if "witness" in type:
        t = [
            [
                "id",
                "account",
                "url",
                "votes",
                "last_confirmed_block_num",
                "total_missed",
                "vesting",
            ]
        ]
        for vote in ret["witness"]:
            t.append(
                [
                    vote["id"],
                    Account(vote["witness_account"])["name"],
                    vote["url"],
                    str(Amount({"amount": vote["total_votes"], "asset_id": "1.3.0"})),
                    vote["last_confirmed_block_num"],
                    vote["total_missed"],
                    str(Vesting(vote.get("pay_vb")).claimable)
                    if vote.get("pay_vb")
                    else "",
                ]
            )
        print_table(t)

    if "worker" in type:
        t = [["id", "name/url", "daily_pay", "votes", "time", "account"]]
        for vote in ret["worker"]:
            votes = Amount({"amount": vote["total_votes_for"], "asset_id": "1.3.0"})
            amount = Amount({"amount": vote["daily_pay"], "asset_id": "1.3.0"})
            t.append(
                [
                    vote["id"],
                    "{name}\n{url}".format(**vote),
                    str(amount),
                    str(votes),
                    "{work_begin_date}\n-\n{work_end_date}".format(**vote),
                    str(Account(vote["worker_account"])["name"]),
                ]
            )
        print_table(t)
python
def votes(ctx, account, type):
    """ List an account's votes
    """
    if not isinstance(type, (list, tuple)):
        type = [type]
    account = Account(account, full=True)
    ret = {key: list() for key in Vote.types()}
    for vote in account["votes"]:
        t = Vote.vote_type_from_id(vote["id"])
        ret[t].append(vote)

    if "committee" in type:
        t = [["id", "url", "account", "votes"]]
        for vote in ret["committee"]:
            t.append(
                [
                    vote["id"],
                    vote["url"],
                    Account(vote["committee_member_account"])["name"],
                    str(Amount({"amount": vote["total_votes"], "asset_id": "1.3.0"})),
                ]
            )
        print_table(t)

    if "witness" in type:
        t = [
            [
                "id",
                "account",
                "url",
                "votes",
                "last_confirmed_block_num",
                "total_missed",
                "vesting",
            ]
        ]
        for vote in ret["witness"]:
            t.append(
                [
                    vote["id"],
                    Account(vote["witness_account"])["name"],
                    vote["url"],
                    str(Amount({"amount": vote["total_votes"], "asset_id": "1.3.0"})),
                    vote["last_confirmed_block_num"],
                    vote["total_missed"],
                    str(Vesting(vote.get("pay_vb")).claimable)
                    if vote.get("pay_vb")
                    else "",
                ]
            )
        print_table(t)

    if "worker" in type:
        t = [["id", "name/url", "daily_pay", "votes", "time", "account"]]
        for vote in ret["worker"]:
            votes = Amount({"amount": vote["total_votes_for"], "asset_id": "1.3.0"})
            amount = Amount({"amount": vote["daily_pay"], "asset_id": "1.3.0"})
            t.append(
                [
                    vote["id"],
                    "{name}\n{url}".format(**vote),
                    str(amount),
                    str(votes),
                    "{work_begin_date}\n-\n{work_end_date}".format(**vote),
                    str(Account(vote["worker_account"])["name"]),
                ]
            )
        print_table(t)
List accounts vesting balances
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/votes.py#L34-L107
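A connection-free sketch of the bucketing pattern votes() uses: each vote carries an object id whose type prefix decides the bucket. The prefix table below is a stand-in for Vote.vote_type_from_id() from python-bitshares, and the ids are made up.

def vote_type_from_id(object_id):
    # hypothetical prefix table; the real lookup lives in python-bitshares
    return {"1.5": "committee", "1.6": "witness", "1.14": "worker"}[
        object_id.rsplit(".", 1)[0]
    ]

votes = [{"id": "1.6.1"}, {"id": "1.5.0"}, {"id": "1.6.7"}]
ret = {key: [] for key in ("committee", "witness", "worker")}
for vote in votes:
    ret[vote_type_from_id(vote["id"])].append(vote)
assert [v["id"] for v in ret["witness"]] == ["1.6.1", "1.6.7"]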
bitshares/uptick
uptick/info.py
info
def info(ctx, objects):
    """ Obtain all kinds of information
    """
    if not objects:
        t = [["Key", "Value"]]
        info = ctx.bitshares.rpc.get_dynamic_global_properties()
        for key in info:
            t.append([key, info[key]])
        print_table(t)

    for obj in objects:
        # Block
        if re.match(r"^[0-9]*$", obj):
            block = Block(obj, lazy=False, bitshares_instance=ctx.bitshares)
            t = [["Key", "Value"]]
            for key in sorted(block):
                value = block[key]
                if key == "transactions":
                    value = format_tx(value)
                t.append([key, value])
            print_table(t)
        # Object Id
        elif re.match(r"^\d*\.\d*\.\d*$", obj):
            data = ctx.bitshares.rpc.get_object(obj)
            if data:
                t = [["Key", "Value"]]
                for key in sorted(data):
                    value = data[key]
                    if isinstance(value, (dict, list)):
                        value = format_tx(value)
                    t.append([key, value])
                print_table(t)
            else:
                print_message("Object %s unknown" % obj, "warning")
        # Asset
        elif obj.upper() == obj and re.match(r"^[A-Z\.]*$", obj):
            data = Asset(obj)
            t = [["Key", "Value"]]
            for key in sorted(data):
                value = data[key]
                if isinstance(value, dict):
                    value = format_tx(value)
                t.append([key, value])
            print_table(t)
        # Public Key
        elif re.match(r"^BTS.{48,55}$", obj):
            account = ctx.bitshares.wallet.getAccountFromPublicKey(obj)
            if account:
                t = [["Account"]]
                t.append([account])
                print_table(t)
            else:
                print_message("Public Key not known: %s" % obj, "warning")
        # Account name
        elif re.match(r"^[a-zA-Z0-9\-\._]{2,64}$", obj):
            account = Account(obj, full=True)
            if account:
                t = [["Key", "Value"]]
                for key in sorted(account):
                    value = account[key]
                    if isinstance(value, (dict, list)):
                        value = format_tx(value)
                    t.append([key, value])
                print_table(t)
            else:
                print_message("Account %s unknown" % obj, "warning")
        # Vote id
        elif ":" in obj:
            vote = ctx.bitshares.rpc.lookup_vote_ids([obj])[0]
            if vote:
                t = [["Key", "Value"]]
                for key in sorted(vote):
                    value = vote[key]
                    if isinstance(value, (dict, list)):
                        value = format_tx(value)
                    t.append([key, value])
                print_table(t)
            else:
                print_message("voteid %s unknown" % obj, "warning")
        else:
            print_message("Couldn't identify object to read", "warning")
python
def info(ctx, objects):
    """ Obtain all kinds of information
    """
    if not objects:
        t = [["Key", "Value"]]
        info = ctx.bitshares.rpc.get_dynamic_global_properties()
        for key in info:
            t.append([key, info[key]])
        print_table(t)

    for obj in objects:
        # Block
        if re.match(r"^[0-9]*$", obj):
            block = Block(obj, lazy=False, bitshares_instance=ctx.bitshares)
            t = [["Key", "Value"]]
            for key in sorted(block):
                value = block[key]
                if key == "transactions":
                    value = format_tx(value)
                t.append([key, value])
            print_table(t)
        # Object Id
        elif re.match(r"^\d*\.\d*\.\d*$", obj):
            data = ctx.bitshares.rpc.get_object(obj)
            if data:
                t = [["Key", "Value"]]
                for key in sorted(data):
                    value = data[key]
                    if isinstance(value, (dict, list)):
                        value = format_tx(value)
                    t.append([key, value])
                print_table(t)
            else:
                print_message("Object %s unknown" % obj, "warning")
        # Asset
        elif obj.upper() == obj and re.match(r"^[A-Z\.]*$", obj):
            data = Asset(obj)
            t = [["Key", "Value"]]
            for key in sorted(data):
                value = data[key]
                if isinstance(value, dict):
                    value = format_tx(value)
                t.append([key, value])
            print_table(t)
        # Public Key
        elif re.match(r"^BTS.{48,55}$", obj):
            account = ctx.bitshares.wallet.getAccountFromPublicKey(obj)
            if account:
                t = [["Account"]]
                t.append([account])
                print_table(t)
            else:
                print_message("Public Key not known: %s" % obj, "warning")
        # Account name
        elif re.match(r"^[a-zA-Z0-9\-\._]{2,64}$", obj):
            account = Account(obj, full=True)
            if account:
                t = [["Key", "Value"]]
                for key in sorted(account):
                    value = account[key]
                    if isinstance(value, (dict, list)):
                        value = format_tx(value)
                    t.append([key, value])
                print_table(t)
            else:
                print_message("Account %s unknown" % obj, "warning")
        # Vote id
        elif ":" in obj:
            vote = ctx.bitshares.rpc.lookup_vote_ids([obj])[0]
            if vote:
                t = [["Key", "Value"]]
                for key in sorted(vote):
                    value = vote[key]
                    if isinstance(value, (dict, list)):
                        value = format_tx(value)
                    t.append([key, value])
                print_table(t)
            else:
                print_message("voteid %s unknown" % obj, "warning")
        else:
            print_message("Couldn't identify object to read", "warning")
Obtain all kinds of information
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/info.py#L19-L103
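The dispatch in info() keys purely off the shape of the input string, so it can be exercised without a chain connection. A standalone sketch of the same classification, using the exact regexes and branch order from the function above:

import re

def classify(obj):
    if re.match(r"^[0-9]*$", obj):
        return "block"
    if re.match(r"^\d*\.\d*\.\d*$", obj):
        return "object-id"
    if obj.upper() == obj and re.match(r"^[A-Z\.]*$", obj):
        return "asset"
    if re.match(r"^BTS.{48,55}$", obj):
        return "public-key"
    if re.match(r"^[a-zA-Z0-9\-\._]{2,64}$", obj):
        return "account"
    if ":" in obj:
        return "vote-id"
    return "unknown"

assert classify("12345") == "block"
assert classify("1.2.0") == "object-id"
assert classify("USD") == "asset"        # asset wins because it is tested first
assert classify("some-account") == "account"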
bitshares/uptick
uptick/info.py
fees
def fees(ctx, currency):
    """ List fees
    """
    from bitsharesbase.operationids import getOperationNameForId
    from bitshares.market import Market

    market = Market("%s:%s" % (currency, "BTS"))
    ticker = market.ticker()
    if "quoteSettlement_price" in ticker:
        price = ticker.get("quoteSettlement_price")
    else:
        price = ticker.get("latest", 0)
    price.invert()

    chain = Blockchain(bitshares_instance=ctx.bitshares)
    feesObj = chain.chainParameters().get("current_fees")
    fees = feesObj["parameters"]

    t = [["Operation", "Type", "Fee", currency]]
    for fee in fees:
        for f in fee[1]:
            t.append(
                [
                    highlight(getOperationNameForId(fee[0])),
                    detail(f),
                    detail(
                        str(Amount({"amount": fee[1].get(f, 0), "asset_id": "1.3.0"}))
                    ),
                    detail(
                        str(
                            price
                            * Amount({"amount": fee[1].get(f, 0), "asset_id": "1.3.0"})
                        )
                    ),
                ]
            )
    print_table(t)
python
def fees(ctx, currency):
    """ List fees
    """
    from bitsharesbase.operationids import getOperationNameForId
    from bitshares.market import Market

    market = Market("%s:%s" % (currency, "BTS"))
    ticker = market.ticker()
    if "quoteSettlement_price" in ticker:
        price = ticker.get("quoteSettlement_price")
    else:
        price = ticker.get("latest", 0)
    price.invert()

    chain = Blockchain(bitshares_instance=ctx.bitshares)
    feesObj = chain.chainParameters().get("current_fees")
    fees = feesObj["parameters"]

    t = [["Operation", "Type", "Fee", currency]]
    for fee in fees:
        for f in fee[1]:
            t.append(
                [
                    highlight(getOperationNameForId(fee[0])),
                    detail(f),
                    detail(
                        str(Amount({"amount": fee[1].get(f, 0), "asset_id": "1.3.0"}))
                    ),
                    detail(
                        str(
                            price
                            * Amount({"amount": fee[1].get(f, 0), "asset_id": "1.3.0"})
                        )
                    ),
                ]
            )
    print_table(t)
List fees
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/info.py#L110-L147
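The fee schedule returned by the chain is a list of (operation id, fee-dict) pairs, which is what the nested loop above flattens. A standalone sketch of that flattening; the operation ids and amounts are made up (real fees are integers in the core asset's smallest unit):

fees = [
    (0, {"fee": 86869, "price_per_kbyte": 48184}),  # hypothetical transfer fees
    (1, {"fee": 4826}),                             # hypothetical limit_order_create
]
rows = [["op", "type", "raw amount"]]
for op_id, fee_types in fees:
    for fee_type, raw in fee_types.items():
        rows.append([op_id, fee_type, raw])
assert len(rows) == 4  # header row plus three flattened fee entries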
bitshares/uptick
uptick/htlc.py
create
def create(ctx, to, amount, symbol, secret, hash, account, expiration):
    """ Create an HTLC contract
    """
    ctx.blockchain.blocking = True
    tx = ctx.blockchain.htlc_create(
        Amount(amount, symbol),
        to,
        secret,
        hash_type=hash,
        expiration=expiration,
        account=account,
    )
    tx.pop("trx", None)
    print_tx(tx)
    results = tx.get("operation_results", {})
    if results:
        htlc_id = results[0][1]
        print("Your htlc_id is: {}".format(htlc_id))
python
def create(ctx, to, amount, symbol, secret, hash, account, expiration):
    """ Create an HTLC contract
    """
    ctx.blockchain.blocking = True
    tx = ctx.blockchain.htlc_create(
        Amount(amount, symbol),
        to,
        secret,
        hash_type=hash,
        expiration=expiration,
        account=account,
    )
    tx.pop("trx", None)
    print_tx(tx)
    results = tx.get("operation_results", {})
    if results:
        htlc_id = results[0][1]
        print("Your htlc_id is: {}".format(htlc_id))
Create an HTLC contract
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/htlc.py#L28-L45
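The htlc_id extraction relies on operation_results holding a (kind, value) pair per operation of the broadcast transaction. A sketch of just that step, with a made-up payload:

tx = {"operation_results": [[1, "1.16.124"]]}  # hypothetical: the value is the new object id
results = tx.get("operation_results", {})
if results:
    htlc_id = results[0][1]
    print("Your htlc_id is: {}".format(htlc_id))  # -> 1.16.124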
bitshares/uptick
uptick/htlc.py
redeem
def redeem(ctx, htlc_id, secret, account):
    """ Redeem an HTLC contract
    """
    print_tx(ctx.blockchain.htlc_redeem(htlc_id, secret, account=account))
python
def redeem(ctx, htlc_id, secret, account):
    """ Redeem an HTLC contract
    """
    print_tx(ctx.blockchain.htlc_redeem(htlc_id, secret, account=account))
Redeem an HTLC contract
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/htlc.py#L57-L60
pasztorpisti/py-flags
src/flags.py
unique
def unique(flags_class):
    """ A decorator for flags classes to forbid flag aliases. """
    if not is_flags_class_final(flags_class):
        raise TypeError('unique check can be applied only to flags classes that have members')
    if not flags_class.__member_aliases__:
        return flags_class
    aliases = ', '.join('%s -> %s' % (alias, name)
                        for alias, name in flags_class.__member_aliases__.items())
    raise ValueError('duplicate values found in %r: %s' % (flags_class, aliases))
python
def unique(flags_class):
    """ A decorator for flags classes to forbid flag aliases. """
    if not is_flags_class_final(flags_class):
        raise TypeError('unique check can be applied only to flags classes that have members')
    if not flags_class.__member_aliases__:
        return flags_class
    aliases = ', '.join('%s -> %s' % (alias, name)
                        for alias, name in flags_class.__member_aliases__.items())
    raise ValueError('duplicate values found in %r: %s' % (flags_class, aliases))
A decorator for flags classes to forbid flag aliases.
https://github.com/pasztorpisti/py-flags/blob/bc48adb5edd7340ea1a686622d7993b4bcf4bfc2/src/flags.py#L26-L33
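A usage sketch, assuming py-flags' usual class-body syntax where `= ()` declares a member with auto-assigned bits; the flag names are arbitrary:

from flags import Flags, unique

@unique                  # passes: auto-assigned bits can never collide
class Permission(Flags):
    read = ()
    write = ()

# A second name that resolves to the bits of an existing member would register
# as an alias, and @unique would then raise
# ValueError("duplicate values found in ...: <alias> -> <name>").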
pasztorpisti/py-flags
src/flags.py
unique_bits
def unique_bits(flags_class):
    """ A decorator for flags classes to forbid declaring flags with overlapping bits. """
    flags_class = unique(flags_class)
    other_bits = 0
    for name, member in flags_class.__members_without_aliases__.items():
        bits = int(member)
        if other_bits & bits:
            for other_name, other_member in flags_class.__members_without_aliases__.items():
                if int(other_member) & bits:
                    raise ValueError("%r: '%s' and '%s' have overlapping bits" %
                                     (flags_class, other_name, name))
        else:
            other_bits |= bits
python
def unique_bits(flags_class):
    """ A decorator for flags classes to forbid declaring flags with overlapping bits. """
    flags_class = unique(flags_class)
    other_bits = 0
    for name, member in flags_class.__members_without_aliases__.items():
        bits = int(member)
        if other_bits & bits:
            for other_name, other_member in flags_class.__members_without_aliases__.items():
                if int(other_member) & bits:
                    raise ValueError("%r: '%s' and '%s' have overlapping bits" %
                                     (flags_class, other_name, name))
        else:
            other_bits |= bits
A decorator for flags classes to forbid declaring flags with overlapping bits.
https://github.com/pasztorpisti/py-flags/blob/bc48adb5edd7340ea1a686622d7993b4bcf4bfc2/src/flags.py#L36-L47
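The overlap check itself needs none of the flags machinery; here is the same accumulate-and-test pass in isolation (names and bit patterns are arbitrary):

def find_overlap(members):
    """Return the first (earlier_name, later_name) pair sharing bits, else None."""
    seen, by_name = 0, {}
    for name, bits in members:
        if seen & bits:  # some earlier member already claimed one of these bits
            for other, other_bits in by_name.items():
                if other_bits & bits:
                    return other, name
        seen |= bits
        by_name[name] = bits
    return None

assert find_overlap([("a", 0b001), ("b", 0b010), ("c", 0b011)]) == ("a", "c")
assert find_overlap([("a", 0b001), ("b", 0b010)]) is None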
pasztorpisti/py-flags
src/flags.py
process_inline_members_definition
def process_inline_members_definition(members):
    """
    :param members: this can be any of the following:
        - a string containing a space and/or comma separated list of names:
          e.g.: "item1 item2 item3" OR "item1,item2,item3" OR "item1, item2, item3"
        - tuple/list/Set of strings (names)
        - Mapping of (name, data) pairs
        - any kind of iterable that yields (name, data) pairs
    :return: An iterable of (name, data) pairs.
    """
    if isinstance(members, str):
        members = ((name, UNDEFINED) for name in members.replace(',', ' ').split())
    elif isinstance(members, (tuple, list, collections.abc.Set)):
        # collections.abc: the bare collections.Set/Mapping aliases were removed in Python 3.10
        if members and isinstance(next(iter(members)), str):
            members = ((name, UNDEFINED) for name in members)
    elif isinstance(members, collections.abc.Mapping):
        members = members.items()
    return members
python
def process_inline_members_definition(members):
    """
    :param members: this can be any of the following:
        - a string containing a space and/or comma separated list of names:
          e.g.: "item1 item2 item3" OR "item1,item2,item3" OR "item1, item2, item3"
        - tuple/list/Set of strings (names)
        - Mapping of (name, data) pairs
        - any kind of iterable that yields (name, data) pairs
    :return: An iterable of (name, data) pairs.
    """
    if isinstance(members, str):
        members = ((name, UNDEFINED) for name in members.replace(',', ' ').split())
    elif isinstance(members, (tuple, list, collections.abc.Set)):
        # collections.abc: the bare collections.Set/Mapping aliases were removed in Python 3.10
        if members and isinstance(next(iter(members)), str):
            members = ((name, UNDEFINED) for name in members)
    elif isinstance(members, collections.abc.Mapping):
        members = members.items()
    return members
:param members: this can be any of the following:
    - a string containing a space and/or comma separated list of names:
      e.g.: "item1 item2 item3" OR "item1,item2,item3" OR "item1, item2, item3"
    - tuple/list/Set of strings (names)
    - Mapping of (name, data) pairs
    - any kind of iterable that yields (name, data) pairs
:return: An iterable of (name, data) pairs.
https://github.com/pasztorpisti/py-flags/blob/bc48adb5edd7340ea1a686622d7993b4bcf4bfc2/src/flags.py#L95-L112
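The three accepted spellings, exercised directly; a local sentinel stands in for the module's UNDEFINED:

UNDEFINED = object()

# string form: commas are turned into spaces, then split
assert "read, write execute".replace(',', ' ').split() == ['read', 'write', 'execute']

# sequence-of-names form: each name is paired with UNDEFINED
pairs = [(name, UNDEFINED) for name in ('read', 'write')]
assert [p[0] for p in pairs] == ['read', 'write']

# mapping form: items() already yields (name, data) pairs
assert list({'read': 1, 'write': 2}.items()) == [('read', 1), ('write', 2)]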
pasztorpisti/py-flags
src/flags.py
FlagsMeta.process_member_definitions
def process_member_definitions(cls, member_definitions):
    """
    The incoming member_definitions contains the class attributes (with their values)
    that are used to define the flag members. This method can do anything to the
    incoming list and has to return a final set of flag definitions that assigns
    bits to the members. The returned member definitions can be completely
    different or unrelated to the incoming ones.
    :param member_definitions: A list of (name, data) tuples.
    :return: An iterable of iterables yielding 3 items: name, bits, data
    """
    members = []
    auto_flags = []
    all_bits = 0
    for name, data in member_definitions:
        bits, data = cls.flag_attribute_value_to_bits_and_data(name, data)
        if bits is UNDEFINED:
            auto_flags.append(len(members))
            members.append((name, data))
        elif is_valid_bits_value(bits):
            all_bits |= bits
            members.append((name, bits, data))
        else:
            raise TypeError("Expected an int value as the bits of flag '%s', received %r" % (name, bits))

    # auto-assigning unused bits to members without custom defined bits
    bit = 1
    for index in auto_flags:
        while bit & all_bits:
            bit <<= 1
        name, data = members[index]
        members[index] = name, bit, data
        bit <<= 1

    return members
python
def process_member_definitions(cls, member_definitions):
    """
    The incoming member_definitions contains the class attributes (with their values)
    that are used to define the flag members. This method can do anything to the
    incoming list and has to return a final set of flag definitions that assigns
    bits to the members. The returned member definitions can be completely
    different or unrelated to the incoming ones.
    :param member_definitions: A list of (name, data) tuples.
    :return: An iterable of iterables yielding 3 items: name, bits, data
    """
    members = []
    auto_flags = []
    all_bits = 0
    for name, data in member_definitions:
        bits, data = cls.flag_attribute_value_to_bits_and_data(name, data)
        if bits is UNDEFINED:
            auto_flags.append(len(members))
            members.append((name, data))
        elif is_valid_bits_value(bits):
            all_bits |= bits
            members.append((name, bits, data))
        else:
            raise TypeError("Expected an int value as the bits of flag '%s', received %r" % (name, bits))

    # auto-assigning unused bits to members without custom defined bits
    bit = 1
    for index in auto_flags:
        while bit & all_bits:
            bit <<= 1
        name, data = members[index]
        members[index] = name, bit, data
        bit <<= 1

    return members
The incoming member_definitions contains the class attributes (with their values)
that are used to define the flag members. This method can do anything to the
incoming list and has to return a final set of flag definitions that assigns bits
to the members. The returned member definitions can be completely different or
unrelated to the incoming ones.
:param member_definitions: A list of (name, data) tuples.
:return: An iterable of iterables yielding 3 items: name, bits, data
https://github.com/pasztorpisti/py-flags/blob/bc48adb5edd7340ea1a686622d7993b4bcf4bfc2/src/flags.py#L426-L458
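The auto-assignment is the subtle part: explicitly defined bits are reserved first, then each auto member takes the next free bit, skipping anything already claimed. The same logic in isolation, with None standing in for UNDEFINED and data fields omitted:

def assign_bits(defs):
    members, auto, all_bits = [], [], 0
    for name, bits in defs:
        if bits is None:
            auto.append(len(members))
            members.append((name, None))
        else:
            all_bits |= bits
            members.append((name, bits))
    bit = 1
    for index in auto:
        while bit & all_bits:  # skip bits taken by explicit definitions
            bit <<= 1
        members[index] = (members[index][0], bit)
        bit <<= 1
    return members

assert assign_bits([('a', 0b001), ('b', None), ('c', None)]) == [('a', 1), ('b', 2), ('c', 4)]
assert assign_bits([('a', 0b010), ('b', None)]) == [('a', 2), ('b', 1)]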
pasztorpisti/py-flags
src/flags.py
Flags.from_simple_str
def from_simple_str(cls, s):
    """ Accepts only the output of to_simple_str(). The output of __str__() is invalid as input. """
    if not isinstance(s, str):
        raise TypeError("Expected an str instance, received %r" % (s,))
    return cls(cls.bits_from_simple_str(s))
python
def from_simple_str(cls, s):
    """ Accepts only the output of to_simple_str(). The output of __str__() is invalid as input. """
    if not isinstance(s, str):
        raise TypeError("Expected an str instance, received %r" % (s,))
    return cls(cls.bits_from_simple_str(s))
Accepts only the output of to_simple_str(). The output of __str__() is invalid as input.
https://github.com/pasztorpisti/py-flags/blob/bc48adb5edd7340ea1a686622d7993b4bcf4bfc2/src/flags.py#L663-L667
pasztorpisti/py-flags
src/flags.py
Flags.from_str
def from_str(cls, s):
    """ Accepts both the output of to_simple_str() and __str__(). """
    if not isinstance(s, str):
        raise TypeError("Expected an str instance, received %r" % (s,))
    return cls(cls.bits_from_str(s))
python
def from_str(cls, s):
    """ Accepts both the output of to_simple_str() and __str__(). """
    if not isinstance(s, str):
        raise TypeError("Expected an str instance, received %r" % (s,))
    return cls(cls.bits_from_str(s))
Accepts both the output of to_simple_str() and __str__().
https://github.com/pasztorpisti/py-flags/blob/bc48adb5edd7340ea1a686622d7993b4bcf4bfc2/src/flags.py#L670-L674
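A round-trip sketch contrasting the two constructors, assuming py-flags' auto-bit member syntax and its usual string renderings (to_simple_str() -> "read|write", str() -> "Permission(read|write)", as bits_from_str below implies):

from flags import Flags

class Permission(Flags):
    read = ()
    write = ()

p = Permission.read | Permission.write
assert Permission.from_simple_str(p.to_simple_str()) == p
assert Permission.from_str(str(p)) == p              # full __str__ form
assert Permission.from_str(p.to_simple_str()) == p   # simple form also accepted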
pasztorpisti/py-flags
src/flags.py
Flags.bits_from_str
def bits_from_str(cls, s):
    """ Converts the output of __str__ into an integer. """
    try:
        if len(s) <= len(cls.__name__) or not s.startswith(cls.__name__):
            return cls.bits_from_simple_str(s)
        c = s[len(cls.__name__)]
        if c == '(':
            if not s.endswith(')'):
                raise ValueError
            return cls.bits_from_simple_str(s[len(cls.__name__)+1:-1])
        elif c == '.':
            member_name = s[len(cls.__name__)+1:]
            return int(cls.__all_members__[member_name])
        else:
            raise ValueError
    except ValueError as ex:
        if ex.args:
            raise
        raise ValueError("%s.%s: invalid input: %r" %
                         (cls.__name__, cls.bits_from_str.__name__, s))
    except KeyError as ex:
        raise ValueError("%s.%s: Invalid flag name '%s' in input: %r" %
                         (cls.__name__, cls.bits_from_str.__name__, ex.args[0], s))
python
def bits_from_str(cls, s):
    """ Converts the output of __str__ into an integer. """
    try:
        if len(s) <= len(cls.__name__) or not s.startswith(cls.__name__):
            return cls.bits_from_simple_str(s)
        c = s[len(cls.__name__)]
        if c == '(':
            if not s.endswith(')'):
                raise ValueError
            return cls.bits_from_simple_str(s[len(cls.__name__)+1:-1])
        elif c == '.':
            member_name = s[len(cls.__name__)+1:]
            return int(cls.__all_members__[member_name])
        else:
            raise ValueError
    except ValueError as ex:
        if ex.args:
            raise
        raise ValueError("%s.%s: invalid input: %r" %
                         (cls.__name__, cls.bits_from_str.__name__, s))
    except KeyError as ex:
        raise ValueError("%s.%s: Invalid flag name '%s' in input: %r" %
                         (cls.__name__, cls.bits_from_str.__name__, ex.args[0], s))
Converts the output of __str__ into an integer.
https://github.com/pasztorpisti/py-flags/blob/bc48adb5edd7340ea1a686622d7993b4bcf4bfc2/src/flags.py#L689-L710
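The three input shapes the branches above handle, reusing the hypothetical Permission class from the earlier sketch; each call returns an int:

for s in ("read|write", "Permission(read|write)"):
    print(Permission.bits_from_str(s))       # bits of read|write in both cases
print(Permission.bits_from_str("Permission.read"))  # bits of a single member
# anything else that starts with the class name raises the uniform
# ValueError("Permission.bits_from_str: invalid input: ...")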
bitshares/uptick
uptick/feed.py
newfeed
def newfeed(ctx, symbol, price, market, cer, mssr, mcr, account):
    """ Publish a price feed!

        Examples:

            \b
            uptick newfeed USD 0.01 USD/BTS
            uptick newfeed USD 100 BTS/USD

        Core Exchange Rate (CER)

            \b
            If no CER is provided, the cer will be the same as the settlement
            price with a 5% premium (Only if the 'market' is against the core
            asset (e.g. BTS)). The CER is always defined against the core asset
            (BTS). This means that if the backing asset is not the core asset
            (BTS), then you must specify your own cer as a float. The float `x`
            will be interpreted as `x BTS/SYMBOL`.
    """
    if cer:
        cer = Price(cer, quote=symbol, base="1.3.0", bitshares_instance=ctx.bitshares)
    print_tx(
        ctx.bitshares.publish_price_feed(
            symbol, Price(price, market), cer=cer, mssr=mssr, mcr=mcr, account=account
        )
    )
python
def newfeed(ctx, symbol, price, market, cer, mssr, mcr, account):
    """ Publish a price feed!

        Examples:

            \b
            uptick newfeed USD 0.01 USD/BTS
            uptick newfeed USD 100 BTS/USD

        Core Exchange Rate (CER)

            \b
            If no CER is provided, the cer will be the same as the settlement
            price with a 5% premium (Only if the 'market' is against the core
            asset (e.g. BTS)). The CER is always defined against the core asset
            (BTS). This means that if the backing asset is not the core asset
            (BTS), then you must specify your own cer as a float. The float `x`
            will be interpreted as `x BTS/SYMBOL`.
    """
    if cer:
        cer = Price(cer, quote=symbol, base="1.3.0", bitshares_instance=ctx.bitshares)
    print_tx(
        ctx.bitshares.publish_price_feed(
            symbol, Price(price, market), cer=cer, mssr=mssr, mcr=mcr, account=account
        )
    )
Publish a price feed!

Examples:

    uptick newfeed USD 0.01 USD/BTS
    uptick newfeed USD 100 BTS/USD

Core Exchange Rate (CER)

    If no CER is provided, the cer will be the same as the settlement price
    with a 5% premium (Only if the 'market' is against the core asset (e.g.
    BTS)). The CER is always defined against the core asset (BTS). This means
    that if the backing asset is not the core asset (BTS), then you must
    specify your own cer as a float. The float `x` will be interpreted as
    `x BTS/SYMBOL`.
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/feed.py#L42-L67
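A hedged sketch of supplying a manual CER, constructed exactly as newfeed() does internally; it requires a configured node (symbols are resolved on-chain), and the number is made up, read as BTS per unit of the asset:

from bitshares.price import Price

cer = Price(100.0, quote="USD", base="1.3.0")  # hypothetical: 100 BTS/USD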
bitshares/uptick
uptick/feed.py
feeds
def feeds(ctx, assets, pricethreshold, maxage):
    """ Price Feed Overview
    """
    import builtins

    witnesses = Witnesses(bitshares_instance=ctx.bitshares)

    def test_price(p, ref):
        if math.fabs(float(p / ref) - 1.0) > pricethreshold / 100.0:
            return click.style(str(p), fg="red")
        elif math.fabs(float(p / ref) - 1.0) > pricethreshold / 2.0 / 100.0:
            return click.style(str(p), fg="yellow")
        else:
            return click.style(str(p), fg="green")

    def price_diff(p, ref):
        d = (float(p) - float(ref)) / float(ref) * 100
        if math.fabs(d) >= 5:
            color = "red"
        elif math.fabs(d) >= 2.5:
            color = "yellow"
        else:
            color = "green"
        return click.style("{:8.2f}%".format(d), fg=color)

    def test_date(d):
        t = d.replace(tzinfo=None)
        now = datetime.utcnow()
        # green while younger than half the allowed age, yellow until maxage, red after
        if now < t + timedelta(minutes=maxage / 2.0):
            return click.style(str(t), fg="green")
        elif now < t + timedelta(minutes=maxage):
            return click.style(str(t), fg="yellow")
        else:
            return click.style(str(t), fg="red")

    output = ""
    for asset in tqdm(assets):
        t = PrettyTable(
            [
                "Asset",
                "Producer",
                "Active Witness",
                "Date",
                "Settlement Price",
                "Core Exchange Price",
                "MCR",
                "SSPR",
                "delta",
            ]
        )
        t.align = "c"
        t.align["Producer"] = "l"
        asset = Asset(asset, full=True, bitshares_instance=ctx.bitshares)
        current_feed = asset.feed
        feeds = asset.feeds
        producingwitnesses = builtins.set()
        witness_accounts = [x["witness_account"] for x in witnesses]
        for feed in tqdm(feeds):
            producingwitnesses.add(feed["producer"]["id"])
            t.add_row(
                [
                    asset["symbol"],
                    feed["producer"]["name"],
                    click.style(
                        "X" if feed["producer"]["id"] in witness_accounts else "",
                        bold=True,
                    ),
                    test_date(feed["date"]),
                    test_price(
                        feed["settlement_price"], current_feed["settlement_price"]
                    ),
                    test_price(
                        feed["core_exchange_rate"], current_feed["core_exchange_rate"]
                    ),
                    feed["maintenance_collateral_ratio"] / 10,
                    feed["maximum_short_squeeze_ratio"] / 10,
                    price_diff(
                        feed["core_exchange_rate"], current_feed["core_exchange_rate"]
                    ),
                ]
            )
        for missing in builtins.set(witness_accounts).difference(producingwitnesses):
            witness = Witness(missing)
            t.add_row(
                [
                    click.style(asset["symbol"], bg="red"),
                    click.style(witness.account["name"], bg="red"),
                    # every entry in this loop is an active witness account by construction
                    click.style("X", bold=True),
                    click.style(str(datetime(1970, 1, 1))),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                ]
            )
        output += t.get_string(sortby="Date", reversesort=True)
        output += "\n"
    click.echo(output)
python
def feeds(ctx, assets, pricethreshold, maxage):
    """ Price Feed Overview
    """
    import builtins

    witnesses = Witnesses(bitshares_instance=ctx.bitshares)

    def test_price(p, ref):
        if math.fabs(float(p / ref) - 1.0) > pricethreshold / 100.0:
            return click.style(str(p), fg="red")
        elif math.fabs(float(p / ref) - 1.0) > pricethreshold / 2.0 / 100.0:
            return click.style(str(p), fg="yellow")
        else:
            return click.style(str(p), fg="green")

    def price_diff(p, ref):
        d = (float(p) - float(ref)) / float(ref) * 100
        if math.fabs(d) >= 5:
            color = "red"
        elif math.fabs(d) >= 2.5:
            color = "yellow"
        else:
            color = "green"
        return click.style("{:8.2f}%".format(d), fg=color)

    def test_date(d):
        t = d.replace(tzinfo=None)
        now = datetime.utcnow()
        # green while younger than half the allowed age, yellow until maxage, red after
        if now < t + timedelta(minutes=maxage / 2.0):
            return click.style(str(t), fg="green")
        elif now < t + timedelta(minutes=maxage):
            return click.style(str(t), fg="yellow")
        else:
            return click.style(str(t), fg="red")

    output = ""
    for asset in tqdm(assets):
        t = PrettyTable(
            [
                "Asset",
                "Producer",
                "Active Witness",
                "Date",
                "Settlement Price",
                "Core Exchange Price",
                "MCR",
                "SSPR",
                "delta",
            ]
        )
        t.align = "c"
        t.align["Producer"] = "l"
        asset = Asset(asset, full=True, bitshares_instance=ctx.bitshares)
        current_feed = asset.feed
        feeds = asset.feeds
        producingwitnesses = builtins.set()
        witness_accounts = [x["witness_account"] for x in witnesses]
        for feed in tqdm(feeds):
            producingwitnesses.add(feed["producer"]["id"])
            t.add_row(
                [
                    asset["symbol"],
                    feed["producer"]["name"],
                    click.style(
                        "X" if feed["producer"]["id"] in witness_accounts else "",
                        bold=True,
                    ),
                    test_date(feed["date"]),
                    test_price(
                        feed["settlement_price"], current_feed["settlement_price"]
                    ),
                    test_price(
                        feed["core_exchange_rate"], current_feed["core_exchange_rate"]
                    ),
                    feed["maintenance_collateral_ratio"] / 10,
                    feed["maximum_short_squeeze_ratio"] / 10,
                    price_diff(
                        feed["core_exchange_rate"], current_feed["core_exchange_rate"]
                    ),
                ]
            )
        for missing in builtins.set(witness_accounts).difference(producingwitnesses):
            witness = Witness(missing)
            t.add_row(
                [
                    click.style(asset["symbol"], bg="red"),
                    click.style(witness.account["name"], bg="red"),
                    # every entry in this loop is an active witness account by construction
                    click.style("X", bold=True),
                    click.style(str(datetime(1970, 1, 1))),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                    click.style("missing", bg="red"),
                ]
            )
        output += t.get_string(sortby="Date", reversesort=True)
        output += "\n"
    click.echo(output)
Price Feed Overview
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/feed.py#L78-L179
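The price columns are colored by relative deviation from the current feed; the same test as a pure function, with the thresholds spelled out (values are arbitrary):

import math

def deviation_band(p, ref, threshold_pct):
    dev = math.fabs(p / ref - 1.0) * 100
    if dev > threshold_pct:
        return "red"
    elif dev > threshold_pct / 2.0:
        return "yellow"
    return "green"

assert deviation_band(1.00, 1.00, 5.0) == "green"
assert deviation_band(1.04, 1.00, 5.0) == "yellow"  # 4% off: above half the threshold
assert deviation_band(1.06, 1.00, 5.0) == "red"     # 6% off: above the threshold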
bitshares/uptick
uptick/ui.py
print_table
def print_table(*args, **kwargs):
    """ Print a formatted table (see format_table). """
    t = format_table(*args, **kwargs)
    click.echo(t)
python
def print_table(*args, **kwargs):
    """ Print a formatted table (see format_table). """
    t = format_table(*args, **kwargs)
    click.echo(t)
Print a formatted table (see format_table).
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/ui.py#L125-L137
bitshares/uptick
uptick/rpc.py
rpc
def rpc(ctx, call, arguments, api):
    """ Construct RPC call directly

        \b
        You can specify which API to send the call to:

            uptick rpc --api assets

        You can also specify lists using

            uptick rpc get_objects "['2.0.0', '2.1.0']"
    """
    try:
        data = list(eval(d) for d in arguments)
    except Exception:
        data = arguments
    ret = getattr(ctx.bitshares.rpc, call)(*data, api=api)
    print_dict(ret)
python
def rpc(ctx, call, arguments, api):
    """ Construct RPC call directly

        \b
        You can specify which API to send the call to:

            uptick rpc --api assets

        You can also specify lists using

            uptick rpc get_objects "['2.0.0', '2.1.0']"
    """
    try:
        data = list(eval(d) for d in arguments)
    except Exception:
        data = arguments
    ret = getattr(ctx.bitshares.rpc, call)(*data, api=api)
    print_dict(ret)
Construct RPC call directly

You can specify which API to send the call to:

    uptick rpc --api assets

You can also specify lists using

    uptick rpc get_objects "['2.0.0', '2.1.0']"
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/rpc.py#L16-L33
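The eval() fallback above is convenient but executes arbitrary expressions; a stricter variant (an alternative sketch, not what uptick ships) uses ast.literal_eval, which only accepts Python literals:

import ast

def parse_args(arguments):
    parsed = []
    for d in arguments:
        try:
            parsed.append(ast.literal_eval(d))  # lists, dicts, numbers, strings
        except (ValueError, SyntaxError):
            parsed.append(d)                    # keep plain strings as-is
    return parsed

assert parse_args(["['2.0.0', '2.1.0']", "somestring"]) == [["2.0.0", "2.1.0"], "somestring"]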
bitshares/uptick
uptick/committee.py
approvecommittee
def approvecommittee(ctx, members, account):
    """ Approve committee member(s)
    """
    print_tx(ctx.bitshares.approvecommittee(members, account=account))
python
def approvecommittee(ctx, members, account):
    """ Approve committee member(s)
    """
    print_tx(ctx.bitshares.approvecommittee(members, account=account))
Approve committee member(s)
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/committee.py#L18-L21
bitshares/uptick
uptick/committee.py
disapprovecommittee
def disapprovecommittee(ctx, members, account):
    """ Disapprove committee member(s)
    """
    print_tx(ctx.bitshares.disapprovecommittee(members, account=account))
python
def disapprovecommittee(ctx, members, account):
    """ Disapprove committee member(s)
    """
    print_tx(ctx.bitshares.disapprovecommittee(members, account=account))
Disapprove committee member(s)
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/committee.py#L35-L38
bitshares/uptick
uptick/committee.py
createcommittee
def createcommittee(ctx, url, account):
    """ Setup a committee account for your account
    """
    print_tx(ctx.bitshares.create_committee_member(url, account=account))
python
def createcommittee(ctx, url, account):
    """ Setup a committee account for your account
    """
    print_tx(ctx.bitshares.create_committee_member(url, account=account))
Setup a committee account for your account
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/committee.py#L52-L55
bitshares/uptick
uptick/cli.py
set
def set(ctx, key, value):
    """ Set configuration parameters
    """
    if key == "default_account" and value.startswith("@"):
        value = value[1:]
    ctx.bitshares.config[key] = value
python
def set(ctx, key, value):
    """ Set configuration parameters
    """
    if key == "default_account" and value.startswith("@"):
        value = value[1:]
    ctx.bitshares.config[key] = value
Set configuration parameters
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/cli.py#L40-L45
bitshares/uptick
uptick/cli.py
configuration
def configuration(ctx):
    """ Show configuration variables
    """
    t = [["Key", "Value"]]
    for key in ctx.bitshares.config:
        t.append([key, ctx.bitshares.config[key]])
    print_table(t)
python
def configuration(ctx):
    """ Show configuration variables
    """
    t = [["Key", "Value"]]
    for key in ctx.bitshares.config:
        t.append([key, ctx.bitshares.config[key]])
    print_table(t)
Show configuration variables
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/cli.py#L51-L57
bitshares/uptick
uptick/cli.py
sign
def sign(ctx, filename):
    """ Sign a json-formatted transaction
    """
    if filename:
        tx = filename.read()
    else:
        tx = sys.stdin.read()
    tx = TransactionBuilder(eval(tx), bitshares_instance=ctx.bitshares)
    tx.appendMissingSignatures()
    tx.sign()
    print_tx(tx.json())
python
def sign(ctx, filename):
    """ Sign a json-formatted transaction
    """
    if filename:
        tx = filename.read()
    else:
        tx = sys.stdin.read()
    tx = TransactionBuilder(eval(tx), bitshares_instance=ctx.bitshares)
    tx.appendMissingSignatures()
    tx.sign()
    print_tx(tx.json())
Sign a json-formatted transaction
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/cli.py#L65-L75
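Since the input is documented as json-formatted, json.loads is the stricter reader (eval() would fail on JSON's true/false/null tokens). A sketch of the same pipeline with only that substitution; the commented steps are unchanged from the code above:

import json
import sys

raw = sys.stdin.read()
tx_dict = json.loads(raw)  # in place of eval(raw)
# tx = TransactionBuilder(tx_dict, bitshares_instance=ctx.bitshares)
# tx.appendMissingSignatures(); tx.sign(); print_tx(tx.json())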
bitshares/uptick
uptick/cli.py
randomwif
def randomwif(prefix, num):
    """ Obtain a random private/public key pair
    """
    from bitsharesbase.account import PrivateKey

    t = [["wif", "pubkey"]]
    for _ in range(num):
        wif = PrivateKey()
        t.append([str(wif), format(wif.pubkey, prefix)])
    print_table(t)
python
def randomwif(prefix, num):
    """ Obtain a random private/public key pair
    """
    from bitsharesbase.account import PrivateKey

    t = [["wif", "pubkey"]]
    for _ in range(num):
        wif = PrivateKey()
        t.append([str(wif), format(wif.pubkey, prefix)])
    print_table(t)
Obtain a random private/public key pair
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/cli.py#L97-L106
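A single iteration run standalone, mirroring the loop body above; the output pair is random each time, and "BTS" is the usual address prefix:

from bitsharesbase.account import PrivateKey

wif = PrivateKey()                 # fresh random private key
print(str(wif))                    # WIF-encoded private key
print(format(wif.pubkey, "BTS"))   # matching public key, BTS-prefixed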
bitshares/uptick
uptick/witness.py
approvewitness
def approvewitness(ctx, witnesses, account):
    """ Approve witness(es)
    """
    print_tx(ctx.bitshares.approvewitness(witnesses, account=account))
python
def approvewitness(ctx, witnesses, account):
    """ Approve witness(es)
    """
    print_tx(ctx.bitshares.approvewitness(witnesses, account=account))
Approve witness(es)
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/witness.py#L20-L23