Dataset schema (column, type, observed value lengths):

    repo              string, 7-55 chars
    path              string, 4-127 chars
    func_name         string, 1-88 chars
    original_string   string, 75-19.8k chars
    language          string, 1 distinct value
    code              string, 75-19.8k chars
    code_tokens       sequence
    docstring         string, 3-17.3k chars
    docstring_tokens  sequence
    sha               string, 40 chars
    url               string, 87-242 chars
    partition         string, 1 distinct value
johnnoone/json-spec
src/jsonspec/pointer/__init__.py
extract
def extract(obj, pointer, bypass_ref=False):
    """Extract member or element of obj according to pointer.

    :param obj: the object source
    :param pointer: the pointer
    :type pointer: Pointer, str
    :param bypass_ref: bypass JSON Reference event
    :type bypass_ref: boolean
    """
    return Pointer(pointer).extract(obj, bypass_ref)
python
f91981724cea0c366bd42a6670eb07bbe31c0e0c
https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/pointer/__init__.py#L23-L33
train
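A minimal usage sketch for extract, resolving a JSON Pointer (RFC 6901) against a document; the document and pointer below are illustrative, not from the source:

    from jsonspec.pointer import extract

    doc = {"foo": ["bar", "baz"]}
    # "/foo/1" addresses the second element of the "foo" array
    assert extract(doc, "/foo/1") == "baz"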
etal/biofrills
biofrills/alnutils.py
aa_counts
def aa_counts(aln, weights=None, gap_chars='-.'):
    """Calculate the amino acid frequencies in a set of SeqRecords.

    Weights for each sequence in the alignment can be given as a list/tuple,
    usually calculated with the sequence_weights function. For convenience,
    you can also pass "weights=True" and the weights will be calculated with
    sequence_weights here.
    """
    if weights is None:
        counts = Counter()
        for rec in aln:
            seq_counts = Counter(str(rec.seq))
            counts.update(seq_counts)
    else:
        if weights == True:  # For convenience
            weights = sequence_weights(aln)
        else:
            assert len(weights) == len(aln), (
                "Length mismatch: weights = %d, alignment = %d"
                % (len(weights), len(aln)))
        counts = defaultdict(float)
        for col in zip(*aln):
            for aa, wt in zip(col, weights):
                counts[aa] += wt
    # Don't count gaps
    for gap_char in gap_chars:
        if gap_char in counts:
            del counts[gap_char]
    return counts
python
36684bb6c7632f96215e8b2b4ebc86640f331bcd
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/alnutils.py#L13-L43
train
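A brief usage sketch for aa_counts in the unweighted case; the two toy records are invented, and Biopython SeqRecord inputs are assumed:

    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    from biofrills.alnutils import aa_counts

    aln = [SeqRecord(Seq("AC-A")), SeqRecord(Seq("AC-C"))]
    counts = aa_counts(aln)
    # Counter({'A': 3, 'C': 3}) -- gap characters '-' and '.' are not counted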
etal/biofrills
biofrills/alnutils.py
aa_frequencies
def aa_frequencies(aln, weights=None, gap_chars='-.'):
    """Frequency of each residue type in an alignment.

    Alignment is a MultipleSeqAlignment or iterable of SeqRecords.
    """
    counts = aa_counts(aln, weights, gap_chars)
    # Reduce to frequencies
    scale = 1.0 / sum(counts.values())
    return dict((aa, cnt * scale) for aa, cnt in counts.iteritems())
python
36684bb6c7632f96215e8b2b4ebc86640f331bcd
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/alnutils.py#L46-L54
train
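aa_frequencies calls counts.iteritems(), so this module targets Python 2. A sketch of the same logic ported to Python 3, assuming aa_counts from the record above:

    def aa_frequencies_py3(aln, weights=None, gap_chars='-.'):
        """Python 3 port of aa_frequencies: dict.items() replaces iteritems()."""
        counts = aa_counts(aln, weights, gap_chars)
        scale = 1.0 / sum(counts.values())
        return {aa: cnt * scale for aa, cnt in counts.items()}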
etal/biofrills
biofrills/alnutils.py
blocks
def blocks(aln, threshold=0.5, weights=None):
    """Remove gappy columns from an alignment."""
    assert len(aln)
    if weights == False:
        def pct_nongaps(col):
            return 1 - (float(col.count('-')) / len(col))
    else:
        if weights in (None, True):
            weights = sequence_weights(aln, 'avg1')
        def pct_nongaps(col):
            assert len(col) == len(weights)
            ngaps = sum(wt * (c == '-') for wt, c in zip(weights, col))
            return 1 - (ngaps / len(col))
    seqstrs = [str(rec.seq) for rec in aln]
    clean_cols = [col for col in zip(*seqstrs)
                  if pct_nongaps(col) >= threshold]
    alphabet = aln[0].seq.alphabet
    clean_seqs = [Seq(''.join(row), alphabet)
                  for row in zip(*clean_cols)]
    clean_recs = []
    for rec, seq in zip(aln, clean_seqs):
        newrec = deepcopy(rec)
        newrec.seq = seq
        clean_recs.append(newrec)
    return MultipleSeqAlignment(clean_recs, alphabet=alphabet)
python
36684bb6c7632f96215e8b2b4ebc86640f331bcd
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/alnutils.py#L57-L83
train
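A usage sketch for blocks; the input filename is hypothetical. Note that the function reads aln[0].seq.alphabet, which assumes a pre-1.78 Biopython where Seq objects still carry an alphabet:

    from Bio import AlignIO
    from biofrills.alnutils import blocks

    aln = AlignIO.read("example.fasta", "fasta")  # hypothetical alignment file
    # Keep only columns that are at least 50% non-gap (sequence-weighted)
    trimmed = blocks(aln, threshold=0.5)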
etal/biofrills
biofrills/alnutils.py
col_counts
def col_counts(col, weights=None, gap_chars='-.'):
    """Absolute counts of each residue type in a single column."""
    # NOTE: weights must be an iterable; the None default would fail in zip()
    cnt = defaultdict(float)
    for aa, wt in zip(col, weights):
        if aa not in gap_chars:
            cnt[aa] += wt
    return cnt
python
36684bb6c7632f96215e8b2b4ebc86640f331bcd
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/alnutils.py#L86-L92
train
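A usage sketch for col_counts with uniform weights; as the comment above notes, a weights iterable must be passed explicitly despite the None default:

    from biofrills.alnutils import col_counts

    col = "AAC-"                    # one alignment column, four sequences
    weights = [1.0, 1.0, 1.0, 1.0]  # uniform weighting
    cnt = col_counts(col, weights)
    # {'A': 2.0, 'C': 1.0} -- the gap character is skipped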
etal/biofrills
biofrills/alnutils.py
remove_empty_cols
def remove_empty_cols(records):
    """Remove all-gap columns from aligned SeqRecords."""
    # In case it's a generator, turn it into a list
    records = list(records)
    seqstrs = [str(rec.seq) for rec in records]
    clean_cols = [col for col in zip(*seqstrs)
                  if not all(c == '-' for c in col)]
    clean_seqs = [''.join(row) for row in zip(*clean_cols)]
    for rec, clean_seq in zip(records, clean_seqs):
        yield SeqRecord(Seq(clean_seq, rec.seq.alphabet),
                        id=rec.id, name=rec.name,
                        description=rec.description,
                        dbxrefs=rec.dbxrefs, features=rec.features,
                        annotations=rec.annotations,
                        letter_annotations=rec.letter_annotations)
python
36684bb6c7632f96215e8b2b4ebc86640f331bcd
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/alnutils.py#L103-L118
train
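A usage sketch for remove_empty_cols with invented records; like blocks, the function reads rec.seq.alphabet, so it assumes a pre-1.78 Biopython:

    from Bio.Seq import Seq
    from Bio.SeqRecord import SeqRecord
    from biofrills.alnutils import remove_empty_cols

    recs = [SeqRecord(Seq("A-C"), id="s1"), SeqRecord(Seq("G-T"), id="s2")]
    cleaned = list(remove_empty_cols(recs))
    # The middle column is all gaps, so the yielded sequences are "AC" and "GT"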
etal/biofrills
biofrills/alnutils.py
sequence_weights
def sequence_weights(aln, scaling='none', gap_chars='-.'):
    """Weight aligned sequences to emphasize more divergent members.

    Returns a list of floating-point numbers between 0 and 1, corresponding
    to the proportional weight of each sequence in the alignment. The first
    value is the weight of the first sequence in the alignment, and so on.

    Scaling schemes:

    - 'sum1': Weights sum to 1.0.
    - 'max1': Weights are all scaled so the max is 1.0.
    - 'avg1': Average (mean) weight is 1.0.
    - 'andy': Average (mean) weight is 0.5, ceiling is 1.0.
    - 'none': Weights are scaled to sum to the effective number of
      independent sequences.

    Method: At each column position, award each different residue an equal
    share of the weight, and then divide that weight equally among the
    sequences sharing the same residue. For each sequence, sum the
    contributions from each position to give a sequence weight.

    See Henikoff & Henikoff (1994): Position-based sequence weights.
    """
    # Probability is hard, let's estimate by sampling!
    # Sample k from a population of 20 with replacement; how many unique k
    # were chosen? Average of 10000 runs for k = 0..100
    expectk = [0.0, 1.0, 1.953, 2.861, 3.705, 4.524, 5.304, 6.026, 6.724,
               7.397, 8.04, 8.622, 9.191, 9.739, 10.264, 10.758, 11.194,
               11.635, 12.049, 12.468, 12.806, 13.185, 13.539, 13.863,
               14.177, 14.466, 14.737, 15.005, 15.245, 15.491, 15.681,
               15.916, 16.12, 16.301, 16.485, 16.671, 16.831, 16.979,
               17.151, 17.315, 17.427, 17.559, 17.68, 17.791, 17.914,
               18.009, 18.113, 18.203, 18.298, 18.391, 18.46, 18.547,
               18.617, 18.669, 18.77, 18.806, 18.858, 18.934, 18.978,
               19.027, 19.085, 19.119, 19.169, 19.202, 19.256, 19.291,
               19.311, 19.357, 19.399, 19.416, 19.456, 19.469, 19.5, 19.53,
               19.553, 19.562, 19.602, 19.608, 19.629, 19.655, 19.67,
               19.681, 19.7, 19.716, 19.724, 19.748, 19.758, 19.765,
               19.782, 19.791, 19.799, 19.812, 19.82, 19.828, 19.844,
               19.846, 19.858, 19.863, 19.862, 19.871, 19.882]

    def col_weight(column):
        """Represent the diversity at a position.

        Award each different residue an equal share of the weight, and then
        divide that weight equally among the sequences sharing the same
        residue.

        So, if in a position of a multiple alignment, r different residues
        are represented, a residue represented in only one sequence
        contributes a score of 1/r to that sequence, whereas a residue
        represented in s sequences contributes a score of 1/rs to each of
        the s sequences.
        """
        # Skip columns of all or mostly gaps (i.e. rare inserts)
        min_nongap = max(2, .2*len(column))
        if len([c for c in column if c not in gap_chars]) < min_nongap:
            return ([0] * len(column), 0)
        # Count the number of occurrences of each residue type
        # (Treat gaps as a separate, 21st character)
        counts = Counter(column)
        # Get residue weights: 1/rs, where
        # r = nb. residue types, s = count of a particular residue type
        n_residues = len(counts)  # r
        freqs = dict((aa, 1.0 / (n_residues * count))
                     for aa, count in counts.iteritems())
        weights = [freqs[aa] for aa in column]
        return (weights, n_residues)

    seq_weights = [0] * len(aln)
    tot_nres = 0.0  # Expected no. different types in independent seqs
    # Sum the contributions from each position along each sequence
    # -> total weight
    for col in zip(*aln):
        wts, nres = col_weight(col)
        assert sum(wts) <= 20
        tot_nres += expectk[nres] if nres < len(expectk) else 20
        for idx, wt in enumerate(wts):
            seq_weights[idx] += wt
    # if tot_nres == 0:
    #     raise ValueError("Alignment has no meaningful columns to weight")
    # Normalize w/ the given scaling criterion
    if scaling == 'none':
        avg_seq_len = tot_nres / len(aln)
        return [wt/avg_seq_len for wt in seq_weights]
    if scaling == 'max1':
        scale = 1.0 / max(seq_weights)
    elif scaling == 'sum1':
        scale = 1.0 / sum(seq_weights)
    elif scaling == 'avg1':
        scale = len(aln) / sum(seq_weights)
    elif scaling == 'andy':
        # "Robust" strategy used in CHAIN (Neuwald 2003)
        scale = len(aln) / sum(seq_weights)
        return [min(scale * wt, 1.0) for wt in seq_weights]
    else:
        raise ValueError("Unknown scaling scheme '%s'" % scaling)
    return [scale * wt for wt in seq_weights]
python
36684bb6c7632f96215e8b2b4ebc86640f331bcd
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/alnutils.py#L121-L215
train
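A worked example of the Henikoff & Henikoff 1/rs scheme that sequence_weights applies at each column; the three-sequence column is invented:

    from collections import Counter

    col = "AAC"            # one column across three aligned sequences
    counts = Counter(col)  # {'A': 2, 'C': 1}
    r = len(counts)        # r = 2 residue types in this column
    weights = [1.0 / (r * counts[aa]) for aa in col]
    # [0.25, 0.25, 0.5] -- the sequence with the rarer residue gets more weight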
etal/biofrills
biofrills/alnutils.py
to_graph
def to_graph(alnfname, weight_func):
    """Create a NetworkX graph from a sequence alignment.

    Nodes are string sequence IDs; edge weights are the output of
    weight_func between each pair, by default the absolute identity
    (# identical chars).
    """
    import networkx
    G = networkx.Graph()
    aln = AlignIO.read(alnfname, 'fasta')
    for i, arec in enumerate(aln):
        for brec in aln[i+1:]:
            ident = weight_func(str(arec.seq), str(brec.seq))
            G.add_edge(arec.id, brec.id, weight=ident)
    return G
python
36684bb6c7632f96215e8b2b4ebc86640f331bcd
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/alnutils.py#L218-L231
train
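to_graph requires a weight_func even though its docstring describes absolute identity as the default; a sketch that supplies such a function (the alignment filename is hypothetical, and networkx must be installed):

    from biofrills.alnutils import to_graph

    def identity(a, b):
        # absolute identity: number of aligned positions with identical characters
        return sum(x == y for x, y in zip(a, b))

    G = to_graph("aln.fasta", identity)  # hypothetical FASTA alignment file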
sludgedesk/metoffer
metoffer.py
guidance_UV
def guidance_UV(index):
    """Return Met Office guidance regarding UV exposure based on UV index"""
    if 0 < index < 3:
        guidance = "Low exposure. No protection required. You can safely stay outside"
    elif 2 < index < 6:
        guidance = "Moderate exposure. Seek shade during midday hours, cover up and wear sunscreen"
    elif 5 < index < 8:
        guidance = "High exposure. Seek shade during midday hours, cover up and wear sunscreen"
    elif 7 < index < 11:
        guidance = "Very high. Avoid being outside during midday hours. Shirt, sunscreen and hat are essential"
    elif index > 10:
        guidance = "Extreme. Avoid being outside during midday hours. Shirt, sunscreen and hat essential."
    else:
        guidance = None
    return guidance
python
449748d31f913d961d6f0406542bb784e931a95b
https://github.com/sludgedesk/metoffer/blob/449748d31f913d961d6f0406542bb784e931a95b/metoffer.py#L158-L172
train
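A quick check of guidance_UV's branching; a UV index of 0 (or any non-positive value) falls through to None:

    from metoffer import guidance_UV

    assert guidance_UV(1).startswith("Low")
    assert guidance_UV(3).startswith("Moderate")
    assert guidance_UV(11).startswith("Extreme")
    assert guidance_UV(0) is None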
sludgedesk/metoffer
metoffer.py
parse_sitelist
def parse_sitelist(sitelist):
    """Return list of Site instances from retrieved sitelist data"""
    sites = []
    for site in sitelist["Locations"]["Location"]:
        try:
            ident = site["id"]
            name = site["name"]
        except KeyError:
            ident = site["@id"]  # Difference between loc-spec and text for some reason
            name = site["@name"]
        if "latitude" in site:
            lat = float(site["latitude"])
            lon = float(site["longitude"])
        else:
            lat = lon = None
        s = Site(ident, name, lat, lon)
        sites.append(s)
    return sites
python
449748d31f913d961d6f0406542bb784e931a95b
https://github.com/sludgedesk/metoffer/blob/449748d31f913d961d6f0406542bb784e931a95b/metoffer.py#L323-L340
train
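A usage sketch for parse_sitelist with a hand-built payload covering both key styles the function handles: plain keys with coordinates, and '@'-prefixed keys without them:

    from metoffer import parse_sitelist

    sitelist = {"Locations": {"Location": [
        {"id": "3772", "name": "Heathrow", "latitude": "51.479", "longitude": "-0.449"},
        {"@id": "514", "@name": "London"},  # text-format entry, no coordinates
    ]}}
    sites = parse_sitelist(sitelist)
    # -> [Site('3772', 'Heathrow', 51.479, -0.449), Site('514', 'London', None, None)]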
sludgedesk/metoffer
metoffer.py
MetOffer._query
def _query(self, data_category, resource_category, field, request, step,
           isotime=None):
    """
    Request and return data from DataPoint RESTful API.
    """
    rest_url = "/".join([HOST, data_category, resource_category, field,
                         DATA_TYPE, request])
    query_string = "?" + "&".join(["res=" + step,
                                   "time=" + isotime if isotime is not None else "",
                                   "key=" + self.key])
    url = rest_url + query_string
    page = url_lib.urlopen(url)
    pg = page.read()
    return pg
python
449748d31f913d961d6f0406542bb784e931a95b
https://github.com/sludgedesk/metoffer/blob/449748d31f913d961d6f0406542bb784e931a95b/metoffer.py#L179-L188
train
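A small check of the query-string join in _query: when isotime is None, the conditional expression contributes an empty element, leaving a stray '&&' in the assembled URL (the values below are illustrative):

    step, isotime, key = "3hourly", None, "API-KEY"  # illustrative values
    query_string = "?" + "&".join(["res=" + step,
                                   "time=" + isotime if isotime is not None else "",
                                   "key=" + key])
    print(query_string)  # ?res=3hourly&&key=API-KEY -- note the empty element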
sludgedesk/metoffer
metoffer.py
MetOffer.stand_alone_imagery
def stand_alone_imagery(self):
    """
    Returns capabilities data for stand alone imagery and includes
    URIs for the images.
    """
    return json.loads(self._query(IMAGE, FORECAST, SURFACE_PRESSURE,
                                  CAPABILITIES, "").decode(errors="replace"))
python
449748d31f913d961d6f0406542bb784e931a95b
https://github.com/sludgedesk/metoffer/blob/449748d31f913d961d6f0406542bb784e931a95b/metoffer.py#L288-L293
train
sludgedesk/metoffer
metoffer.py
MetOffer.map_overlay_forecast
def map_overlay_forecast(self):
    """Returns capabilities data for forecast map overlays."""
    return json.loads(self._query(LAYER, FORECAST, ALL, CAPABILITIES,
                                  "").decode(errors="replace"))
python
449748d31f913d961d6f0406542bb784e931a95b
https://github.com/sludgedesk/metoffer/blob/449748d31f913d961d6f0406542bb784e931a95b/metoffer.py#L295-L297
train
sludgedesk/metoffer
metoffer.py
MetOffer.map_overlay_obs
def map_overlay_obs(self):
    """Returns capabilities data for observation map overlays."""
    return json.loads(self._query(LAYER, OBSERVATIONS, ALL, CAPABILITIES,
                                  "").decode(errors="replace"))
python
449748d31f913d961d6f0406542bb784e931a95b
https://github.com/sludgedesk/metoffer/blob/449748d31f913d961d6f0406542bb784e931a95b/metoffer.py#L299-L301
train
LISE-B26/pylabcontrol
build/lib/pylabcontrol/src/core/instruments.py
Instrument.load_and_append
def load_and_append(instrument_dict, instruments=None, raise_errors=False):
    """
    Load instruments from instrument_dict and append them to instruments.

    Args:
        instrument_dict: dictionary of the form

            instrument_dict = {
                name_of_instrument_1: {"settings": settings_dictionary, "class": name_of_class},
                name_of_instrument_2: {"settings": settings_dictionary, "class": name_of_class},
                ...
            }

            or

            instrument_dict = {
                name_of_instrument_1: name_of_class,
                name_of_instrument_2: name_of_class,
                ...
            }

            where name_of_class is either a class or a dictionary of the form
            {"class": name_of_class, "filepath": path_to_instr_file}

        instruments: dictionary of the form

            instruments = {
                name_of_instrument_1: instance_of_instrument_1,
                name_of_instrument_2: instance_of_instrument_2,
                ...
            }

        raise_errors: if True, errors are raised; if False, they are caught
            but not raised

    Returns:
        a dictionary updated_instruments that contains the old and the new
        instruments, and a dictionary loaded_failed that maps the names of
        instruments that were requested but could not be loaded to the
        corresponding error
    """
    if instruments is None:
        instruments = {}

    updated_instruments = {}
    updated_instruments.update(instruments)
    loaded_failed = {}

    for instrument_name, instrument_class_name in instrument_dict.items():
        instrument_settings = None
        module = None

        # check if instrument already exists
        if instrument_name in list(instruments.keys()) \
                and instrument_class_name == instruments[instrument_name].__name__:
            print(('WARNING: instrument {:s} already exists. Did not load!'.format(instrument_name)))
            loaded_failed[instrument_name] = instrument_name
        else:
            instrument_instance = None

            if isinstance(instrument_class_name, dict):
                if 'settings' in instrument_class_name:
                    instrument_settings = instrument_class_name['settings']
                instrument_filepath = str(instrument_class_name['filepath'])
                instrument_class_name = str(instrument_class_name['class'])
                path_to_module, _ = module_name_from_path(instrument_filepath)
                module = import_module(path_to_module)
                class_of_instrument = getattr(module, instrument_class_name)
                try:
                    if instrument_settings is None:
                        # this creates an instance of the class with default settings
                        instrument_instance = class_of_instrument(name=instrument_name)
                    else:
                        # this creates an instance of the class with custom settings
                        instrument_instance = class_of_instrument(name=instrument_name,
                                                                  settings=instrument_settings)
                except Exception as e:
                    loaded_failed[instrument_name] = e
                    if raise_errors:
                        raise e
                    continue
            elif isinstance(instrument_class_name, Instrument):
                instrument_class_name = instrument_class_name.__class__
                instrument_filepath = os.path.dirname(inspect.getfile(instrument_class_name))
                # here we should also create an instrument instance at some point
                # as in the other cases...
                # instrument_instance =
                raise NotImplementedError
            elif issubclass(instrument_class_name, Instrument):
                class_of_instrument = instrument_class_name
                if instrument_settings is None:
                    # this creates an instance of the class with default settings
                    instrument_instance = class_of_instrument(name=instrument_name)
                else:
                    # this creates an instance of the class with custom settings
                    instrument_instance = class_of_instrument(name=instrument_name,
                                                              settings=instrument_settings)

            updated_instruments[instrument_name] = instrument_instance

    return updated_instruments, loaded_failed
python
67482e5157fcd1c40705e5c2cacfb93564703ed0
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/instruments.py#L244-L349
train
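A hedged sketch of calling Instrument.load_and_append with a class passed directly (the issubclass branch); DummyInstrument is a hypothetical concrete Instrument subclass, not part of the source, and the self-less signature suggests the method can be called statically:

    # DummyInstrument is hypothetical; a real subclass would define its own
    # default settings as pylabcontrol instruments normally do.
    instruments, failed = Instrument.load_and_append({'my_dummy': DummyInstrument},
                                                     raise_errors=True)
    # On success: instruments == {'my_dummy': <DummyInstrument instance>}, failed == {}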
johnnoone/json-spec
src/jsonspec/validators/draft04.py
Draft04Validator.fail
def fail(self, reason, obj, pointer=None):
    """
    Called when validation fails.
    """
    pointer = pointer_join(pointer)
    err = ValidationError(reason, obj, pointer)
    if self.fail_fast:
        raise err
    else:
        self.errors.append(err)
    return err
python
f91981724cea0c366bd42a6670eb07bbe31c0e0c
https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/validators/draft04.py#L654-L664
train
nmdp-bioinformatics/SeqAnn
seqann/blast_cmd.py
get_locus
def get_locus(sequences, kir=False, verbose=False, refdata=None, evalue=10):
    """
    Gets the locus of the sequence by running blastn

    :param sequences: sequences to blast
    :param kir: bool whether the sequences are KIR or not
    :rtype: ``str``

    Example usage:

        >>> from Bio.Seq import Seq
        >>> from seqann.blast_cmd import get_locus
        >>> sequence = Seq('AGAGACTCTCCCGAGGATTTCGTGTACCAGTTTAAGGCCATGTGCTACTTCACC')
        >>> locus = get_locus(sequence)
    """
    if not refdata:
        refdata = ReferenceData()

    file_id = str(randomid())
    input_fasta = file_id + ".fasta"
    output_xml = file_id + ".xml"
    SeqIO.write(sequences, input_fasta, "fasta")
    blastn_cline = NcbiblastnCommandline(query=input_fasta,
                                         db=refdata.blastdb,
                                         evalue=evalue,
                                         outfmt=5,
                                         reward=1,
                                         penalty=-3,
                                         gapopen=5,
                                         gapextend=2,
                                         dust='yes',
                                         out=output_xml)
    stdout, stderr = blastn_cline()
    blast_qresult = SearchIO.read(output_xml, 'blast-xml')

    # Delete files
    cleanup(file_id)

    if len(blast_qresult.hits) == 0:
        return ''

    loci = []
    for i in range(0, 3):
        # NOTE: both branches are identical; the kir flag makes no
        # difference at this step
        if kir:
            loci.append(blast_qresult[i].id.split("*")[0])
        else:
            loci.append(blast_qresult[i].id.split("*")[0])

    locus = set(loci)
    if len(locus) == 1:
        if has_hla(loci[0]) or kir:
            return loci[0]
        else:
            return "HLA-" + loci[0]
    else:
        return ''
python
5ce91559b0a4fbe4fb7758e034eb258202632463
https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/blast_cmd.py#L187-L245
train
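Usage note: a minimal sketch of calling get_locus, assuming a local blastn install and a ReferenceData BLAST database are available (neither is shown in this record). The docstring's example passes a bare Seq, but SeqIO.write generally expects SeqRecord objects, so the sequence is wrapped here; the id 'query1' is a placeholder.

from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from seqann.blast_cmd import get_locus

record = SeqRecord(Seq('AGAGACTCTCCCGAGGATTTCGTGTACCAGTTTAAGGCCATGTGCTACTTCACC'),
                   id='query1')
locus = get_locus([record])
print(locus)  # e.g. 'HLA-A' when the top three hits agree on one locus, '' otherwise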
PeerAssets/pypeerassets
pypeerassets/kutil.py
Kutil.address
def address(self) -> str: '''generate an address from pubkey''' return str(self._public_key.to_address( net_query(self.network)) )
python
def address(self) -> str: '''generate an address from pubkey''' return str(self._public_key.to_address( net_query(self.network)) )
[ "def", "address", "(", "self", ")", "->", "str", ":", "return", "str", "(", "self", ".", "_public_key", ".", "to_address", "(", "net_query", "(", "self", ".", "network", ")", ")", ")" ]
generate an address from pubkey
[ "generate", "an", "address", "from", "pubkey" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/kutil.py#L51-L56
train
PeerAssets/pypeerassets
pypeerassets/kutil.py
Kutil.sign_transaction
def sign_transaction(self, txins: Union[TxOut], tx: MutableTransaction) -> MutableTransaction: '''sign the parent txn outputs P2PKH''' solver = P2pkhSolver(self._private_key) return tx.spend(txins, [solver for i in txins])
python
def sign_transaction(self, txins: Union[TxOut], tx: MutableTransaction) -> MutableTransaction: '''sign the parent txn outputs P2PKH''' solver = P2pkhSolver(self._private_key) return tx.spend(txins, [solver for i in txins])
[ "def", "sign_transaction", "(", "self", ",", "txins", ":", "Union", "[", "TxOut", "]", ",", "tx", ":", "MutableTransaction", ")", "->", "MutableTransaction", ":", "solver", "=", "P2pkhSolver", "(", "self", ".", "_private_key", ")", "return", "tx", ".", "spend", "(", "txins", ",", "[", "solver", "for", "i", "in", "txins", "]", ")" ]
sign the parent txn outputs P2PKH
[ "sign", "the", "parent", "txn", "outputs", "P2PKH" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/kutil.py#L64-L69
train
PeerAssets/pypeerassets
pypeerassets/provider/cryptoid.py
Cryptoid.format_name
def format_name(net: str) -> str: '''take care of the specifics of the cryptoid naming system''' if net.startswith('t') or 'testnet' in net: net = net[1:] + '-test' else: net = net return net
python
def format_name(net: str) -> str: '''take care of the specifics of the cryptoid naming system''' if net.startswith('t') or 'testnet' in net: net = net[1:] + '-test' else: net = net return net
[ "def", "format_name", "(", "net", ":", "str", ")", "->", "str", ":", "if", "net", ".", "startswith", "(", "'t'", ")", "or", "'testnet'", "in", "net", ":", "net", "=", "net", "[", "1", ":", "]", "+", "'-test'", "else", ":", "net", "=", "net", "return", "net" ]
take care of the specifics of the cryptoid naming system
[ "take", "care", "of", "specifics", "of", "cryptoid", "naming", "system" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/cryptoid.py#L33-L41
train
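Usage note: format_name is a pure function, so its naming rule can be demonstrated standalone; the coin tickers below are illustrative.

def format_name(net):
    # verbatim logic from the record above
    if net.startswith('t') or 'testnet' in net:
        net = net[1:] + '-test'
    return net

print(format_name('ppc'))   # -> 'ppc'      (mainnet name passes through)
print(format_name('tppc'))  # -> 'ppc-test' (leading 't' becomes a '-test' suffix)

Note that a name containing 'testnet' but not starting with 't' would also lose its first character through net[1:], which reads like a latent quirk rather than intended behavior.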
PeerAssets/pypeerassets
pypeerassets/provider/cryptoid.py
Cryptoid.get_url
def get_url(url: str) -> Union[dict, int, float, str]: '''Perform a GET request for the url and return a dictionary parsed from the JSON response.''' request = Request(url, headers={"User-Agent": "pypeerassets"}) response = cast(HTTPResponse, urlopen(request)) if response.status != 200: raise Exception(response.reason) return json.loads(response.read().decode())
python
def get_url(url: str) -> Union[dict, int, float, str]: '''Perform a GET request for the url and return a dictionary parsed from the JSON response.''' request = Request(url, headers={"User-Agent": "pypeerassets"}) response = cast(HTTPResponse, urlopen(request)) if response.status != 200: raise Exception(response.reason) return json.loads(response.read().decode())
[ "def", "get_url", "(", "url", ":", "str", ")", "->", "Union", "[", "dict", ",", "int", ",", "float", ",", "str", "]", ":", "request", "=", "Request", "(", "url", ",", "headers", "=", "{", "\"User-Agent\"", ":", "\"pypeerassets\"", "}", ")", "response", "=", "cast", "(", "HTTPResponse", ",", "urlopen", "(", "request", ")", ")", "if", "response", ".", "status", "!=", "200", ":", "raise", "Exception", "(", "response", ".", "reason", ")", "return", "json", ".", "loads", "(", "response", ".", "read", "(", ")", ".", "decode", "(", ")", ")" ]
Perform a GET request for the url and return a dictionary parsed from the JSON response.
[ "Perform", "a", "GET", "request", "for", "the", "url", "and", "return", "a", "dictionary", "parsed", "from", "the", "JSON", "response", "." ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/cryptoid.py#L44-L52
train
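Usage note: the self-less signature suggests get_url is exposed as a static method. A hedged sketch follows; the query URL is illustrative only, and any endpoint returning JSON works.

from pypeerassets.provider.cryptoid import Cryptoid

summary = Cryptoid.get_url('https://chainz.cryptoid.info/explorer/api.dws?q=summary')
print(type(summary))  # parsed JSON body; raises on any non-200 response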
edoburu/django-template-analyzer
template_analyzer/djangoanalyzer.py
_scan_nodes
def _scan_nodes(nodelist, context, instance_types, current_block=None, ignore_blocks=None): """ Loop through all nodes of a single scope level. :type nodelist: django.template.base.NodeList :type current_block: BlockNode :param instance_types: The instance to look for """ results = [] for node in nodelist: # first check if this is the object instance to look for. if isinstance(node, instance_types): results.append(node) # if it's a Constant Include Node ({% include "template_name.html" %}) # scan the child template elif isinstance(node, IncludeNode): # if there's an error in the to-be-included template, node.template becomes None if node.template: # This is required for Django 1.7 but works on older version too # Check if it quacks like a template object, if not # presume is a template path and get the object out of it if not callable(getattr(node.template, 'render', None)): template = get_template(node.template.var) else: template = node.template if TemplateAdapter is not None and isinstance(template, TemplateAdapter): # Django 1.8: received a new object, take original template template = template.template results += _scan_nodes(template.nodelist, context, instance_types, current_block) # handle {% extends ... %} tags elif isinstance(node, ExtendsNode): results += _extend_nodelist(node, context, instance_types) # in block nodes we have to scan for super blocks elif isinstance(node, VariableNode) and current_block: if node.filter_expression.token == 'block.super': # Found a {{ block.super }} line if not hasattr(current_block.parent, 'nodelist'): raise TemplateSyntaxError( "Cannot read {{{{ block.super }}}} for {{% block {0} %}}, " "the parent template doesn't have this block.".format( current_block.name )) results += _scan_nodes(current_block.parent.nodelist, context, instance_types, current_block.parent) # ignore nested blocks which are already handled elif isinstance(node, BlockNode) and ignore_blocks and node.name in ignore_blocks: continue # if the node has the newly introduced 'child_nodelists' attribute, scan # those attributes for nodelists and recurse them elif hasattr(node, 'child_nodelists'): for nodelist_name in node.child_nodelists: if hasattr(node, nodelist_name): subnodelist = getattr(node, nodelist_name) if isinstance(subnodelist, NodeList): if isinstance(node, BlockNode): current_block = node results += _scan_nodes(subnodelist, context, instance_types, current_block) # else just scan the node for nodelist instance attributes else: for attr in dir(node): obj = getattr(node, attr) if isinstance(obj, NodeList): if isinstance(node, BlockNode): current_block = node results += _scan_nodes(obj, context, instance_types, current_block) return results
python
def _scan_nodes(nodelist, context, instance_types, current_block=None, ignore_blocks=None): """ Loop through all nodes of a single scope level. :type nodelist: django.template.base.NodeList :type current_block: BlockNode :param instance_types: The instance to look for """ results = [] for node in nodelist: # first check if this is the object instance to look for. if isinstance(node, instance_types): results.append(node) # if it's a Constant Include Node ({% include "template_name.html" %}) # scan the child template elif isinstance(node, IncludeNode): # if there's an error in the to-be-included template, node.template becomes None if node.template: # This is required for Django 1.7 but works on older version too # Check if it quacks like a template object, if not # presume is a template path and get the object out of it if not callable(getattr(node.template, 'render', None)): template = get_template(node.template.var) else: template = node.template if TemplateAdapter is not None and isinstance(template, TemplateAdapter): # Django 1.8: received a new object, take original template template = template.template results += _scan_nodes(template.nodelist, context, instance_types, current_block) # handle {% extends ... %} tags elif isinstance(node, ExtendsNode): results += _extend_nodelist(node, context, instance_types) # in block nodes we have to scan for super blocks elif isinstance(node, VariableNode) and current_block: if node.filter_expression.token == 'block.super': # Found a {{ block.super }} line if not hasattr(current_block.parent, 'nodelist'): raise TemplateSyntaxError( "Cannot read {{{{ block.super }}}} for {{% block {0} %}}, " "the parent template doesn't have this block.".format( current_block.name )) results += _scan_nodes(current_block.parent.nodelist, context, instance_types, current_block.parent) # ignore nested blocks which are already handled elif isinstance(node, BlockNode) and ignore_blocks and node.name in ignore_blocks: continue # if the node has the newly introduced 'child_nodelists' attribute, scan # those attributes for nodelists and recurse them elif hasattr(node, 'child_nodelists'): for nodelist_name in node.child_nodelists: if hasattr(node, nodelist_name): subnodelist = getattr(node, nodelist_name) if isinstance(subnodelist, NodeList): if isinstance(node, BlockNode): current_block = node results += _scan_nodes(subnodelist, context, instance_types, current_block) # else just scan the node for nodelist instance attributes else: for attr in dir(node): obj = getattr(node, attr) if isinstance(obj, NodeList): if isinstance(node, BlockNode): current_block = node results += _scan_nodes(obj, context, instance_types, current_block) return results
[ "def", "_scan_nodes", "(", "nodelist", ",", "context", ",", "instance_types", ",", "current_block", "=", "None", ",", "ignore_blocks", "=", "None", ")", ":", "results", "=", "[", "]", "for", "node", "in", "nodelist", ":", "# first check if this is the object instance to look for.", "if", "isinstance", "(", "node", ",", "instance_types", ")", ":", "results", ".", "append", "(", "node", ")", "# if it's a Constant Include Node ({% include \"template_name.html\" %})", "# scan the child template", "elif", "isinstance", "(", "node", ",", "IncludeNode", ")", ":", "# if there's an error in the to-be-included template, node.template becomes None", "if", "node", ".", "template", ":", "# This is required for Django 1.7 but works on older version too", "# Check if it quacks like a template object, if not", "# presume is a template path and get the object out of it", "if", "not", "callable", "(", "getattr", "(", "node", ".", "template", ",", "'render'", ",", "None", ")", ")", ":", "template", "=", "get_template", "(", "node", ".", "template", ".", "var", ")", "else", ":", "template", "=", "node", ".", "template", "if", "TemplateAdapter", "is", "not", "None", "and", "isinstance", "(", "template", ",", "TemplateAdapter", ")", ":", "# Django 1.8: received a new object, take original template", "template", "=", "template", ".", "template", "results", "+=", "_scan_nodes", "(", "template", ".", "nodelist", ",", "context", ",", "instance_types", ",", "current_block", ")", "# handle {% extends ... %} tags", "elif", "isinstance", "(", "node", ",", "ExtendsNode", ")", ":", "results", "+=", "_extend_nodelist", "(", "node", ",", "context", ",", "instance_types", ")", "# in block nodes we have to scan for super blocks", "elif", "isinstance", "(", "node", ",", "VariableNode", ")", "and", "current_block", ":", "if", "node", ".", "filter_expression", ".", "token", "==", "'block.super'", ":", "# Found a {{ block.super }} line", "if", "not", "hasattr", "(", "current_block", ".", "parent", ",", "'nodelist'", ")", ":", "raise", "TemplateSyntaxError", "(", "\"Cannot read {{{{ block.super }}}} for {{% block {0} %}}, \"", "\"the parent template doesn't have this block.\"", ".", "format", "(", "current_block", ".", "name", ")", ")", "results", "+=", "_scan_nodes", "(", "current_block", ".", "parent", ".", "nodelist", ",", "context", ",", "instance_types", ",", "current_block", ".", "parent", ")", "# ignore nested blocks which are already handled", "elif", "isinstance", "(", "node", ",", "BlockNode", ")", "and", "ignore_blocks", "and", "node", ".", "name", "in", "ignore_blocks", ":", "continue", "# if the node has the newly introduced 'child_nodelists' attribute, scan", "# those attributes for nodelists and recurse them", "elif", "hasattr", "(", "node", ",", "'child_nodelists'", ")", ":", "for", "nodelist_name", "in", "node", ".", "child_nodelists", ":", "if", "hasattr", "(", "node", ",", "nodelist_name", ")", ":", "subnodelist", "=", "getattr", "(", "node", ",", "nodelist_name", ")", "if", "isinstance", "(", "subnodelist", ",", "NodeList", ")", ":", "if", "isinstance", "(", "node", ",", "BlockNode", ")", ":", "current_block", "=", "node", "results", "+=", "_scan_nodes", "(", "subnodelist", ",", "context", ",", "instance_types", ",", "current_block", ")", "# else just scan the node for nodelist instance attributes", "else", ":", "for", "attr", "in", "dir", "(", "node", ")", ":", "obj", "=", "getattr", "(", "node", ",", "attr", ")", "if", "isinstance", "(", "obj", ",", "NodeList", ")", ":", "if", "isinstance", "(", "node", 
",", "BlockNode", ")", ":", "current_block", "=", "node", "results", "+=", "_scan_nodes", "(", "obj", ",", "context", ",", "instance_types", ",", "current_block", ")", "return", "results" ]
Loop through all nodes of a single scope level. :type nodelist: django.template.base.NodeList :type current_block: BlockNode :param instance_types: The instance to look for
[ "Loop", "through", "all", "nodes", "of", "a", "single", "scope", "level", "." ]
912916dadf68e5fb6bd3dbaa8e5dcad69d3086d0
https://github.com/edoburu/django-template-analyzer/blob/912916dadf68e5fb6bd3dbaa8e5dcad69d3086d0/template_analyzer/djangoanalyzer.py#L125-L191
train
edoburu/django-template-analyzer
template_analyzer/djangoanalyzer.py
get_node_instances
def get_node_instances(nodelist, instances): """ Find the nodes of a given instance. In contrast to the standard ``template.nodelist.get_nodes_by_type()`` method, this also looks into ``{% extends %}`` and ``{% include .. %}`` nodes to find all possible nodes of the given type. :param instances: A class Type, or tuple of types to find. :param nodelist: The Template object, or nodelist to scan. :returns: A list of Node objects which inherit from the list of given `instances` to find. :rtype: list """ context = _get_main_context(nodelist) # The Django 1.8 loader returns an adapter class; it wraps the original Template in a new object to be API compatible if TemplateAdapter is not None and isinstance(nodelist, TemplateAdapter): nodelist = nodelist.template return _scan_nodes(nodelist, context, instances)
python
def get_node_instances(nodelist, instances): """ Find the nodes of a given instance. In contrast to the standard ``template.nodelist.get_nodes_by_type()`` method, this also looks into ``{% extends %}`` and ``{% include .. %}`` nodes to find all possible nodes of the given type. :param instances: A class Type, or tuple of types to find. :param nodelist: The Template object, or nodelist to scan. :returns: A list of Node objects which inherit from the list of given `instances` to find. :rtype: list """ context = _get_main_context(nodelist) # The Django 1.8 loader returns an adapter class; it wraps the original Template in a new object to be API compatible if TemplateAdapter is not None and isinstance(nodelist, TemplateAdapter): nodelist = nodelist.template return _scan_nodes(nodelist, context, instances)
[ "def", "get_node_instances", "(", "nodelist", ",", "instances", ")", ":", "context", "=", "_get_main_context", "(", "nodelist", ")", "# The Django 1.8 loader returns an adapter class; it wraps the original Template in a new object to be API compatible", "if", "TemplateAdapter", "is", "not", "None", "and", "isinstance", "(", "nodelist", ",", "TemplateAdapter", ")", ":", "nodelist", "=", "nodelist", ".", "template", "return", "_scan_nodes", "(", "nodelist", ",", "context", ",", "instances", ")" ]
Find the nodes of a given instance. In contrast to the standard ``template.nodelist.get_nodes_by_type()`` method, this also looks into ``{% extends %}`` and ``{% include .. %}`` nodes to find all possible nodes of the given type. :param instances: A class Type, or tuple of types to find. :param nodelist: The Template object, or nodelist to scan. :returns: A list of Node objects which inherit from the list of given `instances` to find. :rtype: list
[ "Find", "the", "nodes", "of", "a", "given", "instance", "." ]
912916dadf68e5fb6bd3dbaa8e5dcad69d3086d0
https://github.com/edoburu/django-template-analyzer/blob/912916dadf68e5fb6bd3dbaa8e5dcad69d3086d0/template_analyzer/djangoanalyzer.py#L224-L243
train
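Usage note: a sketch of finding every {% block %} across an inheritance chain, assuming a working Django settings/template configuration; 'base.html' is a placeholder template name.

from django.template.loader import get_template
from django.template.loader_tags import BlockNode
from template_analyzer.djangoanalyzer import get_node_instances

template = get_template('base.html')
blocks = get_node_instances(template, BlockNode)
print(sorted(node.name for node in blocks))  # block names, including extended/included ones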
lreis2415/PyGeoC
pygeoc/utils.py
get_config_file
def get_config_file(): # type: () -> AnyStr """Get model configuration file name from argv""" parser = argparse.ArgumentParser(description="Read configuration file.") parser.add_argument('-ini', help="Full path of configuration file") args = parser.parse_args() ini_file = args.ini if not FileClass.is_file_exists(ini_file): print("Usage: -ini <full path to the configuration file.>") exit(-1) return ini_file
python
def get_config_file(): # type: () -> AnyStr """Get model configuration file name from argv""" parser = argparse.ArgumentParser(description="Read configuration file.") parser.add_argument('-ini', help="Full path of configuration file") args = parser.parse_args() ini_file = args.ini if not FileClass.is_file_exists(ini_file): print("Usage: -ini <full path to the configuration file.>") exit(-1) return ini_file
[ "def", "get_config_file", "(", ")", ":", "# type: () -> AnyStr", "parser", "=", "argparse", ".", "ArgumentParser", "(", "description", "=", "\"Read configuration file.\"", ")", "parser", ".", "add_argument", "(", "'-ini'", ",", "help", "=", "\"Full path of configuration file\"", ")", "args", "=", "parser", ".", "parse_args", "(", ")", "ini_file", "=", "args", ".", "ini", "if", "not", "FileClass", ".", "is_file_exists", "(", "ini_file", ")", ":", "print", "(", "\"Usage: -ini <full path to the configuration file.>\"", ")", "exit", "(", "-", "1", ")", "return", "ini_file" ]
Get model configuration file name from argv
[ "Get", "model", "configuration", "file", "name", "from", "argv" ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L1013-L1023
train
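Usage note: the configuration path is read from a required -ini command-line argument, so a script built on this helper is invoked like the following (run_model.py is a placeholder name):

#     python run_model.py -ini /full/path/to/model.ini
from pygeoc.utils import get_config_file

ini_file = get_config_file()  # parses -ini from sys.argv; prints a usage hint and exits if the file is missing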
lreis2415/PyGeoC
pygeoc/utils.py
MathClass.isnumerical
def isnumerical(x): # type: (...) -> bool """Check the input x is numerical or not. Examples: >>> MathClass.isnumerical('78') True >>> MathClass.isnumerical('1.e-5') True >>> MathClass.isnumerical(None) False >>> MathClass.isnumerical('a1.2') False >>> MathClass.isnumerical(['1.2']) False >>> MathClass.isnumerical(numpy.float64(1.2)) True """ try: xx = float(x) except TypeError: return False except ValueError: return False except Exception: return False else: return True
python
def isnumerical(x): # type: (...) -> bool """Check the input x is numerical or not. Examples: >>> MathClass.isnumerical('78') True >>> MathClass.isnumerical('1.e-5') True >>> MathClass.isnumerical(None) False >>> MathClass.isnumerical('a1.2') False >>> MathClass.isnumerical(['1.2']) False >>> MathClass.isnumerical(numpy.float64(1.2)) True """ try: xx = float(x) except TypeError: return False except ValueError: return False except Exception: return False else: return True
[ "def", "isnumerical", "(", "x", ")", ":", "# type: (...) -> bool", "try", ":", "xx", "=", "float", "(", "x", ")", "except", "TypeError", ":", "return", "False", "except", "ValueError", ":", "return", "False", "except", "Exception", ":", "return", "False", "else", ":", "return", "True" ]
Check the input x is numerical or not. Examples: >>> MathClass.isnumerical('78') True >>> MathClass.isnumerical('1.e-5') True >>> MathClass.isnumerical(None) False >>> MathClass.isnumerical('a1.2') False >>> MathClass.isnumerical(['1.2']) False >>> MathClass.isnumerical(numpy.float64(1.2)) True
[ "Check", "the", "input", "x", "is", "numerical", "or", "not", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L111-L139
train
lreis2415/PyGeoC
pygeoc/utils.py
MathClass.rsquare
def rsquare(obsvalues, # type: Union[numpy.ndarray, List[Union[float, int]]] simvalues # type: Union[numpy.ndarray, List[Union[float, int]]] ): # type: (...) -> Union[float, numpy.ScalarType] """Calculate Coefficient of determination. Same as the square of the Pearson correlation coefficient (r), and, the same as the built-in Excel function RSQ(). Programmed according to equation (1) in Legates, D.R. and G.J. McCabe, 1999. Evaluating the use of "goodness of fit" measures in hydrologic and hydroclimatic model validation. Water Resources Research 35:233-241. Args: obsvalues: observed values array simvalues: simulated values array Examples: >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\ 4.00, 2.24, 29.28, 5.88, 0.86, 13.21] >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\ 2.78, 2.76, 13.40, 2.70, 2.09, 1.62] >>> MathClass.rsquare(obs, sim) # doctest: +ELLIPSIS 0.7528851650345053... Returns: R-square value, or raise exception """ if len(obsvalues) != len(simvalues): raise ValueError("The size of observed and simulated values must be " "the same for R-square calculation!") if not isinstance(obsvalues, numpy.ndarray): obsvalues = numpy.array(obsvalues) if not isinstance(simvalues, numpy.ndarray): simvalues = numpy.array(simvalues) obs_avg = numpy.mean(obsvalues) pred_avg = numpy.mean(simvalues) obs_minus_avg_sq = numpy.sum((obsvalues - obs_avg) ** 2) pred_minus_avg_sq = numpy.sum((simvalues - pred_avg) ** 2) obs_pred_minus_avgs = numpy.sum((obsvalues - obs_avg) * (simvalues - pred_avg)) # Calculate R-square yy = obs_minus_avg_sq ** 0.5 * pred_minus_avg_sq ** 0.5 if MathClass.floatequal(yy, 0.): return 1. return (obs_pred_minus_avgs / yy) ** 2.
python
def rsquare(obsvalues, # type: Union[numpy.ndarray, List[Union[float, int]]] simvalues # type: Union[numpy.ndarray, List[Union[float, int]]] ): # type: (...) -> Union[float, numpy.ScalarType] """Calculate Coefficient of determination. Same as the square of the Pearson correlation coefficient (r), and, the same as the built-in Excel function RSQ(). Programmed according to equation (1) in Legates, D.R. and G.J. McCabe, 1999. Evaluating the use of "goodness of fit" measures in hydrologic and hydroclimatic model validation. Water Resources Research 35:233-241. Args: obsvalues: observed values array simvalues: simulated values array Examples: >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\ 4.00, 2.24, 29.28, 5.88, 0.86, 13.21] >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\ 2.78, 2.76, 13.40, 2.70, 2.09, 1.62] >>> MathClass.rsquare(obs, sim) # doctest: +ELLIPSIS 0.7528851650345053... Returns: R-square value, or raise exception """ if len(obsvalues) != len(simvalues): raise ValueError("The size of observed and simulated values must be " "the same for R-square calculation!") if not isinstance(obsvalues, numpy.ndarray): obsvalues = numpy.array(obsvalues) if not isinstance(simvalues, numpy.ndarray): simvalues = numpy.array(simvalues) obs_avg = numpy.mean(obsvalues) pred_avg = numpy.mean(simvalues) obs_minus_avg_sq = numpy.sum((obsvalues - obs_avg) ** 2) pred_minus_avg_sq = numpy.sum((simvalues - pred_avg) ** 2) obs_pred_minus_avgs = numpy.sum((obsvalues - obs_avg) * (simvalues - pred_avg)) # Calculate R-square yy = obs_minus_avg_sq ** 0.5 * pred_minus_avg_sq ** 0.5 if MathClass.floatequal(yy, 0.): return 1. return (obs_pred_minus_avgs / yy) ** 2.
[ "def", "rsquare", "(", "obsvalues", ",", "# type: Union[numpy.ndarray, List[Union[float, int]]]", "simvalues", "# type: Union[numpy.ndarray, List[Union[float, int]]]", ")", ":", "# type: (...) -> Union[float, numpy.ScalarType]", "if", "len", "(", "obsvalues", ")", "!=", "len", "(", "simvalues", ")", ":", "raise", "ValueError", "(", "\"The size of observed and simulated values must be \"", "\"the same for R-square calculation!\"", ")", "if", "not", "isinstance", "(", "obsvalues", ",", "numpy", ".", "ndarray", ")", ":", "obsvalues", "=", "numpy", ".", "array", "(", "obsvalues", ")", "if", "not", "isinstance", "(", "simvalues", ",", "numpy", ".", "ndarray", ")", ":", "simvalues", "=", "numpy", ".", "array", "(", "simvalues", ")", "obs_avg", "=", "numpy", ".", "mean", "(", "obsvalues", ")", "pred_avg", "=", "numpy", ".", "mean", "(", "simvalues", ")", "obs_minus_avg_sq", "=", "numpy", ".", "sum", "(", "(", "obsvalues", "-", "obs_avg", ")", "**", "2", ")", "pred_minus_avg_sq", "=", "numpy", ".", "sum", "(", "(", "simvalues", "-", "pred_avg", ")", "**", "2", ")", "obs_pred_minus_avgs", "=", "numpy", ".", "sum", "(", "(", "obsvalues", "-", "obs_avg", ")", "*", "(", "simvalues", "-", "pred_avg", ")", ")", "# Calculate R-square", "yy", "=", "obs_minus_avg_sq", "**", "0.5", "*", "pred_minus_avg_sq", "**", "0.5", "if", "MathClass", ".", "floatequal", "(", "yy", ",", "0.", ")", ":", "return", "1.", "return", "(", "obs_pred_minus_avgs", "/", "yy", ")", "**", "2." ]
Calculate Coefficient of determination. Same as the square of the Pearson correlation coefficient (r), and, the same as the built-in Excel function RSQ(). Programmed according to equation (1) in Legates, D.R. and G.J. McCabe, 1999. Evaluating the use of "goodness of fit" measures in hydrologic and hydroclimatic model validation. Water Resources Research 35:233-241. Args: obsvalues: observed values array simvalues: simulated values array Examples: >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\ 4.00, 2.24, 29.28, 5.88, 0.86, 13.21] >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\ 2.78, 2.76, 13.40, 2.70, 2.09, 1.62] >>> MathClass.rsquare(obs, sim) # doctest: +ELLIPSIS 0.7528851650345053... Returns: R-square value, or raise exception
[ "Calculate", "Coefficient", "of", "determination", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L240-L284
train
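Worked cross-check: because the implementation squares the normalized covariance, the result is exactly the squared Pearson correlation coefficient, so the doctest value can be reproduced with numpy alone.

import numpy

obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96, 4.00, 2.24, 29.28, 5.88, 0.86, 13.21]
sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85, 2.78, 2.76, 13.40, 2.70, 2.09, 1.62]
r = numpy.corrcoef(obs, sim)[0, 1]  # Pearson correlation coefficient
print(r ** 2)  # ~0.752885..., matching MathClass.rsquare(obs, sim)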
lreis2415/PyGeoC
pygeoc/utils.py
MathClass.rmse
def rmse(obsvalues, # type: Union[numpy.ndarray, List[Union[float, int]]] simvalues # type: Union[numpy.ndarray, List[Union[float, int]]] ): # type: (...) -> Union[float, numpy.ScalarType] """Calculate RMSE. Args: obsvalues: observed values array simvalues: simulated values array Examples: >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\ 4.00, 2.24, 29.28, 5.88, 0.86, 13.21] >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\ 2.78, 2.76, 13.40, 2.70, 2.09, 1.62] >>> MathClass.rmse(obs, sim) # doctest: +ELLIPSIS 5.590926715533082... Returns: RMSE value """ if len(obsvalues) != len(simvalues): raise ValueError("The size of observed and simulated values must be " "the same for RMSE calculation!") if not isinstance(obsvalues, numpy.ndarray): obsvalues = numpy.array(obsvalues) if not isinstance(simvalues, numpy.ndarray): simvalues = numpy.array(simvalues) return numpy.sqrt(numpy.mean((obsvalues - simvalues) ** 2.))
python
def rmse(obsvalues, # type: Union[numpy.ndarray, List[Union[float, int]]] simvalues # type: Union[numpy.ndarray, List[Union[float, int]]] ): # type: (...) -> Union[float, numpy.ScalarType] """Calculate RMSE. Args: obsvalues: observed values array simvalues: simulated values array Examples: >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\ 4.00, 2.24, 29.28, 5.88, 0.86, 13.21] >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\ 2.78, 2.76, 13.40, 2.70, 2.09, 1.62] >>> MathClass.rmse(obs, sim) # doctest: +ELLIPSIS 5.590926715533082... Returns: RMSE value """ if len(obsvalues) != len(simvalues): raise ValueError("The size of observed and simulated values must be " "the same for RMSE calculation!") if not isinstance(obsvalues, numpy.ndarray): obsvalues = numpy.array(obsvalues) if not isinstance(simvalues, numpy.ndarray): simvalues = numpy.array(simvalues) return numpy.sqrt(numpy.mean((obsvalues - simvalues) ** 2.))
[ "def", "rmse", "(", "obsvalues", ",", "# type: Union[numpy.ndarray, List[Union[float, int]]]", "simvalues", "# type: Union[numpy.ndarray, List[Union[float, int]]]", ")", ":", "# type: (...) -> Union[float, numpy.ScalarType]", "if", "len", "(", "obsvalues", ")", "!=", "len", "(", "simvalues", ")", ":", "raise", "ValueError", "(", "\"The size of observed and simulated values must be \"", "\"the same for R-square calculation!\"", ")", "if", "not", "isinstance", "(", "obsvalues", ",", "numpy", ".", "ndarray", ")", ":", "obsvalues", "=", "numpy", ".", "array", "(", "obsvalues", ")", "if", "not", "isinstance", "(", "simvalues", ",", "numpy", ".", "ndarray", ")", ":", "simvalues", "=", "numpy", ".", "array", "(", "simvalues", ")", "return", "numpy", ".", "sqrt", "(", "numpy", ".", "mean", "(", "(", "obsvalues", "-", "simvalues", ")", "**", "2.", ")", ")" ]
Calculate RMSE. Args: obsvalues: observed values array simvalues: simulated values array Examples: >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\ 4.00, 2.24, 29.28, 5.88, 0.86, 13.21] >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\ 2.78, 2.76, 13.40, 2.70, 2.09, 1.62] >>> MathClass.rmse(obs, sim) # doctest: +ELLIPSIS 5.590926715533082... Returns: RMSE value
[ "Calculate", "RMSE", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L287-L315
train
lreis2415/PyGeoC
pygeoc/utils.py
MathClass.pbias
def pbias(obsvalues, # type: Union[numpy.ndarray, List[Union[float, int]]] simvalues # type: Union[numpy.ndarray, List[Union[float, int]]] ): # type: (...) -> Union[float, numpy.ScalarType] """Calculate PBIAS, or percent model bias. Args: obsvalues: observed values array simvalues: simulated values array Examples: >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\ 4.00, 2.24, 29.28, 5.88, 0.86, 13.21] >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\ 2.78, 2.76, 13.40, 2.70, 2.09, 1.62] >>> MathClass.pbias(obs, sim) # doctest: +ELLIPSIS 35.46099290780142... Returns: PBIAS value (percentage), or raise exception """ if len(obsvalues) != len(simvalues): raise ValueError("The size of observed and simulated values must be" " the same for PBIAS calculation!") return sum(map(lambda x, y: (x - y) * 100, obsvalues, simvalues)) / sum(obsvalues)
python
def pbias(obsvalues, # type: Union[numpy.ndarray, List[Union[float, int]]] simvalues # type: Union[numpy.ndarray, List[Union[float, int]]] ): # type: (...) -> Union[float, numpy.ScalarType] """Calculate PBIAS, or percent model bias. Args: obsvalues: observed values array simvalues: simulated values array Examples: >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\ 4.00, 2.24, 29.28, 5.88, 0.86, 13.21] >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\ 2.78, 2.76, 13.40, 2.70, 2.09, 1.62] >>> MathClass.pbias(obs, sim) # doctest: +ELLIPSIS 35.46099290780142... Returns: PBIAS value (percentage), or raise exception """ if len(obsvalues) != len(simvalues): raise ValueError("The size of observed and simulated values must be" " the same for PBIAS calculation!") return sum(map(lambda x, y: (x - y) * 100, obsvalues, simvalues)) / sum(obsvalues)
[ "def", "pbias", "(", "obsvalues", ",", "# type: Union[numpy.ndarray, List[Union[float, int]]]", "simvalues", "# type: Union[numpy.ndarray, List[Union[float, int]]]", ")", ":", "# type: (...) -> Union[float, numpy.ScalarType]", "if", "len", "(", "obsvalues", ")", "!=", "len", "(", "simvalues", ")", ":", "raise", "ValueError", "(", "\"The size of observed and simulated values must be\"", "\" the same for PBIAS calculation!\"", ")", "return", "sum", "(", "map", "(", "lambda", "x", ",", "y", ":", "(", "x", "-", "y", ")", "*", "100", ",", "obsvalues", ",", "simvalues", ")", ")", "/", "sum", "(", "obsvalues", ")" ]
Calculate PBIAS, or percent model bias. Args: obsvalues: observed values array simvalues: simulated values array Examples: >>> obs = [2.92, 2.75, 2.01, 1.09, 2.87, 1.43, 1.96,\ 4.00, 2.24, 29.28, 5.88, 0.86, 13.21] >>> sim = [2.90, 2.87, 2.85, 2.83, 3.04, 2.81, 2.85,\ 2.78, 2.76, 13.40, 2.70, 2.09, 1.62] >>> MathClass.pbias(obs, sim) # doctest: +ELLIPSIS 35.46099290780142... Returns: PBIAS value (percentage), or raise exception
[ "Calculate", "PBIAS", "or", "percent", "model", "bias", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L318-L342
train
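Worked example: the map/lambda expression reduces to 100 * sum(obs - sim) / sum(obs), so with this sign convention a positive PBIAS indicates the model underestimates.

obs = [10.0, 20.0, 30.0]  # sum(obs) = 60
sim = [8.0, 18.0, 28.0]   # each simulated value is 2 too low
# sum of (obs - sim) * 100 = (2 + 2 + 2) * 100 = 600; 600 / 60 = 10.0
print(sum((o - s) * 100 for o, s in zip(obs, sim)) / sum(obs))  # 10.0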
lreis2415/PyGeoC
pygeoc/utils.py
StringClass.convert_str2num
def convert_str2num(unicode_str # type: Union[AnyStr, int, float, List[Union[AnyStr, float, int]], Tuple[Union[AnyStr, float, int]]] ): # type: (...) -> Union[AnyStr, int, float, List[Union[AnyStr, float, int]], Tuple[Union[AnyStr, float, int]]] """Convert string to string, integer, or float. Support tuple or list. Examples: >>> StringClass.convert_str2num('1.23') 1.23 >>> StringClass.convert_str2num(u'1.23') 1.23 >>> StringClass.convert_str2num(u'21.') 21 >>> StringClass.convert_str2num('abc123') 'abc123' >>> StringClass.convert_str2num((123, u'2.3', 3., 'abc', u'edf')) (123, 2.3, 3, 'abc', 'edf') >>> StringClass.convert_str2num([123, u'2.3', 3., 'abc', u'edf']) [123, 2.3, 3, 'abc', 'edf'] """ if MathClass.isnumerical(unicode_str): unicode_str = float(unicode_str) if unicode_str % 1. == 0.: unicode_str = int(unicode_str) return unicode_str elif is_string(unicode_str): return str(unicode_str) elif isinstance(unicode_str, tuple): return tuple(StringClass.convert_str2num(v) for v in unicode_str) elif isinstance(unicode_str, list): return list(StringClass.convert_str2num(v) for v in unicode_str) else: return unicode_str
python
def convert_str2num(unicode_str # type: Union[AnyStr, int, float, List[Union[AnyStr, float, int]], Tuple[Union[AnyStr, float, int]]] ): # type: (...) -> Union[AnyStr, int, float, List[Union[AnyStr, float, int]], Tuple[Union[AnyStr, float, int]]] """Convert string to string, integer, or float. Support tuple or list. Examples: >>> StringClass.convert_str2num('1.23') 1.23 >>> StringClass.convert_str2num(u'1.23') 1.23 >>> StringClass.convert_str2num(u'21.') 21 >>> StringClass.convert_str2num('abc123') 'abc123' >>> StringClass.convert_str2num((123, u'2.3', 3., 'abc', u'edf')) (123, 2.3, 3, 'abc', 'edf') >>> StringClass.convert_str2num([123, u'2.3', 3., 'abc', u'edf']) [123, 2.3, 3, 'abc', 'edf'] """ if MathClass.isnumerical(unicode_str): unicode_str = float(unicode_str) if unicode_str % 1. == 0.: unicode_str = int(unicode_str) return unicode_str elif is_string(unicode_str): return str(unicode_str) elif isinstance(unicode_str, tuple): return tuple(StringClass.convert_str2num(v) for v in unicode_str) elif isinstance(unicode_str, list): return list(StringClass.convert_str2num(v) for v in unicode_str) else: return unicode_str
[ "def", "convert_str2num", "(", "unicode_str", "# type: Union[AnyStr, int, float, List[Union[AnyStr, float, int]], Tuple[Union[AnyStr, float, int]]]", ")", ":", "# type: (...) -> Union[AnyStr, int, float, List[Union[AnyStr, float, int]], Tuple[Union[AnyStr, float, int]]]", "if", "MathClass", ".", "isnumerical", "(", "unicode_str", ")", ":", "unicode_str", "=", "float", "(", "unicode_str", ")", "if", "unicode_str", "%", "1.", "==", "0.", ":", "unicode_str", "=", "int", "(", "unicode_str", ")", "return", "unicode_str", "elif", "is_string", "(", "unicode_str", ")", ":", "return", "str", "(", "unicode_str", ")", "elif", "isinstance", "(", "unicode_str", ",", "tuple", ")", ":", "return", "tuple", "(", "StringClass", ".", "convert_str2num", "(", "v", ")", "for", "v", "in", "unicode_str", ")", "elif", "isinstance", "(", "unicode_str", ",", "list", ")", ":", "return", "list", "(", "StringClass", ".", "convert_str2num", "(", "v", ")", "for", "v", "in", "unicode_str", ")", "else", ":", "return", "unicode_str" ]
Convert string to string, integer, or float. Support tuple or list. Examples: >>> StringClass.convert_str2num('1.23') 1.23 >>> StringClass.convert_str2num(u'1.23') 1.23 >>> StringClass.convert_str2num(u'21.') 21 >>> StringClass.convert_str2num('abc123') 'abc123' >>> StringClass.convert_str2num((123, u'2.3', 3., 'abc', u'edf')) (123, 2.3, 3, 'abc', 'edf') >>> StringClass.convert_str2num([123, u'2.3', 3., 'abc', u'edf']) [123, 2.3, 3, 'abc', 'edf']
[ "Convert", "string", "to", "string", "integer", "or", "float", ".", "Support", "tuple", "or", "list", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L387-L418
train
lreis2415/PyGeoC
pygeoc/utils.py
StringClass.string_in_list
def string_in_list(tmp_str, strlist): # type: (AnyStr, List[AnyStr]) -> bool """Is tmp_str in strlist, case insensitive.""" new_str_list = strlist[:] for i, str_in_list in enumerate(new_str_list): new_str_list[i] = str_in_list.lower() return tmp_str.lower() in new_str_list
python
def string_in_list(tmp_str, strlist): # type: (AnyStr, List[AnyStr]) -> bool """Is tmp_str in strlist, case insensitive.""" new_str_list = strlist[:] for i, str_in_list in enumerate(new_str_list): new_str_list[i] = str_in_list.lower() return tmp_str.lower() in new_str_list
[ "def", "string_in_list", "(", "tmp_str", ",", "strlist", ")", ":", "# type: (AnyStr, List[AnyStr]) -> bool", "new_str_list", "=", "strlist", "[", ":", "]", "for", "i", ",", "str_in_list", "in", "enumerate", "(", "new_str_list", ")", ":", "new_str_list", "[", "i", "]", "=", "str_in_list", ".", "lower", "(", ")", "return", "tmp_str", ".", "lower", "(", ")", "in", "new_str_list" ]
Is tmp_str in strlist, case insensitive.
[ "Is", "tmp_str", "in", "strlist", "case", "insensitive", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L475-L481
train
lreis2415/PyGeoC
pygeoc/utils.py
FileClass.is_file_exists
def is_file_exists(filename): # type: (AnyStr) -> bool """Check the existence of file path.""" if filename is None or not os.path.exists(filename) or not os.path.isfile(filename): return False else: return True
python
def is_file_exists(filename): # type: (AnyStr) -> bool """Check the existence of file path.""" if filename is None or not os.path.exists(filename) or not os.path.isfile(filename): return False else: return True
[ "def", "is_file_exists", "(", "filename", ")", ":", "# type: (AnyStr) -> bool", "if", "filename", "is", "None", "or", "not", "os", ".", "path", ".", "exists", "(", "filename", ")", "or", "not", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "False", "else", ":", "return", "True" ]
Check the existence of file path.
[ "Check", "the", "existence", "of", "file", "path", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L588-L594
train
lreis2415/PyGeoC
pygeoc/utils.py
FileClass.is_dir_exists
def is_dir_exists(dirpath): # type: (AnyStr) -> bool """Check the existence of folder path.""" if dirpath is None or not os.path.exists(dirpath) or not os.path.isdir(dirpath): return False else: return True
python
def is_dir_exists(dirpath): # type: (AnyStr) -> bool """Check the existence of folder path.""" if dirpath is None or not os.path.exists(dirpath) or not os.path.isdir(dirpath): return False else: return True
[ "def", "is_dir_exists", "(", "dirpath", ")", ":", "# type: (AnyStr) -> bool", "if", "dirpath", "is", "None", "or", "not", "os", ".", "path", ".", "exists", "(", "dirpath", ")", "or", "not", "os", ".", "path", ".", "isdir", "(", "dirpath", ")", ":", "return", "False", "else", ":", "return", "True" ]
Check the existence of folder path.
[ "Check", "the", "existence", "of", "folder", "path", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L597-L603
train
lreis2415/PyGeoC
pygeoc/utils.py
FileClass.copy_files
def copy_files(filename, dstfilename): # type: (AnyStr, AnyStr) -> None """Copy files with the same name and different suffixes, such as ESRI Shapefile.""" FileClass.remove_files(dstfilename) dst_prefix = os.path.splitext(dstfilename)[0] pattern = os.path.splitext(filename)[0] + '.*' for f in glob.iglob(pattern): ext = os.path.splitext(f)[1] dst = dst_prefix + ext copy(f, dst)
python
def copy_files(filename, dstfilename): # type: (AnyStr, AnyStr) -> None """Copy files with the same name and different suffixes, such as ESRI Shapefile.""" FileClass.remove_files(dstfilename) dst_prefix = os.path.splitext(dstfilename)[0] pattern = os.path.splitext(filename)[0] + '.*' for f in glob.iglob(pattern): ext = os.path.splitext(f)[1] dst = dst_prefix + ext copy(f, dst)
[ "def", "copy_files", "(", "filename", ",", "dstfilename", ")", ":", "# type: (AnyStr, AnyStr) -> None", "FileClass", ".", "remove_files", "(", "dstfilename", ")", "dst_prefix", "=", "os", ".", "path", ".", "splitext", "(", "dstfilename", ")", "[", "0", "]", "pattern", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", "+", "'.*'", "for", "f", "in", "glob", ".", "iglob", "(", "pattern", ")", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "f", ")", "[", "1", "]", "dst", "=", "dst_prefix", "+", "ext", "copy", "(", "f", ",", "dst", ")" ]
Copy files with the same name and different suffixes, such as ESRI Shapefile.
[ "Copy", "files", "with", "the", "same", "name", "and", "different", "suffixes", "such", "as", "ESRI", "Shapefile", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L613-L622
train
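Usage note: because the copy is driven by a glob on the source's stem, every sidecar file travels with the main file, which is exactly what multi-file formats like ESRI Shapefile need. File names below are hypothetical.

from pygeoc.utils import FileClass

# given basin.shp, basin.shx, basin.dbf and basin.prj in the working directory,
# this copies all four files, not just the .shp:
FileClass.copy_files('basin.shp', 'basin_copy.shp')
# -> basin_copy.shp, basin_copy.shx, basin_copy.dbf, basin_copy.prj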
lreis2415/PyGeoC
pygeoc/utils.py
FileClass.remove_files
def remove_files(filename): # type: (AnyStr) -> None """ Delete all files with same root as fileName, i.e. regardless of suffix, such as ESRI shapefile """ pattern = os.path.splitext(filename)[0] + '.*' for f in glob.iglob(pattern): os.remove(f)
python
def remove_files(filename): # type: (AnyStr) -> None """ Delete all files with same root as fileName, i.e. regardless of suffix, such as ESRI shapefile """ pattern = os.path.splitext(filename)[0] + '.*' for f in glob.iglob(pattern): os.remove(f)
[ "def", "remove_files", "(", "filename", ")", ":", "# type: (AnyStr) -> None", "pattern", "=", "os", ".", "path", ".", "splitext", "(", "filename", ")", "[", "0", "]", "+", "'.*'", "for", "f", "in", "glob", ".", "iglob", "(", "pattern", ")", ":", "os", ".", "remove", "(", "f", ")" ]
Delete all files with same root as fileName, i.e. regardless of suffix, such as ESRI shapefile
[ "Delete", "all", "files", "with", "same", "root", "as", "fileName", "i", ".", "e", ".", "regardless", "of", "suffix", "such", "as", "ESRI", "shapefile" ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L625-L633
train
lreis2415/PyGeoC
pygeoc/utils.py
FileClass.is_up_to_date
def is_up_to_date(outfile, basedatetime): # type: (AnyStr, datetime) -> bool """Return true if outfile exists and is no older than base datetime.""" if os.path.exists(outfile): if os.path.getmtime(outfile) >= basedatetime: return True return False
python
def is_up_to_date(outfile, basedatetime): # type: (AnyStr, datetime) -> bool """Return true if outfile exists and is no older than base datetime.""" if os.path.exists(outfile): if os.path.getmtime(outfile) >= basedatetime: return True return False
[ "def", "is_up_to_date", "(", "outfile", ",", "basedatetime", ")", ":", "# type: (AnyStr, datetime) -> bool", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "if", "os", ".", "path", ".", "getmtime", "(", "outfile", ")", ">=", "basedatetime", ":", "return", "True", "return", "False" ]
Return true if outfile exists and is no older than base datetime.
[ "Return", "true", "if", "outfile", "exists", "and", "is", "no", "older", "than", "base", "datetime", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L636-L642
train
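Usage note: os.path.getmtime() returns a POSIX timestamp (float), so despite the "datetime" type comment, basedatetime must be numeric for the >= comparison to work. Passing another file's mtime, as in this sketch with placeholder paths, is the natural usage.

import os
from pygeoc.utils import FileClass

src, out = 'dem.tif', 'dem_filled.tif'
if not FileClass.is_up_to_date(out, os.path.getmtime(src)):
    print('%s is older than %s and should be rebuilt' % (out, src))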
lreis2415/PyGeoC
pygeoc/utils.py
FileClass.get_executable_fullpath
def get_executable_fullpath(name, dirname=None): # type: (AnyStr, Optional[AnyStr]) -> Optional[AnyStr] """get the full path of a given executable name""" if name is None: return None if is_string(name): name = str(name) else: raise RuntimeError('The input function name or path must be string!') if dirname is not None: # check the given path first dirname = os.path.abspath(dirname) fpth = dirname + os.sep + name if os.path.isfile(fpth): return fpth # If dirname is not specified, check the env then. if sysstr == 'Windows': findout = UtilClass.run_command('where %s' % name) else: findout = UtilClass.run_command('which %s' % name) if not findout or len(findout) == 0: print("%s is not included in the env path" % name) exit(-1) first_path = findout[0].split('\n')[0] if os.path.exists(first_path): return first_path return None
python
def get_executable_fullpath(name, dirname=None): # type: (AnyStr, Optional[AnyStr]) -> Optional[AnyStr] """get the full path of a given executable name""" if name is None: return None if is_string(name): name = str(name) else: raise RuntimeError('The input function name or path must be string!') if dirname is not None: # check the given path first dirname = os.path.abspath(dirname) fpth = dirname + os.sep + name if os.path.isfile(fpth): return fpth # If dirname is not specified, check the env then. if sysstr == 'Windows': findout = UtilClass.run_command('where %s' % name) else: findout = UtilClass.run_command('which %s' % name) if not findout or len(findout) == 0: print("%s is not included in the env path" % name) exit(-1) first_path = findout[0].split('\n')[0] if os.path.exists(first_path): return first_path return None
[ "def", "get_executable_fullpath", "(", "name", ",", "dirname", "=", "None", ")", ":", "# type: (AnyStr, Optional[AnyStr]) -> Optional[AnyStr]", "if", "name", "is", "None", ":", "return", "None", "if", "is_string", "(", "name", ")", ":", "name", "=", "str", "(", "name", ")", "else", ":", "raise", "RuntimeError", "(", "'The input function name or path must be string!'", ")", "if", "dirname", "is", "not", "None", ":", "# check the given path first", "dirname", "=", "os", ".", "path", ".", "abspath", "(", "dirname", ")", "fpth", "=", "dirname", "+", "os", ".", "sep", "+", "name", "if", "os", ".", "path", ".", "isfile", "(", "fpth", ")", ":", "return", "fpth", "# If dirname is not specified, check the env then.", "if", "sysstr", "==", "'Windows'", ":", "findout", "=", "UtilClass", ".", "run_command", "(", "'where %s'", "%", "name", ")", "else", ":", "findout", "=", "UtilClass", ".", "run_command", "(", "'which %s'", "%", "name", ")", "if", "not", "findout", "or", "len", "(", "findout", ")", "==", "0", ":", "print", "(", "\"%s is not included in the env path\"", "%", "name", ")", "exit", "(", "-", "1", ")", "first_path", "=", "findout", "[", "0", "]", ".", "split", "(", "'\\n'", ")", "[", "0", "]", "if", "os", ".", "path", ".", "exists", "(", "first_path", ")", ":", "return", "first_path", "return", "None" ]
get the full path of a given executable name
[ "get", "the", "full", "path", "of", "a", "given", "executable", "name" ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L645-L670
train
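Usage note: the explicit dirname is checked first, then 'where' (Windows) or 'which' (elsewhere) is consulted; the executable name and directory below are illustrative. Note the function exits the process when nothing is found, so callers should ensure the tool is installed.

from pygeoc.utils import FileClass

exe = FileClass.get_executable_fullpath('pitremove', dirname='/opt/taudem/bin')
print(exe)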
lreis2415/PyGeoC
pygeoc/utils.py
FileClass.get_file_fullpath
def get_file_fullpath(name, dirname=None): # type: (AnyStr, Optional[AnyStr]) -> Optional[AnyStr] """Return full path if available.""" if name is None: return None if is_string(name): name = str(name) else: raise RuntimeError('The input function name or path must be string!') for sep in ['\\', '/', os.sep]: # Loop all possible separators if sep in name: # name is full path already name = os.path.abspath(name) return name if dirname is not None: dirname = os.path.abspath(dirname) name = dirname + os.sep + name return name
python
def get_file_fullpath(name, dirname=None): # type: (AnyStr, Optional[AnyStr]) -> Optional[AnyStr] """Return full path if available.""" if name is None: return None if is_string(name): name = str(name) else: raise RuntimeError('The input function name or path must be string!') for sep in ['\\', '/', os.sep]: # Loop all possible separators if sep in name: # name is full path already name = os.path.abspath(name) return name if dirname is not None: dirname = os.path.abspath(dirname) name = dirname + os.sep + name return name
[ "def", "get_file_fullpath", "(", "name", ",", "dirname", "=", "None", ")", ":", "# type: (AnyStr, Optional[AnyStr]) -> Optional[AnyStr]", "if", "name", "is", "None", ":", "return", "None", "if", "is_string", "(", "name", ")", ":", "name", "=", "str", "(", "name", ")", "else", ":", "raise", "RuntimeError", "(", "'The input function name or path must be string!'", ")", "for", "sep", "in", "[", "'\\\\'", ",", "'/'", ",", "os", ".", "sep", "]", ":", "# Loop all possible separators", "if", "sep", "in", "name", ":", "# name is full path already", "name", "=", "os", ".", "path", ".", "abspath", "(", "name", ")", "return", "name", "if", "dirname", "is", "not", "None", ":", "dirname", "=", "os", ".", "path", ".", "abspath", "(", "dirname", ")", "name", "=", "dirname", "+", "os", ".", "sep", "+", "name", "return", "name" ]
Return full path if available.
[ "Return", "full", "path", "if", "available", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L673-L689
train
lreis2415/PyGeoC
pygeoc/utils.py
FileClass.get_filename_by_suffixes
def get_filename_by_suffixes(dir_src, suffixes): # type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> Optional[List[AnyStr]] """get file names with the given suffixes in the given directory Args: dir_src: directory path suffixes: wanted suffixes list; each suffix may be given with or without a leading '.' Returns: file names with the given suffixes as list """ list_files = os.listdir(dir_src) re_files = list() if is_string(suffixes): suffixes = [suffixes] if not isinstance(suffixes, list): return None for i, suf in enumerate(suffixes): if len(suf) >= 1 and suf[0] != '.': suffixes[i] = '.' + suf for f in list_files: name, ext = os.path.splitext(f) if StringClass.string_in_list(ext, suffixes): re_files.append(f) return re_files
python
def get_filename_by_suffixes(dir_src, suffixes): # type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> Optional[List[AnyStr]] """get file names with the given suffixes in the given directory Args: dir_src: directory path suffixes: wanted suffixes list; each suffix may be given with or without a leading '.' Returns: file names with the given suffixes as list """ list_files = os.listdir(dir_src) re_files = list() if is_string(suffixes): suffixes = [suffixes] if not isinstance(suffixes, list): return None for i, suf in enumerate(suffixes): if len(suf) >= 1 and suf[0] != '.': suffixes[i] = '.' + suf for f in list_files: name, ext = os.path.splitext(f) if StringClass.string_in_list(ext, suffixes): re_files.append(f) return re_files
[ "def", "get_filename_by_suffixes", "(", "dir_src", ",", "suffixes", ")", ":", "# type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> Optional[List[AnyStr]]", "list_files", "=", "os", ".", "listdir", "(", "dir_src", ")", "re_files", "=", "list", "(", ")", "if", "is_string", "(", "suffixes", ")", ":", "suffixes", "=", "[", "suffixes", "]", "if", "not", "isinstance", "(", "suffixes", ",", "list", ")", ":", "return", "None", "for", "i", ",", "suf", "in", "enumerate", "(", "suffixes", ")", ":", "if", "len", "(", "suf", ")", ">=", "1", "and", "suf", "[", "0", "]", "!=", "'.'", ":", "suffixes", "[", "i", "]", "=", "'.'", "+", "suf", "for", "f", "in", "list_files", ":", "name", ",", "ext", "=", "os", ".", "path", ".", "splitext", "(", "f", ")", "if", "StringClass", ".", "string_in_list", "(", "ext", ",", "suffixes", ")", ":", "re_files", ".", "append", "(", "f", ")", "return", "re_files" ]
get file names with the given suffixes in the given directory Args: dir_src: directory path suffixes: wanted suffixes list; each suffix may be given with or without a leading '.' Returns: file names with the given suffixes as list
[ "get", "file", "names", "with", "the", "given", "suffixes", "in", "the", "given", "directory" ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L692-L716
train
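Usage note: suffixes may omit the leading dot, and matching goes through StringClass.string_in_list, so it is case-insensitive ('tif' also picks up '.TIF' files). The directory path below is a placeholder.

from pygeoc.utils import FileClass

names = FileClass.get_filename_by_suffixes('/data/rasters', ['tif', '.txt'])
print(names)  # e.g. ['dem.tif', 'mask.TIF', 'readme.txt']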
lreis2415/PyGeoC
pygeoc/utils.py
FileClass.get_full_filename_by_suffixes
def get_full_filename_by_suffixes(dir_src, suffixes): # type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> Optional[List[AnyStr]] """get full file names with the given suffixes in the given directory Args: dir_src: directory path suffixes: wanted suffixes Returns: full file names with the given suffixes as list """ file_names = FileClass.get_filename_by_suffixes(dir_src, suffixes) if file_names is None: return None return list(dir_src + os.sep + name for name in file_names)
python
def get_full_filename_by_suffixes(dir_src, suffixes): # type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> Optional[List[AnyStr]] """get full file names with the given suffixes in the given directory Args: dir_src: directory path suffixes: wanted suffixes Returns: full file names with the given suffixes as list """ file_names = FileClass.get_filename_by_suffixes(dir_src, suffixes) if file_names is None: return None return list(dir_src + os.sep + name for name in file_names)
[ "def", "get_full_filename_by_suffixes", "(", "dir_src", ",", "suffixes", ")", ":", "# type: (AnyStr, Union[AnyStr, List[AnyStr]]) -> Optional[List[AnyStr]]", "file_names", "=", "FileClass", ".", "get_filename_by_suffixes", "(", "dir_src", ",", "suffixes", ")", "if", "file_names", "is", "None", ":", "return", "None", "return", "list", "(", "dir_src", "+", "os", ".", "sep", "+", "name", "for", "name", "in", "file_names", ")" ]
get full file names with the given suffixes in the given directory Args: dir_src: directory path suffixes: wanted suffixes Returns: full file names with the given suffixes as list
[ "get", "full", "file", "names", "with", "the", "given", "suffixes", "in", "the", "given", "directory" ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L719-L733
train
lreis2415/PyGeoC
pygeoc/utils.py
FileClass.get_core_name_without_suffix
def get_core_name_without_suffix(file_path): # type: (AnyStr) -> AnyStr """Return core file name without suffix. Examples: >>> FileClass.get_core_name_without_suffix(r'/home/zhulj/1990.01.30/test.01.tif') 'test.01' >>> FileClass.get_core_name_without_suffix(r'C:\zhulj\igsnrr\lreis.txt') 'lreis' >>> FileClass.get_core_name_without_suffix(r'C:\\zhulj\\igsnrr\\lreis.txt') 'lreis' >>> FileClass.get_core_name_without_suffix(r'C:/zhulj/igsnrr/lreis.txt') 'lreis' >>> FileClass.get_core_name_without_suffix(r'/home/zhulj/dta/taudem/aread8') 'aread8' >>> FileClass.get_core_name_without_suffix('singlename') 'singlename' >>> FileClass.get_core_name_without_suffix('singlename.txt') 'singlename' """ if '\\' in file_path: file_path = file_path.replace('\\', '/') file_name = os.path.basename(file_path) core_names = file_name.split('.') if len(core_names) > 1: core_names = core_names[:-1] if isinstance(core_names, list): return str('.'.join(core_names)) else: return str(core_names)
python
def get_core_name_without_suffix(file_path): # type: (AnyStr) -> AnyStr """Return core file name without suffix. Examples: >>> FileClass.get_core_name_without_suffix(r'/home/zhulj/1990.01.30/test.01.tif') 'test.01' >>> FileClass.get_core_name_without_suffix(r'C:\zhulj\igsnrr\lreis.txt') 'lreis' >>> FileClass.get_core_name_without_suffix(r'C:\\zhulj\\igsnrr\\lreis.txt') 'lreis' >>> FileClass.get_core_name_without_suffix(r'C:/zhulj/igsnrr/lreis.txt') 'lreis' >>> FileClass.get_core_name_without_suffix(r'/home/zhulj/dta/taudem/aread8') 'aread8' >>> FileClass.get_core_name_without_suffix('singlename') 'singlename' >>> FileClass.get_core_name_without_suffix('singlename.txt') 'singlename' """ if '\\' in file_path: file_path = file_path.replace('\\', '/') file_name = os.path.basename(file_path) core_names = file_name.split('.') if len(core_names) > 1: core_names = core_names[:-1] if isinstance(core_names, list): return str('.'.join(core_names)) else: return str(core_names)
[ "def", "get_core_name_without_suffix", "(", "file_path", ")", ":", "# type: (AnyStr) -> AnyStr", "if", "'\\\\'", "in", "file_path", ":", "file_path", "=", "file_path", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "file_name", "=", "os", ".", "path", ".", "basename", "(", "file_path", ")", "core_names", "=", "file_name", ".", "split", "(", "'.'", ")", "if", "len", "(", "core_names", ")", ">", "1", ":", "core_names", "=", "core_names", "[", ":", "-", "1", "]", "if", "isinstance", "(", "core_names", ",", "list", ")", ":", "return", "str", "(", "'.'", ".", "join", "(", "core_names", ")", ")", "else", ":", "return", "str", "(", "core_names", ")" ]
Return core file name without suffix. Examples: >>> FileClass.get_core_name_without_suffix(r'/home/zhulj/1990.01.30/test.01.tif') 'test.01' >>> FileClass.get_core_name_without_suffix(r'C:\zhulj\igsnrr\lreis.txt') 'lreis' >>> FileClass.get_core_name_without_suffix(r'C:\\zhulj\\igsnrr\\lreis.txt') 'lreis' >>> FileClass.get_core_name_without_suffix(r'C:/zhulj/igsnrr/lreis.txt') 'lreis' >>> FileClass.get_core_name_without_suffix(r'/home/zhulj/dta/taudem/aread8') 'aread8' >>> FileClass.get_core_name_without_suffix('singlename') 'singlename' >>> FileClass.get_core_name_without_suffix('singlename.txt') 'singlename'
[ "Return", "core", "file", "name", "without", "suffix", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L736-L765
train
lreis2415/PyGeoC
pygeoc/utils.py
FileClass.add_postfix
def add_postfix(file_path, postfix): # type: (AnyStr, AnyStr) -> AnyStr """Add postfix for a full file path. Examples: >>> FileClass.add_postfix('/home/zhulj/dem.tif', 'filled') '/home/zhulj/dem_filled.tif' >>> FileClass.add_postfix('dem.tif', 'filled') 'dem_filled.tif' >>> FileClass.add_postfix('dem', 'filled') 'dem_filled' """ cur_sep = '' for sep in ['\\', '/', os.sep]: if sep in file_path: cur_sep = sep break corename = FileClass.get_core_name_without_suffix(file_path) tmpspliter = os.path.basename(file_path).split('.') suffix = '' if len(tmpspliter) > 1: suffix = tmpspliter[-1] newname = os.path.dirname(file_path) + cur_sep + corename + '_' + postfix if suffix != '': newname += '.' + suffix return str(newname)
python
def add_postfix(file_path, postfix): # type: (AnyStr, AnyStr) -> AnyStr """Add postfix for a full file path. Examples: >>> FileClass.add_postfix('/home/zhulj/dem.tif', 'filled') '/home/zhulj/dem_filled.tif' >>> FileClass.add_postfix('dem.tif', 'filled') 'dem_filled.tif' >>> FileClass.add_postfix('dem', 'filled') 'dem_filled' """ cur_sep = '' for sep in ['\\', '/', os.sep]: if sep in file_path: cur_sep = sep break corename = FileClass.get_core_name_without_suffix(file_path) tmpspliter = os.path.basename(file_path).split('.') suffix = '' if len(tmpspliter) > 1: suffix = tmpspliter[-1] newname = os.path.dirname(file_path) + cur_sep + corename + '_' + postfix if suffix != '': newname += '.' + suffix return str(newname)
[ "def", "add_postfix", "(", "file_path", ",", "postfix", ")", ":", "# type: (AnyStr, AnyStr) -> AnyStr", "cur_sep", "=", "''", "for", "sep", "in", "[", "'\\\\'", ",", "'/'", ",", "os", ".", "sep", "]", ":", "if", "sep", "in", "file_path", ":", "cur_sep", "=", "sep", "break", "corename", "=", "FileClass", ".", "get_core_name_without_suffix", "(", "file_path", ")", "tmpspliter", "=", "os", ".", "path", ".", "basename", "(", "file_path", ")", ".", "split", "(", "'.'", ")", "suffix", "=", "''", "if", "len", "(", "tmpspliter", ")", ">", "1", ":", "suffix", "=", "tmpspliter", "[", "-", "1", "]", "newname", "=", "os", ".", "path", ".", "dirname", "(", "file_path", ")", "+", "cur_sep", "+", "corename", "+", "'_'", "+", "postfix", "if", "suffix", "!=", "''", ":", "newname", "+=", "'.'", "+", "suffix", "return", "str", "(", "newname", ")" ]
Add postfix for a full file path. Examples: >>> FileClass.add_postfix('/home/zhulj/dem.tif', 'filled') '/home/zhulj/dem_filled.tif' >>> FileClass.add_postfix('dem.tif', 'filled') 'dem_filled.tif' >>> FileClass.add_postfix('dem', 'filled') 'dem_filled'
[ "Add", "postfix", "for", "a", "full", "file", "path", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L768-L793
train
lreis2415/PyGeoC
pygeoc/utils.py
DateClass.day_of_year
def day_of_year(dt):
    # type: (datetime) -> int
    """Day index of year from 1 to 365 or 366"""
    sec = time.mktime(dt.timetuple())
    t = time.localtime(sec)
    return t.tm_yday
python
def day_of_year(dt):
    # type: (datetime) -> int
    """Day index of year from 1 to 365 or 366"""
    sec = time.mktime(dt.timetuple())
    t = time.localtime(sec)
    return t.tm_yday
[ "def", "day_of_year", "(", "dt", ")", ":", "# type: (int) -> int", "sec", "=", "time", ".", "mktime", "(", "dt", ".", "timetuple", "(", ")", ")", "t", "=", "time", ".", "localtime", "(", "sec", ")", "return", "t", ".", "tm_yday" ]
Day index of year from 1 to 365 or 366
[ "Day", "index", "of", "year", "from", "1", "to", "365", "or", "366" ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L834-L839
train
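A quick worked example, reading dt as a datetime (see the corrected type comment above); only the pygeoc import is an assumption here:

from datetime import datetime

from pygeoc.utils import DateClass

# 2024 is a leap year: 31 (Jan) + 29 (Feb) + 1 = day 61 of the year.
print(DateClass.day_of_year(datetime(2024, 3, 1)))  # 61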
lreis2415/PyGeoC
pygeoc/utils.py
UtilClass.run_command
def run_command(commands):
    # type: (Union[AnyStr, List[AnyStr]]) -> List[AnyStr]
    """Execute external command, and return the output lines list.
    On Windows, refer to `handling-subprocess-crash-in-windows`_.

    Args:
        commands: string or list

    Returns:
        output lines

    .. _handling-subprocess-crash-in-windows:
       https://stackoverflow.com/questions/5069224/handling-subprocess-crash-in-windows
    """
    # commands = StringClass.convert_unicode2str(commands)
    # print(commands)

    use_shell = False
    subprocess_flags = 0
    startupinfo = None
    if sysstr == 'Windows':
        if isinstance(commands, list):
            commands = ' '.join(str(c) for c in commands)
        import ctypes
        SEM_NOGPFAULTERRORBOX = 0x0002  # From MSDN
        ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
        subprocess_flags = 0x8000000  # win32con.CREATE_NO_WINDOW?
        # this startupinfo structure prevents a console window from popping up on Windows
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        # not sure if node outputs on stderr or stdout so capture both
    else:
        # for Linux/Unix OS, commands is better to be a list.
        if is_string(commands):
            use_shell = True
            # https://docs.python.org/2/library/subprocess.html
            # Using shell=True can be a security hazard.
        elif isinstance(commands, list):
            # the executable path may be enclosed with quotes, if not windows, delete the quotes
            if commands[0][0] == commands[0][-1] == '"' or \
                    commands[0][0] == commands[0][-1] == "'":
                commands[0] = commands[0][1:-1]
            for idx, v in enumerate(commands):
                if isinstance(v, int) or isinstance(v, float):
                    # Fix :TypeError: execv() arg 2 must contain only strings
                    commands[idx] = repr(v)

    print(commands)
    process = subprocess.Popen(commands, shell=use_shell, stdout=subprocess.PIPE,
                               stdin=open(os.devnull),
                               stderr=subprocess.STDOUT, universal_newlines=True,
                               startupinfo=startupinfo,
                               creationflags=subprocess_flags)
    out, err = process.communicate()
    recode = process.returncode

    if out is None:
        return ['']
    if recode is not None and recode != 0:
        raise subprocess.CalledProcessError(-1, commands,
                                            "ERROR occurred when running subprocess!")
    if '\n' in out:
        return out.split('\n')

    return [out]
python
def run_command(commands):
    # type: (Union[AnyStr, List[AnyStr]]) -> List[AnyStr]
    """Execute external command, and return the output lines list.
    On Windows, refer to `handling-subprocess-crash-in-windows`_.

    Args:
        commands: string or list

    Returns:
        output lines

    .. _handling-subprocess-crash-in-windows:
       https://stackoverflow.com/questions/5069224/handling-subprocess-crash-in-windows
    """
    # commands = StringClass.convert_unicode2str(commands)
    # print(commands)

    use_shell = False
    subprocess_flags = 0
    startupinfo = None
    if sysstr == 'Windows':
        if isinstance(commands, list):
            commands = ' '.join(str(c) for c in commands)
        import ctypes
        SEM_NOGPFAULTERRORBOX = 0x0002  # From MSDN
        ctypes.windll.kernel32.SetErrorMode(SEM_NOGPFAULTERRORBOX)
        subprocess_flags = 0x8000000  # win32con.CREATE_NO_WINDOW?
        # this startupinfo structure prevents a console window from popping up on Windows
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
        # not sure if node outputs on stderr or stdout so capture both
    else:
        # for Linux/Unix OS, commands is better to be a list.
        if is_string(commands):
            use_shell = True
            # https://docs.python.org/2/library/subprocess.html
            # Using shell=True can be a security hazard.
        elif isinstance(commands, list):
            # the executable path may be enclosed with quotes, if not windows, delete the quotes
            if commands[0][0] == commands[0][-1] == '"' or \
                    commands[0][0] == commands[0][-1] == "'":
                commands[0] = commands[0][1:-1]
            for idx, v in enumerate(commands):
                if isinstance(v, int) or isinstance(v, float):
                    # Fix :TypeError: execv() arg 2 must contain only strings
                    commands[idx] = repr(v)

    print(commands)
    process = subprocess.Popen(commands, shell=use_shell, stdout=subprocess.PIPE,
                               stdin=open(os.devnull),
                               stderr=subprocess.STDOUT, universal_newlines=True,
                               startupinfo=startupinfo,
                               creationflags=subprocess_flags)
    out, err = process.communicate()
    recode = process.returncode

    if out is None:
        return ['']
    if recode is not None and recode != 0:
        raise subprocess.CalledProcessError(-1, commands,
                                            "ERROR occurred when running subprocess!")
    if '\n' in out:
        return out.split('\n')

    return [out]
[ "def", "run_command", "(", "commands", ")", ":", "# type: (Union[AnyStr, List[AnyStr]]) -> List[AnyStr]", "# commands = StringClass.convert_unicode2str(commands)", "# print(commands)", "use_shell", "=", "False", "subprocess_flags", "=", "0", "startupinfo", "=", "None", "if", "sysstr", "==", "'Windows'", ":", "if", "isinstance", "(", "commands", ",", "list", ")", ":", "commands", "=", "' '", ".", "join", "(", "str", "(", "c", ")", "for", "c", "in", "commands", ")", "import", "ctypes", "SEM_NOGPFAULTERRORBOX", "=", "0x0002", "# From MSDN", "ctypes", ".", "windll", ".", "kernel32", ".", "SetErrorMode", "(", "SEM_NOGPFAULTERRORBOX", ")", "subprocess_flags", "=", "0x8000000", "# win32con.CREATE_NO_WINDOW?", "# this startupinfo structure prevents a console window from popping up on Windows", "startupinfo", "=", "subprocess", ".", "STARTUPINFO", "(", ")", "startupinfo", ".", "dwFlags", "|=", "subprocess", ".", "STARTF_USESHOWWINDOW", "# not sure if node outputs on stderr or stdout so capture both", "else", ":", "# for Linux/Unix OS, commands is better to be a list.", "if", "is_string", "(", "commands", ")", ":", "use_shell", "=", "True", "# https://docs.python.org/2/library/subprocess.html", "# Using shell=True can be a security hazard.", "elif", "isinstance", "(", "commands", ",", "list", ")", ":", "# the executable path may be enclosed with quotes, if not windows, delete the quotes", "if", "commands", "[", "0", "]", "[", "0", "]", "==", "commands", "[", "0", "]", "[", "-", "1", "]", "==", "'\"'", "or", "commands", "[", "0", "]", "[", "0", "]", "==", "commands", "[", "0", "]", "[", "-", "1", "]", "==", "\"'\"", ":", "commands", "[", "0", "]", "=", "commands", "[", "0", "]", "[", "1", ":", "-", "1", "]", "for", "idx", ",", "v", "in", "enumerate", "(", "commands", ")", ":", "if", "isinstance", "(", "v", ",", "int", ")", "or", "isinstance", "(", "v", ",", "float", ")", ":", "# Fix :TypeError: execv() arg 2 must contain only strings", "commands", "[", "idx", "]", "=", "repr", "(", "v", ")", "print", "(", "commands", ")", "process", "=", "subprocess", ".", "Popen", "(", "commands", ",", "shell", "=", "use_shell", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stdin", "=", "open", "(", "os", ".", "devnull", ")", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "universal_newlines", "=", "True", ",", "startupinfo", "=", "startupinfo", ",", "creationflags", "=", "subprocess_flags", ")", "out", ",", "err", "=", "process", ".", "communicate", "(", ")", "recode", "=", "process", ".", "returncode", "if", "out", "is", "None", ":", "return", "[", "''", "]", "if", "recode", "is", "not", "None", "and", "recode", "!=", "0", ":", "raise", "subprocess", ".", "CalledProcessError", "(", "-", "1", ",", "commands", ",", "\"ERROR occurred when running subprocess!\"", ")", "if", "'\\n'", "in", "out", ":", "return", "out", ".", "split", "(", "'\\n'", ")", "return", "[", "out", "]" ]
Execute external command, and return the output lines list.
On Windows, refer to `handling-subprocess-crash-in-windows`_.

Args:
    commands: string or list

Returns:
    output lines

.. _handling-subprocess-crash-in-windows:
   https://stackoverflow.com/questions/5069224/handling-subprocess-crash-in-windows
[ "Execute", "external", "command", "and", "return", "the", "output", "lines", "list", ".", "In", "windows", "refers", "to", "handling", "-", "subprocess", "-", "crash", "-", "in", "-", "windows", "_", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L850-L912
train
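A minimal usage sketch for run_command; the command itself is arbitrary. On Unix-like systems the list form keeps shell=False, while a plain string is run through the shell:

from pygeoc.utils import UtilClass

# Run a simple command and capture its stdout lines.
lines = UtilClass.run_command(['echo', 'hello world'])
print(lines)  # ['hello world', ''] -- the trailing newline yields an empty final element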
lreis2415/PyGeoC
pygeoc/utils.py
UtilClass.current_path
def current_path(local_function): """Get current path, refers to `how-do-i-get-the-path-of-the-current-executed-file-in-python`_ Examples: .. code-block:: Python from pygeoc.utils import UtilClass curpath = UtilClass.current_path(lambda: 0) .. _how-do-i-get-the-path-of-the-current-executed-file-in-python: https://stackoverflow.com/questions/2632199/how-do-i-get-the-path-of-the-current-executed-file-in-python/18489147#18489147 """ from inspect import getsourcefile fpath = getsourcefile(local_function) if fpath is None: return None return os.path.dirname(os.path.abspath(fpath))
python
def current_path(local_function): """Get current path, refers to `how-do-i-get-the-path-of-the-current-executed-file-in-python`_ Examples: .. code-block:: Python from pygeoc.utils import UtilClass curpath = UtilClass.current_path(lambda: 0) .. _how-do-i-get-the-path-of-the-current-executed-file-in-python: https://stackoverflow.com/questions/2632199/how-do-i-get-the-path-of-the-current-executed-file-in-python/18489147#18489147 """ from inspect import getsourcefile fpath = getsourcefile(local_function) if fpath is None: return None return os.path.dirname(os.path.abspath(fpath))
[ "def", "current_path", "(", "local_function", ")", ":", "from", "inspect", "import", "getsourcefile", "fpath", "=", "getsourcefile", "(", "local_function", ")", "if", "fpath", "is", "None", ":", "return", "None", "return", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "abspath", "(", "fpath", ")", ")" ]
Get current path, refers to `how-do-i-get-the-path-of-the-current-executed-file-in-python`_ Examples: .. code-block:: Python from pygeoc.utils import UtilClass curpath = UtilClass.current_path(lambda: 0) .. _how-do-i-get-the-path-of-the-current-executed-file-in-python: https://stackoverflow.com/questions/2632199/how-do-i-get-the-path-of-the-current-executed-file-in-python/18489147#18489147
[ "Get", "current", "path", "refers", "to", "how", "-", "do", "-", "i", "-", "get", "-", "the", "-", "path", "-", "of", "-", "the", "-", "current", "-", "executed", "-", "file", "-", "in", "-", "python", "_" ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L915-L932
train
lreis2415/PyGeoC
pygeoc/utils.py
UtilClass.mkdir
def mkdir(dir_path):
    # type: (AnyStr) -> None
    """Make directory if it does not exist"""
    if not os.path.isdir(dir_path) or not os.path.exists(dir_path):
        os.makedirs(dir_path)
python
def mkdir(dir_path):
    # type: (AnyStr) -> None
    """Make directory if it does not exist"""
    if not os.path.isdir(dir_path) or not os.path.exists(dir_path):
        os.makedirs(dir_path)
[ "def", "mkdir", "(", "dir_path", ")", ":", "# type: (AnyStr) -> None", "if", "not", "os", ".", "path", ".", "isdir", "(", "dir_path", ")", "or", "not", "os", ".", "path", ".", "exists", "(", "dir_path", ")", ":", "os", ".", "makedirs", "(", "dir_path", ")" ]
Make directory if it does not exist
[ "Make", "directory", "if", "not", "existed" ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L935-L939
train
lreis2415/PyGeoC
pygeoc/utils.py
UtilClass.rmmkdir
def rmmkdir(dir_path):
    # type: (AnyStr) -> None
    """If the directory exists, remove and recreate it; otherwise create it."""
    if not os.path.isdir(dir_path) or not os.path.exists(dir_path):
        os.makedirs(dir_path)
    else:
        rmtree(dir_path, True)
        os.makedirs(dir_path)
python
def rmmkdir(dir_path):
    # type: (AnyStr) -> None
    """If the directory exists, remove and recreate it; otherwise create it."""
    if not os.path.isdir(dir_path) or not os.path.exists(dir_path):
        os.makedirs(dir_path)
    else:
        rmtree(dir_path, True)
        os.makedirs(dir_path)
[ "def", "rmmkdir", "(", "dir_path", ")", ":", "# type: (AnyStr) -> None", "if", "not", "os", ".", "path", ".", "isdir", "(", "dir_path", ")", "or", "not", "os", ".", "path", ".", "exists", "(", "dir_path", ")", ":", "os", ".", "makedirs", "(", "dir_path", ")", "else", ":", "rmtree", "(", "dir_path", ",", "True", ")", "os", ".", "makedirs", "(", "dir_path", ")" ]
If the directory exists, remove and recreate it; otherwise create it.
[ "If", "directory", "existed", "then", "remove", "and", "make", ";", "else", "make", "it", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L942-L949
train
lreis2415/PyGeoC
pygeoc/utils.py
UtilClass.print_msg
def print_msg(contentlist):
    # type: (Union[AnyStr, List[AnyStr], Tuple[AnyStr]]) -> AnyStr
    """Concatenate a message list as a single string with line feeds."""
    if isinstance(contentlist, list) or isinstance(contentlist, tuple):
        return '\n'.join(contentlist)
    else:  # strings
        if len(contentlist) > 1 and contentlist[-1] != '\n':
            contentlist += '\n'
        return contentlist
python
def print_msg(contentlist):
    # type: (Union[AnyStr, List[AnyStr], Tuple[AnyStr]]) -> AnyStr
    """Concatenate a message list as a single string with line feeds."""
    if isinstance(contentlist, list) or isinstance(contentlist, tuple):
        return '\n'.join(contentlist)
    else:  # strings
        if len(contentlist) > 1 and contentlist[-1] != '\n':
            contentlist += '\n'
        return contentlist
[ "def", "print_msg", "(", "contentlist", ")", ":", "# type: (Union[AnyStr, List[AnyStr], Tuple[AnyStr]]) -> AnyStr", "if", "isinstance", "(", "contentlist", ",", "list", ")", "or", "isinstance", "(", "contentlist", ",", "tuple", ")", ":", "return", "'\\n'", ".", "join", "(", "contentlist", ")", "else", ":", "# strings", "if", "len", "(", "contentlist", ")", ">", "1", "and", "contentlist", "[", "-", "1", "]", "!=", "'\\n'", ":", "contentlist", "+=", "'\\n'", "return", "contentlist" ]
Concatenate a message list as a single string with line feeds.
[ "concatenate", "message", "list", "as", "single", "string", "with", "line", "feed", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L952-L960
train
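print_msg treats sequences and plain strings differently; a small illustration of both branches:

from pygeoc.utils import UtilClass

print(repr(UtilClass.print_msg(['a', 'b', 'c'])))  # 'a\nb\nc'  (items joined with line feeds)
print(repr(UtilClass.print_msg('done')))           # 'done\n'   (a trailing line feed is appended)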
lreis2415/PyGeoC
pygeoc/utils.py
UtilClass.decode_strs_in_dict
def decode_strs_in_dict(unicode_dict
                        # type: Dict[Union[AnyStr, int], Union[int, float, AnyStr, List[Union[int, float, AnyStr]]]]
                        ):
    # type: (...) -> Dict[Union[AnyStr, int], Any]
    """Decode strings in dictionary which may contain unicode strings or numeric values.

    - 1. integer could be key, float cannot;
    - 2. the function is called recursively

    Examples:
        .. code-block:: python

            input = {u'name': u'zhulj', u'age': u'28', u'1': ['1', 2, 3]}
            output = {'name': 'zhulj', 'age': 28, 1: [1, 2, 3]}
            input = {u'name': u'zhulj', 'edu': {'nwsuaf': 2007, u'bnu': '2011', 'igsnrr': 2014}}
            output = {'name': 'zhulj', 'edu': {'nwsuaf': 2007, 'bnu': 2011, 'igsnrr': 2014}}
    """
    unicode_dict = {StringClass.convert_str2num(k): StringClass.convert_str2num(v)
                    for k, v in iteritems(unicode_dict)}
    for k, v in iteritems(unicode_dict):
        if isinstance(v, dict):
            unicode_dict[k] = UtilClass.decode_strs_in_dict(v)
    return unicode_dict
python
def decode_strs_in_dict(unicode_dict
                        # type: Dict[Union[AnyStr, int], Union[int, float, AnyStr, List[Union[int, float, AnyStr]]]]
                        ):
    # type: (...) -> Dict[Union[AnyStr, int], Any]
    """Decode strings in dictionary which may contain unicode strings or numeric values.

    - 1. integer could be key, float cannot;
    - 2. the function is called recursively

    Examples:
        .. code-block:: python

            input = {u'name': u'zhulj', u'age': u'28', u'1': ['1', 2, 3]}
            output = {'name': 'zhulj', 'age': 28, 1: [1, 2, 3]}
            input = {u'name': u'zhulj', 'edu': {'nwsuaf': 2007, u'bnu': '2011', 'igsnrr': 2014}}
            output = {'name': 'zhulj', 'edu': {'nwsuaf': 2007, 'bnu': 2011, 'igsnrr': 2014}}
    """
    unicode_dict = {StringClass.convert_str2num(k): StringClass.convert_str2num(v)
                    for k, v in iteritems(unicode_dict)}
    for k, v in iteritems(unicode_dict):
        if isinstance(v, dict):
            unicode_dict[k] = UtilClass.decode_strs_in_dict(v)
    return unicode_dict
[ "def", "decode_strs_in_dict", "(", "unicode_dict", "# type: Dict[Union[AnyStr, int], Union[int, float, AnyStr, List[Union[int, float, AnyStr]]]]", ")", ":", "# type: (...) -> Dict[Union[AnyStr, int], Any]", "unicode_dict", "=", "{", "StringClass", ".", "convert_str2num", "(", "k", ")", ":", "StringClass", ".", "convert_str2num", "(", "v", ")", "for", "k", ",", "v", "in", "iteritems", "(", "unicode_dict", ")", "}", "for", "k", ",", "v", "in", "iteritems", "(", "unicode_dict", ")", ":", "if", "isinstance", "(", "v", ",", "dict", ")", ":", "unicode_dict", "[", "k", "]", "=", "UtilClass", ".", "decode_strs_in_dict", "(", "v", ")", "return", "unicode_dict" ]
Decode strings in dictionary which may contain unicode strings or numeric values.

- 1. integer could be key, float cannot;
- 2. the function is called recursively

Examples:
    .. code-block:: python

        input = {u'name': u'zhulj', u'age': u'28', u'1': ['1', 2, 3]}
        output = {'name': 'zhulj', 'age': 28, 1: [1, 2, 3]}
        input = {u'name': u'zhulj', 'edu': {'nwsuaf': 2007, u'bnu': '2011', 'igsnrr': 2014}}
        output = {'name': 'zhulj', 'edu': {'nwsuaf': 2007, 'bnu': 2011, 'igsnrr': 2014}}
[ "Decode", "strings", "in", "dictionary", "which", "may", "contains", "unicode", "strings", "or", "numeric", "values", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/pygeoc/utils.py#L987-L1010
train
rosshamish/catan-py
catan/game.py
Game.undo
def undo(self): """ Rewind the game to the previous state. """ self.undo_manager.undo() self.notify_observers() logging.debug('undo_manager undo stack={}'.format(self.undo_manager._undo_stack))
python
def undo(self): """ Rewind the game to the previous state. """ self.undo_manager.undo() self.notify_observers() logging.debug('undo_manager undo stack={}'.format(self.undo_manager._undo_stack))
[ "def", "undo", "(", "self", ")", ":", "self", ".", "undo_manager", ".", "undo", "(", ")", "self", ".", "notify_observers", "(", ")", "logging", ".", "debug", "(", "'undo_manager undo stack={}'", ".", "format", "(", "self", ".", "undo_manager", ".", "_undo_stack", ")", ")" ]
Rewind the game to the previous state.
[ "Rewind", "the", "game", "to", "the", "previous", "state", "." ]
120438a8f16e39c13322c5d5930e1064e1d3f4be
https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/game.py#L91-L97
train
rosshamish/catan-py
catan/game.py
Game.redo
def redo(self): """ Redo the latest undone command. """ self.undo_manager.redo() self.notify_observers() logging.debug('undo_manager redo stack={}'.format(self.undo_manager._redo_stack))
python
def redo(self): """ Redo the latest undone command. """ self.undo_manager.redo() self.notify_observers() logging.debug('undo_manager redo stack={}'.format(self.undo_manager._redo_stack))
[ "def", "redo", "(", "self", ")", ":", "self", ".", "undo_manager", ".", "redo", "(", ")", "self", ".", "notify_observers", "(", ")", "logging", ".", "debug", "(", "'undo_manager redo stack={}'", ".", "format", "(", "self", ".", "undo_manager", ".", "_redo_stack", ")", ")" ]
Redo the latest undone command.
[ "Redo", "the", "latest", "undone", "command", "." ]
120438a8f16e39c13322c5d5930e1064e1d3f4be
https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/game.py#L99-L105
train
PeerAssets/pypeerassets
pypeerassets/networks.py
net_query
def net_query(name: str) -> Constants: '''Find the NetworkParams for a network by its long or short name. Raises UnsupportedNetwork if no NetworkParams is found. ''' for net_params in networks: if name in (net_params.name, net_params.shortname,): return net_params raise UnsupportedNetwork
python
def net_query(name: str) -> Constants: '''Find the NetworkParams for a network by its long or short name. Raises UnsupportedNetwork if no NetworkParams is found. ''' for net_params in networks: if name in (net_params.name, net_params.shortname,): return net_params raise UnsupportedNetwork
[ "def", "net_query", "(", "name", ":", "str", ")", "->", "Constants", ":", "for", "net_params", "in", "networks", ":", "if", "name", "in", "(", "net_params", ".", "name", ",", "net_params", ".", "shortname", ",", ")", ":", "return", "net_params", "raise", "UnsupportedNetwork" ]
Find the NetworkParams for a network by its long or short name. Raises UnsupportedNetwork if no NetworkParams is found.
[ "Find", "the", "NetworkParams", "for", "a", "network", "by", "its", "long", "or", "short", "name", ".", "Raises", "UnsupportedNetwork", "if", "no", "NetworkParams", "is", "found", "." ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/networks.py#L100-L109
train
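A usage sketch for net_query; the network names are assumptions about what the `networks` table contains (a Peercoin mainnet entry with short name 'ppc' is the likely case). Unknown names raise UnsupportedNetwork, as the docstring states:

from pypeerassets.networks import net_query

params = net_query('peercoin')  # the short form net_query('ppc') should return the same entry
print(params.name, params.shortname)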
rosshamish/catan-py
catan/board.py
Board.get_port_at
def get_port_at(self, tile_id, direction): """ If no port is found, a new none port is made and added to self.ports. Returns the port. :param tile_id: :param direction: :return: Port """ for port in self.ports: if port.tile_id == tile_id and port.direction == direction: return port port = Port(tile_id, direction, PortType.none) self.ports.append(port) return port
python
def get_port_at(self, tile_id, direction): """ If no port is found, a new none port is made and added to self.ports. Returns the port. :param tile_id: :param direction: :return: Port """ for port in self.ports: if port.tile_id == tile_id and port.direction == direction: return port port = Port(tile_id, direction, PortType.none) self.ports.append(port) return port
[ "def", "get_port_at", "(", "self", ",", "tile_id", ",", "direction", ")", ":", "for", "port", "in", "self", ".", "ports", ":", "if", "port", ".", "tile_id", "==", "tile_id", "and", "port", ".", "direction", "==", "direction", ":", "return", "port", "port", "=", "Port", "(", "tile_id", ",", "direction", ",", "PortType", ".", "none", ")", "self", ".", "ports", ".", "append", "(", "port", ")", "return", "port" ]
If no port is found, a new none port is made and added to self.ports. Returns the port. :param tile_id: :param direction: :return: Port
[ "If", "no", "port", "is", "found", "a", "new", "none", "port", "is", "made", "and", "added", "to", "self", ".", "ports", "." ]
120438a8f16e39c13322c5d5930e1064e1d3f4be
https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/board.py#L170-L185
train
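A short sketch of the lazy-creation behavior: asking for a (tile_id, direction) pair with no port yet appends a PortType.none placeholder, so repeated lookups return the same object. Here `board` is assumed to be an existing Board instance (construction not shown), and the direction string follows the hexgrid-style conventions:

port = board.get_port_at(2, 'NE')  # no port here yet: a PortType.none port is created
same = board.get_port_at(2, 'NE')  # the second lookup finds and returns that same port
assert port is same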
rosshamish/catan-py
catan/board.py
Board.rotate_ports
def rotate_ports(self): """ Rotates the ports 90 degrees. Useful when using the default port setup but the spectator is watching at a "rotated" angle from "true north". """ for port in self.ports: port.tile_id = ((port.tile_id + 1) % len(hexgrid.coastal_tile_ids())) + 1 port.direction = hexgrid.rotate_direction(hexgrid.EDGE, port.direction, ccw=True) self.notify_observers()
python
def rotate_ports(self): """ Rotates the ports 90 degrees. Useful when using the default port setup but the spectator is watching at a "rotated" angle from "true north". """ for port in self.ports: port.tile_id = ((port.tile_id + 1) % len(hexgrid.coastal_tile_ids())) + 1 port.direction = hexgrid.rotate_direction(hexgrid.EDGE, port.direction, ccw=True) self.notify_observers()
[ "def", "rotate_ports", "(", "self", ")", ":", "for", "port", "in", "self", ".", "ports", ":", "port", ".", "tile_id", "=", "(", "(", "port", ".", "tile_id", "+", "1", ")", "%", "len", "(", "hexgrid", ".", "coastal_tile_ids", "(", ")", ")", ")", "+", "1", "port", ".", "direction", "=", "hexgrid", ".", "rotate_direction", "(", "hexgrid", ".", "EDGE", ",", "port", ".", "direction", ",", "ccw", "=", "True", ")", "self", ".", "notify_observers", "(", ")" ]
Rotates the ports 90 degrees. Useful when using the default port setup but the spectator is watching at a "rotated" angle from "true north".
[ "Rotates", "the", "ports", "90", "degrees", ".", "Useful", "when", "using", "the", "default", "port", "setup", "but", "the", "spectator", "is", "watching", "at", "a", "rotated", "angle", "from", "true", "north", "." ]
120438a8f16e39c13322c5d5930e1064e1d3f4be
https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/board.py#L226-L234
train
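The tile-id update wraps around the coastal ring; assuming the standard 12 coastal tiles from hexgrid.coastal_tile_ids(), the expression shifts each id by two positions:

n = 12  # assumed len(hexgrid.coastal_tile_ids()) on a standard board
print([(t, ((t + 1) % n) + 1) for t in (1, 2, 11, 12)])
# [(1, 3), (2, 4), (11, 1), (12, 2)]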
etal/biofrills
biofrills/sequtils.py
intersect_keys
def intersect_keys(keys, reffile, cache=False, clean_accs=False): """Extract SeqRecords from the index by matching keys. keys - an iterable of sequence identifiers/accessions to select reffile - name of a FASTA file to extract the specified sequences from cache - save an index of the reference FASTA sequence offsets to disk? clean_accs - strip HMMer extensions from sequence accessions? """ # Build/load the index of reference sequences index = None if cache: refcache = reffile + '.sqlite' if os.path.exists(refcache): if os.stat(refcache).st_mtime < os.stat(reffile).st_mtime: logging.warn("Outdated cache; rebuilding index") else: try: index = (SeqIO.index_db(refcache, key_function=clean_accession) if clean_accs else SeqIO.index_db(refcache)) except Exception: logging.warn("Skipping corrupted cache; rebuilding index") index = None else: refcache = ':memory:' if index is None: # Rebuild the index, for whatever reason index = (SeqIO.index_db(refcache, [reffile], 'fasta', key_function=clean_accession) if clean_accs else SeqIO.index_db(refcache, [reffile], 'fasta')) # Extract records by key if clean_accs: keys = (clean_accession(k) for k in keys) for key in keys: try: record = index[key] except LookupError: # Missing keys are rare, so it's faster not to check every time logging.info("No match: %s", repr(key)) continue yield record
python
def intersect_keys(keys, reffile, cache=False, clean_accs=False): """Extract SeqRecords from the index by matching keys. keys - an iterable of sequence identifiers/accessions to select reffile - name of a FASTA file to extract the specified sequences from cache - save an index of the reference FASTA sequence offsets to disk? clean_accs - strip HMMer extensions from sequence accessions? """ # Build/load the index of reference sequences index = None if cache: refcache = reffile + '.sqlite' if os.path.exists(refcache): if os.stat(refcache).st_mtime < os.stat(reffile).st_mtime: logging.warn("Outdated cache; rebuilding index") else: try: index = (SeqIO.index_db(refcache, key_function=clean_accession) if clean_accs else SeqIO.index_db(refcache)) except Exception: logging.warn("Skipping corrupted cache; rebuilding index") index = None else: refcache = ':memory:' if index is None: # Rebuild the index, for whatever reason index = (SeqIO.index_db(refcache, [reffile], 'fasta', key_function=clean_accession) if clean_accs else SeqIO.index_db(refcache, [reffile], 'fasta')) # Extract records by key if clean_accs: keys = (clean_accession(k) for k in keys) for key in keys: try: record = index[key] except LookupError: # Missing keys are rare, so it's faster not to check every time logging.info("No match: %s", repr(key)) continue yield record
[ "def", "intersect_keys", "(", "keys", ",", "reffile", ",", "cache", "=", "False", ",", "clean_accs", "=", "False", ")", ":", "# Build/load the index of reference sequences", "index", "=", "None", "if", "cache", ":", "refcache", "=", "reffile", "+", "'.sqlite'", "if", "os", ".", "path", ".", "exists", "(", "refcache", ")", ":", "if", "os", ".", "stat", "(", "refcache", ")", ".", "st_mtime", "<", "os", ".", "stat", "(", "reffile", ")", ".", "st_mtime", ":", "logging", ".", "warn", "(", "\"Outdated cache; rebuilding index\"", ")", "else", ":", "try", ":", "index", "=", "(", "SeqIO", ".", "index_db", "(", "refcache", ",", "key_function", "=", "clean_accession", ")", "if", "clean_accs", "else", "SeqIO", ".", "index_db", "(", "refcache", ")", ")", "except", "Exception", ":", "logging", ".", "warn", "(", "\"Skipping corrupted cache; rebuilding index\"", ")", "index", "=", "None", "else", ":", "refcache", "=", "':memory:'", "if", "index", "is", "None", ":", "# Rebuild the index, for whatever reason", "index", "=", "(", "SeqIO", ".", "index_db", "(", "refcache", ",", "[", "reffile", "]", ",", "'fasta'", ",", "key_function", "=", "clean_accession", ")", "if", "clean_accs", "else", "SeqIO", ".", "index_db", "(", "refcache", ",", "[", "reffile", "]", ",", "'fasta'", ")", ")", "# Extract records by key", "if", "clean_accs", ":", "keys", "=", "(", "clean_accession", "(", "k", ")", "for", "k", "in", "keys", ")", "for", "key", "in", "keys", ":", "try", ":", "record", "=", "index", "[", "key", "]", "except", "LookupError", ":", "# Missing keys are rare, so it's faster not to check every time", "logging", ".", "info", "(", "\"No match: %s\"", ",", "repr", "(", "key", ")", ")", "continue", "yield", "record" ]
Extract SeqRecords from the index by matching keys. keys - an iterable of sequence identifiers/accessions to select reffile - name of a FASTA file to extract the specified sequences from cache - save an index of the reference FASTA sequence offsets to disk? clean_accs - strip HMMer extensions from sequence accessions?
[ "Extract", "SeqRecords", "from", "the", "index", "by", "matching", "keys", "." ]
36684bb6c7632f96215e8b2b4ebc86640f331bcd
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/sequtils.py#L29-L73
train
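Because intersect_keys is a generator, matching records stream out as they are found; a usage sketch with hypothetical accessions and file name:

from biofrills.sequtils import intersect_keys

wanted = ['sp|P69905|HBA_HUMAN', 'sp|P68871|HBB_HUMAN']  # hypothetical keys
for rec in intersect_keys(wanted, 'reference.fa', cache=True):
    print(rec.id, len(rec.seq))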
etal/biofrills
biofrills/sequtils.py
aa_frequencies
def aa_frequencies(seq, gap_chars='-.'): """Calculate the amino acid frequencies in a sequence set.""" aa_counts = Counter(seq) # Don't count gaps for gap_char in gap_chars: if gap_char in aa_counts: del aa_counts[gap_char] # Reduce to frequencies scale = 1.0 / sum(aa_counts.values()) return dict((aa, cnt * scale) for aa, cnt in aa_counts.iteritems())
python
def aa_frequencies(seq, gap_chars='-.'): """Calculate the amino acid frequencies in a sequence set.""" aa_counts = Counter(seq) # Don't count gaps for gap_char in gap_chars: if gap_char in aa_counts: del aa_counts[gap_char] # Reduce to frequencies scale = 1.0 / sum(aa_counts.values()) return dict((aa, cnt * scale) for aa, cnt in aa_counts.iteritems())
[ "def", "aa_frequencies", "(", "seq", ",", "gap_chars", "=", "'-.'", ")", ":", "aa_counts", "=", "Counter", "(", "seq", ")", "# Don't count gaps", "for", "gap_char", "in", "gap_chars", ":", "if", "gap_char", "in", "aa_counts", ":", "del", "aa_counts", "[", "gap_char", "]", "# Reduce to frequencies", "scale", "=", "1.0", "/", "sum", "(", "aa_counts", ".", "values", "(", ")", ")", "return", "dict", "(", "(", "aa", ",", "cnt", "*", "scale", ")", "for", "aa", ",", "cnt", "in", "aa_counts", ".", "iteritems", "(", ")", ")" ]
Calculate the amino acid frequencies in a sequence set.
[ "Calculate", "the", "amino", "acid", "frequencies", "in", "a", "sequence", "set", "." ]
36684bb6c7632f96215e8b2b4ebc86640f331bcd
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/sequtils.py#L76-L85
train
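A worked example, assuming aa_frequencies is in scope (note that dict.iteritems pins the code to Python 2): gaps are dropped before scaling, so only the remaining residues share the probability mass:

# 'AAC-' -> counts {'A': 2, 'C': 1} once '-' is removed; scale = 1.0 / 3
aa_frequencies('AAC-')  # {'A': 0.666..., 'C': 0.333...}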
rosshamish/catan-py
catan/trading.py
CatanTrade.giving
def giving(self): """ Returns tuples corresponding to the number and type of each resource in the trade from giver->getter :return: eg [(2, Terrain.wood), (1, Terrain.brick)] """ logging.debug('give={}'.format(self._give)) c = Counter(self._give.copy()) return [(n, t) for t, n in c.items()]
python
def giving(self): """ Returns tuples corresponding to the number and type of each resource in the trade from giver->getter :return: eg [(2, Terrain.wood), (1, Terrain.brick)] """ logging.debug('give={}'.format(self._give)) c = Counter(self._give.copy()) return [(n, t) for t, n in c.items()]
[ "def", "giving", "(", "self", ")", ":", "logging", ".", "debug", "(", "'give={}'", ".", "format", "(", "self", ".", "_give", ")", ")", "c", "=", "Counter", "(", "self", ".", "_give", ".", "copy", "(", ")", ")", "return", "[", "(", "n", ",", "t", ")", "for", "t", ",", "n", "in", "c", ".", "items", "(", ")", "]" ]
Returns tuples corresponding to the number and type of each resource in the trade from giver->getter :return: eg [(2, Terrain.wood), (1, Terrain.brick)]
[ "Returns", "tuples", "corresponding", "to", "the", "number", "and", "type", "of", "each", "resource", "in", "the", "trade", "from", "giver", "-", ">", "getter" ]
120438a8f16e39c13322c5d5930e1064e1d3f4be
https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/trading.py#L54-L63
train
rosshamish/catan-py
catan/trading.py
CatanTrade.getting
def getting(self): """ Returns tuples corresponding to the number and type of each resource in the trade from getter->giver :return: eg [(2, Terrain.wood), (1, Terrain.brick)] """ c = Counter(self._get.copy()) return [(n, t) for t, n in c.items()]
python
def getting(self): """ Returns tuples corresponding to the number and type of each resource in the trade from getter->giver :return: eg [(2, Terrain.wood), (1, Terrain.brick)] """ c = Counter(self._get.copy()) return [(n, t) for t, n in c.items()]
[ "def", "getting", "(", "self", ")", ":", "c", "=", "Counter", "(", "self", ".", "_get", ".", "copy", "(", ")", ")", "return", "[", "(", "n", ",", "t", ")", "for", "t", ",", "n", "in", "c", ".", "items", "(", ")", "]" ]
Returns tuples corresponding to the number and type of each resource in the trade from getter->giver :return: eg [(2, Terrain.wood), (1, Terrain.brick)]
[ "Returns", "tuples", "corresponding", "to", "the", "number", "and", "type", "of", "each", "resource", "in", "the", "trade", "from", "getter", "-", ">", "giver" ]
120438a8f16e39c13322c5d5930e1064e1d3f4be
https://github.com/rosshamish/catan-py/blob/120438a8f16e39c13322c5d5930e1064e1d3f4be/catan/trading.py#L65-L73
train
moonso/ped_parser
ped_parser/family.py
Family.family_check
def family_check(self):
    """
    Check if the family members break the structure of the family.

    eg. nonexistent parent, wrong sex on parent etc.

    Also extracts all trios found, which is helpful at the moment
    since GATK can only do phasing of trios and duos.
    """
    #TODO Make some tests for these
    self.logger.info("Checking family relations for {0}".format(
        self.family_id)
    )
    for individual_id in self.individuals:
        self.logger.debug("Checking individual {0}".format(individual_id))
        individual = self.individuals[individual_id]
        self.logger.debug("Checking if individual {0} is affected".format(
            individual_id))
        if individual.affected:
            self.logger.debug("Found affected individual {0}".format(
                individual_id)
            )
            self.affected_individuals.add(individual_id)

        father = individual.father
        mother = individual.mother

        if individual.has_parents:
            self.logger.debug("Individual {0} has parents".format(
                individual_id))
            self.no_relations = False
            try:
                self.check_parent(father, father=True)
                self.check_parent(mother, father=False)
            except PedigreeError as e:
                self.logger.error(e.message)
                raise e
            # Check if there is a trio
            if individual.has_both_parents:
                self.trios.append(set([individual_id, father, mother]))
            elif father != '0':
                self.duos.append(set([individual_id, father]))
            else:
                self.duos.append(set([individual_id, mother]))

            ##TODO self.check_grandparents(individual)
        # Annotate siblings:
        for individual_2_id in self.individuals:
            if individual_id != individual_2_id:
                if self.check_siblings(individual_id, individual_2_id):
                    individual.siblings.add(individual_2_id)
python
def family_check(self):
    """
    Check if the family members break the structure of the family.

    eg. nonexistent parent, wrong sex on parent etc.

    Also extracts all trios found, which is helpful at the moment
    since GATK can only do phasing of trios and duos.
    """
    #TODO Make some tests for these
    self.logger.info("Checking family relations for {0}".format(
        self.family_id)
    )
    for individual_id in self.individuals:
        self.logger.debug("Checking individual {0}".format(individual_id))
        individual = self.individuals[individual_id]
        self.logger.debug("Checking if individual {0} is affected".format(
            individual_id))
        if individual.affected:
            self.logger.debug("Found affected individual {0}".format(
                individual_id)
            )
            self.affected_individuals.add(individual_id)

        father = individual.father
        mother = individual.mother

        if individual.has_parents:
            self.logger.debug("Individual {0} has parents".format(
                individual_id))
            self.no_relations = False
            try:
                self.check_parent(father, father=True)
                self.check_parent(mother, father=False)
            except PedigreeError as e:
                self.logger.error(e.message)
                raise e
            # Check if there is a trio
            if individual.has_both_parents:
                self.trios.append(set([individual_id, father, mother]))
            elif father != '0':
                self.duos.append(set([individual_id, father]))
            else:
                self.duos.append(set([individual_id, mother]))

            ##TODO self.check_grandparents(individual)
        # Annotate siblings:
        for individual_2_id in self.individuals:
            if individual_id != individual_2_id:
                if self.check_siblings(individual_id, individual_2_id):
                    individual.siblings.add(individual_2_id)
[ "def", "family_check", "(", "self", ")", ":", "#TODO Make some tests for these", "self", ".", "logger", ".", "info", "(", "\"Checking family relations for {0}\"", ".", "format", "(", "self", ".", "family_id", ")", ")", "for", "individual_id", "in", "self", ".", "individuals", ":", "self", ".", "logger", ".", "debug", "(", "\"Checking individual {0}\"", ".", "format", "(", "individual_id", ")", ")", "individual", "=", "self", ".", "individuals", "[", "individual_id", "]", "self", ".", "logger", ".", "debug", "(", "\"Checking if individual {0} is affected\"", ".", "format", "(", "individual_id", ")", ")", "if", "individual", ".", "affected", ":", "self", ".", "logger", ".", "debug", "(", "\"Found affected individual {0}\"", ".", "format", "(", "individual_id", ")", ")", "self", ".", "affected_individuals", ".", "add", "(", "individual_id", ")", "father", "=", "individual", ".", "father", "mother", "=", "individual", ".", "mother", "if", "individual", ".", "has_parents", ":", "self", ".", "logger", ".", "debug", "(", "\"Individual {0} has parents\"", ".", "format", "(", "individual_id", ")", ")", "self", ".", "no_relations", "=", "False", "try", ":", "self", ".", "check_parent", "(", "father", ",", "father", "=", "True", ")", "self", ".", "check_parent", "(", "mother", ",", "father", "=", "False", ")", "except", "PedigreeError", "as", "e", ":", "self", ".", "logger", ".", "error", "(", "e", ".", "message", ")", "raise", "e", "# Check if there is a trio", "if", "individual", ".", "has_both_parents", ":", "self", ".", "trios", ".", "append", "(", "set", "(", "[", "individual_id", ",", "father", ",", "mother", "]", ")", ")", "elif", "father", "!=", "'0'", ":", "self", ".", "duos", ".", "append", "(", "set", "(", "[", "individual_id", ",", "father", "]", ")", ")", "else", ":", "self", ".", "duos", ".", "append", "(", "set", "(", "[", "individual_id", ",", "mother", "]", ")", ")", "##TODO self.check_grandparents(individual)", "# Annotate siblings:", "for", "individual_2_id", "in", "self", ".", "individuals", ":", "if", "individual_id", "!=", "individual_2_id", ":", "if", "self", ".", "check_siblings", "(", "individual_id", ",", "individual_2_id", ")", ":", "individual", ".", "siblings", ".", "add", "(", "individual_2_id", ")" ]
Check if the family members break the structure of the family.

eg. nonexistent parent, wrong sex on parent etc.

Also extracts all trios found, which is helpful at the moment
since GATK can only do phasing of trios and duos.
[ "Check", "if", "the", "family", "members", "break", "the", "structure", "of", "the", "family", ".", "eg", ".", "nonexistent", "parent", "wrong", "sex", "on", "parent", "etc", ".", "Also", "extracts", "all", "trios", "found", "this", "is", "of", "help", "for", "many", "at", "the", "moment", "since", "GATK", "can", "only", "do", "phasing", "of", "trios", "and", "duos", "." ]
a7393e47139532782ea3c821aabea33d46f94323
https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/family.py#L61-L116
train
moonso/ped_parser
ped_parser/family.py
Family.check_parent
def check_parent(self, parent_id, father = False):
    """
    Check if the parent info is correct. If an individual is not present
    in file raise exception.

    Input:
        An id that represents a parent
        father = True/False

    Raises SyntaxError if
        The parent id is not present
        The gender of the parent is wrong.
    """
    self.logger.debug("Checking parent {0}".format(parent_id))
    if parent_id != '0':
        if parent_id not in self.individuals:
            raise PedigreeError(self.family_id, parent_id,
                                'Parent is not in family.')
        if father:
            if self.individuals[parent_id].sex != 1:
                raise PedigreeError(self.family_id, parent_id,
                                    'Father is not specified as male.')
        else:
            if self.individuals[parent_id].sex != 2:
                raise PedigreeError(self.family_id, parent_id,
                                    'Mother is not specified as female.')
    return
python
def check_parent(self, parent_id, father = False):
    """
    Check if the parent info is correct. If an individual is not present
    in file raise exception.

    Input:
        An id that represents a parent
        father = True/False

    Raises SyntaxError if
        The parent id is not present
        The gender of the parent is wrong.
    """
    self.logger.debug("Checking parent {0}".format(parent_id))
    if parent_id != '0':
        if parent_id not in self.individuals:
            raise PedigreeError(self.family_id, parent_id,
                                'Parent is not in family.')
        if father:
            if self.individuals[parent_id].sex != 1:
                raise PedigreeError(self.family_id, parent_id,
                                    'Father is not specified as male.')
        else:
            if self.individuals[parent_id].sex != 2:
                raise PedigreeError(self.family_id, parent_id,
                                    'Mother is not specified as female.')
    return
[ "def", "check_parent", "(", "self", ",", "parent_id", ",", "father", "=", "False", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Checking parent {0}\"", ".", "format", "(", "parent_id", ")", ")", "if", "parent_id", "!=", "'0'", ":", "if", "parent_id", "not", "in", "self", ".", "individuals", ":", "raise", "PedigreeError", "(", "self", ".", "family_id", ",", "parent_id", ",", "'Parent is not in family.'", ")", "if", "father", ":", "if", "self", ".", "individuals", "[", "parent_id", "]", ".", "sex", "!=", "1", ":", "raise", "PedigreeError", "(", "self", ".", "family_id", ",", "parent_id", ",", "'Father is not specified as male.'", ")", "else", ":", "if", "self", ".", "individuals", "[", "parent_id", "]", ".", "sex", "!=", "2", ":", "raise", "PedigreeError", "(", "self", ".", "family_id", ",", "parent_id", ",", "'Mother is not specified as female.'", ")", "return" ]
Check if the parent info is correct. If an individual is not present
in file raise exception.

Input:
    An id that represents a parent
    father = True/False

Raises SyntaxError if
    The parent id is not present
    The gender of the parent is wrong.
[ "Check", "if", "the", "parent", "info", "is", "correct", ".", "If", "an", "individual", "is", "not", "present", "in", "file", "raise", "exeption", "." ]
a7393e47139532782ea3c821aabea33d46f94323
https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/family.py#L120-L144
train
moonso/ped_parser
ped_parser/family.py
Family.to_ped
def to_ped(self, outfile=None): """ Print the individuals of the family in ped format The header will be the original ped header plus all headers found in extra info of the individuals """ ped_header = [ '#FamilyID', 'IndividualID', 'PaternalID', 'MaternalID', 'Sex', 'Phenotype', ] extra_headers = [ 'InheritanceModel', 'Proband', 'Consultand', 'Alive' ] for individual_id in self.individuals: individual = self.individuals[individual_id] for info in individual.extra_info: if info in extra_headers: if info not in ped_header: ped_header.append(info) self.logger.debug("Ped headers found: {0}".format( ', '.join(ped_header) )) if outfile: outfile.write('\t'.join(ped_header)+'\n') else: print('\t'.join(ped_header)) for individual in self.to_json(): ped_info = [] ped_info.append(individual['family_id']) ped_info.append(individual['id']) ped_info.append(individual['father']) ped_info.append(individual['mother']) ped_info.append(individual['sex']) ped_info.append(individual['phenotype']) if len(ped_header) > 6: for header in ped_header[6:]: ped_info.append(individual['extra_info'].get(header, '.')) if outfile: outfile.write('\t'.join(ped_info)+'\n') else: print('\t'.join(ped_info))
python
def to_ped(self, outfile=None): """ Print the individuals of the family in ped format The header will be the original ped header plus all headers found in extra info of the individuals """ ped_header = [ '#FamilyID', 'IndividualID', 'PaternalID', 'MaternalID', 'Sex', 'Phenotype', ] extra_headers = [ 'InheritanceModel', 'Proband', 'Consultand', 'Alive' ] for individual_id in self.individuals: individual = self.individuals[individual_id] for info in individual.extra_info: if info in extra_headers: if info not in ped_header: ped_header.append(info) self.logger.debug("Ped headers found: {0}".format( ', '.join(ped_header) )) if outfile: outfile.write('\t'.join(ped_header)+'\n') else: print('\t'.join(ped_header)) for individual in self.to_json(): ped_info = [] ped_info.append(individual['family_id']) ped_info.append(individual['id']) ped_info.append(individual['father']) ped_info.append(individual['mother']) ped_info.append(individual['sex']) ped_info.append(individual['phenotype']) if len(ped_header) > 6: for header in ped_header[6:]: ped_info.append(individual['extra_info'].get(header, '.')) if outfile: outfile.write('\t'.join(ped_info)+'\n') else: print('\t'.join(ped_info))
[ "def", "to_ped", "(", "self", ",", "outfile", "=", "None", ")", ":", "ped_header", "=", "[", "'#FamilyID'", ",", "'IndividualID'", ",", "'PaternalID'", ",", "'MaternalID'", ",", "'Sex'", ",", "'Phenotype'", ",", "]", "extra_headers", "=", "[", "'InheritanceModel'", ",", "'Proband'", ",", "'Consultand'", ",", "'Alive'", "]", "for", "individual_id", "in", "self", ".", "individuals", ":", "individual", "=", "self", ".", "individuals", "[", "individual_id", "]", "for", "info", "in", "individual", ".", "extra_info", ":", "if", "info", "in", "extra_headers", ":", "if", "info", "not", "in", "ped_header", ":", "ped_header", ".", "append", "(", "info", ")", "self", ".", "logger", ".", "debug", "(", "\"Ped headers found: {0}\"", ".", "format", "(", "', '", ".", "join", "(", "ped_header", ")", ")", ")", "if", "outfile", ":", "outfile", ".", "write", "(", "'\\t'", ".", "join", "(", "ped_header", ")", "+", "'\\n'", ")", "else", ":", "print", "(", "'\\t'", ".", "join", "(", "ped_header", ")", ")", "for", "individual", "in", "self", ".", "to_json", "(", ")", ":", "ped_info", "=", "[", "]", "ped_info", ".", "append", "(", "individual", "[", "'family_id'", "]", ")", "ped_info", ".", "append", "(", "individual", "[", "'id'", "]", ")", "ped_info", ".", "append", "(", "individual", "[", "'father'", "]", ")", "ped_info", ".", "append", "(", "individual", "[", "'mother'", "]", ")", "ped_info", ".", "append", "(", "individual", "[", "'sex'", "]", ")", "ped_info", ".", "append", "(", "individual", "[", "'phenotype'", "]", ")", "if", "len", "(", "ped_header", ")", ">", "6", ":", "for", "header", "in", "ped_header", "[", "6", ":", "]", ":", "ped_info", ".", "append", "(", "individual", "[", "'extra_info'", "]", ".", "get", "(", "header", ",", "'.'", ")", ")", "if", "outfile", ":", "outfile", ".", "write", "(", "'\\t'", ".", "join", "(", "ped_info", ")", "+", "'\\n'", ")", "else", ":", "print", "(", "'\\t'", ".", "join", "(", "ped_info", ")", ")" ]
Print the individuals of the family in ped format The header will be the original ped header plus all headers found in extra info of the individuals
[ "Print", "the", "individuals", "of", "the", "family", "in", "ped", "format", "The", "header", "will", "be", "the", "original", "ped", "header", "plus", "all", "headers", "found", "in", "extra", "info", "of", "the", "individuals" ]
a7393e47139532782ea3c821aabea33d46f94323
https://github.com/moonso/ped_parser/blob/a7393e47139532782ea3c821aabea33d46f94323/ped_parser/family.py#L251-L307
train
PeerAssets/pypeerassets
pypeerassets/__main__.py
find_deck
def find_deck(provider: Provider, key: str, version: int, prod: bool=True) -> Optional[Deck]: '''Find specific deck by deck id.''' pa_params = param_query(provider.network) if prod: p2th = pa_params.P2TH_addr else: p2th = pa_params.test_P2TH_addr rawtx = provider.getrawtransaction(key, 1) deck = deck_parser((provider, rawtx, 1, p2th)) return deck
python
def find_deck(provider: Provider, key: str, version: int, prod: bool=True) -> Optional[Deck]: '''Find specific deck by deck id.''' pa_params = param_query(provider.network) if prod: p2th = pa_params.P2TH_addr else: p2th = pa_params.test_P2TH_addr rawtx = provider.getrawtransaction(key, 1) deck = deck_parser((provider, rawtx, 1, p2th)) return deck
[ "def", "find_deck", "(", "provider", ":", "Provider", ",", "key", ":", "str", ",", "version", ":", "int", ",", "prod", ":", "bool", "=", "True", ")", "->", "Optional", "[", "Deck", "]", ":", "pa_params", "=", "param_query", "(", "provider", ".", "network", ")", "if", "prod", ":", "p2th", "=", "pa_params", ".", "P2TH_addr", "else", ":", "p2th", "=", "pa_params", ".", "test_P2TH_addr", "rawtx", "=", "provider", ".", "getrawtransaction", "(", "key", ",", "1", ")", "deck", "=", "deck_parser", "(", "(", "provider", ",", "rawtx", ",", "1", ",", "p2th", ")", ")", "return", "deck" ]
Find specific deck by deck id.
[ "Find", "specific", "deck", "by", "deck", "id", "." ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/__main__.py#L68-L80
train
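A usage sketch for find_deck; `provider` stands for any configured pypeerassets Provider, and the txid is a placeholder rather than a real deck spawn transaction:

deck = find_deck(provider, '<deck_spawn_txid>', version=1, prod=True)
if deck is not None:
    print(deck.name)  # assumes the Deck object carries a name attribute, as PeerAssets decks do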
PeerAssets/pypeerassets
pypeerassets/__main__.py
deck_spawn
def deck_spawn(provider: Provider, deck: Deck, inputs: dict,
               change_address: str, locktime: int=0) -> Transaction:
    '''Creates Deck spawn raw transaction.

       : provider - Provider instance
       : deck - Deck object
       : inputs - utxos (has to be owned by deck issuer)
       : change_address - address to send the change to
       : locktime - tx locked until block n=int
    '''

    network_params = net_query(deck.network)
    pa_params = param_query(deck.network)

    if deck.production:
        p2th_addr = pa_params.P2TH_addr
    else:
        p2th_addr = pa_params.test_P2TH_addr

    # first round of txn making is done by presuming minimal fee
    change_sum = Decimal(inputs['total'] - network_params.min_tx_fee - pa_params.P2TH_fee)

    txouts = [
        tx_output(network=deck.network, value=pa_params.P2TH_fee,
                  n=0, script=p2pkh_script(address=p2th_addr,
                                           network=deck.network)),  # p2th

        tx_output(network=deck.network, value=Decimal(0), n=1,
                  script=nulldata_script(deck.metainfo_to_protobuf)),  # op_return

        tx_output(network=deck.network, value=change_sum, n=2,
                  script=p2pkh_script(address=change_address,
                                      network=deck.network))  # change
        ]

    unsigned_tx = make_raw_transaction(network=deck.network,
                                       inputs=inputs['utxos'],
                                       outputs=txouts,
                                       locktime=Locktime(locktime)
                                       )
    return unsigned_tx
python
def deck_spawn(provider: Provider, deck: Deck, inputs: dict,
               change_address: str, locktime: int=0) -> Transaction:
    '''Creates Deck spawn raw transaction.

       : provider - Provider instance
       : deck - Deck object
       : inputs - utxos (has to be owned by deck issuer)
       : change_address - address to send the change to
       : locktime - tx locked until block n=int
    '''

    network_params = net_query(deck.network)
    pa_params = param_query(deck.network)

    if deck.production:
        p2th_addr = pa_params.P2TH_addr
    else:
        p2th_addr = pa_params.test_P2TH_addr

    # first round of txn making is done by presuming minimal fee
    change_sum = Decimal(inputs['total'] - network_params.min_tx_fee - pa_params.P2TH_fee)

    txouts = [
        tx_output(network=deck.network, value=pa_params.P2TH_fee,
                  n=0, script=p2pkh_script(address=p2th_addr,
                                           network=deck.network)),  # p2th

        tx_output(network=deck.network, value=Decimal(0), n=1,
                  script=nulldata_script(deck.metainfo_to_protobuf)),  # op_return

        tx_output(network=deck.network, value=change_sum, n=2,
                  script=p2pkh_script(address=change_address,
                                      network=deck.network))  # change
        ]

    unsigned_tx = make_raw_transaction(network=deck.network,
                                       inputs=inputs['utxos'],
                                       outputs=txouts,
                                       locktime=Locktime(locktime)
                                       )
    return unsigned_tx
[ "def", "deck_spawn", "(", "provider", ":", "Provider", ",", "deck", ":", "Deck", ",", "inputs", ":", "dict", ",", "change_address", ":", "str", ",", "locktime", ":", "int", "=", "0", ")", "->", "Transaction", ":", "network_params", "=", "net_query", "(", "deck", ".", "network", ")", "pa_params", "=", "param_query", "(", "deck", ".", "network", ")", "if", "deck", ".", "production", ":", "p2th_addr", "=", "pa_params", ".", "P2TH_addr", "else", ":", "p2th_addr", "=", "pa_params", ".", "test_P2TH_addr", "# first round of txn making is done by presuming minimal fee", "change_sum", "=", "Decimal", "(", "inputs", "[", "'total'", "]", "-", "network_params", ".", "min_tx_fee", "-", "pa_params", ".", "P2TH_fee", ")", "txouts", "=", "[", "tx_output", "(", "network", "=", "deck", ".", "network", ",", "value", "=", "pa_params", ".", "P2TH_fee", ",", "n", "=", "0", ",", "script", "=", "p2pkh_script", "(", "address", "=", "p2th_addr", ",", "network", "=", "deck", ".", "network", ")", ")", ",", "# p2th", "tx_output", "(", "network", "=", "deck", ".", "network", ",", "value", "=", "Decimal", "(", "0", ")", ",", "n", "=", "1", ",", "script", "=", "nulldata_script", "(", "deck", ".", "metainfo_to_protobuf", ")", ")", ",", "# op_return", "tx_output", "(", "network", "=", "deck", ".", "network", ",", "value", "=", "change_sum", ",", "n", "=", "2", ",", "script", "=", "p2pkh_script", "(", "address", "=", "change_address", ",", "network", "=", "deck", ".", "network", ")", ")", "# change", "]", "unsigned_tx", "=", "make_raw_transaction", "(", "network", "=", "deck", ".", "network", ",", "inputs", "=", "inputs", "[", "'utxos'", "]", ",", "outputs", "=", "txouts", ",", "locktime", "=", "Locktime", "(", "locktime", ")", ")", "return", "unsigned_tx" ]
Creates Deck spawn raw transaction. : provider - Provider object : deck - Deck object : inputs - utxos (has to be owned by deck issuer) : change_address - address to send the change to : locktime - tx locked until block n=int
[ "Creates", "Deck", "spawn", "raw", "transaction", "." ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/__main__.py#L83-L125
train
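deck_spawn sizes its change output by subtracting both the network minimum fee and the P2TH fee from the selected input total. A minimal, self-contained sketch of that arithmetic with made-up fee and input values (the real numbers come from net_query and param_query):

from decimal import Decimal

min_tx_fee = Decimal('0.01')   # assumed network minimum fee
p2th_fee = Decimal('0.01')     # assumed P2TH output value
inputs = {'utxos': [], 'total': Decimal('5.00')}  # pretend coin-selection result

# the same expression deck_spawn uses to size output n=2 (the change output)
change_sum = Decimal(inputs['total'] - min_tx_fee - p2th_fee)
assert change_sum == Decimal('4.98')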
PeerAssets/pypeerassets
pypeerassets/__main__.py
get_card_transfer
def get_card_transfer(provider: Provider, deck: Deck, txid: str, debug: bool=False) -> Iterator: '''get a single card transfer by its id''' rawtx = provider.getrawtransaction(txid, 1) bundle = card_bundler(provider, deck, rawtx) return card_bundle_parser(bundle, debug)
python
def get_card_transfer(provider: Provider, deck: Deck, txid: str, debug: bool=False) -> Iterator: '''get a single card transfer by its id''' rawtx = provider.getrawtransaction(txid, 1) bundle = card_bundler(provider, deck, rawtx) return card_bundle_parser(bundle, debug)
[ "def", "get_card_transfer", "(", "provider", ":", "Provider", ",", "deck", ":", "Deck", ",", "txid", ":", "str", ",", "debug", ":", "bool", "=", "False", ")", "->", "Iterator", ":", "rawtx", "=", "provider", ".", "getrawtransaction", "(", "txid", ",", "1", ")", "bundle", "=", "card_bundler", "(", "provider", ",", "deck", ",", "rawtx", ")", "return", "card_bundle_parser", "(", "bundle", ",", "debug", ")" ]
get a single card transfer by its id
[ "get", "a", "single", "card", "transfer", "by", "it", "s", "id" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/__main__.py#L199-L208
train
PeerAssets/pypeerassets
pypeerassets/__main__.py
find_all_valid_cards
def find_all_valid_cards(provider: Provider, deck: Deck) -> Generator: '''find all the valid cards on this deck, filtering out cards which don't play nice with deck issue mode''' # validate_card_issue_modes must receive a full list of cards, not batches unfiltered = (card for batch in get_card_bundles(provider, deck) for card in batch) for card in validate_card_issue_modes(deck.issue_mode, list(unfiltered)): yield card
python
def find_all_valid_cards(provider: Provider, deck: Deck) -> Generator: '''find all the valid cards on this deck, filtering out cards which don't play nice with deck issue mode''' # validate_card_issue_modes must receive a full list of cards, not batches unfiltered = (card for batch in get_card_bundles(provider, deck) for card in batch) for card in validate_card_issue_modes(deck.issue_mode, list(unfiltered)): yield card
[ "def", "find_all_valid_cards", "(", "provider", ":", "Provider", ",", "deck", ":", "Deck", ")", "->", "Generator", ":", "# validate_card_issue_modes must recieve a full list of cards, not batches", "unfiltered", "=", "(", "card", "for", "batch", "in", "get_card_bundles", "(", "provider", ",", "deck", ")", "for", "card", "in", "batch", ")", "for", "card", "in", "validate_card_issue_modes", "(", "deck", ".", "issue_mode", ",", "list", "(", "unfiltered", ")", ")", ":", "yield", "card" ]
find all the valid cards on this deck, filtering out cards which don't play nice with deck issue mode
[ "find", "all", "the", "valid", "cards", "on", "this", "deck", "filtering", "out", "cards", "which", "don", "t", "play", "nice", "with", "deck", "issue", "mode" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/__main__.py#L211-L219
train
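The unfiltered generator expression above flattens card bundles into a single stream before issue-mode validation. The same two-level pattern, illustrated standalone with plain lists as stand-ins for bundles:

batches = [[1, 2], [3], [4, 5]]  # stand-ins for card bundles
flat = (item for batch in batches for item in batch)
assert list(flat) == [1, 2, 3, 4, 5]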
PeerAssets/pypeerassets
pypeerassets/__main__.py
card_transfer
def card_transfer(provider: Provider, card: CardTransfer, inputs: dict, change_address: str, locktime: int=0) -> Transaction: '''Prepare the CardTransfer Transaction object : card - CardTransfer object : inputs - utxos (has to be owned by deck issuer) : change_address - address to send the change to : locktime - tx locked until block n=int ''' network_params = net_query(provider.network) pa_params = param_query(provider.network) if card.deck_p2th is None: raise Exception("card.deck_p2th required for tx_output") outs = [ tx_output(network=provider.network, value=pa_params.P2TH_fee, n=0, script=p2pkh_script(address=card.deck_p2th, network=provider.network)), # deck p2th tx_output(network=provider.network, value=Decimal(0), n=1, script=nulldata_script(card.metainfo_to_protobuf)) # op_return ] for addr, index in zip(card.receiver, range(len(card.receiver))): outs.append( # TxOut for each receiver, index + 2 because we have two outs already tx_output(network=provider.network, value=Decimal(0), n=index+2, script=p2pkh_script(address=addr, network=provider.network)) ) # first round of txn making is done by presuming minimal fee change_sum = Decimal(inputs['total'] - network_params.min_tx_fee - pa_params.P2TH_fee) outs.append( tx_output(network=provider.network, value=change_sum, n=len(outs)+1, script=p2pkh_script(address=change_address, network=provider.network)) ) unsigned_tx = make_raw_transaction(network=provider.network, inputs=inputs['utxos'], outputs=outs, locktime=Locktime(locktime) ) return unsigned_tx
python
def card_transfer(provider: Provider, card: CardTransfer, inputs: dict, change_address: str, locktime: int=0) -> Transaction: '''Prepare the CardTransfer Transaction object : card - CardTransfer object : inputs - utxos (has to be owned by deck issuer) : change_address - address to send the change to : locktime - tx locked until block n=int ''' network_params = net_query(provider.network) pa_params = param_query(provider.network) if card.deck_p2th is None: raise Exception("card.deck_p2th required for tx_output") outs = [ tx_output(network=provider.network, value=pa_params.P2TH_fee, n=0, script=p2pkh_script(address=card.deck_p2th, network=provider.network)), # deck p2th tx_output(network=provider.network, value=Decimal(0), n=1, script=nulldata_script(card.metainfo_to_protobuf)) # op_return ] for addr, index in zip(card.receiver, range(len(card.receiver))): outs.append( # TxOut for each receiver, index + 2 because we have two outs already tx_output(network=provider.network, value=Decimal(0), n=index+2, script=p2pkh_script(address=addr, network=provider.network)) ) # first round of txn making is done by presuming minimal fee change_sum = Decimal(inputs['total'] - network_params.min_tx_fee - pa_params.P2TH_fee) outs.append( tx_output(network=provider.network, value=change_sum, n=len(outs)+1, script=p2pkh_script(address=change_address, network=provider.network)) ) unsigned_tx = make_raw_transaction(network=provider.network, inputs=inputs['utxos'], outputs=outs, locktime=Locktime(locktime) ) return unsigned_tx
[ "def", "card_transfer", "(", "provider", ":", "Provider", ",", "card", ":", "CardTransfer", ",", "inputs", ":", "dict", ",", "change_address", ":", "str", ",", "locktime", ":", "int", "=", "0", ")", "->", "Transaction", ":", "network_params", "=", "net_query", "(", "provider", ".", "network", ")", "pa_params", "=", "param_query", "(", "provider", ".", "network", ")", "if", "card", ".", "deck_p2th", "is", "None", ":", "raise", "Exception", "(", "\"card.deck_p2th required for tx_output\"", ")", "outs", "=", "[", "tx_output", "(", "network", "=", "provider", ".", "network", ",", "value", "=", "pa_params", ".", "P2TH_fee", ",", "n", "=", "0", ",", "script", "=", "p2pkh_script", "(", "address", "=", "card", ".", "deck_p2th", ",", "network", "=", "provider", ".", "network", ")", ")", ",", "# deck p2th", "tx_output", "(", "network", "=", "provider", ".", "network", ",", "value", "=", "Decimal", "(", "0", ")", ",", "n", "=", "1", ",", "script", "=", "nulldata_script", "(", "card", ".", "metainfo_to_protobuf", ")", ")", "# op_return", "]", "for", "addr", ",", "index", "in", "zip", "(", "card", ".", "receiver", ",", "range", "(", "len", "(", "card", ".", "receiver", ")", ")", ")", ":", "outs", ".", "append", "(", "# TxOut for each receiver, index + 2 because we have two outs already", "tx_output", "(", "network", "=", "provider", ".", "network", ",", "value", "=", "Decimal", "(", "0", ")", ",", "n", "=", "index", "+", "2", ",", "script", "=", "p2pkh_script", "(", "address", "=", "addr", ",", "network", "=", "provider", ".", "network", ")", ")", ")", "# first round of txn making is done by presuming minimal fee", "change_sum", "=", "Decimal", "(", "inputs", "[", "'total'", "]", "-", "network_params", ".", "min_tx_fee", "-", "pa_params", ".", "P2TH_fee", ")", "outs", ".", "append", "(", "tx_output", "(", "network", "=", "provider", ".", "network", ",", "value", "=", "change_sum", ",", "n", "=", "len", "(", "outs", ")", "+", "1", ",", "script", "=", "p2pkh_script", "(", "address", "=", "change_address", ",", "network", "=", "provider", ".", "network", ")", ")", ")", "unsigned_tx", "=", "make_raw_transaction", "(", "network", "=", "provider", ".", "network", ",", "inputs", "=", "inputs", "[", "'utxos'", "]", ",", "outputs", "=", "outs", ",", "locktime", "=", "Locktime", "(", "locktime", ")", ")", "return", "unsigned_tx" ]
Prepare the CardTransfer Transaction object : card - CardTransfer object : inputs - utxos (has to be owned by deck issuer) : change_address - address to send the change to : locktime - tx locked until block n=int
[ "Prepare", "the", "CardTransfer", "Transaction", "object" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/__main__.py#L222-L271
train
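card_transfer numbers receiver outputs starting at n=2 because indexes 0 and 1 are already taken by the deck P2TH marker and the OP_RETURN payload. A standalone sketch of that indexing with placeholder addresses (not real outputs):

receivers = ['addr_a', 'addr_b', 'addr_c']  # hypothetical receiver addresses
outs = ['p2th', 'op_return']                # stand-ins for outputs n=0 and n=1
for addr, index in zip(receivers, range(len(receivers))):
    outs.append((index + 2, addr))          # one TxOut per receiver
assert outs[2:] == [(2, 'addr_a'), (3, 'addr_b'), (4, 'addr_c')]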
johnnoone/json-spec
src/jsonspec/validators/util.py
rfc3339_to_datetime
def rfc3339_to_datetime(data): """convert an rfc3339 date representation into a Python datetime""" try: ts = time.strptime(data, '%Y-%m-%d') return date(*ts[:3]) except ValueError: pass try: dt, _, tz = data.partition('Z') if tz: tz = offset(tz) else: tz = offset('00:00') if '.' in dt and dt.rsplit('.', 1)[-1].isdigit(): ts = time.strptime(dt, '%Y-%m-%dT%H:%M:%S.%f') else: ts = time.strptime(dt, '%Y-%m-%dT%H:%M:%S') return datetime(*ts[:6], tzinfo=tz) except ValueError: raise ValueError('date-time {!r} is not a valid rfc3339 date representation'.format(data))
python
def rfc3339_to_datetime(data): """convert an rfc3339 date representation into a Python datetime""" try: ts = time.strptime(data, '%Y-%m-%d') return date(*ts[:3]) except ValueError: pass try: dt, _, tz = data.partition('Z') if tz: tz = offset(tz) else: tz = offset('00:00') if '.' in dt and dt.rsplit('.', 1)[-1].isdigit(): ts = time.strptime(dt, '%Y-%m-%dT%H:%M:%S.%f') else: ts = time.strptime(dt, '%Y-%m-%dT%H:%M:%S') return datetime(*ts[:6], tzinfo=tz) except ValueError: raise ValueError('date-time {!r} is not a valid rfc3339 date representation'.format(data))
[ "def", "rfc3339_to_datetime", "(", "data", ")", ":", "try", ":", "ts", "=", "time", ".", "strptime", "(", "data", ",", "'%Y-%m-%d'", ")", "return", "date", "(", "*", "ts", "[", ":", "3", "]", ")", "except", "ValueError", ":", "pass", "try", ":", "dt", ",", "_", ",", "tz", "=", "data", ".", "partition", "(", "'Z'", ")", "if", "tz", ":", "tz", "=", "offset", "(", "tz", ")", "else", ":", "tz", "=", "offset", "(", "'00:00'", ")", "if", "'.'", "in", "dt", "and", "dt", ".", "rsplit", "(", "'.'", ",", "1", ")", "[", "-", "1", "]", ".", "isdigit", "(", ")", ":", "ts", "=", "time", ".", "strptime", "(", "dt", ",", "'%Y-%m-%dT%H:%M:%S.%f'", ")", "else", ":", "ts", "=", "time", ".", "strptime", "(", "dt", ",", "'%Y-%m-%dT%H:%M:%S'", ")", "return", "datetime", "(", "*", "ts", "[", ":", "6", "]", ",", "tzinfo", "=", "tz", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'date-time {!r} is not a valid rfc3339 date representation'", ".", "format", "(", "data", ")", ")" ]
convert an rfc3339 date representation into a Python datetime
[ "convert", "a", "rfc3339", "date", "representation", "into", "a", "Python", "datetime" ]
f91981724cea0c366bd42a6670eb07bbe31c0e0c
https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/validators/util.py#L92-L112
train
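The helper tries a plain date first, then falls back to the timestamp formats. Both strptime shapes it relies on, exercised with the standard library only (example values are arbitrary):

import time
from datetime import datetime

# date-only branch: struct_time's first three fields are year, month, day
assert time.strptime('2015-06-30', '%Y-%m-%d')[:3] == (2015, 6, 30)

# date-time branch: the first six fields feed the datetime constructor
fields = time.strptime('2015-06-30T12:34:56', '%Y-%m-%dT%H:%M:%S')[:6]
assert datetime(*fields) == datetime(2015, 6, 30, 12, 34, 56)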
etal/biofrills
biofrills/cmdutils.py
log_config
def log_config(verbose=1): """Set up logging the way I like it.""" # ENH: # - do print levelname before DEBUG and WARNING # - instead of %module, name the currently running script # - make a subclass of logging.handlers.X instead? # - tweak %root? # - take __file__ as an argument? if verbose == 0: level = logging.WARNING fmt = "%(module)s: %(message)s" elif verbose == 1: level = logging.INFO fmt = "%(module)s [@%(lineno)s]: %(message)s" else: level = logging.DEBUG fmt = "%(module)s [%(lineno)s]: %(levelname)s: %(message)s" logging.basicConfig(format=fmt, level=level)
python
def log_config(verbose=1): """Set up logging the way I like it.""" # ENH: # - do print levelname before DEBUG and WARNING # - instead of %module, name the currently running script # - make a subclass of logging.handlers.X instead? # - tweak %root? # - take __file__ as an argument? if verbose == 0: level = logging.WARNING fmt = "%(module)s: %(message)s" elif verbose == 1: level = logging.INFO fmt = "%(module)s [@%(lineno)s]: %(message)s" else: level = logging.DEBUG fmt = "%(module)s [%(lineno)s]: %(levelname)s: %(message)s" logging.basicConfig(format=fmt, level=level)
[ "def", "log_config", "(", "verbose", "=", "1", ")", ":", "# ENH:", "# - do print levelname before DEBUG and WARNING", "# - instead of %module, name the currently running script", "# - make a subclass of logging.handlers.X instead?", "# - tweak %root?", "# - take __file__ as an argument?", "if", "verbose", "==", "0", ":", "level", "=", "logging", ".", "WARNING", "fmt", "=", "\"%(module)s: %(message)s\"", "elif", "verbose", "==", "1", ":", "level", "=", "logging", ".", "INFO", "fmt", "=", "\"%(module)s [@%(lineno)s]: %(message)s\"", "else", ":", "level", "=", "logging", ".", "DEBUG", "fmt", "=", "\"%(module)s [%(lineno)s]: %(levelname)s: %(message)s\"", "logging", ".", "basicConfig", "(", "format", "=", "fmt", ",", "level", "=", "level", ")" ]
Set up logging the way I like it.
[ "Set", "up", "logging", "the", "way", "I", "like", "it", "." ]
36684bb6c7632f96215e8b2b4ebc86640f331bcd
https://github.com/etal/biofrills/blob/36684bb6c7632f96215e8b2b4ebc86640f331bcd/biofrills/cmdutils.py#L6-L23
train
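log_config is a thin wrapper over logging.basicConfig, which only takes effect on its first call. A hypothetical equivalent of calling it with verbose=2, using just the standard library:

import logging

logging.basicConfig(
    format="%(module)s [%(lineno)s]: %(levelname)s: %(message)s",
    level=logging.DEBUG)
logging.debug('now tagged with module, line number and level')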
LISE-B26/pylabcontrol
build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py
MainWindow.refresh_instruments
def refresh_instruments(self): """ if self.tree_settings has been expanded, ask instruments for their actual values """ def list_access_nested_dict(dict, somelist): """ Allows one to use a list to access a nested dictionary, for example: listAccessNestedDict({'a': {'b': 1}}, ['a', 'b']) returns 1 Args: dict: somelist: Returns: """ return reduce(operator.getitem, somelist, dict) def update(item): if item.isExpanded(): for index in range(item.childCount()): child = item.child(index) if child.childCount() == 0: instrument, path_to_instrument = child.get_instrument() path_to_instrument.reverse() try: #check if item is in probes value = instrument.read_probes(path_to_instrument[-1]) except AssertionError: #if item not in probes, get value from settings instead value = list_access_nested_dict(instrument.settings, path_to_instrument) child.value = value else: update(child) #need to block signals during update so that tree.itemChanged doesn't fire and the gui doesn't try to #reupdate the instruments to their current value self.tree_settings.blockSignals(True) for index in range(self.tree_settings.topLevelItemCount()): instrument = self.tree_settings.topLevelItem(index) update(instrument) self.tree_settings.blockSignals(False)
python
def refresh_instruments(self): """ if self.tree_settings has been expanded, ask instruments for their actual values """ def list_access_nested_dict(dict, somelist): """ Allows one to use a list to access a nested dictionary, for example: listAccessNestedDict({'a': {'b': 1}}, ['a', 'b']) returns 1 Args: dict: somelist: Returns: """ return reduce(operator.getitem, somelist, dict) def update(item): if item.isExpanded(): for index in range(item.childCount()): child = item.child(index) if child.childCount() == 0: instrument, path_to_instrument = child.get_instrument() path_to_instrument.reverse() try: #check if item is in probes value = instrument.read_probes(path_to_instrument[-1]) except AssertionError: #if item not in probes, get value from settings instead value = list_access_nested_dict(instrument.settings, path_to_instrument) child.value = value else: update(child) #need to block signals during update so that tree.itemChanged doesn't fire and the gui doesn't try to #reupdate the instruments to their current value self.tree_settings.blockSignals(True) for index in range(self.tree_settings.topLevelItemCount()): instrument = self.tree_settings.topLevelItem(index) update(instrument) self.tree_settings.blockSignals(False)
[ "def", "refresh_instruments", "(", "self", ")", ":", "def", "list_access_nested_dict", "(", "dict", ",", "somelist", ")", ":", "\"\"\"\n Allows one to use a list to access a nested dictionary, for example:\n listAccessNestedDict({'a': {'b': 1}}, ['a', 'b']) returns 1\n Args:\n dict:\n somelist:\n\n Returns:\n\n \"\"\"", "return", "reduce", "(", "operator", ".", "getitem", ",", "somelist", ",", "dict", ")", "def", "update", "(", "item", ")", ":", "if", "item", ".", "isExpanded", "(", ")", ":", "for", "index", "in", "range", "(", "item", ".", "childCount", "(", ")", ")", ":", "child", "=", "item", ".", "child", "(", "index", ")", "if", "child", ".", "childCount", "(", ")", "==", "0", ":", "instrument", ",", "path_to_instrument", "=", "child", ".", "get_instrument", "(", ")", "path_to_instrument", ".", "reverse", "(", ")", "try", ":", "#check if item is in probes", "value", "=", "instrument", ".", "read_probes", "(", "path_to_instrument", "[", "-", "1", "]", ")", "except", "AssertionError", ":", "#if item not in probes, get value from settings instead", "value", "=", "list_access_nested_dict", "(", "instrument", ".", "settings", ",", "path_to_instrument", ")", "child", ".", "value", "=", "value", "else", ":", "update", "(", "child", ")", "#need to block signals during update so that tree.itemChanged doesn't fire and the gui doesn't try to", "#reupdate the instruments to their current value", "self", ".", "tree_settings", ".", "blockSignals", "(", "True", ")", "for", "index", "in", "range", "(", "self", ".", "tree_settings", ".", "topLevelItemCount", "(", ")", ")", ":", "instrument", "=", "self", ".", "tree_settings", ".", "topLevelItem", "(", "index", ")", "update", "(", "instrument", ")", "self", ".", "tree_settings", ".", "blockSignals", "(", "False", ")" ]
if self.tree_settings has been expanded, ask instruments for their actual values
[ "if", "self", ".", "tree_settings", "has", "been", "expanded", "ask", "instruments", "for", "their", "actual", "values" ]
67482e5157fcd1c40705e5c2cacfb93564703ed0
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py#L345-L386
train
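The inner list_access_nested_dict walks a nested settings dict with reduce; the same one-liner reproduced standalone (the settings and path here are invented):

import operator
from functools import reduce

settings = {'laser': {'power': {'value': 1.5}}}  # hypothetical instrument settings
path = ['laser', 'power', 'value']
# reduce applies getitem once per path element, descending one level each time
assert reduce(operator.getitem, path, settings) == 1.5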
LISE-B26/pylabcontrol
build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py
MainWindow.update_parameters
def update_parameters(self, treeWidget): """ updates the internal dictionaries for scripts and instruments with values from the respective trees treeWidget: the tree from which to update """ if treeWidget == self.tree_settings: item = treeWidget.currentItem() instrument, path_to_instrument = item.get_instrument() # build nested dictionary to update instrument dictator = item.value for element in path_to_instrument: dictator = {element: dictator} # get old value from instrument old_value = instrument.settings path_to_instrument.reverse() for element in path_to_instrument: old_value = old_value[element] # send new value from tree to instrument instrument.update(dictator) new_value = item.value if new_value is not old_value: msg = "changed parameter {:s} from {:s} to {:s} on {:s}".format(item.name, str(old_value), str(new_value), instrument.name) else: msg = "did not change parameter {:s} on {:s}".format(item.name, instrument.name) self.log(msg) elif treeWidget == self.tree_scripts: item = treeWidget.currentItem() script, path_to_script, _ = item.get_script() # check if changed value is from an instrument instrument, path_to_instrument = item.get_instrument() if instrument is not None: new_value = item.value msg = "changed parameter {:s} to {:s} in {:s}".format(item.name, str(new_value), script.name) else: new_value = item.value msg = "changed parameter {:s} to {:s} in {:s}".format(item.name, str(new_value), script.name) self.log(msg)
python
def update_parameters(self, treeWidget): """ updates the internal dictionaries for scripts and instruments with values from the respective trees treeWidget: the tree from which to update """ if treeWidget == self.tree_settings: item = treeWidget.currentItem() instrument, path_to_instrument = item.get_instrument() # build nested dictionary to update instrument dictator = item.value for element in path_to_instrument: dictator = {element: dictator} # get old value from instrument old_value = instrument.settings path_to_instrument.reverse() for element in path_to_instrument: old_value = old_value[element] # send new value from tree to instrument instrument.update(dictator) new_value = item.value if new_value is not old_value: msg = "changed parameter {:s} from {:s} to {:s} on {:s}".format(item.name, str(old_value), str(new_value), instrument.name) else: msg = "did not change parameter {:s} on {:s}".format(item.name, instrument.name) self.log(msg) elif treeWidget == self.tree_scripts: item = treeWidget.currentItem() script, path_to_script, _ = item.get_script() # check if changed value is from an instrument instrument, path_to_instrument = item.get_instrument() if instrument is not None: new_value = item.value msg = "changed parameter {:s} to {:s} in {:s}".format(item.name, str(new_value), script.name) else: new_value = item.value msg = "changed parameter {:s} to {:s} in {:s}".format(item.name, str(new_value), script.name) self.log(msg)
[ "def", "update_parameters", "(", "self", ",", "treeWidget", ")", ":", "if", "treeWidget", "==", "self", ".", "tree_settings", ":", "item", "=", "treeWidget", ".", "currentItem", "(", ")", "instrument", ",", "path_to_instrument", "=", "item", ".", "get_instrument", "(", ")", "# build nested dictionary to update instrument", "dictator", "=", "item", ".", "value", "for", "element", "in", "path_to_instrument", ":", "dictator", "=", "{", "element", ":", "dictator", "}", "# get old value from instrument", "old_value", "=", "instrument", ".", "settings", "path_to_instrument", ".", "reverse", "(", ")", "for", "element", "in", "path_to_instrument", ":", "old_value", "=", "old_value", "[", "element", "]", "# send new value from tree to instrument", "instrument", ".", "update", "(", "dictator", ")", "new_value", "=", "item", ".", "value", "if", "new_value", "is", "not", "old_value", ":", "msg", "=", "\"changed parameter {:s} from {:s} to {:s} on {:s}\"", ".", "format", "(", "item", ".", "name", ",", "str", "(", "old_value", ")", ",", "str", "(", "new_value", ")", ",", "instrument", ".", "name", ")", "else", ":", "msg", "=", "\"did not change parameter {:s} on {:s}\"", ".", "format", "(", "item", ".", "name", ",", "instrument", ".", "name", ")", "self", ".", "log", "(", "msg", ")", "elif", "treeWidget", "==", "self", ".", "tree_scripts", ":", "item", "=", "treeWidget", ".", "currentItem", "(", ")", "script", ",", "path_to_script", ",", "_", "=", "item", ".", "get_script", "(", ")", "# check if changes value is from an instrument", "instrument", ",", "path_to_instrument", "=", "item", ".", "get_instrument", "(", ")", "if", "instrument", "is", "not", "None", ":", "new_value", "=", "item", ".", "value", "msg", "=", "\"changed parameter {:s} to {:s} in {:s}\"", ".", "format", "(", "item", ".", "name", ",", "str", "(", "new_value", ")", ",", "script", ".", "name", ")", "else", ":", "new_value", "=", "item", ".", "value", "msg", "=", "\"changed parameter {:s} to {:s} in {:s}\"", ".", "format", "(", "item", ".", "name", ",", "str", "(", "new_value", ")", ",", "script", ".", "name", ")", "self", ".", "log", "(", "msg", ")" ]
updates the internal dictionaries for scripts and instruments with values from the respective trees treeWidget: the tree from which to update
[ "updates", "the", "internal", "dictionaries", "for", "scripts", "and", "instruments", "with", "values", "from", "the", "respective", "trees" ]
67482e5157fcd1c40705e5c2cacfb93564703ed0
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py#L889-L947
train
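The "dictator" loop above rebuilds a nested dict from a leaf-first path so that instrument.update() receives the usual root-first shape (the surrounding code's later reverse() call suggests the path arrives leaf-first). A standalone sketch with an invented path:

path_to_instrument = ['voltage', 'channel_1', 'output']  # leaf-first order
dictator = 0.25  # the new value from the tree item
for element in path_to_instrument:
    dictator = {element: dictator}  # wrap one level per path element
assert dictator == {'output': {'channel_1': {'voltage': 0.25}}}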
LISE-B26/pylabcontrol
build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py
MainWindow.script_finished
def script_finished(self): """ waits for the script to emit the script_finished signal """ script = self.current_script script.updateProgress.disconnect(self.update_status) self.script_thread.started.disconnect() script.finished.disconnect() self.current_script = None self.plot_script(script) self.progressBar.setValue(100) self.btn_start_script.setEnabled(True) self.btn_skip_subscript.setEnabled(False)
python
def script_finished(self): """ waits for the script to emit the script_finished signal """ script = self.current_script script.updateProgress.disconnect(self.update_status) self.script_thread.started.disconnect() script.finished.disconnect() self.current_script = None self.plot_script(script) self.progressBar.setValue(100) self.btn_start_script.setEnabled(True) self.btn_skip_subscript.setEnabled(False)
[ "def", "script_finished", "(", "self", ")", ":", "script", "=", "self", ".", "current_script", "script", ".", "updateProgress", ".", "disconnect", "(", "self", ".", "update_status", ")", "self", ".", "script_thread", ".", "started", ".", "disconnect", "(", ")", "script", ".", "finished", ".", "disconnect", "(", ")", "self", ".", "current_script", "=", "None", "self", ".", "plot_script", "(", "script", ")", "self", ".", "progressBar", ".", "setValue", "(", "100", ")", "self", ".", "btn_start_script", ".", "setEnabled", "(", "True", ")", "self", ".", "btn_skip_subscript", ".", "setEnabled", "(", "False", ")" ]
waits for the script to emit the script_finished signal
[ "waits", "for", "the", "script", "to", "emit", "the", "script_finshed", "signal" ]
67482e5157fcd1c40705e5c2cacfb93564703ed0
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py#L994-L1008
train
LISE-B26/pylabcontrol
build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py
MainWindow.update_probes
def update_probes(self, progress): """ update the probe tree """ new_values = self.read_probes.probes_values probe_count = len(self.read_probes.probes) if probe_count > self.tree_probes.topLevelItemCount(): # when run for the first time, there are no probes in the tree, so we have to fill it first self.fill_treewidget(self.tree_probes, new_values) else: for x in range(probe_count): topLvlItem = self.tree_probes.topLevelItem(x) for child_id in range(topLvlItem.childCount()): child = topLvlItem.child(child_id) child.value = new_values[topLvlItem.name][child.name] child.setText(1, str(child.value)) if self.probe_to_plot is not None: self.probe_to_plot.plot(self.matplotlibwidget_1.axes) self.matplotlibwidget_1.draw() if self.chk_probe_log.isChecked(): data = ','.join(list(np.array([[str(p) for p in list(p_dict.values())] for instr, p_dict in new_values.items()]).flatten())) self.probe_file.write('{:s}\n'.format(data))
python
def update_probes(self, progress): """ update the probe tree """ new_values = self.read_probes.probes_values probe_count = len(self.read_probes.probes) if probe_count > self.tree_probes.topLevelItemCount(): # when run for the first time, there are no probes in the tree, so we have to fill it first self.fill_treewidget(self.tree_probes, new_values) else: for x in range(probe_count): topLvlItem = self.tree_probes.topLevelItem(x) for child_id in range(topLvlItem.childCount()): child = topLvlItem.child(child_id) child.value = new_values[topLvlItem.name][child.name] child.setText(1, str(child.value)) if self.probe_to_plot is not None: self.probe_to_plot.plot(self.matplotlibwidget_1.axes) self.matplotlibwidget_1.draw() if self.chk_probe_log.isChecked(): data = ','.join(list(np.array([[str(p) for p in list(p_dict.values())] for instr, p_dict in new_values.items()]).flatten())) self.probe_file.write('{:s}\n'.format(data))
[ "def", "update_probes", "(", "self", ",", "progress", ")", ":", "new_values", "=", "self", ".", "read_probes", ".", "probes_values", "probe_count", "=", "len", "(", "self", ".", "read_probes", ".", "probes", ")", "if", "probe_count", ">", "self", ".", "tree_probes", ".", "topLevelItemCount", "(", ")", ":", "# when run for the first time, there are no probes in the tree, so we have to fill it first", "self", ".", "fill_treewidget", "(", "self", ".", "tree_probes", ",", "new_values", ")", "else", ":", "for", "x", "in", "range", "(", "probe_count", ")", ":", "topLvlItem", "=", "self", ".", "tree_probes", ".", "topLevelItem", "(", "x", ")", "for", "child_id", "in", "range", "(", "topLvlItem", ".", "childCount", "(", ")", ")", ":", "child", "=", "topLvlItem", ".", "child", "(", "child_id", ")", "child", ".", "value", "=", "new_values", "[", "topLvlItem", ".", "name", "]", "[", "child", ".", "name", "]", "child", ".", "setText", "(", "1", ",", "str", "(", "child", ".", "value", ")", ")", "if", "self", ".", "probe_to_plot", "is", "not", "None", ":", "self", ".", "probe_to_plot", ".", "plot", "(", "self", ".", "matplotlibwidget_1", ".", "axes", ")", "self", ".", "matplotlibwidget_1", ".", "draw", "(", ")", "if", "self", ".", "chk_probe_log", ".", "isChecked", "(", ")", ":", "data", "=", "','", ".", "join", "(", "list", "(", "np", ".", "array", "(", "[", "[", "str", "(", "p", ")", "for", "p", "in", "list", "(", "p_dict", ".", "values", "(", ")", ")", "]", "for", "instr", ",", "p_dict", "in", "new_values", ".", "items", "(", ")", "]", ")", ".", "flatten", "(", ")", ")", ")", "self", ".", "probe_file", ".", "write", "(", "'{:s}\\n'", ".", "format", "(", "data", ")", ")" ]
update the probe tree
[ "update", "the", "probe", "tree" ]
67482e5157fcd1c40705e5c2cacfb93564703ed0
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py#L1022-L1047
train
LISE-B26/pylabcontrol
build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py
MainWindow.update_script_from_item
def update_script_from_item(self, item): """ updates the script based on the information provided in item Args: script: script to be updated item: B26QTreeItem that contains the new settings of the script """ script, path_to_script, script_item = item.get_script() # build dictionary # get full information from script dictator = list(script_item.to_dict().values())[0] # there is only one item in the dictionary for instrument in list(script.instruments.keys()): # update instrument script.instruments[instrument]['settings'] = dictator[instrument]['settings'] # remove instrument del dictator[instrument] for sub_script_name in list(script.scripts.keys()): sub_script_item = script_item.get_subscript(sub_script_name) self.update_script_from_item(sub_script_item) del dictator[sub_script_name] script.update(dictator) # update datefolder path script.data_path = self.gui_settings['data_folder']
python
def update_script_from_item(self, item): """ updates the script based on the information provided in item Args: script: script to be updated item: B26QTreeItem that contains the new settings of the script """ script, path_to_script, script_item = item.get_script() # build dictionary # get full information from script dictator = list(script_item.to_dict().values())[0] # there is only one item in the dictionary for instrument in list(script.instruments.keys()): # update instrument script.instruments[instrument]['settings'] = dictator[instrument]['settings'] # remove instrument del dictator[instrument] for sub_script_name in list(script.scripts.keys()): sub_script_item = script_item.get_subscript(sub_script_name) self.update_script_from_item(sub_script_item) del dictator[sub_script_name] script.update(dictator) # update datefolder path script.data_path = self.gui_settings['data_folder']
[ "def", "update_script_from_item", "(", "self", ",", "item", ")", ":", "script", ",", "path_to_script", ",", "script_item", "=", "item", ".", "get_script", "(", ")", "# build dictionary", "# get full information from script", "dictator", "=", "list", "(", "script_item", ".", "to_dict", "(", ")", ".", "values", "(", ")", ")", "[", "0", "]", "# there is only one item in the dictionary", "for", "instrument", "in", "list", "(", "script", ".", "instruments", ".", "keys", "(", ")", ")", ":", "# update instrument", "script", ".", "instruments", "[", "instrument", "]", "[", "'settings'", "]", "=", "dictator", "[", "instrument", "]", "[", "'settings'", "]", "# remove instrument", "del", "dictator", "[", "instrument", "]", "for", "sub_script_name", "in", "list", "(", "script", ".", "scripts", ".", "keys", "(", ")", ")", ":", "sub_script_item", "=", "script_item", ".", "get_subscript", "(", "sub_script_name", ")", "self", ".", "update_script_from_item", "(", "sub_script_item", ")", "del", "dictator", "[", "sub_script_name", "]", "script", ".", "update", "(", "dictator", ")", "# update datefolder path", "script", ".", "data_path", "=", "self", ".", "gui_settings", "[", "'data_folder'", "]" ]
updates the script based on the information provided in item Args: script: script to be updated item: B26QTreeItem that contains the new settings of the script
[ "updates", "the", "script", "based", "on", "the", "information", "provided", "in", "item" ]
67482e5157fcd1c40705e5c2cacfb93564703ed0
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/gui/windows_and_widgets/main_window.py#L1049-L1079
train
datamachine/twx
twx/twx.py
TWXBotApi.message_search
def message_search(self, text, on_success, peer=None, min_date=None, max_date=None, max_id=None, offset=0, limit=255): """ Unsupported in the Bot API """ raise TWXUnsupportedMethod()
python
def message_search(self, text, on_success, peer=None, min_date=None, max_date=None, max_id=None, offset=0, limit=255): """ Unsupported in the Bot API """ raise TWXUnsupportedMethod()
[ "def", "message_search", "(", "self", ",", "text", ",", "on_success", ",", "peer", "=", "None", ",", "min_date", "=", "None", ",", "max_date", "=", "None", ",", "max_id", "=", "None", ",", "offset", "=", "0", ",", "limit", "=", "255", ")", ":", "raise", "TWXUnsupportedMethod", "(", ")" ]
Unsupported in the Bot API
[ "Unsupported", "in", "the", "Bot", "API" ]
d9633f12f3647b1e54ba87b70b39df3b7e02b4eb
https://github.com/datamachine/twx/blob/d9633f12f3647b1e54ba87b70b39df3b7e02b4eb/twx/twx.py#L758-L762
train
johnnoone/json-spec
src/jsonspec/operations/bases.py
Target.remove
def remove(self, pointer): """Remove element from sequence, member from mapping. :param pointer: the path to search in :return: resolved document :rtype: Target """ doc = deepcopy(self.document) parent, obj = None, doc try: # fetching for token in Pointer(pointer): parent, obj = obj, token.extract(obj, bypass_ref=True) # removing if isinstance(parent, Mapping): del parent[token] if isinstance(parent, MutableSequence): parent.pop(int(token)) except Exception as error: raise Error(*error.args) return Target(doc)
python
def remove(self, pointer): """Remove element from sequence, member from mapping. :param pointer: the path to search in :return: resolved document :rtype: Target """ doc = deepcopy(self.document) parent, obj = None, doc try: # fetching for token in Pointer(pointer): parent, obj = obj, token.extract(obj, bypass_ref=True) # removing if isinstance(parent, Mapping): del parent[token] if isinstance(parent, MutableSequence): parent.pop(int(token)) except Exception as error: raise Error(*error.args) return Target(doc)
[ "def", "remove", "(", "self", ",", "pointer", ")", ":", "doc", "=", "deepcopy", "(", "self", ".", "document", ")", "parent", ",", "obj", "=", "None", ",", "doc", "try", ":", "# fetching", "for", "token", "in", "Pointer", "(", "pointer", ")", ":", "parent", ",", "obj", "=", "obj", ",", "token", ".", "extract", "(", "obj", ",", "bypass_ref", "=", "True", ")", "# removing", "if", "isinstance", "(", "parent", ",", "Mapping", ")", ":", "del", "parent", "[", "token", "]", "if", "isinstance", "(", "parent", ",", "MutableSequence", ")", ":", "parent", ".", "pop", "(", "int", "(", "token", ")", ")", "except", "Exception", "as", "error", ":", "raise", "Error", "(", "*", "error", ".", "args", ")", "return", "Target", "(", "doc", ")" ]
Remove element from sequence, member from mapping. :param pointer: the path to search in :return: resolved document :rtype: Target
[ "Remove", "element", "from", "sequence", "member", "from", "mapping", "." ]
f91981724cea0c366bd42a6670eb07bbe31c0e0c
https://github.com/johnnoone/json-spec/blob/f91981724cea0c366bd42a6670eb07bbe31c0e0c/src/jsonspec/operations/bases.py#L47-L70
train
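The parent/child bookkeeping behind remove, sketched with plain dicts and lists and pre-split pointer tokens (no jsonspec imports; "/foo/1" is an arbitrary example):

doc = {'foo': ['bar', 'baz'], 'qux': 1}
tokens = ['foo', '1']              # pre-split form of pointer "/foo/1"
parent, obj = None, doc
for token in tokens:
    key = int(token) if isinstance(obj, list) else token
    parent, obj = obj, obj[key]    # keep one step behind the cursor
parent.pop(int(tokens[-1]))        # drop 'baz' from the parent list
assert doc == {'foo': ['bar'], 'qux': 1}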
PeerAssets/pypeerassets
pypeerassets/provider/common.py
Provider._netname
def _netname(name: str) -> dict: '''resolve network name, required because some providers use shortnames and others use longnames.''' try: long = net_query(name).name short = net_query(name).shortname except AttributeError: raise UnsupportedNetwork('''This blockchain network is not supported by the pypeerassets, check networks.py for list of supported networks.''') return {'long': long, 'short': short}
python
def _netname(name: str) -> dict: '''resolve network name, required because some providers use shortnames and others use longnames.''' try: long = net_query(name).name short = net_query(name).shortname except AttributeError: raise UnsupportedNetwork('''This blockchain network is not supported by the pypeerassets, check networks.py for list of supported networks.''') return {'long': long, 'short': short}
[ "def", "_netname", "(", "name", ":", "str", ")", "->", "dict", ":", "try", ":", "long", "=", "net_query", "(", "name", ")", ".", "name", "short", "=", "net_query", "(", "name", ")", ".", "shortname", "except", "AttributeError", ":", "raise", "UnsupportedNetwork", "(", "'''This blockchain network is not supported by the pypeerassets, check networks.py for list of supported networks.'''", ")", "return", "{", "'long'", ":", "long", ",", "'short'", ":", "short", "}" ]
resolve network name, required because some providers use shortnames and others use longnames.
[ "resolute", "network", "name", "required", "because", "some", "providers", "use", "shortnames", "and", "other", "use", "longnames", "." ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/common.py#L21-L32
train
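A self-contained sketch of the long/short lookup this helper performs; NetRecord stands in for the entries net_query reads from networks.py, and the 'ppc' shortname is an assumption:

from collections import namedtuple

NetRecord = namedtuple('NetRecord', 'name shortname')
networks = {'peercoin': NetRecord('peercoin', 'ppc')}  # assumed entry

def netname(name):
    rec = networks[name]  # net_query would raise on unknown names
    return {'long': rec.name, 'short': rec.shortname}

assert netname('peercoin') == {'long': 'peercoin', 'short': 'ppc'}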
PeerAssets/pypeerassets
pypeerassets/provider/common.py
Provider.sendrawtransaction
def sendrawtransaction(cls, rawtxn: str) -> str: '''sendrawtransaction remote API''' if cls.is_testnet: url = 'https://testnet-explorer.peercoin.net/api/sendrawtransaction?hex={0}'.format(rawtxn) else: url = 'https://explorer.peercoin.net/api/sendrawtransaction?hex={0}'.format(rawtxn) resp = urllib.request.urlopen(url) return resp.read().decode('utf-8')
python
def sendrawtransaction(cls, rawtxn: str) -> str: '''sendrawtransaction remote API''' if cls.is_testnet: url = 'https://testnet-explorer.peercoin.net/api/sendrawtransaction?hex={0}'.format(rawtxn) else: url = 'https://explorer.peercoin.net/api/sendrawtransaction?hex={0}'.format(rawtxn) resp = urllib.request.urlopen(url) return resp.read().decode('utf-8')
[ "def", "sendrawtransaction", "(", "cls", ",", "rawtxn", ":", "str", ")", "->", "str", ":", "if", "cls", ".", "is_testnet", ":", "url", "=", "'https://testnet-explorer.peercoin.net/api/sendrawtransaction?hex={0}'", ".", "format", "(", "rawtxn", ")", "else", ":", "url", "=", "'https://explorer.peercoin.net/api/sendrawtransaction?hex={0}'", ".", "format", "(", "rawtxn", ")", "resp", "=", "urllib", ".", "request", ".", "urlopen", "(", "url", ")", "return", "resp", ".", "read", "(", ")", ".", "decode", "(", "'utf-8'", ")" ]
sendrawtransaction remote API
[ "sendrawtransaction", "remote", "API" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/common.py#L62-L71
train
PeerAssets/pypeerassets
pypeerassets/provider/common.py
Provider.validateaddress
def validateaddress(self, address: str) -> bool: "Returns True if the passed address is valid, False otherwise." try: Address.from_string(address, self.network_properties) except InvalidAddress: return False return True
python
def validateaddress(self, address: str) -> bool: "Returns True if the passed address is valid, False otherwise." try: Address.from_string(address, self.network_properties) except InvalidAddress: return False return True
[ "def", "validateaddress", "(", "self", ",", "address", ":", "str", ")", "->", "bool", ":", "try", ":", "Address", ".", "from_string", "(", "address", ",", "self", ".", "network_properties", ")", "except", "InvalidAddress", ":", "return", "False", "return", "True" ]
Returns True if the passed address is valid, False otherwise.
[ "Returns", "True", "if", "the", "passed", "address", "is", "valid", "False", "otherwise", "." ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/provider/common.py#L116-L124
train
bennylope/smartystreets.py
smartystreets/async.py
chunker
def chunker(l, n): """ Generates n-sized chunks from the list l """ for i in ranger(0, len(l), n): yield l[i:i + n]
python
def chunker(l, n): """ Generates n-sized chunks from the list l """ for i in ranger(0, len(l), n): yield l[i:i + n]
[ "def", "chunker", "(", "l", ",", "n", ")", ":", "for", "i", "in", "ranger", "(", "0", ",", "len", "(", "l", ")", ",", "n", ")", ":", "yield", "l", "[", "i", ":", "i", "+", "n", "]" ]
Generates n-sized chunks from the list l
[ "Generates", "n", "-", "sized", "chunks", "from", "the", "list", "l" ]
f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69
https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/async.py#L24-L29
train
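chunker's slicing logic, exercised standalone (ranger is presumably the module's python 2/3 range alias; plain range behaves identically here):

def chunks(l, n):
    """Yield n-sized slices of l, the last one possibly shorter."""
    for i in range(0, len(l), n):
        yield l[i:i + n]

assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]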
bennylope/smartystreets.py
smartystreets/async.py
AsyncClient.post
def post(self, endpoint, data, parallelism=5): """ Executes most of the request. The parallelism parameter is useful to avoid swamping the API service with calls. Thus the entire set of requests won't be all made at once, but in chunked groups. :param endpoint: string indicating the URL component to call :param data: the JSON ready data to submit (list of dictionaries of addresses) :param parallelism: number of simultaneous requests to make. :return: a tuple of an AddressCollection and a dictionary of the response codes and the count for each. """ headers = { "Content-Type": "application/json", "Accept": "application/json", "x-standardize-only": "true" if self.standardize else "false", "x-include-invalid": "true" if self.invalid else "false", "x-accept-keypair": "true" if self.accept_keypair else "false", } if not self.logging: headers["x-suppress-logging"] = "false" params = {"auth-id": self.auth_id, "auth-token": self.auth_token} url = self.BASE_URL + endpoint rs = ( grequests.post( url=url, data=json.dumps(stringify(data_chunk)), params=params, headers=headers, ) for data_chunk in chunker(data, 100) ) responses = grequests.imap(rs, size=parallelism) status_codes = {} addresses = AddressCollection([]) for response in responses: if response.status_code not in status_codes.keys(): status_codes[response.status_code] = 1 else: status_codes[response.status_code] += 1 if response.status_code == 200: addresses[0:0] = AddressCollection( response.json() ) # Fast list insertion # If an auth error is raised, it's safe to say that this is # going to affect every request, so raise the exception immediately.. elif response.status_code == 401: raise ERROR_CODES[401] # The return value or exception is simple if it is consistent. if len(status_codes.keys()) == 1: if 200 in status_codes: return addresses, status_codes else: raise ERROR_CODES.get(status_codes.keys()[0], SmartyStreetsError) # For any other mix not really sure of the best way to handle it. If it's a mix of 200 # and error codes, then returning the resultant addresses and status code dictionary # seems pretty sensible. But if it's a mix of all error codes (could be a mix of payment # error, input error, potentially server error) this will probably require careful # checking in the code using this interface. return addresses, status_codes
python
def post(self, endpoint, data, parallelism=5): """ Executes most of the request. The parallelism parameter is useful to avoid swamping the API service with calls. Thus the entire set of requests won't be all made at once, but in chunked groups. :param endpoint: string indicating the URL component to call :param data: the JSON ready data to submit (list of dictionaries of addresses) :param parallelism: number of simultaneous requests to make. :return: a tuple of an AddressCollection and a dictionary of the response codes and the count for each. """ headers = { "Content-Type": "application/json", "Accept": "application/json", "x-standardize-only": "true" if self.standardize else "false", "x-include-invalid": "true" if self.invalid else "false", "x-accept-keypair": "true" if self.accept_keypair else "false", } if not self.logging: headers["x-suppress-logging"] = "false" params = {"auth-id": self.auth_id, "auth-token": self.auth_token} url = self.BASE_URL + endpoint rs = ( grequests.post( url=url, data=json.dumps(stringify(data_chunk)), params=params, headers=headers, ) for data_chunk in chunker(data, 100) ) responses = grequests.imap(rs, size=parallelism) status_codes = {} addresses = AddressCollection([]) for response in responses: if response.status_code not in status_codes.keys(): status_codes[response.status_code] = 1 else: status_codes[response.status_code] += 1 if response.status_code == 200: addresses[0:0] = AddressCollection( response.json() ) # Fast list insertion # If an auth error is raised, it's safe to say that this is # going to affect every request, so raise the exception immediately.. elif response.status_code == 401: raise ERROR_CODES[401] # The return value or exception is simple if it is consistent. if len(status_codes.keys()) == 1: if 200 in status_codes: return addresses, status_codes else: raise ERROR_CODES.get(status_codes.keys()[0], SmartyStreetsError) # For any other mix not really sure of the best way to handle it. If it's a mix of 200 # and error codes, then returning the resultant addresses and status code dictionary # seems pretty sensible. But if it's a mix of all error codes (could be a mix of payment # error, input error, potentially server error) this will probably require careful # checking in the code using this interface. return addresses, status_codes
[ "def", "post", "(", "self", ",", "endpoint", ",", "data", ",", "parallelism", "=", "5", ")", ":", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/json\"", ",", "\"Accept\"", ":", "\"application/json\"", ",", "\"x-standardize-only\"", ":", "\"true\"", "if", "self", ".", "standardize", "else", "\"false\"", ",", "\"x-include-invalid\"", ":", "\"true\"", "if", "self", ".", "invalid", "else", "\"false\"", ",", "\"x-accept-keypair\"", ":", "\"true\"", "if", "self", ".", "accept_keypair", "else", "\"false\"", ",", "}", "if", "not", "self", ".", "logging", ":", "headers", "[", "\"x-suppress-logging\"", "]", "=", "\"false\"", "params", "=", "{", "\"auth-id\"", ":", "self", ".", "auth_id", ",", "\"auth-token\"", ":", "self", ".", "auth_token", "}", "url", "=", "self", ".", "BASE_URL", "+", "endpoint", "rs", "=", "(", "grequests", ".", "post", "(", "url", "=", "url", ",", "data", "=", "json", ".", "dumps", "(", "stringify", "(", "data_chunk", ")", ")", ",", "params", "=", "params", ",", "headers", "=", "headers", ",", ")", "for", "data_chunk", "in", "chunker", "(", "data", ",", "100", ")", ")", "responses", "=", "grequests", ".", "imap", "(", "rs", ",", "size", "=", "parallelism", ")", "status_codes", "=", "{", "}", "addresses", "=", "AddressCollection", "(", "[", "]", ")", "for", "response", "in", "responses", ":", "if", "response", ".", "status_code", "not", "in", "status_codes", ".", "keys", "(", ")", ":", "status_codes", "[", "response", ".", "status_code", "]", "=", "1", "else", ":", "status_codes", "[", "response", ".", "status_code", "]", "+=", "1", "if", "response", ".", "status_code", "==", "200", ":", "addresses", "[", "0", ":", "0", "]", "=", "AddressCollection", "(", "response", ".", "json", "(", ")", ")", "# Fast list insertion", "# If an auth error is raised, it's safe to say that this is", "# going to affect every request, so raise the exception immediately..", "elif", "response", ".", "status_code", "==", "401", ":", "raise", "ERROR_CODES", "[", "401", "]", "# The return value or exception is simple if it is consistent.", "if", "len", "(", "status_codes", ".", "keys", "(", ")", ")", "==", "1", ":", "if", "200", "in", "status_codes", ":", "return", "addresses", ",", "status_codes", "else", ":", "raise", "ERROR_CODES", ".", "get", "(", "status_codes", ".", "keys", "(", ")", "[", "0", "]", ",", "SmartyStreetsError", ")", "# For any other mix not really sure of the best way to handle it. If it's a mix of 200", "# and error codes, then returning the resultant addresses and status code dictionary", "# seems pretty sensible. But if it's a mix of all error codes (could be a mix of payment", "# error, input error, potentially server error) this will probably require careful", "# checking in the code using this interface.", "return", "addresses", ",", "status_codes" ]
Executes most of the request. The parallelism parameter is useful to avoid swamping the API service with calls. Thus the entire set of requests won't be all made at once, but in chunked groups. :param endpoint: string indicating the URL component to call :param data: the JSON ready data to submit (list of dictionaries of addresses) :param parallelism: number of simultaneous requests to make. :return: a tuple of an AddressCollection and a dictionary of the response codes and the count for each.
[ "Executes", "most", "of", "the", "request", "." ]
f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69
https://github.com/bennylope/smartystreets.py/blob/f45e37dd52ea7cec8ed43ce2b64724beb6dbbb69/smartystreets/async.py#L52-L120
train
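The status-code tally in post is a hand-rolled counter over per-chunk responses; the same bookkeeping with collections.Counter and made-up codes:

from collections import Counter

codes = [200, 200, 401, 200]       # hypothetical per-chunk status codes
status_codes = Counter(codes)
assert status_codes == Counter({200: 3, 401: 1})
# a mixed tally (len > 1) is the case the method's final comment warns about
assert len(status_codes.keys()) != 1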
idlesign/django-siteblocks
siteblocks/siteblocksapp.py
SiteBlocks._cache_init
def _cache_init(self): """Initializes local cache from Django cache.""" cache_ = cache.get(self.CACHE_KEY) if cache_ is None: cache_ = defaultdict(dict) self._cache = cache_
python
def _cache_init(self): """Initializes local cache from Django cache.""" cache_ = cache.get(self.CACHE_KEY) if cache_ is None: cache_ = defaultdict(dict) self._cache = cache_
[ "def", "_cache_init", "(", "self", ")", ":", "cache_", "=", "cache", ".", "get", "(", "self", ".", "CACHE_KEY", ")", "if", "cache_", "is", "None", ":", "cache_", "=", "defaultdict", "(", "dict", ")", "self", ".", "_cache", "=", "cache_" ]
Initializes local cache from Django cache.
[ "Initializes", "local", "cache", "from", "Django", "cache", "." ]
7fdb3800f7330dd4143d55416393d83d01a09f73
https://github.com/idlesign/django-siteblocks/blob/7fdb3800f7330dd4143d55416393d83d01a09f73/siteblocks/siteblocksapp.py#L92-L97
train
idlesign/django-siteblocks
siteblocks/siteblocksapp.py
SiteBlocks.get_contents_static
def get_contents_static(self, block_alias, context): """Returns contents of a static block.""" if 'request' not in context: # No use in further actions as we won't ever know current URL. return '' current_url = context['request'].path # Resolve current view name to support view names as block URLs. try: resolver_match = resolve(current_url) namespace = '' if resolver_match.namespaces: # More than one namespace, really? Hmm. namespace = resolver_match.namespaces[0] resolved_view_name = ':%s:%s' % (namespace, resolver_match.url_name) except Resolver404: resolved_view_name = None self._cache_init() cache_entry_name = cache_get_key(block_alias) siteblocks_static = self._cache_get(cache_entry_name) if not siteblocks_static: blocks = Block.objects.filter(alias=block_alias, hidden=False).only('url', 'contents') siteblocks_static = [defaultdict(list), defaultdict(list)] for block in blocks: if block.url == '*': url_re = block.url elif block.url.startswith(':'): url_re = block.url # Normalize URL name to include namespace. if url_re.count(':') == 1: url_re = ':%s' % url_re else: url_re = re.compile(r'%s' % block.url) if block.access_guest: siteblocks_static[self.IDX_GUEST][url_re].append(block.contents) elif block.access_loggedin: siteblocks_static[self.IDX_AUTH][url_re].append(block.contents) else: siteblocks_static[self.IDX_GUEST][url_re].append(block.contents) siteblocks_static[self.IDX_AUTH][url_re].append(block.contents) self._cache_set(cache_entry_name, siteblocks_static) self._cache_save() user = getattr(context['request'], 'user', None) is_authenticated = getattr(user, 'is_authenticated', False) if not DJANGO_2: is_authenticated = is_authenticated() if is_authenticated: lookup_area = siteblocks_static[self.IDX_AUTH] else: lookup_area = siteblocks_static[self.IDX_GUEST] static_block_contents = '' if '*' in lookup_area: static_block_contents = choice(lookup_area['*']) elif resolved_view_name in lookup_area: static_block_contents = choice(lookup_area[resolved_view_name]) else: for url, contents in lookup_area.items(): if url.match(current_url): static_block_contents = choice(contents) break return static_block_contents
python
def get_contents_static(self, block_alias, context): """Returns contents of a static block.""" if 'request' not in context: # No use in further actions as we won't ever know current URL. return '' current_url = context['request'].path # Resolve current view name to support view names as block URLs. try: resolver_match = resolve(current_url) namespace = '' if resolver_match.namespaces: # More than one namespace, really? Hmm. namespace = resolver_match.namespaces[0] resolved_view_name = ':%s:%s' % (namespace, resolver_match.url_name) except Resolver404: resolved_view_name = None self._cache_init() cache_entry_name = cache_get_key(block_alias) siteblocks_static = self._cache_get(cache_entry_name) if not siteblocks_static: blocks = Block.objects.filter(alias=block_alias, hidden=False).only('url', 'contents') siteblocks_static = [defaultdict(list), defaultdict(list)] for block in blocks: if block.url == '*': url_re = block.url elif block.url.startswith(':'): url_re = block.url # Normalize URL name to include namespace. if url_re.count(':') == 1: url_re = ':%s' % url_re else: url_re = re.compile(r'%s' % block.url) if block.access_guest: siteblocks_static[self.IDX_GUEST][url_re].append(block.contents) elif block.access_loggedin: siteblocks_static[self.IDX_AUTH][url_re].append(block.contents) else: siteblocks_static[self.IDX_GUEST][url_re].append(block.contents) siteblocks_static[self.IDX_AUTH][url_re].append(block.contents) self._cache_set(cache_entry_name, siteblocks_static) self._cache_save() user = getattr(context['request'], 'user', None) is_authenticated = getattr(user, 'is_authenticated', False) if not DJANGO_2: is_authenticated = is_authenticated() if is_authenticated: lookup_area = siteblocks_static[self.IDX_AUTH] else: lookup_area = siteblocks_static[self.IDX_GUEST] static_block_contents = '' if '*' in lookup_area: static_block_contents = choice(lookup_area['*']) elif resolved_view_name in lookup_area: static_block_contents = choice(lookup_area[resolved_view_name]) else: for url, contents in lookup_area.items(): if url.match(current_url): static_block_contents = choice(contents) break return static_block_contents
[ "def", "get_contents_static", "(", "self", ",", "block_alias", ",", "context", ")", ":", "if", "'request'", "not", "in", "context", ":", "# No use in further actions as we won't ever know current URL.", "return", "''", "current_url", "=", "context", "[", "'request'", "]", ".", "path", "# Resolve current view name to support view names as block URLs.", "try", ":", "resolver_match", "=", "resolve", "(", "current_url", ")", "namespace", "=", "''", "if", "resolver_match", ".", "namespaces", ":", "# More than one namespace, really? Hmm.", "namespace", "=", "resolver_match", ".", "namespaces", "[", "0", "]", "resolved_view_name", "=", "':%s:%s'", "%", "(", "namespace", ",", "resolver_match", ".", "url_name", ")", "except", "Resolver404", ":", "resolved_view_name", "=", "None", "self", ".", "_cache_init", "(", ")", "cache_entry_name", "=", "cache_get_key", "(", "block_alias", ")", "siteblocks_static", "=", "self", ".", "_cache_get", "(", "cache_entry_name", ")", "if", "not", "siteblocks_static", ":", "blocks", "=", "Block", ".", "objects", ".", "filter", "(", "alias", "=", "block_alias", ",", "hidden", "=", "False", ")", ".", "only", "(", "'url'", ",", "'contents'", ")", "siteblocks_static", "=", "[", "defaultdict", "(", "list", ")", ",", "defaultdict", "(", "list", ")", "]", "for", "block", "in", "blocks", ":", "if", "block", ".", "url", "==", "'*'", ":", "url_re", "=", "block", ".", "url", "elif", "block", ".", "url", ".", "startswith", "(", "':'", ")", ":", "url_re", "=", "block", ".", "url", "# Normalize URL name to include namespace.", "if", "url_re", ".", "count", "(", "':'", ")", "==", "1", ":", "url_re", "=", "':%s'", "%", "url_re", "else", ":", "url_re", "=", "re", ".", "compile", "(", "r'%s'", "%", "block", ".", "url", ")", "if", "block", ".", "access_guest", ":", "siteblocks_static", "[", "self", ".", "IDX_GUEST", "]", "[", "url_re", "]", ".", "append", "(", "block", ".", "contents", ")", "elif", "block", ".", "access_loggedin", ":", "siteblocks_static", "[", "self", ".", "IDX_AUTH", "]", "[", "url_re", "]", ".", "append", "(", "block", ".", "contents", ")", "else", ":", "siteblocks_static", "[", "self", ".", "IDX_GUEST", "]", "[", "url_re", "]", ".", "append", "(", "block", ".", "contents", ")", "siteblocks_static", "[", "self", ".", "IDX_AUTH", "]", "[", "url_re", "]", ".", "append", "(", "block", ".", "contents", ")", "self", ".", "_cache_set", "(", "cache_entry_name", ",", "siteblocks_static", ")", "self", ".", "_cache_save", "(", ")", "user", "=", "getattr", "(", "context", "[", "'request'", "]", ",", "'user'", ",", "None", ")", "is_authenticated", "=", "getattr", "(", "user", ",", "'is_authenticated'", ",", "False", ")", "if", "not", "DJANGO_2", ":", "is_authenticated", "=", "is_authenticated", "(", ")", "if", "is_authenticated", ":", "lookup_area", "=", "siteblocks_static", "[", "self", ".", "IDX_AUTH", "]", "else", ":", "lookup_area", "=", "siteblocks_static", "[", "self", ".", "IDX_GUEST", "]", "static_block_contents", "=", "''", "if", "'*'", "in", "lookup_area", ":", "static_block_contents", "=", "choice", "(", "lookup_area", "[", "'*'", "]", ")", "elif", "resolved_view_name", "in", "lookup_area", ":", "static_block_contents", "=", "choice", "(", "lookup_area", "[", "resolved_view_name", "]", ")", "else", ":", "for", "url", ",", "contents", "in", "lookup_area", ".", "items", "(", ")", ":", "if", "url", ".", "match", "(", "current_url", ")", ":", "static_block_contents", "=", "choice", "(", "contents", ")", "break", "return", "static_block_contents" ]
Returns contents of a static block.
[ "Returns", "contents", "of", "a", "static", "block", "." ]
7fdb3800f7330dd4143d55416393d83d01a09f73
https://github.com/idlesign/django-siteblocks/blob/7fdb3800f7330dd4143d55416393d83d01a09f73/siteblocks/siteblocksapp.py#L114-L188
train
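The lookup above distinguishes three shapes of Block.url. A small standalone sketch of those shapes (the concrete values are invented for illustration):

import re

wildcard = '*'                          # served on every page
view_name = ':articles:detail'          # compared to the resolved view name
pattern = re.compile(r'^/news/\d+/$')   # anything else: a regex over request.path

print(bool(pattern.match('/news/17/')))  # True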
idlesign/django-siteblocks
siteblocks/siteblocksapp.py
SiteBlocks.get_contents_dynamic
def get_contents_dynamic(self, block_alias, context):
        """Returns contents of a dynamic block."""
        dynamic_block = get_dynamic_blocks().get(block_alias, [])
        if not dynamic_block:
            return ''

        dynamic_block = choice(dynamic_block)
        return dynamic_block(block_alias=block_alias, block_context=context)
python
def get_contents_dynamic(self, block_alias, context):
        """Returns contents of a dynamic block."""
        dynamic_block = get_dynamic_blocks().get(block_alias, [])
        if not dynamic_block:
            return ''

        dynamic_block = choice(dynamic_block)
        return dynamic_block(block_alias=block_alias, block_context=context)
[ "def", "get_contents_dynamic", "(", "self", ",", "block_alias", ",", "context", ")", ":", "dynamic_block", "=", "get_dynamic_blocks", "(", ")", ".", "get", "(", "block_alias", ",", "[", "]", ")", "if", "not", "dynamic_block", ":", "return", "''", "dynamic_block", "=", "choice", "(", "dynamic_block", ")", "return", "dynamic_block", "(", "block_alias", "=", "block_alias", ",", "block_context", "=", "context", ")" ]
Returns contents of a dynamic block.
[ "Returns", "contents", "of", "a", "dynamic", "block", "." ]
7fdb3800f7330dd4143d55416393d83d01a09f73
https://github.com/idlesign/django-siteblocks/blob/7fdb3800f7330dd4143d55416393d83d01a09f73/siteblocks/siteblocksapp.py#L190-L197
train
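A dynamic block is any callable invoked with the keyword arguments shown in get_contents_dynamic. A hedged registration sketch — the helper's import path is an assumption, but the call signature is taken from the method itself:

from siteblocks.siteblocksapp import register_dynamic_block  # assumed location

def quote_of_the_day(**kwargs):
    # kwargs carries 'block_alias' and 'block_context'
    return 'Simplicity is the soul of efficiency.'

register_dynamic_block('quotes', quote_of_the_day)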
hyperledger-archives/indy-ledger
ledger/tree_hasher.py
TreeHasher.hash_full_tree
def hash_full_tree(self, leaves):
        """Hash a set of leaves representing a valid full tree."""
        root_hash, hashes = self._hash_full(leaves, 0, len(leaves))
        assert len(hashes) == count_bits_set(len(leaves))
        assert (self._hash_fold(hashes) == root_hash if hashes else
                root_hash == self.hash_empty())
        return root_hash
python
def hash_full_tree(self, leaves):
        """Hash a set of leaves representing a valid full tree."""
        root_hash, hashes = self._hash_full(leaves, 0, len(leaves))
        assert len(hashes) == count_bits_set(len(leaves))
        assert (self._hash_fold(hashes) == root_hash if hashes else
                root_hash == self.hash_empty())
        return root_hash
[ "def", "hash_full_tree", "(", "self", ",", "leaves", ")", ":", "root_hash", ",", "hashes", "=", "self", ".", "_hash_full", "(", "leaves", ",", "0", ",", "len", "(", "leaves", ")", ")", "assert", "len", "(", "hashes", ")", "==", "count_bits_set", "(", "len", "(", "leaves", ")", ")", "assert", "(", "self", ".", "_hash_fold", "(", "hashes", ")", "==", "root_hash", "if", "hashes", "else", "root_hash", "==", "self", ".", "hash_empty", "(", ")", ")", "return", "root_hash" ]
Hash a set of leaves representing a valid full tree.
[ "Hash", "a", "set", "of", "leaves", "representing", "a", "valid", "full", "tree", "." ]
7210c3b288e07f940eddad09b1dfc6a56be846df
https://github.com/hyperledger-archives/indy-ledger/blob/7210c3b288e07f940eddad09b1dfc6a56be846df/ledger/tree_hasher.py#L63-L69
train
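TreeHasher presumably follows the RFC 6962 Merkle-tree convention used by Certificate Transparency. A minimal self-contained sketch of the same root computation (the 0x00/0x01 prefixes are the RFC's leaf/node domain separation, not read from this repo):

import hashlib

def hash_empty():
    return hashlib.sha256(b'').digest()

def hash_leaf(leaf):
    return hashlib.sha256(b'\x00' + leaf).digest()   # leaves are prefixed 0x00

def hash_children(left, right):
    return hashlib.sha256(b'\x01' + left + right).digest()  # inner nodes 0x01

def merkle_root(leaves):
    """RFC 6962 tree hash of a list of byte strings."""
    if not leaves:
        return hash_empty()
    if len(leaves) == 1:
        return hash_leaf(leaves[0])
    k = 1
    while k * 2 < len(leaves):  # largest power of two strictly below len(leaves)
        k *= 2
    return hash_children(merkle_root(leaves[:k]), merkle_root(leaves[k:]))

print(merkle_root([b'a', b'b', b'c']).hex())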
lreis2415/PyGeoC
examples/ex06_model_performace_index.py
cal_model_performance
def cal_model_performance(obsl, siml):
    """Calculate model performance indexes."""
    nse = MathClass.nashcoef(obsl, siml)
    r2 = MathClass.rsquare(obsl, siml)
    rmse = MathClass.rmse(obsl, siml)
    pbias = MathClass.pbias(obsl, siml)
    rsr = MathClass.rsr(obsl, siml)
    print('NSE: %.2f, R-square: %.2f, PBIAS: %.2f%%, RMSE: %.2f, RSR: %.2f' %
          (nse, r2, pbias, rmse, rsr))
python
def cal_model_performance(obsl, siml):
    """Calculate model performance indexes."""
    nse = MathClass.nashcoef(obsl, siml)
    r2 = MathClass.rsquare(obsl, siml)
    rmse = MathClass.rmse(obsl, siml)
    pbias = MathClass.pbias(obsl, siml)
    rsr = MathClass.rsr(obsl, siml)
    print('NSE: %.2f, R-square: %.2f, PBIAS: %.2f%%, RMSE: %.2f, RSR: %.2f' %
          (nse, r2, pbias, rmse, rsr))
[ "def", "cal_model_performance", "(", "obsl", ",", "siml", ")", ":", "nse", "=", "MathClass", ".", "nashcoef", "(", "obsl", ",", "siml", ")", "r2", "=", "MathClass", ".", "rsquare", "(", "obsl", ",", "siml", ")", "rmse", "=", "MathClass", ".", "rmse", "(", "obsl", ",", "siml", ")", "pbias", "=", "MathClass", ".", "pbias", "(", "obsl", ",", "siml", ")", "rsr", "=", "MathClass", ".", "rsr", "(", "obsl", ",", "siml", ")", "print", "(", "'NSE: %.2f, R-square: %.2f, PBIAS: %.2f%%, RMSE: %.2f, RSR: %.2f'", "%", "(", "nse", ",", "r2", ",", "pbias", ",", "rmse", ",", "rsr", ")", ")" ]
Calculate model performance indexes.
[ "Calculate", "model", "performance", "indexes", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/examples/ex06_model_performace_index.py#L7-L15
train
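MathClass wraps the standard hydrological metrics; assuming the textbook definitions of NSE and RMSE, a plain-numpy sketch of two of them:

import numpy as np

def nse(obs, sim):
    obs, sim = np.asarray(obs, float), np.asarray(sim, float)
    # Nash-Sutcliffe efficiency: 1 - SSE / variance of observations
    return 1.0 - np.sum((obs - sim) ** 2) / np.sum((obs - obs.mean()) ** 2)

def rmse(obs, sim):
    obs, sim = np.asarray(obs, float), np.asarray(sim, float)
    return float(np.sqrt(np.mean((obs - sim) ** 2)))

obs = [2.0, 3.5, 4.1, 5.0]
sim = [2.2, 3.4, 4.0, 5.3]
print('NSE: %.2f, RMSE: %.2f' % (nse(obs, sim), rmse(obs, sim)))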
nmdp-bioinformatics/SeqAnn
seqann/gfe.py
GFE.load_features
def load_features(self):
        """
        Loads all the known features from the feature service
        """
        # Loading all loci that
        # are in self.loci variable defined
        # when the pyGFE object is created
        for loc in self.loci:
            if self.verbose:
                self.logger.info(self.logname + "Loading features for " + loc)

            # Loading all features for loc from feature service
            self.all_feats.update({loc: self.locus_features(loc)})

            if self.verbose:
                self.logger.info(self.logname + "Finished loading features for " + loc)

        if self.verbose:
            mem = "{:4.4f}".format(sys.getsizeof(self.all_feats) / 1000000)
            self.logger.info(self.logname + "Finished loading all features * all_feats = "
                             + mem + " MB *")
python
def load_features(self):
        """
        Loads all the known features from the feature service
        """
        # Loading all loci that
        # are in self.loci variable defined
        # when the pyGFE object is created
        for loc in self.loci:
            if self.verbose:
                self.logger.info(self.logname + "Loading features for " + loc)

            # Loading all features for loc from feature service
            self.all_feats.update({loc: self.locus_features(loc)})

            if self.verbose:
                self.logger.info(self.logname + "Finished loading features for " + loc)

        if self.verbose:
            mem = "{:4.4f}".format(sys.getsizeof(self.all_feats) / 1000000)
            self.logger.info(self.logname + "Finished loading all features * all_feats = "
                             + mem + " MB *")
[ "def", "load_features", "(", "self", ")", ":", "# Loading all loci that", "# are in self.loci variable defined", "# when the pyGFE object is created", "for", "loc", "in", "self", ".", "loci", ":", "if", "self", ".", "verbose", ":", "self", ".", "logger", ".", "info", "(", "self", ".", "logname", "+", "\"Loading features for \"", "+", "loc", ")", "# Loading all features for loc from feature service", "self", ".", "all_feats", ".", "update", "(", "{", "loc", ":", "self", ".", "locus_features", "(", "loc", ")", "}", ")", "if", "self", ".", "verbose", ":", "self", ".", "logger", ".", "info", "(", "self", ".", "logname", "+", "\"Finished loading features for \"", "+", "loc", ")", "if", "self", ".", "verbose", ":", "mem", "=", "\"{:4.4f}\"", ".", "format", "(", "sys", ".", "getsizeof", "(", "self", ".", "all_feats", ")", "/", "1000000", ")", "self", ".", "logger", ".", "info", "(", "self", ".", "logname", "+", "\"Finished loading all features * all_feats = \"", "+", "mem", "+", "\" MB *\"", ")" ]
Loads all the known features from the feature service
[ "Loads", "all", "the", "known", "features", "from", "the", "feature", "service" ]
5ce91559b0a4fbe4fb7758e034eb258202632463
https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/gfe.py#L100-L119
train
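One caveat worth knowing about the "MB" figure logged above: sys.getsizeof reports only the container object itself, not the strings and lists it references, so the logged number understates real memory use. A quick demonstration:

import sys

feats = {'HLA-A': ['ACGT' * 250] * 10000}  # dict referencing sizeable payload
print(sys.getsizeof(feats))  # a few hundred bytes: just the dict header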
nmdp-bioinformatics/SeqAnn
seqann/gfe.py
GFE.locus_features
def locus_features(self, locus):
        """
        Returns all features associated with a locus

        :param locus: string containing HLA locus.
        :type locus: ``str``
        :rtype: ``dict``
        """
        features = self.api.list_features(locus=locus)
        feat_dict = {":".join([a.locus, str(a.rank), a.term, a.sequence]): a.accession
                     for a in features}
        return feat_dict
python
def locus_features(self, locus):
        """
        Returns all features associated with a locus

        :param locus: string containing HLA locus.
        :type locus: ``str``
        :rtype: ``dict``
        """
        features = self.api.list_features(locus=locus)
        feat_dict = {":".join([a.locus, str(a.rank), a.term, a.sequence]): a.accession
                     for a in features}
        return feat_dict
[ "def", "locus_features", "(", "self", ",", "locus", ")", ":", "features", "=", "self", ".", "api", ".", "list_features", "(", "locus", "=", "locus", ")", "feat_dict", "=", "{", "\":\"", ".", "join", "(", "[", "a", ".", "locus", ",", "str", "(", "a", ".", "rank", ")", ",", "a", ".", "term", ",", "a", ".", "sequence", "]", ")", ":", "a", ".", "accession", "for", "a", "in", "features", "}", "return", "feat_dict" ]
Returns all features associated with a locus

:param locus: string containing HLA locus.
:type locus: ``str``
:rtype: ``dict``
[ "Returns", "all", "features", "associated", "with", "a", "locus" ]
5ce91559b0a4fbe4fb7758e034eb258202632463
https://github.com/nmdp-bioinformatics/SeqAnn/blob/5ce91559b0a4fbe4fb7758e034eb258202632463/seqann/gfe.py#L121-L131
train
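The dict comprehension above keys each feature by a colon-joined tuple of its fields. The shape of those keys, with invented values:

feat = {'locus': 'HLA-A', 'rank': 1, 'term': 'exon', 'sequence': 'ATGC'}
key = ":".join([feat['locus'], str(feat['rank']), feat['term'], feat['sequence']])
print(key)  # 'HLA-A:1:exon:ATGC'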
CitrineInformatics/pif-dft
dfttopif/drivers.py
tarfile_to_pif
def tarfile_to_pif(filename, temp_root_dir='', verbose=0):
    """
    Process a tar file that contains DFT data.

    Input:
        filename - String, Path to the file to process.
        temp_root_dir - String, Directory in which to save temporary files.
            Defaults to working directory.
        verbose - int, How many status messages to print

    Output:
        pif - ChemicalSystem, Results and settings of the DFT calculation
            in pif format
    """
    temp_dir = temp_root_dir + str(uuid.uuid4())
    os.makedirs(temp_dir)
    try:
        tar = tarfile.open(filename, 'r')
        tar.extractall(path=temp_dir)
        tar.close()
        for i in os.listdir(temp_dir):
            cur_dir = temp_dir + '/' + i
            if os.path.isdir(cur_dir):
                return directory_to_pif(cur_dir, verbose=verbose)
        return directory_to_pif(temp_dir, verbose=verbose)
    finally:
        shutil.rmtree(temp_dir)
python
def tarfile_to_pif(filename, temp_root_dir='', verbose=0):
    """
    Process a tar file that contains DFT data.

    Input:
        filename - String, Path to the file to process.
        temp_root_dir - String, Directory in which to save temporary files.
            Defaults to working directory.
        verbose - int, How many status messages to print

    Output:
        pif - ChemicalSystem, Results and settings of the DFT calculation
            in pif format
    """
    temp_dir = temp_root_dir + str(uuid.uuid4())
    os.makedirs(temp_dir)
    try:
        tar = tarfile.open(filename, 'r')
        tar.extractall(path=temp_dir)
        tar.close()
        for i in os.listdir(temp_dir):
            cur_dir = temp_dir + '/' + i
            if os.path.isdir(cur_dir):
                return directory_to_pif(cur_dir, verbose=verbose)
        return directory_to_pif(temp_dir, verbose=verbose)
    finally:
        shutil.rmtree(temp_dir)
[ "def", "tarfile_to_pif", "(", "filename", ",", "temp_root_dir", "=", "''", ",", "verbose", "=", "0", ")", ":", "temp_dir", "=", "temp_root_dir", "+", "str", "(", "uuid", ".", "uuid4", "(", ")", ")", "os", ".", "makedirs", "(", "temp_dir", ")", "try", ":", "tar", "=", "tarfile", ".", "open", "(", "filename", ",", "'r'", ")", "tar", ".", "extractall", "(", "path", "=", "temp_dir", ")", "tar", ".", "close", "(", ")", "for", "i", "in", "os", ".", "listdir", "(", "temp_dir", ")", ":", "cur_dir", "=", "temp_dir", "+", "'/'", "+", "i", "if", "os", ".", "path", ".", "isdir", "(", "cur_dir", ")", ":", "return", "directory_to_pif", "(", "cur_dir", ",", "verbose", "=", "verbose", ")", "return", "directory_to_pif", "(", "temp_dir", ",", "verbose", "=", "verbose", ")", "finally", ":", "shutil", ".", "rmtree", "(", "temp_dir", ")" ]
Process a tar file that contains DFT data.

Input:
    filename - String, Path to the file to process.
    temp_root_dir - String, Directory in which to save temporary files.
        Defaults to working directory.
    verbose - int, How many status messages to print

Output:
    pif - ChemicalSystem, Results and settings of the DFT calculation
        in pif format
[ "Process", "a", "tar", "file", "that", "contains", "DFT", "data", "." ]
d5411dc1f6c6e8d454b132977ca7ab3bb8131a80
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/drivers.py#L59-L84
train
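The function builds its scratch directory by string concatenation, which breaks on Windows paths and ignores TMPDIR. A hedged variant using the standard library's tempfile instead — directory_to_pif is assumed importable from the same module:

import os
import shutil
import tarfile
import tempfile

def tarfile_to_pif_alt(filename, verbose=0):
    temp_dir = tempfile.mkdtemp()  # collision-safe, honors TMPDIR
    try:
        with tarfile.open(filename, 'r') as tar:
            tar.extractall(path=temp_dir)
        for entry in os.listdir(temp_dir):
            cur_dir = os.path.join(temp_dir, entry)
            if os.path.isdir(cur_dir):
                return directory_to_pif(cur_dir, verbose=verbose)
        return directory_to_pif(temp_dir, verbose=verbose)
    finally:
        shutil.rmtree(temp_dir)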
CitrineInformatics/pif-dft
dfttopif/drivers.py
archive_to_pif
def archive_to_pif(filename, verbose=0):
    """
    Given an archive file that contains output from a DFT calculation,
    parse the data and return a PIF object.

    Input:
        filename - String, Path to the file to process.
        verbose - int, How many status messages to print

    Output:
        pif - ChemicalSystem, Results and settings of the DFT calculation
            in pif format
    """
    if tarfile.is_tarfile(filename):
        return tarfile_to_pif(filename, verbose)
    raise Exception('Cannot process file type')
python
def archive_to_pif(filename, verbose=0):
    """
    Given an archive file that contains output from a DFT calculation,
    parse the data and return a PIF object.

    Input:
        filename - String, Path to the file to process.
        verbose - int, How many status messages to print

    Output:
        pif - ChemicalSystem, Results and settings of the DFT calculation
            in pif format
    """
    if tarfile.is_tarfile(filename):
        return tarfile_to_pif(filename, verbose)
    raise Exception('Cannot process file type')
[ "def", "archive_to_pif", "(", "filename", ",", "verbose", "=", "0", ")", ":", "if", "tarfile", ".", "is_tarfile", "(", "filename", ")", ":", "return", "tarfile_to_pif", "(", "filename", ",", "verbose", ")", "raise", "Exception", "(", "'Cannot process file type'", ")" ]
Given an archive file that contains output from a DFT calculation,
parse the data and return a PIF object.

Input:
    filename - String, Path to the file to process.
    verbose - int, How many status messages to print

Output:
    pif - ChemicalSystem, Results and settings of the DFT calculation
        in pif format
[ "Given", "a", "archive", "file", "that", "contains", "output", "from", "a", "DFT", "calculation", "parse", "the", "data", "and", "return", "a", "PIF", "object", "." ]
d5411dc1f6c6e8d454b132977ca7ab3bb8131a80
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/drivers.py#L87-L101
train
CitrineInformatics/pif-dft
dfttopif/drivers.py
files_to_pif
def files_to_pif(files, verbose=0, quality_report=True, inline=True):
    '''Given a directory that contains output from a DFT calculation,
    parse the data and return a pif object

    Input:
        files - [str] list of files from which the parser is allowed to read.
        verbose - int, How many status messages to print

    Output:
        pif - ChemicalSystem, Results and settings of the DFT calculation
            in pif format
    '''

    # Look for the first parser compatible with the directory
    found_parser = False
    for possible_parser in [PwscfParser, VaspParser]:
        try:
            parser = possible_parser(files)
            found_parser = True
            break
        except InvalidIngesterException:
            # Constructors fail when they cannot find appropriate files
            pass
    if not found_parser:
        raise Exception('Directory is not in correct format for an existing parser')
    if verbose > 0:
        print("Found a {} directory".format(parser.get_name()))

    # Get information about the chemical system
    chem = ChemicalSystem()
    chem.chemical_formula = parser.get_composition()

    # Get software information, to list as method
    software = Software(name=parser.get_name(),
                        version=parser.get_version_number())

    # Define the DFT method object
    method = Method(name='Density Functional Theory',
                    software=[software])

    # Get the settings (aka. "conditions") of the DFT calculations
    conditions = []
    for name, func in parser.get_setting_functions().items():
        # Get the condition
        cond = getattr(parser, func)()

        # If the condition is None or False, skip it
        if cond is None:
            continue

        if inline and cond.files is not None:
            continue

        # Set the name
        cond.name = name

        # Set the types
        conditions.append(cond)

    # Get the properties of the system
    chem.properties = []
    for name, func in parser.get_result_functions().items():
        # Get the property
        prop = getattr(parser, func)()

        # If the property is None, skip it
        if prop is None:
            continue

        if inline and prop.files is not None:
            continue

        # Add name and other data
        prop.name = name
        prop.methods = [method,]
        prop.data_type='COMPUTATIONAL'
        if verbose > 0 and isinstance(prop, Value):
            print(name)

        if prop.conditions is None:
            prop.conditions = conditions
        else:
            if not isinstance(prop.conditions, list):
                prop.conditions = [prop.conditions]
            prop.conditions.extend(conditions)

        # Add it to the output
        chem.properties.append(prop)

    # Check to see if we should add the quality report
    if quality_report and isinstance(parser, VaspParser):
        _add_quality_report(parser, chem)

    return chem
python
def files_to_pif(files, verbose=0, quality_report=True, inline=True):
    '''Given a directory that contains output from a DFT calculation,
    parse the data and return a pif object

    Input:
        files - [str] list of files from which the parser is allowed to read.
        verbose - int, How many status messages to print

    Output:
        pif - ChemicalSystem, Results and settings of the DFT calculation
            in pif format
    '''

    # Look for the first parser compatible with the directory
    found_parser = False
    for possible_parser in [PwscfParser, VaspParser]:
        try:
            parser = possible_parser(files)
            found_parser = True
            break
        except InvalidIngesterException:
            # Constructors fail when they cannot find appropriate files
            pass
    if not found_parser:
        raise Exception('Directory is not in correct format for an existing parser')
    if verbose > 0:
        print("Found a {} directory".format(parser.get_name()))

    # Get information about the chemical system
    chem = ChemicalSystem()
    chem.chemical_formula = parser.get_composition()

    # Get software information, to list as method
    software = Software(name=parser.get_name(),
                        version=parser.get_version_number())

    # Define the DFT method object
    method = Method(name='Density Functional Theory',
                    software=[software])

    # Get the settings (aka. "conditions") of the DFT calculations
    conditions = []
    for name, func in parser.get_setting_functions().items():
        # Get the condition
        cond = getattr(parser, func)()

        # If the condition is None or False, skip it
        if cond is None:
            continue

        if inline and cond.files is not None:
            continue

        # Set the name
        cond.name = name

        # Set the types
        conditions.append(cond)

    # Get the properties of the system
    chem.properties = []
    for name, func in parser.get_result_functions().items():
        # Get the property
        prop = getattr(parser, func)()

        # If the property is None, skip it
        if prop is None:
            continue

        if inline and prop.files is not None:
            continue

        # Add name and other data
        prop.name = name
        prop.methods = [method,]
        prop.data_type='COMPUTATIONAL'
        if verbose > 0 and isinstance(prop, Value):
            print(name)

        if prop.conditions is None:
            prop.conditions = conditions
        else:
            if not isinstance(prop.conditions, list):
                prop.conditions = [prop.conditions]
            prop.conditions.extend(conditions)

        # Add it to the output
        chem.properties.append(prop)

    # Check to see if we should add the quality report
    if quality_report and isinstance(parser, VaspParser):
        _add_quality_report(parser, chem)

    return chem
[ "def", "files_to_pif", "(", "files", ",", "verbose", "=", "0", ",", "quality_report", "=", "True", ",", "inline", "=", "True", ")", ":", "# Look for the first parser compatible with the directory", "found_parser", "=", "False", "for", "possible_parser", "in", "[", "PwscfParser", ",", "VaspParser", "]", ":", "try", ":", "parser", "=", "possible_parser", "(", "files", ")", "found_parser", "=", "True", "break", "except", "InvalidIngesterException", ":", "# Constructors fail when they cannot find appropriate files", "pass", "if", "not", "found_parser", ":", "raise", "Exception", "(", "'Directory is not in correct format for an existing parser'", ")", "if", "verbose", ">", "0", ":", "print", "(", "\"Found a {} directory\"", ".", "format", "(", "parser", ".", "get_name", "(", ")", ")", ")", "# Get information about the chemical system", "chem", "=", "ChemicalSystem", "(", ")", "chem", ".", "chemical_formula", "=", "parser", ".", "get_composition", "(", ")", "# Get software information, to list as method", "software", "=", "Software", "(", "name", "=", "parser", ".", "get_name", "(", ")", ",", "version", "=", "parser", ".", "get_version_number", "(", ")", ")", "# Define the DFT method object", "method", "=", "Method", "(", "name", "=", "'Density Functional Theory'", ",", "software", "=", "[", "software", "]", ")", "# Get the settings (aka. \"conditions\") of the DFT calculations", "conditions", "=", "[", "]", "for", "name", ",", "func", "in", "parser", ".", "get_setting_functions", "(", ")", ".", "items", "(", ")", ":", "# Get the condition", "cond", "=", "getattr", "(", "parser", ",", "func", ")", "(", ")", "# If the condition is None or False, skip it", "if", "cond", "is", "None", ":", "continue", "if", "inline", "and", "cond", ".", "files", "is", "not", "None", ":", "continue", "# Set the name", "cond", ".", "name", "=", "name", "# Set the types", "conditions", ".", "append", "(", "cond", ")", "# Get the properties of the system", "chem", ".", "properties", "=", "[", "]", "for", "name", ",", "func", "in", "parser", ".", "get_result_functions", "(", ")", ".", "items", "(", ")", ":", "# Get the property", "prop", "=", "getattr", "(", "parser", ",", "func", ")", "(", ")", "# If the property is None, skip it", "if", "prop", "is", "None", ":", "continue", "if", "inline", "and", "prop", ".", "files", "is", "not", "None", ":", "continue", "# Add name and other data", "prop", ".", "name", "=", "name", "prop", ".", "methods", "=", "[", "method", ",", "]", "prop", ".", "data_type", "=", "'COMPUTATIONAL'", "if", "verbose", ">", "0", "and", "isinstance", "(", "prop", ",", "Value", ")", ":", "print", "(", "name", ")", "if", "prop", ".", "conditions", "is", "None", ":", "prop", ".", "conditions", "=", "conditions", "else", ":", "if", "not", "isinstance", "(", "prop", ".", "conditions", ",", "list", ")", ":", "prop", ".", "conditions", "=", "[", "prop", ".", "conditions", "]", "prop", ".", "conditions", ".", "extend", "(", "conditions", ")", "# Add it to the output", "chem", ".", "properties", ".", "append", "(", "prop", ")", "# Check to see if we should add the quality report", "if", "quality_report", "and", "isinstance", "(", "parser", ",", "VaspParser", ")", ":", "_add_quality_report", "(", "parser", ",", "chem", ")", "return", "chem" ]
Given a directory that contains output from a DFT calculation,
parse the data and return a pif object

Input:
    files - [str] list of files from which the parser is allowed to read.
    verbose - int, How many status messages to print

Output:
    pif - ChemicalSystem, Results and settings of the DFT calculation
        in pif format
[ "Given", "a", "directory", "that", "contains", "output", "from", "a", "DFT", "calculation", "parse", "the", "data", "and", "return", "a", "pif", "object" ]
d5411dc1f6c6e8d454b132977ca7ab3bb8131a80
https://github.com/CitrineInformatics/pif-dft/blob/d5411dc1f6c6e8d454b132977ca7ab3bb8131a80/dfttopif/drivers.py#L104-L197
train
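A hedged usage sketch: the file names are illustrative VASP outputs, and serialization via pypif (pif-dft's pif library) is assumed:

from pypif import pif  # assumed serialization dependency

chem = files_to_pif(['OUTCAR', 'INCAR', 'POSCAR'], verbose=1)
print(pif.dumps(chem, indent=2))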
PeerAssets/pypeerassets
examples/once_issue_mode_example.py
wait_for_confirmation
def wait_for_confirmation(provider, transaction_id):
    'Sleep on a loop until we see a confirmation of the transaction.'
    while(True):
        transaction = provider.gettransaction(transaction_id)
        if transaction["confirmations"] > 0:
            break
        time.sleep(10)
python
def wait_for_confirmation(provider, transaction_id):
    'Sleep on a loop until we see a confirmation of the transaction.'
    while(True):
        transaction = provider.gettransaction(transaction_id)
        if transaction["confirmations"] > 0:
            break
        time.sleep(10)
[ "def", "wait_for_confirmation", "(", "provider", ",", "transaction_id", ")", ":", "while", "(", "True", ")", ":", "transaction", "=", "provider", ".", "gettransaction", "(", "transaction_id", ")", "if", "transaction", "[", "\"confirmations\"", "]", ">", "0", ":", "break", "time", ".", "sleep", "(", "10", ")" ]
Sleep on a loop until we see a confirmation of the transaction.
[ "Sleep", "on", "a", "loop", "until", "we", "see", "a", "confirmation", "of", "the", "transaction", "." ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/examples/once_issue_mode_example.py#L51-L57
train
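The loop above polls forever; a transaction that never confirms hangs the script. A sketch of a bounded variant (the timeout and poll interval are invented defaults, not part of the example code):

import time

def wait_for_confirmation_with_timeout(provider, transaction_id,
                                       timeout=600, poll=10):
    """Like wait_for_confirmation, but gives up after `timeout` seconds."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        transaction = provider.gettransaction(transaction_id)
        if transaction["confirmations"] > 0:
            return transaction
        time.sleep(poll)
    raise TimeoutError('transaction %s unconfirmed after %d s'
                       % (transaction_id, timeout))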
PeerAssets/pypeerassets
pypeerassets/protocol.py
validate_card_issue_modes
def validate_card_issue_modes(issue_mode: int, cards: list) -> list:
    """validate cards against deck_issue modes"""

    supported_mask = 63  # sum of all issue_mode values

    if not bool(issue_mode & supported_mask):
        return []  # return empty list

    for i in [1 << x for x in range(len(IssueMode))]:

        if bool(i & issue_mode):

            try:
                parser_fn = cast(
                    Callable[[list], Optional[list]],
                    parsers[IssueMode(i).name]
                )
            except ValueError:
                continue

            parsed_cards = parser_fn(cards)
            if not parsed_cards:
                return []
            cards = parsed_cards

    return cards
python
def validate_card_issue_modes(issue_mode: int, cards: list) -> list:
    """validate cards against deck_issue modes"""

    supported_mask = 63  # sum of all issue_mode values

    if not bool(issue_mode & supported_mask):
        return []  # return empty list

    for i in [1 << x for x in range(len(IssueMode))]:

        if bool(i & issue_mode):

            try:
                parser_fn = cast(
                    Callable[[list], Optional[list]],
                    parsers[IssueMode(i).name]
                )
            except ValueError:
                continue

            parsed_cards = parser_fn(cards)
            if not parsed_cards:
                return []
            cards = parsed_cards

    return cards
[ "def", "validate_card_issue_modes", "(", "issue_mode", ":", "int", ",", "cards", ":", "list", ")", "->", "list", ":", "supported_mask", "=", "63", "# sum of all issue_mode values", "if", "not", "bool", "(", "issue_mode", "&", "supported_mask", ")", ":", "return", "[", "]", "# return empty list", "for", "i", "in", "[", "1", "<<", "x", "for", "x", "in", "range", "(", "len", "(", "IssueMode", ")", ")", "]", ":", "if", "bool", "(", "i", "&", "issue_mode", ")", ":", "try", ":", "parser_fn", "=", "cast", "(", "Callable", "[", "[", "list", "]", ",", "Optional", "[", "list", "]", "]", ",", "parsers", "[", "IssueMode", "(", "i", ")", ".", "name", "]", ")", "except", "ValueError", ":", "continue", "parsed_cards", "=", "parser_fn", "(", "cards", ")", "if", "not", "parsed_cards", ":", "return", "[", "]", "cards", "=", "parsed_cards", "return", "cards" ]
validate cards against deck_issue modes
[ "validate", "cards", "against", "deck_issue", "modes" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/protocol.py#L365-L389
train
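The `1 << x` loop walks every single-bit flag and applies each parser whose bit is set in the combined issue_mode. A standalone demonstration of that decomposition — the enum members here are an illustrative subset, not the full pypeerassets IssueMode:

from enum import Enum

class IssueMode(Enum):  # illustrative subset
    CUSTOM = 1
    ONCE = 2
    MULTI = 4

def active_modes(issue_mode):
    return [m.name for m in IssueMode if issue_mode & m.value]

print(active_modes(6))  # ['ONCE', 'MULTI'] -- 6 == 2 | 4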
PeerAssets/pypeerassets
pypeerassets/protocol.py
Deck.p2th_address
def p2th_address(self) -> Optional[str]:
    '''P2TH address of this deck'''

    if self.id:
        return Kutil(network=self.network,
                     privkey=bytearray.fromhex(self.id)).address
    else:
        return None
python
def p2th_address(self) -> Optional[str]:
    '''P2TH address of this deck'''

    if self.id:
        return Kutil(network=self.network,
                     privkey=bytearray.fromhex(self.id)).address
    else:
        return None
[ "def", "p2th_address", "(", "self", ")", "->", "Optional", "[", "str", "]", ":", "if", "self", ".", "id", ":", "return", "Kutil", "(", "network", "=", "self", ".", "network", ",", "privkey", "=", "bytearray", ".", "fromhex", "(", "self", ".", "id", ")", ")", ".", "address", "else", ":", "return", "None" ]
P2TH address of this deck
[ "P2TH", "address", "of", "this", "deck" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/protocol.py#L89-L96
train
PeerAssets/pypeerassets
pypeerassets/protocol.py
Deck.p2th_wif
def p2th_wif(self) -> Optional[str]:
    '''P2TH privkey in WIF format'''

    if self.id:
        return Kutil(network=self.network,
                     privkey=bytearray.fromhex(self.id)).wif
    else:
        return None
python
def p2th_wif(self) -> Optional[str]:
    '''P2TH privkey in WIF format'''

    if self.id:
        return Kutil(network=self.network,
                     privkey=bytearray.fromhex(self.id)).wif
    else:
        return None
[ "def", "p2th_wif", "(", "self", ")", "->", "Optional", "[", "str", "]", ":", "if", "self", ".", "id", ":", "return", "Kutil", "(", "network", "=", "self", ".", "network", ",", "privkey", "=", "bytearray", ".", "fromhex", "(", "self", ".", "id", ")", ")", ".", "wif", "else", ":", "return", "None" ]
P2TH privkey in WIF format
[ "P2TH", "privkey", "in", "WIF", "format" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/protocol.py#L99-L106
train
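Both properties derive a deterministic key pair from the deck's spawn txid, so any node can reconstruct the deck's P2TH tag. A minimal sketch of the underlying call — the import path, network name, and txid here are all stand-ins:

from pypeerassets.kutil import Kutil  # assumed import path

deck_id = 'aa' * 32  # stand-in for a 64-hex-char deck spawn txid
key = Kutil(network='peercoin', privkey=bytearray.fromhex(deck_id))
print(key.address, key.wif)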
PeerAssets/pypeerassets
pypeerassets/protocol.py
Deck.metainfo_to_dict
def metainfo_to_dict(self) -> dict:
    '''encode deck into dictionary'''

    r = {
        "version": self.version,
        "name": self.name,
        "number_of_decimals": self.number_of_decimals,
        "issue_mode": self.issue_mode
    }

    if self.asset_specific_data:
        r.update({'asset_specific_data': self.asset_specific_data})

    return r
python
def metainfo_to_dict(self) -> dict:
    '''encode deck into dictionary'''

    r = {
        "version": self.version,
        "name": self.name,
        "number_of_decimals": self.number_of_decimals,
        "issue_mode": self.issue_mode
    }

    if self.asset_specific_data:
        r.update({'asset_specific_data': self.asset_specific_data})

    return r
[ "def", "metainfo_to_dict", "(", "self", ")", "->", "dict", ":", "r", "=", "{", "\"version\"", ":", "self", ".", "version", ",", "\"name\"", ":", "self", ".", "name", ",", "\"number_of_decimals\"", ":", "self", ".", "number_of_decimals", ",", "\"issue_mode\"", ":", "self", ".", "issue_mode", "}", "if", "self", ".", "asset_specific_data", ":", "r", ".", "update", "(", "{", "'asset_specific_data'", ":", "self", ".", "asset_specific_data", "}", ")", "return", "r" ]
encode deck into dictionary
[ "encode", "deck", "into", "dictionary" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/protocol.py#L132-L145
train
PeerAssets/pypeerassets
pypeerassets/protocol.py
Deck.to_json
def to_json(self) -> dict:
    '''export the Deck object to json-ready format'''

    d = self.__dict__
    d['p2th_wif'] = self.p2th_wif
    return d
python
def to_json(self) -> dict:
    '''export the Deck object to json-ready format'''

    d = self.__dict__
    d['p2th_wif'] = self.p2th_wif
    return d
[ "def", "to_json", "(", "self", ")", "->", "dict", ":", "d", "=", "self", ".", "__dict__", "d", "[", "'p2th_wif'", "]", "=", "self", ".", "p2th_wif", "return", "d" ]
export the Deck object to json-ready format
[ "export", "the", "Deck", "object", "to", "json", "-", "ready", "format" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/protocol.py#L147-L152
train
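Note that to_json returns the live __dict__ rather than a copy, so the 'p2th_wif' key it adds persists on the instance and mutations of the returned dict alter the Deck. A sketch of a side-effect-free variant (not the library's code):

def to_json_copy(self) -> dict:
    d = dict(self.__dict__)  # shallow copy; leaves the instance untouched
    d['p2th_wif'] = self.p2th_wif
    return d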
PeerAssets/pypeerassets
pypeerassets/protocol.py
CardTransfer.metainfo_to_dict
def metainfo_to_dict(self) -> dict:
    '''encode card into dictionary'''

    r = {
        "version": self.version,
        "amount": self.amount,
        "number_of_decimals": self.number_of_decimals
    }

    if self.asset_specific_data:
        r.update({'asset_specific_data': self.asset_specific_data})

    return r
python
def metainfo_to_dict(self) -> dict:
    '''encode card into dictionary'''

    r = {
        "version": self.version,
        "amount": self.amount,
        "number_of_decimals": self.number_of_decimals
    }

    if self.asset_specific_data:
        r.update({'asset_specific_data': self.asset_specific_data})

    return r
[ "def", "metainfo_to_dict", "(", "self", ")", "->", "dict", ":", "r", "=", "{", "\"version\"", ":", "self", ".", "version", ",", "\"amount\"", ":", "self", ".", "amount", ",", "\"number_of_decimals\"", ":", "self", ".", "number_of_decimals", "}", "if", "self", ".", "asset_specific_data", ":", "r", ".", "update", "(", "{", "'asset_specific_data'", ":", "self", ".", "asset_specific_data", "}", ")", "return", "r" ]
encode card into dictionary
[ "encode", "card", "into", "dictionary" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/protocol.py#L331-L343
train
PeerAssets/pypeerassets
pypeerassets/protocol.py
DeckState._sort_cards
def _sort_cards(self, cards: Generator) -> list:
    '''sort cards by blocknum and blockseq'''

    return sorted([card.__dict__ for card in cards],
                  key=itemgetter('blocknum', 'blockseq', 'cardseq'))
python
def _sort_cards(self, cards: Generator) -> list:
    '''sort cards by blocknum and blockseq'''

    return sorted([card.__dict__ for card in cards],
                  key=itemgetter('blocknum', 'blockseq', 'cardseq'))
[ "def", "_sort_cards", "(", "self", ",", "cards", ":", "Generator", ")", "->", "list", ":", "return", "sorted", "(", "[", "card", ".", "__dict__", "for", "card", "in", "cards", "]", ",", "key", "=", "itemgetter", "(", "'blocknum'", ",", "'blockseq'", ",", "'cardseq'", ")", ")" ]
sort cards by blocknum and blockseq
[ "sort", "cards", "by", "blocknum", "and", "blockseq" ]
8927b4a686887f44fe2cd9de777e2c827c948987
https://github.com/PeerAssets/pypeerassets/blob/8927b4a686887f44fe2cd9de777e2c827c948987/pypeerassets/protocol.py#L439-L443
train
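Despite the docstring, the sort key chains three fields: block height, position within the block, then card index. A standalone demonstration with invented records:

from operator import itemgetter

cards = [
    {'blocknum': 9, 'blockseq': 1, 'cardseq': 0},
    {'blocknum': 7, 'blockseq': 2, 'cardseq': 1},
    {'blocknum': 7, 'blockseq': 2, 'cardseq': 0},
]
print(sorted(cards, key=itemgetter('blocknum', 'blockseq', 'cardseq')))
# ties on blocknum fall through to blockseq, then cardseq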
lreis2415/PyGeoC
examples/ex07_handling_raster_with_numpy.py
main
def main():
    """Read GeoTiff raster data and perform log transformation.
    """
    input_tif = "../tests/data/Jamaica_dem.tif"
    output_tif = "../tests/data/tmp_results/log_dem.tif"
    rst = RasterUtilClass.read_raster(input_tif)
    # raster data (with noDataValue as numpy.nan) as numpy array
    rst_valid = rst.validValues
    output_data = np.log(rst_valid)
    # write output raster
    RasterUtilClass.write_gtiff_file(output_tif, rst.nRows, rst.nCols, output_data,
                                     rst.geotrans, rst.srs, rst.noDataValue, rst.dataType)
python
def main():
    """Read GeoTiff raster data and perform log transformation.
    """
    input_tif = "../tests/data/Jamaica_dem.tif"
    output_tif = "../tests/data/tmp_results/log_dem.tif"
    rst = RasterUtilClass.read_raster(input_tif)
    # raster data (with noDataValue as numpy.nan) as numpy array
    rst_valid = rst.validValues
    output_data = np.log(rst_valid)
    # write output raster
    RasterUtilClass.write_gtiff_file(output_tif, rst.nRows, rst.nCols, output_data,
                                     rst.geotrans, rst.srs, rst.noDataValue, rst.dataType)
[ "def", "main", "(", ")", ":", "input_tif", "=", "\"../tests/data/Jamaica_dem.tif\"", "output_tif", "=", "\"../tests/data/tmp_results/log_dem.tif\"", "rst", "=", "RasterUtilClass", ".", "read_raster", "(", "input_tif", ")", "# raster data (with noDataValue as numpy.nan) as numpy array", "rst_valid", "=", "rst", ".", "validValues", "output_data", "=", "np", ".", "log", "(", "rst_valid", ")", "# write output raster", "RasterUtilClass", ".", "write_gtiff_file", "(", "output_tif", ",", "rst", ".", "nRows", ",", "rst", ".", "nCols", ",", "output_data", ",", "rst", ".", "geotrans", ",", "rst", ".", "srs", ",", "rst", ".", "noDataValue", ",", "rst", ".", "dataType", ")" ]
Read GeoTiff raster data and perform log transformation.
[ "Read", "GeoTiff", "raster", "data", "and", "perform", "log", "transformation", "." ]
9a92d1a229bb74298e3c57f27c97079980b5f729
https://github.com/lreis2415/PyGeoC/blob/9a92d1a229bb74298e3c57f27c97079980b5f729/examples/ex07_handling_raster_with_numpy.py#L10-L21
train
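validValues already carries nodata cells as numpy.nan, which np.log passes through silently, but any genuine zero or negative elevation would still produce -inf and a runtime warning. A guard sketch with invented DEM values:

import numpy as np

dem = np.array([[100.0, 0.0], [-5.0, 2500.0]])
safe = np.where(dem > 0, dem, np.nan)  # mask non-positive cells before log
print(np.log(safe))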
zero-os/zerotier_client
zerotier/client_support.py
val_factory
def val_factory(val, datatypes):
    """ Return an instance of `val` that is of one of the types in `datatypes`.
        Keep track of exceptions so we can produce meaningful error messages. """
    exceptions = []
    for dt in datatypes:
        try:
            if isinstance(val, dt):
                return val
            return type_handler_object(val, dt)
        except Exception as e:
            exceptions.append(str(e))
    # if we get here, we never found a valid value. raise an error
    raise ValueError('val_factory: Unable to instantiate {val} from types {types}. Exceptions: {excs}'.
                     format(val=val, types=datatypes, excs=exceptions))
python
def val_factory(val, datatypes):
    """ Return an instance of `val` that is of one of the types in `datatypes`.
        Keep track of exceptions so we can produce meaningful error messages. """
    exceptions = []
    for dt in datatypes:
        try:
            if isinstance(val, dt):
                return val
            return type_handler_object(val, dt)
        except Exception as e:
            exceptions.append(str(e))
    # if we get here, we never found a valid value. raise an error
    raise ValueError('val_factory: Unable to instantiate {val} from types {types}. Exceptions: {excs}'.
                     format(val=val, types=datatypes, excs=exceptions))
[ "def", "val_factory", "(", "val", ",", "datatypes", ")", ":", "exceptions", "=", "[", "]", "for", "dt", "in", "datatypes", ":", "try", ":", "if", "isinstance", "(", "val", ",", "dt", ")", ":", "return", "val", "return", "type_handler_object", "(", "val", ",", "dt", ")", "except", "Exception", "as", "e", ":", "exceptions", ".", "append", "(", "str", "(", "e", ")", ")", "# if we get here, we never found a valid value. raise an error", "raise", "ValueError", "(", "'val_factory: Unable to instantiate {val} from types {types}. Exceptions: {excs}'", ".", "format", "(", "val", "=", "val", ",", "types", "=", "datatypes", ",", "excs", "=", "exceptions", ")", ")" ]
Return an instance of `val` that is of one of the types in `datatypes`.
Keep track of exceptions so we can produce meaningful error messages.
[ "return", "an", "instance", "of", "val", "that", "is", "of", "type", "datatype", ".", "keep", "track", "of", "exceptions", "so", "we", "can", "produce", "meaningful", "error", "messages", "." ]
03993da11e69d837a0308a2f41ae7b378692fd82
https://github.com/zero-os/zerotier_client/blob/03993da11e69d837a0308a2f41ae7b378692fd82/zerotier/client_support.py#L75-L90
train
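val_factory delegates conversion to the module's type_handler_object helper, so a faithful demo needs the whole module. A self-contained analogue of the same first-match-wins coercion, using plain constructors instead:

def coerce_first(val, datatypes):
    """Standalone analogue of val_factory with plain constructors."""
    errors = []
    for dt in datatypes:
        try:
            return val if isinstance(val, dt) else dt(val)
        except Exception as e:
            errors.append(str(e))
    raise ValueError('cannot coerce %r via %s: %s' % (val, datatypes, errors))

print(coerce_first('42', [int, float]))  # 42 -- int succeeds first
print(coerce_first(3.5, [float, int]))   # 3.5 -- already a float, returned as-is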
zero-os/zerotier_client
zerotier/client_support.py
handler_for
def handler_for(obj):
    """return the handler for the object type"""
    for handler_type in handlers:
        if isinstance(obj, handler_type):
            return handlers[handler_type]

    try:
        for handler_type in handlers:
            if issubclass(obj, handler_type):
                return handlers[handler_type]
    except TypeError:
        # if obj isn't a class, issubclass will raise a TypeError
        pass
python
def handler_for(obj):
    """return the handler for the object type"""
    for handler_type in handlers:
        if isinstance(obj, handler_type):
            return handlers[handler_type]

    try:
        for handler_type in handlers:
            if issubclass(obj, handler_type):
                return handlers[handler_type]
    except TypeError:
        # if obj isn't a class, issubclass will raise a TypeError
        pass
[ "def", "handler_for", "(", "obj", ")", ":", "for", "handler_type", "in", "handlers", ":", "if", "isinstance", "(", "obj", ",", "handler_type", ")", ":", "return", "handlers", "[", "handler_type", "]", "try", ":", "for", "handler_type", "in", "handlers", ":", "if", "issubclass", "(", "obj", ",", "handler_type", ")", ":", "return", "handlers", "[", "handler_type", "]", "except", "TypeError", ":", "# if obj isn't a class, issubclass will raise a TypeError", "pass" ]
return the handler for the object type
[ "return", "the", "handler", "for", "the", "object", "type" ]
03993da11e69d837a0308a2f41ae7b378692fd82
https://github.com/zero-os/zerotier_client/blob/03993da11e69d837a0308a2f41ae7b378692fd82/zerotier/client_support.py#L189-L201
train
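The two-pass lookup first matches instances, then falls back to classes. A toy demonstration, assuming handler_for and this stand-in registry share one namespace (the real `handlers` mapping is populated elsewhere in the module):

handlers = {int: 'int handler', str: 'str handler'}  # stand-in registry

print(handler_for(5))     # 'int handler'  (isinstance pass)
print(handler_for(bool))  # 'int handler'  (issubclass pass: bool subclasses int)
print(handler_for(3.5))   # None           (falls through both loops)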