Dataset of Python functions from the widdowquinn/pyani repository. Each record below gives: repo, path, func_name, language (always python), the function source, sha, url, and partition (always train).
repo: widdowquinn/pyani
path: bin/genbank_get_genomes_by_taxon.py
func_name: extract_filestem
language: python

```python
def extract_filestem(data):
    """Extract filestem from Entrez eSummary data.
    Function expects esummary['DocumentSummarySet']['DocumentSummary'][0]
    Some illegal characters may occur in AssemblyName - for these, a more
    robust regex replace/escape may be required. Sadly, NCBI don't just
    use standard percent escapes, but instead replace certain
    characters with underscores: white space, slash, comma, hash, brackets.
    """
    escapes = re.compile(r"[\s/,#\(\)]")
    escname = re.sub(escapes, '_', data['AssemblyName'])
    return '_'.join([data['AssemblyAccession'], escname])
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L263-L275
partition: train
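As a quick illustration of the escaping behaviour described in the docstring, here is a minimal, self-contained usage sketch; the accession and assembly name are invented for demonstration, and the function is repeated so the snippet runs on its own.

```python
import re

def extract_filestem(data):
    """As in the record above: replace NCBI's 'illegal' characters
    (whitespace, slash, comma, hash, brackets) with underscores."""
    escapes = re.compile(r"[\s/,#\(\)]")
    escname = re.sub(escapes, '_', data['AssemblyName'])
    return '_'.join([data['AssemblyAccession'], escname])

# Hypothetical eSummary-like fields (invented values):
summary = {'AssemblyAccession': 'GCF_000011605.1',
           'AssemblyName': 'ASM1160v1 (contigs)'}
print(extract_filestem(summary))  # -> GCF_000011605.1_ASM1160v1__contigs_
```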
repo: widdowquinn/pyani
path: bin/genbank_get_genomes_by_taxon.py
func_name: write_contigs
language: python

```python
def write_contigs(asm_uid, contig_uids, batchsize=10000):
    """Writes assembly contigs out to a single FASTA file in the script's
    designated output directory.
    FASTA records are returned, as GenBank and even GenBankWithParts format
    records don't reliably give correct sequence in all cases.
    The script returns two strings for each assembly, a 'class' and a 'label'
    string - this is for use with, e.g. pyani.
    """
    # Has duplicate code with get_class_label_info() - needs refactoring
    logger.info("Collecting contig data for %s", asm_uid)
    # Assembly record - get binomial and strain names
    asm_record = Entrez.read(
        entrez_retry(
            Entrez.esummary, db='assembly', id=asm_uid, rettype='text'),
        validate=False)
    asm_organism = asm_record['DocumentSummarySet']['DocumentSummary'][0][
        'SpeciesName']
    try:
        asm_strain = asm_record['DocumentSummarySet']['DocumentSummary'][0][
            'Biosource']['InfraspeciesList'][0]['Sub_value']
    except KeyError:
        asm_strain = ""
    # Assembly UID (long form) for the output filename
    outfilename = "%s.fasta" % os.path.join(args.outdirname, asm_record[
        'DocumentSummarySet']['DocumentSummary'][0]['AssemblyAccession'])
    # Create label and class strings
    genus, species = asm_organism.split(' ', 1)
    # Get FASTA records for contigs
    logger.info("Downloading FASTA records for assembly %s (%s)", asm_uid,
                ' '.join([genus[0] + '.', species, asm_strain]))
    # We're doing an explicit outer retry loop here because we want to confirm
    # we have the correct data, as well as test for Entrez connection errors,
    # which is all the entrez_retry function does.
    tries, success = 0, False
    while not success and tries < args.retries:
        records = []  # Holds all return records
        # We may need to batch contigs
        query_uids = ','.join(contig_uids)
        try:
            for start in range(0, len(contig_uids), batchsize):
                logger.info("Batch: %d-%d", start, start + batchsize)
                records.extend(
                    list(
                        SeqIO.parse(
                            entrez_retry(
                                Entrez.efetch,
                                db='nucleotide',
                                id=query_uids,
                                rettype='fasta',
                                retmode='text',
                                retstart=start,
                                retmax=batchsize), 'fasta')))
            tries += 1
            # Check only that correct number of records returned.
            if len(records) == len(contig_uids):
                success = True
            else:
                logger.warning("%d contigs expected, %d contigs returned",
                               len(contig_uids), len(records))
                logger.warning("FASTA download for assembly %s failed",
                               asm_uid)
                logger.warning("try %d/20", tries)
            # Could also check expected assembly sequence length?
            logger.info("Downloaded genome size: %d",
                        sum([len(r) for r in records]))
        except:
            logger.warning("FASTA download for assembly %s failed", asm_uid)
            logger.warning(last_exception())
            logger.warning("try %d/20", tries)
    if not success:
        # Could place option on command-line to stop or continue here.
        logger.error("Failed to download records for %s (continuing)", asm_uid)
    # Write contigs to file
    retval = SeqIO.write(records, outfilename, 'fasta')
    logger.info("Wrote %d contigs to %s", retval, outfilename)
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L481-L560
partition: train
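The retstart/retmax batching in write_contigs is independent of Entrez itself; here is a minimal sketch of the same windowing logic with a stub fetch function standing in for Entrez.efetch (all names here are hypothetical).

```python
def fetch_in_batches(uids, batchsize, fetch):
    """Collect results window by window, mirroring the retstart/retmax
    loop above; `fetch` stands in for an Entrez.efetch-style call."""
    records = []
    for start in range(0, len(uids), batchsize):
        records.extend(fetch(uids, retstart=start, retmax=batchsize))
    return records

# Stub fetch that just returns the requested window of IDs (hypothetical):
fake_fetch = lambda uids, retstart, retmax: uids[retstart:retstart + retmax]
uids = ['uid%03d' % i for i in range(25)]
assert fetch_in_batches(uids, 10, fake_fetch) == uids  # 3 batches: 10+10+5
```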
repo: widdowquinn/pyani
path: bin/genbank_get_genomes_by_taxon.py
func_name: logreport_downloaded
language: python

```python
def logreport_downloaded(accession, skippedlist, accessiondict, uidaccdict):
    """Reports to logger whether alternative assemblies for an accession that
    was missing have been downloaded
    """
    for vid in accessiondict[accession.split('.')[0]]:
        if vid in skippedlist:
            status = "NOT DOWNLOADED"
        else:
            status = "DOWNLOADED"
        logger.warning("\t\t%s: %s - %s", vid, uidaccdict[vid], status)
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/bin/genbank_get_genomes_by_taxon.py#L564-L573
partition: train
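The function assumes accessiondict maps an accession stem (the part before the version suffix) to a list of UIDs, and uidaccdict maps each UID back to its full accession. A small sketch of those shapes, with invented values and print standing in for the logger:

```python
# Hypothetical data shapes for logreport_downloaded (invented values):
accession = 'GCF_000011605.1'
accessiondict = {'GCF_000011605': ['uid1', 'uid2']}  # stem -> UID list
uidaccdict = {'uid1': 'GCF_000011605.1', 'uid2': 'GCF_000011605.2'}
skippedlist = ['uid2']

for vid in accessiondict[accession.split('.')[0]]:
    status = "NOT DOWNLOADED" if vid in skippedlist else "DOWNLOADED"
    print("%s: %s - %s" % (vid, uidaccdict[vid], status))
```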
repo: widdowquinn/pyani
path: pyani/tetra.py
func_name: calculate_tetra_zscores
language: python

```python
def calculate_tetra_zscores(infilenames):
    """Returns dictionary of TETRA Z-scores for each input file.
    - infilenames - collection of paths to sequence files
    """
    org_tetraz = {}
    for filename in infilenames:
        org = os.path.splitext(os.path.split(filename)[-1])[0]
        org_tetraz[org] = calculate_tetra_zscore(filename)
    return org_tetraz
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/tetra.py#L33-L42
partition: train
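The dictionary keys are derived from the input filenames; a one-liner showing the stem extraction used above (path invented):

```python
import os

# The key for each file is its basename without the extension:
filename = "tests/data/genome1.fasta"  # hypothetical path
org = os.path.splitext(os.path.split(filename)[-1])[0]
print(org)  # -> genome1
```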
repo: widdowquinn/pyani
path: pyani/tetra.py
func_name: calculate_tetra_zscore
language: python

```python
def calculate_tetra_zscore(filename):
    """Returns TETRA Z-score for the sequence in the passed file.
    - filename - path to sequence file
    Calculates mono-, di-, tri- and tetranucleotide frequencies
    for each sequence, on each strand, and follows Teeling et al. (2004)
    in calculating a corresponding Z-score for each observed
    tetranucleotide frequency, dependent on the mono-, di- and tri-
    nucleotide frequencies for that input sequence.
    """
    # For the Teeling et al. method, the Z-scores require us to count
    # mono, di, tri and tetranucleotide sequences - these are stored
    # (in order) in the counts tuple
    counts = (collections.defaultdict(int), collections.defaultdict(int),
              collections.defaultdict(int), collections.defaultdict(int))
    for rec in SeqIO.parse(filename, 'fasta'):
        for seq in [str(rec.seq).upper(),
                    str(rec.seq.reverse_complement()).upper()]:
            # The Teeling et al. algorithm requires us to consider
            # both strand orientations, so monocounts are easy
            for base in ('G', 'C', 'T', 'A'):
                counts[0][base] += seq.count(base)
            # For di, tri and tetranucleotide counts, loop over the
            # sequence and its reverse complement, until near the end:
            for i in range(len(seq[:-4])):
                din, tri, tetra = seq[i:i+2], seq[i:i+3], seq[i:i+4]
                counts[1][str(din)] += 1
                counts[2][str(tri)] += 1
                counts[3][str(tetra)] += 1
            # Then clean up the straggling bit at the end:
            counts[2][str(seq[-4:-1])] += 1
            counts[2][str(seq[-3:])] += 1
            counts[1][str(seq[-4:-2])] += 1
            counts[1][str(seq[-3:-1])] += 1
            counts[1][str(seq[-2:])] += 1
    # Following Teeling (2004), calculate expected frequencies for each
    # tetranucleotide; we ignore ambiguity symbols
    tetra_exp = {}
    for tet in [tetn for tetn in counts[3] if tetra_clean(tetn)]:
        tetra_exp[tet] = 1. * counts[2][tet[:3]] * counts[2][tet[1:]] / \
            counts[1][tet[1:3]]
    # Following Teeling (2004) we approximate the std dev and Z-score for each
    # tetranucleotide
    tetra_sd = {}
    tetra_z = {}
    for tet, exp in list(tetra_exp.items()):
        den = counts[1][tet[1:3]]
        tetra_sd[tet] = math.sqrt(exp * (den - counts[2][tet[:3]]) *
                                  (den - counts[2][tet[1:]]) / (den * den))
        try:
            tetra_z[tet] = (counts[3][tet] - exp) / tetra_sd[tet]
        except ZeroDivisionError:
            # To record if we hit a zero in the estimation of variance
            # zeroes = [k for k, v in list(tetra_sd.items()) if v == 0]
            tetra_z[tet] = 1 / (counts[1][tet[1:3]] * counts[1][tet[1:3]])
    return tetra_z
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/tetra.py#L46-L102
partition: train
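The expected tetranucleotide count in the loop above follows Teeling et al. (2004): for a tetranucleotide n1 n2 n3 n4, the expectation is N(n1 n2 n3) * N(n2 n3 n4) / N(n2 n3). A worked toy example with invented counts:

```python
# Hypothetical counts for the tetranucleotide 'ACGT' (invented numbers):
tri_counts = {'ACG': 120, 'CGT': 90}  # N(n1n2n3), N(n2n3n4)
di_counts = {'CG': 300}               # N(n2n3)

tet = 'ACGT'
expected = 1. * tri_counts[tet[:3]] * tri_counts[tet[1:]] / di_counts[tet[1:3]]
print(expected)  # -> 36.0  (120 * 90 / 300)
```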
repo: widdowquinn/pyani
path: pyani/tetra.py
func_name: calculate_correlations
language: python

```python
def calculate_correlations(tetra_z):
    """Returns dataframe of Pearson correlation coefficients.
    - tetra_z - dictionary of Z-scores, keyed by sequence ID
    Calculates Pearson correlation coefficient from Z scores for each
    tetranucleotide. This is done longhand here, which is fast enough,
    but for robustness we might want to do something else... (TODO).
    Note that we report a correlation by this method, rather than a
    percentage identity.
    """
    orgs = sorted(tetra_z.keys())
    correlations = pd.DataFrame(index=orgs, columns=orgs,
                                dtype=float).fillna(1.0)
    for idx, org1 in enumerate(orgs[:-1]):
        for org2 in orgs[idx+1:]:
            assert sorted(tetra_z[org1].keys()) == sorted(tetra_z[org2].keys())
            tets = sorted(tetra_z[org1].keys())
            zscores = [[tetra_z[org1][t] for t in tets],
                       [tetra_z[org2][t] for t in tets]]
            zmeans = [sum(zscore) / len(zscore) for zscore in zscores]
            zdiffs = [[z - zmeans[0] for z in zscores[0]],
                      [z - zmeans[1] for z in zscores[1]]]
            diffprods = sum([zdiffs[0][i] * zdiffs[1][i] for i in
                             range(len(zdiffs[0]))])
            zdiffs2 = [sum([z * z for z in zdiffs[0]]),
                       sum([z * z for z in zdiffs[1]])]
            correlations[org1][org2] = diffprods / \
                math.sqrt(zdiffs2[0] * zdiffs2[1])
            correlations[org2][org1] = correlations[org1][org2]
    return correlations
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/tetra.py#L118-L149
partition: train
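The longhand computation above is just the Pearson correlation of the two Z-score vectors; for sanity-checking, the same number can be obtained from numpy. A sketch with invented Z-scores, assuming numpy is available:

```python
import math
import numpy as np

z1 = [0.5, -1.2, 0.3, 2.0]  # hypothetical Z-scores for org1
z2 = [0.4, -0.9, 0.1, 1.8]  # hypothetical Z-scores for org2

# Longhand, following calculate_correlations above:
m1, m2 = sum(z1) / len(z1), sum(z2) / len(z2)
d1 = [z - m1 for z in z1]
d2 = [z - m2 for z in z2]
longhand = sum(a * b for a, b in zip(d1, d2)) / math.sqrt(
    sum(a * a for a in d1) * sum(b * b for b in d2))

# Same value as numpy's Pearson correlation coefficient:
assert abs(longhand - np.corrcoef(z1, z2)[0, 1]) < 1e-12
```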
repo: widdowquinn/pyani
path: pyani/pyani_tools.py
func_name: get_labels
language: python

```python
def get_labels(filename, logger=None):
    """Returns a dictionary of alternative sequence labels, or None
    - filename - path to file containing tab-separated table of labels
    Input files should be formatted as <key>\t<label>, one pair per line.
    """
    labeldict = {}
    if filename is not None:
        if logger:
            logger.info("Reading labels from %s", filename)
        with open(filename, "r") as ifh:
            count = 0
            for line in ifh.readlines():
                count += 1
                try:
                    key, label = line.strip().split("\t")
                except ValueError:
                    if logger:
                        logger.warning("Problem with class file: %s", filename)
                        # Pass count and line as separate arguments rather than
                        # a single tuple, so both "%d: %s" placeholders are
                        # filled and the logging call formats correctly:
                        logger.warning("%d: %s", count, line.strip())
                        logger.warning("(skipping line)")
                    continue
                else:
                    labeldict[key] = label
    return labeldict
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L131-L156
partition: train
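A minimal usage sketch, assuming get_labels from the record above is in scope; it writes a two-line tab-separated label file to a temporary location (file contents invented):

```python
import os
import tempfile

# Create a throwaway <key>\t<label> file:
fd, path = tempfile.mkstemp(suffix=".tab")
with os.fdopen(fd, "w") as ofh:
    ofh.write("seq1\tEscherichia coli K-12\n")
    ofh.write("seq2\tSalmonella enterica LT2\n")

print(get_labels(path))
# -> {'seq1': 'Escherichia coli K-12', 'seq2': 'Salmonella enterica LT2'}
os.remove(path)
```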
repo: widdowquinn/pyani
path: pyani/pyani_tools.py
func_name: ANIResults.add_tot_length
language: python

```python
def add_tot_length(self, qname, sname, value, sym=True):
    """Add a total length value to self.alignment_lengths."""
    self.alignment_lengths.loc[qname, sname] = value
    if sym:
        self.alignment_lengths.loc[sname, qname] = value
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L33-L37
partition: train
repo: widdowquinn/pyani
path: pyani/pyani_tools.py
func_name: ANIResults.add_sim_errors
language: python

```python
def add_sim_errors(self, qname, sname, value, sym=True):
    """Add a similarity error value to self.similarity_errors."""
    self.similarity_errors.loc[qname, sname] = value
    if sym:
        self.similarity_errors.loc[sname, qname] = value
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L39-L43
partition: train
repo: widdowquinn/pyani
path: pyani/pyani_tools.py
func_name: ANIResults.add_pid
language: python

```python
def add_pid(self, qname, sname, value, sym=True):
    """Add a percentage identity value to self.percentage_identity."""
    self.percentage_identity.loc[qname, sname] = value
    if sym:
        self.percentage_identity.loc[sname, qname] = value
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L45-L49
partition: train
repo: widdowquinn/pyani
path: pyani/pyani_tools.py
func_name: ANIResults.add_coverage
language: python

```python
def add_coverage(self, qname, sname, qcover, scover=None):
    """Add percentage coverage values to self.alignment_coverage."""
    self.alignment_coverage.loc[qname, sname] = qcover
    if scover:
        self.alignment_coverage.loc[sname, qname] = scover
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L51-L55
partition: train
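The four add_* helpers above share the same symmetric-update pattern on a square pandas DataFrame. A standalone sketch of that pattern, with invented names and values:

```python
import pandas as pd

orgs = ["orgA", "orgB"]
matrix = pd.DataFrame(index=orgs, columns=orgs, dtype=float)

# Symmetric assignment, as in add_tot_length/add_sim_errors/add_pid:
qname, sname, value = "orgA", "orgB", 12345.0
matrix.loc[qname, sname] = value
matrix.loc[sname, qname] = value  # mirrored when sym=True
print(matrix)
```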
repo: widdowquinn/pyani
path: pyani/pyani_tools.py
func_name: BLASTcmds.get_db_name
language: python

```python
def get_db_name(self, fname):
    """Return database filename"""
    return self.funcs.db_func(fname, self.outdir, self.exes.format_exe)[1]
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L121-L123
partition: train
repo: widdowquinn/pyani
path: pyani/pyani_tools.py
func_name: BLASTcmds.build_blast_cmd
language: python

```python
def build_blast_cmd(self, fname, dbname):
    """Return BLASTN command"""
    return self.funcs.blastn_func(fname, dbname, self.outdir, self.exes.blast_exe)
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_tools.py#L125-L127
partition: train
repo: widdowquinn/pyani
path: pyani/anib.py
func_name: fragment_fasta_files
language: python

```python
def fragment_fasta_files(infiles, outdirname, fragsize):
    """Chops sequences of the passed files into fragments, returns filenames.
    - infiles - paths to each input sequence file
    - outdirname - path to output directory
    - fragsize - the size of sequence fragments
    Takes every sequence from every file in infiles, and splits them into
    consecutive fragments of length fragsize, (with any trailing sequences
    being included, even if shorter than fragsize), and writes the resulting
    set of sequences to a file with the same name in the output directory.
    All fragments are named consecutively and uniquely (within a file) as
    fragNNNNN. Sequence description fields are retained.
    """
    outfnames = []
    for fname in infiles:
        outstem, outext = os.path.splitext(os.path.split(fname)[-1])
        outfname = os.path.join(outdirname, outstem) + "-fragments" + outext
        outseqs = []
        count = 0
        for seq in SeqIO.parse(fname, "fasta"):
            idx = 0
            while idx < len(seq):
                count += 1
                newseq = seq[idx : idx + fragsize]
                newseq.id = "frag%05d" % count
                outseqs.append(newseq)
                idx += fragsize
        outfnames.append(outfname)
        SeqIO.write(outseqs, outfname, "fasta")
    return outfnames, get_fraglength_dict(outfnames)
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L100-L130
partition: train
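The while loop above slices each sequence into consecutive windows, keeping the short trailing piece. The same arithmetic on a plain string, with toy values:

```python
def fragment(seq, fragsize):
    """Consecutive fragsize windows; the final fragment may be shorter."""
    return [seq[i:i + fragsize] for i in range(0, len(seq), fragsize)]

frags = fragment("A" * 2500, 1000)
print([len(f) for f in frags])  # -> [1000, 1000, 500]
```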
repo: widdowquinn/pyani
path: pyani/anib.py
func_name: get_fraglength_dict
language: python

```python
def get_fraglength_dict(fastafiles):
    """Returns dictionary of sequence fragment lengths, keyed by query name.
    - fastafiles - list of FASTA input whole sequence files
    Loops over input files and, for each, produces a dictionary with fragment
    lengths, keyed by sequence ID. These are returned as a dictionary with
    the keys being query IDs derived from filenames.
    """
    fraglength_dict = {}
    for filename in fastafiles:
        qname = os.path.split(filename)[-1].split("-fragments")[0]
        fraglength_dict[qname] = get_fragment_lengths(filename)
    return fraglength_dict
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L134-L147
partition: train
repo: widdowquinn/pyani
path: pyani/anib.py
func_name: get_fragment_lengths
language: python

```python
def get_fragment_lengths(fastafile):
    """Returns dictionary of sequence fragment lengths, keyed by fragment ID.
    Biopython's SeqIO module is used to parse all sequences in the FASTA
    file.
    NOTE: ambiguity symbols are not discounted.
    """
    fraglengths = {}
    for seq in SeqIO.parse(fastafile, "fasta"):
        fraglengths[seq.id] = len(seq)
    return fraglengths
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L151-L162
partition: train
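A small sketch of the same lengths-by-ID mapping on an in-memory FASTA string; SeqIO.parse accepts any handle, so io.StringIO works in place of a filename (assuming Biopython is installed, toy records):

```python
import io
from Bio import SeqIO

fasta = ">frag00001\nACGTACGT\n>frag00002\nACG\n"  # toy FASTA records
fraglengths = {rec.id: len(rec)
               for rec in SeqIO.parse(io.StringIO(fasta), "fasta")}
print(fraglengths)  # -> {'frag00001': 8, 'frag00002': 3}
```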
repo: widdowquinn/pyani
path: pyani/anib.py
func_name: build_db_jobs
language: python

```python
def build_db_jobs(infiles, blastcmds):
    """Returns dictionary of db-building commands, keyed by dbname."""
    dbjobdict = {}  # Dict of database construction jobs, keyed by filename
    # Create dictionary of database building jobs, keyed by db name
    # defining jobnum for later use as last job index used
    for idx, fname in enumerate(infiles):
        dbjobdict[blastcmds.get_db_name(fname)] = pyani_jobs.Job(
            "%s_db_%06d" % (blastcmds.prefix, idx), blastcmds.build_db_cmd(fname)
        )
    return dbjobdict
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L166-L175
partition: train
repo: widdowquinn/pyani
path: pyani/anib.py
func_name: make_blastcmd_builder
language: python

```python
def make_blastcmd_builder(
    mode, outdir, format_exe=None, blast_exe=None, prefix="ANIBLAST"
):
    """Returns BLASTcmds object for construction of BLAST commands."""
    if mode == "ANIb":  # BLAST/formatting executable depends on mode
        blastcmds = BLASTcmds(
            BLASTfunctions(construct_makeblastdb_cmd, construct_blastn_cmdline),
            BLASTexes(
                format_exe or pyani_config.MAKEBLASTDB_DEFAULT,
                blast_exe or pyani_config.BLASTN_DEFAULT,
            ),
            prefix,
            outdir,
        )
    else:
        blastcmds = BLASTcmds(
            BLASTfunctions(construct_formatdb_cmd, construct_blastall_cmdline),
            BLASTexes(
                format_exe or pyani_config.FORMATDB_DEFAULT,
                blast_exe or pyani_config.BLASTALL_DEFAULT,
            ),
            prefix,
            outdir,
        )
    return blastcmds
```
sha: 2b24ec971401e04024bba896e4011984fe3f53f0
url: https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L178-L202
partition: train
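Only the literal mode string "ANIb" selects the BLAST+ tool pair (makeblastdb/blastn); any other value falls through to the legacy pair (formatdb/blastall). A sketch of that selection logic in isolation, with the executables reduced to plain names:

```python
def select_tools(mode):
    """Mirror of the mode test above: BLAST+ for ANIb, legacy otherwise."""
    if mode == "ANIb":
        return ("makeblastdb", "blastn")
    return ("formatdb", "blastall")

print(select_tools("ANIb"))         # -> ('makeblastdb', 'blastn')
print(select_tools("ANIblastall"))  # -> ('formatdb', 'blastall')
```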
repo: widdowquinn/pyani
path: pyani/anib.py
func_name: make_job_graph
language: python

```python
def make_job_graph(infiles, fragfiles, blastcmds):
    """Return a job dependency graph, based on the passed input sequence files.
    - infiles - a list of paths to input FASTA files
    - fragfiles - a list of paths to fragmented input FASTA files
    By default, will run ANIb - it *is* possible to make a mess of passing the
    wrong executable for the mode you're using.
    All items in the returned graph list are BLAST executable jobs that must
    be run *after* the corresponding database creation. The Job objects
    corresponding to the database creation are contained as dependencies.
    How those jobs are scheduled depends on the scheduler (see
    run_multiprocessing.py, run_sge.py)
    """
    joblist = []  # Holds list of job dependency graphs
    # Get dictionary of database-building jobs
    dbjobdict = build_db_jobs(infiles, blastcmds)
    # Create list of BLAST executable jobs, with dependencies
    jobnum = len(dbjobdict)
    for idx, fname1 in enumerate(fragfiles[:-1]):
        for fname2 in fragfiles[idx + 1 :]:
            jobnum += 1
            jobs = [
                pyani_jobs.Job(
                    "%s_exe_%06d_a" % (blastcmds.prefix, jobnum),
                    blastcmds.build_blast_cmd(fname1, fname2.replace("-fragments", "")),
                ),
                pyani_jobs.Job(
                    "%s_exe_%06d_b" % (blastcmds.prefix, jobnum),
                    blastcmds.build_blast_cmd(fname2, fname1.replace("-fragments", "")),
                ),
            ]
            jobs[0].add_dependency(dbjobdict[fname1.replace("-fragments", "")])
            jobs[1].add_dependency(dbjobdict[fname2.replace("-fragments", "")])
            joblist.extend(jobs)
    # Return the dependency graph
    return joblist
```
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L206-L246
|
train
|
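A standalone sketch of the pairwise scheduling pattern that make_job_graph implements: every unordered pair of fragment files yields two BLAST jobs (query vs subject and the reverse), each of which then depends on the matching database-build job. Filenames here are illustrative:

fragfiles = ["g1-fragments.fna", "g2-fragments.fna", "g3-fragments.fna"]
comparisons = []
for idx, fname1 in enumerate(fragfiles[:-1]):
    for fname2 in fragfiles[idx + 1:]:
        comparisons.append((fname1, fname2.replace("-fragments", "")))  # a vs b
        comparisons.append((fname2, fname1.replace("-fragments", "")))  # b vs a
print(len(comparisons))  # n * (n - 1) = 6 comparisons for 3 genomes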
widdowquinn/pyani
|
pyani/anib.py
|
construct_makeblastdb_cmd
|
def construct_makeblastdb_cmd(
filename, outdir, blastdb_exe=pyani_config.MAKEBLASTDB_DEFAULT
):
"""Returns a single makeblastdb command.
- filename - input filename
- blastdb_exe - path to the makeblastdb executable
"""
title = os.path.splitext(os.path.split(filename)[-1])[0]
outfilename = os.path.join(outdir, os.path.split(filename)[-1])
return (
"{0} -dbtype nucl -in {1} -title {2} -out {3}".format(
blastdb_exe, filename, title, outfilename
),
outfilename,
)
|
python
|
def construct_makeblastdb_cmd(
filename, outdir, blastdb_exe=pyani_config.MAKEBLASTDB_DEFAULT
):
"""Returns a single makeblastdb command.
- filename - input filename
- blastdb_exe - path to the makeblastdb executable
"""
title = os.path.splitext(os.path.split(filename)[-1])[0]
outfilename = os.path.join(outdir, os.path.split(filename)[-1])
return (
"{0} -dbtype nucl -in {1} -title {2} -out {3}".format(
blastdb_exe, filename, title, outfilename
),
outfilename,
)
|
[
"def",
"construct_makeblastdb_cmd",
"(",
"filename",
",",
"outdir",
",",
"blastdb_exe",
"=",
"pyani_config",
".",
"MAKEBLASTDB_DEFAULT",
")",
":",
"title",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
"outfilename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"[",
"-",
"1",
"]",
")",
"return",
"(",
"\"{0} -dbtype nucl -in {1} -title {2} -out {3}\"",
".",
"format",
"(",
"blastdb_exe",
",",
"filename",
",",
"title",
",",
"outfilename",
")",
",",
"outfilename",
",",
")"
] |
Returns a single makeblastdb command.
- filename - input filename
- blastdb_exe - path to the makeblastdb executable
|
[
"Returns",
"a",
"single",
"makeblastdb",
"command",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L271-L286
|
train
|
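A minimal usage sketch (assumes pyani is installed; paths are illustrative). The returned tuple pairs the shell command with the path of the database it will create:

from pyani.anib import construct_makeblastdb_cmd

cmd, dbpath = construct_makeblastdb_cmd("genomes/g1.fna", "blastdbs")
print(cmd)     # e.g. "makeblastdb -dbtype nucl -in genomes/g1.fna -title g1 -out blastdbs/g1.fna"
print(dbpath)  # blastdbs/g1.fna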
widdowquinn/pyani
|
pyani/anib.py
|
construct_formatdb_cmd
|
def construct_formatdb_cmd(filename, outdir, blastdb_exe=pyani_config.FORMATDB_DEFAULT):
"""Returns a single formatdb command.
- filename - input filename
- blastdb_exe - path to the formatdb executable
"""
title = os.path.splitext(os.path.split(filename)[-1])[0]
newfilename = os.path.join(outdir, os.path.split(filename)[-1])
shutil.copy(filename, newfilename)
return (
"{0} -p F -i {1} -t {2}".format(blastdb_exe, newfilename, title),
newfilename,
)
|
python
|
def construct_formatdb_cmd(filename, outdir, blastdb_exe=pyani_config.FORMATDB_DEFAULT):
"""Returns a single formatdb command.
- filename - input filename
- blastdb_exe - path to the formatdb executable
"""
title = os.path.splitext(os.path.split(filename)[-1])[0]
newfilename = os.path.join(outdir, os.path.split(filename)[-1])
shutil.copy(filename, newfilename)
return (
"{0} -p F -i {1} -t {2}".format(blastdb_exe, newfilename, title),
newfilename,
)
|
[
"def",
"construct_formatdb_cmd",
"(",
"filename",
",",
"outdir",
",",
"blastdb_exe",
"=",
"pyani_config",
".",
"FORMATDB_DEFAULT",
")",
":",
"title",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
"newfilename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"os",
".",
"path",
".",
"split",
"(",
"filename",
")",
"[",
"-",
"1",
"]",
")",
"shutil",
".",
"copy",
"(",
"filename",
",",
"newfilename",
")",
"return",
"(",
"\"{0} -p F -i {1} -t {2}\"",
".",
"format",
"(",
"blastdb_exe",
",",
"newfilename",
",",
"title",
")",
",",
"newfilename",
",",
")"
] |
Returns a single formatdb command.
- filename - input filename
- blastdb_exe - path to the formatdb executable
|
[
"Returns",
"a",
"single",
"formatdb",
"command",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L290-L302
|
train
|
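The legacy-BLAST counterpart has a side effect worth noting: the input FASTA is copied into outdir before the command string is built, because formatdb writes its database files alongside the input. A sketch with illustrative paths (the input file must exist, since the copy happens at call time):

from pyani.anib import construct_formatdb_cmd

cmd, newpath = construct_formatdb_cmd("genomes/g1.fna", "blastdbs")
print(cmd)  # e.g. "formatdb -p F -i blastdbs/g1.fna -t g1"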
widdowquinn/pyani
|
pyani/anib.py
|
generate_blastn_commands
|
def generate_blastn_commands(filenames, outdir, blast_exe=None, mode="ANIb"):
"""Return a list of blastn command-lines for ANIm
- filenames - a list of paths to fragmented input FASTA files
- outdir - path to output directory
- blastn_exe - path to BLASTN executable
Assumes that the fragment sequence input filenames have the form
ACCESSION-fragments.ext, where the corresponding BLAST database filenames
have the form ACCESSION.ext. This is the convention followed by the
fragment_FASTA_files() function above.
"""
if mode == "ANIb":
construct_blast_cmdline = construct_blastn_cmdline
else:
construct_blast_cmdline = construct_blastall_cmdline
cmdlines = []
for idx, fname1 in enumerate(filenames[:-1]):
dbname1 = fname1.replace("-fragments", "")
for fname2 in filenames[idx + 1 :]:
dbname2 = fname2.replace("-fragments", "")
if blast_exe is None:
cmdlines.append(construct_blast_cmdline(fname1, dbname2, outdir))
cmdlines.append(construct_blast_cmdline(fname2, dbname1, outdir))
else:
cmdlines.append(
construct_blast_cmdline(fname1, dbname2, outdir, blast_exe)
)
cmdlines.append(
construct_blast_cmdline(fname2, dbname1, outdir, blast_exe)
)
return cmdlines
|
python
|
def generate_blastn_commands(filenames, outdir, blast_exe=None, mode="ANIb"):
"""Return a list of blastn command-lines for ANIm
- filenames - a list of paths to fragmented input FASTA files
- outdir - path to output directory
- blastn_exe - path to BLASTN executable
Assumes that the fragment sequence input filenames have the form
ACCESSION-fragments.ext, where the corresponding BLAST database filenames
have the form ACCESSION.ext. This is the convention followed by the
fragment_FASTA_files() function above.
"""
if mode == "ANIb":
construct_blast_cmdline = construct_blastn_cmdline
else:
construct_blast_cmdline = construct_blastall_cmdline
cmdlines = []
for idx, fname1 in enumerate(filenames[:-1]):
dbname1 = fname1.replace("-fragments", "")
for fname2 in filenames[idx + 1 :]:
dbname2 = fname2.replace("-fragments", "")
if blast_exe is None:
cmdlines.append(construct_blast_cmdline(fname1, dbname2, outdir))
cmdlines.append(construct_blast_cmdline(fname2, dbname1, outdir))
else:
cmdlines.append(
construct_blast_cmdline(fname1, dbname2, outdir, blast_exe)
)
cmdlines.append(
construct_blast_cmdline(fname2, dbname1, outdir, blast_exe)
)
return cmdlines
|
[
"def",
"generate_blastn_commands",
"(",
"filenames",
",",
"outdir",
",",
"blast_exe",
"=",
"None",
",",
"mode",
"=",
"\"ANIb\"",
")",
":",
"if",
"mode",
"==",
"\"ANIb\"",
":",
"construct_blast_cmdline",
"=",
"construct_blastn_cmdline",
"else",
":",
"construct_blast_cmdline",
"=",
"construct_blastall_cmdline",
"cmdlines",
"=",
"[",
"]",
"for",
"idx",
",",
"fname1",
"in",
"enumerate",
"(",
"filenames",
"[",
":",
"-",
"1",
"]",
")",
":",
"dbname1",
"=",
"fname1",
".",
"replace",
"(",
"\"-fragments\"",
",",
"\"\"",
")",
"for",
"fname2",
"in",
"filenames",
"[",
"idx",
"+",
"1",
":",
"]",
":",
"dbname2",
"=",
"fname2",
".",
"replace",
"(",
"\"-fragments\"",
",",
"\"\"",
")",
"if",
"blast_exe",
"is",
"None",
":",
"cmdlines",
".",
"append",
"(",
"construct_blast_cmdline",
"(",
"fname1",
",",
"dbname2",
",",
"outdir",
")",
")",
"cmdlines",
".",
"append",
"(",
"construct_blast_cmdline",
"(",
"fname2",
",",
"dbname1",
",",
"outdir",
")",
")",
"else",
":",
"cmdlines",
".",
"append",
"(",
"construct_blast_cmdline",
"(",
"fname1",
",",
"dbname2",
",",
"outdir",
",",
"blast_exe",
")",
")",
"cmdlines",
".",
"append",
"(",
"construct_blast_cmdline",
"(",
"fname2",
",",
"dbname1",
",",
"outdir",
",",
"blast_exe",
")",
")",
"return",
"cmdlines"
] |
Return a list of blastn command-lines for ANIb
- filenames - a list of paths to fragmented input FASTA files
- outdir - path to output directory
- blast_exe - path to BLASTN executable
Assumes that the fragment sequence input filenames have the form
ACCESSION-fragments.ext, where the corresponding BLAST database filenames
have the form ACCESSION.ext. This is the convention followed by the
fragment_FASTA_files() function above.
|
[
"Return",
"a",
"list",
"of",
"blastn",
"command",
"-",
"lines",
"for",
"ANIm"
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L306-L337
|
train
|
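A short usage sketch (assumes pyani is importable; filenames are illustrative and follow the ACCESSION-fragments.ext convention the docstring describes):

from pyani.anib import generate_blastn_commands

fragfiles = ["out/g1-fragments.fna", "out/g2-fragments.fna"]
for cmd in generate_blastn_commands(fragfiles, "out"):  # mode="ANIb" -> blastn
    print(cmd)  # two lines: g1 fragments vs g2 database, and the reverse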
widdowquinn/pyani
|
pyani/anib.py
|
construct_blastn_cmdline
|
def construct_blastn_cmdline(
fname1, fname2, outdir, blastn_exe=pyani_config.BLASTN_DEFAULT
):
"""Returns a single blastn command.
    - fname1, fname2 - paths to the query fragment file and the database
- blastn_exe - path to BLASTN executable
"""
fstem1 = os.path.splitext(os.path.split(fname1)[-1])[0]
fstem2 = os.path.splitext(os.path.split(fname2)[-1])[0]
fstem1 = fstem1.replace("-fragments", "")
prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2))
cmd = (
"{0} -out {1}.blast_tab -query {2} -db {3} "
+ "-xdrop_gap_final 150 -dust no -evalue 1e-15 "
+ "-max_target_seqs 1 -outfmt '6 qseqid sseqid length mismatch "
+ "pident nident qlen slen qstart qend sstart send positive "
+ "ppos gaps' -task blastn"
)
return cmd.format(blastn_exe, prefix, fname1, fname2)
|
python
|
def construct_blastn_cmdline(
fname1, fname2, outdir, blastn_exe=pyani_config.BLASTN_DEFAULT
):
"""Returns a single blastn command.
    - fname1, fname2 - paths to the query fragment file and the database
- blastn_exe - path to BLASTN executable
"""
fstem1 = os.path.splitext(os.path.split(fname1)[-1])[0]
fstem2 = os.path.splitext(os.path.split(fname2)[-1])[0]
fstem1 = fstem1.replace("-fragments", "")
prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2))
cmd = (
"{0} -out {1}.blast_tab -query {2} -db {3} "
+ "-xdrop_gap_final 150 -dust no -evalue 1e-15 "
+ "-max_target_seqs 1 -outfmt '6 qseqid sseqid length mismatch "
+ "pident nident qlen slen qstart qend sstart send positive "
+ "ppos gaps' -task blastn"
)
return cmd.format(blastn_exe, prefix, fname1, fname2)
|
[
"def",
"construct_blastn_cmdline",
"(",
"fname1",
",",
"fname2",
",",
"outdir",
",",
"blastn_exe",
"=",
"pyani_config",
".",
"BLASTN_DEFAULT",
")",
":",
"fstem1",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"fname1",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
"fstem2",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"fname2",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
"fstem1",
"=",
"fstem1",
".",
"replace",
"(",
"\"-fragments\"",
",",
"\"\"",
")",
"prefix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"\"%s_vs_%s\"",
"%",
"(",
"fstem1",
",",
"fstem2",
")",
")",
"cmd",
"=",
"(",
"\"{0} -out {1}.blast_tab -query {2} -db {3} \"",
"+",
"\"-xdrop_gap_final 150 -dust no -evalue 1e-15 \"",
"+",
"\"-max_target_seqs 1 -outfmt '6 qseqid sseqid length mismatch \"",
"+",
"\"pident nident qlen slen qstart qend sstart send positive \"",
"+",
"\"ppos gaps' -task blastn\"",
")",
"return",
"cmd",
".",
"format",
"(",
"blastn_exe",
",",
"prefix",
",",
"fname1",
",",
"fname2",
")"
] |
Returns a single blastn command.
- fname1, fname2 - paths to the query fragment file and the database
- blastn_exe - path to BLASTN executable
|
[
"Returns",
"a",
"single",
"blastn",
"command",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L341-L360
|
train
|
widdowquinn/pyani
|
pyani/anib.py
|
construct_blastall_cmdline
|
def construct_blastall_cmdline(
fname1, fname2, outdir, blastall_exe=pyani_config.BLASTALL_DEFAULT
):
"""Returns a single blastall command.
- blastall_exe - path to BLASTALL executable
"""
fstem1 = os.path.splitext(os.path.split(fname1)[-1])[0]
fstem2 = os.path.splitext(os.path.split(fname2)[-1])[0]
fstem1 = fstem1.replace("-fragments", "")
prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2))
cmd = (
"{0} -p blastn -o {1}.blast_tab -i {2} -d {3} "
+ "-X 150 -q -1 -F F -e 1e-15 "
+ "-b 1 -v 1 -m 8"
)
return cmd.format(blastall_exe, prefix, fname1, fname2)
|
python
|
def construct_blastall_cmdline(
fname1, fname2, outdir, blastall_exe=pyani_config.BLASTALL_DEFAULT
):
"""Returns a single blastall command.
- blastall_exe - path to BLASTALL executable
"""
fstem1 = os.path.splitext(os.path.split(fname1)[-1])[0]
fstem2 = os.path.splitext(os.path.split(fname2)[-1])[0]
fstem1 = fstem1.replace("-fragments", "")
prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2))
cmd = (
"{0} -p blastn -o {1}.blast_tab -i {2} -d {3} "
+ "-X 150 -q -1 -F F -e 1e-15 "
+ "-b 1 -v 1 -m 8"
)
return cmd.format(blastall_exe, prefix, fname1, fname2)
|
[
"def",
"construct_blastall_cmdline",
"(",
"fname1",
",",
"fname2",
",",
"outdir",
",",
"blastall_exe",
"=",
"pyani_config",
".",
"BLASTALL_DEFAULT",
")",
":",
"fstem1",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"fname1",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
"fstem2",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"fname2",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
"fstem1",
"=",
"fstem1",
".",
"replace",
"(",
"\"-fragments\"",
",",
"\"\"",
")",
"prefix",
"=",
"os",
".",
"path",
".",
"join",
"(",
"outdir",
",",
"\"%s_vs_%s\"",
"%",
"(",
"fstem1",
",",
"fstem2",
")",
")",
"cmd",
"=",
"(",
"\"{0} -p blastn -o {1}.blast_tab -i {2} -d {3} \"",
"+",
"\"-X 150 -q -1 -F F -e 1e-15 \"",
"+",
"\"-b 1 -v 1 -m 8\"",
")",
"return",
"cmd",
".",
"format",
"(",
"blastall_exe",
",",
"prefix",
",",
"fname1",
",",
"fname2",
")"
] |
Returns a single blastall command.
- blastall_exe - path to BLASTALL executable
|
[
"Returns",
"a",
"single",
"blastall",
"command",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L364-L380
|
train
|
widdowquinn/pyani
|
pyani/anib.py
|
process_blast
|
def process_blast(
blast_dir,
org_lengths,
fraglengths=None,
mode="ANIb",
identity=0.3,
coverage=0.7,
logger=None,
):
"""Returns a tuple of ANIb results for .blast_tab files in the output dir.
- blast_dir - path to the directory containing .blast_tab files
- org_lengths - the base count for each input sequence
- fraglengths - dictionary of query sequence fragment lengths, only
needed for BLASTALL output
- mode - parsing BLASTN+ or BLASTALL output?
- logger - a logger for messages
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - non-symmetrical: total length of alignment
- percentage_identity - non-symmetrical: ANIb (Goris) percentage identity
- alignment_coverage - non-symmetrical: coverage of query
- similarity_errors - non-symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more BLAST runs failed, or a
very distant sequence was included in the analysis.
"""
# Process directory to identify input files
blastfiles = pyani_files.get_input_files(blast_dir, ".blast_tab")
# Hold data in ANIResults object
results = ANIResults(list(org_lengths.keys()), mode)
# Fill diagonal NA values for alignment_length with org_lengths
for org, length in list(org_lengths.items()):
results.alignment_lengths[org][org] = length
# Process .blast_tab files assuming that the filename format holds:
# org1_vs_org2.blast_tab:
for blastfile in blastfiles:
qname, sname = os.path.splitext(os.path.split(blastfile)[-1])[0].split("_vs_")
# We may have BLAST files from other analyses in the same directory
# If this occurs, we raise a warning, and skip the file
if qname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Query name %s not in input " % qname
+ "sequence list, skipping %s" % blastfile
)
continue
if sname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Subject name %s not in input " % sname
+ "sequence list, skipping %s" % blastfile
)
continue
resultvals = parse_blast_tab(blastfile, fraglengths, identity, coverage, mode)
query_cover = float(resultvals[0]) / org_lengths[qname]
# Populate dataframes: when assigning data, we need to note that
# we have asymmetrical data from BLAST output, so only the
# upper triangle is populated
results.add_tot_length(qname, sname, resultvals[0], sym=False)
results.add_sim_errors(qname, sname, resultvals[1], sym=False)
results.add_pid(qname, sname, 0.01 * resultvals[2], sym=False)
results.add_coverage(qname, sname, query_cover)
return results
|
python
|
def process_blast(
blast_dir,
org_lengths,
fraglengths=None,
mode="ANIb",
identity=0.3,
coverage=0.7,
logger=None,
):
"""Returns a tuple of ANIb results for .blast_tab files in the output dir.
- blast_dir - path to the directory containing .blast_tab files
- org_lengths - the base count for each input sequence
- fraglengths - dictionary of query sequence fragment lengths, only
needed for BLASTALL output
- mode - parsing BLASTN+ or BLASTALL output?
- logger - a logger for messages
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - non-symmetrical: total length of alignment
- percentage_identity - non-symmetrical: ANIb (Goris) percentage identity
- alignment_coverage - non-symmetrical: coverage of query
- similarity_errors - non-symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more BLAST runs failed, or a
very distant sequence was included in the analysis.
"""
# Process directory to identify input files
blastfiles = pyani_files.get_input_files(blast_dir, ".blast_tab")
# Hold data in ANIResults object
results = ANIResults(list(org_lengths.keys()), mode)
# Fill diagonal NA values for alignment_length with org_lengths
for org, length in list(org_lengths.items()):
results.alignment_lengths[org][org] = length
# Process .blast_tab files assuming that the filename format holds:
# org1_vs_org2.blast_tab:
for blastfile in blastfiles:
qname, sname = os.path.splitext(os.path.split(blastfile)[-1])[0].split("_vs_")
# We may have BLAST files from other analyses in the same directory
# If this occurs, we raise a warning, and skip the file
if qname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Query name %s not in input " % qname
+ "sequence list, skipping %s" % blastfile
)
continue
if sname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Subject name %s not in input " % sname
+ "sequence list, skipping %s" % blastfile
)
continue
resultvals = parse_blast_tab(blastfile, fraglengths, identity, coverage, mode)
query_cover = float(resultvals[0]) / org_lengths[qname]
# Populate dataframes: when assigning data, we need to note that
# we have asymmetrical data from BLAST output, so only the
# upper triangle is populated
results.add_tot_length(qname, sname, resultvals[0], sym=False)
results.add_sim_errors(qname, sname, resultvals[1], sym=False)
results.add_pid(qname, sname, 0.01 * resultvals[2], sym=False)
results.add_coverage(qname, sname, query_cover)
return results
|
[
"def",
"process_blast",
"(",
"blast_dir",
",",
"org_lengths",
",",
"fraglengths",
"=",
"None",
",",
"mode",
"=",
"\"ANIb\"",
",",
"identity",
"=",
"0.3",
",",
"coverage",
"=",
"0.7",
",",
"logger",
"=",
"None",
",",
")",
":",
"# Process directory to identify input files",
"blastfiles",
"=",
"pyani_files",
".",
"get_input_files",
"(",
"blast_dir",
",",
"\".blast_tab\"",
")",
"# Hold data in ANIResults object",
"results",
"=",
"ANIResults",
"(",
"list",
"(",
"org_lengths",
".",
"keys",
"(",
")",
")",
",",
"mode",
")",
"# Fill diagonal NA values for alignment_length with org_lengths",
"for",
"org",
",",
"length",
"in",
"list",
"(",
"org_lengths",
".",
"items",
"(",
")",
")",
":",
"results",
".",
"alignment_lengths",
"[",
"org",
"]",
"[",
"org",
"]",
"=",
"length",
"# Process .blast_tab files assuming that the filename format holds:",
"# org1_vs_org2.blast_tab:",
"for",
"blastfile",
"in",
"blastfiles",
":",
"qname",
",",
"sname",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"os",
".",
"path",
".",
"split",
"(",
"blastfile",
")",
"[",
"-",
"1",
"]",
")",
"[",
"0",
"]",
".",
"split",
"(",
"\"_vs_\"",
")",
"# We may have BLAST files from other analyses in the same directory",
"# If this occurs, we raise a warning, and skip the file",
"if",
"qname",
"not",
"in",
"list",
"(",
"org_lengths",
".",
"keys",
"(",
")",
")",
":",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"\"Query name %s not in input \"",
"%",
"qname",
"+",
"\"sequence list, skipping %s\"",
"%",
"blastfile",
")",
"continue",
"if",
"sname",
"not",
"in",
"list",
"(",
"org_lengths",
".",
"keys",
"(",
")",
")",
":",
"if",
"logger",
":",
"logger",
".",
"warning",
"(",
"\"Subject name %s not in input \"",
"%",
"sname",
"+",
"\"sequence list, skipping %s\"",
"%",
"blastfile",
")",
"continue",
"resultvals",
"=",
"parse_blast_tab",
"(",
"blastfile",
",",
"fraglengths",
",",
"identity",
",",
"coverage",
",",
"mode",
")",
"query_cover",
"=",
"float",
"(",
"resultvals",
"[",
"0",
"]",
")",
"/",
"org_lengths",
"[",
"qname",
"]",
"# Populate dataframes: when assigning data, we need to note that",
"# we have asymmetrical data from BLAST output, so only the",
"# upper triangle is populated",
"results",
".",
"add_tot_length",
"(",
"qname",
",",
"sname",
",",
"resultvals",
"[",
"0",
"]",
",",
"sym",
"=",
"False",
")",
"results",
".",
"add_sim_errors",
"(",
"qname",
",",
"sname",
",",
"resultvals",
"[",
"1",
"]",
",",
"sym",
"=",
"False",
")",
"results",
".",
"add_pid",
"(",
"qname",
",",
"sname",
",",
"0.01",
"*",
"resultvals",
"[",
"2",
"]",
",",
"sym",
"=",
"False",
")",
"results",
".",
"add_coverage",
"(",
"qname",
",",
"sname",
",",
"query_cover",
")",
"return",
"results"
] |
Returns an ANIResults object of ANIb results for .blast_tab files in the output dir.
- blast_dir - path to the directory containing .blast_tab files
- org_lengths - the base count for each input sequence
- fraglengths - dictionary of query sequence fragment lengths, only
needed for BLASTALL output
- mode - parsing BLASTN+ or BLASTALL output?
- logger - a logger for messages
Returns the following pandas dataframes in an ANIResults object;
query sequences are rows, subject sequences are columns:
- alignment_lengths - non-symmetrical: total length of alignment
- percentage_identity - non-symmetrical: ANIb (Goris) percentage identity
- alignment_coverage - non-symmetrical: coverage of query
- similarity_errors - non-symmetrical: count of similarity errors
May throw a ZeroDivisionError if one or more BLAST runs failed, or a
very distant sequence was included in the analysis.
|
[
"Returns",
"a",
"tuple",
"of",
"ANIb",
"results",
"for",
".",
"blast_tab",
"files",
"in",
"the",
"output",
"dir",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/anib.py#L384-L453
|
train
|
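A collation sketch, assuming "out" holds g1_vs_g2.blast_tab files produced by the commands above; the genome lengths are illustrative, and the attribute name on the returned ANIResults object is taken from the docstring's dataframe list (an assumption about ANIResults):

from pyani.anib import process_blast

org_lengths = {"g1": 5000000, "g2": 4800000}  # illustrative base counts
results = process_blast("out", org_lengths, mode="ANIb")
print(results.percentage_identity)  # queries as rows, subjects as columns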
widdowquinn/pyani
|
pyani/run_sge.py
|
split_seq
|
def split_seq(iterable, size):
"""Splits a passed iterable into chunks of a given size."""
elm = iter(iterable)
item = list(itertools.islice(elm, size))
while item:
yield item
item = list(itertools.islice(elm, size))
|
python
|
def split_seq(iterable, size):
"""Splits a passed iterable into chunks of a given size."""
elm = iter(iterable)
item = list(itertools.islice(elm, size))
while item:
yield item
item = list(itertools.islice(elm, size))
|
[
"def",
"split_seq",
"(",
"iterable",
",",
"size",
")",
":",
"elm",
"=",
"iter",
"(",
"iterable",
")",
"item",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"elm",
",",
"size",
")",
")",
"while",
"item",
":",
"yield",
"item",
"item",
"=",
"list",
"(",
"itertools",
".",
"islice",
"(",
"elm",
",",
"size",
")",
")"
] |
Splits a passed iterable into chunks of a given size.
|
[
"Splits",
"a",
"passed",
"iterable",
"into",
"chunks",
"of",
"a",
"given",
"size",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L23-L29
|
train
|
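A quick demonstration, runnable as-is once pyani is importable:

from pyani.run_sge import split_seq

print(list(split_seq(range(7), 3)))  # [[0, 1, 2], [3, 4, 5], [6]]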
widdowquinn/pyani
|
pyani/run_sge.py
|
build_joblist
|
def build_joblist(jobgraph):
"""Returns a list of jobs, from a passed jobgraph."""
jobset = set()
for job in jobgraph:
jobset = populate_jobset(job, jobset, depth=1)
return list(jobset)
|
python
|
def build_joblist(jobgraph):
"""Returns a list of jobs, from a passed jobgraph."""
jobset = set()
for job in jobgraph:
jobset = populate_jobset(job, jobset, depth=1)
return list(jobset)
|
[
"def",
"build_joblist",
"(",
"jobgraph",
")",
":",
"jobset",
"=",
"set",
"(",
")",
"for",
"job",
"in",
"jobgraph",
":",
"jobset",
"=",
"populate_jobset",
"(",
"job",
",",
"jobset",
",",
"depth",
"=",
"1",
")",
"return",
"list",
"(",
"jobset",
")"
] |
Returns a list of jobs, from a passed jobgraph.
|
[
"Returns",
"a",
"list",
"of",
"jobs",
"from",
"a",
"passed",
"jobgraph",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L33-L38
|
train
|
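A sketch showing that dependencies are flattened into the returned list (Job names and commands are illustrative; the Job(name, command) signature matches its use in make_job_graph above):

from pyani import pyani_jobs
from pyani.run_sge import build_joblist

dbjob = pyani_jobs.Job("db_g1", "makeblastdb -in g1.fna")
blastjob = pyani_jobs.Job("g1_vs_g2", "blastn -query g1-fragments.fna")
blastjob.add_dependency(dbjob)
print(sorted(job.name for job in build_joblist([blastjob])))  # ['db_g1', 'g1_vs_g2']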
widdowquinn/pyani
|
pyani/run_sge.py
|
compile_jobgroups_from_joblist
|
def compile_jobgroups_from_joblist(joblist, jgprefix, sgegroupsize):
"""Return list of jobgroups, rather than list of jobs."""
jobcmds = defaultdict(list)
for job in joblist:
jobcmds[job.command.split(' ', 1)[0]].append(job.command)
jobgroups = []
for cmds in list(jobcmds.items()):
# Break arglist up into batches of sgegroupsize (default: 10,000)
sublists = split_seq(cmds[1], sgegroupsize)
count = 0
for sublist in sublists:
count += 1
sge_jobcmdlist = ['\"%s\"' % jc for jc in sublist]
jobgroups.append(JobGroup("%s_%d" % (jgprefix, count),
"$cmds",
arguments={'cmds': sge_jobcmdlist}))
return jobgroups
|
python
|
def compile_jobgroups_from_joblist(joblist, jgprefix, sgegroupsize):
"""Return list of jobgroups, rather than list of jobs."""
jobcmds = defaultdict(list)
for job in joblist:
jobcmds[job.command.split(' ', 1)[0]].append(job.command)
jobgroups = []
for cmds in list(jobcmds.items()):
# Break arglist up into batches of sgegroupsize (default: 10,000)
sublists = split_seq(cmds[1], sgegroupsize)
count = 0
for sublist in sublists:
count += 1
sge_jobcmdlist = ['\"%s\"' % jc for jc in sublist]
jobgroups.append(JobGroup("%s_%d" % (jgprefix, count),
"$cmds",
arguments={'cmds': sge_jobcmdlist}))
return jobgroups
|
[
"def",
"compile_jobgroups_from_joblist",
"(",
"joblist",
",",
"jgprefix",
",",
"sgegroupsize",
")",
":",
"jobcmds",
"=",
"defaultdict",
"(",
"list",
")",
"for",
"job",
"in",
"joblist",
":",
"jobcmds",
"[",
"job",
".",
"command",
".",
"split",
"(",
"' '",
",",
"1",
")",
"[",
"0",
"]",
"]",
".",
"append",
"(",
"job",
".",
"command",
")",
"jobgroups",
"=",
"[",
"]",
"for",
"cmds",
"in",
"list",
"(",
"jobcmds",
".",
"items",
"(",
")",
")",
":",
"# Break arglist up into batches of sgegroupsize (default: 10,000)",
"sublists",
"=",
"split_seq",
"(",
"cmds",
"[",
"1",
"]",
",",
"sgegroupsize",
")",
"count",
"=",
"0",
"for",
"sublist",
"in",
"sublists",
":",
"count",
"+=",
"1",
"sge_jobcmdlist",
"=",
"[",
"'\\\"%s\\\"'",
"%",
"jc",
"for",
"jc",
"in",
"sublist",
"]",
"jobgroups",
".",
"append",
"(",
"JobGroup",
"(",
"\"%s_%d\"",
"%",
"(",
"jgprefix",
",",
"count",
")",
",",
"\"$cmds\"",
",",
"arguments",
"=",
"{",
"'cmds'",
":",
"sge_jobcmdlist",
"}",
")",
")",
"return",
"jobgroups"
] |
Return list of jobgroups, rather than list of jobs.
|
[
"Return",
"list",
"of",
"jobgroups",
"rather",
"than",
"list",
"of",
"jobs",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L42-L58
|
train
|
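A batching sketch: three blastn commands with a group size of two produce two JobGroups (names and commands illustrative):

from pyani import pyani_jobs
from pyani.run_sge import compile_jobgroups_from_joblist

jobs = [pyani_jobs.Job("job%d" % i, "blastn -query q%d.fna" % i) for i in range(3)]
groups = compile_jobgroups_from_joblist(jobs, "ANIb_JG", sgegroupsize=2)
print([group.name for group in groups])  # ['ANIb_JG_1', 'ANIb_JG_2']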
widdowquinn/pyani
|
pyani/run_sge.py
|
run_dependency_graph
|
def run_dependency_graph(jobgraph, logger=None, jgprefix="ANIm_SGE_JG",
sgegroupsize=10000, sgeargs=None):
"""Creates and runs GridEngine scripts for jobs based on the passed
jobgraph.
- jobgraph - list of jobs, which may have dependencies.
    - logger - a logger module logger (optional)
    - jgprefix - a prefix for the submitted jobs, in the scheduler
    - sgegroupsize - the maximum size for an array job submission
    - sgeargs - additional arguments to qsub
    The strategy here is to loop over each job in the dependency graph, and
    add the job to a new list of jobs, swapping out the Job dependency for
    the name of the Job on which it depends.
"""
joblist = build_joblist(jobgraph)
# Try to be informative by telling the user what jobs will run
dep_count = 0 # how many dependencies are there
if logger:
logger.info("Jobs to run with scheduler")
for job in joblist:
logger.info("{0}: {1}".format(job.name, job.command))
if len(job.dependencies):
dep_count += len(job.dependencies)
for dep in job.dependencies:
logger.info("\t[^ depends on: %s]" % dep.name)
logger.info("There are %d job dependencies" % dep_count)
# If there are no job dependencies, we can use an array (or series of
# arrays) to schedule our jobs. This cuts down on problems with long
# job lists choking up the queue.
if dep_count == 0:
logger.info("Compiling jobs into JobGroups")
joblist = compile_jobgroups_from_joblist(joblist, jgprefix,
sgegroupsize)
# Send jobs to scheduler
logger.info("Running jobs with scheduler...")
logger.info("Jobs passed to scheduler in order:")
for job in joblist:
logger.info("\t%s" % job.name)
build_and_submit_jobs(os.curdir, joblist, sgeargs)
logger.info("Waiting for SGE-submitted jobs to finish (polling)")
for job in joblist:
job.wait()
|
python
|
def run_dependency_graph(jobgraph, logger=None, jgprefix="ANIm_SGE_JG",
sgegroupsize=10000, sgeargs=None):
"""Creates and runs GridEngine scripts for jobs based on the passed
jobgraph.
- jobgraph - list of jobs, which may have dependencies.
    - logger - a logger module logger (optional)
    - jgprefix - a prefix for the submitted jobs, in the scheduler
    - sgegroupsize - the maximum size for an array job submission
    - sgeargs - additional arguments to qsub
    The strategy here is to loop over each job in the dependency graph, and
    add the job to a new list of jobs, swapping out the Job dependency for
    the name of the Job on which it depends.
"""
joblist = build_joblist(jobgraph)
# Try to be informative by telling the user what jobs will run
dep_count = 0 # how many dependencies are there
if logger:
logger.info("Jobs to run with scheduler")
for job in joblist:
logger.info("{0}: {1}".format(job.name, job.command))
if len(job.dependencies):
dep_count += len(job.dependencies)
for dep in job.dependencies:
logger.info("\t[^ depends on: %s]" % dep.name)
logger.info("There are %d job dependencies" % dep_count)
# If there are no job dependencies, we can use an array (or series of
# arrays) to schedule our jobs. This cuts down on problems with long
# job lists choking up the queue.
if dep_count == 0:
logger.info("Compiling jobs into JobGroups")
joblist = compile_jobgroups_from_joblist(joblist, jgprefix,
sgegroupsize)
# Send jobs to scheduler
logger.info("Running jobs with scheduler...")
logger.info("Jobs passed to scheduler in order:")
for job in joblist:
logger.info("\t%s" % job.name)
build_and_submit_jobs(os.curdir, joblist, sgeargs)
logger.info("Waiting for SGE-submitted jobs to finish (polling)")
for job in joblist:
job.wait()
|
[
"def",
"run_dependency_graph",
"(",
"jobgraph",
",",
"logger",
"=",
"None",
",",
"jgprefix",
"=",
"\"ANIm_SGE_JG\"",
",",
"sgegroupsize",
"=",
"10000",
",",
"sgeargs",
"=",
"None",
")",
":",
"joblist",
"=",
"build_joblist",
"(",
"jobgraph",
")",
"# Try to be informative by telling the user what jobs will run",
"dep_count",
"=",
"0",
"# how many dependencies are there",
"if",
"logger",
":",
"logger",
".",
"info",
"(",
"\"Jobs to run with scheduler\"",
")",
"for",
"job",
"in",
"joblist",
":",
"logger",
".",
"info",
"(",
"\"{0}: {1}\"",
".",
"format",
"(",
"job",
".",
"name",
",",
"job",
".",
"command",
")",
")",
"if",
"len",
"(",
"job",
".",
"dependencies",
")",
":",
"dep_count",
"+=",
"len",
"(",
"job",
".",
"dependencies",
")",
"for",
"dep",
"in",
"job",
".",
"dependencies",
":",
"logger",
".",
"info",
"(",
"\"\\t[^ depends on: %s]\"",
"%",
"dep",
".",
"name",
")",
"logger",
".",
"info",
"(",
"\"There are %d job dependencies\"",
"%",
"dep_count",
")",
"# If there are no job dependencies, we can use an array (or series of",
"# arrays) to schedule our jobs. This cuts down on problems with long",
"# job lists choking up the queue.",
"if",
"dep_count",
"==",
"0",
":",
"logger",
".",
"info",
"(",
"\"Compiling jobs into JobGroups\"",
")",
"joblist",
"=",
"compile_jobgroups_from_joblist",
"(",
"joblist",
",",
"jgprefix",
",",
"sgegroupsize",
")",
"# Send jobs to scheduler",
"logger",
".",
"info",
"(",
"\"Running jobs with scheduler...\"",
")",
"logger",
".",
"info",
"(",
"\"Jobs passed to scheduler in order:\"",
")",
"for",
"job",
"in",
"joblist",
":",
"logger",
".",
"info",
"(",
"\"\\t%s\"",
"%",
"job",
".",
"name",
")",
"build_and_submit_jobs",
"(",
"os",
".",
"curdir",
",",
"joblist",
",",
"sgeargs",
")",
"logger",
".",
"info",
"(",
"\"Waiting for SGE-submitted jobs to finish (polling)\"",
")",
"for",
"job",
"in",
"joblist",
":",
"job",
".",
"wait",
"(",
")"
] |
Creates and runs GridEngine scripts for jobs based on the passed
jobgraph.
- jobgraph - list of jobs, which may have dependencies.
- logger - a logger module logger (optional)
- jgprefix - a prefix for the submitted jobs, in the scheduler
- sgegroupsize - the maximum size for an array job submission
- sgeargs - additional arguments to qsub
The strategy here is to loop over each job in the dependency graph, and
add the job to a new list of jobs, swapping out the Job dependency for
the name of the Job on which it depends.
|
[
"Creates",
"and",
"runs",
"GridEngine",
"scripts",
"for",
"jobs",
"based",
"on",
"the",
"passed",
"jobgraph",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L62-L112
|
train
|
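An end-to-end sketch; this submits real jobs, so it needs a Grid Engine qsub on the PATH. Note that the dep_count == 0 branch logs unconditionally, so passing a logger is effectively required when no job has dependencies:

import logging
from pyani import pyani_jobs
from pyani.run_sge import run_dependency_graph

logging.basicConfig(level=logging.INFO)
jobgraph = [pyani_jobs.Job("hello", "echo hello")]  # illustrative one-job graph
run_dependency_graph(jobgraph, logger=logging.getLogger("sge"))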
widdowquinn/pyani
|
pyani/run_sge.py
|
populate_jobset
|
def populate_jobset(job, jobset, depth):
""" Creates a set of jobs, containing jobs at difference depths of the
dependency tree, retaining dependencies as strings, not Jobs.
"""
jobset.add(job)
if len(job.dependencies) == 0:
return jobset
for j in job.dependencies:
jobset = populate_jobset(j, jobset, depth+1)
return jobset
|
python
|
def populate_jobset(job, jobset, depth):
""" Creates a set of jobs, containing jobs at difference depths of the
dependency tree, retaining dependencies as strings, not Jobs.
"""
jobset.add(job)
if len(job.dependencies) == 0:
return jobset
for j in job.dependencies:
jobset = populate_jobset(j, jobset, depth+1)
return jobset
|
[
"def",
"populate_jobset",
"(",
"job",
",",
"jobset",
",",
"depth",
")",
":",
"jobset",
".",
"add",
"(",
"job",
")",
"if",
"len",
"(",
"job",
".",
"dependencies",
")",
"==",
"0",
":",
"return",
"jobset",
"for",
"j",
"in",
"job",
".",
"dependencies",
":",
"jobset",
"=",
"populate_jobset",
"(",
"j",
",",
"jobset",
",",
"depth",
"+",
"1",
")",
"return",
"jobset"
] |
Creates a set of jobs, containing jobs at different depths of the
dependency tree, retaining dependencies as strings, not Jobs.
|
[
"Creates",
"a",
"set",
"of",
"jobs",
"containing",
"jobs",
"at",
"difference",
"depths",
"of",
"the",
"dependency",
"tree",
"retaining",
"dependencies",
"as",
"strings",
"not",
"Jobs",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L115-L124
|
train
|
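A recursion sketch: the passed job and its whole dependency subtree end up in the set:

from pyani import pyani_jobs
from pyani.run_sge import populate_jobset

parent = pyani_jobs.Job("parent", "echo parent")
parent.add_dependency(pyani_jobs.Job("child", "echo child"))
names = sorted(job.name for job in populate_jobset(parent, set(), depth=1))
print(names)  # ['child', 'parent']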
widdowquinn/pyani
|
pyani/run_sge.py
|
build_job_scripts
|
def build_job_scripts(root_dir, jobs):
"""Constructs the script for each passed Job in the jobs iterable
- root_dir Path to output directory
"""
# Loop over the job list, creating each job script in turn, and then adding
    # scriptpath to the Job object
for job in jobs:
scriptpath = os.path.join(root_dir, "jobs", job.name)
with open(scriptpath, "w") as scriptfile:
scriptfile.write("#!/bin/sh\n#$ -S /bin/bash\n%s\n" % job.script)
job.scriptpath = scriptpath
|
python
|
def build_job_scripts(root_dir, jobs):
"""Constructs the script for each passed Job in the jobs iterable
- root_dir Path to output directory
"""
# Loop over the job list, creating each job script in turn, and then adding
    # scriptpath to the Job object
for job in jobs:
scriptpath = os.path.join(root_dir, "jobs", job.name)
with open(scriptpath, "w") as scriptfile:
scriptfile.write("#!/bin/sh\n#$ -S /bin/bash\n%s\n" % job.script)
job.scriptpath = scriptpath
|
[
"def",
"build_job_scripts",
"(",
"root_dir",
",",
"jobs",
")",
":",
"# Loop over the job list, creating each job script in turn, and then adding",
"# scriptPath to the Job object",
"for",
"job",
"in",
"jobs",
":",
"scriptpath",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"jobs\"",
",",
"job",
".",
"name",
")",
"with",
"open",
"(",
"scriptpath",
",",
"\"w\"",
")",
"as",
"scriptfile",
":",
"scriptfile",
".",
"write",
"(",
"\"#!/bin/sh\\n#$ -S /bin/bash\\n%s\\n\"",
"%",
"job",
".",
"script",
")",
"job",
".",
"scriptpath",
"=",
"scriptpath"
] |
Constructs the script for each passed Job in the jobs iterable
- root_dir Path to output directory
|
[
"Constructs",
"the",
"script",
"for",
"each",
"passed",
"Job",
"in",
"the",
"jobs",
"iterable"
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L149-L160
|
train
|
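A sketch with an illustrative directory; the function expects root_dir/jobs to exist already (build_directories normally guarantees this), and the assignment to job.script is an assumption about how the Job object carries its shell payload:

import os
from pyani import pyani_jobs
from pyani.run_sge import build_job_scripts

os.makedirs(os.path.join("sge_out", "jobs"), exist_ok=True)
job = pyani_jobs.Job("hello", "echo hello")
job.script = job.command  # assumption: the shell payload lives on .script
build_job_scripts("sge_out", [job])
print(open(job.scriptpath).read())  # "#!/bin/sh\n#$ -S /bin/bash\necho hello\n"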
widdowquinn/pyani
|
pyani/run_sge.py
|
extract_submittable_jobs
|
def extract_submittable_jobs(waiting):
"""Obtain a list of jobs that are able to be submitted from the passed
list of pending jobs
- waiting List of Job objects
"""
submittable = set() # Holds jobs that are able to be submitted
# Loop over each job, and check all the subjobs in that job's dependency
# list. If there are any, and all of these have been submitted, then
# append the job to the list of submittable jobs.
for job in waiting:
unsatisfied = sum([(subjob.submitted is False) for subjob in
job.dependencies])
if unsatisfied == 0:
submittable.add(job)
return list(submittable)
|
python
|
def extract_submittable_jobs(waiting):
"""Obtain a list of jobs that are able to be submitted from the passed
list of pending jobs
- waiting List of Job objects
"""
submittable = set() # Holds jobs that are able to be submitted
# Loop over each job, and check all the subjobs in that job's dependency
# list. If there are any, and all of these have been submitted, then
# append the job to the list of submittable jobs.
for job in waiting:
unsatisfied = sum([(subjob.submitted is False) for subjob in
job.dependencies])
if unsatisfied == 0:
submittable.add(job)
return list(submittable)
|
[
"def",
"extract_submittable_jobs",
"(",
"waiting",
")",
":",
"submittable",
"=",
"set",
"(",
")",
"# Holds jobs that are able to be submitted",
"# Loop over each job, and check all the subjobs in that job's dependency",
"# list. If there are any, and all of these have been submitted, then",
"# append the job to the list of submittable jobs.",
"for",
"job",
"in",
"waiting",
":",
"unsatisfied",
"=",
"sum",
"(",
"[",
"(",
"subjob",
".",
"submitted",
"is",
"False",
")",
"for",
"subjob",
"in",
"job",
".",
"dependencies",
"]",
")",
"if",
"unsatisfied",
"==",
"0",
":",
"submittable",
".",
"add",
"(",
"job",
")",
"return",
"list",
"(",
"submittable",
")"
] |
Obtain a list of jobs that are able to be submitted from the passed
list of pending jobs
- waiting List of Job objects
|
[
"Obtain",
"a",
"list",
"of",
"jobs",
"that",
"are",
"able",
"to",
"be",
"submitted",
"from",
"the",
"passed",
"list",
"of",
"pending",
"jobs"
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L163-L178
|
train
|
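A sketch: only jobs whose dependencies have all been submitted come back (this assumes Job initialises submitted to False, as its use in submit_safe_jobs implies):

from pyani import pyani_jobs
from pyani.run_sge import extract_submittable_jobs

dbjob = pyani_jobs.Job("db", "echo db")
blastjob = pyani_jobs.Job("blast", "echo blast")
blastjob.add_dependency(dbjob)
print([j.name for j in extract_submittable_jobs([dbjob, blastjob])])  # ['db']
dbjob.submitted = True
print([j.name for j in extract_submittable_jobs([dbjob, blastjob])])  # both, in set order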
widdowquinn/pyani
|
pyani/run_sge.py
|
submit_safe_jobs
|
def submit_safe_jobs(root_dir, jobs, sgeargs=None):
"""Submit the passed list of jobs to the Grid Engine server, using the
passed directory as the root for scheduler output.
- root_dir Path to output directory
- jobs Iterable of Job objects
"""
# Loop over each job, constructing SGE command-line based on job settings
for job in jobs:
job.out = os.path.join(root_dir, "stdout")
job.err = os.path.join(root_dir, "stderr")
# Add the job name, current working directory, and SGE stdout/stderr
# directories to the SGE command line
args = " -N %s " % (job.name)
args += " -cwd "
args += " -o %s -e %s " % (job.out, job.err)
# If a queue is specified, add this to the SGE command line
# LP: This has an undeclared variable, not sure why - delete?
#if job.queue is not None and job.queue in local_queues:
# args += local_queues[job.queue]
# If the job is actually a JobGroup, add the task numbering argument
if isinstance(job, JobGroup):
args += "-t 1:%d " % (job.tasks)
# If there are dependencies for this job, hold the job until they are
# complete
if len(job.dependencies) > 0:
args += "-hold_jid "
for dep in job.dependencies:
args += dep.name + ","
args = args[:-1]
# Build the qsub SGE commandline (passing local environment)
qsubcmd = ("%s -V %s %s" %
(pyani_config.QSUB_DEFAULT, args, job.scriptpath))
if sgeargs is not None:
qsubcmd = "%s %s" % (qsubcmd, sgeargs)
os.system(qsubcmd) # Run the command
job.submitted = True
|
python
|
def submit_safe_jobs(root_dir, jobs, sgeargs=None):
"""Submit the passed list of jobs to the Grid Engine server, using the
passed directory as the root for scheduler output.
- root_dir Path to output directory
- jobs Iterable of Job objects
"""
# Loop over each job, constructing SGE command-line based on job settings
for job in jobs:
job.out = os.path.join(root_dir, "stdout")
job.err = os.path.join(root_dir, "stderr")
# Add the job name, current working directory, and SGE stdout/stderr
# directories to the SGE command line
args = " -N %s " % (job.name)
args += " -cwd "
args += " -o %s -e %s " % (job.out, job.err)
# If a queue is specified, add this to the SGE command line
# LP: This has an undeclared variable, not sure why - delete?
#if job.queue is not None and job.queue in local_queues:
# args += local_queues[job.queue]
# If the job is actually a JobGroup, add the task numbering argument
if isinstance(job, JobGroup):
args += "-t 1:%d " % (job.tasks)
# If there are dependencies for this job, hold the job until they are
# complete
if len(job.dependencies) > 0:
args += "-hold_jid "
for dep in job.dependencies:
args += dep.name + ","
args = args[:-1]
# Build the qsub SGE commandline (passing local environment)
qsubcmd = ("%s -V %s %s" %
(pyani_config.QSUB_DEFAULT, args, job.scriptpath))
if sgeargs is not None:
qsubcmd = "%s %s" % (qsubcmd, sgeargs)
os.system(qsubcmd) # Run the command
job.submitted = True
|
[
"def",
"submit_safe_jobs",
"(",
"root_dir",
",",
"jobs",
",",
"sgeargs",
"=",
"None",
")",
":",
"# Loop over each job, constructing SGE command-line based on job settings",
"for",
"job",
"in",
"jobs",
":",
"job",
".",
"out",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"stdout\"",
")",
"job",
".",
"err",
"=",
"os",
".",
"path",
".",
"join",
"(",
"root_dir",
",",
"\"stderr\"",
")",
"# Add the job name, current working directory, and SGE stdout/stderr",
"# directories to the SGE command line",
"args",
"=",
"\" -N %s \"",
"%",
"(",
"job",
".",
"name",
")",
"args",
"+=",
"\" -cwd \"",
"args",
"+=",
"\" -o %s -e %s \"",
"%",
"(",
"job",
".",
"out",
",",
"job",
".",
"err",
")",
"# If a queue is specified, add this to the SGE command line",
"# LP: This has an undeclared variable, not sure why - delete?",
"#if job.queue is not None and job.queue in local_queues:",
"# args += local_queues[job.queue]",
"# If the job is actually a JobGroup, add the task numbering argument",
"if",
"isinstance",
"(",
"job",
",",
"JobGroup",
")",
":",
"args",
"+=",
"\"-t 1:%d \"",
"%",
"(",
"job",
".",
"tasks",
")",
"# If there are dependencies for this job, hold the job until they are",
"# complete",
"if",
"len",
"(",
"job",
".",
"dependencies",
")",
">",
"0",
":",
"args",
"+=",
"\"-hold_jid \"",
"for",
"dep",
"in",
"job",
".",
"dependencies",
":",
"args",
"+=",
"dep",
".",
"name",
"+",
"\",\"",
"args",
"=",
"args",
"[",
":",
"-",
"1",
"]",
"# Build the qsub SGE commandline (passing local environment)",
"qsubcmd",
"=",
"(",
"\"%s -V %s %s\"",
"%",
"(",
"pyani_config",
".",
"QSUB_DEFAULT",
",",
"args",
",",
"job",
".",
"scriptpath",
")",
")",
"if",
"sgeargs",
"is",
"not",
"None",
":",
"qsubcmd",
"=",
"\"%s %s\"",
"%",
"(",
"qsubcmd",
",",
"sgeargs",
")",
"os",
".",
"system",
"(",
"qsubcmd",
")",
"# Run the command",
"job",
".",
"submitted",
"=",
"True"
] |
Submit the passed list of jobs to the Grid Engine server, using the
passed directory as the root for scheduler output.
- root_dir Path to output directory
- jobs Iterable of Job objects
|
[
"Submit",
"the",
"passed",
"list",
"of",
"jobs",
"to",
"the",
"Grid",
"Engine",
"server",
"using",
"the",
"passed",
"directory",
"as",
"the",
"root",
"for",
"scheduler",
"output",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L181-L222
|
train
|
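A standalone sketch of the qsub argument assembly above (all names illustrative; the real call prefixes pyani_config.QSUB_DEFAULT and appends the job's script path):

name, out, err = "ANIb_JG_1", "sge_out/stdout", "sge_out/stderr"
args = " -N %s " % name + " -cwd " + " -o %s -e %s " % (out, err)
args += "-t 1:%d " % 500                    # added only for JobGroups
args += "-hold_jid " + ",".join(["db_g1"])  # added only when dependencies exist
print("qsub -V %s sge_out/jobs/%s" % (args, name))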
widdowquinn/pyani
|
pyani/run_sge.py
|
submit_jobs
|
def submit_jobs(root_dir, jobs, sgeargs=None):
""" Submit each of the passed jobs to the SGE server, using the passed
directory as root for SGE output.
- root_dir Path to output directory
- jobs List of Job objects
"""
waiting = list(jobs) # List of jobs still to be done
# Loop over the list of pending jobs, while there still are any
while len(waiting) > 0:
# extract submittable jobs
submittable = extract_submittable_jobs(waiting)
# run those jobs
submit_safe_jobs(root_dir, submittable, sgeargs)
# remove those from the waiting list
for job in submittable:
waiting.remove(job)
|
python
|
def submit_jobs(root_dir, jobs, sgeargs=None):
""" Submit each of the passed jobs to the SGE server, using the passed
directory as root for SGE output.
- root_dir Path to output directory
- jobs List of Job objects
"""
waiting = list(jobs) # List of jobs still to be done
# Loop over the list of pending jobs, while there still are any
while len(waiting) > 0:
# extract submittable jobs
submittable = extract_submittable_jobs(waiting)
# run those jobs
submit_safe_jobs(root_dir, submittable, sgeargs)
# remove those from the waiting list
for job in submittable:
waiting.remove(job)
|
[
"def",
"submit_jobs",
"(",
"root_dir",
",",
"jobs",
",",
"sgeargs",
"=",
"None",
")",
":",
"waiting",
"=",
"list",
"(",
"jobs",
")",
"# List of jobs still to be done",
"# Loop over the list of pending jobs, while there still are any",
"while",
"len",
"(",
"waiting",
")",
">",
"0",
":",
"# extract submittable jobs",
"submittable",
"=",
"extract_submittable_jobs",
"(",
"waiting",
")",
"# run those jobs",
"submit_safe_jobs",
"(",
"root_dir",
",",
"submittable",
",",
"sgeargs",
")",
"# remove those from the waiting list",
"for",
"job",
"in",
"submittable",
":",
"waiting",
".",
"remove",
"(",
"job",
")"
] |
Submit each of the passed jobs to the SGE server, using the passed
directory as root for SGE output.
- root_dir Path to output directory
- jobs List of Job objects
|
[
"Submit",
"each",
"of",
"the",
"passed",
"jobs",
"to",
"the",
"SGE",
"server",
"using",
"the",
"passed",
"directory",
"as",
"root",
"for",
"SGE",
"output",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L225-L241
|
train
|
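A standalone sketch of the submission loop: each pass peels off the jobs whose dependencies have already been submitted, so submission order respects the graph:

waiting = [("db", []), ("blast", ["db"])]
submitted = set()
while waiting:
    ready = [(name, deps) for name, deps in waiting
             if all(dep in submitted for dep in deps)]
    for item in ready:
        submitted.add(item[0])
        waiting.remove(item)
    print("submitted this pass:", [name for name, _ in ready])
# pass 1: ['db']; pass 2: ['blast']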
widdowquinn/pyani
|
pyani/run_sge.py
|
build_and_submit_jobs
|
def build_and_submit_jobs(root_dir, jobs, sgeargs=None):
"""Submits the passed iterable of Job objects to SGE, placing SGE's
output in the passed root directory
- root_dir Root directory for SGE and job output
- jobs List of Job objects, describing each job to be submitted
- sgeargs Additional arguments to qsub
"""
# If the passed set of jobs is not a list, turn it into one. This makes the
    # use of a single JobGroup a little more intuitive
if not isinstance(jobs, list):
jobs = [jobs]
# Build and submit the passed jobs
build_directories(root_dir) # build all necessary directories
build_job_scripts(root_dir, jobs) # build job scripts
submit_jobs(root_dir, jobs, sgeargs)
|
python
|
def build_and_submit_jobs(root_dir, jobs, sgeargs=None):
"""Submits the passed iterable of Job objects to SGE, placing SGE's
output in the passed root directory
- root_dir Root directory for SGE and job output
- jobs List of Job objects, describing each job to be submitted
- sgeargs Additional arguments to qsub
"""
# If the passed set of jobs is not a list, turn it into one. This makes the
    # use of a single JobGroup a little more intuitive
if not isinstance(jobs, list):
jobs = [jobs]
# Build and submit the passed jobs
build_directories(root_dir) # build all necessary directories
build_job_scripts(root_dir, jobs) # build job scripts
submit_jobs(root_dir, jobs, sgeargs)
|
[
"def",
"build_and_submit_jobs",
"(",
"root_dir",
",",
"jobs",
",",
"sgeargs",
"=",
"None",
")",
":",
"# If the passed set of jobs is not a list, turn it into one. This makes the",
"# use of a single JobGroup a little more intutitive",
"if",
"not",
"isinstance",
"(",
"jobs",
",",
"list",
")",
":",
"jobs",
"=",
"[",
"jobs",
"]",
"# Build and submit the passed jobs",
"build_directories",
"(",
"root_dir",
")",
"# build all necessary directories",
"build_job_scripts",
"(",
"root_dir",
",",
"jobs",
")",
"# build job scripts",
"submit_jobs",
"(",
"root_dir",
",",
"jobs",
",",
"sgeargs",
")"
] |
Submits the passed iterable of Job objects to SGE, placing SGE's
output in the passed root directory
- root_dir Root directory for SGE and job output
- jobs List of Job objects, describing each job to be submitted
- sgeargs Additional arguments to qsub
|
[
"Submits",
"the",
"passed",
"iterable",
"of",
"Job",
"objects",
"to",
"SGE",
"placing",
"SGE",
"s",
"output",
"in",
"the",
"passed",
"root",
"directory"
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/run_sge.py#L244-L260
|
train
|
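A sketch of the convenience wrapper (requires a Grid Engine qsub on the PATH; as before, the .script assignment is an assumption about the Job object). A bare Job is accepted and wrapped in a list internally:

from pyani import pyani_jobs
from pyani.run_sge import build_and_submit_jobs

job = pyani_jobs.Job("hello", "echo hello")
job.script = job.command  # assumption: the shell payload lives on .script
build_and_submit_jobs("sge_out", job)  # builds directories and scripts, then submits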
widdowquinn/pyani
|
pyani/pyani_config.py
|
params_mpl
|
def params_mpl(df):
"""Returns dict of matplotlib parameters, dependent on dataframe."""
return {'ANIb_alignment_lengths': ('afmhot', df.values.min(),
df.values.max()),
'ANIb_percentage_identity': ('spbnd_BuRd', 0, 1),
'ANIb_alignment_coverage': ('BuRd', 0, 1),
'ANIb_hadamard': ('hadamard_BuRd', 0, 1),
'ANIb_similarity_errors': ('afmhot', df.values.min(),
df.values.max()),
'ANIm_alignment_lengths': ('afmhot', df.values.min(),
df.values.max()),
'ANIm_percentage_identity': ('spbnd_BuRd', 0, 1),
'ANIm_alignment_coverage': ('BuRd', 0, 1),
'ANIm_hadamard': ('hadamard_BuRd', 0, 1),
'ANIm_similarity_errors': ('afmhot', df.values.min(),
df.values.max()),
'TETRA_correlations': ('spbnd_BuRd', 0, 1),
'ANIblastall_alignment_lengths': ('afmhot', df.values.min(),
df.values.max()),
'ANIblastall_percentage_identity': ('spbnd_BuRd', 0, 1),
'ANIblastall_alignment_coverage': ('BuRd', 0, 1),
'ANIblastall_hadamard': ('hadamard_BuRd', 0, 1),
'ANIblastall_similarity_errors': ('afmhot', df.values.min(),
df.values.max())}
|
python
|
def params_mpl(df):
"""Returns dict of matplotlib parameters, dependent on dataframe."""
return {'ANIb_alignment_lengths': ('afmhot', df.values.min(),
df.values.max()),
'ANIb_percentage_identity': ('spbnd_BuRd', 0, 1),
'ANIb_alignment_coverage': ('BuRd', 0, 1),
'ANIb_hadamard': ('hadamard_BuRd', 0, 1),
'ANIb_similarity_errors': ('afmhot', df.values.min(),
df.values.max()),
'ANIm_alignment_lengths': ('afmhot', df.values.min(),
df.values.max()),
'ANIm_percentage_identity': ('spbnd_BuRd', 0, 1),
'ANIm_alignment_coverage': ('BuRd', 0, 1),
'ANIm_hadamard': ('hadamard_BuRd', 0, 1),
'ANIm_similarity_errors': ('afmhot', df.values.min(),
df.values.max()),
'TETRA_correlations': ('spbnd_BuRd', 0, 1),
'ANIblastall_alignment_lengths': ('afmhot', df.values.min(),
df.values.max()),
'ANIblastall_percentage_identity': ('spbnd_BuRd', 0, 1),
'ANIblastall_alignment_coverage': ('BuRd', 0, 1),
'ANIblastall_hadamard': ('hadamard_BuRd', 0, 1),
'ANIblastall_similarity_errors': ('afmhot', df.values.min(),
df.values.max())}
|
[
"def",
"params_mpl",
"(",
"df",
")",
":",
"return",
"{",
"'ANIb_alignment_lengths'",
":",
"(",
"'afmhot'",
",",
"df",
".",
"values",
".",
"min",
"(",
")",
",",
"df",
".",
"values",
".",
"max",
"(",
")",
")",
",",
"'ANIb_percentage_identity'",
":",
"(",
"'spbnd_BuRd'",
",",
"0",
",",
"1",
")",
",",
"'ANIb_alignment_coverage'",
":",
"(",
"'BuRd'",
",",
"0",
",",
"1",
")",
",",
"'ANIb_hadamard'",
":",
"(",
"'hadamard_BuRd'",
",",
"0",
",",
"1",
")",
",",
"'ANIb_similarity_errors'",
":",
"(",
"'afmhot'",
",",
"df",
".",
"values",
".",
"min",
"(",
")",
",",
"df",
".",
"values",
".",
"max",
"(",
")",
")",
",",
"'ANIm_alignment_lengths'",
":",
"(",
"'afmhot'",
",",
"df",
".",
"values",
".",
"min",
"(",
")",
",",
"df",
".",
"values",
".",
"max",
"(",
")",
")",
",",
"'ANIm_percentage_identity'",
":",
"(",
"'spbnd_BuRd'",
",",
"0",
",",
"1",
")",
",",
"'ANIm_alignment_coverage'",
":",
"(",
"'BuRd'",
",",
"0",
",",
"1",
")",
",",
"'ANIm_hadamard'",
":",
"(",
"'hadamard_BuRd'",
",",
"0",
",",
"1",
")",
",",
"'ANIm_similarity_errors'",
":",
"(",
"'afmhot'",
",",
"df",
".",
"values",
".",
"min",
"(",
")",
",",
"df",
".",
"values",
".",
"max",
"(",
")",
")",
",",
"'TETRA_correlations'",
":",
"(",
"'spbnd_BuRd'",
",",
"0",
",",
"1",
")",
",",
"'ANIblastall_alignment_lengths'",
":",
"(",
"'afmhot'",
",",
"df",
".",
"values",
".",
"min",
"(",
")",
",",
"df",
".",
"values",
".",
"max",
"(",
")",
")",
",",
"'ANIblastall_percentage_identity'",
":",
"(",
"'spbnd_BuRd'",
",",
"0",
",",
"1",
")",
",",
"'ANIblastall_alignment_coverage'",
":",
"(",
"'BuRd'",
",",
"0",
",",
"1",
")",
",",
"'ANIblastall_hadamard'",
":",
"(",
"'hadamard_BuRd'",
",",
"0",
",",
"1",
")",
",",
"'ANIblastall_similarity_errors'",
":",
"(",
"'afmhot'",
",",
"df",
".",
"values",
".",
"min",
"(",
")",
",",
"df",
".",
"values",
".",
"max",
"(",
")",
")",
"}"
] |
Returns dict of matplotlib parameters, dependent on dataframe.
|
[
"Returns",
"dict",
"of",
"matplotlib",
"parameters",
"dependent",
"on",
"dataframe",
"."
] |
2b24ec971401e04024bba896e4011984fe3f53f0
|
https://github.com/widdowquinn/pyani/blob/2b24ec971401e04024bba896e4011984fe3f53f0/pyani/pyani_config.py#L107-L130
|
train
|
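A quick sketch: each key maps to a (colormap name, vmin, vmax) triple, with identity-like matrices pinned to [0, 1] and length/error matrices scaled to the data range:

import pandas as pd
from pyani.pyani_config import params_mpl

df = pd.DataFrame([[1.00, 0.93], [0.93, 1.00]])
print(params_mpl(df)["ANIm_percentage_identity"])  # ('spbnd_BuRd', 0, 1)
print(params_mpl(df)["ANIm_alignment_lengths"])    # ('afmhot', 0.93, 1.0)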
datapythonista/mnist
|
mnist/__init__.py
|
download_file
|
def download_file(fname, target_dir=None, force=False):
"""Download fname from the datasets_url, and save it to target_dir,
unless the file already exists, and force is False.
Parameters
----------
fname : str
Name of the file to download
target_dir : str
Directory where to store the file
force : bool
Force downloading the file, if it already exists
Returns
-------
fname : str
Full path of the downloaded file
"""
target_dir = target_dir or temporary_dir()
target_fname = os.path.join(target_dir, fname)
if force or not os.path.isfile(target_fname):
url = urljoin(datasets_url, fname)
urlretrieve(url, target_fname)
return target_fname
|
python
|
def download_file(fname, target_dir=None, force=False):
"""Download fname from the datasets_url, and save it to target_dir,
unless the file already exists, and force is False.
Parameters
----------
fname : str
Name of the file to download
target_dir : str
Directory where to store the file
force : bool
Force downloading the file, if it already exists
Returns
-------
fname : str
Full path of the downloaded file
"""
target_dir = target_dir or temporary_dir()
target_fname = os.path.join(target_dir, fname)
if force or not os.path.isfile(target_fname):
url = urljoin(datasets_url, fname)
urlretrieve(url, target_fname)
return target_fname
|
[
"def",
"download_file",
"(",
"fname",
",",
"target_dir",
"=",
"None",
",",
"force",
"=",
"False",
")",
":",
"target_dir",
"=",
"target_dir",
"or",
"temporary_dir",
"(",
")",
"target_fname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target_dir",
",",
"fname",
")",
"if",
"force",
"or",
"not",
"os",
".",
"path",
".",
"isfile",
"(",
"target_fname",
")",
":",
"url",
"=",
"urljoin",
"(",
"datasets_url",
",",
"fname",
")",
"urlretrieve",
"(",
"url",
",",
"target_fname",
")",
"return",
"target_fname"
] |
Download fname from the datasets_url, and save it to target_dir,
unless the file already exists, and force is False.
Parameters
----------
fname : str
Name of the file to download
target_dir : str
Directory where to store the file
force : bool
Force downloading the file, if it already exists
Returns
-------
fname : str
Full path of the downloaded file
|
[
"Download",
"fname",
"from",
"the",
"datasets_url",
"and",
"save",
"it",
"to",
"target_dir",
"unless",
"the",
"file",
"already",
"exists",
"and",
"force",
"is",
"False",
"."
] |
d91df2b27ee62d07396b5b64c7cfead59833b563
|
https://github.com/datapythonista/mnist/blob/d91df2b27ee62d07396b5b64c7cfead59833b563/mnist/__init__.py#L34-L61
|
train
|
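A hedged usage sketch for the download_file record above. The file name follows the standard MNIST naming convention, the call needs network access on a cold cache, and the printed path is illustrative only.

import mnist

# Downloads into a temporary directory on first use; later calls
# return the cached path without re-downloading.
path = mnist.download_file('train-images-idx3-ubyte.gz')
print(path)  # e.g. /tmp/train-images-idx3-ubyte.gz
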
datapythonista/mnist
|
mnist/__init__.py
|
parse_idx
|
def parse_idx(fd):
"""Parse an IDX file, and return it as a numpy array.
Parameters
----------
fd : file
File descriptor of the IDX file to parse
endian : str
Byte order of the IDX file. See [1] for available options
Returns
-------
data : numpy.ndarray
Numpy array with the dimensions and the data in the IDX file
1. https://docs.python.org/3/library/struct.html
#byte-order-size-and-alignment
"""
DATA_TYPES = {0x08: 'B', # unsigned byte
0x09: 'b', # signed byte
0x0b: 'h', # short (2 bytes)
0x0c: 'i', # int (4 bytes)
0x0d: 'f', # float (4 bytes)
0x0e: 'd'} # double (8 bytes)
header = fd.read(4)
if len(header) != 4:
raise IdxDecodeError('Invalid IDX file, '
'file empty or does not contain a full header.')
zeros, data_type, num_dimensions = struct.unpack('>HBB', header)
if zeros != 0:
raise IdxDecodeError('Invalid IDX file, '
'file must start with two zero bytes. '
'Found 0x%02x' % zeros)
try:
data_type = DATA_TYPES[data_type]
except KeyError:
raise IdxDecodeError('Unknown data type '
'0x%02x in IDX file' % data_type)
dimension_sizes = struct.unpack('>' + 'I' * num_dimensions,
fd.read(4 * num_dimensions))
data = array.array(data_type, fd.read())
data.byteswap() # looks like array.array reads data as little endian
expected_items = functools.reduce(operator.mul, dimension_sizes)
if len(data) != expected_items:
raise IdxDecodeError('IDX file has wrong number of items. '
'Expected: %d. Found: %d' % (expected_items,
len(data)))
return numpy.array(data).reshape(dimension_sizes)
|
python
|
def parse_idx(fd):
"""Parse an IDX file, and return it as a numpy array.
Parameters
----------
fd : file
File descriptor of the IDX file to parse
endian : str
Byte order of the IDX file. See [1] for available options
Returns
-------
data : numpy.ndarray
Numpy array with the dimensions and the data in the IDX file
1. https://docs.python.org/3/library/struct.html
#byte-order-size-and-alignment
"""
DATA_TYPES = {0x08: 'B', # unsigned byte
0x09: 'b', # signed byte
0x0b: 'h', # short (2 bytes)
0x0c: 'i', # int (4 bytes)
0x0d: 'f', # float (4 bytes)
0x0e: 'd'} # double (8 bytes)
header = fd.read(4)
if len(header) != 4:
raise IdxDecodeError('Invalid IDX file, '
'file empty or does not contain a full header.')
zeros, data_type, num_dimensions = struct.unpack('>HBB', header)
if zeros != 0:
raise IdxDecodeError('Invalid IDX file, '
'file must start with two zero bytes. '
'Found 0x%02x' % zeros)
try:
data_type = DATA_TYPES[data_type]
except KeyError:
raise IdxDecodeError('Unknown data type '
'0x%02x in IDX file' % data_type)
dimension_sizes = struct.unpack('>' + 'I' * num_dimensions,
fd.read(4 * num_dimensions))
data = array.array(data_type, fd.read())
data.byteswap() # looks like array.array reads data as little endian
expected_items = functools.reduce(operator.mul, dimension_sizes)
if len(data) != expected_items:
raise IdxDecodeError('IDX file has wrong number of items. '
'Expected: %d. Found: %d' % (expected_items,
len(data)))
return numpy.array(data).reshape(dimension_sizes)
|
[
"def",
"parse_idx",
"(",
"fd",
")",
":",
"DATA_TYPES",
"=",
"{",
"0x08",
":",
"'B'",
",",
"# unsigned byte",
"0x09",
":",
"'b'",
",",
"# signed byte",
"0x0b",
":",
"'h'",
",",
"# short (2 bytes)",
"0x0c",
":",
"'i'",
",",
"# int (4 bytes)",
"0x0d",
":",
"'f'",
",",
"# float (4 bytes)",
"0x0e",
":",
"'d'",
"}",
"# double (8 bytes)",
"header",
"=",
"fd",
".",
"read",
"(",
"4",
")",
"if",
"len",
"(",
"header",
")",
"!=",
"4",
":",
"raise",
"IdxDecodeError",
"(",
"'Invalid IDX file, '",
"'file empty or does not contain a full header.'",
")",
"zeros",
",",
"data_type",
",",
"num_dimensions",
"=",
"struct",
".",
"unpack",
"(",
"'>HBB'",
",",
"header",
")",
"if",
"zeros",
"!=",
"0",
":",
"raise",
"IdxDecodeError",
"(",
"'Invalid IDX file, '",
"'file must start with two zero bytes. '",
"'Found 0x%02x'",
"%",
"zeros",
")",
"try",
":",
"data_type",
"=",
"DATA_TYPES",
"[",
"data_type",
"]",
"except",
"KeyError",
":",
"raise",
"IdxDecodeError",
"(",
"'Unknown data type '",
"'0x%02x in IDX file'",
"%",
"data_type",
")",
"dimension_sizes",
"=",
"struct",
".",
"unpack",
"(",
"'>'",
"+",
"'I'",
"*",
"num_dimensions",
",",
"fd",
".",
"read",
"(",
"4",
"*",
"num_dimensions",
")",
")",
"data",
"=",
"array",
".",
"array",
"(",
"data_type",
",",
"fd",
".",
"read",
"(",
")",
")",
"data",
".",
"byteswap",
"(",
")",
"# looks like array.array reads data as little endian",
"expected_items",
"=",
"functools",
".",
"reduce",
"(",
"operator",
".",
"mul",
",",
"dimension_sizes",
")",
"if",
"len",
"(",
"data",
")",
"!=",
"expected_items",
":",
"raise",
"IdxDecodeError",
"(",
"'IDX file has wrong number of items. '",
"'Expected: %d. Found: %d'",
"%",
"(",
"expected_items",
",",
"len",
"(",
"data",
")",
")",
")",
"return",
"numpy",
".",
"array",
"(",
"data",
")",
".",
"reshape",
"(",
"dimension_sizes",
")"
] |
Parse an IDX file, and return it as a numpy array.
Parameters
----------
fd : file
File descriptor of the IDX file to parse
endian : str
Byte order of the IDX file. See [1] for available options
Returns
-------
data : numpy.ndarray
Numpy array with the dimensions and the data in the IDX file
1. https://docs.python.org/3/library/struct.html
#byte-order-size-and-alignment
|
[
"Parse",
"an",
"IDX",
"file",
"and",
"return",
"it",
"as",
"a",
"numpy",
"array",
"."
] |
d91df2b27ee62d07396b5b64c7cfead59833b563
|
https://github.com/datapythonista/mnist/blob/d91df2b27ee62d07396b5b64c7cfead59833b563/mnist/__init__.py#L64-L120
|
train
|
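Because parse_idx only needs a file-like object, it can be exercised entirely offline. The sketch below hand-builds a tiny big-endian IDX payload in memory (a 2x3 matrix of unsigned bytes) and round-trips it; the io.BytesIO wrapper is this example's assumption, not part of the original API.

import io
import struct

import mnist

# IDX header: two zero bytes, type code 0x08 (unsigned byte), 2 dimensions,
# then each dimension size as a big-endian uint32, then the raw data bytes.
payload = struct.pack('>HBB', 0, 0x08, 2)
payload += struct.pack('>II', 2, 3)
payload += bytes(range(6))

arr = mnist.parse_idx(io.BytesIO(payload))
assert arr.shape == (2, 3)
assert arr[1, 2] == 5
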
datapythonista/mnist
|
mnist/__init__.py
|
download_and_parse_mnist_file
|
def download_and_parse_mnist_file(fname, target_dir=None, force=False):
"""Download the IDX file named fname from the URL specified in dataset_url
and return it as a numpy array.
Parameters
----------
fname : str
File name to download and parse
target_dir : str
Directory where to store the file
force : bool
Force downloading the file, if it already exists
Returns
-------
data : numpy.ndarray
Numpy array with the dimensions and the data in the IDX file
"""
fname = download_file(fname, target_dir=target_dir, force=force)
fopen = gzip.open if os.path.splitext(fname)[1] == '.gz' else open
with fopen(fname, 'rb') as fd:
return parse_idx(fd)
|
python
|
def download_and_parse_mnist_file(fname, target_dir=None, force=False):
"""Download the IDX file named fname from the URL specified in dataset_url
and return it as a numpy array.
Parameters
----------
fname : str
File name to download and parse
target_dir : str
Directory where to store the file
force : bool
Force downloading the file, if it already exists
Returns
-------
data : numpy.ndarray
Numpy array with the dimensions and the data in the IDX file
"""
fname = download_file(fname, target_dir=target_dir, force=force)
fopen = gzip.open if os.path.splitext(fname)[1] == '.gz' else open
with fopen(fname, 'rb') as fd:
return parse_idx(fd)
|
[
"def",
"download_and_parse_mnist_file",
"(",
"fname",
",",
"target_dir",
"=",
"None",
",",
"force",
"=",
"False",
")",
":",
"fname",
"=",
"download_file",
"(",
"fname",
",",
"target_dir",
"=",
"target_dir",
",",
"force",
"=",
"force",
")",
"fopen",
"=",
"gzip",
".",
"open",
"if",
"os",
".",
"path",
".",
"splitext",
"(",
"fname",
")",
"[",
"1",
"]",
"==",
"'.gz'",
"else",
"open",
"with",
"fopen",
"(",
"fname",
",",
"'rb'",
")",
"as",
"fd",
":",
"return",
"parse_idx",
"(",
"fd",
")"
] |
Download the IDX file named fname from the URL specified in dataset_url
and return it as a numpy array.
Parameters
----------
fname : str
File name to download and parse
target_dir : str
Directory where to store the file
force : bool
Force downloading the file, if it already exists
Returns
-------
data : numpy.ndarray
Numpy array with the dimensions and the data in the IDX file
|
[
"Download",
"the",
"IDX",
"file",
"named",
"fname",
"from",
"the",
"URL",
"specified",
"in",
"dataset_url",
"and",
"return",
"it",
"as",
"a",
"numpy",
"array",
"."
] |
d91df2b27ee62d07396b5b64c7cfead59833b563
|
https://github.com/datapythonista/mnist/blob/d91df2b27ee62d07396b5b64c7cfead59833b563/mnist/__init__.py#L123-L146
|
train
|
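Tying the two helpers together, a sketch of loading the MNIST training images; the file name and shape are the well-known MNIST values, and a network connection is assumed on first use.

import mnist

# First call downloads ~10 MB and caches it; parsing follows automatically.
train_images = mnist.download_and_parse_mnist_file('train-images-idx3-ubyte.gz')
print(train_images.shape)  # expected: (60000, 28, 28)
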
Bogdanp/anom-py
|
anom/query.py
|
Pages.fetch_next_page
|
def fetch_next_page(self):
"""Fetch the next Page of results.
Returns:
Page: The next page of results.
"""
for page in self:
return page
else:
return Page(self._resultset.cursor, iter(()))
|
python
|
def fetch_next_page(self):
"""Fetch the next Page of results.
Returns:
Page: The next page of results.
"""
for page in self:
return page
else:
return Page(self._resultset.cursor, iter(()))
|
[
"def",
"fetch_next_page",
"(",
"self",
")",
":",
"for",
"page",
"in",
"self",
":",
"return",
"page",
"else",
":",
"return",
"Page",
"(",
"self",
".",
"_resultset",
".",
"cursor",
",",
"iter",
"(",
"(",
")",
")",
")"
] |
Fetch the next Page of results.
Returns:
Page: The next page of results.
|
[
"Fetch",
"the",
"next",
"Page",
"of",
"results",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/query.py#L204-L213
|
train
|
Bogdanp/anom-py
|
anom/query.py
|
Query.count
|
def count(self, *, page_size=DEFAULT_BATCH_SIZE, **options):
"""Counts the number of entities that match this query.
Note:
Since Datastore doesn't provide a native way to count
entities by query, this method paginates through all the
entities' keys and counts them.
Parameters:
\**options(QueryOptions, optional)
Returns:
int: The number of entities.
"""
entities = 0
options = QueryOptions(self).replace(keys_only=True)
for page in self.paginate(page_size=page_size, **options):
entities += len(list(page))
return entities
|
python
|
def count(self, *, page_size=DEFAULT_BATCH_SIZE, **options):
"""Counts the number of entities that match this query.
Note:
Since Datastore doesn't provide a native way to count
entities by query, this method paginates through all the
entities' keys and counts them.
Parameters:
\**options(QueryOptions, optional)
Returns:
int: The number of entities.
"""
entities = 0
options = QueryOptions(self).replace(keys_only=True)
for page in self.paginate(page_size=page_size, **options):
entities += len(list(page))
return entities
|
[
"def",
"count",
"(",
"self",
",",
"*",
",",
"page_size",
"=",
"DEFAULT_BATCH_SIZE",
",",
"*",
"*",
"options",
")",
":",
"entities",
"=",
"0",
"options",
"=",
"QueryOptions",
"(",
"self",
")",
".",
"replace",
"(",
"keys_only",
"=",
"True",
")",
"for",
"page",
"in",
"self",
".",
"paginate",
"(",
"page_size",
"=",
"page_size",
",",
"*",
"*",
"options",
")",
":",
"entities",
"+=",
"len",
"(",
"list",
"(",
"page",
")",
")",
"return",
"entities"
] |
Counts the number of entities that match this query.
Note:
Since Datastore doesn't provide a native way to count
entities by query, this method paginates through all the
entities' keys and counts them.
Parameters:
\**options(QueryOptions, optional)
Returns:
int: The number of entities.
|
[
"Counts",
"the",
"number",
"of",
"entities",
"that",
"match",
"this",
"query",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/query.py#L383-L401
|
train
|
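A hedged sketch of count in use. The Person model, its email property, and a configured Datastore adapter (for instance the local emulator) are assumptions of this example, not part of the record above; note that, per the docstring, count walks every matching key, so it is O(n) in matching entities.

from anom import Model, props

class Person(Model):
    email = props.String(indexed=True)

# Paginates over keys only and tallies them.
n = Person.query().where(Person.email == "bob@example.com").count()
print(f"{n} matching entities")
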
Bogdanp/anom-py
|
anom/query.py
|
Query.delete
|
def delete(self, *, page_size=DEFAULT_BATCH_SIZE, **options):
"""Deletes all the entities that match this query.
Note:
Since Datastore doesn't provide a native way to delete
entities by query, this method paginates through all the
entities' keys and issues a single delete_multi call per
page.
Parameters:
\**options(QueryOptions, optional)
Returns:
int: The number of deleted entities.
"""
from .model import delete_multi
deleted = 0
options = QueryOptions(self).replace(keys_only=True)
for page in self.paginate(page_size=page_size, **options):
keys = list(page)
deleted += len(keys)
delete_multi(keys)
return deleted
|
python
|
def delete(self, *, page_size=DEFAULT_BATCH_SIZE, **options):
"""Deletes all the entities that match this query.
Note:
Since Datastore doesn't provide a native way to delete
entities by query, this method paginates through all the
entities' keys and issues a single delete_multi call per
page.
Parameters:
\**options(QueryOptions, optional)
Returns:
int: The number of deleted entities.
"""
from .model import delete_multi
deleted = 0
options = QueryOptions(self).replace(keys_only=True)
for page in self.paginate(page_size=page_size, **options):
keys = list(page)
deleted += len(keys)
delete_multi(keys)
return deleted
|
[
"def",
"delete",
"(",
"self",
",",
"*",
",",
"page_size",
"=",
"DEFAULT_BATCH_SIZE",
",",
"*",
"*",
"options",
")",
":",
"from",
".",
"model",
"import",
"delete_multi",
"deleted",
"=",
"0",
"options",
"=",
"QueryOptions",
"(",
"self",
")",
".",
"replace",
"(",
"keys_only",
"=",
"True",
")",
"for",
"page",
"in",
"self",
".",
"paginate",
"(",
"page_size",
"=",
"page_size",
",",
"*",
"*",
"options",
")",
":",
"keys",
"=",
"list",
"(",
"page",
")",
"deleted",
"+=",
"len",
"(",
"keys",
")",
"delete_multi",
"(",
"keys",
")",
"return",
"deleted"
] |
Deletes all the entities that match this query.
Note:
Since Datastore doesn't provide a native way to delete
entities by query, this method paginates through all the
entities' keys and issues a single delete_multi call per
page.
Parameters:
\**options(QueryOptions, optional)
Returns:
int: The number of deleted entities.
|
[
"Deletes",
"all",
"the",
"entities",
"that",
"match",
"this",
"query",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/query.py#L403-L427
|
train
|
Bogdanp/anom-py
|
anom/query.py
|
Query.get
|
def get(self, **options):
"""Run this query and get the first result.
Parameters:
\**options(QueryOptions, optional)
Returns:
Model: An entity or None if there were no results.
"""
sub_query = self.with_limit(1)
options = QueryOptions(sub_query).replace(batch_size=1)
for result in sub_query.run(**options):
return result
return None
|
python
|
def get(self, **options):
"""Run this query and get the first result.
Parameters:
\**options(QueryOptions, optional)
Returns:
Model: An entity or None if there were no results.
"""
sub_query = self.with_limit(1)
options = QueryOptions(sub_query).replace(batch_size=1)
for result in sub_query.run(**options):
return result
return None
|
[
"def",
"get",
"(",
"self",
",",
"*",
"*",
"options",
")",
":",
"sub_query",
"=",
"self",
".",
"with_limit",
"(",
"1",
")",
"options",
"=",
"QueryOptions",
"(",
"sub_query",
")",
".",
"replace",
"(",
"batch_size",
"=",
"1",
")",
"for",
"result",
"in",
"sub_query",
".",
"run",
"(",
"*",
"*",
"options",
")",
":",
"return",
"result",
"return",
"None"
] |
Run this query and get the first result.
Parameters:
\**options(QueryOptions, optional)
Returns:
Model: An entity or None if there were no results.
|
[
"Run",
"this",
"query",
"and",
"get",
"the",
"first",
"result",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/query.py#L429-L442
|
train
|
Bogdanp/anom-py
|
anom/query.py
|
Query.paginate
|
def paginate(self, *, page_size, **options):
"""Run this query and return a page iterator.
Parameters:
page_size(int): The number of entities to fetch per page.
\**options(QueryOptions, optional)
Returns:
Pages: An iterator for this query's pages of results.
"""
return Pages(self._prepare(), page_size, QueryOptions(self, **options))
|
python
|
def paginate(self, *, page_size, **options):
"""Run this query and return a page iterator.
Parameters:
page_size(int): The number of entities to fetch per page.
\**options(QueryOptions, optional)
Returns:
Pages: An iterator for this query's pages of results.
"""
return Pages(self._prepare(), page_size, QueryOptions(self, **options))
|
[
"def",
"paginate",
"(",
"self",
",",
"*",
",",
"page_size",
",",
"*",
"*",
"options",
")",
":",
"return",
"Pages",
"(",
"self",
".",
"_prepare",
"(",
")",
",",
"page_size",
",",
"QueryOptions",
"(",
"self",
",",
"*",
"*",
"options",
")",
")"
] |
Run this query and return a page iterator.
Parameters:
page_size(int): The number of entities to fetch per page.
\**options(QueryOptions, optional)
Returns:
Pages: An iterator for this query's pages of results.
|
[
"Run",
"this",
"query",
"and",
"return",
"a",
"page",
"iterator",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/query.py#L455-L465
|
train
|
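A paging sketch under the same assumed Person model and adapter; each yielded Page is itself iterable, and the cursor it carries (visible in the Page constructor used by fetch_next_page above) could be persisted to resume later.

from anom import Model, props

class Person(Model):
    email = props.String(indexed=True)

for page in Person.query().paginate(page_size=100):
    for person in page:
        print(person.key, person.email)
    # page.cursor could be stored here to resume pagination later.
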
Bogdanp/anom-py
|
anom/namespaces.py
|
namespace
|
def namespace(namespace):
"""Context manager for stacking the current thread-local default
namespace. Exiting the context sets the thread-local default
namespace back to the previously-set namespace. If there is no
previous namespace, then the thread-local namespace is cleared.
Example:
>>> with namespace("foo"):
...     with namespace("bar"):
...         assert get_namespace() == "bar"
...     assert get_namespace() == "foo"
>>> assert get_namespace() == ""
Parameters:
namespace(str): namespace to set as the current thread-local
default.
Returns:
None
"""
try:
current_namespace = _namespace.current
except AttributeError:
current_namespace = None
set_namespace(namespace)
try:
yield
finally:
set_namespace(current_namespace)
|
python
|
def namespace(namespace):
"""Context manager for stacking the current thread-local default
namespace. Exiting the context sets the thread-local default
namespace back to the previously-set namespace. If there is no
previous namespace, then the thread-local namespace is cleared.
Example:
>>> with namespace("foo"):
...     with namespace("bar"):
...         assert get_namespace() == "bar"
...     assert get_namespace() == "foo"
>>> assert get_namespace() == ""
Parameters:
namespace(str): namespace to set as the current thread-local
default.
Returns:
None
"""
try:
current_namespace = _namespace.current
except AttributeError:
current_namespace = None
set_namespace(namespace)
try:
yield
finally:
set_namespace(current_namespace)
|
[
"def",
"namespace",
"(",
"namespace",
")",
":",
"try",
":",
"current_namespace",
"=",
"_namespace",
".",
"current",
"except",
"AttributeError",
":",
"current_namespace",
"=",
"None",
"set_namespace",
"(",
"namespace",
")",
"try",
":",
"yield",
"finally",
":",
"set_namespace",
"(",
"current_namespace",
")"
] |
Context manager for stacking the current thread-local default
namespace. Exiting the context sets the thread-local default
namespace back to the previously-set namespace. If there is no
previous namespace, then the thread-local namespace is cleared.
Example:
>>> with namespace("foo"):
...     with namespace("bar"):
...         assert get_namespace() == "bar"
...     assert get_namespace() == "foo"
>>> assert get_namespace() == ""
Parameters:
namespace(str): namespace to set as the current thread-local
default.
Returns:
None
|
[
"Context",
"manager",
"for",
"stacking",
"the",
"current",
"thread",
"-",
"local",
"default",
"namespace",
".",
"Exiting",
"the",
"context",
"sets",
"the",
"thread",
"-",
"local",
"default",
"namespace",
"back",
"to",
"the",
"previously",
"-",
"set",
"namespace",
".",
"If",
"there",
"is",
"no",
"previous",
"namespace",
"then",
"the",
"thread",
"-",
"local",
"namespace",
"is",
"cleared",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/namespaces.py#L55-L85
|
train
|
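The doctest above already shows the nesting semantics; the sketch below restates it as a plain script so the save/restore behaviour is explicit. It assumes namespace and get_namespace are re-exported at package level, as the doctest implies.

from anom import get_namespace, namespace

with namespace("tenant-a"):
    assert get_namespace() == "tenant-a"
    with namespace("tenant-b"):
        assert get_namespace() == "tenant-b"
    # Leaving the inner block restores the previously set namespace.
    assert get_namespace() == "tenant-a"
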
Bogdanp/anom-py
|
anom/model.py
|
lookup_model_by_kind
|
def lookup_model_by_kind(kind):
"""Look up the model instance for a given Datastore kind.
Parameters:
kind(str)
Raises:
RuntimeError: If a model for the given kind has not been
defined.
Returns:
model: The model class.
"""
model = _known_models.get(kind)
if model is None:
raise RuntimeError(f"Model for kind {kind!r} not found.")
return model
|
python
|
def lookup_model_by_kind(kind):
"""Look up the model instance for a given Datastore kind.
Parameters:
kind(str)
Raises:
RuntimeError: If a model for the given kind has not been
defined.
Returns:
model: The model class.
"""
model = _known_models.get(kind)
if model is None:
raise RuntimeError(f"Model for kind {kind!r} not found.")
return model
|
[
"def",
"lookup_model_by_kind",
"(",
"kind",
")",
":",
"model",
"=",
"_known_models",
".",
"get",
"(",
"kind",
")",
"if",
"model",
"is",
"None",
":",
"raise",
"RuntimeError",
"(",
"f\"Model for kind {kind!r} not found.\"",
")",
"return",
"model"
] |
Look up the model instance for a given Datastore kind.
Parameters:
kind(str)
Raises:
RuntimeError: If a model for the given kind has not been
defined.
Returns:
model: The model class.
|
[
"Look",
"up",
"the",
"model",
"instance",
"for",
"a",
"given",
"Datastore",
"kind",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/model.py#L612-L628
|
train
|
Bogdanp/anom-py
|
anom/model.py
|
delete_multi
|
def delete_multi(keys):
"""Delete a set of entitites from Datastore by their
respective keys.
Note:
This uses the adapter that is tied to the first model in the list.
If the keys have disparate adapters this function may behave in
unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of keys multiple times.
Parameters:
keys(list[anom.Key]): The list of keys whose entities to delete.
Raises:
RuntimeError: If the given set of keys have models that use
a disparate set of adapters or if any of the keys are
partial.
"""
if not keys:
return
adapter = None
for key in keys:
if key.is_partial:
raise RuntimeError(f"Key {key!r} is partial.")
model = lookup_model_by_kind(key.kind)
if adapter is None:
adapter = model._adapter
model.pre_delete_hook(key)
adapter.delete_multi(keys)
for key in keys:
# Micro-optimization to avoid calling get_model. This is OK
# to do here because we've already proved that a model for
# that kind exists in the previous block.
model = _known_models[key.kind]
model.post_delete_hook(key)
|
python
|
def delete_multi(keys):
"""Delete a set of entitites from Datastore by their
respective keys.
Note:
This uses the adapter that is tied to the first model in the list.
If the keys have disparate adapters this function may behave in
unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of keys multiple times.
Parameters:
keys(list[anom.Key]): The list of keys whose entities to delete.
Raises:
RuntimeError: If the given set of keys have models that use
a disparate set of adapters or if any of the keys are
partial.
"""
if not keys:
return
adapter = None
for key in keys:
if key.is_partial:
raise RuntimeError(f"Key {key!r} is partial.")
model = lookup_model_by_kind(key.kind)
if adapter is None:
adapter = model._adapter
model.pre_delete_hook(key)
adapter.delete_multi(keys)
for key in keys:
# Micro-optimization to avoid calling get_model. This is OK
# to do here because we've already proved that a model for
# that kind exists in the previous block.
model = _known_models[key.kind]
model.post_delete_hook(key)
|
[
"def",
"delete_multi",
"(",
"keys",
")",
":",
"if",
"not",
"keys",
":",
"return",
"adapter",
"=",
"None",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
".",
"is_partial",
":",
"raise",
"RuntimeError",
"(",
"f\"Key {key!r} is partial.\"",
")",
"model",
"=",
"lookup_model_by_kind",
"(",
"key",
".",
"kind",
")",
"if",
"adapter",
"is",
"None",
":",
"adapter",
"=",
"model",
".",
"_adapter",
"model",
".",
"pre_delete_hook",
"(",
"key",
")",
"adapter",
".",
"delete_multi",
"(",
"keys",
")",
"for",
"key",
"in",
"keys",
":",
"# Micro-optimization to avoid calling get_model. This is OK",
"# to do here because we've already proved that a model for",
"# that kind exists in the previous block.",
"model",
"=",
"_known_models",
"[",
"key",
".",
"kind",
"]",
"model",
".",
"post_delete_hook",
"(",
"key",
")"
] |
Delete a set of entities from Datastore by their
respective keys.
Note:
This uses the adapter that is tied to the first model in the list.
If the keys have disparate adapters this function may behave in
unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of keys multiple times.
Parameters:
keys(list[anom.Key]): The list of keys whose entities to delete.
Raises:
RuntimeError: If the given set of keys have models that use
a disparate set of adapters or if any of the keys are
partial.
|
[
"Delete",
"a",
"set",
"of",
"entitites",
"from",
"Datastore",
"by",
"their",
"respective",
"keys",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/model.py#L631-L673
|
train
|
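A bulk-deletion sketch. Per the warning above, the argument must be a real list (it is iterated twice) and every key must be complete; the Person model and configured adapter are, again, assumptions of the example.

from anom import Key, Model, delete_multi, props

class Person(Model):
    email = props.String()

# Complete keys only; a generator would break the double iteration.
keys = [Key(Person, 1), Key(Person, 2)]
delete_multi(keys)
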
Bogdanp/anom-py
|
anom/model.py
|
get_multi
|
def get_multi(keys):
"""Get a set of entities from Datastore by their respective keys.
Note:
This uses the adapter that is tied to the first model in the
list. If the keys have disparate adapters this function may
behave in unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of keys multiple times.
Parameters:
keys(list[anom.Key]): The list of keys whose entities to get.
Raises:
RuntimeError: If the given set of keys have models that use
a disparate set of adapters or if any of the keys are
partial.
Returns:
list[Model]: Entities that do not exist are going to be None
in the result list. The order of results matches the order
of the input keys.
"""
if not keys:
return []
adapter = None
for key in keys:
if key.is_partial:
raise RuntimeError(f"Key {key!r} is partial.")
model = lookup_model_by_kind(key.kind)
if adapter is None:
adapter = model._adapter
model.pre_get_hook(key)
entities_data, entities = adapter.get_multi(keys), []
for key, entity_data in zip(keys, entities_data):
if entity_data is None:
entities.append(None)
continue
# Micro-optimization to avoid calling get_model. This is OK
# to do here because we've already proved that a model for
# that kind exists in the previous block.
model = _known_models[key.kind]
entity = model._load(key, entity_data)
entities.append(entity)
entity.post_get_hook()
return entities
|
python
|
def get_multi(keys):
"""Get a set of entities from Datastore by their respective keys.
Note:
This uses the adapter that is tied to the first model in the
list. If the keys have disparate adapters this function may
behave in unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of keys multiple times.
Parameters:
keys(list[anom.Key]): The list of keys whose entities to get.
Raises:
RuntimeError: If the given set of keys have models that use
a disparate set of adapters or if any of the keys are
partial.
Returns:
list[Model]: Entities that do not exist are going to be None
in the result list. The order of results matches the order
of the input keys.
"""
if not keys:
return []
adapter = None
for key in keys:
if key.is_partial:
raise RuntimeError(f"Key {key!r} is partial.")
model = lookup_model_by_kind(key.kind)
if adapter is None:
adapter = model._adapter
model.pre_get_hook(key)
entities_data, entities = adapter.get_multi(keys), []
for key, entity_data in zip(keys, entities_data):
if entity_data is None:
entities.append(None)
continue
# Micro-optimization to avoid calling get_model. This is OK
# to do here because we've already proved that a model for
# that kind exists in the previous block.
model = _known_models[key.kind]
entity = model._load(key, entity_data)
entities.append(entity)
entity.post_get_hook()
return entities
|
[
"def",
"get_multi",
"(",
"keys",
")",
":",
"if",
"not",
"keys",
":",
"return",
"[",
"]",
"adapter",
"=",
"None",
"for",
"key",
"in",
"keys",
":",
"if",
"key",
".",
"is_partial",
":",
"raise",
"RuntimeError",
"(",
"f\"Key {key!r} is partial.\"",
")",
"model",
"=",
"lookup_model_by_kind",
"(",
"key",
".",
"kind",
")",
"if",
"adapter",
"is",
"None",
":",
"adapter",
"=",
"model",
".",
"_adapter",
"model",
".",
"pre_get_hook",
"(",
"key",
")",
"entities_data",
",",
"entities",
"=",
"adapter",
".",
"get_multi",
"(",
"keys",
")",
",",
"[",
"]",
"for",
"key",
",",
"entity_data",
"in",
"zip",
"(",
"keys",
",",
"entities_data",
")",
":",
"if",
"entity_data",
"is",
"None",
":",
"entities",
".",
"append",
"(",
"None",
")",
"continue",
"# Micro-optimization to avoid calling get_model. This is OK",
"# to do here because we've already proved that a model for",
"# that kind exists in the previous block.",
"model",
"=",
"_known_models",
"[",
"key",
".",
"kind",
"]",
"entity",
"=",
"model",
".",
"_load",
"(",
"key",
",",
"entity_data",
")",
"entities",
".",
"append",
"(",
"entity",
")",
"entity",
".",
"post_get_hook",
"(",
")",
"return",
"entities"
] |
Get a set of entities from Datastore by their respective keys.
Note:
This uses the adapter that is tied to the first model in the
list. If the keys have disparate adapters this function may
behave in unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of keys multiple times.
Parameters:
keys(list[anom.Key]): The list of keys whose entities to get.
Raises:
RuntimeError: If the given set of keys have models that use
a disparate set of adapters or if any of the keys are
partial.
Returns:
list[Model]: Entities that do not exist are going to be None
in the result list. The order of results matches the order
of the input keys.
|
[
"Get",
"a",
"set",
"of",
"entities",
"from",
"Datastore",
"by",
"their",
"respective",
"keys",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/model.py#L676-L730
|
train
|
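The companion read path, under the same assumptions. Results come back in input order with None holes for missing entities, so zipping keys with results is safe.

from anom import Key, Model, get_multi, props

class Person(Model):
    email = props.String()

keys = [Key(Person, 1), Key(Person, 999)]
for key, entity in zip(keys, get_multi(keys)):
    print(key, "->", entity)  # a miss prints as None
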
Bogdanp/anom-py
|
anom/model.py
|
put_multi
|
def put_multi(entities):
"""Persist a set of entities to Datastore.
Note:
This uses the adapter that is tied to the first Entity in the
list. If the entities have disparate adapters this function may
behave in unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of entities multiple times.
Parameters:
entities(list[Model]): The list of entities to persist.
Raises:
RuntimeError: If the given set of models use a disparate set of
adapters.
Returns:
list[Model]: The list of persisted entities.
"""
if not entities:
return []
adapter, requests = None, []
for entity in entities:
if adapter is None:
adapter = entity._adapter
entity.pre_put_hook()
requests.append(PutRequest(entity.key, entity.unindexed_properties, entity))
keys = adapter.put_multi(requests)
for key, entity in zip(keys, entities):
entity.key = key
entity.post_put_hook()
return entities
|
python
|
def put_multi(entities):
"""Persist a set of entities to Datastore.
Note:
This uses the adapter that is tied to the first Entity in the
list. If the entities have disparate adapters this function may
behave in unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of entities multiple times.
Parameters:
entities(list[Model]): The list of entities to persist.
Raises:
RuntimeError: If the given set of models use a disparate set of
adapters.
Returns:
list[Model]: The list of persisted entities.
"""
if not entities:
return []
adapter, requests = None, []
for entity in entities:
if adapter is None:
adapter = entity._adapter
entity.pre_put_hook()
requests.append(PutRequest(entity.key, entity.unindexed_properties, entity))
keys = adapter.put_multi(requests)
for key, entity in zip(keys, entities):
entity.key = key
entity.post_put_hook()
return entities
|
[
"def",
"put_multi",
"(",
"entities",
")",
":",
"if",
"not",
"entities",
":",
"return",
"[",
"]",
"adapter",
",",
"requests",
"=",
"None",
",",
"[",
"]",
"for",
"entity",
"in",
"entities",
":",
"if",
"adapter",
"is",
"None",
":",
"adapter",
"=",
"entity",
".",
"_adapter",
"entity",
".",
"pre_put_hook",
"(",
")",
"requests",
".",
"append",
"(",
"PutRequest",
"(",
"entity",
".",
"key",
",",
"entity",
".",
"unindexed_properties",
",",
"entity",
")",
")",
"keys",
"=",
"adapter",
".",
"put_multi",
"(",
"requests",
")",
"for",
"key",
",",
"entity",
"in",
"zip",
"(",
"keys",
",",
"entities",
")",
":",
"entity",
".",
"key",
"=",
"key",
"entity",
".",
"post_put_hook",
"(",
")",
"return",
"entities"
] |
Persist a set of entities to Datastore.
Note:
This uses the adapter that is tied to the first Entity in the
list. If the entities have disparate adapters this function may
behave in unexpected ways.
Warning:
You must pass a **list** and not a generator or some other kind
of iterable to this function as it has to iterate over the list
of entities multiple times.
Parameters:
entities(list[Model]): The list of entities to persist.
Raises:
RuntimeError: If the given set of models use a disparate set of
adapters.
Returns:
list[Model]: The list of persisted entities.
|
[
"Persist",
"a",
"set",
"of",
"entities",
"to",
"Datastore",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/model.py#L733-L772
|
train
|
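And the write path: because put_multi assigns the completed keys back onto the entities after the batch put, partial (id-less) entities are fine on input. Same hypothetical model and adapter assumptions as the sketches above.

from anom import Model, props, put_multi

class Person(Model):
    email = props.String()

people = [Person(email=f"user{i}@example.com") for i in range(3)]
put_multi(people)
for person in people:
    print(person.key)  # now complete, with ids assigned by Datastore
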
Bogdanp/anom-py
|
anom/model.py
|
Key.from_path
|
def from_path(cls, *path, namespace=None):
"""Build up a Datastore key from a path.
Parameters:
\*path(tuple[str or int]): The path segments.
namespace(str): An optional namespace for the key. This is
applied to each key in the tree.
Returns:
anom.Key: The Datastore key represented by the given path.
"""
parent = None
for i in range(0, len(path), 2):
parent = cls(*path[i:i + 2], parent=parent, namespace=namespace)
return parent
|
python
|
def from_path(cls, *path, namespace=None):
"""Build up a Datastore key from a path.
Parameters:
\*path(tuple[str or int]): The path segments.
namespace(str): An optional namespace for the key. This is
applied to each key in the tree.
Returns:
anom.Key: The Datastore key represented by the given path.
"""
parent = None
for i in range(0, len(path), 2):
parent = cls(*path[i:i + 2], parent=parent, namespace=namespace)
return parent
|
[
"def",
"from_path",
"(",
"cls",
",",
"*",
"path",
",",
"namespace",
"=",
"None",
")",
":",
"parent",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"path",
")",
",",
"2",
")",
":",
"parent",
"=",
"cls",
"(",
"*",
"path",
"[",
"i",
":",
"i",
"+",
"2",
"]",
",",
"parent",
"=",
"parent",
",",
"namespace",
"=",
"namespace",
")",
"return",
"parent"
] |
Build up a Datastore key from a path.
Parameters:
\*path(tuple[str or int]): The path segments.
namespace(str): An optional namespace for the key. This is
applied to each key in the tree.
Returns:
anom.Key: The Datastore key represented by the given path.
|
[
"Build",
"up",
"a",
"Datastore",
"key",
"from",
"a",
"path",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/model.py#L70-L85
|
train
|
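from_path consumes the path two segments at a time as (kind, id) pairs, chaining each pair as the parent of the next, so a four-segment path yields a key with one ancestor. The parent attribute read below is inferred from the Key constructor's parent keyword.

from anom import Key

# Equivalent to Key("Child", 2, parent=Key("Parent", 1)).
key = Key.from_path("Parent", 1, "Child", 2)
print(key.parent)  # the ("Parent", 1) ancestor
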
Bogdanp/anom-py
|
anom/model.py
|
Property.validate
|
def validate(self, value):
"""Validates that `value` can be assigned to this Property.
Parameters:
value: The value to validate.
Raises:
TypeError: If the type of the assigned value is invalid.
Returns:
The value that should be assigned to the entity.
"""
if isinstance(value, self._types):
return value
elif self.optional and value is None:
return [] if self.repeated else None
elif self.repeated and isinstance(value, (tuple, list)) and all(isinstance(x, self._types) for x in value):
return value
else:
raise TypeError(f"Value of type {classname(value)} assigned to {classname(self)} property.")
|
python
|
def validate(self, value):
"""Validates that `value` can be assigned to this Property.
Parameters:
value: The value to validate.
Raises:
TypeError: If the type of the assigned value is invalid.
Returns:
The value that should be assigned to the entity.
"""
if isinstance(value, self._types):
return value
elif self.optional and value is None:
return [] if self.repeated else None
elif self.repeated and isinstance(value, (tuple, list)) and all(isinstance(x, self._types) for x in value):
return value
else:
raise TypeError(f"Value of type {classname(value)} assigned to {classname(self)} property.")
|
[
"def",
"validate",
"(",
"self",
",",
"value",
")",
":",
"if",
"isinstance",
"(",
"value",
",",
"self",
".",
"_types",
")",
":",
"return",
"value",
"elif",
"self",
".",
"optional",
"and",
"value",
"is",
"None",
":",
"return",
"[",
"]",
"if",
"self",
".",
"repeated",
"else",
"None",
"elif",
"self",
".",
"repeated",
"and",
"isinstance",
"(",
"value",
",",
"(",
"tuple",
",",
"list",
")",
")",
"and",
"all",
"(",
"isinstance",
"(",
"x",
",",
"self",
".",
"_types",
")",
"for",
"x",
"in",
"value",
")",
":",
"return",
"value",
"else",
":",
"raise",
"TypeError",
"(",
"f\"Value of type {classname(value)} assigned to {classname(self)} property.\"",
")"
] |
Validates that `value` can be assigned to this Property.
Parameters:
value: The value to validate.
Raises:
TypeError: If the type of the assigned value is invalid.
Returns:
The value that should be assigned to the entity.
|
[
"Validates",
"that",
"value",
"can",
"be",
"assigned",
"to",
"this",
"Property",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/model.py#L230-L252
|
train
|
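The branch order in validate matters: an exact type match wins, then the optional-None normalisation (which turns None into [] for repeated properties), then the repeated-list check. A toy subclass makes this concrete; that Property's constructor accepts optional and repeated flags is an assumption inferred from the attributes the method reads.

from anom.model import Property

class IntProp(Property):
    _types = (int,)

assert IntProp().validate(42) == 42
assert IntProp(optional=True).validate(None) is None
assert IntProp(optional=True, repeated=True).validate(None) == []
assert IntProp(repeated=True).validate([1, 2, 3]) == [1, 2, 3]
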
Bogdanp/anom-py
|
anom/model.py
|
Property.prepare_to_store
|
def prepare_to_store(self, entity, value):
"""Prepare `value` for storage. Called by the Model for each
Property, value pair it contains before handing the data off
to an adapter.
Parameters:
entity(Model): The entity to which the value belongs.
value: The value being stored.
Raises:
RuntimeError: If this property is required but no value was
assigned to it.
Returns:
The value that should be persisted.
"""
if value is None and not self.optional:
raise RuntimeError(f"Property {self.name_on_model} requires a value.")
return value
|
python
|
def prepare_to_store(self, entity, value):
"""Prepare `value` for storage. Called by the Model for each
Property, value pair it contains before handing the data off
to an adapter.
Parameters:
entity(Model): The entity to which the value belongs.
value: The value being stored.
Raises:
RuntimeError: If this property is required but no value was
assigned to it.
Returns:
The value that should be persisted.
"""
if value is None and not self.optional:
raise RuntimeError(f"Property {self.name_on_model} requires a value.")
return value
|
[
"def",
"prepare_to_store",
"(",
"self",
",",
"entity",
",",
"value",
")",
":",
"if",
"value",
"is",
"None",
"and",
"not",
"self",
".",
"optional",
":",
"raise",
"RuntimeError",
"(",
"f\"Property {self.name_on_model} requires a value.\"",
")",
"return",
"value"
] |
Prepare `value` for storage. Called by the Model for each
Property, value pair it contains before handing the data off
to an adapter.
Parameters:
entity(Model): The entity to which the value belongs.
value: The value being stored.
Raises:
RuntimeError: If this property is required but no value was
assigned to it.
Returns:
The value that should be persisted.
|
[
"Prepare",
"value",
"for",
"storage",
".",
"Called",
"by",
"the",
"Model",
"for",
"each",
"Property",
"value",
"pair",
"it",
"contains",
"before",
"handing",
"the",
"data",
"off",
"to",
"an",
"adapter",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/model.py#L268-L286
|
train
|
Bogdanp/anom-py
|
anom/model.py
|
Model.get
|
def get(cls, id_or_name, *, parent=None, namespace=None):
"""Get an entity by id.
Parameters:
id_or_name(int or str): The entity's id.
parent(anom.Key, optional): The entity's parent Key.
namespace(str, optional): The entity's namespace.
Returns:
Model: An entity or ``None`` if the entity doesn't exist in
Datastore.
"""
return Key(cls, id_or_name, parent=parent, namespace=namespace).get()
|
python
|
def get(cls, id_or_name, *, parent=None, namespace=None):
"""Get an entity by id.
Parameters:
id_or_name(int or str): The entity's id.
parent(anom.Key, optional): The entity's parent Key.
namespace(str, optional): The entity's namespace.
Returns:
Model: An entity or ``None`` if the entity doesn't exist in
Datastore.
"""
return Key(cls, id_or_name, parent=parent, namespace=namespace).get()
|
[
"def",
"get",
"(",
"cls",
",",
"id_or_name",
",",
"*",
",",
"parent",
"=",
"None",
",",
"namespace",
"=",
"None",
")",
":",
"return",
"Key",
"(",
"cls",
",",
"id_or_name",
",",
"parent",
"=",
"parent",
",",
"namespace",
"=",
"namespace",
")",
".",
"get",
"(",
")"
] |
Get an entity by id.
Parameters:
id_or_name(int or str): The entity's id.
parent(anom.Key, optional): The entity's parent Key.
namespace(str, optional): The entity's namespace.
Returns:
Model: An entity or ``None`` if the entity doesn't exist in
Datastore.
|
[
"Get",
"an",
"entity",
"by",
"id",
"."
] |
519078b6d1570fa63c5f17cf98817c7bb5588136
|
https://github.com/Bogdanp/anom-py/blob/519078b6d1570fa63c5f17cf98817c7bb5588136/anom/model.py#L519-L531
|
train
|
grauwoelfchen/flask-dotenv
|
flask_dotenv.py
|
DotEnv.init_app
|
def init_app(self, app, env_file=None, verbose_mode=False):
"""Imports .env file."""
if self.app is None:
self.app = app
self.verbose_mode = verbose_mode
if env_file is None:
env_file = os.path.join(os.getcwd(), ".env")
if not os.path.exists(env_file):
warnings.warn("can't read {0} - it doesn't exist".format(env_file))
else:
self.__import_vars(env_file)
|
python
|
def init_app(self, app, env_file=None, verbose_mode=False):
"""Imports .env file."""
if self.app is None:
self.app = app
self.verbose_mode = verbose_mode
if env_file is None:
env_file = os.path.join(os.getcwd(), ".env")
if not os.path.exists(env_file):
warnings.warn("can't read {0} - it doesn't exist".format(env_file))
else:
self.__import_vars(env_file)
|
[
"def",
"init_app",
"(",
"self",
",",
"app",
",",
"env_file",
"=",
"None",
",",
"verbose_mode",
"=",
"False",
")",
":",
"if",
"self",
".",
"app",
"is",
"None",
":",
"self",
".",
"app",
"=",
"app",
"self",
".",
"verbose_mode",
"=",
"verbose_mode",
"if",
"env_file",
"is",
"None",
":",
"env_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"\".env\"",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"env_file",
")",
":",
"warnings",
".",
"warn",
"(",
"\"can't read {0} - it doesn't exist\"",
".",
"format",
"(",
"env_file",
")",
")",
"else",
":",
"self",
".",
"__import_vars",
"(",
"env_file",
")"
] |
Imports .env file.
|
[
"Imports",
".",
"env",
"file",
"."
] |
7dc811fff18570c4b6803ce48c3ecca7eebabe51
|
https://github.com/grauwoelfchen/flask-dotenv/blob/7dc811fff18570c4b6803ce48c3ecca7eebabe51/flask_dotenv.py#L24-L35
|
train
|
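Typical Flask wiring for the extension above; the .env path defaults to the current working directory, and a missing file only warns rather than raises.

from flask import Flask
from flask_dotenv import DotEnv

app = Flask(__name__)
env = DotEnv()
# verbose_mode=True prints each config key as it is set or overwritten.
env.init_app(app, env_file=".env", verbose_mode=True)
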
grauwoelfchen/flask-dotenv
|
flask_dotenv.py
|
DotEnv.__import_vars
|
def __import_vars(self, env_file):
"""Actual importing function."""
with open(env_file, "r") as f: # pylint: disable=invalid-name
for line in f:
try:
line = line.lstrip()
if line.startswith('export'):
line = line.replace('export', '', 1)
key, val = line.strip().split('=', 1)
except ValueError: # Take care of blank or comment lines
pass
else:
if not callable(val):
if self.verbose_mode:
if key in self.app.config:
print(
" * Overwriting an existing config var:"
" {0}".format(key))
else:
print(
" * Setting an entirely new config var:"
" {0}".format(key))
self.app.config[key] = re.sub(
r"\A[\"']|[\"']\Z", "", val)
|
python
|
def __import_vars(self, env_file):
"""Actual importing function."""
with open(env_file, "r") as f: # pylint: disable=invalid-name
for line in f:
try:
line = line.lstrip()
if line.startswith('export'):
line = line.replace('export', '', 1)
key, val = line.strip().split('=', 1)
except ValueError: # Take care of blank or comment lines
pass
else:
if not callable(val):
if self.verbose_mode:
if key in self.app.config:
print(
" * Overwriting an existing config var:"
" {0}".format(key))
else:
print(
" * Setting an entirely new config var:"
" {0}".format(key))
self.app.config[key] = re.sub(
r"\A[\"']|[\"']\Z", "", val)
|
[
"def",
"__import_vars",
"(",
"self",
",",
"env_file",
")",
":",
"with",
"open",
"(",
"env_file",
",",
"\"r\"",
")",
"as",
"f",
":",
"# pylint: disable=invalid-name",
"for",
"line",
"in",
"f",
":",
"try",
":",
"line",
"=",
"line",
".",
"lstrip",
"(",
")",
"if",
"line",
".",
"startswith",
"(",
"'export'",
")",
":",
"line",
"=",
"line",
".",
"replace",
"(",
"'export'",
",",
"''",
",",
"1",
")",
"key",
",",
"val",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'='",
",",
"1",
")",
"except",
"ValueError",
":",
"# Take care of blank or comment lines",
"pass",
"else",
":",
"if",
"not",
"callable",
"(",
"val",
")",
":",
"if",
"self",
".",
"verbose_mode",
":",
"if",
"key",
"in",
"self",
".",
"app",
".",
"config",
":",
"print",
"(",
"\" * Overwriting an existing config var:\"",
"\" {0}\"",
".",
"format",
"(",
"key",
")",
")",
"else",
":",
"print",
"(",
"\" * Setting an entirely new config var:\"",
"\" {0}\"",
".",
"format",
"(",
"key",
")",
")",
"self",
".",
"app",
".",
"config",
"[",
"key",
"]",
"=",
"re",
".",
"sub",
"(",
"r\"\\A[\\\"']|[\\\"']\\Z\"",
",",
"\"\"",
",",
"val",
")"
] |
Actual importing function.
|
[
"Actual",
"importing",
"function",
"."
] |
7dc811fff18570c4b6803ce48c3ecca7eebabe51
|
https://github.com/grauwoelfchen/flask-dotenv/blob/7dc811fff18570c4b6803ce48c3ecca7eebabe51/flask_dotenv.py#L37-L60
|
train
|
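The parsing rules above are compact but easy to miss: a leading export is dropped, the line splits on the first =, and one layer of surrounding quotes is stripped by the final regex. A standalone, runnable sketch of just that quote-stripping step:

import re

# Same pattern as __import_vars: remove one quote (single or double)
# at the very start and one at the very end, if present.
for raw in ('"hello"', "'world'", 'plain', '"mixed\''):
    print(re.sub(r"\A[\"']|[\"']\Z", "", raw))
# -> hello, world, plain, mixed
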
uktrade/directory-components
|
directory_components/middleware.py
|
CountryMiddleware.process_response
|
def process_response(self, request, response):
"""
Shares config with the language cookie as they serve a similar purpose
"""
if hasattr(request, 'COUNTRY_CODE'):
response.set_cookie(
key=constants.COUNTRY_COOKIE_NAME,
value=request.COUNTRY_CODE,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN
)
return response
|
python
|
def process_response(self, request, response):
"""
Shares config with the language cookie as they serve a similar purpose
"""
if hasattr(request, 'COUNTRY_CODE'):
response.set_cookie(
key=constants.COUNTRY_COOKIE_NAME,
value=request.COUNTRY_CODE,
max_age=settings.LANGUAGE_COOKIE_AGE,
path=settings.LANGUAGE_COOKIE_PATH,
domain=settings.LANGUAGE_COOKIE_DOMAIN
)
return response
|
[
"def",
"process_response",
"(",
"self",
",",
"request",
",",
"response",
")",
":",
"if",
"hasattr",
"(",
"request",
",",
"'COUNTRY_CODE'",
")",
":",
"response",
".",
"set_cookie",
"(",
"key",
"=",
"constants",
".",
"COUNTRY_COOKIE_NAME",
",",
"value",
"=",
"request",
".",
"COUNTRY_CODE",
",",
"max_age",
"=",
"settings",
".",
"LANGUAGE_COOKIE_AGE",
",",
"path",
"=",
"settings",
".",
"LANGUAGE_COOKIE_PATH",
",",
"domain",
"=",
"settings",
".",
"LANGUAGE_COOKIE_DOMAIN",
")",
"return",
"response"
] |
Shares config with the language cookie as they serve a similar purpose
|
[
"Shares",
"config",
"with",
"the",
"language",
"cookie",
"as",
"they",
"serve",
"a",
"similar",
"purpose"
] |
305b3cfd590e170255503ae3c41aebcaa658af8e
|
https://github.com/uktrade/directory-components/blob/305b3cfd590e170255503ae3c41aebcaa658af8e/directory_components/middleware.py#L91-L104
|
train
|
uktrade/directory-components
|
directory_components/widgets.py
|
PrettyIDsMixin.create_option
|
def create_option(
self, name, value, label, selected, index,
subindex=None, attrs=None):
"""Patch to use nicer ids."""
index = str(index) if subindex is None else "%s%s%s" % (
index, self.id_separator, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(
self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
if self.use_nice_ids:
option_attrs['id'] = "%s%s%s" % (
option_attrs['id'],
self.id_separator,
slugify(label.lower())
)
else:
option_attrs['id'] = self.id_for_label(
option_attrs['id'], index)
return {
'name': name,
'value': value,
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
'wrap_label': True,
}
|
python
|
def create_option(
self, name, value, label, selected, index,
subindex=None, attrs=None):
"""Patch to use nicer ids."""
index = str(index) if subindex is None else "%s%s%s" % (
index, self.id_separator, subindex)
if attrs is None:
attrs = {}
option_attrs = self.build_attrs(
self.attrs, attrs) if self.option_inherits_attrs else {}
if selected:
option_attrs.update(self.checked_attribute)
if 'id' in option_attrs:
if self.use_nice_ids:
option_attrs['id'] = "%s%s%s" % (
option_attrs['id'],
self.id_separator,
slugify(label.lower())
)
else:
option_attrs['id'] = self.id_for_label(
option_attrs['id'], index)
return {
'name': name,
'value': value,
'label': label,
'selected': selected,
'index': index,
'attrs': option_attrs,
'type': self.input_type,
'template_name': self.option_template_name,
'wrap_label': True,
}
|
[
"def",
"create_option",
"(",
"self",
",",
"name",
",",
"value",
",",
"label",
",",
"selected",
",",
"index",
",",
"subindex",
"=",
"None",
",",
"attrs",
"=",
"None",
")",
":",
"index",
"=",
"str",
"(",
"index",
")",
"if",
"subindex",
"is",
"None",
"else",
"\"%s%s%s\"",
"%",
"(",
"index",
",",
"self",
".",
"id_separator",
",",
"subindex",
")",
"if",
"attrs",
"is",
"None",
":",
"attrs",
"=",
"{",
"}",
"option_attrs",
"=",
"self",
".",
"build_attrs",
"(",
"self",
".",
"attrs",
",",
"attrs",
")",
"if",
"self",
".",
"option_inherits_attrs",
"else",
"{",
"}",
"if",
"selected",
":",
"option_attrs",
".",
"update",
"(",
"self",
".",
"checked_attribute",
")",
"if",
"'id'",
"in",
"option_attrs",
":",
"if",
"self",
".",
"use_nice_ids",
":",
"option_attrs",
"[",
"'id'",
"]",
"=",
"\"%s%s%s\"",
"%",
"(",
"option_attrs",
"[",
"'id'",
"]",
",",
"self",
".",
"id_separator",
",",
"slugify",
"(",
"label",
".",
"lower",
"(",
")",
")",
")",
"else",
":",
"option_attrs",
"[",
"'id'",
"]",
"=",
"self",
".",
"id_for_label",
"(",
"option_attrs",
"[",
"'id'",
"]",
",",
"index",
")",
"return",
"{",
"'name'",
":",
"name",
",",
"'value'",
":",
"value",
",",
"'label'",
":",
"label",
",",
"'selected'",
":",
"selected",
",",
"'index'",
":",
"index",
",",
"'attrs'",
":",
"option_attrs",
",",
"'type'",
":",
"self",
".",
"input_type",
",",
"'template_name'",
":",
"self",
".",
"option_template_name",
",",
"'wrap_label'",
":",
"True",
",",
"}"
] |
Patch to use nicer ids.
|
[
"Patch",
"to",
"use",
"nicer",
"ids",
"."
] |
305b3cfd590e170255503ae3c41aebcaa658af8e
|
https://github.com/uktrade/directory-components/blob/305b3cfd590e170255503ae3c41aebcaa658af8e/directory_components/widgets.py#L15-L47
|
train
|
uktrade/directory-components
|
scripts/upgrade_header_footer.py
|
current_version
|
def current_version():
"""Get current version of directory-components."""
filepath = os.path.abspath(
project_root / "directory_components" / "version.py")
version_py = get_file_string(filepath)
regex = re.compile(Utils.get_version)
if regex.search(version_py) is not None:
current_version = regex.search(version_py).group(0)
print(color(
"Current directory-components version: {}".format(current_version),
fg='blue', style='bold'))
get_update_info()
else:
print(color(
'Error finding directory-components version.',
fg='red', style='bold'))
|
python
|
def current_version():
"""Get current version of directory-components."""
filepath = os.path.abspath(
project_root / "directory_components" / "version.py")
version_py = get_file_string(filepath)
regex = re.compile(Utils.get_version)
if regex.search(version_py) is not None:
current_version = regex.search(version_py).group(0)
print(color(
"Current directory-components version: {}".format(current_version),
fg='blue', style='bold'))
get_update_info()
else:
print(color(
'Error finding directory-components version.',
fg='red', style='bold'))
|
[
"def",
"current_version",
"(",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"project_root",
"/",
"\"directory_components\"",
"/",
"\"version.py\"",
")",
"version_py",
"=",
"get_file_string",
"(",
"filepath",
")",
"regex",
"=",
"re",
".",
"compile",
"(",
"Utils",
".",
"get_version",
")",
"if",
"regex",
".",
"search",
"(",
"version_py",
")",
"is",
"not",
"None",
":",
"current_version",
"=",
"regex",
".",
"search",
"(",
"version_py",
")",
".",
"group",
"(",
"0",
")",
"print",
"(",
"color",
"(",
"\"Current directory-components version: {}\"",
".",
"format",
"(",
"current_version",
")",
",",
"fg",
"=",
"'blue'",
",",
"style",
"=",
"'bold'",
")",
")",
"get_update_info",
"(",
")",
"else",
":",
"print",
"(",
"color",
"(",
"'Error finding directory-components version.'",
",",
"fg",
"=",
"'red'",
",",
"style",
"=",
"'bold'",
")",
")"
] |
Get current version of directory-components.
|
[
"Get",
"current",
"version",
"of",
"directory",
"-",
"components",
"."
] |
305b3cfd590e170255503ae3c41aebcaa658af8e
|
https://github.com/uktrade/directory-components/blob/305b3cfd590e170255503ae3c41aebcaa658af8e/scripts/upgrade_header_footer.py#L27-L42
|
train
|
uktrade/directory-components
|
scripts/upgrade_header_footer.py
|
get_file_string
|
def get_file_string(filepath):
"""Get string from file."""
with open(os.path.abspath(filepath)) as f:
return f.read()
|
python
|
def get_file_string(filepath):
"""Get string from file."""
with open(os.path.abspath(filepath)) as f:
return f.read()
|
[
"def",
"get_file_string",
"(",
"filepath",
")",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"filepath",
")",
")",
"as",
"f",
":",
"return",
"f",
".",
"read",
"(",
")"
] |
Get string from file.
|
[
"Get",
"string",
"from",
"file",
"."
] |
305b3cfd590e170255503ae3c41aebcaa658af8e
|
https://github.com/uktrade/directory-components/blob/305b3cfd590e170255503ae3c41aebcaa658af8e/scripts/upgrade_header_footer.py#L45-L48
|
train
|
uktrade/directory-components
|
scripts/upgrade_header_footer.py
|
replace_in_dirs
|
def replace_in_dirs(version):
"""Look through dirs and run replace_in_files in each."""
print(color(
"Upgrading directory-components dependency in all repos...",
fg='blue', style='bold'))
for dirname in Utils.dirs:
replace = "directory-components=={}".format(version)
replace_in_files(dirname, replace)
done(version)
|
python
|
def replace_in_dirs(version):
"""Look through dirs and run replace_in_files in each."""
print(color(
"Upgrading directory-components dependency in all repos...",
fg='blue', style='bold'))
for dirname in Utils.dirs:
replace = "directory-components=={}".format(version)
replace_in_files(dirname, replace)
done(version)
|
[
"def",
"replace_in_dirs",
"(",
"version",
")",
":",
"print",
"(",
"color",
"(",
"\"Upgrading directory-components dependency in all repos...\"",
",",
"fg",
"=",
"'blue'",
",",
"style",
"=",
"'bold'",
")",
")",
"for",
"dirname",
"in",
"Utils",
".",
"dirs",
":",
"replace",
"=",
"\"directory-components=={}\"",
".",
"format",
"(",
"version",
")",
"replace_in_files",
"(",
"dirname",
",",
"replace",
")",
"done",
"(",
"version",
")"
] |
Look through dirs and run replace_in_files in each.
|
[
"Look",
"through",
"dirs",
"and",
"run",
"replace_in_files",
"in",
"each",
"."
] |
305b3cfd590e170255503ae3c41aebcaa658af8e
|
https://github.com/uktrade/directory-components/blob/305b3cfd590e170255503ae3c41aebcaa658af8e/scripts/upgrade_header_footer.py#L59-L67
|
train
|
uktrade/directory-components
|
scripts/upgrade_header_footer.py
|
replace_in_files
|
def replace_in_files(dirname, replace):
"""Replace current version with new version in requirements files."""
filepath = os.path.abspath(dirname / "requirements.in")
if os.path.isfile(filepath) and header_footer_exists(filepath):
replaced = re.sub(Utils.exp, replace, get_file_string(filepath))
with open(filepath, "w") as f:
f.write(replaced)
print(color(
"Written to file: {}".format(filepath),
fg='magenta', style='bold'))
|
python
|
def replace_in_files(dirname, replace):
"""Replace current version with new version in requirements files."""
filepath = os.path.abspath(dirname / "requirements.in")
if os.path.isfile(filepath) and header_footer_exists(filepath):
replaced = re.sub(Utils.exp, replace, get_file_string(filepath))
with open(filepath, "w") as f:
f.write(replaced)
print(color(
"Written to file: {}".format(filepath),
fg='magenta', style='bold'))
|
[
"def",
"replace_in_files",
"(",
"dirname",
",",
"replace",
")",
":",
"filepath",
"=",
"os",
".",
"path",
".",
"abspath",
"(",
"dirname",
"/",
"\"requirements.in\"",
")",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filepath",
")",
"and",
"header_footer_exists",
"(",
"filepath",
")",
":",
"replaced",
"=",
"re",
".",
"sub",
"(",
"Utils",
".",
"exp",
",",
"replace",
",",
"get_file_string",
"(",
"filepath",
")",
")",
"with",
"open",
"(",
"filepath",
",",
"\"w\"",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"replaced",
")",
"print",
"(",
"color",
"(",
"\"Written to file: {}\"",
".",
"format",
"(",
"filepath",
")",
",",
"fg",
"=",
"'magenta'",
",",
"style",
"=",
"'bold'",
")",
")"
] |
Replace current version with new version in requirements files.
|
[
"Replace",
"current",
"version",
"with",
"new",
"version",
"in",
"requirements",
"files",
"."
] |
305b3cfd590e170255503ae3c41aebcaa658af8e
|
https://github.com/uktrade/directory-components/blob/305b3cfd590e170255503ae3c41aebcaa658af8e/scripts/upgrade_header_footer.py#L70-L79
|
train
|
uktrade/directory-components
|
scripts/upgrade_header_footer.py
|
header_footer_exists
|
def header_footer_exists(filepath):
"""Check if directory-components is listed in requirements files."""
with open(filepath) as f:
return re.search(Utils.exp, f.read())
|
python
|
def header_footer_exists(filepath):
"""Check if directory-components is listed in requirements files."""
with open(filepath) as f:
return re.search(Utils.exp, f.read())
|
[
"def",
"header_footer_exists",
"(",
"filepath",
")",
":",
"with",
"open",
"(",
"filepath",
")",
"as",
"f",
":",
"return",
"re",
".",
"search",
"(",
"Utils",
".",
"exp",
",",
"f",
".",
"read",
"(",
")",
")"
] |
Check if directory-components is listed in requirements files.
|
[
"Check",
"if",
"directory",
"-",
"components",
"is",
"listed",
"in",
"requirements",
"files",
"."
] |
305b3cfd590e170255503ae3c41aebcaa658af8e
|
https://github.com/uktrade/directory-components/blob/305b3cfd590e170255503ae3c41aebcaa658af8e/scripts/upgrade_header_footer.py#L82-L85
|
train
|
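Taken together, the directory-components records above (current_version, get_file_string, replace_in_dirs, replace_in_files, header_footer_exists) amount to a small version-bump pipeline: read a requirements file, check for the pinned dependency with a regex, and substitute the new version. A minimal self-contained sketch of that flow follows; the pattern is an assumption, since the records reference Utils.exp and Utils.get_version without showing them.

import re

# Illustrative pattern standing in for Utils.exp, which is not shown above.
PATTERN = re.compile(r"directory-components==[\d.]+")

def bump_requirements(text, new_version):
    # Substitute the pinned version only if the dependency is listed,
    # mirroring the header_footer_exists guard used by replace_in_files.
    if PATTERN.search(text) is None:
        return text
    return PATTERN.sub("directory-components=={}".format(new_version), text)

# bump_requirements("directory-components==1.0.0\n", "2.3.1")
# -> "directory-components==2.3.1\n"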
bxlab/bx-python
|
lib/bx_extras/pstat.py
|
linedelimited
|
def linedelimited (inlist,delimiter):
"""
Returns a string composed of elements in inlist, with each element
separated by 'delimiter.' Used by function writedelimited. Use '\t'
for tab-delimiting.
Usage: linedelimited (inlist,delimiter)
"""
outstr = ''
for item in inlist:
if type(item) != StringType:
item = str(item)
outstr = outstr + item + delimiter
outstr = outstr[0:-1]
return outstr
|
python
|
def linedelimited (inlist,delimiter):
"""
Returns a string composed of elements in inlist, with each element
separated by 'delimiter.' Used by function writedelimited. Use '\t'
for tab-delimiting.
Usage: linedelimited (inlist,delimiter)
"""
outstr = ''
for item in inlist:
if type(item) != StringType:
item = str(item)
outstr = outstr + item + delimiter
outstr = outstr[0:-1]
return outstr
|
[
"def",
"linedelimited",
"(",
"inlist",
",",
"delimiter",
")",
":",
"outstr",
"=",
"''",
"for",
"item",
"in",
"inlist",
":",
"if",
"type",
"(",
"item",
")",
"!=",
"StringType",
":",
"item",
"=",
"str",
"(",
"item",
")",
"outstr",
"=",
"outstr",
"+",
"item",
"+",
"delimiter",
"outstr",
"=",
"outstr",
"[",
"0",
":",
"-",
"1",
"]",
"return",
"outstr"
] |
Returns a string composed of elements in inlist, with each element
separated by 'delimiter.' Used by function writedelimited. Use '\t'
for tab-delimiting.
Usage: linedelimited (inlist,delimiter)
|
[
"Returns",
"a",
"string",
"composed",
"of",
"elements",
"in",
"inlist",
"with",
"each",
"element",
"separated",
"by",
"delimiter",
".",
"Used",
"by",
"function",
"writedelimited",
".",
"Use",
"\\",
"t",
"for",
"tab",
"-",
"delimiting",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/pstat.py#L396-L410
|
train
|
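linedelimited above builds its result by repeated concatenation and then trims the trailing delimiter; in modern Python the same output is produced in one pass with str.join. A small equivalent sketch (the function name is illustrative):

def line_delimited(items, delimiter):
    # Coerce each element to str, then join once; equivalent to the
    # concatenate-then-strip-last-character loop above.
    return delimiter.join(str(item) for item in items)

assert line_delimited([1, 'a', 2.5], '\t') == '1\ta\t2.5'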
bxlab/bx-python
|
lib/bx_extras/pstat.py
|
lineincustcols
|
def lineincustcols (inlist,colsizes):
"""
Returns a string composed of elements in inlist, with each element
right-aligned in a column of width specified by a sequence colsizes. The
length of colsizes must be greater than or equal to the number of columns
in inlist.
Usage: lineincustcols (inlist,colsizes)
Returns: formatted string created from inlist
"""
outstr = ''
for i in range(len(inlist)):
if type(inlist[i]) != StringType:
item = str(inlist[i])
else:
item = inlist[i]
size = len(item)
if size <= colsizes[i]:
for j in range(colsizes[i]-size):
outstr = outstr + ' '
outstr = outstr + item
else:
outstr = outstr + item[0:colsizes[i]+1]
return outstr
|
python
|
def lineincustcols (inlist,colsizes):
"""
Returns a string composed of elements in inlist, with each element
right-aligned in a column of width specified by a sequence colsizes. The
length of colsizes must be greater than or equal to the number of columns
in inlist.
Usage: lineincustcols (inlist,colsizes)
Returns: formatted string created from inlist
"""
outstr = ''
for i in range(len(inlist)):
if type(inlist[i]) != StringType:
item = str(inlist[i])
else:
item = inlist[i]
size = len(item)
if size <= colsizes[i]:
for j in range(colsizes[i]-size):
outstr = outstr + ' '
outstr = outstr + item
else:
outstr = outstr + item[0:colsizes[i]+1]
return outstr
|
[
"def",
"lineincustcols",
"(",
"inlist",
",",
"colsizes",
")",
":",
"outstr",
"=",
"''",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"inlist",
")",
")",
":",
"if",
"type",
"(",
"inlist",
"[",
"i",
"]",
")",
"!=",
"StringType",
":",
"item",
"=",
"str",
"(",
"inlist",
"[",
"i",
"]",
")",
"else",
":",
"item",
"=",
"inlist",
"[",
"i",
"]",
"size",
"=",
"len",
"(",
"item",
")",
"if",
"size",
"<=",
"colsizes",
"[",
"i",
"]",
":",
"for",
"j",
"in",
"range",
"(",
"colsizes",
"[",
"i",
"]",
"-",
"size",
")",
":",
"outstr",
"=",
"outstr",
"+",
"' '",
"outstr",
"=",
"outstr",
"+",
"item",
"else",
":",
"outstr",
"=",
"outstr",
"+",
"item",
"[",
"0",
":",
"colsizes",
"[",
"i",
"]",
"+",
"1",
"]",
"return",
"outstr"
] |
Returns a string composed of elements in inlist, with each element
right-aligned in a column of width specified by a sequence colsizes. The
length of colsizes must be greater than or equal to the number of columns
in inlist.
Usage: lineincustcols (inlist,colsizes)
Returns: formatted string created from inlist
|
[
"Returns",
"a",
"string",
"composed",
"of",
"elements",
"in",
"inlist",
"with",
"each",
"element",
"right",
"-",
"aligned",
"in",
"a",
"column",
"of",
"width",
"specified",
"by",
"a",
"sequence",
"colsizes",
".",
"The",
"length",
"of",
"colsizes",
"must",
"be",
"greater",
"than",
"or",
"equal",
"to",
"the",
"number",
"of",
"columns",
"in",
"inlist",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/pstat.py#L434-L457
|
train
|
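lineincustcols right-aligns each element in a fixed-width column; note that when an item is too wide, the record keeps colsizes[i]+1 characters, so overlong items spill by one. A sketch of the same layout with str.rjust, truncating to the exact width instead:

def line_in_cust_cols(items, colsizes):
    # Right-align each item in its column, truncating overlong items.
    return ''.join(str(item)[:width].rjust(width)
                   for item, width in zip(items, colsizes))

assert line_in_cust_cols(['ab', 7], [5, 3]) == '   ab  7'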
bxlab/bx-python
|
lib/bx_extras/pstat.py
|
list2string
|
def list2string (inlist,delimit=' '):
"""
Converts a 1D list to a single long string for file output, using
the string.join function.
Usage: list2string (inlist,delimit=' ')
Returns: the string created from inlist
"""
stringlist = [makestr(_) for _ in inlist]
return string.join(stringlist,delimit)
|
python
|
def list2string (inlist,delimit=' '):
"""
Converts a 1D list to a single long string for file output, using
the string.join function.
Usage: list2string (inlist,delimit=' ')
Returns: the string created from inlist
"""
stringlist = [makestr(_) for _ in inlist]
return string.join(stringlist,delimit)
|
[
"def",
"list2string",
"(",
"inlist",
",",
"delimit",
"=",
"' '",
")",
":",
"stringlist",
"=",
"[",
"makestr",
"(",
"_",
")",
"for",
"_",
"in",
"inlist",
"]",
"return",
"string",
".",
"join",
"(",
"stringlist",
",",
"delimit",
")"
] |
Converts a 1D list to a single long string for file output, using
the string.join function.
Usage: list2string (inlist,delimit=' ')
Returns: the string created from inlist
|
[
"Converts",
"a",
"1D",
"list",
"to",
"a",
"single",
"long",
"string",
"for",
"file",
"output",
"using",
"the",
"string",
".",
"join",
"function",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/pstat.py#L460-L469
|
train
|
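list2string calls string.join, which existed only in Python 2 (the string module's function form was removed in Python 3); str.join is the drop-in modern equivalent. A sketch, with str standing in for the makestr helper that is not shown in this record:

def list_to_string(items, delimit=' '):
    # Python 3 replacement for string.join(stringlist, delimit).
    return delimit.join(str(item) for item in items)

assert list_to_string([1, 2, 3]) == '1 2 3'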
bxlab/bx-python
|
lib/bx_extras/pstat.py
|
replace
|
def replace (inlst,oldval,newval):
"""
Replaces all occurrences of 'oldval' with 'newval', recursively.
Usage: replace (inlst,oldval,newval)
"""
lst = inlst*1
for i in range(len(lst)):
if type(lst[i]) not in [ListType,TupleType]:
if lst[i]==oldval: lst[i]=newval
else:
lst[i] = replace(lst[i],oldval,newval)
return lst
|
python
|
def replace (inlst,oldval,newval):
"""
Replaces all occurrences of 'oldval' with 'newval', recursively.
Usage: replace (inlst,oldval,newval)
"""
lst = inlst*1
for i in range(len(lst)):
if type(lst[i]) not in [ListType,TupleType]:
if lst[i]==oldval: lst[i]=newval
else:
lst[i] = replace(lst[i],oldval,newval)
return lst
|
[
"def",
"replace",
"(",
"inlst",
",",
"oldval",
",",
"newval",
")",
":",
"lst",
"=",
"inlst",
"*",
"1",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"lst",
")",
")",
":",
"if",
"type",
"(",
"lst",
"[",
"i",
"]",
")",
"not",
"in",
"[",
"ListType",
",",
"TupleType",
"]",
":",
"if",
"lst",
"[",
"i",
"]",
"==",
"oldval",
":",
"lst",
"[",
"i",
"]",
"=",
"newval",
"else",
":",
"lst",
"[",
"i",
"]",
"=",
"replace",
"(",
"lst",
"[",
"i",
"]",
",",
"oldval",
",",
"newval",
")",
"return",
"lst"
] |
Replaces all occurrences of 'oldval' with 'newval', recursively.
Usage: replace (inlst,oldval,newval)
|
[
"Replaces",
"all",
"occurrences",
"of",
"oldval",
"with",
"newval",
"recursively",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/pstat.py#L564-L576
|
train
|
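replace above copies each level with inlst*1 and assigns back into it, which works for lists but would raise TypeError when recursing into a tuple (tuples reject item assignment). A purely functional sketch that handles both container types, offered as an alternative rather than as this library's code:

def replace_nested(seq, oldval, newval):
    # Rebuild nested lists/tuples, preserving the container type.
    if isinstance(seq, (list, tuple)):
        return type(seq)(replace_nested(item, oldval, newval) for item in seq)
    return newval if seq == oldval else seq

assert replace_nested([1, (2, 1), [1]], 1, 9) == [9, (2, 9), [9]]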
bxlab/bx-python
|
lib/bx_extras/pstat.py
|
duplicates
|
def duplicates(inlist):
"""
Returns duplicate items in the FIRST dimension of the passed list.
Usage: duplicates (inlist)
"""
dups = []
for i in range(len(inlist)):
if inlist[i] in inlist[i+1:]:
dups.append(inlist[i])
return dups
|
python
|
def duplicates(inlist):
"""
Returns duplicate items in the FIRST dimension of the passed list.
Usage: duplicates (inlist)
"""
dups = []
for i in range(len(inlist)):
if inlist[i] in inlist[i+1:]:
dups.append(inlist[i])
return dups
|
[
"def",
"duplicates",
"(",
"inlist",
")",
":",
"dups",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"inlist",
")",
")",
":",
"if",
"inlist",
"[",
"i",
"]",
"in",
"inlist",
"[",
"i",
"+",
"1",
":",
"]",
":",
"dups",
".",
"append",
"(",
"inlist",
"[",
"i",
"]",
")",
"return",
"dups"
] |
Returns duplicate items in the FIRST dimension of the passed list.
Usage: duplicates (inlist)
|
[
"Returns",
"duplicate",
"items",
"in",
"the",
"FIRST",
"dimension",
"of",
"the",
"passed",
"list",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/pstat.py#L676-L686
|
train
|
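duplicates rescans inlist[i+1:] at every index, which is quadratic, and it reports a value duplicated k times k-1 times. For hashable elements, collections.Counter finds the same duplicated values in linear time, reporting each once:

from collections import Counter

def duplicate_values(items):
    # Values occurring more than once, each reported a single time.
    return [value for value, count in Counter(items).items() if count > 1]

assert duplicate_values([1, 2, 1, 3, 1]) == [1]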
bxlab/bx-python
|
lib/bx_extras/pstat.py
|
nonrepeats
|
def nonrepeats(inlist):
"""
Returns items that are NOT duplicated in the first dim of the passed list.
Usage: nonrepeats (inlist)
"""
nonrepeats = []
for i in range(len(inlist)):
if inlist.count(inlist[i]) == 1:
nonrepeats.append(inlist[i])
return nonrepeats
|
python
|
def nonrepeats(inlist):
"""
Returns items that are NOT duplicated in the first dim of the passed list.
Usage: nonrepeats (inlist)
"""
nonrepeats = []
for i in range(len(inlist)):
if inlist.count(inlist[i]) == 1:
nonrepeats.append(inlist[i])
return nonrepeats
|
[
"def",
"nonrepeats",
"(",
"inlist",
")",
":",
"nonrepeats",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"inlist",
")",
")",
":",
"if",
"inlist",
".",
"count",
"(",
"inlist",
"[",
"i",
"]",
")",
"==",
"1",
":",
"nonrepeats",
".",
"append",
"(",
"inlist",
"[",
"i",
"]",
")",
"return",
"nonrepeats"
] |
Returns items that are NOT duplicated in the first dim of the passed list.
Usage: nonrepeats (inlist)
|
[
"Returns",
"items",
"that",
"are",
"NOT",
"duplicated",
"in",
"the",
"first",
"dim",
"of",
"the",
"passed",
"list",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/pstat.py#L689-L699
|
train
|
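nonrepeats is the complement of duplicates and pays an O(n) count per element; the same Counter approach makes it linear while preserving input order (again assuming hashable elements):

from collections import Counter

def non_repeats(items):
    counts = Counter(items)
    return [item for item in items if counts[item] == 1]

assert non_repeats([1, 2, 1, 3]) == [2, 3]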
bxlab/bx-python
|
lib/bx_extras/stats.py
|
lz
|
def lz (inlist, score):
"""
Returns the z-score for a given input score, given that score and the
list from which that score came. Not appropriate for population calculations.
Usage: lz(inlist, score)
"""
z = (score-mean(inlist))/samplestdev(inlist)
return z
|
python
|
def lz (inlist, score):
"""
Returns the z-score for a given input score, given that score and the
list from which that score came. Not appropriate for population calculations.
Usage: lz(inlist, score)
"""
z = (score-mean(inlist))/samplestdev(inlist)
return z
|
[
"def",
"lz",
"(",
"inlist",
",",
"score",
")",
":",
"z",
"=",
"(",
"score",
"-",
"mean",
"(",
"inlist",
")",
")",
"/",
"samplestdev",
"(",
"inlist",
")",
"return",
"z"
] |
Returns the z-score for a given input score, given that score and the
list from which that score came. Not appropriate for population calculations.
Usage: lz(inlist, score)
|
[
"Returns",
"the",
"z",
"-",
"score",
"for",
"a",
"given",
"input",
"score",
"given",
"that",
"score",
"and",
"the",
"list",
"from",
"which",
"that",
"score",
"came",
".",
"Not",
"appropriate",
"for",
"population",
"calculations",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L681-L689
|
train
|
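lz standardizes a score against the mean and standard deviation of the list it came from. Whether samplestdev here divides by n (statistics.pstdev) or n-1 (statistics.stdev) is not visible in this record, so treat the pstdev choice below as an assumption:

import statistics

def z_score(values, score):
    # Assumption: samplestdev is the n-denominator form (pstdev);
    # substitute statistics.stdev if it is the n-1 form instead.
    return (score - statistics.mean(values)) / statistics.pstdev(values)

assert z_score([2, 4, 4, 4, 5, 5, 7, 9], 9) == 2.0  # mean 5, pstdev 2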
bxlab/bx-python
|
lib/bx_extras/stats.py
|
llinregress
|
def llinregress(x,y):
"""
Calculates a regression line on x,y pairs.
Usage: llinregress(x,y) x,y are equal-length lists of x-y coordinates
Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
"""
TINY = 1.0e-20
if len(x) != len(y):
raise ValueError('Input values not paired in linregress. Aborting.')
n = len(x)
x = [float(_) for _ in x]
y = [float(_) for _ in y]
xmean = mean(x)
ymean = mean(y)
r_num = float(n*(summult(x,y)) - sum(x)*sum(y))
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = r_num / r_den
z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df,0.5,df/(df+t*t))
slope = r_num / float(n*ss(x) - square_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*samplestdev(y)
return slope, intercept, r, prob, sterrest
|
python
|
def llinregress(x,y):
"""
Calculates a regression line on x,y pairs.
Usage: llinregress(x,y) x,y are equal-length lists of x-y coordinates
Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
"""
TINY = 1.0e-20
if len(x) != len(y):
raise ValueError('Input values not paired in linregress. Aborting.')
n = len(x)
x = [float(_) for _ in x]
y = [float(_) for _ in y]
xmean = mean(x)
ymean = mean(y)
r_num = float(n*(summult(x,y)) - sum(x)*sum(y))
r_den = math.sqrt((n*ss(x) - square_of_sums(x))*(n*ss(y)-square_of_sums(y)))
r = r_num / r_den
z = 0.5*math.log((1.0+r+TINY)/(1.0-r+TINY))
df = n-2
t = r*math.sqrt(df/((1.0-r+TINY)*(1.0+r+TINY)))
prob = betai(0.5*df,0.5,df/(df+t*t))
slope = r_num / float(n*ss(x) - square_of_sums(x))
intercept = ymean - slope*xmean
sterrest = math.sqrt(1-r*r)*samplestdev(y)
return slope, intercept, r, prob, sterrest
|
[
"def",
"llinregress",
"(",
"x",
",",
"y",
")",
":",
"TINY",
"=",
"1.0e-20",
"if",
"len",
"(",
"x",
")",
"!=",
"len",
"(",
"y",
")",
":",
"raise",
"ValueError",
"(",
"'Input values not paired in linregress. Aborting.'",
")",
"n",
"=",
"len",
"(",
"x",
")",
"x",
"=",
"[",
"float",
"(",
"_",
")",
"for",
"_",
"in",
"x",
"]",
"y",
"=",
"[",
"float",
"(",
"_",
")",
"for",
"_",
"in",
"y",
"]",
"xmean",
"=",
"mean",
"(",
"x",
")",
"ymean",
"=",
"mean",
"(",
"y",
")",
"r_num",
"=",
"float",
"(",
"n",
"*",
"(",
"summult",
"(",
"x",
",",
"y",
")",
")",
"-",
"sum",
"(",
"x",
")",
"*",
"sum",
"(",
"y",
")",
")",
"r_den",
"=",
"math",
".",
"sqrt",
"(",
"(",
"n",
"*",
"ss",
"(",
"x",
")",
"-",
"square_of_sums",
"(",
"x",
")",
")",
"*",
"(",
"n",
"*",
"ss",
"(",
"y",
")",
"-",
"square_of_sums",
"(",
"y",
")",
")",
")",
"r",
"=",
"r_num",
"/",
"r_den",
"z",
"=",
"0.5",
"*",
"math",
".",
"log",
"(",
"(",
"1.0",
"+",
"r",
"+",
"TINY",
")",
"/",
"(",
"1.0",
"-",
"r",
"+",
"TINY",
")",
")",
"df",
"=",
"n",
"-",
"2",
"t",
"=",
"r",
"*",
"math",
".",
"sqrt",
"(",
"df",
"/",
"(",
"(",
"1.0",
"-",
"r",
"+",
"TINY",
")",
"*",
"(",
"1.0",
"+",
"r",
"+",
"TINY",
")",
")",
")",
"prob",
"=",
"betai",
"(",
"0.5",
"*",
"df",
",",
"0.5",
",",
"df",
"/",
"(",
"df",
"+",
"t",
"*",
"t",
")",
")",
"slope",
"=",
"r_num",
"/",
"float",
"(",
"n",
"*",
"ss",
"(",
"x",
")",
"-",
"square_of_sums",
"(",
"x",
")",
")",
"intercept",
"=",
"ymean",
"-",
"slope",
"*",
"xmean",
"sterrest",
"=",
"math",
".",
"sqrt",
"(",
"1",
"-",
"r",
"*",
"r",
")",
"*",
"samplestdev",
"(",
"y",
")",
"return",
"slope",
",",
"intercept",
",",
"r",
",",
"prob",
",",
"sterrest"
] |
Calculates a regression line on x,y pairs.
Usage: llinregress(x,y) x,y are equal-length lists of x-y coordinates
Returns: slope, intercept, r, two-tailed prob, sterr-of-estimate
|
[
"Calculates",
"a",
"regression",
"line",
"on",
"x",
"y",
"pairs",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L928-L953
|
train
|
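The slope in llinregress is the classic raw-sums form of least squares, (n*sum(xy) - sum(x)*sum(y)) / (n*sum(x^2) - sum(x)^2), with the intercept recovered from the means. A self-contained check of just the slope/intercept part (r, the p-value, and the standard error of estimate are omitted):

def ols_line(x, y):
    # slope = (n*sum(xy) - sum(x)*sum(y)) / (n*sum(x^2) - sum(x)**2)
    n = len(x)
    sx, sy = sum(x), sum(y)
    sxy = sum(a * b for a, b in zip(x, y))
    sxx = sum(a * a for a in x)
    slope = (n * sxy - sx * sy) / (n * sxx - sx * sx)
    return slope, sy / n - slope * sx / n

assert ols_line([0, 1, 2], [1, 3, 5]) == (2.0, 1.0)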
bxlab/bx-python
|
lib/bx_extras/stats.py
|
lks_2samp
|
def lks_2samp (data1,data2):
"""
    Computes the Kolmogorov-Smirnov statistic on 2 samples. From
    Numerical Recipes in C, page 493.
Usage: lks_2samp(data1,data2) data1&2 are lists of values for 2 conditions
Returns: KS D-value, associated p-value
"""
j1 = 0
j2 = 0
fn1 = 0.0
fn2 = 0.0
n1 = len(data1)
n2 = len(data2)
en1 = n1
en2 = n2
d = 0.0
data1.sort()
data2.sort()
while j1 < n1 and j2 < n2:
d1=data1[j1]
d2=data2[j2]
if d1 <= d2:
fn1 = (j1)/float(en1)
j1 = j1 + 1
if d2 <= d1:
fn2 = (j2)/float(en2)
j2 = j2 + 1
dt = (fn2-fn1)
if math.fabs(dt) > math.fabs(d):
d = dt
try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = ksprob((en+0.12+0.11/en)*abs(d))
except:
prob = 1.0
return d, prob
|
python
|
def lks_2samp (data1,data2):
"""
    Computes the Kolmogorov-Smirnov statistic on 2 samples. From
    Numerical Recipes in C, page 493.
Usage: lks_2samp(data1,data2) data1&2 are lists of values for 2 conditions
Returns: KS D-value, associated p-value
"""
j1 = 0
j2 = 0
fn1 = 0.0
fn2 = 0.0
n1 = len(data1)
n2 = len(data2)
en1 = n1
en2 = n2
d = 0.0
data1.sort()
data2.sort()
while j1 < n1 and j2 < n2:
d1=data1[j1]
d2=data2[j2]
if d1 <= d2:
fn1 = (j1)/float(en1)
j1 = j1 + 1
if d2 <= d1:
fn2 = (j2)/float(en2)
j2 = j2 + 1
dt = (fn2-fn1)
if math.fabs(dt) > math.fabs(d):
d = dt
try:
en = math.sqrt(en1*en2/float(en1+en2))
prob = ksprob((en+0.12+0.11/en)*abs(d))
except:
prob = 1.0
return d, prob
|
[
"def",
"lks_2samp",
"(",
"data1",
",",
"data2",
")",
":",
"j1",
"=",
"0",
"j2",
"=",
"0",
"fn1",
"=",
"0.0",
"fn2",
"=",
"0.0",
"n1",
"=",
"len",
"(",
"data1",
")",
"n2",
"=",
"len",
"(",
"data2",
")",
"en1",
"=",
"n1",
"en2",
"=",
"n2",
"d",
"=",
"0.0",
"data1",
".",
"sort",
"(",
")",
"data2",
".",
"sort",
"(",
")",
"while",
"j1",
"<",
"n1",
"and",
"j2",
"<",
"n2",
":",
"d1",
"=",
"data1",
"[",
"j1",
"]",
"d2",
"=",
"data2",
"[",
"j2",
"]",
"if",
"d1",
"<=",
"d2",
":",
"fn1",
"=",
"(",
"j1",
")",
"/",
"float",
"(",
"en1",
")",
"j1",
"=",
"j1",
"+",
"1",
"if",
"d2",
"<=",
"d1",
":",
"fn2",
"=",
"(",
"j2",
")",
"/",
"float",
"(",
"en2",
")",
"j2",
"=",
"j2",
"+",
"1",
"dt",
"=",
"(",
"fn2",
"-",
"fn1",
")",
"if",
"math",
".",
"fabs",
"(",
"dt",
")",
">",
"math",
".",
"fabs",
"(",
"d",
")",
":",
"d",
"=",
"dt",
"try",
":",
"en",
"=",
"math",
".",
"sqrt",
"(",
"en1",
"*",
"en2",
"/",
"float",
"(",
"en1",
"+",
"en2",
")",
")",
"prob",
"=",
"ksprob",
"(",
"(",
"en",
"+",
"0.12",
"+",
"0.11",
"/",
"en",
")",
"*",
"abs",
"(",
"d",
")",
")",
"except",
":",
"prob",
"=",
"1.0",
"return",
"d",
",",
"prob"
] |
Computes the Kolmogorov-Smirnov statistic on 2 samples. From
Numerical Recipes in C, page 493.
Usage: lks_2samp(data1,data2) data1&2 are lists of values for 2 conditions
Returns: KS D-value, associated p-value
|
[
"Computes",
"the",
"Kolmogorov",
"-",
"Smirnof",
"statistic",
"on",
"2",
"samples",
".",
"From",
"Numerical",
"Recipies",
"in",
"C",
"page",
"493",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L1072-L1108
|
train
|
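lks_2samp merges the two sorted samples in a single pass, tracking the largest gap between their empirical CDFs (the D statistic), then converts D to a significance via ksprob. A compact sketch of just D, evaluated directly over all observed points:

def ks_d_statistic(sample1, sample2):
    # Largest absolute difference between the two empirical CDFs.
    n1, n2 = len(sample1), len(sample2)
    def cdf(sample, n, t):
        return sum(v <= t for v in sample) / n
    return max(abs(cdf(sample1, n1, t) - cdf(sample2, n2, t))
               for t in set(sample1) | set(sample2))

assert ks_d_statistic([1, 2, 3], [4, 5, 6]) == 1.0  # fully separated samples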
bxlab/bx-python
|
lib/bx_extras/stats.py
|
lranksums
|
def lranksums(x,y):
"""
Calculates the rank sums statistic on the provided scores and
returns the result. Use only when the n in each condition is > 20 and you
have 2 independent samples of ranks.
Usage: lranksums(x,y)
Returns: a z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
alldata = x+y
ranked = rankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2*(1.0 -zprob(abs(z)))
return z, prob
|
python
|
def lranksums(x,y):
"""
Calculates the rank sums statistic on the provided scores and
returns the result. Use only when the n in each condition is > 20 and you
have 2 independent samples of ranks.
Usage: lranksums(x,y)
Returns: a z-statistic, two-tailed p-value
"""
n1 = len(x)
n2 = len(y)
alldata = x+y
ranked = rankdata(alldata)
x = ranked[:n1]
y = ranked[n1:]
s = sum(x)
expected = n1*(n1+n2+1) / 2.0
z = (s - expected) / math.sqrt(n1*n2*(n1+n2+1)/12.0)
prob = 2*(1.0 -zprob(abs(z)))
return z, prob
|
[
"def",
"lranksums",
"(",
"x",
",",
"y",
")",
":",
"n1",
"=",
"len",
"(",
"x",
")",
"n2",
"=",
"len",
"(",
"y",
")",
"alldata",
"=",
"x",
"+",
"y",
"ranked",
"=",
"rankdata",
"(",
"alldata",
")",
"x",
"=",
"ranked",
"[",
":",
"n1",
"]",
"y",
"=",
"ranked",
"[",
"n1",
":",
"]",
"s",
"=",
"sum",
"(",
"x",
")",
"expected",
"=",
"n1",
"*",
"(",
"n1",
"+",
"n2",
"+",
"1",
")",
"/",
"2.0",
"z",
"=",
"(",
"s",
"-",
"expected",
")",
"/",
"math",
".",
"sqrt",
"(",
"n1",
"*",
"n2",
"*",
"(",
"n1",
"+",
"n2",
"+",
"1",
")",
"/",
"12.0",
")",
"prob",
"=",
"2",
"*",
"(",
"1.0",
"-",
"zprob",
"(",
"abs",
"(",
"z",
")",
")",
")",
"return",
"z",
",",
"prob"
] |
Calculates the rank sums statistic on the provided scores and
returns the result. Use only when the n in each condition is > 20 and you
have 2 independent samples of ranks.
Usage: lranksums(x,y)
Returns: a z-statistic, two-tailed p-value
|
[
"Calculates",
"the",
"rank",
"sums",
"statistic",
"on",
"the",
"provided",
"scores",
"and",
"returns",
"the",
"result",
".",
"Use",
"only",
"when",
"the",
"n",
"in",
"each",
"condition",
"is",
">",
"20",
"and",
"you",
"have",
"2",
"independent",
"samples",
"of",
"ranks",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L1165-L1184
|
train
|
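lranksums compares the observed rank sum of the first sample against its null expectation n1*(n1+n2+1)/2 and normalizes by the exact null standard deviation sqrt(n1*n2*(n1+n2+1)/12). A sketch of that normal approximation with a naive ranking (no tie handling; rankdata in the record assigns midranks to ties):

import math

def rank_sum_z(x, y):
    pooled = sorted(x + y)
    # 1-based rank of each x value in the pooled sample (assumes no ties).
    s = sum(pooled.index(v) + 1 for v in x)
    n1, n2 = len(x), len(y)
    expected = n1 * (n1 + n2 + 1) / 2.0
    return (s - expected) / math.sqrt(n1 * n2 * (n1 + n2 + 1) / 12.0)

# rank_sum_z([10, 11, 12], [1, 2, 3]) > 0: all x ranks sit above all y ranks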
bxlab/bx-python
|
lib/bx_extras/stats.py
|
lkruskalwallish
|
def lkruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H-test for 3 or more independent samples
and returns the result.
Usage: lkruskalwallish(*args)
Returns: H-statistic (corrected for ties), associated p-value
"""
args = list(args)
n = [0]*len(args)
all = []
n = [len(_) for _ in args]
for i in range(len(args)):
all = all + args[i]
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError('All numbers are identical in lkruskalwallish')
h = h / float(T)
return h, chisqprob(h,df)
|
python
|
def lkruskalwallish(*args):
"""
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H-test for 3 or more independent samples
and returns the result.
Usage: lkruskalwallish(*args)
Returns: H-statistic (corrected for ties), associated p-value
"""
args = list(args)
n = [0]*len(args)
all = []
n = [len(_) for _ in args]
for i in range(len(args)):
all = all + args[i]
ranked = rankdata(all)
T = tiecorrect(ranked)
for i in range(len(args)):
args[i] = ranked[0:n[i]]
del ranked[0:n[i]]
rsums = []
for i in range(len(args)):
rsums.append(sum(args[i])**2)
rsums[i] = rsums[i] / float(n[i])
ssbn = sum(rsums)
totaln = sum(n)
h = 12.0 / (totaln*(totaln+1)) * ssbn - 3*(totaln+1)
df = len(args) - 1
if T == 0:
raise ValueError('All numbers are identical in lkruskalwallish')
h = h / float(T)
return h, chisqprob(h,df)
|
[
"def",
"lkruskalwallish",
"(",
"*",
"args",
")",
":",
"args",
"=",
"list",
"(",
"args",
")",
"n",
"=",
"[",
"0",
"]",
"*",
"len",
"(",
"args",
")",
"all",
"=",
"[",
"]",
"n",
"=",
"[",
"len",
"(",
"_",
")",
"for",
"_",
"in",
"args",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"args",
")",
")",
":",
"all",
"=",
"all",
"+",
"args",
"[",
"i",
"]",
"ranked",
"=",
"rankdata",
"(",
"all",
")",
"T",
"=",
"tiecorrect",
"(",
"ranked",
")",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"args",
")",
")",
":",
"args",
"[",
"i",
"]",
"=",
"ranked",
"[",
"0",
":",
"n",
"[",
"i",
"]",
"]",
"del",
"ranked",
"[",
"0",
":",
"n",
"[",
"i",
"]",
"]",
"rsums",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"args",
")",
")",
":",
"rsums",
".",
"append",
"(",
"sum",
"(",
"args",
"[",
"i",
"]",
")",
"**",
"2",
")",
"rsums",
"[",
"i",
"]",
"=",
"rsums",
"[",
"i",
"]",
"/",
"float",
"(",
"n",
"[",
"i",
"]",
")",
"ssbn",
"=",
"sum",
"(",
"rsums",
")",
"totaln",
"=",
"sum",
"(",
"n",
")",
"h",
"=",
"12.0",
"/",
"(",
"totaln",
"*",
"(",
"totaln",
"+",
"1",
")",
")",
"*",
"ssbn",
"-",
"3",
"*",
"(",
"totaln",
"+",
"1",
")",
"df",
"=",
"len",
"(",
"args",
")",
"-",
"1",
"if",
"T",
"==",
"0",
":",
"raise",
"ValueError",
"(",
"'All numbers are identical in lkruskalwallish'",
")",
"h",
"=",
"h",
"/",
"float",
"(",
"T",
")",
"return",
"h",
",",
"chisqprob",
"(",
"h",
",",
"df",
")"
] |
The Kruskal-Wallis H-test is a non-parametric ANOVA for 3 or more
groups, requiring at least 5 subjects in each group. This function
calculates the Kruskal-Wallis H-test for 3 or more independent samples
and returns the result.
Usage: lkruskalwallish(*args)
Returns: H-statistic (corrected for ties), associated p-value
|
[
"The",
"Kruskal",
"-",
"Wallis",
"H",
"-",
"test",
"is",
"a",
"non",
"-",
"parametric",
"ANOVA",
"for",
"3",
"or",
"more",
"groups",
"requiring",
"at",
"least",
"5",
"subjects",
"in",
"each",
"group",
".",
"This",
"function",
"calculates",
"the",
"Kruskal",
"-",
"Wallis",
"H",
"-",
"test",
"for",
"3",
"or",
"more",
"independent",
"samples",
"and",
"returns",
"the",
"result",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L1220-L1252
|
train
|
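The H statistic assembled above is 12/(N(N+1)) * sum_i(R_i^2 / n_i) - 3(N+1), where R_i is the rank sum of group i and N the pooled size, afterwards divided by the tie correction T. A sketch of the uncorrected H, assuming no tied values:

def kruskal_h(*groups):
    # Uncorrected Kruskal-Wallis H for tie-free data.
    pooled = sorted(v for g in groups for v in g)
    rank = {v: i + 1 for i, v in enumerate(pooled)}
    total_n = len(pooled)
    ssbn = sum(sum(rank[v] for v in g) ** 2 / len(g) for g in groups)
    return 12.0 / (total_n * (total_n + 1)) * ssbn - 3 * (total_n + 1)

# kruskal_h([1, 2], [3, 4], [5, 6]) ~ 4.57 for three fully ordered groups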
bxlab/bx-python
|
lib/bx_extras/stats.py
|
lksprob
|
def lksprob(alam):
"""
    Computes a Kolmogorov-Smirnov t-test significance level. Adapted from
    Numerical Recipes.
Usage: lksprob(alam)
"""
fac = 2.0
sum = 0.0
termbf = 0.0
a2 = -2.0*alam*alam
for j in range(1,201):
term = fac*math.exp(a2*j*j)
sum = sum + term
if math.fabs(term) <= (0.001*termbf) or math.fabs(term) < (1.0e-8*sum):
return sum
fac = -fac
termbf = math.fabs(term)
return 1.0
|
python
|
def lksprob(alam):
"""
    Computes a Kolmogorov-Smirnov t-test significance level. Adapted from
    Numerical Recipes.
Usage: lksprob(alam)
"""
fac = 2.0
sum = 0.0
termbf = 0.0
a2 = -2.0*alam*alam
for j in range(1,201):
term = fac*math.exp(a2*j*j)
sum = sum + term
if math.fabs(term) <= (0.001*termbf) or math.fabs(term) < (1.0e-8*sum):
return sum
fac = -fac
termbf = math.fabs(term)
return 1.0
|
[
"def",
"lksprob",
"(",
"alam",
")",
":",
"fac",
"=",
"2.0",
"sum",
"=",
"0.0",
"termbf",
"=",
"0.0",
"a2",
"=",
"-",
"2.0",
"*",
"alam",
"*",
"alam",
"for",
"j",
"in",
"range",
"(",
"1",
",",
"201",
")",
":",
"term",
"=",
"fac",
"*",
"math",
".",
"exp",
"(",
"a2",
"*",
"j",
"*",
"j",
")",
"sum",
"=",
"sum",
"+",
"term",
"if",
"math",
".",
"fabs",
"(",
"term",
")",
"<=",
"(",
"0.001",
"*",
"termbf",
")",
"or",
"math",
".",
"fabs",
"(",
"term",
")",
"<",
"(",
"1.0e-8",
"*",
"sum",
")",
":",
"return",
"sum",
"fac",
"=",
"-",
"fac",
"termbf",
"=",
"math",
".",
"fabs",
"(",
"term",
")",
"return",
"1.0"
] |
Computes a Kolmogorov-Smirnov t-test significance level. Adapted from
Numerical Recipes.
Usage: lksprob(alam)
|
[
"Computes",
"a",
"Kolmolgorov",
"-",
"Smirnov",
"t",
"-",
"test",
"significance",
"level",
".",
"Adapted",
"from",
"Numerical",
"Recipies",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L1403-L1421
|
train
|
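lksprob sums the asymptotic Kolmogorov-Smirnov tail series Q(lam) = 2 * sum_{j>=1} (-1)^(j-1) * exp(-2 j^2 lam^2), with an early exit once terms become negligible. A direct sketch of the same series with a fixed term count:

import math

def ks_prob(lam, terms=100):
    # Q(lam) = 2 * sum over j >= 1 of (-1)**(j-1) * exp(-2 * j*j * lam*lam)
    return sum(2.0 * (-1) ** (j - 1) * math.exp(-2.0 * j * j * lam * lam)
               for j in range(1, terms + 1))

# ks_prob(0.5) ~ 0.96; the tail probability falls toward 0 as lam grows.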
bxlab/bx-python
|
lib/bx_extras/stats.py
|
outputpairedstats
|
def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):
"""
    Prints or writes to a file stats for two groups, using the name, n,
mean, sterr, min and max for each group, as well as the statistic name,
its value, and the associated p-value.
Usage: outputpairedstats(fname,writemode,
name1,n1,mean1,stderr1,min1,max1,
name2,n2,mean2,stderr2,min2,max2,
statname,stat,prob)
Returns: None
"""
suffix = '' # for *s after the p-value
try:
x = prob.shape
prob = prob[0]
except:
pass
if prob < 0.001: suffix = ' ***'
elif prob < 0.01: suffix = ' **'
elif prob < 0.05: suffix = ' *'
title = [['Name','N','Mean','SD','Min','Max']]
lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
[name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
if type(fname)!=StringType or len(fname)==0:
print()
print(statname)
print()
pstat.printcc(lofl)
print()
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except:
pass
print('Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix)
print()
else:
file = open(fname,writemode)
file.write('\n'+statname+'\n\n')
file.close()
writecc(lofl,fname,'a')
file = open(fname,'a')
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except:
pass
file.write(pstat.list2string(['\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\n\n']))
file.close()
return None
|
python
|
def outputpairedstats(fname,writemode,name1,n1,m1,se1,min1,max1,name2,n2,m2,se2,min2,max2,statname,stat,prob):
"""
    Prints or writes to a file stats for two groups, using the name, n,
mean, sterr, min and max for each group, as well as the statistic name,
its value, and the associated p-value.
Usage: outputpairedstats(fname,writemode,
name1,n1,mean1,stderr1,min1,max1,
name2,n2,mean2,stderr2,min2,max2,
statname,stat,prob)
Returns: None
"""
suffix = '' # for *s after the p-value
try:
x = prob.shape
prob = prob[0]
except:
pass
if prob < 0.001: suffix = ' ***'
elif prob < 0.01: suffix = ' **'
elif prob < 0.05: suffix = ' *'
title = [['Name','N','Mean','SD','Min','Max']]
lofl = title+[[name1,n1,round(m1,3),round(math.sqrt(se1),3),min1,max1],
[name2,n2,round(m2,3),round(math.sqrt(se2),3),min2,max2]]
if type(fname)!=StringType or len(fname)==0:
print()
print(statname)
print()
pstat.printcc(lofl)
print()
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except:
pass
print('Test statistic = ',round(stat,3),' p = ',round(prob,3),suffix)
print()
else:
file = open(fname,writemode)
file.write('\n'+statname+'\n\n')
file.close()
writecc(lofl,fname,'a')
file = open(fname,'a')
try:
if stat.shape == ():
stat = stat[0]
if prob.shape == ():
prob = prob[0]
except:
pass
file.write(pstat.list2string(['\nTest statistic = ',round(stat,4),' p = ',round(prob,4),suffix,'\n\n']))
file.close()
return None
|
[
"def",
"outputpairedstats",
"(",
"fname",
",",
"writemode",
",",
"name1",
",",
"n1",
",",
"m1",
",",
"se1",
",",
"min1",
",",
"max1",
",",
"name2",
",",
"n2",
",",
"m2",
",",
"se2",
",",
"min2",
",",
"max2",
",",
"statname",
",",
"stat",
",",
"prob",
")",
":",
"suffix",
"=",
"''",
"# for *s after the p-value",
"try",
":",
"x",
"=",
"prob",
".",
"shape",
"prob",
"=",
"prob",
"[",
"0",
"]",
"except",
":",
"pass",
"if",
"prob",
"<",
"0.001",
":",
"suffix",
"=",
"' ***'",
"elif",
"prob",
"<",
"0.01",
":",
"suffix",
"=",
"' **'",
"elif",
"prob",
"<",
"0.05",
":",
"suffix",
"=",
"' *'",
"title",
"=",
"[",
"[",
"'Name'",
",",
"'N'",
",",
"'Mean'",
",",
"'SD'",
",",
"'Min'",
",",
"'Max'",
"]",
"]",
"lofl",
"=",
"title",
"+",
"[",
"[",
"name1",
",",
"n1",
",",
"round",
"(",
"m1",
",",
"3",
")",
",",
"round",
"(",
"math",
".",
"sqrt",
"(",
"se1",
")",
",",
"3",
")",
",",
"min1",
",",
"max1",
"]",
",",
"[",
"name2",
",",
"n2",
",",
"round",
"(",
"m2",
",",
"3",
")",
",",
"round",
"(",
"math",
".",
"sqrt",
"(",
"se2",
")",
",",
"3",
")",
",",
"min2",
",",
"max2",
"]",
"]",
"if",
"type",
"(",
"fname",
")",
"!=",
"StringType",
"or",
"len",
"(",
"fname",
")",
"==",
"0",
":",
"print",
"(",
")",
"print",
"(",
"statname",
")",
"print",
"(",
")",
"pstat",
".",
"printcc",
"(",
"lofl",
")",
"print",
"(",
")",
"try",
":",
"if",
"stat",
".",
"shape",
"==",
"(",
")",
":",
"stat",
"=",
"stat",
"[",
"0",
"]",
"if",
"prob",
".",
"shape",
"==",
"(",
")",
":",
"prob",
"=",
"prob",
"[",
"0",
"]",
"except",
":",
"pass",
"print",
"(",
"'Test statistic = '",
",",
"round",
"(",
"stat",
",",
"3",
")",
",",
"' p = '",
",",
"round",
"(",
"prob",
",",
"3",
")",
",",
"suffix",
")",
"print",
"(",
")",
"else",
":",
"file",
"=",
"open",
"(",
"fname",
",",
"writemode",
")",
"file",
".",
"write",
"(",
"'\\n'",
"+",
"statname",
"+",
"'\\n\\n'",
")",
"file",
".",
"close",
"(",
")",
"writecc",
"(",
"lofl",
",",
"fname",
",",
"'a'",
")",
"file",
"=",
"open",
"(",
"fname",
",",
"'a'",
")",
"try",
":",
"if",
"stat",
".",
"shape",
"==",
"(",
")",
":",
"stat",
"=",
"stat",
"[",
"0",
"]",
"if",
"prob",
".",
"shape",
"==",
"(",
")",
":",
"prob",
"=",
"prob",
"[",
"0",
"]",
"except",
":",
"pass",
"file",
".",
"write",
"(",
"pstat",
".",
"list2string",
"(",
"[",
"'\\nTest statistic = '",
",",
"round",
"(",
"stat",
",",
"4",
")",
",",
"' p = '",
",",
"round",
"(",
"prob",
",",
"4",
")",
",",
"suffix",
",",
"'\\n\\n'",
"]",
")",
")",
"file",
".",
"close",
"(",
")",
"return",
"None"
] |
Prints or writes to a file stats for two groups, using the name, n,
mean, sterr, min and max for each group, as well as the statistic name,
its value, and the associated p-value.
Usage: outputpairedstats(fname,writemode,
name1,n1,mean1,stderr1,min1,max1,
name2,n2,mean2,stderr2,min2,max2,
statname,stat,prob)
Returns: None
|
[
"Prints",
"or",
"write",
"to",
"a",
"file",
"stats",
"for",
"two",
"groups",
"using",
"the",
"name",
"n",
"mean",
"sterr",
"min",
"and",
"max",
"for",
"each",
"group",
"as",
"well",
"as",
"the",
"statistic",
"name",
"its",
"value",
"and",
"the",
"associated",
"p",
"-",
"value",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx_extras/stats.py#L1762-L1816
|
train
|
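The suffix logic in outputpairedstats encodes the conventional significance stars. Isolated as a tiny helper:

def significance_stars(p):
    # Conventional annotation: *** p<.001, ** p<.01, * p<.05.
    if p < 0.001:
        return ' ***'
    if p < 0.01:
        return ' **'
    if p < 0.05:
        return ' *'
    return ''

assert significance_stars(0.03) == ' *'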
bxlab/bx-python
|
lib/bx/gene_reader.py
|
GeneReader
|
def GeneReader( fh, format='gff' ):
""" yield chrom, strand, gene_exons, name """
known_formats = ( 'gff', 'gtf', 'bed')
if format not in known_formats:
print('%s format not in %s' % (format, ",".join( known_formats )), file=sys.stderr)
raise Exception('?')
if format == 'bed':
for line in fh:
f = line.strip().split()
chrom = f[0]
chrom_start = int(f[1])
name = f[4]
strand = f[5]
cdsStart = int(f[6])
cdsEnd = int(f[7])
blockCount = int(f[9])
blockSizes = [ int(i) for i in f[10].strip(',').split(',') ]
blockStarts = [ chrom_start + int(i) for i in f[11].strip(',').split(',') ]
# grab cdsStart - cdsEnd
gene_exons = []
for base,offset in zip( blockStarts, blockSizes ):
exon_start = base
exon_end = base+offset
gene_exons.append( (exon_start, exon_end) )
yield chrom, strand, gene_exons, name
genelist = {}
grouplist = []
if format == 'gff' or format == 'gtf':
for line in fh:
if line.startswith('#'): continue
fields = line.strip().split('\t')
if len( fields ) < 9: continue
# fields
chrom = fields[0]
ex_st = int( fields[3] ) - 1 # make zero-centered
ex_end = int( fields[4] ) #+ 1 # make exclusive
strand = fields[6]
if format == 'gtf':
group = fields[8].split(';')[0]
else:
group = fields[8]
if group not in grouplist: grouplist.append( group )
if group not in genelist:
genelist[group] = (chrom, strand, [])
exons_i = 2
genelist[group][exons_i].append( ( ex_st, ex_end ) )
sp = lambda a,b: cmp( a[0], b[0] )
#for gene in genelist.values():
for gene in grouplist:
chrom, strand, gene_exons = genelist[ gene ]
gene_exons = bitset_union( gene_exons )
yield chrom, strand, gene_exons, gene
|
python
|
def GeneReader( fh, format='gff' ):
""" yield chrom, strand, gene_exons, name """
known_formats = ( 'gff', 'gtf', 'bed')
if format not in known_formats:
print('%s format not in %s' % (format, ",".join( known_formats )), file=sys.stderr)
raise Exception('?')
if format == 'bed':
for line in fh:
f = line.strip().split()
chrom = f[0]
chrom_start = int(f[1])
name = f[4]
strand = f[5]
cdsStart = int(f[6])
cdsEnd = int(f[7])
blockCount = int(f[9])
blockSizes = [ int(i) for i in f[10].strip(',').split(',') ]
blockStarts = [ chrom_start + int(i) for i in f[11].strip(',').split(',') ]
# grab cdsStart - cdsEnd
gene_exons = []
for base,offset in zip( blockStarts, blockSizes ):
exon_start = base
exon_end = base+offset
gene_exons.append( (exon_start, exon_end) )
yield chrom, strand, gene_exons, name
genelist = {}
grouplist = []
if format == 'gff' or format == 'gtf':
for line in fh:
if line.startswith('#'): continue
fields = line.strip().split('\t')
if len( fields ) < 9: continue
# fields
chrom = fields[0]
ex_st = int( fields[3] ) - 1 # make zero-centered
ex_end = int( fields[4] ) #+ 1 # make exclusive
strand = fields[6]
if format == 'gtf':
group = fields[8].split(';')[0]
else:
group = fields[8]
if group not in grouplist: grouplist.append( group )
if group not in genelist:
genelist[group] = (chrom, strand, [])
exons_i = 2
genelist[group][exons_i].append( ( ex_st, ex_end ) )
sp = lambda a,b: cmp( a[0], b[0] )
#for gene in genelist.values():
for gene in grouplist:
chrom, strand, gene_exons = genelist[ gene ]
gene_exons = bitset_union( gene_exons )
yield chrom, strand, gene_exons, gene
|
[
"def",
"GeneReader",
"(",
"fh",
",",
"format",
"=",
"'gff'",
")",
":",
"known_formats",
"=",
"(",
"'gff'",
",",
"'gtf'",
",",
"'bed'",
")",
"if",
"format",
"not",
"in",
"known_formats",
":",
"print",
"(",
"'%s format not in %s'",
"%",
"(",
"format",
",",
"\",\"",
".",
"join",
"(",
"known_formats",
")",
")",
",",
"file",
"=",
"sys",
".",
"stderr",
")",
"raise",
"Exception",
"(",
"'?'",
")",
"if",
"format",
"==",
"'bed'",
":",
"for",
"line",
"in",
"fh",
":",
"f",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
")",
"chrom",
"=",
"f",
"[",
"0",
"]",
"chrom_start",
"=",
"int",
"(",
"f",
"[",
"1",
"]",
")",
"name",
"=",
"f",
"[",
"4",
"]",
"strand",
"=",
"f",
"[",
"5",
"]",
"cdsStart",
"=",
"int",
"(",
"f",
"[",
"6",
"]",
")",
"cdsEnd",
"=",
"int",
"(",
"f",
"[",
"7",
"]",
")",
"blockCount",
"=",
"int",
"(",
"f",
"[",
"9",
"]",
")",
"blockSizes",
"=",
"[",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"f",
"[",
"10",
"]",
".",
"strip",
"(",
"','",
")",
".",
"split",
"(",
"','",
")",
"]",
"blockStarts",
"=",
"[",
"chrom_start",
"+",
"int",
"(",
"i",
")",
"for",
"i",
"in",
"f",
"[",
"11",
"]",
".",
"strip",
"(",
"','",
")",
".",
"split",
"(",
"','",
")",
"]",
"# grab cdsStart - cdsEnd",
"gene_exons",
"=",
"[",
"]",
"for",
"base",
",",
"offset",
"in",
"zip",
"(",
"blockStarts",
",",
"blockSizes",
")",
":",
"exon_start",
"=",
"base",
"exon_end",
"=",
"base",
"+",
"offset",
"gene_exons",
".",
"append",
"(",
"(",
"exon_start",
",",
"exon_end",
")",
")",
"yield",
"chrom",
",",
"strand",
",",
"gene_exons",
",",
"name",
"genelist",
"=",
"{",
"}",
"grouplist",
"=",
"[",
"]",
"if",
"format",
"==",
"'gff'",
"or",
"format",
"==",
"'gtf'",
":",
"for",
"line",
"in",
"fh",
":",
"if",
"line",
".",
"startswith",
"(",
"'#'",
")",
":",
"continue",
"fields",
"=",
"line",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'\\t'",
")",
"if",
"len",
"(",
"fields",
")",
"<",
"9",
":",
"continue",
"# fields",
"chrom",
"=",
"fields",
"[",
"0",
"]",
"ex_st",
"=",
"int",
"(",
"fields",
"[",
"3",
"]",
")",
"-",
"1",
"# make zero-centered",
"ex_end",
"=",
"int",
"(",
"fields",
"[",
"4",
"]",
")",
"#+ 1 # make exclusive",
"strand",
"=",
"fields",
"[",
"6",
"]",
"if",
"format",
"==",
"'gtf'",
":",
"group",
"=",
"fields",
"[",
"8",
"]",
".",
"split",
"(",
"';'",
")",
"[",
"0",
"]",
"else",
":",
"group",
"=",
"fields",
"[",
"8",
"]",
"if",
"group",
"not",
"in",
"grouplist",
":",
"grouplist",
".",
"append",
"(",
"group",
")",
"if",
"group",
"not",
"in",
"genelist",
":",
"genelist",
"[",
"group",
"]",
"=",
"(",
"chrom",
",",
"strand",
",",
"[",
"]",
")",
"exons_i",
"=",
"2",
"genelist",
"[",
"group",
"]",
"[",
"exons_i",
"]",
".",
"append",
"(",
"(",
"ex_st",
",",
"ex_end",
")",
")",
"sp",
"=",
"lambda",
"a",
",",
"b",
":",
"cmp",
"(",
"a",
"[",
"0",
"]",
",",
"b",
"[",
"0",
"]",
")",
"#for gene in genelist.values():",
"for",
"gene",
"in",
"grouplist",
":",
"chrom",
",",
"strand",
",",
"gene_exons",
"=",
"genelist",
"[",
"gene",
"]",
"gene_exons",
"=",
"bitset_union",
"(",
"gene_exons",
")",
"yield",
"chrom",
",",
"strand",
",",
"gene_exons",
",",
"gene"
] |
yield chrom, strand, gene_exons, name
|
[
"yield",
"chrom",
"strand",
"gene_exons",
"name"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/gene_reader.py#L19-L79
|
train
|
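In the BED branch of GeneReader, exon intervals come from pairing blockStarts (offsets added to chrom_start) with blockSizes. A self-contained sketch of that pairing on one line, using the same column indexing as the record (name in column 5, strand in column 6):

def bed_exons(line):
    # Pair per-block starts (already shifted by chrom_start) with sizes.
    f = line.strip().split()
    chrom, chrom_start = f[0], int(f[1])
    sizes = [int(s) for s in f[10].strip(',').split(',')]
    starts = [chrom_start + int(s) for s in f[11].strip(',').split(',')]
    return chrom, [(s, s + size) for s, size in zip(starts, sizes)]

line = 'chr1 100 300 gene1 g1 + 100 300 0 2 50,60, 0,140,'
assert bed_exons(line) == ('chr1', [(100, 150), (240, 300)])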
bxlab/bx-python
|
lib/bx/seq/seq.py
|
SeqFile.get
|
def get(self, start, length):
"""
Fetch subsequence starting at position `start` with length `length`.
This method is picky about parameters, the requested interval must
have non-negative length and fit entirely inside the NIB sequence,
the returned string will contain exactly 'length' characters, or an
AssertionError will be generated.
"""
# Check parameters
assert length >= 0, "Length must be non-negative (got %d)" % length
    assert start >= 0,"Start must be non-negative (got %d)" % start
assert start + length <= self.length, \
"Interval beyond end of sequence (%s..%s > %s)" % ( start, start + length, self.length )
    # Fetch sequence and reverse complement if necessary
if not self.revcomp:
return self.raw_fetch( start, length )
if self.revcomp == "-3'":
return self.reverse_complement(self.raw_fetch(start,length))
assert self.revcomp == "-5'", "unrecognized reverse complement scheme"
start = self.length - (start+length)
return self.reverse_complement(self.raw_fetch(start,length))
|
python
|
def get(self, start, length):
"""
Fetch subsequence starting at position `start` with length `length`.
This method is picky about parameters, the requested interval must
have non-negative length and fit entirely inside the NIB sequence,
the returned string will contain exactly 'length' characters, or an
AssertionError will be generated.
"""
# Check parameters
assert length >= 0, "Length must be non-negative (got %d)" % length
    assert start >= 0,"Start must be non-negative (got %d)" % start
assert start + length <= self.length, \
"Interval beyond end of sequence (%s..%s > %s)" % ( start, start + length, self.length )
    # Fetch sequence and reverse complement if necessary
if not self.revcomp:
return self.raw_fetch( start, length )
if self.revcomp == "-3'":
return self.reverse_complement(self.raw_fetch(start,length))
assert self.revcomp == "-5'", "unrecognized reverse complement scheme"
start = self.length - (start+length)
return self.reverse_complement(self.raw_fetch(start,length))
|
[
"def",
"get",
"(",
"self",
",",
"start",
",",
"length",
")",
":",
"# Check parameters",
"assert",
"length",
">=",
"0",
",",
"\"Length must be non-negative (got %d)\"",
"%",
"length",
"assert",
"start",
">=",
"0",
",",
"\"Start must be greater than 0 (got %d)\"",
"%",
"start",
"assert",
"start",
"+",
"length",
"<=",
"self",
".",
"length",
",",
"\"Interval beyond end of sequence (%s..%s > %s)\"",
"%",
"(",
"start",
",",
"start",
"+",
"length",
",",
"self",
".",
"length",
")",
"# Fetch sequence and reverse complement if necesary",
"if",
"not",
"self",
".",
"revcomp",
":",
"return",
"self",
".",
"raw_fetch",
"(",
"start",
",",
"length",
")",
"if",
"self",
".",
"revcomp",
"==",
"\"-3'\"",
":",
"return",
"self",
".",
"reverse_complement",
"(",
"self",
".",
"raw_fetch",
"(",
"start",
",",
"length",
")",
")",
"assert",
"self",
".",
"revcomp",
"==",
"\"-5'\"",
",",
"\"unrecognized reverse complement scheme\"",
"start",
"=",
"self",
".",
"length",
"-",
"(",
"start",
"+",
"length",
")",
"return",
"self",
".",
"reverse_complement",
"(",
"self",
".",
"raw_fetch",
"(",
"start",
",",
"length",
")",
")"
] |
Fetch subsequence starting at position `start` with length `length`.
This method is picky about parameters, the requested interval must
have non-negative length and fit entirely inside the NIB sequence,
the returned string will contain exactly 'length' characters, or an
AssertionError will be generated.
|
[
"Fetch",
"subsequence",
"starting",
"at",
"position",
"start",
"with",
"length",
"length",
".",
"This",
"method",
"is",
"picky",
"about",
"parameters",
"the",
"requested",
"interval",
"must",
"have",
"non",
"-",
"negative",
"length",
"and",
"fit",
"entirely",
"inside",
"the",
"NIB",
"sequence",
"the",
"returned",
"string",
"will",
"contain",
"exactly",
"length",
"characters",
"or",
"an",
"AssertionError",
"will",
"be",
"generated",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/seq/seq.py#L74-L94
|
train
|
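The "-5'" branch of get remaps a request made against the reverse-complemented sequence onto forward-strand coordinates with start' = length_of_sequence - (start + length), then reverse-complements the fetched slice. A sketch of that flip on a plain string:

COMPLEMENT = str.maketrans('ACGTacgt', 'TGCAtgca')

def fetch_revcomp_5prime(seq, start, length):
    # Mirror the interval onto the forward strand, then reverse-complement.
    fwd_start = len(seq) - (start + length)
    return seq[fwd_start:fwd_start + length][::-1].translate(COMPLEMENT)

# The reverse complement of 'AACGT' is 'ACGTT'; its first two bases:
assert fetch_revcomp_5prime('AACGT', 0, 2) == 'AC'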
bxlab/bx-python
|
lib/bx/align/score.py
|
read_scoring_scheme
|
def read_scoring_scheme( f, gap_open, gap_extend, gap1="-", gap2=None, **kwargs ):
"""
    Initialize scoring scheme from a file containing a blastz style text blob.
f can be either a file or the name of a file.
"""
close_it = False
if (type(f) == str):
f = file(f,"rt")
close_it = True
ss = build_scoring_scheme("".join([line for line in f]),gap_open, gap_extend, gap1=gap1, gap2=gap2, **kwargs)
if (close_it):
f.close()
return ss
|
python
|
def read_scoring_scheme( f, gap_open, gap_extend, gap1="-", gap2=None, **kwargs ):
"""
    Initialize scoring scheme from a file containing a blastz style text blob.
f can be either a file or the name of a file.
"""
close_it = False
if (type(f) == str):
f = file(f,"rt")
close_it = True
ss = build_scoring_scheme("".join([line for line in f]),gap_open, gap_extend, gap1=gap1, gap2=gap2, **kwargs)
if (close_it):
f.close()
return ss
|
[
"def",
"read_scoring_scheme",
"(",
"f",
",",
"gap_open",
",",
"gap_extend",
",",
"gap1",
"=",
"\"-\"",
",",
"gap2",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"close_it",
"=",
"False",
"if",
"(",
"type",
"(",
"f",
")",
"==",
"str",
")",
":",
"f",
"=",
"file",
"(",
"f",
",",
"\"rt\"",
")",
"close_it",
"=",
"True",
"ss",
"=",
"build_scoring_scheme",
"(",
"\"\"",
".",
"join",
"(",
"[",
"line",
"for",
"line",
"in",
"f",
"]",
")",
",",
"gap_open",
",",
"gap_extend",
",",
"gap1",
"=",
"gap1",
",",
"gap2",
"=",
"gap2",
",",
"*",
"*",
"kwargs",
")",
"if",
"(",
"close_it",
")",
":",
"f",
".",
"close",
"(",
")",
"return",
"ss"
] |
Initialize scoring scheme from a file containing a blastz style text blob.
f can be either a file or the name of a file.
|
[
"Initialize",
"scoring",
"scheme",
"from",
"a",
"file",
"containint",
"a",
"blastz",
"style",
"text",
"blob",
".",
"f",
"can",
"be",
"either",
"a",
"file",
"or",
"the",
"name",
"of",
"a",
"file",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/score.py#L91-L103
|
train
|
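read_scoring_scheme calls the Python 2 builtin file(), which no longer exists in Python 3; open() inside a context manager replaces the manual open/close bookkeeping. A hedged sketch, with parse_scheme standing in for build_scoring_scheme (whose body is not part of this record):

def read_scheme(source, parse_scheme):
    # Accept either a path or an already-open file object, as above.
    if isinstance(source, str):
        with open(source, 'rt') as fh:
            return parse_scheme(fh.read())
    return parse_scheme(source.read())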
bxlab/bx-python
|
lib/bx/align/core.py
|
shuffle_columns
|
def shuffle_columns( a ):
"""Randomize the columns of an alignment"""
mask = range( a.text_size )
random.shuffle( mask )
for c in a.components:
c.text = ''.join( [ c.text[i] for i in mask ] )
|
python
|
def shuffle_columns( a ):
"""Randomize the columns of an alignment"""
mask = range( a.text_size )
random.shuffle( mask )
for c in a.components:
c.text = ''.join( [ c.text[i] for i in mask ] )
|
[
"def",
"shuffle_columns",
"(",
"a",
")",
":",
"mask",
"=",
"range",
"(",
"a",
".",
"text_size",
")",
"random",
".",
"shuffle",
"(",
"mask",
")",
"for",
"c",
"in",
"a",
".",
"components",
":",
"c",
".",
"text",
"=",
"''",
".",
"join",
"(",
"[",
"c",
".",
"text",
"[",
"i",
"]",
"for",
"i",
"in",
"mask",
"]",
")"
] |
Randomize the columns of an alignment
|
[
"Randomize",
"the",
"columns",
"of",
"an",
"alignment"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/core.py#L403-L408
|
train
|
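shuffle_columns hands a range to random.shuffle, which mutates its argument in place; that worked on Python 2's list-returning range but raises TypeError on Python 3. A Python 3-safe sketch of the same column permutation:

import random

def shuffled_columns(texts):
    # One random column permutation applied to every aligned string.
    mask = list(range(len(texts[0])))  # list(...) so shuffle can mutate it
    random.shuffle(mask)
    return [''.join(t[i] for i in mask) for t in texts]

# shuffled_columns(['ABC', 'abc']) keeps columns paired, e.g. ['CAB', 'cab']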
bxlab/bx-python
|
lib/bx/align/core.py
|
Alignment.slice_by_component
|
def slice_by_component( self, component_index, start, end ):
"""
    Return a slice of the alignment, corresponding to a coordinate interval in a specific component.
component_index is one of
an integer offset into the components list
a string indicating the src of the desired component
a component
start and end are relative to the + strand, regardless of the component's strand.
"""
if type( component_index ) == type( 0 ):
ref = self.components[ component_index ]
elif type( component_index ) == type( "" ):
ref = self.get_component_by_src( component_index )
elif type( component_index ) == Component:
ref = component_index
else:
raise ValueError( "can't figure out what to do" )
start_col = ref.coord_to_col( start )
end_col = ref.coord_to_col( end )
if (ref.strand == '-'):
(start_col,end_col) = (end_col,start_col)
return self.slice( start_col, end_col )
|
python
|
def slice_by_component( self, component_index, start, end ):
"""
    Return a slice of the alignment, corresponding to a coordinate interval in a specific component.
component_index is one of
an integer offset into the components list
a string indicating the src of the desired component
a component
start and end are relative to the + strand, regardless of the component's strand.
"""
if type( component_index ) == type( 0 ):
ref = self.components[ component_index ]
elif type( component_index ) == type( "" ):
ref = self.get_component_by_src( component_index )
elif type( component_index ) == Component:
ref = component_index
else:
raise ValueError( "can't figure out what to do" )
start_col = ref.coord_to_col( start )
end_col = ref.coord_to_col( end )
if (ref.strand == '-'):
(start_col,end_col) = (end_col,start_col)
return self.slice( start_col, end_col )
|
[
"def",
"slice_by_component",
"(",
"self",
",",
"component_index",
",",
"start",
",",
"end",
")",
":",
"if",
"type",
"(",
"component_index",
")",
"==",
"type",
"(",
"0",
")",
":",
"ref",
"=",
"self",
".",
"components",
"[",
"component_index",
"]",
"elif",
"type",
"(",
"component_index",
")",
"==",
"type",
"(",
"\"\"",
")",
":",
"ref",
"=",
"self",
".",
"get_component_by_src",
"(",
"component_index",
")",
"elif",
"type",
"(",
"component_index",
")",
"==",
"Component",
":",
"ref",
"=",
"component_index",
"else",
":",
"raise",
"ValueError",
"(",
"\"can't figure out what to do\"",
")",
"start_col",
"=",
"ref",
".",
"coord_to_col",
"(",
"start",
")",
"end_col",
"=",
"ref",
".",
"coord_to_col",
"(",
"end",
")",
"if",
"(",
"ref",
".",
"strand",
"==",
"'-'",
")",
":",
"(",
"start_col",
",",
"end_col",
")",
"=",
"(",
"end_col",
",",
"start_col",
")",
"return",
"self",
".",
"slice",
"(",
"start_col",
",",
"end_col",
")"
] |
Return a slice of the alignment, corresponding to a coordinate interval in a specific component.
component_index is one of
an integer offset into the components list
a string indicating the src of the desired component
a component
start and end are relative to the + strand, regardless of the component's strand.
|
[
"Return",
"a",
"slice",
"of",
"the",
"alignment",
"corresponding",
"to",
"an",
"coordinate",
"interval",
"in",
"a",
"specific",
"component",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/core.py#L122-L146
|
train
|
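slice_by_component (and slice_by_coord further below) converts forward-strand coordinates to alignment columns and swaps the endpoints for '-' strand components, because column order there runs opposite to forward coordinates. The swap in isolation:

def coord_cols(start_col, end_col, strand):
    # On the '-' strand the later forward coordinate maps to the earlier
    # column, so the slice endpoints must be exchanged.
    if strand == '-':
        start_col, end_col = end_col, start_col
    return start_col, end_col

assert coord_cols(7, 2, '-') == (2, 7)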
bxlab/bx-python
|
lib/bx/align/core.py
|
Alignment.remove_all_gap_columns
|
def remove_all_gap_columns( self ):
"""
Remove any columns containing only gaps from alignment components,
text of components is modified IN PLACE.
"""
seqs = []
for c in self.components:
try:
seqs.append( list( c.text ) )
except TypeError:
seqs.append( None )
i = 0
text_size = self.text_size
while i < text_size:
all_gap = True
for seq in seqs:
if seq is None: continue
if seq[i] != '-': all_gap = False
if all_gap:
for seq in seqs:
if seq is None: continue
del seq[i]
text_size -= 1
else:
i += 1
for i in range( len( self.components ) ):
if seqs[i] is None: continue
self.components[i].text = ''.join( seqs[i] )
self.text_size = text_size
|
python
|
def remove_all_gap_columns( self ):
"""
Remove any columns containing only gaps from alignment components,
text of components is modified IN PLACE.
"""
seqs = []
for c in self.components:
try:
seqs.append( list( c.text ) )
except TypeError:
seqs.append( None )
i = 0
text_size = self.text_size
while i < text_size:
all_gap = True
for seq in seqs:
if seq is None: continue
if seq[i] != '-': all_gap = False
if all_gap:
for seq in seqs:
if seq is None: continue
del seq[i]
text_size -= 1
else:
i += 1
for i in range( len( self.components ) ):
if seqs[i] is None: continue
self.components[i].text = ''.join( seqs[i] )
self.text_size = text_size
|
[
"def",
"remove_all_gap_columns",
"(",
"self",
")",
":",
"seqs",
"=",
"[",
"]",
"for",
"c",
"in",
"self",
".",
"components",
":",
"try",
":",
"seqs",
".",
"append",
"(",
"list",
"(",
"c",
".",
"text",
")",
")",
"except",
"TypeError",
":",
"seqs",
".",
"append",
"(",
"None",
")",
"i",
"=",
"0",
"text_size",
"=",
"self",
".",
"text_size",
"while",
"i",
"<",
"text_size",
":",
"all_gap",
"=",
"True",
"for",
"seq",
"in",
"seqs",
":",
"if",
"seq",
"is",
"None",
":",
"continue",
"if",
"seq",
"[",
"i",
"]",
"!=",
"'-'",
":",
"all_gap",
"=",
"False",
"if",
"all_gap",
":",
"for",
"seq",
"in",
"seqs",
":",
"if",
"seq",
"is",
"None",
":",
"continue",
"del",
"seq",
"[",
"i",
"]",
"text_size",
"-=",
"1",
"else",
":",
"i",
"+=",
"1",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"components",
")",
")",
":",
"if",
"seqs",
"[",
"i",
"]",
"is",
"None",
":",
"continue",
"self",
".",
"components",
"[",
"i",
"]",
".",
"text",
"=",
"''",
".",
"join",
"(",
"seqs",
"[",
"i",
"]",
")",
"self",
".",
"text_size",
"=",
"text_size"
] |
Remove any columns containing only gaps from alignment components,
text of components is modified IN PLACE.
|
[
"Remove",
"any",
"columns",
"containing",
"only",
"gaps",
"from",
"alignment",
"components",
"text",
"of",
"components",
"is",
"modified",
"IN",
"PLACE",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/core.py#L160-L188
|
train
|
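remove_all_gap_columns deletes all-gap columns one at a time with del, shifting the tail on every deletion; transposing with zip keeps it to a single pass. A sketch for the simple case where every component has text:

def drop_all_gap_columns(texts):
    # Keep only columns where at least one sequence is not a gap.
    kept = [col for col in zip(*texts) if any(c != '-' for c in col)]
    return [''.join(chars) for chars in zip(*kept)] if kept else [''] * len(texts)

assert drop_all_gap_columns(['A-C', 'G-T']) == ['AC', 'GT']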
bxlab/bx-python
|
lib/bx/align/core.py
|
Component.slice_by_coord
|
def slice_by_coord( self, start, end ):
"""
Return the slice of the component corresponding to a coordinate interval.
start and end are relative to the + strand, regardless of the component's strand.
"""
start_col = self.coord_to_col( start )
end_col = self.coord_to_col( end )
if (self.strand == '-'):
(start_col,end_col) = (end_col,start_col)
return self.slice( start_col, end_col )
|
python
|
def slice_by_coord( self, start, end ):
"""
Return the slice of the component corresponding to a coordinate interval.
start and end are relative to the + strand, regardless of the component's strand.
"""
start_col = self.coord_to_col( start )
end_col = self.coord_to_col( end )
if (self.strand == '-'):
(start_col,end_col) = (end_col,start_col)
return self.slice( start_col, end_col )
|
[
"def",
"slice_by_coord",
"(",
"self",
",",
"start",
",",
"end",
")",
":",
"start_col",
"=",
"self",
".",
"coord_to_col",
"(",
"start",
")",
"end_col",
"=",
"self",
".",
"coord_to_col",
"(",
"end",
")",
"if",
"(",
"self",
".",
"strand",
"==",
"'-'",
")",
":",
"(",
"start_col",
",",
"end_col",
")",
"=",
"(",
"end_col",
",",
"start_col",
")",
"return",
"self",
".",
"slice",
"(",
"start_col",
",",
"end_col",
")"
] |
Return the slice of the component corresponding to a coordinate interval.
start and end are relative to the + strand, regardless of the component's strand.
|
[
"Return",
"the",
"slice",
"of",
"the",
"component",
"corresponding",
"to",
"a",
"coordinate",
"interval",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/core.py#L307-L318
|
train
|
bxlab/bx-python
|
lib/bx/align/core.py
|
Component.coord_to_col
|
def coord_to_col( self, pos ):
"""
Return the alignment column index corresponding to coordinate pos.
pos is relative to the + strand, regardless of the component's strand.
"""
start,end = self.get_forward_strand_start(),self.get_forward_strand_end()
if pos < start or pos > end:
raise ValueError("Range error: %d not in %d-%d" % ( pos, start, end ))
if not self.index:
self.index = list()
if (self.strand == '-'):
# nota bene: for - strand self.index[x] maps to one column
# higher than is actually associated with the position; thus
# when slice_by_component() and slice_by_coord() flip the ends,
# the resulting slice is correct
for x in range( len(self.text)-1,-1,-1 ):
if not self.text[x] == '-':
self.index.append( x + 1 )
self.index.append( 0 )
else:
for x in range( len(self.text) ):
if not self.text[x] == '-':
self.index.append(x)
self.index.append( len(self.text) )
x = None
try:
x = self.index[ pos - start ]
    except IndexError:
raise Exception("Error in index.")
return x
|
python
|
def coord_to_col( self, pos ):
"""
Return the alignment column index corresponding to coordinate pos.
pos is relative to the + strand, regardless of the component's strand.
"""
start,end = self.get_forward_strand_start(),self.get_forward_strand_end()
if pos < start or pos > end:
raise ValueError("Range error: %d not in %d-%d" % ( pos, start, end ))
if not self.index:
self.index = list()
if (self.strand == '-'):
# nota bene: for - strand self.index[x] maps to one column
# higher than is actually associated with the position; thus
# when slice_by_component() and slice_by_coord() flip the ends,
# the resulting slice is correct
for x in range( len(self.text)-1,-1,-1 ):
if not self.text[x] == '-':
self.index.append( x + 1 )
self.index.append( 0 )
else:
for x in range( len(self.text) ):
if not self.text[x] == '-':
self.index.append(x)
self.index.append( len(self.text) )
x = None
try:
x = self.index[ pos - start ]
    except IndexError:
raise Exception("Error in index.")
return x
|
[
"def",
"coord_to_col",
"(",
"self",
",",
"pos",
")",
":",
"start",
",",
"end",
"=",
"self",
".",
"get_forward_strand_start",
"(",
")",
",",
"self",
".",
"get_forward_strand_end",
"(",
")",
"if",
"pos",
"<",
"start",
"or",
"pos",
">",
"end",
":",
"raise",
"ValueError",
"(",
"\"Range error: %d not in %d-%d\"",
"%",
"(",
"pos",
",",
"start",
",",
"end",
")",
")",
"if",
"not",
"self",
".",
"index",
":",
"self",
".",
"index",
"=",
"list",
"(",
")",
"if",
"(",
"self",
".",
"strand",
"==",
"'-'",
")",
":",
"# nota bene: for - strand self.index[x] maps to one column",
"# higher than is actually associated with the position; thus",
"# when slice_by_component() and slice_by_coord() flip the ends,",
"# the resulting slice is correct",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"text",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"if",
"not",
"self",
".",
"text",
"[",
"x",
"]",
"==",
"'-'",
":",
"self",
".",
"index",
".",
"append",
"(",
"x",
"+",
"1",
")",
"self",
".",
"index",
".",
"append",
"(",
"0",
")",
"else",
":",
"for",
"x",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"text",
")",
")",
":",
"if",
"not",
"self",
".",
"text",
"[",
"x",
"]",
"==",
"'-'",
":",
"self",
".",
"index",
".",
"append",
"(",
"x",
")",
"self",
".",
"index",
".",
"append",
"(",
"len",
"(",
"self",
".",
"text",
")",
")",
"x",
"=",
"None",
"try",
":",
"x",
"=",
"self",
".",
"index",
"[",
"pos",
"-",
"start",
"]",
"except",
":",
"raise",
"Exception",
"(",
"\"Error in index.\"",
")",
"return",
"x"
] |
Return the alignment column index corresponding to coordinate pos.
pos is relative to the + strand, regardless of the component's strand.
|
[
"Return",
"the",
"alignment",
"column",
"index",
"corresponding",
"to",
"coordinate",
"pos",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/core.py#L320-L351
|
train
|
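
A hedged usage sketch for coord_to_col. The keyword arguments below assume the Component constructor signature in bx.align.core; the values are hypothetical:

from bx.align.core import Component  # assumes bx-python is installed

# A plus-strand component starting at coordinate 10, with two gap columns.
c = Component(src="hg17.chr7", start=10, size=5, strand="+", src_size=1000, text="AC--GTT")
c.coord_to_col(12)  # -> 4: coordinates 10, 11, 12 map to columns 0, 1, 4
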
bxlab/bx-python
|
lib/bx/align/tools/thread.py
|
get_components_for_species
|
def get_components_for_species( alignment, species ):
"""Return the component for each species in the list `species` or None"""
    # If the number of components in the alignment is less than the requested number
# of species we can immediately fail
if len( alignment.components ) < len( species ): return None
# Otherwise, build an index of components by species, then lookup
index = dict( [ ( c.src.split( '.' )[0], c ) for c in alignment.components ] )
try: return [ index[s] for s in species ]
    except KeyError: return None
|
python
|
def get_components_for_species( alignment, species ):
"""Return the component for each species in the list `species` or None"""
    # If the number of components in the alignment is less than the requested number
# of species we can immediately fail
if len( alignment.components ) < len( species ): return None
# Otherwise, build an index of components by species, then lookup
index = dict( [ ( c.src.split( '.' )[0], c ) for c in alignment.components ] )
try: return [ index[s] for s in species ]
    except KeyError: return None
|
[
"def",
"get_components_for_species",
"(",
"alignment",
",",
"species",
")",
":",
"# If the number of components in the alignment is less that the requested number",
"# of species we can immediately fail",
"if",
"len",
"(",
"alignment",
".",
"components",
")",
"<",
"len",
"(",
"species",
")",
":",
"return",
"None",
"# Otherwise, build an index of components by species, then lookup ",
"index",
"=",
"dict",
"(",
"[",
"(",
"c",
".",
"src",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
",",
"c",
")",
"for",
"c",
"in",
"alignment",
".",
"components",
"]",
")",
"try",
":",
"return",
"[",
"index",
"[",
"s",
"]",
"for",
"s",
"in",
"species",
"]",
"except",
":",
"return",
"None"
] |
Return the component for each species in the list `species` or None
|
[
"Return",
"the",
"component",
"for",
"each",
"species",
"in",
"the",
"list",
"species",
"or",
"None"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/tools/thread.py#L68-L76
|
train
|
bxlab/bx-python
|
lib/bx/align/maf.py
|
read_next_maf
|
def read_next_maf( file, species_to_lengths=None, parse_e_rows=False ):
"""
Read the next MAF block from `file` and return as an `Alignment`
    instance. If `parse_e_rows` is true, empty components will be created
when e rows are encountered.
"""
alignment = Alignment(species_to_lengths=species_to_lengths)
# Attributes line
line = readline( file, skip_blank=True )
if not line: return None
fields = line.split()
if fields[0] != 'a': raise Exception("Expected 'a ...' line")
alignment.attributes = parse_attributes( fields[1:] )
if 'score' in alignment.attributes:
alignment.score = alignment.attributes['score']
del alignment.attributes['score']
else:
alignment.score = 0
# Sequence lines
last_component = None
while 1:
line = readline( file )
# EOF or Blank line terminates alignment components
if not line or line.isspace(): break
# Parse row
fields = line.split()
if fields[0] == 's':
# An 's' row contains sequence for a component
component = Component()
component.src = fields[1]
component.start = int( fields[2] )
component.size = int( fields[3] )
component.strand = fields[4]
component.src_size = int( fields[5] )
if len(fields) > 6: component.text = fields[6].strip()
# Add to set
alignment.add_component( component )
last_component = component
elif fields[0] == 'e':
# An 'e' row, when no bases align for a given species this tells
# us something about the synteny
if parse_e_rows:
component = Component()
component.empty = True
component.src = fields[1]
component.start = int( fields[2] )
component.size = int( fields[3] )
component.strand = fields[4]
component.src_size = int( fields[5] )
component.text = None
synteny = fields[6].strip()
assert len( synteny ) == 1, \
"Synteny status in 'e' rows should be denoted with a single character code"
component.synteny_empty = synteny
alignment.add_component( component )
last_component = component
elif fields[0] == 'i':
# An 'i' row, indicates left and right synteny status for the
# previous component, we hope ;)
assert fields[1] == last_component.src, "'i' row does not follow matching 's' row"
last_component.synteny_left = ( fields[2], int( fields[3] ) )
last_component.synteny_right = ( fields[4], int( fields[5] ) )
elif fields[0] == 'q':
assert fields[1] == last_component.src, "'q' row does not follow matching 's' row"
# TODO: Should convert this to an integer array?
last_component.quality = fields[2]
return alignment
|
python
|
def read_next_maf( file, species_to_lengths=None, parse_e_rows=False ):
"""
Read the next MAF block from `file` and return as an `Alignment`
    instance. If `parse_e_rows` is true, empty components will be created
when e rows are encountered.
"""
alignment = Alignment(species_to_lengths=species_to_lengths)
# Attributes line
line = readline( file, skip_blank=True )
if not line: return None
fields = line.split()
if fields[0] != 'a': raise Exception("Expected 'a ...' line")
alignment.attributes = parse_attributes( fields[1:] )
if 'score' in alignment.attributes:
alignment.score = alignment.attributes['score']
del alignment.attributes['score']
else:
alignment.score = 0
# Sequence lines
last_component = None
while 1:
line = readline( file )
# EOF or Blank line terminates alignment components
if not line or line.isspace(): break
# Parse row
fields = line.split()
if fields[0] == 's':
# An 's' row contains sequence for a component
component = Component()
component.src = fields[1]
component.start = int( fields[2] )
component.size = int( fields[3] )
component.strand = fields[4]
component.src_size = int( fields[5] )
if len(fields) > 6: component.text = fields[6].strip()
# Add to set
alignment.add_component( component )
last_component = component
elif fields[0] == 'e':
# An 'e' row, when no bases align for a given species this tells
# us something about the synteny
if parse_e_rows:
component = Component()
component.empty = True
component.src = fields[1]
component.start = int( fields[2] )
component.size = int( fields[3] )
component.strand = fields[4]
component.src_size = int( fields[5] )
component.text = None
synteny = fields[6].strip()
assert len( synteny ) == 1, \
"Synteny status in 'e' rows should be denoted with a single character code"
component.synteny_empty = synteny
alignment.add_component( component )
last_component = component
elif fields[0] == 'i':
# An 'i' row, indicates left and right synteny status for the
# previous component, we hope ;)
assert fields[1] == last_component.src, "'i' row does not follow matching 's' row"
last_component.synteny_left = ( fields[2], int( fields[3] ) )
last_component.synteny_right = ( fields[4], int( fields[5] ) )
elif fields[0] == 'q':
assert fields[1] == last_component.src, "'q' row does not follow matching 's' row"
# TODO: Should convert this to an integer array?
last_component.quality = fields[2]
return alignment
|
[
"def",
"read_next_maf",
"(",
"file",
",",
"species_to_lengths",
"=",
"None",
",",
"parse_e_rows",
"=",
"False",
")",
":",
"alignment",
"=",
"Alignment",
"(",
"species_to_lengths",
"=",
"species_to_lengths",
")",
"# Attributes line",
"line",
"=",
"readline",
"(",
"file",
",",
"skip_blank",
"=",
"True",
")",
"if",
"not",
"line",
":",
"return",
"None",
"fields",
"=",
"line",
".",
"split",
"(",
")",
"if",
"fields",
"[",
"0",
"]",
"!=",
"'a'",
":",
"raise",
"Exception",
"(",
"\"Expected 'a ...' line\"",
")",
"alignment",
".",
"attributes",
"=",
"parse_attributes",
"(",
"fields",
"[",
"1",
":",
"]",
")",
"if",
"'score'",
"in",
"alignment",
".",
"attributes",
":",
"alignment",
".",
"score",
"=",
"alignment",
".",
"attributes",
"[",
"'score'",
"]",
"del",
"alignment",
".",
"attributes",
"[",
"'score'",
"]",
"else",
":",
"alignment",
".",
"score",
"=",
"0",
"# Sequence lines",
"last_component",
"=",
"None",
"while",
"1",
":",
"line",
"=",
"readline",
"(",
"file",
")",
"# EOF or Blank line terminates alignment components",
"if",
"not",
"line",
"or",
"line",
".",
"isspace",
"(",
")",
":",
"break",
"if",
"line",
".",
"isspace",
"(",
")",
":",
"break",
"# Parse row",
"fields",
"=",
"line",
".",
"split",
"(",
")",
"if",
"fields",
"[",
"0",
"]",
"==",
"'s'",
":",
"# An 's' row contains sequence for a component",
"component",
"=",
"Component",
"(",
")",
"component",
".",
"src",
"=",
"fields",
"[",
"1",
"]",
"component",
".",
"start",
"=",
"int",
"(",
"fields",
"[",
"2",
"]",
")",
"component",
".",
"size",
"=",
"int",
"(",
"fields",
"[",
"3",
"]",
")",
"component",
".",
"strand",
"=",
"fields",
"[",
"4",
"]",
"component",
".",
"src_size",
"=",
"int",
"(",
"fields",
"[",
"5",
"]",
")",
"if",
"len",
"(",
"fields",
")",
">",
"6",
":",
"component",
".",
"text",
"=",
"fields",
"[",
"6",
"]",
".",
"strip",
"(",
")",
"# Add to set",
"alignment",
".",
"add_component",
"(",
"component",
")",
"last_component",
"=",
"component",
"elif",
"fields",
"[",
"0",
"]",
"==",
"'e'",
":",
"# An 'e' row, when no bases align for a given species this tells",
"# us something about the synteny ",
"if",
"parse_e_rows",
":",
"component",
"=",
"Component",
"(",
")",
"component",
".",
"empty",
"=",
"True",
"component",
".",
"src",
"=",
"fields",
"[",
"1",
"]",
"component",
".",
"start",
"=",
"int",
"(",
"fields",
"[",
"2",
"]",
")",
"component",
".",
"size",
"=",
"int",
"(",
"fields",
"[",
"3",
"]",
")",
"component",
".",
"strand",
"=",
"fields",
"[",
"4",
"]",
"component",
".",
"src_size",
"=",
"int",
"(",
"fields",
"[",
"5",
"]",
")",
"component",
".",
"text",
"=",
"None",
"synteny",
"=",
"fields",
"[",
"6",
"]",
".",
"strip",
"(",
")",
"assert",
"len",
"(",
"synteny",
")",
"==",
"1",
",",
"\"Synteny status in 'e' rows should be denoted with a single character code\"",
"component",
".",
"synteny_empty",
"=",
"synteny",
"alignment",
".",
"add_component",
"(",
"component",
")",
"last_component",
"=",
"component",
"elif",
"fields",
"[",
"0",
"]",
"==",
"'i'",
":",
"# An 'i' row, indicates left and right synteny status for the ",
"# previous component, we hope ;)",
"assert",
"fields",
"[",
"1",
"]",
"==",
"last_component",
".",
"src",
",",
"\"'i' row does not follow matching 's' row\"",
"last_component",
".",
"synteny_left",
"=",
"(",
"fields",
"[",
"2",
"]",
",",
"int",
"(",
"fields",
"[",
"3",
"]",
")",
")",
"last_component",
".",
"synteny_right",
"=",
"(",
"fields",
"[",
"4",
"]",
",",
"int",
"(",
"fields",
"[",
"5",
"]",
")",
")",
"elif",
"fields",
"[",
"0",
"]",
"==",
"'q'",
":",
"assert",
"fields",
"[",
"1",
"]",
"==",
"last_component",
".",
"src",
",",
"\"'q' row does not follow matching 's' row\"",
"# TODO: Should convert this to an integer array?",
"last_component",
".",
"quality",
"=",
"fields",
"[",
"2",
"]",
"return",
"alignment"
] |
Read the next MAF block from `file` and return as an `Alignment`
instance. If `parse_e_rows` is true, empty components will be created
when e rows are encountered.
|
[
"Read",
"the",
"next",
"MAF",
"block",
"from",
"file",
"and",
"return",
"as",
"an",
"Alignment",
"instance",
".",
"If",
"parse_i_rows",
"is",
"true",
"empty",
"components",
"will",
"be",
"created",
"when",
"e",
"rows",
"are",
"encountered",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/maf.py#L133-L201
|
train
|
bxlab/bx-python
|
lib/bx/align/maf.py
|
readline
|
def readline( file, skip_blank=False ):
"""Read a line from provided file, skipping any blank or comment lines"""
while 1:
line = file.readline()
#print "every line: %r" % line
if not line: return None
if line[0] != '#' and not ( skip_blank and line.isspace() ):
return line
|
python
|
def readline( file, skip_blank=False ):
"""Read a line from provided file, skipping any blank or comment lines"""
while 1:
line = file.readline()
#print "every line: %r" % line
if not line: return None
if line[0] != '#' and not ( skip_blank and line.isspace() ):
return line
|
[
"def",
"readline",
"(",
"file",
",",
"skip_blank",
"=",
"False",
")",
":",
"while",
"1",
":",
"line",
"=",
"file",
".",
"readline",
"(",
")",
"#print \"every line: %r\" % line",
"if",
"not",
"line",
":",
"return",
"None",
"if",
"line",
"[",
"0",
"]",
"!=",
"'#'",
"and",
"not",
"(",
"skip_blank",
"and",
"line",
".",
"isspace",
"(",
")",
")",
":",
"return",
"line"
] |
Read a line from provided file, skipping any blank or comment lines
|
[
"Read",
"a",
"line",
"from",
"provided",
"file",
"skipping",
"any",
"blank",
"or",
"comment",
"lines"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/maf.py#L203-L210
|
train
|
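
A quick usage sketch for readline; any file-like object works:

from io import StringIO
from bx.align.maf import readline  # assumes bx-python is installed

f = StringIO("# a comment\n\na score=23.0\n")
readline(f, skip_blank=True)  # -> "a score=23.0\n": the comment and blank line are skipped
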
bxlab/bx-python
|
lib/bx/align/maf.py
|
parse_attributes
|
def parse_attributes( fields ):
"""Parse list of key=value strings into a dict"""
attributes = {}
for field in fields:
pair = field.split( '=' )
attributes[ pair[0] ] = pair[1]
return attributes
|
python
|
def parse_attributes( fields ):
"""Parse list of key=value strings into a dict"""
attributes = {}
for field in fields:
pair = field.split( '=' )
attributes[ pair[0] ] = pair[1]
return attributes
|
[
"def",
"parse_attributes",
"(",
"fields",
")",
":",
"attributes",
"=",
"{",
"}",
"for",
"field",
"in",
"fields",
":",
"pair",
"=",
"field",
".",
"split",
"(",
"'='",
")",
"attributes",
"[",
"pair",
"[",
"0",
"]",
"]",
"=",
"pair",
"[",
"1",
"]",
"return",
"attributes"
] |
Parse list of key=value strings into a dict
|
[
"Parse",
"list",
"of",
"key",
"=",
"value",
"strings",
"into",
"a",
"dict"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/maf.py#L212-L218
|
train
|
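
A minimal usage sketch for parse_attributes; note that values stay strings:

from bx.align.maf import parse_attributes  # assumes bx-python is installed

parse_attributes(["score=23.0", "pass=2"])
# -> {'score': '23.0', 'pass': '2'}
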
bxlab/bx-python
|
lib/bx/motif/io/transfac.py
|
TransfacReader.as_dict
|
def as_dict( self, key="id" ):
"""
Return a dictionary containing all remaining motifs, using `key`
as the dictionary key.
"""
rval = {}
for motif in self:
rval[ getattr( motif, key ) ] = motif
return rval
|
python
|
def as_dict( self, key="id" ):
"""
Return a dictionary containing all remaining motifs, using `key`
as the dictionary key.
"""
rval = {}
for motif in self:
rval[ getattr( motif, key ) ] = motif
return rval
|
[
"def",
"as_dict",
"(",
"self",
",",
"key",
"=",
"\"id\"",
")",
":",
"rval",
"=",
"{",
"}",
"for",
"motif",
"in",
"self",
":",
"rval",
"[",
"getattr",
"(",
"motif",
",",
"key",
")",
"]",
"=",
"motif",
"return",
"rval"
] |
Return a dictionary containing all remaining motifs, using `key`
as the dictionary key.
|
[
"Return",
"a",
"dictionary",
"containing",
"all",
"remaining",
"motifs",
"using",
"key",
"as",
"the",
"dictionary",
"key",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/motif/io/transfac.py#L52-L60
|
train
|
bxlab/bx-python
|
lib/bx/motif/io/transfac.py
|
TransfacReader.parse_record
|
def parse_record( self, lines ):
"""
Parse a TRANSFAC record out of `lines` and return a motif.
"""
# Break lines up
temp_lines = []
for line in lines:
fields = line.rstrip( "\r\n" ).split( None, 1 )
if len( fields ) == 1:
fields.append( "" )
temp_lines.append( fields )
lines = temp_lines
# Fill in motif from lines
motif = TransfacMotif()
current_line = 0
while 1:
# Done parsing if no more lines to consume
if current_line >= len( lines ):
break
# Remove prefix and first separator from line
prefix, rest = lines[ current_line ]
# No action for this prefix, just ignore the line
if prefix not in self.parse_actions:
current_line += 1
continue
# Get action for line
action = self.parse_actions[ prefix ]
# Store a single line value
if action[0] == "store_single":
key = action[1]
setattr( motif, key, rest )
current_line += 1
# Add a single line value to a list
if action[0] == "store_single_list":
key = action[1]
if not getattr( motif, key ):
setattr( motif, key, [] )
getattr( motif, key ).append( rest )
current_line += 1
# Add a single line value to a dictionary
if action[0] == "store_single_key_value":
key = action[1]
k, v = rest.strip().split( '=', 1 )
if not getattr( motif, key ):
setattr( motif, key, {} )
getattr( motif, key )[k] = v
current_line += 1
# Store a block of text
if action[0] == "store_block":
key = action[1]
value = []
while current_line < len( lines ) and lines[ current_line ][0] == prefix:
value.append( lines[current_line][1] )
current_line += 1
setattr( motif, key, str.join( "\n", value ) )
# Store a matrix
if action[0] == "store_matrix":
# First line is alphabet
alphabet = rest.split()
alphabet_size = len( alphabet )
rows = []
pattern = ""
current_line += 1
# Next lines are the rows of the matrix (we allow 0 rows)
while current_line < len( lines ):
prefix, rest = lines[ current_line ]
# Prefix should be a two digit 0 padded row number
if not prefix.isdigit():
break
# The first `alphabet_size` fields are the row values
values = rest.split()
rows.append( [ float(_) for _ in values[:alphabet_size] ] )
# TRANSFAC includes an extra column with the IUPAC code
if len( values ) > alphabet_size:
pattern += values[alphabet_size]
current_line += 1
# Only store the pattern if it is the correct length (meaning
# that every row had an extra field)
if len( pattern ) != len( rows ):
pattern = None
matrix = FrequencyMatrix.from_rows( alphabet, rows )
setattr( motif, action[1], matrix )
# Only return a motif if we saw at least ID or AC or NA
if motif.id or motif.accession or motif.name:
return motif
|
python
|
def parse_record( self, lines ):
"""
Parse a TRANSFAC record out of `lines` and return a motif.
"""
# Break lines up
temp_lines = []
for line in lines:
fields = line.rstrip( "\r\n" ).split( None, 1 )
if len( fields ) == 1:
fields.append( "" )
temp_lines.append( fields )
lines = temp_lines
# Fill in motif from lines
motif = TransfacMotif()
current_line = 0
while 1:
# Done parsing if no more lines to consume
if current_line >= len( lines ):
break
# Remove prefix and first separator from line
prefix, rest = lines[ current_line ]
# No action for this prefix, just ignore the line
if prefix not in self.parse_actions:
current_line += 1
continue
# Get action for line
action = self.parse_actions[ prefix ]
# Store a single line value
if action[0] == "store_single":
key = action[1]
setattr( motif, key, rest )
current_line += 1
# Add a single line value to a list
if action[0] == "store_single_list":
key = action[1]
if not getattr( motif, key ):
setattr( motif, key, [] )
getattr( motif, key ).append( rest )
current_line += 1
# Add a single line value to a dictionary
if action[0] == "store_single_key_value":
key = action[1]
k, v = rest.strip().split( '=', 1 )
if not getattr( motif, key ):
setattr( motif, key, {} )
getattr( motif, key )[k] = v
current_line += 1
# Store a block of text
if action[0] == "store_block":
key = action[1]
value = []
while current_line < len( lines ) and lines[ current_line ][0] == prefix:
value.append( lines[current_line][1] )
current_line += 1
setattr( motif, key, str.join( "\n", value ) )
# Store a matrix
if action[0] == "store_matrix":
# First line is alphabet
alphabet = rest.split()
alphabet_size = len( alphabet )
rows = []
pattern = ""
current_line += 1
# Next lines are the rows of the matrix (we allow 0 rows)
while current_line < len( lines ):
prefix, rest = lines[ current_line ]
# Prefix should be a two digit 0 padded row number
if not prefix.isdigit():
break
# The first `alphabet_size` fields are the row values
values = rest.split()
rows.append( [ float(_) for _ in values[:alphabet_size] ] )
# TRANSFAC includes an extra column with the IUPAC code
if len( values ) > alphabet_size:
pattern += values[alphabet_size]
current_line += 1
# Only store the pattern if it is the correct length (meaning
# that every row had an extra field)
if len( pattern ) != len( rows ):
pattern = None
matrix = FrequencyMatrix.from_rows( alphabet, rows )
setattr( motif, action[1], matrix )
# Only return a motif if we saw at least ID or AC or NA
if motif.id or motif.accession or motif.name:
return motif
|
[
"def",
"parse_record",
"(",
"self",
",",
"lines",
")",
":",
"# Break lines up",
"temp_lines",
"=",
"[",
"]",
"for",
"line",
"in",
"lines",
":",
"fields",
"=",
"line",
".",
"rstrip",
"(",
"\"\\r\\n\"",
")",
".",
"split",
"(",
"None",
",",
"1",
")",
"if",
"len",
"(",
"fields",
")",
"==",
"1",
":",
"fields",
".",
"append",
"(",
"\"\"",
")",
"temp_lines",
".",
"append",
"(",
"fields",
")",
"lines",
"=",
"temp_lines",
"# Fill in motif from lines",
"motif",
"=",
"TransfacMotif",
"(",
")",
"current_line",
"=",
"0",
"while",
"1",
":",
"# Done parsing if no more lines to consume",
"if",
"current_line",
">=",
"len",
"(",
"lines",
")",
":",
"break",
"# Remove prefix and first separator from line",
"prefix",
",",
"rest",
"=",
"lines",
"[",
"current_line",
"]",
"# No action for this prefix, just ignore the line",
"if",
"prefix",
"not",
"in",
"self",
".",
"parse_actions",
":",
"current_line",
"+=",
"1",
"continue",
"# Get action for line",
"action",
"=",
"self",
".",
"parse_actions",
"[",
"prefix",
"]",
"# Store a single line value",
"if",
"action",
"[",
"0",
"]",
"==",
"\"store_single\"",
":",
"key",
"=",
"action",
"[",
"1",
"]",
"setattr",
"(",
"motif",
",",
"key",
",",
"rest",
")",
"current_line",
"+=",
"1",
"# Add a single line value to a list",
"if",
"action",
"[",
"0",
"]",
"==",
"\"store_single_list\"",
":",
"key",
"=",
"action",
"[",
"1",
"]",
"if",
"not",
"getattr",
"(",
"motif",
",",
"key",
")",
":",
"setattr",
"(",
"motif",
",",
"key",
",",
"[",
"]",
")",
"getattr",
"(",
"motif",
",",
"key",
")",
".",
"append",
"(",
"rest",
")",
"current_line",
"+=",
"1",
"# Add a single line value to a dictionary",
"if",
"action",
"[",
"0",
"]",
"==",
"\"store_single_key_value\"",
":",
"key",
"=",
"action",
"[",
"1",
"]",
"k",
",",
"v",
"=",
"rest",
".",
"strip",
"(",
")",
".",
"split",
"(",
"'='",
",",
"1",
")",
"if",
"not",
"getattr",
"(",
"motif",
",",
"key",
")",
":",
"setattr",
"(",
"motif",
",",
"key",
",",
"{",
"}",
")",
"getattr",
"(",
"motif",
",",
"key",
")",
"[",
"k",
"]",
"=",
"v",
"current_line",
"+=",
"1",
"# Store a block of text",
"if",
"action",
"[",
"0",
"]",
"==",
"\"store_block\"",
":",
"key",
"=",
"action",
"[",
"1",
"]",
"value",
"=",
"[",
"]",
"while",
"current_line",
"<",
"len",
"(",
"lines",
")",
"and",
"lines",
"[",
"current_line",
"]",
"[",
"0",
"]",
"==",
"prefix",
":",
"value",
".",
"append",
"(",
"lines",
"[",
"current_line",
"]",
"[",
"1",
"]",
")",
"current_line",
"+=",
"1",
"setattr",
"(",
"motif",
",",
"key",
",",
"str",
".",
"join",
"(",
"\"\\n\"",
",",
"value",
")",
")",
"# Store a matrix",
"if",
"action",
"[",
"0",
"]",
"==",
"\"store_matrix\"",
":",
"# First line is alphabet",
"alphabet",
"=",
"rest",
".",
"split",
"(",
")",
"alphabet_size",
"=",
"len",
"(",
"alphabet",
")",
"rows",
"=",
"[",
"]",
"pattern",
"=",
"\"\"",
"current_line",
"+=",
"1",
"# Next lines are the rows of the matrix (we allow 0 rows)",
"while",
"current_line",
"<",
"len",
"(",
"lines",
")",
":",
"prefix",
",",
"rest",
"=",
"lines",
"[",
"current_line",
"]",
"# Prefix should be a two digit 0 padded row number",
"if",
"not",
"prefix",
".",
"isdigit",
"(",
")",
":",
"break",
"# The first `alphabet_size` fields are the row values",
"values",
"=",
"rest",
".",
"split",
"(",
")",
"rows",
".",
"append",
"(",
"[",
"float",
"(",
"_",
")",
"for",
"_",
"in",
"values",
"[",
":",
"alphabet_size",
"]",
"]",
")",
"# TRANSFAC includes an extra column with the IUPAC code",
"if",
"len",
"(",
"values",
")",
">",
"alphabet_size",
":",
"pattern",
"+=",
"values",
"[",
"alphabet_size",
"]",
"current_line",
"+=",
"1",
"# Only store the pattern if it is the correct length (meaning",
"# that every row had an extra field)",
"if",
"len",
"(",
"pattern",
")",
"!=",
"len",
"(",
"rows",
")",
":",
"pattern",
"=",
"None",
"matrix",
"=",
"FrequencyMatrix",
".",
"from_rows",
"(",
"alphabet",
",",
"rows",
")",
"setattr",
"(",
"motif",
",",
"action",
"[",
"1",
"]",
",",
"matrix",
")",
"# Only return a motif if we saw at least ID or AC or NA",
"if",
"motif",
".",
"id",
"or",
"motif",
".",
"accession",
"or",
"motif",
".",
"name",
":",
"return",
"motif"
] |
Parse a TRANSFAC record out of `lines` and return a motif.
|
[
"Parse",
"a",
"TRANSFAC",
"record",
"out",
"of",
"lines",
"and",
"return",
"a",
"motif",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/motif/io/transfac.py#L90-L174
|
train
|
bxlab/bx-python
|
scripts/bed_rand_intersect.py
|
bit_clone
|
def bit_clone( bits ):
"""
Clone a bitset
"""
new = BitSet( bits.size )
new.ior( bits )
return new
|
python
|
def bit_clone( bits ):
"""
Clone a bitset
"""
new = BitSet( bits.size )
new.ior( bits )
return new
|
[
"def",
"bit_clone",
"(",
"bits",
")",
":",
"new",
"=",
"BitSet",
"(",
"bits",
".",
"size",
")",
"new",
".",
"ior",
"(",
"bits",
")",
"return",
"new"
] |
Clone a bitset
|
[
"Clone",
"a",
"bitset"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/scripts/bed_rand_intersect.py#L35-L41
|
train
|
bxlab/bx-python
|
scripts/bed_rand_intersect.py
|
throw_random
|
def throw_random( lengths, mask ):
"""
    Try multiple times to run 'throw_random_bits'
"""
saved = None
for i in range( maxtries ):
try:
return throw_random_bits( lengths, mask )
except MaxtriesException as e:
saved = e
continue
    raise saved
|
python
|
def throw_random( lengths, mask ):
"""
    Try multiple times to run 'throw_random_bits'
"""
saved = None
for i in range( maxtries ):
try:
return throw_random_bits( lengths, mask )
except MaxtriesException as e:
saved = e
continue
    raise saved
|
[
"def",
"throw_random",
"(",
"lengths",
",",
"mask",
")",
":",
"saved",
"=",
"None",
"for",
"i",
"in",
"range",
"(",
"maxtries",
")",
":",
"try",
":",
"return",
"throw_random_bits",
"(",
"lengths",
",",
"mask",
")",
"except",
"MaxtriesException",
"as",
"e",
":",
"saved",
"=",
"e",
"continue",
"raise",
"e"
] |
Try multiple times to run 'throw_random_bits'
|
[
"Try",
"multiple",
"times",
"to",
"run",
"throw_random"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/scripts/bed_rand_intersect.py#L43-L54
|
train
|
bxlab/bx-python
|
scripts/bed_rand_intersect.py
|
as_bits
|
def as_bits( region_start, region_length, intervals ):
"""
Convert a set of intervals overlapping a region of a chromosome into
a bitset for just that region with the bits covered by the intervals
set.
"""
bits = BitSet( region_length )
for chr, start, stop in intervals:
bits.set_range( start - region_start, stop - start )
return bits
|
python
|
def as_bits( region_start, region_length, intervals ):
"""
Convert a set of intervals overlapping a region of a chromosome into
a bitset for just that region with the bits covered by the intervals
set.
"""
bits = BitSet( region_length )
for chr, start, stop in intervals:
bits.set_range( start - region_start, stop - start )
return bits
|
[
"def",
"as_bits",
"(",
"region_start",
",",
"region_length",
",",
"intervals",
")",
":",
"bits",
"=",
"BitSet",
"(",
"region_length",
")",
"for",
"chr",
",",
"start",
",",
"stop",
"in",
"intervals",
":",
"bits",
".",
"set_range",
"(",
"start",
"-",
"region_start",
",",
"stop",
"-",
"start",
")",
"return",
"bits"
] |
Convert a set of intervals overlapping a region of a chromosome into
a bitset for just that region with the bits covered by the intervals
set.
|
[
"Convert",
"a",
"set",
"of",
"intervals",
"overlapping",
"a",
"region",
"of",
"a",
"chromosome",
"into",
"a",
"bitset",
"for",
"just",
"that",
"region",
"with",
"the",
"bits",
"covered",
"by",
"the",
"intervals",
"set",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/scripts/bed_rand_intersect.py#L56-L65
|
train
|
bxlab/bx-python
|
scripts/bed_rand_intersect.py
|
interval_lengths
|
def interval_lengths( bits ):
"""
    Get the length distribution of all contiguous runs of set bits from `bits`
"""
end = 0
while 1:
start = bits.next_set( end )
if start == bits.size: break
end = bits.next_clear( start )
yield end - start
|
python
|
def interval_lengths( bits ):
"""
    Get the length distribution of all contiguous runs of set bits from `bits`
"""
end = 0
while 1:
start = bits.next_set( end )
if start == bits.size: break
end = bits.next_clear( start )
yield end - start
|
[
"def",
"interval_lengths",
"(",
"bits",
")",
":",
"end",
"=",
"0",
"while",
"1",
":",
"start",
"=",
"bits",
".",
"next_set",
"(",
"end",
")",
"if",
"start",
"==",
"bits",
".",
"size",
":",
"break",
"end",
"=",
"bits",
".",
"next_clear",
"(",
"start",
")",
"yield",
"end",
"-",
"start"
] |
Get the length distribution of all contiguous runs of set bits from `bits`
|
[
"Get",
"the",
"length",
"distribution",
"of",
"all",
"contiguous",
"runs",
"of",
"set",
"bits",
"from"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/scripts/bed_rand_intersect.py#L67-L76
|
train
|
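
A hedged usage sketch (interval_lengths as defined above; BitSet comes from bx.bitset, a C extension bundled with bx-python):

from bx.bitset import BitSet

bits = BitSet(10)
bits.set_range(1, 3)  # bits 1-3 set: a run of length 3
bits.set_range(6, 2)  # bits 6-7 set: a run of length 2
list(interval_lengths(bits))  # -> [3, 2]
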
bxlab/bx-python
|
scripts/bed_rand_intersect.py
|
count_overlap
|
def count_overlap( bits1, bits2 ):
"""
Count the number of bits that overlap between two sets
"""
b = BitSet( bits1.size )
b |= bits1
b &= bits2
return b.count_range( 0, b.size )
|
python
|
def count_overlap( bits1, bits2 ):
"""
Count the number of bits that overlap between two sets
"""
b = BitSet( bits1.size )
b |= bits1
b &= bits2
return b.count_range( 0, b.size )
|
[
"def",
"count_overlap",
"(",
"bits1",
",",
"bits2",
")",
":",
"b",
"=",
"BitSet",
"(",
"bits1",
".",
"size",
")",
"b",
"|=",
"bits1",
"b",
"&=",
"bits2",
"return",
"b",
".",
"count_range",
"(",
"0",
",",
"b",
".",
"size",
")"
] |
Count the number of bits that overlap between two sets
|
[
"Count",
"the",
"number",
"of",
"bits",
"that",
"overlap",
"between",
"two",
"sets"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/scripts/bed_rand_intersect.py#L78-L85
|
train
|
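
A hedged sketch of count_overlap (as defined above), again using bx.bitset.BitSet:

from bx.bitset import BitSet

a = BitSet(10)
a.set_range(0, 5)  # bits 0-4
b = BitSet(10)
b.set_range(3, 5)  # bits 3-7
count_overlap(a, b)  # -> 2: only bits 3 and 4 are set in both
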
bxlab/bx-python
|
scripts/bed_rand_intersect.py
|
overlapping_in_bed
|
def overlapping_in_bed( fname, r_chr, r_start, r_stop ):
"""
Get from a bed all intervals that overlap the region defined by
r_chr, r_start, r_stop.
"""
rval = []
for line in open( fname ):
if line.startswith( "#" ) or line.startswith( "track" ):
continue
fields = line.split()
chr, start, stop = fields[0], int( fields[1] ), int( fields[2] )
if chr == r_chr and start < r_stop and stop >= r_start:
rval.append( ( chr, max( start, r_start ), min( stop, r_stop ) ) )
return rval
|
python
|
def overlapping_in_bed( fname, r_chr, r_start, r_stop ):
"""
Get from a bed all intervals that overlap the region defined by
r_chr, r_start, r_stop.
"""
rval = []
for line in open( fname ):
if line.startswith( "#" ) or line.startswith( "track" ):
continue
fields = line.split()
chr, start, stop = fields[0], int( fields[1] ), int( fields[2] )
if chr == r_chr and start < r_stop and stop >= r_start:
rval.append( ( chr, max( start, r_start ), min( stop, r_stop ) ) )
return rval
|
[
"def",
"overlapping_in_bed",
"(",
"fname",
",",
"r_chr",
",",
"r_start",
",",
"r_stop",
")",
":",
"rval",
"=",
"[",
"]",
"for",
"line",
"in",
"open",
"(",
"fname",
")",
":",
"if",
"line",
".",
"startswith",
"(",
"\"#\"",
")",
"or",
"line",
".",
"startswith",
"(",
"\"track\"",
")",
":",
"continue",
"fields",
"=",
"line",
".",
"split",
"(",
")",
"chr",
",",
"start",
",",
"stop",
"=",
"fields",
"[",
"0",
"]",
",",
"int",
"(",
"fields",
"[",
"1",
"]",
")",
",",
"int",
"(",
"fields",
"[",
"2",
"]",
")",
"if",
"chr",
"==",
"r_chr",
"and",
"start",
"<",
"r_stop",
"and",
"stop",
">=",
"r_start",
":",
"rval",
".",
"append",
"(",
"(",
"chr",
",",
"max",
"(",
"start",
",",
"r_start",
")",
",",
"min",
"(",
"stop",
",",
"r_stop",
")",
")",
")",
"return",
"rval"
] |
Get from a bed all intervals that overlap the region defined by
r_chr, r_start, r_stop.
|
[
"Get",
"from",
"a",
"bed",
"all",
"intervals",
"that",
"overlap",
"the",
"region",
"defined",
"by",
"r_chr",
"r_start",
"r_stop",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/scripts/bed_rand_intersect.py#L87-L100
|
train
|
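
A hedged usage sketch for overlapping_in_bed (as defined above) with a throwaway BED file; note that returned intervals are clipped to the query region:

import tempfile

with tempfile.NamedTemporaryFile("w", suffix=".bed", delete=False) as bed:
    bed.write("track name=demo\nchr1\t100\t200\nchr1\t300\t400\nchr2\t100\t200\n")
overlapping_in_bed(bed.name, "chr1", 150, 350)
# -> [('chr1', 150, 200), ('chr1', 300, 350)]
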
bxlab/bx-python
|
lib/bx/align/tools/tile.py
|
tile_interval
|
def tile_interval( sources, index, ref_src, start, end, seq_db=None ):
"""
Tile maf blocks onto an interval. The resulting block will span the interval
exactly and contain the column from the highest scoring alignment at each
position.
`sources`: list of sequence source names to include in final block
    `index`: an instance that can return maf blocks overlapping intervals
    `ref_src`: source name of the interval (i.e., hg17.chr7)
`start`: start of interval
`end`: end of interval
`seq_db`: a mapping for source names in the reference species to nib files
"""
# First entry in sources should also be on the reference species
assert sources[0].split('.')[0] == ref_src.split('.')[0], \
"%s != %s" % ( sources[0].split('.')[0], ref_src.split('.')[0] )
base_len = end - start
blocks = index.get( ref_src, start, end )
# From low to high score
blocks.sort(key=lambda t: t.score)
mask = [ -1 ] * base_len
ref_src_size = None
for i, block in enumerate( blocks ):
ref = block.get_component_by_src_start( ref_src )
ref_src_size = ref.src_size
assert ref.strand == "+"
slice_start = max( start, ref.start )
slice_end = min( end, ref.end )
for j in range( slice_start, slice_end ):
mask[j-start] = i
tiled = []
for i in range( len( sources ) ):
tiled.append( [] )
for ss, ee, index in intervals_from_mask( mask ):
# Interval with no covering alignments
if index < 0:
# Get sequence if available, otherwise just use 'N'
if seq_db:
tiled[0].append( bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).get( start+ss, ee-ss ) )
else:
tiled[0].append( "N" * (ee-ss) )
# Gaps in all other species
for row in tiled[1:]:
row.append( "-" * ( ee - ss ) )
else:
slice_start = start + ss
slice_end = start + ee
block = blocks[index]
ref = block.get_component_by_src_start( ref_src )
sliced = block.slice_by_component( ref, slice_start, slice_end )
sliced = sliced.limit_to_species( sources )
sliced.remove_all_gap_columns()
for i, src in enumerate( sources ):
comp = sliced.get_component_by_src_start( src )
if comp:
tiled[i].append( comp.text )
else:
tiled[i].append( "-" * sliced.text_size )
return [ "".join( t ) for t in tiled ]
|
python
|
def tile_interval( sources, index, ref_src, start, end, seq_db=None ):
"""
Tile maf blocks onto an interval. The resulting block will span the interval
exactly and contain the column from the highest scoring alignment at each
position.
`sources`: list of sequence source names to include in final block
    `index`: an instance that can return maf blocks overlapping intervals
    `ref_src`: source name of the interval (i.e., hg17.chr7)
`start`: start of interval
`end`: end of interval
`seq_db`: a mapping for source names in the reference species to nib files
"""
# First entry in sources should also be on the reference species
assert sources[0].split('.')[0] == ref_src.split('.')[0], \
"%s != %s" % ( sources[0].split('.')[0], ref_src.split('.')[0] )
base_len = end - start
blocks = index.get( ref_src, start, end )
# From low to high score
blocks.sort(key=lambda t: t.score)
mask = [ -1 ] * base_len
ref_src_size = None
for i, block in enumerate( blocks ):
ref = block.get_component_by_src_start( ref_src )
ref_src_size = ref.src_size
assert ref.strand == "+"
slice_start = max( start, ref.start )
slice_end = min( end, ref.end )
for j in range( slice_start, slice_end ):
mask[j-start] = i
tiled = []
for i in range( len( sources ) ):
tiled.append( [] )
for ss, ee, index in intervals_from_mask( mask ):
# Interval with no covering alignments
if index < 0:
# Get sequence if available, otherwise just use 'N'
if seq_db:
tiled[0].append( bx.seq.nib.NibFile( open( seq_db[ ref_src ] ) ).get( start+ss, ee-ss ) )
else:
tiled[0].append( "N" * (ee-ss) )
# Gaps in all other species
for row in tiled[1:]:
row.append( "-" * ( ee - ss ) )
else:
slice_start = start + ss
slice_end = start + ee
block = blocks[index]
ref = block.get_component_by_src_start( ref_src )
sliced = block.slice_by_component( ref, slice_start, slice_end )
sliced = sliced.limit_to_species( sources )
sliced.remove_all_gap_columns()
for i, src in enumerate( sources ):
comp = sliced.get_component_by_src_start( src )
if comp:
tiled[i].append( comp.text )
else:
tiled[i].append( "-" * sliced.text_size )
return [ "".join( t ) for t in tiled ]
|
[
"def",
"tile_interval",
"(",
"sources",
",",
"index",
",",
"ref_src",
",",
"start",
",",
"end",
",",
"seq_db",
"=",
"None",
")",
":",
"# First entry in sources should also be on the reference species",
"assert",
"sources",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"==",
"ref_src",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
",",
"\"%s != %s\"",
"%",
"(",
"sources",
"[",
"0",
"]",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
",",
"ref_src",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
")",
"base_len",
"=",
"end",
"-",
"start",
"blocks",
"=",
"index",
".",
"get",
"(",
"ref_src",
",",
"start",
",",
"end",
")",
"# From low to high score",
"blocks",
".",
"sort",
"(",
"key",
"=",
"lambda",
"t",
":",
"t",
".",
"score",
")",
"mask",
"=",
"[",
"-",
"1",
"]",
"*",
"base_len",
"ref_src_size",
"=",
"None",
"for",
"i",
",",
"block",
"in",
"enumerate",
"(",
"blocks",
")",
":",
"ref",
"=",
"block",
".",
"get_component_by_src_start",
"(",
"ref_src",
")",
"ref_src_size",
"=",
"ref",
".",
"src_size",
"assert",
"ref",
".",
"strand",
"==",
"\"+\"",
"slice_start",
"=",
"max",
"(",
"start",
",",
"ref",
".",
"start",
")",
"slice_end",
"=",
"min",
"(",
"end",
",",
"ref",
".",
"end",
")",
"for",
"j",
"in",
"range",
"(",
"slice_start",
",",
"slice_end",
")",
":",
"mask",
"[",
"j",
"-",
"start",
"]",
"=",
"i",
"tiled",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"sources",
")",
")",
":",
"tiled",
".",
"append",
"(",
"[",
"]",
")",
"for",
"ss",
",",
"ee",
",",
"index",
"in",
"intervals_from_mask",
"(",
"mask",
")",
":",
"# Interval with no covering alignments",
"if",
"index",
"<",
"0",
":",
"# Get sequence if available, otherwise just use 'N'",
"if",
"seq_db",
":",
"tiled",
"[",
"0",
"]",
".",
"append",
"(",
"bx",
".",
"seq",
".",
"nib",
".",
"NibFile",
"(",
"open",
"(",
"seq_db",
"[",
"ref_src",
"]",
")",
")",
".",
"get",
"(",
"start",
"+",
"ss",
",",
"ee",
"-",
"ss",
")",
")",
"else",
":",
"tiled",
"[",
"0",
"]",
".",
"append",
"(",
"\"N\"",
"*",
"(",
"ee",
"-",
"ss",
")",
")",
"# Gaps in all other species",
"for",
"row",
"in",
"tiled",
"[",
"1",
":",
"]",
":",
"row",
".",
"append",
"(",
"\"-\"",
"*",
"(",
"ee",
"-",
"ss",
")",
")",
"else",
":",
"slice_start",
"=",
"start",
"+",
"ss",
"slice_end",
"=",
"start",
"+",
"ee",
"block",
"=",
"blocks",
"[",
"index",
"]",
"ref",
"=",
"block",
".",
"get_component_by_src_start",
"(",
"ref_src",
")",
"sliced",
"=",
"block",
".",
"slice_by_component",
"(",
"ref",
",",
"slice_start",
",",
"slice_end",
")",
"sliced",
"=",
"sliced",
".",
"limit_to_species",
"(",
"sources",
")",
"sliced",
".",
"remove_all_gap_columns",
"(",
")",
"for",
"i",
",",
"src",
"in",
"enumerate",
"(",
"sources",
")",
":",
"comp",
"=",
"sliced",
".",
"get_component_by_src_start",
"(",
"src",
")",
"if",
"comp",
":",
"tiled",
"[",
"i",
"]",
".",
"append",
"(",
"comp",
".",
"text",
")",
"else",
":",
"tiled",
"[",
"i",
"]",
".",
"append",
"(",
"\"-\"",
"*",
"sliced",
".",
"text_size",
")",
"return",
"[",
"\"\"",
".",
"join",
"(",
"t",
")",
"for",
"t",
"in",
"tiled",
"]"
] |
Tile maf blocks onto an interval. The resulting block will span the interval
exactly and contain the column from the highest scoring alignment at each
position.
`sources`: list of sequence source names to include in final block
`index`: an instance that can return maf blocks overlapping intervals
`ref_src`: source name of the interval (i.e., hg17.chr7)
`start`: start of interval
`end`: end of interval
`seq_db`: a mapping for source names in the reference species to nib files
|
[
"Tile",
"maf",
"blocks",
"onto",
"an",
"interval",
".",
"The",
"resulting",
"block",
"will",
"span",
"the",
"interval",
"exactly",
"and",
"contain",
"the",
"column",
"from",
"the",
"highest",
"scoring",
"alignment",
"at",
"each",
"position",
"."
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/align/tools/tile.py#L13-L71
|
train
|
bxlab/bx-python
|
scripts/maf_tile_2bit.py
|
get_fill_char
|
def get_fill_char( maf_status ):
"""
Return the character that should be used to fill between blocks
having a given status
"""
## assert maf_status not in ( maf.MAF_CONTIG_NESTED_STATUS, maf.MAF_NEW_NESTED_STATUS,
## maf.MAF_MAYBE_NEW_NESTED_STATUS ), \
## "Nested rows do not make sense in a single coverage MAF (or do they?)"
if maf_status in ( maf.MAF_NEW_STATUS, maf.MAF_MAYBE_NEW_STATUS,
maf.MAF_NEW_NESTED_STATUS, maf.MAF_MAYBE_NEW_NESTED_STATUS ):
return "*"
elif maf_status in ( maf.MAF_INVERSE_STATUS, maf.MAF_INSERT_STATUS ):
return "="
elif maf_status in ( maf.MAF_CONTIG_STATUS, maf.MAF_CONTIG_NESTED_STATUS ):
return "#"
elif maf_status == maf.MAF_MISSING_STATUS:
return "X"
else:
raise ValueError("Unknwon maf status")
|
python
|
def get_fill_char( maf_status ):
"""
Return the character that should be used to fill between blocks
having a given status
"""
## assert maf_status not in ( maf.MAF_CONTIG_NESTED_STATUS, maf.MAF_NEW_NESTED_STATUS,
## maf.MAF_MAYBE_NEW_NESTED_STATUS ), \
## "Nested rows do not make sense in a single coverage MAF (or do they?)"
if maf_status in ( maf.MAF_NEW_STATUS, maf.MAF_MAYBE_NEW_STATUS,
maf.MAF_NEW_NESTED_STATUS, maf.MAF_MAYBE_NEW_NESTED_STATUS ):
return "*"
elif maf_status in ( maf.MAF_INVERSE_STATUS, maf.MAF_INSERT_STATUS ):
return "="
elif maf_status in ( maf.MAF_CONTIG_STATUS, maf.MAF_CONTIG_NESTED_STATUS ):
return "#"
elif maf_status == maf.MAF_MISSING_STATUS:
return "X"
else:
raise ValueError("Unknwon maf status")
|
[
"def",
"get_fill_char",
"(",
"maf_status",
")",
":",
"## assert maf_status not in ( maf.MAF_CONTIG_NESTED_STATUS, maf.MAF_NEW_NESTED_STATUS, ",
"## maf.MAF_MAYBE_NEW_NESTED_STATUS ), \\",
"## \"Nested rows do not make sense in a single coverage MAF (or do they?)\"",
"if",
"maf_status",
"in",
"(",
"maf",
".",
"MAF_NEW_STATUS",
",",
"maf",
".",
"MAF_MAYBE_NEW_STATUS",
",",
"maf",
".",
"MAF_NEW_NESTED_STATUS",
",",
"maf",
".",
"MAF_MAYBE_NEW_NESTED_STATUS",
")",
":",
"return",
"\"*\"",
"elif",
"maf_status",
"in",
"(",
"maf",
".",
"MAF_INVERSE_STATUS",
",",
"maf",
".",
"MAF_INSERT_STATUS",
")",
":",
"return",
"\"=\"",
"elif",
"maf_status",
"in",
"(",
"maf",
".",
"MAF_CONTIG_STATUS",
",",
"maf",
".",
"MAF_CONTIG_NESTED_STATUS",
")",
":",
"return",
"\"#\"",
"elif",
"maf_status",
"==",
"maf",
".",
"MAF_MISSING_STATUS",
":",
"return",
"\"X\"",
"else",
":",
"raise",
"ValueError",
"(",
"\"Unknwon maf status\"",
")"
] |
Return the character that should be used to fill between blocks
having a given status
|
[
"Return",
"the",
"character",
"that",
"should",
"be",
"used",
"to",
"fill",
"between",
"blocks",
"having",
"a",
"given",
"status"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/scripts/maf_tile_2bit.py#L72-L90
|
train
|
bxlab/bx-python
|
scripts/maf_tile_2bit.py
|
guess_fill_char
|
def guess_fill_char( left_comp, right_comp ):
"""
For the case where there is no annotated synteny we will try to guess it
"""
    # No left component, obviously new
    if left_comp is None:
        return "*"
# First check that the blocks have the same src (not just species) and
# orientation
    if ( left_comp.src == right_comp.src and left_comp.strand == right_comp.strand ):
# Are they completely contiguous? Easy to call that a gap
if left_comp.end == right_comp.start:
return "-"
# TODO: should be able to make some guesses about short insertions
# here
# All other cases we have no clue about
return "*"
|
python
|
def guess_fill_char( left_comp, right_comp ):
"""
For the case where there is no annotated synteny we will try to guess it
"""
    # No left component, obviously new
    if left_comp is None:
        return "*"
# First check that the blocks have the same src (not just species) and
# orientation
    if ( left_comp.src == right_comp.src and left_comp.strand == right_comp.strand ):
# Are they completely contiguous? Easy to call that a gap
if left_comp.end == right_comp.start:
return "-"
# TODO: should be able to make some guesses about short insertions
# here
# All other cases we have no clue about
return "*"
|
[
"def",
"guess_fill_char",
"(",
"left_comp",
",",
"right_comp",
")",
":",
"# No left component, obiously new",
"return",
"\"*\"",
"# First check that the blocks have the same src (not just species) and ",
"# orientation",
"if",
"(",
"left_comp",
".",
"src",
"==",
"right_comp",
".",
"src",
"and",
"left_comp",
".",
"strand",
"!=",
"right_comp",
".",
"strand",
")",
":",
"# Are they completely contiguous? Easy to call that a gap",
"if",
"left_comp",
".",
"end",
"==",
"right_comp",
".",
"start",
":",
"return",
"\"-\"",
"# TODO: should be able to make some guesses about short insertions",
"# here",
"# All other cases we have no clue about",
"return",
"\"*\""
] |
For the case where there is no annotated synteny we will try to guess it
|
[
"For",
"the",
"case",
"where",
"there",
"is",
"no",
"annotated",
"synteny",
"we",
"will",
"try",
"to",
"guess",
"it"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/scripts/maf_tile_2bit.py#L92-L107
|
train
|
bxlab/bx-python
|
scripts/maf_tile_2bit.py
|
remove_all_gap_columns
|
def remove_all_gap_columns( texts ):
"""
Remove any columns containing only gaps from alignment texts
"""
seqs = [ list( t ) for t in texts ]
i = 0
text_size = len( texts[0] )
while i < text_size:
all_gap = True
for seq in seqs:
if seq[i] not in ( '-', '#', '*', '=', 'X', '@' ):
all_gap = False
if all_gap:
for seq in seqs:
del seq[i]
text_size -= 1
else:
i += 1
return [ ''.join( s ) for s in seqs ]
|
python
|
def remove_all_gap_columns( texts ):
"""
Remove any columns containing only gaps from alignment texts
"""
seqs = [ list( t ) for t in texts ]
i = 0
text_size = len( texts[0] )
while i < text_size:
all_gap = True
for seq in seqs:
if seq[i] not in ( '-', '#', '*', '=', 'X', '@' ):
all_gap = False
if all_gap:
for seq in seqs:
del seq[i]
text_size -= 1
else:
i += 1
return [ ''.join( s ) for s in seqs ]
|
[
"def",
"remove_all_gap_columns",
"(",
"texts",
")",
":",
"seqs",
"=",
"[",
"list",
"(",
"t",
")",
"for",
"t",
"in",
"texts",
"]",
"i",
"=",
"0",
"text_size",
"=",
"len",
"(",
"texts",
"[",
"0",
"]",
")",
"while",
"i",
"<",
"text_size",
":",
"all_gap",
"=",
"True",
"for",
"seq",
"in",
"seqs",
":",
"if",
"seq",
"[",
"i",
"]",
"not",
"in",
"(",
"'-'",
",",
"'#'",
",",
"'*'",
",",
"'='",
",",
"'X'",
",",
"'@'",
")",
":",
"all_gap",
"=",
"False",
"if",
"all_gap",
":",
"for",
"seq",
"in",
"seqs",
":",
"del",
"seq",
"[",
"i",
"]",
"text_size",
"-=",
"1",
"else",
":",
"i",
"+=",
"1",
"return",
"[",
"''",
".",
"join",
"(",
"s",
")",
"for",
"s",
"in",
"seqs",
"]"
] |
Remove any columns containing only gaps from alignment texts
|
[
"Remove",
"any",
"columns",
"containing",
"only",
"gaps",
"from",
"alignment",
"texts"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/scripts/maf_tile_2bit.py#L109-L127
|
train
|
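
A minimal sketch of this script-level remove_all_gap_columns (as defined above). Unlike the Alignment method of the same name, fill characters such as '*' also count as gaps:

remove_all_gap_columns(["A-C", "G*T"])
# -> ['AC', 'GT']: column 1 contains only gap/fill characters and is dropped
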
bxlab/bx-python
|
lib/bx/cookbook/__init__.py
|
cross_lists
|
def cross_lists(*sets):
"""Return the cross product of the arguments"""
wheels = [iter(_) for _ in sets]
digits = [next(it) for it in wheels]
while True:
yield digits[:]
for i in range(len(digits)-1, -1, -1):
try:
digits[i] = next(wheels[i])
break
except StopIteration:
wheels[i] = iter(sets[i])
digits[i] = next(wheels[i])
else:
break
|
python
|
def cross_lists(*sets):
"""Return the cross product of the arguments"""
wheels = [iter(_) for _ in sets]
digits = [next(it) for it in wheels]
while True:
yield digits[:]
for i in range(len(digits)-1, -1, -1):
try:
digits[i] = next(wheels[i])
break
except StopIteration:
wheels[i] = iter(sets[i])
digits[i] = next(wheels[i])
else:
break
|
[
"def",
"cross_lists",
"(",
"*",
"sets",
")",
":",
"wheels",
"=",
"[",
"iter",
"(",
"_",
")",
"for",
"_",
"in",
"sets",
"]",
"digits",
"=",
"[",
"next",
"(",
"it",
")",
"for",
"it",
"in",
"wheels",
"]",
"while",
"True",
":",
"yield",
"digits",
"[",
":",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"digits",
")",
"-",
"1",
",",
"-",
"1",
",",
"-",
"1",
")",
":",
"try",
":",
"digits",
"[",
"i",
"]",
"=",
"next",
"(",
"wheels",
"[",
"i",
"]",
")",
"break",
"except",
"StopIteration",
":",
"wheels",
"[",
"i",
"]",
"=",
"iter",
"(",
"sets",
"[",
"i",
"]",
")",
"digits",
"[",
"i",
"]",
"=",
"next",
"(",
"wheels",
"[",
"i",
"]",
")",
"else",
":",
"break"
] |
Return the cross product of the arguments
|
[
"Return",
"the",
"cross",
"product",
"of",
"the",
"arguments"
] |
09cb725284803df90a468d910f2274628d8647de
|
https://github.com/bxlab/bx-python/blob/09cb725284803df90a468d910f2274628d8647de/lib/bx/cookbook/__init__.py#L16-L30
|
train
|
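
A minimal usage sketch for cross_lists; for finite inputs it behaves like itertools.product, but yields lists rather than tuples:

from bx.cookbook import cross_lists  # assumes bx-python is installed

for combo in cross_lists([1, 2], "ab"):
    print(combo)
# prints [1, 'a'], [1, 'b'], [2, 'a'], [2, 'b']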