def filter_all_clusters(data, samples, ipyclient):
"""
Open the clust_database HDF5 array with seqs, catg, and filter data.
Fill the remaining filters.
"""
## create loadbalanced ipyclient
lbview = ipyclient.load_balanced_view()
## get chunk size from the HD5 array and close
with h5py.File(data.clust_database, 'r') as io5:
## the size of chunks for reading/writing
optim = io5["seqs"].attrs["chunksize"][0]
## the samples in the database in their locus order
dbsamples = io5["seqs"].attrs["samples"]
## the total number of loci
nloci = io5["seqs"].shape[0]
## make a tmp directory for saving chunked arrays to
chunkdir = os.path.join(data.dirs.outfiles, data.name+"_tmpchunks")
if not os.path.exists(chunkdir):
os.mkdir(chunkdir)
## get the indices of the samples that we are going to include
sidx = select_samples(dbsamples, samples)
## do the same for the populations samples
if data.populations:
data._populations = {}
for pop in data.populations:
try:
_samps = [data.samples[i] for i in data.populations[pop][1]]
data._populations[pop] = (
data.populations[pop][0],
select_samples(dbsamples, _samps, sidx))
except:
print(" Sample in populations file not present in assembly - {}".format(data.populations[pop][1]))
raise
LOGGER.info("samples %s \n, dbsamples %s \n, sidx %s \n",
samples, dbsamples, sidx)
## Put inside a try statement so we can delete tmpchunks
try:
## load a list of args to send to Engines. Each arg contains the index
## to sample optim loci from catg, seqs, filters &or edges, which will
## be loaded on the remote Engine.
## create job queue
start = time.time()
printstr = " filtering loci | {} | s7 |"
fasyncs = {}
submitted = 0
while submitted < nloci:
hslice = np.array([submitted, submitted+optim])
fasyncs[hslice[0]] = lbview.apply(filter_stacks, *(data, sidx, hslice))
submitted += optim
## run filter_stacks on all chunks
while 1:
readies = [i.ready() for i in fasyncs.values()]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(readies), sum(readies),
printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if sum(readies) == len(readies):
print("")
break
## raise error if any jobs failed
for async in fasyncs:
if not fasyncs[async].successful():
LOGGER.error("error in filter_stacks on chunk %s: %s",
async, fasyncs[async].exception())
raise IPyradWarningExit("error in filter_stacks on chunk {}: {}"\
.format(async, fasyncs[async].exception()))
ipyclient.purge_everything()
## get all the saved tmp arrays for each slice
tmpsnp = glob.glob(os.path.join(chunkdir, "snpf.*.npy"))
tmphet = glob.glob(os.path.join(chunkdir, "hetf.*.npy"))
tmpmin = glob.glob(os.path.join(chunkdir, "minf.*.npy"))
tmpedg = glob.glob(os.path.join(chunkdir, "edgf.*.npy"))
tmppld = glob.glob(os.path.join(chunkdir, "pldf.*.npy"))
tmpind = glob.glob(os.path.join(chunkdir, "indf.*.npy"))
## sort array files within each group
arrdict = OrderedDict([('ind', tmpind),
('snp', tmpsnp), ('het', tmphet),
('min', tmpmin), ('edg', tmpedg),
('pld', tmppld)])
for arrglob in arrdict.values():
arrglob.sort(key=lambda x: int(x.rsplit(".")[-2]))
## re-load the full filter array whose order is
## ["duplicates", "max_indels", "max_snps", "max_hets", "min_samps", "max_alleles"]
io5 = h5py.File(data.database, 'r+')
superfilter = np.zeros(io5["filters"].shape, io5["filters"].dtype)
## iterate across filter types (dups is already filled)
## we have [4,4] b/c minf and edgf both write to minf
for fidx, ftype in zip([1, 2, 3, 4, 4, 5], arrdict.keys()):
## fill in the edgefilters
for ffile in arrdict[ftype]:
## grab a file and get its slice
hslice = int(ffile.split(".")[-2])
## load in the array
arr = np.load(ffile)
## store slice into full array (we use += here because the minf
## and edgf arrays both write to the same filter).
superfilter[hslice:hslice+optim, fidx] += arr
## store to DB
io5["filters"][:] += superfilter
del arr, superfilter
## store the other arrayed values (edges, snps)
edgarrs = glob.glob(os.path.join(chunkdir, "edgearr.*.npy"))
snparrs = glob.glob(os.path.join(chunkdir, "snpsarr.*.npy"))
## sort array files within each group
arrdict = OrderedDict([('edges', edgarrs), ('snps', snparrs)])
for arrglob in arrdict.values():
arrglob.sort(key=lambda x: int(x.rsplit(".")[-2]))
## fill the edge array, splits are already in there.
superedge = np.zeros(io5['edges'].shape, io5['edges'].dtype)
for ffile in arrdict['edges']:
## grab a file and get its slice
hslice = int(ffile.split(".")[-2])
## load in the array w/ shape (hslice, 5)
arr = np.load(ffile)
## store slice into full array
superedge[hslice:hslice+optim, :] = arr
io5["edges"][:, :] = superedge
del arr, superedge
## fill the snps array. shape= (nloci, maxlen, 2)
supersnps = np.zeros(io5['snps'].shape, io5['snps'].dtype)
for ffile in arrdict['snps']:
## grab a file and get its slice
hslice = int(ffile.split(".")[-2])
## load in the array w/ shape (hslice, maxlen, 2)
arr = np.load(ffile)
## store slice into full array
LOGGER.info("shapes, %s %s", supersnps.shape, arr.shape)
supersnps[hslice:hslice+optim, :, :] = arr
io5["snps"][:] = supersnps
del arr
io5.close()
finally:
## clean up the tmp files/dirs even if we failed.
try:
LOGGER.info("finished filtering")
shutil.rmtree(chunkdir)
except (IOError, OSError):
pass
def padnames(names):
""" pads names for loci output """
## get longest name
longname_len = max(len(i) for i in names)
## Padding distance between name and seq.
padding = 5
## add pad to names
pnames = [name + " " * (longname_len - len(name)+ padding) \
for name in names]
snppad = "//" + " " * (longname_len - 2 + padding)
return np.array(pnames), snppad
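## A minimal usage sketch for padnames (illustrative helper, relying only on the
## function defined above): every padded name and the "//" snp-line prefix come
## out the same width, so sequences and the snpstring align in the .loci output.
def _demo_padnames():
    pnames, snppad = padnames(["1A_0", "longer_sample_name"])
    ## all padded names share one total width
    assert len(set(len(i) for i in pnames)) == 1
    ## the snp-line prefix matches that width
    assert len(snppad) == len(pnames[0])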
def make_loci_and_stats(data, samples, ipyclient):
"""
Makes the .loci file from the h5 database. Iterates over optim loci at a
time and writes to file. Also makes the alleles file if requested.
"""
## start vcf progress bar
start = time.time()
printstr = " building loci/stats | {} | s7 |"
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
## get some db info
with h5py.File(data.clust_database, 'r') as io5:
## will iterate optim loci at a time
optim = io5["seqs"].attrs["chunksize"][0]
nloci = io5["seqs"].shape[0]
anames = io5["seqs"].attrs["samples"]
## get name and snp padding
pnames, snppad = padnames(anames)
snames = [i.name for i in samples]
smask = np.array([i not in snames for i in anames])
## keep track of how many loci from each sample pass all filters
samplecov = np.zeros(len(anames), dtype=np.int32)
## set initial value to zero for all values above min_samples_locus
#for cov in range(data.paramsdict["min_samples_locus"], len(anames)+1):
locuscov = Counter()
for cov in range(len(anames)+1):
locuscov[cov] = 0
## client for sending jobs to parallel engines
lbview = ipyclient.load_balanced_view()
## send jobs in chunks
loci_asyncs = {}
for istart in xrange(0, nloci, optim):
args = [data, optim, pnames, snppad, smask, istart, samplecov, locuscov, 1]
loci_asyncs[istart] = lbview.apply(locichunk, args)
while 1:
done = [i.ready() for i in loci_asyncs.values()]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(done), sum(done), printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if len(done) == sum(done):
print("")
break
## check for errors
for job in loci_asyncs:
if loci_asyncs[job].ready() and not loci_asyncs[job].successful():
LOGGER.error("error in building loci [%s]: %s",
job, loci_asyncs[job].exception())
raise IPyradWarningExit(loci_asyncs[job].exception())
## concat and cleanup
results = [i.get() for i in loci_asyncs.values()]
## update dictionaries
for chunk in results:
samplecov += chunk[0]
locuscov.update(chunk[1])
## get all chunk files
tmploci = glob.glob(data.outfiles.loci+".[0-9]*")
## sort by start value
tmploci.sort(key=lambda x: int(x.split(".")[-1]))
## write tmpchunks to locus file
locifile = open(data.outfiles.loci, 'w')
for tmploc in tmploci:
with open(tmploc, 'r') as inloc:
locdat = inloc.read()
locifile.write(locdat)
os.remove(tmploc)
locifile.close()
## make stats file from data
make_stats(data, samples, samplecov, locuscov)
## repeat for alleles output
if "a" in data.paramsdict["output_formats"]:
loci_asyncs = {}
for istart in xrange(0, nloci, optim):
args = [data, optim, pnames, snppad, smask, istart, samplecov, locuscov, 0]
loci_asyncs[istart] = lbview.apply(locichunk, args)
while 1:
done = [i.ready() for i in loci_asyncs.values()]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(done), sum(done),
" building alleles | {} | s7 |".format(elapsed),
spacer=data._spacer)
time.sleep(0.1)
if len(done) == sum(done):
print("")
break
## check for errors
for job in loci_asyncs:
if loci_asyncs[job].ready() and not loci_asyncs[job].successful():
LOGGER.error("error in building alleles [%s]: %s",
job, loci_asyncs[job].exception())
raise IPyradWarningExit(loci_asyncs[job].exception())
## concat and cleanup
#results = [i.get() for i in loci_asyncs.values()]
## get all chunk files
tmploci = glob.glob(data.outfiles.loci+".[0-9]*")
## sort by start value
tmploci.sort(key=lambda x: int(x.split(".")[-1]))
## write tmpchunks to locus file
locifile = open(data.outfiles.alleles, 'w')
for tmploc in tmploci:
with open(tmploc, 'r') as inloc:
locdat = inloc.read()
inalleles = get_alleles(locdat)
locifile.write(inalleles)
os.remove(tmploc)
locifile.close()
def locichunk(args):
"""
Function from make_loci to apply to chunks. smask is sample mask.
"""
## parse args
data, optim, pnames, snppad, smask, start, samplecov, locuscov, upper = args
## this slice
hslice = [start, start+optim]
## get filter db info
co5 = h5py.File(data.database, 'r')
afilt = co5["filters"][hslice[0]:hslice[1], ]
aedge = co5["edges"][hslice[0]:hslice[1], ]
asnps = co5["snps"][hslice[0]:hslice[1], ]
## get seqs db
io5 = h5py.File(data.clust_database, 'r')
if upper:
aseqs = np.char.upper(io5["seqs"][hslice[0]:hslice[1], ])
else:
aseqs = io5["seqs"][hslice[0]:hslice[1], ]
## which loci passed all filters
keep = np.where(np.sum(afilt, axis=1) == 0)[0]
store = []
## write loci that passed after trimming edges, then write snp string
for iloc in keep:
edg = aedge[iloc]
#LOGGER.info("!!!!!! iloc edg %s, %s", iloc, edg)
args = [iloc, pnames, snppad, edg, aseqs, asnps, smask, samplecov, locuscov, start]
if edg[4]:
outstr, samplecov, locuscov = enter_pairs(*args)
store.append(outstr)
else:
outstr, samplecov, locuscov = enter_singles(*args)
store.append(outstr)
## write to file and clear store
tmpo = os.path.join(data.dirs.outfiles, data.name+".loci.{}".format(start))
with open(tmpo, 'w') as tmpout:
tmpout.write("\n".join(store) + "\n")
## close handles
io5.close()
co5.close()
## return sample counter
return samplecov, locuscov, start
def enter_pairs(iloc, pnames, snppad, edg, aseqs, asnps, smask, samplecov, locuscov, start):
""" enters funcs for pairs """
## snps was created using only the selected samples.
LOGGER.info("edges in enter_pairs %s", edg)
seq1 = aseqs[iloc, :, edg[0]:edg[1]+1]
snp1 = asnps[iloc, edg[0]:edg[1]+1, ]
## the 2nd read edges are +5 for the spacer
seq2 = aseqs[iloc, :, edg[2]:edg[3]+1]
snp2 = asnps[iloc, edg[2]:edg[3]+1, ]
## remove rows with all Ns, seq has only selected samples
nalln = np.all(seq1 == "N", axis=1)
## make mask of removed rows and excluded samples. Use the inverse
## of this to save the coverage for samples
nsidx = nalln + smask
LOGGER.info("nsidx %s, nalln %s, smask %s", nsidx, nalln, smask)
samplecov = samplecov + np.invert(nsidx).astype(np.int32)
LOGGER.info("samplecov %s", samplecov)
idx = np.sum(np.invert(nsidx).astype(np.int32))
LOGGER.info("idx %s", idx)
locuscov[idx] += 1
## select the remaining names in order
seq1 = seq1[~nsidx, ]
seq2 = seq2[~nsidx, ]
names = pnames[~nsidx]
## save string for printing, excluding names not in samples
outstr = "\n".join(\
[name + s1.tostring()+"nnnn"+s2.tostring() for name, s1, s2 in \
zip(names, seq1, seq2)])
#LOGGER.info("s1 %s", s1.tostring())
#LOGGER.info("s2 %s", s2.tostring())
## get snp string and add to store
snpstring1 = ["-" if snp1[i, 0] else \
"*" if snp1[i, 1] else \
" " for i in range(len(snp1))]
snpstring2 = ["-" if snp2[i, 0] else \
"*" if snp2[i, 1] else \
" " for i in range(len(snp2))]
#npis = str(snpstring1+snpstring2).count("*")
#nvars = str(snpstring1+snpstring2).count("-") + npis
outstr += "\n" + snppad + "".join(snpstring1)+\
" "+"".join(snpstring2)+"|{}|".format(iloc+start)
#"|LOCID={},DBID={},NVAR={},NPIS={}|"\
#.format(1+iloc+start, iloc, nvars, npis)
return outstr, samplecov, locuscov
def enter_singles(iloc, pnames, snppad, edg, aseqs, asnps, smask, samplecov, locuscov, start):
""" enter funcs for SE or merged data """
## grab all seqs between edges
seq = aseqs[iloc, :, edg[0]:edg[1]+1]
## snps was created using only the selected samples, and is edge masked.
## The mask is for counting snps quickly, but trimming is still needed here
## to make the snps line up with the seqs in the snp string.
snp = asnps[iloc, edg[0]:edg[1]+1, ]
## remove rows with all Ns, seq has only selected samples
nalln = np.all(seq == "N", axis=1)
## make mask of removed rows and excluded samples. Use the inverse
## of this to save the coverage for samples
nsidx = nalln + smask
samplecov = samplecov + np.invert(nsidx).astype(np.int32)
idx = np.sum(np.invert(nsidx).astype(np.int32))
locuscov[idx] += 1
## select the remaining names in order
seq = seq[~nsidx, ]
names = pnames[~nsidx]
## save string for printing, excluding names not in samples
outstr = "\n".join(\
[name + s.tostring() for name, s in zip(names, seq)])
## get snp string and add to store
snpstring = ["-" if snp[i, 0] else \
"*" if snp[i, 1] else \
" " for i in range(len(snp))]
outstr += "\n" + snppad + "".join(snpstring) + "|{}|".format(iloc+start)
#LOGGER.info("outstr %s", outstr)
return outstr, samplecov, locuscov
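## A minimal sketch (illustrative helper, assuming the boolean snps array built
## by snpcount_numba) of how the snpstring line above is assembled: column 0
## flags a variable site ("-"), column 1 a parsimony-informative site ("*"),
## and everything else prints as a space.
def _demo_snpstring(snp_bool_pairs):
    """snp_bool_pairs: iterable of (is_variable, is_pis) flags for one locus."""
    return "".join("*" if pis else "-" if var else " "
                   for var, pis in snp_bool_pairs)
## e.g. _demo_snpstring([(0, 0), (1, 0), (0, 1)])  ->  " -*"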
def init_arrays(data):
"""
Create database file for storing final filtered snps data as hdf5 array.
Copies splits and duplicates info from clust_database to database.
"""
## get stats from step6 h5 and create new h5
co5 = h5py.File(data.clust_database, 'r')
io5 = h5py.File(data.database, 'w')
## get maxlen and chunk len
maxlen = data._hackersonly["max_fragment_length"] + 20
chunks = co5["seqs"].attrs["chunksize"][0]
nloci = co5["seqs"].shape[0]
## make array for snp string, 2 cols, - and *
snps = io5.create_dataset("snps", (nloci, maxlen, 2),
dtype=np.bool,
chunks=(chunks, maxlen, 2),
compression='gzip')
snps.attrs["chunksize"] = chunks
snps.attrs["names"] = ["-", "*"]
## array for filters that will be applied in step7
filters = io5.create_dataset("filters", (nloci, 6), dtype=np.bool)
filters.attrs["filters"] = ["duplicates", "max_indels",
"max_snps", "max_shared_hets",
"min_samps", "max_alleles"]
## array for edgetrimming
edges = io5.create_dataset("edges", (nloci, 5),
dtype=np.uint16,
chunks=(chunks, 5),
compression="gzip")
edges.attrs["chunksize"] = chunks
edges.attrs["names"] = ["R1_L", "R1_R", "R2_L", "R2_R", "sep"]
## xfer data from clustdb to finaldb
edges[:, 4] = co5["splits"][:]
filters[:, 0] = co5["duplicates"][:]
## close h5s
io5.close()
co5.close()
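## Reference sketch of the datasets created above in data.database (shapes and
## dtypes as passed to create_dataset):
##   snps     (nloci, maxlen, 2)  bool    per-site snpstring flags, names ["-", "*"]
##   filters  (nloci, 6)          bool    ["duplicates", "max_indels", "max_snps",
##                                         "max_shared_hets", "min_samps", "max_alleles"]
##   edges    (nloci, 5)          uint16  ["R1_L", "R1_R", "R2_L", "R2_R", "sep"]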
def filter_stacks(data, sidx, hslice):
"""
Grab a chunk of loci from the HDF5 database. Apply filters and fill the
the filters boolean array.
The design of the filtering steps intentionally sacrifices some performance
for an increase in readability and extensibility. Calling multiple filter
functions ends up running through the sequences per stack several times,
but I felt this design made more sense, and also will easily allow us to
add more filters in the future.
"""
LOGGER.info("Entering filter_stacks")
## open h5 handles
io5 = h5py.File(data.clust_database, 'r')
co5 = h5py.File(data.database, 'r')
## get a chunk (hslice) of loci for the selected samples (sidx)
#superseqs = io5["seqs"][hslice[0]:hslice[1], sidx,]
## get an int view of the seq array
#superints = io5["seqs"][hslice[0]:hslice[1], sidx, :].view(np.int8)
## we need to use upper to skip lowercase allele storage
## this slows down the rate of loading in data by a ton.
superints = np.char.upper(io5["seqs"][hslice[0]:hslice[1], sidx,]).view(np.int8)
LOGGER.info("superints shape {}".format(superints.shape))
## fill edge filter
## get edges of superseqs and supercats, since edges need to be trimmed
## before counting hets, snps, inds. Technically, this could edge trim
## clusters to the point that they are below the minlen, and so this
## also constitutes a filter, though one that is uncommon. For this
## reason we have another filter called edgfilter.
splits = co5["edges"][hslice[0]:hslice[1], 4]
edgfilter, edgearr = get_edges(data, superints, splits)
del splits
LOGGER.info('passed edges %s', hslice[0])
## minsamp coverages filtered from superseqs
minfilter = filter_minsamp(data, superints)
LOGGER.info('passed minfilt %s', hslice[0])
## maxhets per site column from superseqs after trimming edges
hetfilter = filter_maxhet(data, superints, edgearr)
LOGGER.info('passed minhet %s', hslice[0])
## ploidy filter
pldfilter = io5["nalleles"][hslice[0]:hslice[1]].max(axis=1) > \
data.paramsdict["max_alleles_consens"]
## indel filter, needs a fresh superints b/c get_edges does (-)->(N)
indfilter = filter_indels(data, superints, edgearr)
LOGGER.info('passed minind %s', hslice[0])
## Build the .loci snpstring as an array (snps)
## shape = (chunk, 1) dtype=S1, or should it be (chunk, 2) for [-,*] ?
snpfilter, snpsarr = filter_maxsnp(data, superints, edgearr)
LOGGER.info("edg %s", edgfilter.sum())
LOGGER.info("min %s", minfilter.sum())
LOGGER.info("het %s", hetfilter.sum())
LOGGER.info("pld %s", pldfilter.sum())
LOGGER.info("snp %s", snpfilter.sum())
LOGGER.info("ind %s", indfilter.sum())
## SAVE FILTERS AND INFO TO DISK BY SLICE NUMBER (.0.tmp.h5)
chunkdir = os.path.join(data.dirs.outfiles, data.name+"_tmpchunks")
handle = os.path.join(chunkdir, "edgf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, edgfilter)
handle = os.path.join(chunkdir, "minf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, minfilter)
handle = os.path.join(chunkdir, "hetf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, hetfilter)
handle = os.path.join(chunkdir, "snpf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, snpfilter)
handle = os.path.join(chunkdir, "pldf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, pldfilter)
handle = os.path.join(chunkdir, "indf.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, indfilter)
handle = os.path.join(chunkdir, "snpsarr.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, snpsarr)
handle = os.path.join(chunkdir, "edgearr.{}.npy".format(hslice[0]))
with open(handle, 'w') as out:
np.save(out, edgearr)
io5.close()
co5.close()
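## Note: each chunk writes one "<tag>.<hslice>.npy" file per filter or array to
## the tmpchunks dir. filter_all_clusters later maps these back into the
## "filters" columns (indf -> max_indels, snpf -> max_snps, hetf -> max_hets,
## minf and edgf -> min_samps, pldf -> max_alleles) and into the "edges"
## (edgearr) and "snps" (snpsarr) datasets.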
def get_edges(data, superints, splits):
"""
Gets edge trimming based on the overlap of sequences at the edges of
alignments and the tuple arg passed in for edge_trimming. Trims as
(R1 left, R1 right, R2 left, R2 right). We also trim off the restriction
site if it is present. This modifies superints, and so should be run on an
engine so it doesn't affect local copy. If this is changed to run locally
for some reason make sure we copy the superints instead.
"""
## the filtering arg and parse it into minsamp numbers
if "trim_overhang" in data.paramsdict:
edgetrims = np.array(data.paramsdict["trim_overhang"]).astype(np.int16)
else:
edgetrims = np.array(data.paramsdict["trim_loci"]).astype(np.int16)
## Cuts 3 and 4 are only for 3rad/radcap
## TODO: This is moderately hackish, it's not using cut3/4
## correctly, just assuming the length is the same as cut1/2
try:
cut1, cut2, _, _ = data.paramsdict["restriction_overhang"]
LOGGER.debug("Found 3Rad cut sites")
except ValueError:
cut1, cut2 = data.paramsdict["restriction_overhang"]
cuts = np.array([len(cut1), len(cut2)], dtype=np.int16)
## a local array for storing edge trims
edges = np.zeros((superints.shape[0], 5), dtype=np.int16)
## a local array for storing edge filtered loci, these are stored
## eventually as minsamp excludes.
edgefilter = np.zeros((superints.shape[0],), dtype=np.bool)
## TRIM GUIDE. The cut site lengths are always trimmed. In addition,
## edge overhangs are trimmed to min(4, minsamp), and then additional
## number of columns is trimmed based on edgetrims values.
## A special case, -1 value means no trim at all.
if data.paramsdict["min_samples_locus"] <= 4:
minedge = np.int16(data.paramsdict["min_samples_locus"])
else:
minedge = np.int16(max(4, data.paramsdict["min_samples_locus"]))
## convert all - to N to make this easier
nodashints = copy.deepcopy(superints)#.copy()
nodashints[nodashints == 45] = 78
## trim overhanging edges
## get the number not Ns in each site,
#ccx = np.sum(superseqs != "N", axis=1)
ccx = np.sum(nodashints != 78, axis=1, dtype=np.uint16)
efi, edg = edgetrim_numba(splits, ccx, edges, edgefilter, edgetrims, cuts, minedge)
return efi, edg
def filter_minsamp(data, superints):
"""
Filter minimum # of samples per locus from superseqs[chunk]. The shape
of superseqs is [chunk, sum(sidx), maxlen]
"""
## global minsamp
minsamp = data.paramsdict["min_samples_locus"]
## use population minsamps
if data.populations:
## data._populations will look like this:
## {'a': (3, [0, 1, 2, 3],
## 'b': (3, [4, 5, 6, 7],
## 'c': (3, [8, 9, 10, 11]}
LOGGER.info("POPULATIONS %s", data.populations)
## superints has already been subsampled by sidx
## get the sidx values for each pop
minfilters = []
for pop in data._populations:
samps = data._populations[pop][1]
minsamp = data._populations[pop][0]
mini = np.sum(~np.all(superints[:, samps, :] == 78, axis=2), axis=1) < minsamp
minfilters.append(mini)
## get sum across all pops for each locus
minfilt = np.any(minfilters, axis=0)
else:
## if not pop-file use global minsamp filter
minfilt = np.sum(~np.all(superints == 78, axis=2), axis=1) < minsamp
#LOGGER.info("Filtered by min_samples_locus - {}".format(minfilt.sum()))
return minfilt
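## A minimal sketch (illustrative helper) of the global minsamp test used above,
## on the int8-coded view where 78 is ASCII "N": a sample has data at a locus if
## not all of its sites are N, and a locus is filtered when fewer than minsamp
## samples have data.
def _demo_minsamp(superints, minsamp):
    import numpy as np
    has_data = ~np.all(superints == 78, axis=2)   ## shape (nloci, nsamples)
    return has_data.sum(axis=1) < minsamp         ## True means filtered out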
def ucount(sitecol):
"""
Used to count the number of unique bases in a site for snpstring.
Returns a snpstring character: "*", "-", or " ".
"""
## a list for only catgs
catg = [i for i in sitecol if i in "CATG"]
## find sites that are ambigs
where = [sitecol[sitecol == i] for i in "RSKYWM"]
## for each occurrence of RSKYWM add ambig resolution to catg
for ambig in where:
for _ in range(ambig.size):
catg += list(AMBIGS[ambig[0]])
## if invariant return " "
if len(set(catg)) < 2:
return " "
else:
## get second most common site
second = Counter(catg).most_common()[1][1]
if second > 1:
return "*"
else:
return "-" |
def filter_maxsnp(data, superints, edgearr):
"""
Filter max # of SNPs per locus. Do R1 and R2 separately if PE.
Also generate the snpsite line for the .loci format and save in the snp arr
This uses the edge filters that have been built based on trimming, and
saves the snps array with edges filtered. **Loci are not yet filtered.**
"""
## an empty array to count with failed loci
snpfilt = np.zeros(superints.shape[0], dtype=np.bool)
snpsarr = np.zeros((superints.shape[0], superints.shape[2], 2), dtype=np.bool)
maxsnps = np.array(data.paramsdict['max_SNPs_locus'], dtype=np.int16)
## get the per site snp string | shape=(chunk, maxlen)
# snpsarr[:, :, 0] = snps == "-"
# snpsarr[:, :, 1] = snps == "*"
snpsarr = snpcount_numba(superints, snpsarr)
LOGGER.info("---found the snps: %s", snpsarr.sum())
snpfilt, snpsarr = snpfilter_numba(snpsarr, snpfilt, edgearr, maxsnps)
LOGGER.info("---filtered snps: %s", snpfilt.sum())
return snpfilt, snpsarr
def snpcount_numba(superints, snpsarr):
"""
Used to count the number of unique bases in a site for snpstring.
"""
## iterate over all loci
for iloc in xrange(superints.shape[0]):
for site in xrange(superints.shape[2]):
## make new array
catg = np.zeros(4, dtype=np.int16)
## a list for only catgs
ncol = superints[iloc, :, site]
for idx in range(ncol.shape[0]):
if ncol[idx] == 67: #C
catg[0] += 1
elif ncol[idx] == 65: #A
catg[1] += 1
elif ncol[idx] == 84: #T
catg[2] += 1
elif ncol[idx] == 71: #G
catg[3] += 1
elif ncol[idx] == 82: #R
catg[1] += 1 #A
catg[3] += 1 #G
elif ncol[idx] == 75: #K
catg[2] += 1 #T
catg[3] += 1 #G
elif ncol[idx] == 83: #S
catg[0] += 1 #C
catg[3] += 1 #G
elif ncol[idx] == 89: #Y
catg[0] += 1 #C
catg[2] += 1 #T
elif ncol[idx] == 87: #W
catg[1] += 1 #A
catg[2] += 1 #T
elif ncol[idx] == 77: #M
catg[0] += 1 #C
catg[1] += 1 #A
## get second most common site
catg.sort()
## if invariant e.g., [0, 0, 0, 9], then nothing (" ")
if not catg[2]:
pass
else:
if catg[2] > 1:
snpsarr[iloc, site, 1] = True
else:
snpsarr[iloc, site, 0] = True
return snpsarr
def filter_maxhet(data, superints, edgearr):
"""
Filter max shared heterozygosity per locus. The dimensions of superseqs
are (chunk, sum(sidx), maxlen). Don't need split info since it applies to
entire loci based on site patterns (i.e., location along the seq doesn't
matter.) Current implementation does ints, but does not apply float diff
to every loc based on coverage...
"""
## the filter max
## The type of max_shared_Hs_locus is determined and the cast to either
## int or float is made at assembly load time
maxhet = data.paramsdict["max_shared_Hs_locus"]
if isinstance(maxhet, float):
## get an array with maxhet fraction * ntaxa with data for each locus
#maxhet = np.array(superints.shape[1]*maxhet, dtype=np.int16)
maxhet = np.floor(
maxhet * (superints.shape[1] -
np.all(superints == 78, axis=2).sum(axis=1))).astype(np.int16)
elif isinstance(maxhet, int):
maxhet = np.zeros(superints.shape[0], dtype=np.int16)
maxhet.fill(data.paramsdict["max_shared_Hs_locus"])
## an empty array to fill with failed loci
LOGGER.info("--------------maxhet mins %s", maxhet)
hetfilt = np.zeros(superints.shape[0], dtype=np.bool)
hetfilt = maxhet_numba(superints, edgearr, maxhet, hetfilt)
LOGGER.info("--------------maxhet sums %s", hetfilt.sum())
return hetfilt
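## Note on the two max_shared_Hs_locus modes above: an int value acts as a flat
## per-locus cap, while a float is converted per locus to
## floor(fraction * n_samples_with_data), so loci with more missing samples get
## a proportionally smaller allowance of shared heterozygous sites.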
def filter_indels(data, superints, edgearr):
"""
Filter max indels. Needs to split to apply to each read separately.
The dimensions of superseqs are (chunk, sum(sidx), maxlen).
"""
maxinds = np.array(data.paramsdict["max_Indels_locus"]).astype(np.int64)
## an empty array to fill with failed loci
ifilter = np.zeros(superints.shape[0], dtype=np.bool_)
## if paired then worry about splits
if "pair" in data.paramsdict["datatype"]:
for idx in xrange(superints.shape[0]):
block1 = superints[idx, :, edgearr[idx, 0]:edgearr[idx, 1]]
block2 = superints[idx, :, edgearr[idx, 2]:edgearr[idx, 3]]
sums1 = maxind_numba(block1)
## If all loci are merged then block2 will be empty which will
## cause maxind_numba to throw a very confusing ValueError
if np.any(block2):
sums2 = maxind_numba(block2)
else:
sums2 = 0
if (sums1 > maxinds[0]) or (sums2 > maxinds[1]):
ifilter[idx] = True
else:
for idx in xrange(superints.shape[0]):
## get block based on edge filters
block = superints[idx, :, edgearr[idx, 0]:edgearr[idx, 1]]
## shorten block to exclude terminal indels
## if data at this locus (not already filtered by edges/minsamp)
if block.shape[1] > 1:
try:
sums = maxind_numba(block)
except ValueError as inst:
msg = "All loci filterd by max_Indels_locus. Try increasing this parameter value."
raise IPyradError(msg)
except Exception as inst:
LOGGER.error("error in block {}".format(block))
#LOGGER.info("maxind numba %s %s", idx, sums)
#LOGGER.info("sums, maxinds[0], compare: %s %s %s",
# sums, maxinds[0], sums > maxinds[0])
if sums > maxinds[0]:
ifilter[idx] = True
LOGGER.info("--------------maxIndels sums %s", ifilter.sum())
return ifilter
def maxind_numba(block):
""" filter for indels """
## remove terminal edges
inds = 0
for row in xrange(block.shape[0]):
where = np.where(block[row] != 45)[0]
if len(where) == 0:
obs = 100
else:
left = np.min(where)
right = np.max(where)
obs = np.sum(block[row, left:right] == 45)
if obs > inds:
inds = obs
return inds
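## Sketch of what maxind_numba measures: per row it counts "-" (ASCII 45) only
## between the first and last non-gap positions, so terminal gaps are ignored,
## and it returns the maximum over rows. Illustrative rows:
##   "---AC-GT--"      ->  1 internal indel (the terminal dashes are not counted)
##   a row of all "-"  ->  scored 100, which always exceeds max_Indels_locus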
def make_outfiles(data, samples, output_formats, ipyclient):
"""
Get desired formats from paramsdict and write files to outfiles
directory.
"""
## will iterate optim loci at a time
with h5py.File(data.clust_database, 'r') as io5:
optim = io5["seqs"].attrs["chunksize"][0]
nloci = io5["seqs"].shape[0]
## get name and snp padding
anames = io5["seqs"].attrs["samples"]
snames = [i.name for i in samples]
## get only snames in this data set sorted in the order they are in io5
names = [i for i in anames if i in snames]
pnames, _ = padnames(names)
## get names boolean
sidx = np.array([i in snames for i in anames])
assert len(pnames) == sum(sidx)
## get names index in order of pnames
#sindx = [list(anames).index(i) for i in snames]
## send off outputs as parallel jobs
lbview = ipyclient.load_balanced_view()
start = time.time()
results = {}
## build arrays and outputs from arrays.
## these arrays are keys in the tmp h5 array: seqarr, snparr, bisarr, maparr
boss_make_arrays(data, sidx, optim, nloci, ipyclient)
start = time.time()
## phy and partitions are a default output ({}.phy, {}.phy.partitions)
if "p" in output_formats:
data.outfiles.phy = os.path.join(data.dirs.outfiles, data.name+".phy")
async = lbview.apply(write_phy, *[data, sidx, pnames])
results['phy'] = async
## nexus format includes ... additional information ({}.nex)
if "n" in output_formats:
data.outfiles.nexus = os.path.join(data.dirs.outfiles, data.name+".nex")
async = lbview.apply(write_nex, *[data, sidx, pnames])
results['nexus'] = async
## snps is actually all snps written in phylip format ({}.snps.phy)
if "s" in output_formats:
data.outfiles.snpsmap = os.path.join(data.dirs.outfiles, data.name+".snps.map")
data.outfiles.snpsphy = os.path.join(data.dirs.outfiles, data.name+".snps.phy")
async = lbview.apply(write_snps, *[data, sidx, pnames])
results['snps'] = async
async = lbview.apply(write_snps_map, data)
results['snpsmap'] = async
## usnps is one randomly sampled snp from each locus ({}.u.snps.phy)
if "u" in output_formats:
data.outfiles.usnpsphy = os.path.join(data.dirs.outfiles, data.name+".u.snps.phy")
async = lbview.apply(write_usnps, *[data, sidx, pnames])
results['usnps'] = async
## str and ustr are for structure analyses. A fairly outdated format, six
## columns of empty space. Full and subsample included ({}.str, {}.u.str)
if "k" in output_formats:
data.outfiles.str = os.path.join(data.dirs.outfiles, data.name+".str")
data.outfiles.ustr = os.path.join(data.dirs.outfiles, data.name+".ustr")
async = lbview.apply(write_str, *[data, sidx, pnames])
results['structure'] = async
## geno output is for admixture and other software. We include all SNPs,
## but also a .map file which has "distances" between SNPs.
if 'g' in output_formats:
data.outfiles.geno = os.path.join(data.dirs.outfiles, data.name+".geno")
data.outfiles.ugeno = os.path.join(data.dirs.outfiles, data.name+".u.geno")
async = lbview.apply(write_geno, *[data, sidx])
results['geno'] = async
## G-PhoCS output. Have to use cap G here cuz little g is already taken, lol.
if 'G' in output_formats:
data.outfiles.gphocs = os.path.join(data.dirs.outfiles, data.name+".gphocs")
async = lbview.apply(write_gphocs, *[data, sidx])
results['gphocs'] = async
## wait for finished outfiles
while 1:
readies = [i.ready() for i in results.values()]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(readies), sum(readies),
" writing outfiles | {} | s7 |".format(elapsed),
spacer=data._spacer)
time.sleep(0.1)
if all(readies):
break
print("")
## check for errors
for suff, async in results.items():
if not async.successful():
print(" Warning: error encountered while writing {} outfile: {}"\
.format(suff, async.exception()))
LOGGER.error(" Warning: error in writing %s outfile: %s", \
suff, async.exception())
## remove the tmparrays
tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name))
os.remove(tmparrs)
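## Reference sketch of the output_formats codes handled above and the files
## they assign:
##   "p" -> {name}.phy                    "n" -> {name}.nex
##   "s" -> {name}.snps.phy, .snps.map    "u" -> {name}.u.snps.phy
##   "k" -> {name}.str, {name}.ustr       "g" -> {name}.geno, {name}.u.geno
##   "G" -> {name}.gphocs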
def worker_make_arrays(data, sidx, hslice, optim, maxlen):
"""
Parallelized worker to build array chunks for output files. One main
goal here is to keep seqarr to less than ~1GB RAM.
"""
## big data arrays
io5 = h5py.File(data.clust_database, 'r')
co5 = h5py.File(data.database, 'r')
## temporary storage until writing to h5 array
maxsnp = co5["snps"][hslice:hslice+optim].sum() ## concat later
maparr = np.zeros((maxsnp, 4), dtype=np.uint32)
snparr = np.zeros((sum(sidx), maxsnp), dtype="S1")
bisarr = np.zeros((sum(sidx), maxsnp), dtype="S1")
seqarr = np.zeros((sum(sidx), maxlen*optim), dtype="S1")
## apply all filters and write loci data
seqleft = 0
snpleft = 0
bis = 0
## edge filter has already been applied to snps, but has not yet been
## applied to seqs. The locus filters have not been applied to either yet.
mapsnp = 0
totloc = 0
afilt = co5["filters"][hslice:hslice+optim, :]
aedge = co5["edges"][hslice:hslice+optim, :]
asnps = co5["snps"][hslice:hslice+optim, :]
#aseqs = io5["seqs"][hslice:hslice+optim, sidx, :]
## have to run upper on seqs b/c they have lowercase storage of alleles
aseqs = np.char.upper(io5["seqs"][hslice:hslice+optim, sidx, :])
## which loci passed all filters
keep = np.where(np.sum(afilt, axis=1) == 0)[0]
## write loci that passed after trimming edges, then write snp string
for iloc in keep:
## grab r1 seqs between edges
edg = aedge[iloc]
## grab SNPs from seqs already sidx subsampled and edg masked.
## needs to be done here before seqs are edgetrimmed.
getsnps = asnps[iloc].sum(axis=1).astype(np.bool)
snps = aseqs[iloc, :, getsnps].T
## trim edges and split from seqs and concatenate for pairs.
## this seq array will be the phy output.
if not "pair" in data.paramsdict["datatype"]:
seq = aseqs[iloc, :, edg[0]:edg[1]+1]
else:
seq = np.concatenate([aseqs[iloc, :, edg[0]:edg[1]+1],
aseqs[iloc, :, edg[2]:edg[3]+1]], axis=1)
## remove cols from seq (phy) array that are all N-
lcopy = seq
lcopy[lcopy == "-"] = "N"
bcols = np.all(lcopy == "N", axis=0)
seq = seq[:, ~bcols]
## put into large array (could put right into h5?)
seqarr[:, seqleft:seqleft+seq.shape[1]] = seq
seqleft += seq.shape[1]
## subsample all SNPs into an array
snparr[:, snpleft:snpleft+snps.shape[1]] = snps
snpleft += snps.shape[1]
## Enter each snp into the map file
for i in xrange(snps.shape[1]):
## 1-indexed loci in first column
## actual locus number in second column
## counter for this locus in third column
## snp counter total in fourth column
maparr[mapsnp, :] = [totloc+1, hslice+iloc, i, mapsnp+1]
mapsnp += 1
## subsample one SNP into an array
if snps.shape[1]:
samp = np.random.randint(snps.shape[1])
bisarr[:, bis] = snps[:, samp]
bis += 1
totloc += 1
## clean up
io5.close()
co5.close()
## trim trailing edges b/c we made the array bigger than needed.
ridx = np.all(seqarr == "", axis=0)
seqarr = seqarr[:, ~ridx]
ridx = np.all(snparr == "", axis=0)
snparr = snparr[:, ~ridx]
ridx = np.all(bisarr == "", axis=0)
bisarr = bisarr[:, ~ridx]
ridx = np.all(maparr == 0, axis=1)
maparr = maparr[~ridx, :]
## return these three arrays which are pretty small
## catg array gets to be pretty huge, so we return only
return seqarr, snparr, bisarr, maparr
def write_phy(data, sidx, pnames):
"""
write the phylip output file from the tmparr[seqarray]
"""
## grab seq data from tmparr
start = time.time()
tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name))
with h5py.File(tmparrs, 'r') as io5:
seqarr = io5["seqarr"]
## trim to size b/c it was made longer than actual
end = np.where(np.all(seqarr[:] == "", axis=0))[0]
if np.any(end):
end = end.min()
else:
end = seqarr.shape[1]
## write to phylip
with open(data.outfiles.phy, 'w') as out:
## write header
out.write("{} {}\n".format(seqarr.shape[0], end))
## write data rows
for idx, name in enumerate(pnames):
out.write("{}{}\n".format(name, "".join(seqarr[idx, :end])))
LOGGER.debug("finished writing phy in: %s", time.time() - start)
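## Sketch of the phylip layout written above (toy values): a "<ntaxa> <nsites>"
## header line followed by one padded-name + concatenated-sequence row per sample.
##   2 8
##   1A_0      TTCAGGCA
##   1B_0      TTCAGGCA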
def write_nex(data, sidx, pnames):
"""
write the nexus output file from the tmparr[seqarray] and tmparr[maparr]
"""
## grab seq data from tmparr
start = time.time()
tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name))
with h5py.File(tmparrs, 'r') as io5:
seqarr = io5["seqarr"]
## trim to size b/c it was made longer than actual
end = np.where(np.all(seqarr[:] == "", axis=0))[0]
if np.any(end):
end = end.min()
else:
end = seqarr.shape[1]
## write to nexus
data.outfiles.nex = os.path.join(data.dirs.outfiles, data.name+".nex")
with open(data.outfiles.nex, 'w') as out:
## write nexus seq header
out.write(NEXHEADER.format(seqarr.shape[0], end))
## grab a big block of data
chunksize = 100000 # this should be a multiple of 100
for bidx in xrange(0, end, chunksize):
bigblock = seqarr[:, bidx:bidx+chunksize]
lend = end-bidx
#LOGGER.info("BIG: %s %s %s %s", bigblock.shape, bidx, lend, end)
## write interleaved seqs 100 chars with longname+2 before
tmpout = []
for block in xrange(0, min(chunksize, lend), 100):
stop = min(block+100, end)
for idx, name in enumerate(pnames):
seqdat = bigblock[idx, block:stop]
tmpout.append(" {}{}\n".format(name, "".join(seqdat)))
tmpout.append("\n")
## print intermediate result and clear
if any(tmpout):
out.write("".join(tmpout))
## closer
out.write(NEXCLOSER)
LOGGER.debug("finished writing nex in: %s", time.time() - start)
def write_snps_map(data):
""" write a map file with linkage information for SNPs file"""
## grab map data from tmparr
start = time.time()
tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name))
with h5py.File(tmparrs, 'r') as io5:
maparr = io5["maparr"][:]
## get last data
end = np.where(np.all(maparr[:] == 0, axis=1))[0]
if np.any(end):
end = end.min()
else:
end = maparr.shape[0]
## write to map file (this is too slow...)
outchunk = []
with open(data.outfiles.snpsmap, 'w') as out:
for idx in xrange(end):
## build to list
line = maparr[idx, :]
#print(line)
outchunk.append(\
"{}\trad{}_snp{}\t{}\t{}\n"\
.format(line[0], line[1], line[2], 0, line[3]))
## clear list
if not idx % 10000:
out.write("".join(outchunk))
outchunk = []
## write remaining
out.write("".join(outchunk))
LOGGER.debug("finished writing snps_map in: %s", time.time() - start)
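## Sketch of one .snps.map line written above, from the four maparr columns
## (1-indexed locus counter, database locus id, snp index within the locus,
## running snp counter); the third output field is a constant 0:
##   1    rad12_snp0    0    1        ## illustrative values, tab-separated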
def write_usnps(data, sidx, pnames):
""" write the bisnp string """
## grab bis data from tmparr
tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name))
with h5py.File(tmparrs, 'r') as io5:
bisarr = io5["bisarr"]
## trim to size b/c it was made longer than actual
end = np.where(np.all(bisarr[:] == "", axis=0))[0]
if np.any(end):
end = end.min()
else:
end = bisarr.shape[1]
## write to usnps file
with open(data.outfiles.usnpsphy, 'w') as out:
out.write("{} {}\n".format(bisarr.shape[0], end))
for idx, name in enumerate(pnames):
out.write("{}{}\n".format(name, "".join(bisarr[idx, :end]))) |
def write_str(data, sidx, pnames):
""" Write STRUCTURE format for all SNPs and unlinked SNPs """
## grab snp and bis data from tmparr
start = time.time()
tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name))
with h5py.File(tmparrs, 'r') as io5:
snparr = io5["snparr"]
bisarr = io5["bisarr"]
## trim to size b/c it was made longer than actual
bend = np.where(np.all(bisarr[:] == "", axis=0))[0]
if np.any(bend):
bend = bend.min()
else:
bend = bisarr.shape[1]
send = np.where(np.all(snparr[:] == "", axis=0))[0]
if np.any(send):
send = send.min()
else:
send = snparr.shape[1]
## write to str and ustr
out1 = open(data.outfiles.str, 'w')
out2 = open(data.outfiles.ustr, 'w')
numdict = {'A': '0', 'T': '1', 'G': '2', 'C': '3', 'N': '-9', '-': '-9'}
if data.paramsdict["max_alleles_consens"] > 1:
for idx, name in enumerate(pnames):
out1.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][0]] for i in snparr[idx, :send]])))
out1.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][1]] for i in snparr[idx, :send]])))
out2.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][0]] for i in bisarr[idx, :bend]])))
out2.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][1]] for i in bisarr[idx, :bend]])))
else:
## haploid output
for idx, name in enumerate(pnames):
out1.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][0]] for i in snparr[idx, :send]])))
out2.write("{}\t\t\t\t\t{}\n"\
.format(name,
"\t".join([numdict[DUCT[i][0]] for i in bisarr[idx, :bend]])))
out1.close()
out2.close()
LOGGER.debug("finished writing str in: %s", time.time() - start)
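## Note on the STRUCTURE encoding above: bases are recoded with numdict
## (A=0, T=1, G=2, C=3, N and - = -9) after resolving each IUPAC base with DUCT
## (module-level map, assumed to return the base's two alleles). Diploid output
## writes two rows per sample (allele 0, then allele 1), haploid output writes
## one row, and five tab characters separate the name from the genotype columns.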
def write_geno(data, sidx):
"""
write the geno output formerly used by admixture, still supported by
adegenet, perhaps. Also, sNMF still likes .geno.
"""
## grab snp and bis data from tmparr
start = time.time()
tmparrs = os.path.join(data.dirs.outfiles, "tmp-{}.h5".format(data.name))
with h5py.File(tmparrs, 'r') as io5:
snparr = io5["snparr"]
bisarr = io5["bisarr"]
## trim to size b/c it was made longer than actual
bend = np.where(np.all(bisarr[:] == "", axis=0))[0]
if np.any(bend):
bend = bend.min()
else:
bend = bisarr.shape[1]
send = np.where(np.all(snparr[:] == "", axis=0))[0]
if np.any(send):
send = send.min()
else:
send = snparr.shape[1]
## get most common base at each SNP as a pseudo-reference
## and record 0,1,2 or missing=9 for counts of the ref allele
snpref = reftrick(snparr[:, :send].view(np.int8), GETCONS).view("S1")
bisref = reftrick(bisarr[:, :bend].view(np.int8), GETCONS).view("S1")
## geno matrix to fill (9 is empty)
snpgeno = np.zeros((snparr.shape[0], send), dtype=np.uint8)
snpgeno.fill(9)
bisgeno = np.zeros((bisarr.shape[0], bend), dtype=np.uint8)
bisgeno.fill(9)
##--------------------------------------------------------------------
## fill in complete hits (match to first column ref base)
mask2 = np.array(snparr[:, :send] == snpref[:, 0])
snpgeno[mask2] = 2
## fill in single hits (heteros) match to hetero of first+second column
ambref = np.apply_along_axis(lambda x: TRANSFULL[tuple(x)], 1, snpref[:, :2])
mask1 = np.array(snparr[:, :send] == ambref)
snpgeno[mask1] = 1
## fill in zero hits, meaning a perfect match to the second column base
## anything else is left at 9 (missing), b/c it's either missing or it
## is not bi-allelic.
mask0 = np.array(snparr[:, :send] == snpref[:, 1])
snpgeno[mask0] = 0
##--------------------------------------------------------------------
## fill in complete hits
mask2 = np.array(bisarr[:, :bend] == bisref[:, 0])
bisgeno[mask2] = 2
## fill in single hits (heteros)
ambref = np.apply_along_axis(lambda x: TRANSFULL[tuple(x)], 1, bisref[:, :2])
mask1 = np.array(bisarr[:, :bend] == ambref)
bisgeno[mask1] = 1
## fill in zero hits (match to second base)
mask0 = np.array(bisarr[:, :bend] == bisref[:, 1])
bisgeno[mask0] = 0
##---------------------------------------------------------------------
## print to files
np.savetxt(data.outfiles.geno, snpgeno.T, delimiter="", fmt="%d")
np.savetxt(data.outfiles.ugeno, bisgeno.T, delimiter="", fmt="%d")
LOGGER.debug("finished writing geno in: %s", time.time() - start)
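## Sketch of the .geno encoding written above, one row per SNP and one column
## per sample: 2 = homozygous for the pseudo-reference (most common) base,
## 1 = heterozygous (matches the ref/alt IUPAC ambiguity), 0 = homozygous
## alternate, 9 = missing or not bi-allelic.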
def write_gphocs(data, sidx):
"""
write the g-phocs output. This code is hella ugly bcz it's copy/pasted
directly from the old loci2gphocs script from pyrad. I figure having it
get done the stupid way is better than not having it done at all, at
least for the time being. This could probably be sped up significantly.
"""
outfile = data.outfiles.gphocs
infile = data.outfiles.loci
infile = open(infile)
outfile = open(outfile, 'w')
## parse the loci
## Each set of reads at a locus is appended with a line
## beginning with // and ending with |x, where x is the locus id.
## so after this call 'loci' will contain an array
## of sets of each read per locus.
loci = re.compile("\|[0-9]+\|").split(infile.read())[:-1]
# Print the header, the number of loci in this file
outfile.write(str(len(loci)) + "\n\n")
# iterate through each locus, print out the header for each locus:
# <locus_name> <n_samples> <locus_length>
# Then print the data for each sample in this format:
# <individual_name> <sequence>
for i, loc in enumerate(loci):
## Get rid of the line that contains the snp info
loc = loc.rsplit("\n", 1)[0]
# Separate out each sequence within the loc block. 'sequences'
# will now be a list of strings containing name/sequence pairs.
# We select each line in the locus string that starts with ">"
names = [line.split()[0] for line in loc.strip().split("\n")]
try:
sequences = [line.split()[1] for line in loc.strip().split("\n")]
except:
pass
# Strips off 'nnnn' separator for paired data
# replaces '-' with 'N'
editsequences = [seq.replace("n","").replace('-','N') for seq in sequences]
sequence_length = len(editsequences[0])
# get length of longest name and add 4 spaces
longname = max(map(len,names))+4
# Print out the header for this locus
outfile.write('locus{} {} {}\n'.format(str(i), len(sequences), sequence_length))
# Iterate through each sequence read at this locus and write it to the file.
for name,sequence in zip(names, editsequences):
# Clean up the sequence data to make gphocs happy. Only accepts UPPER
# case chars for bases, and only accepts 'N' for missing data.
outfile.write(name+" "*(longname-len(name))+sequence + "\n")
## Separate loci with a blank line so it's prettier
outfile.write("\n") |
def make_vcf(data, samples, ipyclient, full=0):
"""
Write the full VCF for loci passing filtering. Other vcf formats are
possible, like SNPs-only, or with filtered loci included but the filter
explicitly labeled. These are not yet supported, however.
"""
## start vcf progress bar
start = time.time()
printstr = " building vcf file | {} | s7 |"
LOGGER.info("Writing .vcf file")
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(20, 0, printstr.format(elapsed), spacer=data._spacer)
## create outputs for v and V, gzip V to be friendly
data.outfiles.vcf = os.path.join(data.dirs.outfiles, data.name+".vcf")
if full:
data.outfiles.VCF = os.path.join(data.dirs.outfiles, data.name+".vcf.gz")
## get some db info
with h5py.File(data.clust_database, 'r') as io5:
## will iterate optim loci at a time
optim = io5["seqs"].attrs["chunksize"][0]
nloci = io5["seqs"].shape[0]
## get name and snp padding
anames = io5["seqs"].attrs["samples"]
snames = [i.name for i in samples]
names = [i for i in anames if i in snames]
## get names index
sidx = np.array([i in snames for i in anames])
## client for sending jobs to parallel engines
lbview = ipyclient.load_balanced_view()
## send jobs in chunks
vasyncs = {}
total = 0
for chunk in xrange(0, nloci, optim):
vasyncs[chunk] = lbview.apply(vcfchunk, *(data, optim, sidx, chunk, full))
total += 1
## tmp files get left behind and intensive processes are left running when a
## job is killed/interrupted during vcf build, so we try/except wrap.
try:
while 1:
keys = [i for (i, j) in vasyncs.items() if j.ready()]
## check for failures
for job in keys:
if not vasyncs[job].successful():
## raise exception
err = " error in vcf build chunk {}: {}"\
.format(job, vasyncs[job].result())
LOGGER.error(err)
raise IPyradWarningExit(err)
else:
## free up memory
del vasyncs[job]
finished = total - len(vasyncs) #sum([i.ready() for i in vasyncs.values()])
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(total, finished, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.5)
if not vasyncs:
break
print("")
except Exception as inst:
## make sure all future jobs are aborted
keys = [i for (i, j) in vasyncs.items() if not j.ready()]
try:
for job in keys:
#vasyncs[job].abort()
vasyncs[job].cancel()
except Exception:
pass
## make sure all tmp files are destroyed
vcfchunks = glob.glob(os.path.join(data.dirs.outfiles, "*.vcf.[0-9]*"))
h5chunks = glob.glob(os.path.join(data.dirs.outfiles, ".tmp.[0-9]*.h5"))
for dfile in vcfchunks+h5chunks:
os.remove(dfile)
## reraise the error
raise inst
## writing full vcf file to disk
start = time.time()
printstr = " writing vcf file | {} | s7 |"
res = lbview.apply(concat_vcf, *(data, names, full))
ogchunks = len(glob.glob(data.outfiles.vcf+".*"))
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
curchunks = len(glob.glob(data.outfiles.vcf+".*"))
progressbar(ogchunks, ogchunks-curchunks, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if res.ready():
break
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(1, 1, printstr.format(elapsed), spacer=data._spacer)
print("") |
def concat_vcf(data, names, full):
"""
Sorts, concatenates, and gzips VCF chunks. Also cleans up chunks.
"""
## open handle and write headers
if not full:
writer = open(data.outfiles.vcf, 'w')
else:
writer = gzip.open(data.outfiles.VCF, 'w')
vcfheader(data, names, writer)
writer.close()
## get vcf chunks
vcfchunks = glob.glob(data.outfiles.vcf+".*")
vcfchunks.sort(key=lambda x: int(x.rsplit(".")[-1]))
## concatenate
if not full:
writer = open(data.outfiles.vcf, 'a')
else:
writer = gzip.open(data.outfiles.VCF, 'a')
## what order do users want? The order in the original ref file?
## Sorted by the size of chroms? that is the order in faidx.
## If reference mapping then it's nice to sort the vcf data by
## CHROM and POS. This is doing a very naive sort right now, so the
## CHROM will be ordered, but not the pos within each chrom.
if data.paramsdict["assembly_method"] in ["reference", "denovo+reference"]:
## Some unix sorting magic to get POS sorted within CHROM
## First you sort by POS (-k 2,2), then you do a `stable` sort
## by CHROM. You end up with POS ordered and grouped correctly by CHROM
## but relatively unordered CHROMs (locus105 will be before locus11).
cmd = ["cat"] + vcfchunks + [" | sort -k 2,2 -n | sort -k 1,1 -s"]
cmd = " ".join(cmd)
proc = sps.Popen(cmd, shell=True, stderr=sps.STDOUT, stdout=writer, close_fds=True)
else:
proc = sps.Popen(["cat"] + vcfchunks, stderr=sps.STDOUT, stdout=writer, close_fds=True)
err = proc.communicate()[0]
if proc.returncode:
raise IPyradWarningExit("err in concat_vcf: %s", err)
writer.close()
for chunk in vcfchunks:
os.remove(chunk)
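## Sketch of the reference-mapped sort above as an equivalent shell pipeline
## (chunk names illustrative; in the code the output is appended after the VCF
## header that was already written):
##   cat out.vcf.0 out.vcf.1 ... | sort -k 2,2 -n | sort -k 1,1 -s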
def vcfchunk(data, optim, sidx, chunk, full):
"""
Function called within make_vcf to run chunks on separate engines.
"""
## empty array to be filled before writing
## will not actually be optim*maxlen, extra needs to be trimmed
maxlen = data._hackersonly["max_fragment_length"] + 20
## get data sliced (optim chunks at a time)
hslice = [chunk, chunk+optim]
## read all taxa from disk (faster), then subsample taxa with sidx and
## keepmask to greatly reduce the memory load
with h5py.File(data.database, 'r') as co5:
afilt = co5["filters"][hslice[0]:hslice[1], :]
keepmask = afilt.sum(axis=1) == 0
## apply mask to edges
aedge = co5["edges"][hslice[0]:hslice[1], :]
aedge = aedge[keepmask, :]
del afilt
## same memory subsampling.
with h5py.File(data.clust_database, 'r') as io5:
## apply mask to edges to aseqs and acatg
#aseqs = io5["seqs"][hslice[0]:hslice[1], :, :].view(np.uint8)
## need to read in seqs with upper b/c lowercase allele info
aseqs = np.char.upper(io5["seqs"][hslice[0]:hslice[1], :, :]).view(np.uint8)
aseqs = aseqs[keepmask, :]
aseqs = aseqs[:, sidx, :]
acatg = io5["catgs"][hslice[0]:hslice[1], :, :, :]
acatg = acatg[keepmask, :]
acatg = acatg[:, sidx, :, :]
achrom = io5["chroms"][hslice[0]:hslice[1]]
achrom = achrom[keepmask, :]
LOGGER.info('acatg.shape %s', acatg.shape)
## to save memory some columns are stored in diff dtypes until printing
if not full:
with h5py.File(data.database, 'r') as co5:
snps = co5["snps"][hslice[0]:hslice[1], :]
snps = snps[keepmask, :]
snps = snps.sum(axis=2)
snpidxs = snps > 0
maxsnplen = snps.sum()
## vcf info to fill, this is bigger than the actual array
nrows = maxsnplen
cols0 = np.zeros(nrows, dtype=np.int64) #h5py.special_dtype(vlen=bytes))
cols1 = np.zeros(nrows, dtype=np.uint32)
cols34 = np.zeros((nrows, 2), dtype="S5")
cols7 = np.zeros((nrows, 1), dtype="S20")
## when nsamples is high this blows up memory (e.g., dim=(5M x 500))
## so we'll instead create a list of arrays with 10 samples at a time.
## maybe later replace this with a h5 array
tmph = os.path.join(data.dirs.outfiles, ".tmp.{}.h5".format(hslice[0]))
htmp = h5py.File(tmph, 'w')
htmp.create_dataset("vcf", shape=(nrows, sum(sidx)), dtype="S24")
## which loci passed all filters
init = 0
## write loci that passed after trimming edges, then write snp string
locindex = np.where(keepmask)[0]
for iloc in xrange(aseqs.shape[0]):
edg = aedge[iloc]
## grab all seqs between edges
if not 'pair' in data.paramsdict["datatype"]:
seq = aseqs[iloc, :, edg[0]:edg[1]+1]
catg = acatg[iloc, :, edg[0]:edg[1]+1]
if not full:
snpidx = snpidxs[iloc, edg[0]:edg[1]+1]
seq = seq[:, snpidx]
catg = catg[:, snpidx]
else:
seq = np.hstack([aseqs[iloc, :, edg[0]:edg[1]+1],
aseqs[iloc, :, edg[2]:edg[3]+1]])
catg = np.hstack([acatg[iloc, :, edg[0]:edg[1]+1],
acatg[iloc, :, edg[2]:edg[3]+1]])
if not full:
snpidx = np.hstack([snpidxs[iloc, edg[0]:edg[1]+1],
snpidxs[iloc, edg[2]:edg[3]+1]])
seq = seq[:, snpidx]
catg = catg[:, snpidx]
## empty arrs to fill
alleles = np.zeros((nrows, 4), dtype=np.uint8)
genos = np.zeros((seq.shape[1], sum(sidx)), dtype="S4")
genos[:] = "./.:"
## ---- build string array ----
pos = 0
## If any < 0 this indicates an anonymous locus in denovo+ref assembly
if achrom[iloc][0] > 0:
pos = achrom[iloc][1]
cols0[init:init+seq.shape[1]] = achrom[iloc][0]
cols1[init:init+seq.shape[1]] = pos + np.where(snpidx)[0] + 1
else:
if full:
cols1[init:init+seq.shape[1]] = pos + np.arange(seq.shape[1]) + 1
else:
cols1[init:init+seq.shape[1]] = pos + np.where(snpidx)[0] + 1
cols0[init:init+seq.shape[1]] = (chunk + locindex[iloc] + 1) * -1
## fill reference base
alleles = reftrick(seq, GETCONS)
## get the info string column
tmp0 = np.sum(catg, axis=2)
tmp1 = tmp0 != 0
tmp2 = tmp1.sum(axis=1) > 0
nsamp = np.sum(tmp1, axis=0)
depth = np.sum(tmp0, axis=0)
list7 = [["NS={};DP={}".format(i, j)] for i, j in zip(nsamp, depth)]
if list7:
cols7[init:init+seq.shape[1]] = list7
## default fill cons sites where no variants
genos[tmp1.T] = "0/0:"
## fill cons genotypes for sites with alt alleles for taxa in order
mask = alleles[:, 1] == 46
mask += alleles[:, 1] == 45
obs = alleles[~mask, :]
alts = seq[:, ~mask]
who = np.where(mask == False)[0]
## fill variable sites
for site in xrange(alts.shape[1]):
bases = alts[:, site]
#LOGGER.info("bases %s", bases)
ohere = obs[site][obs[site] != 0]
#LOGGER.info("ohere %s", ohere)
alls = np.array([DCONS[i] for i in bases], dtype=np.uint32)
#LOGGER.info("all %s", alls)
for jdx in xrange(ohere.shape[0]):
alls[alls == ohere[jdx]] = jdx
#LOGGER.info("all2 %s", alls)
## fill into array
for cidx in xrange(catg.shape[0]):
if tmp2[cidx]:
if alls[cidx][0] < 5:
genos[who[site], cidx] = "/".join(alls[cidx].astype("S1").tolist())+":"
else:
genos[who[site], cidx] = "./.:"
#LOGGER.info("genos filled: %s %s %s", who[site], cidx, genos)
## build geno+depth strings
## for each taxon enter 4 catg values
fulltmp = np.zeros((seq.shape[1], catg.shape[0]), dtype="S24")
for cidx in xrange(catg.shape[0]):
## fill catgs from catgs
tmp0 = [str(i.sum()) for i in catg[cidx]]
tmp1 = [",".join(i) for i in catg[cidx].astype("S4").tolist()]
tmp2 = ["".join(i+j+":"+k) for i, j, k in zip(genos[:, cidx], tmp0, tmp1)]
## fill tmp allcidx
fulltmp[:, cidx] = tmp2
## write to h5 for this locus
htmp["vcf"][init:init+seq.shape[1], :] = fulltmp
cols34[init:init+seq.shape[1], 0] = alleles[:, 0].view("S1")
cols34[init:init+seq.shape[1], 1] = [",".join([j for j in i if j]) \
for i in alleles[:, 1:].view("S1").tolist()]
## advance counter
init += seq.shape[1]
## trim off empty rows if they exist
withdat = cols0 != 0
tot = withdat.sum()
## get scaffold names
faidict = {}
if (data.paramsdict["assembly_method"] in ["reference", "denovo+reference"]) and \
(os.path.exists(data.paramsdict["reference_sequence"])):
fai = pd.read_csv(data.paramsdict["reference_sequence"] + ".fai",
names=['scaffold', 'size', 'sumsize', 'a', 'b'],
sep="\t")
faidict = {i+1:j for i,j in enumerate(fai.scaffold)}
try:
## This is hax, but it's the only way it will work. The faidict uses positive numbers
## for reference sequence mapped loci for the CHROM/POS info, and it uses negative
## numbers for anonymous loci. Both are 1 indexed, which is where that last `+ 2` comes from.
faidict.update({-i:"locus_{}".format(i-1) for i in xrange(chunk+1, chunk + optim + 2)})
chroms = [faidict[i] for i in cols0]
except Exception as inst:
LOGGER.error("Invalid chromosome dictionary indexwat: {}".format(inst))
LOGGER.debug("faidict {}".format([str(k)+"/"+str(v) for k, v in faidict.items() if "locus" in v]))
LOGGER.debug("chroms {}".format([x for x in cols0 if x < 0]))
raise
cols0 = np.array(chroms)
#else:
# cols0 = np.array(["locus_{}".format(i) for i in cols0-1])
## Only write if there is some data that passed filtering
if tot:
LOGGER.debug("Writing data to vcf")
if not full:
writer = open(data.outfiles.vcf+".{}".format(chunk), 'w')
else:
writer = gzip.open(data.outfiles.vcf+".{}".format(chunk), 'w')
try:
## write in iterations b/c it can be freakin huge.
## for cols0 and cols1 the 'newaxis' slice and the transpose
## are for turning the 1d arrays into column vectors.
np.savetxt(writer,
np.concatenate(
(cols0[:tot][np.newaxis].T,
cols1[:tot][np.newaxis].T,
np.array([["."]]*tot, dtype="S1"),
cols34[:tot, :],
np.array([["13", "PASS"]]*tot, dtype="S4"),
cols7[:tot, :],
np.array([["GT:DP:CATG"]]*tot, dtype="S10"),
htmp["vcf"][:tot, :],
),
axis=1),
delimiter="\t", fmt="%s")
except Exception as inst:
LOGGER.error("Error building vcf file - ".format(inst))
raise
writer.close()
## close and remove tmp h5
htmp.close()
os.remove(tmph) |
def reftrick(iseq, consdict):
""" Returns the most common base at each site in order. """
altrefs = np.zeros((iseq.shape[1], 4), dtype=np.uint8)
altrefs[:, 1] = 46
for col in xrange(iseq.shape[1]):
## expand colums with ambigs and remove N-
fcounts = np.zeros(111, dtype=np.int64)
counts = np.bincount(iseq[:, col])#, minlength=90)
fcounts[:counts.shape[0]] = counts
## set N and - to zero, wish numba supported minlen arg
fcounts[78] = 0
fcounts[45] = 0
## add ambig counts to true bases
for aidx in xrange(consdict.shape[0]):
nbases = fcounts[consdict[aidx, 0]]
for _ in xrange(nbases):
fcounts[consdict[aidx, 1]] += 1
fcounts[consdict[aidx, 2]] += 1
fcounts[consdict[aidx, 0]] = 0
## now get counts from the modified counts arr
who = np.argmax(fcounts)
altrefs[col, 0] = who
fcounts[who] = 0
## if an alt allele fill over the "." placeholder
who = np.argmax(fcounts)
if who:
altrefs[col, 1] = who
fcounts[who] = 0
## if 3rd or 4th alleles observed then add to arr
who = np.argmax(fcounts)
altrefs[col, 2] = who
fcounts[who] = 0
## if 3rd or 4th alleles observed then add to arr
who = np.argmax(fcounts)
altrefs[col, 3] = who
return altrefs |
def vcfheader(data, names, ofile):
"""
    Writes the vcf header to the open file handle ofile
"""
## choose reference string
if data.paramsdict["reference_sequence"]:
reference = data.paramsdict["reference_sequence"]
else:
reference = "pseudo-reference (most common base at site)"
##FILTER=<ID=minCov,Description="Data shared across <{mincov} samples">
##FILTER=<ID=maxSH,Description="Heterozygosous site shared across >{maxsh} samples">
header = """\
##fileformat=VCFv4.0
##fileDate={date}
##source=ipyrad_v.{version}
##reference={reference}
##phasing=unphased
##INFO=<ID=NS,Number=1,Type=Integer,Description="Number of Samples With Data">
##INFO=<ID=DP,Number=1,Type=Integer,Description="Total Depth">
##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
##FORMAT=<ID=DP,Number=1,Type=Integer,Description="Read Depth">
##FORMAT=<ID=CATG,Number=1,Type=String,Description="Base Counts (CATG)">
#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t{names}
""".format(date=time.strftime("%Y/%m/%d"),
version=__version__,
reference=os.path.basename(reference),
mincov=data.paramsdict["min_samples_locus"],
maxsh=data.paramsdict["max_shared_Hs_locus"],
names="\t".join(names))
## WRITE
ofile.write(header) |
def loci2bpp(name, locifile, imap, guidetree,
minmap=None,
maxloci=None,
infer_sptree=0,
infer_delimit=0,
delimit_alg=(0, 5),
seed=12345,
burnin=1000,
nsample=10000,
sampfreq=2,
thetaprior=(5, 5),
tauprior=(4, 2, 1),
traits_df=None,
nu=0,
kappa=0,
useseqdata=1,
usetraitdata=1,
cleandata=0,
wdir=None,
finetune=(0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01),
verbose=0):
"""
Converts loci file format to bpp file format, i.e., concatenated phylip-like
format, and produces imap and ctl input files for bpp.
Parameters:
-----------
name:
A prefix name for output files that will be produced
locifile:
A .loci file produced by ipyrad.
imap:
A Python dictionary with 'species' names as keys, and lists of sample
names for the values. Any sample that is not included in the imap
dictionary will be filtered out of the data when converting the .loci
file into the bpp formatted sequence file. Each species in the imap
dictionary must also be present in the input 'guidetree'.
guidetree:
A newick string species tree hypothesis [e.g., (((a,b),(c,d)),e);]
All species in the imap dictionary must also be present in the guidetree
Optional parameters:
--------------------
infer_sptree:
Default=0, only infer parameters on a fixed species tree. If 1, then the
input tree is treated as a guidetree and tree search is employed to find
the best tree. The results will include support values for the inferred
topology.
infer_delimit:
Default=0, no delimitation. If 1 then splits in the tree that separate
'species' will be collapsed to test whether fewer species are a better
fit to the data than the number in the input guidetree.
delimit_alg:
Species delimitation algorithm. This is a tuple. The first value
is the algorithm (0 or 1) and the following values are arguments
for the given algorithm. See other ctl files for examples of what the
delimitation line looks like. This is where you can enter the params
(e.g., alpha, migration) for the two different algorithms.
For example, the following args would produce the following ctl lines:
alg=0, epsilon=5
> delimit_alg = (0, 5)
speciesdelimitation = 1 0 5
alg=1, alpha=2, migration=1
> delimit_alg = (1, 2, 1)
speciesdelimitation = 1 1 2 1
alg=1, alpha=2, migration=1, diagnosis=0, ?=1
> delimit_alg = (1, 2, 1, 0, 1)
speciesdelimitation = 1 1 2 1 0 1
seed:
A random number seed at start of analysis.
burnin:
Number of burnin generations in mcmc
nsample:
Number of mcmc generations to run.
sampfreq:
How often to sample from the mcmc chain.
thetaprior:
Prior on theta (4Neu), gamma distributed. mean = a/b. e.g., (5, 5)
    tauprior:
        Prior on root tau, gamma distributed. mean = a/b. Last number is
dirichlet prior for other taus. e.g., (4, 2, 1)
traits_df:
A pandas DataFrame with trait data properly formatted. This means only
quantitative traits are included, and missing values are NaN.
The first column contains sample names, with "Indiv" as the header.
The following columns have a header row with trait names. This script
will write a CSV trait file with trait values mean-standardized, with
        NaN replaced by "NA", and with samples not present in IMAP removed.
nu:
A prior on phenotypic trait variance (0) for iBPP analysis.
kappa:
A prior on phenotypic trait mean (0) for iBPP analysis.
useseqdata:
If false inference proceeds without sequence data (can be used to test
the effect of priors on the tree distributions).
usetraitdata:
If false inference proceeds without trait data (can be used to test
the effect of priors on the trait distributions).
cleandata:
If 1 then sites with missing or hetero characters are removed.
wdir:
A working directory to write files to.
finetune:
See bpp documentation.
verbose:
If verbose=1 the ctl file text will also be written to screen (stderr).
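    Example:
    --------
    A minimal, hypothetical usage sketch; the prefix name, .loci path, and
    sample/species names below are placeholders, not from a real assembly:
        >>> imap = {"A": ["a1", "a2"], "B": ["b1", "b2"], "C": ["c1"]}
        >>> guidetree = "((A,B),C);"
        >>> ctlfile = loci2bpp("test", "./analysis/test.loci", imap, guidetree,
        ...                    burnin=1000, nsample=10000, seed=12345)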
"""
## check args
if not imap:
raise IPyradWarningExit(IMAP_REQUIRED)
if minmap:
if minmap.keys() != imap.keys():
raise IPyradWarningExit(KEYS_DIFFER)
## working directory, make sure it exists
if wdir:
wdir = os.path.abspath(wdir)
if not os.path.exists(wdir):
raise IPyradWarningExit(" working directory (wdir) does not exist")
else:
wdir = os.path.curdir
## if traits_df then we make '.ibpp' files
prog = 'bpp'
if isinstance(traits_df, pd.DataFrame):
prog = 'ibpp'
outfile = OPJ(wdir, "{}.{}.seq.txt".format(name, prog))
mapfile = OPJ(wdir, "{}.{}.imap.txt".format(name, prog))
## open outhandles
fout = open(outfile, 'w')
fmap = open(mapfile, 'w')
## parse the loci file
with open(locifile, 'r') as infile:
## split on "//" for legacy compatibility
loci = infile.read().strip().split("|\n")
nloci = len(loci)
## all samples
samples = list(itertools.chain(*imap.values()))
## iterate over loci, printing to outfile
nkept = 0
for iloc in xrange(nloci):
lines = loci[iloc].split("//")[0].split()
names = lines[::2]
names = ["^"+i for i in names]
seqs = [list(i) for i in lines[1::2]]
seqlen = len(seqs[0])
## whether to skip this locus based on filters below
skip = 0
## if minmap filter for sample coverage
if minmap:
covd = {}
for group, vals in imap.items():
covd[group] = sum(["^"+i in names for i in vals])
## check that coverage is good enough
if not all([covd[group] >= minmap[group] for group in minmap]):
skip = 1
## too many loci?
if maxloci:
if nkept >= maxloci:
skip = 1
## build locus as a string
if not skip:
## convert to phylip with caret starter and replace - with N.
data = ["{:<30} {}".format(i, "".join(k).replace("-", "N")) for \
(i, k) in zip(names, seqs) if i[1:] in samples]
## if not empty, write to the file
if data:
fout.write("{} {}\n\n{}\n\n"\
.format(len(data), seqlen, "\n".join(data)))
nkept += 1
## close up shop
fout.close()
## write the imap file:
data = ["{:<30} {}".format(val, key) for key \
in sorted(imap) for val in imap[key]]
fmap.write("\n".join(data))
fmap.close()
## write ctl file
write_ctl(name, imap, guidetree, nkept,
infer_sptree, infer_delimit, delimit_alg,
seed, burnin, nsample, sampfreq,
thetaprior, tauprior, traits_df, nu, kappa,
cleandata, useseqdata, usetraitdata, wdir,
finetune, verbose)
## print message?
sys.stderr.write("new files created ({} loci, {} species, {} samples)\n"\
.format(nkept, len(imap.keys()),
sum([len(i) for i in imap.values()])))
sys.stderr.write(" {}.{}.seq.txt\n".format(name, prog))
sys.stderr.write(" {}.{}.imap.txt\n".format(name, prog))
sys.stderr.write(" {}.{}.ctl.txt\n".format(name, prog))
if isinstance(traits_df, pd.DataFrame):
sys.stderr.write(" {}.{}.traits.txt\n".format(name, prog))
## return the ctl file string
return os.path.abspath(
"{}.{}.ctl.txt".format(OPJ(wdir, name), prog)) |
def write_ctl(name, imap, guidetree, nloci,
infer_sptree, infer_delimit, delimit_alg,
seed, burnin, nsample, sampfreq,
thetaprior, tauprior, traits_df, nu0, kappa0,
cleandata, useseqdata, usetraitdata, wdir,
finetune, verbose):
""" write outfile with any args in argdict """
## A string to store ctl info
ctl = []
## check the tree (can do this better once we install ete3 w/ ipyrad)
if not guidetree.endswith(";"):
guidetree += ";"
## if traits_df then we make '.ibpp' files
prog = 'bpp'
if isinstance(traits_df, pd.DataFrame):
prog = 'ibpp'
## write the top header info
ctl.append("seed = {}".format(seed))
ctl.append("seqfile = {}.{}.seq.txt".format(OPJ(wdir, name), prog))
ctl.append("Imapfile = {}.{}.imap.txt".format(OPJ(wdir, name), prog))
ctl.append("mcmcfile = {}.{}.mcmc.txt".format(OPJ(wdir, name), prog))
ctl.append("outfile = {}.{}.out.txt".format(OPJ(wdir, name), prog))
if isinstance(traits_df, pd.DataFrame):
ctl.append("traitfile = {}.{}.traits.txt".format(OPJ(wdir, name), prog))
## number of loci (checks that seq file exists and parses from there)
ctl.append("nloci = {}".format(nloci))
ctl.append("usedata = {}".format(useseqdata))
ctl.append("cleandata = {}".format(cleandata))
## infer species tree
if infer_sptree:
ctl.append("speciestree = 1 0.4 0.2 0.1")
else:
ctl.append("speciestree = 0")
## infer delimitation (with algorithm 1 by default)
ctl.append("speciesdelimitation = {} {} {}"\
.format(infer_delimit, delimit_alg[0],
" ".join([str(i) for i in delimit_alg[1:]])))
    ## if using iBPP (when traits_df is None we assume you're using bpp v.3.3+)
if isinstance(traits_df, pd.DataFrame):
## check that the data frame is properly formatted
try:
traits_df.values.astype(float)
except Exception:
raise IPyradWarningExit(PDREAD_ERROR)
## subsample to keep only samples that are in IMAP, we do not need to
        ## standardize traits b/c ibpp does that for us.
samples = sorted(list(itertools.chain(*imap.values())))
didx = [list(traits_df.index).index(i) for i in traits_df.index \
if i not in samples]
dtraits = traits_df.drop(traits_df.index[didx])
## mean standardize traits values after excluding samples
straits = dtraits.apply(lambda x: (x - x.mean()) / (x.std()))
## convert NaN to "NA" cuz that's what ibpp likes, and write to file
ftraits = straits.fillna("NA")
traitdict = ftraits.T.to_dict("list")
## get reverse imap dict
rev = {val:key for key in sorted(imap) for val in imap[key]}
## write trait file
traitfile = "{}.{}.traits.txt".format(os.path.join(wdir, name), prog)
with open(traitfile, 'w') as tout:
tout.write("Indiv\n")
tout.write("\t".join(
['Species'] + list(ftraits.columns))+"\n"
)
#for key in sorted(traitdict):
# tout.write("\t".join([key, rev[key]] + \
# ["^"+str(i) for i in traitdict[key]])+"\n"
# )
nindT = 0
for ikey in sorted(imap.keys()):
samps = imap[ikey]
for samp in sorted(samps):
if samp in traitdict:
tout.write("\t".join([samp, rev[samp]] + \
[str(i) for i in traitdict[samp]])+"\n"
)
nindT += 1
# tout.write("Indiv\n"+"\t".join(["Species"]+\
# ["t_{}".format(i) for i in range(len(traitdict.values()[0]))])+"\n")
# for key in sorted(traitdict):
# print >>tout, "\t".join([key, rev[key]] + \
# [str(i) for i in traitdict[key]])
#ftraits.to_csv(traitfile)
## write ntraits and nindT and traitfilename
ctl.append("ntraits = {}".format(traits_df.shape[1]))
ctl.append("nindT = {}".format(nindT)) #traits_df.shape[0]))
ctl.append("usetraitdata = {}".format(usetraitdata))
ctl.append("useseqdata = {}".format(useseqdata))
## trait priors
ctl.append("nu0 = {}".format(nu0))
ctl.append("kappa0 = {}".format(kappa0))
## remove ibpp incompatible options
ctl.remove("usedata = {}".format(useseqdata))
ctl.remove("speciestree = {}".format(infer_sptree))
## get tree values
nspecies = str(len(imap))
species = " ".join(sorted(imap))
ninds = " ".join([str(len(imap[i])) for i in sorted(imap)])
## write the tree
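    ## e.g. (hypothetical) an imap with species A (2 inds) and B (3 inds) and
    ## guidetree "(A,B);" yields ctl lines like:
    ##   species&tree = 2 A B
    ##                  2 3
    ##                  (A,B);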
ctl.append("""\
species&tree = {} {}
{}
{}""".format(nspecies, species, ninds, guidetree))
## priors
ctl.append("thetaprior = {} {}".format(*thetaprior))
ctl.append("tauprior = {} {} {}".format(*tauprior))
## other values, fixed for now
ctl.append("finetune = 1: {}".format(" ".join([str(i) for i in finetune])))
#CTL.append("finetune = 1: 1 0.002 0.01 0.01 0.02 0.005 1.0")
ctl.append("print = 1 0 0 0")
ctl.append("burnin = {}".format(burnin))
ctl.append("sampfreq = {}".format(sampfreq))
ctl.append("nsample = {}".format(nsample))
## write out the ctl file
with open("{}.{}.ctl.txt".format(OPJ(wdir, name), prog), 'w') as out:
out.write("\n".join(ctl))
## if verbose print ctl
if verbose:
sys.stderr.write("ctl file\n--------\n"+"\n".join(ctl)+"\n--------\n\n") |
def _collapse_outgroup(tree, taxdicts):
""" collapse outgroup in ete Tree for easier viewing """
## check that all tests have the same outgroup
outg = taxdicts[0]["p4"]
if not all([i["p4"] == outg for i in taxdicts]):
raise Exception("no good")
## prune tree, keep only one sample from outgroup
tre = ete.Tree(tree.write(format=1)) #tree.copy(method="deepcopy")
alltax = [i for i in tre.get_leaf_names() if i not in outg]
alltax += [outg[0]]
tre.prune(alltax)
tre.search_nodes(name=outg[0])[0].name = "outgroup"
tre.ladderize()
## remove other ougroups from taxdicts
taxd = copy.deepcopy(taxdicts)
newtaxdicts = []
for test in taxd:
#test["p4"] = [outg[0]]
test["p4"] = ["outgroup"]
newtaxdicts.append(test)
return tre, newtaxdicts |
def _decompose_tree(ttree, orient='right', use_edge_lengths=True):
""" decomposes tree into component parts for plotting """
## set attributes
ttree._orient = orient
ttree._use_edge_lengths = use_edge_lengths
    ult = not use_edge_lengths
## map numeric values to internal nodes from root to tips
names = {}
idx = 0
for node in ttree.tree.traverse("preorder"):
if not node.is_leaf():
if node.name:
names[idx] = node.name
else:
names[idx] = idx
node.name = str(idx)
node.idx = idx
idx += 1
## map number to the tips, these will be the highest numbers
for node in ttree.tree.get_leaves():
names[idx] = node.name
node.idx = idx
idx += 1
## create empty edges and coords arrays
ttree.node_labels = names
ttree.tip_labels = ttree.tree.get_leaf_names()
#self.tip_labels = self.tree.get_leaf_names()[::-1]
#self.node_labels = self.names
ttree.edges = np.zeros((idx - 1, 2), dtype=int)
ttree.verts = np.zeros((idx, 2), dtype=float)
ttree._lines = [] # np.zeros((ntips-1), dtype=int)
ttree._coords = [] # np.zeros((idx * 2 - ntips), dtype=float)
## postorder: first children and then parents. This moves up the list .
nidx = 0
tip_num = len(ttree.tree.get_leaves()) - 1
## tips to root to fill in the verts and edges
for node in ttree.tree.traverse("postorder"):
if node.is_leaf():
## set the xy-axis positions of the tips
node.y = ttree.tree.get_distance(node)
if ult:
node.y = 0.
node.x = tip_num
tip_num -= 1
            ## edges connect this vert to its parent
ttree.verts[node.idx] = [node.x, node.y]
ttree.edges[nidx] = [node.up.idx, node.idx]
elif node.is_root():
node.y = ttree.tree.get_distance(node)
if ult:
node.y = -1 * node.get_farthest_leaf(True)[1] - 1
node.x = sum(i.x for i in node.children) / float(len(node.children))
ttree.verts[node.idx] = [node.x, node.y]
else:
## create new nodes left and right
node.y = ttree.tree.get_distance(node)
if ult:
node.y = -1 * node.get_farthest_leaf(True)[1] - 1
node.x = sum(i.x for i in node.children) / float(len(node.children))
ttree.edges[nidx, :] = [node.up.idx, node.idx]
ttree.verts[node.idx] = [node.x, node.y]
nidx += 1
## root to tips to fill in the coords and lines
cidx = 0
for node in ttree.tree.traverse():
## add yourself
if not node.is_leaf():
ttree._coords += [[node.x, node.y]]
pidx = cidx
cidx += 1
for child in node.children:
## add children
ttree._coords += [[child.x, node.y], [child.x, child.y]]
ttree._lines += [[pidx, cidx]] ## connect yourself to newx
ttree._lines += [[cidx, cidx+1]] ## connect newx to child
cidx += 2
ttree._coords = np.array(ttree._coords, dtype=float)
ttree._lines = np.array(ttree._lines, dtype=int)
## invert for sideways trees
if ttree._orient in ['up', 0]:
pass
if ttree._orient in ['left', 1]:
ttree.verts[:, 1] = ttree.verts[:, 1] * -1
ttree.verts = ttree.verts[:, [1, 0]]
ttree._coords[:, 1] = ttree._coords[:, 1] * -1
ttree._coords = ttree._coords[:, [1, 0]]
if ttree._orient in ['down', 0]:
ttree.verts[:, 1] = ttree.verts[:, 1] * -1
ttree._coords[:, 1] = ttree._coords[:, 1] * -1
if ttree._orient in ['right', 3]:
ttree.verts = ttree.verts[:, [1, 0]]
ttree._coords = ttree._coords[:, [1, 0]] |
def draw(
self,
show_tip_labels=True,
show_node_support=False,
use_edge_lengths=False,
orient="right",
print_args=False,
*args,
**kwargs):
"""
plot the tree using toyplot.graph.
Parameters:
-----------
show_tip_labels: bool
Show tip names from tree.
use_edge_lengths: bool
Use edge lengths from newick tree.
show_node_support: bool
Show support values at nodes using a set of default
options.
...
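        Example:
        --------
        A hypothetical sketch (assumes `tre` is an instance of this class):
            >>> canvas, axes, panel = tre.draw(show_tip_labels=True,
            ...                                use_edge_lengths=True,
            ...                                orient="right")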
"""
## re-decompose tree for new orient and edges args
self._decompose_tree(orient=orient, use_edge_lengths=use_edge_lengths)
## update kwargs with entered args and all other kwargs
dwargs = {}
dwargs["show_tip_labels"] = show_tip_labels
dwargs["show_node_support"] = show_node_support
dwargs.update(kwargs)
## pass to panel plotter
canvas, axes, panel = tree_panel_plot(self, print_args, **dwargs)
return canvas, axes, panel |
def tree_panel_plot(ttree,
print_args=False,
*args,
**kwargs):
"""
signature...
"""
## create Panel plot object and set height & width
panel = Panel(ttree) #tree, edges, verts, names)
if not kwargs.get("width"):
panel.kwargs["width"] = min(1000, 25*len(panel.tree))
if not kwargs.get("height"):
panel.kwargs["height"] = panel.kwargs["width"]
## update defaults with kwargs & update size based on ntips & ntests
panel.kwargs.update(kwargs)
## magic node label arguments overrides others
if panel.kwargs["show_node_support"]:
nnodes = sum(1 for i in panel.tree.traverse()) - len(panel.tree)
## set node values
supps = [int(panel.tree.search_nodes(idx=j)[0].support) \
for j in range(nnodes)]
if not panel.kwargs["vsize"]:
panel.kwargs["vsize"] = 20
sizes = [panel.kwargs["vsize"] for j in range(nnodes)]
## add leaf values
supps += [""] * len(panel.tree)
sizes += [0] * len(panel.tree)
## override args
panel.kwargs["vlabel"] = supps
panel.kwargs["vsize"] = sizes
panel.kwargs["vlshow"] = True
#panel.kwargs["vmarker"] = 's' ## square
## if unrooted then hide root node scores
if len(panel.tree.children) > 2:
supps[0] = ""
sizes[0] = 0
#print(panel.kwargs["vlabels"])
#print(panel.kwargs["vsize"])
elif panel.kwargs.get("vlabel"):
panel.kwargs["vlabel"] = panel.kwargs["vlabel"]
panel.kwargs["vlshow"] = True
else:
panel.kwargs["vlabel"] = panel.node_labels.keys() #names.keys()
## debugger / see all options
if print_args:
print(panel.kwargs)
## maybe add panels for plotting tip traits in the future
## ...
## create a canvas and a single cartesian coord system
canvas = toyplot.Canvas(height=panel.kwargs['height'], width=panel.kwargs['width'])
axes = canvas.cartesian(bounds=("10%", "90%", "10%", "90%"))
axes.show = panel.kwargs["show_axes"]
## add panel plots to the axis
panel._panel_tree(axes)
if panel.kwargs["show_tip_labels"]:
panel._panel_tip_labels(axes)
return canvas, axes, panel |
def get_quick_depths(data, sample):
""" iterate over clustS files to get data """
    ## use the existing sample cluster path if it exists, since this
    ## func can be used in step 4, which can occur after merging
    ## assemblies after step 3; if we then rebuilt the path from
    ## data.dirs.clusts it would be broken.
##
## If branching at step 3 to test different clust thresholds, the
## branched samples will retain the samples.files.clusters of the
## parent (which have the clust_threshold value of the parent), so
## it will look like nothing has changed. If we call this func
## from step 3 then it indicates we are in a branch and should
## reset the sample.files.clusters handle to point to the correct
## data.dirs.clusts directory. See issue #229.
## Easier to just always trust that samples.files.clusters is right,
## no matter what step?
#if sample.files.clusters and not sample.stats.state == 3:
# pass
#else:
# ## set cluster file handles
sample.files.clusters = os.path.join(
data.dirs.clusts, sample.name+".clustS.gz")
## get new clustered loci
fclust = data.samples[sample.name].files.clusters
clusters = gzip.open(fclust, 'r')
pairdealer = itertools.izip(*[iter(clusters)]*2)
## storage
depths = []
maxlen = []
## start with cluster 0
tdepth = 0
tlen = 0
## iterate until empty
while 1:
## grab next
try:
name, seq = pairdealer.next()
except StopIteration:
break
## if not the end of a cluster
#print name.strip(), seq.strip()
if name.strip() == seq.strip():
depths.append(tdepth)
maxlen.append(tlen)
tlen = 0
tdepth = 0
else:
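            ## header lines are assumed to look like ">md5hash;size=12;*"
            ## (from the vsearch derep with --sizeout --relabel_md5); grab the
            ## integer after "size=" and track the current sequence length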
tdepth += int(name.split(";")[-2][5:])
tlen = len(seq)
## return
clusters.close()
return np.array(maxlen), np.array(depths) |
def sample_cleanup(data, sample):
""" stats, cleanup, and link to samples """
## get maxlen and depths array from clusters
maxlens, depths = get_quick_depths(data, sample)
try:
depths.max()
except ValueError:
## If depths is an empty array max() will raise
print(" no clusters found for {}".format(sample.name))
return
## Test if depths is non-empty, but just full of zeros.
if depths.max():
## store which min was used to calculate hidepth here
sample.stats_dfs.s3["hidepth_min"] = data.paramsdict["mindepth_majrule"]
        ## If our longest sequence is longer than the current max_fragment_length
        ## then update max_fragment_length. We add 4 to the new value to allow
        ## room for the pair separator ("nnnn").
hidepths = depths >= data.paramsdict["mindepth_majrule"]
maxlens = maxlens[hidepths]
## Handle the case where there are no hidepth clusters
if maxlens.any():
maxlen = int(maxlens.mean() + (2.*maxlens.std()))
else:
maxlen = 0
if maxlen > data._hackersonly["max_fragment_length"]:
data._hackersonly["max_fragment_length"] = maxlen + 4
## make sense of stats
keepmj = depths[depths >= data.paramsdict["mindepth_majrule"]]
keepstat = depths[depths >= data.paramsdict["mindepth_statistical"]]
## sample summary stat assignments
sample.stats["state"] = 3
sample.stats["clusters_total"] = depths.shape[0]
sample.stats["clusters_hidepth"] = keepmj.shape[0]
## store depths histogram as a dict. Limit to first 25 bins
bars, bins = np.histogram(depths, bins=range(1, 26))
sample.depths = {int(i):v for i, v in zip(bins, bars) if v}
## sample stat assignments
## Trap numpy warnings ("mean of empty slice") printed by samples
## with few reads.
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=RuntimeWarning)
sample.stats_dfs.s3["merged_pairs"] = sample.stats.reads_merged
sample.stats_dfs.s3["clusters_total"] = depths.shape[0]
try:
sample.stats_dfs.s3["clusters_hidepth"] = int(sample.stats["clusters_hidepth"])
except ValueError:
## Handle clusters_hidepth == NaN
sample.stats_dfs.s3["clusters_hidepth"] = 0
sample.stats_dfs.s3["avg_depth_total"] = depths.mean()
LOGGER.debug("total depth {}".format(sample.stats_dfs.s3["avg_depth_total"]))
sample.stats_dfs.s3["avg_depth_mj"] = keepmj.mean()
LOGGER.debug("mj depth {}".format(sample.stats_dfs.s3["avg_depth_mj"]))
sample.stats_dfs.s3["avg_depth_stat"] = keepstat.mean()
sample.stats_dfs.s3["sd_depth_total"] = depths.std()
sample.stats_dfs.s3["sd_depth_mj"] = keepmj.std()
sample.stats_dfs.s3["sd_depth_stat"] = keepstat.std()
else:
print(" no clusters found for {}".format(sample.name))
## Get some stats from the bam files
## This is moderately hackish. samtools flagstat returns
## the number of reads in the bam file as the first element
## of the first line, this call makes this assumption.
if not data.paramsdict["assembly_method"] == "denovo":
refmap_stats(data, sample)
log_level = logging.getLevelName(LOGGER.getEffectiveLevel())
if not log_level == "DEBUG":
## Clean up loose files only if not in DEBUG
##- edits/*derep, utemp, *utemp.sort, *htemp, *clust.gz
derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
mergefile = os.path.join(data.dirs.edits, sample.name+"_merged_.fastq")
uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
usort = os.path.join(data.dirs.clusts, sample.name+".utemp.sort")
hhandle = os.path.join(data.dirs.clusts, sample.name+".htemp")
clusters = os.path.join(data.dirs.clusts, sample.name+".clust.gz")
for f in [derepfile, mergefile, uhandle, usort, hhandle, clusters]:
try:
os.remove(f)
except:
pass |
def persistent_popen_align3(clusts, maxseqs=200, is_gbs=False):
""" keeps a persistent bash shell open and feeds it muscle alignments """
## create a separate shell for running muscle in, this is much faster
## than spawning a separate subprocess for each muscle call
proc = sps.Popen(["bash"],
stdin=sps.PIPE,
stdout=sps.PIPE,
universal_newlines=True)
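    ## each muscle command below ends with "; echo //", so the '//' line acts
    ## as a sentinel marking where one alignment's stdout ends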
## iterate over clusters in this file until finished
aligned = []
for clust in clusts:
## new alignment string for read1s and read2s
align1 = ""
align2 = ""
## don't bother aligning if only one seq
if clust.count(">") == 1:
aligned.append(clust.replace(">", "").strip())
else:
## do we need to split the alignment? (is there a PE insert?)
try:
## make into list (only read maxseqs lines, 2X cuz names)
lclust = clust.split()[:maxseqs*2]
## try to split cluster list at nnnn separator for each read
lclust1 = list(itertools.chain(*zip(\
lclust[::2], [i.split("nnnn")[0] for i in lclust[1::2]])))
lclust2 = list(itertools.chain(*zip(\
lclust[::2], [i.split("nnnn")[1] for i in lclust[1::2]])))
## put back into strings
clust1 = "\n".join(lclust1)
clust2 = "\n".join(lclust2)
## Align the first reads.
## The muscle command with alignment as stdin and // as splitter
cmd1 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(clust1, ipyrad.bins.muscle, "//")
## send cmd1 to the bash shell
print(cmd1, file=proc.stdin)
## read the stdout by line until splitter is reached
## meaning that the alignment is finished.
for line in iter(proc.stdout.readline, '//\n'):
align1 += line
## Align the second reads.
## The muscle command with alignment as stdin and // as splitter
cmd2 = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(clust2, ipyrad.bins.muscle, "//")
## send cmd2 to the bash shell
print(cmd2, file=proc.stdin)
## read the stdout by line until splitter is reached
## meaning that the alignment is finished.
for line in iter(proc.stdout.readline, '//\n'):
align2 += line
## join up aligned read1 and read2 and ensure names order matches
la1 = align1[1:].split("\n>")
la2 = align2[1:].split("\n>")
dalign1 = dict([i.split("\n", 1) for i in la1])
dalign2 = dict([i.split("\n", 1) for i in la2])
align1 = []
try:
keys = sorted(dalign1.keys(), key=DEREP, reverse=True)
except ValueError as inst:
## Lines is empty. This means the call to muscle alignment failed.
## Not sure how to handle this, but it happens only very rarely.
LOGGER.error("Muscle alignment failed: Bad clust - {}\nBad lines - {}"\
.format(clust, lines))
continue
## put seed at top of alignment
seed = [i for i in keys if i.split(";")[-1][0]=="*"][0]
keys.pop(keys.index(seed))
keys = [seed] + keys
for key in keys:
align1.append("\n".join([key,
dalign1[key].replace("\n", "")+"nnnn"+\
dalign2[key].replace("\n", "")]))
## append aligned cluster string
aligned.append("\n".join(align1).strip())
## Malformed clust. Dictionary creation with only 1 element will raise.
except ValueError as inst:
LOGGER.debug("Bad PE cluster - {}\nla1 - {}\nla2 - {}".format(\
clust, la1, la2))
## Either reads are SE, or at least some pairs are merged.
except IndexError:
## limit the number of input seqs
lclust = "\n".join(clust.split()[:maxseqs*2])
## the muscle command with alignment as stdin and // as splitter
cmd = "echo -e '{}' | {} -quiet -in - ; echo {}"\
.format(lclust, ipyrad.bins.muscle, "//")
## send cmd to the bash shell (TODO: PIPE could overflow here!)
print(cmd, file=proc.stdin)
## read the stdout by line until // is reached. This BLOCKS.
for line in iter(proc.stdout.readline, '//\n'):
align1 += line
## remove '>' from names, and '\n' from inside long seqs
lines = align1[1:].split("\n>")
try:
## find seed of the cluster and put it on top.
seed = [i for i in lines if i.split(";")[-1][0]=="*"][0]
lines.pop(lines.index(seed))
lines = [seed] + sorted(lines, key=DEREP, reverse=True)
except ValueError as inst:
## Lines is empty. This means the call to muscle alignment failed.
## Not sure how to handle this, but it happens only very rarely.
LOGGER.error("Muscle alignment failed: Bad clust - {}\nBad lines - {}"\
.format(clust, lines))
continue
## format remove extra newlines from muscle
aa = [i.split("\n", 1) for i in lines]
align1 = [i[0]+'\n'+"".join([j.replace("\n", "") for j in i[1:]]) for i in aa]
## trim edges in sloppy gbs/ezrad data. Maybe relevant to other types too...
if is_gbs:
align1 = gbs_trim(align1)
## append to aligned
aligned.append("\n".join(align1).strip())
# cleanup
proc.stdout.close()
if proc.stderr:
proc.stderr.close()
proc.stdin.close()
proc.wait()
## return the aligned clusters
return aligned |
def gbs_trim(align1):
"""
No reads can go past the left of the seed, or right of the least extended
reverse complement match. Example below. m is a match. u is an area where
lots of mismatches typically occur. The cut sites are shown.
Original locus*
Seed TGCAG************************************-----------------------
Forward-match TGCAGmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm-----------------------
Forward-match TGCAGmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm-----------------------------
Forward-match TGCAGmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm------------------------
Revcomp-match ------------------------mmmmmmmmmmmmmmmmmmmmmmmmmmmCTGCAuuuuuuuu
Revcomp-match ---------------mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmCTGCAuuuuuuuuuuuuuu
Revcomp-match --------------------------------mmmmmmmmmmmmmmmmmmmmmmmmmmmCTGCA
Revcomp-match ------------------------mmmmmmmmmmmmmmmmmmmmmmmmmmmCTGCAuuuuuuuu
Trimmed locus*
Seed TGCAG************************************---------
Forward-match TGCAGmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm---------
Forward-match TGCAGmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm---------------
Forward-match TGCAGmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm----------
Revcomp-match ------------------------mmmmmmmmmmmmmmmmmmmmmmmmmm
Revcomp-match ---------------mmmmmmmmmmmmmmmmmmmmmmmmmmmmmmCTGCA
Revcomp-match --------------------------------mmmmmmmmmmmmmmmmmm
Revcomp-match ------------------------mmmmmmmmmmmmmmmmmmmmmmmmmm
"""
leftmost = rightmost = None
dd = {k:v for k,v in [j.rsplit("\n", 1) for j in align1]}
seed = [i for i in dd.keys() if i.rsplit(";")[-1][0] == "*"][0]
leftmost = [i != "-" for i in dd[seed]].index(True)
revs = [i for i in dd.keys() if i.rsplit(";")[-1][0] == "-"]
if revs:
subright = max([[i!="-" for i in seq[::-1]].index(True) \
for seq in [dd[i] for i in revs]])
else:
subright = 0
rightmost = len(dd[seed]) - subright
## if locus got clobbered then print place-holder NNN
names, seqs = zip(*[i.rsplit("\n", 1) for i in align1])
if rightmost > leftmost:
newalign1 = [n+"\n"+i[leftmost:rightmost] for i,n in zip(seqs, names)]
else:
newalign1 = [n+"\nNNN" for i,n in zip(seqs, names)]
return newalign1 |
def align_and_parse(handle, max_internal_indels=5, is_gbs=False):
""" much faster implementation for aligning chunks """
## data are already chunked, read in the whole thing. bail if no data.
try:
with open(handle, 'rb') as infile:
clusts = infile.read().split("//\n//\n")
## remove any empty spots
clusts = [i for i in clusts if i]
## Skip entirely empty chunks
if not clusts:
raise IPyradError
except (IOError, IPyradError):
LOGGER.debug("skipping empty chunk - {}".format(handle))
return 0
## count discarded clusters for printing to stats later
highindels = 0
## iterate over clusters sending each to muscle, splits and aligns pairs
try:
aligned = persistent_popen_align3(clusts, 200, is_gbs)
except Exception as inst:
LOGGER.debug("Error in handle - {} - {}".format(handle, inst))
#raise IPyradWarningExit("error hrere {}".format(inst))
aligned = []
## store good alignments to be written to file
refined = []
## filter and trim alignments
for clust in aligned:
## check for too many internal indels
filtered = aligned_indel_filter(clust, max_internal_indels)
## reverse complement matches. No longer implemented.
#filtered = overshoot_filter(clust)
## finally, add to outstack if alignment is good
if not filtered:
refined.append(clust)#"\n".join(stack))
else:
highindels += 1
## write to file after
if refined:
outhandle = handle.rsplit(".", 1)[0]+".aligned"
with open(outhandle, 'wb') as outfile:
outfile.write("\n//\n//\n".join(refined)+"\n")
## remove the old tmp file
log_level = logging.getLevelName(LOGGER.getEffectiveLevel())
if not log_level == "DEBUG":
os.remove(handle)
return highindels |
def aligned_indel_filter(clust, max_internal_indels):
""" checks for too many internal indels in muscle aligned clusters """
## make into list
lclust = clust.split()
## paired or not
try:
seq1 = [i.split("nnnn")[0] for i in lclust[1::2]]
seq2 = [i.split("nnnn")[1] for i in lclust[1::2]]
intindels1 = [i.rstrip("-").lstrip("-").count("-") for i in seq1]
intindels2 = [i.rstrip("-").lstrip("-").count("-") for i in seq2]
intindels = intindels1 + intindels2
if max(intindels) > max_internal_indels:
return 1
except IndexError:
seq1 = lclust[1::2]
intindels = [i.rstrip("-").lstrip("-").count("-") for i in seq1]
if max(intindels) > max_internal_indels:
return 1
return 0 |
def build_clusters(data, sample, maxindels):
"""
Combines information from .utemp and .htemp files to create .clust files,
which contain un-aligned clusters. Hits to seeds are only kept in the
    cluster if the number of internal indels is no more than 'maxindels'.
By default, we set maxindels=6 for this step (within-sample clustering).
"""
## If reference assembly then here we're clustering the unmapped reads
if "reference" in data.paramsdict["assembly_method"]:
derepfile = os.path.join(data.dirs.edits, sample.name+"-refmap_derep.fastq")
else:
derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
## i/o vsearch files
uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
usort = os.path.join(data.dirs.clusts, sample.name+".utemp.sort")
hhandle = os.path.join(data.dirs.clusts, sample.name+".htemp")
## create an output file to write clusters to
sample.files.clusters = os.path.join(data.dirs.clusts, sample.name+".clust.gz")
clustsout = gzip.open(sample.files.clusters, 'wb')
## Sort the uhandle file so we can read through matches efficiently
cmd = ["sort", "-k", "2", uhandle, "-o", usort]
proc = sps.Popen(cmd, close_fds=True)
_ = proc.communicate()[0]
## load ALL derep reads into a dictionary (this can be a few GB of RAM)
## and is larger if names are larger. We are grabbing two lines at a time.
alldereps = {}
with open(derepfile, 'rb') as ioderep:
dereps = itertools.izip(*[iter(ioderep)]*2)
for namestr, seq in dereps:
            nnn, sss = [i.strip() for i in (namestr, seq)]
alldereps[nnn[1:]] = sss
## store observed seeds (this could count up to >million in bad data sets)
seedsseen = set()
## Iterate through the usort file grabbing matches to build clusters
with open(usort, 'rb') as insort:
## iterator, seed null, seqlist null
isort = iter(insort)
lastseed = 0
fseqs = []
seqlist = []
seqsize = 0
while 1:
## grab the next line
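            ## each .utemp line holds the vsearch userfields
            ## query+target+id+gaps+qstrand+qcov (whitespace separated)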
try:
hit, seed, _, ind, ori, _ = isort.next().strip().split()
LOGGER.debug(">{} {} {}".format(hit, seed, ori, seq))
except StopIteration:
break
## same seed, append match
if seed != lastseed:
seedsseen.add(seed)
## store the last cluster (fseq), count it, and clear fseq
if fseqs:
## sort fseqs by derep after pulling out the seed
fseqs = [fseqs[0]] + sorted(fseqs[1:], key=lambda x: \
int(x.split(";size=")[1].split(";")[0]), reverse=True)
seqlist.append("\n".join(fseqs))
seqsize += 1
fseqs = []
## occasionally write/dump stored clusters to file and clear mem
if not seqsize % 10000:
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## reset list and counter
seqlist = []
## store the new seed on top of fseq list
fseqs.append(">{}*\n{}".format(seed, alldereps[seed]))
lastseed = seed
## add match to the seed
## revcomp if orientation is reversed (comp preserves nnnn)
if ori == "-":
seq = comp(alldereps[hit])[::-1]
else:
seq = alldereps[hit]
## only save if not too many indels
if int(ind) <= maxindels:
fseqs.append(">{}{}\n{}".format(hit, ori, seq))
else:
LOGGER.info("filtered by maxindels: %s %s", ind, seq)
## write whatever is left over to the clusts file
if fseqs:
seqlist.append("\n".join(fseqs))
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## now write the seeds that had no hits. Make dict from htemp
with open(hhandle, 'rb') as iotemp:
nohits = itertools.izip(*[iter(iotemp)]*2)
seqlist = []
seqsize = 0
while 1:
try:
nnn, _ = [i.strip() for i in nohits.next()]
except StopIteration:
break
## occasionally write to file
if not seqsize % 10000:
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist)+"\n//\n//\n")
## reset list and counter
seqlist = []
## append to list if new seed
if nnn[1:] not in seedsseen:
seqlist.append("{}*\n{}".format(nnn, alldereps[nnn[1:]]))
seqsize += 1
## write whatever is left over to the clusts file
if seqlist:
clustsout.write("\n//\n//\n".join(seqlist))#+"\n//\n//\n")
## close the file handle
clustsout.close()
del alldereps |
def setup_dirs(data):
""" sets up directories for step3 data """
## make output folder for clusters
pdir = os.path.realpath(data.paramsdict["project_dir"])
data.dirs.clusts = os.path.join(pdir, "{}_clust_{}"\
.format(data.name, data.paramsdict["clust_threshold"]))
if not os.path.exists(data.dirs.clusts):
os.mkdir(data.dirs.clusts)
## make a tmpdir for align files
data.tmpdir = os.path.abspath(os.path.expanduser(
os.path.join(pdir, data.name+'-tmpalign')))
if not os.path.exists(data.tmpdir):
os.mkdir(data.tmpdir)
## If ref mapping, init samples and make the refmapping output directory.
if not data.paramsdict["assembly_method"] == "denovo":
## make output directory for read mapping process
data.dirs.refmapping = os.path.join(pdir, "{}_refmapping".format(data.name))
if not os.path.exists(data.dirs.refmapping):
os.mkdir(data.dirs.refmapping) |
def new_apply_jobs(data, samples, ipyclient, nthreads, maxindels, force):
"""
Create a DAG of prealign jobs to be run in order for each sample. Track
Progress, report errors. Each assembly method has a slightly different
DAG setup, calling different functions.
"""
## is datatype gbs? used in alignment-trimming by align_and_parse()
is_gbs = bool("gbs" in data.paramsdict["datatype"])
## Two view objects, threaded and unthreaded
lbview = ipyclient.load_balanced_view()
start = time.time()
elapsed = datetime.timedelta(seconds=int(time.time()-start))
firstfunc = "derep_concat_split"
printstr = " {} | {} | s3 |".format(PRINTSTR[firstfunc], elapsed)
#printstr = " {} | {} | s3 |".format(PRINTSTR[], elapsed)
progressbar(10, 0, printstr, spacer=data._spacer)
## TODO: for HPC systems this should be done to make sure targets are spread
## among different nodes.
if nthreads:
if nthreads < len(ipyclient.ids):
thview = ipyclient.load_balanced_view(targets=ipyclient.ids[::nthreads])
elif nthreads == 1:
thview = ipyclient.load_balanced_view()
else:
if len(ipyclient) > 40:
thview = ipyclient.load_balanced_view(targets=ipyclient.ids[::4])
else:
thview = ipyclient.load_balanced_view(targets=ipyclient.ids[::2])
## get list of jobs/dependencies as a DAG for all pre-align funcs.
dag, joborder = build_dag(data, samples)
## dicts for storing submitted jobs and results
results = {}
## submit jobs to the engines in single or threaded views. The topological
## sort makes sure jobs are input with all dependencies found.
for node in nx.topological_sort(dag):
## get list of async results leading to this job
deps = [results.get(n) for n in dag.predecessors(node)]
deps = ipp.Dependency(dependencies=deps, failure=True)
## get func, sample, and args for this func (including [data, sample])
funcstr, chunk, sname = node.split("-", 2)
func = FUNCDICT[funcstr]
sample = data.samples[sname]
## args vary depending on the function
if funcstr in ["derep_concat_split", "cluster"]:
args = [data, sample, nthreads, force]
elif funcstr in ["mapreads"]:
args = [data, sample, nthreads, force]
elif funcstr in ["build_clusters"]:
args = [data, sample, maxindels]
elif funcstr in ["muscle_align"]:
handle = os.path.join(data.tmpdir,
"{}_chunk_{}.ali".format(sample.name, chunk))
args = [handle, maxindels, is_gbs]
else:
args = [data, sample]
# submit and store AsyncResult object. Some jobs are threaded.
if nthreads and (funcstr in THREADED_FUNCS):
#LOGGER.info('submitting %s to %s-threaded view', funcstr, nthreads)
with thview.temp_flags(after=deps, block=False):
results[node] = thview.apply(func, *args)
else:
#LOGGER.info('submitting %s to single-threaded view', funcstr)
with lbview.temp_flags(after=deps, block=False):
results[node] = lbview.apply(func, *args)
## track jobs as they finish, abort if someone fails. This blocks here
## until all jobs are done. Keep track of which samples have failed so
## we only print the first error message.
sfailed = set()
for funcstr in joborder + ["muscle_align", "reconcat"]:
errfunc, sfails, msgs = trackjobs(funcstr, results, spacer=data._spacer)
LOGGER.info("{}-{}-{}".format(errfunc, sfails, msgs))
if errfunc:
for sidx in xrange(len(sfails)):
sname = sfails[sidx]
errmsg = msgs[sidx]
if sname not in sfailed:
print(" sample [{}] failed. See error in ./ipyrad_log.txt"\
.format(sname))
LOGGER.error("sample [%s] failed in step [%s]; error: %s",
sname, errfunc, errmsg)
sfailed.add(sname)
## Cleanup of successful samples, skip over failed samples
badaligns = {}
for sample in samples:
## The muscle_align step returns the number of excluded bad alignments
for async in results:
func, chunk, sname = async.split("-", 2)
if (func == "muscle_align") and (sname == sample.name):
if results[async].successful():
badaligns[sample] = int(results[async].get())
## for the samples that were successful:
for sample in badaligns:
## store the result
sample.stats_dfs.s3.filtered_bad_align = badaligns[sample]
## store all results
try:
sample_cleanup(data, sample)
except Exception as inst:
msg = " Sample {} failed this step. See ipyrad_log.txt.\
".format(sample.name)
print(msg)
LOGGER.error("%s - %s", sample.name, inst)
## store the results to data
data_cleanup(data) |
def build_dag(data, samples):
"""
build a directed acyclic graph describing jobs to be run in order.
"""
## Create DAGs for the assembly method being used, store jobs in nodes
snames = [i.name for i in samples]
dag = nx.DiGraph()
## get list of pre-align jobs from globals based on assembly method
joborder = JOBORDER[data.paramsdict["assembly_method"]]
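    ## node names follow "{func}-{chunk}-{sname}"; e.g. a hypothetical sample
    ## "sampleA" gets nodes like "derep_concat_split-0-sampleA" and
    ## "muscle_align-3-sampleA"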
## WHICH JOBS TO RUN: iterate over the sample names
for sname in snames:
## append pre-align job for each sample to nodes list
for func in joborder:
dag.add_node("{}-{}-{}".format(func, 0, sname))
## append align func jobs, each will have max 10
for chunk in xrange(10):
dag.add_node("{}-{}-{}".format("muscle_align", chunk, sname))
## append final reconcat jobs
dag.add_node("{}-{}-{}".format("reconcat", 0, sname))
## ORDER OF JOBS: add edges/dependency between jobs: (first-this, then-that)
for sname in snames:
for sname2 in snames:
## enforce that clust/map cannot start until derep is done for ALL
## samples. This is b/c...
dag.add_edge("{}-{}-{}".format(joborder[0], 0, sname2),
"{}-{}-{}".format(joborder[1], 0, sname))
## add remaining pre-align jobs
for idx in xrange(2, len(joborder)):
dag.add_edge("{}-{}-{}".format(joborder[idx-1], 0, sname),
"{}-{}-{}".format(joborder[idx], 0, sname))
## Add 10 align jobs, none of which can start until all chunker jobs
## are finished. Similarly, reconcat jobs cannot start until all align
## jobs are finished.
for sname2 in snames:
for chunk in range(10):
dag.add_edge("{}-{}-{}".format("muscle_chunker", 0, sname2),
"{}-{}-{}".format("muscle_align", chunk, sname))
## add that the final reconcat job can't start until after
## each chunk of its own sample has finished aligning.
dag.add_edge("{}-{}-{}".format("muscle_align", chunk, sname),
"{}-{}-{}".format("reconcat", 0, sname))
## return the dag
return dag, joborder |
def _plot_dag(dag, results, snames):
"""
makes plot to help visualize the DAG setup. For developers only.
"""
try:
import matplotlib.pyplot as plt
from matplotlib.dates import date2num
from matplotlib.cm import gist_rainbow
## first figure is dag layout
plt.figure("dag_layout", figsize=(10, 10))
nx.draw(dag,
pos=nx.spring_layout(dag),
node_color='pink',
with_labels=True)
plt.savefig("./dag_layout.png", bbox_inches='tight', dpi=200)
## second figure is times for steps
pos = {}
colors = {}
for node in dag:
#jobkey = "{}-{}".format(node, sample)
mtd = results[node].metadata
start = date2num(mtd.started)
#runtime = date2num(md.completed)# - start
## sample id to separate samples on x-axis
_, _, sname = node.split("-", 2)
sid = snames.index(sname)
## 1e6 to separate on y-axis
pos[node] = (start+sid, start*1e6)
colors[node] = mtd.engine_id
## x just spaces out samples;
## y is start time of each job with edge leading to next job
## color is the engine that ran the job
## all jobs were submitted as 3 second wait times
plt.figure("dag_starttimes", figsize=(10, 16))
nx.draw(dag, pos,
node_list=colors.keys(),
node_color=colors.values(),
cmap=gist_rainbow,
with_labels=True)
plt.savefig("./dag_starttimes.png", bbox_inches='tight', dpi=200)
except Exception as inst:
LOGGER.warning(inst) |
def trackjobs(func, results, spacer):
"""
Blocks and prints progress for just the func being requested from a list
of submitted engine jobs. Returns whether any of the jobs failed.
func = str
results = dict of asyncs
"""
## TODO: try to insert a better way to break on KBD here.
LOGGER.info("inside trackjobs of %s", func)
## get just the jobs from results that are relevant to this func
asyncs = [(i, results[i]) for i in results if i.split("-", 2)[0] == func]
## progress bar
start = time.time()
while 1:
## how many of this func have finished so far
ready = [i[1].ready() for i in asyncs]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
printstr = " {} | {} | s3 |".format(PRINTSTR[func], elapsed)
progressbar(len(ready), sum(ready), printstr, spacer=spacer)
time.sleep(0.1)
if len(ready) == sum(ready):
print("")
break
sfails = []
errmsgs = []
for job in asyncs:
if not job[1].successful():
sfails.append(job[0])
errmsgs.append(job[1].result())
return func, sfails, errmsgs |
def declone_3rad(data, sample):
"""
3rad uses random adapters to identify pcr duplicates. We will
    remove pcr dupes here. Basically append the random adapter to
each sequence, do a regular old vsearch derep, then trim
off the adapter, and push it down the pipeline. This will
remove all identical seqs with identical random i5 adapters.
"""
LOGGER.info("Entering declone_3rad - {}".format(sample.name))
## Append i5 adapter to the head of each read. Merged file is input, and
## still has fq qual score so also have to append several qscores for the
## adapter bases. Open the merge file, get quarts, go through each read
## and append the necessary stuff.
adapter_seqs_file = tempfile.NamedTemporaryFile(mode='wb',
delete=False,
dir=data.dirs.edits,
suffix="_append_adapters_.fastq")
try:
with open(sample.files.edits[0][0]) as infile:
quarts = itertools.izip(*[iter(infile)]*4)
## a list to store until writing
writing = []
counts = 0
while 1:
try:
read = quarts.next()
except StopIteration:
break
## Split on +, get [1], split on "_" (can be either _r1 or
## _m1 if merged reads) and get [0] for the i5
## prepend "EEEEEEEE" as qscore for the adapters
i5 = read[0].split("+")[1].split("_")[0]
                ## If the i5 contains an N then drop this sequence
if 'N' in i5:
continue
writing.append("\n".join([
read[0].strip(),
i5 + read[1].strip(),
read[2].strip(),
"E"*8 + read[3].strip()]
))
## Write the data in chunks
counts += 1
if not counts % 1000:
adapter_seqs_file.write("\n".join(writing)+"\n")
writing = []
if writing:
adapter_seqs_file.write("\n".join(writing))
adapter_seqs_file.close()
tmp_outfile = tempfile.NamedTemporaryFile(mode='wb',
delete=False,
dir=data.dirs.edits,
suffix="_decloned_w_adapters_.fastq")
## Close the tmp file bcz vsearch will write to it by name, then
## we will want to reopen it to read from it.
tmp_outfile.close()
## Derep the data (adapters+seq)
derep_and_sort(data, adapter_seqs_file.name,
os.path.join(data.dirs.edits, tmp_outfile.name), 2)
## Remove adapters from head of sequence and write out
## tmp_outfile is now the input file for the next step
## first vsearch derep discards the qscore so we iterate
## by pairs
with open(tmp_outfile.name) as infile:
with open(os.path.join(data.dirs.edits, sample.name+"_declone.fastq"),\
'wb') as outfile:
duo = itertools.izip(*[iter(infile)]*2)
## a list to store until writing
writing = []
counts2 = 0
while 1:
try:
read = duo.next()
except StopIteration:
break
## Peel off the adapters. There's probably a faster
## way of doing this.
writing.append("\n".join([
read[0].strip(),
read[1].strip()[8:]]
))
## Write the data in chunks
counts2 += 1
if not counts2 % 1000:
outfile.write("\n".join(writing)+"\n")
writing = []
if writing:
outfile.write("\n".join(writing))
outfile.close()
LOGGER.info("Removed pcr duplicates from {} - {}".format(sample.name, counts-counts2))
except Exception as inst:
raise IPyradError(" Caught error while decloning "\
+ "3rad data - {}".format(inst))
finally:
## failed samples will cause tmp file removal to raise.
## just ignore it.
try:
## Clean up temp files
if os.path.exists(adapter_seqs_file.name):
os.remove(adapter_seqs_file.name)
if os.path.exists(tmp_outfile.name):
os.remove(tmp_outfile.name)
except Exception as inst:
pass |
def derep_and_sort(data, infile, outfile, nthreads):
"""
Dereplicates reads and sorts so reads that were highly replicated are at
the top, and singletons at bottom, writes output to derep file. Paired
reads are dereplicated as one concatenated read and later split again.
Updated this function to take infile and outfile to support the double
dereplication that we need for 3rad (5/29/15 iao).
"""
## datatypes options
strand = "plus"
if "gbs" in data.paramsdict["datatype"]\
or "2brad" in data.paramsdict["datatype"]:
strand = "both"
## pipe in a gzipped file
if infile.endswith(".gz"):
catcmd = ["gunzip", "-c", infile]
else:
catcmd = ["cat", infile]
## do dereplication with vsearch
cmd = [ipyrad.bins.vsearch,
"--derep_fulllength", "-",
"--strand", strand,
"--output", outfile,
"--threads", str(nthreads),
"--fasta_width", str(0),
"--fastq_qmax", "1000",
"--sizeout",
"--relabel_md5",
]
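    ## with --relabel_md5 and --sizeout the derep output headers are md5 hashes
    ## annotated with the collapsed read count, e.g. something like
    ## ">098f6bcd...;size=12" (exact punctuation per vsearch's size annotation)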
LOGGER.info("derep cmd %s", " ".join(cmd))
## run vsearch
proc1 = sps.Popen(catcmd, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True)
proc2 = sps.Popen(cmd, stdin=proc1.stdout, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True)
try:
errmsg = proc2.communicate()[0]
except KeyboardInterrupt:
LOGGER.info("interrupted during dereplication")
raise KeyboardInterrupt()
if proc2.returncode:
LOGGER.error("error inside derep_and_sort %s", errmsg)
raise IPyradWarningExit(errmsg) |
def data_cleanup(data):
""" cleanup / statswriting function for Assembly obj """
data.stats_dfs.s3 = data._build_stat("s3")
data.stats_files.s3 = os.path.join(data.dirs.clusts, "s3_cluster_stats.txt")
with io.open(data.stats_files.s3, 'w') as outfile:
data.stats_dfs.s3.to_string(
buf=outfile,
formatters={
'merged_pairs':'{:.0f}'.format,
'clusters_total':'{:.0f}'.format,
'clusters_hidepth':'{:.0f}'.format,
'filtered_bad_align':'{:.0f}'.format,
'avg_depth_stat':'{:.2f}'.format,
'avg_depth_mj':'{:.2f}'.format,
'avg_depth_total':'{:.2f}'.format,
'sd_depth_stat':'{:.2f}'.format,
'sd_depth_mj':'{:.2f}'.format,
'sd_depth_total':'{:.2f}'.format
}) |
def concat_multiple_edits(data, sample):
"""
if multiple fastq files were appended into the list of fastqs for samples
then we merge them here before proceeding.
"""
## if more than one tuple in fastq list
if len(sample.files.edits) > 1:
## create a cat command to append them all (doesn't matter if they
## are gzipped, cat still works). Grab index 0 of tuples for R1s.
cmd1 = ["cat"] + [i[0] for i in sample.files.edits]
## write to new concat handle
conc1 = os.path.join(data.dirs.edits, sample.name+"_R1_concatedit.fq.gz")
with open(conc1, 'w') as cout1:
proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=cout1, close_fds=True)
res1 = proc1.communicate()[0]
if proc1.returncode:
raise IPyradWarningExit("error in: %s, %s", cmd1, res1)
## Only set conc2 if R2 actually exists
conc2 = 0
if os.path.exists(str(sample.files.edits[0][1])):
cmd2 = ["cat"] + [i[1] for i in sample.files.edits]
conc2 = os.path.join(data.dirs.edits, sample.name+"_R2_concatedit.fq.gz")
with gzip.open(conc2, 'w') as cout2:
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=cout2, close_fds=True)
res2 = proc2.communicate()[0]
if proc2.returncode:
raise IPyradWarningExit("error in: %s, %s", cmd2, res2)
## store new file handles
sample.files.edits = [(conc1, conc2)]
return sample.files.edits |
def cluster(data, sample, nthreads, force):
"""
Calls vsearch for clustering. cov varies by data type, values were chosen
based on experience, but could be edited by users
"""
## get the dereplicated reads
if "reference" in data.paramsdict["assembly_method"]:
derephandle = os.path.join(data.dirs.edits, sample.name+"-refmap_derep.fastq")
## In the event all reads for all samples map successfully then clustering
## the unmapped reads makes no sense, so just bail out.
if not os.stat(derephandle).st_size:
## In this case you do have to create empty, dummy vsearch output
## files so building_clusters will not fail.
uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
usort = os.path.join(data.dirs.clusts, sample.name+".utemp.sort")
hhandle = os.path.join(data.dirs.clusts, sample.name+".htemp")
for f in [uhandle, usort, hhandle]:
open(f, 'a').close()
return
else:
derephandle = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
## create handles for the outfiles
uhandle = os.path.join(data.dirs.clusts, sample.name+".utemp")
temphandle = os.path.join(data.dirs.clusts, sample.name+".htemp")
## If derep file doesn't exist then bail out
if not os.path.isfile(derephandle):
LOGGER.warn("Bad derephandle - {}".format(derephandle))
raise IPyradError("Input file for clustering doesn't exist - {}"\
.format(derephandle))
## testing one sample fail
#if sample.name == "1C_0":
# x
## datatype specific optimization
## minsl: the percentage of the seed that must be matched
## smaller values for RAD/ddRAD where we might want to combine, say 50bp
## reads and 100bp reads in the same analysis.
## query_cov: the percentage of the query sequence that must match seed
## smaller values are needed for gbs where only the tips might overlap
## larger values for pairgbs where they should overlap near completely
    ## small minsl and high query cov allows trimmed reads to match to untrimmed
## seed for rad/ddrad/pairddrad.
strand = "plus"
cov = 0.75
minsl = 0.5
if data.paramsdict["datatype"] in ["gbs", "2brad"]:
strand = "both"
cov = 0.5
minsl = 0.5
elif data.paramsdict["datatype"] == 'pairgbs':
strand = "both"
cov = 0.75
minsl = 0.75
## If this value is not null (which is the default) then override query cov
if data._hackersonly["query_cov"]:
cov = str(data._hackersonly["query_cov"])
assert float(cov) <= 1, "query_cov must be <= 1.0"
## get call string
cmd = [ipyrad.bins.vsearch,
"-cluster_smallmem", derephandle,
"-strand", strand,
"-query_cov", str(cov),
"-id", str(data.paramsdict["clust_threshold"]),
"-minsl", str(minsl),
"-userout", uhandle,
"-userfields", "query+target+id+gaps+qstrand+qcov",
"-maxaccepts", "1",
"-maxrejects", "0",
"-threads", str(nthreads),
"-notmatched", temphandle,
"-fasta_width", "0",
"-fastq_qmax", "100",
"-fulldp",
"-usersort"]
## not sure what the benefit of this option is exactly, needs testing,
## might improve indel detection on left side, but we don't want to enforce
    ## aligning on the left side if not necessary, since quality-trimmed reads
## might lose bases on left side in step2 and no longer align.
#if data.paramsdict["datatype"] in ["rad", "ddrad", "pairddrad"]:
# cmd += ["-leftjust"]
## run vsearch
LOGGER.debug("%s", cmd)
proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE, close_fds=True)
## This is long running so we wrap it to make sure we can kill it
try:
res = proc.communicate()[0]
except KeyboardInterrupt:
proc.kill()
raise KeyboardInterrupt
## check for errors
if proc.returncode:
LOGGER.error("error %s: %s", cmd, res)
raise IPyradWarningExit("cmd {}: {}".format(cmd, res)) |
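## A minimal sketch (not part of ipyrad) summarizing the datatype-specific
## vsearch settings chosen in cluster() above. The helper name is illustrative.
def _vsearch_settings(datatype, query_cov_override=None):
    """ return (strand, query_cov, minsl) used for -cluster_smallmem """
    strand, cov, minsl = "plus", 0.75, 0.5
    if datatype in ("gbs", "2brad"):
        strand, cov, minsl = "both", 0.5, 0.5
    elif datatype == "pairgbs":
        strand, cov, minsl = "both", 0.75, 0.75
    ## a non-empty hackersonly query_cov overrides the datatype default
    if query_cov_override:
        cov = float(query_cov_override)
        assert cov <= 1.0, "query_cov must be <= 1.0"
    return strand, cov, minsl

## e.g., _vsearch_settings("pairgbs") -> ("both", 0.75, 0.75) |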
def muscle_chunker(data, sample):
"""
    Splits the clusters file into chunks to be aligned on separate computing
    cores. Because the largest clusters are at the beginning of the clusters
    file and take longer to align, assigning an equal number of clusters to
    each file would front-load the work. Instead the chunks are made
    progressively larger so the per-chunk workload is more even. If assembly
    method is reference then this step is just a placeholder and nothing happens.
"""
## log our location for debugging
LOGGER.info("inside muscle_chunker")
## only chunk up denovo data, refdata has its own chunking method which
## makes equal size chunks, instead of uneven chunks like in denovo
if data.paramsdict["assembly_method"] != "reference":
## get the number of clusters
clustfile = os.path.join(data.dirs.clusts, sample.name+".clust.gz")
        with gzip.open(clustfile, 'rb') as clustio:
nloci = sum(1 for i in clustio if "//" in i) // 2
#tclust = clustio.read().count("//")//2
optim = (nloci//20) + (nloci%20)
LOGGER.info("optim for align chunks: %s", optim)
## write optim clusters to each tmp file
clustio = gzip.open(clustfile, 'rb')
inclusts = iter(clustio.read().strip().split("//\n//\n"))
## splitting loci so first file is smaller and last file is bigger
inc = optim // 10
for idx in range(10):
## how big is this chunk?
this = optim + (idx * inc)
left = nloci-this
if idx == 9:
## grab everything left
grabchunk = list(itertools.islice(inclusts, int(1e9)))
else:
## grab next chunks-worth of data
grabchunk = list(itertools.islice(inclusts, this))
nloci = left
## write the chunk to file
tmpfile = os.path.join(data.tmpdir, sample.name+"_chunk_{}.ali".format(idx))
with open(tmpfile, 'wb') as out:
out.write("//\n//\n".join(grabchunk))
## write the chunk to file
#grabchunk = list(itertools.islice(inclusts, left))
#if grabchunk:
# tmpfile = os.path.join(data.tmpdir, sample.name+"_chunk_9.ali")
# with open(tmpfile, 'a') as out:
# out.write("\n//\n//\n".join(grabchunk))
clustio.close() |
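## A minimal standalone sketch (not part of ipyrad) of the uneven chunking
## arithmetic in muscle_chunker() above: early files get fewer of the large,
## slow-to-align clusters and later files get more. Sizes are illustrative.
def _chunk_sizes(nloci, nchunks=10):
    optim = (nloci // 20) + (nloci % 20)
    inc = optim // 10
    sizes, left = [], nloci
    for idx in range(nchunks):
        if idx == nchunks - 1:
            sizes.append(left)                    ## last chunk takes the rest
        else:
            take = min(optim + (idx * inc), left)
            sizes.append(take)
            left -= take
    return sizes

## e.g., _chunk_sizes(1000) -> [50, 55, 60, 65, 70, 75, 80, 85, 90, 370] |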
def reconcat(data, sample):
""" takes aligned chunks (usually 10) and concatenates them """
try:
## get chunks
chunks = glob.glob(os.path.join(data.tmpdir,
sample.name+"_chunk_[0-9].aligned"))
        ## sort by chunk number; [:-8] strips the trailing ".aligned"
chunks.sort(key=lambda x: int(x.rsplit("_", 1)[-1][:-8]))
LOGGER.info("chunk %s", chunks)
## concatenate finished reads
sample.files.clusters = os.path.join(data.dirs.clusts,
sample.name+".clustS.gz")
## reconcats aligned clusters
with gzip.open(sample.files.clusters, 'wb') as out:
for fname in chunks:
with open(fname) as infile:
dat = infile.read()
## avoids mess if last chunk was empty
if dat.endswith("\n"):
out.write(dat+"//\n//\n")
else:
out.write(dat+"\n//\n//\n")
os.remove(fname)
except Exception as inst:
LOGGER.error("Error in reconcat {}".format(inst))
raise |
def derep_concat_split(data, sample, nthreads, force):
"""
Running on remote Engine. Refmaps, then merges, then dereplicates,
then denovo clusters reads.
"""
## report location for debugging
LOGGER.info("INSIDE derep %s", sample.name)
    ## MERGED ASSEMBLIES ONLY:
## concatenate edits files within Samples. Returns a new sample.files.edits
## with the concat file. No change if not merged Assembly.
mergefile = os.path.join(data.dirs.edits, sample.name+"_merged_.fastq")
if not force:
if not os.path.exists(mergefile):
sample.files.edits = concat_multiple_edits(data, sample)
else:
LOGGER.info("skipped concat_multiple_edits: {} exists"\
.format(mergefile))
else:
sample.files.edits = concat_multiple_edits(data, sample)
## PAIRED DATA ONLY:
## Denovo: merge or concat fastq pairs [sample.files.pairs]
## Reference: only concat fastq pairs []
## Denovo + Reference: ...
if 'pair' in data.paramsdict['datatype']:
## the output file handle for merged reads
## modify behavior of merging vs concating if reference
if "reference" in data.paramsdict["assembly_method"]:
nmerged = merge_pairs(data, sample.files.edits, mergefile, 0, 0)
else:
nmerged = merge_pairs(data, sample.files.edits, mergefile, 1, 1)
## store results
sample.files.edits = [(mergefile, )]
sample.stats.reads_merged = nmerged
## 3rad uses random adapters to identify pcr duplicates. We will
        ## remove pcr dupes here. Basically append the random adapter to
## each sequence, do a regular old vsearch derep, then trim
## off the adapter, and push it down the pipeline. This will
## remove all identical seqs with identical random i5 adapters.
if "3rad" in data.paramsdict["datatype"]:
declone_3rad(data, sample)
derep_and_sort(data,
os.path.join(data.dirs.edits, sample.name+"_declone.fastq"),
os.path.join(data.dirs.edits, sample.name+"_derep.fastq"),
nthreads)
else:
## convert fastq to fasta, then derep and sort reads by their size.
## we pass in only one file b/c paired should be merged by now.
derep_and_sort(data,
sample.files.edits[0][0],
os.path.join(data.dirs.edits, sample.name+"_derep.fastq"),
nthreads) |
def run(data, samples, noreverse, maxindels, force, ipyclient):
""" run the major functions for clustering within samples """
## list of samples to submit to queue
subsamples = []
## if sample is already done skip
for sample in samples:
## If sample not in state 2 don't try to cluster it.
if sample.stats.state < 2:
print("""\
Sample not ready for clustering. First run step2 on sample: {}""".\
format(sample.name))
continue
if not force:
if sample.stats.state >= 3:
print("""\
    Skipping {}; already clustered. Use force to re-cluster""".\
format(sample.name))
else:
if sample.stats.reads_passed_filter:
subsamples.append(sample)
else:
## force to overwrite
if sample.stats.reads_passed_filter:
subsamples.append(sample)
## run subsamples
if not subsamples:
print(" No Samples ready to be clustered. First run step2().")
else:
## arguments to apply_jobs, inst catches exceptions
try:
## make dirs that are needed including tmpdir
setup_dirs(data)
## if refmapping make filehandles that will be persistent
if not data.paramsdict["assembly_method"] == "denovo":
for sample in subsamples:
refmap_init(data, sample, force)
                ## use 2 threads per engine job for reference-mapped assemblies
nthreads = 2
            ## use 1 thread per engine job for denovo assemblies
else:
nthreads = 1
## overwrite nthreads if value in _ipcluster dict
if "threads" in data._ipcluster.keys():
nthreads = int(data._ipcluster["threads"])
## if more CPUs than there are samples then increase threads
_ncpus = len(ipyclient)
if _ncpus > 2*len(data.samples):
nthreads *= 2
## submit jobs to be run on cluster
args = [data, subsamples, ipyclient, nthreads, maxindels, force]
new_apply_jobs(*args)
finally:
## this can fail if jobs were not stopped properly and are still
## writing to tmpdir. don't cleanup if debug is on.
try:
log_level = logging.getLevelName(LOGGER.getEffectiveLevel())
if not log_level == "DEBUG":
if os.path.exists(data.tmpdir):
shutil.rmtree(data.tmpdir)
## get all refmap_derep.fastqs
rdereps = glob.glob(os.path.join(data.dirs.edits, "*-refmap_derep.fastq"))
## Remove the unmapped fastq files
for rmfile in rdereps:
os.remove(rmfile)
except Exception as _:
LOGGER.warning("failed to cleanup files/dirs") |
def parse_params(args):
""" Parse the params file args, create and return Assembly object."""
## check that params.txt file is correctly formatted.
try:
with open(args.params) as paramsin:
plines = paramsin.readlines()
except IOError as _:
sys.exit(" No params file found")
## check header: big version changes can be distinguished by the header
legacy_version = 0
try:
## try to update the Assembly ...
legacy_version = 1
if not len(plines[0].split()[0]) == 7:
raise IPyradWarningExit("""
Error: file '{}' is not compatible with ipyrad v.{}.
Please create and update a new params file using the -n argument.
For info on which parameters have changed see the changelog:
(http://ipyrad.readthedocs.io/releasenotes.html)
""".format(args.params, ip.__version__))
except IndexError:
raise IPyradWarningExit("""
Error: Params file should not have any empty lines at the top
of the file. Verify there are no blank lines and rerun ipyrad.
Offending file - {}
""".format(args.params))
## update and backup
if legacy_version:
#which version...
#update_to_6()
pass
## make into a dict. Ignore blank lines at the end of file
## Really this will ignore all blank lines
items = [i.split("##")[0].strip() for i in plines[1:] if not i.strip() == ""]
#keys = [i.split("]")[-2][-1] for i in plines[1:]]
#keys = range(len(plines)-1)
keys = ip.Assembly('null', quiet=True).paramsdict.keys()
parsedict = {str(i):j for i, j in zip(keys, items)}
return parsedict |
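## A minimal sketch of the params-line parsing used in parse_params() above:
## everything after the "##" comment marker is dropped and whitespace is
## stripped. The example line below is hypothetical.
def _parse_param_line(line):
    return line.split("##")[0].strip()

## e.g., _parse_param_line("./analysis   ## [1] [project_dir]: ...") -> "./analysis" |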
def showstats(parsedict):
""" loads assembly or dies, and print stats to screen """
#project_dir = parsedict['1']
project_dir = parsedict["project_dir"]
if not project_dir:
project_dir = "./"
## Be nice if somebody also puts in the file extension
#assembly_name = parsedict['0']
assembly_name = parsedict["assembly_name"]
my_assembly = os.path.join(project_dir, assembly_name)
## If the project_dir doesn't exist don't even bother trying harder.
if not os.path.isdir(project_dir):
msg = """
Trying to print stats for Assembly ({}) that doesn't exist. You must
first run steps before you can show results.
""".format(project_dir)
sys.exit(msg)
if not assembly_name:
msg = """
Assembly name is not set in params.txt, meaning it was either changed or
erased since the Assembly was started. Please restore the original name.
You can find the name of your Assembly in the "project dir": {}.
""".format(project_dir)
raise IPyradError(msg)
data = ip.load_json(my_assembly, quiet=True, cli=True)
print("\nSummary stats of Assembly {}".format(data.name) \
+"\n------------------------------------------------")
if not data.stats.empty:
print(data.stats)
print("\n\nFull stats files"\
+"\n------------------------------------------------")
fullcurdir = os.path.realpath(os.path.curdir)
for i in range(1, 8):
#enumerate(sorted(data.stats_files)):
key = "s"+str(i)
try:
val = data.stats_files[key]
val = val.replace(fullcurdir, ".")
print("step {}: {}".format(i, val))
except (KeyError, AttributeError):
print("step {}: None".format(i))
print("\n")
else:
print("No stats to display") |
def branch_assembly(args, parsedict):
"""
Load the passed in assembly and create a branch. Copy it
to a new assembly, and also write out the appropriate params.txt
"""
## Get the current assembly
data = getassembly(args, parsedict)
## get arguments to branch command
bargs = args.branch
## get new name, trim off .txt if it was accidentally added
newname = bargs[0]
if newname.endswith(".txt"):
newname = newname[:-4]
## look for subsamples
if len(bargs) > 1:
## Branching and subsampling at step 6 is a bad idea, it messes up
## indexing into the hdf5 cluster file. Warn against this.
if any([x.stats.state == 6 for x in data.samples.values()]):
pass
            ## TODO: warn the user that subsampling at state 6 will invalidate
            ## indexing into the existing hdf5 cluster file
## are we removing or keeping listed samples?
subsamples = bargs[1:]
## drop the matching samples
if bargs[1] == "-":
## check drop names
fails = [i for i in subsamples[1:] if i not in data.samples.keys()]
if any(fails):
raise IPyradWarningExit("\
\n Failed: unrecognized names requested, check spelling:\n {}"\
.format("\n ".join([i for i in fails])))
print(" dropping {} samples".format(len(subsamples)-1))
subsamples = list(set(data.samples.keys()) - set(subsamples))
## If the arg after the new param name is a file that exists
if os.path.exists(bargs[1]):
new_data = data.branch(newname, infile=bargs[1])
else:
new_data = data.branch(newname, subsamples)
## keeping all samples
else:
new_data = data.branch(newname, None)
print(" creating a new branch called '{}' with {} Samples".\
format(new_data.name, len(new_data.samples)))
print(" writing new params file to {}"\
.format("params-"+new_data.name+".txt\n"))
new_data.write_params("params-"+new_data.name+".txt", force=args.force) |
def merge_assemblies(args):
"""
merge all given assemblies into a new assembly. Copies the params
    from the first passed in extant assembly. This function is called
with the ipyrad -m flag. You must pass it at least 3 values, the first
is a new assembly name (a new `param-newname.txt` will be created).
The second and third args must be params files for currently existing
assemblies. Any args beyond the third must also be params file for
extant assemblies.
"""
print("\n Merging assemblies: {}".format(args.merge[1:]))
## Make sure there are the right number of args
if len(args.merge) < 3:
sys.exit(_WRONG_NUM_CLI_MERGE)
    ## Make sure the first arg isn't a params file; someone could easily do this by mistake
newname = args.merge[0]
if os.path.exists(newname) and "params-" in newname:
sys.exit(_WRONG_ORDER_CLI_MERGE)
## Make sure first arg will create a param file that doesn't already exist
if os.path.exists("params-" + newname + ".txt") and not args.force:
sys.exit(_NAME_EXISTS_MERGE.format("params-" + newname + ".txt"))
## Make sure the rest of the args are params files that already exist
assemblies_to_merge = args.merge[1:]
for assembly in assemblies_to_merge:
if not os.path.exists(assembly):
sys.exit(_DOES_NOT_EXIST_MERGE.format(assembly))
## Get assemblies for each of the passed in params files.
## We're recycling some of the machinery for loading assemblies here
assemblies = []
for params_file in args.merge[1:]:
args.params = params_file
parsedict = parse_params(args)
assemblies.append(getassembly(args, parsedict))
## Do the merge
merged_assembly = ip.merge(newname, assemblies)
## Write out the merged assembly params file and report success
merged_assembly.write_params("params-{}.txt".format(newname), force=args.force)
print("\n Merging succeeded. New params file for merged assembly:")
print("\n params-{}.txt\n".format(newname)) |
def getassembly(args, parsedict):
"""
loads assembly or creates a new one and set its params from
parsedict. Does not launch ipcluster.
"""
## Creating an assembly with a full path in the name will "work"
## but it is potentially dangerous, so here we have assembly_name
## and assembly_file, name is used for creating new in cwd, file is
## used for loading existing.
##
## Be nice if the user includes the extension.
#project_dir = ip.core.assembly._expander(parsedict['1'])
#assembly_name = parsedict['0']
project_dir = ip.core.assembly._expander(parsedict['project_dir'])
assembly_name = parsedict['assembly_name']
assembly_file = os.path.join(project_dir, assembly_name)
## Assembly creation will handle error checking on
## the format of the assembly_name
## make sure the working directory exists.
if not os.path.exists(project_dir):
os.mkdir(project_dir)
try:
## If 1 and force then go ahead and create a new assembly
if ('1' in args.steps) and args.force:
data = ip.Assembly(assembly_name, cli=True)
else:
data = ip.load_json(assembly_file, cli=True)
data._cli = True
except IPyradWarningExit as _:
## if no assembly is found then go ahead and make one
if '1' not in args.steps:
raise IPyradWarningExit(\
" Error: You must first run step 1 on the assembly: {}"\
.format(assembly_file))
else:
## create a new assembly object
data = ip.Assembly(assembly_name, cli=True)
## for entering some params...
for param in parsedict:
## trap assignment of assembly_name since it is immutable.
if param == "assembly_name":
## Raise error if user tried to change assembly name
if parsedict[param] != data.name:
data.set_params(param, parsedict[param])
else:
## all other params should be handled by set_params
try:
data.set_params(param, parsedict[param])
except IndexError as _:
print(" Malformed params file: {}".format(args.params))
print(" Bad parameter {} - {}".format(param, parsedict[param]))
sys.exit(-1)
return data |
def _check_version():
""" Test if there's a newer version and nag the user to upgrade."""
import urllib2
from distutils.version import LooseVersion
header = \
"\n -------------------------------------------------------------"+\
"\n ipyrad [v.{}]".format(ip.__version__)+\
"\n Interactive assembly and analysis of RAD-seq data"+\
"\n -------------------------------------------------------------"
try:
htmldat = urllib2.urlopen("https://anaconda.org/ipyrad/ipyrad").readlines()
curversion = next((x for x in htmldat if "subheader" in x), None).split(">")[1].split("<")[0]
if LooseVersion(ip.__version__) < LooseVersion(curversion):
msg = """
A new version of ipyrad is available (v.{}). To upgrade run:
conda install -c ipyrad ipyrad\n""".format(curversion)
print(header + "\n" + msg)
else:
pass
#print("You are up to date")
except Exception as inst:
## Always fail silently
pass |
def main():
""" main function """
## turn off traceback for the CLI
ip.__interactive__ = 0
## Check for a new version on anaconda
_check_version()
    ## parse command line args (argparse prints to stdout and exits on --help or --version)
args = parse_command_line()
## Turn the debug output written to ipyrad_log.txt up to 11!
## Clean up the old one first, it's cleaner to do this here than
## at the end (exceptions, etc)
if os.path.exists(ip.__debugflag__):
os.remove(ip.__debugflag__)
if args.debug:
print("\n ** Enabling debug mode ** ")
ip._debug_on()
atexit.register(ip._debug_off)
## create new paramsfile if -n
if args.new:
## Create a tmp assembly, call write_params to make default params.txt
try:
tmpassembly = ip.Assembly(args.new, quiet=True, cli=True)
tmpassembly.write_params("params-{}.txt".format(args.new),
force=args.force)
except Exception as inst:
print(inst)
sys.exit(2)
print("\n New file 'params-{}.txt' created in {}\n".\
format(args.new, os.path.realpath(os.path.curdir)))
sys.exit(2)
## if params then must provide action argument with it
if args.params:
if not any([args.branch, args.results, args.steps]):
print("""
Must provide action argument along with -p argument for params file.
e.g., ipyrad -p params-test.txt -r ## shows results
e.g., ipyrad -p params-test.txt -s 12 ## runs steps 1 & 2
e.g., ipyrad -p params-test.txt -b newbranch ## branch this assembly
""")
sys.exit(2)
if not args.params:
if any([args.branch, args.results, args.steps]):
print("""
Must provide params file for branching, doing steps, or getting results.
e.g., ipyrad -p params-test.txt -r ## shows results
e.g., ipyrad -p params-test.txt -s 12 ## runs steps 1 & 2
e.g., ipyrad -p params-test.txt -b newbranch ## branch this assembly
""")
## if branching, or merging do not allow steps in same command
## print spacer
if any([args.branch, args.merge]):
args.steps = ""
print("")
## always print the header when doing steps
header = \
"\n -------------------------------------------------------------"+\
"\n ipyrad [v.{}]".format(ip.__version__)+\
"\n Interactive assembly and analysis of RAD-seq data"+\
"\n -------------------------------------------------------------"
## Log the current version. End run around the LOGGER
## so it'll always print regardless of log level.
with open(ip.__debugfile__, 'a') as logfile:
logfile.write(header)
logfile.write("\n Begin run: {}".format(time.strftime("%Y-%m-%d %H:%M")))
logfile.write("\n Using args {}".format(vars(args)))
logfile.write("\n Platform info: {}".format(os.uname()))
## if merging just do the merge and exit
if args.merge:
print(header)
merge_assemblies(args)
sys.exit(1)
## if download data do it and then exit. Runs single core in CLI.
if args.download:
if len(args.download) == 1:
downloaddir = "sra-fastqs"
else:
downloaddir = args.download[1]
sratools_download(args.download[0], workdir=downloaddir, force=args.force)
sys.exit(1)
## create new Assembly or load existing Assembly, quit if args.results
elif args.params:
parsedict = parse_params(args)
if args.branch:
branch_assembly(args, parsedict)
elif args.steps:
## print header
print(header)
## Only blank the log file if we're actually going to run a new
## assembly. This used to be in __init__, but had the side effect
## of occasionally blanking the log file in an undesirable fashion
## for instance if you run a long assembly and it crashes and
## then you run `-r` and it blanks the log, it's crazymaking.
if os.path.exists(ip.__debugfile__):
if os.path.getsize(ip.__debugfile__) > 50000000:
with open(ip.__debugfile__, 'w') as clear:
clear.write("file reset")
## run Assembly steps
## launch or load assembly with custom profile/pid
data = getassembly(args, parsedict)
## set CLI ipcluster terms
data._ipcluster["threads"] = args.threads
## if ipyclient is running (and matched profile) then use that one
if args.ipcluster:
ipyclient = ipp.Client(profile=args.ipcluster)
data._ipcluster["cores"] = len(ipyclient)
## if not then we need to register and launch an ipcluster instance
else:
## set CLI ipcluster terms
ipyclient = None
data._ipcluster["cores"] = args.cores if args.cores else detect_cpus()
data._ipcluster["engines"] = "Local"
if args.MPI:
data._ipcluster["engines"] = "MPI"
if not args.cores:
raise IPyradWarningExit("must provide -c argument with --MPI")
## register to have a cluster-id with "ip- name"
data = register_ipcluster(data)
## set to print headers
data._headers = 1
## run assembly steps
steps = list(args.steps)
data.run(
steps=steps,
force=args.force,
show_cluster=1,
ipyclient=ipyclient)
if args.results:
showstats(parsedict) |
def get_binom(base1, base2, estE, estH):
"""
return probability of base call
"""
prior_homo = (1. - estH) / 2.
prior_hete = estH
## calculate probs
bsum = base1 + base2
hetprob = scipy.misc.comb(bsum, base1)/(2. **(bsum))
homoa = scipy.stats.binom.pmf(base2, bsum, estE)
homob = scipy.stats.binom.pmf(base1, bsum, estE)
## calculate probs
hetprob *= prior_hete
homoa *= prior_homo
homob *= prior_homo
## final
probabilities = [homoa, homob, hetprob]
bestprob = max(probabilities)/float(sum(probabilities))
## return
if hetprob > homoa:
return True, bestprob
else:
return False, bestprob |
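## A minimal standalone sketch of the binomial test in get_binom() above:
## balanced base counts favor the heterozygote model, skewed counts favor a
## homozygote with sequencing error. Priors and counts are illustrative, and
## only one homozygote comparison is shown for brevity.
import scipy.stats
import scipy.special

def _demo_binom(base1, base2, estE=0.001, estH=0.01):
    bsum = base1 + base2
    het = scipy.special.comb(bsum, base1) / (2. ** bsum) * estH
    hom = scipy.stats.binom.pmf(base2, bsum, estE) * (1. - estH) / 2.
    return "hetero" if het > hom else "homozygous"

## _demo_binom(10, 10) -> 'hetero';  _demo_binom(19, 1) -> 'homozygous' |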
def removerepeats(consens, arrayed):
"""
    Checks for interior Ns in consensus seqs and removes sites supported by
    too few base calls, defined here as sites where at least 75% of reads
    show an N or '-', since these likely reflect repeat/sequencing artifacts.
"""
## default trim no edges
consens = "".join(consens).replace("-", "N")
## split for pairs
try:
cons1, cons2 = consens.split("nnnn")
split = consens.index("nnnn")
arr1 = arrayed[:, :split]
arr2 = arrayed[:, split+4:]
except ValueError:
cons1 = consens
cons2 = ""
arr1 = arrayed
## trim from left and right of cons1
edges = [None, None]
lcons = len(cons1)
cons1 = cons1.lstrip("N")
edges[0] = lcons - len(cons1)
## trim from right if nonzero
lcons = len(cons1)
cons1 = cons1.rstrip("N")
if lcons - len(cons1):
edges[1] = -1*(lcons - len(cons1))
## trim same from arrayed
arr1 = arr1[:, edges[0]:edges[1]]
## trim from left and right of cons2 if present
if cons2:
## trim from left and right of cons1
edges = [None, None]
lcons = len(cons2)
cons2 = cons2.lstrip("N")
edges[0] = lcons - len(cons2)
## trim from right if nonzero
lcons = len(cons2)
cons2 = cons2.rstrip("N")
if lcons - len(cons2):
edges[1] = -1*(lcons - len(cons2))
## trim same from arrayed
arr2 = arr2[:, edges[0]:edges[1]]
## reconstitute pairs
consens = cons1 + "nnnn" + cons2
consens = np.array(list(consens))
sep = np.array(arr1.shape[0]*[list("nnnn")])
arrayed = np.hstack([arr1, sep, arr2])
## if single-end...
else:
consens = np.array(list(cons1))
arrayed = arr1
## get column counts of Ns and -s
ndepths = np.sum(arrayed == 'N', axis=0)
idepths = np.sum(arrayed == '-', axis=0)
    ## boolean of sites where >= 75% of reads are N or '-'
nons = ((ndepths + idepths) / float(arrayed.shape[0])) >= 0.75
## boolean of whether base was called N
isn = consens == "N"
## make ridx
ridx = nons * isn
## apply filter
consens = consens[~ridx]
arrayed = arrayed[:, ~ridx]
return consens, arrayed |
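## A minimal numpy sketch of the low-information column filter applied in
## removerepeats() above: consensus N sites where at least 75% of reads show
## an N or '-' are dropped. The toy read stack is illustrative.
import numpy as np

reads = np.array([list("ANT-"),
                  list("ANTN"),
                  list("AN-N"),
                  list("ACTN")])
consens = np.array(list("ANTN"))
nons = ((np.sum(reads == "N", axis=0) + np.sum(reads == "-", axis=0)) /
        float(reads.shape[0])) >= 0.75
ridx = nons & (consens == "N")
consens, reads = consens[~ridx], reads[:, ~ridx]
## keeps columns 0 and 2, so consens becomes ['A', 'T'] |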
def newconsensus(data, sample, tmpchunk, optim):
"""
new faster replacement to consensus
"""
## do reference map funcs?
isref = "reference" in data.paramsdict["assembly_method"]
## temporarily store the mean estimates to Assembly
data._este = data.stats.error_est.mean()
data._esth = data.stats.hetero_est.mean()
## get number relative to tmp file
tmpnum = int(tmpchunk.split(".")[-1])
## prepare data for reading
clusters = open(tmpchunk, 'rb')
pairdealer = itertools.izip(*[iter(clusters)]*2)
maxlen = data._hackersonly["max_fragment_length"]
## write to tmp cons to file to be combined later
consenshandle = os.path.join(
data.dirs.consens, sample.name+"_tmpcons."+str(tmpnum))
tmp5 = consenshandle.replace("_tmpcons.", "_tmpcats.")
with h5py.File(tmp5, 'w') as io5:
io5.create_dataset("cats", (optim, maxlen, 4), dtype=np.uint32)
io5.create_dataset("alls", (optim, ), dtype=np.uint8)
io5.create_dataset("chroms", (optim, 3), dtype=np.int64)
## local copies to use to fill the arrays
catarr = io5["cats"][:]
nallel = io5["alls"][:]
refarr = io5["chroms"][:]
## if reference-mapped then parse the fai to get index number of chroms
if isref:
fai = pd.read_csv(data.paramsdict["reference_sequence"] + ".fai",
names=['scaffold', 'size', 'sumsize', 'a', 'b'],
sep="\t")
faidict = {j:i for i,j in enumerate(fai.scaffold)}
## store data for stats counters
counters = {"name" : tmpnum,
"heteros": 0,
"nsites" : 0,
"nconsens" : 0}
## store data for what got filtered
filters = {"depth" : 0,
"maxh" : 0,
"maxn" : 0}
## store data for writing
storeseq = {}
## set max limits
if 'pair' in data.paramsdict["datatype"]:
maxhet = sum(data.paramsdict["max_Hs_consens"])
maxn = sum(data.paramsdict["max_Ns_consens"])
else:
maxhet = data.paramsdict["max_Hs_consens"][0]
maxn = data.paramsdict["max_Ns_consens"][0]
## load the refmap dictionary if refmapping
done = 0
while not done:
try:
done, chunk = clustdealer(pairdealer, 1)
except IndexError:
raise IPyradError("clustfile formatting error in %s", chunk)
if chunk:
## get names and seqs
piece = chunk[0].strip().split("\n")
names = piece[0::2]
seqs = piece[1::2]
## pull replicate read info from seqs
reps = [int(sname.split(";")[-2][5:]) for sname in names]
## IF this is a reference mapped read store the chrom and pos info
## -1 defaults to indicating an anonymous locus, since we are using
## the faidict as 0 indexed. If chrompos fails it defaults to -1
ref_position = (-1, 0, 0)
if isref:
try:
## parse position from name string
name, _, _ = names[0].rsplit(";", 2)
chrom, pos0, pos1 = name.rsplit(":", 2)
## pull idx from .fai reference dict
chromint = faidict[chrom] + 1
ref_position = (int(chromint), int(pos0), int(pos1))
except Exception as inst:
LOGGER.debug("Reference sequence chrom/pos failed for {}".format(names[0]))
LOGGER.debug(inst)
## apply read depth filter
if nfilter1(data, reps):
## get stacks of base counts
sseqs = [list(seq) for seq in seqs]
arrayed = np.concatenate(
[[seq]*rep for seq, rep in zip(sseqs, reps)])
arrayed = arrayed[:, :maxlen]
## get consens call for each site, applies paralog-x-site filter
#consens = np.apply_along_axis(basecall, 0, arrayed, data)
consens = basecaller(
arrayed,
data.paramsdict["mindepth_majrule"],
data.paramsdict["mindepth_statistical"],
data._esth,
data._este,
)
## apply a filter to remove low coverage sites/Ns that
## are likely sequence repeat errors. This is only applied to
## clusters that already passed the read-depth filter (1)
if "N" in consens:
try:
consens, arrayed = removerepeats(consens, arrayed)
except ValueError as _:
LOGGER.info("Caught a bad chunk w/ all Ns. Skip it.")
continue
## get hetero sites
hidx = [i for (i, j) in enumerate(consens) \
if j in list("RKSYWM")]
nheteros = len(hidx)
## filter for max number of hetero sites
if nfilter2(nheteros, maxhet):
## filter for maxN, & minlen
if nfilter3(consens, maxn):
## counter right now
current = counters["nconsens"]
## get N alleles and get lower case in consens
consens, nhaps = nfilter4(consens, hidx, arrayed)
## store the number of alleles observed
nallel[current] = nhaps
## store a reduced array with only CATG
catg = np.array(\
[np.sum(arrayed == i, axis=0) \
for i in list("CATG")],
dtype='uint32').T
catarr[current, :catg.shape[0], :] = catg
refarr[current] = ref_position
## store the seqdata for tmpchunk
storeseq[counters["name"]] = "".join(list(consens))
counters["name"] += 1
counters["nconsens"] += 1
counters["heteros"] += nheteros
else:
#LOGGER.debug("@haplo")
filters['maxn'] += 1
else:
#LOGGER.debug("@hetero")
filters['maxh'] += 1
else:
#LOGGER.debug("@depth")
filters['depth'] += 1
## close infile io
clusters.close()
## write final consens string chunk
if storeseq:
with open(consenshandle, 'wb') as outfile:
outfile.write("\n".join([">"+sample.name+"_"+str(key)+"\n"+\
str(storeseq[key]) for key in storeseq]))
## write to h5 array, this can be a bit slow on big data sets and is not
    ## currently covered by progressbar movement.
with h5py.File(tmp5, 'a') as io5:
io5["cats"][:] = catarr
io5["alls"][:] = nallel
io5["chroms"][:] = refarr
del catarr
del nallel
del refarr
## return stats
counters['nsites'] = sum([len(i) for i in storeseq.itervalues()])
return counters, filters |
def basecaller(arrayed, mindepth_majrule, mindepth_statistical, estH, estE):
"""
call all sites in a locus array.
"""
## an array to fill with consensus site calls
cons = np.zeros(arrayed.shape[1], dtype=np.uint8)
cons.fill(78)
arr = arrayed.view(np.uint8)
## iterate over columns
for col in xrange(arr.shape[1]):
## the site of focus
carr = arr[:, col]
## make mask of N and - sites
mask = carr == 45
mask += carr == 78
marr = carr[~mask]
## skip if only empties (e.g., N-)
if not marr.shape[0]:
cons[col] = 78
## skip if not variable
elif np.all(marr == marr[0]):
cons[col] = marr[0]
## estimate variable site call
else:
## get allele freqs (first-most, second, third = p, q, r)
counts = np.bincount(marr)
pbase = np.argmax(counts)
nump = counts[pbase]
counts[pbase] = 0
qbase = np.argmax(counts)
numq = counts[qbase]
counts[qbase] = 0
rbase = np.argmax(counts)
numr = counts[rbase]
## based on biallelic depth
bidepth = nump + numq
if bidepth < mindepth_majrule:
cons[col] = 78
else:
## if depth is too high, reduce to sampled int
if bidepth > 500:
base1 = int(500 * (nump / float(bidepth)))
base2 = int(500 * (numq / float(bidepth)))
else:
base1 = nump
base2 = numq
## make statistical base call
if bidepth >= mindepth_statistical:
ishet, prob = get_binom(base1, base2, estE, estH)
#LOGGER.info("ishet, prob, b1, b2: %s %s %s %s", ishet, prob, base1, base2)
if prob < 0.95:
cons[col] = 78
else:
if ishet:
cons[col] = TRANS[(pbase, qbase)]
else:
cons[col] = pbase
## make majrule base call
else: #if bidepth >= mindepth_majrule:
if nump == numq:
cons[col] = TRANS[(pbase, qbase)]
else:
cons[col] = pbase
return cons.view("S1") |
def nfilter1(data, reps):
""" applies read depths filter """
if sum(reps) >= data.paramsdict["mindepth_majrule"] and \
sum(reps) <= data.paramsdict["maxdepth"]:
return 1
else:
return 0 |
def nfilter4(consens, hidx, arrayed):
""" applies max haplotypes filter returns pass and consens"""
## if less than two Hs then there is only one allele
if len(hidx) < 2:
return consens, 1
## store base calls for hetero sites
harray = arrayed[:, hidx]
## remove any reads that have N or - base calls at hetero sites
## these cannot be used when calling alleles currently.
harray = harray[~np.any(harray == "-", axis=1)]
harray = harray[~np.any(harray == "N", axis=1)]
## get counts of each allele (e.g., AT:2, CG:2)
ccx = Counter([tuple(i) for i in harray])
## Two possibilities we would like to distinguish, but we can't. Therefore,
## we just throw away low depth third alleles that are within seq. error.
## 1) a third base came up as a sequencing error but is not a unique allele
## 2) a third or more unique allele is there but at low frequency
## remove low freq alleles if more than 2, since they may reflect
## sequencing errors at hetero sites, making a third allele, or a new
## allelic combination that is not real.
if len(ccx) > 2:
totdepth = harray.shape[0]
cutoff = max(1, totdepth // 10)
alleles = [i for i in ccx if ccx[i] > cutoff]
else:
alleles = ccx.keys()
## how many high depth alleles?
nalleles = len(alleles)
## if 2 alleles then save the phase using lowercase coding
if nalleles == 2:
try:
consens = storealleles(consens, hidx, alleles)
except (IndexError, KeyError):
## the H sites do not form good alleles
LOGGER.info("failed at phasing loc, skipping")
LOGGER.info("""
consens %s
hidx %s
alleles %s
""", consens, hidx, alleles)
return consens, nalleles
## just return the info for later filtering
else:
return consens, nalleles |
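## A minimal sketch of the allele-counting step in nfilter4() above: reads are
## reduced to their bases at the hetero sites and counted as tuples, so two
## real alleles appear as two high-count tuples and sequencing errors appear
## as rare extra tuples. The toy reads are illustrative.
from collections import Counter
import numpy as np

arrayed = np.array([list("ACTA"), list("ACTA"), list("ACCG"),
                    list("ACCG"), list("ACTG")])
hidx = [2, 3]                                   ## columns with hetero calls
harray = arrayed[:, hidx]
ccx = Counter([tuple(i) for i in harray])
## -> {('T','A'): 2, ('C','G'): 2, ('T','G'): 1}; the rare third combination
## falls below the depth cutoff, leaving two phased alleles |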
def storealleles(consens, hidx, alleles):
""" store phased allele data for diploids """
## find the first hetero site and choose the priority base
    ## example, if W: then priority base is A and not T. PRIORITY=(order: CATG)
bigbase = PRIORITY[consens[hidx[0]]]
## find which allele has priority based on bigbase
bigallele = [i for i in alleles if i[0] == bigbase][0]
## uplow other bases relative to this one and the priority list
## e.g., if there are two hetero sites (WY) and the two alleles are
## AT and TC, then since bigbase of (W) is A second hetero site should
## be stored as y, since the ordering is swapped in this case; the priority
## base (C versus T) is C, but C goes with the minor base at h site 1.
#consens = list(consens)
for hsite, pbase in zip(hidx[1:], bigallele[1:]):
if PRIORITY[consens[hsite]] != pbase:
consens[hsite] = consens[hsite].lower()
## return consens
return consens |
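## A minimal sketch of the lowercase phasing convention in storealleles()
## above, using a two-entry stand-in for the PRIORITY lookup (order CATG);
## the real dict is defined elsewhere in ipyrad, so this mapping and the toy
## consensus are illustrative.
PRIORITY_DEMO = {"W": "A", "Y": "C"}            ## ambiguity code -> priority base

consens = list("AAWCCY")                        ## hetero sites at 2 and 5
hidx = [2, 5]
alleles = [("A", "T"), ("T", "C")]              ## the two phased haplotypes
bigbase = PRIORITY_DEMO[consens[hidx[0]]]                  ## 'A'
bigallele = [a for a in alleles if a[0] == bigbase][0]     ## ('A', 'T')
for hsite, pbase in zip(hidx[1:], bigallele[1:]):
    if PRIORITY_DEMO[consens[hsite]] != pbase:
        consens[hsite] = consens[hsite].lower()
print("".join(consens))   ## -> 'AAWCCy': lowercase marks the swapped phase |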
def cleanup(data, sample, statsdicts):
"""
cleaning up. optim is the size (nloci) of tmp arrays
"""
LOGGER.info("in cleanup for: %s", sample.name)
isref = 'reference' in data.paramsdict["assembly_method"]
## collect consens chunk files
combs1 = glob.glob(os.path.join(
data.dirs.consens,
sample.name+"_tmpcons.*"))
combs1.sort(key=lambda x: int(x.split(".")[-1]))
## collect tmpcat files
tmpcats = glob.glob(os.path.join(
data.dirs.consens,
sample.name+"_tmpcats.*"))
tmpcats.sort(key=lambda x: int(x.split(".")[-1]))
## get shape info from the first cat, (optim, maxlen, 4)
with h5py.File(tmpcats[0], 'r') as io5:
optim, maxlen, _ = io5['cats'].shape
## save as a chunked compressed hdf5 array
handle1 = os.path.join(data.dirs.consens, sample.name+".catg")
with h5py.File(handle1, 'w') as ioh5:
nloci = len(tmpcats) * optim
dcat = ioh5.create_dataset("catg", (nloci, maxlen, 4),
dtype=np.uint32,
chunks=(optim, maxlen, 4),
compression="gzip")
dall = ioh5.create_dataset("nalleles", (nloci, ),
dtype=np.uint8,
chunks=(optim, ),
compression="gzip")
## only create chrom for reference-aligned data
if isref:
dchrom = ioh5.create_dataset("chroms", (nloci, 3),
dtype=np.int64,
chunks=(optim, 3),
compression="gzip")
## Combine all those tmp cats into the big cat
start = 0
for icat in tmpcats:
io5 = h5py.File(icat, 'r')
end = start + optim
dcat[start:end] = io5['cats'][:]
dall[start:end] = io5['alls'][:]
if isref:
dchrom[start:end] = io5['chroms'][:]
start += optim
io5.close()
os.remove(icat)
## store the handle to the Sample
sample.files.database = handle1
## record results
xcounters = {"nconsens": 0,
"heteros": 0,
"nsites": 0}
xfilters = {"depth": 0,
"maxh": 0,
"maxn": 0}
## merge finished consens stats
for counters, filters in statsdicts:
## sum individual counters
for key in xcounters:
xcounters[key] += counters[key]
for key in xfilters:
xfilters[key] += filters[key]
## merge consens read files
handle1 = os.path.join(data.dirs.consens, sample.name+".consens.gz")
with gzip.open(handle1, 'wb') as out:
for fname in combs1:
with open(fname) as infile:
out.write(infile.read()+"\n")
os.remove(fname)
sample.files.consens = [handle1]
## set Sample stats_dfs values
if int(xcounters['nsites']):
prop = int(xcounters["heteros"]) / float(xcounters['nsites'])
else:
prop = 0
sample.stats_dfs.s5.nsites = int(xcounters["nsites"])
sample.stats_dfs.s5.nhetero = int(xcounters["heteros"])
sample.stats_dfs.s5.filtered_by_depth = xfilters['depth']
sample.stats_dfs.s5.filtered_by_maxH = xfilters['maxh']
sample.stats_dfs.s5.filtered_by_maxN = xfilters['maxn']
sample.stats_dfs.s5.reads_consens = int(xcounters["nconsens"])
sample.stats_dfs.s5.clusters_total = sample.stats_dfs.s3.clusters_total
sample.stats_dfs.s5.heterozygosity = float(prop)
## set the Sample stats summary value
sample.stats.reads_consens = int(xcounters["nconsens"])
## save state to Sample if successful
if sample.stats.reads_consens:
sample.stats.state = 5
else:
print("No clusters passed filtering in Sample: {}".format(sample.name))
return sample |
def chunk_clusters(data, sample):
""" split job into bits and pass to the client """
## counter for split job submission
num = 0
    ## set optim size for chunks in N clusters. The first few chunks take longer
    ## because they contain larger clusters, so the chunk size is chosen so that
    ## each available processor receives roughly one chunk of clusters.
optim = int((sample.stats.clusters_total // data.cpus) + \
(sample.stats.clusters_total % data.cpus))
## break up the file into smaller tmp files for each engine
## chunking by cluster is a bit trickier than chunking by N lines
chunkslist = []
## open to clusters
with gzip.open(sample.files.clusters, 'rb') as clusters:
## create iterator to sample 2 lines at a time
pairdealer = itertools.izip(*[iter(clusters)]*2)
## Use iterator to sample til end of cluster
done = 0
while not done:
## grab optim clusters and write to file.
done, chunk = clustdealer(pairdealer, optim)
chunkhandle = os.path.join(data.dirs.clusts,
"tmp_"+str(sample.name)+"."+str(num*optim))
if chunk:
chunkslist.append((optim, chunkhandle))
with open(chunkhandle, 'wb') as outchunk:
outchunk.write("//\n//\n".join(chunk)+"//\n//\n")
num += 1
return chunkslist |
def get_subsamples(data, samples, force):
"""
Apply state, ncluster, and force filters to select samples to be run.
"""
subsamples = []
for sample in samples:
if not force:
if sample.stats.state >= 5:
print("""\
Skipping Sample {}; Already has consens reads. Use force arg to overwrite.\
""".format(sample.name))
elif not sample.stats.clusters_hidepth:
print("""\
Skipping Sample {}; No clusters found."""\
.format(sample.name, int(sample.stats.clusters_hidepth)))
elif sample.stats.state < 4:
print("""\
Skipping Sample {}; not yet finished step4 """\
.format(sample.name))
else:
subsamples.append(sample)
else:
if not sample.stats.clusters_hidepth:
print("""\
Skipping Sample {}; No clusters found in {}."""\
.format(sample.name, sample.files.clusters))
elif sample.stats.state < 4:
print("""\
Skipping Sample {}; not yet finished step4"""\
.format(sample.name))
else:
subsamples.append(sample)
if len(subsamples) == 0:
raise IPyradWarningExit("""
No samples to cluster, exiting.
""")
    ## fall back to default estimates if step4 hetero/error stats are missing
if "hetero_est" not in data.stats:
print(" No estimates of heterozygosity and error rate. Using default "\
"values")
for sample in subsamples:
sample.stats.hetero_est = 0.001
sample.stats.error_est = 0.0001
if data._headers:
print(u"""\
Mean error [{:.5f} sd={:.5f}]
Mean hetero [{:.5f} sd={:.5f}]"""\
.format(data.stats.error_est.mean(), data.stats.error_est.std(),
data.stats.hetero_est.mean(), data.stats.hetero_est.std()))
return subsamples |
def run(data, samples, force, ipyclient):
""" checks if the sample should be run and passes the args """
## prepare dirs
data.dirs.consens = os.path.join(data.dirs.project, data.name+"_consens")
if not os.path.exists(data.dirs.consens):
os.mkdir(data.dirs.consens)
## zap any tmp files that might be leftover
tmpcons = glob.glob(os.path.join(data.dirs.consens, "*_tmpcons.*"))
tmpcats = glob.glob(os.path.join(data.dirs.consens, "*_tmpcats.*"))
for tmpfile in tmpcons+tmpcats:
os.remove(tmpfile)
## filter through samples for those ready
samples = get_subsamples(data, samples, force)
## set up parallel client: how many cores?
lbview = ipyclient.load_balanced_view()
data.cpus = data._ipcluster["cores"]
if not data.cpus:
data.cpus = len(ipyclient.ids)
## wrap everything to ensure destruction of temp files
inst = ""
try:
## calculate depths, if they changed.
samples = calculate_depths(data, samples, lbview)
## chunk clusters into bits for parallel processing
lasyncs = make_chunks(data, samples, lbview)
## process chunks and cleanup
process_chunks(data, samples, lasyncs, lbview)
except KeyboardInterrupt as inst:
raise inst
finally:
## if process failed at any point delete tmp files
tmpcons = glob.glob(os.path.join(data.dirs.clusts, "tmp_*.[0-9]*"))
tmpcons += glob.glob(os.path.join(data.dirs.consens, "*_tmpcons.*"))
tmpcons += glob.glob(os.path.join(data.dirs.consens, "*_tmpcats.*"))
for tmpchunk in tmpcons:
os.remove(tmpchunk)
## Finished step 5. Set step 6 checkpoint to 0 to force
## re-running from scratch.
data._checkpoint = 0 |
def calculate_depths(data, samples, lbview):
"""
check whether mindepth has changed, and thus whether clusters_hidepth
needs to be recalculated, and get new maxlen for new highdepth clusts.
if mindepth not changed then nothing changes.
"""
## send jobs to be processed on engines
start = time.time()
printstr = " calculating depths | {} | s5 |"
recaljobs = {}
maxlens = []
for sample in samples:
recaljobs[sample.name] = lbview.apply(recal_hidepth, *(data, sample))
## block until finished
while 1:
ready = [i.ready() for i in recaljobs.values()]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if len(ready) == sum(ready):
print("")
break
## check for failures and collect results
modsamples = []
for sample in samples:
if not recaljobs[sample.name].successful():
LOGGER.error(" sample %s failed: %s", sample.name, recaljobs[sample.name].exception())
else:
modsample, _, maxlen, _, _ = recaljobs[sample.name].result()
modsamples.append(modsample)
maxlens.append(maxlen)
## reset global maxlen if something changed
data._hackersonly["max_fragment_length"] = int(max(maxlens)) + 4
return samples |
def make_chunks(data, samples, lbview):
"""
calls chunk_clusters and tracks progress.
"""
## first progress bar
start = time.time()
printstr = " chunking clusters | {} | s5 |"
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(10, 0, printstr.format(elapsed), spacer=data._spacer)
## send off samples to be chunked
lasyncs = {}
for sample in samples:
lasyncs[sample.name] = lbview.apply(chunk_clusters, *(data, sample))
## block until finished
while 1:
ready = [i.ready() for i in lasyncs.values()]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if len(ready) == sum(ready):
print("")
break
## check for failures
for sample in samples:
if not lasyncs[sample.name].successful():
LOGGER.error(" sample %s failed: %s", sample.name,
lasyncs[sample.name].exception())
return lasyncs |
def process_chunks(data, samples, lasyncs, lbview):
"""
submit chunks to consens func and ...
"""
## send chunks to be processed
start = time.time()
asyncs = {sample.name:[] for sample in samples}
printstr = " consens calling | {} | s5 |"
## get chunklist from results
for sample in samples:
clist = lasyncs[sample.name].result()
for optim, chunkhandle in clist:
args = (data, sample, chunkhandle, optim)
#asyncs[sample.name].append(lbview.apply_async(consensus, *args))
asyncs[sample.name].append(lbview.apply_async(newconsensus, *args))
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(10, 0, printstr.format(elapsed), spacer=data._spacer)
## track progress
allsyncs = list(itertools.chain(*[asyncs[i.name] for i in samples]))
while 1:
ready = [i.ready() for i in allsyncs]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if len(ready) == sum(ready):
break
## get clean samples
casyncs = {}
for sample in samples:
rlist = asyncs[sample.name]
statsdicts = [i.result() for i in rlist]
casyncs[sample.name] = lbview.apply(cleanup, *(data, sample, statsdicts))
while 1:
ready = [i.ready() for i in casyncs.values()]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(10, 10, printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if len(ready) == sum(ready):
print("")
break
## check for failures:
for key in asyncs:
asynclist = asyncs[key]
for async in asynclist:
if not async.successful():
LOGGER.error(" async error: %s \n%s", key, async.exception())
for key in casyncs:
if not casyncs[key].successful():
LOGGER.error(" casync error: %s \n%s", key, casyncs[key].exception())
## get samples back
subsamples = [i.result() for i in casyncs.values()]
for sample in subsamples:
data.samples[sample.name] = sample
## build Assembly stats
data.stats_dfs.s5 = data._build_stat("s5")
## write stats file
data.stats_files.s5 = os.path.join(data.dirs.consens, 's5_consens_stats.txt')
with io.open(data.stats_files.s5, 'w') as out:
#out.write(data.stats_dfs.s5.to_string())
data.stats_dfs.s5.to_string(
buf=out,
formatters={
'clusters_total':'{:.0f}'.format,
'filtered_by_depth':'{:.0f}'.format,
'filtered_by_maxH':'{:.0f}'.format,
'filtered_by_maxN':'{:.0f}'.format,
'reads_consens':'{:.0f}'.format,
'nsites':'{:.0f}'.format,
'nhetero':'{:.0f}'.format,
'heterozygosity':'{:.5f}'.format
}) |
def make(data, samples):
""" reads in .loci and builds alleles from case characters """
#read in loci file
outfile = open(os.path.join(data.dirs.outfiles, data.name+".alleles"), 'w')
lines = open(os.path.join(data.dirs.outfiles, data.name+".loci"), 'r')
## Get the longest sample name for pretty printing
longname = max(len(x) for x in data.samples.keys())
## Padding between name and sequence in output file. This should be the
## same as write_outfiles.write_tmp_loci.name_padding
name_padding = 5
writing = []
loc = 0
for line in lines:
if ">" in line:
name, seq = line.split(" ")[0], line.split(" ")[-1]
allele1, allele2 = splitalleles(seq.strip())
## Format the output string. the "-2" below accounts for the additional
## 2 characters added to the sample name that don't get added to the
## snpsites line, so you gotta bump this line back 2 to make it
## line up right.
writing.append(name+"_0"+" "*(longname-len(name)-2+name_padding)+allele1)
writing.append(name+"_1"+" "*(longname-len(name)-2+name_padding)+allele2)
else:
writing.append(line.strip())
loc += 1
        ## print every 10K loci
if not loc % 10000:
outfile.write("\n".join(writing)+"\n")
writing = []
outfile.write("\n".join(writing))
outfile.close() |
def make(data, samples):
""" builds snps output """
## get attr
ploidy = data.paramsdict["max_alleles_consens"]
names = [i.name for i in samples]
longname = max([len(i) for i in names])
    ## TODO: iterate over the .loci file instead of reading it all at once,
    ## since it can be very large
inloci = open(os.path.join(\
data.dirs.outfiles, data.name+".loci" ), 'r').read()
## Potential outfiles
snpsout = os.path.join(data.dirs.outfiles, data.name+".snps")
usnpsout = os.path.join(data.dirs.outfiles, data.name+".usnps")
structout = os.path.join(data.dirs.outfiles, data.name+".str")
genoout = os.path.join(data.dirs.outfiles, data.name+".snps.geno")
ugenoout = os.path.join(data.dirs.outfiles, data.name+".usnps.geno")
## Output file for writing some stats
statsfile= os.path.join(data.dirs.outfiles, data.name+".snps.stats")
## The output formats to write
formats = data.paramsdict["output_formats"]
seed = data._hackersonly["random_seed"]
np.random.seed(int(seed))
    ## output .snps and .unlinked_snps
S = {} ## snp dict
Si = {} ## unlinked snp dict
for name in list(names):
S[name] = []
Si[name] = []
    ## record bi-allelic snps
bis = 0
    ## for each locus select out the SNPs
for loc in inloci.strip().split("|")[:-1]:
pis = ""
ns = []
ss = []
cov = {} ## record coverage for each SNP
for line in loc.split("\n"):
if ">" in line:
ns.append(line.split()[0].replace(">",""))
ss.append(line.split()[-1])
else:
pis = [i[0] for i in enumerate(line) if i[1] in list('*-')]
        ## assign snps to S, and record coverage for usnps
for tax in S:
if tax in ns:
if pis:
for snpsite in pis:
snpsite -= (longname+5)
S[tax].append(ss[ns.index(tax)][snpsite])
if snpsite not in cov:
cov[snpsite] = 1
else:
cov[snpsite] += 1
                        ## downweight selection of gap sites
if ss[ns.index(tax)][snpsite] != '-':
cov[snpsite] += 1
else:
if pis:
for snpsite in pis:
S[tax].append("N")
Si[tax].append("N")
        ## randomly select among snps w/ greatest coverage for unlinked snp
maxlist = []
for j,k in cov.items():
if k == max(cov.values()):
maxlist.append(j)
        ## Is it bi-allelic?
bisnps = []
for i in maxlist:
if len(set([ss[ns.index(tax)][i] for tax in S if tax in ns])) < 3:
bisnps.append(i)
#rando = pis[np.random.randint(len(pis))]
#rando -= (longname+5)
if bisnps:
rando = bisnps[np.random.randint(len(bisnps))]
elif maxlist:
rando = maxlist[np.random.randint(len(maxlist))]
tbi = 0
for tax in S:
if tax in ns:
if pis:
                    ## if none are bi-allelic
if not bisnps:
tbi += 1
Si[tax].append(ss[ns.index(tax)][rando])
if pis:
                ## add spacer between loci
S[tax].append(" ")
else:
                ## invariable locus
S[tax].append("_ ")
bis += tbi
## names
SF = list(S.keys())
SF.sort()
## Write linked snps format
if "snps" in formats:
with open(snpsout, 'w') as outfile:
print >>outfile, "## %s taxa, %s loci, %s snps" % \
(len(S), len("".join(S.values()[0]).split(" "))-1,
len("".join(S[SF[0]]).replace(" ", "")))
for i in SF:
print >>outfile, i+(" "*(longname-len(i)+3))+"".join(S[i])
## Write unlinked snps format
if "usnps" in formats:
with open(usnpsout, 'w') as outfile:
print >>outfile, len(Si), len("".join(Si.values()[0]))
for i in SF:
print >>outfile, i+(" "*(longname-len(i)+3))+"".join(Si[i])
with open(statsfile, 'a') as statsout:
print >>statsout, "sampled unlinked SNPs=",len(Si.values()[0])
print >>statsout, "sampled unlinked bi-allelic SNPs=",len(Si.values()[0])-bis
## Write STRUCTURE format
if "str" in formats:
with open(structout, 'w') as outfile:
B = {'A': '0',
'T': '1',
'G': '2',
'C': '3',
'N': '-9',
'-': '-9'}
if ploidy > 1:
for line in SF:
print >>outfile, line+(" "*(longname-len(line)+3))+\
"\t"*6+"\t".join([B[unstruct(j)[0]] for j in Si[line]])
print >>outfile, line+(" "*(longname-len(line)+3))+\
"\t"*6+"\t".join([B[unstruct(j)[1]] for j in Si[line]])
else:
for line in SF:
print >>outfile, line+(" "*(longname-len(line)+3))+\
"\t"*6+"\t".join([B[unstruct(j)[1]] for j in Si[line]])
## Do linked and unlinked snps in .geno format
if "geno" in formats:
with open(ugenoout, 'w') as outfile:
for i in range(len(Si.values()[0])):
getref = 0
ref = "N"
while ref == "N":
ref = unstruct(Si[SF[getref]][i])[0]
getref += 1
SNProw = "".join(map(str,[unstruct(Si[j][i]).count(ref) if Si[j][i] != "N" \
else "9" for j in SF]))
## print ref,SNProw
if len(set(SNProw)) > 1:
print >>outfile, SNProw
with open(genoout, 'w') as outfile:
for i in range(len(S.values()[0])):
if S[SF[0]][i].strip("_").strip():
getref = 0
ref = "N"
while ref == "N":
#print i, S[SF[0]][i]
ref = unstruct(S[SF[getref]][i])[0]
getref += 1
SNProw = "".join(map(str,[unstruct(S[j][i]).count(ref) if \
S[j][i] != "N" else "9" for j in SF]))
## print ref,SNProw
if len(set(SNProw)) > 1:
print >>outfile, SNProw |
def cluster_info(ipyclient, spacer=""):
""" reports host and engine info for an ipyclient """
## get engine data, skips busy engines.
hosts = []
for eid in ipyclient.ids:
engine = ipyclient[eid]
if not engine.outstanding:
hosts.append(engine.apply(_socket.gethostname))
## report it
hosts = [i.get() for i in hosts]
result = []
for hostname in set(hosts):
result.append("{}host compute node: [{} cores] on {}"\
.format(spacer, hosts.count(hostname), hostname))
print "\n".join(result) |
def _debug_on():
"""
Turns on debugging by creating hidden tmp file
This is only run by the __main__ engine.
"""
## make tmp file and set loglevel for top-level init
with open(__debugflag__, 'w') as dfile:
dfile.write("wat")
__loglevel__ = "DEBUG"
_LOGGER.info("debugging turned on and registered to be turned off at exit")
_set_debug_dict(__loglevel__) |
def _set_debug_dict(__loglevel__):
""" set the debug dict """
_lconfig.dictConfig({
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': "%(asctime)s \t"\
+"pid=%(process)d \t"\
+"[%(filename)s]\t"\
+"%(levelname)s \t"\
+"%(message)s"
},
},
'handlers': {
__name__: {
'level':__loglevel__,
'class':'logging.FileHandler',
'filename':__debugfile__,
'formatter':"standard",
'mode':'a+'
}
},
'loggers':{
__name__: {
'handlers': [__name__],
'level': __loglevel__,
                'propagate': True
}
}
}) |
def _debug_off():
""" turns off debugging by removing hidden tmp file """
if _os.path.exists(__debugflag__):
_os.remove(__debugflag__)
__loglevel__ = "ERROR"
_LOGGER.info("debugging turned off")
_set_debug_dict(__loglevel__) |
def _cmd_exists(cmd):
""" check if dependency program is there """
return _subprocess.call("type " + cmd,
shell=True,
stdout=_subprocess.PIPE,
stderr=_subprocess.PIPE) == 0 |
def _getbins():
""" gets the right version of vsearch, muscle, and smalt
depending on linux vs osx """
# Return error if system is 32-bit arch.
# This is straight from the python docs:
# https://docs.python.org/2/library/platform.html#cross-platform
if not _sys.maxsize > 2**32:
_sys.exit("ipyrad requires 64bit architecture")
## get platform mac or linux
_platform = _sys.platform
## get current location
if 'VIRTUAL_ENV' in _os.environ:
ipyrad_path = _os.environ['VIRTUAL_ENV']
else:
path = _os.path.abspath(_os.path.dirname(__file__))
ipyrad_path = _os.path.dirname(path)
## find bin directory
bin_path = _os.path.join(ipyrad_path, "bin")
## get the correct binaries
if 'linux' in _platform:
vsearch = _os.path.join(
_os.path.abspath(bin_path),
"vsearch-linux-x86_64")
muscle = _os.path.join(
_os.path.abspath(bin_path),
"muscle-linux-x86_64")
smalt = _os.path.join(
_os.path.abspath(bin_path),
"smalt-linux-x86_64")
bwa = _os.path.join(
_os.path.abspath(bin_path),
"bwa-linux-x86_64")
samtools = _os.path.join(
_os.path.abspath(bin_path),
"samtools-linux-x86_64")
bedtools = _os.path.join(
_os.path.abspath(bin_path),
"bedtools-linux-x86_64")
qmc = _os.path.join(
_os.path.abspath(bin_path),
"QMC-linux-x86_64")
else:
vsearch = _os.path.join(
_os.path.abspath(bin_path),
"vsearch-osx-x86_64")
muscle = _os.path.join(
_os.path.abspath(bin_path),
"muscle-osx-x86_64")
smalt = _os.path.join(
_os.path.abspath(bin_path),
"smalt-osx-x86_64")
bwa = _os.path.join(
_os.path.abspath(bin_path),
"bwa-osx-x86_64")
samtools = _os.path.join(
_os.path.abspath(bin_path),
"samtools-osx-x86_64")
bedtools = _os.path.join(
_os.path.abspath(bin_path),
"bedtools-osx-x86_64")
## only one compiled version available, works for all?
qmc = _os.path.join(
_os.path.abspath(bin_path),
"QMC-osx-x86_64")
# Test for existence of binaries
assert _cmd_exists(muscle), "muscle not found here: "+muscle
assert _cmd_exists(vsearch), "vsearch not found here: "+vsearch
assert _cmd_exists(smalt), "smalt not found here: "+smalt
assert _cmd_exists(bwa), "bwa not found here: "+bwa
assert _cmd_exists(samtools), "samtools not found here: "+samtools
assert _cmd_exists(bedtools), "bedtools not found here: "+bedtools
#assert _cmd_exists(qmc), "wQMC not found here: "+qmc
return vsearch, muscle, smalt, bwa, samtools, bedtools, qmc |
def nworker(data, chunk):
"""
Worker to distribute work to jit funcs. Wraps everything on an
engine to run single-threaded to maximize efficiency for
multi-processing.
"""
## set the thread limit on the remote engine
oldlimit = set_mkl_thread_limit(1)
## open seqarray view, the modified arr is in bootstarr
with h5py.File(data.database.input, 'r') as io5:
seqview = io5["bootsarr"][:]
maparr = io5["bootsmap"][:, 0]
smps = io5["quartets"][chunk:chunk+data._chunksize]
## create an N-mask array of all seq cols
nall_mask = seqview[:] == 78
## init arrays to fill with results
rquartets = np.zeros((smps.shape[0], 4), dtype=np.uint16)
rinvariants = np.zeros((smps.shape[0], 16, 16), dtype=np.uint16)
## fill arrays with results as we compute them. This iterates
## over all of the quartet sets in this sample chunk. It would
## be nice to have this all numbified.
for idx in xrange(smps.shape[0]):
sidx = smps[idx]
seqs = seqview[sidx]
## these axis calls cannot be numbafied, but I can't
## find a faster way that is JIT compiled, and I've
## really, really, really tried. Tried again now that
## numba supports axis args for np.sum. Still can't
## get speed improvements by numbifying this loop.
nmask = np.any(nall_mask[sidx], axis=0)
nmask += np.all(seqs == seqs[0], axis=0)
## here are the jitted funcs
bidx, invar = calculate(seqs, maparr, nmask, TESTS)
## store results
rquartets[idx] = smps[idx][bidx]
rinvariants[idx] = invar
## reset thread limit
set_mkl_thread_limit(oldlimit)
## return results...
return rquartets, rinvariants
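## A minimal sketch of the column-masking step above, on a made-up toy
## array (values are arbitrary uint8 base codes; 78 is "N"). Columns that
## contain any N among the sampled taxa, or that are invariant across
## them, are masked out before the invariant counts are computed.
import numpy as np

toyseqs = np.array([
    [65, 67, 78, 71],     # A C N G
    [65, 71, 71, 71],     # A G G G
    [65, 67, 84, 71],     # A C T G
    [65, 67, 71, 71],     # A C G G
], dtype=np.uint8)
toymask = np.any(toyseqs == 78, axis=0)            # columns containing an N
toymask += np.all(toyseqs == toyseqs[0], axis=0)   # plus invariant columns
print(toymask)   # [ True False  True  True] -> only column 1 is kept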
def store_all(self):
"""
Populate array with all possible quartets. This allows us to
sample from the total, and also to continue from a checkpoint
"""
with h5py.File(self.database.input, 'a') as io5:
fillsets = io5["quartets"]
## generator for all quartet sets
qiter = itertools.combinations(xrange(len(self.samples)), 4)
i = 0
while i < self.params.nquartets:
## sample a chunk of the next ordered N set of quartets
dat = np.array(list(itertools.islice(qiter, self._chunksize)))
end = min(self.params.nquartets, dat.shape[0]+i)
fillsets[i:end] = dat[:end-i]
i += self._chunksize
## send progress update to stdout on engine
print(min(i, self.params.nquartets))
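## A small sketch of the chunked-fill pattern above: itertools.islice pulls
## the next `chunksize` ordered 4-sets from the combinations generator, so
## the complete set of quartets never needs to be held in memory. The
## numbers below are arbitrary toy values.
import itertools
import numpy as np

nsamples, chunksize = 8, 10
qiter = itertools.combinations(range(nsamples), 4)
chunk = np.array(list(itertools.islice(qiter, chunksize)))
print(chunk.shape)   # (10, 4); the next islice call continues where this stopped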
def store_random(self):
"""
Populate the array with random quartets sampled from a generator.
Holding all possible sets in memory can be prohibitive, but a list of
random indices into the full range of combinations fits in memory for
most reasonably sized data sets. We therefore shuffle the indices of
all possible sets and keep only those sets from the ordered generator
whose position matches one of the chosen indices. Tests confirmed
that any pair of samples is as likely to co-occur in a sampled
quartet as samples 0 and 1 are.
"""
with h5py.File(self.database.input, 'a') as io5:
fillsets = io5["quartets"]
## set generators
qiter = itertools.combinations(xrange(len(self.samples)), 4)
rand = np.arange(0, n_choose_k(len(self.samples), 4))
np.random.shuffle(rand)
rslice = rand[:self.params.nquartets]
rss = np.sort(rslice)
riter = iter(rss)
del rand, rslice
## print progress update 1 to the engine stdout
print(self._chunksize)
## set to store
rando = riter.next()
tmpr = np.zeros((self.params.nquartets, 4), dtype=np.uint16)
tidx = 0
while 1:
try:
for i, j in enumerate(qiter):
if i == rando:
tmpr[tidx] = j
tidx += 1
rando = riter.next()
## print progress bar update to engine stdout
if not i % self._chunksize:
print(min(i, self.params.nquartets))
except StopIteration:
break
## store into database
fillsets[:] = tmpr
del tmpr
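## A toy sketch of the sampling idea above: draw a sorted set of random
## ranks, then walk the ordered combinations generator and keep only the
## sets whose ordinal position matches the next rank. Sizes here are made
## up; the real code draws ranks over n_choose_k(len(samples), 4).
import itertools
import numpy as np

ntotal, nwanted = 70, 5                      # 70 == C(8, 4)
ranks = iter(sorted(np.random.choice(ntotal, nwanted, replace=False)))
want = next(ranks)
keep = []
for i, qrt in enumerate(itertools.combinations(range(8), 4)):
    if i == want:
        keep.append(qrt)
        try:
            want = next(ranks)
        except StopIteration:
            break
print(keep)   # five unique 4-sets drawn uniformly without replacement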
def store_equal(self):
"""
Takes a tetrad class object and populates the array with quartets
sampled equally among the splits of the guide tree, so that deep
splits are not overrepresented relative to splits that define few
quartets, such as those near the tips.
"""
with h5py.File(self.database.input, 'a') as io5:
fillsets = io5["quartets"]
## require guidetree
if not os.path.exists(self.files.tree):
raise IPyradWarningExit(
"To use sampling method 'equal' requires a guidetree")
tre = ete3.Tree(self.files.tree)
tre.unroot()
tre.resolve_polytomy(recursive=True)
## randomly sample internals splits
splits = [([self.samples.index(z.name) for z in i],
[self.samples.index(z.name) for z in j]) \
for (i, j) in tre.get_edges()]
## only keep internal splits, not single tip edges
splits = [i for i in splits if all([len(j) > 1 for j in i])]
## minimum number of quartets that should be sampled from each split
squarts = self.params.nquartets // len(splits)
## keep track of how many iterators are saturable.
saturable = 0
## turn each into an iterable split sampler
## if the nquartets for that split is small, then sample all,
## if it is big then make it a random sampler for that split.
qiters = []
## iterate over splits sampling quartets evenly
for idx, split in enumerate(splits):
## if small number at this split then sample all possible sets
## we will exhaust this quickly and then switch to random for
## the larger splits.
total = n_choose_k(len(split[0]), 2) * n_choose_k(len(split[1]), 2)
if total < squarts*2:
qiter = (i+j for (i, j) in itertools.product(
itertools.combinations(split[0], 2),
itertools.combinations(split[1], 2)))
saturable += 1
## else create a random sampler across that split; this is slower
## because it can propose the same quartet repeatedly, so we have to
## check each draw against the 'sampled' set.
else:
qiter = (random_product(split[0], split[1]) for _ \
in xrange(self.params.nquartets))
## store all iterators into a list
qiters.append((idx, qiter))
## create infinite cycler of qiters
qitercycle = itertools.cycle(qiters)
## store visited quartets
sampled = set()
## fill chunksize at a time
i = 0
empty = set()
edge_targeted = 0
random_targeted = 0
## keep filling quartets until nquartets are sampled.
while i < self.params.nquartets:
## grab the next iterator
cycle, qiter = qitercycle.next()
## sample from iterators, store sorted set.
try:
qrtsamp = tuple(sorted(qiter.next()))
if qrtsamp not in sampled:
sampled.add(qrtsamp)
edge_targeted += 1
i += 1
## print progress bar update to engine stdout
if not i % self._chunksize:
print(min(i, self.params.nquartets))
except StopIteration:
empty.add(cycle)
if len(empty) == saturable:
break
## if array is not full then add random samples
while i < self.params.nquartets:
newset = tuple(sorted(np.random.choice(
range(len(self.samples)), 4, replace=False)))
if newset not in sampled:
sampled.add(newset)
random_targeted += 1
i += 1
## print progress bar update to engine stdout
if not i % self._chunksize:
print(min(i, self.params.nquartets))
## store into database
print(self.params.nquartets)
fillsets[:] = np.array(tuple(sampled))
del sampled
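## Sketch of the per-split iterator constructed above: every quartet that
## is informative about a split takes two taxa from each side, so the
## product of 2-subsets from the two sides enumerates all of them. The
## split below is an arbitrary example.
import itertools

left, right = [0, 1, 2], [3, 4, 5, 6]
qiter = (i + j for (i, j) in itertools.product(
    itertools.combinations(left, 2),
    itertools.combinations(right, 2)))
print(len(list(qiter)))   # C(3,2) * C(4,2) = 3 * 6 = 18 quartets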
def random_combination(nsets, n, k):
"""
Returns nsets unique random quartet sets sampled from
n-choose-k without replacement combinations.
"""
sets = set()
while len(sets) < nsets:
newset = tuple(sorted(np.random.choice(n, k, replace=False)))
sets.add(newset)
return tuple(sets)
def random_product(iter1, iter2):
"""
Random sampler for equal_splits functions
"""
iter4 = np.concatenate([
np.random.choice(iter1, 2, replace=False),
np.random.choice(iter2, 2, replace=False)
])
return iter4
def resolve_ambigs(tmpseq):
"""
Randomly resolve ambiguous bases. This is applied to each bootstrap
replicate so that the random resolutions average out over replicates.
Each site is resolved independently, which is best suited to unlinked
SNPs; linked SNPs lose their linkage information, though that
information is not used downstream anyway.
"""
## the order of rows in GETCONS
for aidx in xrange(6):
#np.uint([82, 75, 83, 89, 87, 77]):
ambig, res1, res2 = GETCONS[aidx]
## get true wherever tmpseq is ambig
idx, idy = np.where(tmpseq == ambig)
halfmask = np.random.choice(np.array([True, False]), idx.shape[0])
for col in xrange(idx.shape[0]):
if halfmask[col]:
tmpseq[idx[col], idy[col]] = res1
else:
tmpseq[idx[col], idy[col]] = res2
return tmpseq
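## A toy, vectorized variant of the resolution loop above for a single
## ambiguity code. The (ambig, res1, res2) triple mimics one row of the
## GETCONS table (here R -> A/G in uint8 codes); the real table is defined
## elsewhere in the module.
import numpy as np

toyseq = np.array([[82, 65, 82],
                   [71, 82, 65]], dtype=np.uint8)    # R A R / G R A
ambig, res1, res2 = 82, 65, 71                       # R resolves to A or G
idx, idy = np.where(toyseq == ambig)
halfmask = np.random.choice(np.array([True, False]), idx.shape[0])
toyseq[idx[halfmask], idy[halfmask]] = res1
toyseq[idx[~halfmask], idy[~halfmask]] = res2
print(toyseq)   # every 82 (R) replaced at random by 65 (A) or 71 (G)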
def set_mkl_thread_limit(cores):
"""
set mkl thread limit and return old value so we can reset
when finished.
"""
if "linux" in sys.platform:
mkl_rt = ctypes.CDLL('libmkl_rt.so')
else:
mkl_rt = ctypes.CDLL('libmkl_rt.dylib')
oldlimit = mkl_rt.mkl_get_max_threads()
mkl_rt.mkl_set_num_threads(ctypes.byref(ctypes.c_int(cores)))
return oldlimit
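## An illustrative wrapper (not part of the package) showing the intended
## usage of the helper above: pin MKL to one thread while a worker runs and
## always restore the previous limit. The OSError guard is an assumption
## for numpy builds that are not linked against libmkl_rt.
def _run_single_threaded(func, *args):
    """ call func(*args) with the MKL thread limit temporarily set to 1 """
    try:
        oldlimit = set_mkl_thread_limit(1)
    except OSError:
        return func(*args)            # no MKL runtime found; just run it
    try:
        return func(*args)
    finally:
        set_mkl_thread_limit(oldlimit)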
def get_total(tots, node):
""" get total number of quartets possible for a split"""
if (node.is_leaf() or node.is_root()):
return 0
else:
## Get counts on down edges.
## How to treat polytomies here?
if len(node.children) > 2:
down_r = node.children[0]
down_l = node.children[1]
for child in node.children[2:]:
down_l += child
else:
down_r, down_l = node.children
lendr = sum(1 for i in down_r.iter_leaves())
lendl = sum(1 for i in down_l.iter_leaves())
## get count on up edge sister
up_r = node.get_sisters()[0]
lenur = sum(1 for i in up_r.iter_leaves())
## everyone else
lenul = tots - (lendr + lendl + lenur)
## return product
return lendr * lendl * lenur * lenul
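## Worked example of the product above (toy numbers): with 3 and 4 taxa on
## the two down edges, 2 on the up-sister edge, and 16 taxa in total, the
## remaining edge holds 16 - (3 + 4 + 2) = 7 taxa, and one taxon is drawn
## from each of the four edges around the split.
lendr, lendl, lenur, tots = 3, 4, 2, 16
lenul = tots - (lendr + lendl + lenur)        # 7
print(lendr * lendl * lenur * lenul)          # 3 * 4 * 2 * 7 = 168 quartets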
def get_sampled(data, totn, node):
""" get total number of quartets sampled for a split"""
## convert tip names to ints
names = sorted(totn)
cdict = {name: idx for idx, name in enumerate(names)}
## skip some nodes
if (node.is_leaf() or node.is_root()):
return 0
else:
## get counts on down edges
if len(node.children) > 2:
down_r = node.children[0]
down_l = node.children[1]
for child in node.children[2:]:
down_l += child
else:
down_r, down_l = node.children
lendr = set(cdict[i] for i in down_r.get_leaf_names())
lendl = set(cdict[i] for i in down_l.get_leaf_names())
## get count on up edge sister
up_r = node.get_sisters()[0]
lenur = set(cdict[i] for i in up_r.get_leaf_names())
## everyone else
lenul = set(cdict[i] for i in totn) - set.union(lendr, lendl, lenur)
idx = 0
sampled = 0
with h5py.File(data.database.output, 'r') as io5:
end = io5["quartets"].shape[0]
while 1:
## break condition
if idx >= end:
break
## counts matches
qrts = io5["quartets"][idx:idx+data._chunksize]
for qrt in qrts:
sqrt = set(qrt)
if all([sqrt.intersection(i) for i in [lendr, lendl, lenur, lenul]]):
sampled += 1
## increase span
idx += data._chunksize
return sampled
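## Minimal check of the intersection test above: a quartet counts toward a
## split only if it has at least one taxon on each of the four surrounding
## edges. The edge sets here are made-up indices.
lendr, lendl, lenur, lenul = {0, 1}, {2, 3}, {4, 5}, {6, 7}
qrt = {0, 2, 4, 6}
print(all(qrt & side for side in (lendr, lendl, lenur, lenul)))           # True
print(all({0, 1, 4, 6} & side for side in (lendr, lendl, lenur, lenul)))  # False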
def consensus_tree(trees, names=None, cutoff=0.0):
"""
An extended majority rule consensus function for ete3.
Modelled on the similar function from scikit-bio tree module. If
cutoff=0.5 then it is a normal majority rule consensus, while if
cutoff=0.0 then subsequent non-conflicting clades are added to the tree.
"""
## find which clades occured with freq > cutoff
namedict, clade_counts = find_clades(trees, names=names)
## filter out the < cutoff clades
fclade_counts = filter_clades(clade_counts, cutoff)
## build tree
consens_tree, _ = build_trees(fclade_counts, namedict)
## return the consensus tree along with the clade counts
return consens_tree, clade_counts
def find_clades(trees, names):
"""
A subfunc of consensus_tree(). Traverses trees to count clade occurrences.
Tip names are indexed according to the given names list if provided;
otherwise they follow the order of the first tree.
"""
## index names from the first tree
if not names:
names = trees[0].get_leaf_names()
ndict = {j:i for i, j in enumerate(names)}
namedict = {i:j for i, j in enumerate(names)}
## store counts
clade_counts = defaultdict(int)
## count as bitarray clades in each tree
for tree in trees:
tree.unroot()
for node in tree.traverse('postorder'):
#bits = bitarray('0'*len(tree))
bits = np.zeros(len(tree), dtype=np.bool_)
for child in node.iter_leaf_names():
bits[ndict[child]] = True
## if parent is root then mirror flip one child (where bit[0]=0)
# if not node.is_root():
# if node.up.is_root():
# if bits[0]:
# bits.invert()
bitstring = "".join([np.binary_repr(i) for i in bits])
clade_counts[bitstring] += 1
## convert to freq
for key, val in clade_counts.items():
clade_counts[key] = val / float(len(trees))
## return in sorted order
clade_counts = sorted(clade_counts.items(),
key=lambda x: x[1],
reverse=True)
return namedict, clade_counts
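## Toy illustration of the clade encoding used above: each clade becomes a
## boolean vector over a fixed tip order, serialized to a bitstring key so
## that the same clade found in different trees increments one counter.
## The names below are arbitrary.
import numpy as np

names = ["a", "b", "c", "d"]
ndict = {j: i for i, j in enumerate(names)}
bits = np.zeros(len(names), dtype=np.bool_)
for tip in ("b", "c"):                  # a clade containing tips b and c
    bits[ndict[tip]] = True
print("".join([np.binary_repr(i) for i in bits]))   # "0110"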
def build_trees(fclade_counts, namedict):
"""
A subfunc of consensus_tree(). Build an unrooted consensus tree
from filtered clade counts.
"""
## storage
nodes = {}
idxarr = np.arange(len(fclade_counts[0][0]))
queue = []
## create dict of clade counts and set keys
countdict = defaultdict(int)
for clade, count in fclade_counts:
mask = np.int_(list(clade)).astype(np.bool)
ccx = idxarr[mask]
queue.append((len(ccx), frozenset(ccx)))
countdict[frozenset(ccx)] = count
while queue:
queue.sort()
(clade_size, clade) = queue.pop(0)
new_queue = []
# search for ancestors of clade
for (_, ancestor) in queue:
if clade.issubset(ancestor):
# update ancestor such that, in the following example:
# ancestor == {1, 2, 3, 4}
# clade == {2, 3}
# new_ancestor == {1, {2, 3}, 4}
new_ancestor = (ancestor - clade) | frozenset([clade])
countdict[new_ancestor] = countdict.pop(ancestor)
ancestor = new_ancestor
new_queue.append((len(ancestor), ancestor))
# if the clade is a tip, then we have a name
if clade_size == 1:
name = list(clade)[0]
name = namedict[name]
else:
name = None
# the clade will not be in nodes if it is a tip
children = [nodes.pop(c) for c in clade if c in nodes]
node = ete3.Tree(name=name)
#node = toytree.tree(name=name).tree
for child in children:
node.add_child(child)
if not node.is_leaf():
node.dist = int(round(100*countdict[clade]))
node.support = int(round(100*countdict[clade]))
else:
node.dist = int(100)
node.support = int(100)
nodes[clade] = node
queue = new_queue
tre = nodes.values()[0]
tre.unroot()
## return the tree and other trees if present
return tre, list(nodes.values())
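## Small illustration of the ancestor-rewriting step above: once a clade is
## placed, it is folded into its ancestor as a single frozenset member, so
## later iterations treat the placed clade as one unit.
ancestor = frozenset([1, 2, 3, 4])
clade = frozenset([2, 3])
new_ancestor = (ancestor - clade) | frozenset([clade])
print(new_ancestor)   # frozenset({1, 4, frozenset({2, 3})})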
def _refresh(self):
"""
Remove all existing results files and reinit the h5 arrays
so that the tetrad object is just like fresh from a CLI start.
"""
## clear any existing results files
oldfiles = [self.files.qdump] + \
self.database.__dict__.values() + \
self.trees.__dict__.values()
for oldfile in oldfiles:
if oldfile:
if os.path.exists(oldfile):
os.remove(oldfile)
## store old ipcluster info
oldcluster = copy.deepcopy(self._ipcluster)
## reinit the tetrad object data.
self.__init__(
name=self.name,
data=self.files.data,
mapfile=self.files.mapfile,
workdir=self.dirs,
method=self.params.method,
guidetree=self.files.tree,
resolve_ambigs=self.params.resolve_ambigs,
save_invariants=self.params.save_invariants,
nboots=self.params.nboots,
nquartets=self.params.nquartets,
initarr=True,
quiet=True,
cli=self.kwargs.get("cli")
)
## retain the same ipcluster info
self._ipcluster = oldcluster