def down(self):
"""
Move this object down one position.
"""
self.swap(self.get_ordering_queryset().filter(order__gt=self.order)) |
def to(self, order):
"""
Move object to a certain position, updating all affected objects to move accordingly up or down.
"""
if order is None or self.order == order:
# object is already at desired position
return
qs = self.get_ordering_queryset()
if self.order > order:
qs.filter(order__lt=self.order, order__gte=order).update(order=F('order') + 1)
else:
qs.filter(order__gt=self.order, order__lte=order).update(order=F('order') - 1)
self.order = order
self.save() |
def above(self, ref):
"""
Move this object above the referenced object.
"""
if not self._valid_ordering_reference(ref):
raise ValueError(
"%r can only be moved above instances of %r which %s equals %r." % (
self, self.__class__, self.order_with_respect_to,
self._get_order_with_respect_to()
)
)
if self.order == ref.order:
return
if self.order > ref.order:
o = ref.order
else:
o = self.get_ordering_queryset().filter(order__lt=ref.order).aggregate(Max('order')).get('order__max') or 0
self.to(o) |
def below(self, ref):
"""
Move this object below the referenced object.
"""
if not self._valid_ordering_reference(ref):
raise ValueError(
"%r can only be moved below instances of %r which %s equals %r." % (
self, self.__class__, self.order_with_respect_to,
self._get_order_with_respect_to()
)
)
if self.order == ref.order:
return
if self.order > ref.order:
o = self.get_ordering_queryset().filter(order__gt=ref.order).aggregate(Min('order')).get('order__min') or 0
else:
o = ref.order
self.to(o) |
def top(self):
"""
Move this object to the top of the ordered stack.
"""
o = self.get_ordering_queryset().aggregate(Min('order')).get('order__min')
self.to(o) |
def bottom(self):
"""
Move this object to the bottom of the ordered stack.
"""
o = self.get_ordering_queryset().aggregate(Max('order')).get('order__max')
self.to(o) |
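The six methods above (down, to, above, below, top, bottom) form a small reordering API built around an integer `order` field and `get_ordering_queryset()`. A minimal usage sketch, assuming a hypothetical Task model whose base class ("OrderedModel" is only a placeholder name here) supplies that field and these methods:

from django.db import models

class Task(OrderedModel):  # placeholder base class providing the methods above
    title = models.CharField(max_length=100)

first, second, third = Task.objects.order_by('order')[:3]
third.top()           # third now has the smallest order value
first.below(second)   # first is repositioned below second
second.down()         # second swaps with the next item in the stack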
def unapi(request):
"""
This view implements unAPI 1.0 (see http://unapi.info).
"""
id = request.GET.get('id')
format = request.GET.get('format')
if format is not None:
try:
publications = Publication.objects.filter(pk=int(id))
if not publications:
raise ValueError
except (ValueError, TypeError):
# invalid or missing id
return HttpResponse('\n'.join([
'<?xml version="1.0" encoding="UTF-8"?>',
'<error>Invalid ID.</error>']),
content_type="application/xml",
status=404)
if format == 'bibtex':
# return BibTeX encoded publication
return render(request, 'publications/publication.bib', {
'publication': publications[0]
},
content_type='text/x-bibtex; charset=UTF-8')
if format == 'mods':
# return MODS encoded publication
return render(request, 'publications/publications.mods', {
'publications': publications
},
content_type='application/xml; charset=UTF-8')
if format == 'ris':
# return RIS encoded publication
return render(request, 'publications/publications.ris', {
'publications': publications
},
content_type='application/x-research-info-systems; charset=UTF-8')
# invalid format
return HttpResponse('\n'.join([
'<?xml version="1.0" encoding="UTF-8"?>',
'<error>Invalid format.</error>']),
content_type="application/xml",
status=406)
if id is not None:
return HttpResponse('\n'.join([
'<?xml version="1.0" encoding="UTF-8"?>',
'<formats id="{0}">'.format(id),
'<format name="bibtex" type="text/x-bibtex" />',
'<format name="ris" type="application/x-research-info-systems" />',
'<format name="mods" type="application/xml" />',
'</formats>']), content_type="application/xml")
return HttpResponse('\n'.join([
'<?xml version="1.0" encoding="UTF-8"?>',
'<formats>',
'<format name="bibtex" type="text/x-bibtex" />',
'<format name="ris" type="application/x-research-info-systems" />',
'<format name="mods" type="application/xml" />',
'</formats>']), content_type="application/xml") |
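The view answers three request shapes: no parameters (advertise all formats), id only (formats available for one record), and id plus format (the record itself). A sketch using Django's test client, with a hypothetical /unapi/ URL route:

from django.test import Client

client = Client()
client.get('/unapi/')                                   # list all supported formats
client.get('/unapi/', {'id': 1})                        # formats for record 1
resp = client.get('/unapi/', {'id': 1, 'format': 'bibtex'})
resp['Content-Type']                                    # 'text/x-bibtex; charset=UTF-8'
resp.status_code                                        # 200, or 404/406 on a bad id/format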
def populate(publications):
"""
Load custom links and files from database and attach to publications.
"""
customlinks = CustomLink.objects.filter(publication__in=publications)
customfiles = CustomFile.objects.filter(publication__in=publications)
publications_ = {}
for publication in publications:
publication.links = []
publication.files = []
publications_[publication.id] = publication
for link in customlinks:
publications_[link.publication_id].links.append(link)
for file in customfiles:
publications_[file.publication_id].files.append(file) |
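populate() is a small prefetch helper: one query for links, one for files, and Python-side grouping, rather than two extra queries per publication. A hypothetical usage (the queryset is illustrative):

publications = list(Publication.objects.all()[:20])
populate(publications)                  # attaches .links and .files to each object
for pub in publications:
    print("{} {} {}".format(pub.pk, len(pub.links), len(pub.files)))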
def make(data, samples):
""" build a vcf file from the supercatg array and the cat.clust.gz output"""
outfile = open(os.path.join(data.dirs.outfiles, data.name+".vcf"), 'w')
inloci = os.path.join(data.dirs.outfiles, data.name+".loci")
names = [i.name for i in samples]
names.sort()
## TODO: Get a real version number for the current sw stack
version = "0.1"
## TODO: This is just reporting minimum depth per base. Would it be useful to
## report real depth of reads per base? YEAH, that's what supercatg is for.
mindepth = data.paramsdict["mindepth_statistical"]
print >>outfile, "##fileformat=VCFv4.1"
print >>outfile, "##fileDate="+time.strftime("%Y%m%d")
print >>outfile, "##source=ipyRAD.v."+version
print >>outfile, "##reference=common_allele_at_each_locus"
print >>outfile, "##INFO=<ID=NS,Number=1,Type=Integer,Description=\"Number of Samples With Data\">"
print >>outfile, "##INFO=<ID=DP,Number=1,Type=Integer,Description=\"Total Depth\">"
print >>outfile, "##INFO=<ID=AF,Number=A,Type=Float,Description=\"Allele Frequency\">"
print >>outfile, "##INFO=<ID=AA,Number=1,Type=String,Description=\"Ancestral Allele\">"
print >>outfile, "##FORMAT=<ID=GT,Number=1,Type=String,Description=\"Genotype\">"
print >>outfile, "##FORMAT=<ID=GQ,Number=1,Type=Integer,Description=\"Genotype Quality\">"
print >>outfile, "##FORMAT=<ID=DP,Number=1,Type=Integer,Description=\"Read Depth\">"
print >>outfile, "\t".join(["#CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO ","FORMAT"]+list(names))
loci = open(inloci).read().split("|")[:-1]
snps = 0
vcflist = []
for locusnumber in range(len(loci)):
samps = [i.split()[0][1:] for i in loci[locusnumber].strip().split("\n") if ">" in i]
loc = np.array([tuple(i.split()[-1]) for i in loci[locusnumber].strip().split("\n") if ">" in i])
NS = str(len(loc))
DP = str(mindepth)
for base in range(len(loc.T)):
col = []
site = list(loc.T[base])
site = list("".join(site).replace("-","").replace("N",""))
if site:
for bb in site:
if bb in list("RKYSWM"):
col += unstruct(bb)[0]
col += unstruct(bb)[1]
else:
col += bb
REF = most_common([i for i in col if i not in list("-RKYSWMN")])
ALT = set([i for i in col if (i in list("ATGC-N")) and (i!=REF)])
if ALT:
snps += 1
GENO = [REF]+list(ALT)
GENOS = []
for samp in names:
if samp in samps:
idx = samps.index(samp)
f = unstruct(loc.T[base][idx])
if ('-' in f) or ('N' in f):
GENOS.append("./.")
else:
GENOS.append(str(GENO.index(f[0]))+"|"+str(GENO.index(f[1])))
else:
GENOS.append("./.")
vcflist.append("\t".join([`locusnumber+1`, `base+1`, '.', REF, ",".join(ALT), "20", "PASS",
";".join(["NS="+NS, "DP="+DP]), "GT"]+GENOS))
if not locusnumber % 1000:
outfile.write( "\n".join(vcflist)+"\n" )
vcflist = []
#print >>outfile, "\t".join([`locusnumber+1`, `base+1`, '.', REF, ",".join(ALT), "20", "PASS",
# ";".join(["NS="+NS, "DP="+DP]), "GT"]+GENOS)
outfile.write( "\n".join(vcflist) )
outfile.close() |
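The genotype fields above rely on an external `unstruct()` helper that expands an IUPAC ambiguity code into its two alleles (assumed here: unstruct('R') == ('A', 'G') and unstruct('A') == ('A', 'A')). A worked example of how one column of base calls becomes REF, ALT, and GT values:

site = ['A', 'A', 'R']            # one consensus base per sample at this site
col = ['A', 'A', 'A', 'G']        # homozygotes add one char, 'R' adds both alleles
REF = 'A'                         # most_common() over the unambiguous characters
ALT = {'G'}                       # alleles observed that differ from REF
GENO = ['A', 'G']                 # index 0 = REF, index 1 = first ALT
# per-sample GT strings written to the VCF:
#   'A' -> unstruct('A') == ('A', 'A') -> "0|0"
#   'R' -> unstruct('R') == ('A', 'G') -> "0|1"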
def worker(self):
"""
Calculates the quartet weights for the test at a random
subsampled chunk of loci.
"""
## subsample loci
fullseqs = self.sample_loci()
## find all iterations of samples for this quartet
liters = itertools.product(*self.imap.values())
## run tree inference for each iteration of sampledict
hashval = uuid.uuid4().hex
weights = []
for ridx, lidx in enumerate(liters):
## get subalignment for this iteration and make to nex
a,b,c,d = lidx
sub = {}
for i in lidx:
if self.rmap[i] == "p1":
sub["A"] = fullseqs[i]
elif self.rmap[i] == "p2":
sub["B"] = fullseqs[i]
elif self.rmap[i] == "p3":
sub["C"] = fullseqs[i]
else:
sub["D"] = fullseqs[i]
## write as nexus file
nex = []
for tax in list("ABCD"):
nex.append(">{} {}".format(tax, sub[tax]))
## check for too much missing or lack of variants
nsites, nvar = count_var(nex)
## only run test if there's variation present
if nvar > self.minsnps:
## format as nexus file
nexus = "{} {}\n".format(4, len(fullseqs[a])) + "\n".join(nex)
## infer ML tree
treeorder = self.run_tree_inference(nexus, "{}.{}".format(hashval, ridx))
## add to list
weights.append(treeorder)
## cleanup - remove all files with the hash val
rfiles = glob.glob(os.path.join(tempfile.tempdir, "*{}*".format(hashval)))
for rfile in rfiles:
if os.path.exists(rfile):
os.remove(rfile)
## return result as weights for the set topologies.
trees = ["ABCD", "ACBD", "ADBC"]
wdict = {i:float(weights.count(i))/len(weights) for i in trees}
return wdict |
def get_order(tre):
"""
return tree order
"""
anode = tre.tree&">A"
sister = anode.get_sisters()[0]
sisters = (anode.name[1:], sister.name[1:])
others = [i for i in list("ABCD") if i not in sisters]
return sorted(sisters) + sorted(others) |
def count_var(nex):
"""
count number of sites with cov=4, and number of variable sites.
"""
arr = np.array([list(i.split()[-1]) for i in nex])
miss = np.any(arr=="N", axis=0)
nomiss = arr[:, ~miss]
nsnps = np.invert(np.all(nomiss==nomiss[0, :], axis=0)).sum()
return nomiss.shape[1], nsnps |
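A self-contained example of count_var() on a toy four-sample alignment: any column containing an N is dropped, then a column counts as a SNP if any row differs from the first row.

import numpy as np

nex = [">A AACGT", ">B AACTT", ">C ANCTT", ">D AACTT"]
arr = np.array([list(i.split()[-1]) for i in nex])
miss = np.any(arr == "N", axis=0)       # the second column holds an N and is masked out
nomiss = arr[:, ~miss]
nsnps = np.invert(np.all(nomiss == nomiss[0, :], axis=0)).sum()
print("{} {}".format(nomiss.shape[1], nsnps))   # prints: 4 1 (four sites kept, one variable)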
def sample_loci(self):
""" finds loci with sufficient sampling for this test"""
## store idx of passing loci
idxs = np.random.choice(self.idxs, self.ntests)
## open handle and read all loci, splitting on the locus separator
with open(self.data) as indata:
liter = indata.read().strip().split("|\n")
## store data as dict
seqdata = {i:"" for i in self.samples}
## put chunks into a list
for idx, loc in enumerate(liter):
if idx in idxs:
## parse chunk
lines = loc.split("\n")[:-1]
names = [i.split()[0] for i in lines]
seqs = [i.split()[1] for i in lines]
dd = {i:j for i,j in zip(names, seqs)}
## add data to concatenated seqdict
for name in seqdata:
if name in names:
seqdata[name] += dd[name]
else:
seqdata[name] += "N"*len(seqs[0])
## concatenate into a phylip file
return seqdata |
def run_tree_inference(self, nexus, idx):
"""
Writes the nexus data to a tmpfile, runs raxml tree inference, and parses
and returns the resulting tree order.
"""
## create a tmpdir for this test
tmpdir = tempfile.tempdir
tmpfile = tempfile.NamedTemporaryFile(
delete=False,
prefix=str(idx),
dir=tmpdir,
)
## write nexus to tmpfile
tmpfile.write(nexus)
tmpfile.flush()
## infer the tree
rax = raxml(name=str(idx), data=tmpfile.name, workdir=tmpdir, N=1, T=2)
rax.run(force=True, block=True, quiet=True)
## clean up
tmpfile.close()
## return tree order
order = get_order(toytree.tree(rax.trees.bestTree))
return "".join(order) |
def run(self, ipyclient):
"""
parallelize calls to worker function.
"""
## connect to parallel client
lbview = ipyclient.load_balanced_view()
## iterate over tests
asyncs = []
for test in xrange(self.ntests):
## submit jobs to run
async = lbview.apply(worker, self)
asyncs.append(async)
## wait for jobs to finish
ipyclient.wait()
## check for errors
for async in asyncs:
if not async.successful():
raise Exception("Error: {}".format(async.result()))
## return results as df
results = [i.result() for i in asyncs]
self.results_table = pd.DataFrame(results) |
def plot(self):
"""
return a toyplot barplot of the results table.
"""
if self.results_table is None:
return "no results found"
else:
bb = self.results_table.sort_values(
by=["ABCD", "ACBD"],
ascending=[False, True],
)
## make a barplot
import toyplot
c = toyplot.Canvas(width=600, height=200)
a = c.cartesian()
m = a.bars(bb)
return c, a, m |
def plot(self, pcs=[1, 2], ax=None, cmap=None, cdict=None, legend=True, title=None, outfile=None):
"""
Do the PCA and plot it.
Parameters
----------
pcs: list of ints
...
ax: matplotlib axis
...
cmap: matplotlib colormap
...
cdict: dictionary mapping pop names to colors
...
legend: boolean, whether or not to show the legend
"""
## Specify which 2 pcs to plot, default is pc1 and pc2
pc1 = pcs[0] - 1
pc2 = pcs[1] - 1
if pc1 < 0 or pc2 > self.ncomponents - 1:
raise IPyradError("PCs are 1-indexed. 1 is min & {} is max".format(self.ncomponents))
## Convert genotype data to allele count data
## We do this here because we might want to try different ways
## of accounting for missing data and "alt" allele counts treat
## missing data as "ref"
allele_counts = self.genotypes.to_n_alt()
## Actually do the pca
if self.ncomponents > len(self.samples_vcforder):
self.ncomponents = len(self.samples_vcforder)
print(" INFO: # PCs < # samples. Forcing # PCs = {}".format(self.ncomponents))
coords, model = allel.pca(allele_counts, n_components=self.ncomponents, scaler='patterson')
self.pcs = pd.DataFrame(coords,
index=self.samples_vcforder,
columns=["PC{}".format(x) for x in range(1,self.ncomponents+1)])
## Just allow folks to pass in the name of the cmap they want to use
if isinstance(cmap, str):
try:
cmap = cm.get_cmap(cmap)
except:
raise IPyradError(" Bad cmap value: {}".format(cmap))
if not cmap and not cdict:
if not self.quiet:
print(" Using default cmap: Spectral")
cmap = cm.get_cmap('Spectral')
if cmap:
if cdict:
print(" Passing in both cmap and cdict defaults to using the cmap value.")
popcolors = cmap(np.arange(len(self.pops))/len(self.pops))
cdict = {i:j for i, j in zip(self.pops.keys(), popcolors)}
fig = ""
if not ax:
fig = plt.figure(figsize=(6, 5))
ax = fig.add_subplot(1, 1, 1)
x = coords[:, pc1]
y = coords[:, pc2]
for pop in self.pops:
## Don't include pops with no samples, it makes the legend look stupid
## TODO: This doesn't prevent empty pops from showing up in the legend for some reason.
if len(self.pops[pop]) > 0:
mask = np.isin(self.samples_vcforder, self.pops[pop])
ax.plot(x[mask], y[mask], marker='o', linestyle=' ', color=cdict[pop], label=pop, markersize=6, mec='k', mew=.5)
ax.set_xlabel('PC%s (%.1f%%)' % (pc1+1, model.explained_variance_ratio_[pc1]*100))
ax.set_ylabel('PC%s (%.1f%%)' % (pc2+1, model.explained_variance_ratio_[pc2]*100))
if legend:
ax.legend(bbox_to_anchor=(1, 1), loc='upper left')
if fig:
fig.tight_layout()
if title:
ax.set_title(title)
if outfile:
try:
plt.savefig(outfile, format="png", bbox_inches="tight")
except:
print(" Saving pca.plot() failed to save figure to {}".format(outfile))
return ax |
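A hypothetical usage of plot(); `pca` stands for an already-constructed PCA analysis object (built elsewhere from a VCF), and the population names and colors are illustrative:

import matplotlib.pyplot as plt

fig, axes = plt.subplots(1, 2, figsize=(12, 5))
pca.plot(pcs=[1, 2], ax=axes[0], cmap="Spectral", title="PC1 vs PC2")
pca.plot(pcs=[1, 3], ax=axes[1], cdict={"popA": "red", "popB": "blue"}, legend=False)
fig.savefig("pca_panels.png", bbox_inches="tight")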
def plot_pairwise_dist(self, labels=None, ax=None, cmap=None, cdict=None, metric="euclidean"):
"""
Plot pairwise distances between all samples
labels: bool or list
By default labels aren't included. If labels == True, sample labels are read in
from the vcf file. Alternatively, labels can be passed in as a list, which should
be the same length as the number of samples.
"""
allele_counts = self.genotypes.to_n_alt()
dist = allel.pairwise_distance(allele_counts, metric=metric)
if not ax:
fig = plt.figure(figsize=(5, 5))
ax = fig.add_subplot(1, 1, 1)
if isinstance(labels, bool):
if labels:
labels = list(self.samples_vcforder)
elif isinstance(labels, type(None)):
pass
else:
## If not bool or None (default), then check to make sure the list passed in
## is the right length
if not len(labels) == len(self.samples_vcforder):
raise IPyradError(LABELS_LENGTH_ERROR.format(len(labels), len(self.samples_vcforder)))
allel.plot.pairwise_distance(dist, labels=labels, ax=ax, colorbar=False) |
def copy(self):
""" returns a copy of the pca analysis object """
cp = copy.deepcopy(self)
cp.genotypes = allel.GenotypeArray(self.genotypes, copy=True)
return cp |
def loci2cf(name, locifile, popdict, wdir=None, ipyclient=None):
"""
Convert ipyrad .loci file to an iqtree-pomo 'counts' file
Parameters:
-----------
name:
A prefix name for output files that will be produced
locifile:
A .loci file produced by ipyrad.
popdict:
A python dictionary grouping Clade names to Sample names.
Example: {"A": ['a', 'b', 'c'], "B": ['d', 'e', 'f']}
wdir:
A working directory where the output file is written. Defaults to the
current directory if not provided.
ipyclient:
If you pass it an ipyclient it will distribute work over
remote engines, otherwise we use multiprocessing (todo).
"""
## working directory, make sure it exists
if wdir:
wdir = os.path.abspath(wdir)
if not os.path.exists(wdir):
raise IPyradWarningExit(" working directory (wdir) does not exist")
else:
wdir = os.path.curdir
## output file path
name = name.rsplit(".cf")[0]
outfile = os.path.join(wdir, "{}.cf".format(name))
out = open(outfile, 'w')
## parse loci file
with open(locifile) as inloc:
loci = inloc.read().strip().split("|\n")
## get all names
names = list(itertools.chain(*popdict.values()))
popkeys = sorted(popdict.keys())
## count nsites
nsites = sum(len(loc.split("\n")[0].split()[1]) for loc in loci[:])
## print the header
out.write(HEADER.format(**{"NPOP": len(popdict),
"NSITES": nsites,
"VTAXA": "\t".join(popkeys)}))
## build print string
outstr = "chr{:<8} {:<4} "
for cidx in xrange(len(popkeys)):
outstr += "{:<8} "
toprint = []
for idx in xrange(len(loci)):
dat = loci[idx].split("\n")
seqs = np.array([list(i.split()[1]) for i in dat[:-1]])
names = [i.split()[0] for i in dat[:-1]]
data = np.zeros((seqs.shape[1], len(popkeys), 4), dtype=np.uint16)
for sidx in xrange(seqs.shape[1]):
for cidx in xrange(len(popkeys)):
for name in popdict[popkeys[cidx]]:
if name in names:
base = seqs[names.index(name), sidx]
if base in list("ACGT"):
data[sidx, cidx, BASE2IDX[base]] += 2
elif base in list("RSYMKW"):
base1, base2 = AMBIGS[base]
data[sidx, cidx, BASE2IDX[base1]] += 1
data[sidx, cidx, BASE2IDX[base2]] += 1
## print string for one locus
sdat = [",".join([str(i) for i in i.tolist()]) for i in data[sidx]]
#print outstr.format(idx+1, sidx+1, *sdat)
toprint.append(outstr.format(idx+1, sidx+1, *sdat))
## if 10K loci, then print and clear
if not idx % 10000:
out.write("\n".join(toprint)+"\n")
toprint = []
## close handle
out.write("\n".join(toprint)+"\n")
out.close() |
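Each field of a counts-file line holds per-population base counts in the order given by BASE2IDX (assumed here to be A, C, G, T). Homozygous calls add 2 to one slot and IUPAC ambiguity codes add 1 to each of their two bases (AMBIGS['R'] assumed to be ('A', 'G')). A worked example for one population with three samples at one site:

counts = [0, 0, 0, 0]              # A, C, G, T slots for this population
counts[0] += 2                     # sample 1: 'A' homozygote
counts[0] += 2                     # sample 2: 'A' homozygote
counts[0] += 1; counts[2] += 1     # sample 3: 'R' split into A and G
print(",".join(str(i) for i in counts))   # "5,0,1,0" -> one population field of the line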
def loci2migrate(name, locifile, popdict, mindict=1):
"""
A function to build an input file for the program migrate from an ipyrad
.loci file, and a dictionary grouping Samples into populations.
Parameters:
-----------
name: (str)
The name prefix for the migrate formatted output file.
locifile: (str)
The path to the .loci file produced by ipyrad.
popdict: (dict)
A Python dictionary grouping Samples into Populations.
mindict: (int or dict)
The minimum number of Samples per Population required to keep a locus.
Either a single int applied to every Population or a dict keyed by
Population name. Default is 1.
Examples:
---------
You can create the population dictionary by hand, and pass in the path
to your .loci file as a string.
>> popdict = {'A': ['a', 'b', 'c'], 'B': ['d', 'e', 'f']}
>> loci2migrate("outfile.migrate", "./mydata.loci", popdict)
Or, if you load your ipyrad.Assembly object from its JSON file, you can
access the loci file path and population information from there directly.
>> data = ip.load_json("mydata.json")
>> loci2migrate("outfile.migrate", data.outfiles.loci, data.populations)
"""
## I/O
outfile = open(name+".migrate", 'w')
infile = open(locifile, 'r')
## mindict can be an int (same for all pops) or a dictionary (one value per pop)
if isinstance(mindict, int):
mindict = {pop: mindict for pop in popdict}
else:
mindict = mindict
## filter data to only the loci that have data for mindict setting
keep = []
MINS = mindict.items()
## read in data to sample names
loci = infile.read().strip().split("|")[:-1]
for loc in loci:
samps = [i.split()[0].replace(">","") for i in loc.split("\n") if ">" in i]
## filter for coverage
GG = []
for group,mins in MINS:
GG.append( sum([i in samps for i in popdict[group]]) >= int(mins) )
if all(GG):
keep.append(loc)
## print data to file
print >>outfile, len(popdict), len(keep), "( npops nloci for data set", locifile, ")"
## print all data for each population at a time
done = 0
for group in popdict:
## print a list of lengths of each locus
if not done:
loclens = [len(loc.split("\n")[1].split()[-1].replace("x","n").replace("n","")) for loc in keep]
print >>outfile, " ".join(map(str,loclens))
done += 1
## print a list of number of individuals in each locus
indslist = []
for loc in keep:
samps = [i.split()[0].replace(">","") for i in loc.split("\n") if ">" in i]
inds = sum([i in samps for i in popdict[group]])
indslist.append(inds)
print >>outfile, " ".join(map(str,indslist)), group
## print sample id, spaces, and sequence data
#for loc in range(len(keep)):
for loc in range(len(keep)):
seqs = [i.split()[-1] for i in keep[loc].split("\n") if \
i.split()[0].replace(">","") in popdict[group]]
for i in range(len(seqs)):
print >>outfile, group[0:8]+"_"+str(i)+\
(" "*(10-len(group[0:8]+"_"+str(i))))+seqs[i].replace("x","n").replace("n","")
outfile.close() |
def update(assembly, idict, count):
""" updates dictionary with the next .5M reads from the super long string
phylip file. Makes for faster reading. """
data = iter(open(os.path.join(assembly.dirs.outfiles,
assembly.name+".phy"), 'r'))
ntax, nchar = data.next().strip().split()
## read in the next 100kbp for each taxon
for line in data:
tax, seq = line.strip().split()
idict[tax] = idict[tax][100000:]
idict[tax] += seq[count:count+100000]
del line
return idict |
def makephy(data, samples, longname):
""" builds phy output. If large files writes 50000 loci at a time to tmp
files and rebuilds at the end"""
## order names
names = [i.name for i in samples]
names.sort()
## read in loci file
locifile = os.path.join(data.dirs.outfiles, data.name+".loci")
locus = iter(open(locifile, 'rb'))
## dict for saving the full matrix
fdict = {name:[] for name in names}
## list for saving locus number and locus range for partitions
partitions = []
initial_pos = 1
## remove empty column sites and append edited seqs to dict F
done = 0
nloci = 0
nbases = 0
## TODO: This should be fixed. it cycles through reading each locus
## until nloci is less than this large number. It should really just
## read to the end of the file, so it'll do all loci no matter how
## many there are.
while nloci < 5000000:
seqs = []
#arrayed = np.array([])
anames = []
while 1:
## get next locus
try:
samp = locus.next()
except StopIteration:
done = 1
break
if "//" in samp:
nloci += 1
break
else:
try:
name, seq = samp.split()
except ValueError:
print samp
anames.append(name[1:])
seqs.append(seq.strip())
## reset
arrayed = np.array([list(i) for i in seqs])
if done:
break
## create mask for columns that are empty or
## that are paired-end separators (compatible w/ pyrad v2 and v3)
#mask = [i for i in range(len(arrayed.T)) if np.any([
## still surely a better way to vectorize this...
mask = [i for i in arrayed.T if any([j not in list("-Nn") for j in i])]
masked = np.dstack(mask)[0]
## partition information
loc_name = "p"+str(nloci)
loc_range = str(initial_pos) + "-" +\
str(len(masked[0]) + initial_pos -1)
initial_pos += len(masked[0])
partitions.append(loc_name+"="+loc_range)
## uncomment to print block info (used to partition by locus)
#blockend += minray
#print blockend,
#print loc
#print arrayed
## append data to dict
for name in names:
if name in anames:
#fdict[name].append(arrayed[anames.index(name), mask].tostring())
fdict[name].append(masked[anames.index(name),:].tostring())
else:
fdict[name].append("N"*masked.shape[1])
#fdict[name].append("N"*len(arrayed[0, mask]))
## add len to total length
nbases += len(fdict[name][-1])
## after x iterations tmp pickle fdict?
if not nloci % 1e4:
## concat strings
for name in fdict:
with open(os.path.join(data.dirs.outfiles, "tmp",
"{}_{}.phy.tmp".format(name, nloci)), 'wb') as wout:
wout.write("".join(fdict[name]))
del fdict
fdict = {name:[] for name in names}
## print out .PHY file, if really big, pull form multiple tmp pickle
superout = open(os.path.join(data.dirs.outfiles, data.name+".phy"), 'wb')
print >>superout, len(names), nbases
if nloci < 1e4:
for name in names:
print >>superout, name+(" "*((longname+3)-\
len(name)))+"".join(fdict[name])
else:
for name in names:
superout.write("{}{}{}".format(
name,
" "*((longname+3)-len(name)),
"".join(fdict[name])))
tmpfiles = glob.glob(os.path.join(data.dirs.outfiles, "tmp", name+"*.phy.tmp"))
tmpfiles.sort()
for tmpf in tmpfiles:
with open(tmpf, 'rb') as tmpin:
superout.write(tmpin.read())
os.remove(tmpf)
superout.write("\n")
superout.close()
raxml_part_out = open(os.path.join(data.dirs.outfiles, data.name+".phy.partitions"), 'w')
for partition in partitions:
print >>raxml_part_out, "DNA, %s" % (partition)
raxml_part_out.close()
return partitions |
def makenex(assembly, names, longname, partitions):
""" PRINT NEXUS """
## make nexus output
data = iter(open(os.path.join(assembly.dirs.outfiles, assembly.name+".phy" ), 'r' ))
nexout = open(os.path.join(assembly.dirs.outfiles, assembly.name+".nex" ), 'wb' )
ntax, nchar = data.next().strip().split(" ")
print >>nexout, "#NEXUS"
print >>nexout, "BEGIN DATA;"
print >>nexout, " DIMENSIONS NTAX=%s NCHAR=%s;" % (ntax,nchar)
print >>nexout, " FORMAT DATATYPE=DNA MISSING=N GAP=- INTERLEAVE=YES;"
print >>nexout, " MATRIX"
idict = {}
## read in the first 100kbp for each taxon
for line in data:
tax, seq = line.strip().split()
idict[tax] = seq[0:100000]
del line
nameorder = idict.keys()
nameorder.sort()
n=0
tempn=0
sz = 100
while n < len(seq):
for tax in nameorder:
print >>nexout, " "+tax+" "*\
((longname-len(tax))+3)+\
idict[tax][tempn:tempn+sz]
n += sz
tempn += sz
print >>nexout, ""
if not n % 100000:
#print idict[tax][tempn:tempn+sz]
idict = update(assembly, idict, n)
tempn -= 100000
print >>nexout, ';'
print >>nexout, 'END;'
### partitions info
print >>nexout, "BEGIN SETS;"
for partition in partitions:
print >>nexout, " CHARSET %s;" % (partition)
print >>nexout, "END;"
nexout.close() |
def make(assembly, samples):
""" Make phylip and nexus formats. This is hackish since I'm recycling the
code whole-hog from pyrad V3. Probably could be good to go back through
and clean up the conversion code some time.
"""
## get the longest name
longname = max([len(i) for i in assembly.samples.keys()])
names = [i.name for i in samples]
partitions = makephy(assembly, samples, longname)
makenex(assembly, names, longname, partitions) |
def sample_cleanup(data, sample):
"""
Clean up a bunch of loose files.
"""
umap1file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap1.fastq")
umap2file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap2.fastq")
unmapped = os.path.join(data.dirs.refmapping, sample.name+"-unmapped.bam")
samplesam = os.path.join(data.dirs.refmapping, sample.name+".sam")
split1 = os.path.join(data.dirs.edits, sample.name+"-split1.fastq")
split2 = os.path.join(data.dirs.edits, sample.name+"-split2.fastq")
refmap_derep = os.path.join(data.dirs.edits, sample.name+"-refmap_derep.fastq")
for f in [umap1file, umap2file, unmapped, samplesam, split1, split2, refmap_derep]:
try:
os.remove(f)
except:
pass |
def index_reference_sequence(data, force=False):
"""
Index the reference sequence, unless it already exists. Also make a mapping
of scaffolds to index numbers for later use in steps 5-6.
"""
## get ref file from params
refseq_file = data.paramsdict['reference_sequence']
index_files = []
## Check for existence of index files. Default to bwa unless you specify smalt
if "smalt" in data._hackersonly["aligner"]:
# These are smalt index files. Only referenced here to ensure they exist
index_files.extend([".sma", ".smi"])
else:
index_files.extend([".amb", ".ann", ".bwt", ".pac", ".sa"])
## samtools specific index
index_files.extend([".fai"])
## If reference sequence already exists then bail out of this func
if not force:
if all([os.path.isfile(refseq_file+i) for i in index_files]):
return
#if data._headers:
# print(INDEX_MSG.format(data._hackersonly["aligner"]))
if "smalt" in data._hackersonly["aligner"]:
## Create smalt index for mapping
## smalt index [-k <wordlen>] [-s <stepsiz>] <index_name> <reference_file>
cmd1 = [ipyrad.bins.smalt, "index",
"-k", str(data._hackersonly["smalt_index_wordlen"]),
refseq_file,
refseq_file]
else:
## bwa index <reference_file>
cmd1 = [ipyrad.bins.bwa, "index", refseq_file]
## call the command
LOGGER.info(" ".join(cmd1))
proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
error1 = proc1.communicate()[0]
## simple samtools index for grabbing ref seqs
cmd2 = [ipyrad.bins.samtools, "faidx", refseq_file]
LOGGER.info(" ".join(cmd2))
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)
error2 = proc2.communicate()[0]
## error handling
if proc1.returncode:
raise IPyradWarningExit(error1)
if error2:
if "please use bgzip" in error2:
raise IPyradWarningExit(NO_ZIP_BINS.format(refseq_file))
else:
raise IPyradWarningExit(error2) |
def mapreads(data, sample, nthreads, force):
"""
Attempt to map reads to reference sequence. This reads in the fasta files
(samples.files.edits), and maps each read to the reference. Unmapped reads
are dropped right back in the de novo pipeline. Reads that map successfully
are processed and pushed downstream and joined with the rest of the data
post muscle_align.
Mapped reads end up in a sam file.
"""
LOGGER.info("Entering mapreads(): %s %s", sample.name, nthreads)
## This is the input derep file, for paired data we need to split the data,
## and so we will make sample.files.dereps == [derep1, derep2], but for
## SE data we can simply use sample.files.dereps == [derepfile].
derepfile = os.path.join(data.dirs.edits, sample.name+"_derep.fastq")
sample.files.dereps = [derepfile]
## This is the final output files containing merged/concat derep'd refmap'd
## reads that did not match to the reference. They will be back in
## merge/concat (--nnnnn--) format ready to be input to vsearch, if needed.
mumapfile = sample.files.unmapped_reads
umap1file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap1.fastq")
umap2file = os.path.join(data.dirs.edits, sample.name+"-tmp-umap2.fastq")
## split the derepfile into the two handles we designate
if "pair" in data.paramsdict["datatype"]:
sample.files.split1 = os.path.join(data.dirs.edits, sample.name+"-split1.fastq")
sample.files.split2 = os.path.join(data.dirs.edits, sample.name+"-split2.fastq")
sample.files.dereps = [sample.files.split1, sample.files.split2]
split_merged_reads(sample.files.dereps, derepfile)
## (cmd1) smalt <task> [TASK_OPTIONS] [<index_name> <file_name_A> [<file_name_B>]]
## -f sam : Output as sam format, tried :clip: to hard mask output
## but it shreds the unmapped reads (outputs empty fq)
## -l [pe,mp,pp]: If paired end select the orientation of each read
## -n # : Number of threads to use
## -x : Perform a more exhaustive search
## -y # : proportion matched to reference (sequence similarity)
## -o : output file
## : Reference sequence
## : Input file(s), in a list. One for R1 and one for R2
## -c # : proportion of the query read length that must be covered
## (cmd1) bwa mem [OPTIONS] <index_name> <file_name_A> [<file_name_B>] > <output_file>
## -t # : Number of threads
## -M : Mark split alignments as secondary.
## (cmd2) samtools view [options] <in.bam>|<in.sam>|<in.cram> [region ...]
## -b = write to .bam
## -q = Only keep reads with mapq score >= 30 (seems to be pretty standard)
## -F = Select all reads that DON'T have these flags.
## 0x4 (segment unmapped)
## 0x100 (Secondary alignment)
## 0x800 (supplementary alignment)
## -U = Write out all reads that don't pass the -F filter
## (all unmapped reads go to this file).
## TODO: Should eventually add `-q 13` to filter low confidence mapping.
## If you do this it will throw away some fraction of reads. Ideally you'd
## catch these and throw them in with the rest of the unmapped reads, but
## I can't think of a straightforward way of doing that. There should be
## a `-Q` flag to only keep reads below the threshold, but i realize that
## would be of limited use besides for me.
## (cmd3) samtools sort [options...] [in.bam]
## -T = Temporary file name, this is required by samtools, ignore it
## Here we hack it to be samhandle.tmp cuz samtools cleans it up
## -O = Output file format, in this case bam
## -o = Output file name
if "smalt" in data._hackersonly["aligner"]:
## The output SAM data is written to file (-o)
## input is either (derep) or (derep-split1, derep-split2)
cmd1 = [ipyrad.bins.smalt, "map",
"-f", "sam",
"-n", str(max(1, nthreads)),
"-y", str(data.paramsdict['clust_threshold']),
"-o", os.path.join(data.dirs.refmapping, sample.name+".sam"),
"-x",
data.paramsdict['reference_sequence']
] + sample.files.dereps
cmd1_stdout = sps.PIPE
cmd1_stderr = sps.STDOUT
else:
cmd1 = [ipyrad.bins.bwa, "mem",
"-t", str(max(1, nthreads)),
"-M",
data.paramsdict['reference_sequence']
] + sample.files.dereps
## Insert optional flags for bwa
try:
bwa_args = data._hackersonly["bwa_args"].split()
bwa_args.reverse()
for arg in bwa_args:
cmd1.insert(2, arg)
except KeyError:
## Do nothing
pass
cmd1_stdout = open(os.path.join(data.dirs.refmapping, sample.name+".sam"), 'w')
cmd1_stderr = None
## Reads in the SAM file from cmd1. It writes the unmapped data to file
## and it pipes the mapped data to be used in cmd3
cmd2 = [ipyrad.bins.samtools, "view",
"-b",
## TODO: This introduces a bug with PE right now. Think about the case where
## R1 has low qual mapping and R2 has high. You get different numbers
## of reads in the unmapped tmp files. FML.
#"-q", "30",
"-F", "0x904",
"-U", os.path.join(data.dirs.refmapping, sample.name+"-unmapped.bam"),
os.path.join(data.dirs.refmapping, sample.name+".sam")]
## this is gonna catch mapped bam output from cmd2 and write to file
cmd3 = [ipyrad.bins.samtools, "sort",
"-T", os.path.join(data.dirs.refmapping, sample.name+".sam.tmp"),
"-O", "bam",
"-o", sample.files.mapped_reads]
## TODO: Unnecessary?
## this is gonna read the sorted BAM file and index it. only for pileup?
cmd4 = [ipyrad.bins.samtools, "index", sample.files.mapped_reads]
## this is gonna read in the unmapped files, args are added below,
## and it will output fastq formatted unmapped reads for merging.
## -v 45 sets the default qscore arbitrarily high
cmd5 = [ipyrad.bins.samtools, "bam2fq", "-v 45",
os.path.join(data.dirs.refmapping, sample.name+"-unmapped.bam")]
## Insert additional arguments for paired data to the commands.
## We assume Illumina paired end reads for the orientation
## of mate pairs (orientation: ---> <----).
if 'pair' in data.paramsdict["datatype"]:
if "smalt" in data._hackersonly["aligner"]:
## add paired flag (-l pe) to cmd1 right after (smalt map ...)
cmd1.insert(2, "pe")
cmd1.insert(2, "-l")
else:
## No special PE flags for bwa
pass
## add samtools filter for only keep if both pairs hit
## 0x1 - Read is paired
## 0x2 - Each read properly aligned
cmd2.insert(2, "0x3")
cmd2.insert(2, "-f")
## tell bam2fq that there are output files for each read pair
cmd5.insert(2, umap1file)
cmd5.insert(2, "-1")
cmd5.insert(2, umap2file)
cmd5.insert(2, "-2")
else:
cmd5.insert(2, mumapfile)
cmd5.insert(2, "-0")
## Running cmd1 creates ref_mapping/sname.sam,
LOGGER.debug(" ".join(cmd1))
proc1 = sps.Popen(cmd1, stderr=cmd1_stderr, stdout=cmd1_stdout)
## This is really long running job so we wrap it to ensure it dies.
try:
error1 = proc1.communicate()[0]
except KeyboardInterrupt:
proc1.kill()
## raise error if one occurred in smalt
if proc1.returncode:
raise IPyradWarningExit(error1)
## Running cmd2 writes to ref_mapping/sname.unmapped.bam, and
## fills the pipe with mapped BAM data
LOGGER.debug(" ".join(cmd2))
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)
## Running cmd3 pulls mapped BAM from pipe and writes to
## ref_mapping/sname.mapped-sorted.bam.
## Because proc2 pipes to proc3 we just communicate this to run both.
LOGGER.debug(" ".join(cmd3))
proc3 = sps.Popen(cmd3, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc2.stdout)
error3 = proc3.communicate()[0]
if proc3.returncode:
raise IPyradWarningExit(error3)
proc2.stdout.close()
## Later we're gonna use samtools to grab out regions using 'view', and to
## do that we need it to be indexed. Let's index it now.
LOGGER.debug(" ".join(cmd4))
proc4 = sps.Popen(cmd4, stderr=sps.STDOUT, stdout=sps.PIPE)
error4 = proc4.communicate()[0]
if proc4.returncode:
raise IPyradWarningExit(error4)
## Running cmd5 writes to either edits/sname-refmap_derep.fastq for SE
## or it makes edits/sname-tmp-umap{12}.fastq for paired data, which
## will then need to be merged.
LOGGER.debug(" ".join(cmd5))
proc5 = sps.Popen(cmd5, stderr=sps.STDOUT, stdout=sps.PIPE)
error5 = proc5.communicate()[0]
if proc5.returncode:
raise IPyradWarningExit(error5)
## Finally, merge the unmapped reads, which is what cluster()
## expects. If SE, just rename the outfile. In the end
## <sample>-refmap_derep.fq will be the final output
if 'pair' in data.paramsdict["datatype"]:
LOGGER.info("Merging unmapped reads {} {}".format(umap1file, umap2file))
merge_pairs_after_refmapping(data, [(umap1file, umap2file)], mumapfile) |
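For orientation, here is roughly what the three core commands assembled above expand to for a hypothetical single-end sample '1A_0' aligned with bwa (paths and thread count are illustrative):

cmd1 = ["bwa", "mem", "-t", "4", "-M", "reference.fa", "1A_0_derep.fastq"]
cmd2 = ["samtools", "view", "-b", "-F", "0x904",
        "-U", "refmapping/1A_0-unmapped.bam", "refmapping/1A_0.sam"]
cmd3 = ["samtools", "sort", "-T", "refmapping/1A_0.sam.tmp",
        "-O", "bam", "-o", "refmapping/1A_0-mapped-sorted.bam"]
# cmd1's SAM output is written to refmapping/1A_0.sam; cmd2 splits unmapped reads
# off to their own BAM and pipes the mapped reads into cmd3, which sorts them.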
def fetch_cluster_se(data, samfile, chrom, rstart, rend):
"""
Builds a single end cluster from the refmapped data.
"""
## If SE then we enforce the minimum overlap distance to avoid the
## staircase syndrome of multiple reads overlapping just a little.
overlap_buffer = data._hackersonly["min_SE_refmap_overlap"]
## the *_buff variables here are because we have to play patty
## cake here with the rstart/rend vals because we want pysam to
## enforce the buffer for SE, but we want the reference sequence
## start and end positions to print correctly for downstream.
rstart_buff = rstart + overlap_buffer
rend_buff = rend - overlap_buffer
## Reads that map to only very short segments of the reference
## sequence will return buffer end values that are before the
## start values causing pysam to complain. Very short mappings.
if rstart_buff > rend_buff:
tmp = rstart_buff
rstart_buff = rend_buff
rend_buff = tmp
## Buffering can't make start and end equal or pysam returns nothing.
if rstart_buff == rend_buff:
rend_buff += 1
## store pairs
rdict = {}
clust = []
iterreg = []
iterreg = samfile.fetch(chrom, rstart_buff, rend_buff)
## use dict to match up read pairs
for read in iterreg:
if read.qname not in rdict:
rdict[read.qname] = read
## sort dict keys so highest derep is first ('seed')
sfunc = lambda x: int(x.split(";size=")[1].split(";")[0])
rkeys = sorted(rdict.keys(), key=sfunc, reverse=True)
## get the seed read for filtering; bail out if the region returned no reads
try:
read1 = rdict[rkeys[0]]
except (IndexError, KeyError):
LOGGER.error("Found bad cluster, skipping - key:{} rdict:{}".format(rkeys[0], rdict))
return ""
## the starting blocks for the seed
poss = read1.get_reference_positions(full_length=True)
seed_r1start = min(poss)
seed_r1end = max(poss)
## store the seed -------------------------------------------
if read1.is_reverse:
seq = revcomp(read1.seq)
else:
seq = read1.seq
## store, could write orient but just + for now.
size = sfunc(rkeys[0])
clust.append(">{}:{}:{};size={};*\n{}"\
.format(chrom, seed_r1start, seed_r1end, size, seq))
## If there's only one hit in this region then rkeys will only have
## one element and the call to `rkeys[1:]` will raise. Test for this.
if len(rkeys) > 1:
## store the hits to the seed -------------------------------
for key in rkeys[1:]:
skip = False
try:
read1 = rdict[key]
except ValueError:
## enter values that will make this read get skipped
read1 = rdict[key][0]
skip = True
## orient reads only if not skipping
if not skip:
poss = read1.get_reference_positions(full_length=True)
minpos = min(poss)
maxpos = max(poss)
## store the seq
if read1.is_reverse:
seq = revcomp(read1.seq)
else:
seq = read1.seq
## store, could write orient but just + for now.
size = sfunc(key)
clust.append(">{}:{}:{};size={};+\n{}"\
.format(chrom, minpos, maxpos, size, seq))
else:
## seq is excluded, though, we could save it and return
## it as a separate cluster that will be aligned separately.
pass
return clust |
def fetch_cluster_pairs(data, samfile, chrom, rstart, rend):
"""
Builds a paired cluster from the refmapped data.
"""
## store pairs
rdict = {}
clust = []
## grab the region and make tuples of info
iterreg = samfile.fetch(chrom, rstart, rend)
## use dict to match up read pairs
for read in iterreg:
if read.qname not in rdict:
rdict[read.qname] = [read]
else:
rdict[read.qname].append(read)
## sort dict keys so highest derep is first ('seed')
sfunc = lambda x: int(x.split(";size=")[1].split(";")[0])
rkeys = sorted(rdict.keys(), key=sfunc, reverse=True)
## get blocks from the seed for filtering, bail out if seed is not paired
try:
read1, read2 = rdict[rkeys[0]]
except ValueError:
return 0
## the starting blocks for the seed
poss = read1.get_reference_positions() + read2.get_reference_positions()
seed_r1start = min(poss)
seed_r2end = max(poss)
## store the seed -------------------------------------------
## Simplify. R1 and R2 are always on opposite strands, but the
## orientation is variable. We revcomp and order the reads to
## preserve genomic order.
reads_overlap = False
if read1.is_reverse:
if read2.aend > read1.get_blocks()[0][0]:
reads_overlap = True
seq = read2.seq + "nnnn" + revcomp(read1.seq)
else:
seq = read2.seq + "nnnn" + read1.seq
else:
if read1.aend > read2.get_blocks()[0][0]:
reads_overlap = True
seq = read1.seq + "nnnn" + revcomp(read2.seq)
else:
seq = read1.seq + "nnnn" + read2.seq
## store, could write orient but just + for now.
size = sfunc(rkeys[0])
clust.append(">{}:{}:{};size={};*\n{}"\
.format(chrom, seed_r1start, seed_r2end, size, seq))
## If there's only one hit in this region then rkeys will only have
## one element and the call to `rkeys[1:]` will raise. Test for this.
if len(rkeys) > 1:
## store the hits to the seed -------------------------------
for key in rkeys[1:]:
skip = False
try:
read1, read2 = rdict[key]
except ValueError:
## enter values that will make this read get skipped
read1 = rdict[key][0]
read2 = read1
skip = True
## orient reads and filter out ones that will not align well b/c
## they do not overlap enough with the seed
poss = read1.get_reference_positions() + read2.get_reference_positions()
minpos = min(poss)
maxpos = max(poss)
## skip if more than one hit location
if read1.has_tag("SA") or read2.has_tag("SA"):
skip = True
## store if read passes
if (abs(minpos - seed_r1start) < 50) and \
(abs(maxpos - seed_r2end) < 50) and \
(not skip):
## store the seq
if read1.is_reverse:
## do reads overlap
if read2.aend > read1.get_blocks()[0][0]:
reads_overlap = True
seq = read2.seq + "nnnn" + revcomp(read1.seq)
else:
seq = read2.seq + "nnnn" + read1.seq
else:
if read1.aend > read2.get_blocks()[0][0]:
reads_overlap = True
seq = read1.seq + "nnnn" + revcomp(read2.seq)
else:
seq = read1.seq + "nnnn" + read2.seq
## store, could write orient but just + for now.
size = sfunc(key)
clust.append(">{}:{}:{};size={};+\n{}"\
.format(chrom, minpos, maxpos, size, seq))
else:
## seq is excluded, though, we could save it and return
## it as a separate cluster that will be aligned separately.
pass
## merge the pairs prior to returning them
## Remember, we already tested for quality scores, so
## merge_after_pysam will generate arbitrarily high scores
## It would be nice to do something here like test if
## the average insert length + 2 stdv is > 2*read len
## so you can switch off merging for mostly non-overlapping data
if reads_overlap:
if data._hackersonly["refmap_merge_PE"]:
clust = merge_after_pysam(data, clust)
#clust = merge_pair_pipes(data, clust)
return clust |
def ref_build_and_muscle_chunk(data, sample):
"""
1. Run bedtools to get all overlapping regions
2. Parse out reads from regions using pysam and dump into chunk files.
We measure it out to create 10 chunk files per sample.
3. If we really wanted to speed this up, though it is pretty fast already,
we could parallelize it since we can easily break the regions into
a list of chunks.
"""
## get regions using bedtools
regions = bedtools_merge(data, sample).strip().split("\n")
nregions = len(regions)
chunksize = (nregions / 10) + (nregions % 10)
LOGGER.debug("nregions {} chunksize {}".format(nregions, chunksize))
## create an output file to write clusters to
idx = 0
tmpfile = os.path.join(data.tmpdir, sample.name+"_chunk_{}.ali")
## remove old files if they exist to avoid append errors
for i in range(11):
if os.path.exists(tmpfile.format(i)):
os.remove(tmpfile.format(i))
fopen = open
## If reference+denovo we drop the reads back into clust.gz
## and let the muscle_chunker do its thing back in cluster_within
if data.paramsdict["assembly_method"] == "denovo+reference":
tmpfile = os.path.join(data.dirs.clusts, sample.name+".clust.gz")
fopen = gzip.open
## build clusters for aligning with muscle from the sorted bam file
samfile = pysam.AlignmentFile(sample.files.mapped_reads, 'rb')
#"./tortas_refmapping/PZ70-mapped-sorted.bam", "rb")
## fill clusts list and dump periodically
clusts = []
nclusts = 0
for region in regions:
chrom, pos1, pos2 = region.split()
try:
## fetches pairs quickly but then goes slow to merge them.
if "pair" in data.paramsdict["datatype"]:
clust = fetch_cluster_pairs(data, samfile, chrom, int(pos1), int(pos2))
## fetch but no need to merge
else:
clust = fetch_cluster_se(data, samfile, chrom, int(pos1), int(pos2))
except IndexError as inst:
LOGGER.error("Bad region chrom:start-end {}:{}-{}".format(chrom, pos1, pos2))
continue
if clust:
clusts.append("\n".join(clust))
nclusts += 1
if nclusts == chunksize:
## write to file
tmphandle = tmpfile.format(idx)
with fopen(tmphandle, 'a') as tmp:
#LOGGER.debug("Writing tmpfile - {}".format(tmpfile.format(idx)))
#if data.paramsdict["assembly_method"] == "denovo+reference":
# ## This is dumb, but for this method you need to prepend the
# ## separator to maintain proper formatting of clust.gz
tmp.write("\n//\n//\n".join(clusts)+"\n//\n//\n")
idx += 1
nclusts = 0
clusts = []
if clusts:
## write remaining to file
with fopen(tmpfile.format(idx), 'a') as tmp:
#tmp.write("\n//\n//\n" + ("\n//\n//\n".join(clusts)))
tmp.write("\n//\n//\n".join(clusts)+"\n//\n//\n")
clusts = []
if not data.paramsdict["assembly_method"] == "denovo+reference":
chunkfiles = glob.glob(os.path.join(data.tmpdir, sample.name+"_chunk_*.ali"))
LOGGER.info("created chunks %s", chunkfiles)
## cleanup
samfile.close() |
def ref_muscle_chunker(data, sample):
"""
Run bedtools to get all overlapping regions. Pass this list into the func
'get_overlapping_reads' which will write fastq chunks to the clust.gz file.
1) Run bedtools merge to get a list of all contiguous blocks of bases
in the reference sequence where one or more of our reads overlap.
The output will look like this:
1 45230754 45230783
1 74956568 74956596
...
1 116202035 116202060
"""
LOGGER.info('entering ref_muscle_chunker')
## Get regions, which will be a giant list of 5-tuples, of which we're
## only really interested in the first three: (chrom, start, end) position.
regions = bedtools_merge(data, sample)
if len(regions) > 0:
## this calls bam_region_to_fasta a billion times
get_overlapping_reads(data, sample, regions)
else:
msg = "No reads mapped to reference sequence - {}".format(sample.name)
LOGGER.warn(msg) |
def get_overlapping_reads(data, sample, regions):
"""
For SE data, this pulls mapped reads out of sorted mapped bam files and
appends them to the clust.gz file so they fall into downstream
(muscle alignment) analysis.
For PE data, this pulls mapped reads out of sorted mapped bam files, splits
R1s from R2s and writes them to separate files. Once all reads are written,
it calls merge_reads (vsearch) to find merged and non-merged reads. These
are then put into clust.gz with either an nnnn separator or as merged.
The main func being called here is 'bam_region_to_fasta', which calls
samtools to pull out the mapped reads.
1) Coming into this function we have sample.files.mapped_reads
as a sorted bam file, and a passed in list of regions to evaluate.
2) Get all reads overlapping with each individual region.
3) Pipe to vsearch for clustering.
4) Append to the clust.gz file.
"""
## storage and counter
locus_list = []
reads_merged = 0
## Set the write mode for opening clusters file.
## 1) if "reference" then only keep refmapped, so use 'wb' to overwrite
## 2) if "denovo+reference" then 'ab' adds to end of denovo clust file
write_flag = 'wb'
if data.paramsdict["assembly_method"] == "denovo+reference":
write_flag = 'ab'
## file handle for writing clusters
sample.files.clusters = os.path.join(data.dirs.clusts, sample.name+".clust.gz")
outfile = gzip.open(sample.files.clusters, write_flag)
## write a separator if appending to clust.gz
if data.paramsdict["assembly_method"] == "denovo+reference":
outfile.write("\n//\n//\n")
## Make a process to pass in to bam_region_to_fasta so we can just reuse
## it rather than recreating a bunch of subprocesses. Saves hella time.
proc1 = sps.Popen("sh", stdin=sps.PIPE, stdout=sps.PIPE, universal_newlines=True)
# Wrap this in a try so we can easily locate errors
try:
## For each identified region, build the pileup and write out the fasta
for line in regions.strip().split("\n"):
# Blank lines returned from bedtools screw things up. Filter them.
if line == "":
continue
## get elements from bedtools region
chrom, region_start, region_end = line.strip().split()[0:3]
## bam_region_to_fasta returns a chunk of fasta sequence
args = [data, sample, proc1, chrom, region_start, region_end]
clust = bam_region_to_fasta(*args)
## If bam_region_to_fasta fails for some reason it'll return [],
## in which case skip the rest of this. Normally happens if reads
## map successfully, but too far apart.
if not clust:
continue
## Store locus in a list
# LOGGER.info("clust from bam-region-to-fasta \n %s", clust)
locus_list.append(clust)
## write chunk of 1000 loci and clear list to minimize memory
if not len(locus_list) % 1000:
outfile.write("\n//\n//\n".join(locus_list)+"\n//\n//\n")
locus_list = []
## write remaining
if any(locus_list):
outfile.write("\n//\n//\n".join(locus_list))
else:
## If it's empty, strip off the last \n//\n//\n from the outfile.
pass
## close handle
outfile.close()
except Exception as inst:
LOGGER.error("Exception inside get_overlapping_reads - {}".format(inst))
raise
finally:
if "pair" in data.paramsdict["datatype"]:
LOGGER.info("Total merged reads for {} - {}"\
.format(sample.name, reads_merged))
sample.stats.reads_merged = reads_merged |
def split_merged_reads(outhandles, input_derep):
"""
Takes merged/concat derep file from vsearch derep and split it back into
separate R1 and R2 parts.
- sample_fastq: a list of the two file paths to write out to.
- input_reads: the path to the input merged reads
"""
handle1, handle2 = outhandles
splitderep1 = open(handle1, 'w')
splitderep2 = open(handle2, 'w')
with open(input_derep, 'r') as infile:
## Read in the infile two lines at a time: (seqname, sequence)
duo = itertools.izip(*[iter(infile)]*2)
## lists for storing results until ready to write
split1s = []
split2s = []
## iterate over input splitting, saving, and writing.
idx = 0
while 1:
try:
itera = duo.next()
except StopIteration:
break
## split the duo into separate parts and inc counter
part1, part2 = itera[1].split("nnnn")
idx += 1
## R1 needs a newline, but R2 inherits it from the original file
## store parts in lists until ready to write
split1s.append("{}{}\n".format(itera[0], part1))
split2s.append("{}{}".format(itera[0], part2))
## if large enough then write to file
if not idx % 10000:
splitderep1.write("".join(split1s))
splitderep2.write("".join(split2s))
split1s = []
split2s = []
## write final chunk if there is any
if any(split1s):
splitderep1.write("".join(split1s))
splitderep2.write("".join(split2s))
## close handles
splitderep1.close()
splitderep2.close() |
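A tiny worked example of the record format this function consumes: each dereplicated record is a header line plus a sequence line in which R1 and R2 are joined by "nnnn".

header = ">seed_read;size=4;*\n"
seq = "ACGTACGT" + "nnnn" + "TTGGCCAA\n"
part1, part2 = seq.split("nnnn")
r1_record = "{}{}\n".format(header, part1)   # R1 needs an explicit newline added
r2_record = "{}{}".format(header, part2)     # R2 keeps the newline from the input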
def check_insert_size(data, sample):
"""
check mean insert size for this sample and update
hackersonly.max_inner_mate_distance if need be. This value controls how
far apart mate pairs can be to still be considered for bedtools merging
downstream.
"""
## pipe stats output to grep
cmd1 = [ipyrad.bins.samtools, "stats", sample.files.mapped_reads]
cmd2 = ["grep", "SN"]
proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc1.stdout)
## get piped result
res = proc2.communicate()[0]
## raise exception on failure and do cleanup
if proc2.returncode:
raise IPyradWarningExit("error in %s: %s", cmd2, res)
## starting vals
avg_insert = 0
stdv_insert = 0
avg_len = 0
## iterate over results
for line in res.split("\n"):
if "insert size average" in line:
avg_insert = float(line.split(":")[-1].strip())
elif "insert size standard deviation" in line:
## hack to fix sim data when stdv is 0.0. Shouldn't
## impact real data bcz stdv gets rounded up below
stdv_insert = float(line.split(":")[-1].strip()) + 0.1
elif "average length" in line:
avg_len = float(line.split(":")[-1].strip())
LOGGER.debug("avg {} stdv {} avg_len {}"\
.format(avg_insert, stdv_insert, avg_len))
## If all values return successfully set the max inner mate distance.
## This is tricky. avg_insert is the average length of R1+R2+inner mate
## distance. avg_len is the average length of a read. If there are lots
## of reads that overlap then avg_insert will be close to but bigger than
## avg_len. We are looking for the right value for `bedtools merge -d`
## which wants to know the max distance between reads.
if all([avg_insert, stdv_insert, avg_len]):
## If 2 * the average length of a read is less than the average
## insert size then most reads DO NOT overlap
if stdv_insert < 5:
stdv_insert = 5.
if (2 * avg_len) < avg_insert:
hack = avg_insert + (3 * np.math.ceil(stdv_insert)) - (2 * avg_len)
## If it is > than the average insert size then most reads DO
## overlap, so we have to calculate inner mate distance a little
## differently.
else:
hack = (avg_insert - avg_len) + (3 * np.math.ceil(stdv_insert))
## set the hackerdict value
LOGGER.info("stdv: hacked insert size is %s", hack)
data._hackersonly["max_inner_mate_distance"] = int(np.math.ceil(hack))
else:
## If something fsck then set a relatively conservative distance
data._hackersonly["max_inner_mate_distance"] = 300
LOGGER.debug("inner mate distance for {} - {}".format(sample.name,\
data._hackersonly["max_inner_mate_distance"])) |
def bedtools_merge(data, sample):
"""
Get all contiguous genomic regions with one or more overlapping
reads. This is the shell command we'll eventually run
bedtools bamtobed -i 1A_0.sorted.bam | bedtools merge [-d 100]
-i <input_bam> : specifies the input file to bed'ize
-d <int> : For PE set max distance between reads
"""
LOGGER.info("Entering bedtools_merge: %s", sample.name)
mappedreads = os.path.join(data.dirs.refmapping,
sample.name+"-mapped-sorted.bam")
## command to call `bedtools bamtobed`, and pipe output to stdout
## Usage: bedtools bamtobed [OPTIONS] -i <bam>
## Usage: bedtools merge [OPTIONS] -i <bam>
cmd1 = [ipyrad.bins.bedtools, "bamtobed", "-i", mappedreads]
cmd2 = [ipyrad.bins.bedtools, "merge", "-i", "-"]
## If PE the -d flag to tell bedtools how far apart to allow mate pairs.
## If SE the -d flag is negative, specifying that SE reads need to
## overlap by at least a specific number of bp. This prevents the
## stairstep syndrome when a + and - read are both extending from
## the same cutsite. Passing a negative number to `merge -d` gets this done.
if 'pair' in data.paramsdict["datatype"]:
check_insert_size(data, sample)
cmd2.insert(2, str(data._hackersonly["max_inner_mate_distance"]))
cmd2.insert(2, "-d")
else:
cmd2.insert(2, str(-1 * data._hackersonly["min_SE_refmap_overlap"]))
cmd2.insert(2, "-d")
## pipe output from bamtobed into merge
LOGGER.info("stdv: bedtools merge cmds: %s %s", cmd1, cmd2)
proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc1.stdout)
result = proc2.communicate()[0]
proc1.stdout.close()
## check for errors and do cleanup
if proc2.returncode:
raise IPyradWarningExit("error in %s: %s", cmd2, result)
## Write the bedfile out, because it's useful sometimes.
if os.path.exists(ipyrad.__debugflag__):
with open(os.path.join(data.dirs.refmapping, sample.name + ".bed"), 'w') as outfile:
outfile.write(result)
## Report the number of regions we're returning
nregions = len(result.strip().split("\n"))
LOGGER.info("bedtools_merge: Got # regions: %s", nregions)
return result |
def trim_reference_sequence(fasta):
"""
If doing PE and R1/R2 don't overlap then the reference sequence
will be quite long and will cause indel hell during the
alignment stage. Here trim the reference sequence to the length
of the merged reads. Input is a list of alternating locus labels
and sequence data. The first locus label is the reference
sequence label and the first seq is the reference seq. Returns
the same list except with the reference sequence trimmed to
the length of the rad tags
"""
LOGGER.debug("pre - {}".format(fasta[0]))
## If the reads are merged then the reference sequence should be the
## same length as the merged pair. If unmerged then we have to fix it.
if "nnnn" in fasta[1]:
r1_len = len(fasta[1].split("\n")[1].split("nnnn")[0])
r2_len = len(fasta[1].split("\n")[1].split("nnnn")[1])
new_seq = fasta[0].split("\n")[1][:r1_len]+("nnnn")\
+ revcomp(fasta[0].split("\n")[1][-r2_len:])
fasta[0] = fasta[0].split("\n")[0]+"\n"+new_seq
LOGGER.debug("post - {}".format(fasta[0]))
return fasta |
def bam_region_to_fasta(data, sample, proc1, chrom, region_start, region_end):
"""
Take the chromosome position, and start and end bases and return sequences
of all reads that overlap these sites. This is the command we're building:
samtools view -b 1A_sorted.bam 1:116202035-116202060 | \
samtools bam2fq <options> -
-b : output bam format
-0 : For SE, output all reads to this file
-1/-2 : For PE, output first and second reads to different files
- : Tell samtools to read in from the pipe
Write out the sam output and parse it to return as fasta for clust.gz file.
We also grab the reference sequence with a @REF header to aid in alignment
for single-end data. This will be removed post-alignment.
"""
## output bam file handle for storing genome regions
bamf = sample.files.mapped_reads
if not os.path.exists(bamf):
raise IPyradWarningExit(" file not found - %s", bamf)
# chrom = re.escape(repr(chrom))[1:-1].replace('\\\\', '\\')
#LOGGER.info("before: %s", chrom)
chrom.replace("|", r"\|")
chrom.replace("(", r"\(")
chrom.replace(")", r"\)")
#LOGGER.info("after: %s", chrom)
## What we want to do is have the num-chrom dict as an arg, then build this
## string as three ints [chrom-int, pos-start, pos-end]
#cint = cdict[chrom]
#cpstring = "__{}_{}_{}__".format(cint, int(region_start)+1, region_end)
## a string argument as input to commands, indexed at either 0 or 1,
## and with pipe characters removed from chromo names
## rstring_id1 is for fetching the reference sequence bcz faidx is
## 1 indexed
rstring_id1 = "{}:{}-{}"\
.format(chrom, str(int(region_start)+1), region_end)
## rstring_id0 is just for printing out the reference CHROM/POS
## in the read name
rstring_id0 = "{}:{}-{}"\
.format(chrom, region_start, region_end)
## If SE then we enforce the minimum overlap distance to avoid the
## staircase syndrome of multiple reads overlapping just a little.
overlap_buffer = 0
if not "pair" in data.paramsdict["datatype"]:
overlap_buffer = data._hackersonly["min_SE_refmap_overlap"]
## rstring_id0_buffered is for samtools view. We have to play patty
## cake here with the two rstring_id0s because we want `view` to
## enforce the buffer for SE, but we want the reference sequence
## start and end positions to print correctly for downstream.
rstring_id0_buffered = "{}:{}-{}"\
.format(chrom, int(region_start) + overlap_buffer,\
int(region_end) - overlap_buffer)
## The "samtools faidx" command will grab this region from reference
## which we'll paste in at the top of each stack to aid alignment.
cmd1 = [ipyrad.bins.samtools, "faidx",
data.paramsdict["reference_sequence"],
rstring_id1, " ; echo __done__"]
## Call the command, I found that it doesn't work with shell=False if
## the refstring is quoted like 'MT':100-200, but it works if it is MT:100-200.
LOGGER.info("Grabbing bam_region_to_fasta:\n {}".format(cmd1))
#proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
#ref = proc1.communicate()[0]
#if proc1.returncode:
# raise IPyradWarningExit(" error in %s: %s", cmd1, ref)
## push the samtools faidx command to our subprocess, then accumulate
## the results from stdout
print(" ".join(cmd1), file=proc1.stdin)
ref = ""
for line in iter(proc1.stdout.readline, "//\n"):
if "__done__" in line:
break
ref += line
## initialize the fasta list.
fasta = []
## parse sam to fasta. Save ref location to name.
## Set size= an improbably large value so the REF sequence
## sorts to the top for muscle aligning.
try:
name, seq = ref.strip().split("\n", 1)
seq = "".join(seq.split("\n"))
fasta = ["{}_REF;size={};+\n{}".format(name, 1000000, seq)]
except ValueError as inst:
LOGGER.error("ref failed to parse - {}".format(ref))
LOGGER.error(" ".join(cmd1))
## if PE then you have to merge the reads here
if "pair" in data.paramsdict["datatype"]:
## PE R1 can either be on the forward or the reverse strand.
## Samtools view always outputs reads with respect to the
## forward strand. This means that reads with R1 on reverse
## end up with the R1 and R2 reference sequences swapped
## in the clust.gz file. There is a way to fix it but it's
## very annoying and i'm not sure if it's worth it...
## Drop the reference sequence for now...
##
## If you ever fix this be sure to remove the reference sequence
## from each cluster post alignment in cluster_within/align_and_parse()
fasta = []
## Create temporary files for R1, R2 and merged, which we will pass to
## the function merge_pairs() which calls vsearch to test merging.
##
## If you are on linux then creating the temp files in /dev/shm
## should improve performance
if os.path.exists("/dev/shm"):
prefix = os.path.join("/dev/shm",
"{}-{}".format(sample.name, rstring_id0))
else:
prefix = os.path.join(data.dirs.refmapping,
"{}-{}".format(sample.name, rstring_id0))
read1 = "{}-R1".format(prefix)
read2 = "{}-R2".format(prefix)
merged = "{}-merged".format(prefix)
## Grab all the reads that map to this genomic location and dump
## fastq to R1 and R2 files.
## `-v 45` sets the default qscore to something high
cmd1 = " ".join([ipyrad.bins.samtools, "view", "-b", bamf, rstring_id0])
cmd2 = " ".join([ipyrad.bins.samtools, "bam2fq", "-v", "45", "-1", read1, "-2", read2, "-", "; echo __done__"])
cmd = " | ".join([cmd1, cmd2])
print(cmd, file=proc1.stdin)
for line in iter(proc1.stdout.readline, "//\n"):
if "__done__" in line:
break
## run commands, pipe 1 -> 2, then cleanup
## proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
## proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE, stdin=proc1.stdout)
## res = proc2.communicate()[0]
## if proc2.returncode:
## raise IPyradWarningExit("error {}: {}".format(cmd2, res))
## proc1.stdout.close()
## merge the pairs. 0 means don't revcomp bcz samtools already
## did it for us. 1 means "actually merge".
try:
## return number of merged reads, writes merged data to 'merged'
## we don't yet do anything with the returned number of merged
_ = merge_pairs(data, [(read1, read2)], merged, 0, 1)
with open(merged, 'r') as infile:
quatro = itertools.izip(*[iter(infile)]*4)
while 1:
try:
bits = quatro.next()
except StopIteration:
break
## TODO: figure out a real way to get orientation for PE
orient = "+"
fullfast = ">{a};{b};{c};{d}\n{e}".format(
a=bits[0].split(";")[0],
b=rstring_id1,
c=bits[0].split(";")[1],
d=orient,
e=bits[1].strip())
#,e=bits[9])
fasta.append(fullfast)
## TODO: If you ever figure out a good way to get the reference
## sequence included w/ PE then this commented call is useful
## for trimming the reference sequence to be the right length.
## If doing PE and R1/R2 don't overlap then the reference sequence
## will be quite long and will cause indel hell during the
## alignment stage. Here trim the reference sequence to the length
## of the merged reads.
## This is commented out because we aren't currently including the
## ref seq for PE alignment.
#fasta = trim_reference_sequence(fasta)
except (OSError, ValueError, IPyradError) as inst:
## ValueError raised inside merge_pairs() if it can't open one
## or both of the files. Write this out, but ignore for now.
## Failed merging, probably unequal number of reads in R1 and R2
## IPyradError raised if merge_pairs can't read either R1 or R2
## file.
## Skip this locus?
LOGGER.debug("Failed to merge reads, continuing; %s", inst)
LOGGER.error("cmd - {}".format(cmd))
return ""
finally:
## Only clean up the files if they exist otherwise it'll raise.
if os.path.exists(merged):
os.remove(merged)
if os.path.exists(read1):
os.remove(read1)
if os.path.exists(read2):
os.remove(read2)
else:
try:
## SE is faster than PE because it skips writing intermediate files
## rstring_id0_buffered is going to enforce the required
## min_SE_refmap_overlap on either end of this region.
cmd2 = [ipyrad.bins.samtools, "view", bamf, rstring_id0_buffered]
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)
## run and check outputs
res = proc2.communicate()[0]
if proc2.returncode:
raise IPyradWarningExit("{} {}".format(cmd2, res))
## if the region string is malformated you'll get back a warning
## from samtools
if "[main_samview]" in res:
raise IPyradError("Bad reference region {}".format(rstring_id0_buffered))
## parse each sam record into a fasta entry
for line in res.strip().split("\n"):
bits = line.split("\t")
## Read in the 2nd field (FLAG), convert it to a 12-bit binary string
## and test whether the reverse-strand bit (0x10) is set, which
## indicates the read was mapped to the reverse strand
orient = "+"
if int('{0:012b}'.format(int(bits[1]))[7]):
orient = "-"
## Don't actually revcomp the sequence because samtools
## writes out reference sequence on the forward strand
## as well as reverse strand hits from the bam file.
#bits[9] = revcomp(bits[9])
## Insert the mapping position between the seq label and
## the vsearch derep size.
fullfast = ">{a};{b};{c};{d}\n{e}".format(
a=bits[0].split(";")[0],
b=rstring_id0,
c=bits[0].split(";")[1],
d=orient,
e=bits[9])
fasta.append(fullfast)
except IPyradError as inst:
## If the mapped fragment is too short then you'll get
## regions that look like this: scaffold262:299039-299036
## Just carry on, it's not a big deal.
LOGGER.debug("Got a bad region string: {}".format(inst))
return ""
except (OSError, ValueError, Exception) as inst:
## Once in a blue moon something goes wrong and it breaks the
## assembly. No reason to give up if .001% of reads fail
## so just skip this locus.
LOGGER.error("Failed get reads at a locus, continuing; %s", inst)
LOGGER.error("cmd - {}".format(cmd2))
return ""
return "\n".join(fasta) |
def refmap_stats(data, sample):
"""
Get the number of mapped and unmapped reads for a sample
and update sample.stats
"""
## shorter names
mapf = os.path.join(data.dirs.refmapping, sample.name+"-mapped-sorted.bam")
umapf = os.path.join(data.dirs.refmapping, sample.name+"-unmapped.bam")
## get from unmapped
cmd1 = [ipyrad.bins.samtools, "flagstat", umapf]
proc1 = sps.Popen(cmd1, stderr=sps.STDOUT, stdout=sps.PIPE)
result1 = proc1.communicate()[0]
## get from mapped
cmd2 = [ipyrad.bins.samtools, "flagstat", mapf]
proc2 = sps.Popen(cmd2, stderr=sps.STDOUT, stdout=sps.PIPE)
result2 = proc2.communicate()[0]
## store results
## If PE, samtools reports the _actual_ number of reads mapped, both
## R1 and R2, so here if PE divide the results by 2 to stay consistent
## with how we've been reporting R1 and R2 as one "read pair"
if "pair" in data.paramsdict["datatype"]:
sample.stats["refseq_unmapped_reads"] = int(result1.split()[0]) / 2
sample.stats["refseq_mapped_reads"] = int(result2.split()[0]) / 2
else:
sample.stats["refseq_unmapped_reads"] = int(result1.split()[0])
sample.stats["refseq_mapped_reads"] = int(result2.split()[0])
sample_cleanup(data, sample) |
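## Minimal sketch of the flagstat parse above, assuming `samtools` is on PATH
## and `bam_path` is a placeholder for any BAM file. The first
## whitespace-delimited field of flagstat output is the total record count,
## which is what gets stored in sample.stats.
import subprocess as sps

def count_flagstat_records(bam_path):
    res = sps.Popen(["samtools", "flagstat", bam_path],
                    stderr=sps.STDOUT, stdout=sps.PIPE).communicate()[0]
    return int(res.split()[0]) |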
def refmap_init(data, sample, force):
""" create some file handles for refmapping """
## make some persistent file handles for the refmap reads files
sample.files.unmapped_reads = os.path.join(data.dirs.edits,
"{}-refmap_derep.fastq".format(sample.name))
sample.files.mapped_reads = os.path.join(data.dirs.refmapping,
"{}-mapped-sorted.bam".format(sample.name)) |
def parse_command_line():
""" Parse CLI args. Only three options now. """
## create the parser
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog="""
* Example command-line usage ----------------------------------------------
* Read in sequence/SNP data file, provide linkage, output name, ambig option.
tetrad -s data.snps.phy -n test ## input phylip and give name
tetrad -s data.snps.phy -l data.snps.map ## sample one SNP per locus
tetrad -s data.snps.phy -n noambigs -r 0 ## do not use hetero sites
* Load saved/checkpointed analysis from '.tet.json' file, or force restart.
tetrad -j test.tet.json -b 100 ## continue 'test' until 100 boots
tetrad -j test.tet.json -b 100 -f ## force restart of 'test'
* Sampling modes: 'equal' uses guide tree to sample quartets more efficiently
tetrad -s data.snps.phy -m all ## sample all quartets
tetrad -s data.snps.phy -m random -q 1e6 -x 123 ## sample 1M randomly
tetrad -s data.snps.phy -m equal -q 1e6 -t guide.tre ## sample 1M across tree
* Connect to N cores on a computer (default without -c arg is to use all avail.)
tetrad -s data.snps.phy -c 20
* Start an MPI cluster to connect to nodes across multiple available hosts.
tetrad -s data.snps.phy --MPI
* Connect to a manually started ipcluster instance with default or named profile
tetrad -s data.snps.phy --ipcluster ## connects to default profile
tetrad -s data.snps.phy --ipcluster pname ## connects to profile='pname'
* Further documentation: http://ipyrad.readthedocs.io/analysis.html
""")
## get version from ipyrad
ipyversion = str(pkg_resources.get_distribution('ipyrad'))
parser.add_argument('-v', '--version', action='version',
version="tetrad "+ipyversion.split()[1])
parser.add_argument('-f', "--force", action='store_true',
help="force overwrite of existing data")
parser.add_argument('-s', metavar="seq", dest="seq",
type=str, default=None,
help="path to input phylip file (only SNPs)")
parser.add_argument('-j', metavar='json', dest="json",
type=str, default=None,
help="load checkpointed/saved analysis from JSON file.")
parser.add_argument('-m', metavar="method", dest="method",
type=str, default="all",
help="method for sampling quartets (all, random, or equal)")
parser.add_argument('-q', metavar="nquartets", dest="nquartets",
type=int, default=0,
help="number of quartets to sample (if not -m all)")
parser.add_argument('-b', metavar="boots", dest="boots",
type=int, default=0,
help="number of non-parametric bootstrap replicates")
parser.add_argument('-l', metavar="map_file", dest="map",
type=str, default=None,
help="map file of snp linkages (e.g., ipyrad .snps.map)")
parser.add_argument('-r', metavar="resolve", dest='resolve',
type=int, default=1,
help="randomly resolve heterozygous sites (default=1)")
parser.add_argument('-n', metavar="name", dest="name",
type=str, default="test",
help="output name prefix (default: 'test')")
parser.add_argument('-o', metavar="workdir", dest="workdir",
type=str, default="./analysis-tetrad",
help="output directory (default: creates ./analysis-tetrad)")
parser.add_argument('-t', metavar="starting_tree", dest="tree",
type=str, default=None,
help="newick file starting tree for equal splits sampling")
parser.add_argument("-c", metavar="CPUs/cores", dest="cores",
type=int, default=0,
help="setting -c improves parallel efficiency with --MPI")
parser.add_argument("-x", metavar="random_seed", dest="rseed",
type=int, default=None,
help="random seed for quartet sampling and/or bootstrapping")
parser.add_argument('-d', "--debug", action='store_true',
help="print lots more info to debugger: ipyrad_log.txt.")
parser.add_argument("--MPI", action='store_true',
help="connect to parallel CPUs across multiple nodes")
parser.add_argument("--invariants", action='store_true',
help="save a (large) database of all invariants")
parser.add_argument("--ipcluster", metavar="ipcluster", dest="ipcluster",
type=str, nargs="?", const="default",
help="connect to ipcluster profile (default: 'default')")
## if no args then return help message
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
## parse args
args = parser.parse_args()
## RAISE errors right away for some bad argument combinations:
if args.method not in ["random", "equal", "all"]:
raise IPyradWarningExit(" method argument (-m) must be one of"+\
""" "all", "random", or "equal.\n""")
## if 'random' require nquarts argument
#if args.method == 'random':
# if not args.nquartets:
# raise IPyradWarningExit(\
# " Number of quartets (-q) is required with method = random\n")
## if 'equal' method require starting tree and nquarts
# if args.method == 'equal':
# raise IPyradWarningExit(\
# " The equal sampling method is currently for developers only.\n")
# if not args.nquartets:
# raise IPyradWarningExit(\
# " Number of quartets (-q) is required with method = equal\n")
# if not args.tree:
# raise IPyradWarningExit(\
# " Input guide tree (-t) is required with method = equal\n")
## required args
if not any([args.seq, args.json]):
print("""
Bad arguments: tetrad command must include at least one of (-s or -j)
""")
parser.print_help()
sys.exit(1)
return args |
def main():
""" main function """
## parse params file input (returns to stdout if --help or --version)
args = parse_command_line()
print(HEADER.format(ip.__version__))
## set random seed
np.random.seed(args.rseed)
## debugger----------------------------------------
if os.path.exists(ip.__debugflag__):
os.remove(ip.__debugflag__)
if args.debug:
print("\n ** Enabling debug mode ** ")
ip._debug_on()
## if JSON, load existing Tetrad analysis -----------------------
if args.json:
data = ipa.tetrad(name=args.name, workdir=args.workdir, load=True)
## if force then remove all results
if args.force:
data._refresh()
## else create a new tmp assembly for the seqarray-----------------
else:
## create new Tetrad class Object if it doesn't exist
newjson = os.path.join(args.workdir, args.name+'.tet.json')
## if not quiet...
print("tetrad instance: {}".format(args.name))
if (not os.path.exists(newjson)) or args.force:
## purge any files associated with this name if forced
if args.force:
## init an object in the correct location just to refresh
ipa.tetrad(name=args.name,
workdir=args.workdir,
data=args.seq,
initarr=False,
save_invariants=args.invariants,
cli=True,
quiet=True)._refresh()
## create new tetrad object
data = ipa.tetrad(name=args.name,
workdir=args.workdir,
method=args.method,
data=args.seq,
resolve=args.resolve,
mapfile=args.map,
guidetree=args.tree,
nboots=args.boots,
nquartets=args.nquartets,
cli=True,
save_invariants=args.invariants,
)
else:
raise SystemExit(QUARTET_EXISTS\
.format(args.name, args.workdir, args.workdir, args.name, args.name))
## boots can be set either for a new object or loaded JSON to continue it
if args.boots:
data.params.nboots = int(args.boots)
## if ipyclient is running (and matched profile) then use that one
if args.ipcluster:
ipyclient = ipp.Client(profile=args.ipcluster)
data._ipcluster["cores"] = len(ipyclient)
## if not then we need to register and launch an ipcluster instance
else:
## set CLI ipcluster terms
ipyclient = None
data._ipcluster["cores"] = args.cores if args.cores else detect_cpus()
data._ipcluster["engines"] = "Local"
if args.MPI:
data._ipcluster["engines"] = "MPI"
if not args.cores:
raise IPyradWarningExit("must provide -c argument with --MPI")
## register to have a cluster-id with "ip- name"
data = register_ipcluster(data)
## message about whether we are continuing from existing
if data.checkpoint.boots:
print(LOADING_MESSAGE.format(
data.name, data.params.method, data.checkpoint.boots))
## run tetrad main function within a wrapper. The wrapper creates an
## ipyclient view and appends to the list of arguments to run 'run'.
data.run(force=args.force, ipyclient=ipyclient) |
def _command_list(self):
""" build the command list """
## base args
cmd = [self.params.binary,
"-i", OPJ(self.workdir, self.name+".treemix.in.gz"),
"-o", OPJ(self.workdir, self.name),
]
## addon params
args = []
for key, val in self.params:
if key not in ["minmap", "binary"]:
if key == "g":
if val[0]:
args += ["-"+key, str(val[0]), str(val[1])]
elif key == "global_":
if val:
args += ["-"+key[:-1]]
elif key in ["se", "global", "noss"]:
if val:
args += ["-"+key]
else:
if val:
args += ["-"+key, str(val)]
return cmd+args |
def _subsample(self):
""" returns a subsample of unlinked snp sites """
spans = self.maparr
samp = np.zeros(spans.shape[0], dtype=np.uint64)
for i in xrange(spans.shape[0]):
samp[i] = np.random.randint(spans[i, 0], spans[i, 1], 1)
return samp |
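## Tiny worked example of the unlinked-SNP subsampling above. `spans` holds
## the column range of each locus in the SNP array (randint's upper bound is
## exclusive) and one random column index is drawn per locus; the values
## below are made up.
import numpy as np

spans = np.array([[0, 3], [3, 7], [7, 8]], dtype=np.uint64)
samp = np.zeros(spans.shape[0], dtype=np.uint64)
for i in range(spans.shape[0]):
    samp[i] = np.random.randint(spans[i, 0], spans[i, 1])
## e.g., samp -> array([1, 5, 7], dtype=uint64), one SNP per locus |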
def copy(self, name):
"""
Returns a copy of the treemix object with the same parameter settings
but with the files attributes cleared, and with a new 'name' attribute.
Parameters
----------
name (str):
A name for the new copied treemix object that will be used for the
output files created by the object.
"""
## make deepcopy of self.__dict__ but do not copy async objects
subdict = {i:j for i, j in self.__dict__.iteritems() if i != "asyncs"}
newdict = copy.deepcopy(subdict)
## make back into a Treemix object
#if name == self.name:
# raise Exception("new object name must be different from its parent")
newobj = Treemix(
data=newdict["data"],
name=name,
workdir=newdict["workdir"],
imap={i:j for i, j in newdict["imap"].items()},
mapfile=newdict['mapfile'],
minmap={i:j for i, j in newdict["minmap"].items()},
seed=np.random.randint(0, int(1e9)),
)
## update special dict attributes but not files
for key, val in newobj.params.__dict__.iteritems():
newobj.params.__setattr__(key, self.params.__getattribute__(key))
#for key, val in newobj.filters.__dict__.iteritems():
# newobj.filters.__setattr__(key, self.filters.__getattribute__(key))
## new object must have a different name than its parent
return newobj |
def draw(self, axes):
"""
Returns a treemix plot on a toyplot.axes object.
"""
## create a toytree object from the treemix tree result
tre = toytree.tree(newick=self.results.tree)
tre.draw(
axes=axes,
use_edge_lengths=True,
tree_style='c',
tip_labels_align=True,
edge_align_style={"stroke-width": 1}
);
## get coords
for admix in self.results.admixture:
## parse admix event
pidx, pdist, cidx, cdist, weight = admix
a = _get_admix_point(tre, pidx, pdist)
b = _get_admix_point(tre, cidx, cdist)
## add line for admixture edge
mark = axes.plot(
a = (a[0], b[0]),
b = (a[1], b[1]),
style={"stroke-width": 10*weight,
"stroke-opacity": 0.95,
"stroke-linecap": "round"}
)
## add points at admixture sink
axes.scatterplot(
a = (b[0]),
b = (b[1]),
size=8,
title="weight: {}".format(weight),
)
## add scale bar for edge lengths
axes.y.show=False
axes.x.ticks.show=True
axes.x.label.text = "Drift parameter"
return axes |
def _resolveambig(subseq):
"""
Randomly resolves iupac hetero codes. This is a shortcut
for now, we could instead use the phased alleles in RAD loci.
"""
N = []
for col in subseq:
rand = np.random.binomial(1, 0.5)
N.append([_AMBIGS[i][rand] for i in col])
return np.array(N) |
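## Illustrative sketch of the resolution above with a hypothetical subset of
## the module's _AMBIGS table: each IUPAC hetero code maps to its two
## possible bases and a single coin flip per iterated row picks which one
## to keep. The input rows are made up.
import numpy as np

_AMBIGS_DEMO = {"R": ("G", "A"), "Y": ("T", "C"),
                "A": ("A", "A"), "C": ("C", "C"),
                "G": ("G", "G"), "T": ("T", "T")}

resolved = []
for row in [["A", "R", "T"], ["Y", "C", "G"]]:
    rand = np.random.binomial(1, 0.5)
    resolved.append([_AMBIGS_DEMO[base][rand] for base in row])
resolved = np.array(resolved)
## e.g., [['A' 'G' 'T'], ['T' 'C' 'G']] depending on the coin flips |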
def _count_PIS(seqsamp, N):
""" filters for loci with >= N PIS """
counts = [Counter(col) for col in seqsamp.T if not ("-" in col or "N" in col)]
pis = [i.most_common(2)[1][1] > 1 for i in counts if len(i.most_common(2))>1]
if sum(pis) >= N:
return sum(pis)
else:
return 0 |
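## Worked example of the PIS filter above: a column is parsimony informative
## when its second most common base occurs more than once; columns containing
## "-" or "N" are skipped entirely. The alignment below is made up.
import numpy as np
from collections import Counter

seqsamp = np.array([list("AACCT"),
                    list("AACGT"),
                    list("AGCGT"),
                    list("AGCGT")])
counts = [Counter(col) for col in seqsamp.T if not ("-" in col or "N" in col)]
pis = [i.most_common(2)[1][1] > 1 for i in counts if len(i.most_common(2)) > 1]
## column 1 (A/A/G/G) is informative, column 3 (C/G/G/G) is a singleton SNP,
## so sum(pis) == 1: the locus passes for N == 1 but fails for N >= 2.
assert sum(pis) == 1 |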
def write_nexus_files(self, force=False, quiet=False):
"""
Write nexus files to {workdir}/{name}/[0-N].nex. If the directory already
exists an exception will be raised unless you use the force flag which
will remove all files in the directory.
Parameters:
-----------
force (bool):
If True then all files in {workdir}/{name}/*.nex* will be removed.
"""
## clear existing files
existing = glob.glob(os.path.join(self.workdir, self.name, "*.nex"))
if any(existing):
if force:
for rfile in existing:
os.remove(rfile)
else:
path = os.path.join(self.workdir, self.name)
raise IPyradWarningExit(EXISTING_NEX_FILES.format(path))
## parse the loci or alleles file
with open(self.files.data) as infile:
loci = iter(infile.read().strip().split("|\n"))
## use entered samples or parse them from the file
if not self.samples:
with open(self.files.data) as infile:
samples = set((i.split()[0] for i in infile.readlines() \
if "//" not in i))
else:
samples = set(self.samples)
## keep track of how many loci pass filtering
totn = len(samples)
nloci = 0
## this set is just used for matching, then we randomly
## subsample for real within the locus so it varies
if self._alleles:
msamples = {i+rbin() for i in samples}
else:
msamples = samples
## write subsampled set of loci
for loc in loci:
## get names and seqs from locus
dat = loc.split("\n")[:-1]
try:
names = [i.split()[0] for i in dat]
snames = set(names)
seqs = np.array([list(i.split()[1]) for i in dat])
except IndexError:
print(ALLELESBUGFIXED)
continue
## check name matches
if len(snames.intersection(msamples)) == totn:
## prune sample names if alleles. Done here so it is randomly
## different in every locus which allele is selected from
## each sample (e.g., 0 or 1)
if self._alleles:
_samples = [i+rbin() for i in samples]
else:
_samples = samples
## re-order seqs to be in set order
seqsamp = seqs[[names.index(tax) for tax in _samples]]
## resolve ambiguities randomly if .loci file otherwise
## sample one of the alleles if .alleles file.
if not self._alleles:
seqsamp = _resolveambig(seqsamp)
## find parsimony informative sites
if _count_PIS(seqsamp, self.params.minsnps):
## keep the locus
nloci += 1
## remove empty columns given this sampling
copied = seqsamp.copy()
copied[copied == "-"] = "N"
rmcol = np.all(copied == "N", axis=0)
seqsamp = seqsamp[:, ~rmcol]
## write nexus file
if self._alleles:
## trim off the allele number
samps = [i.rsplit("_", 1)[0] for i in _samples]
mdict = dict(zip(samps, [i.tostring() for i in seqsamp]))
else:
mdict = dict(zip(_samples, [i.tostring() for i in seqsamp]))
self._write_nex(mdict, nloci)
## quit early if using maxloci
if nloci == self.params.maxloci:
break
## print data size
if not quiet:
path = os.path.join(self.workdir, self.name)
path = path.replace(os.path.expanduser("~"), "~")
print("wrote {} nexus files to {}".format(nloci, path)) |
def run(self, steps=None, ipyclient=None, force=False, quiet=False):
"""
Submits an ordered list of jobs to a load-balancer to complete
the following tasks, and reports a progress bar:
(1) Write nexus files for each locus
(2) Run mrBayes on each locus to get a posterior of gene trees
(3) Run mbsum (a bucky tool) on the posterior set of trees
(4) Run Bucky on the summarized set of trees for all alpha values.
Parameters:
-----------
ipyclient (ipyparallel.Client())
A connected ipyparallel Client object used to distribute jobs
force (bool):
Whether to overwrite existing files with the same name and workdir
if they exist. Default is False.
quiet (bool):
Whether to suppress progress information. Default is False.
steps (list):
A list of integers of steps to perform. This is useful if a
job was interrupted, or you created a new bucky object copy,
or you wish to run an analysis under a new set of parameters,
after having run it once. For example, if you finished running
steps 1 and 2 (write nexus files and infer mrbayes posteriors),
but you want to rerun steps 3 and 4 with new settings, then you
could enter `steps=[3,4]` and also `force=True` to run steps 3
and 4 with a new set of parameters. Default argument is None
which means run all steps.
"""
## require ipyclient
if not ipyclient:
raise IPyradWarningExit("an ipyclient object is required")
## check the steps argument
if not steps:
steps = [1, 2, 3, 4]
if isinstance(steps, (int, str)):
steps = [int(i) for i in [steps]]
if isinstance(steps, list):
if not all(isinstance(i, int) for i in steps):
raise IPyradWarningExit("steps must be a list of integers")
## run steps ------------------------------------------------------
## TODO: wrap this function so it plays nice when interrupted.
if 1 in steps:
self.write_nexus_files(force=force, quiet=quiet)
if 2 in steps:
self.run_mrbayes(force=force, quiet=quiet, ipyclient=ipyclient)
if 3 in steps:
self.run_mbsum(force=force, quiet=quiet, ipyclient=ipyclient)
if 4 in steps:
self.run_bucky(force=force, quiet=quiet, ipyclient=ipyclient)
## make sure jobs are done if waiting (TODO: maybe make this optional)
ipyclient.wait() |
def _write_nex(self, mdict, nlocus):
"""
function that takes a dictionary mapping names to sequences,
and a locus number, and writes it as a NEXUS file with a mrbayes
analysis block given a set of mcmc arguments.
"""
## create matrix as a string
max_name_len = max([len(i) for i in mdict])
namestring = "{:<" + str(max_name_len+1) + "} {}\n"
matrix = ""
for i in mdict.items():
matrix += namestring.format(i[0], i[1])
## ensure dir
minidir = os.path.realpath(os.path.join(self.workdir, self.name))
if not os.path.exists(minidir):
os.makedirs(minidir)
## write nexus block
handle = os.path.join(minidir, "{}.nex".format(nlocus))
with open(handle, 'w') as outnex:
outnex.write(NEXBLOCK.format(**{
"ntax": len(mdict),
"nchar": len(mdict.values()[0]),
"matrix": matrix,
"ngen": self.params.mb_mcmc_ngen,
"sfreq": self.params.mb_mcmc_sample_freq,
"burnin": self.params.mb_mcmc_burnin,
})) |
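## Small sketch of the name-padded matrix block built above: each sample name
## is left-justified to the longest name plus one column so the sequences
## line up. The name-to-sequence mapping is made up.
mdict_demo = {"sampleA": "ACGT", "longname_1": "ACGA"}
max_name_len = max(len(i) for i in mdict_demo)
namestring = "{:<" + str(max_name_len + 1) + "} {}\n"
matrix = ""
for name, seq in mdict_demo.items():
    matrix += namestring.format(name, seq)
## matrix now holds one "name<padding> sequence" line per sample, ready to be
## dropped into the NEXBLOCK template's {matrix} slot. |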
def run_mbsum(self, ipyclient, force=False, quiet=False):
"""
Sums two replicate mrbayes runs for each locus
"""
minidir = os.path.realpath(os.path.join(self.workdir, self.name))
trees1 = glob.glob(os.path.join(minidir, "*.run1.t"))
trees2 = glob.glob(os.path.join(minidir, "*.run2.t"))
## clear existing files
existing = glob.glob(os.path.join(self.workdir, self.name, "*.sumt"))
if any(existing):
if force:
for rfile in existing:
os.remove(rfile)
else:
path = os.path.join(self.workdir, self.name)
raise IPyradWarningExit(EXISTING_SUMT_FILES.format(path))
## load balancer
lbview = ipyclient.load_balanced_view()
## submit each to be processed
asyncs = []
for tidx in xrange(len(trees1)):
rep1 = trees1[tidx]
rep2 = trees2[tidx]
outname = os.path.join(minidir, str(tidx)+".sumt")
async = lbview.apply(_call_mbsum, *(rep1, rep2, outname))
asyncs.append(async)
## track progress
start = time.time()
printstr = "[mbsum] sum replicate runs | {} | "
while 1:
ready = [i.ready() for i in asyncs]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if not quiet:
progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer="")
if len(ready) == sum(ready):
if not quiet:
print("")
break
else:
time.sleep(0.1)
## check success
for async in asyncs:
if not async.successful():
raise IPyradWarningExit(async.result()) |
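## Generic, self-contained sketch of the polling loop shared by the run_*
## methods above: poll a list of handles exposing .ready() and spin until all
## are finished. _FakeJob stands in for an ipyparallel AsyncResult here.
import time

class _FakeJob(object):
    def __init__(self, start, delay):
        self.start, self.delay = start, delay
    def ready(self):
        return (time.time() - self.start) > self.delay

start = time.time()
jobs = [_FakeJob(start, d) for d in (0.05, 0.10, 0.15)]
while 1:
    ready = [j.ready() for j in jobs]
    if len(ready) == sum(ready):
        break
    time.sleep(0.01)
## at this point every job reports ready, mirroring the break condition above |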
def run_mrbayes(self, ipyclient, force=False, quiet=False):
"""
calls the mrbayes block in each nexus file.
"""
## get all the nexus files for this object
minidir = os.path.realpath(os.path.join(self.workdir, self.name))
nexus_files = glob.glob(os.path.join(minidir, "*.nex"))
## clear existing files
#existing = glob.glob(os.path.join(self.workdir, self.name, "*.nex"))
existing = glob.glob(os.path.join(minidir, "*.nex.*"))
if any(existing):
if force:
for rfile in existing:
os.remove(rfile)
else:
raise IPyradWarningExit(EXISTING_NEXdot_FILES.format(minidir))
## write new nexus files, or should users do that before this?
#self.write_nexus_files(force=True)
## load balancer
lbview = ipyclient.load_balanced_view()
## submit each to be processed
asyncs = []
for nex in nexus_files:
async = lbview.apply(_call_mb, nex)
asyncs.append(async)
## track progress
start = time.time()
printstr = "[mb] infer gene-tree posteriors | {} | "
while 1:
ready = [i.ready() for i in asyncs]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if not quiet:
progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer="")
if len(ready) == sum(ready):
if not quiet:
print("")
break
else:
time.sleep(0.1)
## check success
for async in asyncs:
if not async.successful():
raise IPyradWarningExit(async.result()) |
def run_bucky(self, ipyclient, force=False, quiet=False, subname=False):
"""
Runs bucky for a given set of parameters and stores the result
to the ipa.bucky object. The results will be stored by default
with the name '{name}-{alpha}' unless an argument is passed for
'subname' to customize the output name.
Parameters:
-----------
subname (str):
A custom name prefix for the output files produced by the bucky
analysis and output into the {workdir}/{name} directory.
force (bool):
If True then existing result files with the same name prefix
will be overwritten.
quiet (bool):
If True the progress bars will be suppressed.
ipyclient (ipyparallel.Client)
An active ipyparallel client to distribute jobs to.
"""
## check for existing results files
minidir = os.path.realpath(os.path.join(self.workdir, self.name))
infiles = glob.glob(os.path.join(minidir, "*.sumt"))
outroot = os.path.realpath(os.path.join(self.workdir, self.name))
## build alpha list
if isinstance(self.params.bucky_alpha, list):
alphas = self.params.bucky_alpha
else:
alphas = [self.params.bucky_alpha]
## load balancer
lbview = ipyclient.load_balanced_view()
## submit each to be processed
asyncs = []
for alpha in alphas:
pathname = os.path.join(outroot, "CF-a"+str(alpha))
if (os.path.exists(pathname)) and (force!=True):
print("BUCKy results already exist for this object at alpha={}\n".format(alpha) +\
"use force=True to overwrite existing results")
else:
args = [
alpha,
self.params.bucky_nchains,
self.params.bucky_nreps,
self.params.bucky_niter,
pathname,
infiles]
async = lbview.apply(_call_bucky, *args)
asyncs.append(async)
## track progress
start = time.time()
printstr = "[bucky] infer CF posteriors | {} | "
while 1:
ready = [i.ready() for i in asyncs]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if not quiet:
progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer="")
if len(ready) == sum(ready):
if not quiet:
print("")
break
else:
time.sleep(0.1)
## check success
for async in asyncs:
if not async.successful():
raise IPyradWarningExit(async.result()) |
def _get_samples(self, samples):
"""
Internal function. Prelude for each step() that reads in a possibly
empty list of sample names to process and returns the corresponding
list of Sample objects."""
## if samples not entered use all samples
if not samples:
samples = self.samples.keys()
## Be nice and allow user to pass in only one sample as a string,
## rather than a one element list. When you make the string into a list
## you have to wrap it in square braces or else list makes a list of
## each character individually.
if isinstance(samples, str):
samples = list([samples])
## if sample keys, replace with sample obj
assert isinstance(samples, list), \
"to subselect samples enter as a list, e.g., [A, B]."
newsamples = [self.samples.get(key) for key in samples \
if self.samples.get(key)]
strnewsamples = [i.name for i in newsamples]
## are there any samples that did not make it into the dict?
badsamples = set(samples).difference(set(strnewsamples))
if badsamples:
outstring = ", ".join(badsamples)
raise IPyradError(\
"Unrecognized Sample name(s) not linked to {}: {}"\
.format(self.name, outstring))
## require Samples
assert newsamples, \
"No Samples passed in and none in assembly {}".format(self.name)
return newsamples |
def _name_from_file(fname, splitnames, fields):
""" internal func: get the sample name from any pyrad file """
## allowed extensions
file_extensions = [".gz", ".fastq", ".fq", ".fasta", ".clustS", ".consens"]
base, _ = os.path.splitext(os.path.basename(fname))
## remove read number from name
base = base.replace("_R1_.", ".")\
.replace("_R1_", "")\
.replace("_R1.", ".")
## remove extensions, retains '.' in file names.
while 1:
tmpb, tmpext = os.path.splitext(base)
if tmpext in file_extensions:
base = tmpb
else:
break
if fields:
namebits = base.split(splitnames)
base = []
for field in fields:
try:
base.append(namebits[field])
except IndexError:
pass
base = splitnames.join(base)
if not base:
raise IPyradError("""
Found invalid/empty filename in link_fastqs. Check splitnames argument.
""")
return base |
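## Hypothetical inputs and the names the parser above would return (paths and
## sample names are made up):
##   _name_from_file("data/wombat_R1_.fastq.gz", "_", None)    -> "wombat"
##   _name_from_file("data/A_lib1_R1_001.fastq.gz", "_", [0])  -> "A"
## With fields=[0] the cleaned name is split on "_" and only the first field
## is kept. |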
def _read_sample_names(fname):
""" Read in sample names from a plain text file. This is a convenience
function for branching so if you have tons of sample names you can
pass in a file rather than having to set all the names at the command
line.
"""
try:
with open(fname, 'r') as infile:
subsamples = [x.split()[0] for x in infile.readlines() if x.strip()]
except Exception as inst:
print("Failed to read input file with sample names.\n{}".format(inst))
raise inst
return subsamples |
def _expander(namepath):
""" expand ./ ~ and ../ designators in location names """
if "~" in namepath:
namepath = os.path.expanduser(namepath)
else:
namepath = os.path.abspath(namepath)
return namepath |
def merge(name, assemblies):
"""
Creates and returns a new Assembly object in which samples from two or more
Assembly objects with matching names are 'merged'. Merging does not affect
the actual files written on disk, but rather creates new Samples that are
linked to multiple data files, and with stats summed.
"""
## checks
assemblies = list(assemblies)
## create new Assembly as a branch (deepcopy)
merged = assemblies[0].branch(name)
## get all sample names from all Assemblies
allsamples = set(merged.samples.keys())
for iterass in assemblies[1:]:
allsamples.update(set(iterass.samples.keys()))
## Make sure we have the max of all values for max frag length
## from all merging assemblies.
merged._hackersonly["max_fragment_length"] =\
max([x._hackersonly["max_fragment_length"] for x in assemblies])
## warning message?
warning = 0
## iterate over assembly objects, skip first already copied
for iterass in assemblies[1:]:
## iterate over allsamples, add if not in merged
for sample in iterass.samples:
## iterate over stats, skip 'state'
if sample not in merged.samples:
merged.samples[sample] = copy.deepcopy(iterass.samples[sample])
## if barcodes data present then keep it
if iterass.barcodes.get(sample):
merged.barcodes[sample] = iterass.barcodes[sample]
else:
## merge stats and files of the sample
for stat in merged.stats.keys()[1:]:
merged.samples[sample].stats[stat] += \
iterass.samples[sample].stats[stat]
## merge file references into a list
for filetype in ['fastqs', 'edits']:
merged.samples[sample].files[filetype] += \
iterass.samples[sample].files[filetype]
if iterass.samples[sample].files["clusters"]:
warning += 1
## print warning if clusters or later was present in merged assembly
if warning:
print("""\
Warning: the merged Assemblies contained Samples that are identically named,
and so ipyrad has attempted to merge these Samples. This is perfectly fine to
do up until step 3, but not after, because at step 3 all reads for a Sample
should be included during clustering/mapping. Take note, you can merge Assemblies
at any step *if they do not contain the same Samples*, however, here that is not
the case. If you wish to proceed with this merged Assembly you will have to
start from step 3, therefore the 'state' of the Samples in this new merged
Assembly ({}) have been set to 2.
""".format(name))
for sample in merged.samples:
merged.samples[sample].stats.state = 2
## clear stats
for stat in ["refseq_mapped_reads", "refseq_unmapped_reads",
"clusters_total", "clusters_hidepth", "hetero_est",
"error_est", "reads_consens"]:
merged.samples[sample].stats[stat] = 0
## clear files
for ftype in ["mapped_reads", "unmapped_reads", "clusters",
"consens", "database"]:
merged.samples[sample].files[ftype] = []
## Set the values for some params that don't make sense inside
## merged assemblies
merged_names = ", ".join([x.name for x in assemblies])
merged.paramsdict["raw_fastq_path"] = "Merged: " + merged_names
merged.paramsdict["barcodes_path"] = "Merged: " + merged_names
merged.paramsdict["sorted_fastq_path"] = "Merged: " + merged_names
## return the new Assembly object
merged.save()
return merged |
def _bufcountlines(filename, gzipped):
"""
fast line counter. Used to quickly sum number of input reads when running
link_fastqs to append files. """
if gzipped:
fin = gzip.open(filename)
else:
fin = open(filename)
nlines = 0
buf_size = 1024 * 1024
read_f = fin.read # loop optimization
buf = read_f(buf_size)
while buf:
nlines += buf.count('\n')
buf = read_f(buf_size)
fin.close()
return nlines |
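## Standalone sketch of the buffered line-count idea above, run against an
## in-memory handle instead of a file so it is runnable anywhere.
import io

def _count_lines_buffered(handle, buf_size=1024 * 1024):
    nlines = 0
    buf = handle.read(buf_size)
    while buf:
        nlines += buf.count("\n")
        buf = handle.read(buf_size)
    return nlines

assert _count_lines_buffered(io.StringIO(u"@r1\nACGT\n+\nIIII\n")) == 4
## four lines == one fastq record, which is why reads_raw is nlines / 4 |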
def _zbufcountlines(filename, gzipped):
""" faster line counter """
if gzipped:
cmd1 = ["gunzip", "-c", filename]
else:
cmd1 = ["cat", filename]
cmd2 = ["wc"]
proc1 = sps.Popen(cmd1, stdout=sps.PIPE, stderr=sps.PIPE)
proc2 = sps.Popen(cmd2, stdin=proc1.stdout, stdout=sps.PIPE, stderr=sps.PIPE)
res = proc2.communicate()[0]
if proc2.returncode:
raise IPyradWarningExit("error zbufcountlines {}:".format(res))
LOGGER.info(res)
nlines = int(res.split()[0])
return nlines |
def _tuplecheck(newvalue, dtype=str):
"""
Takes a string argument and returns value as a tuple.
Needed for paramfile conversion from CLI to set_params args
"""
if isinstance(newvalue, list):
newvalue = tuple(newvalue)
if isinstance(newvalue, str):
newvalue = newvalue.rstrip(")").strip("(")
try:
newvalue = tuple([dtype(i.strip()) for i in newvalue.split(",")])
## Type error is thrown by tuple if it's applied to a non-iterable.
except TypeError:
newvalue = tuple(dtype(newvalue))
## If dtype fails to cast any element of newvalue
except ValueError:
LOGGER.info("Assembly.tuplecheck() failed to cast to {} - {}"\
.format(dtype, newvalue))
raise
except Exception as inst:
LOGGER.info(inst)
raise SystemExit(\
"\nError: Param`{}` is not formatted correctly.\n({})\n"\
.format(newvalue, inst))
return newvalue |
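## Example conversions performed by the helper above (inputs are examples):
##   _tuplecheck("(TGCAG, CCGG)")      -> ("TGCAG", "CCGG")
##   _tuplecheck("8, 8", dtype=int)    -> (8, 8)
##   _tuplecheck(["0", "-5"])          -> ("0", "-5") |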
def _paramschecker(self, param, newvalue):
""" Raises exceptions when params are set to values they should not be"""
if param == 'assembly_name':
## Make sure somebody doesn't try to change their assembly_name, bad
## things would happen. Calling set_params on assembly_name only raises
## an informative error. Assembly_name is set at Assembly creation time
## and is immutable.
raise IPyradWarningExit(CANNOT_CHANGE_ASSEMBLY_NAME)
elif param == 'project_dir':
expandpath = _expander(newvalue)
if not expandpath.startswith("/"):
if os.path.exists(expandpath):
expandpath = _expander(expandpath)
## Forbid spaces in path names
if " " in expandpath:
raise IPyradWarningExit(BAD_PROJDIR_NAME.format(expandpath))
self.paramsdict["project_dir"] = expandpath
self.dirs["project"] = expandpath
## `Merged:` in newvalue for raw_fastq_path indicates that this
## assembly is a merge of several others, so this param has no
## value for this assembly
elif param == 'raw_fastq_path':
if newvalue and not "Merged:" in newvalue:
fullrawpath = _expander(newvalue)
if os.path.isdir(fullrawpath):
raise IPyradWarningExit(RAW_PATH_ISDIR.format(fullrawpath))
## if something is found in path
elif glob.glob(fullrawpath):
self.paramsdict['raw_fastq_path'] = fullrawpath
## else allow empty, tho it can still raise an error in step1
else:
raise IPyradWarningExit(NO_RAW_FILE.format(fullrawpath))
else:
self.paramsdict['raw_fastq_path'] = ""
## `Merged:` in newvalue for barcodes_path indicates that this
## assembly is a merge of several others, so this param has no
## value for this assembly
elif param == 'barcodes_path':
## if a value was entered check that it exists
if newvalue and not "Merged:" in newvalue:
## also allow for fuzzy match in names using glob
fullbarpath = glob.glob(_expander(newvalue))[0]
## raise error if file is not found
if not os.path.exists(fullbarpath):
raise IPyradWarningExit(BARCODE_NOT_FOUND.format(fullbarpath))
else:
self.paramsdict['barcodes_path'] = fullbarpath
self._link_barcodes()
## if no path was entered then set barcodes path to empty.
## this is checked again during step 1 and will raise an error
## if you try demultiplexing without a barcodes file
else:
self.paramsdict['barcodes_path'] = newvalue
## `Merged:` in newvalue for sorted_fastq_path indicates that this
## assembly is a merge of several others, so this param has no
## value for this assembly
elif param == 'sorted_fastq_path':
if newvalue and not "Merged:" in newvalue:
fullsortedpath = _expander(newvalue)
if os.path.isdir(fullsortedpath):
raise IPyradWarningExit(SORTED_ISDIR.format(fullsortedpath))
elif glob.glob(fullsortedpath):
self.paramsdict['sorted_fastq_path'] = fullsortedpath
else:
raise IPyradWarningExit(SORTED_NOT_FOUND.format(fullsortedpath))
## if no value was entered then set to "".
else:
self.paramsdict['sorted_fastq_path'] = ""
elif param == 'assembly_method':
## TEMPORARY BLOCK ON DENOVO+REFERENCE METHOD
# if newvalue == "denovo+reference":
# raise IPyradWarningExit("""
# Error: The 'denovo+reference' method is temporarily blocked while we
# refactor it to greatly improve the speed. You can either revert to an
# older version (pre v.0.7.0) or wait for the next update to resume using
# this method.
# """)
methods = ["denovo", "reference", "denovo+reference", "denovo-reference"]
assert newvalue in methods, BAD_ASSEMBLY_METHOD.format(newvalue)
self.paramsdict['assembly_method'] = newvalue
elif param == 'reference_sequence':
if newvalue:
fullrawpath = _expander(newvalue)
if not os.path.isfile(fullrawpath):
LOGGER.info("reference sequence file not found.")
raise IPyradWarningExit(REF_NOT_FOUND.format(fullrawpath))
self.paramsdict['reference_sequence'] = fullrawpath
## if no value was entered the set to "". Will be checked again
## at step3 if user tries to do refseq and raise error
else:
self.paramsdict['reference_sequence'] = ""
elif param == 'datatype':
## list of allowed datatypes
datatypes = ['rad', 'gbs', 'ddrad', 'pairddrad',
'pairgbs', 'merged', '2brad', 'pair3rad']
## raise error if something else
if str(newvalue) not in datatypes:
raise IPyradError("""
datatype {} not recognized, must be one of: {}
""".format(newvalue, datatypes))
else:
self.paramsdict['datatype'] = str(newvalue)
## link_barcodes is called before datatypes is set
## we need to know the datatype so we can read in
## the multiplexed barcodes for 3rad. This seems
## a little annoying, but it was better than any
## alternatives I could think of.
if "3rad" in self.paramsdict['datatype'] and not \
self.paramsdict['sorted_fastq_path'].strip():
if not "Merged:" in self.paramsdict['barcodes_path']:
self._link_barcodes()
elif param == 'restriction_overhang':
newvalue = _tuplecheck(newvalue, str)
assert isinstance(newvalue, tuple), """
cut site must be a tuple, e.g., (TGCAG, '') or (TGCAG, CCGG)"""
## Handle the special case where the user has 1
## restriction overhang and does not include the trailing comma
if len(newvalue) == 1:
## for gbs users might not know to enter the second cut site
## so we do it for them.
if self.paramsdict["datatype"] == "gbs":
newvalue += newvalue
else:
newvalue += ("",)
## Handle 3rad datatype with only 3 cutters
if len(newvalue) == 3:
newvalue = (newvalue[0], newvalue[1], newvalue[2], "")
assert len(newvalue) <= 4, """
most datasets require 1 or 2 cut sites, e.g., (TGCAG, '') or (TGCAG, CCGG).
For 3rad/seqcap may be up to 4 cut sites."""
self.paramsdict['restriction_overhang'] = newvalue
elif param == 'max_low_qual_bases':
assert isinstance(int(newvalue), int), """
max_low_qual_bases must be an integer."""
self.paramsdict['max_low_qual_bases'] = int(newvalue)
elif param == 'phred_Qscore_offset':
assert isinstance(int(newvalue), int), \
"phred_Qscore_offset must be an integer."
self.paramsdict['phred_Qscore_offset'] = int(newvalue)
elif param == 'mindepth_statistical':
assert isinstance(int(newvalue), int), \
"mindepth_statistical must be an integer."
## do not allow values below 5
if int(newvalue) < 5:
raise IPyradError("""
mindepth statistical cannot be set < 5. Use mindepth_majrule.""")
else:
self.paramsdict['mindepth_statistical'] = int(newvalue)
elif param == 'mindepth_majrule':
assert isinstance(int(newvalue), int), \
"mindepth_majrule must be an integer."
self.paramsdict['mindepth_majrule'] = int(newvalue)
elif param == 'maxdepth':
self.paramsdict['maxdepth'] = int(newvalue)
elif param == 'clust_threshold':
newvalue = float(newvalue)
assert (newvalue < 1) & (newvalue > 0), \
"clust_threshold must be a decimal value between 0 and 1."
self.paramsdict['clust_threshold'] = newvalue
elif param == 'max_barcode_mismatch':
self.paramsdict['max_barcode_mismatch'] = int(newvalue)
elif param == 'filter_adapters':
self.paramsdict['filter_adapters'] = int(newvalue)
elif param == 'filter_min_trim_len':
self.paramsdict["filter_min_trim_len"] = int(newvalue)
elif param == 'max_alleles_consens':
self.paramsdict['max_alleles_consens'] = int(newvalue)
elif param == 'max_Ns_consens':
newvalue = _tuplecheck(newvalue, int)
assert isinstance(newvalue, tuple), \
"max_Ns_consens should be a tuple e.g., (8, 8)"
self.paramsdict['max_Ns_consens'] = newvalue
elif param == 'max_Hs_consens':
newvalue = _tuplecheck(newvalue, int)
assert isinstance(newvalue, tuple), \
"max_Hs_consens should be a tuple e.g., (5, 5)"
self.paramsdict['max_Hs_consens'] = newvalue
elif param == 'min_samples_locus':
self.paramsdict['min_samples_locus'] = int(newvalue)
elif param == 'max_shared_Hs_locus':
if isinstance(newvalue, str):
if newvalue.isdigit():
newvalue = int(newvalue)
else:
try:
newvalue = float(newvalue)
except Exception as inst:
raise IPyradParamsError("""
max_shared_Hs_locus must be int or float, you put: {}""".format(newvalue))
self.paramsdict['max_shared_Hs_locus'] = newvalue
elif param == 'max_SNPs_locus':
newvalue = _tuplecheck(newvalue, int)
assert isinstance(newvalue, tuple), \
"max_SNPs_locus should be a tuple e.g., (20, 20)"
self.paramsdict['max_SNPs_locus'] = newvalue
elif param == 'max_Indels_locus':
newvalue = _tuplecheck(newvalue, int)
assert isinstance(newvalue, tuple), \
"max_Indels_locus should be a tuple e.g., (5, 100)"
self.paramsdict['max_Indels_locus'] = newvalue
## deprecated but retained for legacy, now uses trim_reads (below)
elif param == 'edit_cutsites':
## Force into a string tuple
newvalue = _tuplecheck(newvalue)
## try converting each tup element to ints
newvalue = list(newvalue)
for i in range(2):
try:
newvalue[i] = int(newvalue[i])
except (ValueError, IndexError):
newvalue.append(0)
pass
newvalue = tuple(newvalue)
## make sure we have a nice tuple
if not isinstance(newvalue, tuple):
raise IPyradWarningExit("""
Error: edit_cutsites should be a tuple e.g., (0, 5) or ('TGCAG', 6),
you entered {}
""".format(newvalue))
self.paramsdict['edit_cutsites'] = newvalue
elif param == 'trim_reads':
## Force into a string tuple
newvalue = _tuplecheck(newvalue)
## try converting each tup element to ints
newvalue = list(newvalue)
for i in range(4):
try:
newvalue[i] = int(newvalue[i])
except (ValueError, IndexError):
newvalue.append(0)
pass
newvalue = tuple(newvalue)
## make sure we have a nice tuple
if not isinstance(newvalue, tuple):
raise IPyradWarningExit("""
Error: trim_reads should be a tuple e.g., (0, -5, -5, 0)
or (0, 90, 0, 90), or (0, 0, 0, 0).
You entered {}\n""".format(newvalue))
self.paramsdict['trim_reads'] = newvalue
## deprecated but retained for legacy, now named trim_loci
elif param == 'trim_overhang':
newvalue = _tuplecheck(newvalue, str)
assert isinstance(newvalue, tuple), \
"trim_overhang should be a tuple e.g., (4, *, *, 4)"
self.paramsdict['trim_overhang'] = tuple([int(i) for i in newvalue])
elif param == 'trim_loci':
newvalue = _tuplecheck(newvalue, str)
assert isinstance(newvalue, tuple), \
"trim_overhang should be a tuple e.g., (0, -5, -5, 0)"
self.paramsdict['trim_loci'] = tuple([int(i) for i in newvalue])
elif param == 'output_formats':
## let's get whatever the user entered as a tuple of letters
allowed = assemble.write_outfiles.OUTPUT_FORMATS.keys()
## Handle the case where output formats is an empty string
if isinstance(newvalue, str):
## strip commas and spaces from string so we have only letters
newvalue = newvalue.replace(",", "").replace(" ", "")
newvalue = list(newvalue)
if not newvalue:
newvalue = ["*"]
if isinstance(newvalue, tuple):
newvalue = list(newvalue)
if isinstance(newvalue, list):
## if more than letters, raise an warning
if any([len(i) > 1 for i in newvalue]):
LOGGER.warning("""
'output_formats' params entry is malformed. Setting to * to avoid errors.""")
newvalue = allowed
newvalue = tuple(newvalue)
#newvalue = tuple([i for i in newvalue if i in allowed])
if "*" in newvalue:
newvalue = allowed
## set the param
self.paramsdict['output_formats'] = newvalue
elif param == 'pop_assign_file':
fullpoppath = _expander(newvalue)
## if a path is entered, raise exception if not found
if newvalue:
if not os.path.isfile(fullpoppath):
LOGGER.warn("Population assignment file not found.")
raise IPyradWarningExit("""
Warning: Population assignment file not found. This must be an
absolute path (/home/wat/ipyrad/data/my_popfile.txt) or relative to
the directory where you're running ipyrad (./data/my_popfile.txt)
You entered: {}\n""".format(fullpoppath))
## should we add a check here that all pop samples are in samples?
self.paramsdict['pop_assign_file'] = fullpoppath
self._link_populations()
else:
self.paramsdict['pop_assign_file'] = ""
## Don't forget to possibly blank the populations dictionary
self.populations = {}
return self |
def stats(self):
""" Returns a data frame with Sample data and state. """
nameordered = self.samples.keys()
nameordered.sort()
## Set pandas to display all samples instead of truncating
pd.options.display.max_rows = len(self.samples)
statdat = pd.DataFrame([self.samples[i].stats for i in nameordered],
index=nameordered).dropna(axis=1, how='all')
# ensure non h,e columns print as ints
for column in statdat:
if column not in ["hetero_est", "error_est"]:
statdat[column] = np.nan_to_num(statdat[column]).astype(int)
return statdat |
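## Minimal sketch of the stats-table assembly above using made-up Sample
## stats: rows are sample names, all-NaN columns are dropped, and every
## column except hetero_est/error_est is cast to int for printing.
import numpy as np
import pandas as pd

fake_stats = {
    "1A_0": pd.Series({"state": 2.0, "reads_raw": 1000.0, "hetero_est": np.nan}),
    "1B_0": pd.Series({"state": 2.0, "reads_raw": 2000.0, "hetero_est": np.nan}),
}
nameordered = sorted(fake_stats.keys())
statdat = pd.DataFrame([fake_stats[i] for i in nameordered],
                       index=nameordered).dropna(axis=1, how="all")
for column in statdat:
    if column not in ["hetero_est", "error_est"]:
        statdat[column] = np.nan_to_num(statdat[column]).astype(int)
## statdat now prints 'state' and 'reads_raw' as ints; 'hetero_est' was dropped |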
def files(self):
""" Returns a data frame with Sample files. Not very readable... """
nameordered = self.samples.keys()
nameordered.sort()
## replace curdir with . for shorter printing
#fullcurdir = os.path.realpath(os.path.curdir)
return pd.DataFrame([self.samples[i].files for i in nameordered],
index=nameordered).dropna(axis=1, how='all') |
def _build_stat(self, idx):
""" Returns a data frame with Sample stats for each step """
nameordered = self.samples.keys()
nameordered.sort()
newdat = pd.DataFrame([self.samples[i].stats_dfs[idx] \
for i in nameordered], index=nameordered)\
.dropna(axis=1, how='all')
return newdat |
def _link_fastqs(self, path=None, force=False, append=False, splitnames="_",
fields=None, ipyclient=None):
"""
Create Sample objects from demultiplexed fastq files in sorted_fastq_path,
or append additional fastq files to existing Samples. This provides
more flexible file input through the API than available in step1 of the
command line interface. If passed ipyclient it will run in parallel.
Note
----
This function is called during step 1 if files are specified in
'sorted_fastq_path'.
Parameters
----------
path : str
Path to the fastq files to be linked to Sample objects. The default
location is to select all files in the 'sorted_fastq_path'.
Alternatively a different path can be entered here.
append : bool
The default action is to overwrite fastq files linked to Samples if
they already have linked files. Use append=True to instead append
additional fastq files to a Sample (file names should be formatted
the same as usual, e.g., [name]_R1_[optional].fastq.gz).
splitnames : str
A character used to split file names. In combination with the
fields argument it can be used to subselect fields from file names.
fields : list
A list of indices for the fields to be included in names after
filenames are split on the splitnames character. Useful for appending
sequence names which must match existing names. If the largest index
is greater than the number of split strings in the name the index
is ignored. e.g., [2,3,4] ## excludes 0, 1 and >4
force : bool
Overwrites existing Sample data and statistics.
Returns
-------
str
Prints the number of new Sample objects created and the number of
fastq files linked to Sample objects in the Assembly object.
"""
## cannot both force and append at once
if force and append:
raise IPyradError("Cannot use force and append at the same time.")
if self.samples and not (force or append):
raise IPyradError("Files already linked to `{}`.".format(self.name)\
+" Use force=True to replace all files, or append=True to add"
+" additional files to existing Samples.")
## make sure there is a workdir and workdir/fastqdir
self.dirs.fastqs = os.path.join(self.paramsdict["project_dir"],
self.name+"_fastqs")
if not os.path.exists(self.paramsdict["project_dir"]):
os.mkdir(self.paramsdict["project_dir"])
## get path to data files
if not path:
path = self.paramsdict["sorted_fastq_path"]
## but grab fastq/fq/gz, and then sort
fastqs = glob.glob(path)
## Assert files are not .bz2 format
if any([i for i in fastqs if i.endswith(".bz2")]):
raise IPyradError(NO_SUPPORT_FOR_BZ2.format(path))
fastqs = [i for i in fastqs if i.endswith(".gz") \
or i.endswith(".fastq") \
or i.endswith(".fq")]
fastqs.sort()
LOGGER.debug("Linking these fastq files:\n{}".format(fastqs))
## raise error if no files are found
if not fastqs:
raise IPyradError(NO_FILES_FOUND_PAIRS\
.format(self.paramsdict["sorted_fastq_path"]))
## link pairs into tuples
if 'pair' in self.paramsdict["datatype"]:
## check that names fit the paired naming convention
## trying to support flexible types (_R2_, _2.fastq)
r1_try1 = [i for i in fastqs if "_R1_" in i]
r1_try2 = [i for i in fastqs if i.endswith("_1.fastq.gz")]
r1_try3 = [i for i in fastqs if i.endswith("_R1.fastq.gz")]
r2_try1 = [i for i in fastqs if "_R2_" in i]
r2_try2 = [i for i in fastqs if i.endswith("_2.fastq.gz")]
r2_try3 = [i for i in fastqs if i.endswith("_R2.fastq.gz")]
r1s = [r1_try1, r1_try2, r1_try3]
r2s = [r2_try1, r2_try2, r2_try3]
## check that something was found
if not r1_try1 + r1_try2 + r1_try3:
raise IPyradWarningExit(
"Paired filenames are improperly formatted. See Documentation")
if not r2_try1 + r2_try2 + r2_try3:
raise IPyradWarningExit(
"Paired filenames are improperly formatted. See Documentation")
## find the one with the right number of R1s
for idx, tri in enumerate(r1s):
if len(tri) == len(fastqs)/2:
break
r1_files = r1s[idx]
r2_files = r2s[idx]
if len(r1_files) != len(r2_files):
raise IPyradWarningExit(R1_R2_name_error\
.format(len(r1_files), len(r2_files)))
fastqs = [(i, j) for i, j in zip(r1_files, r2_files)]
## data are not paired, create empty tuple pair
else:
## print warning if _R2_ is in names when not paired
idx = 0
if any(["_R2_" in i for i in fastqs]):
print(NAMES_LOOK_PAIRED_WARNING)
fastqs = [(i, "") for i in fastqs]
## counters for the printed output
linked = 0
appended = 0
## clear samples if force
if force:
self.samples = {}
## track parallel jobs
linkjobs = {}
if ipyclient:
lbview = ipyclient.load_balanced_view()
## iterate over input files
for fastqtuple in list(fastqs):
assert isinstance(fastqtuple, tuple), "fastqs not a tuple."
## local counters
createdinc = 0
linkedinc = 0
appendinc = 0
## remove file extension from name
if idx == 0:
sname = _name_from_file(fastqtuple[0], splitnames, fields)
elif idx == 1:
sname = os.path.basename(fastqtuple[0].rsplit("_1.fastq.gz", 1)[0])
elif idx == 2:
sname = os.path.basename(fastqtuple[0].rsplit("_R1.fastq.gz", 1)[0])
LOGGER.debug("New Sample name {}".format(sname))
if sname not in self.samples:
## create new Sample
LOGGER.debug("Creating new sample - ".format(sname))
self.samples[sname] = Sample(sname)
self.samples[sname].stats.state = 1
self.samples[sname].barcode = None
self.samples[sname].files.fastqs.append(fastqtuple)
createdinc += 1
linkedinc += 1
else:
## if not forcing, shouldn't be here with existing Samples
if append:
#if fastqtuple not in self.samples[sname].files.fastqs:
self.samples[sname].files.fastqs.append(fastqtuple)
appendinc += 1
elif force:
## overwrite/create new Sample
LOGGER.debug("Overwriting sample - ".format(sname))
self.samples[sname] = Sample(sname)
self.samples[sname].stats.state = 1
self.samples[sname].barcode = None
self.samples[sname].files.fastqs.append(fastqtuple)
createdinc += 1
linkedinc += 1
else:
print("""
The files {} are already in Sample. Use append=True to append additional
files to a Sample or force=True to replace all existing Samples.
""".format(sname))
## support serial execution w/o ipyclient
if not ipyclient:
if any([linkedinc, createdinc, appendinc]):
gzipped = bool(fastqtuple[0].endswith(".gz"))
nreads = 0
for alltuples in self.samples[sname].files.fastqs:
nreads += _zbufcountlines(alltuples[0], gzipped)
self.samples[sname].stats.reads_raw = nreads/4
self.samples[sname].stats_dfs.s1["reads_raw"] = nreads/4
self.samples[sname].state = 1
LOGGER.debug("Got reads for sample - {} {}".format(sname,\
self.samples[sname].stats.reads_raw))
#created += createdinc
linked += linkedinc
appended += appendinc
## do counting in parallel
else:
if any([linkedinc, createdinc, appendinc]):
gzipped = bool(fastqtuple[0].endswith(".gz"))
for sidx, tup in enumerate(self.samples[sname].files.fastqs):
key = sname+"_{}".format(sidx)
linkjobs[key] = lbview.apply(_bufcountlines,
*(tup[0], gzipped))
LOGGER.debug("sent count job for {}".format(sname))
#created += createdinc
linked += linkedinc
appended += appendinc
## wait for link jobs to finish if parallel
if ipyclient:
start = time.time()
printstr = ' loading reads | {} | s1 |'
while 1:
fin = [i.ready() for i in linkjobs.values()]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(len(fin), sum(fin),
printstr.format(elapsed), spacer=self._spacer)
time.sleep(0.1)
if len(fin) == sum(fin):
print("")
break
## collect link job results
sampdict = {i:0 for i in self.samples}
for result in linkjobs:
sname = result.rsplit("_", 1)[0]
nreads = linkjobs[result].result()
sampdict[sname] += nreads
for sname in sampdict:
self.samples[sname].stats.reads_raw = sampdict[sname]/4
self.samples[sname].stats_dfs.s1["reads_raw"] = sampdict[sname]/4
self.samples[sname].state = 1
## print if data were linked
#print(" {} new Samples created in '{}'.".format(created, self.name))
if linked:
## double for paired data
if 'pair' in self.paramsdict["datatype"]:
linked = linked*2
if self._headers:
print("{}{} fastq files loaded to {} Samples.".\
format(self._spacer, linked, len(self.samples)))
## save the location where these files are located
self.dirs.fastqs = os.path.realpath(os.path.dirname(path))
if appended:
if self._headers:
print("{}{} fastq files appended to {} existing Samples.".\
format(self._spacer, appended, len(self.samples)))
## save step-1 stats. We don't want to write this to the fastq dir, b/c
## it is not necessarily inside our project dir. Instead, we'll write
## this file into our project dir in the case of linked_fastqs.
self.stats_dfs.s1 = self._build_stat("s1")
self.stats_files.s1 = os.path.join(self.paramsdict["project_dir"],
self.name+
'_s1_demultiplex_stats.txt')
with open(self.stats_files.s1, 'w') as outfile:
self.stats_dfs.s1.fillna(value=0).astype(np.int).to_string(outfile) |
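## ---------------------------------------------------------------------
## Hedged usage sketch (illustration only, not the real ipyrad helper):
## how splitnames and fields subselect a sample name from a fastq file
## name, mirroring the behavior described in the docstring above.
import os

def _example_name_from_file(fname, splitnames="_", fields=None):
    """ simplified stand-in for _name_from_file, for illustration """
    base = os.path.basename(fname)
    ## strip common fastq extensions
    for ext in (".gz", ".fastq", ".fq"):
        if base.endswith(ext):
            base = base[:-len(ext)]
    parts = base.split(splitnames)
    if fields:
        ## indices past the number of available fields are ignored
        parts = [parts[i] for i in fields if i < len(parts)]
    return splitnames.join(parts)

## _example_name_from_file("1A_0_R1_.fastq.gz", "_", [0]) returns "1A"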
def _link_barcodes(self):
"""
Private function. Links Sample barcodes in a dictionary as
[Assembly].barcodes, with barcodes parsed from the 'barcodes_path'
parameter. This function is called during set_params() when setting
the barcodes_path.
"""
## parse barcodefile
try:
## allows fuzzy match to barcodefile name
barcodefile = glob.glob(self.paramsdict["barcodes_path"])[0]
## read in the file
bdf = pd.read_csv(barcodefile, header=None, delim_whitespace=1, dtype=str)
bdf = bdf.dropna()
## make sure bars are upper case
bdf[1] = bdf[1].str.upper()
## if replicates are present then print a warning
reps = bdf[0].unique().shape[0] != bdf[0].shape[0]
if reps:
print("{spacer}Warning: technical replicates (same name) will be combined."\
.format(**{'spacer': self._spacer}))
## add -technical-replicate-N to replicate names
reps = [i for i in bdf[0] if list(bdf[0]).count(i) > 1]
ureps = list(set(reps))
for name in ureps:
idxs = bdf[bdf[0] == name].index.tolist()
for num, idx in enumerate(idxs):
bdf.loc[idx, 0] = bdf.loc[idx, 0] + "-technical-replicate-" + str(num+1)
## make sure chars are all proper
if not all(bdf[1].apply(set("RKSYWMCATG").issuperset)):
LOGGER.warn(BAD_BARCODE)
raise IPyradError(BAD_BARCODE)
## 3rad/seqcap use multiplexed barcodes
## We'll concatenate them with a plus and split them later
if "3rad" in self.paramsdict["datatype"]:
try:
bdf[2] = bdf[2].str.upper()
self.barcodes = dict(zip(bdf[0], bdf[1] + "+" + bdf[2]))
except KeyError as inst:
msg = " 3rad assumes multiplexed barcodes. Doublecheck your barcodes file."
LOGGER.error(msg)
raise IPyradError(msg)
else:
## set attribute on Assembly object
self.barcodes = dict(zip(bdf[0], bdf[1]))
except (IOError, IndexError):
raise IPyradWarningExit(\
" Barcodes file not found. You entered: {}"\
.format(self.paramsdict["barcodes_path"]))
except ValueError as inst:
msg = " Barcodes file format error."
LOGGER.warn(msg)
raise IPyradError(inst) |
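## ---------------------------------------------------------------------
## Hedged sketch (illustration only): the barcodes file parsed above is a
## whitespace-delimited table of <sample> <barcode> (plus a third barcode
## column for 3rad). A minimal stand-alone version of the parsing:
import pandas as pd
from io import StringIO

_EXAMPLE_BARCODES = u"sample_A  catcat\nsample_B  ACGTAA\n"
bdf = pd.read_csv(StringIO(_EXAMPLE_BARCODES), header=None,
                  delim_whitespace=True, dtype=str).dropna()
bdf[1] = bdf[1].str.upper()
example_barcodes = dict(zip(bdf[0], bdf[1]))
## example_barcodes == {'sample_A': 'CATCAT', 'sample_B': 'ACGTAA'}
## for 3rad, a third column would be concatenated as bdf[1] + "+" + bdf[2]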
def _link_populations(self, popdict=None, popmins=None):
"""
Creates self.populations dictionary to save mappings of individuals to
populations/sites, and checks that individual names match with Samples.
The self.populations dict keys are pop names and the values are lists
of length 2. The first element is the min number of samples per pop
for final filtering of loci, and the second element is the list of
samples per pop.
Population assignments are used for hierarchical clustering, for
generating summary stats, and for outputting some file types (.treemix
for example). Internally stored as a dictionary.
Note
----
By default a file is read in from `pop_assign_file` with one individual
per line, given as space-separated pairs of ind and pop:
ind1 pop1
ind2 pop2
ind3 pop3
etc...
Parameters
----------
TODO: NB: Using the API and passing in popdict and popmins is currently
unimplemented, or at least looks like it doesn't work. Leaving
these docs because Deren might have ideas about it being useful.
popdict : dict
When using the API it may be easier to simply create a dictionary
to pass in as an argument instead of reading from an input file.
This can be done with the `popdict` argument like below:
pops = {'pop1': ['ind1', 'ind2', 'ind3'], 'pop2': ['ind4', 'ind5']}
[Assembly]._link_populations(popdict=pops).
popmins : dict
If you want to apply a minsamples filter based on populations
you can add a popmins dictionary. This indicates the number of
samples in each population that must be present in a locus for
the locus to be retained. Example:
popmins = {'pop1': 3, 'pop2': 2}
"""
if not popdict:
## glob it in case of fuzzy matching
popfile = glob.glob(self.paramsdict["pop_assign_file"])[0]
if not os.path.exists(popfile):
raise IPyradError("Population assignment file not found: {}"\
.format(self.paramsdict["pop_assign_file"]))
try:
## parse populations file
popdat = pd.read_csv(popfile, header=None,
delim_whitespace=1,
names=["inds", "pops"],
comment="#")
popdict = {key: group.inds.values.tolist() for key, group in \
popdat.groupby("pops")}
## parse minsamples per population if present (line with #)
mindat = [i.lstrip("#").lstrip().rstrip() for i in \
open(popfile, 'r').readlines() if i.startswith("#")]
if mindat:
popmins = {}
for i in range(len(mindat)):
minlist = mindat[i].replace(",", "").split()
popmins.update({i.split(':')[0]:int(i.split(':')[1]) \
for i in minlist})
else:
raise IPyradError(MIN_SAMPLES_PER_POP_MALFORMED)
except (ValueError, IOError):
LOGGER.warn("Populations file may be malformed.")
raise IPyradError(MIN_SAMPLES_PER_POP_MALFORMED)
else:
## pop dict is provided by user
pass
## check popdict. Filter for bad samples
## Warn user but don't bail out, could be setting the pops file
## on a new assembly w/o any linked samples.
badsamples = [i for i in itertools.chain(*popdict.values()) \
if i not in self.samples.keys()]
if any(badsamples):
LOGGER.warn("Some names from population input do not match Sample "\
+ "names: ".format(", ".join(badsamples)))
LOGGER.warn("If this is a new assembly this is normal.")
## If popmins not set, just assume all mins are zero
if not popmins:
popmins = {i: 0 for i in popdict.keys()}
## check popmins
## cannot have higher min for a pop than there are samples in the pop
popmax = {i: len(popdict[i]) for i in popdict}
if not all([popmax[i] >= popmins[i] for i in popdict]):
raise IPyradWarningExit(\
" minsample per pop value cannot be greater than the "+
" number of samples in the pop. Modify the populations file.")
## return dict
self.populations = {i: (popmins[i], popdict[i]) for i in popdict} |
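## ---------------------------------------------------------------------
## Hedged sketch (illustration only) of the structures _link_populations
## builds. Given a pop_assign_file such as:
##     ind1  pop1
##     ind2  pop1
##     ind3  pop2
##     # pop1:2, pop2:1
## the parsed objects would look roughly like this:
example_popdict = {"pop1": ["ind1", "ind2"], "pop2": ["ind3"]}
example_popmins = {"pop1": 2, "pop2": 1}
## and self.populations stores them as {pop: (min_samples, [members])}
example_populations = {i: (example_popmins[i], example_popdict[i])
                       for i in example_popdict}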
def get_params(self, param=""):
""" pretty prints params if called as a function """
fullcurdir = os.path.realpath(os.path.curdir)
if not param:
for index, (key, value) in enumerate(self.paramsdict.items()):
if isinstance(value, str):
value = value.replace(fullcurdir+"/", "./")
sys.stdout.write("{}{:<4}{:<28}{:<45}\n"\
.format(self._spacer, index, key, value))
else:
try:
## numeric params index the paramsdict directly (zero-based), so "0" is valid
return self.paramsdict.values()[int(param)]
except (ValueError, TypeError, NameError, IndexError):
try:
return self.paramsdict[param]
except KeyError:
return 'key not recognized' |
def set_params(self, param, newvalue):
"""
Set a parameter to a new value. Raises error if newvalue is wrong type.
Note
----
Use [Assembly].get_params() to see the parameter values currently
linked to the Assembly object.
Parameters
----------
param : int or str
The index (e.g., 1) or string name (e.g., "project_dir")
for the parameter that will be changed.
newvalue : int, str, or tuple
The new value for the parameter selected for `param`. Use
`ipyrad.get_params_info()` to get further information about
a given parameter. If the wrong type is entered for newvalue
(e.g., a str when it should be an int), an error will be raised.
Further information about each parameter is also available
in the documentation.
Examples
--------
## param 'project_dir' takes only a str as input
[Assembly].set_params('project_dir', 'new_directory')
## param 'restriction_overhang' must be a tuple or str, if str it is
## converted to a tuple with the second entry empty.
[Assembly].set_params('restriction_overhang', ('CTGCAG', 'CCGG'))
## param 'max_shared_Hs_locus' can be an int or a float:
[Assembly].set_params('max_shared_Hs_locus', 0.25)
"""
## this includes current params and some legacy params for conversion
legacy_params = ["edit_cutsites", "trim_overhang"]
current_params = self.paramsdict.keys()
allowed_params = current_params + legacy_params
## require parameter recognition
#if not ((param in range(50)) or \
# (param in [str(i) for i in range(50)]) or \
# (param in allowed_params)):
if not param in allowed_params:
raise IPyradParamsError("Parameter key not recognized: {}"\
.format(param))
## make string
param = str(param)
## get index if param is keyword arg (this index is now zero based!)
if len(param) < 3:
param = self.paramsdict.keys()[int(param)]
## run assertions on new param
try:
self = _paramschecker(self, param, newvalue)
except Exception as inst:
raise IPyradWarningExit(BAD_PARAMETER\
.format(param, inst, newvalue)) |
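## ---------------------------------------------------------------------
## Hedged sketch of the index lookup above: a short numeric param string
## resolves to a key by zero-based position. OrderedDict stands in for
## the real paramsdict here; the names and values are only examples.
from collections import OrderedDict

example_params = OrderedDict([("assembly_name", "demo"),
                              ("project_dir", "./demo")])
param = "1"
if len(param) < 3:
    param = list(example_params.keys())[int(param)]
## param is now "project_dir", so set_params("1", ...) and
## set_params("project_dir", ...) target the same parameter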
def write_params(self, outfile=None, force=False):
""" Write out the parameters of this assembly to a file properly
formatted as input for `ipyrad -p <params.txt>`. A good and
simple way to share/archive parameter settings for assemblies.
This is also the function that's used by __main__ to
generate default params.txt files for `ipyrad -n`
"""
if outfile is None:
outfile = "params-"+self.name+".txt"
## Test if params file already exists?
## If not forcing, test for file and bail out if it exists
if not force:
if os.path.isfile(outfile):
raise IPyradWarningExit(PARAMS_EXISTS.format(outfile))
with open(outfile, 'w') as paramsfile:
## Write the header. Format to 80 columns
header = "------- ipyrad params file (v.{})".format(ip.__version__)
header += ("-"*(80-len(header)))
paramsfile.write(header)
## Whip through the current paramsdict and write out the current
## param value, the ordered dict index number. Also,
## get the short description from paramsinfo. Make it look pretty,
## pad nicely if at all possible.
for key, val in self.paramsdict.iteritems():
## If multiple elements, write them out comma separated
if isinstance(val, list) or isinstance(val, tuple):
paramvalue = ", ".join([str(i) for i in val])
else:
paramvalue = str(val)
## skip deprecated params
if key in ["edit_cutsites", "trim_overhang"]:
continue
padding = (" "*(30-len(paramvalue)))
paramkey = self.paramsdict.keys().index(key)
paramindex = " ## [{}] ".format(paramkey)
LOGGER.debug(key, val, paramindex)
name = "[{}]: ".format(paramname(paramkey))
description = paraminfo(paramkey, short=True)
paramsfile.write("\n" + paramvalue + padding + \
paramindex + name + description) |
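## ---------------------------------------------------------------------
## Hedged sketch of the line format written above. The description text
## is illustrative, not the exact string returned by paraminfo().
paramvalue = "./analysis-ipyrad"
padding = " " * (30 - len(paramvalue))
example_line = (paramvalue + padding + " ## [1] " +
                "[project_dir]: " + "Project dir (example description)")
## i.e., value, padding to ~30 chars, then "## [index] [name]: description"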
def branch(self, newname, subsamples=None, infile=None):
"""
Returns a copy of the Assembly object. Does not allow Assembly
object names to be replicated in namespace or path.
"""
## subsample by removal or keeping.
remove = 0
## is there a better way to ask if it already exists?
if (newname == self.name or os.path.exists(
os.path.join(self.paramsdict["project_dir"],
newname+".assembly"))):
print("{}Assembly object named {} already exists"\
.format(self._spacer, newname))
else:
## Make sure the new name doesn't have any wacky characters
self._check_name(newname)
## Bozo-check. Carve off 'params-' if it's in the new name.
if newname.startswith("params-"):
newname = newname.split("params-")[1]
## create a copy of the Assembly obj
newobj = copy.deepcopy(self)
newobj.name = newname
newobj.paramsdict["assembly_name"] = newname
if subsamples and infile:
print(BRANCH_NAMES_AND_INPUT)
if infile:
if infile[0] == "-":
remove = 1
infile = infile[1:]
if os.path.exists(infile):
subsamples = _read_sample_names(infile)
## if remove then swap the samples
if remove:
subsamples = list(set(self.samples.keys()) - set(subsamples))
## create copies of each subsampled Sample obj
if subsamples:
for sname in subsamples:
if sname in self.samples:
newobj.samples[sname] = copy.deepcopy(self.samples[sname])
else:
print("Sample name not found: {}".format(sname))
## reload sample dict w/o non subsamples
newobj.samples = {name:sample for name, sample in \
newobj.samples.items() if name in subsamples}
## create copies of each subsampled Sample obj
else:
for sample in self.samples:
newobj.samples[sample] = copy.deepcopy(self.samples[sample])
## save json of new obj and return object
newobj.save()
return newobj |
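## ---------------------------------------------------------------------
## Hedged usage sketch for branch(); `data` is assumed to be an Assembly
## with Samples "1A", "1B", and "1C" already linked. Names and file paths
## are hypothetical.
## keep only a subset of samples in the new branch:
##   sub = data.branch("min4_subset", subsamples=["1A", "1B"])
## or drop the samples listed in a file by prefixing the file name with "-":
##   sub = data.branch("no_outgroups", infile="-samples_to_remove.txt")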
def _step1func(self, force, ipyclient):
""" hidden wrapped function to start step 1 """
## check input data files
sfiles = self.paramsdict["sorted_fastq_path"]
rfiles = self.paramsdict["raw_fastq_path"]
## do not allow both a sorted_fastq_path and a raw_fastq
if sfiles and rfiles:
raise IPyradWarningExit(NOT_TWO_PATHS)
## but also require that at least one exists
if not (sfiles or rfiles):
raise IPyradWarningExit(NO_SEQ_PATH_FOUND)
## print headers
if self._headers:
if sfiles:
print("\n{}Step 1: Loading sorted fastq data to Samples"\
.format(self._spacer))
else:
print("\n{}Step 1: Demultiplexing fastq data to Samples"\
.format(self._spacer))
## if Samples already exist then no demultiplexing
if self.samples:
if not force:
print(SAMPLES_EXIST.format(len(self.samples), self.name))
else:
## overwrite existing data else do demux
if glob.glob(sfiles):
self._link_fastqs(ipyclient=ipyclient, force=force)
else:
assemble.demultiplex.run2(self, ipyclient, force)
## Creating new Samples
else:
## first check if demultiplexed files exist in sorted path
if glob.glob(sfiles):
self._link_fastqs(ipyclient=ipyclient)
## otherwise do the demultiplexing
else:
assemble.demultiplex.run2(self, ipyclient, force) |
def _step2func(self, samples, force, ipyclient):
""" hidden wrapped function to start step 2"""
## print header
if self._headers:
print("\n Step 2: Filtering reads ")
## If no samples in this assembly then it means you skipped step1,
if not self.samples.keys():
raise IPyradWarningExit(FIRST_RUN_1)
## Get sample objects from list of strings, if API.
samples = _get_samples(self, samples)
if not force:
## print warning and skip if all are finished
if all([i.stats.state >= 2 for i in samples]):
print(EDITS_EXIST.format(len(samples)))
return
## Run samples through rawedit
assemble.rawedit.run2(self, samples, force, ipyclient) |
def _step3func(self, samples, noreverse, maxindels, force, ipyclient):
""" hidden wrapped function to start step 3 """
## print headers
if self._headers:
print("\n Step 3: Clustering/Mapping reads")
## Require reference seq for reference-based methods
if self.paramsdict['assembly_method'] != "denovo":
if not self.paramsdict['reference_sequence']:
raise IPyradError(REQUIRE_REFERENCE_PATH\
.format(self.paramsdict["assembly_method"]))
else:
## index the reference sequence
## Allow force to reindex the reference sequence
## send to run on the cluster.
lbview = ipyclient.load_balanced_view()
async = lbview.apply(index_reference_sequence, *(self, force))
## print a progress bar for the indexing
start = time.time()
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
printstr = " {} | {} | s3 |".format("indexing reference", elapsed)
finished = int(async.ready())
progressbar(1, finished, printstr, spacer=self._spacer)
if finished:
print("")
break
time.sleep(0.9)
## error check
if not async.successful():
raise IPyradWarningExit(async.result())
## Get sample objects from list of strings
samples = _get_samples(self, samples)
## Check if all/none in the right state
if not self._samples_precheck(samples, 3, force):
raise IPyradError(FIRST_RUN_2)
elif not force:
## skip if all are finished
if all([i.stats.state >= 3 for i in samples]):
print(CLUSTERS_EXIST.format(len(samples)))
return
## run the step function
assemble.cluster_within.run(self, samples, noreverse, maxindels,
force, ipyclient) |
def _step4func(self, samples, force, ipyclient):
""" hidden wrapped function to start step 4 """
if self._headers:
print("\n Step 4: Joint estimation of error rate and heterozygosity")
## Get sample objects from list of strings
samples = _get_samples(self, samples)
## Check if all/none in the right state
if not self._samples_precheck(samples, 4, force):
raise IPyradError(FIRST_RUN_3)
elif not force:
## skip if all are finished
if all([i.stats.state >= 4 for i in samples]):
print(JOINTS_EXIST.format(len(samples)))
return
## send to function
assemble.jointestimate.run(self, samples, force, ipyclient) |
def _step5func(self, samples, force, ipyclient):
""" hidden wrapped function to start step 5 """
## print header
if self._headers:
print("\n Step 5: Consensus base calling ")
## Get sample objects from list of strings
samples = _get_samples(self, samples)
## Check if all/none in the right state
if not self._samples_precheck(samples, 5, force):
raise IPyradError(FIRST_RUN_4)
elif not force:
## skip if all are finished
if all([i.stats.state >= 5 for i in samples]):
print(CONSENS_EXIST.format(len(samples)))
return
## pass samples to rawedit
assemble.consens_se.run(self, samples, force, ipyclient) |
def _step6func(self,
samples,
noreverse,
force,
randomseed,
ipyclient,
**kwargs):
"""
Hidden function to start Step 6.
"""
## Get sample objects from list of strings
samples = _get_samples(self, samples)
## remove samples that aren't ready
csamples = self._samples_precheck(samples, 6, force)
## print CLI header
if self._headers:
print("\n Step 6: Clustering at {} similarity across {} samples".\
format(self.paramsdict["clust_threshold"], len(csamples)))
## Check if all/none in the right state
if not csamples:
raise IPyradError(FIRST_RUN_5)
elif not force:
## skip if all are finished
if all([i.stats.state >= 6 for i in csamples]):
print(DATABASE_EXISTS.format(len(samples)))
return
## run if this point is reached. We no longer check for existing
## h5 file, since checking Sample states should suffice.
assemble.cluster_across.run(
self,
csamples,
noreverse,
force,
randomseed,
ipyclient,
**kwargs) |
def _step7func(self, samples, force, ipyclient):
""" Step 7: Filter and write output files """
## Get sample objects from list of strings
samples = _get_samples(self, samples)
if self._headers:
print("\n Step 7: Filter and write output files for {} Samples".\
format(len(samples)))
## Check if all/none of the samples are in the self.database
try:
with h5py.File(self.clust_database, 'r') as io5:
dbset = set(io5["seqs"].attrs['samples'])
iset = set([i.name for i in samples])
## TODO: Handle the case where dbdiff is not empty?
## This might arise if someone tries to branch and remove
## samples at step 7.
dbdiff = dbset.difference(iset)
idiff = iset.difference(dbset)
if idiff:
print(NOT_CLUSTERED_YET\
.format(self.database, ", ".join(list(idiff))))
## This is the old way that failed unless all samples were
## clustered successfully in step 6. Adding some flexibility
## to allow writing output even if some samples failed.
## raise IPyradWarningExit(msg)
## Remove the samples that aren't ready for writing out
## i.e. only proceed with the samples that are actually
## present in the db
samples = [x for x in samples if x.name not in idiff]
except (IOError, ValueError):
raise IPyradError(FIRST_RUN_6.format(self.clust_database))
if not force:
outdir = os.path.join(self.dirs.project, self.name+"_outfiles")
if os.path.exists(outdir):
raise IPyradWarningExit(OUTPUT_EXISTS.format(outdir))
## Run step7
assemble.write_outfiles.run(self, samples, force, ipyclient) |
def _samples_precheck(self, samples, mystep, force):
""" Return a list of samples that are actually ready for the next step.
Each step runs this prior to calling run, makes it easier to
centralize and normalize how each step is checking sample states.
mystep is the state produced by the current step.
"""
subsample = []
## filter by state
for sample in samples:
if sample.stats.state < mystep - 1:
LOGGER.debug("Sample {} not in proper state."\
.format(sample.name))
else:
subsample.append(sample)
return subsample |
def _compatible_params_check(self):
""" check for mindepths after all params are set, b/c doing it while each
is being set becomes complicated """
## do not allow statistical < majrule
val1 = self.paramsdict["mindepth_statistical"]
val2 = self.paramsdict['mindepth_majrule']
if val1 < val2:
msg = """
Warning: mindepth_statistical cannot be < mindepth_majrule.
Forcing mindepth_majrule = mindepth_statistical = {}
""".format(val1)
LOGGER.warning(msg)
print(msg)
self.paramsdict["mindepth_majrule"] = val1 |
def run(self, steps=0, force=False, ipyclient=None,
show_cluster=0, **kwargs):
"""
Run assembly steps of an ipyrad analysis. Enter steps as a string,
e.g., "1", "123", "12345". This step checks for an existing
ipcluster instance otherwise it raises an exception. The ipyparallel
connection is made using information from the _ipcluster dict of the
Assembly class object.
"""
## check that mindepth params are compatible, fix and report warning.
self._compatible_params_check()
## wrap everything in a try statement to ensure that we save the
## Assembly object if it is interrupted at any point, and also
## to ensure proper cleanup of the ipyclient.
inst = None
try:
## use an existing ipcluster instance
if not ipyclient:
args = self._ipcluster.items() + [("spacer", self._spacer)]
ipyclient = ip.core.parallel.get_client(**dict(args))
## print a message about the cluster status
## if MPI setup then we are going to wait until all engines are
## ready so that we can print how many cores started on each
## host machine exactly.
if (self._cli) or show_cluster:
ip.cluster_info(ipyclient=ipyclient, spacer=self._spacer)
## get the list of steps to run
if isinstance(steps, int):
steps = str(steps)
steps = sorted(list(steps))
## print an Assembly name header if inside API
if not self._cli:
print("Assembly: {}".format(self.name))
## store ipyclient engine pids to the Assembly so we can
## hard-interrupt them later if assembly is interrupted.
## Only stores pids of engines that aren't busy at this moment,
## otherwise it would block here while waiting to find their pids.
self._ipcluster["pids"] = {}
for eid in ipyclient.ids:
engine = ipyclient[eid]
if not engine.outstanding:
pid = engine.apply(os.getpid).get()
self._ipcluster["pids"][eid] = pid
#ipyclient[:].apply(os.getpid).get_dict()
## has many fixed arguments right now, but we may add these to
## hackerz_only, or they may be accessed in the API.
if '1' in steps:
self._step1func(force, ipyclient)
self.save()
ipyclient.purge_everything()
if '2' in steps:
self._step2func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '3' in steps:
self._step3func(samples=None, noreverse=0, force=force,
maxindels=8, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '4' in steps:
self._step4func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '5' in steps:
self._step5func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
if '6' in steps:
self._step6func(samples=None, noreverse=0, randomseed=12345,
force=force, ipyclient=ipyclient, **kwargs)
self.save()
ipyclient.purge_everything()
if '7' in steps:
self._step7func(samples=None, force=force, ipyclient=ipyclient)
self.save()
ipyclient.purge_everything()
## handle exceptions so they will be raised after we clean up below
except KeyboardInterrupt as inst:
print("\n Keyboard Interrupt by user")
LOGGER.info("assembly interrupted by user.")
except IPyradWarningExit as inst:
LOGGER.error("IPyradWarningExit: %s", inst)
print("\n Encountered an error (see details in ./ipyrad_log.txt)"+\
"\n Error summary is below -------------------------------"+\
"\n{}".format(inst))
except Exception as inst:
LOGGER.error(inst)
print("\n Encountered an unexpected error (see ./ipyrad_log.txt)"+\
"\n Error message is below -------------------------------"+\
"\n{}".format(inst))
## close client when done or interrupted
finally:
try:
## save the Assembly
self.save()
## can't close client if it was never open
if ipyclient:
## send SIGINT (2) to all engines
try:
ipyclient.abort()
time.sleep(1)
for engine_id, pid in self._ipcluster["pids"].items():
if ipyclient.queue_status()[engine_id]["tasks"]:
os.kill(pid, 2)
LOGGER.info('interrupted engine {} w/ SIGINT to {}'\
.format(engine_id, pid))
time.sleep(1)
except ipp.NoEnginesRegistered:
pass
## if CLI, stop jobs and shutdown. Don't use _cli here
## because you can have a CLI object but use the --ipcluster
## flag, in which case we don't want to kill ipcluster.
if 'ipyrad-cli' in self._ipcluster["cluster_id"]:
LOGGER.info(" shutting down engines")
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
LOGGER.info(" finished shutdown")
else:
if not ipyclient.outstanding:
ipyclient.purge_everything()
else:
## nanny: kill everything, something bad happened
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
print("\nwarning: ipcluster shutdown and must be restarted")
## if exception is close and save, print and ignore
except Exception as inst2:
print("warning: error during shutdown:\n{}".format(inst2))
LOGGER.error("shutdown warning: %s", inst2) |
def _to_fulldict(self):
"""
Write to dict including data frames. All sample dicts
are combined in save() to dump JSON output """
##
returndict = OrderedDict([
("name", self.name),
("barcode", self.barcode),
("files", self.files),
("stats_dfs", {
"s1": self.stats_dfs.s1.to_dict(),
"s2": self.stats_dfs.s2.to_dict(),
"s3": self.stats_dfs.s3.to_dict(),
"s4": self.stats_dfs.s4.to_dict(),
"s5": self.stats_dfs.s5.to_dict(),
}),
("stats", self.stats.to_dict()),
("depths", self.depths)
])
return returndict |
def combinefiles(filepath):
""" Joins first and second read file names """
## unpack seq files in filepath
fastqs = glob.glob(filepath)
firsts = [i for i in fastqs if "_R1_" in i]
## check names
if not firsts:
raise IPyradWarningExit("First read files names must contain '_R1_'.")
## get paired reads
seconds = [ff.replace("_R1_", "_R2_") for ff in firsts]
return zip(firsts, seconds) |
def findbcode(cutters, longbar, read1):
""" find barcode sequence in the beginning of read """
## default barcode string
for cutter in cutters[0]:
## If the cutter is unambiguous there will only be one.
if not cutter:
continue
search = read1[1][:int(longbar[0]+len(cutter)+1)]
barcode = search.rsplit(cutter, 1)
if len(barcode) > 1:
return barcode[0]
## No cutter found
return barcode[0] |
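## ---------------------------------------------------------------------
## Hedged worked example of the search/rsplit logic in findbcode(): with
## a 6-base barcode and a PstI-style TGCAG overhang, the search window is
## barcode-length + cutter-length + 1 bases, and everything left of the
## cutter is the barcode. The sequence here is made up.
example_read1 = ["@seq1\n", "CATCATTGCAGAAGGTTCAGGA\n", "+\n", "B"*22 + "\n"]
example_longbar = (6, "same")
example_cutter = "TGCAG"
search = example_read1[1][:int(example_longbar[0] + len(example_cutter) + 1)]
## search == "CATCATTGCAGA"
barcode = search.rsplit(example_cutter, 1)[0]
## barcode == "CATCAT"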
def find3radbcode(cutters, longbar, read1):
""" find barcode sequence in the beginning of read """
## default barcode string
for ambigcuts in cutters:
for cutter in ambigcuts:
## If the cutter is unambiguous there will only be one.
if not cutter:
continue
search = read1[1][:int(longbar[0]+len(cutter)+1)]
splitsearch = search.rsplit(cutter, 1)
if len(splitsearch) > 1:
return splitsearch[0]
## No cutter found
return splitsearch[0] |
def make_stats(data, perfile, fsamplehits, fbarhits, fmisses, fdbars):
"""
Write stats and stores to Assembly object.
"""
## out file
outhandle = os.path.join(data.dirs.fastqs, 's1_demultiplex_stats.txt')
outfile = open(outhandle, 'w')
## write the header for file stats ------------------------------------
outfile.write('{:<35} {:>13}{:>13}{:>13}\n'.\
format("raw_file", "total_reads", "cut_found", "bar_matched"))
## write the file stats
r1names = sorted(perfile)
for fname in r1names:
dat = perfile[fname]
#dat = [perfile[fname][i] for i in ["ftotal", "fcutfound", "fmatched"]]
outfile.write('{:<35} {:>13}{:>13}{:>13}\n'.\
format(fname, dat[0], dat[1], dat[2]))
## repeat for pairfile
if 'pair' in data.paramsdict["datatype"]:
fname = fname.replace("_R1_", "_R2_")
outfile.write('{:<35} {:>13}{:>13}{:>13}\n'.\
format(fname, dat[0], dat[1], dat[2]))
## spacer, how many records for each sample --------------------------
outfile.write('\n{:<35} {:>13}\n'.format("sample_name", "total_reads"))
## names alphabetical. Write to file. Will save again below to Samples.
snames = set()
for sname in data.barcodes:
if "-technical-replicate-" in sname:
sname = sname.rsplit("-technical-replicate", 1)[0]
snames.add(sname)
for sname in sorted(list(snames)):
outfile.write("{:<35} {:>13}\n".format(sname, fsamplehits[sname]))
## spacer, which barcodes were found -----------------------------------
outfile.write('\n{:<35} {:>13} {:>13} {:>13}\n'.\
format("sample_name", "true_bar", "obs_bar", "N_records"))
## write sample results
for sname in sorted(data.barcodes):
if "-technical-replicate-" in sname:
fname = sname.rsplit("-technical-replicate", 1)[0]
else:
fname = sname
## write perfect hit
hit = data.barcodes[sname]
offhitstring = ""
## write off-n hits
## sort list of off-n hits
if fname in fdbars:
offkeys = list(fdbars.get(fname))
for offhit in offkeys[::-1]:
## exclude perfect hit
if offhit not in data.barcodes.values():
offhitstring += '{:<35} {:>13} {:>13} {:>13}\n'.\
format(sname, hit, offhit, fbarhits[offhit]/2)
#sumoffhits += fbarhits[offhit]
## write string to file
outfile.write('{:<35} {:>13} {:>13} {:>13}\n'.\
#format(sname, hit, hit, fsamplehits[fname]-sumoffhits))
format(sname, hit, hit, fbarhits[hit]/2))
outfile.write(offhitstring)
## write misses
misskeys = list(fmisses.keys())
misskeys.sort(key=fmisses.get)
for key in misskeys[::-1]:
outfile.write('{:<35} {:>13}{:>13}{:>13}\n'.\
format("no_match", "_", key, fmisses[key]))
outfile.close()
## Link Sample with this data file to the Assembly object
for sname in snames:
## make the sample
sample = Sample()
sample.name = sname
## allow multiple barcodes if its a replicate.
barcodes = []
for n in xrange(500):
fname = sname+"-technical-replicate-{}".format(n)
fbar = data.barcodes.get(fname)
if fbar:
barcodes.append(fbar)
if barcodes:
sample.barcode = barcodes
else:
sample.barcode = data.barcodes[sname]
## file names
if 'pair' in data.paramsdict["datatype"]:
sample.files.fastqs = [(os.path.join(data.dirs.fastqs,
sname+"_R1_.fastq.gz"),
os.path.join(data.dirs.fastqs,
sname+"_R2_.fastq.gz"))]
else:
sample.files.fastqs = [(os.path.join(data.dirs.fastqs,
sname+"_R1_.fastq.gz"), "")]
## fill in the summary stats
sample.stats["reads_raw"] = int(fsamplehits[sname])
## fill in the full df stats value
sample.stats_dfs.s1["reads_raw"] = int(fsamplehits[sname])
## Only link Sample if it has data
if sample.stats["reads_raw"]:
sample.stats.state = 1
data.samples[sample.name] = sample
else:
print("Excluded sample: no data found for", sname)
## initiate s1 key for data object
data.stats_dfs.s1 = data._build_stat("s1")
data.stats_files.s1 = outhandle |
def barmatch2(data, tups, cutters, longbar, matchdict, fnum):
"""
Cleaner barmatch function: sorts reads from one (possibly paired) fastq
chunk into per-sample lists by barcode match, periodically writes them to
tmp files, and returns the path to a pickled stats summary.
"""
## how many reads to store before writing to disk
waitchunk = int(1e6)
## pid name for this engine
epid = os.getpid()
## counters for total reads, those with cutsite, and those that matched
filestat = np.zeros(3, dtype=np.int)
## store observed sample matches
samplehits = {}
## dictionaries to store first and second reads until writing to file
dsort1 = {}
dsort2 = {}
## dictionary for all bars matched in sample
dbars = {}
## fill for sample names
for sname in data.barcodes:
if "-technical-replicate-" in sname:
sname = sname.rsplit("-technical-replicate", 1)[0]
samplehits[sname] = 0
dsort1[sname] = []
dsort2[sname] = []
dbars[sname] = set()
## store observed bars
barhits = {}
for barc in matchdict:
barhits[barc] = 0
## store others
misses = {}
misses['_'] = 0
## build func for finding barcode
getbarcode = get_barcode_func(data, longbar)
## get quart iterator of reads
if tups[0].endswith(".gz"):
ofunc = gzip.open
else:
ofunc = open
## create iterators
ofile1 = ofunc(tups[0], 'r')
fr1 = iter(ofile1)
quart1 = itertools.izip(fr1, fr1, fr1, fr1)
if tups[1]:
ofile2 = ofunc(tups[1], 'r')
fr2 = iter(ofile2)
quart2 = itertools.izip(fr2, fr2, fr2, fr2)
quarts = itertools.izip(quart1, quart2)
else:
quarts = itertools.izip(quart1, iter(int, 1))
## go until end of the file
while 1:
try:
read1, read2 = quarts.next()
read1 = list(read1)
filestat[0] += 1
except StopIteration:
break
barcode = ""
## Get barcode_R2 and check for matching sample name
if '3rad' in data.paramsdict["datatype"]:
## Here we're just reusing the findbcode function
## for R2, and reconfiguring the longbar tuple to have the
## maxlen for the R2 barcode
## Parse barcode. Use the parsing function selected above.
barcode1 = find3radbcode(cutters=cutters,
longbar=longbar, read1=read1)
barcode2 = find3radbcode(cutters=cutters,
longbar=(longbar[2], longbar[1]), read1=read2)
barcode = barcode1 + "+" + barcode2
else:
## Parse barcode. Uses the parsing function selected above.
barcode = getbarcode(cutters, read1, longbar)
## find if it matches
sname_match = matchdict.get(barcode)
if sname_match:
#sample_index[filestat[0]-1] = snames.index(sname_match) + 1
## record who matched
dbars[sname_match].add(barcode)
filestat[1] += 1
filestat[2] += 1
samplehits[sname_match] += 1
## note: matched barcodes are counted twice here (the two increments
## below); make_stats divides these counts by 2 when reporting N_records
barhits[barcode] += 1
if barcode in barhits:
barhits[barcode] += 1
else:
barhits[barcode] = 1
## trim off barcode
lenbar = len(barcode)
if '3rad' in data.paramsdict["datatype"]:
## Iff 3rad trim the len of the first barcode
lenbar = len(barcode1)
if data.paramsdict["datatype"] == '2brad':
overlen = len(cutters[0][0]) + lenbar + 1
read1[1] = read1[1][:-overlen] + "\n"
read1[3] = read1[3][:-overlen] + "\n"
else:
read1[1] = read1[1][lenbar:]
read1[3] = read1[3][lenbar:]
## Trim barcode off R2 and append. Only 3rad datatype
## pays the cpu cost of splitting R2
if '3rad' in data.paramsdict["datatype"]:
read2 = list(read2)
read2[1] = read2[1][len(barcode2):]
read2[3] = read2[3][len(barcode2):]
## append to dsort
dsort1[sname_match].append("".join(read1))
if 'pair' in data.paramsdict["datatype"]:
dsort2[sname_match].append("".join(read2))
else:
misses["_"] += 1
if barcode:
filestat[1] += 1
## how can we make it so all of the engines aren't trying to write to
## ~100-200 files all at the same time? This is the I/O limit we hit..
## write out every `waitchunk` reads to keep memory low. It is fine on
## HPC, which can write in parallel, but regular systems might crash
if not filestat[0] % waitchunk:
## write the remaining reads to file"
writetofile(data, dsort1, 1, epid)
if 'pair' in data.paramsdict["datatype"]:
writetofile(data, dsort2, 2, epid)
## clear out dsorts
for sname in data.barcodes:
if "-technical-replicate-" in sname:
sname = sname.rsplit("-technical-replicate", 1)[0]
dsort1[sname] = []
dsort2[sname] = []
## reset longlist
#longlist = np.zeros(waitchunk, dtype=np.uint32)
## close open files
ofile1.close()
if tups[1]:
ofile2.close()
## write the remaining reads to file
writetofile(data, dsort1, 1, epid)
if 'pair' in data.paramsdict["datatype"]:
writetofile(data, dsort2, 2, epid)
## return stats in saved pickle b/c return_queue is too small
## and the size of the match dictionary can become quite large
samplestats = [samplehits, barhits, misses, dbars]
outname = os.path.join(data.dirs.fastqs, "tmp_{}_{}.p".format(epid, fnum))
with open(outname, 'w') as wout:
pickle.dump([filestat, samplestats], wout)
return outname |
def get_barcode_func(data, longbar):
""" returns the fastest func given data & longbar"""
## build func for finding barcode
if longbar[1] == 'same':
if data.paramsdict["datatype"] == '2brad':
def getbarcode(cutters, read1, longbar):
""" find barcode for 2bRAD data """
return read1[1][:-(len(cutters[0][0]) + 1)][-longbar[0]:]
else:
def getbarcode(_, read1, longbar):
""" finds barcode for invariable length barcode data """
return read1[1][:longbar[0]]
else:
def getbarcode(cutters, read1, longbar):
""" finds barcode for variable barcode lengths"""
return findbcode(cutters, longbar, read1)
return getbarcode |
def get_quart_iter(tups):
""" returns an iterator to grab four lines at a time """
if tups[0].endswith(".gz"):
ofunc = gzip.open
else:
ofunc = open
## create iterators
ofile1 = ofunc(tups[0], 'r')
fr1 = iter(ofile1)
quart1 = itertools.izip(fr1, fr1, fr1, fr1)
if tups[1]:
ofile2 = ofunc(tups[1], 'r')
fr2 = iter(ofile2)
quart2 = itertools.izip(fr2, fr2, fr2, fr2)
quarts = itertools.izip(quart1, quart2)
else:
ofile2 = 0
quarts = itertools.izip(quart1, iter(int, 1))
## make a generator
def feedme(quarts):
for quart in quarts:
yield quart
genquarts = feedme(quarts)
## return generator and handles
return genquarts, ofile1, ofile2 |
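## ---------------------------------------------------------------------
## Hedged stand-alone sketch of the quartet-grouping pattern used above:
## zipping one iterator against itself four times yields one fastq record
## (header, seq, plus, qual) per iteration. Works on py2 (izip) and py3.
import itertools

def iter_quartets(lines):
    """ group an iterable of fastq lines into 4-line records """
    it = iter(lines)
    zipper = getattr(itertools, "izip", zip)
    return zipper(it, it, it, it)

example_lines = ["@r1\n", "ACGT\n", "+\n", "IIII\n",
                 "@r2\n", "TTTT\n", "+\n", "IIII\n"]
quartets = list(iter_quartets(example_lines))
## quartets[0] == ("@r1\n", "ACGT\n", "+\n", "IIII\n")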
def writetofastq(data, dsort, read):
"""
Writes sorted data 'dsort dict' to a tmp files
"""
if read == 1:
rrr = "R1"
else:
rrr = "R2"
for sname in dsort:
## skip writing if empty. Write to tmpname
handle = os.path.join(data.dirs.fastqs,
"{}_{}_.fastq".format(sname, rrr))
with open(handle, 'a') as out:
out.write("".join(dsort[sname])) |
def collate_files(data, sname, tmp1s, tmp2s):
"""
Collate temp fastq files in tmp-dir into 1 gzipped sample.
"""
## out handle
out1 = os.path.join(data.dirs.fastqs, "{}_R1_.fastq.gz".format(sname))
out = io.BufferedWriter(gzip.open(out1, 'w'))
## build cmd
cmd1 = ['cat']
for tmpfile in tmp1s:
cmd1 += [tmpfile]
## compression function
proc = sps.Popen(['which', 'pigz'], stderr=sps.PIPE, stdout=sps.PIPE).communicate()
if proc[0].strip():
compress = ["pigz"]
else:
compress = ["gzip"]
## call cmd
proc1 = sps.Popen(cmd1, stderr=sps.PIPE, stdout=sps.PIPE)
proc2 = sps.Popen(compress, stdin=proc1.stdout, stderr=sps.PIPE, stdout=out)
err = proc2.communicate()
if proc2.returncode:
raise IPyradWarningExit("error in collate_files R1 %s", err)
proc1.stdout.close()
out.close()
## then cleanup
for tmpfile in tmp1s:
os.remove(tmpfile)
if 'pair' in data.paramsdict["datatype"]:
## out handle
out2 = os.path.join(data.dirs.fastqs, "{}_R2_.fastq.gz".format(sname))
out = io.BufferedWriter(gzip.open(out2, 'w'))
## build cmd
cmd1 = ['cat']
for tmpfile in tmp2s:
cmd1 += [tmpfile]
## call cmd
proc1 = sps.Popen(cmd1, stderr=sps.PIPE, stdout=sps.PIPE)
proc2 = sps.Popen(compress, stdin=proc1.stdout, stderr=sps.PIPE, stdout=out)
err = proc2.communicate()
if proc2.returncode:
raise IPyradWarningExit("error in collate_files R2 %s", err)
proc1.stdout.close()
out.close()
## then cleanup
for tmpfile in tmp2s:
os.remove(tmpfile) |
def prechecks2(data, force):
"""
A new simplified version of prechecks func before demux
Checks before starting analysis.
-----------------------------------
1) Is there data in raw_fastq_path
2) Is there a barcode file
3) Is there a workdir and fastqdir
4) remove old fastq/tmp_sample_R*_ files
5) return file names as pairs (r1, r2) or fakepairs (r1, 1)
6) get ambiguous cutter resolutions
7) get optim size
"""
## check for data using glob for fuzzy matching
if not glob.glob(data.paramsdict["raw_fastq_path"]):
raise IPyradWarningExit(NO_RAWS.format(data.paramsdict["raw_fastq_path"]))
## find longest barcode
try:
## Handle 3rad multi-barcodes. Gets len of the first one.
## Should be harmless for single barcode data
barlens = [len(i.split("+")[0]) for i in data.barcodes.values()]
if len(set(barlens)) == 1:
longbar = (barlens[0], 'same')
else:
longbar = (max(barlens), 'diff')
## For 3rad we need to add the length info for barcodes_R2
if "3rad" in data.paramsdict["datatype"]:
barlens = [len(i.split("+")[1]) for i in data.barcodes.values()]
longbar = (longbar[0], longbar[1], max(barlens))
except ValueError:
raise IPyradWarningExit(NO_BARS.format(data.paramsdict["barcodes_path"]))
## setup dirs: [workdir] and a [workdir/name_fastqs]
opj = os.path.join
## create project dir
pdir = os.path.realpath(data.paramsdict["project_dir"])
if not os.path.exists(pdir):
os.mkdir(pdir)
## create fastq dir
data.dirs.fastqs = opj(pdir, data.name+"_fastqs")
if os.path.exists(data.dirs.fastqs) and force:
print(OVERWRITING_FASTQS.format(**{"spacer":data._spacer}))
shutil.rmtree(data.dirs.fastqs)
if not os.path.exists(data.dirs.fastqs):
os.mkdir(data.dirs.fastqs)
## ensure no leftover tmp files from a previous run (there shouldn't be)
oldtmps = glob.glob(os.path.join(data.dirs.fastqs, "tmp_*_R1_"))
oldtmps += glob.glob(os.path.join(data.dirs.fastqs, "tmp_*_R2_"))
for oldtmp in oldtmps:
os.remove(oldtmp)
## gather raw sequence filenames (people want this to be flexible ...)
if 'pair' in data.paramsdict["datatype"]:
raws = combinefiles(data.paramsdict["raw_fastq_path"])
else:
raws = zip(glob.glob(data.paramsdict["raw_fastq_path"]), iter(int, 1))
## returns a list of both resolutions of cut site 1
## (TGCAG, ) ==> [TGCAG, ]
## (TWGC, ) ==> [TAGC, TTGC]
## (TWGC, AATT) ==> [TAGC, TTGC]
cutters = [ambigcutters(i) for i in data.paramsdict["restriction_overhang"]]
print(cutters)
assert cutters, "Must enter a `restriction_overhang` for demultiplexing."
## get matchdict
matchdict = inverse_barcodes(data)
## return all
return raws, longbar, cutters, matchdict |
def inverse_barcodes(data):
""" Build full inverse barcodes dictionary """
matchdict = {}
bases = set("CATGN")
poss = set()
## do perfect matches
for sname, barc in data.barcodes.items():
## remove -technical-replicate-N if present
if "-technical-replicate-" in sname:
sname = sname.rsplit("-technical-replicate", 1)[0]
matchdict[barc] = sname
poss.add(barc)
if data.paramsdict["max_barcode_mismatch"] > 0:
## get 1-base diffs
for idx1, base in enumerate(barc):
diffs = bases.difference(base)
for diff in diffs:
lbar = list(barc)
lbar[idx1] = diff
tbar1 = "".join(lbar)
if tbar1 not in poss:
matchdict[tbar1] = sname
poss.add(tbar1)
else:
if matchdict.get(tbar1) != sname:
print("""\
Note: barcodes {}:{} and {}:{} are within {} base change of each other
Ambiguous barcodes that match to both samples will arbitrarily
be assigned to the first sample. If you do not like this idea
then lower the value of max_barcode_mismatch and rerun step 1\n"""\
.format(sname, barc,
matchdict[tbar1], data.barcodes[matchdict[tbar1]],
data.paramsdict["max_barcode_mismatch"]))
## if allowing two base difference things get big
## for each modified bar, allow one modification to other bases
if data.paramsdict["max_barcode_mismatch"] > 1:
for idx2, _ in enumerate(tbar1):
## skip the base that is already modified
if idx2 != idx1:
for diff in bases.difference(tbar1[idx2]):
ltbar = list(tbar1)
ltbar[idx2] = diff
tbar2 = "".join(ltbar)
if tbar2 not in poss:
matchdict[tbar2] = sname
poss.add(tbar2)
else:
if matchdict.get(tbar2) != sname:
print("""\
Note: barcodes {}:{} and {}:{} are within {} base change of each other\
Ambiguous barcodes that match to both samples will arbitrarily
be assigned to the first sample. If you do not like this idea
then lower the value of max_barcode_mismatch and rerun step 1\n"""\
.format(sname, barc,
matchdict[tbar2], data.barcodes[matchdict[tbar2]],
data.paramsdict["max_barcode_mismatch"]))
return matchdict |
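## ---------------------------------------------------------------------
## Hedged stand-alone sketch of the 1-mismatch expansion above: for one
## barcode, every single-base substitution over CATGN maps back to the
## same sample name.
bases = set("CATGN")
barc = "CATG"
one_off = set()
for idx, base in enumerate(barc):
    for diff in bases.difference(base):
        lbar = list(barc)
        lbar[idx] = diff
        one_off.add("".join(lbar))
## len(one_off) == 16 (4 positions x 4 alternative bases), and each of
## these variants would map to the same sample in matchdict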
def estimate_optim(data, testfile, ipyclient):
"""
Estimate a reasonable optim value by grabbing a chunk of sequences,
decompressing and counting them, to estimate the full file size.
"""
## count the len of one file and assume all others are similar len
insize = os.path.getsize(testfile)
tmp_file_name = os.path.join(data.paramsdict["project_dir"], "tmp-step1-count.fq")
if testfile.endswith(".gz"):
infile = gzip.open(testfile)
outfile = gzip.open(tmp_file_name, 'wb', compresslevel=5)
else:
infile = open(testfile)
outfile = open(tmp_file_name, 'w')
## Write the first 10000 reads (40000 lines) to a tmp file and use the
## ratio of file sizes to approximate the number of reads in the full file
outfile.write("".join(itertools.islice(infile, 40000)))
outfile.close()
infile.close()
## Get the size of the tmp file
tmp_size = os.path.getsize(tmp_file_name)
## divide by the tmp file size and multiply by 10000 to approximate
## the size of the input .fq files
inputreads = int(insize / tmp_size) * 10000
os.remove(tmp_file_name)
return inputreads |
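## ---------------------------------------------------------------------
## Hedged worked example of the estimate above: if the raw file is 4 GB
## on disk and the 10000-read (40000-line) test chunk is 2 MB, the file
## is estimated to hold about 2048 * 10000 reads. Numbers are made up.
insize = 4 * 1024**3      ## full input file size in bytes
tmp_size = 2 * 1024**2    ## size of the 10000-read test chunk in bytes
inputreads = int(insize / tmp_size) * 10000
## inputreads == 20480000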
def run2(data, ipyclient, force):
"""
One input file (or pair) is run on two processors, one for reading
and decompressing the data, and the other for demuxing it.
"""
## get file handles, name-lens, cutters, and matchdict
raws, longbar, cutters, matchdict = prechecks2(data, force)
## wrap funcs to ensure we can kill tmpfiles
kbd = 0
try:
## if splitting files, split files into smaller chunks for demuxing
chunkfiles = splitfiles(data, raws, ipyclient)
## send chunks to be demux'd
statdicts = demux2(data, chunkfiles, cutters, longbar, matchdict, ipyclient)
## concat tmp files
concat_chunks(data, ipyclient)
## build stats from dictionaries
perfile, fsamplehits, fbarhits, fmisses, fdbars = statdicts
make_stats(data, perfile, fsamplehits, fbarhits, fmisses, fdbars)
except KeyboardInterrupt:
print("\n ...interrupted, just a second while we ensure proper cleanup")
kbd = 1
## cleanup
finally:
## cleaning up the tmpdir is safe from ipyclient
tmpdir = os.path.join(data.paramsdict["project_dir"], "tmp-chunks-"+data.name)
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
if kbd:
raise KeyboardInterrupt("s1")
else:
_cleanup_and_die(data) |
def _cleanup_and_die(data):
""" cleanup func for step 1 """
tmpfiles = glob.glob(os.path.join(data.dirs.fastqs, "tmp_*_R*.fastq"))
tmpfiles += glob.glob(os.path.join(data.dirs.fastqs, "tmp_*.p"))
for tmpf in tmpfiles:
os.remove(tmpf) |
def run3(data, ipyclient, force):
"""
One input file (or pair) is run on two processors, one for reading
and decompressing the data, and the other for demuxing it.
"""
start = time.time()
## get file handles, name-lens, cutters, and matchdict,
## and remove any existing files if a previous run failed.
raws, longbar, cutters, matchdict = prechecks2(data, force)
## wrap funcs to ensure we can kill tmpfiles
kbd = 0
try:
## send chunks to be demux'd, nothing is parallelized yet.
lbview = ipyclient.load_balanced_view()
args = (data, raws, cutters, longbar, matchdict)
async = lbview.apply(demux3, *args)
## track progress
while 1:
## report progress until the single demux job finishes
ready = [async.ready()]
elapsed = datetime.timedelta(seconds=int(time.time()-start))
printstr = ' writing/compressing | {} | s1 |'
progressbar(len(ready), sum(ready), printstr.format(elapsed), spacer=data._spacer)
time.sleep(0.1)
if async.ready():
print("")
break
if async.successful():
statdicts = async.get()
else:
raise IPyradWarningExit(async.get())
## build stats from dictionaries
perfile, fsamplehits, fbarhits, fmisses, fdbars = statdicts
make_stats(data, perfile, fsamplehits, fbarhits, fmisses, fdbars)
except KeyboardInterrupt:
print("\n ...interrupted, just a second while we ensure proper cleanup")
kbd = 1
## cleanup
finally:
## cleaning up the tmpdir is safe from ipyclient
tmpdir = os.path.join(data.paramsdict["project_dir"], "tmp-chunks-"+data.name)
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
tmpfiles = glob.glob(os.path.join(data.dirs.fastqs, "tmp_*_R*.fastq"))
tmpfiles += glob.glob(os.path.join(data.dirs.fastqs, "tmp_*.p"))
for tmpf in tmpfiles:
if os.path.exists(tmpf):
os.remove(tmpf)
if kbd:
raise |
def splitfiles(data, raws, ipyclient):
""" sends raws to be chunked"""
## create a tmpdir for chunked_files and a chunk optimizer
tmpdir = os.path.join(data.paramsdict["project_dir"], "tmp-chunks-"+data.name)
if os.path.exists(tmpdir):
shutil.rmtree(tmpdir)
os.makedirs(tmpdir)
## chunk into 8M reads
totalreads = estimate_optim(data, raws[0][0], ipyclient)
optim = int(8e6)
njobs = int(totalreads/(optim/4.)) * len(raws)
## if more files than cpus, or only a small number of reads: no chunking
nosplit = 0
if (len(raws) > len(ipyclient)) or (totalreads < optim):
nosplit = 1
## send slices N at a time. The dict chunkfiles stores a tuple of rawpairs
## dictionary to store asyncresults for sorting jobs
start = time.time()
chunkfiles = {}
for fidx, tups in enumerate(raws):
handle = os.path.splitext(os.path.basename(tups[0]))[0]
## if not chunking, submit the whole file as one job
if nosplit:
chunkfiles[handle] = [tups]
else:
## chunk the file using zcat_make_temps
chunklist = zcat_make_temps(data, tups, fidx, tmpdir, optim, njobs, start)
chunkfiles[handle] = chunklist
if not nosplit:
print("")
return chunkfiles |
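## ---------------------------------------------------------------------
## Hedged worked example of the chunking math above: with ~40M reads per
## raw file, an 8M-line chunk size (2M reads per chunk), and 2 raw file
## pairs, the data would be split into roughly 40 chunk jobs. Numbers are
## made up.
totalreads = int(40e6)
optim = int(8e6)          ## lines per chunk; 4 lines per read
nraws = 2
njobs = int(totalreads / (optim / 4.)) * nraws
## njobs == 40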