def _store_N_samples(self, start, ipyclient, quiet=False):
"""
Find all quartets of samples and store in a large array
A chunk size is assigned for sampling from the array of quartets
based on the number of cpus available. This should be relatively
large so that we don't spend a lot of time doing I/O, but small
enough that jobs finish often for checkpointing.
"""
breaks = 2
if self.params.nquartets < 5000:
breaks = 1
if self.params.nquartets > 100000:
breaks = 8
if self.params.nquartets > 500000:
breaks = 16
if self.params.nquartets > 5000000:
breaks = 32
## chunk up the data
ncpus = len(ipyclient)
self._chunksize = (self.params.nquartets // (breaks * ncpus) \
+ (self.params.nquartets % (breaks * ncpus)))
## create h5 OUT empty arrays
## 'quartets' stores the inferred quartet relationship (1 x 4)
## This can get huge, so we need to choose the dtype wisely.
## the values are simply the index of the taxa, so uint16 is good.
with h5py.File(self.database.output, 'w') as io5:
io5.create_dataset("quartets",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4))
## group for bootstrap invariant matrices ((16, 16), uint32)
## these store the actual matrix counts. dtype uint32 can store
## up to 4294967295. More than enough. uint16 max is 65535.
## the 0 boot is the original seqarray.
io5.create_group("invariants")
## append to h5 IN array (which has the seqarray, bootsarr, maparr)
## and fill it with all of the quartet sets we will ever sample.
## the sampling method will vary depending on whether this is random,
## all, or equal splits (in a separate but similar function).
with h5py.File(self.database.input, 'a') as io5:
try:
io5.create_dataset("quartets",
(self.params.nquartets, 4),
dtype=np.uint16,
chunks=(self._chunksize, 4),
compression='gzip')
except RuntimeError:
raise IPyradWarningExit(
"database file already exists for this analysis, "
+ "you must run with the force flag to overwrite")
## submit store job to write into self.database.input
if self.params.method == "all":
async = ipyclient[0].apply(store_all, self)
elif self.params.method == "random":
async = ipyclient[0].apply(store_random, self)
elif self.params.method == "equal":
async = ipyclient[0].apply(store_equal, self)
## progress bar
printstr = "generating q-sets | {} | "
prog = 0
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if not quiet:
if async.stdout:
prog = int(async.stdout.strip().split()[-1])
progressbar(self.params.nquartets, prog,
printstr.format(elapsed), spacer="")
if not async.ready():
time.sleep(0.1)
else:
break
if not async.successful():
raise IPyradWarningExit(async.result())
if not quiet:
print("") |
def _dump_qmc(self):
"""
Writes the inferred quartet sets from the database to a text
file to be used as input for QMC. Quartets that had no information
available (i.e., no SNPs) were written to the database as 0,0,0,0
and are excluded here from the output.
"""
## open the h5 database
with h5py.File(self.database.output, 'r') as io5:
## create an output file for writing
self.files.qdump = os.path.join(self.dirs, self.name+".quartets.txt")
with open(self.files.qdump, 'w') as qdump:
## pull from db
for idx in xrange(0, self.params.nquartets, self._chunksize):
qchunk = io5["quartets"][idx:idx+self._chunksize, :]
quarts = [tuple(j) for j in qchunk if np.any(j)]
## shuffle and format for qmc
np.random.shuffle(quarts)
chunk = ["{},{}|{},{}".format(*i) for i in quarts]
qdump.write("\n".join(chunk)+"\n") |
def _run_qmc(self, boot):
"""
Runs quartet max-cut QMC on the quartets qdump file.
"""
## build command
self._tmp = os.path.join(self.dirs, ".tmptre")
cmd = [ip.bins.qmc, "qrtt="+self.files.qdump, "otre="+self._tmp]
## run it
proc = subprocess.Popen(cmd, stderr=subprocess.STDOUT, stdout=subprocess.PIPE)
res = proc.communicate()
if proc.returncode:
raise IPyradWarningExit(res[1])
## parse tmp file written by qmc into a tree and rename it
with open(self._tmp, 'r') as intree:
tre = ete3.Tree(intree.read().strip())
names = tre.get_leaves()
for name in names:
name.name = self.samples[int(name.name)]
tmptre = tre.write(format=9)
## save the tree to file
if boot:
self.trees.boots = os.path.join(self.dirs, self.name+".boots")
with open(self.trees.boots, 'a') as outboot:
outboot.write(tmptre+"\n")
else:
self.trees.tree = os.path.join(self.dirs, self.name+".tree")
with open(self.trees.tree, 'w') as outtree:
outtree.write(tmptre)
## save the file
self._save() |
def _compute_stats(self, start, ipyclient, quiet=False):
"""
Compute sampling stats and consens trees.
"""
## get name indices
names = self.samples
## make a consensus from bootstrap reps.
if self.checkpoint.boots:
tre = ete3.Tree(self.trees.tree, format=0)
tre.unroot()
with open(self.trees.boots, 'r') as inboots:
bb = [ete3.Tree(i.strip(), format=0) for i in inboots.readlines()]
bb = [tre] + bb
## calculate consensus supports
ctre, counts = consensus_tree(bb, names=names)
self.trees.cons = os.path.join(self.dirs, self.name+".cons")
with open(self.trees.cons, 'w') as ocons:
ocons.write(ctre.write(format=0))
else:
ctre = ete3.Tree(self.trees.tree, format=0)
ctre.unroot()
## build stats file and write trees
self.trees.nhx = os.path.join(self.dirs, self.name+".nhx")
lbview = ipyclient.load_balanced_view()
qtots = {}
qsamp = {}
tots = sum(1 for i in ctre.iter_leaves())
totn = set(ctre.get_leaf_names())
## iterate over node traversal
for node in ctre.traverse():
## this is slow, needs to look at every sampled quartet
## so we send it to be processed on engines
qtots[node] = lbview.apply(get_total, *(tots, node))
qsamp[node] = lbview.apply(get_sampled, *(self, totn, node))
## wait for jobs to finish (+1 to lenjobs is for the final progress print)
alljobs = qtots.values() + qsamp.values()
lenjobs = len(alljobs) + 1
printstr = "calculating stats | {} | "
done = 0
while 1:
if not quiet:
done = sum([i.ready() for i in alljobs])
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(lenjobs, done,
printstr.format(elapsed), spacer="")
if (lenjobs - 1) == done:
break
else:
time.sleep(0.1)
## store results in the tree object
for node in ctre.traverse():
total = qtots[node].result()
sampled = qsamp[node].result()
node.add_feature("quartets_total", total)
node.add_feature("quartets_sampled", sampled)
features = ["quartets_total", "quartets_sampled"]
## update final progress
elapsed = datetime.timedelta(seconds=int(time.time()-start))
progressbar(1, 1, printstr.format(elapsed), spacer="")
if not quiet:
print("")
## write tree in NHX format
with open(self.trees.nhx, 'w') as outtre:
outtre.write(ctre.write(format=0, features=features)) |
def _load(self, name, workdir, quiet=False):
"""
Load a JSON serialized tetrad instance to continue from a checkpoint.
"""
## load the JSON string and try with name+.json
path = os.path.join(workdir, name)
if not path.endswith(".tet.json"):
path += ".tet.json"
## expand user
path = path.replace("~", os.path.expanduser("~"))
## load the json file as a dictionary
try:
with open(path, 'r') as infile:
fullj = _byteify(json.loads(infile.read(),
object_hook=_byteify),
ignore_dicts=True)
except IOError:
raise IPyradWarningExit("""\
Cannot find checkpoint (.tet.json) file at: {}""".format(path))
## set old attributes into new tetrad object
self.name = fullj["name"]
self.files.data = fullj["files"]["data"]
self.files.mapfile = fullj["files"]["mapfile"]
self.dirs = fullj["dirs"]
self._init_seqarray(quiet=quiet)
self._parse_names()
## fill in the same attributes
for key in fullj:
## fill Params a little different
if key in ["files", "params", "database",
"trees", "stats", "checkpoint"]:
filler = fullj[key]
for ikey in filler:
self.__dict__[key].__setattr__(ikey, fullj[key][ikey])
else:
self.__setattr__(key, fullj[key]) |
def _inference(self, start, ipyclient, quiet):
"""
Sends slices of quartet sets to parallel engines for computing,
enters results into output database, sends finished quartet sets
to QMC for tree inference, and prints progress bars.
"""
## load-balancer for single-threaded execution jobs
lbview = ipyclient.load_balanced_view()
## an iterator that grabs quartet chunk start positions
jobs = range(self.checkpoint.arr, self.params.nquartets, self._chunksize)
## if this is a bootstrap then init a new boot array in the database
## max val is 65535 in here if uint16
bootkey = "boot{}".format(self.checkpoint.boots)
with h5py.File(self.database.output, 'r+') as io5:
if bootkey not in io5["invariants"].keys():
io5["invariants"].create_dataset(
bootkey,
(self.params.nquartets, 16, 16),
dtype=np.uint16,
chunks=(self._chunksize, 16, 16))
## start progress bar if new or skip if bootstrapping
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if self.checkpoint.boots:
printstr = "bootstrap trees | {} | "
else:
printstr = "initial tree | {} | "
if not quiet:
progressbar(1, 0, printstr.format(elapsed), spacer="")
## submit jobs distributed across the cluster.
asyncs = {}
for job in jobs:
asyncs[job] = lbview.apply(nworker, *(self, job))
## wait for jobs to finish, catch results as they return and
## enter them into the HDF5 database to keep memory low.
done = 0
while 1:
## gather finished jobs
finished = [i for i,j in asyncs.iteritems() if j.ready()]
## iterate over finished list
for key in finished:
async = asyncs[key]
if async.successful():
## store result
done += 1
results = async.result()
self._insert_to_array(key, results)
## purge from memory
del asyncs[key]
else:
raise IPyradWarningExit(async.result())
## progress bar is different if first vs boot tree
elapsed = datetime.timedelta(seconds=int(time.time()-start))
if not self.checkpoint.boots:
if not quiet:
progressbar(len(jobs), done, printstr.format(elapsed), spacer="")
else:
if not quiet:
progressbar(self.params.nboots, self.checkpoint.boots,
printstr.format(elapsed), spacer="")
## done is counted on finish, so this means we're done
if len(asyncs) == 0:
break
else:
time.sleep(0.1)
## dump quartets into a text file for QMC
self._dump_qmc()
## send to QMC
if not self.checkpoint.boots:
self._run_qmc(0)
else:
self._run_qmc(1)
## reset the checkpoint arr
self.checkpoint.arr = 0
## print spacer if finished first tree or last boot.
if (not self.checkpoint.boots) and (not quiet):
print("")
elif (self.checkpoint.boots == self.params.nboots) and (not quiet):
print("") |
def _insert_to_array(self, chunk, results):
"""
Enters results arrays into the HDF5 database.
"""
## two result arrs
chunksize = self._chunksize
qrts, invs = results
## enter into db
with h5py.File(self.database.output, 'r+') as io5:
io5['quartets'][chunk:chunk+chunksize] = qrts
## entered as 0-indexed !
if self.params.save_invariants:
if self.checkpoint.boots:
key = "invariants/boot{}".format(self.checkpoint.boots)
io5[key][chunk:chunk+chunksize] = invs
else:
io5["invariants/boot0"][chunk:chunk+chunksize] = invs |
def run(self, force=False, quiet=False, ipyclient=None):
"""
Parameters
----------
force (bool):
Overwrite existing results for object with the same name
and workdir as this one.
quiet (bool):
Suppress printing of progress bars and cluster information.
ipyclient (ipyparallel.Client object):
A connected ipyclient object. If ipcluster instance is
not running on the default profile then ...
"""
## force overwrite needs to clear out the HDF5 database
if force:
self._refresh()
## print nquartet statement
if not quiet:
print("inferring {} quartet tree sets".format(self.params.nquartets))
## wrap the run in a try statement to ensure we properly shutdown
## and cleanup on exit or interrupt.
inst = None
try:
## find and connect to an ipcluster instance given the information
## in the _ipcluster dictionary if a connected client was not given.
if not ipyclient:
args = self._ipcluster.items() + [("spacer", "")]
ipyclient = ip.core.parallel.get_client(**dict(args))
## print the cluster connection information
if not quiet:
ip.cluster_info(ipyclient)
## store ipyclient engine pids to the dict so we can
## hard-interrupt them later if assembly is interrupted.
## Only stores pids of engines that aren't busy at this moment,
## otherwise it would block here while waiting to find their pids.
self._ipcluster["pids"] = {}
for eid in ipyclient.ids:
engine = ipyclient[eid]
if not engine.outstanding:
pid = engine.apply(os.getpid).get()
self._ipcluster["pids"][eid] = pid
## fill the input array with quartets to sample --------------------
start = time.time()
if (not self.checkpoint.boots) and (not self.trees.tree):
self._store_N_samples(start, ipyclient, quiet=quiet)
## calculate invariants for the full seqarray ----------------------
start = time.time()
if not self.trees.tree:
self._inference(start, ipyclient, quiet=quiet)
else:
if not quiet:
print("initial tree already inferred")
## calculate invariants for each bootstrap rep ----------------------
start = time.time()
if self.params.nboots:
if self.checkpoint.boots: #<= self.params.nboots:
if not quiet:
print("{} bootstrap trees already inferred"\
.format(self.checkpoint.boots))
while self.checkpoint.boots < self.params.nboots:
## resample the bootseq array
if self.files.mapfile:
self._sample_bootseq_array_map()
else:
self._sample_bootseq_array()
## start boot inference
self.checkpoint.boots += 1
self._inference(start, ipyclient, quiet=quiet)
## write output stats -----------------------------------------------
#self.files.stats = os.path.join(self.dirs, self.name+"_stats.txt")
start = time.time()
self._compute_stats(start, ipyclient, quiet=quiet)
## handle exceptions so they will be raised after we clean up below
except KeyboardInterrupt as inst:
print("\nKeyboard Interrupt by user. Cleaning up...")
except IPyradWarningExit as inst:
print("\nError encountered: {}".format(inst))
except Exception as inst:
print("\nUnknown exception encountered: {}".format(inst))
## close client when done or interrupted
finally:
try:
## save the Assembly
self._save()
## can't close client if it was never open
if ipyclient:
## send SIGINT (2) to all engines
ipyclient.abort()
time.sleep(1)
for engine_id, pid in self._ipcluster["pids"].items():
if ipyclient.queue_status()[engine_id]["tasks"]:
os.kill(pid, 2)
time.sleep(0.25)
## if CLI, stop jobs and shutdown
if 'ipyrad-cli' in self._ipcluster["cluster_id"]:
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
else:
if not ipyclient.outstanding:
ipyclient.purge_everything()
else:
## nanny: kill everything, something bad happened
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
print("\nwarning: ipcluster shutdown and must be restarted")
## reraise the error now that we're cleaned up
#if inst:
# raise inst
## if exception during shutdown then we really screwed up
except Exception as inst2:
print("warning: error during shutdown:\n{}".format(inst2)) |
def start_ipcluster(data):
""" Start ipcluster """
## if MPI argument then use --ip arg to view all sockets
iparg = ""
if "MPI" in data._ipcluster["engines"]:
iparg = "--ip=*"
## make ipcluster arg call
standard = """
ipcluster start
--daemonize
--cluster-id={}
--engines={}
--profile={}
--n={}
{}"""\
.format(data._ipcluster["cluster_id"],
data._ipcluster["engines"],
data._ipcluster["profile"],
data._ipcluster["cores"],
iparg)
## wrap ipcluster start
try:
LOGGER.info(shlex.split(standard))
subprocess.check_call(shlex.split(standard),
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
except subprocess.CalledProcessError as inst:
LOGGER.debug(" ipcontroller already running.")
raise
except Exception as inst:
sys.exit(" Error launching ipcluster for parallelization:\n({})\n".\
format(inst)) |
def register_ipcluster(data):
"""
The cluster_id is a unique id that keeps this instance of ipyrad from
interfering with other ipcontrollers. Run statements are wrapped so
that ipcluster will be killed on exit.
"""
## check if this pid already has a running cluster
data._ipcluster["cluster_id"] = "ipyrad-cli-"+str(os.getpid())
start_ipcluster(data)
return data |
def get_client(cluster_id, profile, engines, timeout, cores, quiet, spacer, **kwargs):
"""
Creates a client to view ipcluster engines for a given profile and
returns it with at least one engine spun up and ready to go. If no
engines are found after nwait amount of time then an error is raised.
If engines==MPI it waits a bit longer to find engines. If the number
of engines is set then it waits even longer to try to find that number
of engines.
"""
## save stds for later, we're gonna hide them to prevent external printing
save_stdout = sys.stdout
save_stderr = sys.stderr
sys.stdout = cStringIO.StringIO()
sys.stderr = cStringIO.StringIO()
## get cluster_info print string
connection_string = "{}establishing parallel connection:".format(spacer)
## wrapped search for ipcluster
try:
## are we looking for a running ipcluster instance?
if profile not in [None, "default"]:
args = {'profile': profile, "timeout": timeout}
else:
clusterargs = [cluster_id, profile, timeout]
argnames = ["cluster_id", "profile", "timeout"]
args = {key:value for key, value in zip(argnames, clusterargs)}
## get connection within timeout window of wait time and hide messages
ipyclient = ipp.Client(**args)
sys.stdout = save_stdout
sys.stderr = save_stderr
## check that all engines have connected
if (engines == "MPI") or ("ipyrad-cli-" in cluster_id):
if not quiet:
print(connection_string)
for _ in range(6000):
initid = len(ipyclient)
time.sleep(0.01)
## If MPI then wait for all engines to start so we can report
## how many cores are on each host. If Local then only wait for
## one engine to be ready and then just go.
if (engines == "MPI") or ("ipyrad-cli-" in cluster_id):
## wait for cores to be connected
if cores:
time.sleep(0.1)
if initid == cores:
break
if initid:
time.sleep(3)
if len(ipyclient) == initid:
break
else:
if cores:
if initid == cores:
break
else:
if initid:
break
except KeyboardInterrupt as inst:
## ensure stdout is reset even if Exception was raised
sys.stdout = save_stdout
sys.stderr = save_stderr
raise inst
## This is raised if ipcluster is not running ------------
except IOError as inst:
## ensure stdout is reset even if Exception was raised
sys.stdout = save_stdout
sys.stderr = save_stderr
if "ipyrad-cli-" in cluster_id:
raise IPyradWarningExit(NO_IPCLUSTER_CLI)
else:
raise IPyradWarningExit(NO_IPCLUSTER_API)
except (ipp.TimeoutError, ipp.NoEnginesRegistered) as inst:
## raised by ipp if no connection file is found for 'nwait' seconds
sys.stdout = save_stdout
sys.stderr = save_stderr
raise inst
except Exception as inst:
## if any other exceptions were missed...
sys.stdout = save_stdout
sys.stderr = save_stderr
raise inst
finally:
## ensure that no matter what we reset the stds
sys.stdout = save_stdout
sys.stderr = save_stderr
return ipyclient |
def memoize(func):
""" Memoization decorator for a function taking one or more arguments. """
class Memodict(dict):
""" just a dict"""
def __getitem__(self, *key):
return dict.__getitem__(self, key)
def __missing__(self, key):
""" this makes it faster """
ret = self[key] = func(*key)
return ret
return Memodict().__getitem__ |
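## A minimal usage sketch of the memoize decorator above (the decorated
## function is hypothetical): repeated calls with the same argument return
## the cached value instead of recomputing it.
@memoize
def gc_content(seq):
    return (seq.count("G") + seq.count("C")) / float(len(seq))

print(gc_content("ACGTGG"))  # computed and cached
print(gc_content("ACGTGG"))  # returned from the cache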
def ambigcutters(seq):
"""
Returns both resolutions of a cut site that has an ambiguous base in
it, else the single cut site
"""
resos = []
if any([i in list("RKSYWM") for i in seq]):
for base in list("RKSYWM"):
if base in seq:
resos.append(seq.replace(base, AMBIGS[base][0]))
resos.append(seq.replace(base, AMBIGS[base][1]))
return resos
else:
return [seq, ""] |
def splitalleles(consensus):
""" takes diploid consensus alleles with phase data stored as a mixture
of upper and lower case characters and splits it into 2 alleles """
## store two alleles, allele1 will start with bigbase
allele1 = list(consensus)
allele2 = list(consensus)
hidx = [i for (i, j) in enumerate(consensus) if j in "RKSWYMrkswym"]
## do remaining h sites
for idx in hidx:
hsite = consensus[idx]
if hsite.isupper():
allele1[idx] = PRIORITY[hsite]
allele2[idx] = MINOR[hsite]
else:
allele1[idx] = MINOR[hsite.upper()]
allele2[idx] = PRIORITY[hsite.upper()]
## convert back to strings
allele1 = "".join(allele1)
allele2 = "".join(allele2)
return allele1, allele2 |
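## A small example of splitalleles() (input is hypothetical). It relies on
## the module-level PRIORITY and MINOR dicts to resolve IUPAC codes; for an
## uppercase hetero site the PRIORITY base goes to allele1. For instance,
## if PRIORITY["R"] == "G" and MINOR["R"] == "A":
a1, a2 = splitalleles("ACRTA")
print(a1, a2)   # -> ACGTA ACATA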
def comp(seq):
""" returns a seq with complement. Preserves little n's for splitters."""
## makes base to its small complement then makes upper
return seq.replace("A", 't')\
.replace('T', 'a')\
.replace('C', 'g')\
.replace('G', 'c')\
.replace('n', 'Z')\
.upper()\
.replace("Z", "n") |
def fullcomp(seq):
""" returns complement of sequence including ambiguity characters,
and saves lower case info for multiple hetero sequences"""
## this is surely not the most efficient...
seq = seq.replace("A", 'u')\
.replace('T', 'v')\
.replace('C', 'p')\
.replace('G', 'z')\
.replace('u', 'T')\
.replace('v', 'A')\
.replace('p', 'G')\
.replace('z', 'C')
## No complement for S & W b/c complements are S & W, respectively
seq = seq.replace('R', 'u')\
.replace('K', 'v')\
.replace('Y', 'b')\
.replace('M', 'o')\
.replace('u', 'Y')\
.replace('v', 'M')\
.replace('b', 'R')\
.replace('o', 'K')
seq = seq.replace('r', 'u')\
.replace('k', 'v')\
.replace('y', 'b')\
.replace('m', 'o')\
.replace('u', 'y')\
.replace('v', 'm')\
.replace('b', 'r')\
.replace('o', 'k')
return seq |
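## Example of fullcomp(): ambiguity codes are complemented too (R<->Y,
## K<->M), while S and W are left unchanged since they are self-complementary.
print(fullcomp("ARKSW"))   # -> TYMSW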
def fastq_touchup_for_vsearch_merge(read, outfile, reverse=False):
""" option to change orientation of reads and sets Qscore to B """
counts = 0
with open(outfile, 'w') as out:
## read in paired end read files 4 lines at a time
if read.endswith(".gz"):
fr1 = gzip.open(read, 'rb')
else:
fr1 = open(read, 'rb')
quarts = itertools.izip(*[iter(fr1)]*4)
## a list to store until writing
writing = []
while 1:
try:
lines = quarts.next()
except StopIteration:
break
if reverse:
seq = lines[1].strip()[::-1]
else:
seq = lines[1].strip()
writing.append("".join([
lines[0],
seq+"\n",
lines[2],
"B"*len(seq)
]))
## write to disk
counts += 1
if not counts % 1000:
out.write("\n".join(writing)+"\n")
writing = []
if writing:
out.write("\n".join(writing))
out.close()
fr1.close() |
def merge_pairs_after_refmapping(data, two_files, merged_out):
"""
A function to merge fastq files produced by bam2fq.
"""
## create temp files
nonmerged1 = tempfile.NamedTemporaryFile(
mode='wb',
dir=data.dirs.edits,
suffix="_nonmerged_R1_.fastq").name
nonmerged2 = tempfile.NamedTemporaryFile(
mode='wb',
dir=data.dirs.edits,
suffix="_nonmerged_R2_.fastq").name
## get the maxn and minlen values
minlen = str(max(32, data.paramsdict["filter_min_trim_len"]))
try:
maxn = sum(data.paramsdict['max_low_qual_bases'])
except TypeError:
maxn = data.paramsdict['max_low_qual_bases']
## set the quality scores arbitrarily high and orient R2 correctly
tmp1 = two_files[0][0]
tmp2 = two_files[0][1]
fastq_touchup_for_vsearch_merge(tmp1, tmp1+".tu", False)
fastq_touchup_for_vsearch_merge(tmp2, tmp2+".tu", True)
## command string to call vsearch
cmd = [ipyrad.bins.vsearch,
"--fastq_mergepairs", tmp1+".tu",
"--reverse", tmp2+".tu",
"--fastqout", merged_out,
"--fastqout_notmerged_fwd", nonmerged1,
"--fastqout_notmerged_rev", nonmerged2,
"--fasta_width", "0",
"--fastq_minmergelen", minlen,
"--fastq_maxns", str(maxn),
"--fastq_minovlen", "10",
"--fastq_maxdiffs", "4",
"--label_suffix", "_m1",
"--fastq_qmax", "1000",
"--threads", "2",
"--fastq_allowmergestagger"]
## run vsearch but allow kbd
proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
try:
res = proc.communicate()[0]
except KeyboardInterrupt:
proc.kill()
## cleanup tmp files if job failed or stopped
if proc.returncode:
LOGGER.error("Error: %s %s", cmd, res)
raise IPyradWarningExit("Error merge pairs:\n %s\n%s", cmd, res)
## record how many read pairs were merged
with open(merged_out, 'r') as tmpf:
nmerged = sum(1 for i in tmpf.readlines()) // 4
## Concat unmerged pairs with a 'nnnn' separator
with open(merged_out, 'ab') as combout:
## read in paired end read files 4 lines at a time
fr1 = open(nonmerged1, 'rb')
quart1 = itertools.izip(*[iter(fr1)]*4)
fr2 = open(nonmerged2, 'rb')
quart2 = itertools.izip(*[iter(fr2)]*4)
quarts = itertools.izip(quart1, quart2)
## a list to store until writing
writing = []
counts = 0
## iterate until done
while 1:
try:
read1s, read2s = quarts.next()
except StopIteration:
break
## store the read
writing.append("".join([
read1s[0],
read1s[1].strip() + "nnnn" + \
read2s[1],
read1s[2],
read1s[3].strip() + "nnnn" + \
read2s[3],
]))
## count up until time to write
counts += 1
if not counts % 10:
combout.write("".join(writing))
writing = []
## write the remaining
if writing:
combout.write("".join(writing))
## close handles
fr1.close()
fr2.close()
combout.close()
## remove temp files (or do this later)
rmfiles = [nonmerged1, nonmerged2, tmp1, tmp2, tmp1+".tu", tmp2+".tu"]
for rmfile in rmfiles:
if os.path.exists(rmfile):
os.remove(rmfile)
return nmerged |
def merge_after_pysam(data, clust):
"""
This is for pysam post-flight merging. The input is a cluster
for an individual locus. We have to split the clusters, write
R1 and R2 to files then call merge_pairs(). This is not ideal,
it's slow, but it works. This is the absolute worst way to do this,
it bounces all the files for each locus off the disk. I/O _hog_.
"""
try:
r1file = tempfile.NamedTemporaryFile(mode='wb', delete=False,
dir=data.dirs.edits,
suffix="_R1_.fastq")
r2file = tempfile.NamedTemporaryFile(mode='wb', delete=False,
dir=data.dirs.edits,
suffix="_R2_.fastq")
r1dat = []
r2dat = []
for locus in clust:
sname, seq = locus.split("\n")
## Have to munge the sname to make it look like fastq format
sname = "@" + sname[1:]
r1, r2 = seq.split("nnnn")
r1dat.append("{}\n{}\n{}\n{}".format(sname, r1, "+", "B"*(len(r1))))
r2dat.append("{}\n{}\n{}\n{}".format(sname, r2, "+", "B"*(len(r2))))
r1file.write("\n".join(r1dat))
r2file.write("\n".join(r2dat))
r1file.close()
r2file.close()
## Read in the merged data and format to return as a clust
merged_file = tempfile.NamedTemporaryFile(mode='wb',
dir=data.dirs.edits,
suffix="_merged.fastq").name
clust = []
merge_pairs(data, [(r1file.name, r2file.name)], merged_file, 0, 1)
with open(merged_file) as infile:
quarts = itertools.izip(*[iter(infile)]*4)
while 1:
try:
sname, seq, _, _ = quarts.next()
## Vsearch expects R2 oriented how it would be in a raw data file
## i.e. revcomp, and that's also how it returns it
## but we want to maintain the genomic orientation so R1 and
## R2 are both on the + strand and both in ascending positional order
## so here if the reads don't merge we have to revcomp R2
if not "_m1" in sname.rsplit(";", 1)[1]:
try:
R1, R2 = seq.split("nnnn")
seq = R1 + "nnnn" + revcomp(R2)
except ValueError as inst:
LOGGER.error("Failed merge_after_pysam: {} {}".format(sname, seq))
raise
except StopIteration:
break
## put sname back
sname = ">" + sname[1:]
clust.extend([sname.strip(), seq.strip()])
except:
LOGGER.info("Error in merge_pairs post-refmap.")
raise
finally:
for i in [r1file.name, r2file.name, merged_file]:
if os.path.exists(i):
log_level = logging.getLevelName(LOGGER.getEffectiveLevel())
## uncomment this to shit ALL over the filesystem.
## if not log_level == "DEBUG":
os.remove(i)
return clust |
def merge_pairs(data, two_files, merged_out, revcomp, merge):
"""
Merge PE reads. Takes in a list of unmerged files [r1, r2] and the
filehandle to write merged data to, and it returns the number of reads
that were merged (overlapping). If merge==0 then only concat pairs (nnnn),
no merging in vsearch.
Parameters
-----------
two_files (tuple):
A list or tuple of the [r1, r2] files to be merged.
merged_out (str):
A string file handle for the merged data to be written to.
revcomp (bool):
Whether or not to revcomp the R2s.
merge (bool):
Whether or not to perform vsearch merging. If not then reads are simply
concatenated with a 'nnnn' separator.
Returns
--------
If merge is on then the func will return the number of pairs
successfully merged, else it returns -1.
"""
LOGGER.debug("Entering merge_pairs()")
## Return the number of merged pairs
nmerged = -1
## Check input files from inside list-tuple [(r1, r2)]
for fhandle in two_files[0]:
if not os.path.exists(fhandle):
raise IPyradWarningExit("""
Attempting to merge a file that doesn't exist - {}""".format(fhandle))
## If it already exists, clean up the old merged file
if os.path.exists(merged_out):
os.remove(merged_out)
## if merge then catch nonmerged in a separate file
if merge:
nonmerged1 = tempfile.NamedTemporaryFile(mode='wb',
dir=data.dirs.edits,
suffix="_nonmerged_R1_.fastq").name
nonmerged2 = tempfile.NamedTemporaryFile(mode='wb',
dir=data.dirs.edits,
suffix="_nonmerged_R2_.fastq").name
## if not merging then the nonmerged reads will come from the normal edits
else:
nonmerged1 = two_files[0][0]
nonmerged2 = two_files[0][1]
## get the maxn and minlen values
try:
maxn = sum(data.paramsdict['max_low_qual_bases'])
except TypeError:
maxn = data.paramsdict['max_low_qual_bases']
minlen = str(max(32, data.paramsdict["filter_min_trim_len"]))
## we need to gunzip the files if they are zipped (at least for now)
if merge and two_files[0][0].endswith(".gz"):
LOGGER.info("gunzipping pairs")
tmp1 = os.path.splitext(two_files[0][0])[0]+".tmp1"
tmp2 = os.path.splitext(two_files[0][1])[0]+".tmp2"
out1 = open(tmp1, 'w')
out2 = open(tmp2, 'w')
gun1 = sps.Popen(["gunzip", "-c", two_files[0][0]],
stderr=sps.STDOUT, stdout=out1, close_fds=True)
gun2 = sps.Popen(["gunzip", "-c", two_files[0][1]],
stderr=sps.STDOUT, stdout=out2, close_fds=True)
_ = gun1.communicate()
_ = gun2.communicate()
out1.close()
out2.close()
else:
tmp1 = two_files[0][0]
tmp2 = two_files[0][1]
try:
## If we are actually merging and not just joining then do vsearch
if merge:
## create tmp files with high quality scores and with R2 oriented
cmd = [ipyrad.bins.vsearch,
"--fastq_mergepairs", tmp1,
"--reverse", tmp2,
"--fastqout", merged_out,
"--fastqout_notmerged_fwd", nonmerged1,
"--fastqout_notmerged_rev", nonmerged2,
"--fasta_width", "0",
"--fastq_minmergelen", minlen,
"--fastq_maxns", str(maxn),
"--fastq_minovlen", "20",
"--fastq_maxdiffs", "4",
"--label_suffix", "_m1",
"--fastq_qmax", "1000",
"--threads", "2",
"--fastq_allowmergestagger"]
LOGGER.debug("merge cmd: %s", " ".join(cmd))
proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
try:
res = proc.communicate()[0]
except KeyboardInterrupt:
proc.kill()
if proc.returncode:
LOGGER.error("Error: %s %s", cmd, res)
## remove temp files
rmfiles = [os.path.splitext(two_files[0][0])[0]+".tmp1",
os.path.splitext(two_files[0][1])[0]+".tmp2",
nonmerged1, nonmerged2]
for rmfile in rmfiles:
if os.path.exists(rmfile):
os.remove(rmfile)
raise IPyradWarningExit("Error merge pairs:\n %s\n%s", cmd, res)
## record how many read pairs were merged
with open(merged_out, 'r') as tmpf:
#nmerged = len(tmpf.readlines()) // 4
nmerged = sum(1 for i in tmpf.readlines()) // 4
## Combine the unmerged pairs and append to the merge file
with open(merged_out, 'ab') as combout:
## read in paired end read files 4 lines at a time
if nonmerged1.endswith(".gz"):
fr1 = gzip.open(nonmerged1, 'rb')
else:
fr1 = open(nonmerged1, 'rb')
quart1 = itertools.izip(*[iter(fr1)]*4)
if nonmerged2.endswith(".gz"):
fr2 = gzip.open(nonmerged2, 'rb')
else:
fr2 = open(nonmerged2, 'rb')
quart2 = itertools.izip(*[iter(fr2)]*4)
quarts = itertools.izip(quart1, quart2)
## a list to store until writing
writing = []
counts = 0
## iterate until done
while 1:
try:
read1s, read2s = quarts.next()
except StopIteration:
break
if revcomp:
writing.append("".join([
read1s[0],
read1s[1].strip() + "nnnn" + \
comp(read2s[1].strip()[::-1]) + "\n",
read1s[2],
read1s[3].strip() + "nnnn" + \
read2s[3].strip()[::-1] + "\n",
]))
else:
writing.append("".join([
read1s[0],
read1s[1].strip() + "nnnn" + \
read2s[1],
read1s[2],
read1s[3].strip() + "nnnn" + \
read2s[3],
]))
counts += 1
if not counts % 10:
combout.write("".join(writing)) #+"\n")
writing = []
if writing:
combout.write("".join(writing))
## close handles
fr1.close()
fr2.close()
combout.close()
except Exception as inst:
LOGGER.error("Exception in merge_pairs - {}".format(inst))
raise
## No matter what happens please clean up the temp files.
finally:
## if merged then delete the nonmerge tmp files
if merge:
## remove temp files
rmfiles = [nonmerged1, nonmerged2,
os.path.splitext(two_files[0][0])[0]+".tmp1",
os.path.splitext(two_files[0][1])[0]+".tmp2"]
for rmfile in rmfiles:
if os.path.exists(rmfile):
os.remove(rmfile)
return nmerged |
def revcomp(sequence):
"returns reverse complement of a string"
sequence = sequence[::-1].strip()\
.replace("A", "t")\
.replace("T", "a")\
.replace("C", "g")\
.replace("G", "c").upper()
return sequence |
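## Example of revcomp(): reverse-complements a plain sequence string
## (only A/C/G/T are complemented; other characters pass through uppercased).
print(revcomp("ACCGT"))   # -> ACGGT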
def clustdealer(pairdealer, optim):
""" return optim clusters given iterators, and whether it got all or not"""
ccnt = 0
chunk = []
while ccnt < optim:
## try refreshing taker, else quit
try:
taker = itertools.takewhile(lambda x: x[0] != "//\n", pairdealer)
oneclust = ["".join(taker.next())]
except StopIteration:
#LOGGER.debug('last chunk %s', chunk)
return 1, chunk
## load one cluster
while 1:
try:
oneclust.append("".join(taker.next()))
except StopIteration:
break
chunk.append("".join(oneclust))
ccnt += 1
return 0, chunk |
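## A minimal sketch of driving clustdealer() (file name is hypothetical):
## the dealer yields pairs of lines (name line + sequence line) and each
## cluster is terminated by "//" separator lines, as in .clustS files.
import itertools
with open("example.clustS") as infile:
    pairdealer = itertools.izip(*[iter(infile)] * 2)
    done, chunk = clustdealer(pairdealer, optim=100)
    print(done, len(chunk))   # done == 1 once the iterator is exhausted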
def progressbar(njobs, finished, msg="", spacer=" "):
""" prints a progress bar """
if njobs:
progress = 100*(finished / float(njobs))
else:
progress = 100
hashes = '#'*int(progress/5.)
nohash = ' '*int(20-len(hashes))
if not ipyrad.__interactive__:
msg = msg.rsplit("|", 2)[0]
args = [spacer, hashes+nohash, int(progress), msg]
print("\r{}[{}] {:>3}% {} ".format(*args), end="")
sys.stdout.flush() |
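## A small usage sketch of progressbar() (loop values are hypothetical):
## it rewrites a single line with a hash bar, percent finished, and message.
import time
for finished in range(0, 101, 25):
    progressbar(100, finished, "demo step | 0:00:12 | ")
    time.sleep(0.1)
print("")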
def get_threaded_view(ipyclient, split=True):
""" gets optimum threaded view of ids given the host setup """
## engine ids
## e.g., [0, 1, 2, 3, 4, 5, 6, 7, 8]
eids = ipyclient.ids
## get host names
## e.g., ['a', 'a', 'b', 'b', 'a', 'c', 'c', 'c', 'c']
dview = ipyclient.direct_view()
hosts = dview.apply_sync(socket.gethostname)
## group ids into a dict by their hostnames
## e.g., {a: [0, 1, 4], b: [2, 3], c: [5, 6, 7, 8]}
hostdict = defaultdict(list)
for host, eid in zip(hosts, eids):
hostdict[host].append(eid)
## Now split threads on the same host into separate proc if there are many
hostdictkeys = hostdict.keys()
for key in hostdictkeys:
gids = hostdict[key]
maxt = len(gids)
if len(gids) >= 4:
maxt = 2
## if 4 nodes and 4 ppn, put one sample per host
if (len(gids) == 4) and (len(hosts) >= 4):
maxt = 4
if len(gids) >= 6:
maxt = 3
if len(gids) >= 8:
maxt = 4
if len(gids) >= 16:
maxt = 4
## split ids into groups of maxt
threaded = [gids[i:i+maxt] for i in xrange(0, len(gids), maxt)]
lth = len(threaded)
## if anything was split (lth>1) update hostdict with new proc
if lth > 1:
hostdict.pop(key)
for hostid in range(lth):
hostdict[str(key)+"_"+str(hostid)] = threaded[hostid]
## make sure split numbering is correct
#threaded = hostdict.values()
#assert len(ipyclient.ids) <= len(list(itertools.chain(*threaded)))
LOGGER.info("threaded_view: %s", dict(hostdict))
return hostdict |
def detect_cpus():
"""
Detects the number of CPUs on a system. This is better than asking
ipyparallel since ipp has to wait for Engines to spin up.
"""
# Linux, Unix and MacOS:
if hasattr(os, "sysconf"):
if os.sysconf_names.has_key("SC_NPROCESSORS_ONLN"):
# Linux & Unix:
ncpus = os.sysconf("SC_NPROCESSORS_ONLN")
if isinstance(ncpus, int) and ncpus > 0:
return ncpus
else: # OSX:
return int(os.popen2("sysctl -n hw.ncpu")[1].read())
# Windows:
if os.environ.has_key("NUMBER_OF_PROCESSORS"):
ncpus = int(os.environ["NUMBER_OF_PROCESSORS"])
if ncpus > 0:
return ncpus
return 1 |
def _call_structure(mname, ename, sname, name, workdir, seed, ntaxa, nsites, kpop, rep):
""" make the subprocess call to structure """
## create call string
outname = os.path.join(workdir, "{}-K-{}-rep-{}".format(name, kpop, rep))
cmd = ["structure",
"-m", mname,
"-e", ename,
"-K", str(kpop),
"-D", str(seed),
"-N", str(ntaxa),
"-L", str(nsites),
"-i", sname,
"-o", outname]
## call the shell function
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
comm = proc.communicate()
## cleanup
oldfiles = [mname, ename, sname]
for oldfile in oldfiles:
if os.path.exists(oldfile):
os.remove(oldfile)
return comm |
def _get_clumpp_table(self, kpop, max_var_multiple, quiet):
""" private function to clumpp results"""
## concat results for k=x
reps, excluded = _concat_reps(self, kpop, max_var_multiple, quiet)
if reps:
ninds = reps[0].inds
nreps = len(reps)
else:
ninds = nreps = 0
if not reps:
return "no result files found"
clumphandle = os.path.join(self.workdir, "tmp.clumppparams.txt")
self.clumppparams.kpop = kpop
self.clumppparams.c = ninds
self.clumppparams.r = nreps
with open(clumphandle, 'w') as tmp_c:
tmp_c.write(self.clumppparams._asfile())
## create CLUMPP args string
outfile = os.path.join(self.workdir,
"{}-K-{}.outfile".format(self.name, kpop))
indfile = os.path.join(self.workdir,
"{}-K-{}.indfile".format(self.name, kpop))
miscfile = os.path.join(self.workdir,
"{}-K-{}.miscfile".format(self.name, kpop))
cmd = ["CLUMPP", clumphandle,
"-i", indfile,
"-o", outfile,
"-j", miscfile,
"-r", str(nreps),
"-c", str(ninds),
"-k", str(kpop)]
## call clumpp
proc = subprocess.Popen(cmd,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
_ = proc.communicate()
## cleanup
for rfile in [indfile, miscfile]:
if os.path.exists(rfile):
os.remove(rfile)
## parse clumpp results file
ofile = os.path.join(self.workdir, "{}-K-{}.outfile".format(self.name, kpop))
if os.path.exists(ofile):
csvtable = pd.read_csv(ofile, delim_whitespace=True, header=None)
table = csvtable.loc[:, 5:]
## apply names to cols and rows
table.columns = range(table.shape[1])
table.index = self.labels
if not quiet:
sys.stderr.write(
"[K{}] {}/{} results permuted across replicates (max_var={}).\n"\
.format(kpop, nreps, nreps+excluded, max_var_multiple))
return table
else:
sys.stderr.write("No files ready for {}-K-{} in {}\n"\
.format(self.name, kpop, self.workdir))
return |
def _concat_reps(self, kpop, max_var_multiple, quiet, **kwargs):
"""
Combine structure replicates into a single indfile and return the list
of retained Rep objects along with the number excluded. Reps whose
ln-likelihood variance is more than max_var_multiple times the minimum
variance across reps are excluded as likely non-converged runs.
"""
## make an output handle
outf = os.path.join(self.workdir,
"{}-K-{}.indfile".format(self.name, kpop))
## combine replicates and write to indfile
excluded = 0
reps = []
with open(outf, 'w') as outfile:
repfiles = glob.glob(
os.path.join(self.workdir,
self.name+"-K-{}-rep-*_f".format(kpop)))
## get result as a Rep object
for rep in repfiles:
result = Rep(rep, kpop=kpop)
reps.append(result)
## exclude results with variance NX above (min)
newreps = []
if len(reps) > 1:
min_var_across_reps = np.min([i.var_lnlik for i in reps])
else:
min_var_across_reps = reps[0].var_lnlik
## iterate over reps
for rep in reps:
## store result w/o filtering
if not max_var_multiple:
newreps.append(rep)
outfile.write(rep.stable)
## use max-var-multiple as a filter for convergence
else:
#print(
# rep.var_lnlik,
# min_var_across_reps,
# rep.var_lnlik / min_var_across_reps,
# max_var_multiple)
## e.g., repvar is 1.05X minvar. We keep it if maxvar <= 1.05
if (rep.var_lnlik / min_var_across_reps) <= max_var_multiple:
newreps.append(rep)
outfile.write(rep.stable)
else:
excluded += 1
return newreps, excluded |
def _get_evanno_table(self, kpops, max_var_multiple, quiet):
"""
Calculates Evanno method K value scores for a series
of permuted clumpp results.
"""
## iterate across k-vals
kpops = sorted(kpops)
replnliks = []
for kpop in kpops:
## concat results for k=x
reps, excluded = _concat_reps(self, kpop, max_var_multiple, quiet)
## report if some results were excluded
if excluded:
if not quiet:
sys.stderr.write(
"[K{}] {} reps excluded (not converged) see 'max_var_multiple'.\n"\
.format(kpop, excluded))
if reps:
ninds = reps[0].inds
nreps = len(reps)
else:
ninds = nreps = 0
if not reps:
print "no result files found"
## all we really need is the lnlik
replnliks.append([i.est_lnlik for i in reps])
## compare lnlik and var of results
if len(replnliks) > 1:
lnmean = [np.mean(i) for i in replnliks]
lnstds = [np.std(i, ddof=1) for i in replnliks]
else:
lnmean = replnliks
lnstds = np.nan
tab = pd.DataFrame(
index=kpops,
data={
"Nreps": [len(i) for i in replnliks],
"lnPK": [0] * len(kpops),
"lnPPK": [0] * len(kpops),
"deltaK": [0] * len(kpops),
"estLnProbMean": lnmean,
"estLnProbStdev": lnstds,
}
)
## calculate Evanno's
for kpop in kpops[1:]:
tab.loc[kpop, "lnPK"] = tab.loc[kpop, "estLnProbMean"] \
- tab.loc[kpop-1, "estLnProbMean"]
for kpop in kpops[1:-1]:
tab.loc[kpop, "lnPPK"] = abs(tab.loc[kpop+1, "lnPK"]
- tab.loc[kpop, "lnPK"])
tab.loc[kpop, "deltaK"] = (abs(
tab.loc[kpop+1, "estLnProbMean"] - \
2.0 * tab.loc[kpop, "estLnProbMean"] + \
tab.loc[kpop-1, "estLnProbMean"]) / \
tab.loc[kpop, "estLnProbStdev"])
## return table
return tab |
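## A toy check of the Evanno calculation above (values are made up): for
## each K, deltaK = |L(K+1) - 2*L(K) + L(K-1)| / sd(K), where L(K) is the
## mean estimated ln probability across replicates at K.
lnmean = {2: -4500.0, 3: -4200.0, 4: -4150.0, 5: -4140.0}
lnstd = {3: 20.0, 4: 15.0}
for k in (3, 4):
    deltak = abs(lnmean[k + 1] - 2.0 * lnmean[k] + lnmean[k - 1]) / lnstd[k]
    print(k, deltak)   # K=3: 250/20 = 12.5; K=4: 40/15 ~ 2.7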
def result_files(self):
""" returns a list of files that have finished structure """
reps = OPJ(self.workdir, self.name+"-K-*-rep-*_f")
repfiles = glob.glob(reps)
return repfiles |
def run(self,
kpop,
nreps,
ipyclient=None,
seed=12345,
force=False,
quiet=False,
):
"""
Submits structure jobs to run on the cluster and stores asynchronous
result objects; if no ipyclient is provided a single job runs locally
and blocks until finished. kpop is the number of populations (K), and
seed initializes the random seeds drawn for each replicate. If nreps
is set then multiple jobs are started from new seeds, each labeled by
its replicate number. If force=True then existing replicates are
overwritten; otherwise new replicates are numbered starting after the
last replicate found in the workdir.
Parameters:
-----------
kpop: (int)
The MAXPOPS parameter in structure, i.e., the number of populations
assumed by the model (K).
nreps: (int):
Number of independent runs starting from distinct seeds.
ipyclient: (ipyparallel.Client Object)
An ipyparallel client connected to an ipcluster instance. This is
used to manage parallel jobs. If not present a single job will
run and block until finished (i.e., code is not parallel).
seed: (int):
Random number seed used for subsampling unlinked SNPs if a mapfile
is linked to the Structure Object.
force: (bool):
If force is true then old replicates are removed and new reps start
from rep-0. Otherwise, new reps start at end of existing rep numbers.
quiet: (bool)
Whether to print number of jobs submitted to stderr
Example:
---------
import ipyparallel as ipp
import ipyrad.analysis as ipa
## get parallel client
ipyclient = ipp.Client()
## get structure object
s = ipa.structure(
name="test",
data="mydata.str",
mapfile="mydata.snps.map",
workdir="structure-results",
)
## modify some basic params
s.mainparams.numreps = 100000
s.mainparams.burnin = 10000
## submit many jobs
for kpop in [3, 4, 5]:
s.run(
kpop=kpop,
nreps=10,
ipyclient=ipyclient,
)
## block until all jobs finish
ipyclient.wait()
"""
## initiate starting seed
np.random.seed(seed)
## check that structure is installed
proc = subprocess.Popen(["which", "structure"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()
if not proc[0]:
raise Exception(\
"structure is not installed: run `conda install structure -c ipyrad`")
## start load balancer
if ipyclient:
lbview = ipyclient.load_balanced_view()
## remove old jobs with this same name
handle = OPJ(self.workdir, self.name+"-K-{}-*".format(kpop))
oldjobs = glob.glob(handle)
if force or (not oldjobs):
for job in oldjobs:
os.remove(job)
repstart = 0
repend = nreps
else:
repstart = max([int(i.split("-")[-1][:-2]) for i in oldjobs])
repend = repstart + nreps
## check that there is a ipcluster instance running
for rep in xrange(repstart, repend):
## sample random seed for this rep
self.extraparams.seed = np.random.randint(0, 1e9, 1)[0]
## prepare files (randomly subsamples snps if mapfile)
mname, ename, sname = self.write_structure_files(kpop, rep)
args = [
mname, ename, sname,
self.name,
self.workdir,
self.extraparams.seed,
self.ntaxa,
self.nsites,
kpop,
rep]
if ipyclient:
## call structure
async = lbview.apply(_call_structure, *(args))
self.asyncs.append(async)
else:
if not quiet:
sys.stderr.write("submitted 1 structure job [{}-K-{}]\n"\
.format(self.name, kpop))
comm = _call_structure(*args)
return comm
if ipyclient:
if not quiet:
sys.stderr.write("submitted {} structure jobs [{}-K-{}]\n"\
.format(nreps, self.name, kpop)) |
def write_structure_files(self, kpop, rep=1):
"""
Prepares input files for running structure. Users typically do not need
to call this function since it is called internally by .run(). But it
is optionally available here in case users wish to generate files and
run structure separately.
"""
## check params
self.mainparams.numreps = int(self.mainparams.numreps)
self.mainparams.burnin = int(self.mainparams.burnin)
## write tmp files for the job; the name-K-rep combo avoids filename conflicts.
mname = OPJ(self.workdir, "tmp-{}-{}-{}.mainparams.txt".format(self.name, kpop, rep))
ename = OPJ(self.workdir, "tmp-{}-{}-{}.extraparams.txt".format(self.name, kpop, rep))
sname = OPJ(self.workdir, "tmp-{}-{}-{}.strfile.txt".format(self.name, kpop, rep))
tmp_m = open(mname, 'w')
tmp_e = open(ename, 'w')
tmp_s = open(sname, 'w')
## write params files
tmp_m.write(self.mainparams._asfile())
tmp_e.write(self.extraparams._asfile())
## subsample SNPs as unlinked if a mapfile is present.
## & write pop data to the tmp_s file if present
assert len(self.popdata) == len(self.labels), \
"popdata list must be the same length as the number of taxa"
with open(self.data) as ifile:
_data = ifile.readlines()
## header
header = np.array([i.strip().split("\t")[:5] for i in _data])
## seqdata
seqdata = np.array([i.strip().split("\t")[5:] for i in _data])
## enter popdata into seqfile if present in self
if any(self.popdata):
## set popdata in header
header[::2, 1] = self.popdata
header[1::2, 1] = self.popdata
## set flag to all 1s if user entered popdata but no popflag
if not any(self.popflag):
self.popflag = [1 for i in self.popdata]
header[:, 2] = 1
else:
header[::2, 2] = self.popflag
header[1::2, 2] = self.popflag
## subsample SNPs if mapfile is present
if isinstance(self.maparr, np.ndarray):
seqdata = seqdata[:, self._subsample()]
## write fullstr
fullstr = np.concatenate([header, seqdata], axis=1)
np.savetxt(tmp_s, fullstr, delimiter="\t", fmt="%s")
## close tmp files
tmp_m.close()
tmp_e.close()
tmp_s.close()
return mname, ename, sname |
def get_clumpp_table(self, kvalues, max_var_multiple=0, quiet=False):
"""
Returns a dictionary of results tables for making structure barplots.
This calls the same functions used in get_evanno_table() to call
CLUMPP to permute replicates.
Parameters:
-----------
kvalues : list or int
A kvalue or list of kvalues to run CLUMPP on and return a
results table.
max_var_multiple: int
A multiplier value to use as a filter for convergence of runs.
Default=0=no filtering. As an example, if 10 replicates
were run then the variance of the run with the minimum variance is
used as a benchmark. If other runs have a variance that is N times
greater then that run will be excluded. Remember, if replicate runs
sampled different distributions of SNPs then it is not unexpected that
they will have very different variances. However, you may still want
to exclude runs with very high variance since they likely have
not converged.
Returns:
--------
table : dict or pd.DataFrame
A dictionary of dataframes with admixture proportions.
"""
## do not allow bad vals
if max_var_multiple:
if max_var_multiple < 1:
raise ValueError('max_var_multiple must be >1')
if isinstance(kvalues, int):
return _get_clumpp_table(self, kvalues, max_var_multiple, quiet)
else:
tabledict = {}
for kpop in kvalues:
table = _get_clumpp_table(self, kpop, max_var_multiple, quiet)
tabledict[kpop] = table
return tabledict |
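## Hypothetical usage (file names are placeholders), following the pattern
## in the run() docstring above: permute finished replicates with CLUMPP
## and collect one admixture table per K.
import ipyrad.analysis as ipa
s = ipa.structure(name="test", data="mydata.str", workdir="structure-results")
tables = s.get_clumpp_table([2, 3, 4], max_var_multiple=10)
print(tables[3].head())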
def get_evanno_table(self, kvalues, max_var_multiple=0, quiet=False):
"""
Calculates the Evanno table from results files for tests with
K-values in the input list kvalues. The values lnPK, lnPPK,
and deltaK are calculated. The max_var_multiplier arg can be used
to exclude results files based on variance of the likelihood as a
proxy for convergence.
Parameters:
-----------
kvalues : list
The list of K-values for which structure was run for this object.
e.g., kvalues = [3, 4, 5]
max_var_multiple: int
A multiplier value to use as a filter for convergence of runs.
Default=0=no filtering. As an example, if 10 replicates
were run then the variance of the run with the minimum variance is
used as a benchmark. If other runs have a variance that is N times
greater then that run will be excluded. Remember, if replicate runs
sampled different distributions of SNPs then it is not unexpected that
they will have very different variances. However, you may still want
to exclude runs with very high variance since they likely have
not converged.
quiet: bool
Suppresses printed messages about convergence.
Returns:
--------
table : pandas.DataFrame
A data frame with LPK, LNPPK, and delta K. The latter is typically
used to find the best fitting value of K. But be wary of over
interpreting a single best K value.
"""
## do not allow bad vals
if max_var_multiple:
if max_var_multiple < 1:
raise ValueError('max_variance_multiplier must be >1')
table = _get_evanno_table(self, kvalues, max_var_multiple, quiet)
return table |
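## Companion sketch to the CLUMPP example above (same hypothetical object
## `s`): the Evanno table reports lnPK, lnPPK, and deltaK for the tested Ks.
etable = s.get_evanno_table([2, 3, 4], max_var_multiple=10)
print(etable)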
def parse(self, psearch, dsearch):
""" parse an _f structure output file """
stable = ""
with open(self.repfile) as orep:
dat = orep.readlines()
for line in dat:
## stat lines
if "Estimated Ln Prob of Data" in line:
self.est_lnlik = float(line.split()[-1])
if "Mean value of ln likelihood" in line:
self.mean_lnlik = float(line.split()[-1])
if "Variance of ln likelihood" in line:
self.var_lnlik = float(line.split()[-1])
if "Mean value of alpha" in line:
self.alpha = float(line.split()[-1])
## matrix lines
nonline = psearch.search(line)
popline = dsearch.search(line)
#if ") : " in line:
if nonline:
## check if sample is supervised...
abc = line.strip().split()
outstr = "{}{}{}".format(
" ".join([abc[0], abc[0], abc[2],
abc[0].split('.')[0]]),
" : ",
" ".join(abc[4:])
)
self.inds += 1
stable += outstr+"\n"
elif popline:
## check if sample is supervised...
abc = line.strip().split()
prop = ["0.000"] * self.kpop
pidx = int(abc[3]) - 1
prop[pidx] = "1.000"
outstr = "{}{}{}".format(
" ".join([abc[0], abc[0], abc[2],
abc[0].split('.')[0]]),
" : ",
" ".join(prop)
)
self.inds += 1
stable += outstr+"\n"
stable += "\n"
return stable |
def _call_raxml(command_list):
""" call the command as sps """
proc = subprocess.Popen(
command_list,
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE
)
comm = proc.communicate()
return comm |
def _command_list(self):
""" build the command list """
cmd = [self.params.binary,
"-f", str(self.params.f),
"-T", str(self.params.T),
"-m", str(self.params.m),
"-N", str(self.params.N),
"-x", str(self.params.x),
"-p", str(self.params.p),
"-n", str(self.params.n),
"-w", str(self.params.w),
"-s", str(self.params.s),
]
## add outgroups
if self.params.o:
cmd += ["-o"]
cmd += [",".join(self.params.o)]
return cmd |
def run(self,
ipyclient=None,
quiet=False,
force=False,
block=False,
):
"""
Submits raxml job to run. If no ipyclient object is provided then
the function will block until the raxml run is finished. If an ipyclient
is provided then the job is sent to a remote engine and an asynchronous
result object is returned which can be queried or awaited until it finishes.
Parameters
-----------
ipyclient:
Not yet supported...
quiet:
suppress print statements
force:
overwrite existing results files with this job name.
block:
will block progress in notebook until job finishes, even if job
is running on a remote ipyclient.
"""
## stop before trying in raxml
if force:
for key, oldfile in self.trees:
if os.path.exists(oldfile):
os.remove(oldfile)
if os.path.exists(self.trees.info):
print("Error: set a new name for this job or use Force flag.\nFile exists: {}"\
.format(self.trees.info))
return
## TODO: add a progress bar tracker here. It could even read it from
## the info file that is being written.
## submit it
if not ipyclient:
proc = _call_raxml(self._command_list)
self.stdout = proc[0]
self.stderr = proc[1]
else:
## find all hosts and submit job to the host with most available engines
lbview = ipyclient.load_balanced_view()
self.async = lbview.apply(_call_raxml, self._command_list)
## initiate random seed
if not quiet:
if not ipyclient:
## look for errors
if "Overall execution time" not in self.stdout:
print("Error in raxml run\n" + self.stdout)
else:
print("job {} finished successfully".format(self.params.n))
else:
print("job {} submitted to cluster".format(self.params.n)) |
def _get_binary(self):
""" find binaries available"""
## check for binary
backup_binaries = ["raxmlHPC-PTHREADS", "raxmlHPC-PTHREADS-SSE3"]
## check user binary first, then backups
for binary in [self.params.binary] + backup_binaries:
proc = subprocess.Popen(["which", self.params.binary],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT).communicate()
## update the binary
if proc:
self.params.binary = binary
## if none then raise error
if not proc[0]:
raise Exception(BINARY_ERROR.format(self.params.binary)) |
def share_matrix(locifile, tree=None, nameorder=None):
"""
returns a matrix of shared RAD-seq data
Parameters:
-----------
locifile (str):
Path to a ipyrad .loci file.
tree (str):
Path to Newick file or a Newick string representation of
a tree. If used, names will be ordered by the ladderized
tip order.
nameorder (list):
If a tree is not provided you can alternatively enter
the sample order as a list here. The tree argument will
override this argument.
Returns:
--------
matrix (numpy.array):
A uint64 numpy array of the number of shared loci between
all pairs of samples.
"""
## load in the loci data
with open(locifile, 'r') as locidata:
loci = locidata.read().split("|\n")[:-1]
## load in the tree from a string
if tree:
tree = ete.Tree(tree)
tree.ladderize()
snames = tree.get_leaf_names()
lxs, names = _getarray(loci, snames)
elif nameorder:
lxs, names = _getarray(loci, nameorder)
else:
raise IOError("must provide either tree or nameorder argument")
## get share matrix
share = _countmatrix(lxs)
return share |
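## Hypothetical call (file names are placeholders): count loci shared
## between all pairs of samples, with rows/columns ordered by the
## ladderized tips of the newick tree.
share = share_matrix("mydata.loci", tree="mytree.nwk")
print(share.shape)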
def _getarray(loci, snames):
"""
parse loci list and return presence/absence matrix
ordered by the tips on the tree or list of names.
"""
## make an empty matrix
lxs = np.zeros((len(snames), len(loci)), dtype=np.uint64)
## fill the matrix
for loc in xrange(len(loci)):
for seq in loci[loc].split("\n"):
if "//" not in seq:
lxs[snames.index(seq.split()[0][:]), loc] += 1
return lxs, snames |
def batch(
baba,
ipyclient=None,
):
"""
distributes jobs to the parallel client
"""
## parse args
handle = baba.data
taxdicts = baba.tests
mindicts = baba.params.mincov
nboots = baba.params.nboots
## if ms generator make into reusable list
sims = 0
if isinstance(handle, types.GeneratorType):
handle = list(handle)
sims = 1
else:
## expand locifile path to full path
handle = os.path.realpath(handle)
## parse taxdicts into names and lists if it a dictionary
#if isinstance(taxdicts, dict):
# names, taxdicts = taxdicts.keys(), taxdicts.values()
#else:
# names = []
names = []
if isinstance(taxdicts, dict):
taxdicts = [taxdicts]
## an array to hold results (len(taxdicts), nboots)
tot = len(taxdicts)
resarr = np.zeros((tot, 7), dtype=np.float64)
bootsarr = np.zeros((tot, nboots), dtype=np.float64)
paneldict = {}
## TODO: Setup a wrapper to find and cleanup ipyclient
## define the function and parallelization to use,
## if no ipyclient then drops back to using multiprocessing.
if not ipyclient:
# ipyclient = ip.core.parallel.get_client(**self._ipcluster)
raise IPyradError("you must enter an ipyparallel.Client() object")
else:
lbview = ipyclient.load_balanced_view()
## submit jobs to run on the cluster queue
start = time.time()
asyncs = {}
idx = 0
## prepare data before sending to engines
## if it's a str (locifile) then parse it here just once.
if isinstance(handle, str):
with open(handle, 'r') as infile:
loci = infile.read().strip().split("|\n")
if isinstance(handle, list):
pass #sims()
## iterate over tests (repeats mindicts if fewer than taxdicts)
itests = iter(taxdicts)
imdict = itertools.cycle([mindicts])
#for test, mindict in zip(taxdicts, itertools.cycle([mindicts])):
for i in xrange(len(ipyclient)):
## next entries unless fewer than len ipyclient, skip
try:
test = next(itests)
mindict = next(imdict)
except StopIteration:
continue
## if it's sim data then convert to an array
if sims:
loci = _msp_to_arr(handle, test)
args = (loci, test, mindict, nboots)
print("not yet implemented")
#asyncs[idx] = lbview.apply_async(dstat, *args)
else:
args = [loci, test, mindict, nboots]
asyncs[idx] = lbview.apply(dstat, *args)
idx += 1
## block until finished, print progress if requested.
finished = 0
try:
while 1:
keys = [i for (i, j) in asyncs.items() if j.ready()]
## check for failures
for job in keys:
if not asyncs[job].successful():
raise IPyradWarningExit(\
" error: {}: {}".format(job, asyncs[job].exception()))
## enter results for successful jobs
else:
_res, _bot = asyncs[job].result()
## store D4 results
if _res.shape[0] == 1:
resarr[job] = _res.T.as_matrix()[:, 0]
bootsarr[job] = _bot
## or store D5 results
else:
paneldict[job] = _res.T
## remove old job
del asyncs[job]
finished += 1
## submit next job if there is one.
try:
test = next(itests)
mindict = next(imdict)
if sims:
loci = _msp_to_arr(handle, test)
args = (loci, test, mindict, nboots)
print("not yet implemented")
#asyncs[idx] = lbview.apply_async(dstat, *args)
else:
args = [loci, test, mindict, nboots]
asyncs[idx] = lbview.apply(dstat, *args)
idx += 1
except StopIteration:
pass
## count finished and break if all are done.
#fin = idx - len(asyncs)
elap = datetime.timedelta(seconds=int(time.time()-start))
printstr = " calculating D-stats | {} | "
progressbar(tot, finished, printstr.format(elap), spacer="")
time.sleep(0.1)
if not asyncs:
print("")
break
except KeyboardInterrupt as inst:
## cancel all jobs (ipy & multiproc modes) and then raise error
try:
ipyclient.abort()
except Exception:
pass
raise inst
## dress up resarr as a Pandas DataFrame if 4-part test
if len(test) == 4:
if not names:
names = range(len(taxdicts))
#print("resarr")
#print(resarr)
resarr = pd.DataFrame(resarr,
index=names,
columns=["dstat", "bootmean", "bootstd", "Z", "ABBA", "BABA", "nloci"])
## sort results and bootsarr to match if test names were supplied
resarr = resarr.sort_index()
order = [list(resarr.index).index(i) for i in names]
bootsarr = bootsarr[order]
return resarr, bootsarr
else:
## order results dfs
listres = []
for key in range(len(paneldict)):
listres.append(paneldict[key])
## make into a multi-index dataframe
ntests = len(paneldict)
multi_index = [
np.array([[i] * 3 for i in range(ntests)]).flatten(),
np.array(['p3', 'p4', 'shared'] * ntests),
]
resarr = pd.DataFrame(
data=pd.concat(listres).as_matrix(),
index=multi_index,
columns=listres[0].columns,
)
return resarr, None |
def dstat(inarr, taxdict, mindict=1, nboots=1000, name=0):
""" private function to perform a single D-stat test"""
#if isinstance(inarr, str):
# with open(inarr, 'r') as infile:
# inarr = infile.read().strip().split("|\n")
# ## get data as an array from loci file
# ## if loci-list then parse arr from loci
if isinstance(inarr, list):
arr, _ = _loci_to_arr(inarr, taxdict, mindict)
# ## if it's an array already then go ahead
# elif isinstance(inarr, np.ndarray):
# arr = inarr
# ## if it's a simulation object get freqs from array
# elif isinstance(inarr, Sim):
# arr = _msp_to_arr(inarr, taxdict)
#elif isinstance(inarr, types.GeneratorType):
# arr = _msp_to_arr(inarr, taxdict)
#elif isinstance(inarr, list):
# arr = _msp_to_arr(inarr, taxdict)
## get data from Sim object, do not digest the ms generator
#else:
# raise Exception("Must enter either a 'locifile' or 'arr'")
## run tests
#if len(taxdict) == 4:
if arr.shape[1] == 4:
## get results
res, boots = _get_signif_4(arr, nboots)
## make res into a nice DataFrame
res = pd.DataFrame(res,
columns=[name],
index=["Dstat", "bootmean", "bootstd", "Z", "ABBA", "BABA", "nloci"])
else:
## get results
res, boots = _get_signif_5(arr, nboots)
## make int a DataFrame
res = pd.DataFrame(res,
index=["p3", "p4", "shared"],
columns=["Dstat", "bootmean", "bootstd", "Z", "ABxxA", "BAxxA", "nloci"]
)
return res.T, boots |
def _loci_to_arr(loci, taxdict, mindict):
"""
return a frequency array from a loci file for all loci with taxa from
taxdict and min coverage from mindict.
"""
## make the array (4 or 5) and a mask array to remove loci without cov
nloci = len(loci)
maxlen = np.max(np.array([len(locus.split("\n")[0]) for locus in loci]))
keep = np.zeros(nloci, dtype=np.bool_)
arr = np.zeros((nloci, 4, maxlen), dtype=np.float64)
## six rows b/c one each for p1, p2, the two p3 lineages, the fused p3 ancestor, and the outgroup
if len(taxdict) == 5:
arr = np.zeros((nloci, 6, maxlen), dtype=np.float64)
## normalize mindict: accept an int or a dict, or default to requiring 1 sample per taxon
if isinstance(mindict, int):
mindict = {i: mindict for i in taxdict}
elif isinstance(mindict, dict):
mindict = {i: mindict[i] for i in taxdict}
else:
mindict = {i: 1 for i in taxdict}
## raise error if names are not 'p[int]'
allowed_names = ['p1', 'p2', 'p3', 'p4', 'p5']
if any([i not in allowed_names for i in taxdict]):
raise IPyradError(\
"keys in taxdict must be named 'p1' through 'p4' or 'p5'")
## parse key names
keys = sorted([i for i in taxdict.keys() if i[0] == 'p'])
outg = keys[-1]
## grab seqs just for the good guys
for loc in xrange(nloci):
## parse the locus
lines = loci[loc].split("\n")[:-1]
names = [i.split()[0] for i in lines]
seqs = np.array([list(i.split()[1]) for i in lines])
## check that names cover the taxdict (still need to check by site)
covs = [sum([j in names for j in taxdict[tax]]) >= mindict[tax] \
for tax in taxdict]
## keep locus
if all(covs):
keep[loc] = True
## get the refseq
refidx = np.where([i in taxdict[outg] for i in names])[0]
refseq = seqs[refidx].view(np.uint8)
ancestral = np.array([reftrick(refseq, GETCONS2)[:, 0]])
## freq of ref in outgroup
iseq = _reffreq2(ancestral, refseq, GETCONS2)
arr[loc, -1, :iseq.shape[1]] = iseq
## enter 4-taxon freqs
if len(taxdict) == 4:
for tidx, key in enumerate(keys[:-1]):
## get idx of names in test tax
nidx = np.where([i in taxdict[key] for i in names])[0]
sidx = seqs[nidx].view(np.uint8)
## get freq of sidx
iseq = _reffreq2(ancestral, sidx, GETCONS2)
## fill it in
arr[loc, tidx, :iseq.shape[1]] = iseq
else:
## enter p5; and fill it in
iseq = _reffreq2(ancestral, refseq, GETCONS2)
arr[loc, -1, :iseq.shape[1]] = iseq
## enter p1
nidx = np.where([i in taxdict['p1'] for i in names])[0]
sidx = seqs[nidx].view(np.uint8)
iseq = _reffreq2(ancestral, sidx, GETCONS2)
arr[loc, 0, :iseq.shape[1]] = iseq
## enter p2
nidx = np.where([i in taxdict['p2'] for i in names])[0]
sidx = seqs[nidx].view(np.uint8)
iseq = _reffreq2(ancestral, sidx, GETCONS2)
arr[loc, 1, :iseq.shape[1]] = iseq
## enter p3 with p4 masked, and p4 with p3 masked
nidx = np.where([i in taxdict['p3'] for i in names])[0]
nidy = np.where([i in taxdict['p4'] for i in names])[0]
sidx = seqs[nidx].view(np.uint8)
sidy = seqs[nidy].view(np.uint8)
xseq = _reffreq2(ancestral, sidx, GETCONS2)
yseq = _reffreq2(ancestral, sidy, GETCONS2)
mask3 = xseq != 0
mask4 = yseq != 0
xseq[mask4] = 0
yseq[mask3] = 0
arr[loc, 2, :xseq.shape[1]] = xseq
arr[loc, 3, :yseq.shape[1]] = yseq
## enter p34
nidx = nidx.tolist() + nidy.tolist()
sidx = seqs[nidx].view(np.uint8)
iseq = _reffreq2(ancestral, sidx, GETCONS2)
arr[loc, 4, :iseq.shape[1]] = iseq
## size-down array to the number of loci that have taxa for the test
arr = arr[keep, :, :]
## size-down sites to drop positions without data
arr = masknulls(arr)
return arr, keep |
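## Hedged usage sketch for _loci_to_arr; the taxdict, file name, and sample names
## below are hypothetical. The last sorted key (p4 here) is treated as the outgroup.
# taxdict = {"p1": ["sampA"], "p2": ["sampB"], "p3": ["sampC"], "p4": ["sampD"]}
# with open("data.loci") as infile:
#     loci = infile.read().strip().split("|\n")
# arr, keep = _loci_to_arr(loci, taxdict, mindict=1)
# ## arr has shape (n_kept_loci, 4, maxlen) holding derived-allele frequencies;
# ## keep is a boolean mask over the original loci.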
def _get_boots(arr, nboots):
"""
return array of bootstrap D-stats
"""
## hold results (nboots, [dstat, ])
boots = np.zeros((nboots,))
## iterate to fill boots
for bidx in xrange(nboots):
## sample with replacement
lidx = np.random.randint(0, arr.shape[0], arr.shape[0])
tarr = arr[lidx]
_, _, dst = _prop_dstat(tarr)
boots[bidx] = dst
## return bootarr
return boots |
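## The same resample-loci-with-replacement idea as a self-contained sketch using a
## simple mean statistic instead of the D-statistic (numpy only, toy data):
import numpy as np

def _bootstrap_mean_sketch(values, nboots=1000, seed=0):
    rng = np.random.RandomState(seed)
    boots = np.zeros(nboots)
    for bidx in range(nboots):
        ## sample row indices (loci) with replacement and recompute the statistic
        lidx = rng.randint(0, values.shape[0], values.shape[0])
        boots[bidx] = values[lidx].mean()
    return boots

## e.g. _bootstrap_mean_sketch(np.array([0.1, 0.3, 0.2, 0.4])).std() approximates
## the standard error of the mean across loci.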
def _get_signif_4(arr, nboots):
"""
returns a list of stats and an array of dstat boots. Stats includes
z-score and two-sided P-value.
"""
abba, baba, dst = _prop_dstat(arr)
boots = _get_boots(arr, nboots)
estimate, stddev = (boots.mean(), boots.std())
zscore = 0.
if stddev:
zscore = np.abs(dst) / stddev
stats = [dst, estimate, stddev, zscore, abba, baba, arr.shape[0]]
return np.array(stats), boots |
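## Numeric sketch of the Z-score used above (toy numbers, not real results):
## Z = |D| / std(bootstrap distribution).
import numpy as np

toy_dst = 0.12
toy_boots = np.array([0.10, 0.14, 0.11, 0.13, 0.12])
toy_std = toy_boots.std()
toy_z = np.abs(toy_dst) / toy_std if toy_std else 0.
## a Z of roughly 3 or more is the usual cutoff for a significant D-statistic.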
def _get_signif_5(arr, nboots):
"""
returns a list of stats and an array of dstat boots. Stats includes
z-score and two-sided P-value.
"""
statsarr = np.zeros((3, 7), dtype=np.float64)
bootsarr = np.zeros((3, nboots))
idx = 0
for acol in [2, 3, 4]:
rows = np.array([0, 1, acol, 5])
tarr = arr[:, rows, :]
abxa, baxa, dst = _prop_dstat(tarr)
boots = _get_boots(tarr, nboots)
estimate, stddev = (boots.mean(), boots.std())
if stddev:
zscore = np.abs(dst) / stddev
else:
zscore = np.NaN
stats = [dst, estimate, stddev, zscore, abxa, baxa, arr.shape[0]]
statsarr[idx] = stats
bootsarr[idx] = boots
idx += 1
return statsarr, bootsarr |
def _simulate(self, nreps, admix=None, Ns=500000, gen=20):
"""
Enter a baba.Tree object in which the 'tree' attribute (newick
derived tree) has edge lengths in units of generations. You can
use the 'gen' parameter to multiply branch lengths by a constant.
Parameters:
-----------
nreps: (int)
Number of reps (loci) to simulate under the demographic scenario
tree: (baba.Tree object)
A baba.Tree object initialized by calling baba.Tree(*args).
admix: (list)
A list of admixture events to occur on the tree. Nodes must be
referenced by their index number, and events must occur in time
intervals when edges exist. Use the .draw() function of the
baba.Tree object to see node index numbers and coalescent times.
Ns: (float)
Fixed effective population size for all lineages (may allow to vary
in the future).
gen: (int)
A multiplier applied to branch lengths to scale into units of
generations. Example, if all edges on a tree were 1 then you might
enter 50000 to multiply so that edges are 50K generations long.
"""
## node ages
Taus = np.array(list(set(self.verts[:, 1]))) * 1e4 * gen
## The tips samples, ordered alphanumerically
## Population IDs correspond to their indexes in pop config
ntips = len(self.tree)
#names = {name: idx for idx, name in enumerate(sorted(self.tree.get_leaf_names()))}
## rev ladderized leaf name order (left to right on downward facing tree)
names = {name: idx for idx, name in enumerate(self.tree.get_leaf_names()[::-1])}
pop_config = [
ms.PopulationConfiguration(sample_size=2, initial_size=Ns)
for i in range(ntips)
]
## migration matrix all zeros init
migmat = np.zeros((ntips, ntips)).tolist()
## a list for storing demographic events
demog = []
## coalescent times
coals = sorted(list(set(self.verts[:, 1])))[1:]
for ct in xrange(len(coals)):
## check for admix event before next coalescence
## ...
## print coals[ct], nidxs, time
nidxs = np.where(self.verts[:, 1] == coals[ct])[0]
time = Taus[ct+1]
## add coalescence at each node
for nidx in nidxs:
node = self.tree.search_nodes(name=str(nidx))[0]
## get destination (lowest child idx number), and the other child
dest = sorted(node.get_leaves(), key=lambda x: x.idx)[0]
otherchild = [i for i in node.children if not
i.get_leaves_by_name(dest.name)][0]
## get source
if otherchild.is_leaf():
source = otherchild
else:
source = sorted(otherchild.get_leaves(), key=lambda x: x.idx)[0]
## add coal events
event = ms.MassMigration(
time=int(time),
source=names[source.name],
destination=names[dest.name],
proportion=1.0)
#print(int(time), names[source.name], names[dest.name])
## ...
demog.append(event)
## sim the data
replicates = ms.simulate(
population_configurations=pop_config,
migration_matrix=migmat,
demographic_events=demog,
num_replicates=nreps,
length=100,
mutation_rate=1e-8)
return replicates |
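## Hedged usage sketch; assumes `ms` is msprime and `tree` is a baba.Tree object
## with a .verts array and an ete3 tree, as expected by _simulate:
# reps = tree._simulate(nreps=100, Ns=500000, gen=20)
# for rep in reps:
#     ## each rep is a tree sequence; variants() yields per-site genotypes
#     for variant in rep.variants():
#         print(variant.genotypes)
#     break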
def taxon_table(self):
"""
Returns the .tests list of taxa as a pandas dataframe.
Because this table is auto-generated from the .tests list, it cannot be
modified in place; to edit it, save the returned copy.
"""
if self.tests:
keys = sorted(self.tests[0].keys())
if isinstance(self.tests, list):
ld = [[(key, i[key]) for key in keys] for i in self.tests]
dd = [dict(i) for i in ld]
df = pd.DataFrame(dd)
return df
else:
return pd.DataFrame(pd.Series(self.tests)).T
else:
return None |
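## The same list-of-dicts -> DataFrame pattern used above, shown standalone with
## hypothetical tests (one row per test, one column per taxon key):
import pandas as pd

toy_tests = [
    {"p1": ["a"], "p2": ["b"], "p3": ["c"], "p4": ["d"]},
    {"p1": ["a"], "p2": ["e"], "p3": ["c"], "p4": ["d"]},
]
toy_keys = sorted(toy_tests[0].keys())
toy_df = pd.DataFrame([dict((k, t[k]) for k in toy_keys) for t in toy_tests])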
def run(self,
ipyclient=None,
):
"""
Run a batch of dstat tests on a list of tests, where each test is
a dictionary mapping sample names to {p1 - p4} (and sometimes p5).
Parameters modifying the behavior of the run, such as the number
of bootstrap replicates (nboots) or the minimum coverage for
loci (mincov) can be set in {object}.params.
Parameters:
-----------
ipyclient (ipyparallel.Client object):
An ipyparallel client object to distribute jobs to a cluster.
"""
self.results_table, self.results_boots = batch(self, ipyclient)
## skip this for 5-part test results
if not isinstance(self.results_table, list):
self.results_table.nloci = np.nan_to_num(self.results_table.nloci)\
.astype(int) |
def plot(self,
show_test_labels=True,
use_edge_lengths=True,
collapse_outgroup=False,
pct_tree_x=0.5,
pct_tree_y=0.2,
subset_tests=None,
#toytree_kwargs=None,
*args,
**kwargs):
"""
Draw a multi-panel figure with tree, tests, and results
Parameters:
-----------
height: int
...
width: int
...
show_test_labels: bool
...
use_edge_lengths: bool
...
collapse_outgroups: bool
...
pct_tree_x: float
...
pct_tree_y: float
...
subset_tests: list
...
...
"""
## check for attributes
if not self.newick:
raise IPyradError("baba plot requires a newick treefile")
if not self.tests:
raise IPyradError("baba plot must have a .tests attribute")
## ensure tests is a list
if isinstance(self.tests, dict):
self.tests = [self.tests]
## re-decompose the tree
ttree = toytree.tree(
self.newick,
orient='down',
use_edge_lengths=use_edge_lengths,
)
## subset test to show fewer
if subset_tests is not None:
#tests = self.tests[subset_tests]
tests = [self.tests[i] for i in subset_tests]
boots = self.results_boots[subset_tests]
else:
tests = self.tests
boots = self.results_boots
## make the plot
canvas, axes, panel = baba_panel_plot(
ttree=ttree,
tests=tests,
boots=boots,
show_test_labels=show_test_labels,
use_edge_lengths=use_edge_lengths,
collapse_outgroup=collapse_outgroup,
pct_tree_x=pct_tree_x,
pct_tree_y=pct_tree_y,
*args,
**kwargs)
return canvas, axes, panel |
def loci2multinex(name,
locifile,
subsamples=None,
outdir=None,
maxloci=None,
minSNPs=1,
seed=12345,
mcmc_burnin=int(1e6),
mcmc_ngen=int(2e6),
mcmc_sample_freq=1000,
):
"""
Converts loci file format to multiple nexus formatted files, one for
each locus, and writes a mrbayes block in the nexus information. The
mrbayes block will be set to run 2 replicate chains, for [mcmc_ngen]
generations, skipping [burnin] steps, and sampling every
[mcmc_sample_freq] steps.
Parameters:
-----------
name: (str)
A prefix name for output files that will be produced
locifile: (str)
A .loci file produced by ipyrad.
maxloci: (int)
Limit the number of loci to the first N loci with sufficient sampling
to be included in the analysis.
minSNPs: (int)
Only include loci that have at least N parsimony informative SNPs.
seed: (int)
Random seed used for resolving ambiguities.
mcmc_burnin: (int)
mrbayes nexus block burnin parameter used for 'sump burnin' and 'sumt burnin'.
The number of generations to skip before starting parameter and tree sampling.
mcmc_ngen: (int)
mrbayes nexus block 'mcmc ngen' and 'mcmc printfreq' parameters. We don't really
need any info printed to screen, so these values are set equal. This is the
length of the chains that will be run.
mcmc_sample_freq: (int)
mrbayes nexus block 'mcmc samplefreq' parameter. The frequency of sampling from
the mcmc chain.
"""
## workdir is the top level directory (e.g., analysis-bucky)
if outdir:
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
outdir = os.path.curdir
## enforce full path names
outdir = os.path.realpath(outdir)
## outdir is a named directory within this (e.g., analysis-bucky/subs/)
outdir = os.path.join(outdir, "bucky-{}".format(name))
if not os.path.exists(outdir):
os.makedirs(outdir)
else:
## remove {number}.nex files in this folder
ofiles = glob.glob(os.path.join(outdir, "[0-9].nex*"))
for ofile in ofiles:
os.remove(ofile)
## parse the locifile to a generator
with open(locifile) as infile:
loci = (i for i in infile.read().strip().split("|\n"))
## convert subsamples to a set
if not subsamples:
## get all sample names from loci
with open(locifile) as infile:
subs = set((i.split()[0] for i in infile.readlines() if "//" not in i))
else:
subs = set(subsamples)
## keep track of how many loci pass
lens = len(subs)
nlocus = 0
## create subsampled data set
for loc in loci:
dat = loc.split("\n")[:-1]
## get names and seq from locus
names = [i.split()[0] for i in dat]
seqs = np.array([list(i.split()[1]) for i in dat])
## check that locus has required samples for each subtree
if len(set(names).intersection(set(subs))) == lens:
## order the same way every time
seqsamp = seqs[[names.index(tax) for tax in subs]]
seqsamp = _resolveambig(seqsamp)
pis = _count_PIS(seqsamp, minSNPs)
if pis:
nlocus += 1
## remove invariable columns given this subsampling
copied = seqsamp.copy()
copied[copied == "-"] = "N"
rmcol = np.all(copied == "N", axis=0)
seqsamp = seqsamp[:, ~rmcol]
## write to a nexus file
mdict = dict(zip(subs, [i.tostring() for i in seqsamp]))
nexmake(mdict, nlocus, outdir, mcmc_burnin, mcmc_ngen, mcmc_sample_freq)
print "wrote {} nexus files to {}".format(nlocus, outdir) |
def nexmake(mdict, nlocus, dirs, mcmc_burnin, mcmc_ngen, mcmc_sample_freq):
"""
function that takes a dictionary mapping names to
sequences, and a locus number, and writes it as a NEXUS
file with a mrbayes analysis block.
"""
## create matrix as a string
max_name_len = max([len(i) for i in mdict])
namestring = "{:<" + str(max_name_len+1) + "} {}\n"
matrix = ""
for i in mdict.items():
matrix += namestring.format(i[0], i[1])
## write nexus block
handle = os.path.join(dirs, "{}.nex".format(nlocus))
with open(handle, 'w') as outnex:
outnex.write(NEXBLOCK.format(**{
"ntax": len(mdict),
"nchar": len(mdict.values()[0]),
"matrix": matrix,
"ngen": mcmc_ngen,
"sfreq": mcmc_sample_freq,
"burnin": mcmc_burnin,
})) |
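## NEXBLOCK is a module-level template defined elsewhere; a minimal hypothetical
## template of the same shape (placeholder values only, not the exact block
## shipped with the library) would look like:
NEXBLOCK_SKETCH = """#NEXUS
begin data;
dimensions ntax={ntax} nchar={nchar};
format datatype=dna interleave=yes gap=- missing=N;
matrix
{matrix}
    ;
end;
begin mrbayes;
set autoclose=yes nowarn=yes;
lset nst=6 rates=gamma;
mcmc ngen={ngen} samplefreq={sfreq} printfreq={ngen};
sump burnin={burnin};
sumt burnin={burnin};
end;
"""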
def call_fastq_dump_on_SRRs(self, srr, outname, paired):
"""
calls fastq-dump on SRRs, relabels fastqs by their accession
names, and writes them to the workdir. Saves temp sra files
in the designated tmp folder and immediately removes them.
"""
## build command for fastq-dumping
fd_cmd = [
"fastq-dump", srr,
"--accession", outname,
"--outdir", self.workdir,
"--gzip",
]
if paired:
fd_cmd += ["--split-files"]
## call fq dump command
proc = sps.Popen(fd_cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
o, e = proc.communicate()
## fastq-dump hard-codes a temporary .sra file into workdir/sra/ and
## does not clean it up, so remove it here immediately.
srafile = os.path.join(self.workdir, "sra", srr+".sra")
if os.path.exists(srafile):
os.remove(srafile) |
def fields_checker(fields):
"""
Returns the fields argument formatted as a list of strings,
with zero values removed.
"""
## make sure fields will work
if isinstance(fields, int):
fields = str(fields)
if isinstance(fields, str):
if "," in fields:
fields = [str(i) for i in fields.split(",")]
else:
fields = [str(fields)]
elif isinstance(fields, (tuple, list)):
fields = [str(i) for i in fields]
else:
raise IPyradWarningExit("fields not properly formatted")
## do not allow zero in fields
fields = [i for i in fields if i != '0']
return fields |
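## quick sanity examples of the normalization performed above
assert fields_checker(30) == ['30']
assert fields_checker("1,0,4") == ['1', '4']        # zero is dropped
assert fields_checker((1, 4, 6)) == ['1', '4', '6']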
def run(self,
force=False,
ipyclient=None,
name_fields=30,
name_separator="_",
dry_run=False):
"""
Download the accessions into the designated workdir.
Parameters
----------
force: (bool)
If force=True then existing files with the same name
will be overwritten.
ipyclient: (ipyparallel.Client)
If provided, work will be distributed across a parallel
client, otherwise download will be run on a single core.
name_fields: (int, str):
Provide the index of the name fields to be used as a prefix
for fastq output files. The default is 30, which is the
SampleName field. Use sra.fetch_fields to see all available
fields and their indices. A likely alternative is 1 (Run).
If multiple are listed then they will be joined by a "_"
character. For example (29,30) would yield something like:
latin-name_sample-name (e.g., mus_musculus-NR10123).
dry_run: (bool)
If True then a table of file names that _would_ be downloaded
will be shown, but the actual files will not be downloaded.
"""
## temporarily set directory for tmpfiles used by fastq-dump
## if this fails then just skip it.
try:
## ensure output directory, also used as tmpdir
if not os.path.exists(self.workdir):
os.makedirs(self.workdir)
## get original directory for sra files
## probably /home/ncbi/public/sra by default.
self._set_vdbconfig_path()
## register ipyclient for cleanup
if ipyclient:
self._ipcluster["pids"] = {}
for eid in ipyclient.ids:
engine = ipyclient[eid]
if not engine.outstanding:
pid = engine.apply(os.getpid).get()
self._ipcluster["pids"][eid] = pid
## submit jobs to engines or local
self._submit_jobs(
force=force,
ipyclient=ipyclient,
name_fields=name_fields,
name_separator=name_separator,
dry_run=dry_run,
)
except IPyradWarningExit as inst:
print(inst)
## exceptions to catch, cleanup and handle ipyclient interrupts
except KeyboardInterrupt:
print("keyboard interrupt...")
except Exception as inst:
print("Exception in run() - {}".format(inst))
finally:
## reset working sra path
self._restore_vdbconfig_path()
## if it made a new sra directory then it should be empty when
## we are finished if all .sra files were removed. If so, then
## let's also remove the dir. if not empty, leave it.
sradir = os.path.join(self.workdir, "sra")
if os.path.exists(sradir) and (not os.listdir(sradir)):
shutil.rmtree(sradir)
else:
## print warning
try:
print(FAILED_DOWNLOAD.format(os.listdir(sradir)))
except OSError as inst:
## If sra dir doesn't even exist something very bad is broken.
raise IPyradWarningExit("Download failed. Exiting.")
## remove fastq file matching to cached sra file
for srr in os.listdir(sradir):
isrr = srr.split(".")[0]
ipath = os.path.join(self.workdir, "*_{}*.gz".format(isrr))
ifile = glob.glob(ipath)[0]
if os.path.exists(ifile):
os.remove(ifile)
## remove cache of sra files
shutil.rmtree(sradir)
## cleanup ipcluster shutdown
if ipyclient:
## send SIGINT (2) to all engines still running tasks
try:
ipyclient.abort()
time.sleep(0.5)
for engine_id, pid in self._ipcluster["pids"].items():
if ipyclient.queue_status()[engine_id]["tasks"]:
os.kill(pid, 2)
time.sleep(0.1)
except ipp.NoEnginesRegistered:
pass
## clean memory space
if not ipyclient.outstanding:
ipyclient.purge_everything()
## uh oh, kill everything, something bad happened
else:
ipyclient.shutdown(hub=True, block=False)
ipyclient.close()
print("\nwarning: ipcluster shutdown and must be restarted") |
def _submit_jobs(self,
force,
ipyclient,
name_fields,
name_separator,
dry_run):
"""
Download the accessions into the designated workdir.
If file already exists it will only be overwritten if
force=True. Temporary files are removed.
"""
## get Run data with default fields (1,4,6,30)
df = self.fetch_runinfo(range(31), quiet=True)
sys.stdout.flush()
## if not ipyclient then use multiprocessing
if ipyclient:
lb = ipyclient.load_balanced_view()
## if Run has samples with same name (replicates) then
## we need to include the accessions in the file names
if name_fields:
## indexing requires -1 ints
fields = [int(i)-1 for i in fields_checker(name_fields)]
## make accession names, no spaces allowed
df['Accession'] = pd.Series(df[df.columns[fields[0]]], index=df.index)
for field in fields[1:]:
df.Accession += name_separator + df[df.columns[field]]
df.Accession = [i.replace(" ", "_") for i in df.Accession]
## check that names are unique
if not df.Accession.shape[0] == df.Accession.unique().shape[0]:
raise IPyradWarningExit("names are not unique:\n{}"\
.format(df.Accession))
## backup default naming scheme
else:
if len(set(df.SampleName)) != len(df.SampleName):
accs = (i+"-"+j for i, j in zip(df.SampleName, df.Run))
df.Accession = accs
else:
df.Accession = df.SampleName
if dry_run:
print("\rThe following files will be written to: {}".format(self.workdir))
print("{}\n".format(df.Accession))
else:
## iterate over and download
asyncs = []
for idx in df.index:
## get args for this run
srr = df.Run[idx]
outname = df.Accession[idx]
paired = df.spots_with_mates.values.astype(int).nonzero()[0].any()
fpath = os.path.join(self.workdir, outname+".fastq.gz")
## skip if exists and not force
skip = False
if force:
if os.path.exists(fpath):
os.remove(fpath)
else:
if os.path.exists(fpath):
skip = True
sys.stdout.flush()
print("[skip] file already exists: {}".format(fpath))
## single job progress bar
tidx = df.Accession.shape[0]
#if not ipyclient:
## submit job to run
if not skip:
args = (self, srr, outname, paired)
if ipyclient:
async = lb.apply_async(call_fastq_dump_on_SRRs, *args)
asyncs.append(async)
else:
print("Downloading file {}/{}: {}".format(idx+1, tidx, fpath))
call_fastq_dump_on_SRRs(*args)
sys.stdout.flush()
## progress bar while blocking parallel
if ipyclient:
tots = df.Accession.shape[0]
printstr = " Downloading fastq files | {} | "
start = time.time()
while 1:
elapsed = datetime.timedelta(seconds=int(time.time()-start))
ready = sum([i.ready() for i in asyncs])
progressbar(tots, ready, printstr.format(elapsed), spacer="")
time.sleep(0.1)
if tots == ready:
print("")
break
self._report(tots)
## check for fails
for async in asyncs:
if not async.successful():
raise IPyradWarningExit(async.result()) |
def fetch_runinfo(self, fields=None, quiet=False):
"""
Call esearch to grep SRR info for a project (SRP). Use the command
sra.fetch_fields to see available fields to be fetched. This function
returns a DataFrame with runinfo for the selected fields.
Parameters:
-----------
Fields: (tuple or list)
The default fields returned are 1-30. You can enter a list
or tuple of fewer numbers to select fewer fields. Example,
(1,4,6,29,30) returns a neat dataframe with Run IDs,
Number of reads (SE and PE), ScientificName, and SampleName.
"""
if not quiet:
print("\rFetching project data...", end="")
## if no entry then fetch (nearly) all fields.
if fields is None:
fields = range(30)
fields = fields_checker(fields)
## command strings
es_cmd = [
"esearch",
"-db", "sra",
"-query", self.accession,
]
ef_cmd = [
"efetch",
"--format", "runinfo",
]
cut_cmd = [
"cut",
"-d", ",",
"-f", ",".join(fields),
]
## pipe commands together
proc1 = sps.Popen(es_cmd, stderr=sps.STDOUT, stdout=sps.PIPE)
proc2 = sps.Popen(ef_cmd, stdin=proc1.stdout, stderr=sps.STDOUT, stdout=sps.PIPE)
proc3 = sps.Popen(cut_cmd, stdin=proc2.stdout, stderr=sps.STDOUT, stdout=sps.PIPE)
o, e = proc3.communicate()
proc2.stdout.close()
proc1.stdout.close()
if o:
vals = o.strip().split("\n")
names = vals[0].split(",")
items = [i.split(",") for i in vals[1:] if i not in ["", vals[0]]]
return pd.DataFrame(items, columns=names)
else:
raise IPyradWarningExit("no samples found in {}".format(self.accession)) |
def Async(cls, token, session=None, **options):
"""Returns the client in async mode."""
return cls(token, session=session, is_async=True, **options) |
def get_constants(self, **params: keys):
"""Get the CR Constants
Parameters
----------
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.CONSTANTS
return self._get_model(url, **params) |
def get_player(self, *tags: crtag, **params: keys):
"""Get a player information
Parameters
----------
\*tags: str
Valid player tags. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.PLAYER + '/' + ','.join(tags)
return self._get_model(url, FullPlayer, **params) |
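## Hedged usage sketch; `Client` stands in for whichever client class these
## methods belong to, and the token and tag below are placeholders:
# client = Client("my-api-token")
# player = client.get_player("2P0LYQ")
# print(player.name, player.trophies)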
def get_player_verify(self, tag: crtag, apikey: str, **params: keys):
"""Check the API Key of a player.
This endpoint has been **restricted** to
certain members of the community
Parameters
----------
tag: str
A valid player tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
apikey: str
The API Key in the player's settings
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.PLAYER + '/' + tag + '/verify'
params.update({'token': apikey})
return self._get_model(url, FullPlayer, **params) |
def get_player_battles(self, *tags: crtag, **params: keys):
"""Get a player's battle log
Parameters
----------
\*tags: str
Valid player tags. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.PLAYER + '/' + ','.join(tags) + '/battles'
return self._get_model(url, **params) |
def get_clan(self, *tags: crtag, **params: keys):
"""Get a clan information
Parameters
----------
\*tags: str
Valid clan tags. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.CLAN + '/' + ','.join(tags)
return self._get_model(url, FullClan, **params) |
def get_tracking_clans(self, **params: keys):
"""Get a list of clans that are being
tracked by having either cr-api.com or
royaleapi.com in the description
Parameters
----------
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.CLAN + '/tracking'
return self._get_model(url, **params) |
def get_clan_tracking(self, *tags: crtag, **params: keys):
"""Returns if the clan is currently being tracked
by the API by having either cr-api.com or royaleapi.com
in the clan description
Parameters
----------
\*tags: str
Valid clan tags. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.CLAN + '/' + ','.join(tags) + '/tracking'
return self._get_model(url, **params) |
def get_clan_war(self, tag: crtag, **params: keys):
"""Get inforamtion about a clan's current clan war
Parameters
----------
tag: str
A valid clan tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.CLAN + '/' + tag + '/war'
return self._get_model(url, **params) |
def get_tournament(self, tag: crtag, **params: keys):
"""Get a tournament information
Parameters
----------
tag: str
A valid tournament tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.TOURNAMENT + '/' + tag
return self._get_model(url, **params) |
def search_tournaments(self, **params: keys):
"""Search for a tournament
Parameters
----------
name: str
The name of the tournament
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.TOURNAMENT + '/search'
return self._get_model(url, PartialClan, **params) |
def get_top_war_clans(self, country_key='', **params: keys):
"""Get a list of top clans by war
country_key: Optional[str] = ''
A location ID or '' (global)
See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json
for a list of acceptable location IDs
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.TOP + '/war/' + str(country_key)
return self._get_model(url, PartialClan, **params) |
def get_top_players(self, country_key='', **params: keys):
"""Get a list of top players
country_key: Optional[str] = ''
A location ID or '' (global)
See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json
for a list of acceptable location IDs
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.TOP + '/players/' + str(country_key)
return self._get_model(url, PartialPlayerClan, **params) |
def get_popular_clans(self, **params: keys):
"""Get a list of most queried clans
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.POPULAR + '/clans'
return self._get_model(url, PartialClan, **params) |
def get_popular_players(self, **params: keys):
"""Get a list of most queried players
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.POPULAR + '/players'
return self._get_model(url, PartialPlayerClan, **params) |
def get_popular_tournaments(self, **params: keys):
"""Get a list of most queried tournaments
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.POPULAR + '/tournament'
return self._get_model(url, PartialTournament, **params) |
def get_popular_decks(self, **params: keys):
"""Get a list of most queried decks
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.POPULAR + '/decks'
return self._get_model(url, **params) |
def get_known_tournaments(self, **params: tournamentfilter):
"""Get a list of queried tournaments
\*\*keys: Optional[list] = None
Filter which keys should be included in the
response
\*\*exclude: Optional[list] = None
Filter which keys should be excluded from the
response
\*\*max: Optional[int] = None
Limit the number of items returned in the response
\*\*page: Optional[int] = None
Works with max, the zero-based page of the
items
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.TOURNAMENT + '/known'
return self._get_model(url, PartialTournament, **params) |
def get_player(self, tag: crtag, timeout=None):
"""Get information about a player
Parameters
----------
tag: str
A valid player tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.PLAYER + '/' + tag
return self._get_model(url, FullPlayer, timeout=timeout) |
def get_player_verify(self, tag: crtag, apikey: str, timeout=None):
"""Check the API Key of a player.
This endpoint has been **restricted** to
certain members of the community
Raises BadRequest if the apikey is invalid
Parameters
----------
tag: str
A valid player tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
apikey: str
The API Key in the player's settings
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.PLAYER + '/' + tag + '/verifytoken'
return self._get_model(url, FullPlayer, timeout=timeout, method='POST', json={'token': apikey}) |
def get_player_battles(self, tag: crtag, **params: keys):
"""Get a player's battle log
Parameters
----------
tag: str
A valid player tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
\*\*limit: Optional[int] = None
Limit the number of items returned in the response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.PLAYER + '/' + tag + '/battlelog'
return self._get_model(url, **params) |
def get_player_chests(self, tag: crtag, timeout: int=None):
"""Get information about a player's chest cycle
Parameters
----------
tag: str
A valid player tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.PLAYER + '/' + tag + '/upcomingchests'
return self._get_model(url, timeout=timeout) |
def get_clan(self, tag: crtag, timeout: int=None):
"""Get inforamtion about a clan
Parameters
----------
tag: str
A valid clan tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.CLAN + '/' + tag
return self._get_model(url, FullClan, timeout=timeout) |
def search_clans(self, **params: clansearch):
"""Search for a clan. At least one
of the filters must be present
Parameters
----------
name: Optional[str]
The name of a clan
(has to be at least 3 characters long)
locationId: Optional[int]
A location ID
minMembers: Optional[int]
The minimum member count
of a clan
maxMembers: Optional[int]
The maximum member count
of a clan
minScore: Optional[int]
The minimum trophy score of
a clan
\*\*limit: Optional[int] = None
Limit the number of items returned in the response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.CLAN
return self._get_model(url, PartialClan, **params) |
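## Hedged usage sketch: at least one search filter must be supplied
## (the client object and filter values below are placeholders):
# clans = client.search_clans(name="reddit", minMembers=40, limit=5)
# for clan in clans:
#     print(clan.name, clan.tag)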
def get_clan_war(self, tag: crtag, timeout: int=None):
"""Get inforamtion about a clan's current clan war
Parameters
----------
tag: str
A valid clan tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.CLAN + '/' + tag + '/currentwar'
return self._get_model(url, timeout=timeout) |
def get_tournament(self, tag: crtag, timeout=0):
"""Get a tournament information
Parameters
----------
tag: str
A valid tournament tag. Minimum length: 3
Valid characters: 0289PYLQGRJCUV
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.TOURNAMENT + '/' + tag
return self._get_model(url, PartialTournament, timeout=timeout) |
def search_tournaments(self, name: str, **params: keys):
"""Search for a tournament by its name
Parameters
----------
name: str
The name of a tournament
\*\*limit: Optional[int] = None
Limit the number of items returned in the response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.TOURNAMENT
params['name'] = name
return self._get_model(url, PartialTournament, **params) |
def get_all_cards(self, timeout: int=None):
"""Get a list of all the cards in the game
Parameters
----------
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.CARDS
return self._get_model(url, timeout=timeout) |
def get_all_locations(self, timeout: int=None):
"""Get a list of all locations
Parameters
----------
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.LOCATIONS
return self._get_model(url, timeout=timeout) |
def get_location(self, location_id: int, timeout: int=None):
"""Get a location information
Parameters
----------
location_id: int
A location ID
See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json
for a list of acceptable location IDs
timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.LOCATIONS + '/' + str(location_id)
return self._get_model(url, timeout=timeout) |
def get_top_clans(self, location_id='global', **params: keys):
"""Get a list of top clans by trophy
Parameters
----------
location_id: Optional[str] = 'global'
A location ID or global
See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json
for a list of acceptable location IDs
\*\*limit: Optional[int] = None
Limit the number of items returned in the response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.LOCATIONS + '/' + str(location_id) + '/rankings/clans'
return self._get_model(url, PartialClan, **params) |
def get_top_players(self, location_id='global', **params: keys):
"""Get a list of top players
Parameters
----------
location_id: Optional[str] = 'global'
A location ID or global
See https://github.com/RoyaleAPI/cr-api-data/blob/master/json/regions.json
for a list of acceptable location IDs
\*\*limit: Optional[int] = None
Limit the number of items returned in the response
\*\*timeout: Optional[int] = None
Custom timeout that overwrites Client.timeout
"""
url = self.api.LOCATIONS + '/' + str(location_id) + '/rankings/players'
return self._get_model(url, PartialPlayerClan, **params) |
def get_clan_image(self, obj: BaseAttrDict):
"""Get the clan badge image URL
Parameters
---------
obj: official_api.models.BaseAttrDict
An object that has the clan badge ID either in ``.clan.badge_id`` or ``.badge_id``
Can be a clan or a profile for example.
Returns str
"""
try:
badge_id = obj.clan.badge_id
except AttributeError:
try:
badge_id = obj.badge_id
except AttributeError:
return 'https://i.imgur.com/Y3uXsgj.png'
if badge_id is None:
return 'https://i.imgur.com/Y3uXsgj.png'
for i in self.constants.alliance_badges:
if i.id == badge_id:
return 'https://royaleapi.github.io/cr-api-assets/badges/' + i.name + '.png' |
def get_arena_image(self, obj: BaseAttrDict):
"""Get the arena image URL
Parameters
---------
obj: official_api.models.BaseAttrDict
An object that has the arena ID in ``.arena.id``
Can be ``Profile`` for example.
Returns None or str
"""
badge_id = obj.arena.id
for i in self.constants.arenas:
if i.id == badge_id:
return 'https://royaleapi.github.io/cr-api-assets/arenas/arena{}.png'.format(i.arena_id) |
def get_card_info(self, card_name: str):
"""Returns card info from constants
Parameters
---------
card_name: str
A card name
Returns None or Constants
"""
for c in self.constants.cards:
if c.name == card_name:
return c |
def get_rarity_info(self, rarity: str):
"""Returns card info from constants
Parameters
---------
rarity: str
A rarity name
Returns None or Constants
"""
for c in self.constants.rarities:
if c.name == rarity:
return c |
def get_deck_link(self, deck: BaseAttrDict):
"""Form a deck link
Parameters
---------
deck: official_api.models.BaseAttrDict
An object is a deck. Can be retrieved from ``Player.current_deck``
Returns str
"""
deck_link = 'https://link.clashroyale.com/deck/en?deck='
for i in deck:
card = self.get_card_info(i.name)
deck_link += '{0.id};'.format(card)
return deck_link |
def get_datetime(self, timestamp: str, unix=True):
"""Converts a %Y%m%dT%H%M%S.%fZ to a UNIX timestamp
or a datetime.datetime object
Parameters
---------
timestamp: str
A timestamp in the %Y%m%dT%H%M%S.%fZ format, usually returned by the API
in the ``created_time`` field for example (eg. 20180718T145906.000Z)
unix: Optional[bool] = True
Whether to return a POSIX timestamp (seconds since epoch) or not
Returns int or datetime.datetime
"""
time = datetime.strptime(timestamp, '%Y%m%dT%H%M%S.%fZ')
if unix:
return int(time.timestamp())
else:
return time |
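## standalone check of the same strptime format used above
from datetime import datetime

dt = datetime.strptime("20180718T145906.000Z", '%Y%m%dT%H%M%S.%fZ')
## dt == datetime(2018, 7, 18, 14, 59, 6); dt.timestamp() gives the POSIX time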
def get_clan(self):
"""(a)sync function to return clan."""
try:
return self.client.get_clan(self.clan.tag)
except AttributeError:
try:
return self.client.get_clan(self.tag)
except AttributeError:
raise ValueError('This player does not have a clan.') |
def refresh(self):
"""(a)sync refresh the data."""
if self.client.is_async:
return self._arefresh()
data, cached, ts, response = self.client._request(self.response.url, timeout=None, refresh=True)
return self.from_data(data, cached, ts, response) |
def typecasted(func):
"""Decorator that converts arguments via annotations."""
signature = inspect.signature(func).parameters.items()
@wraps(func)
def wrapper(*args, **kwargs):
args = list(args)
new_args = []
new_kwargs = {}
for _, param in signature:
converter = param.annotation
if converter is inspect._empty:
converter = lambda a: a # do nothing
if param.kind is param.POSITIONAL_OR_KEYWORD:
if args:
to_conv = args.pop(0)
new_args.append(converter(to_conv))
elif param.kind is param.VAR_POSITIONAL:
for a in args:
new_args.append(converter(a))
else:
for k, v in kwargs.items():
nk, nv = converter(k, v)
new_kwargs[nk] = nv
return func(*new_args, **new_kwargs)
return wrapper |
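## minimal self-contained demo of the decorator above; `int` stands in for the
## library's crtag/keys converters:
@typecasted
def add_one(x: int):
    return x + 1

assert add_one("41") == 42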
def coerce_annotation(ann, namespace):
'''Validate that the annotation has the correct namespace,
and is well-formed.
If the annotation is not of the correct namespace, automatic conversion
is attempted.
Parameters
----------
ann : jams.Annotation
The annotation object in question
namespace : str
The namespace pattern to match `ann` against
Returns
-------
ann_coerced: jams.Annotation
The annotation coerced to the target namespace
Raises
------
NamespaceError
If `ann` does not match the proper namespace
SchemaError
If `ann` fails schema validation
See Also
--------
jams.nsconvert.convert
'''
ann = convert(ann, namespace)
ann.validate(strict=True)
return ann |
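## hedged usage sketch (assumes a JAMS file on disk; the path is illustrative):
# import jams
# jam = jams.load("example.jams")
# beat_ann = coerce_annotation(jam.annotations[0], 'beat')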