Code (strings, 103 to 85.9k characters) | Summary (lists, 0 to 94 items) |
---|---|
Please provide a description of the function:def adapt(self, d, x):
y = np.dot(self.w, x)
e = d - y
R1 = np.dot(np.dot(np.dot(self.R,x),x.T),self.R)
R2 = self.mu + np.dot(np.dot(x,self.R),x.T)
self.R = 1/self.mu * (self.R - R1/R2)
dw = np.dot(self.R, x.T) * e
self.w += dw | [
"\n Adapt weights according one desired value and its input.\n\n **Args:**\n\n * `d` : desired value (float)\n\n * `x` : input array (1-dimensional array)\n "
] |
Please provide a description of the function:def run(self, d, x):
        # measure the data and check if the dimensions agree
N = len(x)
if not len(d) == N:
raise ValueError('The length of vector d and matrix x must agree.')
self.n = len(x[0])
# prepare data
try:
x = np.array(x)
d = np.array(d)
except:
raise ValueError('Impossible to convert x or d to a numpy array')
# create empty arrays
y = np.zeros(N)
e = np.zeros(N)
self.w_history = np.zeros((N, self.n))
# adaptation loop
for k in range(N):
self.w_history[k,:] = self.w
y[k] = np.dot(self.w, x[k])
e[k] = d[k] - y[k]
R1 = np.dot(np.dot(np.dot(self.R,x[k]),x[k].T),self.R)
R2 = self.mu + np.dot(np.dot(x[k],self.R),x[k].T)
self.R = 1/self.mu * (self.R - R1/R2)
dw = np.dot(self.R, x[k].T) * e[k]
self.w += dw
return y, e, self.w_history | [
"\n This function filters multiple samples in a row.\n\n **Args:**\n\n * `d` : desired value (1 dimensional array)\n\n * `x` : input matrix (2-dimensional array). Rows are samples,\n columns are input arrays.\n\n **Returns:**\n\n * `y` : output value (1 dimensional array).\n The size corresponds with the desired value.\n\n * `e` : filter error for every sample (1 dimensional array).\n The size corresponds with the desired value.\n\n * `w` : history of all weights (2 dimensional array).\n Every row is set of the weights for given sample.\n "
] |
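These `adapt`/`run` methods have the shape of a recursive least squares (RLS) adaptive filter and appear to come from a class like padasip's `FilterRLS`. A minimal system-identification sketch; the `padasip` import, the constructor arguments and the synthetic data are assumptions for illustration:

```python
import numpy as np
import padasip as pa  # assumed source of the FilterRLS class

np.random.seed(0)
N, n = 500, 4
x = np.random.normal(size=(N, n))            # rows are input samples
d = x @ np.array([0.5, -1.0, 2.0, 0.1])      # desired output of an unknown system

f = pa.filters.FilterRLS(n=n, mu=0.99)       # mu acts as the forgetting factor
y, e, w_history = f.run(d, x)                # filter all samples in one call
print(w_history[-1])                         # final weight estimate
```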
Please provide a description of the function:def adapt(self, d, x):
# create input matrix and target vector
self.x_mem[:,1:] = self.x_mem[:,:-1]
self.x_mem[:,0] = x
self.d_mem[1:] = self.d_mem[:-1]
self.d_mem[0] = d
# estimate output and error
self.y_mem = np.dot(self.x_mem.T, self.w)
self.e_mem = self.d_mem - self.y_mem
# update
dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_eps
dw_part2 = np.linalg.solve(dw_part1, self.ide)
dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
self.w += self.mu * dw | [
"\n Adapt weights according one desired value and its input.\n\n **Args:**\n\n * `d` : desired value (float)\n\n * `x` : input array (1-dimensional array)\n "
] |
Please provide a description of the function:def run(self, d, x):
        # measure the data and check if the dimensions agree
N = len(x)
if not len(d) == N:
raise ValueError('The length of vector d and matrix x must agree.')
self.n = len(x[0])
# prepare data
try:
x = np.array(x)
d = np.array(d)
except:
raise ValueError('Impossible to convert x or d to a numpy array')
# create empty arrays
y = np.zeros(N)
e = np.zeros(N)
self.w_history = np.zeros((N,self.n))
# adaptation loop
for k in range(N):
self.w_history[k,:] = self.w
# create input matrix and target vector
self.x_mem[:,1:] = self.x_mem[:,:-1]
self.x_mem[:,0] = x[k]
self.d_mem[1:] = self.d_mem[:-1]
self.d_mem[0] = d[k]
# estimate output and error
self.y_mem = np.dot(self.x_mem.T, self.w)
self.e_mem = self.d_mem - self.y_mem
y[k] = self.y_mem[0]
e[k] = self.e_mem[0]
# update
dw_part1 = np.dot(self.x_mem.T, self.x_mem) + self.ide_eps
dw_part2 = np.linalg.solve(dw_part1, self.ide)
dw = np.dot(self.x_mem, np.dot(dw_part2, self.e_mem))
self.w += self.mu * dw
return y, e, self.w_history | [
"\n This function filters multiple samples in a row.\n\n **Args:**\n\n * `d` : desired value (1 dimensional array)\n\n * `x` : input matrix (2-dimensional array). Rows are samples,\n columns are input arrays.\n\n **Returns:**\n\n * `y` : output value (1 dimensional array).\n The size corresponds with the desired value.\n\n * `e` : filter error for every sample (1 dimensional array).\n The size corresponds with the desired value.\n\n * `w` : history of all weights (2 dimensional array).\n Every row is set of the weights for given sample.\n \n "
] |
Please provide a description of the function:def LDA_base(x, labels):
classes = np.array(tuple(set(labels)))
cols = x.shape[1]
# mean values for every class
means = np.zeros((len(classes), cols))
for i, cl in enumerate(classes):
means[i] = np.mean(x[labels==cl], axis=0)
# scatter matrices
scatter_within = np.zeros((cols, cols))
for cl, mean in zip(classes, means):
scatter_class = np.zeros((cols, cols))
for row in x[labels == cl]:
dif = row - mean
scatter_class += np.dot(dif.reshape(cols, 1), dif.reshape(1, cols))
scatter_within += scatter_class
total_mean = np.mean(x, axis=0)
scatter_between = np.zeros((cols, cols))
for cl, mean in zip(classes, means):
dif = mean - total_mean
dif_product = np.dot(dif.reshape(cols, 1), dif.reshape(1, cols))
scatter_between += x[labels == cl, :].shape[0] * dif_product
# eigenvalues and eigenvectors from scatter matrices
scatter_product = np.dot(np.linalg.inv(scatter_within), scatter_between)
eigen_values, eigen_vectors = np.linalg.eig(scatter_product)
return eigen_values, eigen_vectors | [
"\n Base function used for Linear Discriminant Analysis.\n\n **Args:**\n\n * `x` : input matrix (2d array), every row represents new sample\n\n * `labels` : list of labels (iterable), every item should be label for \\\n sample with corresponding index\n\n **Returns:**\n \n * `eigenvalues`, `eigenvectors` : eigenvalues and eigenvectors \\\n from LDA analysis \n\n "
] |
Please provide a description of the function:def LDA(x, labels, n=False):
# select n if not provided
if not n:
n = x.shape[1] - 1
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
assert type(n) == int, "Provided n is not an integer."
assert x.shape[1] > n, "The requested n is bigger than \
number of features in x."
# make the LDA
eigen_values, eigen_vectors = LDA_base(x, labels)
# sort the eigen vectors according to eigen values
eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
return eigen_order[:n].dot(x.T).T | [
"\n Linear Discriminant Analysis function.\n\n **Args:**\n\n * `x` : input matrix (2d array), every row represents new sample\n\n * `labels` : list of labels (iterable), every item should be label for \\\n sample with corresponding index\n\n **Kwargs:**\n\n * `n` : number of features returned (integer) - how many columns \n should the output keep\n\n **Returns:**\n \n * new_x : matrix with reduced size (number of columns are equal `n`)\n "
] |
Please provide a description of the function:def LDA_discriminants(x, labels):
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
# make the LDA
eigen_values, eigen_vectors = LDA_base(x, labels)
return eigen_values[(-eigen_values).argsort()] | [
"\n Linear Discriminant Analysis helper for determination how many columns of\n data should be reduced.\n\n **Args:**\n\n * `x` : input matrix (2d array), every row represents new sample\n\n * `labels` : list of labels (iterable), every item should be label for \\\n sample with corresponding index\n\n **Returns:**\n \n * `discriminants` : array of eigenvalues sorted in descending order\n\n "
] |
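A short sketch of how `LDA_discriminants` and `LDA` fit together: inspect the sorted eigenvalues first, then project onto the `n` strongest discriminants. The two-class synthetic data below is purely illustrative:

```python
import numpy as np

np.random.seed(1)
x = np.vstack([np.random.normal(0.0, 1.0, (50, 3)),   # class 0
               np.random.normal(3.0, 1.0, (50, 3))])  # class 1
labels = np.array([0] * 50 + [1] * 50)

print(LDA_discriminants(x, labels))  # eigenvalues, largest first (may carry zero imaginary parts)
new_x = LDA(x, labels, n=1)          # keep only the strongest discriminant
print(new_x.shape)                   # (100, 1)
```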
Please provide a description of the function:def adapt(self, d, x):
y = np.dot(self.w, x)
e = d - y
self.eps = self.eps - self.ro * self.mu * e * self.last_e * \
np.dot(x, self.last_x) / \
(np.dot(self.last_x, self.last_x) + self.eps)**2
nu = self.mu / (self.eps + np.dot(x, x))
self.w += nu * e * x
        self.last_e, self.last_x = e, x | [
"\n Adapt weights according one desired value and its input.\n\n **Args:**\n\n * `d` : desired value (float)\n\n * `x` : input array (1-dimensional array)\n "
] |
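All the `adapt` methods in this collection follow the same online pattern: one call per incoming sample. A minimal sample-by-sample loop, assuming a filter object `f` (for example a GNGD filter built with `n=4` taps) and the `d`, `x` arrays from the RLS sketch above:

```python
import numpy as np

errors = []
for k in range(len(d)):
    y_k = np.dot(f.w, x[k])      # prediction with the current weights
    errors.append(d[k] - y_k)    # a-priori error, before the update
    f.adapt(d[k], x[k])          # one-sample weight update
```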
Please provide a description of the function:def adapt(self, d, x):
self.update_memory_x(x)
m_d, m_x = self.read_memory()
# estimate
y = np.dot(self.w, x-m_x) + m_d
e = d - y
nu = self.mu / (self.eps + np.dot(x-m_x, x-m_x))
dw = nu * e * (x-m_x)
self.w += dw
self.update_memory_d(d) | [
"\n Adapt weights according one desired value and its input.\n\n Args:\n\n * `d` : desired value (float)\n\n * `x` : input array (1-dimensional array)\n "
] |
Please provide a description of the function:def read_memory(self):
if self.mem_empty == True:
if self.mem_idx == 0:
m_x = np.zeros(self.n)
m_d = 0
else:
m_x = np.mean(self.mem_x[:self.mem_idx+1], axis=0)
m_d = np.mean(self.mem_d[:self.mem_idx])
else:
m_x = np.mean(self.mem_x, axis=0)
m_d = np.mean(np.delete(self.mem_d, self.mem_idx))
self.mem_idx += 1
if self.mem_idx > len(self.mem_x)-1:
self.mem_idx = 0
self.mem_empty = False
return m_d, m_x | [
"\n This function read mean value of target`d`\n and input vector `x` from history\n "
] |
Please provide a description of the function:def filter_data(d, x, model="lms", **kwargs):
# overwrite n with correct size
kwargs["n"] = x.shape[1]
# create filter according model
if model in ["LMS", "lms"]:
f = FilterLMS(**kwargs)
elif model in ["NLMS", "nlms"]:
f = FilterNLMS(**kwargs)
elif model in ["RLS", "rls"]:
f = FilterRLS(**kwargs)
elif model in ["GNGD", "gngd"]:
f = FilterGNGD(**kwargs)
elif model in ["AP", "ap"]:
f = FilterAP(**kwargs)
elif model in ["LMF", "lmf"]:
f = FilterLMF(**kwargs)
elif model in ["NLMF", "nlmf"]:
f = FilterNLMF(**kwargs)
else:
raise ValueError('Unknown model of filter {}'.format(model))
# calculate and return the values
y, e, w = f.run(d, x)
return y, e, w | [
"\n Function that filter data with selected adaptive filter.\n \n **Args:**\n\n * `d` : desired value (1 dimensional array)\n\n * `x` : input matrix (2-dimensional array). Rows are samples, columns are\n input arrays.\n \n **Kwargs:**\n \n * Any key argument that can be accepted with selected filter model. \n For more information see documentation of desired adaptive filter.\n\n **Returns:**\n\n * `y` : output value (1 dimensional array).\n The size corresponds with the desired value.\n\n * `e` : filter error for every sample (1 dimensional array). \n The size corresponds with the desired value.\n\n * `w` : history of all weights (2 dimensional array).\n Every row is set of the weights for given sample.\n \n "
] |
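A usage sketch for `filter_data`: the filter length `n` is taken from the number of columns of `x`, so only the model name and any remaining keyword arguments need to be supplied. The data, and the assumption that the underlying NLMS filter accepts a `mu` step size, are illustrative:

```python
import numpy as np

np.random.seed(2)
x = np.random.normal(size=(300, 4))
d = x @ np.array([0.5, -1.0, 2.0, 0.1]) + np.random.normal(0, 0.01, 300)

y, e, w = filter_data(d, x, model="nlms", mu=0.5)
print(np.mean(e[-50:] ** 2))   # steady-state mean squared error
```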
Please provide a description of the function:def AdaptiveFilter(model="lms", **kwargs):
# check if the filter size was specified
if not "n" in kwargs:
raise ValueError('Filter size is not defined (n=?).')
# create filter according model
if model in ["LMS", "lms"]:
f = FilterLMS(**kwargs)
elif model in ["NLMS", "nlms"]:
f = FilterNLMS(**kwargs)
elif model in ["RLS", "rls"]:
f = FilterRLS(**kwargs)
elif model in ["GNGD", "gngd"]:
f = FilterGNGD(**kwargs)
elif model in ["AP", "ap"]:
f = FilterAP(**kwargs)
elif model in ["LMF", "lmf"]:
f = FilterLMF(**kwargs)
elif model in ["NLMF", "nlmf"]:
f = FilterNLMF(**kwargs)
else:
raise ValueError('Unknown model of filter {}'.format(model))
# return filter
return f | [
"\n Function that filter data with selected adaptive filter.\n \n **Args:**\n\n * `d` : desired value (1 dimensional array)\n\n * `x` : input matrix (2-dimensional array). Rows are samples, columns are \n input arrays.\n \n **Kwargs:**\n \n * Any key argument that can be accepted with selected filter model.\n For more information see documentation of desired adaptive filter.\n \n * It should be at least filter size `n`. \n\n **Returns:**\n\n * `y` : output value (1 dimensional array).\n The size corresponds with the desired value.\n\n * `e` : filter error for every sample (1 dimensional array). \n The size corresponds with the desired value.\n\n * `w` : history of all weights (2 dimensional array).\n Every row is set of the weights for given sample.\n \n "
] |
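`AdaptiveFilter` differs from `filter_data` in that no data are passed at construction time, so the filter size `n` must be given explicitly; the returned object is then used through its `run` (or `adapt`) method. A sketch reusing `d` and `x` from the previous example, with `mu` assumed to be an accepted keyword of the RLS filter:

```python
f = AdaptiveFilter(model="rls", n=4, mu=0.99)  # n is mandatory here
y, e, w = f.run(d, x)                          # d, x as in the filter_data sketch
```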
Please provide a description of the function:def learning_entropy(w, m=10, order=1, alpha=False):
w = np.array(w)
# get length of data and number of parameters
N = w.shape[0]
n = w.shape[1]
# get abs dw from w
dw = np.copy(w)
dw[order:] = np.abs(np.diff(dw, n=order, axis=0))
    # averaging floating window - the window is k-m ... k-1
awd = np.zeros(w.shape)
if not alpha:
# estimate the ALPHA with multiscale approach
swd = np.zeros(w.shape)
for k in range(m, N):
awd[k] = np.mean(dw[k-m:k], axis=0)
swd[k] = np.std(dw[k-m:k], axis=0)
# estimate the points of entropy
eps = 1e-10 # regularization term
le = (dw - awd) / (swd+eps)
else:
# estimate the ALPHA with direct approach
for k in range(m, N):
awd[k] = np.mean(dw[k-m:k], axis=0)
# estimate the points of entropy
alphas = np.array(alpha)
fh = np.zeros(N)
for alpha in alphas:
fh += np.sum(awd*alpha < dw, axis=1)
le = fh / float(n*len(alphas))
    # clear the unknown zone at the beginning
le[:m] = 0
# return output
return le | [
"\n This function estimates Learning Entropy.\n\n **Args:**\n\n * `w` : history of adaptive parameters of an adaptive model (2d array),\n every row represents parameters in given time index.\n\n **Kwargs:**\n\n * `m` : window size (1d array) - how many last samples are used for\n evaluation of every sample. \n \n * `order` : order of the LE (int) - order of weights differention\n\n * `alpha` : list of senstitivites (1d array). If not provided, the LE \n direct approach is used.\n\n **Returns:**\n\n * Learning Entropy of data (1 d array) - one value for every sample\n\n "
] |
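A sketch of calling `learning_entropy` on a weight history such as the `w_history` returned by the RLS sketch earlier. Note that, per the code above, the alpha-free call returns one value per weight per sample, while the alpha-based (direct) call returns a single value per sample; the window size and alpha values below are illustrative:

```python
# w_history: 2-D array of adaptive weights, one row per sample
le_multiscale = learning_entropy(w_history, m=30, order=1)
le_direct = learning_entropy(w_history, m=30, order=1,
                             alpha=[8.0, 9.0, 10.0, 11.0, 12.0])
print(le_multiscale.shape)  # (N, n) - one value per weight per sample
print(le_direct.shape)      # (N,)   - one value per sample
```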
Please provide a description of the function:def activation(self, x, f="sigmoid", der=False):
if f == "sigmoid":
if der:
return x * (1 - x)
return 1. / (1 + np.exp(-x))
elif f == "tanh":
if der:
return 1 - x**2
return (2. / (1 + np.exp(-2*x))) - 1 | [
"\n This function process values of layer outputs with activation function.\n\n **Args:**\n\n * `x` : array to process (1-dimensional array) \n\n **Kwargs:**\n\n * `f` : activation function\n\n * `der` : normal output, or its derivation (bool)\n\n **Returns:**\n\n * values processed with activation function (1-dimensional array)\n \n "
] |
Please provide a description of the function:def predict(self, x):
self.x[1:] = x
self.y = self.activation(np.sum(self.w*self.x, axis=1), f=self.f)
return self.y | [
"\n This function make forward pass through this layer (no update).\n\n **Args:**\n\n * `x` : input vector (1-dimensional array)\n\n **Returns:**\n \n * `y` : output of MLP (float or 1-diemnsional array).\n Size depends on number of nodes in this layer.\n \n "
] |
Please provide a description of the function:def update(self, w, e):
if len(w.shape) == 1:
e = self.activation(self.y, f=self.f, der=True) * e * w
dw = self.mu * np.outer(e, self.x)
else:
e = self.activation(self.y, f=self.f, der=True) * (1 - self.y) * np.dot(e, w)
dw = self.mu * np.outer(e, self.x)
w = self.w[:,1:]
self.w += dw
return w, e | [
"\n This function make update according provided target\n and the last used input vector.\n\n **Args:**\n\n * `d` : target (float or 1-dimensional array).\n Size depends on number of MLP outputs.\n\n **Returns:**\n\n * `w` : weights of the layers (2-dimensional layer).\n Every row represents one node.\n \n * `e` : error used for update (float or 1-diemnsional array).\n Size correspond to size of input `d`.\n "
] |
Please provide a description of the function:def train(self, x, d, epochs=10, shuffle=False):
        # measure the data and check if the dimensions agree
N = len(x)
if not len(d) == N:
raise ValueError('The length of vector d and matrix x must agree.')
if not len(x[0]) == self.n_input:
raise ValueError('The number of network inputs is not correct.')
if self.outputs == 1:
if not len(d.shape) == 1:
raise ValueError('For one output MLP the d must have one dimension')
else:
if not d.shape[1] == self.outputs:
raise ValueError('The number of outputs must agree with number of columns in d')
try:
x = np.array(x)
d = np.array(d)
except:
raise ValueError('Impossible to convert x or d to a numpy array')
# create empty arrays
if self.outputs == 1:
e = np.zeros(epochs*N)
else:
e = np.zeros((epochs*N, self.outputs))
MSE = np.zeros(epochs)
# shuffle data if demanded
if shuffle:
randomize = np.arange(len(x))
np.random.shuffle(randomize)
x = x[randomize]
d = d[randomize]
# adaptation loop
for epoch in range(epochs):
for k in range(N):
self.predict(x[k])
e[(epoch*N)+k] = self.update(d[k])
MSE[epoch] = np.sum(e[epoch*N:(epoch+1)*N-1]**2) / N
return e, MSE | [
"\n Function for batch training of MLP.\n\n **Args:**\n\n * `x` : input array (2-dimensional array).\n Every row represents one input vector (features).\n\n * `d` : input array (n-dimensional array).\n Every row represents target for one input vector.\n Target can be one or more values (in case of multiple outputs).\n\n **Kwargs:**\n \n * `epochs` : amount of epochs (int). That means how many times\n the MLP will iterate over the passed set of data (`x`, `d`).\n\n * `shuffle` : if true, the order of inputs and outpust are shuffled (bool).\n That means the pairs input-output are in different order in every epoch.\n\n **Returns:**\n \n * `e`: output vector (m-dimensional array). Every row represents\n error (or errors) for an input and output in given epoch.\n The size of this array is length of provided data times\n amount of epochs (`N*epochs`).\n\n * `MSE` : mean squared error (1-dimensional array). Every value\n stands for MSE of one epoch.\n \n "
] |
Please provide a description of the function:def run(self, x):
        # measure the data and check if the dimensions agree
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array')
N = len(x)
# create empty arrays
if self.outputs == 1:
y = np.zeros(N)
else:
y = np.zeros((N, self.outputs))
# predict data in loop
for k in range(N):
y[k] = self.predict(x[k])
return y | [
"\n Function for batch usage of already trained and tested MLP.\n\n **Args:**\n\n * `x` : input array (2-dimensional array).\n Every row represents one input vector (features).\n\n **Returns:**\n \n * `y`: output vector (n-dimensional array). Every row represents\n output (outputs) for an input vector.\n \n "
] |
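The `train` and `run` methods above (and the `predict`/`update` methods that follow) belong to a multilayer perceptron class; padasip's `NetworkMLP` exposes this interface. A minimal training sketch in which the class name, constructor signature and toy data are assumptions:

```python
import numpy as np
import padasip as pa  # assumed source of the MLP class

np.random.seed(3)
x = np.random.uniform(-1, 1, (200, 3))   # 200 samples, 3 features
d = np.sum(x, axis=1)                    # toy regression target (one output)

mlp = pa.ann.NetworkMLP([6, 4], 3, outputs=1, activation="tanh")  # assumed signature
e, mse = mlp.train(x, d, epochs=20, shuffle=True)
y = mlp.run(x)                           # batch prediction after training
print(mse[-1])                           # MSE of the final epoch
```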
Please provide a description of the function:def predict(self, x):
# forward pass to hidden layers
for l in self.layers:
x = l.predict(x)
self.x[1:] = x
# forward pass to output layer
if self.outputs == 1:
self.y = np.dot(self.w, self.x)
else:
self.y = np.sum(self.w*self.x, axis=1)
return self.y | [
"\n This function make forward pass through MLP (no update).\n\n **Args:**\n\n * `x` : input vector (1-dimensional array)\n\n **Returns:**\n \n * `y` : output of MLP (float or 1-diemnsional array).\n Size depends on number of MLP outputs.\n \n "
] |
Please provide a description of the function:def update(self, d):
# update output layer
e = d - self.y
error = np.copy(e)
if self.outputs == 1:
dw = self.mu * e * self.x
w = np.copy(self.w)[1:]
else:
dw = self.mu * np.outer(e, self.x)
w = np.copy(self.w)[:,1:]
self.w += dw
# update hidden layers
for l in reversed(self.layers):
w, e = l.update(w, e)
return error | [
"\n This function make update according provided target\n and the last used input vector.\n\n **Args:**\n\n * `d` : target (float or 1-dimensional array).\n Size depends on number of MLP outputs.\n\n **Returns:**\n \n * `e` : error used for update (float or 1-diemnsional array).\n Size correspond to size of input `d`.\n \n "
] |
Please provide a description of the function:def PCA_components(x):
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
# eigen values and eigen vectors of data covariance matrix
eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T))
    # sort eigenvectors by descending eigenvalue
eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
# form output - order the eigenvalues
return eigen_values[(-eigen_values).argsort()] | [
"\n Principal Component Analysis helper to check out eigenvalues of components.\n\n **Args:**\n\n * `x` : input matrix (2d array), every row represents new sample\n\n **Returns:**\n \n * `components`: sorted array of principal components eigenvalues \n \n "
] |
Please provide a description of the function:def PCA(x, n=False):
# select n if not provided
if not n:
n = x.shape[1] - 1
# validate inputs
try:
x = np.array(x)
except:
raise ValueError('Impossible to convert x to a numpy array.')
assert type(n) == int, "Provided n is not an integer."
assert x.shape[1] > n, "The requested n is bigger than \
number of features in x."
# eigen values and eigen vectors of data covariance matrix
eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T))
    # sort eigenvectors by descending eigenvalue
eigen_order = eigen_vectors.T[(-eigen_values).argsort()]
# form output - reduced x matrix
return eigen_order[:n].dot(x.T).T | [
"\n Principal component analysis function.\n\n **Args:**\n\n * `x` : input matrix (2d array), every row represents new sample\n\n **Kwargs:**\n\n * `n` : number of features returned (integer) - how many columns \n should the output keep\n\n **Returns:**\n \n * `new_x` : matrix with reduced size (lower number of columns)\n "
] |
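A sketch showing `PCA_components` and `PCA` together: check the eigenvalue spectrum first, then reduce. The data, with one nearly redundant column, is illustrative:

```python
import numpy as np

np.random.seed(4)
x = np.random.normal(size=(100, 5))
x[:, 4] = x[:, 0] + 0.01 * np.random.normal(size=100)  # nearly redundant column

print(PCA_components(x))  # covariance eigenvalues, sorted in descending order
new_x = PCA(x, n=3)       # keep the three strongest components
print(new_x.shape)        # (100, 3)
```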
Please provide a description of the function:def clean_axis(axis):
axis.get_xaxis().set_ticks([])
axis.get_yaxis().set_ticks([])
for spine in list(axis.spines.values()):
spine.set_visible(False) | [
"Remove ticks, tick labels, and frame from axis"
] |
Please provide a description of the function:def get_seaborn_colorbar(dfr, classes):
levels = sorted(list(set(classes.values())))
paldict = {
lvl: pal
for (lvl, pal) in zip(
levels,
sns.cubehelix_palette(
len(levels), light=0.9, dark=0.1, reverse=True, start=1, rot=-2
),
)
}
lvl_pal = {cls: paldict[lvl] for (cls, lvl) in list(classes.items())}
col_cb = pd.Series(dfr.index).map(lvl_pal)
# The col_cb Series index now has to match the dfr.index, but
# we don't create the Series with this (and if we try, it
# fails) - so change it with this line
col_cb.index = dfr.index
return col_cb | [
"Return a colorbar representing classes, for a Seaborn plot.\n\n The aim is to get a pd.Series for the passed dataframe columns,\n in the form:\n 0 colour for class in col 0\n 1 colour for class in col 1\n ... colour for class in col ...\n n colour for class in col n\n "
] |
Please provide a description of the function:def get_safe_seaborn_labels(dfr, labels):
if labels is not None:
return [labels.get(i, i) for i in dfr.index]
return [i for i in dfr.index] | [
"Returns labels guaranteed to correspond to the dataframe."
] |
Please provide a description of the function:def get_seaborn_clustermap(dfr, params, title=None, annot=True):
fig = sns.clustermap(
dfr,
cmap=params.cmap,
vmin=params.vmin,
vmax=params.vmax,
col_colors=params.colorbar,
row_colors=params.colorbar,
figsize=(params.figsize, params.figsize),
linewidths=params.linewidths,
xticklabels=params.labels,
yticklabels=params.labels,
annot=annot,
)
fig.cax.yaxis.set_label_position("left")
if title:
fig.cax.set_ylabel(title)
# Rotate ticklabels
fig.ax_heatmap.set_xticklabels(fig.ax_heatmap.get_xticklabels(), rotation=90)
fig.ax_heatmap.set_yticklabels(fig.ax_heatmap.get_yticklabels(), rotation=0)
# Return clustermap
return fig | [
"Returns a Seaborn clustermap."
] |
Please provide a description of the function:def heatmap_seaborn(dfr, outfilename=None, title=None, params=None):
# Decide on figure layout size: a minimum size is required for
# aesthetics, and a maximum to avoid core dumps on rendering.
# If we hit the maximum size, we should modify font size.
maxfigsize = 120
calcfigsize = dfr.shape[0] * 1.1
figsize = min(max(8, calcfigsize), maxfigsize)
if figsize == maxfigsize:
scale = maxfigsize / calcfigsize
sns.set_context("notebook", font_scale=scale)
# Add a colorbar?
if params.classes is None:
col_cb = None
else:
col_cb = get_seaborn_colorbar(dfr, params.classes)
# Labels are defined before we build the clustering
# If a label mapping is missing, use the key text as fall back
params.labels = get_safe_seaborn_labels(dfr, params.labels)
# Add attributes to parameter object, and draw heatmap
params.colorbar = col_cb
params.figsize = figsize
params.linewidths = 0.25
fig = get_seaborn_clustermap(dfr, params, title=title)
# Save to file
if outfilename:
fig.savefig(outfilename)
# Return clustermap
return fig | [
"Returns seaborn heatmap with cluster dendrograms.\n\n - dfr - pandas DataFrame with relevant data\n - outfilename - path to output file (indicates output format)\n "
] |
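A heavily hedged sketch of driving `heatmap_seaborn` directly. The `Params` constructor call mirrors the one used by `draw()` further below, but the assumptions that `Params` lives in this module, that its first argument is a `(cmap, vmin, vmax)` tuple exposed as `params.cmap`/`params.vmin`/`params.vmax`, and the input filename, are not confirmed by this excerpt:

```python
import pandas as pd

# square DataFrame of pairwise identities, e.g. read back from a .tab file
dfr = pd.read_csv("ANIm_percentage_identity.tab", index_col=0, sep="\t")

params = Params(("Blues", 0.8, 1.0),  # assumed to expose .cmap, .vmin, .vmax
                None,                 # labels: fall back to the DataFrame index
                None)                 # classes: no class colorbar
fig = heatmap_seaborn(dfr, outfilename="ANIm_heatmap.pdf",
                      title="ANIm percentage identity", params=params)
```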
Please provide a description of the function:def add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col"):
# Row or column axes?
if orientation == "row":
dists = distance.squareform(distance.pdist(dfr))
spec = heatmap_gs[1, 0]
orient = "left"
nrows, ncols = 1, 2
height_ratios = [1]
else: # Column dendrogram
dists = distance.squareform(distance.pdist(dfr.T))
spec = heatmap_gs[0, 1]
orient = "top"
nrows, ncols = 2, 1
height_ratios = [1, 0.15]
# Create row dendrogram axis
gspec = gridspec.GridSpecFromSubplotSpec(
nrows,
ncols,
subplot_spec=spec,
wspace=0.0,
hspace=0.1,
height_ratios=height_ratios,
)
dend_axes = fig.add_subplot(gspec[0, 0])
dend = sch.dendrogram(
sch.linkage(distance.squareform(dists), method="complete"),
color_threshold=np.inf,
orientation=orient,
)
clean_axis(dend_axes)
return {"dendrogram": dend, "gridspec": gspec} | [
"Return a dendrogram and corresponding gridspec, attached to the fig\n\n Modifies the fig in-place. Orientation is either 'row' or 'col' and\n determines location and orientation of the rendered dendrogram.\n "
] |
Please provide a description of the function:def get_mpl_heatmap_axes(dfr, fig, heatmap_gs):
# Create heatmap axis
heatmap_axes = fig.add_subplot(heatmap_gs[1, 1])
heatmap_axes.set_xticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0]))
heatmap_axes.set_yticks(np.linspace(0, dfr.shape[0] - 1, dfr.shape[0]))
heatmap_axes.grid(False)
heatmap_axes.xaxis.tick_bottom()
heatmap_axes.yaxis.tick_right()
return heatmap_axes | [
"Return axis for Matplotlib heatmap."
] |
Please provide a description of the function:def add_mpl_colorbar(dfr, fig, dend, params, orientation="row"):
for name in dfr.index[dend["dendrogram"]["leaves"]]:
if name not in params.classes:
params.classes[name] = name
# Assign a numerical value to each class, for mpl
classdict = {cls: idx for (idx, cls) in enumerate(params.classes.values())}
# colourbar
cblist = []
for name in dfr.index[dend["dendrogram"]["leaves"]]:
try:
cblist.append(classdict[params.classes[name]])
except KeyError:
cblist.append(classdict[name])
colbar = pd.Series(cblist)
# Create colourbar axis - could capture if needed
if orientation == "row":
cbaxes = fig.add_subplot(dend["gridspec"][0, 1])
cbaxes.imshow(
[[cbar] for cbar in colbar.values],
cmap=plt.get_cmap(pyani_config.MPL_CBAR),
interpolation="nearest",
aspect="auto",
origin="lower",
)
else:
cbaxes = fig.add_subplot(dend["gridspec"][1, 0])
cbaxes.imshow(
[colbar],
cmap=plt.get_cmap(pyani_config.MPL_CBAR),
interpolation="nearest",
aspect="auto",
origin="lower",
)
clean_axis(cbaxes)
return colbar | [
"Add class colorbars to Matplotlib heatmap."
] |
Please provide a description of the function:def add_mpl_labels(heatmap_axes, rowlabels, collabels, params):
if params.labels:
# If a label mapping is missing, use the key text as fall back
rowlabels = [params.labels.get(lab, lab) for lab in rowlabels]
collabels = [params.labels.get(lab, lab) for lab in collabels]
xlabs = heatmap_axes.set_xticklabels(collabels)
ylabs = heatmap_axes.set_yticklabels(rowlabels)
for label in xlabs: # Rotate column labels
label.set_rotation(90)
for labset in (xlabs, ylabs): # Smaller font
for label in labset:
label.set_fontsize(8) | [
"Add labels to Matplotlib heatmap axes, in-place."
] |
Please provide a description of the function:def add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title=None):
# Set tick intervals
cbticks = [params.vmin + e * params.vdiff for e in (0, 0.25, 0.5, 0.75, 1)]
if params.vmax > 10:
exponent = int(floor(log10(params.vmax))) - 1
cbticks = [int(round(e, -exponent)) for e in cbticks]
scale_subplot = gridspec.GridSpecFromSubplotSpec(
1, 3, subplot_spec=heatmap_gs[0, 0], wspace=0.0, hspace=0.0
)
scale_ax = fig.add_subplot(scale_subplot[0, 1])
cbar = fig.colorbar(ax_map, scale_ax, ticks=cbticks)
if title:
cbar.set_label(title, fontsize=6)
cbar.ax.yaxis.set_ticks_position("left")
cbar.ax.yaxis.set_label_position("left")
cbar.ax.tick_params(labelsize=6)
cbar.outline.set_linewidth(0)
return cbar | [
"Add colour scale to heatmap."
] |
Please provide a description of the function:def heatmap_mpl(dfr, outfilename=None, title=None, params=None):
# Layout figure grid and add title
# Set figure size by the number of rows in the dataframe
figsize = max(8, dfr.shape[0] * 0.175)
fig = plt.figure(figsize=(figsize, figsize))
# if title:
# fig.suptitle(title)
heatmap_gs = gridspec.GridSpec(
2, 2, wspace=0.0, hspace=0.0, width_ratios=[0.3, 1], height_ratios=[0.3, 1]
)
# Add column and row dendrograms/axes to figure
coldend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="col")
rowdend = add_mpl_dendrogram(dfr, fig, heatmap_gs, orientation="row")
# Add heatmap axes to figure, with rows/columns as in the dendrograms
heatmap_axes = get_mpl_heatmap_axes(dfr, fig, heatmap_gs)
ax_map = heatmap_axes.imshow(
dfr.iloc[rowdend["dendrogram"]["leaves"], coldend["dendrogram"]["leaves"]],
interpolation="nearest",
cmap=params.cmap,
origin="lower",
vmin=params.vmin,
vmax=params.vmax,
aspect="auto",
)
# Are there class colourbars to add?
if params.classes is not None:
add_mpl_colorbar(dfr, fig, coldend, params, orientation="col")
add_mpl_colorbar(dfr, fig, rowdend, params, orientation="row")
# Add heatmap labels
add_mpl_labels(
heatmap_axes,
dfr.index[rowdend["dendrogram"]["leaves"]],
dfr.index[coldend["dendrogram"]["leaves"]],
params,
)
# Add colour scale
add_mpl_colorscale(fig, heatmap_gs, ax_map, params, title)
# Return figure output, and write, if required
plt.subplots_adjust(top=0.85) # Leave room for title
# fig.set_tight_layout(True)
# We know that there is a UserWarning here about tight_layout and
# using the Agg renderer on OSX, so catch and ignore it, for cleanliness.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
heatmap_gs.tight_layout(fig, h_pad=0.1, w_pad=0.5)
if outfilename:
fig.savefig(outfilename)
return fig | [
"Returns matplotlib heatmap with cluster dendrograms.\n\n - dfr - pandas DataFrame with relevant data\n - outfilename - path to output file (indicates output format)\n - params - a list of parameters for plotting: [colormap, vmin, vmax]\n - labels - dictionary of alternative labels, keyed by default sequence\n labels\n - classes - dictionary of sequence classes, keyed by default sequence\n labels\n "
] |
Please provide a description of the function:def run_dependency_graph(jobgraph, workers=None, logger=None):
cmdsets = []
for job in jobgraph:
cmdsets = populate_cmdsets(job, cmdsets, depth=1)
# Put command sets in reverse order, and submit to multiprocessing_run
cmdsets.reverse()
cumretval = 0
for cmdset in cmdsets:
if logger: # Try to be informative, if the logger module is being used
logger.info("Command pool now running:")
for cmd in cmdset:
logger.info(cmd)
cumretval += multiprocessing_run(cmdset, workers)
if logger: # Try to be informative, if the logger module is being used
logger.info("Command pool done.")
return cumretval | [
"Creates and runs pools of jobs based on the passed jobgraph.\n\n - jobgraph - list of jobs, which may have dependencies.\n - verbose - flag for multiprocessing verbosity\n - logger - a logger module logger (optional)\n\n The strategy here is to loop over each job in the list of jobs (jobgraph),\n and create/populate a series of Sets of commands, to be run in\n reverse order with multiprocessing_run as asynchronous pools.\n "
] |
Please provide a description of the function:def populate_cmdsets(job, cmdsets, depth):
if len(cmdsets) < depth:
cmdsets.append(set())
cmdsets[depth-1].add(job.command)
if len(job.dependencies) == 0:
return cmdsets
for j in job.dependencies:
cmdsets = populate_cmdsets(j, cmdsets, depth+1)
return cmdsets | [
"Creates a list of sets containing jobs at different depths of the\n dependency tree.\n\n This is a recursive function (is there something quicker in the itertools\n module?) that descends each 'root' job in turn, populating each\n "
] |
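To make the recursion concrete, a small worked sketch of `populate_cmdsets` with a two-level dependency; the stand-in job class and the command strings are hypothetical, but mirror the `command`/`dependencies` attributes the function actually reads:

```python
# Minimal stand-in exposing the two attributes populate_cmdsets uses
class FakeJob:
    def __init__(self, command, dependencies=None):
        self.command = command
        self.dependencies = dependencies or []

nucmer_job = FakeJob("nucmer --mum -p out/a_vs_b a.fna b.fna")
filter_job = FakeJob("delta_filter_wrapper.py delta-filter -1 out/a_vs_b.delta out/a_vs_b.filter",
                     dependencies=[nucmer_job])  # root job depends on the NUCmer run

cmdsets = populate_cmdsets(filter_job, [], depth=1)
# cmdsets[0] -> {filter command}, cmdsets[1] -> {nucmer command};
# run_dependency_graph() reverses the list, so the NUCmer pool runs first.
```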
Please provide a description of the function:def multiprocessing_run(cmdlines, workers=None):
# Run jobs
# If workers is None or greater than the number of cores available,
# it will be set to the maximum number of cores
pool = multiprocessing.Pool(processes=workers)
results = [pool.apply_async(subprocess.run, (str(cline), ),
{'shell': sys.platform != "win32",
'stdout': subprocess.PIPE,
'stderr': subprocess.PIPE})
for cline in cmdlines]
pool.close()
pool.join()
return sum([r.get().returncode for r in results]) | [
"Distributes passed command-line jobs using multiprocessing.\n\n - cmdlines - an iterable of command line strings\n\n Returns the sum of exit codes from each job that was run. If\n all goes well, this should be 0. Anything else and the calling\n function should act accordingly.\n "
] |
Please provide a description of the function:def get_input_files(dirname, *ext):
filelist = [f for f in os.listdir(dirname) if
os.path.splitext(f)[-1] in ext]
return [os.path.join(dirname, f) for f in filelist] | [
"Returns files in passed directory, filtered by extension.\n\n - dirname - path to input directory\n - *ext - list of arguments describing permitted file extensions\n "
] |
Please provide a description of the function:def get_sequence_lengths(fastafilenames):
tot_lengths = {}
for fn in fastafilenames:
tot_lengths[os.path.splitext(os.path.split(fn)[-1])[0]] = \
sum([len(s) for s in SeqIO.parse(fn, 'fasta')])
return tot_lengths | [
"Returns dictionary of sequence lengths, keyed by organism.\n\n Biopython's SeqIO module is used to parse all sequences in the FASTA\n file corresponding to each organism, and the total base count in each\n is obtained.\n\n NOTE: ambiguity symbols are not discounted.\n "
] |
Please provide a description of the function:def parse_cmdline():
parser = ArgumentParser(prog="average_nucleotide_identity.py")
parser.add_argument(
"--version", action="version", version="%(prog)s: pyani " + VERSION
)
parser.add_argument(
"-o",
"--outdir",
dest="outdirname",
action="store",
default=None,
required=True,
help="Output directory (required)",
)
parser.add_argument(
"-i",
"--indir",
dest="indirname",
action="store",
default=None,
required=True,
help="Input directory name (required)",
)
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="Give verbose output",
)
parser.add_argument(
"-f",
"--force",
dest="force",
action="store_true",
default=False,
help="Force file overwriting",
)
parser.add_argument(
"-s",
"--fragsize",
dest="fragsize",
action="store",
default=FRAGSIZE,
type=int,
help="Sequence fragment size for ANIb " "(default %i)" % FRAGSIZE,
)
parser.add_argument(
"-l",
"--logfile",
dest="logfile",
action="store",
default=None,
help="Logfile location",
)
parser.add_argument(
"--skip_nucmer",
dest="skip_nucmer",
action="store_true",
default=False,
help="Skip NUCmer runs, for testing " + "(e.g. if output already present)",
)
parser.add_argument(
"--skip_blastn",
dest="skip_blastn",
action="store_true",
default=False,
help="Skip BLASTN runs, for testing " + "(e.g. if output already present)",
)
parser.add_argument(
"--noclobber",
dest="noclobber",
action="store_true",
default=False,
help="Don't nuke existing files",
)
parser.add_argument(
"--nocompress",
dest="nocompress",
action="store_true",
default=False,
help="Don't compress/delete the comparison output",
)
parser.add_argument(
"-g",
"--graphics",
dest="graphics",
action="store_true",
default=False,
help="Generate heatmap of ANI",
)
parser.add_argument(
"--gformat",
dest="gformat",
action="store",
default="pdf,png,eps",
help="Graphics output format(s) [pdf|png|jpg|svg] "
"(default pdf,png,eps meaning three file formats)",
)
parser.add_argument(
"--gmethod",
dest="gmethod",
action="store",
default="mpl",
choices=["mpl", "seaborn"],
help="Graphics output method (default mpl)",
)
parser.add_argument(
"--labels",
dest="labels",
action="store",
default=None,
help="Path to file containing sequence labels",
)
parser.add_argument(
"--classes",
dest="classes",
action="store",
default=None,
help="Path to file containing sequence classes",
)
parser.add_argument(
"-m",
"--method",
dest="method",
action="store",
default="ANIm",
choices=["ANIm", "ANIb", "ANIblastall", "TETRA"],
help="ANI method (default ANIm)",
)
parser.add_argument(
"--scheduler",
dest="scheduler",
action="store",
default="multiprocessing",
choices=["multiprocessing", "SGE"],
help="Job scheduler (default multiprocessing, i.e. locally)",
)
parser.add_argument(
"--workers",
dest="workers",
action="store",
default=None,
type=int,
help="Number of worker processes for multiprocessing "
"(default zero, meaning use all available cores)",
)
parser.add_argument(
"--SGEgroupsize",
dest="sgegroupsize",
action="store",
default=10000,
type=int,
help="Number of jobs to place in an SGE array group " "(default 10000)",
)
parser.add_argument(
"--SGEargs",
dest="sgeargs",
action="store",
default=None,
type=str,
help="Additional arguments for qsub",
)
parser.add_argument(
"--maxmatch",
dest="maxmatch",
action="store_true",
default=False,
help="Override MUMmer to allow all NUCmer matches",
)
parser.add_argument(
"--nucmer_exe",
dest="nucmer_exe",
action="store",
default=pyani_config.NUCMER_DEFAULT,
help="Path to NUCmer executable",
)
parser.add_argument(
"--filter_exe",
dest="filter_exe",
action="store",
default=pyani_config.FILTER_DEFAULT,
help="Path to delta-filter executable",
)
parser.add_argument(
"--blastn_exe",
dest="blastn_exe",
action="store",
default=pyani_config.BLASTN_DEFAULT,
help="Path to BLASTN+ executable",
)
parser.add_argument(
"--makeblastdb_exe",
dest="makeblastdb_exe",
action="store",
default=pyani_config.MAKEBLASTDB_DEFAULT,
help="Path to BLAST+ makeblastdb executable",
)
parser.add_argument(
"--blastall_exe",
dest="blastall_exe",
action="store",
default=pyani_config.BLASTALL_DEFAULT,
help="Path to BLASTALL executable",
)
parser.add_argument(
"--formatdb_exe",
dest="formatdb_exe",
action="store",
default=pyani_config.FORMATDB_DEFAULT,
help="Path to BLAST formatdb executable",
)
parser.add_argument(
"--write_excel",
dest="write_excel",
action="store_true",
default=False,
help="Write Excel format output tables",
)
parser.add_argument(
"--rerender",
dest="rerender",
action="store_true",
default=False,
help="Rerender graphics output without recalculation",
)
parser.add_argument(
"--subsample",
dest="subsample",
action="store",
default=None,
help="Subsample a percentage [0-1] or specific "
+ "number (1-n) of input sequences",
)
parser.add_argument(
"--seed",
dest="seed",
action="store",
default=None,
help="Set random seed for reproducible subsampling.",
)
parser.add_argument(
"--jobprefix",
dest="jobprefix",
action="store",
default="ANI",
help="Prefix for SGE jobs (default ANI).",
)
return parser.parse_args() | [
"Parse command-line arguments for script."
] |
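For orientation, the parser above can be exercised directly by faking `sys.argv` inside the script's namespace; the paths and option choices here are illustrative only:

```python
import sys

sys.argv = ["average_nucleotide_identity.py",
            "-i", "genomes/", "-o", "ANIm_output/",
            "-m", "ANIm", "-g", "--gmethod", "seaborn"]
args = parse_cmdline()
print(args.method, args.indirname, args.outdirname, args.gmethod)
```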
Please provide a description of the function:def last_exception():
exc_type, exc_value, exc_traceback = sys.exc_info()
return "".join(traceback.format_exception(exc_type, exc_value, exc_traceback)) | [
" Returns last exception as a string, or use in logging.\n "
] |
Please provide a description of the function:def make_outdir():
if os.path.exists(args.outdirname):
if not args.force:
logger.error(
"Output directory %s would overwrite existing " + "files (exiting)",
args.outdirname,
)
sys.exit(1)
elif args.noclobber:
logger.warning(
"NOCLOBBER: not actually deleting directory %s", args.outdirname
)
else:
logger.info(
"Removing directory %s and everything below it", args.outdirname
)
shutil.rmtree(args.outdirname)
logger.info("Creating directory %s", args.outdirname)
try:
os.makedirs(args.outdirname) # We make the directory recursively
# Depending on the choice of method, a subdirectory will be made for
# alignment output files
if args.method != "TETRA":
os.makedirs(os.path.join(args.outdirname, ALIGNDIR[args.method]))
except OSError:
# This gets thrown if the directory exists. If we've forced overwrite/
# delete and we're not clobbering, we let things slide
if args.noclobber and args.force:
logger.info("NOCLOBBER+FORCE: not creating directory")
else:
            logger.error(last_exception())
sys.exit(1) | [
"Make the output directory, if required.\n\n This is a little involved. If the output directory already exists,\n we take the safe option by default, and stop with an error. We can,\n however, choose to force the program to go on, in which case we can\n either clobber the existing directory, or not. The options turn out\n as the following, if the directory exists:\n\n DEFAULT: stop and report the collision\n FORCE: continue, and remove the existing output directory\n NOCLOBBER+FORCE: continue, but do not remove the existing output\n "
] |
Please provide a description of the function:def compress_delete_outdir(outdir):
# Compress output in .tar.gz file and remove raw output
tarfn = outdir + ".tar.gz"
logger.info("\tCompressing output from %s to %s", outdir, tarfn)
with tarfile.open(tarfn, "w:gz") as fh:
fh.add(outdir)
logger.info("\tRemoving output directory %s", outdir)
shutil.rmtree(outdir) | [
"Compress the contents of the passed directory to .tar.gz and delete."
] |
Please provide a description of the function:def calculate_anim(infiles, org_lengths):
logger.info("Running ANIm")
logger.info("Generating NUCmer command-lines")
deltadir = os.path.join(args.outdirname, ALIGNDIR["ANIm"])
logger.info("Writing nucmer output to %s", deltadir)
# Schedule NUCmer runs
if not args.skip_nucmer:
joblist = anim.generate_nucmer_jobs(
infiles,
args.outdirname,
nucmer_exe=args.nucmer_exe,
filter_exe=args.filter_exe,
maxmatch=args.maxmatch,
jobprefix=args.jobprefix,
)
if args.scheduler == "multiprocessing":
logger.info("Running jobs with multiprocessing")
if args.workers is None:
logger.info("(using maximum number of available " + "worker threads)")
else:
logger.info("(using %d worker threads, if available)", args.workers)
cumval = run_mp.run_dependency_graph(
joblist, workers=args.workers, logger=logger
)
logger.info("Cumulative return value: %d", cumval)
if 0 < cumval:
logger.warning(
"At least one NUCmer comparison failed. " + "ANIm may fail."
)
else:
logger.info("All multiprocessing jobs complete.")
else:
logger.info("Running jobs with SGE")
logger.info("Jobarray group size set to %d", args.sgegroupsize)
run_sge.run_dependency_graph(
joblist,
logger=logger,
jgprefix=args.jobprefix,
sgegroupsize=args.sgegroupsize,
sgeargs=args.sgeargs,
)
else:
logger.warning("Skipping NUCmer run (as instructed)!")
# Process resulting .delta files
logger.info("Processing NUCmer .delta files.")
results = anim.process_deltadir(deltadir, org_lengths, logger=logger)
if results.zero_error: # zero percentage identity error
if not args.skip_nucmer and args.scheduler == "multiprocessing":
if 0 < cumval:
logger.error(
"This has possibly been a NUCmer run failure, "
+ "please investigate"
)
logger.error(last_exception())
sys.exit(1)
else:
logger.error(
"This is possibly due to a NUCmer comparison "
+ "being too distant for use. Please consider "
+ "using the --maxmatch option."
)
logger.error(
"This is alternatively due to NUCmer run "
+ "failure, analysis will continue, but please "
+ "investigate."
)
if not args.nocompress:
logger.info("Compressing/deleting %s", deltadir)
compress_delete_outdir(deltadir)
# Return processed data from .delta files
return results | [
"Returns ANIm result dataframes for files in input directory.\n\n - infiles - paths to each input file\n - org_lengths - dictionary of input sequence lengths, keyed by sequence\n\n Finds ANI by the ANIm method, as described in Richter et al (2009)\n Proc Natl Acad Sci USA 106: 19126-19131 doi:10.1073/pnas.0906412106.\n\n All FASTA format files (selected by suffix) in the input directory\n are compared against each other, pairwise, using NUCmer (which must\n be in the path). NUCmer output is stored in the output directory.\n\n The NUCmer .delta file output is parsed to obtain an alignment length\n and similarity error count for every unique region alignment between\n the two organisms, as represented by the sequences in the FASTA files.\n\n These are processed to give matrices of aligned sequence lengths,\n average nucleotide identity (ANI) percentages, coverage (aligned\n percentage of whole genome), and similarity error cound for each pairwise\n comparison.\n "
] |
Please provide a description of the function:def calculate_tetra(infiles):
logger.info("Running TETRA.")
# First, find Z-scores
logger.info("Calculating TETRA Z-scores for each sequence.")
tetra_zscores = {}
for filename in infiles:
logger.info("Calculating TETRA Z-scores for %s", filename)
org = os.path.splitext(os.path.split(filename)[-1])[0]
tetra_zscores[org] = tetra.calculate_tetra_zscore(filename)
# Then calculate Pearson correlation between Z-scores for each sequence
logger.info("Calculating TETRA correlation scores.")
tetra_correlations = tetra.calculate_correlations(tetra_zscores)
return tetra_correlations | [
"Calculate TETRA for files in input directory.\n\n - infiles - paths to each input file\n - org_lengths - dictionary of input sequence lengths, keyed by sequence\n\n Calculates TETRA correlation scores, as described in:\n\n Richter M, Rossello-Mora R (2009) Shifting the genomic gold standard for\n the prokaryotic species definition. Proc Natl Acad Sci USA 106:\n 19126-19131. doi:10.1073/pnas.0906412106.\n\n and\n\n Teeling et al. (2004) Application of tetranucleotide frequencies for the\n assignment of genomic fragments. Env. Microbiol. 6(9): 938-947.\n doi:10.1111/j.1462-2920.2004.00624.x\n "
] |
Please provide a description of the function:def unified_anib(infiles, org_lengths):
logger.info("Running %s", args.method)
blastdir = os.path.join(args.outdirname, ALIGNDIR[args.method])
logger.info("Writing BLAST output to %s", blastdir)
# Build BLAST databases and run pairwise BLASTN
if not args.skip_blastn:
# Make sequence fragments
logger.info("Fragmenting input files, and writing to %s", args.outdirname)
# Fraglengths does not get reused with BLASTN
fragfiles, fraglengths = anib.fragment_fasta_files(
infiles, blastdir, args.fragsize
)
# Export fragment lengths as JSON, in case we re-run with --skip_blastn
with open(os.path.join(blastdir, "fraglengths.json"), "w") as outfile:
json.dump(fraglengths, outfile)
# Which executables are we using?
# if args.method == "ANIblastall":
# format_exe = args.formatdb_exe
# blast_exe = args.blastall_exe
# else:
# format_exe = args.makeblastdb_exe
# blast_exe = args.blastn_exe
# Run BLAST database-building and executables from a jobgraph
logger.info("Creating job dependency graph")
jobgraph = anib.make_job_graph(
infiles, fragfiles, anib.make_blastcmd_builder(args.method, blastdir)
)
# jobgraph = anib.make_job_graph(infiles, fragfiles, blastdir,
# format_exe, blast_exe, args.method,
# jobprefix=args.jobprefix)
if args.scheduler == "multiprocessing":
logger.info("Running jobs with multiprocessing")
logger.info("Running job dependency graph")
if args.workers is None:
logger.info("(using maximum number of available " + "worker threads)")
else:
logger.info("(using %d worker threads, if available)", args.workers)
cumval = run_mp.run_dependency_graph(
jobgraph, workers=args.workers, logger=logger
)
if 0 < cumval:
logger.warning(
"At least one BLAST run failed. " + "%s may fail.", args.method
)
else:
logger.info("All multiprocessing jobs complete.")
else:
run_sge.run_dependency_graph(jobgraph, logger=logger)
logger.info("Running jobs with SGE")
else:
# Import fragment lengths from JSON
if args.method == "ANIblastall":
with open(os.path.join(blastdir, "fraglengths.json"), "rU") as infile:
fraglengths = json.load(infile)
else:
fraglengths = None
logger.warning("Skipping BLASTN runs (as instructed)!")
# Process pairwise BLASTN output
logger.info("Processing pairwise %s BLAST output.", args.method)
try:
data = anib.process_blast(
blastdir, org_lengths, fraglengths=fraglengths, mode=args.method
)
except ZeroDivisionError:
logger.error("One or more BLAST output files has a problem.")
if not args.skip_blastn:
if 0 < cumval:
logger.error(
"This is possibly due to BLASTN run failure, "
+ "please investigate"
)
else:
logger.error(
"This is possibly due to a BLASTN comparison "
+ "being too distant for use."
)
logger.error(last_exception())
if not args.nocompress:
logger.info("Compressing/deleting %s", blastdir)
compress_delete_outdir(blastdir)
# Return processed BLAST data
return data | [
"Calculate ANIb for files in input directory.\n\n - infiles - paths to each input file\n - org_lengths - dictionary of input sequence lengths, keyed by sequence\n\n Calculates ANI by the ANIb method, as described in Goris et al. (2007)\n Int J Syst Evol Micr 57: 81-91. doi:10.1099/ijs.0.64483-0. There are\n some minor differences depending on whether BLAST+ or legacy BLAST\n (BLASTALL) methods are used.\n\n All FASTA format files (selected by suffix) in the input directory are\n used to construct BLAST databases, placed in the output directory.\n Each file's contents are also split into sequence fragments of length\n options.fragsize, and the multiple FASTA file that results written to\n the output directory. These are BLASTNed, pairwise, against the\n databases.\n\n The BLAST output is interrogated for all fragment matches that cover\n at least 70% of the query sequence, with at least 30% nucleotide\n identity over the full length of the query sequence. This is an odd\n choice and doesn't correspond to the twilight zone limit as implied by\n Goris et al. We persist with their definition, however. Only these\n qualifying matches contribute to the total aligned length, and total\n aligned sequence identity used to calculate ANI.\n\n The results are processed to give matrices of aligned sequence length\n (aln_lengths.tab), similarity error counts (sim_errors.tab), ANIs\n (perc_ids.tab), and minimum aligned percentage (perc_aln.tab) of\n each genome, for each pairwise comparison. These are written to the\n output directory in plain text tab-separated format.\n "
] |
Please provide a description of the function:def write(results):
logger.info("Writing %s results to %s", args.method, args.outdirname)
if args.method == "TETRA":
out_excel = os.path.join(args.outdirname, TETRA_FILESTEMS[0]) + ".xlsx"
out_csv = os.path.join(args.outdirname, TETRA_FILESTEMS[0]) + ".tab"
if args.write_excel:
results.to_excel(out_excel, index=True)
results.to_csv(out_csv, index=True, sep="\t")
else:
for dfr, filestem in results.data:
out_excel = os.path.join(args.outdirname, filestem) + ".xlsx"
out_csv = os.path.join(args.outdirname, filestem) + ".tab"
logger.info("\t%s", filestem)
if args.write_excel:
dfr.to_excel(out_excel, index=True)
dfr.to_csv(out_csv, index=True, sep="\t") | [
"Write ANIb/ANIm/TETRA results to output directory.\n\n - results - results object from analysis\n\n Each dataframe is written to an Excel-format file (if args.write_excel is\n True), and plain text tab-separated file in the output directory. The\n order of result output must be reflected in the order of filestems.\n "
] |
Please provide a description of the function:def draw(filestems, gformat):
# Draw heatmaps
for filestem in filestems:
fullstem = os.path.join(args.outdirname, filestem)
outfilename = fullstem + ".%s" % gformat
infilename = fullstem + ".tab"
df = pd.read_csv(infilename, index_col=0, sep="\t")
logger.info("Writing heatmap to %s", outfilename)
params = pyani_graphics.Params(
params_mpl(df)[filestem],
pyani_tools.get_labels(args.labels),
pyani_tools.get_labels(args.classes),
)
if args.gmethod == "mpl":
pyani_graphics.heatmap_mpl(
df, outfilename=outfilename, title=filestem, params=params
)
elif args.gmethod == "seaborn":
pyani_graphics.heatmap_seaborn(
df, outfilename=outfilename, title=filestem, params=params
) | [
"Draw ANIb/ANIm/TETRA results\n\n - filestems - filestems for output files\n - gformat - the format for output graphics\n "
] |
Please provide a description of the function:def subsample_input(infiles):
logger.info("--subsample: %s", args.subsample)
try:
samplesize = float(args.subsample)
except TypeError: # Not a number
logger.error(
"--subsample must be int or float, got %s (exiting)", type(args.subsample)
)
sys.exit(1)
if samplesize <= 0: # Not a positive value
logger.error("--subsample must be positive value, got %s", str(args.subsample))
sys.exit(1)
if int(samplesize) > 1:
logger.info("Sample size integer > 1: %d", samplesize)
k = min(int(samplesize), len(infiles))
else:
logger.info("Sample size proportion in (0, 1]: %.3f", samplesize)
k = int(min(samplesize, 1.0) * len(infiles))
logger.info("Randomly subsampling %d sequences for analysis", k)
if args.seed:
logger.info("Setting random seed with: %s", args.seed)
random.seed(args.seed)
else:
logger.warning("Subsampling without specified random seed!")
logger.warning("Subsampling may NOT be easily reproducible!")
return random.sample(infiles, k) | [
"Returns a random subsample of the input files.\n\n - infiles: a list of input files for analysis\n "
] |
Please provide a description of the function:def wait(self, interval=SGE_WAIT):
finished = False
while not finished:
time.sleep(interval)
interval = min(2 * interval, 60)
finished = os.system("qstat -j %s > /dev/null" % (self.name)) | [
"Wait until the job finishes, and poll SGE on its status."
] |
Please provide a description of the function:def generate_script(self):
self.script = "" # Holds the script string
total = 1 # total number of jobs in this group
# for now, SGE_TASK_ID becomes TASK_ID, but we base it at zero
        self.script += 'let "TASK_ID=$SGE_TASK_ID - 1"\n'
# build the array definitions; force ordering for Python3.5 tests
for key in sorted(self.arguments.keys()):
values = self.arguments[key]
line = ("%s_ARRAY=( " % (key))
for value in values:
line += value
line += " "
line += " )\n"
self.script += line
total *= len(values)
self.script += "\n"
# now, build the decoding logic in the script; force ordering
for key in sorted(self.arguments.keys()):
count = len(self.arguments[key])
            self.script += 'let "%s_INDEX=$TASK_ID %% %d"\n' % (key, count)
            self.script += '%s=${%s_ARRAY[$%s_INDEX]}\n' % (key, key, key)
            self.script += 'let "TASK_ID=$TASK_ID / %d"\n' % (count)
# now, add the command to run the job
self.script += "\n"
self.script += self.command
self.script += "\n"
# set the number of tasks in this group
self.tasks = total | [
"Create the SGE script that will run the jobs in the JobGroup, with\n the passed arguments.\n ",
"let \"TASK_ID=$SGE_TASK_ID - 1\"\\n",
"let \"%s_INDEX=$TASK_ID %% %d\"\\n",
"%s=${%s_ARRAY[$%s_INDEX]}\\n",
"let \"TASK_ID=$TASK_ID / %d\"\\n"
] |
Please provide a description of the function:def generate_nucmer_jobs(
filenames,
outdir=".",
nucmer_exe=pyani_config.NUCMER_DEFAULT,
filter_exe=pyani_config.FILTER_DEFAULT,
maxmatch=False,
jobprefix="ANINUCmer",
):
ncmds, fcmds = generate_nucmer_commands(
filenames, outdir, nucmer_exe, filter_exe, maxmatch
)
joblist = []
for idx, ncmd in enumerate(ncmds):
njob = pyani_jobs.Job("%s_%06d-n" % (jobprefix, idx), ncmd)
fjob = pyani_jobs.Job("%s_%06d-f" % (jobprefix, idx), fcmds[idx])
fjob.add_dependency(njob)
# joblist.append(njob) # not required: dependency in fjob
joblist.append(fjob)
return joblist | [
"Return a list of Jobs describing NUCmer command-lines for ANIm\n\n - filenames - a list of paths to input FASTA files\n - outdir - path to output directory\n - nucmer_exe - location of the nucmer binary\n - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option\n\n Loop over all FASTA files, generating Jobs describing NUCmer command lines\n for each pairwise comparison.\n "
] |
Please provide a description of the function:def generate_nucmer_commands(
filenames,
outdir=".",
nucmer_exe=pyani_config.NUCMER_DEFAULT,
filter_exe=pyani_config.FILTER_DEFAULT,
maxmatch=False,
):
nucmer_cmdlines, delta_filter_cmdlines = [], []
for idx, fname1 in enumerate(filenames[:-1]):
for fname2 in filenames[idx + 1 :]:
ncmd, dcmd = construct_nucmer_cmdline(
fname1, fname2, outdir, nucmer_exe, filter_exe, maxmatch
)
nucmer_cmdlines.append(ncmd)
delta_filter_cmdlines.append(dcmd)
return (nucmer_cmdlines, delta_filter_cmdlines) | [
"Return a tuple of lists of NUCmer command-lines for ANIm\n\n The first element is a list of NUCmer commands, the second a list\n of delta_filter_wrapper.py commands. These are ordered such that\n commands are paired. The NUCmer commands should be run before\n the delta-filter commands.\n\n - filenames - a list of paths to input FASTA files\n - outdir - path to output directory\n - nucmer_exe - location of the nucmer binary\n - maxmatch - Boolean flag indicating to use NUCmer's -maxmatch option\n\n Loop over all FASTA files generating NUCmer command lines for each\n pairwise comparison.\n "
] |
Please provide a description of the function:def construct_nucmer_cmdline(
fname1,
fname2,
outdir=".",
nucmer_exe=pyani_config.NUCMER_DEFAULT,
filter_exe=pyani_config.FILTER_DEFAULT,
maxmatch=False,
):
outsubdir = os.path.join(outdir, pyani_config.ALIGNDIR["ANIm"])
outprefix = os.path.join(
outsubdir,
"%s_vs_%s"
% (
os.path.splitext(os.path.split(fname1)[-1])[0],
os.path.splitext(os.path.split(fname2)[-1])[0],
),
)
if maxmatch:
mode = "--maxmatch"
else:
mode = "--mum"
nucmercmd = "{0} {1} -p {2} {3} {4}".format(
nucmer_exe, mode, outprefix, fname1, fname2
)
filtercmd = "delta_filter_wrapper.py " + "{0} -1 {1} {2}".format(
filter_exe, outprefix + ".delta", outprefix + ".filter"
)
return (nucmercmd, filtercmd) | [
"Returns a tuple of NUCmer and delta-filter commands\n\n The split into a tuple was made necessary by changes to SGE/OGE. The\n delta-filter command must now be run as a dependency of the NUCmer\n command, and be wrapped in a Python script to capture STDOUT.\n\n NOTE: This command-line writes output data to a subdirectory of the passed\n outdir, called \"nucmer_output\".\n\n - fname1 - query FASTA filepath\n - fname2 - subject FASTA filepath\n - outdir - path to output directory\n - maxmatch - Boolean flag indicating whether to use NUCmer's -maxmatch\n option. If not, the -mum option is used instead\n "
] |
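The output prefix is built from the two input basenames joined with "_vs_" inside the "nucmer_output" subdirectory. A standalone sketch of that string handling is below; the input paths are hypothetical and the executables are written as plain "nucmer"/"delta-filter" rather than the configured defaults.

import os

# Illustrative only: reproduce the prefix/command construction with
# hypothetical paths (no pyani imports needed).
fname1, fname2 = "genomes/E_coli_K12.fna", "genomes/E_coli_O157.fna"
outsubdir = os.path.join("output", "nucmer_output")

stem1 = os.path.splitext(os.path.split(fname1)[-1])[0]
stem2 = os.path.splitext(os.path.split(fname2)[-1])[0]
outprefix = os.path.join(outsubdir, "%s_vs_%s" % (stem1, stem2))

nucmercmd = "nucmer --mum -p {0} {1} {2}".format(outprefix, fname1, fname2)
filtercmd = "delta_filter_wrapper.py delta-filter -1 {0} {1}".format(
    outprefix + ".delta", outprefix + ".filter")
print(nucmercmd)
print(filtercmd)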
Please provide a description of the function:def parse_delta(filename):
aln_length, sim_errors = 0, 0
for line in [l.strip().split() for l in open(filename, "r").readlines()]:
if line[0] == "NUCMER" or line[0].startswith(">"): # Skip headers
continue
# We only process lines with seven columns:
if len(line) == 7:
aln_length += abs(int(line[1]) - int(line[0]))
sim_errors += int(line[4])
return aln_length, sim_errors | [
"Returns (alignment length, similarity errors) tuple from passed .delta.\n\n - filename - path to the input .delta file\n\n Extracts the aligned length and number of similarity errors for each\n aligned uniquely-matched region, and returns the cumulative total for\n each as a tuple.\n "
] |
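As a quick check of the parsing rules, the sketch below writes a small fabricated .delta-style file and applies the same logic: header lines starting with NUCMER or ">" are skipped, and only seven-column alignment lines contribute to the totals.

# Illustrative only: a tiny made-up .delta-like file, parsed with the
# same rules as parse_delta().
delta_text = """NUCMER
>seq_A seq_B 4000000 3900000
1 1500 1 1498 12 12 0
2000 5000 2100 5103 25 25 0
"""

with open("toy.delta", "w") as outfh:
    outfh.write(delta_text)

aln_length, sim_errors = 0, 0
for line in [l.strip().split() for l in open("toy.delta").readlines()]:
    if not line or line[0] == "NUCMER" or line[0].startswith(">"):
        continue
    if len(line) == 7:
        aln_length += abs(int(line[1]) - int(line[0]))
        sim_errors += int(line[4])

print(aln_length, sim_errors)  # 1499 + 3000 = 4499 aligned bases, 37 errors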
Please provide a description of the function:def process_deltadir(delta_dir, org_lengths, logger=None):
# Process directory to identify input files - as of v0.2.4 we use the
# .filter files that result from delta-filter (1:1 alignments)
deltafiles = pyani_files.get_input_files(delta_dir, ".filter")
# Hold data in ANIResults object
results = ANIResults(list(org_lengths.keys()), "ANIm")
# Fill diagonal NA values for alignment_length with org_lengths
for org, length in list(org_lengths.items()):
results.alignment_lengths[org][org] = length
# Process .delta files assuming that the filename format holds:
# org1_vs_org2.delta
for deltafile in deltafiles:
qname, sname = os.path.splitext(os.path.split(deltafile)[-1])[0].split("_vs_")
# We may have .delta files from other analyses in the same directory
# If this occurs, we raise a warning, and skip the .delta file
if qname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Query name %s not in input " % qname
+ "sequence list, skipping %s" % deltafile
)
continue
if sname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Subject name %s not in input " % sname
+ "sequence list, skipping %s" % deltafile
)
continue
tot_length, tot_sim_error = parse_delta(deltafile)
if tot_length == 0 and logger is not None:
if logger:
logger.warning(
"Total alignment length reported in " + "%s is zero!" % deltafile
)
query_cover = float(tot_length) / org_lengths[qname]
sbjct_cover = float(tot_length) / org_lengths[sname]
# Calculate percentage ID of aligned length. This may fail if
# total length is zero.
# The ZeroDivisionError that would arise should be handled
# Common causes are that a NUCmer run failed, or that a very
# distant sequence was included in the analysis.
try:
perc_id = 1 - float(tot_sim_error) / tot_length
except ZeroDivisionError:
perc_id = 0 # set arbitrary value of zero identity
results.zero_error = True
# Populate dataframes: when assigning data from symmetrical MUMmer
# output, both upper and lower triangles will be populated
results.add_tot_length(qname, sname, tot_length)
results.add_sim_errors(qname, sname, tot_sim_error)
results.add_pid(qname, sname, perc_id)
results.add_coverage(qname, sname, query_cover, sbjct_cover)
return results | [
"Returns a tuple of ANIm results for .deltas in passed directory.\n\n - delta_dir - path to the directory containing .delta files\n - org_lengths - dictionary of total sequence lengths, keyed by sequence\n\n Returns the following pandas dataframes in an ANIResults object;\n query sequences are rows, subject sequences are columns:\n\n - alignment_lengths - symmetrical: total length of alignment\n - percentage_identity - symmetrical: percentage identity of alignment\n - alignment_coverage - non-symmetrical: coverage of query and subject\n - similarity_errors - symmetrical: count of similarity errors\n\n May throw a ZeroDivisionError if one or more NUCmer runs failed, or a\n very distant sequence was included in the analysis.\n "
] |
Please provide a description of the function:def parse_cmdline():
parser = ArgumentParser(prog="genbank_get_genomes_by_taxon.py")
parser.add_argument(
"-o",
"--outdir",
dest="outdirname",
required=True,
action="store",
default=None,
help="Output directory (required)")
parser.add_argument(
"-t",
"--taxon",
dest="taxon",
action="store",
default=None,
help="NCBI taxonomy ID")
parser.add_argument(
"-v",
"--verbose",
dest="verbose",
action="store_true",
default=False,
help="Give verbose output")
parser.add_argument(
"-f",
"--force",
dest="force",
action="store_true",
default=False,
help="Force file overwriting")
parser.add_argument(
"--noclobber",
dest="noclobber",
action="store_true",
default=False,
help="Don't nuke existing files")
parser.add_argument(
"-l",
"--logfile",
dest="logfile",
action="store",
default=None,
help="Logfile location")
parser.add_argument(
"--format",
dest="format",
action="store",
default="fasta",
help="Output file format [gbk|fasta]")
parser.add_argument(
"--email",
dest="email",
required=True,
action="store",
default=None,
help="Email associated with NCBI queries (required)")
parser.add_argument(
"--retries",
dest="retries",
action="store",
default=20,
type=int,
help="Number of Entrez retry attempts per request.")
parser.add_argument(
"--batchsize",
dest="batchsize",
action="store",
default=10000,
type=int,
help="Entrez record return batch size")
parser.add_argument(
"--timeout",
dest="timeout",
action="store",
default=10,
type=int,
help="Timeout for URL connection (s)")
return parser.parse_args() | [
"Parse command-line arguments"
] |
Please provide a description of the function:def set_ncbi_email():
Entrez.email = args.email
logger.info("Set NCBI contact email to %s", args.email)
Entrez.tool = "genbank_get_genomes_by_taxon.py" | [
"Set contact email for NCBI."
] |
Please provide a description of the function:def entrez_retry(func, *fnargs, **fnkwargs):
tries, success = 0, False
while not success and tries < args.retries:
try:
output = func(*fnargs, **fnkwargs)
success = True
except (HTTPError, URLError):
tries += 1
logger.warning("Entrez query %s(%s, %s) failed (%d/%d)", func,
fnargs, fnkwargs, tries + 1, args.retries)
logger.warning(last_exception())
if not success:
logger.error("Too many Entrez failures (exiting)")
sys.exit(1)
return output | [
"Retries the passed function up to the number of times specified\n by args.retries\n "
] |
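entrez_retry relies on the module-level args and logger. A self-contained variant of the same retry idea, with explicit parameters and a simple fixed delay instead of those globals (an assumption, not pyani's behaviour), might look like this:

import time
from urllib.error import HTTPError, URLError

# Standalone sketch of the retry idea; parameters replace the module-level
# args/logger used above, and a fixed delay between attempts is assumed.
def retry_call(func, *fnargs, retries=20, delay=1.0, **fnkwargs):
    for attempt in range(1, retries + 1):
        try:
            return func(*fnargs, **fnkwargs)
        except (HTTPError, URLError) as exc:
            print("Attempt %d/%d failed: %s" % (attempt, retries, exc))
            time.sleep(delay)
    raise RuntimeError("Too many failures calling %r" % func)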
Please provide a description of the function:def entrez_batch_webhistory(record, expected, batchsize, *fnargs, **fnkwargs):
results = []
for start in range(0, expected, batchsize):
batch_handle = entrez_retry(
Entrez.efetch,
retstart=start,
retmax=batchsize,
webenv=record["WebEnv"],
query_key=record["QueryKey"],
*fnargs,
**fnkwargs)
batch_record = Entrez.read(batch_handle, validate=False)
results.extend(batch_record)
return results | [
"Recovers the Entrez data from a prior NCBI webhistory search, in\n batches of defined size, using Efetch. Returns all results as a list.\n\n - record: Entrez webhistory record\n - expected: number of expected search returns\n - batchsize: how many search returns to retrieve in a batch\n - *fnargs: arguments to Efetch\n - **fnkwargs: keyword arguments to Efetch\n "
] |
Please provide a description of the function:def get_asm_uids(taxon_uid):
query = "txid%s[Organism:exp]" % taxon_uid
logger.info("Entrez ESearch with query: %s", query)
# Perform initial search for assembly UIDs with taxon ID as query.
# Use NCBI history for the search.
handle = entrez_retry(
Entrez.esearch,
db="assembly",
term=query,
format="xml",
usehistory="y")
record = Entrez.read(handle, validate=False)
result_count = int(record['Count'])
logger.info("Entrez ESearch returns %d assembly IDs", result_count)
# Recover assembly UIDs from the web history
asm_ids = entrez_batch_webhistory(
record, result_count, 250, db="assembly", retmode="xml")
logger.info("Identified %d unique assemblies", len(asm_ids))
return asm_ids | [
"Returns a set of NCBI UIDs associated with the passed taxon.\n\n This query at NCBI returns all assemblies for the taxon subtree\n rooted at the passed taxon_uid.\n "
] |
Please provide a description of the function:def extract_filestem(data):
escapes = re.compile(r"[\s/,#\(\)]")
escname = re.sub(escapes, '_', data['AssemblyName'])
return '_'.join([data['AssemblyAccession'], escname]) | [
"Extract filestem from Entrez eSummary data.\n\n Function expects esummary['DocumentSummarySet']['DocumentSummary'][0]\n\n Some illegal characters may occur in AssemblyName - for these, a more\n robust regex replace/escape may be required. Sadly, NCBI don't just\n use standard percent escapes, but instead replace certain\n characters with underscores: white space, slash, comma, hash, brackets.\n "
] |
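A quick illustration of the underscore-escaping on a fabricated eSummary record:

import re

# Illustrative only: made-up assembly fields showing how the characters
# NCBI substitutes (whitespace, slash, comma, hash, brackets) are escaped.
data = {"AssemblyAccession": "GCF_000000000.1",
        "AssemblyName": "ASM example #1 (draft), v2/final"}

escapes = re.compile(r"[\s/,#\(\)]")
escname = re.sub(escapes, "_", data["AssemblyName"])
filestem = "_".join([data["AssemblyAccession"], escname])
print(filestem)  # GCF_000000000.1_ASM_example__1__draft___v2_final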
Please provide a description of the function:def get_ncbi_asm(asm_uid, fmt='fasta'):
logger.info("Identifying assembly information from NCBI for %s", asm_uid)
# Obtain full eSummary data for the assembly
summary = Entrez.read(
entrez_retry(
Entrez.esummary, db="assembly", id=asm_uid, report="full"),
validate=False)
# Extract filestem from assembly data
data = summary['DocumentSummarySet']['DocumentSummary'][0]
filestem = extract_filestem(data)
# Report interesting things from the summary for those interested
logger.info("\tOrganism: %s", data['Organism'])
logger.info("\tTaxid: %s", data['SpeciesTaxid'])
logger.info("\tAccession: %s", data['AssemblyAccession'])
logger.info("\tName: %s", data['AssemblyName'])
# NOTE: Maybe parse out the assembly stats here, in future?
# Get class and label text
organism = data['SpeciesName']
try:
strain = data['Biosource']['InfraspeciesList'][0]['Sub_value']
except (KeyError, IndexError):
# we consider this an error/incompleteness in the NCBI metadata
strain = ""
# Create label and class strings
genus, species = organism.split(' ', 1)
labeltxt = "%s_genomic\t%s %s %s" % (filestem, genus[0] + '.', species,
strain)
classtxt = "%s_genomic\t%s" % (filestem, organism)
logger.info("\tLabel: %s", labeltxt)
logger.info("\tClass: %s", classtxt)
# Download and extract genome assembly
try:
fastafilename = retrieve_asm_contigs(filestem, fmt=fmt)
except NCBIDownloadException:
# This is a little hacky. Sometimes, RefSeq assemblies are
# suppressed (presumably because they are non-redundant),
# but the GenBank assembly persists. In those cases, we
# *assume* (because it may not be true) that the corresponding
# genbank sequence shares the same accession number, except
# that GCF is replaced by GCA
gbfilestem = re.sub('^GCF_', 'GCA_', filestem)
logger.warning("Could not download %s, trying %s", filestem,
gbfilestem)
try:
fastafilename = retrieve_asm_contigs(gbfilestem, fmt=fmt)
except NCBIDownloadException:
fastafilename = None
return (fastafilename, classtxt, labeltxt, data['AssemblyAccession']) | [
"Returns the NCBI AssemblyAccession and AssemblyName for the assembly\n with passed UID, and organism data for class/label files also, as well\n as accession, so we can track whether downloads fail because only the\n most recent version is available..\n\n AssemblyAccession and AssemblyName are data fields in the eSummary record,\n and correspond to downloadable files for each assembly at\n ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GC[AF]/nnn/nnn/nnn/<AA>_<AN>\n where <AA> is AssemblyAccession, and <AN> is AssemblyName, and the choice\n of GCA vs GCF, and the three values of nnn are taken from <AA>\n "
] |
Please provide a description of the function:def retrieve_asm_contigs(filestem,
ftpstem="ftp://ftp.ncbi.nlm.nih.gov/genomes/all",
fmt='fasta'):
logger.info("Retrieving assembly sequence for %s", filestem)
# Define format suffix
logger.info("%s format requested", fmt)
if fmt == 'fasta':
suffix = "genomic.fna.gz"
elif fmt == 'gbk':
suffix = 'genomic.gbff.gz'
# Compile URL
gc, aa, an = tuple(filestem.split('_', 2))
aaval = aa.split('.')[0]
subdirs = '/'.join([aa[i:i + 3] for i in range(0, len(aaval), 3)])
url = "{0}/{1}/{2}/{3}/{3}_{4}".format(ftpstem, gc, subdirs, filestem,
suffix)
logger.info("Using URL: %s", url)
# Get data info
try:
response = urlopen(url, timeout=args.timeout)
except HTTPError:
logger.error("Download failed for URL: %s\n%s", url, last_exception())
raise NCBIDownloadException()
except URLError as e:
if isinstance(e.reason, timeout):
logger.error("Download timed out for URL: %s\n%s", url,
last_exception())
else:
logger.error("Download failed for URL: %s\n%s", url,
last_exception())
raise NCBIDownloadException()
except timeout:
# TODO: Does this ever happen?
logger.error("Download timed out for URL: %s\n%s", url,
last_exception())
raise NCBIDownloadException()
else:
logger.info("Opened URL")
# Issue 108 highlighted an fsize return problem
try:
fsize = int(response.info().get("Content-length"))
except TypeError: # Thrown if no content length returned
raise NCBIDownloadException()
else:
logger.info("Parsed file content size: %d.", fsize)
# Download data
outfname = os.path.join(args.outdirname, '_'.join([filestem, suffix]))
if os.path.exists(outfname):
logger.warning("Output file %s exists, not downloading", outfname)
else:
logger.info("Downloading %s (%d bytes)", url, fsize)
bsize = 1048576 # buffer size
fsize_dl = 0 # bytes downloaded
try:
with open(outfname, "wb") as ofh:
while True:
buffer = response.read(bsize)
if not buffer:
break
fsize_dl += len(buffer)
ofh.write(buffer)
status = r"%10d [%3.2f%%]" % (fsize_dl,
fsize_dl * 100. / fsize)
logger.info(status)
except:
logger.error("Download failed for %s", url)
logger.error(last_exception())
raise NCBIDownloadException()
# Extract data
ename = os.path.splitext(outfname)[0] # Strips only .gz from filename
# The code below would munge the extracted filename to suit the expected
# class/label from the old version of this script.
# The .gz file downloaded from NCBI has format
# <assembly UID>_<string>_genomic.fna.gz - which we would extract to
# <assembly UID>.fna
#regex = ".{3}_[0-9]{9}.[0-9]"
#outparts = os.path.split(outfname)
# print(outparts[0])
#print(re.match(regex, outparts[-1]).group())
# ename = os.path.join(outparts[0],
# re.match(regex, outparts[-1]).group() + '.fna')
if os.path.exists(ename):
logger.warning("Output file %s exists, not extracting", ename)
else:
try:
logger.info("Extracting archive %s to %s", outfname, ename)
with open(ename, 'w') as efh:
subprocess.call(
['gunzip', '-c', outfname],
stdout=efh) # can be subprocess.run in Py3.5
logger.info("Archive extracted to %s", ename)
except:
logger.error("Extracting archive %s failed", outfname)
logger.error(last_exception())
raise NCBIDownloadException()
return ename | [
"Downloads an assembly sequence to a local directory.\n\n The filestem corresponds to <AA>_<AN>, where <AA> and <AN> are\n AssemblyAccession and AssemblyName: data fields in the eSummary record.\n These correspond to downloadable files for each assembly at\n ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GC[AF]/nnn/nnn/nnn/<AA>_<AN>/\n where <AA> is AssemblyAccession, and <AN> is AssemblyName. The choice\n of GCA vs GCF, and the values of nnn, are derived from <AA>\n\n The files in this directory all have the stem <AA>_<AN>_<suffix>, where\n suffixes are:\n assembly_report.txt\n assembly_stats.txt\n feature_table.txt.gz\n genomic.fna.gz\n genomic.gbff.gz\n genomic.gff.gz\n protein.faa.gz\n protein.gpff.gz\n rm_out.gz\n rm.run\n wgsmaster.gbff.gz\n\n This function downloads the genomic_fna.gz file, and extracts it in the\n output directory name specified when the script is called.\n "
] |
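The URL layout can be checked standalone. The sketch below reproduces just the string handling for a hypothetical filestem; no download is attempted.

# Illustrative only: reproduce the URL construction for a fabricated filestem.
ftpstem = "ftp://ftp.ncbi.nlm.nih.gov/genomes/all"
filestem = "GCF_000091985.1_ASM9198v1"   # hypothetical accession/name pair
suffix = "genomic.fna.gz"

gc, aa, an = tuple(filestem.split("_", 2))
aaval = aa.split(".")[0]
subdirs = "/".join([aa[i:i + 3] for i in range(0, len(aaval), 3)])
url = "{0}/{1}/{2}/{3}/{3}_{4}".format(ftpstem, gc, subdirs, filestem, suffix)
print(url)
# ftp://ftp.ncbi.nlm.nih.gov/genomes/all/GCF/000/091/985/GCF_000091985.1_ASM9198v1/GCF_000091985.1_ASM9198v1_genomic.fna.gz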
Please provide a description of the function:def write_contigs(asm_uid, contig_uids, batchsize=10000):
# Has duplicate code with get_class_label_info() - needs refactoring
logger.info("Collecting contig data for %s", asm_uid)
# Assembly record - get binomial and strain names
asm_record = Entrez.read(
entrez_retry(
Entrez.esummary, db='assembly', id=asm_uid, rettype='text'),
validate=False)
asm_organism = asm_record['DocumentSummarySet']['DocumentSummary'][0][
'SpeciesName']
try:
asm_strain = asm_record['DocumentSummarySet']['DocumentSummary'][0][
'Biosource']['InfraspeciesList'][0]['Sub_value']
except KeyError:
asm_strain = ""
# Assembly UID (long form) for the output filename
outfilename = "%s.fasta" % os.path.join(args.outdirname, asm_record[
'DocumentSummarySet']['DocumentSummary'][0]['AssemblyAccession'])
# Create label and class strings
genus, species = asm_organism.split(' ', 1)
# Get FASTA records for contigs
logger.info("Downloading FASTA records for assembly %s (%s)", asm_uid,
' '.join([genus[0] + '.', species, asm_strain]))
# We're doing an explicit outer retry loop here because we want to confirm
# we have the correct data, as well as test for Entrez connection errors,
# which is all the entrez_retry function does.
tries, success = 0, False
while not success and tries < args.retries:
records = [] # Holds all return records
# We may need to batch contigs
query_uids = ','.join(contig_uids)
try:
for start in range(0, len(contig_uids), batchsize):
logger.info("Batch: %d-%d", start, start + batchsize)
records.extend(
list(
SeqIO.parse(
entrez_retry(
Entrez.efetch,
db='nucleotide',
id=query_uids,
rettype='fasta',
retmode='text',
retstart=start,
retmax=batchsize), 'fasta')))
tries += 1
# Check only that correct number of records returned.
if len(records) == len(contig_uids):
success = True
else:
logger.warning("%d contigs expected, %d contigs returned",
len(contig_uids), len(records))
logger.warning("FASTA download for assembly %s failed",
asm_uid)
logger.warning("try %d/20", tries)
# Could also check expected assembly sequence length?
logger.info("Downloaded genome size: %d",
sum([len(r) for r in records]))
except:
logger.warning("FASTA download for assembly %s failed", asm_uid)
logger.warning(last_exception())
logger.warning("try %d/20", tries)
if not success:
# Could place option on command-line to stop or continue here.
logger.error("Failed to download records for %s (continuing)", asm_uid)
# Write contigs to file
retval = SeqIO.write(records, outfilename, 'fasta')
logger.info("Wrote %d contigs to %s", retval, outfilename) | [
"Writes assembly contigs out to a single FASTA file in the script's\n designated output directory.\n\n FASTA records are returned, as GenBank and even GenBankWithParts format\n records don't reliably give correct sequence in all cases.\n\n The script returns two strings for each assembly, a 'class' and a 'label'\n string - this is for use with, e.g. pyani.\n "
] |
Please provide a description of the function:def logreport_downloaded(accession, skippedlist, accessiondict, uidaccdict):
for vid in accessiondict[accession.split('.')[0]]:
if vid in skippedlist:
status = "NOT DOWNLOADED"
else:
status = "DOWNLOADED"
logger.warning("\t\t%s: %s - %s", vid, uidaccdict[vid], status) | [
"Reports to logger whether alternative assemblies for an accession that\n was missing have been downloaded\n "
] |
Please provide a description of the function:def calculate_tetra_zscores(infilenames):
org_tetraz = {}
for filename in infilenames:
org = os.path.splitext(os.path.split(filename)[-1])[0]
org_tetraz[org] = calculate_tetra_zscore(filename)
return org_tetraz | [
"Returns dictionary of TETRA Z-scores for each input file.\n\n - infilenames - collection of paths to sequence files\n "
] |
Please provide a description of the function:def calculate_tetra_zscore(filename):
# For the Teeling et al. method, the Z-scores require us to count
# mono, di, tri and tetranucleotide sequences - these are stored
# (in order) in the counts tuple
counts = (collections.defaultdict(int), collections.defaultdict(int),
collections.defaultdict(int), collections.defaultdict(int))
for rec in SeqIO.parse(filename, 'fasta'):
for seq in [str(rec.seq).upper(),
str(rec.seq.reverse_complement()).upper()]:
# The Teeling et al. algorithm requires us to consider
# both strand orientations, so monocounts are easy
for base in ('G', 'C', 'T', 'A'):
counts[0][base] += seq.count(base)
# For di, tri and tetranucleotide counts, loop over the
# sequence and its reverse complement, until near the end:
for i in range(len(seq[:-4])):
din, tri, tetra = seq[i:i+2], seq[i:i+3], seq[i:i+4]
counts[1][str(din)] += 1
counts[2][str(tri)] += 1
counts[3][str(tetra)] += 1
# Then clean up the straggling bit at the end:
counts[2][str(seq[-4:-1])] += 1
counts[2][str(seq[-3:])] += 1
counts[1][str(seq[-4:-2])] += 1
counts[1][str(seq[-3:-1])] += 1
counts[1][str(seq[-2:])] += 1
# Following Teeling (2004), calculate expected frequencies for each
# tetranucleotide; we ignore ambiguity symbols
tetra_exp = {}
for tet in [tetn for tetn in counts[3] if tetra_clean(tetn)]:
tetra_exp[tet] = 1. * counts[2][tet[:3]] * counts[2][tet[1:]] / \
counts[1][tet[1:3]]
# Following Teeling (2004) we approximate the std dev and Z-score for each
# tetranucleotide
tetra_sd = {}
tetra_z = {}
for tet, exp in list(tetra_exp.items()):
den = counts[1][tet[1:3]]
tetra_sd[tet] = math.sqrt(exp * (den - counts[2][tet[:3]]) *
(den - counts[2][tet[1:]]) / (den * den))
try:
tetra_z[tet] = (counts[3][tet] - exp)/tetra_sd[tet]
except ZeroDivisionError:
# To record if we hit a zero in the estimation of variance
# zeroes = [k for k, v in list(tetra_sd.items()) if v == 0]
tetra_z[tet] = 1 / (counts[1][tet[1:3]] * counts[1][tet[1:3]])
return tetra_z | [
"Returns TETRA Z-score for the sequence in the passed file.\n\n - filename - path to sequence file\n\n Calculates mono-, di-, tri- and tetranucleotide frequencies\n for each sequence, on each strand, and follows Teeling et al. (2004)\n in calculating a corresponding Z-score for each observed\n tetranucleotide frequency, dependent on the mono-, di- and tri-\n nucleotide frequencies for that input sequence.\n "
] |
Please provide a description of the function:def calculate_correlations(tetra_z):
orgs = sorted(tetra_z.keys())
correlations = pd.DataFrame(index=orgs, columns=orgs,
dtype=float).fillna(1.0)
for idx, org1 in enumerate(orgs[:-1]):
for org2 in orgs[idx+1:]:
assert sorted(tetra_z[org1].keys()) == sorted(tetra_z[org2].keys())
tets = sorted(tetra_z[org1].keys())
zscores = [[tetra_z[org1][t] for t in tets],
[tetra_z[org2][t] for t in tets]]
zmeans = [sum(zscore)/len(zscore) for zscore in zscores]
zdiffs = [[z - zmeans[0] for z in zscores[0]],
[z - zmeans[1] for z in zscores[1]]]
diffprods = sum([zdiffs[0][i] * zdiffs[1][i] for i in
range(len(zdiffs[0]))])
zdiffs2 = [sum([z * z for z in zdiffs[0]]),
sum([z * z for z in zdiffs[1]])]
correlations[org1][org2] = diffprods / \
math.sqrt(zdiffs2[0] * zdiffs2[1])
correlations[org2][org1] = correlations[org1][org2]
return correlations | [
"Returns dataframe of Pearson correlation coefficients.\n\n - tetra_z - dictionary of Z-scores, keyed by sequence ID\n\n Calculates Pearson correlation coefficient from Z scores for each\n tetranucleotide. This is done longhand here, which is fast enough,\n but for robustness we might want to do something else... (TODO).\n\n Note that we report a correlation by this method, rather than a\n percentage identity.\n "
] |
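The longhand Pearson calculation can be sanity-checked on two small fabricated Z-score dictionaries sharing the same keys:

import math

# Illustrative only: fabricated Z-score profiles correlated with the same
# longhand arithmetic used in calculate_correlations().
za = {"AAAA": 1.2, "AAAC": -0.4, "AAAG": 0.7, "AAAT": 2.1}
zb = {"AAAA": 1.0, "AAAC": -0.2, "AAAG": 0.9, "AAAT": 1.8}

tets = sorted(za.keys())
zscores = [[za[t] for t in tets], [zb[t] for t in tets]]
zmeans = [sum(z) / len(z) for z in zscores]
zdiffs = [[v - zmeans[i] for v in zscores[i]] for i in range(2)]
diffprods = sum(zdiffs[0][i] * zdiffs[1][i] for i in range(len(tets)))
zdiffs2 = [sum(v * v for v in zd) for zd in zdiffs]
print(diffprods / math.sqrt(zdiffs2[0] * zdiffs2[1]))  # close to 1 for similar profiles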
Please provide a description of the function:def get_labels(filename, logger=None):
labeldict = {}
if filename is not None:
if logger:
logger.info("Reading labels from %s", filename)
with open(filename, "r") as ifh:
count = 0
for line in ifh.readlines():
count += 1
try:
key, label = line.strip().split("\t")
except ValueError:
if logger:
logger.warning("Problem with class file: %s", filename)
logger.warning("%d: %s", (count, line.strip()))
logger.warning("(skipping line)")
continue
else:
labeldict[key] = label
return labeldict | [
"Returns a dictionary of alternative sequence labels, or None\n\n - filename - path to file containing tab-separated table of labels\n\n Input files should be formatted as <key>\\t<label>, one pair per line.\n "
] |
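A standalone sketch of the same <key>\t<label> parsing, using a fabricated labels file (the original depends on an optional logger, so the warning is replaced by a print here):

# Illustrative only: write a fabricated tab-separated labels file and parse
# it with the same convention used by get_labels(); accessions are made up.
with open("labels.tab", "w") as outfh:
    outfh.write("GCF_000000001.1_genomic\tE. coli K-12\n")
    outfh.write("GCF_000000002.1_genomic\tE. coli O157:H7\n")
    outfh.write("malformed line without a tab\n")  # will be skipped

labeldict = {}
for count, line in enumerate(open("labels.tab"), start=1):
    try:
        key, label = line.strip().split("\t")
    except ValueError:
        print("Skipping line %d: %r" % (count, line.strip()))
        continue
    labeldict[key] = label
print(labeldict)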
Please provide a description of the function:def add_tot_length(self, qname, sname, value, sym=True):
self.alignment_lengths.loc[qname, sname] = value
if sym:
self.alignment_lengths.loc[sname, qname] = value | [
"Add a total length value to self.alignment_lengths."
] |
Please provide a description of the function:def add_sim_errors(self, qname, sname, value, sym=True):
self.similarity_errors.loc[qname, sname] = value
if sym:
self.similarity_errors.loc[sname, qname] = value | [
"Add a similarity error value to self.similarity_errors."
] |
Please provide a description of the function:def add_pid(self, qname, sname, value, sym=True):
self.percentage_identity.loc[qname, sname] = value
if sym:
self.percentage_identity.loc[sname, qname] = value | [
"Add a percentage identity value to self.percentage_identity."
] |
Please provide a description of the function:def add_coverage(self, qname, sname, qcover, scover=None):
self.alignment_coverage.loc[qname, sname] = qcover
if scover:
self.alignment_coverage.loc[sname, qname] = scover | [
"Add percentage coverage values to self.alignment_coverage."
] |
Please provide a description of the function:def data(self):
stemdict = {
"ANIm": pyani_config.ANIM_FILESTEMS,
"ANIb": pyani_config.ANIB_FILESTEMS,
"ANIblastall": pyani_config.ANIBLASTALL_FILESTEMS,
}
return zip(
(
self.alignment_lengths,
self.percentage_identity,
self.alignment_coverage,
self.similarity_errors,
self.hadamard,
),
stemdict[self.mode],
) | [
"Return list of (dataframe, filestem) tuples."
] |
Please provide a description of the function:def build_db_cmd(self, fname):
return self.funcs.db_func(fname, self.outdir, self.exes.format_exe)[0] | [
"Return database format/build command"
] |
Please provide a description of the function:def get_db_name(self, fname):
return self.funcs.db_func(fname, self.outdir, self.exes.format_exe)[1] | [
"Return database filename"
] |
Please provide a description of the function:def build_blast_cmd(self, fname, dbname):
return self.funcs.blastn_func(fname, dbname, self.outdir, self.exes.blast_exe) | [
"Return BLASTN command"
] |
Please provide a description of the function:def fragment_fasta_files(infiles, outdirname, fragsize):
outfnames = []
for fname in infiles:
outstem, outext = os.path.splitext(os.path.split(fname)[-1])
outfname = os.path.join(outdirname, outstem) + "-fragments" + outext
outseqs = []
count = 0
for seq in SeqIO.parse(fname, "fasta"):
idx = 0
while idx < len(seq):
count += 1
newseq = seq[idx : idx + fragsize]
newseq.id = "frag%05d" % count
outseqs.append(newseq)
idx += fragsize
outfnames.append(outfname)
SeqIO.write(outseqs, outfname, "fasta")
return outfnames, get_fraglength_dict(outfnames) | [
"Chops sequences of the passed files into fragments, returns filenames.\n\n - infiles - paths to each input sequence file\n - outdirname - path to output directory\n - fragsize - the size of sequence fragments\n\n Takes every sequence from every file in infiles, and splits them into\n consecutive fragments of length fragsize, (with any trailing sequences\n being included, even if shorter than fragsize), and writes the resulting\n set of sequences to a file with the same name in the output directory.\n All fragments are named consecutively and uniquely (within a file) as\n fragNNNNN. Sequence description fields are retained.\n "
] |
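The slicing behaviour (consecutive windows, with a shorter trailing fragment kept) can be seen on a toy sequence without Biopython:

# Illustrative only: the same consecutive-window slicing on a plain string,
# showing the trailing fragment is kept even when shorter than fragsize.
seq = "ACGT" * 7          # 28 bases
fragsize = 10

fragments, idx, count = [], 0, 0
while idx < len(seq):
    count += 1
    fragments.append(("frag%05d" % count, seq[idx:idx + fragsize]))
    idx += fragsize

for name, frag in fragments:
    print(name, len(frag), frag)   # fragments of 10, 10 and 8 bases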
Please provide a description of the function:def get_fraglength_dict(fastafiles):
fraglength_dict = {}
for filename in fastafiles:
qname = os.path.split(filename)[-1].split("-fragments")[0]
fraglength_dict[qname] = get_fragment_lengths(filename)
return fraglength_dict | [
"Returns dictionary of sequence fragment lengths, keyed by query name.\n\n - fastafiles - list of FASTA input whole sequence files\n\n Loops over input files and, for each, produces a dictionary with fragment\n lengths, keyed by sequence ID. These are returned as a dictionary with\n the keys being query IDs derived from filenames.\n "
] |
Please provide a description of the function:def get_fragment_lengths(fastafile):
fraglengths = {}
for seq in SeqIO.parse(fastafile, "fasta"):
fraglengths[seq.id] = len(seq)
return fraglengths | [
"Returns dictionary of sequence fragment lengths, keyed by fragment ID.\n\n Biopython's SeqIO module is used to parse all sequences in the FASTA\n file.\n\n NOTE: ambiguity symbols are not discounted.\n "
] |
Please provide a description of the function:def build_db_jobs(infiles, blastcmds):
dbjobdict = {} # Dict of database construction jobs, keyed by filename
# Create dictionary of database building jobs, keyed by db name
# defining jobnum for later use as last job index used
for idx, fname in enumerate(infiles):
dbjobdict[blastcmds.get_db_name(fname)] = pyani_jobs.Job(
"%s_db_%06d" % (blastcmds.prefix, idx), blastcmds.build_db_cmd(fname)
)
return dbjobdict | [
"Returns dictionary of db-building commands, keyed by dbname."
] |
Please provide a description of the function:def make_blastcmd_builder(
mode, outdir, format_exe=None, blast_exe=None, prefix="ANIBLAST"
):
if mode == "ANIb": # BLAST/formatting executable depends on mode
blastcmds = BLASTcmds(
BLASTfunctions(construct_makeblastdb_cmd, construct_blastn_cmdline),
BLASTexes(
format_exe or pyani_config.MAKEBLASTDB_DEFAULT,
blast_exe or pyani_config.BLASTN_DEFAULT,
),
prefix,
outdir,
)
else:
blastcmds = BLASTcmds(
BLASTfunctions(construct_formatdb_cmd, construct_blastall_cmdline),
BLASTexes(
format_exe or pyani_config.FORMATDB_DEFAULT,
blast_exe or pyani_config.BLASTALL_DEFAULT,
),
prefix,
outdir,
)
return blastcmds | [
"Returns BLASTcmds object for construction of BLAST commands."
] |
Please provide a description of the function:def make_job_graph(infiles, fragfiles, blastcmds):
joblist = [] # Holds list of job dependency graphs
# Get dictionary of database-building jobs
dbjobdict = build_db_jobs(infiles, blastcmds)
# Create list of BLAST executable jobs, with dependencies
jobnum = len(dbjobdict)
for idx, fname1 in enumerate(fragfiles[:-1]):
for fname2 in fragfiles[idx + 1 :]:
jobnum += 1
jobs = [
pyani_jobs.Job(
"%s_exe_%06d_a" % (blastcmds.prefix, jobnum),
blastcmds.build_blast_cmd(fname1, fname2.replace("-fragments", "")),
),
pyani_jobs.Job(
"%s_exe_%06d_b" % (blastcmds.prefix, jobnum),
blastcmds.build_blast_cmd(fname2, fname1.replace("-fragments", "")),
),
]
jobs[0].add_dependency(dbjobdict[fname1.replace("-fragments", "")])
jobs[1].add_dependency(dbjobdict[fname2.replace("-fragments", "")])
joblist.extend(jobs)
# Return the dependency graph
return joblist | [
"Return a job dependency graph, based on the passed input sequence files.\n\n - infiles - a list of paths to input FASTA files\n - fragfiles - a list of paths to fragmented input FASTA files\n\n By default, will run ANIb - it *is* possible to make a mess of passing the\n wrong executable for the mode you're using.\n\n All items in the returned graph list are BLAST executable jobs that must\n be run *after* the corresponding database creation. The Job objects\n corresponding to the database creation are contained as dependencies.\n How those jobs are scheduled depends on the scheduler (see\n run_multiprocessing.py, run_sge.py)\n "
] |
Please provide a description of the function:def generate_blastdb_commands(filenames, outdir, blastdb_exe=None, mode="ANIb"):
if mode == "ANIb":
construct_db_cmdline = construct_makeblastdb_cmd
else:
construct_db_cmdline = construct_formatdb_cmd
if blastdb_exe is None:
cmdlines = [construct_db_cmdline(fname, outdir) for fname in filenames]
else:
cmdlines = [
construct_db_cmdline(fname, outdir, blastdb_exe) for fname in filenames
]
return cmdlines | [
"Return a list of makeblastdb command-lines for ANIb/ANIblastall\n\n - filenames - a list of paths to input FASTA files\n - outdir - path to output directory\n - blastdb_exe - path to the makeblastdb executable\n "
] |
Please provide a description of the function:def construct_makeblastdb_cmd(
filename, outdir, blastdb_exe=pyani_config.MAKEBLASTDB_DEFAULT
):
title = os.path.splitext(os.path.split(filename)[-1])[0]
outfilename = os.path.join(outdir, os.path.split(filename)[-1])
return (
"{0} -dbtype nucl -in {1} -title {2} -out {3}".format(
blastdb_exe, filename, title, outfilename
),
outfilename,
) | [
"Returns a single makeblastdb command.\n\n - filename - input filename\n - blastdb_exe - path to the makeblastdb executable\n "
] |
Please provide a description of the function:def construct_formatdb_cmd(filename, outdir, blastdb_exe=pyani_config.FORMATDB_DEFAULT):
title = os.path.splitext(os.path.split(filename)[-1])[0]
newfilename = os.path.join(outdir, os.path.split(filename)[-1])
shutil.copy(filename, newfilename)
return (
"{0} -p F -i {1} -t {2}".format(blastdb_exe, newfilename, title),
newfilename,
) | [
"Returns a single formatdb command.\n\n - filename - input filename\n - blastdb_exe - path to the formatdb executable\n "
] |
Please provide a description of the function:def generate_blastn_commands(filenames, outdir, blast_exe=None, mode="ANIb"):
if mode == "ANIb":
construct_blast_cmdline = construct_blastn_cmdline
else:
construct_blast_cmdline = construct_blastall_cmdline
cmdlines = []
for idx, fname1 in enumerate(filenames[:-1]):
dbname1 = fname1.replace("-fragments", "")
for fname2 in filenames[idx + 1 :]:
dbname2 = fname2.replace("-fragments", "")
if blast_exe is None:
cmdlines.append(construct_blast_cmdline(fname1, dbname2, outdir))
cmdlines.append(construct_blast_cmdline(fname2, dbname1, outdir))
else:
cmdlines.append(
construct_blast_cmdline(fname1, dbname2, outdir, blast_exe)
)
cmdlines.append(
construct_blast_cmdline(fname2, dbname1, outdir, blast_exe)
)
return cmdlines | [
"Return a list of blastn command-lines for ANIm\n\n - filenames - a list of paths to fragmented input FASTA files\n - outdir - path to output directory\n - blastn_exe - path to BLASTN executable\n\n Assumes that the fragment sequence input filenames have the form\n ACCESSION-fragments.ext, where the corresponding BLAST database filenames\n have the form ACCESSION.ext. This is the convention followed by the\n fragment_FASTA_files() function above.\n "
] |
Please provide a description of the function:def construct_blastn_cmdline(
fname1, fname2, outdir, blastn_exe=pyani_config.BLASTN_DEFAULT
):
fstem1 = os.path.splitext(os.path.split(fname1)[-1])[0]
fstem2 = os.path.splitext(os.path.split(fname2)[-1])[0]
fstem1 = fstem1.replace("-fragments", "")
prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2))
cmd = (
"{0} -out {1}.blast_tab -query {2} -db {3} "
+ "-xdrop_gap_final 150 -dust no -evalue 1e-15 "
+ "-max_target_seqs 1 -outfmt '6 qseqid sseqid length mismatch "
+ "pident nident qlen slen qstart qend sstart send positive "
+ "ppos gaps' -task blastn"
)
return cmd.format(blastn_exe, prefix, fname1, fname2) | [
"Returns a single blastn command.\n\n - filename - input filename\n - blastn_exe - path to BLASTN executable\n "
] |
Please provide a description of the function:def construct_blastall_cmdline(
fname1, fname2, outdir, blastall_exe=pyani_config.BLASTALL_DEFAULT
):
fstem1 = os.path.splitext(os.path.split(fname1)[-1])[0]
fstem2 = os.path.splitext(os.path.split(fname2)[-1])[0]
fstem1 = fstem1.replace("-fragments", "")
prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2))
cmd = (
"{0} -p blastn -o {1}.blast_tab -i {2} -d {3} "
+ "-X 150 -q -1 -F F -e 1e-15 "
+ "-b 1 -v 1 -m 8"
)
return cmd.format(blastall_exe, prefix, fname1, fname2) | [
"Returns a single blastall command.\n\n - blastall_exe - path to BLASTALL executable\n "
] |
Please provide a description of the function:def process_blast(
blast_dir,
org_lengths,
fraglengths=None,
mode="ANIb",
identity=0.3,
coverage=0.7,
logger=None,
):
# Process directory to identify input files
blastfiles = pyani_files.get_input_files(blast_dir, ".blast_tab")
# Hold data in ANIResults object
results = ANIResults(list(org_lengths.keys()), mode)
# Fill diagonal NA values for alignment_length with org_lengths
for org, length in list(org_lengths.items()):
results.alignment_lengths[org][org] = length
# Process .blast_tab files assuming that the filename format holds:
# org1_vs_org2.blast_tab:
for blastfile in blastfiles:
qname, sname = os.path.splitext(os.path.split(blastfile)[-1])[0].split("_vs_")
# We may have BLAST files from other analyses in the same directory
# If this occurs, we raise a warning, and skip the file
if qname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Query name %s not in input " % qname
+ "sequence list, skipping %s" % blastfile
)
continue
if sname not in list(org_lengths.keys()):
if logger:
logger.warning(
"Subject name %s not in input " % sname
+ "sequence list, skipping %s" % blastfile
)
continue
resultvals = parse_blast_tab(blastfile, fraglengths, identity, coverage, mode)
query_cover = float(resultvals[0]) / org_lengths[qname]
# Populate dataframes: when assigning data, we need to note that
# we have asymmetrical data from BLAST output, so only the
# upper triangle is populated
results.add_tot_length(qname, sname, resultvals[0], sym=False)
results.add_sim_errors(qname, sname, resultvals[1], sym=False)
results.add_pid(qname, sname, 0.01 * resultvals[2], sym=False)
results.add_coverage(qname, sname, query_cover)
return results | [
"Returns a tuple of ANIb results for .blast_tab files in the output dir.\n\n - blast_dir - path to the directory containing .blast_tab files\n - org_lengths - the base count for each input sequence\n - fraglengths - dictionary of query sequence fragment lengths, only\n needed for BLASTALL output\n - mode - parsing BLASTN+ or BLASTALL output?\n - logger - a logger for messages\n\n Returns the following pandas dataframes in an ANIResults object;\n query sequences are rows, subject sequences are columns:\n\n - alignment_lengths - non-symmetrical: total length of alignment\n - percentage_identity - non-symmetrical: ANIb (Goris) percentage identity\n - alignment_coverage - non-symmetrical: coverage of query\n - similarity_errors - non-symmetrical: count of similarity errors\n\n May throw a ZeroDivisionError if one or more BLAST runs failed, or a\n very distant sequence was included in the analysis.\n "
] |
Please provide a description of the function:def parse_blast_tab(filename, fraglengths, identity, coverage, mode="ANIb"):
# Assuming that the filename format holds org1_vs_org2.blast_tab:
qname = os.path.splitext(os.path.split(filename)[-1])[0].split("_vs_")[0]
# Load output as dataframe
if mode == "ANIblastall":
qfraglengths = fraglengths[qname]
columns = [
"sid",
"blast_pid",
"blast_alnlen",
"blast_mismatch",
"blast_gaps",
"q_start",
"q_end",
"s_start",
"s_end",
"e_Value",
"bit_score",
]
else:
columns = [
"sbjct_id",
"blast_alnlen",
"blast_mismatch",
"blast_pid",
"blast_identities",
"qlen",
"slen",
"q_start",
"q_end",
"s_start",
"s_end",
"blast_pos",
"ppos",
"blast_gaps",
]
# We may receive an empty BLASTN output file, if there are no significant
# regions of homology. This causes pandas to throw an error on CSV import.
# To get past this, we create an empty dataframe with the appropriate
# columns.
try:
data = pd.read_csv(filename, header=None, sep="\t", index_col=0)
data.columns = columns
except pd.io.common.EmptyDataError:
data = pd.DataFrame(columns=columns)
# Add new column for fragment length, only for BLASTALL
if mode == "ANIblastall":
data["qlen"] = pd.Series(
[qfraglengths[idx] for idx in data.index], index=data.index
)
# Add new columns for recalculated alignment length, proportion, and
# percentage identity
data["ani_alnlen"] = data["blast_alnlen"] - data["blast_gaps"]
data["ani_alnids"] = data["ani_alnlen"] - data["blast_mismatch"]
data["ani_coverage"] = data["ani_alnlen"] / data["qlen"]
data["ani_pid"] = data["ani_alnids"] / data["qlen"]
# Filter rows on 'ani_coverage' > 0.7, 'ani_pid' > 0.3
filtered = data[(data["ani_coverage"] > coverage) & (data["ani_pid"] > identity)]
# Dedupe query hits, so we only take the best hit
filtered = filtered.groupby(filtered.index).first()
# Replace NaNs with zero
filtered = filtered.fillna(value=0) # Needed if no matches
# The ANI value is then the mean percentage identity.
# We report total alignment length and the number of similarity errors
# (mismatches and gaps), as for ANIm
# NOTE: We report the mean of 'blast_pid' for concordance with JSpecies
# Despite this, the concordance is not exact. Manual inspection during
# development indicated that a handful of fragments are differentially
# filtered out in JSpecies and this script. This is often on the basis
# of rounding differences (e.g. coverage being close to 70%).
# NOTE: If there are no hits, then ani_pid will be nan - we replace this
# with zero if that happens
ani_pid = filtered["blast_pid"].mean()
if pd.isnull(ani_pid): # Happens if there are no matches in ANIb
ani_pid = 0
aln_length = filtered["ani_alnlen"].sum()
sim_errors = filtered["blast_mismatch"].sum() + filtered["blast_gaps"].sum()
filtered.to_csv(filename + ".dataframe", sep="\t")
return aln_length, sim_errors, ani_pid | [
"Returns (alignment length, similarity errors, mean_pid) tuple\n from .blast_tab\n\n - filename - path to .blast_tab file\n\n Calculate the alignment length and total number of similarity errors (as\n we would with ANIm), as well as the Goris et al.-defined mean identity\n of all valid BLAST matches for the passed BLASTALL alignment .blast_tab\n file.\n\n '''ANI between the query genome and the reference genome was calculated as\n the mean identity of all BLASTN matches that showed more than 30% overall\n sequence identity (recalculated to an identity along the entire sequence)\n over an alignable region of at least 70% of their length.\n '''\n "
] |
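The coverage/identity recalculation and filtering can be illustrated with a small fabricated dataframe containing only the columns the arithmetic needs; the thresholds are the defaults quoted above (coverage > 0.7, identity > 0.3).

import pandas as pd

# Illustrative only: fabricated BLASTN-style rows; values are invented.
data = pd.DataFrame(
    {"blast_alnlen": [1000, 950, 400],
     "blast_mismatch": [50, 700, 10],
     "blast_gaps": [5, 20, 0],
     "blast_pid": [95.0, 24.0, 97.5],
     "qlen": [1020, 1020, 1020]},
    index=["frag00001", "frag00002", "frag00003"])

data["ani_alnlen"] = data["blast_alnlen"] - data["blast_gaps"]
data["ani_alnids"] = data["ani_alnlen"] - data["blast_mismatch"]
data["ani_coverage"] = data["ani_alnlen"] / data["qlen"]
data["ani_pid"] = data["ani_alnids"] / data["qlen"]

filtered = data[(data["ani_coverage"] > 0.7) & (data["ani_pid"] > 0.3)]
print(filtered.index.tolist())        # only frag00001 passes both thresholds
print(filtered["blast_pid"].mean())   # mean identity of the retained matches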
Please provide a description of the function:def split_seq(iterable, size):
elm = iter(iterable)
item = list(itertools.islice(elm, size))
while item:
yield item
item = list(itertools.islice(elm, size)) | [
"Splits a passed iterable into chunks of a given size."
] |
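For example (standalone, copying the generator so the snippet runs on its own):

import itertools

# Standalone copy of the generator above, plus a usage example.
def split_seq(iterable, size):
    elm = iter(iterable)
    item = list(itertools.islice(elm, size))
    while item:
        yield item
        item = list(itertools.islice(elm, size))

print(list(split_seq(range(10), 4)))
# [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]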
Please provide a description of the function:def build_joblist(jobgraph):
jobset = set()
for job in jobgraph:
jobset = populate_jobset(job, jobset, depth=1)
return list(jobset) | [
"Returns a list of jobs, from a passed jobgraph."
] |
Please provide a description of the function:def compile_jobgroups_from_joblist(joblist, jgprefix, sgegroupsize):
jobcmds = defaultdict(list)
for job in joblist:
jobcmds[job.command.split(' ', 1)[0]].append(job.command)
jobgroups = []
for cmds in list(jobcmds.items()):
# Break arglist up into batches of sgegroupsize (default: 10,000)
sublists = split_seq(cmds[1], sgegroupsize)
count = 0
for sublist in sublists:
count += 1
sge_jobcmdlist = ['\"%s\"' % jc for jc in sublist]
jobgroups.append(JobGroup("%s_%d" % (jgprefix, count),
"$cmds",
arguments={'cmds': sge_jobcmdlist}))
return jobgroups | [
"Return list of jobgroups, rather than list of jobs."
] |
Please provide a description of the function:def run_dependency_graph(jobgraph, logger=None, jgprefix="ANIm_SGE_JG",
sgegroupsize=10000, sgeargs=None):
joblist = build_joblist(jobgraph)
# Try to be informative by telling the user what jobs will run
dep_count = 0 # how many dependencies are there
if logger:
logger.info("Jobs to run with scheduler")
for job in joblist:
logger.info("{0}: {1}".format(job.name, job.command))
if len(job.dependencies):
dep_count += len(job.dependencies)
for dep in job.dependencies:
logger.info("\t[^ depends on: %s]" % dep.name)
logger.info("There are %d job dependencies" % dep_count)
# If there are no job dependencies, we can use an array (or series of
# arrays) to schedule our jobs. This cuts down on problems with long
# job lists choking up the queue.
if dep_count == 0:
logger.info("Compiling jobs into JobGroups")
joblist = compile_jobgroups_from_joblist(joblist, jgprefix,
sgegroupsize)
# Send jobs to scheduler
logger.info("Running jobs with scheduler...")
logger.info("Jobs passed to scheduler in order:")
for job in joblist:
logger.info("\t%s" % job.name)
build_and_submit_jobs(os.curdir, joblist, sgeargs)
logger.info("Waiting for SGE-submitted jobs to finish (polling)")
for job in joblist:
job.wait() | [
"Creates and runs GridEngine scripts for jobs based on the passed\n jobgraph.\n\n - jobgraph - list of jobs, which may have dependencies.\n - verbose - flag for multiprocessing verbosity\n - logger - a logger module logger (optional)\n - jgprefix - a prefix for the submitted jobs, in the scheduler\n - sgegroupsize - the maximum size for an array job submission\n - sgeargs - additional arguments to qsub\n\n The strategy here is to loop over each job in the list of jobs (jobgraph),\n and create/populate a series of Sets of commands, to be run in\n reverse order with multiprocessing_run as asynchronous pools.\n\n The strategy here is to loop over each job in the dependency graph, and\n add the job to a new list of jobs, swapping out the Job dependency for\n the name of the Job on which it depends.\n "
] |
Please provide a description of the function:def populate_jobset(job, jobset, depth):
jobset.add(job)
if len(job.dependencies) == 0:
return jobset
for j in job.dependencies:
jobset = populate_jobset(j, jobset, depth+1)
return jobset | [
" Creates a set of jobs, containing jobs at difference depths of the\n dependency tree, retaining dependencies as strings, not Jobs.\n "
] |
Please provide a description of the function:def build_directories(root_dir):
# If the root directory doesn't exist, create it
if not os.path.exists(root_dir):
os.mkdir(root_dir)
# Create subdirectories
directories = [os.path.join(root_dir, subdir) for subdir in
("output", "stderr", "stdout", "jobs")]
for dirname in directories:
os.makedirs(dirname, exist_ok=True) | [
"Constructs the subdirectories output, stderr, stdout, and jobs in the\n passed root directory. These subdirectories have the following roles:\n\n jobs Stores the scripts for each job\n stderr Stores the stderr output from SGE\n stdout Stores the stdout output from SGE\n output Stores output (if the scripts place the output here)\n\n - root_dir Path to the top-level directory for creation of subdirectories\n "
] |
Please provide a description of the function:def build_job_scripts(root_dir, jobs):
# Loop over the job list, creating each job script in turn, and then adding
# scriptPath to the Job object
for job in jobs:
scriptpath = os.path.join(root_dir, "jobs", job.name)
with open(scriptpath, "w") as scriptfile:
scriptfile.write("#!/bin/sh\n#$ -S /bin/bash\n%s\n" % job.script)
job.scriptpath = scriptpath | [
"Constructs the script for each passed Job in the jobs iterable\n\n - root_dir Path to output directory\n "
] |
Please provide a description of the function:def submit_safe_jobs(root_dir, jobs, sgeargs=None):
# Loop over each job, constructing SGE command-line based on job settings
for job in jobs:
job.out = os.path.join(root_dir, "stdout")
job.err = os.path.join(root_dir, "stderr")
# Add the job name, current working directory, and SGE stdout/stderr
# directories to the SGE command line
args = " -N %s " % (job.name)
args += " -cwd "
args += " -o %s -e %s " % (job.out, job.err)
# If a queue is specified, add this to the SGE command line
# LP: This has an undeclared variable, not sure why - delete?
#if job.queue is not None and job.queue in local_queues:
# args += local_queues[job.queue]
# If the job is actually a JobGroup, add the task numbering argument
if isinstance(job, JobGroup):
args += "-t 1:%d " % (job.tasks)
# If there are dependencies for this job, hold the job until they are
# complete
if len(job.dependencies) > 0:
args += "-hold_jid "
for dep in job.dependencies:
args += dep.name + ","
args = args[:-1]
# Build the qsub SGE commandline (passing local environment)
qsubcmd = ("%s -V %s %s" %
(pyani_config.QSUB_DEFAULT, args, job.scriptpath))
if sgeargs is not None:
qsubcmd = "%s %s" % (qsubcmd, sgeargs)
os.system(qsubcmd) # Run the command
job.submitted = True | [
"Submit the passed list of jobs to the Grid Engine server, using the\n passed directory as the root for scheduler output.\n\n - root_dir Path to output directory\n - jobs Iterable of Job objects\n "
] |
Please provide a description of the function:def submit_jobs(root_dir, jobs, sgeargs=None):
waiting = list(jobs) # List of jobs still to be done
# Loop over the list of pending jobs, while there still are any
while len(waiting) > 0:
# extract submittable jobs
submittable = extract_submittable_jobs(waiting)
# run those jobs
submit_safe_jobs(root_dir, submittable, sgeargs)
# remove those from the waiting list
for job in submittable:
waiting.remove(job) | [
" Submit each of the passed jobs to the SGE server, using the passed\n directory as root for SGE output.\n\n - root_dir Path to output directory\n - jobs List of Job objects\n "
] |