repo | file
---|---
PuncturedFEM | PuncturedFEM-main/puncturedfem/plot/traceplot.py
import numpy as np
import matplotlib.pyplot as plt
# from .. import quad
from ..mesh.cell import cell
def plot_trace(f_trace_list, fmt, legend, title, K: cell, quad_list):
    """
    Plot the traces in f_trace_list against the boundary parameter of K
    """
    t = _get_trace_param_cell_boundary(K, quad_list)
    plt.figure()
    for k in range(len(f_trace_list)):
        plt.plot(t, f_trace_list[k], fmt[k])
    plt.legend(legend)
    plt.minorticks_on()
    plt.grid(True, which='both')
plt.title(title)
x_ticks, x_labels = _get_ticks(K)
plt.xticks(ticks=x_ticks, labels=x_labels)
plt.show()
return None
def plot_trace_log(f_trace_list, fmt, legend, title, K: cell, quad_list):
    """
    Same as plot_trace, but with a logarithmic vertical scale
    """
    t = _get_trace_param_cell_boundary(K, quad_list)
    plt.figure()
    for k in range(len(f_trace_list)):
        plt.semilogy(t, f_trace_list[k], fmt[k])
    plt.legend(legend)
    plt.minorticks_on()
    plt.grid(True, which='both')
plt.title(title)
x_ticks, x_labels = _get_ticks(K)
plt.xticks(ticks=x_ticks, labels=x_labels)
plt.show()
return None
def _make_quad_dict(quad_list):
"""
Organize a list of distinct quad objects into a convenient dictionary
"""
quad_dict = dict()
for q in quad_list:
quad_dict[q.type] = q
return quad_dict
def _get_trace_param_cell_boundary(K: cell, quad_list):
quad_dict = _make_quad_dict(quad_list)
t = np.zeros((K.num_pts,))
t0 = 0
idx_start = 0
for e in K.edge_list:
t[idx_start:(idx_start + e.num_pts - 1)] = \
t0 + quad_dict[e.qtype].t[:-1]
idx_start += e.num_pts - 1
t0 += 2 * np.pi
return t
def _get_ticks(K):
x_ticks = np.linspace(0, 2 * np.pi * K.num_edges, K.num_edges + 1)
x_labels = ['0',]
for k in range(1, K.num_edges+1):
        x_labels.append(rf'{2 * k}$\pi$')
return x_ticks, x_labels
PuncturedFEM | PuncturedFEM-main/puncturedfem/plot/edges.py
import numpy as np
import matplotlib.pyplot as plt
from .. import mesh
def plot_edges(edge_list,
orientation=False,
axis_arg='equal',
grid_arg='minor'):
plt.figure()
plt.axis(axis_arg)
    plt.minorticks_on()
    plt.grid(True, which=grid_arg)
for e in edge_list:
if orientation:
_plot_oriented_edge(e)
else:
_plot_edge(e)
plt.show()
return None
def plot_boundary(K: mesh.cell.cell,
orientation=False,
hole_int_pts=False,
axis_arg='equal',
grid_arg='minor'):
plt.figure()
plt.axis(axis_arg)
    plt.minorticks_on()
    plt.grid(True, which=grid_arg)
for e in K.edge_list:
if orientation:
_plot_oriented_edge(e)
else:
_plot_edge(e)
if hole_int_pts:
_plot_hole_interior_points(K)
plt.show()
return None
def _plot_edge(e):
plt.plot(e.x[0,:], e.x[1,:],'k-')
def _plot_oriented_edge(e):
X = e.x[0,:]
Y = e.x[1,:]
U = np.roll(X, -1) - X
V = np.roll(Y, -1) - Y
X = X[:-1]
Y = Y[:-1]
U = U[:-1]
V = V[:-1]
plt.quiver(X, Y, U, V, scale=1, angles='xy', scale_units='xy')
return None
def _plot_hole_interior_points(K):
plt.scatter(K.hole_int_pts[0,:], K.hole_int_pts[1,:])
PuncturedFEM | PuncturedFEM-main/puncturedfem/plot/__init__.py | (empty)
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/intval.py
import numpy as np
from ..mesh.cell import cell
from .locfun import locfun
def interior_values(v: locfun, K: cell):
"""
Returns (y1, y2, vals) where y1,y2 form a meshgrid covering the cell K
and vals is an array of the same size of the interior values of v.
At points that are not in the interior, vals is nan.
"""
y1, y2, is_inside = generate_interior_points(K)
rows, cols = np.shape(y1)
vals = np.zeros((rows, cols))
grad1 = np.zeros((rows, cols))
grad2 = np.zeros((rows, cols))
# conjugable part
psi = v.get_conjugable_part(K)
psi_hat = v.get_harmonic_conjugate()
x1, x2 = K.get_boundary_points()
# polynomial gradient
Px, Py = v.poly_part.grad()
# compute interior values
for i in range(rows):
for j in range(cols):
if is_inside[i, j]:
# Cauchy's integral formula
xy1 = x1 - y1[i, j]
xy2 = x2 - y2[i, j]
xy_norm_sq = xy1 * xy1 + xy2 * xy2
eta = (xy1 * psi + xy2 * psi_hat) / xy_norm_sq
eta_hat = (xy1 * psi_hat - xy2 * psi) / xy_norm_sq
integrand = K.dot_with_tangent(eta_hat, eta)
vals[i, j] = K.integrate_over_boundary(integrand) * 0.5 / np.pi
# polynomial part
vals[i, j] += v.poly_part.eval(y1[i, j], y2[i, j])
# logarithmic part
for k in range(K.num_holes):
y_xi_norm_sq = (y1[i, j] - K.hole_int_pts[0, k]) ** 2 + \
(y2[i, j] - K.hole_int_pts[1, k]) ** 2
vals[i, j] += 0.5 * v.log_coef[k] * np.log(y_xi_norm_sq)
# Cauchy's integral formula for gradient
omega = (xy1 * eta + xy2 * eta_hat) / xy_norm_sq
omega_hat = (xy1 * eta_hat - xy2 * eta) / xy_norm_sq
integrand = K.dot_with_tangent(omega_hat, omega)
grad1[i, j] = K.integrate_over_boundary(integrand) * 0.5 / np.pi
integrand = K.dot_with_tangent(omega, -omega_hat)
grad2[i, j] = K.integrate_over_boundary(integrand) * 0.5 / np.pi
# gradient polynomial part
grad1[i, j] += Px.eval(y1[i, j], y2[i, j])
grad2[i, j] += Py.eval(y1[i, j], y2[i, j])
# gradient logarithmic part
for k in range(K.num_holes):
y_xi_1 = y1[i, j] - K.hole_int_pts[0, k]
y_xi_2 = y2[i, j] - K.hole_int_pts[1, k]
y_xi_norm_sq = y_xi_1 ** 2 + y_xi_2 ** 2
grad1[i, j] += v.log_coef[k] * y_xi_1 / y_xi_norm_sq
grad2[i, j] += v.log_coef[k] * y_xi_2 / y_xi_norm_sq
else:
vals[i, j] = np.nan
grad1[i, j] = np.nan
grad2[i, j] = np.nan
return y1, y2, vals, grad1, grad2
def generate_interior_points(K: cell, rows=101, cols=101, tol=0.02):
"""
Returns (x, y, is_inside) where x,y are a meshgrid covering the
cell K, and is_inside is a boolean array that is True for
interior points
"""
# find region of interest
xmin, xmax, ymin, ymax = K._get_bounding_box()
# set up grid
x_coord = np.linspace(xmin, xmax, rows)
y_coord = np.linspace(ymin, ymax, cols)
x, y = np.meshgrid(x_coord, y_coord)
# determine which points are inside K
is_inside = K.is_in_interior_cell(x, y)
# set minimum desired distance to the boundary
TOL = tol * np.min([xmax - xmin, ymax - ymin])
# ignore points too close to the boundary
for i in range(rows):
for j in range(cols):
if is_inside[i, j]:
d = K._get_distance_to_boundary(x[i, j], y[i, j])
if d < TOL:
is_inside[i, j] = False
return x, y, is_inside
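# A minimal self-contained check (plain numpy, independent of the classes
# above) of the Cauchy integral formula used in interior_values: recover the
# harmonic function psi = Re z^2 at an interior point of the unit disk from
# its boundary trace and the trace of its harmonic conjugate psi_hat = Im z^2.
if __name__ == '__main__':
    n = 512
    t = np.linspace(0, 2 * np.pi, n, endpoint=False)
    x1, x2 = np.cos(t), np.sin(t)
    tau1, tau2 = -np.sin(t), np.cos(t)       # unit tangent; |x'(t)| = 1
    psi = x1 ** 2 - x2 ** 2                  # Re z^2
    psi_hat = 2 * x1 * x2                    # Im z^2
    y1_, y2_ = 0.3, 0.2                      # interior point
    xy1, xy2 = x1 - y1_, x2 - y2_
    xy_norm_sq = xy1 ** 2 + xy2 ** 2
    eta = (xy1 * psi + xy2 * psi_hat) / xy_norm_sq
    eta_hat = (xy1 * psi_hat - xy2 * psi) / xy_norm_sq
    integrand = eta_hat * tau1 + eta * tau2
    val = np.sum(integrand) * (2 * np.pi / n) * 0.5 / np.pi
    print(val, y1_ ** 2 - y2_ ** 2)          # both ~0.05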
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/int_poly.py
from .poly.poly import polynomial
from ..mesh.cell import cell
def integrate_poly_over_cell(p: polynomial, K: cell):
""""
Returns the value of
\int_K (self) dx
by reducing this volumetric integral to one on the boundary via
the Divergence Theorem
"""
x1, x2 = K.get_boundary_points()
xn = K.dot_with_normal(x1, x2)
val = 0
for m in p.monos:
integrand = xn * m.eval(x1, x2) / (2 + m.alpha.order)
val += K.integrate_over_boundary(integrand)
return val
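# Sanity check of the identity behind integrate_poly_over_cell: since
# div(x^alpha * x) = (2 + |alpha|) x^alpha, the Divergence Theorem gives
#     \int_K x^alpha dx = \oint_{bdy K} x^alpha (x . n) ds / (2 + |alpha|).
# A plain numpy sketch on the unit disk with alpha = (2, 0), where both
# sides equal pi/4:
if __name__ == '__main__':
    import numpy as np
    n = 1000
    theta = np.linspace(0, 2 * np.pi, n, endpoint=False)
    xn = 1.0                                  # x . n = 1 on the unit circle
    integrand = np.cos(theta) ** 2 * xn / (2 + 2)
    val = np.sum(integrand) * (2 * np.pi / n)
    print(val, np.pi / 4)                     # both ~0.7854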
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/locfun.py
from . import d2n
from . import antilap
from .int_poly import integrate_poly_over_cell
from ..mesh.cell import cell
from .poly.poly import polynomial
class locfun:
"""
Local function object (defined over single cell), consisting of:
* Dirichlet trace values (given as array)
* Laplacian (given as polynomial object)
* Polynomial part (anti-Laplacian of Laplacian)
* Polynomial part trace (array)
* Conjugate trace (trace of harmonic conjugate of conjugable part)
* Logarithmic coefficients (for multiply connected domains)
* Weighted normal derivative of harmonic part
* Trace of anti-Laplacian of harmonic part
* Weighted normal derivative of anti-Laplacian of harmonic part
"""
__slots__ = (
'trace', # array
'lap', # polynomial
'poly_part', # polynomial
'poly_part_trace', # array
'poly_part_wnd', # array
'conj_trace', # array
'log_coef', # (small) array
'harm_part_wnd', # array
'antilap_trace', # array
'antilap_wnd', # array
)
def __init__(self, boundary_trace_values, laplacian_polynomial) -> None:
self.set_trace_values(boundary_trace_values)
self.set_laplacian_polynomial(laplacian_polynomial)
def compute_all(self, K: cell) -> None:
"""
Computes all relevant data for reducing volumetric integrals
to boundary integrals
"""
self.compute_polynomial_part()
self.compute_polynomial_part_trace(K)
self.compute_polynomial_part_weighted_normal_derivative(K)
self.compute_harmonic_conjugate(K)
self.compute_harmonic_weighted_normal_derivative(K)
self.compute_anti_laplacian_harmonic_part(K)
### Dirichlet trace ########################################################
def set_trace_values(self, vals) -> None:
self.trace = vals
def get_trace_values(self):
return self.trace
### Laplacian (polynomial) #################################################
def set_laplacian_polynomial(self, p: polynomial) -> None:
self.lap = p
def get_laplacian_polynomial(self):
return self.lap
### polynomial part (polynomial anti-Laplacian of Laplacian) ###############
def set_polynomial_part(self, P_poly) -> None:
self.poly_part = P_poly
def get_polynomial_part(self):
return self.poly_part
def compute_polynomial_part(self) -> None:
self.poly_part = self.lap.anti_laplacian()
### polynomial part trace ##################################################
def set_polynomial_part_trace(self, P_trace) -> None:
self.poly_part_trace = P_trace
def get_polynomial_part_trace(self):
return self.poly_part_trace
def compute_polynomial_part_trace(self, K: cell) -> None:
x1, x2 = K.get_boundary_points()
self.poly_part_trace = self.poly_part.eval(x1, x2)
### polynomial part weighted normal derivative #############################
def set_polynomial_part_weighted_normal_derivative(self, P_wnd) -> None:
self.poly_part_wnd = P_wnd
def get_polynomial_part_weighted_normal_derivative(self):
return self.poly_part_wnd
def compute_polynomial_part_weighted_normal_derivative(self, K: cell):
x1, x2 = K.get_boundary_points()
g1, g2 = self.poly_part.grad()
P_nd = K.dot_with_normal(g1.eval(x1, x2), g2.eval(x1, x2))
self.poly_part_wnd = K.multiply_by_dx_norm(P_nd)
### harmonic conjugate #####################################################
def set_harmonic_conjugate(self, hc_vals) -> None:
self.conj_trace = hc_vals
def get_harmonic_conjugate(self):
return self.conj_trace
def compute_harmonic_conjugate(self, K, debug=False) -> None:
phi_trace = self.trace - self.poly_part_trace
self.conj_trace, self.log_coef = \
d2n.harmconj.get_harmonic_conjugate(K, phi_trace, debug=debug)
### logarithmic coefficients ###############################################
def set_logarithmic_coefficients(self, log_coef) -> None:
self.log_coef = log_coef
def get_logarithmic_coefficients(self):
return self.log_coef
# no compute method, this is handled by compute_harmonic_conjugate()
### weighted normal derivative of harmonic part ############################
def set_harmonic_weighted_normal_derivative(self, hc_wnd) -> None:
self.harm_part_wnd = hc_wnd
def get_harmonic_weighted_normal_derivative(self):
return self.harm_part_wnd
def compute_harmonic_weighted_normal_derivative(self, K) -> None:
self.harm_part_wnd = \
d2n.trace2tangential.get_weighted_tangential_derivative_from_trace(
K, self.conj_trace)
lam_x1, lam_x2 = d2n.log_terms.get_log_grad(K)
lam_wnd = d2n.log_terms.get_dlam_dn_wgt(K, lam_x1, lam_x2)
self.harm_part_wnd += lam_wnd @ self.log_coef
### harmonic conjugable part psi ###########################################
def get_conjugable_part(self, K: cell):
lam = d2n.log_terms.get_log_trace(K)
return self.trace - self.poly_part_trace - lam @ self.log_coef
### anti-Laplacian #########################################################
def set_anti_laplacian_harmonic_part(self, anti_laplacian_vals) -> None:
self.antilap_trace = anti_laplacian_vals
def get_anti_laplacian_harmonic_part(self):
return self.antilap_trace
def compute_anti_laplacian_harmonic_part(self, K: cell) -> None:
psi = self.get_conjugable_part(K)
self.antilap_trace, self.antilap_wnd = \
antilap.antilap.get_anti_laplacian_harmonic( \
K, psi=psi, psi_hat=self.conj_trace, a=self.log_coef)
### H^1 semi-inner product #################################################
def compute_h1(self, other, K):
"""
Returns the H^1 semi-inner product
\int_K grad(self) * grad(other) dx
"""
# polynomial part
Px, Py = self.poly_part.grad()
Qx, Qy = other.poly_part.grad()
gradP_gradQ = Px * Qx + Py * Qy
val = integrate_poly_over_cell(gradP_gradQ, K)
# remaining terms
integrand = other.trace * self.harm_part_wnd \
+ self.poly_part_trace * other.harm_part_wnd
val += K.integrate_over_boundary_preweighted(integrand)
return val
### L^2 inner product ######################################################
def compute_l2(self, other, K: cell):
"""
Returns the L^2 inner product
\int_K (self) * (other) dx
"""
x1, x2 = K.get_boundary_points()
# P * Q
PQ = self.poly_part * other.poly_part
val = integrate_poly_over_cell(PQ, K)
# phi * psi
integrand = (other.trace - other.poly_part_trace) * self.antilap_wnd \
- self.antilap_trace * other.harm_part_wnd
# phi * Q
R = other.poly_part.anti_laplacian()
R_trace = R.eval(x1, x2)
R_wnd = R.get_weighted_normal_derivative(K)
integrand += (self.trace - self.poly_part_trace) * R_wnd \
- R_trace * self.harm_part_wnd
# psi * P
R = self.poly_part.anti_laplacian()
R_trace = R.eval(x1, x2)
R_wnd = R.get_weighted_normal_derivative(K)
integrand += (other.trace - other.poly_part_trace) * R_wnd\
- R_trace * other.harm_part_wnd
# integrate over boundary
val += K.integrate_over_boundary_preweighted(integrand)
return val
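# The boundary reduction in compute_h1 rests on Green's first identity: for
# a harmonic function v,
#     \int_K grad(v) . grad(w) dx = \oint_{bdy K} w (dv/dn) ds.
# A plain numpy check (independent of the classes above) on the unit disk
# with v = w = Re z^2, where both sides equal 2 pi:
if __name__ == '__main__':
    import numpy as np
    n = 1000
    theta = np.linspace(0, 2 * np.pi, n, endpoint=False)
    v = np.cos(2 * theta)            # trace of r^2 cos(2 theta) at r = 1
    dv_dn = 2 * np.cos(2 * theta)    # radial derivative at r = 1
    print(np.sum(v * dv_dn) * (2 * np.pi / n), 2 * np.pi)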
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/__init__.py | (empty)
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/poly/monomial.py
import numpy as np
from .multi_index import multi_index_2
class monomial:
"""
Monomials of the form
c (x, y) ^ (alpha_1, alpha_2) = c (x ^ alpha_1) * (y ^ alpha_2)
"""
def __init__(self, alpha: multi_index_2=None, coef: float=0.0) -> None:
if alpha is None:
alpha = multi_index_2()
self.set_coef(coef)
self.set_multidx(alpha)
def copy(self):
return monomial(self.alpha, self.coef)
def is_zero(self, tol=1e-12) -> bool:
return abs(self.coef) < tol
def set_coef(self, coef: float) -> None:
self.coef = coef
def set_multidx(self, alpha: multi_index_2):
self.alpha = alpha
def set_multidx_from_id(self, id: int) -> None:
alpha = multi_index_2()
alpha.set_from_id(id)
self.set_multidx(alpha)
def eval(self, x: float, y: float):
val = self.coef * np.ones(np.shape(x))
if self.alpha.x > 0:
val *= x ** self.alpha.x
if self.alpha.y > 0:
val *= y ** self.alpha.y
return val
def partial_deriv(self, var: str):
if var == 'x':
if self.alpha.x == 0:
# constant wrt x
b = 0.0
beta = multi_index_2()
else:
# power rule
b = self.coef * self.alpha.x
beta = multi_index_2([self.alpha.x - 1, self.alpha.y])
elif var == 'y':
if self.alpha.y == 0:
# constant wrt y
b = 0.0
beta = multi_index_2()
else:
# power rule
b = self.coef * self.alpha.y
beta = multi_index_2([self.alpha.x, self.alpha.y - 1])
else:
            raise ValueError('var must be one of the strings "x" or "y"')
return monomial(alpha=beta, coef=b)
def grad(self):
gx = self.partial_deriv('x')
gy = self.partial_deriv('y')
return gx, gy
def __repr__(self) -> str:
# coefficient
msg = f'+ ({self.coef}) '
# power of x
if self.alpha.x > 0:
msg += 'x'
if self.alpha.x > 1:
msg += f'^{self.alpha.x} '
else:
msg += ' '
# power of y
if self.alpha.y > 0:
msg += 'y'
if self.alpha.y > 1:
msg += f'^{self.alpha.y} '
else:
msg += ' '
return msg
def __eq__(self, other, tol=1e-12) -> bool:
"""
Returns True iff self == other
"""
if type(other) != monomial:
raise TypeError('Comparison of monomial to non-monomial object')
same_id = self.alpha.id == other.alpha.id
same_coef = abs(self.coef - other.coef) < tol
return same_id and same_coef
def __gt__(self, other) -> bool:
"""
Returns True iff self.id > other.id
"""
if type(other) != monomial:
raise TypeError('Comparison of monomial to non-monomial object')
return self.alpha.id > other.alpha.id
def __add__(self, other):
if not isinstance(other, monomial):
raise TypeError('Cannot add monomial to non-monomial object')
if not self.alpha == other.alpha:
            raise ValueError('Cannot add monomials with different multi-'
                             + 'indices. Use a polynomial object instead')
return monomial(self.alpha, self.coef + other.coef)
def __mul__(self, other):
"""
Defines the operation self * other
where other is either a monomial object or a scalar
"""
if isinstance(other, monomial):
# multiplication between two monomials
b = self.coef * other.coef
beta = self.alpha + other.alpha
return monomial(beta, b)
elif isinstance(other, int) or isinstance(other, float):
# scalar multiplication
b = self.coef * other
beta = self.alpha.copy()
return monomial(beta, b)
else:
raise TypeError(
'Multiplication by monomial must be by a scalar or' +
' by another monomial')
def __rmul__(self, other):
"""
Defines the operation: other * self
where other is either a monomial object or a scalar
"""
if isinstance(other, int) or isinstance(other, float):
return self * other
else:
raise TypeError(
'Multiplication by monomial must be by a scalar or' +
' by another monomial')
    def __neg__(self):
        """
        Defines negation operation: -self
        """
        return monomial(self.alpha.copy(), -self.coef)
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/poly/multi_index.py
from math import sqrt, floor
class multi_index_2:
"""
Integer multi-index with two components
"""
def __init__(self, alpha: list[int]=None) -> None:
if alpha is None:
alpha = [0, 0]
self.set(alpha)
def validate(self, alpha: list[int]):
if not isinstance(alpha, list):
raise TypeError('Multi-index must be list of two integers')
if len(alpha) != 2:
raise Exception('Multi-index is assumed to have 2 components')
if not (isinstance(alpha[0], int) and isinstance(alpha[1], int)):
raise TypeError('Multi-index must be list of two integers')
if alpha[0] < 0 or alpha[1] < 0:
raise ValueError('Components of multi-index must be nonnegative')
def set(self, alpha: list[int]):
self.validate(alpha)
self.x = alpha[0]
self.y = alpha[1]
self.order = alpha[0] + alpha[1]
self.id = alpha[1] + self.order * (self.order + 1) // 2
def set_from_id(self, id: int):
t = floor((sqrt(8 * id + 1) - 1) / 2)
N = t * (t + 1) // 2
alpha = []
alpha.append(t - id + N)
alpha.append(id - N)
self.set(alpha)
def copy(self):
return multi_index_2([self.x, self.y])
def __eq__(self, other) -> bool:
if type(other) != multi_index_2:
print(type(other))
raise TypeError('Comparison of multi-index to object' +
' of different type')
return self.id == other.id
def __add__(self, other):
if isinstance(other, multi_index_2):
beta = [self.x + other.x, self.y + other.y]
return multi_index_2(beta)
else:
raise TypeError('Cannot add multi-index to different type')
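# Round-trip check of the triangular-number encoding used above:
# id = alpha_y + order * (order + 1) / 2 with order = alpha_x + alpha_y,
# inverted in set_from_id.
if __name__ == '__main__':
    for id in range(10):
        t = floor((sqrt(8 * id + 1) - 1) / 2)    # order of multi-index id
        N = t * (t + 1) // 2
        x, y = t - id + N, id - N
        assert id == y + (x + y) * (x + y + 1) // 2
        print(id, (x, y))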
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/poly/__init__.py | (empty)
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/poly/poly.py
import numpy as np
from .monomial import monomial
from .multi_index import multi_index_2
class polynomial:
"""
Treated as a list of monomial objects
"""
def __init__(self, coef_multidx_pairs=None):
self.set(coef_multidx_pairs)
def set(self, coef_multidx_pairs=None):
self.monos = []
if coef_multidx_pairs is None:
return
for triple in coef_multidx_pairs:
if len(triple) != 3:
raise Exception(
'Every multi-index / coefficient pair must consist of' +
'\n\t[0]:\tthe coefficient' +
'\n\t[1]:\tthe exponent on x_1' +
'\n\t[2]:\tthe exponent on x_2'
)
c = float(triple[0])
alpha = multi_index_2([triple[1], triple[2]])
m = monomial(alpha, c)
self.add_monomial(m)
self.consolidate()
def copy(self):
new = polynomial()
new.add_monomials(self.monos)
return new
def add_monomial(self, m: monomial) -> None:
"""
Adds a monomial to the polynomial
"""
if not m.is_zero():
self.monos.append(m)
def add_monomials(self, monos: list[monomial]=None) -> None:
for m in monos:
self.add_monomial(m)
self.consolidate()
def remove_zeros(self):
"""
Removes terms with zero coefficients
"""
for i in range(len(self.monos), 0, -1):
if self.monos[i - 1].is_zero():
del self.monos[i - 1]
def consolidate(self) -> None:
"""
Consolidates the coefficients of repeated indices
"""
N = len(self.monos)
for i in range(N):
for j in range(i + 1, N):
if self.monos[i].alpha == self.monos[j].alpha:
self.monos[i] += self.monos[j]
self.monos[j] *= 0
self.remove_zeros()
self.sort()
def sort(self) -> None:
"""
Sorts the monomials according to multi-index id
        (Using Insertion Sort since monomial list is assumed to be short)
"""
for i in range(len(self.monos)):
j = i
while j > 0 and self.monos[j - 1] > self.monos[j]:
temp = self.monos[j - 1]
self.monos[j - 1] = self.monos[j]
self.monos[j] = temp
j -= 1
def add_monomial_with_id(self, coef: float, id: int) -> None:
m = monomial()
m.set_multidx_from_id(id)
m.set_coef(coef)
self.add_monomial(m)
self.consolidate()
def add_monomials_with_ids(self, coef_list: list[float],
id_list: list[int]) -> None:
if len(coef_list) != len(id_list):
raise Exception(
'number of coefficients and multi-indices must be equal')
for i in range(len(coef_list)):
self.add_monomial_with_id(coef_list[i], id_list[i])
self.consolidate()
def is_zero(self) -> bool:
self.consolidate()
return len(self.monos) == 0
def set_to_zero(self) -> None:
self.monos = []
def eval(self, x: float, y: float) -> float:
val = np.zeros(np.shape(x))
for m in self.monos:
val += m.eval(x, y)
return val
def partial_deriv(self, var: str):
new = polynomial()
for m in self.monos:
dm = m.partial_deriv(var)
new.add_monomial(dm)
return new
def grad(self):
gx = self.partial_deriv('x')
gy = self.partial_deriv('y')
return gx, gy
def laplacian(self):
gx, gy = self.grad()
gxx = gx.partial_deriv('x')
gyy = gy.partial_deriv('y')
return gxx + gyy
def anti_laplacian(self):
new = polynomial()
# define |(x, y)|^2 = x^2 + y^2
p1 = polynomial()
p1.add_monomials_with_ids([1, 1], [3, 5])
# loop over monomial terms
for m in self.monos:
# anti-Laplacian of the monomial m
N = m.alpha.order // 2
# (x ^ 2 + y ^ 2) ^ {k + 1}
pk = p1.copy()
            # Lk = Delta^k applied to the monomial m (k = 0 initially)
Lk = polynomial()
Lk.add_monomial(m)
# first term: k = 0
scale = 0.25 / (1 + m.alpha.order)
P_alpha = pk * Lk * scale
# sum over k = 1 : N
for k in range(1, N + 1):
pk *= p1
Lk = Lk.laplacian()
scale *= -0.25 / ((k + 1) * (m.alpha.order + 1 - k))
P_alpha += pk * Lk * scale
# add c_alpha * P_alpha to new
new += P_alpha
return new
def get_weighted_normal_derivative(self, K):
x1, x2 = K.get_boundary_points()
gx, gy = self.grad()
gx_trace = gx.eval(x1, x2)
gy_trace = gy.eval(x1, x2)
nd = K.dot_with_normal(gx_trace, gy_trace)
return K.multiply_by_dx_norm(nd)
def __repr__(self) -> str:
self.sort()
if len(self.monos) == 0:
return '+ (0) '
msg = ''
for m in self.monos:
msg += m.__repr__()
return msg
def __eq__(self, other: object) -> bool:
"""
Tests equality between self and other
"""
if not isinstance(other, polynomial):
raise TypeError('Cannot compare polynomial to non-polynomial')
if len(self.monos) != len(other.monos):
return False
self.sort()
other.sort()
for i in range(len(self.monos)):
if self.monos[i] != other.monos[i]:
return False
return True
def __add__(self, other):
"""
Defines the addition operation self + other,
where other is either another polynomial or a scalar
"""
if isinstance(other, polynomial):
new = polynomial()
for m in self.monos:
new.add_monomial(m)
for m in other.monos:
new.add_monomial(m)
elif isinstance(other, int) or isinstance(other, float):
new = polynomial()
for m in self.monos:
new.add_monomial(m)
constant = monomial()
constant.set_multidx_from_id(0)
constant.set_coef(other)
new.add_monomial(constant)
else:
raise TypeError(
'Addition with a polynomial must be with a scalar or' +
' with another polynomial')
new.consolidate()
return new
def __radd__(self, other):
"""
Defines the addition operator other + self,
where other is either another polynomial or a scalar
"""
if isinstance(other, int) or isinstance(other, float):
return self + other
else:
raise TypeError(
'Addition with a polynomial must be with a scalar or' +
' with another polynomial')
def __iadd__(self, other):
"""
Defines the increment operation self += other
where other is either another polynomial or a scalar
"""
if isinstance(other, polynomial):
for m in other.monos:
self.add_monomial(m)
self.consolidate()
elif isinstance(other, int) or isinstance(other, float):
constant = monomial()
constant.set_multidx_from_id(0)
constant.set_coef(other)
self.add_monomial(constant)
self.consolidate()
else:
raise TypeError('Can only add polynomials to other polynomials' +
' or scalars')
return self
def __mul__(self, other):
"""
Defines the multiplication operator self * other,
where other is either another polynomial or a scalar
"""
if isinstance(other, polynomial):
new = polynomial()
for m in self.monos:
for n in other.monos:
new.add_monomial(m * n)
new.consolidate()
return new
elif isinstance(other, int) or isinstance(other, float):
new = polynomial()
for m in self.monos:
new.add_monomial(other * m.copy())
return new
else:
raise TypeError(
'Multiplication by polynomial must be by a scalar or' +
' by another polynomial')
def __rmul__(self, other):
"""
Defines the multiplication operator other * self,
where other is either another polynomial or a scalar
"""
if isinstance(other, int) or isinstance(other, float):
return self * other
else:
raise TypeError(
'Multiplication by polynomial must be by a scalar or' +
' by another polynomial')
def __neg__(self):
return -1 * self
def __sub__(self, other):
return self + (-other)
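# A hypothetical usage sketch; it runs when this module is executed as part
# of the package laid out in the file headers above.
if __name__ == '__main__':
    p = polynomial([(1.0, 0, 0), (2.0, 1, 0), (3.0, 0, 2)])  # 1 + 2x + 3y^2
    print(p)                          # + (1.0)  + (2.0) x  + (3.0) y^2
    print(p.eval(1.0, 2.0))           # 1 + 2 + 12 = 15.0
    print(p.laplacian())              # + (6.0)  since Delta(3y^2) = 6
    print(p.anti_laplacian().laplacian())    # recovers p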
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/nystrom/double_layer.py
import numpy as np
# from ...mesh.quad import quad
from ...mesh.cell import cell
from ...mesh.edge import edge
def double_layer_mat(K: cell):
"""
Double layer potential operator matrix
"""
N = K.num_pts
A = np.zeros((N,N))
for i in range(K.num_edges):
for j in range(K.num_edges):
A[K.vert_idx[i] : K.vert_idx[i + 1],
K.vert_idx[j] : K.vert_idx[j + 1]] = \
_double_layer_block(K.edge_list[i], K.edge_list[j])
return A
def _double_layer_block(e: edge, f: edge):
# allocate block
B = np.zeros((e.num_pts - 1, f.num_pts - 1))
# trapezoid step size
h = 1 / (f.num_pts - 1)
# check if edges are the same edge
same_edge = e == f
    # adapt quadrature to accommodate both trapezoid and Kress
if f.qtype[0:5] == 'kress':
j_start = 1
else:
j_start = 0
    # fill in the block entries
for i in range(e.num_pts - 1):
for j in range(j_start, f.num_pts - 1):
if same_edge and i == j:
B[i, i] = 0.5 * e.curvature[i]
else:
xy = e.x[:,i] - f.x[:,j]
xy2 = np.dot(xy, xy)
B[i, j] = np.dot(xy, f.unit_normal[:,j]) / xy2
B[i, j] *= f.dx_norm[j] * h
return B
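# The diagonal entries above use the smooth-curve limit of the double layer
# kernel: (x - y) . n(y) / |x - y|^2 -> kappa(x) / 2 as y -> x, where kappa
# is the signed curvature under the normal convention of mesh/edge.py
# (kappa = -1 on the counterclockwise unit circle). A quick numeric check:
if __name__ == '__main__':
    for dt in [1e-1, 1e-2, 1e-3]:
        x = np.array([1.0, 0.0])                  # point at angle 0
        y = np.array([np.cos(dt), np.sin(dt)])    # nearby point at angle dt
        xy = x - y
        print(np.dot(xy, y) / np.dot(xy, xy))     # n(y) = y; values -> -0.5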
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/nystrom/neumann.py
import numpy as np
from scipy.sparse.linalg import gmres, LinearOperator
from ...mesh.cell import cell
from . import single_layer, double_layer, apply_double_layer
def solve_neumann_zero_average(K: cell, u_wnd):
# single and double layer operators
T1 = single_layer.single_layer_mat(K)
T2 = double_layer.double_layer_mat(K)
# RHS
b = T1 @ u_wnd
    # row sums of the double layer matrix (for the corner correction)
T2_sum = np.sum(T2, 1)
integrator_mat = K.get_integrator()
# total number of points on the boundary
N = K.num_pts
# define linear operator functionality
def linop4harmconj(u):
y = apply_double_layer.apply_T2(u, T2, T2_sum, K.closest_vert_idx)
y += integrator_mat @ u
return y
# define linear operator object
A = LinearOperator(
dtype = float,
shape = (N, N),
matvec = linop4harmconj
)
# solve Nystrom system
u, flag = gmres(A, b, atol=1e-12, tol=1e-12)
if flag > 0:
print(f'Something went wrong: GMRES returned flag = {flag}')
return u
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/nystrom/single_layer.py
import numpy as np
from ...mesh.quad.quad import quad
from ...mesh.cell import cell
from ...mesh.edge import edge
def single_layer_mat(K: cell):
"""
Single layer potential operator matrix
"""
N = K.num_pts
A = np.zeros((N,N))
for j in range(K.num_edges):
# Martensen quadrature
nm = (K.edge_list[j].num_pts - 1) // 2
qm = quad(qtype='mart', n=nm)
for i in range(K.num_edges):
# get block corresponding to edges e and f
A[K.vert_idx[i] : K.vert_idx[i + 1],
K.vert_idx[j] : K.vert_idx[j + 1]] = \
_single_layer_block(K.edge_list[i], K.edge_list[j], qm)
return A
def _single_layer_block(e: edge, f: edge, qm: quad):
"""
Returns a block in the single layer matrix corresponding to
x in edge e and y in edge f
"""
# allocate block
B = np.zeros((e.num_pts - 1, f.num_pts - 1))
# trapezoid weight: pi in integrand cancels
h = -0.5 / (f.num_pts - 1)
# check if two edges are the same
same_edge = e == f
    # adapt quadrature to accommodate both trapezoid and Kress
if f.qtype[0:5] == 'kress':
j_start = 1
else:
j_start = 0
if same_edge: # Kress and Martensen
for i in range(e.num_pts - 1):
for j in range(j_start,f.num_pts - 1):
ij = abs(i-j)
if ij == 0:
B[i, i] = 2 * np.log(e.dx_norm[i])
else:
xy = e.x[:,i] - f.x[:,j]
xy2 = np.dot(xy, xy)
B[i, j] = np.log(xy2 / qm.t[ij])
B[i, j] *= h
B[i, j] += qm.wgt[ij]
else: # different edges: Kress only
for i in range(e.num_pts - 1):
for j in range(j_start, f.num_pts - 1):
xy = e.x[:,i] - f.x[:,j]
xy2 = np.dot(xy, xy)
B[i, j] = np.log(xy2) * h
return B
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/nystrom/apply_double_layer.py
def apply_T2(u, T2, T2_sum, closest_vert_idx):
corner_values = u[closest_vert_idx]
return 0.5 * (u - corner_values) + T2 @ u - corner_values * T2_sum
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/nystrom/__init__.py
from . import single_layer
from . import double_layer
from . import neumann
from . import apply_double_layer
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/antilap/antilap.py
import numpy as np
from ...mesh.cell import cell
from ..d2n.fft_deriv import fft_antiderivative
from ..d2n.log_terms import get_log_grad
from . import log_antilap
from .. import nystrom
def get_anti_laplacian_harmonic(K: cell, psi, psi_hat, a):
"""
Returns the trace and weighted normal derivative of an anti-Laplacian of a
harmonic function
phi = psi + sum_{j=1}^m a_j ln |x - xi_j|
given the trace of psi, the trace of its harmonic conjugate psi_hat,
and the logarithmic coefficients a_1,...,a_m
(When K is simply connected, phi = psi and a is an empty list)
"""
if K.num_holes == 0:
PHI, PHI_wnd = _antilap_simply_connected(K, psi, psi_hat)
else:
PHI, PHI_wnd = _antilap_multiply_connected(K, psi, psi_hat, a)
return PHI, PHI_wnd
def _antilap_simply_connected(K: cell, phi, phi_hat):
# length of interval of integration in parameter space
interval_length = 2 * np.pi * K.num_edges
# integrate tangential derivative of rho
rho_td = K.dot_with_tangent(phi, -phi_hat)
rho_wtd = K.multiply_by_dx_norm(rho_td)
rho = fft_antiderivative(rho_wtd, interval_length)
# integrate tangential derivative of rho_hat
rho_hat_td = K.dot_with_tangent(phi_hat, phi)
rho_hat_wtd = K.multiply_by_dx_norm(rho_hat_td)
rho_hat = fft_antiderivative(rho_hat_wtd, interval_length)
# coordinates of boundary points
x1, x2 = K.get_boundary_points()
# construct anti-Laplacian
PHI = 0.25 * (x1 * rho + x2 * rho_hat)
# gradient of anti-Laplacian
PHI_x1 = 0.25 * (rho + x1 * phi + x2 * phi_hat)
PHI_x2 = 0.25 * (rho_hat + x2 * phi - x1 * phi_hat)
    # weighted normal derivative of anti-Laplacian
PHI_nd = K.dot_with_normal(PHI_x1, PHI_x2)
PHI_wnd = K.multiply_by_dx_norm(PHI_nd)
return PHI, PHI_wnd
def _antilap_multiply_connected(K: cell, psi, psi_hat, a):
# compute F * t and \hat F * t
F_t = K.dot_with_tangent(psi, -psi_hat)
F_hat_t = K.dot_with_tangent(psi_hat, psi)
# compute b_j and c_j
b = np.zeros((K.num_holes,))
c = np.zeros((K.num_holes,))
for j in range(K.num_holes):
b[j] = K.integrate_over_specific_contour(F_hat_t, j + 1) / (-2 * np.pi)
c[j] = K.integrate_over_specific_contour(F_t, j + 1) / (2 * np.pi)
# compute \mu_j and \hat\mu_j
mu, mu_hat = get_log_grad(K)
mu_hat *= -1
# compute \psi_0 and \hat\psi_0
psi_0 = psi - (mu @ b - mu_hat @ c)
psi_hat_0 = psi_hat - (mu @ c + mu_hat @ b)
# compute weighted normal derivatives of rho and rho_hat
rho_nd_0 = K.dot_with_normal(psi_0, -psi_hat_0)
rho_wnd_0 = K.multiply_by_dx_norm(rho_nd_0)
rho_hat_nd_0 = K.dot_with_normal(psi_hat_0, psi_0)
rho_hat_wnd_0 = K.multiply_by_dx_norm(rho_hat_nd_0)
# solve for rho_0 and rho_hat_0
rho_0 = nystrom.neumann.solve_neumann_zero_average(K, rho_wnd_0)
rho_hat_0 = nystrom.neumann.solve_neumann_zero_average(K, rho_hat_wnd_0)
# compute anti-Laplacian of psi_0
x1, x2 = K.get_boundary_points()
PHI = 0.25 * (x1 * rho_0 + x2 * rho_hat_0)
PHI_x1 = 0.25 * (rho_0 + x1 * psi_0 + x2 * psi_hat_0)
PHI_x2 = 0.25 * (rho_hat_0 + x2 * psi_0 - x1 * psi_hat_0)
PHI_nd = K.dot_with_normal(PHI_x1, PHI_x2)
PHI_wnd = K.multiply_by_dx_norm(PHI_nd)
# compute M = sum_j M_j
for j in range(K.num_holes):
xi = K.hole_int_pts[:, j]
x_xi_1 = x1 - xi[0]
x_xi_2 = x2 - xi[1]
x_xi_norm_sq = x_xi_1 ** 2 + x_xi_2 ** 2
log_x_xi_norm = 0.5 * np.log(x_xi_norm_sq)
PHI += 0.5 * (b[j] * x_xi_1 + c[j] * x_xi_2) * log_x_xi_norm
        M_x1 = 0.5 * (b[j] * mu[:, j] - c[j] * mu_hat[:, j]) * x_xi_1 \
            + 0.5 * b[j] * log_x_xi_norm
        M_x2 = 0.5 * (b[j] * mu[:, j] - c[j] * mu_hat[:, j]) * x_xi_2 \
            + 0.5 * c[j] * log_x_xi_norm
M_nd = K.dot_with_normal(M_x1, M_x2)
PHI_wnd += K.multiply_by_dx_norm(M_nd)
# compute \Lambda_j
PHI += log_antilap.get_log_antilap(K) @ a
PHI_wnd += log_antilap.get_log_antilap_weighted_normal_derivative(K) @ a
return PHI, PHI_wnd
def _rational_function_coefficients(K: cell, F_t, F_hat_t):
# TODO
pass
def _antilap_rational_terms(K:cell, b, c):
# TODO
pass
def _antilap_log_terms(K: cell):
# TODO
pass
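# The construction in _antilap_simply_connected amounts to the identity
#     PHI = 0.25 * Re(conj(z) * F(z)),   F' = f = psi + i * psi_hat,
# which satisfies Laplace(PHI) = Re f. A finite-difference check for
# f(z) = z^2 with antiderivative F(z) = z^3 / 3:
if __name__ == '__main__':
    def PHI(x, y):
        z = x + 1j * y
        return 0.25 * np.real(np.conj(z) * z ** 3 / 3)
    x0, y0, h = 0.4, -0.2, 1e-4
    lap = (PHI(x0 + h, y0) + PHI(x0 - h, y0) + PHI(x0, y0 + h)
           + PHI(x0, y0 - h) - 4 * PHI(x0, y0)) / h ** 2
    print(lap, np.real((x0 + 1j * y0) ** 2))      # both ~0.12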
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/antilap/__init__.py
from . import antilap
from . import log_antilap
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/antilap/log_antilap.py
import numpy as np
from ...mesh.cell import cell
from .. import d2n
def get_log_antilap(K: cell):
"""
Returns traces of an anti-Laplacian of logarithmic terms on the boundary
\Lambda(x) = \frac14 |x|^2 (\ln|x|-1)
"""
LAM_trace = np.zeros((K.num_pts, K.num_holes))
for j in range(K.num_holes):
xi = K.hole_int_pts[:,j]
def LAM(x):
x_xi, x_xi_norm_sq = d2n.log_terms.shifted_coordinates(x, xi)
return 0.125 * x_xi_norm_sq * (np.log(x_xi_norm_sq) - 2)
LAM_trace[:, j] = K.evaluate_function_on_boundary(LAM)
return LAM_trace
def get_log_antilap_weighted_normal_derivative(K: cell):
"""
Returns traces of an anti-Laplacian of logarithmic terms on the boundary:
\Lambda(x) = \frac14 |x|^2 (\ln|x|-1)
"""
dLAM_dn_wgt = np.zeros((K.num_pts, K.num_holes))
for j in range(K.num_holes):
xi = K.hole_int_pts[:,j]
def LAM_x1(x):
x_xi, x_xi_norm_sq = d2n.log_terms.shifted_coordinates(x, xi)
return 0.25 * (np.log(x_xi_norm_sq) - 1) * x_xi[0]
def LAM_x2(x):
x_xi, x_xi_norm_sq = d2n.log_terms.shifted_coordinates(x, xi)
return 0.25 * (np.log(x_xi_norm_sq) - 1) * x_xi[1]
LAM_x1_trace = K.evaluate_function_on_boundary(LAM_x1)
LAM_x2_trace = K.evaluate_function_on_boundary(LAM_x2)
dLAM_dn = K.dot_with_normal(LAM_x1_trace, LAM_x2_trace)
dLAM_dn_wgt[:, j] = K.multiply_by_dx_norm(dLAM_dn)
return dLAM_dn_wgt
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/d2n/trace2tangential.py
import numpy as np
from ...mesh.cell import cell
from ...mesh.contour import contour
from . import fft_deriv
def get_weighted_tangential_derivative_from_trace(K: cell, f):
"""
Returns df / ds = \nabla f(x(t)) \cdot x'(t) by computing the derivative
of f(x(t)) with respect to the scalar parameter t, where x(t)
is a parameterization of the boundary
"""
N = K.num_pts
df_dt_wgt = np.zeros((N,))
for c_idx in K.contour_idx:
# create contour object
c = contour([K.edge_list[i] for i in c_idx])
# get values of f on c
fc = np.zeros((c.num_pts,))
for i in range(c.num_edges):
fc[c.vert_idx[i]:c.vert_idx[i + 1]] = \
f[K.vert_idx[c_idx[i]]:K.vert_idx[c_idx[i] + 1]]
# compute weighted tangential derivative on this contour
interval_length = 2 * np.pi * c.num_edges
dfc_dt_wgt = fft_deriv.fft_derivative(fc, interval_length)
# assign weighted tangential derivative to position in K
for i in range(c.num_edges):
df_dt_wgt[K.vert_idx[c_idx[i]]:K.vert_idx[c_idx[i] + 1]] = \
dfc_dt_wgt[c.vert_idx[i]:c.vert_idx[i + 1]]
return df_dt_wgt
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/d2n/log_terms.py
import numpy as np
from ...mesh.cell import cell
def shifted_coordinates(x, xi):
x_xi = np.array([x[0] - xi[0], x[1] - xi[1]])
x_xi_norm_sq = x_xi[0]**2 + x_xi[1]**2
return x_xi, x_xi_norm_sq
def get_log_trace(K: cell):
"""
Returns traces of logarithmic terms on the boundary
"""
lam_trace = np.zeros((K.num_pts, K.num_holes))
for j in range(K.num_holes):
xi = K.hole_int_pts[:,j]
def lam(x):
x_xi, x_xi_norm_sq = shifted_coordinates(x, xi)
return 0.5 * np.log(x_xi_norm_sq)
lam_trace[:, j] = K.evaluate_function_on_boundary(lam)
return lam_trace
def get_log_grad(K: cell):
"""
Returns gradients of logarithmic terms on the boundary
"""
lam_x1_trace = np.zeros((K.num_pts, K.num_holes))
lam_x2_trace = np.zeros((K.num_pts, K.num_holes))
for j in range(K.num_holes):
xi = K.hole_int_pts[:,j]
def lam_x1(x):
x_xi, x_xi_norm_sq = shifted_coordinates(x, xi)
return x_xi[0] / x_xi_norm_sq
def lam_x2(x):
x_xi, x_xi_norm_sq = shifted_coordinates(x, xi)
return x_xi[1] / x_xi_norm_sq
lam_x1_trace[:, j] = K.evaluate_function_on_boundary(lam_x1)
lam_x2_trace[:, j] = K.evaluate_function_on_boundary(lam_x2)
return lam_x1_trace, lam_x2_trace
def get_dlam_dt_wgt(K, lam_x1_trace, lam_x2_trace):
"""
    Returns weighted tangential derivative of logarithmic terms
"""
dlam_dt_wgt = np.zeros((K.num_pts, K.num_holes))
for j in range(K.num_holes):
dlam_dt_wgt[:, j] \
= K.dot_with_tangent(lam_x1_trace[:, j], lam_x2_trace[:, j])
dlam_dt_wgt[:, j] = K.multiply_by_dx_norm(dlam_dt_wgt[:, j])
return dlam_dt_wgt
def get_dlam_dn_wgt(K, lam_x1_trace, lam_x2_trace):
"""
    Returns weighted normal derivative of logarithmic terms
"""
dlam_dn_wgt = np.zeros((K.num_pts, K.num_holes))
for j in range(K.num_holes):
dlam_dn_wgt[:, j] \
= K.dot_with_normal(lam_x1_trace[:, j], lam_x2_trace[:, j])
dlam_dn_wgt[:, j] = K.multiply_by_dx_norm(dlam_dn_wgt[:, j])
return dlam_dn_wgt
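# A quick check of a fact these log terms rely on: the flux of
# lam = ln|x - xi| through any contour enclosing xi is 2 pi. On the unit
# circle with xi at the origin, grad(lam) = x and n = x, so:
if __name__ == '__main__':
    n = 400
    t = np.linspace(0, 2 * np.pi, n, endpoint=False)
    x1, x2 = np.cos(t), np.sin(t)
    flux = np.sum(x1 * x1 + x2 * x2) * (2 * np.pi / n)
    print(flux, 2 * np.pi)                    # both ~6.2832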
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/d2n/fft_deriv.py
import numpy as np
def fft_derivative(f, interval_length):
N = len(f)
omega = np.fft.fft(f)
omega *= 1j * N * np.fft.fftfreq(N)
omega *= 2 * np.pi / interval_length
return np.real(np.fft.ifft(omega))
def fft_antiderivative(df, interval_length):
N = len(df)
omega = np.fft.fft(df)
fft_idx = np.fft.fftfreq(len(df))
fft_idx[0] = 1
omega *= -1j / (N * fft_idx)
omega *= 0.5 * interval_length / np.pi
omega[0] = 0
return np.real(np.fft.ifft(omega))
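# Spectral-accuracy check of the two routines above on a smooth periodic
# function: the derivative is exact to machine precision, and the
# antiderivative recovers f up to its mean (zero here).
if __name__ == '__main__':
    N = 64
    t = np.linspace(0, 2 * np.pi, N, endpoint=False)
    f = np.sin(3 * t)
    df = fft_derivative(f, interval_length=2 * np.pi)
    print(np.max(np.abs(df - 3 * np.cos(3 * t))))    # ~1e-13
    g = fft_antiderivative(df, interval_length=2 * np.pi)
    print(np.max(np.abs(g - f)))                     # ~1e-13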
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/d2n/harmconj.py
import numpy as np
from scipy.sparse.linalg import gmres, LinearOperator
from ...mesh.cell import cell
from .. import nystrom
from . import log_terms
from . import trace2tangential
def get_harmonic_conjugate(K: cell, phi, debug=False):
phi_wtd = \
trace2tangential.get_weighted_tangential_derivative_from_trace(K, phi)
if K.num_holes == 0:
# simply-connected
psi_hat = nystrom.neumann.solve_neumann_zero_average(K, -phi_wtd)
return psi_hat, []
else:
# multiply-connected
psi_hat, a = \
get_harmonic_conjugate_multiply_connected(K, phi, phi_wtd, debug)
return psi_hat, a
def get_harmonic_conjugate_multiply_connected(
K: cell, phi, dphi_ds, debug=False):
# get single and double layer operator matrices
T1 = nystrom.single_layer.single_layer_mat(K)
T2 = nystrom.double_layer.double_layer_mat(K)
# traces and gradients of logarithmic corrections
lam_trace = log_terms.get_log_trace(K)
lam_x1_trace, lam_x2_trace = log_terms.get_log_grad(K)
# tangential and normal derivatives of logarithmic terms
dlam_dt_wgt = log_terms.get_dlam_dt_wgt(K, lam_x1_trace, lam_x2_trace)
dlam_dn_wgt = log_terms.get_dlam_dn_wgt(K, lam_x1_trace, lam_x2_trace)
    # single layer operator applied to tangential derivatives of log terms
T1_dlam_dt = T1 @ dlam_dt_wgt
    # hole-wise integration operators against the log-term derivatives
Sn = get_Su(K, dlam_dn_wgt)
St = get_Su(K, dlam_dt_wgt)
# H1 seminorms of logarithmic terms
Sn_lam = Sn @ lam_trace
    # row sums of the double layer matrix (for the corner correction)
T2_sum = np.sum(T2, 1)
integrator_mat = K.get_integrator()
# array sizes
N = K.num_pts
m = K.num_holes
# block RHS
b = np.zeros((N + m,))
b[:N] = - T1 @ dphi_ds
b[N:] = Sn @ phi
def linop4harmconj(x):
psi_hat = x[:N]
a = x[N:]
y = np.zeros((N + m,))
y[:N] = nystrom.apply_double_layer.apply_T2(\
psi_hat, T2, T2_sum, K.closest_vert_idx)
y[:N] += integrator_mat @ psi_hat
y[:N] -= T1_dlam_dt @ a
y[N:] = - St @ psi_hat + Sn_lam @ a
return y
# define linear operator
A = LinearOperator(
dtype = float,
shape = (N + m, N + m),
matvec = linop4harmconj
)
# solve Nystrom system
x, flag = gmres(A, b, atol=1e-12, tol=1e-12)
psi_hat = x[:N]
a = x[N:]
# DEBUG
if debug:
print(f'gmres flag: get_harmonic_conjugate_multiply_connected = {flag}')
print(f'Condition number = {get_cond_num(A)}')
return psi_hat, a
def get_Su(K, dlam_du_wgt):
Su = np.zeros((K.num_holes, K.num_pts))
Su[:,:] = np.transpose(dlam_du_wgt)
for i in range(K.num_edges):
h = 2 * np.pi / (K.edge_list[i].num_pts - 1)
Su[:, K.vert_idx[i]:K.vert_idx[i + 1]] *= h
return Su
def get_cond_num(A):
"""
FOR DEBUGGING
"""
n = np.shape(A)[0]
I = np.eye(n)
Amat = np.zeros((n, n))
for j in range(n):
Amat[:, j] = A(I[:, j])
# Amat = (Amat[:-2,:-2])
# n -= 2
cond = np.linalg.cond(Amat)
print('condition number = %.4e'%(cond))
r = np.linalg.matrix_rank(Amat)
print(f'rank = {r}')
print(f'nullity = {n - r}')
u, s, vh = np.linalg.svd((Amat))
print(s[(n-10):n])
import matplotlib.pyplot as plt
plt.figure()
plt.semilogy(s, 'k.')
plt.title('singular values')
plt.grid('on')
plt.figure()
plt.title('spanning set of the nullspace')
leg = []
for k in range(n):
if s[k] < 1e-6:
leg.append('%.4e'%(s[k]))
plt.plot(vh[k,:])
plt.legend(leg)
u, s, vh = np.linalg.svd(np.transpose(Amat))
plt.figure()
plt.title('spanning set of the nullspace of transpose')
leg = []
for k in range(n):
if s[k] < 1e-6:
leg.append('%.4e'%(s[k]))
plt.plot(vh[k,:])
plt.legend(leg)
plt.figure()
plt.title('Matrix operator for harmonic conjugate')
plt.imshow(Amat)
plt.colorbar()
plt.show()
return
PuncturedFEM | PuncturedFEM-main/puncturedfem/locfun/d2n/__init__.py
from . import harmconj
from . import log_terms
from . import trace2tangential
from . import fft_deriv
PuncturedFEM | PuncturedFEM-main/puncturedfem/mesh/edge.py
import copy
import numpy as np
from .quad.quad import quad
class edge:
__slots__ = (
'id',
'etype',
'qtype',
'num_pts',
'x',
'unit_tangent',
'unit_normal',
'dx_norm',
'curvature',
)
    def __init__(self, etype: str, q: quad, id='', **kwargs):
# optional identifier (for use in global mesh)
self.id = id
# label edge and quadrature types
self.etype = etype
self.qtype = q.type
# record the number of sampled points
self.num_pts = 2 * q.n + 1
# import edgelib object of this edge type:
# assumes a file called <self.etype>.py exists in mesh/edgelib
# this file should contain definitions for _x(), _dx(), and _ddx()
e = __import__(
f'puncturedfem.mesh.edgelib.{self.etype}',
fromlist=f'mesh.edgelib.{self.etype}'
)
# compute and store points on the boundary
self.x = e._x(q.t, **kwargs)
# unweighted square norm of derivative
dx = e._dx(q.t, **kwargs)
dx2 = dx[0,:] ** 2 + dx[1,:] ** 2
# norm of derivative (with chainrule)
self.dx_norm = np.sqrt(dx2) * q.wgt
# unit tangent vector
self.unit_tangent = dx / np.sqrt(dx2)
# outward unit normal vector
self._set_unit_normal()
# get signed curvature
ddx = e._ddx(q.t, **kwargs)
self.curvature = (
ddx[0, :] * self.unit_normal[0, :] +
ddx[1, :] * self.unit_normal[1, :] ) / dx2
def duplicate(self):
return copy.deepcopy(self)
def evaluate_function(self, fun: callable, ignore_endpoint=False):
"""
Return fun(x) for each sampled point on edge
"""
if ignore_endpoint:
k = 1
else:
k = 0
y = np.zeros((self.num_pts - k,))
for i in range(self.num_pts - k):
y[i] = fun(self.x[:, i])
return y
def reverse_orientation(self) -> None:
"""
Reverse the orientation of this edge using the reparameterization
x(2 pi - t). The chain rule flips the sign of some derivative-based
        quantities.
"""
# vector quantities
self.x = np.fliplr(self.x)
self.unit_tangent = - np.fliplr(self.unit_tangent)
self.unit_normal = - np.fliplr(self.unit_normal)
# scalar quantities
self.dx_norm = np.flip(self.dx_norm)
self.curvature = - np.flip(self.curvature)
def join_points(self, a, b) -> None:
"""
Join the points a to b with this edge.
Throws an error if this edge is a closed contour.
"""
TOL = 1e-12
# check that specified endpoints are distinct
ab_norm = np.sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)
if ab_norm < TOL:
raise Exception('a and b must be distinct points')
# check that endpoints of edge are distinct
x = self.x[:, 0]
y = self.x[:, self.num_pts-1]
xy_norm = np.sqrt((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2)
if xy_norm < TOL:
raise Exception('edge must have distinct endpoints')
# anchor starting point to origin
self.translate(-x)
# rotate
theta = - np.arctan2(y[1] - x[1], y[0] - x[0])
theta += np.arctan2(b[1] - a[1], b[0] - a[0])
theta *= 180 / np.pi
self.rotate(theta)
# rescale
alpha = ab_norm / xy_norm
self.dialate(alpha)
# anchor at point a
self.translate(a)
def translate(self, a) -> None:
"""
Translate by (a[0], a[1])
"""
self.x[0,:] += a[0]
self.x[1,:] += a[1]
def dialate(self, alpha: float) -> None:
"""
Dialate by a scalar alpha
"""
if np.abs(alpha) < 1e-12:
raise Exception('Dialation factor alpha must be nonzero')
self.x *= alpha
self.dx_norm *= np.abs(alpha)
self.curvature *= 1 / alpha
def rotate(self, theta: float) -> None:
"""
Rotate counterclockwise by theta (degrees)
"""
if theta % 360 == 0:
return None
c = np.cos(theta * np.pi / 180)
s = np.sin(theta * np.pi / 180)
R = np.array([ [c, -s], [s, c] ])
self.apply_orthogonal_transformation(R)
def reflect_across_x_axis(self) -> None:
"""
Reflect across the horizontal axis
"""
A = np.array([ [1, 0], [0, -1] ])
self.apply_orthogonal_transformation(A)
def reflect_across_y_axis(self) -> None:
"""
Reflect across the vertical axis
"""
A = np.array([ [-1, 0], [0, 1] ])
self.apply_orthogonal_transformation(A)
def apply_orthogonal_transformation(self, A) -> None:
"""
Transforms 2-dimensional space with the linear map
x \mapsto A * x
where A is a 2 by 2 orthogonal matrix, i.e. A^T * A = I
It is important that A is orthogonal, since the first derivative norm
as well as the curvature are invariant under such a transformation.
"""
# safety checks
TOL = 1e-12
msg = 'A must be a 2 by 2 orthogonal matrix'
if np.shape(A) != (2,2):
raise Exception(msg)
if np.linalg.norm(np.transpose(A) @ A - np.eye(2)) > TOL:
raise Exception(msg)
# apply transformation to vector quantities
self.x = A @ self.x
self.unit_tangent = A @ self.unit_tangent
self._set_unit_normal()
# determine if the sign of curvature has flipped
a = A[0,0]
b = A[0,1]
c = A[1,0]
d = A[1,1]
if np.abs(b - c) < TOL and np.abs(a + d) < TOL:
self.curvature *= -1
def _set_unit_normal(self) -> None:
self.unit_normal = np.zeros((2, self.num_pts))
self.unit_normal[0, :] = self.unit_tangent[1, :]
self.unit_normal[1, :] = - self.unit_tangent[0, :]
def __eq__(self, other) -> bool:
TOL = 1e-12
return np.linalg.norm(self.x - other.x) < TOL
def __repr__(self) -> str:
msg = ''
msg += f'id: {self.id}\n'
msg += f'etype: {self.etype}\n'
msg += f'qtype: {self.qtype}\n'
msg += f'num_pts: {self.num_pts}\n'
return msg
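# A hypothetical usage sketch; the 'kress' quadrature label is an assumption
# inferred from references elsewhere in this repo (e.g. nystrom/single_layer.py).
if __name__ == '__main__':
    q = quad(qtype='kress', n=32)     # 2n + 1 = 65 sampled points
    e = edge(etype='bean', q=q)       # curve from mesh/edgelib/bean.py
    e.translate([1.0, 0.0])           # rigid motions leave dx_norm intact
    e.rotate(90.0)                    # counterclockwise, in degrees
    print(e)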
PuncturedFEM | PuncturedFEM-main/puncturedfem/mesh/contour.py
import numpy as np
from matplotlib import path
from . import edge
class contour:
__slots__ = (
'id',
'edge_list',
'num_edges',
'num_pts',
'vert_idx',
)
def __init__(self, edge_list: list[edge.edge], id='') -> None:
# optional identifier
self.id = id
# save edge list
self.edge_list = edge_list
# record number of edges
self.num_edges = len(self.edge_list)
# record the index of the starting point of each edge
self._find_vert_idx_and_num_pts()
### methods for evaluating traces
def evaluate_function_on_contour(self, fun):
"""
Return fun(x) for each sampled point on contour
"""
y = np.zeros((self.num_pts,))
for j in range(self.num_edges):
y[self.vert_idx[j]:self.vert_idx[j+1]] \
= self.edge_list[j].evaluate_function(fun, ignore_endpoint=True)
return y
def get_boundary_points(self):
"""
Returns the x1 and x2 coordinates of the boundary points
"""
x1_fun = lambda x: x[0]
x1 = self.evaluate_function_on_contour(x1_fun)
x2_fun = lambda x: x[1]
x2 = self.evaluate_function_on_contour(x2_fun)
return x1, x2
def dot_with_tangent(self, v1, v2):
"""
Returns the dot product (v1, v2) * unit_tangent
"""
y = np.zeros((self.num_pts,))
for j in range(self.num_edges):
y[self.vert_idx[j]:self.vert_idx[j+1]] \
= v1[self.vert_idx[j]:self.vert_idx[j+1]] \
* self.edge_list[j].unit_tangent[0, :-1] \
+ v2[self.vert_idx[j]:self.vert_idx[j+1]] \
* self.edge_list[j].unit_tangent[1, :-1]
return y
def dot_with_normal(self, v1, v2):
"""
Returns the dot product (v1, v2) * unit_normal
"""
y = np.zeros((self.num_pts,))
for j in range(self.num_edges):
y[self.vert_idx[j]:self.vert_idx[j+1]] \
= v1[self.vert_idx[j]:self.vert_idx[j+1]] \
* self.edge_list[j].unit_normal[0, :-1] \
+ v2[self.vert_idx[j]:self.vert_idx[j+1]] \
* self.edge_list[j].unit_normal[1, :-1]
return y
def multiply_by_dx_norm(self, vals):
"""
Returns f multiplied against the norm of the derivative of
the curve parameterization
"""
if len(vals) != self.num_pts:
raise Exception('vals must be same length as boundary')
y = np.zeros((self.num_pts,))
for j in range(self.num_edges):
y[self.vert_idx[j]:self.vert_idx[j+1]] \
= vals[self.vert_idx[j]:self.vert_idx[j+1]] \
* self.edge_list[j].dx_norm[:-1]
return y
def integrate_over_contour(self, vals):
y = self.multiply_by_dx_norm(vals)
return self.integrate_over_contour_preweighted(y)
def integrate_over_contour_preweighted(self, vals):
if len(vals) != self.num_pts:
raise Exception('vals must be same length as boundary')
y = np.zeros((self.num_pts,))
for i in range(self.num_edges):
h = 2 * np.pi / (self.edge_list[i].num_pts - 1)
y[self.vert_idx[i]:self.vert_idx[i + 1]] = \
h * vals[self.vert_idx[i]:self.vert_idx[i + 1]]
return np.sum(y)
def get_integrator(self):
one = lambda x: 1
A = self.evaluate_function_on_contour(one)
A = self.multiply_by_dx_norm(A)
for i in range(self.num_edges):
h = 2 * np.pi / (self.edge_list[i].num_pts - 1)
A[self.vert_idx[i]:self.vert_idx[i + 1]] *= h
return A
### methods for interior points
def is_in_interior_contour(self, x, y):
"""
Returns True if the point (x,y) lies in the interior of the
contour specified by edge_list, and returns false otherwise.
Returns false if (x,y) lies on the boundary.
If x,y are arrays of the same size, returns a boolean array
of the same size.
"""
if x.shape != y.shape:
raise Exception('x and y must have same size')
is_inside = np.zeros(x.shape, dtype=bool)
x1, x2 = self.get_boundary_points()
p = path.Path(np.array([x1, x2]).transpose())
if len(x.shape) == 1:
M = x.shape[0]
for i in range(M):
is_inside[i] = p.contains_point([x[i], y[i]])
elif len(x.shape) == 2:
M, N = x.shape
for i in range(M):
for j in range(N):
is_inside[i, j] = p.contains_point([x[i, j], y[i, j]])
return is_inside
def _find_vert_idx_and_num_pts(self):
"""
Get the index of the starting point of each edge,
and record the total number of sampled points on the boundary
"""
self.num_pts = 0
self.vert_idx = [0]
for e in self.edge_list:
self.num_pts += e.num_pts - 1
self.vert_idx.append(self.num_pts)
return None
def _get_bounding_box(self):
xmin = np.inf
xmax = -np.inf
ymin = np.inf
ymax = -np.inf
for e in self.edge_list:
xmin = np.min([xmin, np.min(e.x[0, :])])
xmax = np.max([xmax, np.max(e.x[0, :])])
ymin = np.min([ymin, np.min(e.x[1, :])])
ymax = np.max([ymax, np.max(e.x[1, :])])
return xmin, xmax, ymin, ymax
def _get_distance_to_boundary(self, x, y):
"""
Returns the minimum distance from (x,y) to a point on the boundary
"""
dist = np.inf
for e in self.edge_list:
dist2e = np.min((e.x[0, :] - x) ** 2 + (e.x[1, :] - y) ** 2)
dist = np.min([dist, dist2e])
return np.sqrt(dist)
def _get_int_pt_simple_contour(self):
"""
Returns an interior point.
Uses a brute force search. There is likely a more efficient way.
"""
# find region of interest
xmin, xmax, ymin, ymax = self._get_bounding_box()
# set minimum desired distance to the boundary
TOL = 1e-2 * np.min([xmax - xmin, ymax - ymin])
# search from M by N rectangular grid points
M = 9
N = 9
d = 0.0
while d < TOL:
# set up grid
x_coord = np.linspace(xmin, xmax, M)
y_coord = np.linspace(ymin, ymax, N)
x, y = np.meshgrid(x_coord, y_coord)
# determine which points are in the interior
is_inside = self.is_in_interior_contour(x, y)
# for each interior point in grid, compute distance to the boundary
dist = np.zeros(x.shape)
for i in range(M):
for j in range(N):
if is_inside[i, j]:
dist[i, j] = self._get_distance_to_boundary( \
x[i, j], y[i, j]
)
# pick a point farthest from the boundary
            k = np.argmax(dist)
            ii, jj = np.unravel_index(k, dist.shape)
            d = dist[ii, jj]
# if the best candidate is too close to the boundary,
# refine grid and search again
M = 4 * (M // 2) + 1
N = 4 * (N // 2) + 1
if M * N > 1_000_000:
raise Exception('Unable to locate an interior point')
int_pt = np.zeros((2,))
int_pt[0] = x[ii, jj]
int_pt[1] = y[ii, jj]
return int_pt
def __repr__(self) -> str:
msg = 'contour object\n'
msg += f'num_edges: \t\t{self.num_edges}\n'
msg += f'num_pts: \t\t{self.num_pts}\n'
return msg
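# The integration methods above are the composite trapezoid rule in the
# parameter t, with each edge covering an interval of length 2 pi. A plain
# numpy version for a single closed edge (the unit circle), recovering its
# perimeter:
if __name__ == '__main__':
    n = 64
    t = np.linspace(0, 2 * np.pi, 2 * n + 1)     # 2n + 1 points, endpoint kept
    dx_norm = np.ones(2 * n + 1)                 # |x'(t)| = 1 on the circle
    h = 2 * np.pi / (len(t) - 1)
    print(np.sum(dx_norm[:-1]) * h, 2 * np.pi)   # both ~6.2832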
PuncturedFEM | PuncturedFEM-main/puncturedfem/mesh/__init__.py | (empty)
PuncturedFEM | PuncturedFEM-main/puncturedfem/mesh/cell.py
import numpy as np
from .edge import edge
from .contour import contour
class cell(contour):
__slots__ = (
'num_holes',
'closest_vert_idx',
'contour_idx',
'hole_int_pts',
)
def __init__(self, edge_list: list[edge], id=''):
# call initialization of contour object
super().__init__(edge_list, id)
# identify closed contours
self._find_closed_contours()
# identify outer boundary
self._find_outer_boundary()
# for each point on the boundary, find the nearest vertex
# on the same contour
self._find_closest_vertex_index()
# find point in interior of each puncture automatically
self._find_hole_int_pts()
# TODO check orientation
def evaluate_function_on_boundary(self, fun):
return super().evaluate_function_on_contour(fun)
def integrate_over_boundary(self, vals):
return super().integrate_over_contour(vals)
def integrate_over_boundary_preweighted(self, vals):
return super().integrate_over_contour_preweighted(vals)
def integrate_over_specific_contour(self, vals, contour_j):
c_idx = self.contour_idx[contour_j]
c = contour(edge_list=[self.edge_list[i] for i in c_idx])
vals_on_c = np.zeros((c.num_pts,))
for i in range(c.num_edges):
vals_on_c[c.vert_idx[i]:c.vert_idx[i + 1]] = \
vals[self.vert_idx[c_idx[i]]:self.vert_idx[c_idx[i] + 1]]
return c.integrate_over_contour(vals_on_c)
def is_in_interior_cell(self, y1, y2):
c_outer_idx = self.contour_idx[0]
c_outer = contour(edge_list=[self.edge_list[i] for i in c_outer_idx])
is_inside = c_outer.is_in_interior_contour(y1, y2)
for j in range(self.num_holes):
c_idx = self.contour_idx[j + 1]
c = contour(edge_list=[self.edge_list[i] for i in c_idx])
is_in_hole = c.is_in_interior_contour(y1, y2)
is_inside = np.logical_and(is_inside, np.logical_not(is_in_hole))
return is_inside
def _find_closed_contours(self):
"""
for each edge, finds the index of which closed contour
the edge belongs to, with 0 corresponding to the outer boundary
"""
self.contour_idx = []
incidence = self._get_edge_endpoint_incidence()
is_marked_edge = np.zeros((self.num_edges,), dtype=bool)
num_marked_edges = 0
while num_marked_edges < self.num_edges:
edges_on_contour = []
starting_edge = 0
while is_marked_edge[starting_edge]:
starting_edge += 1
edges_on_contour.append(starting_edge)
is_marked_edge[starting_edge] = True
next_edge = incidence[starting_edge]
while next_edge != starting_edge:
edges_on_contour.append(next_edge)
is_marked_edge[next_edge] = True
next_edge = incidence[next_edge]
num_marked_edges += len(edges_on_contour)
self.contour_idx.append(edges_on_contour)
self.num_holes = -1 + len(self.contour_idx)
def _get_edge_endpoint_incidence(self):
"""
Returns incidence array: for each edge i, point to an edge j
whose starting point is the terminal point of edge i
edge i vertex edge j
--->--->--->--- o --->--->--->---
"""
# form distance matrix between endpoints of edges
distance = np.zeros((self.num_edges, self.num_edges))
for i in range(self.num_edges):
x = self.edge_list[i].x[:, 0]
for j in range(self.num_edges):
N = self.edge_list[j].num_pts
y = self.edge_list[j].x[:, N - 1]
distance[i, j] = np.linalg.norm(x - y)
# mark edges as incident if distance between endpoints is zero
TOL = 1e-6
incidence_mat = np.zeros(distance.shape, dtype=int)
for i in range(self.num_edges):
for j in range(self.num_edges):
if distance[i, j] < TOL:
incidence_mat[i, j] = 1
# check that each edge endpoint is incident to exactly one other edge
row_sum = np.sum(incidence_mat, axis=0)
rows_all_sum_to_one = np.linalg.norm(row_sum - 1) < TOL
col_sum = np.sum(incidence_mat, axis=1)
cols_all_sum_to_one = np.linalg.norm(col_sum - 1) < TOL
if not (rows_all_sum_to_one and cols_all_sum_to_one):
raise Exception('Edge collection must be a union of ' +
'disjoint simple closed contours')
# for each edge, return the index of the edge following it
incidence = np.zeros((self.num_edges,), dtype=int)
for i in range(self.num_edges):
j = 0
while incidence_mat[i, j] == 0:
j += 1
incidence[j] = i
return incidence
def _find_outer_boundary(self):
outer_boundary_idx = -1
for i in range(self.num_holes+1):
if outer_boundary_idx < 0:
edge_list_i = \
[self.edge_list[k] for k in self.contour_idx[i]]
ci = contour(edge_list=edge_list_i)
for j in range(i, self.num_holes+1):
edge_list_j = \
[self.edge_list[k] for k in self.contour_idx[j]]
cj = contour(edge_list=edge_list_j)
# check if cj is contained in ci
x1, x2 = cj.get_boundary_points()
is_inside = ci.is_in_interior_contour(x1, x2)
if all(is_inside):
outer_boundary_idx = i
# swap contour_idx[0] and the outer boundary index
temp = self.contour_idx[0]
self.contour_idx[0] = self.contour_idx[outer_boundary_idx]
self.contour_idx[outer_boundary_idx] = temp
def _find_closest_vertex_index(self):
# get midpoint indices
mid_idx = np.zeros((self.num_edges,), dtype=int)
for i in range(self.num_edges):
n = self.edge_list[i].num_pts // 2 # 2n points per edge
mid_idx[i] = self.vert_idx[i] + n
# on first half of an edge, the closest vertex is the starting
# point on that edge
self.closest_vert_idx = np.zeros((self.num_pts,), dtype=int)
for i in range(self.num_edges):
self.closest_vert_idx[self.vert_idx[i]:mid_idx[i]] \
= self.vert_idx[i]
# on the second half of an edge, the closest vertex is the
# starting point of the next edge on the same closed contour
for c in self.contour_idx:
m = len(c) # number of edges on this contour
for k in range(m):
i = c[k] # current edge
j = c[(k + 1) % m] # next edge on contour
n = self.edge_list[i].num_pts // 2 # 2n points per edge
self.closest_vert_idx[mid_idx[i]:(mid_idx[i] + n)] \
= self.vert_idx[j]
def _find_hole_int_pts(self):
"""
Automatically find a point in the interior of each hole
Finds a point by creating a rectangular grid of points and
eliminating those that are not in the interior. Among those
that are in the interior, a point that lies a maximum distance
from the boundary is chosen.
"""
self.hole_int_pts = np.zeros((2, self.num_holes))
for j in range(self.num_holes):
c = contour(edge_list=[
self.edge_list[i] for i in self.contour_idx[j + 1]
])
self.hole_int_pts[:, j] = c._get_int_pt_simple_contour()
def __repr__(self) -> str:
msg = 'cell object\n'
msg += f'id: {self.id}\n'
msg += f'num_edges: \t\t{self.num_edges}\n'
msg += f'num_holes: \t\t{self.num_holes}\n'
msg += f'num_pts: \t\t{self.num_pts}\n'
msg += f'contours: \t\t{self.contour_idx}\n'
if self.num_holes > 0:
msg += f'hole_int_pts (x): \t{self.hole_int_pts[0, :]}\n'
msg += f'hole_int_pts (y): \t{self.hole_int_pts[1, :]}\n'
return msg
| 6,911 | 30.561644 | 71 |
py
|
PuncturedFEM
|
PuncturedFEM-main/puncturedfem/mesh/edgelib/bean.py
|
"""
Bean shape
x(t) = (cos t + 0.65 cos(2t), 1.5 sin t)
"""
import numpy as np
def _x(t, **kwargs):
x = np.zeros((2,len(t)))
x[0,:] = np.cos(t) + 0.65 * np.cos(2 * t)
x[1,:] = 1.5 * np.sin(t)
return x
def _dx(t, **kwargs):
dx = np.zeros((2,len(t)))
dx[0,:] = - np.sin(t) - 2 * 0.65 * np.sin(2 * t)
dx[1,:] = 1.5 * np.cos(t)
return dx
def _ddx(t, **kwargs):
ddx = np.zeros((2,len(t)))
ddx[0,:] = - np.cos(t) - 4 * 0.65 * np.cos(2 * t)
ddx[1,:] = - 1.5 * np.sin(t)
return ddx
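# Quick consistency check (illustrative; not part of the original module):
# _dx should agree with a central finite difference of _x.
if __name__ == '__main__':
    t = np.linspace(0, 2 * np.pi, 257)
    h = 1e-5
    fd = (_x(t + h) - _x(t - h)) / (2 * h)  # central difference approximation
    assert np.allclose(fd, _dx(t), atol=1e-6)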
| 489 | 19.416667 | 50 |
py
|
PuncturedFEM
|
PuncturedFEM-main/puncturedfem/mesh/edgelib/circle.py
|
"""
Parameterization of the unit circle centered at the origin
"""
import numpy as np
def _x(t, **kwargs):
x = np.zeros((2,len(t)))
x[0,:] = np.cos(t)
x[1,:] = np.sin(t)
return x
def _dx(t, **kwargs):
dx = np.zeros((2,len(t)))
dx[0,:] = - np.sin(t)
dx[1,:] = np.cos(t)
return dx
def _ddx(t, **kwargs):
ddx = np.zeros((2,len(t)))
ddx[0,:] = - np.cos(t)
ddx[1,:] = - np.sin(t)
return ddx
| 437 | 18.043478 | 58 |
py
|
PuncturedFEM
|
PuncturedFEM-main/puncturedfem/mesh/edgelib/sine_wave.py
|
"""
Parameterization of sine wave joining the origin to (1,0) of the form
x = t / (2*pi)
y = a * sin( omega/2 * t ) , 0 < t < 2*pi
Include arguments for the amplitude ('amp') and the frequency ('freq').
The frequency argument must be an integer.
"""
import numpy as np
def _x(t, **kwargs):
a = kwargs['amp']
omega = kwargs['freq']
if np.abs(omega - int(omega)) > 1e-12:
raise Exception('freq of the sine wave must be an integer')
x = np.zeros((2,len(t)))
x[0,:] = t / (2*np.pi)
x[1,:] = a*np.sin(omega*t/2)
return x
def _dx(t, **kwargs):
a = kwargs['amp']
omega = kwargs['freq']
dx = np.zeros((2,len(t)))
dx[0,:] = np.ones((len(t),)) / (2*np.pi)
dx[1,:] = 0.5*a*omega*np.cos(omega*t/2)
return dx
def _ddx(t, **kwargs):
a = kwargs['amp']
omega = kwargs['freq']
ddx = np.zeros((2,len(t)))
ddx[1,:] = -0.25*a*omega*omega*np.sin(omega*t/2)
return ddx
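# Example (illustrative; not part of the original module): a wave with
# amplitude 0.1 and freq=4 joins the origin to (1, 0), since
# sin(omega * pi) = 0 for integer omega.
if __name__ == '__main__':
    t = np.linspace(0, 2 * np.pi, 65)
    pts = _x(t, amp=0.1, freq=4)
    assert np.allclose(pts[:, 0], [0.0, 0.0])   # starts at the origin
    assert np.allclose(pts[:, -1], [1.0, 0.0])  # ends at (1, 0)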
| 943 | 23.205128 | 71 |
py
|
PuncturedFEM
|
PuncturedFEM-main/puncturedfem/mesh/edgelib/circular_arc_deg.py
|
"""
Parameterization of a circular arc from (1,0) to (cos(theta0), sin(theta0))
"""
import numpy as np
def unpack(kwargs):
theta0 = kwargs['theta0']
if theta0 <= 0 or theta0 > 360:
raise ValueError('theta0 must be a nontrivial angle between '
+ '0 and 360 degrees')
omega = theta0 / 360.0
return omega
def _x(t, **kwargs):
omega = unpack(kwargs)
x = np.zeros((2,len(t)))
x[0,:] = np.cos(omega * t)
x[1,:] = np.sin(omega * t)
return x
def _dx(t, **kwargs):
omega = unpack(kwargs)
dx = np.zeros((2,len(t)))
dx[0,:] = -omega * np.sin(omega * t)
dx[1,:] = +omega * np.cos(omega * t)
return dx
def _ddx(t, **kwargs):
omega = unpack(kwargs)
ddx = np.zeros((2,len(t)))
ddx[0,:] = -omega * omega * np.cos(omega * t)
ddx[1,:] = -omega * omega * np.sin(omega * t)
return ddx
| 795 | 22.411765 | 75 |
py
|
PuncturedFEM
|
PuncturedFEM-main/puncturedfem/mesh/edgelib/teardrop.py
|
"""
Teardrop shape
x(t) = (2 sin(t/2), −β sin t), β = tan(π/(2α)), α = 3/2
"""
import numpy as np
def _x(t, **kwargs):
alpha = 3/2
beta = np.tan(0.5 * np.pi / alpha)
x = np.zeros((2,len(t)))
x[0,:] = 2 * np.sin(t / 2)
x[1,:] = -beta * np.sin(t)
return x
def _dx(t, **kwargs):
alpha = 3/2
beta = np.tan(0.5 * np.pi / alpha)
dx = np.zeros((2,len(t)))
dx[0,:] = np.cos(t / 2)
dx[1,:] = -beta * np.cos(t)
return dx
def _ddx(t, **kwargs):
alpha = 3/2
beta = np.tan(0.5 * np.pi / alpha)
ddx = np.zeros((2,len(t)))
ddx[0,:] = -0.5 * np.sin(t / 2)
ddx[1,:] = beta * np.sin(t)
return ddx
| 600 | 19.033333 | 55 |
py
|
PuncturedFEM
|
PuncturedFEM-main/puncturedfem/mesh/edgelib/ellipse.py
|
"""
Parameterization of an ellipse centered at the origin
"""
import numpy as np
def _x(t, **kwargs):
x = np.zeros((2,len(t)))
x[0,:] = kwargs['a'] * np.cos(t)
x[1,:] = kwargs['b'] * np.sin(t)
return x
def _dx(t, **kwargs):
dx = np.zeros((2,len(t)))
dx[0,:] = - kwargs['a'] * np.sin(t)
dx[1,:] = kwargs['b'] * np.cos(t)
return dx
def _ddx(t, **kwargs):
ddx = np.zeros((2,len(t)))
ddx[0,:] = - kwargs['a'] * np.cos(t)
ddx[1,:] = - kwargs['b'] * np.sin(t)
return ddx
| 516 | 21.478261 | 53 |
py
|
PuncturedFEM
|
PuncturedFEM-main/puncturedfem/mesh/edgelib/cartioid.py
|
"""
Generalized Teardrop / Cardioid
x(t) = r(t) [cos(t), sin(t)] where
r(t) = 1 + a (1 - t / pi) ^ 8, a > -1 is a fixed parameter
Note:
a = 0 gives the unit circle
-1 < a < 0 is a "generalized cardioid" with a reentrant corner
a > 0 is a "generalized teardrop"
"""
import numpy as np
def _x(t, **kwargs):
a = kwargs['a']
r = 1 + a * (1 - t / np.pi) ** 8
x = np.zeros((2,len(t)))
x[0,:] = r * np.cos(t)
x[1,:] = r * np.sin(t)
return x
def _dx(t, **kwargs):
a = kwargs['a']
r = 1 + a * (1 - t / np.pi) ** 8
dr = - (8 * a / np.pi) * (1 - t / np.pi) ** 7
dx = np.zeros((2,len(t)))
dx[0,:] = dr * np.cos(t) - r * np.sin(t)
dx[1,:] = dr * np.sin(t) + r * np.cos(t)
return dx
def _ddx(t, **kwargs):
a = kwargs['a']
r = 1 + a * (1 - t / np.pi) ** 8
dr = - (8 * a / np.pi) * (1 - t / np.pi) ** 7
ddr = (56 * a / (np.pi * np.pi))* (1 - t / np.pi) ** 6
ddx = np.zeros((2,len(t)))
ddx[0,:] = (ddr - r) * np.cos(t) - 2 * dr * np.sin(t)
ddx[1,:] = (ddr - r) * np.sin(t) + 2 * dr * np.cos(t)
return ddx
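# The three regimes in the note above can be sampled directly (illustrative;
# not part of the original module):
if __name__ == '__main__':
    t = np.linspace(0, 2 * np.pi, 129)
    for a in (-0.5, 0.0, 0.5):  # reentrant corner, unit circle, teardrop
        pts = _x(t, a=a)        # r(0) = 1 + a, r(pi) = 1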
| 1,016 | 23.804878 | 61 |
py
|
PuncturedFEM
|
PuncturedFEM-main/puncturedfem/mesh/edgelib/line.py
|
"""
Parameterization of a line joining the origin to (1,0).
"""
import numpy as np
def _x(t, **kwargs):
x = np.zeros((2,len(t)))
x[0,:] = t / (2*np.pi)
return x
def _dx(t, **kwargs):
dx = np.zeros((2,len(t)))
dx[0,:] = np.ones((len(t),)) / (2*np.pi)
return dx
def _ddx(t, **kwargs):
ddx = np.zeros((2,len(t)))
return ddx
| 356 | 17.789474 | 55 |
py
|
PuncturedFEM
|
PuncturedFEM-main/puncturedfem/mesh/edgelib/circular_arc.py
|
"""
Parameterization of a circular arc joining the origin to (1,0).
The center of the circle lies at (1/2, H). The points on the circle below
the x2 axis are discarded.
"""
import numpy as np
def unpack(kwargs):
H = kwargs['H']
R = np.sqrt(0.25 + H ** 2)
t0 = np.arcsin(H / R)
omega = -0.5 - t0 / np.pi
return H, R, t0, omega
def _x(t, **kwargs):
H, R, t0, omega = unpack(kwargs)
theta = t0 + np.pi + omega * t
x = np.zeros((2,len(t)))
x[0,:] = 0.5 + R * np.cos(theta)
x[1,:] = H + R * np.sin(theta)
return x
def _dx(t, **kwargs):
H, R, t0, omega = unpack(kwargs)
theta = t0 + np.pi + omega * t
dx = np.zeros((2,len(t)))
dx[0,:] = -omega * R * np.sin(theta)
dx[1,:] = +omega * R * np.cos(theta)
return dx
def _ddx(t, **kwargs):
H, R, t0, omega = unpack(kwargs)
theta = t0 + np.pi + omega * t
ddx = np.zeros((2,len(t)))
ddx[0,:] = -omega * omega * R * np.cos(theta)
ddx[1,:] = -omega * omega * R * np.sin(theta)
return ddx
| 951 | 24.052632 | 73 |
py
|
PuncturedFEM
|
PuncturedFEM-main/puncturedfem/mesh/quad/quad.py
|
import numpy as np
class quad:
"""
quad: 1-dimensional quadrature object
Attributes:
type: str = label for quadrature variant
n: int = interval sampled at 2*n+1 points, including both endpoints
h: float = pi / n sample spacing in tau
t: array (len=2*n+1) of sampled parameter between 0 and 2*pi
wgt: array (len=2*n+1) of values of lambda'(tau)
Comment:
Defaults to the trapezoid rule with n = 16.
Kress parameter defaults to p = 7.
"""
__slots__ = (
'type',
'n',
'h',
't',
'wgt'
)
def __init__(self, qtype: str='trap', n: int=16, p: int=7):
self.type = qtype
self.n = n
self.h = np.pi / n
self.t = np.linspace(0, 2 * np.pi, 2 * n + 1)
if self.type == 'kress':
self.kress(p)
elif self.type == 'mart' or self.type == 'martensen':
self.martensen()
else:
self.trap()
def __repr__(self) -> str:
""""
Print method
"""
msg = f"quad object \n\ttype\t{self.type} \n\tn\t{self.n}"
return msg
def trap(self):
"""
Trapezoid rule (default)
Technically, this defines a left-hand sum. But in our context,
all functions are periodic, since we are parameterizing closed
contours.
"""
self.wgt = np.ones((2 * self.n + 1,))
return None
def kress(self, p: int):
"""
Kress quadrature
Used to parameterize an edge that terminates at corners.
For a complete description, see:
R. Kress, A Nyström method for boundary integral equations in domains
with corners, Numer. Math., 58 (1990), pp. 145-161.
"""
if p < 2:
raise Exception(
"Kress parameter p must be an integer at least 2"
)
self.type += f'_{p}'
s = self.t / np.pi - 1
s2 = s * s
c = (0.5 - 1 / p) * s * s2 + s / p + 0.5
cp = c ** p
denom = cp + (1 - c) ** p
self.t = (2 * np.pi) * cp / denom
self.wgt = ( 3 * (p - 2) * s2 + 2 ) \
* ( c * (1 - c) ) ** (p-1) \
/ denom ** 2
return None
def martensen(self):
"""
Martensen quadrature
E. Martensen, Über eine Methode zum räumlichen Neumannschen Problem
mit einer Anwendung für torusartige Berandungen, Acta Math., 109
(1963), pp. 75-135.
"""
self.wgt = np.zeros((2 * self.n + 1,))
for m in range(1, self.n + 1):
self.wgt += np.cos(m * self.t) / m
self.wgt *= 0.5 / self.n
self.t = 2 * np.sin(self.t / 2)
self.t *= self.t
return None
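# Example (illustrative; not part of the original module): the default
# trapezoid rule integrates smooth periodic functions with spectral accuracy,
# e.g. the integral of exp(sin t) over [0, 2*pi], which equals 2*pi*I_0(1).
if __name__ == '__main__':
    q = quad(qtype='trap', n=16)
    vals = np.exp(np.sin(q.t[:-1]))           # drop the duplicated endpoint
    approx = q.h * np.sum(q.wgt[:-1] * vals)  # h = pi/n; wgt = 1 for 'trap'
    exact = 2 * np.pi * np.i0(1.0)            # 2*pi*I_0(1) ~ 7.9549265
    assert abs(approx - exact) < 1e-12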
| 2,844 | 25.100917 | 77 |
py
|
PuncturedFEM
|
PuncturedFEM-main/puncturedfem/mesh/quad/__init__.py
| 0 | 0 | 0 |
py
|
|
cypress
|
cypress-master/scripts/plot_synfire.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Cypress -- C++ Spiking Neural Network Simulation Framework
# Copyright (C) 2016 Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import math
import matplotlib
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import sys
if len(sys.argv) != 4:
print "Usage: " + sys.argv[0] + " <SYNFIRE_INPUT> <SYNFIRE_RS> <SYNFIRE_FS>"
sys.exit(1)
pop_size = 8
def cm2inch(value):
return value / 2.54
def load_spiketrain(fn, delimiter=","):
res = []
with open(fn) as f:
for s in f:
if (len(s) > 1):
res.append(map(float, s[:-1].split(delimiter)))
else:
res.append([])
return res
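# File layout, as read by load_spiketrain above: one line per neuron, each
# holding that neuron's comma-separated spike times; an empty line denotes a
# neuron that never fired.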
def plot_spiketrain(train, ax, offs = 0, color="k", bg_color=None):
for (i, times) in enumerate(train):
grp = i // pop_size
y = grp * pop_size * 2 + i % pop_size + offs
ax.plot(times, [y] * len(times), '.', color=color, markersize=2.5)
if bg_color:
ax.add_patch(mpatches.Rectangle((0.0, y // pop_size * pop_size), 2000.0, pop_size, color=bg_color, linewidth=0))
input_spikes = load_spiketrain(sys.argv[1])
output_spikes_rs = load_spiketrain(sys.argv[2])
output_spikes_fs = load_spiketrain(sys.argv[3])
fig = plt.figure(figsize=(cm2inch(17), cm2inch(8)))
ax = fig.gca()
ax.yaxis.set_ticks(np.arange(0, len(output_spikes_rs) * 2 + 1, pop_size * 2))
plot_spiketrain(input_spikes, ax, -pop_size, "#000000", "#f3f3f3")
plot_spiketrain(output_spikes_rs, ax, 0, "#cc0000")
plot_spiketrain(output_spikes_fs, ax, pop_size, "#3465a4", "#d1e2f4")
ax.set_xlabel("Simulation time [ms]");
ax.set_ylabel("Neuron index");
handles = [
mlines.Line2D([], [], marker='.', color="#000000", markersize=6,
linewidth=0, label="Input spikes"),
mlines.Line2D([], [], marker='.', color="#cc0000", markersize=6,
linewidth=0, label="Excitatory pop."),
mlines.Line2D([], [], marker='.', color="#3465a4", markersize=6,
linewidth=0, label="Inhibitory pop."),
]
ax.legend(handles=handles, loc=9, bbox_to_anchor=(0.5, 1.1), numpoints=1, ncol=3)
fig.savefig("synfire_result.pdf", format='pdf',
bbox_inches='tight')
| 2,907 | 34.463415 | 124 |
py
|
cypress
|
cypress-master/scripts/plot_tuning_curves.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Cypress -- C++ Spiking Neural Network Simulation Framework
# Copyright (C) 2016 Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import math
import matplotlib
import matplotlib.pyplot as plt
import sys
if len(sys.argv) < 2:
print "Usage: " + sys.argv[0] + " <TUNING CURVE INPUT>"
sys.exit(1)
def cm2inch(value):
return value / 2.54
data = np.loadtxt(sys.argv[1], delimiter=",")
fig = plt.figure(figsize=(cm2inch(7), cm2inch(7 )))
ax = fig.gca()
n_pop = data.shape[1] - 1
for i in xrange(1, n_pop + 1):
color = np.array([0.75, 0.0, 0.0] if i % 2 == 0 else [0.0, 0.25, 0.75])
color = (np.array([1.0, 1.0, 1.0]) - color) * i * 0.75 / float(n_pop) + color
ax.plot(data[:, 0], data[:, i], '-', color=color)
ax.set_xlabel("Normalised input spike rate")
ax.set_ylabel("Normalised output spike rate")
ax.set_xlim(0.0, 1.0)
fig.savefig("tuning_curves.pdf", format='pdf',
bbox_inches='tight')
| 1,606 | 31.795918 | 81 |
py
|
cypress
|
cypress-master/scripts/decode_function.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Cypress -- C++ Spiking Neural Network Simulation Framework
# Copyright (C) 2016 Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from numpy.linalg import inv
import math
import matplotlib
import matplotlib.pyplot as plt
import sys
if len(sys.argv) < 2:
print "Usage: " + sys.argv[0] + " <TUNING CURVE INPUT>"
sys.exit(1)
def cm2inch(value):
return value / 2.54
input_file = sys.argv[1]
data = np.mat(np.loadtxt(input_file, delimiter=","))
# Fetch the design matrix Phi and the number of samples/functions
Phi = data[:, 1:]
n_samples = Phi.shape[0]
n_func = Phi.shape[1]
# Construct input and desired output vectors
X = data[:, 0]
Y = (np.sin(X * 2.0 * math.pi) + 1.0) * 0.5 # Target function
# Calculate a regularized (ridge) pseudo-inverse of Phi
lambda_ = 0.2
PhiI = inv(Phi.T * Phi + lambda_ * np.eye(n_func)) * Phi.T
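# This is ridge (Tikhonov-regularized) regression: it solves
#     min_w ||Phi*w - Y||^2 + lambda_*||w||^2,
# whose closed-form solution is w = (Phi^T Phi + lambda_*I)^(-1) Phi^T Y;
# as lambda_ -> 0 it reduces to the Moore-Penrose pseudo-inverse.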
# Calculate the weights
w = PhiI * Y
print("Maximum w: " + str(np.max(w)))
print("Minimum w: " + str(np.min(w)))
# Reconstruct the function
YRec = Phi * w
print(w)
fig = plt.figure(figsize=(cm2inch(7), cm2inch(7 )))
ax = fig.gca()
n_pop = data.shape[1] - 1
ax.plot(X, Y, ':', color=[0.25, 0.25, 0.25])
ax.plot(X, YRec, '-', color=[0.0, 0.0, 0.0])
ax.set_xlabel("Input value")
ax.set_ylabel("Function value")
ax.set_title("Decoding of $\\frac{1}2 \\cdot (\\sin(x * 2\\pi) + 1)$")
ax.set_xlim(0.0, 1.0)
fig.savefig("reconstructed_function.pdf", format='pdf',
bbox_inches='tight')
| 2,111 | 28.333333 | 72 |
py
|
cypress
|
cypress-master/scripts/plot_winner_take_all.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Cypress -- C++ Spiking Neural Network Simulation Framework
# Copyright (C) 2016 Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import math
import matplotlib
import matplotlib.lines as mlines
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import sys
if len(sys.argv) != 2:
print "Usage: " + sys.argv[0] + " <WTA_OUT>"
sys.exit(1)
def cm2inch(value):
return value / 2.54
def load_spiketrain(fn, delimiter=","):
res = []
with open(fn) as f:
for s in f:
if (len(s) > 1):
res.append(map(float, s[:-1].split(delimiter)))
else:
res.append([])
return res
def plot_spiketrain(train, ax, offs = 0, color="k"):
for (i, times) in enumerate(train):
ax.plot(times, [i] * len(times), '.', color=color, markersize=2.5)
output_spikes = load_spiketrain(sys.argv[1])
fig = plt.figure(figsize=(cm2inch(17), cm2inch(8)))
ax = fig.gca()
plot_spiketrain(output_spikes, ax)
ax.set_xlabel("Simulation time [ms]");
ax.set_ylabel("Neuron index");
fig.savefig("winner_take_all_result.png", format='png',
bbox_inches='tight', dpi=160)
| 1,837 | 29.131148 | 74 |
py
|
cypress
|
cypress-master/scripts/plot_epsp.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Cypress -- C++ Spiking Neural Network Simulation Framework
# Copyright (C) 2016 Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
import math
import matplotlib
import matplotlib.pyplot as plt
import sys
import re
if len(sys.argv) < 2:
print "Usage: " + sys.argv[0] + " <EPSP_FILES...>"
sys.exit(1)
def cm2inch(value):
return value / 2.54
print("Loading data...")
min_v = np.inf; max_v = -np.inf
data = {}
for fn in sys.argv[1:]:
m = re.search("epsp_([A-Za-z]+)_w_([0-9.]+)_g_leak_([0-9.]+)\\.csv", fn)
sim = m.group(1)
w = float(m.group(2))
g_leak = float(m.group(3))
values = np.genfromtxt(fn,delimiter=',')
max_v = max(max_v, np.max(values[:, 1]))
min_v = min(min_v, np.min(values[:, 1]))
data[(w, g_leak, sim)] = values
ws = list(np.unique(np.array(map(lambda x: x[0], data.keys()))))
g_leaks = list(np.unique(np.array(map(lambda x: x[1], data.keys()))))
print("Plotting data...")
fig = plt.figure(figsize=(cm2inch(40), cm2inch(50)))
for key, value in sorted(data.items()):
w, g_leak, sim = key
i = ws.index(w) * len(g_leaks) + g_leaks.index(g_leak) + 1
ax = fig.add_subplot(len(ws), len(g_leaks), i)
ax.plot(value[:, 0], value[:, 1])
ax.set_ylim(min_v, max_v)
ax.set_title("$w = %.3f$ $g_L = %.1f$" % (w, g_leak * 1000.0))
print("Saving to 'epsps.pdf'...")
fig.tight_layout()
fig.savefig("epsps.pdf", format='pdf',
bbox_inches='tight')
| 2,112 | 31.015152 | 76 |
py
|
cypress
|
cypress-master/resource/hexdump.py
|
#!/usr/bin/env python
import sys
while True:
b = sys.stdin.read(1)
if b != "":
sys.stdout.write("0x%02x,"%ord(b))
else:
break
| 133 | 13.888889 | 36 |
py
|
cypress
|
cypress-master/resource/backend/nmpi/broker.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Cypress -- A C++ interface to PyNN
# Copyright (C) 2016 Andreas Stöckel
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This script uploads an executable to the NMPI, executes it on the specified
# platform, collects all files generated by the executable and puts them into
# the current directory, just as if the executable had been executed locally.
from future import standard_library
standard_library.install_aliases()
from builtins import input
from builtins import oct
from builtins import str
from builtins import range
import argparse
import base64
import bz2
import logging
import json
import os
import random
import shutil
import stat
import string
import sys
import tarfile
import time
try: # Py2
from urlparse import urlparse
from urllib import urlretrieve
except ImportError: # Py3
from urllib.parse import urlparse
from urllib.request import urlretrieve
# Required as the Python code is usually concatenated into a single file and
# embedded in the Cypress C++ library.
try:
path = os.path.join(os.path.dirname(__file__),
"lib/nmpi")
if os.path.exists(os.path.join(path, "nmpi_user.py")):
sys.path.append(path)
from nmpi_user import *
except ImportError:
pass
# Setup the logger
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter("%(name)s:%(levelname)s:%(message)s"))
logger = logging.getLogger("cypress")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
#
# Parse the command line
#
parser = argparse.ArgumentParser(
description="Command line interface to the Python part of Cypress")
parser.add_argument("--executable", type=str, action="store", required=True,
help="File to be executed on the NMPI")
parser.add_argument("--platform", type=str, action="store", required=True,
help="Target platform (e.g. NM-PM1, NM-MC1, Spikey, ESS)")
parser.add_argument("--files", type=str, action="store", default=[], nargs='*',
help="List of additional files to be uploaded to the NMPI")
parser.add_argument("--base", type=str, action="store", default=os.getcwd(),
help="Base directory used when determining to which " +
"directory the files should be extracted on the NMPI")
parser.add_argument("--args", type=str, action="store", default=[], nargs='*',
help="Arguments to be passed to the executable")
parser.add_argument("--wafer", type=int, default=0,
help="Wafer for reservation")
args = parser.parse_args()
# Create a python script which contains all the specified files and extracts
# them to a directory upon execution
def tmpdirname(N):
return ''.join(random.choice(string.ascii_uppercase +
string.ascii_lowercase +
string.digits) for _ in range(N))
def file_script(filename, tar_filename, execute):
if not os.path.isfile(filename):
return ""
with open(filename, 'rb') as fd:
compressed = base64.b64encode(bz2.compress(fd.read()))
return ("extract('{}', {} , {})\n").format(
tar_filename, oct(os.stat(filename)[stat.ST_MODE]), compressed)
tmpdir = "cypress_" + tmpdirname(8)
script = """
# Automatically generated by Cypress
import base64
import bz2
import errno
import os
import shutil
import subprocess
import sys
import tarfile
# Remember which files were extracted -- we'll cleanup our traces after running
dir = os.path.realpath(os.path.join(os.getcwd(), '""" + tmpdir + """'))
files = []
# http://stackoverflow.com/questions/600268/mkdir-p-functionality-in-python
def mkdir_p(path):
try:
if (path != ""):
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def setup():
# List files in the current directory and link them into the target
# directory
for filename in os.listdir("."):
source = os.path.realpath(os.path.join(".", filename))
target = os.path.join(dir, filename)
if target == dir or os.path.exists(target):
continue
os.symlink(source, target)
# Important! Unlink file before recursively deleting subdirectory
files.append(target)
def extract(filename, mode, data):
filename = os.path.join(dir, filename)
files.append(filename)
mkdir_p(os.path.dirname(filename))
with open(filename, 'wb') as fd:
fd.write(bz2.decompress(base64.b64decode(data)))
os.chmod(filename, mode)
def run(filename, args):
old_cwd = os.getcwd()
res = 1
try:
os.chdir(dir)
with open(os.path.join(dir, '""" + tmpdir + """.stdout'), 'wb') as out, open(os.path.join(dir, '""" + tmpdir + """.stderr'), 'wb') as err:
p = subprocess.Popen([os.path.join(dir, filename)] + args,
stdout = out, stderr = err)
p.communicate()
res = p.returncode
finally:
os.chdir(old_cwd)
return res
def cleanup():
pass
# Remove extracted files -- we're only interested in newly created files
#for file in files:
# try:
# os.unlink(file)
# except:
# pass
# Create a tar.bz2 of the target folder containing all the output
tarname = os.path.basename(dir)
archive = tarname + ".tar.bz2"
with tarfile.open(archive, "w:bz2") as tar:
tar.add(dir, arcname=tarname)
# Remove the target directory
shutil.rmtree(dir)
"""
files = args.files + [args.executable]
for filename in files:
tar_filename = os.path.relpath(filename, args.base)
if(tar_filename.endswith("libBS2CYPRESS.so")):
tar_filename = "libBS2CYPRESS.so"
if (tar_filename.startswith("..")):
raise Exception(
"Base directory must be a parent directory of all specified files!")
script = script + file_script(filename,
tar_filename,
filename == args.executable)
arguments = []
for arg in args.args:
if os.path.exists(arg) and os.path.isfile(arg):
arguments.append(os.path.relpath(arg, args.base))
else:
arguments.append(arg)
script = script + "setup()\n"
script = script + ("res = run('" + os.path.relpath(filename, args.base)
+ "', " + str(arguments) + ")\n")
script = script + "cleanup()\n"
script = script + "sys.exit(res)\n"
#
# Read the NMPI client configuration
#
config = {}
config_file = os.path.expanduser(os.path.join("~", ".nmpi_config"))
if os.path.isfile(config_file):
try:
with open(config_file, 'r') as fd:
config = json.load(fd)
except Exception as e:
logger.error(e.message)
logger.warning(
"Error while parsing ~/.nmpi_config. Starting with empty configuration!")
else:
logger.warning(
"~/.nmpi_config not found. Starting with empty configuration!")
# Prompt the collab ID
if "collab_id" not in config:
config["collab_id"] = input("Collab ID: ")
# Prompt the username
if "username" not in config:
config["username"] = str(input("Username: "))
# Create the client instance
token = config["token"] if "token" in config else None
while True:
if token is None:
logger.info(
"No valid access token found or the access token has expired. Please re-enter your password to obtain a new access token.")
sys.stdout.flush()
sys.stderr.flush()
time.sleep(0.1)
# Submit the job, if this fails, explicitly query the password
try:
client = Client(username=config["username"], token=token)
config["token"] = client.token
# Save the configuration, including the current client token
with open(config_file, 'w') as fd:
json.dump(config, fd, indent=4)
hw_config = {}
if(args.wafer != 0):
hw_config = {"WAFER_MODULE" : args.wafer}
job_id = client.submit_job(
source=script,
platform=args.platform,
config=hw_config,
collab_id=config["collab_id"])
job_id = str(job_id).split("/")[-1]
logger.info(
"Created job with ID " +
str(job_id) +
", you can go to https://nmpi.hbpneuromorphic.eu/app/#/queue/"
+ str(job_id) +
" to retrieve the job results")
except:
if token is not None:
token = None
continue
else:
raise
break
# Wait until the job has switched to either the "error" or the "finished" state
status = ""
while True:
new_status = client.job_status(job_id)
if new_status != status:
logger.info("Job status: " + new_status)
status = new_status
if status == "error" or status == "finished":
break
time.sleep(1)
# Download the result archive
job = client.get_job(job_id)
datalist = job["output_data"]
for dataitem in datalist:
url = dataitem["url"]
(scheme, netloc, path, params, query, fragment) = urlparse(url)
archive = tmpdir + ".tar.bz2"
if archive in path:
# Download the archive containing the result data
logger.info("Downloading result...")
urlretrieve(url, archive)
# Extract the output to the temporary directory
logger.info("Extracting data...")
with tarfile.open(archive, "r:*") as tar:
members = [member for member in tar.getmembers(
) if member.name.startswith(tmpdir)]
tar.extractall(members=members)
os.unlink(archive)
# Move the content from the temporary directory to the top-level
# directory, remove the temporary directory
for filename in os.listdir(tmpdir):
src = os.path.join(tmpdir, filename)
dest = os.path.join(os.getcwd(), filename)
try:
if not os.path.isdir(src):
shutil.copy(src, dest)
except:
pass
shutil.rmtree(tmpdir)
logger.info("Done!")
break
# Output both stdout and stderr
if os.path.isfile(tmpdir + ".stderr") and os.stat(tmpdir + ".stderr").st_size > 0:
logger.info("Response stderr (" + tmpdir + ".stderr)")
with open(tmpdir + ".stderr") as f:
for s in f:
sys.stderr.write(s)
if os.path.isfile(tmpdir + ".stdout") and os.stat(tmpdir + ".stdout").st_size > 0:
logger.info("Response stdout (" + tmpdir + ".stdout)")
with open(tmpdir + ".stdout") as f:
for s in f:
sys.stdout.write(s)
# Exit with the correct status
sys.exit(0 if status == "finished" else 1)
| 11,406 | 31.873199 | 148 |
py
|
GenomicsDB
|
GenomicsDB-master/tests/run.py
|
#!/usr/bin/env python
#The MIT License (MIT)
#Copyright (c) 2016-2017 Intel Corporation
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import tempfile
import subprocess
import hashlib
import os
import sys
import shutil
from collections import OrderedDict
import jsondiff
query_json_template_string="""
{
"workspace" : "",
"array_name" : "",
"vcf_header_filename" : ["inputs/template_vcf_header.vcf"],
"query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 10000000000
}]
}],
"query_row_ranges": [{
"range_list": [{
"low": 0,
"high": 3
}]
}],
"reference_genome" : "inputs/chr1_10MB.fasta.gz",
"attributes" : [ "REF", "ALT", "BaseQRankSum", "MQ", "RAW_MQ", "MQ0", "ClippingRankSum", "MQRankSum", "ReadPosRankSum", "DP", "GT", "GQ", "SB", "AD", "PL", "DP_FORMAT", "MIN_DP", "PID", "PGT" ]
}"""
vcf_attributes_order = [ "END", "REF", "ALT", "BaseQRankSum", "ClippingRankSum", "MQRankSum", "ReadPosRankSum", "MQ", "RAW_MQ", "MQ0", "DP", "GT", "GQ", "SB", "AD", "PL", "PGT", "PID", "MIN_DP", "DP_FORMAT", "FILTER" ];
asa_vcf_attributes = [ "END", "REF", "ALT", "BaseQRankSum", "ClippingRankSum", "MQRankSum", "ReadPosRankSum", "MQ", "RAW_MQ", "MQ0", "DP", "GT", "GQ", "SB", "AD", "PL", "PGT", "PID", "MIN_DP", "DP_FORMAT", "FILTER", "AS_RAW_MQ", "AS_RAW_MQRankSum" ];
attributes_with_DS_ID = [ "REF", "ALT", "BaseQRankSum", "MQ", "RAW_MQ", "MQ0", "ClippingRankSum", "MQRankSum", "ReadPosRankSum", "DP", "GT", "GQ", "SB", "AD", "PL", "DP_FORMAT", "MIN_DP", "PID", "PGT", "DS", "ID" ];
attributes_with_PL_only = [ "PL" ]
attributes_with_MLEAC_only = [ "MLEAC" ]
default_segment_size = 40
def create_query_json(ws_dir, test_name, query_param_dict):
test_dict=json.loads(query_json_template_string);
test_dict["workspace"] = ws_dir
test_dict["array_name"] = test_name
if('query_column_ranges' in query_param_dict):
test_dict["query_column_ranges"] = query_param_dict["query_column_ranges"]
else:
test_dict['scan_full'] = True
if("vid_mapping_file" in query_param_dict):
test_dict["vid_mapping_file"] = query_param_dict["vid_mapping_file"];
if("callset_mapping_file" in query_param_dict):
test_dict["callset_mapping_file"] = query_param_dict["callset_mapping_file"];
if("attributes" in query_param_dict):
test_dict["attributes"] = query_param_dict["attributes"];
if('segment_size' in query_param_dict):
test_dict['segment_size'] = query_param_dict['segment_size'];
else:
test_dict['segment_size'] = default_segment_size;
if('produce_GT_field' in query_param_dict):
test_dict['produce_GT_field'] = query_param_dict['produce_GT_field'];
if('produce_FILTER_field' in query_param_dict):
test_dict['produce_FILTER_field'] = query_param_dict['produce_FILTER_field'];
if('sites_only_query' in query_param_dict):
test_dict['sites_only_query'] = query_param_dict['sites_only_query']
if('produce_GT_with_min_PL_value_for_spanning_deletions' in query_param_dict):
test_dict['produce_GT_with_min_PL_value_for_spanning_deletions'] = \
query_param_dict['produce_GT_with_min_PL_value_for_spanning_deletions']
return test_dict;
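# Illustrative usage sketch (the workspace path, array name, and range below
# are placeholders, not actual test fixtures):
def _example_query_json():
    return create_query_json('/tmp/ws', 't0_1_2',
        {'query_column_ranges': [{'range_list': [{'low': 0, 'high': 10000}]}]});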
loader_json_template_string="""
{
"row_based_partitioning" : false,
"column_partitions" : [
{"begin": 0, "workspace":"", "array_name": "" }
],
"callset_mapping_file" : "",
"vid_mapping_file" : "inputs/vid.json",
"size_per_column_partition": 700 ,
"treat_deletions_as_intervals" : true,
"vcf_header_filename": "inputs/template_vcf_header.vcf",
"reference_genome" : "inputs/chr1_10MB.fasta.gz",
"num_parallel_vcf_files" : 1,
"do_ping_pong_buffering" : false,
"offload_vcf_output_processing" : false,
"discard_vcf_index": true,
"produce_combined_vcf": true,
"produce_tiledb_array" : true,
"delete_and_create_tiledb_array" : true,
"compress_tiledb_array" : false,
"segment_size" : 1048576,
"num_cells_per_tile" : 3
}""";
def create_loader_json(ws_dir, test_name, test_params_dict):
test_dict=json.loads(loader_json_template_string);
if('column_partitions' in test_params_dict):
test_dict['column_partitions'] = test_params_dict['column_partitions'];
test_dict["column_partitions"][0]["workspace"] = ws_dir;
test_dict["column_partitions"][0]["array_name"] = test_name;
test_dict["callset_mapping_file"] = test_params_dict['callset_mapping_file'];
if('vid_mapping_file' in test_params_dict):
test_dict['vid_mapping_file'] = test_params_dict['vid_mapping_file'];
if('size_per_column_partition' in test_params_dict):
test_dict['size_per_column_partition'] = test_params_dict['size_per_column_partition'];
if('segment_size' in test_params_dict):
test_dict['segment_size'] = test_params_dict['segment_size'];
else:
test_dict['segment_size'] = default_segment_size;
return test_dict;
def get_file_content_and_md5sum(filename):
with open(filename, 'rb') as fptr:
data = fptr.read();
md5sum_hash_str = str(hashlib.md5(data).hexdigest())
return (data, md5sum_hash_str);
def print_diff(golden_output, test_output):
print("=======Golden output:=======");
print(golden_output);
print("=======Test output:=======");
print(test_output);
print("=======END=======");
def modify_query_column_ranges_for_PB(test_query_dict):
if('query_column_ranges' in test_query_dict):
original_query_column_ranges = test_query_dict['query_column_ranges']
new_query_column_ranges = []
for curr_entry in original_query_column_ranges:
if(type(curr_entry) is dict and 'range_list' in curr_entry):
new_interval_list = []
for curr_interval in curr_entry['range_list']:
if(type(curr_interval) is dict and 'low' in curr_interval
and 'high' in curr_interval):
new_interval_list.append({'column_interval': { 'column_interval':
{ 'begin': curr_interval['low'], 'end': curr_interval['high'] } } })
new_entry = { 'column_or_interval_list': new_interval_list }
new_query_column_ranges.append(new_entry)
test_query_dict['query_column_ranges'] = new_query_column_ranges
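# For example, a query entry of the form
#     {'range_list': [{'low': 0, 'high': 100}]}
# is rewritten by the function above into the protobuf-style form
#     {'column_or_interval_list': [{'column_interval':
#         {'column_interval': {'begin': 0, 'end': 100}}}]}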
def cleanup_and_exit(tmpdir, exit_code):
if(exit_code == 0):
shutil.rmtree(tmpdir, ignore_errors=True)
sys.exit(exit_code);
def main():
#lcov gcda directory prefix
gcda_prefix_dir = '../';
if(len(sys.argv) < 3):
sys.stderr.write('Needs 2 arguments <build_dir> <install_dir>\n');
sys.exit(-1);
gcda_prefix_dir = sys.argv[1];
exe_path = sys.argv[2]+os.path.sep+'bin';
#Switch to tests directory
parent_dir=os.path.dirname(os.path.realpath(__file__))
os.chdir(parent_dir)
#Zero line coverage
subprocess.call('lcov --directory '+gcda_prefix_dir+' --zerocounters', shell=True);
tmpdir = tempfile.mkdtemp()
ws_dir=tmpdir+os.path.sep+'ws';
loader_tests = [
{ "name" : "t0_1_2", 'golden_output' : 'golden_outputs/t0_1_2_loading',
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_0",
"variants" : "golden_outputs/t0_1_2_variants_at_0",
"vcf" : "golden_outputs/t0_1_2_vcf_at_0",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_0",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_0",
} },
{ "query_column_ranges": [ [ 12000, 12142, 12144, 12160, 12290, 12294, 14000, 17384, 18000 ]],
"pass_through_query_json": True,
"golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_multiple_positions",
"vcf" : "golden_outputs/t0_1_2_vcf_at_multiple_positions",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_multiple_positions",
} },
{ "query_column_ranges": [ [ [0, 1000000] ] ],
"pass_through_query_json": True,
"golden_output": {
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_0",
} },
{ "query_column_ranges" : [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
"sites_only_query": True,
"golden_output": {
"vcf" : "golden_outputs/t0_1_2_vcf_sites_only_at_0",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_sites_only_at_0",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 12100,
"high": 12100
}]
}], "golden_output": { #
"calls" : "golden_outputs/t0_1_2_calls_at_12100",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 12100,
"high": 12100
},{
"low": 12141,
"high": 12141
}]
}], "golden_output": { #
"calls" : "golden_outputs/t0_1_2_calls_at_12100_12141",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 12100,
"high": 12100
},{
"low": 12141,
"high": 12141
},{
"low": 12150,
"high": 12150
}]
}], "golden_output": { #
"calls" : "golden_outputs/t0_1_2_calls_at_12100_12141_12150",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 12100,
"high": 12100
},
{
"low": 12141,
"high": 12150
}]
}], "golden_output": { #
"calls" : "golden_outputs/t0_1_2_calls_at_12100_12141_to_12150",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 12100,
"high": 12100
},
{
"low": 12141,
"high": 12150
},
{
"low": 12300,
"high": 12300
},
{
"low": 17384,
"high": 17384
}]
}], "golden_output": { #
"calls" : "golden_outputs/t0_1_2_calls_at_12100_12141_to_12150_12300_17384",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
"attributes": attributes_with_PL_only,
"golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_0_with_PL_only",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],#vid and callset jsons passed through query json
"query_without_loader": True,
"vid_mapping_file": "inputs/vid.json",
"callset_mapping_file": "inputs/callsets/t0_1_2.json",
"golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_0",
"variants" : "golden_outputs/t0_1_2_variants_at_0",
"vcf" : "golden_outputs/t0_1_2_vcf_at_0",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_0",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_0",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 12150,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_12150",
"variants" : "golden_outputs/t0_1_2_variants_at_12150",
"vcf" : "golden_outputs/t0_1_2_vcf_at_12150",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_12150",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_12150",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
"produce_FILTER_field": True, "golden_output": {
"vcf" : "golden_outputs/t0_1_2_vcf_at_0_with_FILTER",
} },
]
},
{ "name" : "java_genomicsdb_importer_from_vcfs_t0_1_2_multi_contig",
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
'chromosome_intervals': [ '1:1-12160', '1:12161-12200', '1:12201-18000' ],
"vid_mapping_file": "inputs/vid_phased_GT.json",
'generate_array_name_from_partition_bounds': True,
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 18000
}]
}],
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
"vid_mapping_file": "inputs/vid_phased_GT.json",
"golden_output": {
"java_vcf" : "golden_outputs/java_genomicsdb_importer_from_vcfs_t0_1_2_multi_contig_vcf_0_18000",
} },
{
"query_contig_interval": {
"contig": "1",
"begin": 12151,
"end": 18000
},
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
"vid_mapping_file": "inputs/vid_phased_GT.json",
"golden_output": {
"java_vcf" : "golden_outputs/java_genomicsdb_importer_from_vcfs_t0_1_2_multi_contig_vcf_12150_18000",
} }
]
},
{ "name" : "t0_1_2_csv", 'golden_output' : 'golden_outputs/t0_1_2_loading',
'callset_mapping_file': 'inputs/callsets/t0_1_2_csv.json',
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_0",
"variants" : "golden_outputs/t0_1_2_variants_at_0",
"vcf" : "golden_outputs/t0_1_2_vcf_at_0",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_0",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_0",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 12150,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_12150",
"variants" : "golden_outputs/t0_1_2_variants_at_12150",
"vcf" : "golden_outputs/t0_1_2_vcf_at_12150",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_12150",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_12150",
} }
]
},
{ "name" : "t0_overlapping", 'golden_output': 'golden_outputs/t0_overlapping',
'callset_mapping_file': 'inputs/callsets/t0_overlapping.json',
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 12202,
"high": 1000000000
}]
}], "golden_output": {
"vcf" : "golden_outputs/t0_overlapping_at_12202",
}
}
]
},
{ "name" : "t0_overlapping_at_12202", 'golden_output': 'golden_outputs/t0_overlapping_at_12202',
'callset_mapping_file': 'inputs/callsets/t0_overlapping.json',
'column_partitions': [ {"begin": 12202, "workspace":"", "array_name": "" }]
},
{ "name" : "t6_7_8", 'golden_output' : 'golden_outputs/t6_7_8_loading',
'callset_mapping_file': 'inputs/callsets/t6_7_8.json',
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t6_7_8_calls_at_0",
"variants" : "golden_outputs/t6_7_8_variants_at_0",
"vcf" : "golden_outputs/t6_7_8_vcf_at_0",
"batched_vcf": "golden_outputs/t6_7_8_vcf_at_0",
} },
{ "query_column_ranges" : [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
"sites_only_query": True, "golden_output": {
"vcf" : "golden_outputs/t6_7_8_vcf_sites_only_at_0",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 8029500,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t6_7_8_calls_at_8029500",
"variants" : "golden_outputs/t6_7_8_variants_at_8029500",
"vcf" : "golden_outputs/t6_7_8_vcf_at_8029500",
"batched_vcf": "golden_outputs/t6_7_8_vcf_at_8029500",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 8029500,
"high": 8029500
}]
}], "golden_output": {
"vcf" : "golden_outputs/t6_7_8_vcf_at_8029500-8029500",
} }
]
},
{ "name" : "java_genomicsdb_importer_from_vcfs_t6_7_8_multi_contig",
'callset_mapping_file': 'inputs/callsets/t6_7_8.json',
'chromosome_intervals': [ '1:1-8029500','1:8029501-8029501', '1:8029502-10000000' ],
"vid_mapping_file": "inputs/vid_phased_GT.json",
'generate_array_name_from_partition_bounds': True,
"query_params": [
{ 'callset_mapping_file': 'inputs/callsets/t6_7_8.json',
"vid_mapping_file": "inputs/vid_phased_GT.json",
"golden_output": {
"java_vcf" : "golden_outputs/java_t6_7_8_vcf_at_0",
} },
{
"query_contig_interval": {
"contig": "1",
"begin": 8029501,
"end": 8029510
},
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
"vid_mapping_file": "inputs/vid_phased_GT.json",
"golden_output": {
"java_vcf" : "golden_outputs/java_t6_7_8_vcf_at_8029500",
} },
{
"query_contig_interval": {
"contig": "1",
"begin": 8029502,
"end": 8029502
},
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
"vid_mapping_file": "inputs/vid_phased_GT.json",
"golden_output": {
"java_vcf" : "golden_outputs/java_t6_7_8_vcf_at_8029501",
} }
]
},
{ "name" : "java_t0_1_2", 'golden_output' : 'golden_outputs/t0_1_2_loading',
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
"vid_mapping_file": "inputs/vid_phased_GT.json",
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_0_phased_GT",
"variants" : "golden_outputs/t0_1_2_variants_at_0_phased_GT",
"vcf" : "golden_outputs/t0_1_2_vcf_at_0",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_0",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_0",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 12150,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_12150_phased_GT",
"variants" : "golden_outputs/t0_1_2_variants_at_12150_phased_GT",
"vcf" : "golden_outputs/t0_1_2_vcf_at_12150",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_12150",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_12150",
} }
]
},
{ "name" : "java_buffer_stream_t0_1_2", 'golden_output' : 'golden_outputs/t0_1_2_loading',
'callset_mapping_file': 'inputs/callsets/t0_1_2_buffer.json',
'stream_name_to_filename_mapping': 'inputs/callsets/t0_1_2_buffer_mapping.json',
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_0",
"variants" : "golden_outputs/t0_1_2_variants_at_0",
"vcf" : "golden_outputs/t0_1_2_vcf_at_0",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_0",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_0",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 12150,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_12150",
"variants" : "golden_outputs/t0_1_2_variants_at_12150",
"vcf" : "golden_outputs/t0_1_2_vcf_at_12150",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_12150",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_12150",
} }
]
},
{ "name" : "java_buffer_stream_multi_contig_t0_1_2", 'golden_output' : 'golden_outputs/t0_1_2_loading',
'callset_mapping_file': 'inputs/callsets/t0_1_2_buffer.json',
'stream_name_to_filename_mapping': 'inputs/callsets/t0_1_2_buffer_mapping.json',
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_0",
"variants" : "golden_outputs/t0_1_2_variants_at_0",
"vcf" : "golden_outputs/t0_1_2_vcf_at_0",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_0",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_0",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 12150,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t0_1_2_calls_at_12150",
"variants" : "golden_outputs/t0_1_2_variants_at_12150",
"vcf" : "golden_outputs/t0_1_2_vcf_at_12150",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_12150",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_12150",
} }
]
},
{ "name" : "test_new_fields", 'golden_output' : 'golden_outputs/t6_7_8_new_field_gatk.vcf',
'callset_mapping_file': 'inputs/callsets/t6_7_8.json',
'vid_mapping_file': 'inputs/vid_MLEAC_MLEAF.json',
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
"attributes" : attributes_with_MLEAC_only, "golden_output": {
"calls" : "golden_outputs/test_new_fields_MLEAC_only.json",
} },
]
},
{ "name" : "test_info_combine_ops0", 'golden_output' : 'golden_outputs/info_ops0.vcf',
'callset_mapping_file': 'inputs/callsets/info_ops.json',
'vid_mapping_file': 'inputs/vid_info_ops0.json'
},
{ "name" : "test_info_combine_ops1", 'golden_output' : 'golden_outputs/info_ops1.vcf',
'callset_mapping_file': 'inputs/callsets/info_ops.json',
'vid_mapping_file': 'inputs/vid_info_ops1.json'
},
{ "name" : "java_genomicsdb_importer_from_vcfs_t0_1_2",
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
'chromosome_intervals': [ '1:1-100000000' ],
"vid_mapping_file": "inputs/vid_phased_GT.json",
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
"vid_mapping_file": "inputs/vid_phased_GT.json",
"golden_output": {
"vcf" : "golden_outputs/t0_1_2_vcf_at_0",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_0",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_0",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 12150,
"high": 1000000000
}]
}],
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
"vid_mapping_file": "inputs/vid_phased_GT.json",
"golden_output": {
"vcf" : "golden_outputs/t0_1_2_vcf_at_12150",
"batched_vcf": "golden_outputs/t0_1_2_vcf_at_12150",
"java_vcf" : "golden_outputs/java_t0_1_2_vcf_at_12150",
} }
]
},
{ "name" : "java_genomicsdb_importer_from_vcfs_t6_7_8",
'callset_mapping_file': 'inputs/callsets/t6_7_8.json',
'chromosome_intervals': [ '1:1-100000000' ],
"vid_mapping_file": "inputs/vid_phased_GT.json",
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
"vid_mapping_file": "inputs/vid_phased_GT.json",
'callset_mapping_file': 'inputs/callsets/t6_7_8.json',
"golden_output": {
"calls" : "golden_outputs/t6_7_8_calls_at_0_phased_GT",
"variants" : "golden_outputs/t6_7_8_variants_at_0_phased_GT",
"vcf" : "golden_outputs/t6_7_8_vcf_at_0",
"batched_vcf": "golden_outputs/t6_7_8_vcf_at_0",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 8029500,
"high": 1000000000
}]
}],
"vid_mapping_file": "inputs/vid_phased_GT.json",
'callset_mapping_file': 'inputs/callsets/t6_7_8.json',
"golden_output": {
"calls" : "golden_outputs/t6_7_8_calls_at_8029500_phased_GT",
"variants" : "golden_outputs/t6_7_8_variants_at_8029500_phased_GT",
"vcf" : "golden_outputs/t6_7_8_vcf_at_8029500",
"batched_vcf": "golden_outputs/t6_7_8_vcf_at_8029500",
} }
]
},
{ "name" : "t0_1_2_combined", 'golden_output' : 'golden_outputs/t0_1_2_combined',
'callset_mapping_file': 'inputs/callsets/t0_1_2_combined.json',
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}], "golden_output": {
"vcf" : "golden_outputs/t0_1_2_combined",
"batched_vcf": "golden_outputs/t0_1_2_combined",
} },
]
},
{ "name" : "test_flag_field", 'golden_output' : 'golden_outputs/t0_1_2_DS_ID_vcf_at_0',
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
'vid_mapping_file': 'inputs/vid_DS_ID.json',
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
"attributes": attributes_with_DS_ID, "golden_output": {
"calls" : "golden_outputs/t0_1_2_DS_ID_calls_at_0",
"variants" : "golden_outputs/t0_1_2_DS_ID_variants_at_0",
} },
]
},
{ "name" : "java_genomicsdb_importer_from_vcfs_t0_1_2_with_DS_ID",
'vid_mapping_file': 'inputs/vid_DS_ID_phased_GT.json',
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
'chromosome_intervals': [ '1:1-100000000' ],
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
'vid_mapping_file': 'inputs/vid_DS_ID_phased_GT.json',
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
"attributes": attributes_with_DS_ID, "golden_output": {
"calls" : "golden_outputs/t0_1_2_DS_ID_calls_at_0_phased_GT",
"variants" : "golden_outputs/t0_1_2_DS_ID_variants_at_0_phased_GT",
} },
]
},
{ "name" : "t0_1_2_as_array", 'golden_output' : 'golden_outputs/t0_1_2_loading',
'callset_mapping_file': 'inputs/callsets/t0_1_2_as_array.json',
"vid_mapping_file": "inputs/vid_as_array.json",
},
{ "name" : "t0_with_missing_PL_SB_fields", 'golden_output' : 'golden_outputs/t0_with_missing_PL_SB_fields_t1.vcf',
'callset_mapping_file': 'inputs/callsets/t0_with_missing_PL_SB_fields_t1.json',
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}], "golden_output": {
"calls" : "golden_outputs/t0_with_missing_PL_SB_fields_t1_calls.json",
} },
]
},
{ "name" : "t0_haploid_triploid_1_2_3_triploid_deletion",
'golden_output' : 'golden_outputs/t0_haploid_triploid_1_2_3_triploid_deletion_loading',
'callset_mapping_file': 'inputs/callsets/t0_haploid_triploid_1_2_3_triploid_deletion.json',
"vid_mapping_file": "inputs/vid_DS_ID_phased_GT.json",
'size_per_column_partition': 1200,
'segment_size': 100,
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
'callset_mapping_file': 'inputs/callsets/t0_haploid_triploid_1_2_3_triploid_deletion.json',
"vid_mapping_file": "inputs/vid_DS_ID_phased_GT.json",
'segment_size': 100,
"golden_output": {
"vcf" : "golden_outputs/t0_haploid_triploid_1_2_3_triploid_deletion_vcf",
"java_vcf" : "golden_outputs/t0_haploid_triploid_1_2_3_triploid_deletion_java_vcf",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
'callset_mapping_file': 'inputs/callsets/t0_haploid_triploid_1_2_3_triploid_deletion.json',
"vid_mapping_file": "inputs/vid_DS_ID_phased_GT.json",
'produce_GT_field': True,
'segment_size': 100,
"golden_output": {
"vcf" : "golden_outputs/t0_haploid_triploid_1_2_3_triploid_deletion_vcf_produce_GT",
"java_vcf" : "golden_outputs/t0_haploid_triploid_1_2_3_triploid_deletion_java_vcf_produce_GT",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
'callset_mapping_file': 'inputs/callsets/t0_haploid_triploid_1_2_3_triploid_deletion.json',
"vid_mapping_file": "inputs/vid_DS_ID_phased_GT.json",
'produce_GT_field': True,
'produce_GT_with_min_PL_value_for_spanning_deletions': True,
'segment_size': 100,
"golden_output": {
"vcf" : "golden_outputs/t0_haploid_triploid_1_2_3_triploid_deletion_vcf_produce_GT_for_min_value_PL",
"java_vcf" : "golden_outputs/t0_haploid_triploid_1_2_3_triploid_deletion_java_vcf_produce_GT_for_min_PL",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
'callset_mapping_file': 'inputs/callsets/t0_haploid_triploid_1_2_3_triploid_deletion.json',
"vid_mapping_file": "inputs/vid_DS_ID_phased_GT.json",
'sites_only_query': True,
'segment_size': 100,
"golden_output": {
"vcf" : "golden_outputs/t0_haploid_triploid_1_2_3_triploid_deletion_vcf_sites_only",
"java_vcf" : "golden_outputs/t0_haploid_triploid_1_2_3_triploid_deletion_java_vcf_sites_only",
} },
]
},
{ "name" : "t0_1_2_all_asa", 'golden_output' : 'golden_outputs/t0_1_2_all_asa_loading',
'callset_mapping_file': 'inputs/callsets/t0_1_2_all_asa.json',
'vid_mapping_file': 'inputs/vid_all_asa.json',
'size_per_column_partition': 3000,
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
"force_override": True,
'segment_size': 100,
"attributes": asa_vcf_attributes,
"golden_output": {
"vcf" : "golden_outputs/t0_1_2_all_asa_loading",
} },
]
},
{ "name" : "java_genomicsdb_importer_from_vcfs_t0_1_2_all_asa",
'callset_mapping_file': 'inputs/callsets/t0_1_2_all_asa.json',
'vid_mapping_file': 'inputs/vid_all_asa.json',
'chromosome_intervals': [ '1:1-100000000' ],
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
"force_override": True,
'segment_size': 100,
"attributes": asa_vcf_attributes,
"golden_output": {
"vcf" : "golden_outputs/t0_1_2_all_asa_loading",
"java_vcf" : "golden_outputs/t0_1_2_all_asa_java_query_vcf",
} },
]
},
{ "name" : "min_PL_spanning_deletion", 'golden_output' : 'golden_outputs/min_PL_spanning_deletion_load_stdout',
'callset_mapping_file': 'inputs/callsets/min_PL_spanning_deletion.json',
"vid_mapping_file": "inputs/vid_phased_GT.json",
"query_params": [
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
'produce_GT_field': True, "golden_output": {
"vcf" : "golden_outputs/min_PL_spanning_deletion_vcf_no_min_PL",
} },
{ "query_column_ranges": [{
"range_list": [{
"low": 0,
"high": 1000000000
}]
}],
'produce_GT_field': True,
'produce_GT_with_min_PL_value_for_spanning_deletions': True,
"golden_output": {
"vcf" : "golden_outputs/min_PL_spanning_deletion_vcf",
} }
]
},
];
for test_params_dict in loader_tests:
test_name = test_params_dict['name']
test_loader_dict = create_loader_json(ws_dir, test_name, test_params_dict);
if(test_name == "t0_1_2"):
test_loader_dict["compress_tiledb_array"] = True;
loader_json_filename = tmpdir+os.path.sep+test_name+'.json'
with open(loader_json_filename, 'wb') as fptr:
json.dump(test_loader_dict, fptr, indent=4, separators=(',', ': '));
fptr.close();
if(test_name == 'java_t0_1_2'):
import_cmd = 'java -ea TestGenomicsDB --load '+loader_json_filename
elif(test_name == 'java_buffer_stream_multi_contig_t0_1_2'):
import_cmd = 'java -ea TestBufferStreamGenomicsDBImporter -iterators '+loader_json_filename+' ' + \
test_params_dict['stream_name_to_filename_mapping'] + \
' 1024 0 0 100 true '
elif(test_name == 'java_buffer_stream_t0_1_2'):
import_cmd = 'java -ea TestBufferStreamGenomicsDBImporter '+loader_json_filename \
+' '+test_params_dict['stream_name_to_filename_mapping']
elif(test_name.find('java_genomicsdb_importer_from_vcfs') != -1):
arg_list = ''
for interval in test_params_dict['chromosome_intervals']:
arg_list += ' -L '+interval
arg_list += ' -w ' + ws_dir +' --use_samples_in_order ' + ' --batchsize=2 '
arg_list += ' --vidmap-output '+ tmpdir + os.path.sep + 'vid.json'
arg_list += ' --callset-output '+ tmpdir + os.path.sep + 'callsets.json'
if('generate_array_name_from_partition_bounds' not in test_params_dict or
not test_params_dict['generate_array_name_from_partition_bounds']):
arg_list += ' -A ' + test_name
with open(test_params_dict['callset_mapping_file'], 'rb') as cs_fptr:
callset_mapping_dict = json.load(cs_fptr, object_pairs_hook=OrderedDict)
for callset_name, callset_info in callset_mapping_dict['callsets'].iteritems():
arg_list += ' '+callset_info['filename'];
cs_fptr.close();
import_cmd = 'java -ea TestGenomicsDBImporterWithMergedVCFHeader --size_per_column_partition 16384 ' \
'--segment_size 10485760'+arg_list
else:
import_cmd = exe_path+os.path.sep+'vcf2tiledb '+loader_json_filename
pid = subprocess.Popen(import_cmd, shell=True, stdout=subprocess.PIPE);
stdout_string = pid.communicate()[0]
if(pid.returncode != 0):
sys.stderr.write('Loader test: '+test_name+' failed\n');
sys.stderr.write(import_cmd+'\n')
cleanup_and_exit(tmpdir, -1);
md5sum_hash_str = str(hashlib.md5(stdout_string).hexdigest())
if('golden_output' in test_params_dict):
golden_stdout, golden_md5sum = get_file_content_and_md5sum(test_params_dict['golden_output']);
if(golden_md5sum != md5sum_hash_str):
sys.stderr.write('Loader stdout mismatch for test: '+test_name+'\n');
print_diff(golden_stdout, stdout_string);
cleanup_and_exit(tmpdir, -1);
if('query_params' in test_params_dict):
for query_param_dict in test_params_dict['query_params']:
test_query_dict = create_query_json(ws_dir, test_name, query_param_dict)
if(test_name.find('java_genomicsdb_importer_from_vcfs') != -1 and
'generate_array_name_from_partition_bounds' in test_params_dict
and test_params_dict['generate_array_name_from_partition_bounds']):
if('array' in test_query_dict):
del test_query_dict['array']
if('array_name' in test_query_dict):
del test_query_dict['array_name']
if('query_column_ranges' in test_query_dict):
del test_query_dict['query_column_ranges']
test_query_dict['scan_full'] = True
query_types_list = [
('calls','--print-calls'),
('variants',''),
('vcf','--produce-Broad-GVCF -p 128'),
('batched_vcf','--produce-Broad-GVCF -p 128'),
('java_vcf', ''),
('consolidate_and_vcf', '--produce-Broad-GVCF'), #keep as the last query test
]
for query_type,cmd_line_param in query_types_list:
if('golden_output' in query_param_dict and query_type in query_param_dict['golden_output']):
if((query_type == 'vcf' or query_type == 'batched_vcf' or query_type.find('java_vcf') != -1)
and 'force_override' not in query_param_dict):
test_query_dict['attributes'] = vcf_attributes_order;
if(query_type.find('java_vcf') != -1 and 'pass_through_query_json' not in query_param_dict):
modify_query_column_ranges_for_PB(test_query_dict)
query_json_filename = tmpdir+os.path.sep+test_name+'_'+query_type+'.json'
with open(query_json_filename, 'wb') as fptr:
json.dump(test_query_dict, fptr, indent=4, separators=(',', ': '));
fptr.close();
if(query_type == 'java_vcf'):
loader_argument = loader_json_filename;
misc_args = ''
if("query_without_loader" in query_param_dict and query_param_dict["query_without_loader"]):
loader_argument = '""'
if("pass_through_query_json" in query_param_dict and query_param_dict["pass_through_query_json"]):
misc_args = "--pass_through_query_json"
if('query_contig_interval' in query_param_dict):
query_contig_interval_dict = query_param_dict['query_contig_interval']
misc_args += ('--chromosome '+query_contig_interval_dict['contig'] \
+ ' --begin %d --end %d')%(query_contig_interval_dict['begin'],
query_contig_interval_dict['end'])
query_command = 'java -ea TestGenomicsDB --query -l '+loader_argument+' '+query_json_filename \
+ ' ' + misc_args;
pid = subprocess.Popen(query_command, shell=True, stdout=subprocess.PIPE);
else:
if(query_type == 'consolidate_and_vcf'):
retcode = subprocess.call(exe_path+os.path.sep+'consolidate_tiledb_array '+ws_dir+' '+test_name,
shell=True)
if(retcode != 0):
sys.stderr.write('TileDB array consolidation failed '+ws_dir+' '+test_name+'\n');
cleanup_and_exit(tmpdir, -1);
loader_argument = ' -l '+loader_json_filename;
if("query_without_loader" in query_param_dict and query_param_dict["query_without_loader"]):
loader_argument = ''
query_command = (exe_path+os.path.sep+'gt_mpi_gather -s %d'+loader_argument
+ ' -j '
+query_json_filename+' '+cmd_line_param)%(test_query_dict['segment_size']);
pid = subprocess.Popen(query_command, shell=True, stdout=subprocess.PIPE);
stdout_string = pid.communicate()[0]
if(pid.returncode != 0):
sys.stderr.write('Command '+query_command+'\n')
sys.stderr.write('Query test: '+test_name+'-'+query_type+' failed\n');
cleanup_and_exit(tmpdir, -1);
md5sum_hash_str = str(hashlib.md5(stdout_string).hexdigest())
golden_stdout, golden_md5sum = get_file_content_and_md5sum(query_param_dict['golden_output'][query_type]);
if(golden_md5sum != md5sum_hash_str):
is_error = True;
#do JSON diff for variant and call format print
json_diff_result = None
if(query_type in set(['calls', 'variants'])):
try:
golden_stdout_dict = json.loads(golden_stdout);
test_stdout_dict = json.loads(stdout_string);
json_diff_result = jsondiff.diff(golden_stdout_dict, test_stdout_dict);
if(len(json_diff_result) == 0):
is_error = False;
except:
json_diff_result = None;
is_error = True
if(is_error):
sys.stderr.write('Mismatch in query test: '+test_name+'-'+query_type+'\n');
print_diff(golden_stdout, stdout_string);
if(json_diff_result):
print(json.dumps(json_diff_result, indent=4, separators=(',', ': ')));
cleanup_and_exit(tmpdir, -1);
shutil.rmtree(ws_dir, ignore_errors=True)
coverage_file='coverage.info'
subprocess.call('lcov --directory '+gcda_prefix_dir+' --capture --output-file '+coverage_file, shell=True);
#Remove protocol buffer generated files from the coverage information
subprocess.call("lcov --remove "+coverage_file+" '/opt*' '/usr*' 'dependencies*' '*.pb.h' '*.pb.cc' -o "+coverage_file, shell=True);
cleanup_and_exit(tmpdir, 0);
if __name__ == '__main__':
main()
| 54,278 | 52.266928 | 250 |
py
|
GenomicsDB
|
GenomicsDB-master/tests/run_spark_hdfs.py
|
#!/usr/bin/env python
#The MIT License (MIT)
#Copyright (c) 2018 University of California, Los Angeles and Intel Corporation
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import json
import tempfile
import subprocess
import hashlib
import os
import sys
import shutil
import difflib
from collections import OrderedDict
query_json_template_string="""
{
"workspace" : "",
"array" : "",
"vcf_header_filename" : ["inputs/template_vcf_header.vcf"],
"query_column_ranges" : [ [ [0, 10000000000 ] ] ],
"query_row_ranges" : [ [ [0, 3 ] ] ],
"query_block_size" : 10000,
"query_block_size_margin" : 500,
"reference_genome" : "inputs/chr1_10MB.fasta.gz",
"query_attributes" : [ "REF", "ALT", "BaseQRankSum", "MQ", "RAW_MQ", "MQ0", "ClippingRankSum", "MQRankSum", "ReadPosRankSum", "DP", "GT", "GQ", "SB", "AD", "PL", "DP_FORMAT", "MIN_DP", "PID", "PGT" ]
}"""
vcf_query_attributes_order = [ "END", "REF", "ALT", "BaseQRankSum", "ClippingRankSum", "MQRankSum", "ReadPosRankSum", "MQ", "RAW_MQ", "MQ0", "DP", "GT", "GQ", "SB", "AD", "PL", "PGT", "PID", "MIN_DP", "DP_FORMAT", "FILTER" ];
query_attributes_with_DS_ID = [ "REF", "ALT", "BaseQRankSum", "MQ", "RAW_MQ", "MQ0", "ClippingRankSum", "MQRankSum", "ReadPosRankSum", "DP", "GT", "GQ", "SB", "AD", "PL", "DP_FORMAT", "MIN_DP", "PID", "PGT", "DS", "ID" ];
query_attributes_with_PL_only = [ "PL" ]
query_attributes_with_MLEAC_only = [ "MLEAC" ]
default_segment_size = 40
def create_query_json(ws_dir, test_name, query_param_dict, test_dir):
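    # Build one query JSON from the template above: override fields supplied in
    # query_param_dict and rebase every input path (vid/callset mappings, VCF
    # headers, reference genome) onto test_dir.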
test_dict=json.loads(query_json_template_string);
test_dict["workspace"] = ws_dir
test_dict["array"] = test_name
test_dict["query_column_ranges"] = [ [ query_param_dict["query_column_ranges"] ] ]
if("vid_mapping_file" in query_param_dict):
test_dict["vid_mapping_file"] = query_param_dict["vid_mapping_file"];
if("callset_mapping_file" in query_param_dict):
test_dict["callset_mapping_file"] = query_param_dict["callset_mapping_file"];
if("query_attributes" in query_param_dict):
test_dict["query_attributes"] = query_param_dict["query_attributes"];
if('segment_size' in query_param_dict):
test_dict['segment_size'] = query_param_dict['segment_size'];
else:
test_dict['segment_size'] = default_segment_size;
if('produce_GT_field' in query_param_dict):
test_dict['produce_GT_field'] = query_param_dict['produce_GT_field'];
if('produce_FILTER_field' in query_param_dict):
test_dict['produce_FILTER_field'] = query_param_dict['produce_FILTER_field'];
if('query_block_size' in query_param_dict):
test_dict['query_block_size'] = query_param_dict['query_block_size'];
if('query_block_size_margin' in query_param_dict):
test_dict['query_block_size_margin'] = query_param_dict['query_block_size_margin'];
if('vid_mapping_file' in test_dict):
test_dict['vid_mapping_file'] = test_dir+os.path.sep+test_dict['vid_mapping_file'];
if('callset_mapping_file' in test_dict):
test_dict['callset_mapping_file'] = test_dir+os.path.sep+test_dict['callset_mapping_file'];
if('vcf_header_filename' in test_dict):
for i,val in enumerate(test_dict['vcf_header_filename']):
test_dict['vcf_header_filename'][i] = test_dir+os.path.sep+val;
if('reference_genome' in test_dict):
test_dict['reference_genome'] = test_dir+os.path.sep+test_dict['reference_genome'];
return test_dict;
loader_json_template_string="""
{
"row_based_partitioning" : false,
"column_partitions" : [
{"begin": 0, "workspace":"", "array": "" }
],
"callset_mapping_file" : "",
"vid_mapping_file" : "inputs/vid.json",
"size_per_column_partition": 700 ,
"treat_deletions_as_intervals" : true,
"vcf_header_filename": "inputs/template_vcf_header.vcf",
"reference_genome" : "inputs/chr1_10MB.fasta.gz",
"num_parallel_vcf_files" : 1,
"do_ping_pong_buffering" : false,
"offload_vcf_output_processing" : false,
"discard_vcf_index": true,
"produce_combined_vcf": true,
"produce_tiledb_array" : true,
"delete_and_create_tiledb_array" : true,
"compress_tiledb_array" : true,
"segment_size" : 1048576,
"num_cells_per_tile" : 3
}""";
def create_loader_json(ws_dir, test_name, test_params_dict, col_part, test_dir):
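    # Build a loader JSON from the template: stamp the workspace and a
    # test-specific array name into each column partition and rebase the
    # mapping/header/reference paths onto test_dir.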
test_dict=json.loads(loader_json_template_string);
test_dict['column_partitions'] = col_part;
for col_part in test_dict['column_partitions']:
col_part["workspace"] = ws_dir;
col_part["array"] = test_name+col_part["array"];
test_dict["callset_mapping_file"] = test_params_dict['callset_mapping_file'];
if('vid_mapping_file' in test_params_dict):
test_dict['vid_mapping_file'] = test_params_dict['vid_mapping_file'];
if('size_per_column_partition' in test_params_dict):
test_dict['size_per_column_partition'] = test_params_dict['size_per_column_partition'];
if('segment_size' in test_params_dict):
test_dict['segment_size'] = test_params_dict['segment_size'];
else:
test_dict['segment_size'] = default_segment_size;
test_dict['vid_mapping_file'] = test_dir+os.path.sep+test_dict['vid_mapping_file'];
test_dict['callset_mapping_file'] = test_dir+os.path.sep+test_dict['callset_mapping_file'];
test_dict['vcf_header_filename'] = test_dir+os.path.sep+test_dict['vcf_header_filename'];
test_dict['reference_genome'] = test_dir+os.path.sep+test_dict['reference_genome'];
return test_dict;
def add_hdfs_to_loader_json(test_dict, namenode):
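    # Prefix every partition workspace with the HDFS namenode URI so vcf2tiledb
    # writes the TileDB arrays into HDFS rather than the local filesystem.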
for col_part in test_dict['column_partitions']:
col_part['workspace'] = namenode+col_part['workspace'];
return test_dict;
def move_arrays_to_hdfs(ws_dir, namenode):
    # pid = subprocess.Popen('hadoop fs -rm -r '+namenode+ws_dir+'/*', shell=True, stdout=subprocess.PIPE);
    # stdout_string = pid.communicate()[0]
    # if(pid.returncode != 0):
    #     sys.stderr.write('Error deleting arrays from workspace in HDFS: '+namenode+ws_dir+'\n');
    #     sys.exit(-1);
pid = subprocess.Popen('hadoop fs -put '+ws_dir+'/* '+namenode+ws_dir, shell=True, stdout=subprocess.PIPE);
stdout_string = pid.communicate()[0]
if(pid.returncode != 0):
sys.stderr.write('Error copying array to HDFS workspace:'+namenode+ws_dir+'\n');
sys.exit(-1);
def get_file_content_and_md5sum(filename):
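    # Read a golden-output file, drop lines starting with '##' (VCF
    # meta-information headers, which can legitimately differ between runs),
    # and return the filtered text together with its md5 digest.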
with open(filename, 'rb') as fptr:
data = fptr.read();
data_list = data.splitlines(True);
data_list_filter = [k for k in data_list if not k.startswith('##')];
data_filter = "".join(data_list_filter);
md5sum_hash_str = str(hashlib.md5(data_filter).hexdigest());
fptr.close();
return (data_filter, md5sum_hash_str);
def print_diff(golden_output, test_output):
print("=======Golden output:=======");
print(golden_output);
print("=======Test output:=======");
print(test_output);
print("=======END=======");
def cleanup_and_exit(namenode, tmpdir, exit_code):
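    # Remove the local tmpdir only on success (it is kept for debugging on
    # failure); the HDFS copies (workspace and ~/.tiledb) are removed whenever
    # a namenode URI is in use, regardless of exit code.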
if(exit_code == 0):
shutil.rmtree(tmpdir, ignore_errors=True)
if("://" in namenode):
pid = subprocess.Popen('hadoop fs -rm -r '+namenode+tmpdir, shell=True, stdout=subprocess.PIPE);
pid = subprocess.Popen('hadoop fs -rm -r '+namenode+'/home/hadoop/.tiledb/', shell=True, stdout=subprocess.PIPE);
sys.exit(exit_code);
def main():
if(len(sys.argv) < 8):
sys.stderr.write('Needs 7 arguments <build_dir> <install_dir> <spark_master> <hdfs_namenode> <spark_deploy> <genomicsdb_version> <test_dir>\n');
sys.exit(-1);
exe_path = sys.argv[2]+os.path.sep+'bin';
spark_master = sys.argv[3];
namenode = sys.argv[4];
jar_dir = sys.argv[1]+os.path.sep+'target';
spark_deploy = sys.argv[5];
genomicsdb_version = sys.argv[6];
test_dir = sys.argv[7];
#Switch to tests directory
parent_dir=os.path.dirname(os.path.realpath(__file__))
os.chdir(parent_dir)
hostfile_path=parent_dir+os.path.sep+'hostfile';
template_vcf_header_path=parent_dir+os.path.sep+'inputs'+os.path.sep+'template_vcf_header.vcf';
tmpdir = tempfile.mkdtemp()
ws_dir=tmpdir+os.path.sep+'ws';
loader_tests = [
{ "name" : "t0_1_2", 'golden_output' : 'golden_outputs/t0_1_2_loading',
'callset_mapping_file': 'inputs/callsets/t0_1_2.json',
"column_partitions": [
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test0"} ],
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test1"},
{"begin": 10000, "workspace":"/tmp/ws", "array": "test2"}
],
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test3"},
{"begin": 3000, "workspace":"/tmp/ws", "array": "test4"},
{"begin": 6000, "workspace":"/tmp/ws", "array": "test5"},
{"begin": 9000, "workspace":"/tmp/ws", "array": "test6"},
{"begin": 12000, "workspace":"/tmp/ws", "array": "test7"}
]
],
"query_params": [
{ "query_column_ranges" : [12100, 12200], "golden_output": {
"spark" : "golden_outputs/spark_t0_1_2_vcf_at_12100",
} },
{ "query_column_ranges" : [0, 100000], "golden_output": {
"spark" : "golden_outputs/spark_t0_1_2_vcf_at_0",
} },
{ "query_column_ranges" : [12150, 100000], "golden_output": {
"spark" : "golden_outputs/spark_t0_1_2_vcf_at_12150",
} },
]
},
{ "name" : "t0_overlapping", 'golden_output': 'golden_outputs/t0_overlapping',
'callset_mapping_file': 'inputs/callsets/t0_overlapping.json',
"column_partitions": [
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test0"} ],
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test1"},
{"begin": 10000, "workspace":"/tmp/ws", "array": "test2"}
],
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test3"},
{"begin": 3000, "workspace":"/tmp/ws", "array": "test4"},
{"begin": 6000, "workspace":"/tmp/ws", "array": "test5"},
{"begin": 9000, "workspace":"/tmp/ws", "array": "test6"},
{"begin": 12000, "workspace":"/tmp/ws", "array": "test7"}
]
],
"query_params": [
{ "query_column_ranges" : [12202, 100000], "golden_output": {
"spark" : "golden_outputs/spark_t0_overlapping_at_12202",
}
}
]
},
{ "name" : "t6_7_8", 'golden_output' : 'golden_outputs/t6_7_8_loading',
'callset_mapping_file': 'inputs/callsets/t6_7_8.json',
"column_partitions": [
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test0"} ],
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test1"},
{"begin": 500000, "workspace":"/tmp/ws", "array": "test2"},
{"begin": 1000000, "workspace":"/tmp/ws", "array": "test3"}
],
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test4"},
{"begin": 250000, "workspace":"/tmp/ws", "array": "test5"},
{"begin": 500000, "workspace":"/tmp/ws", "array": "test6"},
{"begin": 750000, "workspace":"/tmp/ws", "array": "test7"},
{"begin": 1000000, "workspace":"/tmp/ws", "array": "test8"}
]
],
"query_params": [
{ "query_column_ranges" : [0, 10000000], "golden_output": {
"spark": "golden_outputs/spark_t6_7_8_vcf_at_0",
},
"query_block_size" : 1000000, "query_block_size_margin": 50000 },
{ "query_column_ranges" : [8029500, 10000000], "golden_output": {
"spark": "golden_outputs/spark_t6_7_8_vcf_at_8029500",
},
"query_block_size" : 100000, "query_block_size_margin": 5000 },
{ "query_column_ranges" : [8029500, 8029500], "golden_output": {
"spark" : "golden_outputs/spark_t6_7_8_vcf_at_8029500-8029500",
} }
]
},
{ "name" : "t0_1_2_combined", 'golden_output' : 'golden_outputs/t0_1_2_combined',
'callset_mapping_file': 'inputs/callsets/t0_1_2_combined.json',
"column_partitions": [
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test0"} ],
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test1"},
{"begin": 10000, "workspace":"/tmp/ws", "array": "test2"}
],
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test3"},
{"begin": 3000, "workspace":"/tmp/ws", "array": "test4"},
{"begin": 6000, "workspace":"/tmp/ws", "array": "test5"},
{"begin": 9000, "workspace":"/tmp/ws", "array": "test6"},
{"begin": 12000, "workspace":"/tmp/ws", "array": "test7"}
]
],
"query_params": [
{ "query_column_ranges" : [0, 1000000], "golden_output": {
"spark": "golden_outputs/spark_t0_1_2_combined",
},
"query_block_size" : 100000, "query_block_size_margin": 5000 },
]
},
{ "name" : "t0_haploid_triploid_1_2_3_triploid_deletion",
'golden_output' : 'golden_outputs/t0_haploid_triploid_1_2_3_triploid_deletion_loading',
'callset_mapping_file': 'inputs/callsets/t0_haploid_triploid_1_2_3_triploid_deletion.json',
"vid_mapping_file": "inputs/vid_DS_ID_phased_GT.json",
'size_per_column_partition': 1200,
'segment_size': 100,
"column_partitions": [
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test0"} ],
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test1"},
{"begin": 10000, "workspace":"/tmp/ws", "array": "test2"}
],
[ {"begin": 0, "workspace":"/tmp/ws", "array": "test3"},
{"begin": 3000, "workspace":"/tmp/ws", "array": "test4"},
{"begin": 6000, "workspace":"/tmp/ws", "array": "test5"},
{"begin": 9000, "workspace":"/tmp/ws", "array": "test6"},
{"begin": 12000, "workspace":"/tmp/ws", "array": "test7"}
]
],
"query_params": [
{ "query_column_ranges" : [0, 1000000],
'callset_mapping_file': 'inputs/callsets/t0_haploid_triploid_1_2_3_triploid_deletion.json',
"vid_mapping_file": "inputs/vid_DS_ID_phased_GT.json",
'segment_size': 100,
"golden_output": {
"spark" : "golden_outputs/spark_t0_haploid_triploid_1_2_3_triploid_deletion_java_vcf",
},
"query_block_size" : 100000, "query_block_size_margin": 5000 },
{ "query_column_ranges" : [0, 1000000],
'callset_mapping_file': 'inputs/callsets/t0_haploid_triploid_1_2_3_triploid_deletion.json',
"vid_mapping_file": "inputs/vid_DS_ID_phased_GT.json",
'produce_GT_field': True,
'segment_size': 100,
"golden_output": {
"spark" : "golden_outputs/spark_t0_haploid_triploid_1_2_3_triploid_deletion_java_vcf_produce_GT",
},
"query_block_size" : 100000, "query_block_size_margin": 5000 }
]
},
];
if("://" in namenode):
pid = subprocess.Popen('hadoop fs -mkdir -p '+namenode+'/home/hadoop/.tiledb/', shell=True, stdout=subprocess.PIPE);
stdout_string = pid.communicate()[0]
if(pid.returncode != 0):
            sys.stderr.write('Error creating hdfs:///home/hadoop/.tiledb/\n');
sys.exit(-1);
for test_params_dict in loader_tests:
test_name = test_params_dict['name']
for col_part in test_params_dict['column_partitions']:
test_loader_dict = create_loader_json(ws_dir, test_name, test_params_dict, col_part, test_dir);
if(test_name == "t0_1_2"):
test_loader_dict["compress_tiledb_array"] = True;
if("://" in namenode):
test_loader_dict = add_hdfs_to_loader_json(test_loader_dict, namenode);
loader_json_filename = tmpdir+os.path.sep+test_name+'-loader.json'
with open(loader_json_filename, 'wb') as fptr:
json.dump(test_loader_dict, fptr, indent=4, separators=(',', ': '));
fptr.close();
# invoke vcf2tiledb -r <rank> where <rank> goes from 0 to num partitions
# otherwise this only loads the first partition
for i in range(0, len(col_part)):
etl_cmd=exe_path+os.path.sep+'vcf2tiledb -r '+str(i)+' '+loader_json_filename
pid = subprocess.Popen(etl_cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE);
stdout_string, stderr_string = pid.communicate()
if(pid.returncode != 0):
sys.stderr.write('Loading failed for test: '+test_name+' rank '+str(i)+'\n');
sys.stderr.write('Loading command: '+etl_cmd+'\n');
sys.stderr.write('Loader file :'+str(test_loader_dict)+'\n');
sys.stderr.write('Loading stdout: '+stdout_string+'\n');
sys.stderr.write('Loading stderr: '+stderr_string+'\n');
cleanup_and_exit(namenode, tmpdir, -1);
with open(loader_json_filename, 'wb') as fptr:
json.dump(test_loader_dict, fptr, indent=4, separators=(',', ': '));
fptr.close();
for query_param_dict in test_params_dict['query_params']:
if("://" in namenode):
test_query_dict = create_query_json(namenode+ws_dir, test_name, query_param_dict, test_dir)
else:
test_query_dict = create_query_json(ws_dir, test_name, query_param_dict, test_dir)
test_query_dict['query_attributes'] = vcf_query_attributes_order;
query_json_filename = tmpdir+os.path.sep+test_name+'-query.json'
with open(query_json_filename, 'wb') as fptr:
json.dump(test_query_dict, fptr, indent=4, separators=(',', ': '));
fptr.close();
loader_argument = loader_json_filename;
spark_cmd = 'spark-submit --class TestGenomicsDBSparkHDFS --master '+spark_master+' --deploy-mode '+spark_deploy+' --total-executor-cores 1 --executor-memory 512M --conf "spark.yarn.executor.memoryOverhead=3700" --jars '+jar_dir+'/genomicsdb-'+genomicsdb_version+'-jar-with-dependencies.jar '+jar_dir+'/genomicsdb-'+genomicsdb_version+'-examples.jar --loader '+loader_json_filename+' --query '+query_json_filename+' --hostfile '+hostfile_path+' --template_vcf_header '+template_vcf_header_path+' --spark_master '+spark_master+' --jar_dir '+jar_dir;
pid = subprocess.Popen(spark_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE);
stdout_string, stderr_string = pid.communicate()
if(pid.returncode != 0):
sys.stderr.write('Query test: '+test_name+' with query file '+query_json_filename+' failed\n');
sys.stderr.write('Spark command was: '+spark_cmd+'\n');
sys.stderr.write('Spark stdout was: '+stdout_string+'\n');
sys.stderr.write('Spark stderr was: '+stderr_string+'\n');
cleanup_and_exit(namenode, tmpdir, -1);
stdout_list = stdout_string.splitlines(True);
stdout_list_filter = [k for k in stdout_list if not k.startswith('##')];
stdout_filter = "".join(stdout_list_filter);
md5sum_hash_str = str(hashlib.md5(stdout_filter).hexdigest())
if('golden_output' in query_param_dict and 'spark' in query_param_dict['golden_output']):
golden_stdout, golden_md5sum = get_file_content_and_md5sum(query_param_dict['golden_output']['spark']);
if(golden_md5sum != md5sum_hash_str):
sys.stdout.write('Mismatch in query test: '+test_name+' with column ranges: '+str(query_param_dict['query_column_ranges'])+' and loaded with '+str(len(col_part))+' partitions\n');
print_diff(golden_stdout, stdout_filter);
sys.stderr.write('Spark stdout was: '+stdout_string+'\n');
sys.stderr.write('Spark stderr was: '+stderr_string+'\n');
cleanup_and_exit(namenode, tmpdir, -1);
else:
sys.stdout.write('Query test: '+test_name+' with column ranges: '+str(query_param_dict['query_column_ranges'])+' and loaded with '+str(len(col_part))+' partitions passed\n');
cleanup_and_exit(namenode, tmpdir, 0);
if __name__ == '__main__':
main()
| 23,088 | 56.15099 | 564 |
py
|
GenomicsDB
|
GenomicsDB-master/docker/vcf_combiner/usr/bin/combine_vcf.py
|
#! /usr/bin/python3
# pylint: disable=missing-docstring, invalid-name, broad-except, too-many-branches, too-many-locals, line-too-long
"""
The MIT License (MIT)
Copyright (c) 2016-2017 Intel Corporation
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import os
import os.path
from datetime import datetime
from subprocess import Popen
import json
from getopt import getopt
import logging
import uuid
from collections import namedtuple
from collections import OrderedDict
import types
import vcf
Version = '0.6'
ProductName = os.path.basename(__file__)
COL_PARTITION_SIZE_UNIT = 16384
DefaultVIDFile = "/usr/share/cont-intel/vid.json"
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
##### loader config file tags
loader_cfg_t = {
"produce_combined_vcf": True,
"num_parallel_vcf_files": 1,
"callset_mapping_file" : "",
"vcf_output_format" : "z",
"vcf_output_filename" : "",
"index_output_VCF": True,
"reference_genome" : "",
"column_partitions" : [],
"do_ping_pong_buffering" : True,
"offload_vcf_output_processing" : True,
"produce_GT_field" : False,
"size_per_column_partition" : COL_PARTITION_SIZE_UNIT,
"vid_mapping_file": ''
}
cp_pos = namedtuple('pos_only', "begin, vcf_output_filename")
cp_chr = namedtuple('chromosome', 'begin, end, vcf_output_filename')
##### loader config file tags
def get_loader_cfg(**kwargs):
''' named tuple loader config '''
f = lambda x: x() if isinstance(x, types.FunctionType) else x
updated = {k : f(kwargs[k]) if k in kwargs else v for k, v in loader_cfg_t.items()}
np_LoaderCfg = namedtuple('loader_cfg', ','.join(updated.keys()))
return np_LoaderCfg(**updated)
def get_col_partition(output_fn, begin, chromosome=None, end=None):
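    # Build a single column-partition dict: a plain begin offset when no
    # chromosome is given, otherwise {chromosome: begin}/{chromosome: end}
    # bounds via the cp_chr named tuple.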
cp_nt = cp_pos(begin=begin, vcf_output_filename=output_fn) if not chromosome else \
cp_chr(begin={chromosome:begin}, end={chromosome:end}, vcf_output_filename=output_fn)
return cp_nt._asdict()
class CombineVCFException(Exception):
pass
class CombineVCF(object):
''' VCF file combiner '''
logger = logging.getLogger("CombineVCF")
brief_options = "i:o:R:c:p"
full_options = ['samples=', 'output=', 'reference=', 'callsets=', 'vid_mapping_file=', \
'produce_GT_field', 'chromosome=', 'begin=', 'end=', 'dryrun']
def __init__(self):
self.dryrun = False
self.output_file = None
self.vid_mapping_file = DefaultVIDFile
def _parse_args(self, args):
def check_chromosome():
assert begin, 'No begin position is given'
if end:
assert end >= begin, 'End position must be greater or equal to begin position'
return get_col_partition(self.output_file, begin, chromosome, end if end else None)
myopts, _ = getopt(args, self.brief_options, self.full_options)
        vcf_inputfiles = None
        reference_genome = None
        callset_mapping_file = None
        produce_GT_field = False
        chromosome = None
        begin = None
        end = None
        vid_mapping_file = DefaultVIDFile
for opt, user_input in myopts:
if opt == '-p' or opt == '--produce_GT_field':
produce_GT_field = True
elif opt == '--dryrun':
self.dryrun = True
else:
assert user_input, 'specify a value for option %s' % opt
if opt == '-i' or opt == '--samples':
file_list = user_input.split(',')
vcf_inputfiles = self.__get_inputs(file_list)
elif opt == '-o' or opt == '--output':
self.output_file = self.__check_output(user_input)
elif opt == '-R' or opt == '--reference':
assert os.path.isfile(user_input) or os.path.islink(user_input), "specify a valid reference file name"
reference_genome = user_input
elif opt == '-c' or opt == '--callsets':
assert os.path.isfile(user_input), "specify a valid callset file name"
callset_mapping_file = user_input
num_part_units = self.__check_callset(callset_mapping_file)
elif opt == '--vid_mapping_file':
assert os.path.isfile(user_input), "specify a valid vid mapping file"
vid_mapping_file = user_input
elif opt == '--chromosome':
chromosome = user_input
elif opt == '--begin':
begin = int(user_input)
elif opt == '--end':
end = int(user_input)
else:
print("WARN: unknown option %s, ignored", opt)
        if not callset_mapping_file:
            assert vcf_inputfiles, "missing input sample files (-i/--samples)"
            callset_mapping_file = "callsets_%s.json" % datetime.now().strftime("%y%m%d%H%M")
            num_part_units = self.__generate_callsets_json(vcf_inputfiles, callset_mapping_file)
assert reference_genome, "missing reference file"
assert self.output_file, "missing output file"
assert num_part_units != 0, "No valid callset/sample found in input files"
col_par_setting = check_chromosome() if chromosome \
else get_col_partition(self.output_file, begin if begin else 0)
loader_cfg = get_loader_cfg()
loader_cfg = loader_cfg._replace(reference_genome=reference_genome, \
vcf_output_filename=self.output_file, \
column_partitions=[col_par_setting], \
size_per_column_partition=int(abs(num_part_units)) * COL_PARTITION_SIZE_UNIT, \
callset_mapping_file=callset_mapping_file, \
vid_mapping_file=vid_mapping_file, \
                                         produce_GT_field=produce_GT_field)
return loader_cfg
@staticmethod
def __check_output(user_input):
if not os.path.exists(os.path.dirname(user_input)):
os.makedirs(os.path.dirname(user_input))
else:
assert not os.path.isdir(user_input), "specify a valid output file name"
return user_input
@staticmethod
def __check_callset(callset_mapping_file):
        # json.load needs a file object, so open the mapping file first
        with open(callset_mapping_file, 'r') as fd:
            cs_cfg = json.load(fd)
        return 0 - len(cs_cfg["callsets"])
def __generate_callsets_json(self, vcf_inputfiles, json_fname):
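        # Scan every sample in every input VCF, assigning each a global row
        # index; duplicate sample names get a UUID suffix so rows stay unique.
        # Returns the total callset count, used to size column partitions.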
global_callset_idx = 0
callsets_dict = OrderedDict()
for vcf_file in vcf_inputfiles:
read_mode = 'r' if vcf_file[-3:] == 'vcf' else 'rb'
with open(vcf_file, read_mode) as fd:
vcf_reader = vcf.Reader(fd)
local_callset_idx = 0
for callset_name in vcf_reader.samples:
curr_callset_info = OrderedDict()
if callset_name in callsets_dict:
ss = str(uuid.uuid4())
self.logger.warning('Duplicate callset name %s: appending _%s', callset_name, ss)
callset_name += ('_' + ss)
curr_callset_info["row_idx"] = global_callset_idx
curr_callset_info["idx_in_file"] = local_callset_idx
curr_callset_info["filename"] = vcf_file
callsets_dict[callset_name] = curr_callset_info
local_callset_idx += 1
global_callset_idx += 1
with open(json_fname, 'w') as ofd:
json.dump({'callsets' : callsets_dict}, ofd, indent=4, separators=(',', ': '))
return global_callset_idx
@staticmethod
def __is_vcf_file_list(fn):
        # a compressed or plain VCF is itself a sample file, not a list of files
        if fn[-3:] == '.gz' or fn[-4:] == '.vcf':
            return False
        with open(fn, 'r') as fd:
            for line in fd:
                line = line.strip()
                if line:
                    if line[-3:] == '.gz' or line[-4:] == '.vcf' or os.path.isfile(line):
                        return True
        return False
def __get_inputs(self, inputfiles):
if len(inputfiles) == 1 and self.__is_vcf_file_list(inputfiles[0]):
with open(inputfiles[0], 'r') as fd:
inputs = [line.strip() for line in fd if os.path.isfile(line.strip())]
else:
inputs = [line.strip() for line in inputfiles if os.path.isfile(line)]
        if not inputs:
            raise RuntimeError("No valid samples input files found")
        return inputs
def generate_loader_config(self, nt_loader):
json_fname = os.path.join(os.path.dirname(self.output_file), \
"loader_config_%s.json" % datetime.now().strftime("%y%m%d%H%M"))
with open(json_fname, 'w') as ofd:
json.dump(nt_loader._asdict(), ofd)
return json_fname
@staticmethod
def combine(loader_config):
the_exec_cmd = ['vcf2tiledb', loader_config]
pexec = Popen(the_exec_cmd, shell=False)
pexec.communicate()
return pexec.wait()
def run(self):
err = None
try:
nt_loader_cfg = self._parse_args(sys.argv[1:])
loader_cfg = self.generate_loader_config(nt_loader_cfg)
if self.dryrun:
self.logger.info("Done dryrun: generated loader_cfg at " + loader_cfg)
else:
ret_code = self.combine(loader_cfg)
if ret_code != 0:
raise CombineVCFException("ERROR: failed to combine vcf files. Error code = " + str(ret_code))
elif os.path.isfile(self.output_file):
self.logger.info("Done - combined vcf sample files into a single vcf file " + self.output_file)
else:
raise CombineVCFException("Internal error. Could not produce combined VCF file from vcf sample files")
except CombineVCFException as myex:
self.logger.exception(myex)
raise
except Exception as ex:
self.logger.exception(ex)
err = sys.exc_info()[1]
finally:
if err:
raise CombineVCFException("Failed to combining VCF files: %s" % (err))
@staticmethod
def get_my_name():
return ProductName
def test_code_runs_after_pylint():
''' a quick test for github check-in '''
print("Run a quick test ...")
x = CombineVCF().get_my_name()
assert x
assert x == ProductName
if __name__ == "__main__":
combiner = CombineVCF()
if len(sys.argv) > 1:
combiner.run()
else:
combiner.get_my_name()
| 11,402 | 41.077491 | 122 |
py
|
imagefusion-rfn-nest
|
imagefusion-rfn-nest-main/args_fusion.py
|
class args():
# training args
epochs = 2 #"number of training epochs, default is 2"
batch_size = 4 #"batch size for training, default is 4"
dataset_ir = "path of KAIST infrared images"
dataset_vi = "path of KAIST visible images"
HEIGHT = 256
WIDTH = 256
save_fusion_model = "models/train/fusionnet/"
save_loss_dir = './models/train/loss_fusionnet/'
# save_fusion_model_noshort = "models/train/fusionnet_noshort/"
# save_loss_dir_noshort = './models/train/loss_fusionnet_noshort/'
#
# save_fusion_model_onestage = "models/train/fusionnet_onestage/"
# save_loss_dir_onestage = './models/train/loss_fusionnet_onestage/'
image_size = 256 #"size of training images, default is 256 X 256"
cuda = 1 #"set it to 1 for running on GPU, 0 for CPU"
seed = 42 #"random seed for training"
    lr = 1e-4  #"learning rate, default is 1e-4"
    log_interval = 10  #"number of images after which the training loss is logged, default is 10"
resume_fusion_model = None
# nest net model
resume_nestfuse = './models/nestfuse/nestfuse_gray_1e2.model'
# resume_nestfuse = None
# fusion net(RFN) model
# fusion_model = "./models/fusionnet/3_Final_epoch_4_resConv_1e4ssimVI_feaAdd0123_05vi_35ir.model"
# fusion_model = "./models/fusionnet/3_Final_epoch_4_resConv_1e4ssimVI_feaAdd0123_05vi_35ir_nodense_in_decoder.model"
fusion_model = './models/rfn_twostage/'
| 1,363 | 33.974359 | 118 |
py
|
imagefusion-rfn-nest
|
imagefusion-rfn-nest-main/utils.py
|
import os
import random
import numpy as np
import torch
from args_fusion import args
from scipy.misc import imread, imsave, imresize
import matplotlib as mpl
from os import listdir
from os.path import join
EPSILON = 1e-5
def list_images(directory):
images = []
names = []
dir = listdir(directory)
dir.sort()
for file in dir:
name = file
        # accept common image file extensions
        if name.endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tif')):
            images.append(join(directory, file))
# name1 = name.split('.')
names.append(name)
return images, names
# load training images
def load_dataset(image_path, BATCH_SIZE, num_imgs=None):
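    # Shuffle the training image paths and trim the tail so the list divides
    # evenly into batches; returns the trimmed path list and the batch count.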
if num_imgs is None:
num_imgs = len(image_path)
original_imgs_path = image_path[:num_imgs]
# random
random.shuffle(original_imgs_path)
mod = num_imgs % BATCH_SIZE
print('BATCH SIZE %d.' % BATCH_SIZE)
print('Train images number %d.' % num_imgs)
print('Train images samples %s.' % str(num_imgs / BATCH_SIZE))
if mod > 0:
print('Train set has been trimmed %d samples...\n' % mod)
original_imgs_path = original_imgs_path[:-mod]
batches = int(len(original_imgs_path) // BATCH_SIZE)
return original_imgs_path, batches
def get_image(path, height=256, width=256, flag=False):
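    # Read one image as RGB (flag=True) or grayscale, optionally resized with
    # nearest-neighbour interpolation.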
if flag is True:
image = imread(path, mode='RGB')
else:
image = imread(path, mode='L')
if height is not None and width is not None:
image = imresize(image, [height, width], interp='nearest')
return image
# load images - test phase
def get_test_image(paths, height=None, width=None, flag=False):
if isinstance(paths, str):
paths = [paths]
images = []
for path in paths:
if flag is True:
image = imread(path, mode='RGB')
else:
image = imread(path, mode='L')
# get saliency part
if height is not None and width is not None:
image = imresize(image, [height, width], interp='nearest')
base_size = 512
h = image.shape[0]
w = image.shape[1]
c = 1
if h > base_size or w > base_size:
c = 4
if flag is True:
image = np.transpose(image, (2, 0, 1))
else:
image = np.reshape(image, [1, h, w])
images = get_img_parts(image, h, w)
else:
if flag is True:
image = np.transpose(image, (2, 0, 1))
else:
image = np.reshape(image, [1, image.shape[0], image.shape[1]])
images.append(image)
    if c == 1:
        # whole images: stack into a single batch tensor
        images = np.stack(images, axis=0)
        images = torch.from_numpy(images).float()
    # when the image was split (c == 4) the parts are torch tensors of
    # different shapes, so they are returned as a list instead of being stacked
    return images, h, w, c
def get_img_parts(image, h, w):
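    # Split a large image into four quadrants that overlap by a few pixels
    # around the centre lines; recons_fusion_images() below blends the fused
    # quadrants back together by averaging the overlaps.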
images = []
h_cen = int(np.floor(h / 2))
w_cen = int(np.floor(w / 2))
img1 = image[:, 0:h_cen + 3, 0: w_cen + 3]
img1 = np.reshape(img1, [1, img1.shape[0], img1.shape[1], img1.shape[2]])
img2 = image[:, 0:h_cen + 3, w_cen - 2: w]
img2 = np.reshape(img2, [1, img2.shape[0], img2.shape[1], img2.shape[2]])
img3 = image[:, h_cen - 2:h, 0: w_cen + 3]
img3 = np.reshape(img3, [1, img3.shape[0], img3.shape[1], img3.shape[2]])
img4 = image[:, h_cen - 2:h, w_cen - 2: w]
img4 = np.reshape(img4, [1, img4.shape[0], img4.shape[1], img4.shape[2]])
images.append(torch.from_numpy(img1).float())
images.append(torch.from_numpy(img2).float())
images.append(torch.from_numpy(img3).float())
images.append(torch.from_numpy(img4).float())
return images
def recons_fusion_images(img_lists, h, w):
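    # Reassemble four fused quadrants into a full image: overlapping pixels are
    # accumulated in img_f and divided by a per-pixel count, i.e. averaged.
    # Assumes CUDA is available, matching the rest of this test code.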
img_f_list = []
h_cen = int(np.floor(h / 2))
w_cen = int(np.floor(w / 2))
c = img_lists[0][0].shape[1]
ones_temp = torch.ones(1, c, h, w).cuda()
for i in range(len(img_lists[0])):
# img1, img2, img3, img4
img1 = img_lists[0][i]
img2 = img_lists[1][i]
img3 = img_lists[2][i]
img4 = img_lists[3][i]
img_f = torch.zeros(1, c, h, w).cuda()
count = torch.zeros(1, c, h, w).cuda()
img_f[:, :, 0:h_cen + 3, 0: w_cen + 3] += img1
count[:, :, 0:h_cen + 3, 0: w_cen + 3] += ones_temp[:, :, 0:h_cen + 3, 0: w_cen + 3]
img_f[:, :, 0:h_cen + 3, w_cen - 2: w] += img2
count[:, :, 0:h_cen + 3, w_cen - 2: w] += ones_temp[:, :, 0:h_cen + 3, w_cen - 2: w]
img_f[:, :, h_cen - 2:h, 0: w_cen + 3] += img3
count[:, :, h_cen - 2:h, 0: w_cen + 3] += ones_temp[:, :, h_cen - 2:h, 0: w_cen + 3]
img_f[:, :, h_cen - 2:h, w_cen - 2: w] += img4
count[:, :, h_cen - 2:h, w_cen - 2: w] += ones_temp[:, :, h_cen - 2:h, w_cen - 2: w]
img_f = img_f / count
img_f_list.append(img_f)
return img_f_list
def save_image_test(img_fusion, output_path):
img_fusion = img_fusion.float()
if args.cuda:
img_fusion = img_fusion.cpu().data[0].numpy()
# img_fusion = img_fusion.cpu().clamp(0, 255).data[0].numpy()
else:
img_fusion = img_fusion.clamp(0, 255).data[0].numpy()
img_fusion = (img_fusion - np.min(img_fusion)) / (np.max(img_fusion) - np.min(img_fusion) + EPSILON)
img_fusion = img_fusion * 255
img_fusion = img_fusion.transpose(1, 2, 0).astype('uint8')
# cv2.imwrite(output_path, img_fusion)
if img_fusion.shape[2] == 1:
img_fusion = img_fusion.reshape([img_fusion.shape[0], img_fusion.shape[1]])
# img_fusion = imresize(img_fusion, [h, w])
imsave(output_path, img_fusion)
def get_train_images(paths, height=256, width=256, flag=False):
if isinstance(paths, str):
paths = [paths]
images = []
for path in paths:
image = get_image(path, height, width, flag)
if flag is True:
image = np.transpose(image, (2, 0, 1))
else:
image = np.reshape(image, [1, height, width])
images.append(image)
images = np.stack(images, axis=0)
images = torch.from_numpy(images).float()
return images
| 6,275 | 33.108696 | 104 |
py
|
imagefusion-rfn-nest
|
imagefusion-rfn-nest-main/test_21pairs.py
|
# test phase
import os
import torch
from torch.autograd import Variable
from net import NestFuse_light2_nodense, Fusion_network, Fusion_strategy
import utils
from args_fusion import args
import numpy as np
def load_model(path_auto, path_fusion, fs_type, flag_img):
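    # Load the pretrained NestFuse auto-encoder and the RFN fusion network,
    # report rough model sizes (assuming 4 bytes per float32 parameter), and
    # move both to the GPU in eval mode.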
if flag_img is True:
nc = 3
else:
        nc = 1
input_nc = nc
output_nc = nc
nb_filter = [64, 112, 160, 208, 256]
nest_model = NestFuse_light2_nodense(nb_filter, input_nc, output_nc, deepsupervision=False)
nest_model.load_state_dict(torch.load(path_auto))
fusion_model = Fusion_network(nb_filter, fs_type)
fusion_model.load_state_dict(torch.load(path_fusion))
fusion_strategy = Fusion_strategy(fs_type)
para = sum([np.prod(list(p.size())) for p in nest_model.parameters()])
type_size = 4
print('Model {} : params: {:4f}M'.format(nest_model._get_name(), para * type_size / 1000 / 1000))
para = sum([np.prod(list(p.size())) for p in fusion_model.parameters()])
type_size = 4
print('Model {} : params: {:4f}M'.format(fusion_model._get_name(), para * type_size / 1000 / 1000))
nest_model.eval()
fusion_model.eval()
nest_model.cuda()
fusion_model.cuda()
return nest_model, fusion_model, fusion_strategy
def run_demo(nest_model, fusion_model, fusion_strategy, infrared_path, visible_path, output_path_root, name_ir, fs_type, use_strategy, flag_img, alpha):
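    # Fuse one infrared/visible pair. Small images (c == 1) go through
    # encoder -> RFN (or a static fusion strategy) -> decoder in one pass;
    # large images arrive as four overlapping blocks that are fused
    # independently and stitched back together.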
img_ir, h, w, c = utils.get_test_image(infrared_path, flag=flag_img) # True for rgb
img_vi, h, w, c = utils.get_test_image(visible_path, flag=flag_img)
# dim = img_ir.shape
    if c == 1:
if args.cuda:
img_ir = img_ir.cuda()
img_vi = img_vi.cuda()
img_ir = Variable(img_ir, requires_grad=False)
img_vi = Variable(img_vi, requires_grad=False)
# encoder
en_r = nest_model.encoder(img_ir)
en_v = nest_model.encoder(img_vi)
# fusion net
if use_strategy:
f = fusion_strategy(en_r, en_v)
else:
f = fusion_model(en_r, en_v)
# decoder
img_fusion_list = nest_model.decoder_eval(f)
else:
# fusion each block
img_fusion_blocks = []
for i in range(c):
# encoder
img_vi_temp = img_vi[i]
img_ir_temp = img_ir[i]
if args.cuda:
img_vi_temp = img_vi_temp.cuda()
img_ir_temp = img_ir_temp.cuda()
img_vi_temp = Variable(img_vi_temp, requires_grad=False)
img_ir_temp = Variable(img_ir_temp, requires_grad=False)
en_r = nest_model.encoder(img_ir_temp)
en_v = nest_model.encoder(img_vi_temp)
# fusion net
if use_strategy:
f = fusion_strategy(en_r, en_v)
else:
f = fusion_model(en_r, en_v)
# decoder
img_fusion_temp = nest_model.decoder_eval(f)
img_fusion_blocks.append(img_fusion_temp)
img_fusion_list = utils.recons_fusion_images(img_fusion_blocks, h, w)
# ########################### multi-outputs ##############################################
output_count = 0
for img_fusion in img_fusion_list:
file_name = 'fused_' + alpha + '_' + name_ir
output_path = output_path_root + file_name
output_count += 1
# save images
utils.save_image_test(img_fusion, output_path)
print(output_path)
def main():
# False - gray
flag_img = False
# ################# gray scale ########################################
test_path = "images/21_pairs_tno/ir/"
path_auto = args.resume_nestfuse
output_path_root = "./outputs/alpha_1e4_21/"
if os.path.exists(output_path_root) is False:
os.mkdir(output_path_root)
fs_type = 'res' # res (RFN), add, avg, max, spa, nuclear
use_strategy = False # True - static strategy; False - RFN
path_fusion_root = args.fusion_model
with torch.no_grad():
# alpha_list = [2500, 5000, 15000, 20000, 25000]
alpha_list = [700]
w_all_list = [[6.0, 3.0]]
for alpha in alpha_list:
for w_all in w_all_list:
w, w2 = w_all
temp = 'rfnnest_' + str(alpha) + '_wir_' + str(w) + '_wvi_' + str(w2)
output_path_list = 'fused_' + temp + '_21' + '_' + fs_type
output_path1 = output_path_root + output_path_list + '/'
if os.path.exists(output_path1) is False:
os.mkdir(output_path1)
output_path = output_path1
# load network
path_fusion = path_fusion_root + str(w) + '/' + 'Final_epoch_2_alpha_' + str(alpha) + '_wir_' + str(w) + '_wvi_' + str(w2) + '_ssim_vi.model'
model, fusion_model, fusion_strategy = load_model(path_auto, path_fusion, fs_type, flag_img)
imgs_paths_ir, names = utils.list_images(test_path)
num = len(imgs_paths_ir)
for i in range(num):
name_ir = names[i]
infrared_path = imgs_paths_ir[i]
visible_path = infrared_path.replace('ir/', 'vis/')
                    if 'IR' in visible_path:
                        visible_path = visible_path.replace('IR', 'VIS')
                    else:
                        visible_path = visible_path.replace('i.', 'v.')
run_demo(model, fusion_model, fusion_strategy, infrared_path, visible_path, output_path, name_ir, fs_type, use_strategy, flag_img, temp)
print('Done......')
if __name__ == '__main__':
main()
| 4,860 | 31.406667 | 152 |
py
|
imagefusion-rfn-nest
|
imagefusion-rfn-nest-main/net.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
EPSILON = 1e-10
def var(x, dim=0):
x_zero_meaned = x - x.mean(dim).expand_as(x)
return x_zero_meaned.pow(2).mean(dim)
class MultConst(nn.Module):
def forward(self, input):
return 255*input
class UpsampleReshape_eval(torch.nn.Module):
def __init__(self):
super(UpsampleReshape_eval, self).__init__()
self.up = nn.Upsample(scale_factor=2)
def forward(self, x1, x2):
x2 = self.up(x2)
shape_x1 = x1.size()
shape_x2 = x2.size()
left = 0
right = 0
top = 0
bot = 0
if shape_x1[3] != shape_x2[3]:
lef_right = shape_x1[3] - shape_x2[3]
            if lef_right % 2 == 0:
left = int(lef_right/2)
right = int(lef_right/2)
else:
left = int(lef_right / 2)
right = int(lef_right - left)
if shape_x1[2] != shape_x2[2]:
top_bot = shape_x1[2] - shape_x2[2]
            if top_bot % 2 == 0:
top = int(top_bot/2)
bot = int(top_bot/2)
else:
top = int(top_bot / 2)
bot = int(top_bot - top)
reflection_padding = [left, right, top, bot]
reflection_pad = nn.ReflectionPad2d(reflection_padding)
x2 = reflection_pad(x2)
return x2
# Convolution operation
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride, is_last=False):
super(ConvLayer, self).__init__()
reflection_padding = int(np.floor(kernel_size / 2))
self.reflection_pad = nn.ReflectionPad2d(reflection_padding)
self.conv2d = nn.Conv2d(in_channels, out_channels, kernel_size, stride)
self.dropout = nn.Dropout2d(p=0.5)
self.is_last = is_last
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
if self.is_last is False:
# out = F.normalize(out)
out = F.relu(out, inplace=True)
# out = self.dropout(out)
return out
# Dense convolution unit
class DenseConv2d(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(DenseConv2d, self).__init__()
self.dense_conv = ConvLayer(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.dense_conv(x)
out = torch.cat([x, out], 1)
return out
# Dense Block unit
# light version
class DenseBlock_light(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(DenseBlock_light, self).__init__()
# out_channels_def = 16
out_channels_def = int(in_channels / 2)
# out_channels_def = out_channels
denseblock = []
denseblock += [ConvLayer(in_channels, out_channels_def, kernel_size, stride),
ConvLayer(out_channels_def, out_channels, 1, stride)]
self.denseblock = nn.Sequential(*denseblock)
def forward(self, x):
out = self.denseblock(x)
return out
class FusionBlock_res(torch.nn.Module):
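    # Residual fusion block (RFN): an initial conv fusion of the concatenated
    # IR/visible features, plus a learned residual computed from the two
    # branches through the bottleneck below.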
def __init__(self, channels, index):
super(FusionBlock_res, self).__init__()
ws = [3, 3, 3, 3]
self.conv_fusion = ConvLayer(2*channels, channels, ws[index], 1)
self.conv_ir = ConvLayer(channels, channels, ws[index], 1)
self.conv_vi = ConvLayer(channels, channels, ws[index], 1)
block = []
block += [ConvLayer(2*channels, channels, 1, 1),
ConvLayer(channels, channels, ws[index], 1),
ConvLayer(channels, channels, ws[index], 1)]
self.bottelblock = nn.Sequential(*block)
def forward(self, x_ir, x_vi):
# initial fusion - conv
# print('conv')
f_cat = torch.cat([x_ir, x_vi], 1)
f_init = self.conv_fusion(f_cat)
out_ir = self.conv_ir(x_ir)
        out_vi = self.conv_vi(x_vi)  # the original code had a bug and used conv_ir here; retrained after the fix
out = torch.cat([out_ir, out_vi], 1)
out = self.bottelblock(out)
out = f_init + out
return out
# Fusion network, 4 groups of features
class Fusion_network(nn.Module):
def __init__(self, nC, fs_type):
super(Fusion_network, self).__init__()
self.fs_type = fs_type
self.fusion_block1 = FusionBlock_res(nC[0], 0)
self.fusion_block2 = FusionBlock_res(nC[1], 1)
self.fusion_block3 = FusionBlock_res(nC[2], 2)
self.fusion_block4 = FusionBlock_res(nC[3], 3)
def forward(self, en_ir, en_vi):
f1_0 = self.fusion_block1(en_ir[0], en_vi[0])
f2_0 = self.fusion_block2(en_ir[1], en_vi[1])
f3_0 = self.fusion_block3(en_ir[2], en_vi[2])
f4_0 = self.fusion_block4(en_ir[3], en_vi[3])
return [f1_0, f2_0, f3_0, f4_0]
class Fusion_ADD(torch.nn.Module):
def forward(self, en_ir, en_vi):
temp = en_ir + en_vi
return temp
class Fusion_AVG(torch.nn.Module):
def forward(self, en_ir, en_vi):
temp = (en_ir + en_vi) / 2
return temp
class Fusion_MAX(torch.nn.Module):
def forward(self, en_ir, en_vi):
temp = torch.max(en_ir, en_vi)
return temp
class Fusion_SPA(torch.nn.Module):
def forward(self, en_ir, en_vi):
shape = en_ir.size()
spatial_type = 'mean'
# calculate spatial attention
spatial1 = spatial_attention(en_ir, spatial_type)
spatial2 = spatial_attention(en_vi, spatial_type)
# get weight map, soft-max
spatial_w1 = torch.exp(spatial1) / (torch.exp(spatial1) + torch.exp(spatial2) + EPSILON)
spatial_w2 = torch.exp(spatial2) / (torch.exp(spatial1) + torch.exp(spatial2) + EPSILON)
spatial_w1 = spatial_w1.repeat(1, shape[1], 1, 1)
spatial_w2 = spatial_w2.repeat(1, shape[1], 1, 1)
tensor_f = spatial_w1 * en_ir + spatial_w2 * en_vi
return tensor_f
# spatial attention
def spatial_attention(tensor, spatial_type='sum'):
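    # Collapse the channel dimension into a single spatial map (mean or sum);
    # used by Fusion_SPA as soft-max attention weights.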
spatial = []
    if spatial_type == 'mean':
        spatial = tensor.mean(dim=1, keepdim=True)
    elif spatial_type == 'sum':
        spatial = tensor.sum(dim=1, keepdim=True)
return spatial
# fuison strategy based on nuclear-norm (channel attention form NestFuse)
class Fusion_Nuclear(torch.nn.Module):
def forward(self, en_ir, en_vi):
shape = en_ir.size()
# calculate channel attention
global_p1 = nuclear_pooling(en_ir)
global_p2 = nuclear_pooling(en_vi)
# get weight map
global_p_w1 = global_p1 / (global_p1 + global_p2 + EPSILON)
global_p_w2 = global_p2 / (global_p1 + global_p2 + EPSILON)
global_p_w1 = global_p_w1.repeat(1, 1, shape[2], shape[3])
global_p_w2 = global_p_w2.repeat(1, 1, shape[2], shape[3])
tensor_f = global_p_w1 * en_ir + global_p_w2 * en_vi
return tensor_f
# sum of S V for each chanel
def nuclear_pooling(tensor):
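    # Channel-wise nuclear norm: for each channel, sum the singular values of
    # its 2-D feature map. Assumes batch size 1 and a CUDA device, as in the
    # surrounding test code.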
shape = tensor.size()
vectors = torch.zeros(1, shape[1], 1, 1).cuda()
for i in range(shape[1]):
u, s, v = torch.svd(tensor[0, i, :, :] + EPSILON)
s_sum = torch.sum(s)
vectors[0, i, 0, 0] = s_sum
return vectors
# Fusion strategy, two type
class Fusion_strategy(nn.Module):
def __init__(self, fs_type):
super(Fusion_strategy, self).__init__()
self.fs_type = fs_type
self.fusion_add = Fusion_ADD()
self.fusion_avg = Fusion_AVG()
self.fusion_max = Fusion_MAX()
self.fusion_spa = Fusion_SPA()
self.fusion_nuc = Fusion_Nuclear()
def forward(self, en_ir, en_vi):
        if self.fs_type == 'add':
            fusion_operation = self.fusion_add
        elif self.fs_type == 'avg':
            fusion_operation = self.fusion_avg
        elif self.fs_type == 'max':
            fusion_operation = self.fusion_max
        elif self.fs_type == 'spa':
            fusion_operation = self.fusion_spa
        elif self.fs_type == 'nuclear':
            fusion_operation = self.fusion_nuc
f1_0 = fusion_operation(en_ir[0], en_vi[0])
f2_0 = fusion_operation(en_ir[1], en_vi[1])
f3_0 = fusion_operation(en_ir[2], en_vi[2])
f4_0 = fusion_operation(en_ir[3], en_vi[3])
return [f1_0, f2_0, f3_0, f4_0]
# NestFuse network - light, no desnse
class NestFuse_light2_nodense(nn.Module):
def __init__(self, nb_filter, input_nc=1, output_nc=1, deepsupervision=True):
super(NestFuse_light2_nodense, self).__init__()
self.deepsupervision = deepsupervision
block = DenseBlock_light
output_filter = 16
kernel_size = 3
stride = 1
self.pool = nn.MaxPool2d(2, 2)
self.up = nn.Upsample(scale_factor=2)
self.up_eval = UpsampleReshape_eval()
# encoder
self.conv0 = ConvLayer(input_nc, output_filter, 1, stride)
self.DB1_0 = block(output_filter, nb_filter[0], kernel_size, 1)
self.DB2_0 = block(nb_filter[0], nb_filter[1], kernel_size, 1)
self.DB3_0 = block(nb_filter[1], nb_filter[2], kernel_size, 1)
self.DB4_0 = block(nb_filter[2], nb_filter[3], kernel_size, 1)
# decoder
self.DB1_1 = block(nb_filter[0] + nb_filter[1], nb_filter[0], kernel_size, 1)
self.DB2_1 = block(nb_filter[1] + nb_filter[2], nb_filter[1], kernel_size, 1)
self.DB3_1 = block(nb_filter[2] + nb_filter[3], nb_filter[2], kernel_size, 1)
# # no short connection
# self.DB1_2 = block(nb_filter[0] + nb_filter[1], nb_filter[0], kernel_size, 1)
# self.DB2_2 = block(nb_filter[1] + nb_filter[2], nb_filter[1], kernel_size, 1)
# self.DB1_3 = block(nb_filter[0] + nb_filter[1], nb_filter[0], kernel_size, 1)
# short connection
self.DB1_2 = block(nb_filter[0] * 2 + nb_filter[1], nb_filter[0], kernel_size, 1)
        self.DB2_2 = block(nb_filter[1] * 2 + nb_filter[2], nb_filter[1], kernel_size, 1)
self.DB1_3 = block(nb_filter[0] * 3 + nb_filter[1], nb_filter[0], kernel_size, 1)
if self.deepsupervision:
self.conv1 = ConvLayer(nb_filter[0], output_nc, 1, stride)
self.conv2 = ConvLayer(nb_filter[0], output_nc, 1, stride)
self.conv3 = ConvLayer(nb_filter[0], output_nc, 1, stride)
# self.conv4 = ConvLayer(nb_filter[0], output_nc, 1, stride)
else:
self.conv_out = ConvLayer(nb_filter[0], output_nc, 1, stride)
def encoder(self, input):
x = self.conv0(input)
x1_0 = self.DB1_0(x)
x2_0 = self.DB2_0(self.pool(x1_0))
x3_0 = self.DB3_0(self.pool(x2_0))
x4_0 = self.DB4_0(self.pool(x3_0))
# x5_0 = self.DB5_0(self.pool(x4_0))
return [x1_0, x2_0, x3_0, x4_0]
def decoder_train(self, f_en):
x1_1 = self.DB1_1(torch.cat([f_en[0], self.up(f_en[1])], 1))
x2_1 = self.DB2_1(torch.cat([f_en[1], self.up(f_en[2])], 1))
x1_2 = self.DB1_2(torch.cat([f_en[0], x1_1, self.up(x2_1)], 1))
x3_1 = self.DB3_1(torch.cat([f_en[2], self.up(f_en[3])], 1))
x2_2 = self.DB2_2(torch.cat([f_en[1], x2_1, self.up(x3_1)], 1))
x1_3 = self.DB1_3(torch.cat([f_en[0], x1_1, x1_2, self.up(x2_2)], 1))
if self.deepsupervision:
output1 = self.conv1(x1_1)
output2 = self.conv2(x1_2)
output3 = self.conv3(x1_3)
# output4 = self.conv4(x1_4)
return [output1, output2, output3]
else:
output = self.conv_out(x1_3)
return [output]
def decoder_eval(self, f_en):
x1_1 = self.DB1_1(torch.cat([f_en[0], self.up_eval(f_en[0], f_en[1])], 1))
x2_1 = self.DB2_1(torch.cat([f_en[1], self.up_eval(f_en[1], f_en[2])], 1))
x1_2 = self.DB1_2(torch.cat([f_en[0], x1_1, self.up_eval(f_en[0], x2_1)], 1))
x3_1 = self.DB3_1(torch.cat([f_en[2], self.up_eval(f_en[2], f_en[3])], 1))
x2_2 = self.DB2_2(torch.cat([f_en[1], x2_1, self.up_eval(f_en[1], x3_1)], 1))
x1_3 = self.DB1_3(torch.cat([f_en[0], x1_1, x1_2, self.up_eval(f_en[0], x2_2)], 1))
if self.deepsupervision:
output1 = self.conv1(x1_1)
output2 = self.conv2(x1_2)
output3 = self.conv3(x1_3)
# output4 = self.conv4(x1_4)
return [output1, output2, output3]
else:
output = self.conv_out(x1_3)
return [output]
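# A minimal end-to-end sketch of the auto-encoder (nb_filter values follow the
# training/test scripts; the input size is an illustrative assumption):
#   model = NestFuse_light2_nodense([64, 112, 160, 208, 256], 1, 1, deepsupervision=False)
#   x = torch.rand(1, 1, 256, 256)
#   feats = model.encoder(x)              # 4 feature maps at strides 1, 2, 4, 8
#   recon = model.decoder_eval(feats)[0]  # reconstruction with the input's size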
| 12,480 | 34.157746 | 96 |
py
|
imagefusion-rfn-nest
|
imagefusion-rfn-nest-main/test_40pairs.py
|
# -*- coding:utf-8 -*-
# @Author: Li Hui, Jiangnan University
# @Email: [email protected]
# @File : test_40pairs.py
# @Time : 2020/8/14 17:11
# test phase
import os
import torch
from torch.autograd import Variable
from net import NestFuse_light2_nodense, Fusion_network, Fusion_strategy
import utils
from args_fusion import args
import numpy as np
def load_model(path_auto, path_fusion, fs_type, flag_img):
    if flag_img:
        nc = 3
    else:
        nc = 1
input_nc = nc
output_nc = nc
nb_filter = [64, 112, 160, 208, 256]
nest_model = NestFuse_light2_nodense(nb_filter, input_nc, output_nc, deepsupervision=False)
nest_model.load_state_dict(torch.load(path_auto))
fusion_model = Fusion_network(nb_filter, fs_type)
fusion_model.load_state_dict(torch.load(path_fusion))
fusion_strategy = Fusion_strategy(fs_type)
para = sum([np.prod(list(p.size())) for p in nest_model.parameters()])
type_size = 4
print('Model {} : params: {:4f}M'.format(nest_model._get_name(), para * type_size / 1000 / 1000))
para = sum([np.prod(list(p.size())) for p in fusion_model.parameters()])
type_size = 4
print('Model {} : params: {:4f}M'.format(fusion_model._get_name(), para * type_size / 1000 / 1000))
nest_model.eval()
fusion_model.eval()
nest_model.cuda()
fusion_model.cuda()
return nest_model, fusion_model, fusion_strategy
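# A minimal usage sketch (paths are placeholders; a CUDA device is assumed,
# since load_model moves both networks to the GPU):
#   nest_model, fusion_model, fusion_strategy = load_model(
#       'nestfuse.model', 'rfn.model', fs_type='res', flag_img=False)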
def run_demo(nest_model, fusion_model, fusion_strategy, infrared_path, visible_path, output_path_root, name_ir, fs_type, use_strategy, flag_img, alpha):
img_ir, h, w, c = utils.get_test_image(infrared_path, flag=flag_img) # True for rgb
img_vi, h, w, c = utils.get_test_image(visible_path, flag=flag_img)
# dim = img_ir.shape
    if c == 1:
if args.cuda:
img_ir = img_ir.cuda()
img_vi = img_vi.cuda()
img_ir = Variable(img_ir, requires_grad=False)
img_vi = Variable(img_vi, requires_grad=False)
# encoder
en_r = nest_model.encoder(img_ir)
en_v = nest_model.encoder(img_vi)
# fusion net
if use_strategy:
f = fusion_strategy(en_r, en_v)
else:
f = fusion_model(en_r, en_v)
# decoder
img_fusion_list = nest_model.decoder_eval(f)
else:
# fusion each block
img_fusion_blocks = []
for i in range(c):
# encoder
img_vi_temp = img_vi[i]
img_ir_temp = img_ir[i]
if args.cuda:
img_vi_temp = img_vi_temp.cuda()
img_ir_temp = img_ir_temp.cuda()
img_vi_temp = Variable(img_vi_temp, requires_grad=False)
img_ir_temp = Variable(img_ir_temp, requires_grad=False)
en_r = nest_model.encoder(img_ir_temp)
en_v = nest_model.encoder(img_vi_temp)
# fusion net
if use_strategy:
f = fusion_strategy(en_r, en_v)
else:
f = fusion_model(en_r, en_v)
# decoder
img_fusion_temp = nest_model.decoder_eval(f)
img_fusion_blocks.append(img_fusion_temp)
img_fusion_list = utils.recons_fusion_images(img_fusion_blocks, h, w)
# ########################### multi-outputs ##############################################
output_count = 0
for img_fusion in img_fusion_list:
file_name = 'fused_' + str(alpha) + '_' + name_ir
output_path = output_path_root + file_name
output_count += 1
# save images
utils.save_image_test(img_fusion, output_path)
print(output_path)
def main():
# False - gray
flag_img = False
# ################# gray scale ########################################
test_path = "images/40_pairs_tno_vot/ir/"
path_auto = args.resume_nestfuse
output_path_root = "./outputs/alpha_1e4_40/"
    if not os.path.exists(output_path_root):
        os.mkdir(output_path_root)
fs_type = 'res' # res (RFN), add, avg, max, spa, nuclear
use_strategy = False # True - static strategy; False - RFN
path_fusion_root = args.fusion_model
with torch.no_grad():
alpha_list = [700]
w_all_list = [[6.0, 3.0]]
for alpha in alpha_list:
for w_all in w_all_list:
w, w2 = w_all
temp = 'rfnnest_' + str(alpha) + '_wir_' + str(w) + '_wvi_' + str(w2)
output_path_list = 'fused_' + temp + '_40'
output_path1 = output_path_root + output_path_list + '/'
                if not os.path.exists(output_path1):
                    os.mkdir(output_path1)
output_path = output_path1
# load network
path_fusion = path_fusion_root + str(w) + '/' + 'Final_epoch_2_alpha_' + str(alpha) + '_wir_' + str(
w) + '_wvi_' + str(w2) + '_ssim_vi.model'
model, fusion_model, fusion_strategy = load_model(path_auto, path_fusion, fs_type, flag_img)
imgs_paths_ir, names = utils.list_images(test_path)
num = len(imgs_paths_ir)
for i in range(num):
name_ir = names[i]
infrared_path = imgs_paths_ir[i]
visible_path = infrared_path.replace('ir/', 'vis/')
                    if 'IR' in visible_path:
visible_path = visible_path.replace('IR', 'VIS')
else:
visible_path = visible_path.replace('i.', 'v.')
run_demo(model, fusion_model, fusion_strategy, infrared_path, visible_path, output_path, name_ir, fs_type, use_strategy, flag_img, temp)
print('Done......')
if __name__ == '__main__':
main()
| 4,952 | 30.75 | 152 |
py
|
imagefusion-rfn-nest
|
imagefusion-rfn-nest-main/train_fusionnet.py
|
# Training the RFN fusion network on top of a pre-trained NestFuse auto-encoder
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import sys
import time
from tqdm import tqdm, trange
import scipy.io as scio
import random
import torch
from torch.optim import Adam
from torch.autograd import Variable
import utils
from net import NestFuse_light2_nodense, Fusion_network
from args_fusion import args
import pytorch_msssim
EPSILON = 1e-5
def main():
original_imgs_path, _ = utils.list_images(args.dataset_ir)
train_num = 80000
original_imgs_path = original_imgs_path[:train_num]
random.shuffle(original_imgs_path)
# True - RGB , False - gray
img_flag = False
alpha_list = [700]
w_all_list = [[6.0, 3.0]]
for w_w in w_all_list:
w1, w2 = w_w
for alpha in alpha_list:
train(original_imgs_path, img_flag, alpha, w1, w2)
def train(original_imgs_path, img_flag, alpha, w1, w2):
batch_size = args.batch_size
# load network model
nc = 1
input_nc = nc
output_nc = nc
nb_filter = [64, 112, 160, 208, 256]
f_type = 'res'
with torch.no_grad():
deepsupervision = False
nest_model = NestFuse_light2_nodense(nb_filter, input_nc, output_nc, deepsupervision)
model_path = args.resume_nestfuse
# load auto-encoder network
print('Resuming, initializing auto-encoder using weight from {}.'.format(model_path))
nest_model.load_state_dict(torch.load(model_path))
nest_model.eval()
# fusion network
fusion_model = Fusion_network(nb_filter, f_type)
if args.resume_fusion_model is not None:
print('Resuming, initializing fusion net using weight from {}.'.format(args.resume_fusion_model))
fusion_model.load_state_dict(torch.load(args.resume_fusion_model))
optimizer = Adam(fusion_model.parameters(), args.lr)
mse_loss = torch.nn.MSELoss()
ssim_loss = pytorch_msssim.msssim
if args.cuda:
nest_model.cuda()
fusion_model.cuda()
tbar = trange(args.epochs)
print('Start training.....')
# creating save path
temp_path_model = os.path.join(args.save_fusion_model)
temp_path_loss = os.path.join(args.save_loss_dir)
    if not os.path.exists(temp_path_model):
        os.mkdir(temp_path_model)
    if not os.path.exists(temp_path_loss):
        os.mkdir(temp_path_loss)
temp_path_model_w = os.path.join(args.save_fusion_model, str(w1))
temp_path_loss_w = os.path.join(args.save_loss_dir, str(w1))
    if not os.path.exists(temp_path_model_w):
        os.mkdir(temp_path_model_w)
    if not os.path.exists(temp_path_loss_w):
        os.mkdir(temp_path_loss_w)
Loss_feature = []
Loss_ssim = []
Loss_all = []
count_loss = 0
all_ssim_loss = 0.
all_fea_loss = 0.
for e in tbar:
print('Epoch %d.....' % e)
# load training database
image_set_ir, batches = utils.load_dataset(original_imgs_path, batch_size)
fusion_model.train()
count = 0
for batch in range(batches):
image_paths_ir = image_set_ir[batch * batch_size:(batch * batch_size + batch_size)]
img_ir = utils.get_train_images(image_paths_ir, height=args.HEIGHT, width=args.WIDTH, flag=img_flag)
image_paths_vi = [x.replace('lwir', 'visible') for x in image_paths_ir]
img_vi = utils.get_train_images(image_paths_vi, height=args.HEIGHT, width=args.WIDTH, flag=img_flag)
count += 1
optimizer.zero_grad()
img_ir = Variable(img_ir, requires_grad=False)
img_vi = Variable(img_vi, requires_grad=False)
if args.cuda:
img_ir = img_ir.cuda()
img_vi = img_vi.cuda()
# get fusion image
# encoder
en_ir = nest_model.encoder(img_ir)
en_vi = nest_model.encoder(img_vi)
# fusion
f = fusion_model(en_ir, en_vi)
# decoder
outputs = nest_model.decoder_eval(f)
# resolution loss: between fusion image and visible image
x_ir = Variable(img_ir.data.clone(), requires_grad=False)
x_vi = Variable(img_vi.data.clone(), requires_grad=False)
######################### LOSS FUNCTION #########################
loss1_value = 0.
loss2_value = 0.
for output in outputs:
output = (output - torch.min(output)) / (torch.max(output) - torch.min(output) + EPSILON)
output = output * 255
# ---------------------- LOSS IMAGES ------------------------------------
# detail loss
# ssim_loss_temp1 = ssim_loss(output, x_ir, normalize=True)
ssim_loss_temp2 = ssim_loss(output, x_vi, normalize=True)
loss1_value += alpha * (1 - ssim_loss_temp2)
# feature loss
g2_ir_fea = en_ir
g2_vi_fea = en_vi
g2_fuse_fea = f
# w_ir = [3.5, 3.5, 3.5, 3.5]
w_ir = [w1, w1, w1, w1]
w_vi = [w2, w2, w2, w2]
w_fea = [1, 10, 100, 1000]
for ii in range(4):
g2_ir_temp = g2_ir_fea[ii]
g2_vi_temp = g2_vi_fea[ii]
g2_fuse_temp = g2_fuse_fea[ii]
(bt, cht, ht, wt) = g2_ir_temp.size()
loss2_value += w_fea[ii]*mse_loss(g2_fuse_temp, w_ir[ii]*g2_ir_temp + w_vi[ii]*g2_vi_temp)
loss1_value /= len(outputs)
loss2_value /= len(outputs)
# total loss
total_loss = loss1_value + loss2_value
total_loss.backward()
optimizer.step()
            all_fea_loss += loss2_value.item()
            all_ssim_loss += loss1_value.item()
if (batch + 1) % args.log_interval == 0:
mesg = "{}\t Alpha: {} \tW-IR: {}\tEpoch {}:\t[{}/{}]\t ssim loss: {:.6f}\t fea loss: {:.6f}\t total: {:.6f}".format(
time.ctime(), alpha, w1, e + 1, count, batches,
all_ssim_loss / args.log_interval,
all_fea_loss / args.log_interval,
(all_fea_loss + all_ssim_loss) / args.log_interval
)
tbar.set_description(mesg)
Loss_ssim.append( all_ssim_loss / args.log_interval)
Loss_feature.append(all_fea_loss / args.log_interval)
Loss_all.append((all_fea_loss + all_ssim_loss) / args.log_interval)
count_loss = count_loss + 1
all_ssim_loss = 0.
all_fea_loss = 0.
if (batch + 1) % (200 * args.log_interval) == 0:
# save model
fusion_model.eval()
fusion_model.cpu()
save_model_filename = "Epoch_" + str(e) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".model"
save_model_path = os.path.join(temp_path_model, save_model_filename)
torch.save(fusion_model.state_dict(), save_model_path)
# save loss data
                # ssim loss
loss_data_ssim = Loss_ssim
loss_filename_path = temp_path_loss_w + "/loss_ssim_epoch_" + str(args.epochs) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'loss_ssim': loss_data_ssim})
                # feature loss
loss_data_fea = Loss_feature
loss_filename_path = temp_path_loss_w + "/loss_fea_epoch_" + str(args.epochs) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'loss_fea': loss_data_fea})
# all loss
loss_data = Loss_all
loss_filename_path = temp_path_loss_w + "/loss_all_epoch_" + str(args.epochs) + "_iters_" + str(count) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'loss_all': loss_data})
fusion_model.train()
fusion_model.cuda()
                tbar.set_description("\nCheckpoint, trained model saved at " + save_model_path)
# ssim loss
loss_data_ssim = Loss_ssim
loss_filename_path = temp_path_loss_w + "/Final_loss_ssim_epoch_" + str(
args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'final_loss_ssim': loss_data_ssim})
loss_data_fea = Loss_feature
loss_filename_path = temp_path_loss_w + "/Final_loss_2_epoch_" + str(
args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'final_loss_fea': loss_data_fea})
    # total loss
loss_data = Loss_all
loss_filename_path = temp_path_loss_w + "/Final_loss_all_epoch_" + str(
args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(w1) + "_wvi_" + str(w2) + ".mat"
scio.savemat(loss_filename_path, {'final_loss_all': loss_data})
# save model
fusion_model.eval()
fusion_model.cpu()
save_model_filename = "Final_epoch_" + str(args.epochs) + "_alpha_" + str(alpha) + "_wir_" + str(
w1) + "_wvi_" + str(w2) + ".model"
save_model_path = os.path.join(temp_path_model_w, save_model_filename)
torch.save(fusion_model.state_dict(), save_model_path)
print("\nDone, trained model saved at", save_model_path)
def check_paths(args):
try:
if not os.path.exists(args.vgg_model_dir):
os.makedirs(args.vgg_model_dir)
if not os.path.exists(args.save_model_dir):
os.makedirs(args.save_model_dir)
except OSError as e:
print(e)
sys.exit(1)
if __name__ == "__main__":
main()
| 8,542 | 33.447581 | 181 |
py
|
imagefusion-rfn-nest
|
imagefusion-rfn-nest-main/pytorch_msssim/__init__.py
|
import torch
import torch.nn.functional as F
from math import exp
import numpy as np
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size//2)**2/float(2*sigma**2)) for x in range(window_size)])
return gauss/gauss.sum()
def create_window(window_size, channel=1):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = _2D_window.expand(channel, 1, window_size, window_size).contiguous()
return window
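# Example: an 11x11 Gaussian kernel replicated per channel, shaped for a
# grouped conv2d:
#   w = create_window(11, channel=3)   # shape (3, 1, 11, 11); each 11x11 slice sums to ~1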
def ssim(img1, img2, window_size=11, window=None, size_average=True, full=False, val_range=None):
# Value range can be different from 255. Other common ranges are 1 (sigmoid) and 2 (tanh).
if val_range is None:
if torch.max(img1) > 128:
max_val = 255
else:
max_val = 1
if torch.min(img1) < -0.5:
min_val = -1
else:
min_val = 0
L = max_val - min_val
else:
L = val_range
padd = 0
(_, channel, height, width) = img1.size()
if window is None:
real_size = min(window_size, height, width)
window = create_window(real_size, channel=channel).to(img1.device)
mu1 = F.conv2d(img1, window, padding=padd, groups=channel)
mu2 = F.conv2d(img2, window, padding=padd, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=padd, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=padd, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=padd, groups=channel) - mu1_mu2
C1 = (0.01 * L) ** 2
C2 = (0.03 * L) ** 2
v1 = 2.0 * sigma12 + C2
v2 = sigma1_sq + sigma2_sq + C2
cs = torch.mean(v1 / v2) # contrast sensitivity
ssim_map = ((2 * mu1_mu2 + C1) * v1) / ((mu1_sq + mu2_sq + C1) * v2)
if size_average:
ret = ssim_map.mean()
else:
ret = ssim_map.mean(1).mean(1).mean(1)
if full:
return ret, cs
return ret
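# A minimal sanity check (identical images give an SSIM of ~1):
#   a = torch.rand(1, 3, 64, 64)
#   ssim(a, a.clone()).item()            # ~1.0
#   ssim(a, torch.rand_like(a)).item()   # much lower for unrelated noise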
def msssim(img1, img2, window_size=11, size_average=True, val_range=None, normalize=False):
device = img1.device
weights = torch.FloatTensor([0.0448, 0.2856, 0.3001, 0.2363, 0.1333]).to(device)
levels = weights.size()[0]
mssim = []
mcs = []
for _ in range(levels):
sim, cs = ssim(img1, img2, window_size=window_size, size_average=size_average, full=True, val_range=val_range)
mssim.append(sim)
mcs.append(cs)
img1 = F.avg_pool2d(img1, (2, 2))
img2 = F.avg_pool2d(img2, (2, 2))
mssim = torch.stack(mssim)
mcs = torch.stack(mcs)
    # Normalize to avoid NaNs when training unstable models (not compliant with the original definition)
if normalize:
mssim = (mssim + 1) / 2
mcs = (mcs + 1) / 2
pow1 = mcs ** weights
pow2 = mssim ** weights
# From Matlab implementation https://ece.uwaterloo.ca/~z70wang/research/iwssim/
output = torch.prod(pow1[:-1] * pow2[-1])
return output
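# A minimal sanity check (identical inputs give an MS-SSIM of ~1; the input
# must survive four 2x down-samplings, so 128x128 is a safe illustrative size):
#   a = torch.rand(1, 1, 128, 128)
#   msssim(a, a.clone()).item()   # ~1.0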
# Classes to re-use window
class SSIM(torch.nn.Module):
def __init__(self, window_size=11, size_average=True, val_range=None):
super(SSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.val_range = val_range
# Assume 1 channel for SSIM
self.channel = 1
self.window = create_window(window_size)
def forward(self, img1, img2):
(_, channel, _, _) = img1.size()
if channel == self.channel and self.window.dtype == img1.dtype:
window = self.window
else:
window = create_window(self.window_size, channel).to(img1.device).type(img1.dtype)
self.window = window
self.channel = channel
return ssim(img1, img2, window=window, window_size=self.window_size, size_average=self.size_average)
class MSSSIM(torch.nn.Module):
def __init__(self, window_size=11, size_average=True, channel=3):
super(MSSSIM, self).__init__()
self.window_size = window_size
self.size_average = size_average
self.channel = channel
def forward(self, img1, img2):
# TODO: store window between calls if possible
return msssim(img1, img2, window_size=self.window_size, size_average=self.size_average)
| 4,380 | 31.69403 | 118 |
py
|
tasksource
|
tasksource-main/.github/scripts/release.py
|
#!/usr/bin/env python3
import json
import subprocess
def get_last_version() -> str:
"""Return the version number of the last release."""
json_string = (
subprocess.run(
["gh", "release", "view", "--json", "tagName"],
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
.stdout.decode("utf8")
.strip()
)
return json.loads(json_string)["tagName"]
def bump_patch_number(version_number: str) -> str:
"""Return a copy of `version_number` with the patch number incremented."""
major, minor, patch = version_number.split(".")
return f"{major}.{minor}.{int(patch) + 1}"
def create_new_patch_release():
"""Create a new patch release on GitHub."""
try:
last_version_number = get_last_version()
except subprocess.CalledProcessError as err:
if err.stderr.decode("utf8").startswith("HTTP 404:"):
# The project doesn't have any releases yet.
new_version_number = "0.0.1"
else:
raise
else:
new_version_number = bump_patch_number(last_version_number)
subprocess.run(
["gh", "release", "create", "--generate-notes", new_version_number],
check=True,
)
if __name__ == "__main__":
create_new_patch_release()
| 1,328 | 26.122449 | 78 |
py
|
tasksource
|
tasksource-main/src/tasksource/mtasks.py
|
from .preprocess import cat, get,name, regen, constant, Classification, TokenClassification, MultipleChoice
from .metadata import udep_labels
from datasets import get_dataset_config_names, ClassLabel, Dataset, DatasetDict, concatenate_datasets, Sequence
def all(dataset_name):
try:
config_name=get_dataset_config_names(dataset_name)
except Exception as e:
print(dataset_name,e)
config_name=None
return dict(dataset_name=dataset_name, config_name=config_name)
def concatenate_configs(dataset):
return DatasetDict(train=concatenate_datasets(list(dataset.values())))
# English tasks (kept few, to preserve the balance between languages)
xnli = Classification("premise", "hypothesis", "label", **all("metaeval/xnli"))
americas_nli = Classification("premise","hypothesis","label",config_name="all_languages")
moritz_xnli = Classification("premise","hypothesis",name("label",["entailment", "neutral","contradiction"]),
pre_process=concatenate_configs, dataset_name="MoritzLaurer/multilingual-NLI-26lang-2mil7")
stsb_multi_mt = Classification("sentence1", "sentence2",
lambda x: float(x["similarity_score"]/5),
**all('stsb_multi_mt'))
pawsx = Classification("sentence1","sentence2",name('label',['not_paraphrase','paraphrase']), **all('paws-x'))
miam = Classification("Utterance",labels="Label", **all('miam'))
xstance = Classification("question", "comment", "label",
**all("strombergnlp/x-stance"))
offenseval = Classification(lambda x: str(x["text"]), labels=name("subtask_a",['not offensive','offensive']),
pre_process=lambda ds:ds.filter(lambda x: x['subtask_a'] in [0,1]),
dataset_name='strombergnlp/offenseval_2020',
config_name=["ar","da","gr","tr"])
offenseval_dravidian = Classification("text",labels="label",config_name=['kannada','malayalam','tamil'])
mlma_hate = Classification("tweet", labels=lambda x:x["sentiment"].split('_'),
dataset_name="nedjmaou/MLMA_hate_speech")
qam = Classification("question","answer","label", dataset_name="xglue",config_name="qam")
#x_sum_factuality = Classification("summary","generated_summary","label", dataset_name="ylacombe/xsum_factuality")
x_fact = Classification('evidence','claim','label', dataset_name="metaeval/x-fact")
xglue___nc = Classification('news_body',labels='news_category')
xglue___qadsm = Classification('query','ad_description','relevance_label')
xglue___qam = Classification('question','answer','label')
xglue___wpr = Classification('query','web_page_snippet','relavance_label') # relavance_label : sic
xlwic = Classification(
sentence1=cat(["target_word","context_1"], " : "),
sentence2=cat(["target_word","context_2"], " : "),
labels='label',dataset_name="pasinit/xlwic",config_name=['xlwic_de_de','xlwic_it_it','xlwic_fr_fr','xlwic_en_ko'])
#[ "spam", "fails_task", "lang_mismatch", "pii", "not_appropriate", "hate_speech", "sexual_content", "quality", "toxicity", "humor", "helpfulness", "creativity", "violence" ]
oasst1__quality = Classification("parent_text","text",labels="quality", dataset_name="tasksource/oasst1_dense_flat",
pre_process = lambda ds:ds.remove_columns('labels'))
oasst1__toxicity = Classification("parent_text","text",labels="toxicity", dataset_name="tasksource/oasst1_dense_flat",
pre_process = lambda ds:ds.remove_columns('labels'))
oasst1__helpfulness = Classification("parent_text","text",labels="helpfulness", dataset_name="tasksource/oasst1_dense_flat",
pre_process = lambda ds:ds.remove_columns('labels'))
language_identification = Classification("text",labels="labels", dataset_name="papluca/language-identification")
wili_2018_langid = Classification("sentence",labels="label",dataset_name="wili_2018")
exams = MultipleChoice(get.question.stem, choices_list=get.question.choices.text,
labels=lambda x:'ABCDE'.index(x['answerKey']),
dataset_name="exams", config_name='multilingual',
pre_process=lambda ds:ds.filter(lambda x: x['answerKey'] in "ABCDE"))
xcsr = MultipleChoice(get.question.stem, choices_list=get.question.choices.text,
labels=lambda x:'ABCDE'.index(x['answerKey']),
**all('xcsr'))
xcopa = MultipleChoice("premise",choices=['choice1','choice2'],labels="label",
**all('xcopa'))
xstory = MultipleChoice(constant(''),choices=["text_right_ending","text_wrong_ending"],labels=constant(0),
**all("juletxara/xstory_cloze"))
xglue_ner = TokenClassification("words","ner", dataset_name="xglue",config_name="ner")
xglue_pos = TokenClassification("words","pos", dataset_name="xglue",config_name="pos")
#disrpt_23 = Classification("unit1_sent", "unit2_sent", "label",**all("metaeval/disrpt"))
udep__pos = TokenClassification('tokens','upos', **all('universal_dependencies'))
def udep_post_process(ds):
return ds.cast_column('labels', Sequence(ClassLabel(names=udep_labels)))
#udep__deprel = TokenClassification('tokens',lambda x:[udep_labels.index(a) for a in x['deprel']],
# **all('universal_dependencies'),post_process=udep_post_process)
oasst_rlhf = MultipleChoice("prompt",choices=['chosen','rejected'],labels=constant(0),
dataset_name="tasksource/oasst1_pairwise_rlhf_reward")
sentiment = Classification("text",labels="label", dataset_name="tyqiangz/multilingual-sentiments",config_name="all",
pre_process=lambda ds:ds.filter(lambda x: "amazon_reviews" not in x['source']) )
tweet_sentiment = Classification("text", labels="label", **all('cardiffnlp/tweet_sentiment_multilingual'))
review_sentiment = Classification("review_body",labels="stars", dataset_name="amazon_reviews_multi",config_name="all_languages")
emotion = Classification("text",labels="emotion",dataset_name="metaeval/universal-joy")
# in mms
mms_sentiment = Classification("text",labels="label",dataset_name='Brand24/mms')
mapa_fine = TokenClassification("tokens","fine_grained",dataset_name='joelito/mapa')
mapa_coarse = TokenClassification("tokens","coarse_grained",dataset_name='joelito/mapa')
aces_ranking = MultipleChoice("source",choices=['good-translation','incorrect-translation'],labels=constant(0), dataset_name='nikitam/ACES')
aces_phenomena = Classification('source','incorrect-translation','phenomena', dataset_name='nikitam/ACES')
amazon_intent = Classification("utt",labels="intent",**all('AmazonScience/massive'))
# dataset_name='glue',config_name=['ocnli','afqmc'])
tidy_as2=Classification("Question","Sentence","Label",dataset_name='tasksource/tydi-as2-balanced')
multiconer = TokenClassification("tokens","ner_tags_index", **all("MultiCoNER/multiconer_v2"))
mtop = Classification("question",labels="intent", dataset_name="tasksource/mtop")
#wino_x
# clue, klue, indic_glue
# SMS_Spam_Multilingual_Collection_Dataset
| 6,676 | 48.828358 | 174 |
py
|
tasksource
|
tasksource-main/src/tasksource/access.py
|
from .preprocess import Preprocessing
import re
import pandas as pd
from . import tasks, recast
from .metadata import dataset_rank
from datasets import load_dataset
import funcy as fc
import os
import copy
from sorcery import dict_of
from functools import cache
import random
class lazy_mtasks:
def __getattr__(self, name):
from . import mtasks
return getattr(mtasks, name)
def __dir__(self):
from . import mtasks
return dir(mtasks)
lmtasks=lazy_mtasks()
def parse_var_name(s):
config_name,task_name = None,None
if '__' in s and '___' not in s: # dataset__task
dataset_name, task_name = s.split('__')
elif '__' not in s.replace('___','') and '___' in s: #dataset___config
dataset_name, config_name = s.split('___')
elif '___' in s and '__' in s.split('___')[1]: #dataset___config__task
dataset_name, config_task=s.split('___')
config_name,task_name = config_task.split('__')
else: # dataset
dataset_name = s
return dataset_name,config_name,task_name
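# Examples of the variable-name convention (doctest-style):
#   >>> parse_var_name("glue___mnli")        # dataset___config
#   ('glue', 'mnli', None)
#   >>> parse_var_name("sick__label")        # dataset__task
#   ('sick', None, 'label')
#   >>> parse_var_name("anli___a1__full")    # dataset___config__task
#   ('anli', 'a1', 'full')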
def pretty_name(x):
dn = x.dataset_name.split("/")[-1]
cn = x.config_name if x.config_name else ""
tn = x.task_name if x.task_name else ""
return f"{dn}/{cn}/{tn}".replace('//','/').rstrip('/')
@cache
def list_tasks(tasks_path=f'{os.path.dirname(__file__)}/tasks.py',multilingual=False,instruct=False, excluded=()): # tuple default: @cache needs hashable arguments
if multilingual:
tasks_path=tasks_path.replace('/tasks.py','/mtasks.py')
task_order = open(tasks_path).readlines()
task_order = [x.split('=')[0].rstrip() for x in task_order if '=' in x]
task_order = [x for x in task_order if x.isidentifier()]
task_order = fc.flip(dict(enumerate(task_order)))
l = []
_tasks = (lmtasks if multilingual else tasks)
for key in dir(_tasks):
if key not in task_order:
continue
value=getattr(_tasks, key)
if isinstance(value,Preprocessing):
dataset_name, config_name, task_name = parse_var_name(key)
dataset_name = (value.dataset_name if value.dataset_name else dataset_name)
config_name = (value.config_name if value.config_name else config_name)
l+=[{'dataset_name': dataset_name,
'config_name' : config_name,
'task_name': task_name,
'preprocessing_name': key,
'task_type': value.__class__.__name__,'mapping': value,
'rank':task_order.get(key,None)}]
df=pd.DataFrame(l).explode('config_name')
df = df.sort_values('rank').reset_index(drop=True)
df['id'] = df.apply(lambda x: pretty_name(x), axis=1)
df.insert(0, 'id', df.pop('id'))
del df['rank']
if instruct:
df=df[df.id.map(lambda x: not any(a in x for a in recast.improper_labels))]
df=df[df.id.map(lambda x: not any(x in a for a in excluded))]
return df
#task_df =list_tasks()
#mtask_df =list_tasks(multilingual=True)
def dict_to_query(d=dict(), **kwargs):
d={**d,**kwargs}
return '&'.join([f'`{k}`=="{v}"' for k,v in d.items()])
def load_preprocessing(tasks=tasks, **kwargs):
_tasks_df = list_tasks(multilingual=tasks==lmtasks)
y = _tasks_df.copy().query(dict_to_query(**kwargs)).iloc[0]
preprocessing= copy.copy(getattr(tasks, y.preprocessing_name))
for c in 'dataset_name','config_name':
if not isinstance(getattr(preprocessing,c), str):
setattr(preprocessing,c,getattr(y,c))
return preprocessing
def load_task(id=None, dataset_name=None,config_name=None,task_name=None,preprocessing_name=None,
max_rows=None, max_rows_eval=None, multilingual=False, instruct=False, seed=0, **load_dataset_kwargs):
query = dict_of(id, dataset_name, config_name, task_name,preprocessing_name)
query = {k:v for k,v in query.items() if v}
_tasks = (lmtasks if multilingual else tasks)
preprocessing = load_preprocessing(_tasks, **query)
dataset = load_dataset(preprocessing.dataset_name, preprocessing.config_name, **load_dataset_kwargs)
dataset= preprocessing(dataset,max_rows, max_rows_eval)
dataset.task_type = preprocessing.__class__.__name__
if instruct:
dataset=recast.recast_instruct(dataset)
return dataset
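# A minimal usage sketch (downloads the underlying dataset on first use):
#   dataset = load_task("glue/rte")    # lookup by pretty id
#   dataset = load_task(dataset_name="glue", config_name="rte", max_rows=1000)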
| 4,268 | 38.527778 | 114 |
py
|
tasksource
|
tasksource-main/src/tasksource/recast.py
|
import random
from datasets import DatasetDict, Dataset
from sorcery import dict_of
import string
improper_labels = ['recast/recast_kg_relations','linguisticprobing',"lexglue/scotus","pragmeval/squinky","pragmeval/emobank",'pragmeval/persuasiveness']
improper_labels += ['glue/stsb', 'sick/relatedness', 'joci', 'utilitarianism', 'amazon_counterfactual/en', 'toxic_conversations', 'ethos/multilabel', 'lex_glue/eurlex', 'lex_glue/unfair_tos', 'app_reviews', 'humicroedit/subtask-1', 'stackoverflow-questions', 'go_emotions/simplified', 'google_wellformed_query', 'has_part', 'blog_authorship_corpus/age', 'promptCoherence', 'Sarcasm_News_Headline', 'auditor_review/demo-org--auditor_review', 'Dynasent_Disagreement', 'Politeness_Disagreement', 'SBIC_Disagreement', 'SChem_Disagreement', 'Dilemmas_Disagreement', 'sts-companion', 'acceptability-prediction', 'chaos-mnli-ambiguity', 'headline_cause/en_simple', 'oasst1_dense_flat', 'civil_comments']
improper_labels += ['stsb_multi_mt','MLMA_hate_speech']
def render_options(options):
options = [f'"{x}"' for x in options]
return f"{', '.join(options[:-1])} or {options[-1]}"
def render_classification(text,options,answer):
example = 'A→B' if text.startswith('A:') else 'the following'
inputs = f'With no explanation, label {example} with either {render_options(options)}.\n{text}'
targets = f"{answer}."
return dict_of(inputs,targets)
def render_token_classification(tokens,options,labels):
prefix = f'With no explanation, label each line with {render_options(options)} preceded by ":".\n'
inputs = prefix+"\n".join(tokens)
targets = "\n".join([':'.join(x) for x in zip(tokens,labels)])
return dict_of(inputs,targets)
def render_multiple_choice(prompt, options, labels):
inputs=(prompt+'\n' if prompt else '')
letters = string.ascii_uppercase[:len(options)]
    inputs=f'With no explanation, choose the best option from {render_options(letters)}. {inputs}'
for letter, option in zip(letters, options):
inputs+=f'\n{letter}: {option}'
targets = f'{letters[labels]}.'
return dict_of(inputs, targets)
def negative_sample_options(y, labels,N=4):
if len(labels)<N:
return labels
else:
return [y]+random.sample([x for x in labels if x!=y], N-1)
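# Example (distractor order is random; the gold label always comes first):
#   >>> negative_sample_options("cat", ["cat", "dog", "bird", "fish", "frog"], N=3)
#   ['cat', 'fish', 'dog']   # 'cat' plus 2 randomly sampled distractors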
def shuffle_choices(x):
choices = sorted([k for k in x if 'choice' in k])
choices_texts = [x[c] for c in choices]
correct_choice =choices_texts[x['labels']]
random.shuffle(choices_texts)
for c, ct in zip(choices, choices_texts):
x[c]=ct
x["labels"]=choices_texts.index(correct_choice)
return x
def recast_dataset_classification_to_mc(dataset,sep="[SEP]",N=4):
def recast_split(d,N=N):
labels = d.features['labels']
df=d.to_pandas()
df['inputs'] = df.sentence1
if "sentence2" in df:
df['inputs'] +=sep + df.sentence2
N=min(N, len(labels.names))
df['choices']=df.apply(lambda x:negative_sample_options(labels.int2str(x['labels']), labels.names,N),axis=1)
df['labels']=df.apply(lambda x:x['choices'].index(labels.int2str(x['labels'])),axis=1)
for i in range(N):
df[f'choice{i}']= "This example is " + df.choices.map(lambda x:x[i])
choices = [f'choice{i}' for i in range(N)]
return Dataset.from_pandas(df[['inputs',*choices,'labels']],preserve_index=False)
return DatasetDict({k: recast_split(v) for k,v in dataset.items()})
def recast_instruct(dataset):
features = dataset['train'].features
labels = features['labels']
if "sentence1" in features:
task_type='Classification'
if "choice0" in features:
task_type = "MultipleChoice"
if "tokens" in features:
task_type = "TokenClassification"
def recast_MultipleChoice(x):
x=shuffle_choices(x)
choices = sorted([k for k in x if 'choice' in k])
if all([x[c] in x['inputs'] for c in choices]):
return {"inputs":x['inputs'], 'targets': x[f"choice{x['labels']}"].strip()+"."}
else:
return render_multiple_choice(x['inputs'],[x[c] for c in choices],x['labels'])
def recast_TokenClassification(x):
distractors = list(labels.feature.names)
x_labels = [labels.feature.int2str(y) for y in x['labels']]
labels_set= list({labels.feature.int2str(y) for y in x['labels']})
options=list(dict.fromkeys(labels_set+distractors))[:max(len(labels_set),10)]
return render_token_classification(x['tokens'],options,x_labels)
def recast_Classification(x):
if 'sentence2' in x:
text=f"A: {x['sentence1']}\nB: {x['sentence2']}"
else:
text=x['sentence1']
answer=labels.int2str(x['labels']).strip()
options= negative_sample_options(answer, labels._int2str)
return render_classification(text, options, answer)
dataset = dataset.map(eval(f"recast_{task_type}"))
dataset = dataset.remove_columns([k for k in features if k not in ['inputs','targets']])
return dataset
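# A minimal usage sketch (load_task comes from tasksource.access; the columns
# of the returned DatasetDict are reduced to 'inputs' / 'targets'):
#   ds = load_task("glue/rte", max_rows=1000)
#   instruct_ds = recast_instruct(ds)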
| 5,086 | 44.419643 | 696 |
py
|
tasksource
|
tasksource-main/src/tasksource/__init__.py
|
from .tasks import *
from .preprocess import *
from .access import *
| 69 | 16.5 | 25 |
py
|
tasksource
|
tasksource-main/src/tasksource/tasks.py
|
from .preprocess import cat, get, regen, name, constant, Classification, TokenClassification, MultipleChoice
from .metadata import bigbench_discriminative_english, blimp_hard, imppres_presupposition, imppres_implicature, udep_en_configs, udep_en_labels
from datasets import get_dataset_config_names, Sequence, ClassLabel, Dataset, DatasetDict
# variable name: dataset___config__task
###################### NLI/paraphrase ###############################
glue___mnli = Classification(sentence1="premise", sentence2="hypothesis", labels="label", splits=["train", None, "validation_matched"])
glue___qnli = Classification("question","sentence", labels="label")
glue___rte = Classification(sentence1="sentence1", sentence2="sentence2", labels="label")
glue___wnli = Classification(sentence1="sentence1", sentence2="sentence2", labels="label")
#glue___ax = Classification(sentence1="premise", sentence2="hypothesis", labels="label", splits=["test", None, None]) # fully masked
glue___mrpc = Classification(sentence1="sentence1", sentence2="sentence2", labels="label")
glue___qqp = Classification(sentence1="question1", sentence2="question2", labels="label")
glue___stsb = Classification(sentence1="sentence1", sentence2="sentence2", labels="label")
super_glue___boolq = Classification(sentence1="question", labels="label")
super_glue___cb = Classification(sentence1="premise", sentence2="hypothesis", labels="label")
super_glue___multirc = Classification(
cat(["paragraph", "question"]),
'answer',
labels='label'
)
#super_glue___rte = Classification(sentence1="premise", sentence2="hypothesis", labels="label") # in glue
super_glue___wic = Classification(
sentence1=cat(["word","sentence1"], " : "),
sentence2=cat(["word","sentence2"], " : "),
labels='label'
)
super_glue___axg = Classification(sentence1="premise", sentence2="hypothesis", labels="label", splits=["test", None, None])
anli__a1 = Classification('premise','hypothesis','label', splits=['train_r1','dev_r1','test_r1'])
anli__a2 = Classification('premise','hypothesis','label', splits=['train_r2','dev_r2','test_r2'])
anli__a3 = Classification('premise','hypothesis','label', splits=['train_r3','dev_r3','test_r3'])
babi_nli = Classification("premise", "hypothesis", "label",
dataset_name="metaeval/babi_nli",
config_name=set(get_dataset_config_names("metaeval/babi_nli"))-{"agents-motivations"}
) # agents-motivations task is not as clear-cut as the others
sick__label = Classification('sentence_A','sentence_B','label')
sick__relatedness = Classification('sentence_A','sentence_B','relatedness_score')
sick__entailment_AB = Classification('sentence_A','sentence_B','entailment_AB')
#sick__entailment_BA = Classification('sentence_A','sentence_B','entailment_BA')
def remove_neg_1(dataset):
return dataset.filter(lambda x:x['labels']!=-1)
snli = Classification(sentence1="premise", sentence2="hypothesis", labels="label",
post_process=remove_neg_1)
scitail = Classification("sentence1","sentence2","gold_label",config_name="snli_format")
hans = Classification(sentence1="premise", sentence2="hypothesis", labels="label")
wanli = Classification('premise','hypothesis','gold', dataset_name="alisawuffles/WANLI")
recast_nli = Classification(sentence1="context", sentence2="hypothesis", labels="label", dataset_name="metaeval/recast",
config_name=['recast_kg_relations', 'recast_puns', 'recast_factuality', 'recast_verbnet',
'recast_verbcorner', 'recast_ner', 'recast_sentiment', 'recast_megaveridicality'])
probability_words_nli = Classification(sentence1="context", sentence2="hypothesis", labels="label",
dataset_name="sileod/probability_words_nli",
config_name=["reasoning_1hop","reasoning_2hop","usnli"])
nan_nli = Classification("premise", "hypothesis", "label", dataset_name="joey234/nan-nli", config_name="joey234--nan-nli")
nli_fever = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/nli_fever", splits=["train","dev",None])
breaking_nli = Classification("sentence1","sentence2","label",
dataset_name="pietrolesci/breaking_nli", splits=["full",None,None])
conj_nli = Classification("premise","hypothesis","label",post_process=remove_neg_1,
dataset_name="pietrolesci/conj_nli",splits=['train','dev',None])
fracas = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/fracas")
dialogue_nli = Classification("sentence1","sentence2","label",
dataset_name="pietrolesci/dialogue_nli")
mpe_nli = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/mpe",
splits=["train","dev","test"])
dnc_nli = Classification("context","hypothesis","label",
dataset_name="pietrolesci/dnc")
# gpt3_nli = Classification("text_a","text_b","label",dataset_name="pietrolesci/gpt3_nli") # not sound enough
recast_white__fnplus = Classification("text","hypothesis","label",
dataset_name="pietrolesci/recast_white",splits=['fnplus',None,None])
recast_white__sprl = Classification("text","hypothesis","label",
dataset_name="pietrolesci/recast_white",splits=['sprl',None,None])
recast_white__dpr = Classification("text","hypothesis","label",
dataset_name="pietrolesci/recast_white",splits=['dpr',None,None])
joci = Classification("context","hypothesis",
labels=lambda x: [None, "impossible", "technically possible", "plausible", "likely", "very likely"][x["original_label"]],
pre_process=lambda ds:ds.filter(lambda x:x['original_label']!=0),
dataset_name="pietrolesci/joci",splits=['full',None,None])
#enfever_nli = Classification("evidence","claim","label", dataset_name="ctu-aic/enfever_nli")
#contrast_nli = Classification("premise", "hypothesis", "label",dataset_name="martn-nguyen/contrast_nli") # generated
robust_nli__IS_CS = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/robust_nli", splits=["IS_CS",None,None])
robust_nli__LI_LI = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/robust_nli", splits=["LI_LI",None,None])
robust_nli__ST_WO = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/robust_nli", splits=["ST_WO",None,None])
robust_nli__PI_SP = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/robust_nli", splits=["PI_SP",None,None])
robust_nli__PI_CD = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/robust_nli", splits=["PI_CD",None,None])
robust_nli__ST_SE = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/robust_nli", splits=["ST_SE",None,None])
robust_nli__ST_NE = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/robust_nli", splits=["ST_NE",None,None])
robust_nli__ST_LM = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/robust_nli", splits=["ST_LM",None,None])
robust_nli_is_sd = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/robust_nli_is_sd")
robust_nli_li_ts = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/robust_nli_li_ts")
gen_debiased_nli__snli_seq_z = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/gen_debiased_nli", splits=["snli_seq_z",None,None])
gen_debiased_nli__snli_z_aug = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/gen_debiased_nli", splits=["snli_z_aug",None,None])
gen_debiased_nli__snli_par_z = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/gen_debiased_nli", splits=["snli_par_z",None,None])
gen_debiased_nli__mnli_par_z = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/gen_debiased_nli", splits=["mnli_par_z",None,None])
gen_debiased_nli__mnli_z_aug = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/gen_debiased_nli", splits=["mnli_z_aug",None,None])
gen_debiased_nli__mnli_seq_z = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/gen_debiased_nli", splits=["mnli_seq_z",None,None])
add_one_rte = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/add_one_rte",splits=["train","dev","test"])
def _imppres_post_process(ds,prefix=''):
    # imppres' entailment definition is either purely semantic or purely pragmatic;
    # because of that, we differentiate these labels from the anli/mnli notation
return ds.cast_column('labels', ClassLabel(
names=[f'{prefix}_entailment',f'{prefix}_neutral',f'{prefix}_contradiction']))
imppres__presupposition = Classification("premise","hypothesis","gold_label",
dataset_name="metaeval/imppres", config_name=imppres_presupposition,
post_process=_imppres_post_process)
imppres__prag = Classification("premise","hypothesis","gold_label_prag",
dataset_name="metaeval/imppres", config_name=imppres_implicature,
post_process=lambda x: _imppres_post_process(x,'pragmatic'))
imppres__log = Classification("premise","hypothesis","gold_label_log",
dataset_name="metaeval/imppres", config_name=imppres_implicature,
post_process=lambda x: _imppres_post_process(x,'logical'))
glue__diagnostics = Classification("premise","hypothesis","label",
dataset_name="pietrolesci/glue_diagnostics",splits=["test",None,None])
hlgd = Classification("headline_a", "headline_b", labels="label")
paws___labeled_final = Classification("sentence1", "sentence2", name('label',['not_paraphrase','paraphrase']))
paws___labeled_swap = Classification("sentence1", "sentence2", name('label',['not_paraphrase','paraphrase']), splits=["train", None, None])
#paws___unlabeled_final = Classification("sentence1", "sentence2", "label")
#quora = Classification(get.questions.text[0], get.questions.text[1], 'is_duplicate') # in glue
medical_questions_pairs = Classification("question_1","question_2", name("label",['False','True']))
###################### Token Classification #########################
conll2003__pos_tags = TokenClassification(tokens="tokens", labels='pos_tags')
conll2003__chunk_tags = TokenClassification(tokens="tokens", labels='chunk_tags')
conll2003__ner_tags = TokenClassification(tokens="tokens", labels='ner_tags')
#tner___tweebank_ner = TokenClassification(tokens="tokens", labels="tags")
######################## Multiple choice ###########################
anthropic_rlhf = MultipleChoice(constant(''), ['chosen','rejected'], constant(0),
dataset_name="Anthropic/hh-rlhf")
model_written_evals = MultipleChoice('question', choices=['answer_matching_behavior','answer_not_matching_behavior'], labels=constant(0),
dataset_name="Anthropic/model-written-evals")
truthful_qa___multiple_choice = MultipleChoice(
"question",
choices_list=get.mc1_targets.choices,
labels=constant(0)
)
fig_qa = MultipleChoice(
"startphrase",
choices=["ending1","ending2"],
labels="labels",
dataset_name="nightingal3/fig-qa",
splits=["train","validation",None]
)
bigbench = MultipleChoice(
'inputs',
choices_list='multiple_choice_targets',
    labels=lambda x:x['multiple_choice_scores'].index(1) if 1 in x['multiple_choice_scores'] else -1,
dataset_name='tasksource/bigbench',
config_name=bigbench_discriminative_english - {"social_i_qa","intersect_geometry"} # english multiple choice tasks, minus duplicates
)
blimp_hard = MultipleChoice(inputs=constant(''),
choices=['sentence_good','sentence_bad'],
labels=constant(0),
dataset_name="blimp",
config_name=blimp_hard # tasks where GPT2 is at least 10% below human accuracy
)
cos_e = MultipleChoice('question',
choices_list='choices',
labels= lambda x: x['choices_list'].index(x['answer']),
config_name='v1.0')
cosmos_qa = MultipleChoice(cat(['context','question']),regen('answer[0-3]'),'label')
dream = MultipleChoice(
lambda x:"\n".join(x['dialogue']+[x['question']]),
choices_list='choice',
labels=lambda x:x['choices_list'].index(x['answer'])
)
openbookqa = MultipleChoice(
'question_stem',
choices_list=get.choices.text,
labels='answerKey'
)
qasc = MultipleChoice(
'question',
choices_list=get.choices.text,
labels=lambda x: "ABCDEFGH".index(x['answerKey']),
splits=['train','validation',None]
)
quartz = MultipleChoice(
'question',
choices_list=get.choices.text,
labels='answerKey'
)
quail = MultipleChoice(
cat(['context','question']),
choices_list='answers',
labels='correct_answer_id'
)
head_qa___en = MultipleChoice("qtext",
choices_list = lambda x:[a['atext'] for a in x["answers"]],
labels = lambda x:[a['aid'] for a in x["answers"]].index(x["ra"])
)
sciq = MultipleChoice(
'question',
['correct_answer']+regen('distractor[1-3]'),
labels=constant(0))
social_i_qa = MultipleChoice(
'question',
['answerA','answerB','answerC'],
'label')
wiki_hop___original = MultipleChoice(
'question',
choices_list='candidates',
labels=lambda x:x['choices_list'].index(x["answer"]))
wiqa = MultipleChoice('question_stem',
choices_list = lambda x: x['choices']['text'],
labels='answer_label_as_choice')
piqa = MultipleChoice('goal', choices=['sol1','sol2'], labels='label')
hellaswag = MultipleChoice('ctx_a',
choices_list=lambda x: [f'{x["ctx_b"]}{e}' for e in x["endings"]],
labels='label', splits=['train','validation',None])
super_glue___copa = MultipleChoice('premise',['choice1','choice2'],'label')
balanced_copa = MultipleChoice('premise',['choice1','choice2'],'label',
dataset_name="pkavumba/balanced-copa")
e_care = MultipleChoice('premise',['choice1','choice2'],'label',
dataset_name="12ml/e-CARE")
art = MultipleChoice(cat(['hypothesis_1','hypothesis_2']),
['observation_1','observation_2'],
labels=lambda x:x['label']-1,
splits=['train','validation',None]
)
mmlu = MultipleChoice('question',labels='answer',choices_list='choices',splits=['validation','dev','test'],
dataset_name="tasksource/mmlu",
config_name=get_dataset_config_names("tasksource/mmlu")
)
winogrande = MultipleChoice('sentence',['option1','option2'],'answer',config_name='winogrande_xl',
splits=['train','validation',None])
codah = MultipleChoice('question_propmt',choices_list='candidate_answers',labels='correct_answer_idx',config_name='codah') # question_propmt : sic, upstream column name
ai2_arc__challenge = MultipleChoice('question',
choices_list=get.choices.text,
labels=lambda x: get.choices.label(x).index(x["answerKey"]),
config_name=["ARC-Challenge","ARC-Easy"])
definite_pronoun_resolution = MultipleChoice(
inputs=cat(["sentence","pronoun"],' : '),
choices_list='candidates',
labels="label",
splits=['train',None,'test'])
swag___regular=MultipleChoice(cat(["sent1","sent2"]),regen("ending[0-3]"),"label")
def _split_choices(s):
import re
return [x.rstrip(', ') for x in re.split(r'[a-e] \) (.*?)',s) if x.strip(', ')]
math_qa = MultipleChoice(
'Problem',
choices_list = lambda x: _split_choices(x['options']),
labels = lambda x:'abcde'.index(x['correct'])
)
#aqua_rat___tokenized = MultipleChoice("question",choices_list="options",labels=lambda x:"ABCDE".index(x['correct'])) in math_qa
######################## Classification (other) ########################
glue___cola = Classification(sentence1="sentence", labels="label")
glue___sst2 = Classification(sentence1="sentence", labels="label")
utilitarianism = Classification("comparison",labels="label",
dataset_name="metaeval/utilitarianism")
amazon_counterfactual = Classification(
"text", labels="label",
dataset_name="mteb/amazon_counterfactual",
config_name="en")
insincere_questions = Classification(
"text", labels="label_text",
dataset_name="SetFit/insincere-questions")
toxic_conversations = Classification(
"text", labels="label",
dataset_name="SetFit/toxic_conversations")
turingbench = Classification("Generation",labels="label",
dataset_name="turingbench/TuringBench",
splits=["train","validation",None])
trec = Classification(sentence1="text", labels="fine_label")
tals_vitaminc = Classification('claim','evidence','label', dataset_name="tals/vitaminc", config_name="tals--vitaminc")
hope_edi = Classification("text", labels="label", splits=["train", "validation", None], config_name=["english"])
#fever___v1_0 = Classification(sentence1="claim", labels="label", splits=["train", "paper_dev", "paper_test"], dataset_name="fever", config_name="v1.0")
#fever___v2_0 = Classification(sentence1="claim", labels="label", splits=[None, "validation", None], dataset_name="fever", config_name="v2.0")
rumoureval_2019 = Classification(
sentence1="source_text",
sentence2=lambda x: str(x["reply_text"]),
labels="label", dataset_name="strombergnlp/rumoureval_2019", config_name="RumourEval2019",
post_process=lambda ds:ds.filter(lambda x:x['labels']!=None)
)
ethos___binary = Classification(sentence1="text", labels="label", splits=["train", None, None])
ethos___multilabel = Classification(
'text',
labels=lambda x: [x[c] for c in
['violence', 'gender', 'race', 'national_origin', 'disability', 'religion', 'sexual_orientation','directed_vs_generalized']
],
splits=["train", None, None]
)
tweet_eval = Classification(sentence1="text", labels="label", config_name=["emoji", "emotion", "hate", "irony", "offensive", "sentiment", "stance_abortion", "stance_atheism", "stance_climate", "stance_feminist", "stance_hillary"])
discovery = Classification("sentence1", "sentence2", labels="label", config_name=["discovery"])
pragmeval_1 = Classification("sentence",labels="label",
dataset_name="pragmeval",
config_name= ["emobank-arousal", "emobank-dominance", "emobank-valence", "squinky-formality", "squinky-implicature",
"squinky-informativeness","switchboard","mrda","verifiability"])
pragmeval_2 = Classification("sentence1","sentence2",labels="label",
dataset_name="pragmeval",
config_name= ["emergent", "gum", "pdtb", "persuasiveness-claimtype",
"persuasiveness-eloquence", "persuasiveness-premisetype", "persuasiveness-relevance", "persuasiveness-specificity",
"persuasiveness-strength", "sarcasm","stac"])
silicone = Classification("Utterance",labels="Label",
config_name=['dyda_da', 'dyda_e', 'iemocap', 'maptask', 'meld_e', 'meld_s', 'oasis', 'sem'] # +['swda', 'mrda'] # in pragmeval
)
#lex_glue___ecthr_a = Classification(sentence1="text", labels="labels") # too long
#lex_glue___ecthr_b = Classification(sentence1="text", labels="labels") # too long
lex_glue___eurlex = Classification(sentence1="text", labels="labels")
lex_glue___scotus = Classification(sentence1="text", labels="label")
lex_glue___ledgar = Classification(sentence1="text", labels="label")
lex_glue___unfair_tos = Classification(sentence1="text", labels="labels")
lex_glue___case_hold = MultipleChoice("context", choices_list='endings', labels="label")
language_identification = Classification("text",labels="labels", dataset_name="papluca/language-identification")
################ Automatically generated (verified)##########
imdb = Classification(sentence1="text", labels="label", splits=["train", None, "test"])
rotten_tomatoes = Classification(sentence1="text", labels="label")
ag_news = Classification(sentence1="text", labels="label", splits=["train", None, "test"])
yelp_review_full = Classification(sentence1="text", labels="label", splits=["train", None, "test"], config_name=["yelp_review_full"])
financial_phrasebank = Classification(sentence1="sentence", labels="label", splits=["train", None, None],
config_name=["sentences_allagree"])
poem_sentiment = Classification(sentence1="verse_text", labels="label")
#emotion = Classification(sentence1="text", labels="label") # file not found
dbpedia_14 = Classification(sentence1="content", labels="label", splits=["train", None, "test"], config_name=["dbpedia_14"])
amazon_polarity = Classification(sentence1="content", labels="label", splits=["train", None, "test"], config_name=["amazon_polarity"])
app_reviews = Classification("review", labels="star", splits=["train", None, None])
# multi_nli = Classification(sentence1="premise", sentence2="hypothesis", labels="label", splits=["train", "validation_matched", None]) #glue
hate_speech18 = Classification(sentence1="text", labels="label", splits=["train", None, None])
sms_spam = Classification(sentence1="sms", labels="label", splits=["train", None, None])
humicroedit___subtask_1 = Classification("original", "edit", labels="meanGrade", dataset_name="humicroedit", config_name="subtask-1")
humicroedit___subtask_2 = Classification(
sentence1=cat(['original1','edit1'],' : '),
sentence2=cat(['original2','edit2'],' : '),
labels="label", dataset_name="humicroedit", config_name="subtask-2")
snips_built_in_intents = Classification(sentence1="text", labels="label", splits=["train", None, None])
banking77 = Classification(sentence1="text", labels="label", splits=["train", None, "test"])
hate_speech_offensive = Classification(sentence1="tweet", labels="class", splits=["train", None, None])
yahoo_answers_topics = Classification(
"question_title","question_content",labels="topic")
stackoverflow_questions=Classification("title","body",labels="label",
dataset_name="pacovaldez/stackoverflow-questions")
#hyperpartisan_news_detection___byarticle = Classification(sentence1="text", labels="hyperpartisan", splits=["train", None, None]) # files too heavy
#hyperpartisan_news_detection___bypublisher = Classification(sentence1="text", labels="hyperpartisan", splits=["train","validation", None]) # files too heavy
hyperpartisan_news = Classification("text",labels="label",dataset_name="zapsdcn/hyperpartisan_news")
scierc = Classification("text",labels="label",dataset_name="zapsdcn/sciie")
citation_intent = Classification("text",labels="label",dataset_name="zapsdcn/citation_intent")
#go_emotions___raw = Classification(sentence1="text", splits=["train", None, None])
go_emotions___simplified = Classification(sentence1="text", labels="labels")
#boolq = Classification(sentence1="question", splits=["train", "validation", None]) # in superglue
#ecthr_cases___alleged_violation_prediction = Classification(labels="labels", dataset_name="ecthr_cases", config_name="alleged-violation-prediction")
#ecthr_cases___violation_prediction = Classification(labels="labels", dataset_name="ecthr_cases", config_name="violation-prediction")
# too long
scicite = Classification(sentence1="string", labels="label")
liar = Classification(sentence1="statement", labels="label")
relbert_lexical_relation_classification = Classification(sentence1="head", sentence2="tail", labels="relation",
dataset_name="relbert/lexical_relation_classification",
config_name=["BLESS","CogALexV","EVALution","K&H+N","ROOT09"])
metaeval_linguisticprobing = Classification("sentence", labels="label", dataset_name="metaeval/linguisticprobing",
config_name=['subj_number',
'obj_number',
'past_present',
'sentence_length',
'top_constituents',
'tree_depth',
'coordination_inversion',
'odd_man_out',
'bigram_shift']#+['word_content'] #too many labels
)
metaeval_crowdflower = Classification("text", labels="label",
splits=["train", None, None], dataset_name="metaeval/crowdflower",
config_name=['sentiment_nuclear_power',
'tweet_global_warming',
'airline-sentiment',
'corporate-messaging',
'economic-news',
'political-media-audience',
'political-media-bias',
'political-media-message',
'text_emotion']
)
metaeval_ethics___commonsense = Classification(sentence1="text", labels="label", dataset_name="metaeval/ethics", config_name="commonsense")
metaeval_ethics___deontology = Classification(sentence1="text", labels="label", dataset_name="metaeval/ethics", config_name="deontology")
metaeval_ethics___justice = Classification(sentence1="text", labels="label", dataset_name="metaeval/ethics", config_name="justice")
metaeval_ethics___virtue = Classification(sentence1="sentence1", sentence2="sentence2", labels="label", dataset_name="metaeval/ethics", config_name="virtue")
emo = Classification(sentence1="text", labels="label", splits=["train", None, "test"], config_name=["emo2019"])
google_wellformed_query = Classification(sentence1="content", labels="rating")
tweets_hate_speech_detection = Classification(sentence1="tweet", labels="label", splits=["train", None, None])
#adv_glue___adv_sst2 = Classification(sentence1="sentence", labels="label", splits=["validation", None, None])
#adv_glue___adv_qqp = Classification(sentence1="question1", sentence2="question2", labels="label", splits=["validation", None, None])
#adv_glue___adv_mnli = Classification(sentence1="premise", sentence2="hypothesis", labels="label", splits=["validation", None, None])
#adv_glue___adv_mnli_mismatched = Classification(sentence1="premise", sentence2="hypothesis", labels="label", splits=["validation", None, None])
#adv_glue___adv_qnli = Classification(sentence1="question", labels="label", splits=["validation", None, None])
#adv_glue___adv_rte = Classification(sentence1="sentence1", sentence2="sentence2", labels="label", splits=["validation", None, None])
has_part = Classification("arg1","arg2", labels="score", splits=["train", None, None])
wnut_17 = TokenClassification(tokens="tokens", labels="ner_tags", config_name=["wnut_17"])
ncbi_disease = TokenClassification(tokens="tokens", labels="ner_tags", config_name=["ncbi_disease"])
acronym_identification = TokenClassification(labels="labels", tokens="tokens")
jnlpba = TokenClassification(tokens="tokens", labels="ner_tags", splits=["train", "validation", None], config_name=["jnlpba"])
#species_800 = TokenClassification(tokens="tokens", labels="ner_tags", config_name=["species_800"]) missing files
SpeedOfMagic_ontonotes_english = TokenClassification(tokens="tokens", labels="ner_tags", dataset_name="SpeedOfMagic/ontonotes_english", config_name="SpeedOfMagic--ontonotes_english")
blog_authorship_corpus__gender = Classification(sentence1="text",labels="gender")
blog_authorship_corpus__age = Classification(sentence1="text",labels="age")
blog_authorship_corpus__horoscope = Classification(sentence1="text",labels="horoscope")
blog_authorship_corpus__job = Classification(sentence1="text",labels="job")
launch_open_question_type = Classification(sentence1="question", labels="resolve_type", dataset_name="launch/open_question_type")
health_fact = Classification(sentence1="claim", labels="label",
pre_process = lambda ds:ds.filter(lambda x:x['label'] not in {-1})
)
commonsense_qa = MultipleChoice(
"question",
choices_list=get.choices.text,
labels=lambda x: "ABCDE".index(x["answerKey"]),
splits=["train","validation",None]
)
mc_taco = Classification(
lambda x: f'{x["sentence"]} {x["question"]} {x["answer"]}',
labels="label",
splits=[ "validation",None,"test"]
)
ade_corpus_v2___Ade_corpus_v2_classification = Classification("text",labels="label")
discosense = MultipleChoice("context",choices=regen("option_[0-3]"),labels="label",
dataset_name="prajjwal1/discosense")
circa = Classification(
sentence1=cat(["context","question-X"]),
sentence2="answer-Y",
labels="goldstandard2", post_process=remove_neg_1)
#code_x_glue_cc_defect_detection = Classification("func", labels="target")
#code_x_glue_cc_clone_detection_big_clone_bench = Classification("func1", "func2", "label") # in bigbench + too heavy (100g)
#code_x_glue_cc_code_refinement = MultipleChoice(
# constant(""), choices=["buggy","fixed"], labels=constant(0),
# config_name="medium")
effective_feedback_student_writing = Classification("discourse_text", labels="discourse_effectiveness",dataset_name="YaHi/EffectiveFeedbackStudentWriting")
#promptSentiment = Classification("text",labels="label",dataset_name="Ericwang/promptSentiment")
#promptNLI = Classification("premise","hypothesis",labels="label",dataset_name="Ericwang/promptNLI")
#promptSpoke = Classification("text",labels="label",dataset_name="Ericwang/promptSpoke")
#promptProficiency = Classification("text",labels="label",dataset_name="Ericwang/promptProficiency")
#promptGrammar = Classification("text",labels="label",dataset_name="Ericwang/promptGrammar")
#promptCoherence = Classification("text",labels="label",dataset_name="Ericwang/promptCoherence")
phrase_similarity = Classification(
sentence1=cat(["phrase1","sentence1"], " : "),
sentence2=cat(["phrase2","sentence2"], " : "),
labels='label',
dataset_name="PiC/phrase_similarity"
)
exaggeration_detection = Classification(
sentence1="press_release_conclusion",
sentence2="abstract_conclusion",
labels="exaggeration_label",
dataset_name="copenlu/scientific-exaggeration-detection"
)
quarel = Classification(
"question",
labels=lambda x: "AB"[x["answer_index"]]
)
mwong_fever_evidence_related = Classification(sentence1="claim", sentence2="evidence", labels="labels", splits=["train", "valid", "test"], dataset_name="mwong/fever-evidence-related", config_name="mwong--fever-related")
numer_sense = Classification("sentence",labels="target",splits=["train",None,None])
dynasent__r1 = Classification("sentence", labels="gold_label",
dataset_name="dynabench/dynasent", config_name="dynabench.dynasent.r1.all")
dynasent__r2 = Classification("sentence", labels="gold_label",
dataset_name="dynabench/dynasent", config_name="dynabench.dynasent.r2.all")
sarcasm_news = Classification("headline", labels="is_sarcastic",
dataset_name="raquiba/Sarcasm_News_Headline")
sem_eval_2010_task_8 = Classification("sentence",labels="relation")
demo_org_auditor_review = Classification(sentence1="sentence", labels="label", splits=["train", None, "test"], dataset_name="demo-org/auditor_review", config_name="demo-org--auditor_review")
medmcqa = MultipleChoice("question", choices=regen('op[a-d]'),labels='cop')
dynasent_disagreement = Classification("text", labels="binary_disagreement", dataset_name="RuyuanWan/Dynasent_Disagreement")
politeness_disagreement = Classification("text", labels="binary_disagreement", dataset_name="RuyuanWan/Politeness_Disagreement")
sbic_disagreement = Classification("text", labels="binary_disagreement", dataset_name="RuyuanWan/SBIC_Disagreement")
schem_disagreement = Classification("text", labels="binary_disagreement", dataset_name="RuyuanWan/SChem_Disagreement")
dilemmas_disagreement = Classification("text", labels="binary_disagreement", dataset_name="RuyuanWan/Dilemmas_Disagreement")
logiqa = MultipleChoice(
cat(["context","query"]),
choices_list = 'options',
labels = "correct_option",
dataset_name="lucasmccabe/logiqa"
)
#proto_qa = MultipleChoice(
# "question",
# choices_list=lambda x:x['answer-clusters']['answers'],
# labels=lambda x: x['answer-clusters']['count'].index(max(x['answer-clusters']['count'])),
# config_name='proto_qa'
#)
wiki_qa = Classification("question","answer", name("label",['False','True']))
cycic_classification = Classification("question",labels=name("correct_answer",['False','True']),
dataset_name = "metaeval/cycic_classification")
cycic_mc = MultipleChoice("question", choices=regen('answer_option[0-4]'), labels="correct_answer",
dataset_name = "metaeval/cycic_multiplechoice")
def _preprocess_chatgpt_detection(ex):
import random
label=random.random()<0.5
ex['label']=int(label)
ex['answer']=[str(ex['human_answers'][0]),str(ex['chatgpt_answers'][0])][label]
return ex
#chatgpt_detection = Classification("question","answer","label",
# dataset_name = 'Hello-SimpleAI/HC3', config_name="all",
# pre_process=lambda dataset:dataset.map(_preprocess_chatgpt_detection))
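# Note (not in the original file): `_preprocess_chatgpt_detection` draws its
# label with an unseeded random.random(), so repeated preprocessing runs can
# assign different labels to the same example; a per-example seed, as used by
# `_icl_rand` further below, would make the mapping reproducible.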
sts_companion = Classification("sentence1","sentence2","label",
dataset_name="metaeval/sts-companion")
commonsense_qa_2 = Classification("question",labels="answer",
dataset_name="metaeval/commonsense_qa_2.0")
ling_nli = Classification("premise","hypothesis","label",dataset_name="metaeval/lingnli")
monotonicity_entailment = Classification("sentence1", "sentence2", "gold_label",
dataset_name="metaeval/monotonicity-entailment")
arct = MultipleChoice(cat(["reason","claim"]),choices=["warrant0","warrant1"],
labels="correctLabelW0orW1", dataset_name="metaeval/arct")
scinli = Classification("sentence1", "sentence2", labels="label",
post_process=lambda x:x.shuffle(seed=0),
dataset_name="metaeval/scinli")
naturallogic = Classification(" sent1 "," sent2 "," new_label ",dataset_name="metaeval/naturallogic")
onestop_qa = MultipleChoice(cat(["paragraph","question"]),choices_list="answers",
labels=constant(0))
moral_stories = MultipleChoice(cat(["situation","intention"]),
choices=['moral_action',"immoral_action"],labels=constant(0),
dataset_name="demelin/moral_stories", config_name="full")
prost = MultipleChoice(cat(["context","ex_question"]), choices=['A','B','C','D'],labels="label",
dataset_name="corypaik/prost")
dyna_hate = Classification("text",labels="label",dataset_name="aps/dynahate",splits=['train',None,None])
syntactic_augmentation_nli = Classification('sentence1',"sentence2","gold_label",dataset_name="metaeval/syntactic-augmentation-nli")
autotnli = Classification("premises", "hypothesis", "label", dataset_name="metaeval/autotnli")
#equate = Classification("sentence1", "sentence2", "gold_label",dataset_name="metaeval/equate")
conqada = Classification("sentence1","sentence2","label",dataset_name="lasha-nlp/CONDAQA",
pre_process = lambda ds:ds.filter(lambda x:x['label'] in {"DON'T KNOW","YES","NO"})
)
webgpt_comparisons = MultipleChoice(get.question.full_text, choices=['answer_0','answer_1'],
labels=lambda x:int(x['score_1']>0),
dataset_name="openai/webgpt_comparisons")
synthetic_instruct = MultipleChoice('prompt', choices=['chosen', 'rejected'],
labels=constant(0), dataset_name="Dahoas/synthetic-instruct-gptj-pairwise")
scruples = Classification("text",labels="binarized_label",dataset_name="metaeval/scruples")
wouldyourather = MultipleChoice(constant('Most people would rather:'), choices=['option_a','option_b'],
labels= lambda x: int(x['votes_a']<x['votes_b']),
dataset_name="metaeval/wouldyourather")
attempto_nli = Classification("premise","hypothesis",
lambda x:f'race-{x["race_label"]}',
dataset_name="sileod/attempto-nli")
defeasible_nli = Classification(cat(["Premise","Hypothesis"]),"Update",labels="UpdateType",
dataset_name="metaeval/defeasible-nli",config_name=['atomic', 'snli'])
#defeasible_nli_social = Classification(cat(["SocialChemROT","Hypothesis"]),"Update",labels="UpdateType",
# dataset_name="metaeval/defeasible-nli",config_name='social')
help_nli = Classification("ori_sentence","new_sentence","gold_label",
dataset_name="metaeval/help-nli")
nli_veridicality_transitivity = Classification("sentence1","sentence2","gold_label",
dataset_name="metaeval/nli-veridicality-transitivity")
nl_satisfiability= Classification("sentence",labels="label",
dataset_name="metaeval/natural-language-satisfiability")
lonli = Classification("premise","hypothesis","label",
dataset_name="metaeval/lonli")
dadc_limit = Classification("sentence1","sentence2","label",
dataset_name="metaeval/dadc-limit-nli")
flute = Classification("premise","hypothesis","label",
dataset_name="ColumbiaNLP/FLUTE")
strategy_qa = Classification('question',labels='answer',
dataset_name="metaeval/strategy-qa",splits=['train',None,None])
summarize_from_feedback = MultipleChoice(get.info.post,
choices_list=lambda x: [x['summaries'][0]['text'],x['summaries'][1]['text']],
labels="choice",
dataset_name="openai/summarize_from_feedback", config_name="comparisons",
pre_process = lambda ds:ds.filter(lambda x: type(get.info.post(x))==str)
)
folio = Classification(lambda x: " ".join(x['premises']),"conclusion",
labels="label",
dataset_name="metaeval/folio")
tomi_nli = Classification("premise","hypothesis","label",
dataset_name="metaeval/tomi-nli")
avicenna = Classification("Premise 1","Premise 2","Syllogistic relation",
dataset_name="metaeval/avicenna")
shp = MultipleChoice("history",
choices=['human_ref_A','human_ref_B'],
labels="labels",
dataset_name="stanfordnlp/SHP")
medqa_usmle = MultipleChoice('sent1',choices=regen('ending[0-3]'),labels='label',
dataset_name="GBaker/MedQA-USMLE-4-options-hf")
wikimedqa = MultipleChoice("text",choices=regen('option_[0-7]'),labels='label',
dataset_name="sileod/wikimedqa",
config_name=["medwiki"])
cicero = MultipleChoice(lambda x: " ".join(x['Dialogue']),
choices_list="Choices", labels=lambda x:x['Human Written Answer'][0],
dataset_name="declare-lab/cicero")
creak = Classification("sentence",labels="label",
dataset_name='amydeng2000/CREAK')
mutual = MultipleChoice("article",choices_list="options",
labels=lambda x: "ABCD".index(x['answers']),
dataset_name="metaeval/mutual",splits=["train",None,None])
neqa = MultipleChoice('prompt',choices_list='classes',labels="answer_index",
dataset_name="inverse-scaling/NeQA")
quote_repetition = MultipleChoice('prompt',choices_list='classes',labels="answer_index",
dataset_name="inverse-scaling/quote-repetition")
redefine_math = MultipleChoice('prompt',choices_list='classes',labels="answer_index",
dataset_name="inverse-scaling/redefine-math")
puzzte = Classification("puzzle_text","question","answer",
dataset_name="metaeval/puzzte")
implicatures = MultipleChoice(cat(['context','response'],"\n"),
choices=['correct_implicature','incorrect_implicature'],
labels=constant(0),
dataset_name='metaeval/implicatures')
race = MultipleChoice(cat(['question','article'],'\n'), choices_list='options',
labels=lambda x:'ABCDE'.index(x['answer']),
config_name=['middle','high'])
race_c = MultipleChoice(cat(['question','article'],'\n'),choices_list='option',labels='label',
dataset_name='metaeval/race-c')
spartqa_yn=Classification("story","question","answer",
dataset_name="metaeval/spartqa-yn")
spartqa_mc=MultipleChoice(cat(["story","question"]),choices_list="candidate_answers",labels="answer",
dataset_name="metaeval/spartqa-mchoice")
temporal_nli = Classification("Premise","Hypothesis","Label",
dataset_name="metaeval/temporal-nli")
riddle_sense = MultipleChoice("question", choices_list=get.choices.text,
labels=lambda x : "ABCDE".index(x['answerKey']))
clcd = Classification(
"sentence1","sentence2","label",
dataset_name="metaeval/clcd-english")
twentyquestions = Classification("question","subject","answer",dataset_name="maximedb/twentyquestions")
reclor = MultipleChoice(cat(["context","question"]),choices_list="answers",labels="label",
dataset_name="metaeval/reclor",splits=['train','validation',None])
c_aug_imdb = Classification("Text",labels="Sentiment",
dataset_name='metaeval/counterfactually-augmented-imdb')
c_aug_snli = Classification("sentence1","sentence2","gold_label",
dataset_name='metaeval/counterfactually-augmented-snli')
cnli = Classification("premise","hypothesis","label",
dataset_name='metaeval/cnli')
perturbed_boolq = Classification("question",labels="hard_label",
dataset_name='metaeval/boolq-natural-perturbations')
#mega_acceptability = Classification("sentence",labels="average",
# dataset_name='metaeval/mega-acceptability-v2')
graded_acceptability = Classification("text",labels="normalized_score",
dataset_name="metaeval/acceptability-prediction")
equate = Classification("sentence1","sentence2","gold_label",
dataset_name='metaeval/equate')
science_qa = MultipleChoice("question",choices_list="choices",labels="answer",
dataset_name="metaeval/ScienceQA_text_only")
ekar=MultipleChoice("question",choices_list=get.choices.text,
labels=lambda x:"ABCD".index(x['answerKey']),
dataset_name="Jiangjie/ekar_english")
implicit_hate = Classification("post",labels="class",
dataset_name="metaeval/implicit-hate-stg1")
nli_unambiguity = Classification("premise","hypothesis","gini",
dataset_name="metaeval/chaos-mnli-ambiguity")
headline_cause = Classification('left_title','right_title','label',
dataset_name='IlyaGusev/headline_cause',config_name='en_simple')
logiqa_2 = Classification("premise","hypothesis","label",dataset_name="metaeval/logiqa-2.0-nli")
_oasst = dict(dataset_name="tasksource/oasst1_dense_flat",
pre_process = lambda ds:ds.filter(lambda x:x['lang']=='en'))
oasst1__quality = Classification("parent_text","text",labels="quality",**_oasst)
oasst1__toxicity = Classification("parent_text","text",labels="toxicity",**_oasst)
oasst1__helpfulness = Classification("parent_text","text",labels="helpfulness",**_oasst)
para_rules = Classification("context","question",
labels=name("label",["False","True"]),
dataset_name="qbao775/PARARULE-Plus")
mindgames = Classification("premise","hypothesis","label",dataset_name="sileod/mindgames")
def _udep_post_process(ds):
return ds.cast_column('labels', Sequence(ClassLabel(names=udep_en_labels)))
udep__deprel = TokenClassification('tokens',lambda x:[udep_en_labels.index(a) for a in x['deprel']],
config_name=udep_en_configs,dataset_name="universal_dependencies",post_process=_udep_post_process)
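# Note (not in the original file): `udep_en_configs` and `udep_en_labels` are
# module-level constants defined earlier in this file; the post-process cast
# turns the integer deprel ids produced above into a Sequence of ClassLabel
# features, so the relation names are preserved in the dataset schema.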
ambient= Classification("premise","hypothesis","hypothesis_ambiguous",dataset_name="metaeval/ambient")
path_naturalness = MultipleChoice(constant(""),choices=['choice1','choice2'],labels="label",
dataset_name="metaeval/path-naturalness-prediction")
civil_comments__toxicity = Classification("text",labels="toxicity")
civil_comments__severe_toxicity = Classification("text",labels="severe_toxicity")
civil_comments__obscene = Classification("text",labels="obscene")
civil_comments__threat = Classification("text",labels="threat")
civil_comments__insult = Classification("text",labels="insult")
civil_comments__identity_attack = Classification("text",labels="identity_attack")
civil_comments__sexual_explicit = Classification("text",labels="sexual_explicit")
cloth = MultipleChoice("sentence", choices_list=lambda x:[x["answer"]]+x["distractors"],labels=constant(0), dataset_name="AndyChiang/cloth")
dgen = MultipleChoice("sentence", choices_list=lambda x:[x["answer"]]+x["distractors"],labels=constant(0), dataset_name="AndyChiang/dgen")
oasst_rlhf = MultipleChoice("prompt",choices=['chosen','rejected'],labels=constant(0),
dataset_name="tasksource/oasst1_pairwise_rlhf_reward")
i2d2 = Classification("sentence1",labels=name('label',['False','True']), dataset_name="tasksource/I2D2")
arg_me = Classification('argument','conclusion','stance', dataset_name="webis/args_me")
valueeval_stance = Classification("Premise","Conclusion","Stance", dataset_name="webis/Touche23-ValueEval")
starcon = Classification('argument','topic','label',dataset_name="tasksource/starcon")
banking77 = Classification("text",labels="label",dataset_name="PolyAI/banking77")
ruletaker = Classification("context","text","label",dataset_name="tasksource/ruletaker")
lsat_qa = MultipleChoice(
cat(['passage','question']),
choices_list='references',labels="gold_index",
dataset_name="lighteval/lsat_qa",config_name="all")
control = Classification('premise','hypothesis',"label",dataset_name="tasksource/ConTRoL-nli")
tracie = Classification("premise","hypothesis","answer",dataset_name='tasksource/tracie')
sherliic = Classification("premise","hypothesis","label",dataset_name='tasksource/sherliic')
sen_making__1 = MultipleChoice(constant('Choose most plausible:'), choices=['sentence0','sentence1'],labels='false',
dataset_name="tasksource/sen-making")
sen_making__2 = MultipleChoice(lambda x: [x['sentence0'],x['sentence1']][x['false']] + '\n is not plausible because :',
choices=['A','B','C'],labels=lambda x: 'ABC'.index(x['reason']), dataset_name="tasksource/sen-making")
winowhy = Classification('sentence', lambda x: f'In "{x["wnli_sent1"]}", {x["wnli_sent2"]}',
labels=name('label',['False','True']), dataset_name="tasksource/winowhy")
#for CFG in "cognitive-bias", "fake-news", "gender-bias", "hate-speech", "linguistic-bias", "political-bias", "racial-bias", "text-level-bias":
# print(f"mbib__{CFG.replace('-','_')} = Classification('text',labels=name('label',['not {CFG}','{CFG}']), dataset_name='mediabiasgroup/mbib-base', config_name='{CFG}')")
mbib_cognitive_bias = Classification('text',labels=name('label',['not cognitive-bias','cognitive-bias']), dataset_name='mediabiasgroup/mbib-base', config_name='cognitive-bias')
mbib_fake_news = Classification('text',labels=name('label',['not fake-news','fake-news']), dataset_name='mediabiasgroup/mbib-base', config_name='fake-news')
mbib_gender_bias = Classification('text',labels=name('label',['not gender-bias','gender-bias']), dataset_name='mediabiasgroup/mbib-base', config_name='gender-bias')
mbib_hate_speech = Classification('text',labels=name('label',['not hate-speech','hate-speech']), dataset_name='mediabiasgroup/mbib-base', config_name='hate-speech')
mbib_linguistic_bias = Classification('text',labels=name('label',['not linguistic-bias','linguistic-bias']), dataset_name='mediabiasgroup/mbib-base', config_name='linguistic-bias')
mbib_political_bias = Classification('text',labels=name('label',['not political-bias','political-bias']), dataset_name='mediabiasgroup/mbib-base', config_name='political-bias')
mbib_racial_bias = Classification('text',labels=name('label',['not racial-bias','racial-bias']), dataset_name='mediabiasgroup/mbib-base', config_name='racial-bias')
mbib_text_level_bias = Classification('text',labels=name('label',['not text-level-bias','text-level-bias']), dataset_name='mediabiasgroup/mbib-base', config_name='text-level-bias')
robustLR = Classification("context","statement","label", dataset_name="tasksource/robustLR")
cluttr = Classification("story","query", "target_text",dataset_name="CLUTRR/v1", config_name="gen_train234_test2to10")
logical_fallacy = Classification("source_article", labels="logical_fallacies", dataset_name="tasksource/logical-fallacy")
parade = Classification("Definition1","Definition2", labels=name('Binary labels',["not-paraphrase","paraphrase"]), dataset_name="tasksource/parade")
cladder = Classification("given_info", "question", "answer",dataset_name="tasksource/cladder")
subjectivity = Classification("Sentence",labels="Label",dataset_name="tasksource/subjectivity")
moh = Classification("context","expression","label", dataset_name="tasksource/MOH")
vuac = Classification("context","expression","label", dataset_name="tasksource/VUAC")
trofi = Classification("context","expression","label", dataset_name="tasksource/TroFi", splits=['train',None,'test'])
sharc_classification = Classification("snippet", lambda x:f'{x["scenario"]}\n{x["question"]}',
labels=lambda x:x["answer"] if x['answer'] in {"Yes","No","Irrelevant"} else "Clarification needed",
dataset_name='sharc_modified',config_name='mod')
conceptrules_v2 = Classification("context", "text", "label", dataset_name="tasksource/conceptrules_v2")
scidtb = Classification("unit1_txt","unit2_txt","label", dataset_name="metaeval/disrpt",config_name='eng.dep.scidtb')
chunking = TokenClassification("tokens","chunk_tags", dataset_name="conll2000")
few_nerd = TokenClassification("tokens","fine_ner_tags",dataset_name="DFKI-SLT/few-nerd",config_name='supervised')
finer = TokenClassification('tokens','ner_tags',dataset_name='nlpaueb/finer-139')
label_nli = Classification("premise","hypothesis","labels",dataset_name='tasksource/zero-shot-label-nli')
com2sense = Classification("sent",labels="label",dataset_name="tasksource/com2sense",splits=['train',"validation",None])
scone = Classification('sentence1_edited','sentence2_edited','gold_label_edited',dataset_name="tasksource/scone")
winodict = MultipleChoice(cat(['definition','sentence']),['option1','option2'],'label',dataset_name='tasksource/winodict')
fool_me_twice = Classification(
lambda x: " ".join(a['text'] for a in x['gold_evidence']),
'text', 'label', dataset_name='tasksource/fool-me-twice')
monli = Classification("sentence1","sentence2","gold_label", dataset_name="tasksource/monli")
causality = Classification('premise','hypothesis','relation', dataset_name='tasksource/corr2cause')
lsat = MultipleChoice(cat(['passage','question']), choices_list='references',labels='gold_index',dataset_name='lighteval/lsat_qa',config_name='all')
apt = Classification('text_a','text_b',name('labels',['not_paraphrase','paraphrase']),dataset_name='tasksource/apt')
#xsum_factuality = Classification("summary",labels="is_factual")
financial_sentiment = Classification("text",labels="label",dataset_name="zeroshot/twitter-financial-news-sentiment")
def _icl_rand(x):
import random
return random.Random(x['sentence1'][:50]).randint(0,1) #deterministic label for each input
icl = Classification("inputs", lambda x: x['symbols'][_icl_rand(x)],
labels=lambda x: int(x['symbols'][_icl_rand(x)]==x['targets']),
dataset_name="tasksource/icl-symbol-tuning-instruct",
pre_process=lambda ds:ds.filter(lambda x:len(x['inputs'])<200*4), # 200 tokens of 4 char
post_process=lambda ds:ds.cast_column('labels',ClassLabel(names=['False','True']))
)
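# How `icl` works (explanatory note, not in the original file): `_icl_rand`
# derives a deterministic coin flip from the first 50 characters of the input,
# that flip selects one of the two candidate symbols as the second sentence,
# and the label is 1 iff the selected symbol equals the true target, yielding
# a roughly balanced binary verification task over symbol-tuning prompts.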
space_nli = Classification("premises","hypothesis","label",dataset_name="tasksource/SpaceNLI")
# hate_context
| 49,569 | 46.89372 | 230 |
py
|
tasksource
|
tasksource-main/src/tasksource/preprocess.py
|
from collections.abc import Iterable
from dotwiz import DotWiz
from dataclasses import dataclass
from typing import Union
import itertools
import funcy as fc
import exrex
import magicattr
import numpy as np
import copy
import datasets
def get_column_names(dataset):
cn = dataset.column_names
if type(cn)==dict:
return set(fc.flatten(cn.values()))
else:
return set(cn)
def sample_dataset(dataset,n=10000, n_eval=1000,seed=0):
for k in dataset:
n_k=(n if k=='train' else n_eval)
if n_k and len(dataset[k])>n_k:
dataset[k]=dataset[k].train_test_split(train_size=n_k,seed=seed)['train']
return dataset
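# Note (not in the original file): `sample_dataset` caps the train split at
# `n` rows and each evaluation split at `n_eval` rows, using a seeded
# train_test_split so the subsample is reproducible across runs.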
class Preprocessing(DotWiz):
default_splits = ('train','validation','test')
@staticmethod
def __map_to_target(x,fn=lambda x:None, target=None):
x[target]=fn(x)
return x
def load(self):
return self(datasets.load_dataset(self.dataset_name,self.config_name))
def __call__(self,dataset, max_rows=None, max_rows_eval=None,seed=0):
dataset = self.pre_process(dataset)
# manage splits
for k,v in zip(self.default_splits, self.splits):
if v and k!=v:
dataset[k]=dataset[v]
del dataset[v]
if k in dataset and not v: # obfuscated label
del dataset[k]
dataset = fix_splits(dataset)
for k in list(dataset.keys()):
if k not in self.default_splits:
del dataset[k]
dataset = sample_dataset(dataset, max_rows, max_rows_eval,seed=seed)
# field annotated with a string
substitutions = {v:k for k,v in self.to_dict().items()
if (k and k not in {'splits','dataset_name','config_name'}
and type(v)==str and k!=v)}
dataset=dataset.remove_columns([c for c in substitutions.values() if c in dataset['train'].features and c not in substitutions])
dataset=dataset.rename_columns(substitutions)
# field annotated with a function
for k in self.to_dict().keys():
v=getattr(self, k)
if callable(v) and k not in {"post_process","pre_process","load"}:
dataset=dataset.map(self.__map_to_target,
fn_kwargs={'fn':v,'target':k})
dataset=dataset.remove_columns(
get_column_names(dataset)-set(self.to_dict().keys()))
dataset = fix_labels(dataset)
dataset = fix_splits(dataset) # again: label mapping changed
dataset = self.post_process(dataset)
return dataset
@dataclass
class cat(Preprocessing):
fields:Union[str,list]=None
separator:str=' '
def __call__(self, example=None):
y=[np.char.array(example[f]) + sep
for f,sep in zip(self.fields[::-1],itertools.repeat(self.separator))]
y=list(sum(*y))
if len(y)==1:
y=y[0]
return y
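# Explanatory note (not in the original file): for fields=['s1', 's2'] and the
# default ' ' separator, the reversed field list plus sum(*y) above resolves
# to the np.char concatenation example['s1'] + ' ' + example['s2'] + ' ',
# i.e. the fields joined in their declared order, each followed by the
# separator (so the result carries a trailing separator).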
def pretty(f):
class pretty_f(DotWiz):
def __init__(self,*args):
self.__f_arg = f(*args)
for a in args:
setattr(self,'value',a)
def __call__(self, *args,**kwargs):
return self.__f_arg(*args,**kwargs)
def __repr__(self):
return f"{self.__f_arg.__qualname__ .split('.')[0]}({self.value})"
return pretty_f
class dotgetter:
def __init__(self, path=''):
self.path=path
def __bool__(self):
return bool(self.path)
def __getattr__(self, k):
return self.__class__(f'{self.path}.{k}'.lstrip('.'))
def __getitem__(self, i):
return self.__class__(f'{self.path}[{i}]')
def __call__(self, example=None):
return magicattr.get(DotWiz(example), self.path)
def __hash__(self):
return hash(self.path)
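# Usage sketch (not in the original file; the example dict is hypothetical):
# the module-level `get = dotgetter()` defined further below lets task
# definitions spell lazy accessor paths such as
#   get.choices.text({'choices': {'text': ['a', 'b']}})  # -> ['a', 'b']
# which are only resolved against an example when the task is preprocessed.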
@dataclass
class ClassificationFields(Preprocessing):
sentence1:str='sentence1'
sentence2:str='sentence2'
labels:str='labels'
@dataclass
class Seq2SeqLMFields(Preprocessing):
prompt:str='prompt'
output:str='output'
@dataclass
class TokenClassificationFields(Preprocessing):
tokens:str='tokens'
labels:str='labels'
@dataclass
class MultipleChoiceFields(Preprocessing):
inputs:str='input'
choices:Iterable=tuple()
labels:str='labels'
choices_list:str=None
def __post_init__(self):
for i, c in enumerate(self.choices):
setattr(self,f'choice{i}',c)
delattr(self,'choices')
if not self.choices_list:
delattr(self,'choices_list')
def __call__(self,dataset, *args, **kwargs):
dataset = super().__call__(dataset, *args, **kwargs)
if self.choices_list:
dataset = dataset.filter(lambda x: 1<len(x['choices_list']))
n_options = min([len(x) for k in dataset for x in dataset[k]['choices_list']])
n_options = min(4,n_options)
dataset = dataset.map(self.flatten, fn_kwargs={'n_options':n_options})
return dataset
@staticmethod
def flatten(x, n_options=None):
n_neg = n_options-1 if n_options else None
choices = x['choices_list']
label=x['labels']
neg = choices[:label] + choices[label+1:]
pos = choices[label]
x['labels']=0
x['choices_list']=[pos]+neg[:n_neg]
for i,o in enumerate(x['choices_list']):
x[f'choice{i}']=o
del x['choices_list']
return x
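# Worked example for `flatten` (hypothetical input, not in the original file):
# with x = {'choices_list': ['w', 'x', 'y'], 'labels': 2} and n_options=2, the
# gold choice 'y' is moved to the front and the negatives are truncated, so
# the output row is {'labels': 0, 'choice0': 'y', 'choice1': 'w'}.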
@dataclass
class SharedFields:
splits:list=Preprocessing.default_splits
dataset_name:str = None
config_name:str = None
pre_process: callable = lambda x:x
post_process: callable = lambda x:x
#language:str="en"
@dataclass
class Classification(SharedFields, ClassificationFields): pass
@dataclass
class MultipleChoice(SharedFields, MultipleChoiceFields): pass
@dataclass
class TokenClassification(SharedFields, TokenClassificationFields): pass
@dataclass
class Seq2SeqLM(SharedFields, Seq2SeqLMFields): pass
get=dotgetter()
constant = pretty(fc.constantly)
regen = lambda x: list(exrex.generate(x))
def name(label_name, classes):
return lambda x:classes[x[label_name]]
def fix_splits(dataset):
if len(dataset)==1 and "train" not in dataset:
k = list(dataset)[0]
dataset['train'] = copy.deepcopy(dataset[k])
del dataset[k]
if 'auxiliary_train' in dataset:
del dataset['auxiliary_train']
if 'test' in dataset: # manage obfuscated labels
if 'labels' in dataset['test'].features:
if len(set(fc.flatten(dataset['test'].to_dict()['labels'])))==1:
del dataset['test']
if 'validation' in dataset and 'train' not in dataset:
train_validation = dataset['validation'].train_test_split(0.5, seed=0)
dataset['train'] = train_validation['train']
dataset['validation']=train_validation['test']
if 'validation' in dataset and 'test' not in dataset:
validation_test = dataset['validation'].train_test_split(0.5, seed=0)
dataset['validation'] = validation_test['train']
dataset['test']=validation_test['test']
if 'train' in dataset and 'validation' not in dataset:
train_val = dataset['train'].train_test_split(train_size=0.90, seed=0)
dataset['train'] = train_val['train']
dataset['validation']=train_val['test']
if 'test' in dataset and 'validation' not in dataset:
validation_test = dataset['test'].train_test_split(0.5, seed=0)
dataset['validation'] = validation_test['train']
dataset['test']=validation_test['test']
if 'validation' not in dataset and 'test' not in dataset:
train_val_test = dataset["train"].train_test_split(train_size=0.90, seed=0)
val_test = train_val_test["test"].train_test_split(0.5, seed=0)
dataset["train"] = train_val_test["train"]
dataset["validation"] = val_test["train"]
dataset["test"] = val_test["test"]
return dataset
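# Note (not in the original file): the cascade above guarantees that every
# returned DatasetDict ends up with train/validation/test splits, carving any
# missing split out of an existing one with seeded train_test_split calls so
# the repair is deterministic across runs.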
def fix_labels(dataset, label_key='labels'):
if type(dataset['train'][label_key][0]) in [int,list,float]:
return dataset
labels=set(fc.flatten(dataset[k][label_key] for k in {"train"}))
if set(labels)=={'entailment','neutral','contradiction'}:
order=lambda x:dict(fc.flip(enumerate(['entailment','neutral','contradiction']))).get(x,x)
else:
order=str
labels=sorted(labels, key=order)
dataset=dataset.cast_column(label_key, datasets.ClassLabel(names=labels))
return dataset
def concatenate_dataset_dict(l):
"""Concatenate a list of DatastDict objects sharing same splits and columns."""
keys=l[0].keys()
return datasets.DatasetDict({k: datasets.concatenate_datasets([x[k] for x in l]) for k in keys})
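# Minimal usage sketch appended for illustration; not part of the original
# file. It assumes network access to the Hugging Face hub, and
# "rotten_tomatoes" is an illustrative dataset choice, not one mandated by
# this module.
if __name__ == "__main__":
    prep = Classification(sentence1="text", labels="label",
                          dataset_name="rotten_tomatoes")
    # load() fetches the dataset and applies the full preprocessing pipeline:
    # split repair, column renaming, optional sub-sampling and label casting.
    ds = prep.load()
    print(ds)
    # After fix_labels, the label column is a datasets.ClassLabel feature.
    print(ds["train"].features["labels"])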
| 8,816 | 32.022472 | 136 |
py
|
tasksource
|
tasksource-main/src/tasksource/metadata/popularity.py
|
dataset_rank = {'glue': 0,
'super_glue': 12,
'tweet_eval': 23,
'blimp': 34,
'imdb': 101,
'wikitext': 102,
'squad': 106,
'trec': 107,
'openwebtext': 108,
'rotten_tomatoes': 109,
'anli': 110,
'adversarial_qa': 111,
'ai2_arc': 115,
'xsum': 117,
'amazon_reviews_multi': 118,
'ag_news': 125,
'yelp_review_full': 126,
'wino_bias': 127,
'piqa': 131,
'duorc': 132,
'quail': 134,
'trivia_qa': 135,
'cnn_dailymail': 143,
'common_gen': 146,
'sst': 147,
'conll2003': 150,
'financial_phrasebank': 151,
'babi_qa': 155,
'poem_sentiment': 163,
'dream': 164,
'paws': 165,
'emotion': 168,
'kilt_tasks': 169,
'sciq': 180,
'cos_e': 181,
'dbpedia_14': 183,
'newsgroup': 184,
'cosmos_qa': 244,
'squad_v2': 245,
'samsum': 246,
'amazon_polarity': 247,
'multi_news': 248,
'wiki_hop': 249,
'quartz': 251,
'qasc': 252,
'wiki_qa': 253,
'openbookqa': 254,
'ropes': 256,
'quoref': 257,
'snli': 258,
'app_reviews': 259,
'gigaword': 260,
'wiki_bio': 261,
'amazon_us_reviews': 262,
'scan': 308,
'race': 320,
'swag': 323,
'codah': 325,
'ccdv/arxiv-summarization': 331,
'subjqa': 333,
'universal_morphologies': 339,
'hans': 447,
'sst2': 448,
'guardian_authorship': 449,
'math_qa': 465,
'librispeech_asr': 466,
'hendrycks_test': 469,
'openai_humaneval': 526,
'ptb_text_only': 527,
'pubmed_qa': 528,
'head_qa': 531,
'ought/raft': 533,
'ade_corpus_v2': 544,
'cbt': 547,
'bookcorpus': 552,
'squadshifts': 553,
'story_cloze': 557,
'multi_nli': 559,
'qanta': 560,
'hate_speech18': 564,
'gem': 565,
'lex_glue': 599,
'deepmind/code_contests': 606,
'imagenet-1k': 607,
'blended_skill_talk': 608,
'sms_spam': 609,
'asset': 610,
'fever': 612,
'commonsense_qa': 615,
'scientific_papers': 616,
'evidence_infer_treatment': 618,
'hotpot_qa': 620,
'superb': 622,
'sick': 628,
'humicroedit': 629,
'snips_built_in_intents': 631,
'winograd_wsc': 632,
'bigbench': 634,
'multi_woz_v22': 801,
'lambada': 803,
'banking77': 804,
'hate_speech_offensive': 805,
'yahoo_answers_topics': 806,
'ccdv/cnn_dailymail': 807,
'hyperpartisan_news_detection': 810,
'gsm8k': 812,
'wikisql': 814,
'the_pile': 815,
'health_fact': 825,
'mdd': 826,
'web_questions': 830,
'ethos': 831,
'wnut_17': 833,
'medical_questions_pairs': 834,
'scitldr': 835,
'drop': 838,
'squad_adversarial': 839,
'e2e_nlg_cleaned': 841,
'onestop_english': 842,
'pragmeval': 843,
'relbert/analogy_questions': 863,
'nq_open': 869,
'daily_dialog': 870,
'mc_taco': 871,
'crows_pairs': 872,
'go_emotions': 873,
'ncbi_disease': 875,
'boolq': 876,
'movie_rationales': 877,
'climate_fever': 878,
'discovery': 879,
'lama': 881,
'ecthr_cases': 885,
'jfleg': 887,
'selqa': 888,
'acronym_identification': 892,
'scicite': 893,
'tab_fact': 894,
'wiki_asp': 896,
'enriched_web_nlg': 916,
'svhn': 918,
'docred': 920,
'conllpp': 921,
'liar': 922,
'multi_x_science_sum': 923,
'discofuse': 924,
'competition_math': 926,
'biosses': 927,
'jnlpba': 928,
'web_nlg': 929,
'qa_srl': 937,
'neural_code_search': 938,
'conv_ai_2': 940,
'craigslist_bargains': 941,
'qed': 942,
'conv_ai_3': 943,
'conv_ai': 944,
'turk': 945,
'covid_qa_castorini': 946,
'sem_eval_2014_task_1': 947,
'mwsc': 948,
'gutenberg_time': 949,
'billsum': 950,
'riddle_sense': 951,
'species_800': 952,
'hlgd': 953,
'definite_pronoun_resolution': 954,
'tmu_gfm_dataset': 955,
'relbert/semeval2012_relational_similarity_v4': 956,
'clinc_oos': 957,
'imppres': 960,
'mrqa': 976,
'cc_news': 977,
'lmqg/qag_tweetqa': 978,
'aeslc': 979,
'big_patent': 980,
'eli5': 990,
'scene_parse_150': 991,
'circa': 993,
'aqua_rat': 994,
'nlu_evaluation_data': 996,
'newspop': 997,
'relbert/lexical_relation_classification': 998,
'yahoo_answers_qa': 1003,
'emo': 1004,
'silicone': 1005,
'cord19': 1015,
'tweet_qa': 1018,
'meta_woz': 1019,
'md_gender_bias': 1021,
'art': 1031,
'google_wellformed_query': 1032,
'ambig_qa': 1033,
'taskmaster2': 1035,
'quac': 1042,
'freebase_qa': 1043,
'quora': 1044,
'numer_sense': 1045,
'narrativeqa': 1046,
'ccdv/pubmed-summarization': 1047,
'qa_zre': 1049,
'limit': 1050,
'tweets_hate_speech_detection': 1051,
'mocha': 1052,
'hatexplain': 1053,
'bing_coronavirus_query_set': 1054,
'great_code': 1055,
'medal': 1056,
'sent_comp': 1057,
'kelm': 1058,
'natural_questions': 1059,
'wiki_split': 1061,
'zest': 1062,
'cfq': 1063,
'multi_re_qa': 1071,
'stereoset': 1080,
'coqa': 1082,
'cuad': 1083,
'break_data': 1084,
'mbpp': 1089,
'knkarthick/dialogsum': 1091,
'wiki_auto': 1092,
'pile-of-law/pile-of-law': 1097,
'pg19': 1132,
'DFKI-SLT/few-nerd': 1133,
'wikicorpus': 1136,
'e2e_nlg': 1142,
'anton-l/superb': 1143,
'ghomasHudson/muld': 1144,
'Exr0n/wiki-entity-similarity': 1150,
'BeIR/nfcorpus': 1156,
'ccdv/govreport-summarization': 1158,
'woz_dialogue': 1159,
'reddit': 1164,
'EMBO/sd-nlp': 1165,
'empathetic_dialogues': 1170,
'BeIR/fiqa': 1171,
'generics_kb': 1173,
'swda': 1177,
'wikitablequestions': 1178,
'pubmed': 1183,
'chr_en': 1184,
'sharc': 1185,
'sharc_modified': 1186,
'BeIR/scifact': 1190,
'nell': 1192,
'patriziobellan/PET': 1196,
'EMBO/biolang': 1198,
'dynabench/qa': 1202,
'reddit_tifu': 1206,
'BeIR/scidocs': 1208,
'pec': 1210,
'tner/tweetner7': 1213,
'BeIR/arguana': 1214,
'multidoc2dial': 1216,
'taskmaster1': 1219,
'spider': 1221,
'adv_glue': 1222,
'allenai/mslr2022': 1228,
'conceptnet5': 1230,
'tyqiangz/multilingual-sentiments': 1233,
'newsqa': 1246,
'metashift': 1249,
'so_stacksample': 1250,
'doc2dial': 1253,
'search_qa': 1256,
'yhavinga/mc4_nl_cleaned': 1258,
'hope_edi': 1270,
'proto_qa': 1273,
'tuple_ie': 1276,
'simple_questions_v2': 1279,
'nlpaueb/finer-139': 1282,
'bookcorpusopen': 1283,
'tner/ontonotes5': 1284,
'crd3': 1285,
'ucberkeley-dlab/measuring-hate-speech': 1286,
'gap': 1287,
'recipe_nlg': 1288,
'schema_guided_dstc8': 1289,
'BeIR/beir': 1291,
'sagnikrayc/mctest': 1294,
'eurlex': 1296,
'corypaik/coda': 1297,
'bc2gm_corpus': 1298,
'ascent_kb': 1299,
'curiosity_dialogs': 1301,
'covid_qa_deepset': 1302,
'air_dialogue': 1303,
'taskmaster3': 1305,
'xsum_factuality': 1306,
'medical_dialog': 1308,
'BeIR/trec-covid': 1312,
'lhoestq/test': 1314,
'newsroom': 1315,
'tne': 1316,
'covid_qa_ucsd': 1317,
'fhamborg/news_sentiment_newsmtsc': 1319,
'prachathai67k': 1321,
'cardiffnlp/tweet_topic_multi': 1322,
'datacommons_factcheck': 1323,
'deal_or_no_dialog': 1325,
'ubuntu_dialogs_corpus': 1327,
'eu_regulatory_ir': 1329,
'scifact': 1331,
'wi_locness': 1333,
'relbert/relation_mapping': 1335,
'coastalcph/fairlex': 1336,
'asnq': 1340,
'peer_read': 1341,
'metaeval/linguisticprobing': 1343,
'jigsaw_unintended_bias': 1353,
'totto': 1354,
'irc_disentangle': 1355,
'med_hop': 1357,
'numeric_fused_head': 1359,
'ollie': 1361,
'per_sent': 1363,
'SocialGrep/ten-million-reddit-answers': 1364,
'lmqg/qg_squad': 1366,
's2orc': 1367,
'Hellisotherpeople/DebateSum': 1368,
'SocialGrep/reddit-crypto-aug-2021': 1369,
'jigsaw_toxicity_pred': 1371,
'GroNLP/ik-nlp-22_slp': 1372,
'SocialGrep/reddit-nonewnormal-complete': 1374,
'SocialGrep/reddit-wallstreetbets-aug-2021': 1376,
'SocialGrep/the-reddit-covid-dataset': 1378,
'SocialGrep/top-american-universities-on-reddit': 1380,
'BeIR/beir-corpus': 1382,
'SocialGrep/one-year-of-r-india': 1384,
'BritishLibraryLabs/EThOS-PhD-metadata': 1386,
'librispeech_lm': 1388,
'few_rel': 1389,
'arxiv_dataset': 1390,
'lc_quad': 1391,
'diplomacy_detection': 1392,
'lmqg/qa_squadshifts_pseudo': 1393,
'grail_qa': 1461,
'tner/wnut2017': 1462,
'demo-org/auditor_review': 1463,
'allenai/real-toxicity-prompts': 1464,
'BeIR/nfcorpus-qrels': 1465,
'onestop_qa': 1466,
'demelin/moral_stories': 1467,
'atomic': 1493,
'crawl_domain': 1494,
'BeIR/quora': 1495,
'Abirate/english_quotes': 1497,
'narrativeqa_manual': 1498,
'BeIR/fiqa-qrels': 1499,
'social_bias_frames': 1500,
'pkavumba/balanced-copa': 1501,
'eraser_multi_rc': 1502,
'sled-umich/TRIP': 1503,
'opinosis': 1504,
'PiC/phrase_sense_disambiguation': 1505,
'enwik8': 1506,
'sem_eval_2020_task_11': 1508,
'gooaq': 1509,
'linnaeus': 1510,
'hover': 1511,
'GonzaloA/fake_news': 1512,
'consumer-finance-complaints': 1513,
'ohsumed': 1514,
'casino': 1515,
'gfissore/arxiv-abstracts-2021': 1516,
'conv_questions': 1517,
'hate_offensive': 1518,
'sofc_materials_articles': 1519,
'wanyu/IteraTeR_human_sent': 1520,
'dialog_re': 1521,
'fake_news_english': 1522,
'dart': 1523,
'blog_authorship_corpus': 1524,
'msr_zhen_translation_parity': 1525,
'cryptonite': 1526,
'disfl_qa': 1527,
'olm/olm-CC-MAIN-2022-21-sampling-ratio-0.14775510204': 1528,
'olm/olm-CC-MAIN-2022-33-sampling-ratio-0.20': 1529,
'coarse_discourse': 1530,
'eth_py150_open': 1531,
'event2Mind': 1532,
'Paul/hatecheck': 1533,
'eli5_category': 1534,
'hippocorpus': 1535,
'the_pile_books3': 1536,
'coached_conv_pref': 1537,
'has_part': 1538,
'times_of_india_news_headlines': 1539,
'medmcqa': 1540,
'Babelscape/rebel-dataset': 1541,
'glucose': 1542,
'msr_text_compression': 1543,
'msr_genomics_kbcomp': 1544,
'SpeedOfMagic/ontonotes_english': 1545,
'msr_sqa': 1546,
'wiki_movies': 1547,
'hybrid_qa': 1548,
'metooma': 1549,
'multi_nli_mismatch': 1550,
'text2log': 1551,
'the_pile_stack_exchange': 1552,
're_dial': 1553,
'inquisitive_qg': 1554,
'SocialGrep/one-million-reddit-jokes': 1555,
'time_dial': 1556,
'BeIR/scifact-qrels': 1557,
'sede': 1558,
'mutual_friends': 1559,
'pass': 1560,
'allenai/multi_lexsum': 1561,
'youtube_caption_corrections': 1562,
'NbAiLab/norec_agg': 1563,
'DanL/scientific-challenges-and-directions-dataset': 1564,
'SocialGrep/one-million-reddit-questions': 1565,
'Motahar/github-issues': 1566,
'SocialGrep/the-2022-trucker-strike-on-reddit': 1567,
'allenai/qasper': 1568,
'CyranoB/polarity': 1569,
'SocialGrep/one-million-reddit-confessions': 1570,
'debatelab/deepa2': 1571,
'bhavnicksm/sentihood': 1572,
'debatelab/aaac': 1573,
'jgammack/SAE-door-abstracts': 1574,
'erwanlc/cocktails_recipe': 1575,
'erwanlc/cocktails_recipe_no_brand': 1576,
'BeIR/arguana-qrels': 1577,
'tner/fin': 1578,
'BeIR/scidocs-qrels': 1579,
'tner/bc5cdr': 1580,
'olm/olm-CC-MAIN-2022-27-sampling-ratio-0.16142697881': 1581,
'BeIR/fever': 1582,
'cardiffnlp/tweet_topic_single': 1584,
'speechcolab/gigaspeech': 1585,
'BeIR/webis-touche2020': 1586,
'aquamuse': 1588,
'olm/olm-CC-MAIN-2022-40-sampling-ratio-0.15894621295': 1590,
'tner/btc': 1591,
'truthful_qa': 1592,
'McGill-NLP/FaithDial': 1594,
'ekinakyurek/ftrace': 1595,
'tomasg25/scientific_lay_summarisation': 1597,
'tner/mit_restaurant': 1599,
'bigscience-biomedical/bioasq_task_b': 1600,
'strombergnlp/broad_twitter_corpus': 1619,
'tner/bionlp2004': 1620,
'metaeval/recast': 1621,
'the_pile_openwebtext2': 1629,
'taln-ls2n/inspec': 1630,
'lmqg/qa_squadshifts': 1631,
'BeIR/hotpotqa': 1636,
'jpwahle/machine-paraphrase-dataset': 1638,
'tner/mit_movie_trivia': 1639,
'tner/conll2003': 1640,
'OxAISH-AL-LLM/wiki_toxic': 1641,
'ccdv/WCEP-10': 1642,
'BeIR/trec-covid-qrels': 1646,
'g8a9/europarl_en-it': 1647,
'carblacac/twitter-sentiment-analysis': 1648,
'usc-isi/WikiConvert': 1649,
'visual_genome': 1650,
'florianbussmann/FUNSD-vu2020revising': 1660,
'Felix-ML/quoteli3': 1661,
'allenai/scico': 1662,
'drAbreu/bc4chemd_ner': 1663,
'tner/tweebank_ner': 1664,
'alisawuffles/WANLI': 1665,
'Team-PIXEL/rendered-bookcorpus': 1666,
'Team-PIXEL/rendered-wikipedia-english': 1667,
'wanyu/IteraTeR_full_sent': 1668,
'EMBO/BLURB': 1669,
'metaeval/crowdflower': 1676,
'AlexaAI/bold': 1685,
'metaeval/ethics': 1686,
'sileod/movie_recommendation': 1691,
'lmqg/qg_subjqa': 1692,
'copenlu/scientific-exaggeration-detection': 1699,
'esb/datasets': 1700,
'BeIR/msmarco': 1701,
'biwi_kinect_head_pose': 1703,
'BeIR/quora-qrels': 1704,
'wardenga/lsoie': 1705,
'nlphuji/vasr': 1707,
'BeIR/nq': 1708,
'BeIR/dbpedia-entity': 1710,
'sadrasabouri/ShahNegar': 1712,
'knkarthick/xsum': 1713,
'ColumbiaNLP/FLUTE': 1714,
'bigscience-biomedical/scitail': 1715,
'lmqg/qg_squadshifts': 1717,
'BeIR/climate-fever': 1722,
'PiC/phrase_retrieval': 1724,
'bdotloh/empathetic-dialogues-contexts': 1726,
'ccdv/mediasum': 1727,
'BeIR/msmarco-qrels': 1735,
'alexfabbri/answersumm': 1736,
'pszemraj/text2image-multi-prompt': 1737,
'shibing624/source_code': 1738,
'kensho/spgispeech': 1741,
'jamescalam/channel-metadata': 1742,
'EMBO/sd-nlp-non-tokenized': 1743,
'facebook/pmd': 1748,
'drt/kqa_pro': 1749,
'BeIR/fever-qrels': 1751,
'TheFusion21/PokemonCards': 1752,
'zeroshot/twitter-financial-news-sentiment': 1753,
'bigscience-biomedical/blurb': 1754,
'mteb/bucc-bitext-mining': 1759,
'pinecone/core-2020-05-10-deduplication': 1763,
'tals/vitaminc': 1764,
'BeIR/hotpotqa-qrels': 1765,
'gigant/ted_descriptions': 1766,
'jpwahle/autoencoder-paraphrase-dataset': 1767,
'beki/privy': 1768,
'Muennighoff/P3': 1770,
'jpwahle/dblp-discovery-dataset': 1771,
'taln-ls2n/kp20k': 1773,
'bigscience-biomedical/biosses': 1774,
'allenai/prosocial-dialog': 1776,
'pacovaldez/stackoverflow-questions': 1777,
'kasnerz/hitab': 1778,
'relbert/semeval2012_relational_similarity': 1779,
'sagnikrayc/snli-cf-kaushik': 1780,
'mwritescode/slither-audited-smart-contracts': 1781,
'BeIR/webis-touche2020-qrels': 1787,
'bigscience-biomedical/mednli': 1788,
'pinecone/movielens-recent-ratings': 1790,
'BeIR/dbpedia-entity-qrels': 1791,
'shanya/crd3': 1792,
'knkarthick/samsum': 1793,
'BeIR/climate-fever-qrels': 1794,
'BeIR/nq-qrels': 1795,
'sanchit-gandhi/librispeech_asr_dummy': 1796,
'taln-ls2n/semeval-2010-pre': 1797,
'Bingsu/openwebtext_20p': 1798,
'PolyAI/banking77': 1799,
'JulesBelveze/tldr_news': 1800,
'Freed-Wu/kodak': 1801,
'biglam/gutenberg-poetry-corpus': 1802,
'SocialGrep/reddit-r-bitcoin-data-for-jun-2022': 1803,
'taln-ls2n/kptimes': 1805,
'biglam/old_bailey_proceedings': 1806,
'launch/gov_report': 1807,
'knkarthick/AMI': 1810,
'voidful/NMSQA': 1811,
'DTU54DL/dmeo': 1812,
'FinanceInc/auditor_sentiment': 1813,
'jamescalam/unsplash-25k-photos': 1814,
'Tidrael/tsl_news': 1815,
'DTU54DL/common3k-train': 1816,
'okite97/news-data': 1817,
'lmqg/qa_squad': 1818,
'ConvLab/woz': 1819,
'ConvLab/camrest': 1820,
'ConvLab/metalwoz': 1821,
'kakaobrain/coyo-700m': 1822,
'taln-ls2n/kpbiomed': 1823,
'abhinavk/openpi_v2': 1826,
'mwong/fever-claim-related': 1831,
'ConvLab/tm1': 1832,
'joey234/nan-nli': 1833,
'ConvLab/tm2': 1834,
'ConvLab/tm3': 1835,
'ConvLab/kvret': 1836,
'ConvLab/sgd': 1837,
'relbert/semeval2012_relational_similarity_v5': 1838,
'cmudrc/wave-energy': 1839,
'llangnickel/long-covid-classification-data': 1840,
'webis/args_me': 1841,
'HuggingFaceM4/something_something_v2': 1844,
'ConvLab/dailydialog': 1845,
'huanggab/reddit_haiku': 1846,
'relbert/semeval2012_relational_similarity_v6': 1847,
'pszemraj/riddlesense_plusplus': 1848,
'rungalileo/20_Newsgroups_Fixed': 1849,
'DTU54DL/common-voice-test16k': 1850,
'lhoestq/custom_squad': 1851,
'merve/poetry': 1852,
'yoshitomo-matsubara/srsd-feynman_easy': 1853,
'nightingal3/fig-qa': 1854,
'matejklemen/vuamc': 1855,
'strombergnlp/twitter_pos': 1856,
'nlphuji/winogavil': 1858,
'DFKI-SLT/tacred': 1859,
'valurank/News_Articles_Categorization': 1861,
'nbroad/mediasum': 1862,
'asapp/slue': 1863,
'zbnsl/emoteModified': 1865,
'adsabs/WIESP2022-NER': 1866,
'arize-ai/ecommerce_reviews_with_language_drift': 1867,
'UCL-DARK/ludwig': 1868,
'Aunsiels/InfantBooks': 1874,
'openclimatefix/uk_pv': 1875,
'copenlu/fever_gold_evidence': 1876,
'rungalileo/mit_movies_fixed_connll_format': 1877,
'jamescalam/youtube-transcriptions': 1878,
'lmqg/qa_harvesting_from_wikipedia': 1879,
'qanastek/Biosses-BLUE': 1880,
'zeronix1020/Strawberry-Disease': 1881,
'dferndz/cSQuAD2': 1882,
'taln-ls2n/pubmed': 1883,
'BeIR/scidocs-generated-queries': 1884,
'jmhessel/newyorker_caption_contest': 1885,
'inverse-scaling/NeQA': 1915,
'DTU54DL/common-voice': 1916,
'turingbench/TuringBench': 1917,
'demelin/understanding_fables': 1937,
'RUCAIBox/Open-Dialogue': 1938,
'allenai/multinews_sparse_max': 1939,
'RamAnanth1/lex-fridman-podcasts': 1940,
'sled-umich/Conversation-Entailment': 1941,
'stevhliu/demo': 1942,
'svakulenk0/qrecc': 1943,
'arize-ai/movie_reviews_with_context_drift': 1944,
'launch/ampere': 1945,
'AnonymousSub/recipe_RL_data_roberta-base': 1946,
'dreamproit/bill_summary_us': 1947,
'bgstud/libri-whisper-raw': 1948,
'jpwahle/etpc': 1949,
'DTU54DL/common-native-proc': 1950,
'mbartolo/synQA': 1951,
'wanyu/IteraTeR_full_doc': 1952,
'wanyu/IteraTeR_human_doc': 1953,
'orieg/elsevier-oa-cc-by': 1954,
'climatebert/environmental_claims': 1955,
'SocialGrep/the-reddit-climate-change-dataset': 1956,
'KGraph/FB15k-237': 1958,
'KheemDH/data': 1959,
'mwong/fever-evidence-related': 1960,
'HuggingFaceM4/TGIF': 1961,
'BeIR/fever-generated-queries': 1962,
'nateraw/ade20k-tiny': 1963,
'BeIR/cqadupstack-qrels': 1964,
'knkarthick/highlightsum': 1965,
'RUCAIBox/Data-to-text-Generation': 1966,
'GateNLP/broad_twitter_corpus': 1967,
'Tidrael/finance-headlines': 1968,
'lmqg/qag_squad': 1969,
'pacovaldez/stackoverflow-questions-2016': 1970,
'BeIR/fiqa-generated-queries': 1971,
'BeIR/signal1m-generated-queries': 1972,
'MicPie/unpredictable_msdn-microsoft-com': 1973,
'zeroshot/twitter-financial-news-topic': 1974,
'inverse-scaling/quote-repetition': 1975,
'esc-bench/esc-diagnostic-backup': 1976,
'lmqg/qg_annotation': 1977,
'sileod/wep-probes': 1978,
'DTU54DL/common-voice-test3k': 1981,
'jakartaresearch/causalqa': 1982,
'copenlu/sufficient_facts': 2002,
'ConvLab/multiwoz21': 2005,
'arka0821/multi_document_summarization': 2006,
'strombergnlp/rumoureval_2019': 2007,
'rongzhangibm/NaturalQuestionsV2': 2008,
'Muennighoff/mbpp': 2009,
'RUCAIBox/Simplification': 2011,
'shubhamg2208/lexicap': 2012,
'olm/olm-wikipedia-20220701': 2013,
'esc-bench/esc-diagnostic-dataset': 2014,
'jpwahle/autoregressive-paraphrase-dataset': 2015,
'GabrielVidal/dead-by-daylight-perks': 2016,
'DTU54DL/common-proc-whisper': 2017,
'valurank/PoliticalBias': 2018,
'McGill-NLP/TopiOCQA': 2019,
'gsarti/magpie': 2020,
'BeIR/cqadupstack-generated-queries': 2021,
'MicPie/unpredictable_mmo-champion-com': 2022,
'RUCAIBox/Question-Generation': 2023,
'allenai/multinews_sparse_mean': 2024,
'demo-org/diabetes': 2025,
'StonyBrookNLP/tellmewhy': 2026,
'bergr7/weakly_supervised_ag_news': 2027,
'din0s/msmarco-nlgen': 2028,
'frankier/cross_domain_reviews': 2029,
'gart-labor/pumpnli': 2030,
'AndyChiang/cloth': 2031,
'olm/olm-CC-MAIN-2017-22-sampling-ratio-0.16178770949': 2032,
'bgstud/libri': 2033,
'DTU54DL/commonvoice_accent_test': 2034,
'lewtun/my-awesome-dataset': 2035,
'peixian/rtGender': 2036,
'pmc/open_access': 2039,
'uva-irlab/trec-cast-2019-multi-turn': 2043,
'DFKI-SLT/scidtb': 2044,
'surrey-nlp/PLOD-filtered': 2045,
'wanyu/IteraTeR_v2': 2046,
'strombergnlp/ipm_nel': 2047,
'HuggingFaceM4/charades': 2048,
'ncats/EpiSet4NER-v2': 2050,
'HuggingFaceM4/ActivitiyNet_Captions': 2051,
'sileod/discourse_marker_qa': 2052,
'yoshitomo-matsubara/srsd-feynman_medium': 2053,
'BeIR/nfcorpus-generated-queries': 2054,
'BeIR/trec-news-generated-queries': 2055,
'BeIR/robust04-generated-queries': 2056,
'BeIR/quora-generated-queries': 2057,
'valurank/Adult-content-dataset': 2058,
'launch/open_question_type': 2059,
'knkarthick/topicsum': 2060,
'yuningm/citesum': 2061,
'elihoole/asrs-aviation-reports': 2062,
'DeveloperOats/DBPedia_Classes': 2063,
'hoskinson-center/proof-pile': 2064,
'RUCAIBox/Summarization': 2065,
'RUCAIBox/Question-Answering': 2066,
'RUCAIBox/Story-Generation': 2067,
'RUCAIBox/Paraphrase': 2068,
'jakartaresearch/semeval-absa': 2069,
'tner/ttc_dummy': 2071,
'copenlu/citeworth': 2072,
'allenai/multinews_sparse_oracle': 2073,
'allenai/multixscience_sparse_oracle': 2074,
'allenai/multixscience_sparse_mean': 2075,
'allenai/multixscience_sparse_max': 2076,
'allenai/ms2_sparse_oracle': 2077,
'mschi/blogspot_raw': 2078,
'gaurikapse/civis-consultation-summaries': 2079,
'chenghao/cuad_qa': 2080,
'esc-bench/esc-datasets': 2081,
'olm/olm-wikipedia-20221001': 2082,
'allenai/wcep_dense_oracle': 2083,
'dennlinger/wiki-paragraphs': 2084,
'AndyChiang/dgen': 2085,
'esb/diagnostic-dataset': 2086,
'havens2/naacl2022': 2087,
'fkdosilovic/docee-event-classification': 2088,
'DTU54DL/demo-common-whisper': 2089,
'dferndz/cSQuAD1': 2090,
'jpcorb20/multidogo': 2091,
'julien-c/reactiongif': 2092,
'lara-martin/Scifi_TV_Shows': 2093,
'lukesjordan/worldbank-project-documents': 2094,
'mnemlaghi/widdd': 2095,
'mvarma/medwiki': 2096,
'nateraw/beans': 2098,
'nateraw/cats_vs_dogs': 2099,
'nateraw/food101': 2100,
'nateraw/sync_food101': 2101,
'ncats/EpiSet4BinaryClassification': 2102,
'ncats/EpiSet4NER-v1': 2103,
'peixian/equity_evaluation_corpus': 2104,
'rajeshradhakrishnan/malayalam_wiki': 2105,
'softcatala/open-source-english-catalan-corpus': 2106,
'toloka/CrowdSpeech': 2107,
'valurank/12-factor': 2108,
'valurank/PoliticalBias_AllSides_Txt': 2109,
'valurank/PoliticalBias_Sources': 2110,
'valurank/hate-multi': 2111,
'valurank/news-12factor': 2112,
'valurank/offensive-multi': 2113,
'webimmunization/COVID-19-vaccine-attitude-tweets': 2114,
'wpicard/nostradamus-propheties': 2115,
'yuanchuan/annotated_reference_strings': 2116,
'ruanchaves/stan_large': 2117,
'ruanchaves/stan_small': 2118,
'ruanchaves/boun': 2119,
'ruanchaves/dev_stanford': 2120,
'ruanchaves/test_stanford': 2121,
'ruanchaves/snap': 2122,
'z-uo/qasper-squad': 2123,
'SocialGrep/the-antiwork-subreddit-dataset': 2124,
'CLUTRR/v1': 2126,
'malteos/test2': 2132,
'TomTBT/pmc_open_access_xml': 2133,
'SocialGrep/the-reddit-dataset-dataset': 2137,
'SocialGrep/the-reddit-place-dataset': 2139,
'projecte-aina/gencata': 2141,
'mwong/climate-evidence-related': 2142,
'mwong/climate-claim-related': 2143,
'surrey-nlp/PLOD-unfiltered': 2144,
'SocialGrep/the-reddit-irl-dataset': 2145,
'Lexi/spanextract': 2147,
'mwong/climatetext-claim-related-evaluation': 2148,
'mwong/climatetext-evidence-related-evaluation': 2149,
'ylacombe/xsum_factuality': 2150,
'mwong/climatetext-climate_evidence-claim-related-evaluation': 2151,
'mwong/climatetext-claim-climate_evidence-related-evaluation': 2152,
'mwong/climatetext-evidence-claim-pair-related-evaluation': 2153,
'mwong/climatetext-claim-evidence-pair-related-evaluation': 2154,
'patrickvonplaten/librispeech_asr_self_contained': 2155,
'BritishLibraryLabs/web_archive_classification': 2158,
'albertxu/CrosswordQA': 2159,
'SocialGrep/the-reddit-nft-dataset': 2160,
'janck/bigscience-lama': 2162,
'strombergnlp/twitter_pos_vcb': 2163,
'Filippo/osdg_cd': 2164,
'Ukhushn/home-depot': 2165,
'pile-of-law/eoir_privacy': 2166,
'drAbreu/sd-nlp-2': 2168,
'Leyo/TGIF': 2173,
'strombergnlp/named_timexes': 2174,
'domenicrosati/TruthfulQA': 2175,
'Roh/ryanspeech': 2176,
'Leyo/ActivityNet_Captions': 2177,
'IsaacBot/SQuAD-single-sentence-QA': 2178,
'morteza/cogtext': 2179,
'wdc/products-2017': 2180,
'rajeshvarma/QA_on_SLA': 2196,
'statworx/haiku': 2197,
'rajistics/million-headlines': 2198,
'feyzaakyurek/BBNLI': 2199,
'launch/gov_report_qs': 2200,
'DFKI-SLT/wikitext_linked': 2202,
'dianalogan/Marketing-Budget-and-Actual-Sales-Dataset': 2204,
'mehnaazasad/arxiv-co-ga': 2205,
'JeremyAlain/123_test': 2206,
'BeIR/arguana-generated-queries': 2209,
'BeIR/climate-fever-generated-queries': 2210,
'BeIR/dbpedia-entity-generated-queries': 2211,
'wise-east/spolin': 2212,
'yoshitomo-matsubara/srsd-feynman_hard': 2213,
'florentgbelidji/edmunds-car-ratings': 2214,
'olivierdehaene/xkcd': 2215,
'rajistics/auditor_review': 2216,
'BeIR/scifact-generated-queries': 2217,
'BeIR/trec-covid-generated-queries': 2218,
'BeIR/webis-touche2020-generated-queries': 2219,
'BeIR/nq-generated-queries': 2220,
'BeIR/hotpotqa-generated-queries': 2221,
'BeIR/bioasq-generated-queries': 2222,
'icelab/ntrs_meta': 2223,
'iejMac/CLIP-Kinetics700': 2224,
'fever/feverous': 2225,
'Livingwithmachines/hmd-erwt-training': 2226,
'wkrl/cord': 2227,
'launch/reddit_qg': 2228,
'arize-ai/xtreme_en': 2229}
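# Manual overrides: pin these datasets near the top of the popularity ranking
# (lower rank value = more popular).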
dataset_rank['Anthropic/model-written-evals']=13
dataset_rank['Anthropic/hh-rlhf']=14
| 24,310 | 28.290361 | 69 |
py
|
tasksource
|
tasksource-main/src/tasksource/metadata/blimp_groups.py
|
import pandas as pd
dfh=pd.read_csv('https://raw.githubusercontent.com/alexwarstadt/blimp/master/raw_results/summary/human_validation_summary.csv')
dfh['linguistic_term']=dfh['Condition']
dfm=pd.read_json('https://raw.githubusercontent.com/alexwarstadt/blimp/master/raw_results/summary/models_summary.jsonl',lines=True)
df=dfm.join(dfh)
df['diff']=df.total_mean - df.gpt2
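# "hard" BLiMP paradigms: those where human accuracy beats GPT-2 by more than 0.1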
blimp_hard = set(df[df['diff']>0.1].UID)
del dfh, dfm, df
blimp_groups = {
"syntax": [
"adjunct_island",
"animate_subject_passive",
"animate_subject_trans",
"causative",
"complex_NP_island",
"coordinate_structure_constraint_complex_left_branch",
"coordinate_structure_constraint_object_extraction",
"drop_argument",
"ellipsis_n_bar_1",
"ellipsis_n_bar_2",
"inchoative",
"intransitive",
"left_branch_island_echo_question",
"left_branch_island_simple_question",
"passive_1",
"passive_2",
"sentential_subject_island",
"transitive",
"wh_island",
"wh_questions_object_gap",
"wh_questions_subject_gap",
"wh_questions_subject_gap_long_distance",
"wh_vs_that_no_gap",
"wh_vs_that_no_gap_long_distance",
"wh_vs_that_with_gap",
"wh_vs_that_with_gap_long_distance"
],
"morphology": [
"anaphor_gender_agreement",
"anaphor_number_agreement",
"determiner_noun_agreement_1",
"determiner_noun_agreement_2",
"determiner_noun_agreement_irregular_1",
"determiner_noun_agreement_irregular_2",
"determiner_noun_agreement_with_adj_2",
"determiner_noun_agreement_with_adj_irregular_1",
"determiner_noun_agreement_with_adj_irregular_2",
"determiner_noun_agreement_with_adjective_1",
"distractor_agreement_relational_noun",
"distractor_agreement_relative_clause",
"irregular_past_participle_adjectives",
"irregular_past_participle_verbs",
"irregular_plural_subject_verb_agreement_1",
"irregular_plural_subject_verb_agreement_2",
"regular_plural_subject_verb_agreement_1",
"regular_plural_subject_verb_agreement_2"
],
"syntax_semantics": [
"existential_there_object_raising",
"existential_there_subject_raising",
"expletive_it_object_raising",
"only_npi_scope",
"principle_A_c_command",
"principle_A_case_1",
"principle_A_domain_1",
"principle_A_domain_2",
"principle_A_domain_3",
"principle_A_reconstruction",
"sentential_negation_npi_scope",
"tough_vs_raising_1",
"tough_vs_raising_2"
],
"semantics": [
"existential_there_quantifiers_1",
"existential_there_quantifiers_2",
"matrix_question_npi_licensor_present",
"npi_present_1",
"npi_present_2",
"only_npi_licensor_present",
"sentential_negation_npi_licensor_present",
"superlative_quantifiers_1",
"superlative_quantifiers_2"
],
"syntax/semantics": [
"principle_A_case_2"
]
}
| 2,721 | 29.244444 | 131 |
py
|
tasksource
|
tasksource-main/src/tasksource/metadata/__init__.py
|
from .bigbench_groups import *
from .blimp_groups import *
from .popularity import *
imppres_presupposition=['presupposition_all_n_presupposition',
'presupposition_both_presupposition',
'presupposition_change_of_state',
'presupposition_cleft_existence',
'presupposition_cleft_uniqueness',
'presupposition_only_presupposition',
'presupposition_possessed_definites_existence',
'presupposition_possessed_definites_uniqueness',
'presupposition_question_presupposition']
imppres_implicature=['implicature_connectives',
'implicature_gradable_adjective',
'implicature_gradable_verb',
'implicature_modals',
'implicature_numerals_10_100',
'implicature_numerals_2_3',
'implicature_quantifiers']
crossfit=['emo',
'wiki_auto',
'liar',
'tab_fact',
'sms_spam',
'google_wellformed_query',
'glue',
'poem_sentiment',
'emotion',
'hate_speech18',
'hatexplain',
'yahoo_answers_topics',
'mc_taco',
'glue',
'mocha',
'super_glue',
'glue',
'yelp_polarity',
'tweet_eval',
'glue',
'art',
'super_glue',
'ethos',
'app_reviews',
'yelp_review_full',
'anli',
'hate_speech_offensive',
'climate_fever',
'circa',
'financial_phrasebank',
'wiki_qa',
'rotten_tomatoes',
'trec',
'medical_questions_pairs',
'glue',
'super_glue',
'ade_corpus_v2',
'sick',
'super_glue',
'blimp',
'discovery',
'health_fact',
'ag_news',
'boolq',
'glue',
'amazon_polarity',
'scicite',
'dbpedia_14',
'onestop_english',
'crows_pairs',
'scitail',
'piqa',
'glue',
'paws',
'imdb',
'glue',
'trec']
#en_esl, en_gumreddit are faulty on HF
udep_en_configs = ['en_ewt', 'en_gum', 'en_lines', 'en_partut']
udep_en_labels = ['_', 'acl', 'acl:relcl', 'advcl', 'advmod', 'amod', 'appos', 'aux', 'aux:pass', 'case', 'cc', 'cc:preconj', 'ccomp', 'compound', 'compound:prt', 'conj', 'cop', 'csubj', 'csubj:pass', 'dep', 'det', 'det:predet', 'discourse', 'dislocated', 'expl', 'fixed', 'flat', 'flat:foreign', 'goeswith', 'iobj', 'list', 'mark', 'nmod', 'nmod:npmod', 'nmod:poss', 'nmod:tmod', 'nsubj', 'nsubj:pass', 'nummod', 'obj', 'obl', 'obl:npmod', 'obl:tmod', 'orphan', 'parataxis', 'punct', 'reparandum', 'root', 'vocative', 'xcomp']
udep_labels = ['_', 'acl', 'acl:adv', 'acl:appos', 'acl:attr', 'acl:cleft', 'acl:focus', 'acl:inf', 'acl:part', 'acl:periph', 'acl:poss', 'acl:relat', 'acl:relcl', 'advcl', 'advcl:arg', 'advcl:cleft', 'advcl:cmpr', 'advcl:cond', 'advcl:coverb', 'advcl:lmod', 'advcl:mmod', 'advcl:periph', 'advcl:relcl', 'advcl:sp', 'advcl:svc', 'advcl:tcl', 'advcl:tmod', 'advmod', 'advmod:arg', 'advmod:cc', 'advmod:deg', 'advmod:det', 'advmod:df', 'advmod:emph', 'advmod:lmod', 'advmod:locy', 'advmod:mmod', 'advmod:mode', 'advmod:neg', 'advmod:periph', 'advmod:que', 'advmod:tfrom', 'advmod:tlocy', 'advmod:tmod', 'advmod:to', 'advmod:tto', 'amod', 'amod:advmod', 'amod:att', 'amod:emph', 'amod:flat', 'amod:mode', 'amod:obl', 'appos', 'appos:trans', 'aux', 'aux:aglt', 'aux:aspect', 'aux:caus', 'aux:clitic', 'aux:cnd', 'aux:imp', 'aux:mood', 'aux:neg', 'aux:opt', 'aux:part', 'aux:pass', 'aux:poss', 'aux:q', 'aux:tense', 'case', 'case:acc', 'case:adv', 'case:circ', 'case:dec', 'case:det', 'case:gen', 'case:loc', 'case:pred', 'case:pref', 'case:voc', 'cc', 'cc:nc', 'cc:preconj', 'ccomp', 'ccomp:agent', 'ccomp:cleft', 'ccomp:obj', 'ccomp:obl', 'ccomp:pmod', 'ccomp:pred', 'clf', 'compound', 'compound:a', 'compound:affix', 'compound:coll', 'compound:conjv', 'compound:dir', 'compound:ext', 'compound:lv', 'compound:lvc', 'compound:nn', 'compound:nv', 'compound:plur', 'compound:preverb', 'compound:prt', 'compound:quant', 'compound:redup', 'compound:smixut', 'compound:svc', 'compound:vo', 'compound:vv', 'conj', 'conj:expl', 'conj:extend', 'conj:svc', 'cop', 'cop:expl', 'cop:locat', 'cop:own', 'csubj', 'csubj:cleft', 'csubj:cop', 'csubj:pass', 'dep', 'dep:alt', 'dep:comp', 'dep:mod', 'dep:prt', 'det', 'det:adj', 'det:def', 'det:noun', 'det:numgov', 'det:nummod', 'det:poss', 'det:predet', 'det:pron', 'det:rel', 'discourse', 'discourse:emo', 'discourse:filler', 'discourse:intj', 'discourse:sp', 'dislocated', 'dislocated:acl', 'dislocated:cleft', 'dislocated:conj', 'dislocated:nmod', 'dislocated:nsubj', 'dislocated:obj', 'dislocated:obl', 'expl', 'expl:comp', 'expl:impers', 'expl:pass', 'expl:poss', 'expl:pv', 'expl:subj', 'fixed', 'flat', 'flat:abs', 'flat:foreign', 'flat:name', 'flat:num', 'flat:range', 'flat:repeat', 'flat:sibl', 'flat:title', 'flat:vv', 'goeswith', 'iobj', 'iobj:agent', 'iobj:appl', 'iobj:caus', 'iobj:loc', 'iobj:patient', 'list', 'mark', 'mark:adv', 'mark:advb', 'mark:comp', 'mark:prt', 'mark:rel', 'mark:relcl', 'nmod', 'nmod:abl', 'nmod:advmod', 'nmod:agent', 'nmod:appos', 'nmod:arg', 'nmod:att', 'nmod:attr', 'nmod:bahuv', 'nmod:cau', 'nmod:clas', 'nmod:cmp', 'nmod:comp', 'nmod:dat', 'nmod:flat', 'nmod:gen', 'nmod:gmod', 'nmod:gobj', 'nmod:gsubj', 'nmod:lmod', 'nmod:npmod', 'nmod:obl', 'nmod:obllvc', 'nmod:own', 'nmod:part', 'nmod:periph', 'nmod:pmod', 'nmod:poss', 'nmod:pred', 'nmod:ref', 'nmod:relat', 'nmod:tmod', 'nsubj', 'nsubj:appos', 'nsubj:bfoc', 'nsubj:caus', 'nsubj:cop', 'nsubj:ifoc', 'nsubj:lfoc', 'nsubj:lvc', 'nsubj:nc', 'nsubj:obj', 'nsubj:own', 'nsubj:pass', 'nsubj:periph', 'nummod', 'nummod:det', 'nummod:entity', 'nummod:flat', 'nummod:gov', 'nummod:mod', 'nummod:periph', 'obj', 'obj:advmod', 'obj:agent', 'obj:appl', 'obj:cau', 'obj:caus', 'obj:lvc', 'obj:periph', 'obl', 'obl:abl', 'obl:advmod', 'obl:agent', 'obl:appl', 'obl:arg', 'obl:ben', 'obl:cmpr', 'obl:inst', 'obl:lmod', 'obl:loc', 'obl:mod', 'obl:npmod', 'obl:own', 'obl:patient', 'obl:pmod', 'obl:poss', 'obl:prep', 'obl:sentcon', 'obl:smod', 'obl:soc', 'obl:tmod', 'obl:x', 'orphan', 'parataxis', 'parataxis:appos', 'parataxis:conj', 
'parataxis:deletion', 'parataxis:discourse', 'parataxis:dislocated', 'parataxis:hashtag', 'parataxis:insert', 'parataxis:newsent', 'parataxis:nsubj', 'parataxis:obj', 'parataxis:parenth', 'parataxis:rel', 'parataxis:rep', 'parataxis:restart', 'parataxis:speech', 'parataxis:trans', 'punct', 'reparandum', 'root', 'vocative', 'vocative:mention', 'xcomp', 'xcomp:adj', 'xcomp:cleft', 'xcomp:ds', 'xcomp:obj', 'xcomp:obl', 'xcomp:pred', 'xcomp:sp', 'xcomp:subj']
| 6,152 | 71.388235 | 4,014 |
py
|
tasksource
|
tasksource-main/src/tasksource/metadata/bigbench_groups.py
|
bigbench_discriminative = set("""abstract_narrative_understanding
anachronisms
analogical_similarity
analytic_entailment
arithmetic
authorship_verification
bbq_lite_json
causal_judgment
cause_and_effect
checkmate_in_one
cifar10_classification
code_line_description
color
common_morpheme
conceptual_combinations
contextual_parametric_knowledge_conflicts
crash_blossom
crass_ai
cryobiology_spanish
cs_algorithms
dark_humor_detection
date_understanding
disambiguation_qa
discourse_marker_prediction
dyck_languages
elementary_math_qa
emoji_movie
emojis_emotion_prediction
empirical_judgments
english_proverbs
english_russian_proverbs
entailed_polarity
entailed_polarity_hindi
epistemic_reasoning
evaluating_information_essentiality
fact_checker
fantasy_reasoning
figure_of_speech_detection
formal_fallacies_syllogisms_negation
general_knowledge
geometric_shapes
goal_step_wikihow
gre_reading_comprehension
hhh_alignment
hindu_knowledge
hinglish_toxicity
human_organs_senses
hyperbaton
identify_math_theorems
identify_odd_metaphor
implicatures
implicit_relations
indic_cause_and_effect
intent_recognition
international_phonetic_alphabet_nli
intersect_geometry
irony_identification
kannada
key_value_maps
known_unknowns
language_identification
logic_grid_puzzle
logical_args
logical_deduction
logical_fallacy_detection
logical_sequence
mathematical_induction
medical_questions_russian
metaphor_boolean
metaphor_understanding
misconceptions
misconceptions_russian
mnist_ascii
moral_permissibility
movie_dialog_same_or_different
movie_recommendation
navigate
nonsense_words_grammar
novel_concepts
odd_one_out
parsinlu_qa
penguins_in_a_table
persian_idioms
phrase_relatedness
physical_intuition
physics
play_dialog_same_or_different
presuppositions_as_nli
question_selection
real_or_fake_text
reasoning_about_colored_objects
riddle_sense
ruin_names
salient_translation_error_detection
sentence_ambiguity
similarities_abstraction
simple_arithmetic_json_multiple_choice
simple_ethical_questions
snarks
social_iqa
social_support
sports_understanding
strange_stories
strategyqa
suicide_risk
swahili_english_proverbs
swedish_to_german_proverbs
symbol_interpretation
temporal_sequences
timedial
tracking_shuffled_objects
understanding_fables
undo_permutation
unit_interpretation
vitaminc_fact_verification
what_is_the_tao
which_wiki_edit
winowhy""".split('\n')) - {'simple_arithmetic_json_multiple_choice'}
bigbench_non_english = set("""common_morpheme
cryobiology_spanish
gem
gender_inclusive_sentences_german
kanji_ascii
kannada
language_identification
linguistic_mappings
medical_questions_russian
misconceptions_russian
multiemo
persian_idioms
polish_sequence_labeling
swahili_english_proverbs
swedish_to_german_proverbs
what_is_the_tao
which_wiki_edit""".split('\n')) | {"parsinlu_qa","hinglish_toxicity","indic_cause_and_effect","entailed_polarity_hindi","english_russian_proverbs"}
bbl=set('''auto_debugging
bbq_lite_json
code_line_description
conceptual_combinations
conlang_translation
emoji_movie
formal_fallacies_syllogisms_negation
hindu_knowledge
known_unknowns
language_identification
linguistics_puzzles
logic_grid_puzzle
logical_deduction
misconceptions_russian
novel_concepts
operators
parsinlu_reading_comprehension
play_dialog_same_or_different
repeat_copy_logic
strange_stories
strategyqa
symbol_interpretation
vitaminc_fact_verification
winowhy'''.split('\n'))
bigbench_discriminative_english = bigbench_discriminative - bigbench_non_english
| 3,450 | 20.171779 | 147 |
py
|
afqsfenicsutil
|
afqsfenicsutil-master/with_scipy.py
|
"""
These are utilities for interfacing FEniCS with Scipy.
This is how I look at sparsities or interface with LIS (see pylis by afq).
"""
from fenics import PETScMatrix, assemble
def assemble_as_scipy(form):
K = PETScMatrix()
assemble(form, tensor=K)
ki,kj,kv = K.mat().getValuesCSR()
import scipy
import scipy.sparse
Ksp = scipy.sparse.csr_matrix((kv, kj, ki))
return Ksp
def look_at_a_form(form,fname="foo.png",xlim=None,ylim=None):
from matplotlib import pylab as plt
Ksp = assemble_as_scipy(form)
plt.spy(Ksp)
    if xlim is not None: plt.xlim(*xlim)
    if ylim is not None: plt.ylim(*ylim)
plt.savefig(fname,dpi=150)
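if __name__ == '__main__':
    # Minimal usage sketch, assuming a working FEniCS installation; the
    # Poisson form below is illustrative and not part of this module.
    from fenics import (UnitSquareMesh, FunctionSpace, TrialFunction,
                        TestFunction, dot, grad, dx)
    mesh = UnitSquareMesh(8, 8)
    V = FunctionSpace(mesh, "CG", 1)
    u, v = TrialFunction(V), TestFunction(V)
    a = dot(grad(u), grad(v))*dx
    Ksp = assemble_as_scipy(a)
    print("assembled %dx%d CSR matrix with %d nonzeros"
          % (Ksp.shape[0], Ksp.shape[1], Ksp.nnz))
    look_at_a_form(a, "sparsity.png")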
| 663 | 25.56 | 74 |
py
|
afqsfenicsutil
|
afqsfenicsutil-master/my_restriction_map.py
|
# Copyright (C) 2010-2014 Simula Research Laboratory
#
# This file is part of CBCPOST.
#
# CBCPOST is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# CBCPOST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with CBCPOST. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from cbcpost.utils.mpi_utils import broadcast
from cbcpost.utils import cbc_warning
from scipy.spatial.ckdtree import cKDTree as KDTree
from dolfin import MPI, mpi_comm_world, dolfin_version
from distutils.version import LooseVersion
def restriction_map(V, Vb, _all_coords=None, _all_coordsb=None):
"Return a map between dofs in Vb to dofs in V. Vb's mesh should be a submesh of V's Mesh."
if V.ufl_element().family() == "Discontinuous Lagrange" and V.ufl_element().degree() > 0:
raise RuntimeError("This function does not work for DG-spaces of degree >0 \
(several dofs associated with same point in same subspace).")
if V.ufl_element().family() != "Lagrange":
cbc_warning("This function is only tested for CG-spaces.")
assert V.ufl_element().family() == Vb.ufl_element().family(), "ufl elements differ in the two spaces"
assert V.ufl_element().degree() == Vb.ufl_element().degree(), "ufl elements differ in the two spaces"
assert V.ufl_element().cell() == Vb.ufl_element().cell(), "ufl elements differ in the two spaces"
D = V.mesh().geometry().dim()
# Recursively call this function if V has sub-spaces
if V.num_sub_spaces() > 0:
mapping = {}
if MPI.size(mpi_comm_world()) == 1:
if _all_coords is None:
try:
# For 1.6.0+ and newer
all_coords = V.tabulate_dof_coordinates().reshape(V.dim(), D)
all_coordsb = Vb.tabulate_dof_coordinates().reshape(Vb.dim(), D)
except:
# For 1.6.0 and older
all_coords = V.dofmap().tabulate_all_coordinates(V.mesh()).reshape(V.dim(), D)
all_coordsb = Vb.dofmap().tabulate_all_coordinates(Vb.mesh()).reshape(Vb.dim(), D)
else:
all_coords = _all_coords
all_coordsb = _all_coordsb
else:
all_coords = None
all_coordsb = None
for i in range(V.num_sub_spaces()):
mapping.update(restriction_map(V.sub(i), Vb.sub(i), all_coords, all_coordsb))
return mapping
dm = V.dofmap()
dmb = Vb.dofmap()
N = len(dm.dofs())
Nb = len(dmb.dofs())
dofs = dm.dofs()
# Extract coordinates of dofs
if dm.is_view():
if _all_coords is not None:
coords = _all_coords[V.dofmap().dofs()]
else:
try:
# For 1.6.0+ and newer
coords = V.collapse().tabulate_dof_coordinates().reshape(N, D)
except:
# For 1.6.0 and older
coords = V.collapse().dofmap().tabulate_all_coordinates(V.mesh()).reshape(N, D)
if _all_coordsb is not None:
coordsb = _all_coordsb[Vb.dofmap().dofs()]
else:
try:
# For 1.6.0+ and newer
coordsb = Vb.collapse().tabulate_dof_coordinates().reshape(Nb, D)
except:
# For 1.6.0 and older
coordsb = Vb.collapse().dofmap().tabulate_all_coordinates(Vb.mesh()).reshape(Nb,D)
else:
if LooseVersion(dolfin_version()) > LooseVersion("1.6.0"):
# For 1.6.0+ and newer
coords = V.tabulate_dof_coordinates().reshape(N, D)
coordsb = Vb.tabulate_dof_coordinates().reshape(Nb, D)
else:
# For 1.6.0 and older
coords = V.dofmap().tabulate_all_coordinates(V.mesh()).reshape(N, D)
coordsb = Vb.dofmap().tabulate_all_coordinates(Vb.mesh()).reshape(Nb,D)
# Build KDTree to compute distances from coordinates in base
kdtree = KDTree(coords)
eps = 1e-12
mapping = {}
request_dofs = np.array([])
distances, indices = kdtree.query(coordsb)
for i, subdof in enumerate(dmb.dofs()):
# Find closest dof in base
#d, idx = kdtree.query(coordsb[i])
d, idx = distances[i], indices[i]
if d < eps:
# Dof found on this process, add to map
dof = dofs[idx]
assert subdof not in mapping
mapping[subdof] = dof
else:
# Search for this dof on other processes
add_dofs = np.hstack(([subdof], coordsb[i]))
request_dofs = np.append(request_dofs, add_dofs)
del distances
del indices
# Scatter all dofs not found on current process to all processes
all_request_dofs = [None]*MPI.size(mpi_comm_world())
for j in xrange(MPI.size(mpi_comm_world())):
all_request_dofs[j] = broadcast(request_dofs, j)
# Re-order all requested dofs
# Remove items coming from this process
all_request_dofs[MPI.rank(mpi_comm_world())] = []
all_request_dofs = np.hstack(all_request_dofs)
all_request_dofs = all_request_dofs.reshape(len(all_request_dofs)/(D+1), D+1)
all_request_dofs = dict(zip(all_request_dofs[:,0], all_request_dofs[:,1:]))
# Search this process for all dofs not found on same process as subdof
for subdof, coordsbi in all_request_dofs.items():
subdof = int(subdof)
# Find closest dof in base
d, idx = kdtree.query(coordsbi)
if d < eps:
# Dof found on this process, add to map
dof = dofs[idx]
assert subdof not in mapping
mapping[subdof] = dof
return mapping
if __name__ == '__main__':
from dolfin import (UnitCubeMesh, SubDomain, MeshFunction, tic, toc, VectorFunctionSpace,
FunctionSpace, Function, assemble, Expression, project, dx, inner)
N = 4
mesh = UnitCubeMesh(N,N,N)
class Left(SubDomain):
def inside(self, x, on_boundary):
return x[0] > 0.7
from cbcpost.utils import create_submesh
markers = MeshFunction("size_t", mesh, 3)
markers.set_all(0)
Left().mark(markers, 1)
tic()
mesh2 = create_submesh(mesh, markers, 1)
print "Time create submesh: ", toc()
#bmesh.coordinates()[:] += 0.1
#bmesh2 = Mesh("submesh.xml")
#print bmesh2.size_global(0)
#print bmesh2.size_global(2)
V = FunctionSpace(mesh, "CG", 1)
Vb = FunctionSpace(mesh2, "CG", 1)
tic()
mapping = restriction_map(V, Vb)
print "Time restriction_map: ", toc()
expr = Expression("x[0]*x[1]+x[2]*x[2]+3.0", degree=2)
u = project(expr, V)
u2 = Function(Vb)
u2.vector()[mapping.keys()] = u.vector()[mapping.values()]
print assemble(u*dx(1, subdomain_data=markers)), assemble(u2*dx)
V = VectorFunctionSpace(mesh, "CG", 1)
Vb = VectorFunctionSpace(mesh2, "CG", 1)
mapping = restriction_map(V, Vb)
expr = Expression(("x[0]*x[1]+x[2]*x[2]+3.0", "2+x[1]*x[2]", "x[0]+3*x[2]"), degree=2)
u = project(expr, V)
u2 = Function(Vb)
u2.vector()[mapping.keys()] = u.vector()[mapping.values()]
print assemble(inner(u,u)*dx(1, subdomain_data=markers)), assemble(inner(u2,u2)*dx)
| 7,493 | 35.916256 | 105 |
py
|
afqsfenicsutil
|
afqsfenicsutil-master/various.py
|
"""
by B. E. Abali and A. Queiruga
"""
from fenics import *
import numpy as np
#
# Helper methods for extracting submeshes
# and making mappings between them
#
def SubMesh2(mesh,marker,inds):
mask = MeshFunction("size_t",mesh,marker.dim())
o = marker.array()
m = mask.array()
    for i in range(len(m)): m[i] = 1 if o[i] in inds else 0
return SubMesh(mesh, mask,1)
def Mesh2Submesh_FacetMap(mesh, sub):
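    # Map each submesh facet to its parent-mesh facet by matching the sorted
    # tuple of parent vertex indices on both sides.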
vk = sub.data().array('parent_vertex_indices',0)
vertex_to_facets_name = {}
for f in facets(mesh):
vertex_to_facets_name[tuple(sorted(f.entities(0)))] = f.global_index()
sub.init_global(2)
local_to_global_facets = np.empty(sub.num_facets(),dtype=np.intc)
for i,f in enumerate(facets(sub)):
local_to_global_facets[i] = \
vertex_to_facets_name[tuple(sorted(vk[f.entities(0)]))]
return local_to_global_facets
def Mesh2Submesh_CellMap(mesh, sub):
vk = sub.data().array('parent_vertex_indices',0)
vertex_to_cells_name = {}
for f in cells(mesh):
vertex_to_cells_name[tuple(sorted(f.entities(0)))] = f.global_index()
sub.init_global(2)
local_to_global_cells = np.empty(sub.num_cells(),dtype=np.intc)
for i,f in enumerate(cells(sub)):
local_to_global_cells[i] = \
vertex_to_cells_name[tuple(sorted(vk[f.entities(0)]))]
return local_to_global_cells
def AssignMaterialCoefficients(target_mesh, cells_list, coeffs, mat_marking):
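    # Build a piecewise-constant (DG0) coefficient field: each cell receives
    # the coefficient tied to its material marker via np.choose.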
coeffs_list = np.zeros(max(mat_marking)+1)
for i,coeff in enumerate(coeffs): coeffs_list[mat_marking[i]] = coeff
coeff_func = Function(FunctionSpace(target_mesh, 'DG', 0))
markers = np.asarray(cells_list.array(), dtype=np.int32)
coeff_func.vector()[:] = np.choose(markers, coeffs_list)
return coeff_func
#
# Voigt notation utilities
#
def C_IsotropicVoigt(lam, mu):
return np.array([
[lam+2.*mu , lam, lam, 0, 0, 0],
[lam, lam+2.*mu, lam, 0, 0, 0],
[lam, lam, lam+2.*mu, 0, 0, 0],
[0, 0, 0, mu, 0, 0],
[0, 0, 0, 0, mu, 0],
[0, 0, 0, 0, 0, mu] ])
#
# Building rank-4 tensors from Voight notation
#
def VoigtToTensorRank4(A11=0., A12=0., A13=0., A14=0., A15=0., A16=0., A22=0., A23=0., A24=0., A25=0., A26=0., A33=0., A34=0., A35=0., A36=0., A44=0., A45=0., A46=0., A55=0., A56=0., A66=0.):
A21, A31, A41, A51, A61 = A12, A13, A14, A15, A16
A32, A42, A52, A62 = A23, A24, A25, A26
A43, A53, A63 = A34, A35, A36
A54, A64 = A45, A46
A65 = A56
return as_tensor([ \
[ \
[ [A11,A16,A15], [A16,A12,A14], [A15,A14,A13]] , \
[ [A61,A66,A65], [A66,A62,A64], [A65,A64,A63]] , \
[ [A51,A56,A55], [A56,A52,A54], [A55,A54,A53]] \
] , [ \
[ [A61,A66,A65], [A66,A62,A64], [A65,A64,A63]] , \
[ [A21,A26,A25], [A26,A22,A24], [A25,A24,A23]] , \
[ [A41,A46,A45], [A46,A42,A44], [A45,A44,A43]] \
] , [ \
[ [A51,A56,A55], [A56,A52,A54], [A55,A54,A53]] , \
[ [A41,A46,A45], [A46,A42,A44], [A45,A44,A43]] , \
[ [A31,A36,A35], [A36,A32,A34], [A35,A34,A33]] ] \
])
def VoigtToTensorRank3(A11=0.,A12=0.,A13=0.,A14=0.,A15=0.,A16=0., A21=0.,A22=0.,A23=0.,A24=0.,A25=0.,A26=0., A31=0.,A32=0.,A33=0.,A34=0.,A35=0.,A36=0.):
return as_tensor([ \
[ \
[ A11, A16, A15 ] , \
[ A16, A12, A14 ] , \
[ A15, A14, A13 ] \
] , [ \
[ A21, A26, A25 ] , \
[ A26, A22, A24 ] , \
[ A25, A24, A23 ] \
] , [ \
[ A31, A36, A35 ] , \
[ A36, A32, A34 ] , \
[ A35, A34, A33 ] ] \
])
def VoigtToTensorRank2(A11=0.,A12=0.,A13=0., A21=0.,A22=0.,A23=0., A31=0.,A32=0.,A33=0.):
    # Symmetry is enforced here: the lower-triangle arguments are overwritten
    # by their upper-triangle counterparts before the tensor is assembled.
    A21, A31, A32 = A12, A13, A23
return as_tensor([ \
[ A11, A12, A13 ] , \
[ A21, A22, A23 ] , \
[ A31, A32, A33 ] \
] )
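if __name__ == '__main__':
    # Usage sketch (the lam/mu values are illustrative): build the isotropic
    # stiffness matrix in Voigt form, then expand it into the full rank-4
    # UFL tensor used in variational forms.
    lam, mu = 1.25, 0.8
    CV = C_IsotropicVoigt(lam, mu)
    C = VoigtToTensorRank4(
        A11=CV[0, 0], A12=CV[0, 1], A13=CV[0, 2],
        A22=CV[1, 1], A23=CV[1, 2], A33=CV[2, 2],
        A44=CV[3, 3], A55=CV[4, 4], A66=CV[5, 5])
    print(C)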
| 3,608 | 32.110092 | 191 |
py
|
afqsfenicsutil
|
afqsfenicsutil-master/write_vtk.py
|
from six import iteritems
from dolfin import FunctionSpace,VectorFunctionSpace,TensorFunctionSpace,project,cells
def write_vtk_f(fname, mesh=None, nodefunctions=None,cellfunctions=None):
"""
Write a whole bunch of FEniCS functions to the same vtk file.
"""
    if mesh is None:
        if nodefunctions is not None:
            mesh = next(iter(nodefunctions.values())).function_space().mesh()
        else:
            mesh = next(iter(cellfunctions.values())).function_space().mesh()
C = { 0:FunctionSpace(mesh,"DG",0),
1:VectorFunctionSpace(mesh,"DG",0),
2:TensorFunctionSpace(mesh,"DG",0) }
nodefields = [(k,f.compute_vertex_values().reshape(-1,mesh.num_vertices()).T)
for k,f in iteritems(nodefunctions)] if nodefunctions else None
edgefields=[(k,project(f,C[f.value_rank()]).vector().get_local().reshape(mesh.num_cells(),-1) )
for k,f in iteritems(cellfunctions) ] if cellfunctions else None
write_vtk(fname, mesh.cells(), mesh.coordinates(),
nodefields,edgefields )
def write_vtk(fname, elems, X, nodefields=None,edgefields=None):
""" This is an adapted version of write_graph from cornflakes """
celltypekey = {
1:1, # Pt
2:3, # Line
3:5, # Tri
# 4:9, # Quad
4:10, # Tet
8:12} # Hex
vecformatdict = {
1:"{0} 0 0\n",
2:"{0} {1} 0\n",
3:"{0} {1} {2}\n"
}
tenformatdict = {
1:"{0} 0 0\n0 0 0\n0 0 0\n",
2:"{0} {1} 0\n{2} {3} 0\n0 0 0\n",
3:"{0} {1} {2}\n{3} {4} {5}\n{6} {7} {8}\n"
}
vecfmt = vecformatdict[X.shape[1]]
tenfmt = tenformatdict[X.shape[1]]
import os, errno
path=os.path.dirname(fname)
if path:
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else: raise
fh = open(fname,"w")
fh.write("# vtk DataFile Version 2.0\nGraph connectivity\nASCII\n")
fh.write("DATASET UNSTRUCTURED_GRID\n")
fh.write("POINTS {0} double\n".format(X.shape[0]))
for pt in X:
fh.write(vecfmt.format(*pt))
fh.write("\nCELLS {0} {1}\n".format(elems.shape[0],elems.shape[0]*(1+elems.shape[1]))) # I assume they're all the same
for el in elems:
fh.write("{0} ".format(len(el))+" ".join([str(x) for x in el])+"\n")
fh.write("\nCELL_TYPES {0}\n".format(elems.shape[0]))
for el in elems:
fh.write("{0}\n".format(celltypekey[elems.shape[1]]))
# Macro to write a data block
def PUTFIELD(n,f):
if len(f.shape)==1 or f.shape[1]==1:
fh.write("SCALARS {0} double\n".format(n))
fh.write("LOOKUP_TABLE default\n")
for l in f.ravel():
fh.write(str(l)+"\n")
elif f.shape[1]==X.shape[1]:
fh.write("VECTORS {0} double\n".format(n))
for l in f:
fh.write(vecfmt.format(*l))
else:
fh.write("TENSORS {0} double\n".format(n))
for l in f:
fh.write(tenfmt.format(*l))
# Dump all of the node fields
if nodefields:
fh.write("POINT_DATA {0}\n".format(X.shape[0]))
for n,f in nodefields:
PUTFIELD(n,f)
# Cell fields now
if edgefields:
fh.write("CELL_DATA {0}\n".format(elems.shape[0]))
for n,f in edgefields:
PUTFIELD(n,f)
fh.close()
if __name__=="__main__":
from dolfin import UnitSquareMesh, Function, FunctionSpace, VectorFunctionSpace, TensorFunctionSpace, Expression
mesh = UnitSquareMesh(10,10)
S=FunctionSpace(mesh,"DG",0)
V=VectorFunctionSpace(mesh,"DG",0)
T=TensorFunctionSpace(mesh,"DG",0)
Tsym = TensorFunctionSpace(mesh,"DG",0,symmetry=True)
s = Function(S)
s.interpolate(Expression('x[0]',element=S.ufl_element()))
v = Function(V)
v.interpolate(Expression(('x[0]','x[1]'),element=V.ufl_element()))
t = Function(T)
t.interpolate(Expression(( ('x[0]','1.0'),('2.0','x[1]')),element=T.ufl_element()))
ts = Function(Tsym)
ts.interpolate(Expression(( ('x[0]','1.0'),('x[1]',)),element=Tsym.ufl_element()))
write_vtk_f("test.vtk",cellfunctions={'s':s,'v':v,'t':t,'tsym':ts})
| 4,331 | 34.508197 | 122 |
py
|
afqsfenicsutil
|
afqsfenicsutil-master/mesh_morph.py
|
"""
A super-simple mesh-morpher designed for EMSI
Alejandro F Queiruga
UC Berkeley, 2014
"""
import numpy as np
from scipy.spatial import Delaunay
def do_tri_map(fix_ind,nodes,nodes_orig):
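    # Barycentric morph: triangulate the *original* fixed points, locate each
    # free vertex in that triangulation, and re-evaluate its barycentric
    # coordinates against the *deformed* fixed points.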
global dela,dela_orig,points,points_orig
gdim = nodes.shape[1]
fix_ind = list(set(fix_ind))
points = nodes[fix_ind,:]
points_orig = nodes_orig[fix_ind,:]
# dela = Delaunay(points)
dela_orig = Delaunay(points_orig)
nodes_new = np.empty(nodes.shape,dtype=nodes.dtype)
    for ptid in range(nodes_orig.shape[0]):
if ptid in fix_ind:
nodes_new[ptid,:] = nodes[ptid,:]
continue
pt = nodes_orig[ptid,:]
s = dela_orig.find_simplex(pt)
tri = dela_orig.simplices[s,:]
b = np.zeros(gdim+1)
b[0:gdim] = dela_orig.transform[s,:gdim].dot( pt-dela_orig.transform[s,gdim] )
b[gdim] = 1-b[0:gdim].sum()
# nodes_new[ptid,:] = (nodes[tri,:].T.dot(b)).T
for i in range(gdim):
nodes_new[ptid,i] = points[tri,i].dot(b)
return nodes_new
def morph_fenics(mesh, nodes, u, other_fix = []):
"""
Morph using FEniCS Functions.
Returns a CG0 Function of DeltaX, such that
w = DeltaX / dt
"""
X_orig = mesh.coordinates().copy()
X_defo = X_orig.copy()
    # compute_vertex_values() returns values at *all* mesh vertices
    # (component-major), so reshape over the full vertex set and then pick
    # out the rows for the nodes being moved.
    uN = u.compute_vertex_values().reshape(u.geometric_dimension(), -1).T
    X_defo[list(nodes), :] += uN[list(nodes), :]
# Warp the mesh
X_new = do_tri_map( list(nodes) + list(other_fix), X_defo, X_orig)
mesh.coordinates()[:] = X_new
# Calculate w
from fenics import VectorFunctionSpace, Function
V = VectorFunctionSpace(mesh,"CG",1)
DeltaX = Function(V)
nodeorder = V.dofmap().dofs(mesh,0)
utot = (X_new - X_orig).ravel()
for i,l in enumerate(nodeorder):
DeltaX.vector()[l] = utot[i]
return DeltaX # w = DeltaX / Dt
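if __name__ == '__main__':
    # Usage sketch, assuming FEniCS and scipy are available: displace the
    # right edge of a unit square and let the barycentric map above morph
    # the interior vertices accordingly.
    from fenics import (UnitSquareMesh, VectorFunctionSpace, Expression,
                        project, vertices, near)
    mesh = UnitSquareMesh(10, 10)
    V = VectorFunctionSpace(mesh, "CG", 1)
    u = project(Expression(("0.1*x[0]", "0.0"), degree=1), V)
    moving = [v.index() for v in vertices(mesh) if near(v.x(0), 1.0)]
    fixed = [v.index() for v in vertices(mesh)
             if near(v.x(0), 0.0) or near(v.x(1), 0.0) or near(v.x(1), 1.0)]
    DeltaX = morph_fenics(mesh, moving, u, other_fix=fixed)
    print("largest nodal shift component:", DeltaX.vector().max())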
| 1,850 | 30.913793 | 86 |
py
|
afqsfenicsutil
|
afqsfenicsutil-master/__init__.py
|
"""
This is a little library of helper functions for use with FEniCS
by Alejandro F. Queiruga, with major contributions by B. Emek Abali.
"""
from .various import *
#from my_restriction_map import restriction_map
from .write_vtk import write_vtk_f
from .with_scipy import assemble_as_scipy, look_at_a_form
from .mesh_morph import *
| 337 | 23.142857 | 68 |
py
|
sm-vit
|
sm-vit-main/train.py
|
# coding=utf-8
from __future__ import absolute_import, division, print_function
wnb = False
if wnb:
import wandb
wandb.init(project="sm-vit", entity="xxx")
import logging
import argparse
import os
import random
import numpy as np
from datetime import timedelta
import time
import torch
import torch.distributed as dist
from tqdm import tqdm
from torch.utils.tensorboard import SummaryWriter
from apex import amp
from apex.parallel import DistributedDataParallel as DDP
from models.modeling import VisionTransformer, CONFIGS
from utils.scheduler import WarmupLinearSchedule, WarmupCosineSchedule
from utils.data_utils import get_loader
from utils.dist_util import get_world_size
logger = logging.getLogger(__name__)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def simple_accuracy(preds, labels):
return (preds == labels).mean()
def save_model(args, model):
model_to_save = model.module if hasattr(model, 'module') else model
model_checkpoint = os.path.join(args.output_dir, "%s_checkpoint.bin" % args.name)
torch.save(model_to_save.state_dict(), model_checkpoint)
logger.info("Saved model checkpoint to [DIR: %s]", args.output_dir)
def reduce_mean(tensor, nprocs):
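    # Average a tensor across all distributed workers: all-reduce the sum,
    # then divide by the number of processes.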
rt = tensor.clone()
dist.all_reduce(rt, op=dist.ReduceOp.SUM)
rt /= nprocs
return rt
def setup(args):
# Prepare model
config = CONFIGS[args.model_type]
if args.dataset == "dogs":
num_classes = 120
elif args.dataset == "CUB":
num_classes=200
elif args.dataset == "nabirds":
num_classes = 555
else:
raise Exception(f'Unknown dataset "{args.dataset}"')
model = VisionTransformer(config, args.img_size, zero_head=True, num_classes=num_classes, vis=True, smoothing_value=args.smoothing_value, dataset=args.dataset, \
coeff_max=args.coeff_max, contr_loss=args.contr_loss, focal_loss=args.focal_loss)
model.load_from(np.load(args.pretrained_dir))
model.to(args.device)
num_params = count_parameters(model)
logger.info("{}".format(config))
logger.info("Training parameters %s", args)
logger.info("Total Parameter: \t%2.1fM" % num_params)
return args, model
def count_parameters(model):
params = sum(p.numel() for p in model.parameters() if p.requires_grad)
return params/1000000
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
def valid(args, model, writer, test_loader, global_step):
# Validation!
eval_losses = AverageMeter()
logger.info("***** Running Validation *****")
logger.info(" Num steps = %d", len(test_loader))
logger.info(" Batch size = %d", args.eval_batch_size)
model.eval()
all_preds, all_label = [], []
epoch_iterator = tqdm(test_loader,
desc="Validating... (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True,
disable=args.local_rank not in [-1, 0])
loss_fct = torch.nn.CrossEntropyLoss()
for step, batch in enumerate(epoch_iterator):
if wnb: wandb.log({"step": step})
batch = tuple(t.to(args.device) for t in batch)
if args.sm_vit:
x, y, mask = batch
else:
x, y = batch
with torch.no_grad():
if args.sm_vit:
logits = model(x, None, mask)[0]
else:
logits = model(x)[0]
eval_loss = loss_fct(logits, y)
if args.contr_loss:
eval_loss = eval_loss.mean()
eval_losses.update(eval_loss.item())
preds = torch.argmax(logits, dim=-1)
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(y.detach().cpu().numpy())
else:
all_preds[0] = np.append(
all_preds[0], preds.detach().cpu().numpy(), axis=0 )
all_label[0] = np.append(
all_label[0], y.detach().cpu().numpy(), axis=0 )
epoch_iterator.set_description("Validating... (loss=%2.5f)" % eval_losses.val)
all_preds, all_label = all_preds[0], all_label[0]
accuracy = simple_accuracy(all_preds, all_label)
logger.info("\n")
logger.info("Validation Results")
logger.info("Global Steps: %d" % global_step)
logger.info("Valid Loss: %2.5f" % eval_losses.avg)
logger.info("Valid Accuracy: %2.5f" % accuracy)
writer.add_scalar("test/accuracy", scalar_value=accuracy, global_step=global_step)
if wnb: wandb.log({"acc_test": accuracy})
return accuracy
def train(args, model):
""" Train the model """
if args.local_rank in [-1, 0]:
os.makedirs(args.output_dir, exist_ok=True)
writer = SummaryWriter(log_dir=os.path.join("logs", args.name))
best_step=0
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
# Prepare dataset
train_loader, test_loader = get_loader(args)
# Prepare optimizer and scheduler
optimizer = torch.optim.SGD(model.parameters(),
lr=args.learning_rate,
momentum=0.9,
weight_decay=args.weight_decay)
t_total = args.num_steps
if args.decay_type == "cosine":
scheduler = WarmupCosineSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
else:
scheduler = WarmupLinearSchedule(optimizer, warmup_steps=args.warmup_steps, t_total=t_total)
if args.fp16:
model, optimizer = amp.initialize(models=model,
optimizers=optimizer,
opt_level=args.fp16_opt_level)
amp._amp_state.loss_scalers[0]._loss_scale = 2**20
# Distributed training
if args.local_rank != -1:
model = DDP(model, message_size=250000000, gradient_predivide_factor=get_world_size())
# Train!
start_time = time.time()
logger.info("***** Running training *****")
logger.info(" Total optimization steps = %d", args.num_steps)
logger.info(" Instantaneous batch size per GPU = %d", args.train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps * (
torch.distributed.get_world_size() if args.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
model.zero_grad()
set_seed(args) # Added here for reproducibility (even between python 2 and 3)
losses = AverageMeter()
global_step, best_acc = 0, 0
while True:
model.train()
epoch_iterator = tqdm(train_loader,
desc="Training (X / X Steps) (loss=X.X)",
bar_format="{l_bar}{r_bar}",
dynamic_ncols=True,
disable=args.local_rank not in [-1, 0])
all_preds, all_label = [], []
for step, batch in enumerate(epoch_iterator):
batch = tuple(t.to(args.device) for t in batch)
if args.sm_vit:
x, y, mask = batch
loss, logits = model(x, y, mask)
else:
x, y = batch
loss, logits = model(x, y)
if args.contr_loss:
loss = loss.mean()
preds = torch.argmax(logits, dim=-1)
if len(all_preds) == 0:
all_preds.append(preds.detach().cpu().numpy())
all_label.append(y.detach().cpu().numpy())
else:
all_preds[0] = np.append(
all_preds[0], preds.detach().cpu().numpy(), axis=0 )
all_label[0] = np.append(
all_label[0], y.detach().cpu().numpy(), axis=0 )
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
if (step + 1) % args.gradient_accumulation_steps == 0:
losses.update(loss.item()*args.gradient_accumulation_steps)
if args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
global_step += 1
epoch_iterator.set_description(
"Training (%d / %d Steps) (loss=%2.5f)" % (global_step, t_total, losses.val) )
if args.local_rank in [-1, 0]:
writer.add_scalar("train/loss", scalar_value=losses.val, global_step=global_step)
writer.add_scalar("train/lr", scalar_value=scheduler.get_lr()[0], global_step=global_step)
if global_step % args.eval_every == 0 and args.local_rank in [-1, 0]:
accuracy = valid(args, model, writer, test_loader, global_step)
if best_acc < accuracy:
save_model(args, model)
best_acc = accuracy
best_step = global_step
logger.info("best accuracy so far: %f" % best_acc)
logger.info("best accuracy in step: %f" % best_step)
model.train()
if global_step % t_total == 0:
break
all_preds, all_label = all_preds[0], all_label[0]
accuracy = simple_accuracy(all_preds, all_label)
accuracy = torch.tensor(accuracy).to(args.device)
dist.barrier()
train_accuracy = reduce_mean(accuracy, args.nprocs)
train_accuracy = train_accuracy.detach().cpu().numpy()
writer.add_scalar("train/accuracy", scalar_value=train_accuracy, global_step=global_step)
if wnb: wandb.log({"acc_train": train_accuracy})
logger.info("train accuracy so far: %f" % train_accuracy)
logger.info("best valid accuracy in step: %f" % best_step)
losses.reset()
if global_step % t_total == 0:
break
if args.local_rank in [-1, 0]:
writer.close()
end_time = time.time()
logger.info("Best Accuracy: \t%f" % best_acc)
logger.info("Total Training Time: \t%f" % ((end_time - start_time) / 3600))
logger.info("End Training!")
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--name", required=True,
default="output",
help="Name of this run. Used for monitoring.")
parser.add_argument("--dataset", choices=["CUB", "dogs", "nabirds"], default="CUB",
help="Which downstream task.")
parser.add_argument("--model_type", choices=["ViT-B_16", "ViT-B_32", "ViT-L_16",
"ViT-L_32", "ViT-H_14", "R50-ViT-B_16"],
default="ViT-B_16",
help="Which ViT variant to use.")
parser.add_argument("--pretrained_dir", type=str, default="models/pre_trained/ViT-B_16.npz",
help="Where to search for pretrained ViT models.")
parser.add_argument("--output_dir", default="output", type=str,
help="The output directory where checkpoints will be saved.")
parser.add_argument("--img_size", default=400, type=int,
help="After-crop image resolution")
parser.add_argument("--resize_size", default=448, type=int,
help="Pre-crop image resolution")
parser.add_argument("--train_batch_size", default=16, type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size", default=16, type=int,
help="Total batch size for eval.")
parser.add_argument("--eval_every", default=200, type=int,
help="Run prediction on validation set every so many steps."
"Will always run one evaluation at the end of training.")
parser.add_argument("--num_workers", default=4, type=int,
help="Number of workers for dataset preparation.")
parser.add_argument("--learning_rate", default=3e-2, type=float,
help="The initial learning rate for SGD.")
parser.add_argument("--weight_decay", default=0, type=float,
help="Weight deay if we apply some.")
parser.add_argument("--num_steps", default=10000, type=int,
help="Total number of training steps to perform.")
parser.add_argument("--decay_type", choices=["cosine", "linear"], default="cosine",
help="How to decay the learning rate.")
parser.add_argument("--warmup_steps", default=500, type=int,
help="Step of training to perform learning rate warmup for.")
parser.add_argument("--max_grad_norm", default=1.0, type=float,
help="Max gradient norm.")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument('--fp16', action='store_true',
help="Whether to use 16-bit float precision instead of 32-bit")
parser.add_argument('--fp16_opt_level', type=str, default='O2',
help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
"See details at https://nvidia.github.io/apex/amp.html")
parser.add_argument('--loss_scale', type=float, default=0,
help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
"0 (default value): dynamic loss scaling.\n"
"Positive power of 2: static loss scaling value.\n")
parser.add_argument('--smoothing_value', type=float, default=0.0,
help="Label smoothing value\n")
parser.add_argument('--sm_vit', action='store_true',
help="Whether to use SM-ViT")
parser.add_argument('--coeff_max', type=float, default=0.25,
help="Coefficient for attention guiding (see Eq. 3 in the SM-ViT paper). Best for CUB adn NABirds: '0.25', best for St Dogs: '0.3'.\n")
parser.add_argument('--low_memory', action='store_true',
help="Allows to use less memory (RAM) during input image feeding. False: Slower - Do image pre-processing for the whole dataset at the beginning and store the results in memory. True: Faster - Do pre-processing on-the-go.")
parser.add_argument('--contr_loss', action='store_true',
help="Whether to use contrastive loss")
parser.add_argument('--focal_loss', action='store_true',
help="Whether to use focal loss")
    parser.add_argument('--data_root', type=str, default='./data', # Original default
help="Path to the dataset\n")
# '/l/users/20020067/Datasets/CUB_200_2011/CUB_200_2011/CUB_200_2011') # CUB
# '/l/users/20020067/Datasets/Stanford Dogs/Stanford_Dogs') # dogs
# '/l/users/20020067/Datasets/NABirds/NABirds') # NABirds
args = parser.parse_args()
#args.data_root = '{}/{}'.format(args.data_root, args.dataset) # for future development
# Setup CUDA, GPU & distributed training
if args.local_rank == -1:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl',
timeout=timedelta(minutes=60))
args.n_gpu = 1
args.device = device
args.nprocs = torch.cuda.device_count()
# Setup logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" %
(args.local_rank, args.device, args.n_gpu, bool(args.local_rank != -1), args.fp16))
# Set seed
set_seed(args)
# Model & Tokenizer Setup
args, model = setup(args)
if wnb: wandb.watch(model)
# Training
train(args, model)
if __name__ == "__main__":
main()
| 17,894 | 39.763098 | 247 |
py
|
sm-vit
|
sm-vit-main/models/modeling.py
|
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import logging
import math
from os.path import join as pjoin
import torch
import torch.nn as nn
import numpy as np
from torch.nn import CrossEntropyLoss, Dropout, Softmax, Linear, Conv2d, LayerNorm
from torch.nn.modules.utils import _pair
from scipy import ndimage
import torch.nn.functional as F
import models.configs as configs
debug_mode = False # For debug
if debug_mode: import random
logger = logging.getLogger(__name__)
ATTENTION_Q = "MultiHeadDotProductAttention_1/query"
ATTENTION_K = "MultiHeadDotProductAttention_1/key"
ATTENTION_V = "MultiHeadDotProductAttention_1/value"
ATTENTION_OUT = "MultiHeadDotProductAttention_1/out"
FC_0 = "MlpBlock_3/Dense_0"
FC_1 = "MlpBlock_3/Dense_1"
ATTENTION_NORM = "LayerNorm_0"
MLP_NORM = "LayerNorm_2"
def np2th(weights, conv=False):
"""Possibly convert HWIO to OIHW."""
if conv:
weights = weights.transpose([3, 2, 0, 1])
return torch.from_numpy(weights)
def swish(x):
return x * torch.sigmoid(x)
class LabelSmoothing(nn.Module):
"""
NLL loss with label smoothing.
"""
def __init__(self, smoothing=0.0):
"""
Constructor for the LabelSmoothing module.
:param smoothing: label smoothing factor
"""
super(LabelSmoothing, self).__init__()
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
def forward(self, x, target):
logprobs = torch.nn.functional.log_softmax(x, dim=-1)
nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1))
nll_loss = nll_loss.squeeze(1)
smooth_loss = -logprobs.mean(dim=-1)
loss = self.confidence * nll_loss + self.smoothing * smooth_loss
return loss.mean()
class FocalLoss(torch.nn.Module):
def __init__(self, alpha=1, gamma=2, reduce=True):
super(FocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduce = reduce
def forward(self, inputs, targets):
        BCE_loss = torch.nn.CrossEntropyLoss()(inputs, targets)  # multi-class cross-entropy, despite the BCE_ name
pt = torch.exp(-BCE_loss)
F_loss = self.alpha * (1-pt)**self.gamma * BCE_loss
if self.reduce:
return torch.mean(F_loss)
else:
return F_loss
ACT2FN = {"gelu": torch.nn.functional.gelu, "relu": torch.nn.functional.relu, "swish": swish}
class Attention(nn.Module):
def __init__(self, config, vis, coeff_max=0.25):
super(Attention, self).__init__()
self.coeff_max = coeff_max
self.vis = vis
self.num_attention_heads = config.transformer["num_heads"]
self.attention_head_size = int(config.hidden_size / self.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = Linear(config.hidden_size, self.all_head_size)
self.key = Linear(config.hidden_size, self.all_head_size)
self.value = Linear(config.hidden_size, self.all_head_size)
self.out = Linear(config.hidden_size, config.hidden_size)
self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"])
self.softmax = Softmax(dim=-1)
self.softmax2 = Softmax(dim=-2)
def transpose_for_scores(self, x):
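        # Reshape (B, N, H*d) -> (B, H, N, d) so each head attends independently.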
new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
x = x.view(*new_x_shape)
return x.permute(0, 2, 1, 3)
def forward(self, hidden_states, mask=None):
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer)
key_layer = self.transpose_for_scores(mixed_key_layer)
value_layer = self.transpose_for_scores(mixed_value_layer)
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if mask is not None:
if debug_mode:
print_info = True if (random.random() < 0.000001) else False
x = random.random()
if (x > 0.00005) and (x < 0.00007):
print_info = True
else:
print_info = False
else:
print_info = False
max_as = torch.max(attention_scores[:, :, 0, :], dim=2, keepdim=False)[0]
max_as = max_as.to(device='cuda')
if print_info:
print("mask before:", mask)
print("attn scores before:", attention_scores[:, :, 0, :])
print("attn scores max_min before:")
print(max_as, torch.min(attention_scores[:, :, 0, :], dim=2, keepdim=False)[0])
print(torch.topk(attention_scores[:, :, 0, :], 5, largest=True), torch.topk(attention_scores[:, :, 0, :], 5, largest=False))
            # Mask extended by one slot for the CLS token (e.g. 625 patches + 1 = 626 for 400x400 inputs with 16x16 patches)
            mask_626 = torch.zeros(mask.size(0), (mask.size(1) + 1))
mask_626 = mask_626.to(device='cuda')
mask_626[:, 1:] = mask[:, :]
mask_626[:, 0] = 0
if print_info: print("mask626:", mask_626)
            # Attention guiding (Eq. 3 in the SM-ViT paper): for the CLS query,
            # boost the raw scores of patches flagged by the saliency mask
            # (mask < 0.5 here) by coeff_max times the per-head maximum score;
            # all other scores are left untouched.
attention_scores[:, :, 0, :] = \
torch.where( mask_626[:, None, :] < 0.5, \
torch.add( attention_scores[:, :, 0, :], \
torch.mul( max_as[:, :, None] , torch.tensor(self.coeff_max).cuda()) ), \
attention_scores[:, :, 0, :] #.float()
)
if print_info:
print("attn scores after:", attention_scores[:, :, 0, :])
print("attn scores max_min after:")
print(torch.max(attention_scores[:, :, 0, :]), torch.min(attention_scores[:, :, 0, :]))
print(torch.topk(attention_scores[:, :, 0, :], 5, largest=True), torch.topk(attention_scores[:, :, 0, :], 5, largest=False))
attention_probs = self.softmax(attention_scores)
weights = attention_probs if self.vis else None
attention_probs = self.attn_dropout(attention_probs)
context_layer = torch.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
context_layer = context_layer.view(*new_context_layer_shape)
attention_output = self.out(context_layer)
attention_output = self.proj_dropout(attention_output)
return attention_output, weights, self.softmax2(attention_scores)[:,:,:,0]
class Mlp(nn.Module):
def __init__(self, config):
super(Mlp, self).__init__()
self.fc1 = Linear(config.hidden_size, config.transformer["mlp_dim"])
self.fc2 = Linear(config.transformer["mlp_dim"], config.hidden_size)
self.act_fn = ACT2FN["gelu"]
self.dropout = Dropout(config.transformer["dropout_rate"])
self._init_weights()
def _init_weights(self):
nn.init.xavier_uniform_(self.fc1.weight)
nn.init.xavier_uniform_(self.fc2.weight)
nn.init.normal_(self.fc1.bias, std=1e-6)
nn.init.normal_(self.fc2.bias, std=1e-6)
def forward(self, x):
x = self.fc1(x)
x = self.act_fn(x)
x = self.dropout(x)
x = self.fc2(x)
x = self.dropout(x)
return x
class Embeddings(nn.Module):
"""Construct the embeddings from patch, position embeddings.
"""
def __init__(self, config, img_size, in_channels=3):
super(Embeddings, self).__init__()
self.hybrid = None
img_size = _pair(img_size)
# EXPERIMENTAL. Overlapping patches:
overlap = False
if overlap: slide = 12 # 14
if config.patches.get("grid") is not None:
grid_size = config.patches["grid"]
patch_size = (img_size[0] // 16 // grid_size[0], img_size[1] // 16 // grid_size[1])
n_patches = (img_size[0] // 16) * (img_size[1] // 16)
self.hybrid = True
else:
patch_size = _pair(config.patches["size"])
if overlap:
n_patches = ((img_size[0] - patch_size[0]) // slide + 1) * ((img_size[1] - patch_size[1]) // slide + 1)
else:
n_patches = (img_size[0] // patch_size[0]) * (img_size[1] // patch_size[1])
self.hybrid = False
if overlap:
self.patch_embeddings = Conv2d(in_channels=in_channels,
out_channels=config.hidden_size,
kernel_size=patch_size,
stride=(slide, slide) )
else:
self.patch_embeddings = Conv2d(in_channels=in_channels,
out_channels=config.hidden_size,
kernel_size=patch_size,
stride=patch_size )
self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches+1, config.hidden_size))
self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
self.dropout = Dropout(config.transformer["dropout_rate"])
def forward(self, x):
B = x.shape[0]
cls_tokens = self.cls_token.expand(B, -1, -1)
x = self.patch_embeddings(x)
x = x.flatten(2)
x = x.transpose(-1, -2)
x = torch.cat((cls_tokens, x), dim=1)
embeddings = x + self.position_embeddings
embeddings = self.dropout(embeddings)
return embeddings
class Block(nn.Module):
def __init__(self, config, vis, coeff_max):
super(Block, self).__init__()
self.hidden_size = config.hidden_size
self.attention_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn_norm = LayerNorm(config.hidden_size, eps=1e-6)
self.ffn = Mlp(config)
self.attn = Attention(config, vis, coeff_max)
def forward(self, x, mask=None):
h = x
x = self.attention_norm(x)
x, weights, contribution = self.attn(x, mask)
x = x + h
h = x
x = self.ffn_norm(x)
x = self.ffn(x)
x = x + h
return x, weights, contribution
def load_from(self, weights, n_block):
ROOT = f"Transformer/encoderblock_{n_block}"
with torch.no_grad():
query_weight = np2th(weights[pjoin(ROOT, ATTENTION_Q, "kernel")]).view(self.hidden_size, self.hidden_size).t()
key_weight = np2th(weights[pjoin(ROOT, ATTENTION_K, "kernel")]).view(self.hidden_size, self.hidden_size).t()
value_weight = np2th(weights[pjoin(ROOT, ATTENTION_V, "kernel")]).view(self.hidden_size, self.hidden_size).t()
out_weight = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "kernel")]).view(self.hidden_size, self.hidden_size).t()
query_bias = np2th(weights[pjoin(ROOT, ATTENTION_Q, "bias")]).view(-1)
key_bias = np2th(weights[pjoin(ROOT, ATTENTION_K, "bias")]).view(-1)
value_bias = np2th(weights[pjoin(ROOT, ATTENTION_V, "bias")]).view(-1)
out_bias = np2th(weights[pjoin(ROOT, ATTENTION_OUT, "bias")]).view(-1)
self.attn.query.weight.copy_(query_weight)
self.attn.key.weight.copy_(key_weight)
self.attn.value.weight.copy_(value_weight)
self.attn.out.weight.copy_(out_weight)
self.attn.query.bias.copy_(query_bias)
self.attn.key.bias.copy_(key_bias)
self.attn.value.bias.copy_(value_bias)
self.attn.out.bias.copy_(out_bias)
mlp_weight_0 = np2th(weights[pjoin(ROOT, FC_0, "kernel")]).t()
mlp_weight_1 = np2th(weights[pjoin(ROOT, FC_1, "kernel")]).t()
mlp_bias_0 = np2th(weights[pjoin(ROOT, FC_0, "bias")]).t()
mlp_bias_1 = np2th(weights[pjoin(ROOT, FC_1, "bias")]).t()
self.ffn.fc1.weight.copy_(mlp_weight_0)
self.ffn.fc2.weight.copy_(mlp_weight_1)
self.ffn.fc1.bias.copy_(mlp_bias_0)
self.ffn.fc2.bias.copy_(mlp_bias_1)
self.attention_norm.weight.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "scale")]))
self.attention_norm.bias.copy_(np2th(weights[pjoin(ROOT, ATTENTION_NORM, "bias")]))
self.ffn_norm.weight.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "scale")]))
self.ffn_norm.bias.copy_(np2th(weights[pjoin(ROOT, MLP_NORM, "bias")]))
class Encoder(nn.Module):
def __init__(self, config, vis, coeff_max):
super(Encoder, self).__init__()
self.vis = vis
self.layer = nn.ModuleList()
num_layers = config.transformer["num_layers"]
self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6)
for _ in range(num_layers):
layer = Block(config, vis, coeff_max)
self.layer.append(copy.deepcopy(layer))
def forward(self, hidden_states, mask=None):
attn_weights = []
contributions = []
for layer_block in self.layer:
hidden_states, weights, contribution = layer_block(hidden_states, mask)
if self.vis:
attn_weights.append(weights)
contributions.append(contribution)
encoded = self.encoder_norm(hidden_states)
return encoded, attn_weights
class Transformer(nn.Module):
def __init__(self, config, img_size, vis, coeff_max):
super(Transformer, self).__init__()
self.embeddings = Embeddings(config, img_size=img_size)
self.encoder = Encoder(config, vis, coeff_max)
def forward(self, input_ids, mask=None):
embedding_output = self.embeddings(input_ids)
encoded, attn_weights = self.encoder(embedding_output, mask)
return encoded, attn_weights
class VisionTransformer(nn.Module):
def __init__(self, config, img_size=400, num_classes=200, smoothing_value=0, zero_head=False, vis=False, dataset='CUB', coeff_max=0.25, contr_loss=False, focal_loss=False):
super(VisionTransformer, self).__init__()
self.num_classes = num_classes
self.zero_head = zero_head
self.smoothing_value = smoothing_value
self.classifier = config.classifier
self.dataset=dataset
self.contr_loss = contr_loss
self.focal_loss = focal_loss
self.transformer = Transformer(config, img_size, vis, coeff_max)
self.head = Linear(config.hidden_size, num_classes)
def forward(self, x, labels=None, mask=None):
x, attn_weights = self.transformer(x, mask)
logits = self.head(x[:, 0])
if labels is not None:
if self.smoothing_value == 0:
loss_fct = CrossEntropyLoss()
else:
loss_fct = LabelSmoothing(self.smoothing_value)
if self.focal_loss: # enforce another type of loss
loss_fct = FocalLoss()
ce_loss = loss_fct(logits.view(-1, self.num_classes), labels.view(-1))
if self.contr_loss:
contrast_loss = con_loss(x[:, 0], labels.view(-1))
loss = ce_loss + contrast_loss
else:
loss = ce_loss # FFVT
return loss, logits
else:
return logits, attn_weights
def load_from(self, weights):
with torch.no_grad():
if self.zero_head:
nn.init.zeros_(self.head.weight)
nn.init.zeros_(self.head.bias)
else:
self.head.weight.copy_(np2th(weights["head/kernel"]).t())
self.head.bias.copy_(np2th(weights["head/bias"]).t())
self.transformer.embeddings.patch_embeddings.weight.copy_(np2th(weights["embedding/kernel"], conv=True))
self.transformer.embeddings.patch_embeddings.bias.copy_(np2th(weights["embedding/bias"]))
self.transformer.embeddings.cls_token.copy_(np2th(weights["cls"]))
self.transformer.encoder.encoder_norm.weight.copy_(np2th(weights["Transformer/encoder_norm/scale"]))
self.transformer.encoder.encoder_norm.bias.copy_(np2th(weights["Transformer/encoder_norm/bias"]))
posemb = np2th(weights["Transformer/posembed_input/pos_embedding"])
posemb_new = self.transformer.embeddings.position_embeddings
if posemb.size() == posemb_new.size():
self.transformer.embeddings.position_embeddings.copy_(posemb)
else:
logger.info("load_pretrained: resized variant: %s to %s" % (posemb.size(), posemb_new.size()))
ntok_new = posemb_new.size(1)
if self.classifier == "token":
posemb_tok, posemb_grid = posemb[:, :1], posemb[0, 1:]
ntok_new -= 1
else:
posemb_tok, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(np.sqrt(len(posemb_grid)))
gs_new = int(np.sqrt(ntok_new))
print('load_pretrained: grid-size from %s to %s' % (gs_old, gs_new))
posemb_grid = posemb_grid.reshape(gs_old, gs_old, -1)
zoom = (gs_new / gs_old, gs_new / gs_old, 1)
posemb_grid = ndimage.zoom(posemb_grid, zoom, order=1)
posemb_grid = posemb_grid.reshape(1, gs_new * gs_new, -1)
posemb = np.concatenate([posemb_tok, posemb_grid], axis=1)
self.transformer.embeddings.position_embeddings.copy_(np2th(posemb))
for bname, block in self.transformer.encoder.named_children():
                if not bname.startswith('ff'):
for uname, unit in block.named_children():
unit.load_from(weights, n_block=uname)
if self.transformer.embeddings.hybrid:
self.transformer.embeddings.hybrid_model.root.conv.weight.copy_(np2th(weights["conv_root/kernel"], conv=True))
gn_weight = np2th(weights["gn_root/scale"]).view(-1)
gn_bias = np2th(weights["gn_root/bias"]).view(-1)
self.transformer.embeddings.hybrid_model.root.gn.weight.copy_(gn_weight)
self.transformer.embeddings.hybrid_model.root.gn.bias.copy_(gn_bias)
for bname, block in self.transformer.embeddings.hybrid_model.body.named_children():
for uname, unit in block.named_children():
unit.load_from(weights, n_block=bname, n_unit=uname)
def con_loss(features, labels):
B, _ = features.shape
features = F.normalize(features)
cos_matrix = features.mm(features.t())
pos_label_matrix = torch.stack([labels == labels[i] for i in range(B)]).float()
neg_label_matrix = 1 - pos_label_matrix
pos_cos_matrix = 1 - cos_matrix
neg_cos_matrix = cos_matrix - 0.4
neg_cos_matrix[neg_cos_matrix < 0] = 0
loss = (pos_cos_matrix * pos_label_matrix).sum() + (neg_cos_matrix * neg_label_matrix).sum()
loss /= (B * B)
return loss
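# --- Sketch (not part of the original file) ---
# A hedged toy example of what con_loss rewards: same-label pairs are pulled
# toward cosine similarity 1, different-label pairs are pushed below the 0.4
# margin. The feature values here are illustrative assumptions only.
def _con_loss_demo():
    feats = torch.tensor([[1.0, 0.0], [0.9, 0.1], [0.0, 1.0]])
    labels = torch.tensor([0, 0, 1])
    return con_loss(feats, labels) # small positive scalar for this toy batch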
CONFIGS = {
'ViT-B_16': configs.get_b16_config(),
'ViT-B_32': configs.get_b32_config(),
'ViT-L_16': configs.get_l16_config(),
'ViT-L_32': configs.get_l32_config(),
'ViT-H_14': configs.get_h14_config(),
'R50-ViT-B_16': configs.get_r50_b16_config(),
'testing': configs.get_testing(),
}
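# --- Usage sketch (not part of the original file) ---
# A minimal, hedged example of building the model from CONFIGS; the model
# variant, image size and class count are illustrative assumptions, and
# mask=None follows the default of the forward definitions above.
def _build_model_demo():
    config = CONFIGS['ViT-B_16']
    model = VisionTransformer(config, img_size=224, num_classes=10, vis=True)
    x = torch.randn(2, 3, 224, 224)
    # Without labels the model returns (logits, attn_weights); with labels
    # it returns (loss, logits) for a training step.
    logits, attn_weights = model(x)
    return logits.shape # expected: torch.Size([2, 10])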
| 19,835 | 38.12426 | 176 |
py
|
sm-vit
|
sm-vit-main/models/configs.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ml_collections
def get_testing():
"""Returns a minimal configuration for testing."""
config = ml_collections.ConfigDict()
config.patches = ml_collections.ConfigDict({'size': (16, 16)})
config.hidden_size = 1
config.transformer = ml_collections.ConfigDict()
config.transformer.mlp_dim = 1
config.transformer.num_heads = 1
config.transformer.num_layers = 1
config.transformer.attention_dropout_rate = 0.0
config.transformer.dropout_rate = 0.1
config.classifier = 'token'
config.representation_size = None
return config
def get_b16_config():
"""Returns the ViT-B/16 configuration."""
config = ml_collections.ConfigDict()
config.patches = ml_collections.ConfigDict({'size': (16, 16)})
config.hidden_size = 768
config.transformer = ml_collections.ConfigDict()
config.transformer.mlp_dim = 3072
config.transformer.num_heads = 12
config.transformer.num_layers = 12
config.transformer.attention_dropout_rate = 0.0
config.transformer.dropout_rate = 0.1
config.classifier = 'token'
config.representation_size = None
return config
def get_r50_b16_config():
"""Returns the Resnet50 + ViT-B/16 configuration."""
config = get_b16_config()
del config.patches.size
config.patches.grid = (28, 28)
config.resnet = ml_collections.ConfigDict()
config.resnet.num_layers = (3, 4, 9)
config.resnet.width_factor = 1
return config
def get_b32_config():
"""Returns the ViT-B/32 configuration."""
config = get_b16_config()
config.patches.size = (32, 32)
return config
def get_l16_config():
"""Returns the ViT-L/16 configuration."""
config = ml_collections.ConfigDict()
config.patches = ml_collections.ConfigDict({'size': (16, 16)})
config.hidden_size = 1024
config.transformer = ml_collections.ConfigDict()
config.transformer.mlp_dim = 4096
config.transformer.num_heads = 16
config.transformer.num_layers = 24
config.transformer.attention_dropout_rate = 0.0
config.transformer.dropout_rate = 0.1
config.classifier = 'token'
config.representation_size = None
return config
def get_l32_config():
"""Returns the ViT-L/32 configuration."""
config = get_l16_config()
config.patches.size = (32, 32)
return config
def get_h14_config():
"""Returns the ViT-L/16 configuration."""
config = ml_collections.ConfigDict()
config.patches = ml_collections.ConfigDict({'size': (14, 14)})
config.hidden_size = 1280
config.transformer = ml_collections.ConfigDict()
config.transformer.mlp_dim = 5120
config.transformer.num_heads = 16
config.transformer.num_layers = 32
config.transformer.attention_dropout_rate = 0.0
config.transformer.dropout_rate = 0.1
config.classifier = 'token'
config.representation_size = None
return config
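# --- Usage sketch (not part of the original file) ---
# ConfigDicts support attribute access; a hedged example of deriving the
# ViT token count for an assumed 448x448 input under ViT-B/16.
def _token_count_demo():
    config = get_b16_config()
    patch = config.patches['size'][0] # 16
    num_patches = (448 // patch) ** 2 # 784 patch tokens
    return 1 + num_patches # plus the CLS token -> 785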
| 3,453 | 31.895238 | 74 |
py
|
sm-vit
|
sm-vit-main/utils/data_utils.py
|
import logging
import torch
from torchvision import transforms, datasets
from .dataset import *
from torch.utils.data import DataLoader, RandomSampler, DistributedSampler, SequentialSampler
from PIL import Image
from .autoaugment import AutoAugImageNetPolicy
import os
logger = logging.getLogger(__name__)
def get_loader(args):
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
transform_train = transforms.Compose([
transforms.RandomResizedCrop((args.img_size, args.img_size), scale=(0.05, 1.0)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
transform_test = transforms.Compose([
transforms.Resize((args.img_size, args.img_size)),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
if args.dataset == 'dogs':
if args.sm_vit:
train_transform=transforms.Compose([
transforms.Resize((args.img_size, args.img_size), Image.BILINEAR),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
# transforms.RandomHorizontalFlip(), !!! FLIPPING in dataset.py !!!
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
test_transform=transforms.Compose([
transforms.Resize((args.img_size, args.img_size), Image.BILINEAR),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
else:
train_transform=transforms.Compose([
transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
transforms.RandomCrop((args.img_size, args.img_size)),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
test_transform=transforms.Compose([
transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
transforms.CenterCrop((args.img_size, args.img_size)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
trainset = dogs(args.dataset,
root=args.data_root,
is_train=True,
cropped=False,
transform=train_transform,
download=False,
sm_vit=args.sm_vit,
low_memory=args.low_memory,
img_size=args.img_size
)
testset = dogs(args.dataset,
root=args.data_root,
is_train=False,
cropped=False,
transform=test_transform,
download=False,
sm_vit=args.sm_vit,
low_memory=args.low_memory,
img_size=args.img_size
)
elif args.dataset== "CUB":
if args.sm_vit:
train_transform=transforms.Compose([
transforms.Resize((args.img_size, args.img_size),Image.BILINEAR),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
#transforms.RandomHorizontalFlip(), # !!! FLIPPING in dataset.py !!!
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
test_transform=transforms.Compose([
transforms.Resize((args.img_size, args.img_size),Image.BILINEAR),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
else:
train_transform=transforms.Compose([
transforms.Resize((args.resize_size, args.resize_size),Image.BILINEAR),
transforms.RandomCrop((args.img_size, args.img_size)),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
test_transform=transforms.Compose([
transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
transforms.CenterCrop((args.img_size, args.img_size)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
        # Resolve the dataset class (e.g. CUB) from its name via eval
        trainset = eval(args.dataset)(args.dataset, root=args.data_root, is_train=True, \
                        transform=train_transform, sm_vit=args.sm_vit, low_memory=args.low_memory, img_size=args.img_size)
        testset = eval(args.dataset)(args.dataset, root=args.data_root, is_train=False, \
                        transform = test_transform, sm_vit=args.sm_vit, low_memory=args.low_memory, img_size=args.img_size)
elif args.dataset == 'nabirds':
if args.sm_vit:
train_transform=transforms.Compose([
transforms.Resize((args.img_size, args.img_size), Image.BILINEAR),
                transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), # added following FFVT
#transforms.RandomHorizontalFlip(), # !!! FLIPPING in dataset.py !!!
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
test_transform=transforms.Compose([
transforms.Resize((args.img_size, args.img_size), Image.BILINEAR),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
else:
train_transform=transforms.Compose([
transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
transforms.RandomCrop((args.img_size, args.img_size)),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
test_transform=transforms.Compose([
transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
transforms.CenterCrop((args.img_size, args.img_size)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
trainset = NABirds(args.dataset, root=args.data_root, is_train=True, \
transform=train_transform, sm_vit=args.sm_vit, low_memory=args.low_memory, img_size=args.img_size)
testset = NABirds(args.dataset, root=args.data_root, is_train=False, \
transform=test_transform, sm_vit=args.sm_vit, low_memory=args.low_memory, img_size=args.img_size)
### Not optimised datasets:
if args.dataset == 'INat2017':
train_transform=transforms.Compose([transforms.Resize((400, 400), Image.BILINEAR),
transforms.RandomCrop((304, 304)),
transforms.RandomHorizontalFlip(),
AutoAugImageNetPolicy(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
test_transform=transforms.Compose([transforms.Resize((400, 400), Image.BILINEAR),
transforms.CenterCrop((304, 304)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
trainset = INat2017(args.data_root, 'train', train_transform)
testset = INat2017(args.data_root, 'val', test_transform)
elif args.dataset == 'car':
trainset = CarsDataset(os.path.join(args.data_root,'devkit/cars_train_annos.mat'),
os.path.join(args.data_root,'cars_train'),
os.path.join(args.data_root,'devkit/cars_meta.mat'),
# cleaned=os.path.join(data_dir,'cleaned.dat'),
transform=transforms.Compose([
transforms.Resize((600, 600), Image.BILINEAR),
transforms.RandomCrop((448, 448)),
transforms.RandomHorizontalFlip(),
AutoAugImageNetPolicy(),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
)
testset = CarsDataset(os.path.join(args.data_root,'cars_test_annos_withlabels.mat'),
os.path.join(args.data_root,'cars_test'),
os.path.join(args.data_root,'devkit/cars_meta.mat'),
# cleaned=os.path.join(data_dir,'cleaned_test.dat'),
transform=transforms.Compose([
transforms.Resize((600, 600), Image.BILINEAR),
transforms.CenterCrop((448, 448)),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
)
elif args.dataset== "air":
train_transform=transforms.Compose([transforms.Resize((args.resize_size, args.resize_size),Image.BILINEAR),
transforms.RandomCrop((args.img_size, args.img_size)),
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), # my add
transforms.RandomHorizontalFlip(),
#transforms.RandomVerticalFlip(),
transforms.ToTensor(),
#transforms.Normalize([0.8416, 0.867, 0.8233], [0.2852, 0.246, 0.3262])])
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
test_transform=transforms.Compose([
transforms.Resize((args.resize_size, args.resize_size), Image.BILINEAR),
transforms.CenterCrop((args.img_size, args.img_size)),
#transforms.Resize((args.img_size, args.img_size)),
transforms.ToTensor(),
#transforms.Normalize([0.8416, 0.867, 0.8233], [0.2852, 0.246, 0.3262])])
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
trainset = FGVC_aircraft(root=args.data_root, is_train=True, transform=train_transform)
testset = FGVC_aircraft(root=args.data_root, is_train=False, transform = test_transform)
if args.local_rank == 0:
torch.distributed.barrier()
train_sampler = RandomSampler(trainset) if args.local_rank == -1 else DistributedSampler(trainset)
test_sampler = SequentialSampler(testset)
train_loader = DataLoader(trainset,
sampler=train_sampler,
batch_size=args.train_batch_size,
num_workers=args.num_workers,
pin_memory=True)
test_loader = DataLoader(testset,
sampler=test_sampler,
batch_size=args.eval_batch_size,
num_workers=args.num_workers,
pin_memory=True) if testset is not None else None
return train_loader, test_loader
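# --- Usage sketch (not part of the original file) ---
# A hedged example of calling get_loader with a bare namespace instead of
# argparse; the field values below are illustrative assumptions only.
def _get_loader_demo():
    from types import SimpleNamespace
    args = SimpleNamespace(
        local_rank=-1, # single-process run: no distributed barriers
        dataset='CUB',
        data_root='./data/CUB_200_2011',
        img_size=400,
        resize_size=448, # only used when sm_vit is False
        sm_vit=True,
        low_memory=True,
        train_batch_size=8,
        eval_batch_size=8,
        num_workers=4,
    )
    return get_loader(args)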
| 12,747 | 46.924812 | 124 |
py
|
sm-vit
|
sm-vit-main/utils/dataset.py
|
import os
import json
from os.path import join
import numpy as np
import scipy
from scipy import io
import scipy.misc
from PIL import Image
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch.utils.data import Dataset
from torchvision.datasets import VisionDataset
from torchvision.datasets.folder import default_loader
from torchvision.datasets.utils import download_url, list_dir, check_integrity, extract_archive, verify_str_arg
# My:
from torchvision import transforms
from torchvision.utils import save_image
import random
from torchvision.transforms import functional as F
import U2Net
from U2Net.u2net_test import mask_hw
from skimage import transform as transform_sk
import gc
#
class Generic_smvit_DS():
    """Shared SM-ViT pre-processing and __getitem__ logic reused by the CUB, dogs and NABirds datasets."""
def generic_preprocess(self, file_list, file_list_full, shape_hw_list, data_len, train_test_list=None):
img = []
mask = []
## For other experiments:
if self.ds_name != "CUB":
self.gt_bbox = False
self.gt_parts = False
if self.gt_bbox:
bounding_boxes_file = open(os.path.join(self.root, 'bounding_boxes.txt'))
bb_list = []
for line in bounding_boxes_file:
bb_list_x = line[:-1].split(' ')[-4]
bb_list_y = line[:-1].split(' ')[-3]
bb_list_w = line[:-1].split(' ')[-2]
bb_list_h = line[:-1].split(' ')[-1]
bb_list.append( [ int(bb_list_x.split('.')[0]),
int(bb_list_y.split('.')[0]),
int(bb_list_w.split('.')[0]),
int(bb_list_h.split('.')[0]) ]
)
bb_list = [x for i, x in zip(train_test_list, bb_list) if i]
if self.gt_parts:
parts_file = open(os.path.join(self.root, 'parts/part_locs.txt'))
PARTS_NUM = 15
parts_list = []
part_t = []
part_count = 0
for line in parts_file:
part_t_raw_x = line[:-1].split(' ')[-3]
part_t_raw_y = line[:-1].split(' ')[-2]
part_t_pres = line[:-1].split(' ')[-1]
part_t.append ( [ int(part_t_pres),
int(part_t_raw_x.split('.')[0]),
int(part_t_raw_y.split('.')[0]) ]
)
part_count = part_count + 1
if (part_count >= PARTS_NUM):
parts_list.append( part_t )
part_t = []
part_count = 0
parts_list = [x for i, x in zip(train_test_list, parts_list) if i]
##
print(f'[INFO] Pre-processing {self.mode} files...')
if self.sm_vit:
if self.full_ds:
mask_u2n_list, x_u2n_list, y_u2n_list, h_u2n_list, w_u2n_list = \
mask_hw(full_ds=self.full_ds, img_path=file_list_full, shape_hw=shape_hw_list)
            else: # for debug only: expects file_list to be a single image path
                img_path = os.path.join(self.root, self.base_folder, file_list)
img_temp = scipy.misc.imread(img_path)
h_max_temp = img_temp.shape[0] # y
w_max_temp = img_temp.shape[1] # x
mask_u2n, x_u2n, y_u2n, h_u2n, w_u2n = \
mask_hw(full_ds=self.full_ds, img_path=img_path, shape_hw=(h_max_temp, w_max_temp))
mask_temp, x, y, h, w = mask_u2n, x_u2n, y_u2n, h_u2n, w_u2n
for ind, file in enumerate(file_list[:data_len]):
if self.debug: print(f"{self.mode} file:", file)
img_temp = scipy.misc.imread(os.path.join(self.root, self.base_folder, file))
## Downscale large images for memory efficiency
if self.ds_name != "CUB":
img_temp = (img_temp).astype(np.uint8)
if (img_temp.shape[0] > self.max_res) or (img_temp.shape[1] > self.max_res):
if self.debug and ind < 10:
print("Before:", img_temp.shape[0], img_temp.shape[1])
img_name = ("test/img_before_tr" + str(ind) + ".png")
Image.fromarray(img_temp, mode='RGB').save(img_name)
if img_temp.shape[0] > img_temp.shape[1]:
downscale_coef = img_temp.shape[0] / self.max_res
else:
downscale_coef = img_temp.shape[1] / self.max_res
img_temp = transform_sk.resize( img_temp, ( int((img_temp.shape[0] // downscale_coef)), int((img_temp.shape[1] // downscale_coef)) ), \
mode='constant', anti_aliasing=True, anti_aliasing_sigma=None, preserve_range=True )
if self.debug and ind < 10:
print("After:", img_temp.shape[0], img_temp.shape[1])
img_temp = (img_temp).astype(np.uint8)
img_name = ("test/img_after_tr" + str(ind) + ".png")
Image.fromarray(img_temp, mode='RGB').save(img_name)
else:
if self.debug and ind < 10:
print("Normal:", img_temp.shape[0], img_temp.shape[1])
img_name = ("test/img_normal_tr" + str(ind) + ".png")
Image.fromarray(img_temp, mode='RGB').save(img_name)
h_max = img_temp.shape[0] # y
w_max = img_temp.shape[1] # x
#ch_max = img_temp.shape[2] # ch
if self.gt_bbox:
                x, y, w, h = bb_list[ind] # (x, y) - top-left corner offsets; (w, h) - box width and height
if self.gt_parts:
parts = parts_list[ind] # list of 15 parts with [x, y] center corrdinates
#mask_temp = np.zeros((int(h_max), int(w_max))) # Black mask
mask_temp = np.ones((int(h_max), int(w_max)))
p_part = 16*3 # padding around center point
for part_n in range(len(parts)):
part = parts[part_n]
if part[0] != 0:
x_min_p = part[1] - p_part
if x_min_p < 0:
x_min_p = 0
x_max_p = part[1] + p_part
if x_max_p > w_max:
x_max_p = w_max
y_min_p = part[2] - p_part
if y_min_p < 0:
y_min_p = 0
y_max_p = part[2] + p_part
if y_max_p > h_max:
y_max_p = h_max
#mask_temp[int(y_min_p):int(y_max_p), int(x_min_p):int(x_max_p)] = 1 # Black mask
mask_temp[int(y_min_p):int(y_max_p), int(x_min_p):int(x_max_p)] = 0
if self.sm_vit and self.full_ds:
mask_temp = mask_u2n_list[ind]
x = x_u2n_list[ind]
y = y_u2n_list[ind]
h = h_u2n_list[ind]
w = w_u2n_list[ind]
## Image and Mask Padding:
if self.sm_vit or self.gt_bbox:
if self.padding:
p = 15 # extra space around bbox
else:
p = 0
x_min = x - p
if x_min < 0:
x_min = 0
x_max = x + w + p
if x_max > w_max:
x_max = w_max
y_min = y - p
if y_min < 0:
y_min = 0
y_max = y + h + p
if y_max > h_max:
y_max = h_max
if h_max <=1:
print("[WARNING] bad_h", h_max)
if w_max <=1:
print("[WARNING] bad_w", w_max)
if y_min >= y_max:
print("[WARNING] bad_y", "min:", y_min, "max:", y_max)
print("[WARNING] y:", y, "h:", h)
if x_min >= x_max:
print("[WARNING] bad_x", "min:", x_min, "max:", x_max)
print("[WARNING] x:", x, "w:", w)
##
## Crop with bbox:
if self.rand_crop:
#prob_rcrop = 0.25 # 0.07 # 0.3 # 0.5
#rand_crop_mask_temp = bool(random.random() < prob_rcrop)
#if rand_crop_mask_temp:
h_max_img = img_temp.shape[0]
w_max_img = img_temp.shape[1]
#h_crop_mid = 368 # 384 (92%), 368 (84%), 352 (77%), 336 (70%), 320 (64%), 304 (57%)
h_crop_mid_img = int(h_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
#w_crop_mid = 368 # 384 (92%), 368 (84%), 352 (77%), 336 (70%), 320 (64%), 304 (57%)
w_crop_mid_img = int(w_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
h_crop_min_img = random.randint(0, (h_max_img - h_crop_mid_img)) # 40) #, 400-360) #, h - th)
w_crop_min_img = random.randint(0, (w_max_img - w_crop_mid_img)) # 40) #, 400-360) #, w - tw)
h_crop_max_img = h_crop_mid_img + h_crop_min_img
w_crop_max_img = w_crop_mid_img + w_crop_min_img
# Crop image with bbox:
if len(img_temp.shape) == 3:
img_temp = img_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img), :] # h, w, ch
else:
img_temp = img_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)] # h, w
# Crop mask with bbox:
mask_temp = mask_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)]
else:
# Crop image with bbox:
if len(img_temp.shape) == 3:
if self.gt_parts:
for j in range(3):
img_temp[:, :, j] = img_temp[:, :, j] * mask_temp # Black mask
else:
#test_img_temp = test_img_temp[int(y):int(y + h), int(x):int(x + w), :] # h, w, ch
img_temp = img_temp[int(y_min):int(y_max), int(x_min):int(x_max), :] # h, w, ch
else:
if self.gt_parts:
img_temp[:, :] = img_temp[:, :] * mask_temp # Black mask:
else:
img_temp = img_temp[int(y_min):int(y_max), int(x_min):int(x_max)] # h, w
# Crop mask with bbox:
if self.sm_vit or self.gt_bbox:
mask_temp = mask_temp[int(y_min):int(y_max), int(x_min):int(x_max)]
##
if ( (img_temp.shape[0] != mask_temp.shape[0]) or (img_temp.shape[1] != mask_temp.shape[1]) ):
print("[WARNING] Image shape does not match mask shape for sample:", ind, ". \t" , "Found shapes:", img_temp.shape, mask_temp.shape)
img.append(img_temp)
mask.append(mask_temp)
return img, mask
def generic_preprocess_lowMem(self, file_list, file_list_full, shape_hw_list):
print(f'[INFO] Pre-processing {self.mode} files in the low memory mode...')
if self.sm_vit:
if self.full_ds:
mask_u2n_list, x_u2n_list, y_u2n_list, h_u2n_list, w_u2n_list = \
mask_hw(full_ds=self.full_ds, img_path=file_list_full, shape_hw=shape_hw_list)
            else: # for debug only: expects file_list to be a single image path
img_path = os.path.join(self.root, self.base_folder, file_list)
img_temp = scipy.misc.imread(img_path)
h_max_temp = img_temp.shape[0] # y
w_max_temp = img_temp.shape[1] # x
mask_u2n, x_u2n, y_u2n, h_u2n, w_u2n = \
mask_hw(full_ds=self.full_ds, img_path=img_path, shape_hw=(h_max_temp, w_max_temp))
                # Wrap the single-sample outputs in lists so the return below
                # is well-defined in this branch too
                mask_u2n_list, x_u2n_list, y_u2n_list, h_u2n_list, w_u2n_list = \
                    [mask_u2n], [x_u2n], [y_u2n], [h_u2n], [w_u2n]
        return mask_u2n_list, x_u2n_list, y_u2n_list, h_u2n_list, w_u2n_list
def generic_getitem(self, index, img, mask):
if self.is_train:
if self.rand_crop_im_mask:
h_max_img = img.shape[0]
w_max_img = img.shape[1]
h_crop_mid_img = int(h_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
w_crop_mid_img = int(w_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
h_crop_min_img = random.randint(0, (h_max_img - h_crop_mid_img)) # 40) #, 400-360) #, h - th)
w_crop_min_img = random.randint(0, (w_max_img - w_crop_mid_img)) # 40) #, 400-360) #, w - tw)
h_crop_max_img = h_crop_mid_img + h_crop_min_img
w_crop_max_img = w_crop_mid_img + w_crop_min_img
# Crop image:
if len(img.shape) == 3:
img = img[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img), :] # h, w, ch
else:
img = img[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)] # h, w
# Crop mask:
mask = mask[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)]
if len(img.shape) == 2:
img = np.stack([img] * 3, 2)
if self.ds_name != "CUB":
img = (img).astype(np.uint8)
img = Image.fromarray(img, mode='RGB')
if self.debug and index < 10:
img_tem = transforms.ToTensor()(img)
img_name = ("test/img_bef" + str(index) + ".png")
save_image( img_tem, img_name)
## Image:
if self.transform is not None:
if self.is_train:
if not self.flip_mask_as_image: # normal
img = self.transform(img)
else:
if random.random() < 0.5:
flipped = False
img = self.transform(img)
else:
flipped = True
transform_img_flip = transforms.Compose([
#transforms.Resize((args.resize_size, args.resize_size),Image.BILINEAR),
#transforms.RandomCrop((args.img_size, args.img_size)),
transforms.Resize((self.img_size, self.img_size),Image.BILINEAR), # my for bbox
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), # my add (FFVT)
transforms.RandomHorizontalFlip(p=1.0), # !!! FLIPPING in dataset.py !!!
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
img = transform_img_flip(img)
else:
img = self.transform(img)
if self.debug and index < 10:
img_name = ("test/img_aft" + str(index) + ".png")
save_image( img, img_name)
## Mask:
if self.crop_mask:
h_max_im = mask.shape[0]
w_max_im = mask.shape[1]
h_crop_mid = int(h_max_im * 0.84) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
w_crop_mid = int(w_max_im * 0.84) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
cropped = np.ones_like(mask)
if self.mid_val:
cropped = cropped * 0.125 # (for 0.2)
h_crop_min = random.randint(0, (h_max_im - h_crop_mid)) # 40) #, 400-360) #, h - th)
w_crop_min = random.randint(0, (w_max_im - w_crop_mid)) # 40) #, 400-360) #, w - tw)
h_crop_max = h_crop_mid + h_crop_min
w_crop_max = w_crop_mid + w_crop_min
cropped[int(h_crop_min):int(h_crop_max), int(w_crop_min):int(w_crop_max)] = 0
mask = mask + cropped
if self.mid_val:
mask[mask > 1.1] = 1
else:
mask[mask > 1] = 1
mask = (mask * 255).astype(np.uint8)
mask = Image.fromarray(mask, mode='L')
if self.debug and index < 10:
mask_tem = transforms.ToTensor()(mask)
img_name = ("test/mask_bef" + str(index) + ".png")
save_image( mask_tem, img_name)
mask_size = int(self.img_size // 16)
if self.is_train:
if not self.flip_mask_as_image: # normal
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()
])
else:
if flipped:
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
transforms.RandomHorizontalFlip(p=1.0),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()
])
else:
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()
])
else:
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()])
mask = transform_mask(mask)
if self.debug and index < 10:
img_name = ("test/mask_aft" + str(index) + ".png")
save_image(mask, img_name)
mask = torch.flatten(mask)
return img, mask
def generic_getitem_lowMem(self, index):
file_temp = self.file_list[index]
img_temp = scipy.misc.imread(os.path.join(self.root, self.base_folder, file_temp))
## Downscale large images for memory efficiency
if self.ds_name != "CUB":
self.gt_bbox = False
self.gt_parts = False
img_temp = (img_temp).astype(np.uint8)
if (img_temp.shape[0] > self.max_res) or (img_temp.shape[1] > self.max_res):
if self.debug and index < 10:
print("Before:", img_temp.shape[0], img_temp.shape[1])
img_name = ("test/img_before_tr" + str(index) + ".png")
Image.fromarray(img_temp, mode='RGB').save(img_name)
if img_temp.shape[0] > img_temp.shape[1]:
downscale_coef = img_temp.shape[0] / self.max_res
else:
downscale_coef = img_temp.shape[1] / self.max_res
img_temp = transform_sk.resize( img_temp, ( int((img_temp.shape[0] // downscale_coef)), int((img_temp.shape[1] // downscale_coef)) ), \
mode='constant', anti_aliasing=True, anti_aliasing_sigma=None, preserve_range=True )
if self.debug and index < 10:
print("After:", img_temp.shape[0], img_temp.shape[1])
img_temp = (img_temp).astype(np.uint8)
img_name = ("test/img_after_tr" + str(index) + ".png")
Image.fromarray(img_temp, mode='RGB').save(img_name)
else:
if self.debug and index < 10:
print("Normal:", img_temp.shape[0], img_temp.shape[1])
img_name = ("test/img_normal_tr" + str(index) + ".png")
Image.fromarray(img_temp, mode='RGB').save(img_name)
##
h_max = img_temp.shape[0] # y
w_max = img_temp.shape[1] # x
#ch_max = img_temp.shape[2] # ch
mask_temp = self.mask_u2n_list[index]
x, y, h, w = self.x_u2n_list[index], self.y_u2n_list[index], self.h_u2n_list[index], self.w_u2n_list[index]
## Image and Mask Padding:
if self.sm_vit or self.gt_bbox:
if self.padding:
p = 15 # extra space around bbox
else:
p = 0
x_min = x - p
if x_min < 0:
x_min = 0
x_max = x + w + p
if x_max > w_max:
x_max = w_max
y_min = y - p
if y_min < 0:
y_min = 0
y_max = y + h + p
if y_max > h_max:
y_max = h_max
if h_max <=1:
print("[WARNING] bad_h", h_max)
if w_max <=1:
print("[WARNING] bad_w", w_max)
if y_min >= y_max:
print("[WARNING] bad_y", "min:", y_min, "max:", y_max)
print("[WARNING] y:", y, "h:", h)
if x_min >= x_max:
print("[WARNING] bad_x", "min:", x_min, "max:", x_max)
print("[WARNING] x:", x, "w:", w)
##
## Crop with bbox:
if self.rand_crop:
#prob_rcrop = 0.25 # 0.07 # 0.3 # 0.5
#rand_crop_mask_temp = bool(random.random() < prob_rcrop)
#if rand_crop_mask_temp:
h_max_img = img_temp.shape[0]
w_max_img = img_temp.shape[1]
#h_crop_mid = 368 # 384 (92%), 368 (84%), 352 (77%), 336 (70%), 320 (64%), 304 (57%)
h_crop_mid_img = int(h_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
#w_crop_mid = 368 # 384 (92%), 368 (84%), 352 (77%), 336 (70%), 320 (64%), 304 (57%)
w_crop_mid_img = int(w_max_img * 0.88) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
h_crop_min_img = random.randint(0, (h_max_img - h_crop_mid_img)) # 40) #, 400-360) #, h - th)
w_crop_min_img = random.randint(0, (w_max_img - w_crop_mid_img)) # 40) #, 400-360) #, w - tw)
h_crop_max_img = h_crop_mid_img + h_crop_min_img
w_crop_max_img = w_crop_mid_img + w_crop_min_img
# Crop image with bbox:
if len(img_temp.shape) == 3:
img_temp = img_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img), :] # h, w, ch
else:
img_temp = img_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)] # h, w
# Crop mask with bbox:
mask_temp = mask_temp[int(h_crop_min_img):int(h_crop_max_img), int(w_crop_min_img):int(w_crop_max_img)]
else:
# Crop image with bbox:
if len(img_temp.shape) == 3:
if self.gt_parts:
for j in range(3):
img_temp[:, :, j] = img_temp[:, :, j] * mask_temp # Black mask
else:
#test_img_temp = test_img_temp[int(y):int(y + h), int(x):int(x + w), :] # h, w, ch
img_temp = img_temp[int(y_min):int(y_max), int(x_min):int(x_max), :] # h, w, ch
else:
if self.gt_parts:
img_temp[:, :] = img_temp[:, :] * mask_temp # Black mask:
else:
img_temp = img_temp[int(y_min):int(y_max), int(x_min):int(x_max)] # h, w
# Crop mask with bbox:
if self.sm_vit or self.gt_bbox:
mask_temp = mask_temp[int(y_min):int(y_max), int(x_min):int(x_max)]
##
if ( (img_temp.shape[0] != mask_temp.shape[0]) or (img_temp.shape[1] != mask_temp.shape[1]) ):
print("[WARNING] Image shape does not match mask shape for sample:", index, ". \t" , \
"Found shapes:", img_temp.shape, mask_temp.shape)
img = img_temp
if len(img.shape) == 2:
img = np.stack([img] * 3, 2)
if self.ds_name != "CUB":
img = (img).astype(np.uint8)
img = Image.fromarray(img, mode='RGB')
if self.debug and index < 10:
img_tem = transforms.ToTensor()(img)
img_name = ("test/img_bef" + str(index) + ".png")
save_image( img_tem, img_name)
## Image:
if self.transform is not None:
if self.is_train:
if not self.flip_mask_as_image: # normal
img = self.transform(img)
else:
if random.random() < 0.5:
flipped = False
img = self.transform(img)
else:
flipped = True
transform_img_flip = transforms.Compose([
#transforms.Resize((args.resize_size, args.resize_size),Image.BILINEAR),
#transforms.RandomCrop((args.img_size, args.img_size)),
transforms.Resize((self.img_size, self.img_size),Image.BILINEAR), # my for bbox
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4), # my add (FFVT)
transforms.RandomHorizontalFlip(p=1.0), # !!! FLIPPING in dataset.py !!!
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])
img = transform_img_flip(img)
else:
img = self.transform(img)
if self.debug and index < 10:
img_name = ("test/img_aft" + str(index) + ".png")
save_image( img, img_name)
## Mask:
mask = mask_temp
if self.crop_mask:
h_max_im = mask.shape[0]
w_max_im = mask.shape[1]
h_crop_mid = int(h_max_im * 0.84) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
w_crop_mid = int(w_max_im * 0.84) # 384 (92% - 0.96), 368 (84% - 0.92), 352 (77% - 0.88), 336 (70% - 0.84), 320 (64% - 0.80), 304 (57% - 0.75)
cropped = np.ones_like(mask)
if self.mid_val:
cropped = cropped * 0.125 # (for 0.2)
h_crop_min = random.randint(0, (h_max_im - h_crop_mid)) # 40) #, 400-360) #, h - th)
w_crop_min = random.randint(0, (w_max_im - w_crop_mid)) # 40) #, 400-360) #, w - tw)
h_crop_max = h_crop_mid + h_crop_min
w_crop_max = w_crop_mid + w_crop_min
cropped[int(h_crop_min):int(h_crop_max), int(w_crop_min):int(w_crop_max)] = 0
mask = mask + cropped
if self.mid_val:
mask[mask > 1.1] = 1
else:
mask[mask > 1] = 1
mask = (mask * 255).astype(np.uint8)
mask = Image.fromarray(mask, mode='L')
if self.debug and index < 10:
mask_tem = transforms.ToTensor()(mask)
img_name = ("test/mask_bef" + str(index) + ".png")
save_image( mask_tem, img_name)
mask_size = int(self.img_size // 16)
if self.is_train:
if not self.flip_mask_as_image: # normal
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()
])
else:
if flipped:
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
transforms.RandomHorizontalFlip(p=1.0),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()
])
else:
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()
])
else:
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(), #(mode='1'),
# non-overlapped:
transforms.Resize((mask_size, mask_size), interpolation=Image.NEAREST), #Image.BILINEAR), # interpolation=T.InterpolationMode.NEAREST
transforms.ToTensor()])
mask = transform_mask(mask)
if self.debug and index < 10:
img_name = ("test/mask_aft" + str(index) + ".png")
save_image(mask, img_name)
mask = torch.flatten(mask)
return img, mask
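# --- Sketch (not part of the original file) ---
# The padded-bbox clamping used repeatedly above, distilled into one hedged
# helper: pad the U2-Net box by p pixels and clip it to the image bounds.
def _pad_and_clamp_bbox(x, y, w, h, w_max, h_max, p=15):
    x_min = max(x - p, 0)
    y_min = max(y - p, 0)
    x_max = min(x + w + p, w_max)
    y_max = min(y + h + p, h_max)
    return x_min, y_min, x_max, y_max # crop as img[y_min:y_max, x_min:x_max]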
class CUB(Generic_smvit_DS):
def __init__(self, ds_name, root, is_train=True, data_len=None, transform=None, sm_vit=True, low_memory=True, img_size=400):
self.ds_name = ds_name
self.img_size = img_size
self.max_res = int(self.img_size * 1.5)
self.full_ds = True # pre-processing full dataset
self.padding = True # image and mask padding
self.rand_crop = False # if no other cropping
self.flip_mask_as_image = True # if False - turn on RandomHorizontalFlip in data_utils !!!
self.rand_crop_im_mask = False # randomly crop both image and mask
self.crop_mask = False # randomly crop mask only
self.mid_val = False # 3-state mask
self.debug = False # for debug info
if self.debug:
os.makedirs("./test")
self.gt_bbox = False # for other experiments
self.gt_parts = False # for other experiments
self.sm_vit = sm_vit
self.low_memory = low_memory
if (self.sm_vit + self.gt_bbox + self.gt_parts) > 1 :
raise Exception("Only one cropping mode (SM-ViT, bbox, parts) can be chosen")
self.root = root
self.base_folder = "images"
self.transform = transform
self.is_train = is_train
if self.is_train:
self.mode = "Train"
else:
self.mode = "Test"
img_txt_file = open(os.path.join(self.root, 'images.txt'))
label_txt_file = open(os.path.join(self.root, 'image_class_labels.txt'))
train_val_file = open(os.path.join(self.root, 'train_test_split.txt'))
img_name_list = []
for line in img_txt_file:
img_name_list.append(line[:-1].split(' ')[-1])
label_list = []
for line in label_txt_file:
label_list.append(int(line[:-1].split(' ')[-1]) - 1)
train_test_list = []
for line in train_val_file:
train_test_list.append(int(line[:-1].split(' ')[-1]))
if self.is_train:
self.file_list = [x for i, x in zip(train_test_list, img_name_list) if i]
file_list_full = [ os.path.join(self.root, self.base_folder, x) for i, x in zip(train_test_list, img_name_list) if i]
else:
self.file_list = [x for i, x in zip(train_test_list, img_name_list) if not i]
file_list_full = [ os.path.join(self.root, self.base_folder, x) for i, x in zip(train_test_list, img_name_list) if not i]
if self.sm_vit:
print(f"[INFO] Preparing {self.mode} shape_hw list...")
shape_hw_list = []
for img_name in self.file_list:
img_temp = scipy.misc.imread(os.path.join(self.root, self.base_folder, img_name))
shape_hw_temp = [img_temp.shape[0], img_temp.shape[1]] # h_max (y), w_max (x)
shape_hw_list.append(shape_hw_temp)
if self.low_memory:
self.mask_u2n_list, self.x_u2n_list, self.y_u2n_list, self.h_u2n_list, self.w_u2n_list = \
super(CUB, self).generic_preprocess_lowMem(self.file_list,
file_list_full,
shape_hw_list
)
del shape_hw_list
del file_list_full
gc.collect()
else:
self.img, self.mask = \
super(CUB, self).generic_preprocess(self.file_list,
file_list_full,
shape_hw_list,
data_len,
train_test_list
)
else:
self.img = \
[scipy.misc.imread(os.path.join(self.root, self.base_folder, file)) \
for file in self.file_list[:data_len]]
if self.is_train:
self.label = [x for i, x in zip(train_test_list, label_list) if i][:data_len]
else:
self.label = [x for i, x in zip(train_test_list, label_list) if not i][:data_len]
self.imgname = [x for x in self.file_list[:data_len]]
def __getitem__(self, index):
if self.sm_vit:
if self.low_memory:
target, imgname = self.label[index], self.imgname[index]
img, mask = super(CUB, self).generic_getitem_lowMem(index)
else:
img, target, imgname, mask = self.img[index], self.label[index], self.imgname[index], self.mask[index]
img, mask = super(CUB, self).generic_getitem(index, img, mask)
return img, target, mask
else:
img, target, imgname = self.img[index], self.label[index], self.imgname[index]
if len(img.shape) == 2:
img = np.stack([img] * 3, 2)
img = Image.fromarray(img, mode='RGB')
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.label)
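# --- Usage sketch (not part of the original file) ---
# A hedged example of constructing CUB and checking the mask/token alignment;
# the root path and transform are illustrative assumptions.
def _cub_demo():
    from torchvision import transforms as T
    tf = T.Compose([
        T.Resize((400, 400)),
        T.ToTensor(),
    ])
    ds = CUB('CUB', root='./data/CUB_200_2011', is_train=False,
             transform=tf, sm_vit=True, low_memory=True, img_size=400)
    img, target, mask = ds[0]
    # The flattened mask has one entry per 16x16 ViT patch:
    assert mask.numel() == (400 // 16) ** 2 # 625 patch tokens
    return img.shape, target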
class dogs(Generic_smvit_DS): #(Dataset):
"""`Stanford Dogs <http://vision.stanford.edu/aditya86/ImageNetDogs/>`_ Dataset.
Args:
        root (string): Root directory of the dataset.
cropped (bool, optional): If true, the images will be cropped into the bounding box specified
in the annotations
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset tar files from the internet and
puts it in root directory. If the tar files are already downloaded, they are not
downloaded again.
"""
folder = 'dog'
download_url_prefix = 'http://vision.stanford.edu/aditya86/ImageNetDogs'
def __init__(self,
ds_name,
root,
is_train=True,
cropped=False,
transform=None,
target_transform=None,
download=False,
sm_vit=True,
low_memory=True,
img_size=400):
self.ds_name = ds_name
self.img_size = img_size
self.max_res = int(self.img_size * 1.5)
self.full_ds = True # pre-processing full dataset
self.padding = True # image and mask padding
self.rand_crop = False # if no other cropping
self.flip_mask_as_image = True # if False - turn on RandomHorizontalFlip in data_utils !!!
self.rand_crop_im_mask = False # randomly crop both image and mask
self.crop_mask = False # randomly crop mask only
self.mid_val = False # 3-state mask
self.debug = False # for debug info
if self.debug:
os.makedirs("./test")
self.sm_vit = sm_vit
self.low_memory = low_memory
# self.root = join(os.path.expanduser(root), self.folder)
self.root = root
self.base_folder = "Images"
self.is_train = is_train
if self.is_train:
self.mode = "Train"
else:
self.mode = "Test"
self.cropped = cropped
self.transform = transform
self.target_transform = target_transform
if download:
self.download()
split = self.load_split()
self.images_folder = join(self.root, 'Images')
self.annotations_folder = join(self.root, 'Annotation')
self._breeds = list_dir(self.images_folder)
if self.cropped:
self._breed_annotations = [[(annotation, box, idx)
for box in self.get_boxes(join(self.annotations_folder, annotation))]
for annotation, idx in split]
self._flat_breed_annotations = sum(self._breed_annotations, [])
self._flat_breed_images = [(annotation+'.jpg', idx) for annotation, box, idx in self._flat_breed_annotations]
else:
self._breed_images = [(annotation+'.jpg', idx) for annotation, idx in split]
self._flat_breed_images = self._breed_images
self.classes = ["Chihuaha",
"Japanese Spaniel",
"Maltese Dog",
"Pekinese",
"Shih-Tzu",
"Blenheim Spaniel",
"Papillon",
"Toy Terrier",
"Rhodesian Ridgeback",
"Afghan Hound",
"Basset Hound",
"Beagle",
"Bloodhound",
"Bluetick",
"Black-and-tan Coonhound",
"Walker Hound",
"English Foxhound",
"Redbone",
"Borzoi",
"Irish Wolfhound",
"Italian Greyhound",
"Whippet",
"Ibizian Hound",
"Norwegian Elkhound",
"Otterhound",
"Saluki",
"Scottish Deerhound",
"Weimaraner",
"Staffordshire Bullterrier",
"American Staffordshire Terrier",
"Bedlington Terrier",
"Border Terrier",
"Kerry Blue Terrier",
"Irish Terrier",
"Norfolk Terrier",
"Norwich Terrier",
"Yorkshire Terrier",
"Wirehaired Fox Terrier",
"Lakeland Terrier",
"Sealyham Terrier",
"Airedale",
"Cairn",
"Australian Terrier",
"Dandi Dinmont",
"Boston Bull",
"Miniature Schnauzer",
"Giant Schnauzer",
"Standard Schnauzer",
"Scotch Terrier",
"Tibetan Terrier",
"Silky Terrier",
"Soft-coated Wheaten Terrier",
"West Highland White Terrier",
"Lhasa",
"Flat-coated Retriever",
"Curly-coater Retriever",
"Golden Retriever",
"Labrador Retriever",
"Chesapeake Bay Retriever",
"German Short-haired Pointer",
"Vizsla",
"English Setter",
"Irish Setter",
"Gordon Setter",
"Brittany",
"Clumber",
"English Springer Spaniel",
"Welsh Springer Spaniel",
"Cocker Spaniel",
"Sussex Spaniel",
"Irish Water Spaniel",
"Kuvasz",
"Schipperke",
"Groenendael",
"Malinois",
"Briard",
"Kelpie",
"Komondor",
"Old English Sheepdog",
"Shetland Sheepdog",
"Collie",
"Border Collie",
"Bouvier des Flandres",
"Rottweiler",
"German Shepard",
"Doberman",
"Miniature Pinscher",
"Greater Swiss Mountain Dog",
"Bernese Mountain Dog",
"Appenzeller",
"EntleBucher",
"Boxer",
"Bull Mastiff",
"Tibetan Mastiff",
"French Bulldog",
"Great Dane",
"Saint Bernard",
"Eskimo Dog",
"Malamute",
"Siberian Husky",
"Affenpinscher",
"Basenji",
"Pug",
"Leonberg",
"Newfoundland",
"Great Pyrenees",
"Samoyed",
"Pomeranian",
"Chow",
"Keeshond",
"Brabancon Griffon",
"Pembroke",
"Cardigan",
"Toy Poodle",
"Miniature Poodle",
"Standard Poodle",
"Mexican Hairless",
"Dingo",
"Dhole",
"African Hunting Dog"]
data_len = None
if self.sm_vit:
print(f"[INFO] Preparing {self.mode} shape_hw list...")
shape_hw_list = []
self.file_list = []
file_list_full = []
for image_name, target_class in self._flat_breed_images:
img_name = join(self.images_folder, image_name)
img_temp = scipy.misc.imread(os.path.join(img_name))
shape_hw_temp = [img_temp.shape[0], img_temp.shape[1]] # h_max (y), w_max (x)
if (shape_hw_temp[0] > self.max_res) or (shape_hw_temp[1] > self.max_res):
if shape_hw_temp[0] > shape_hw_temp[1]:
downscale_coef = shape_hw_temp[0] / self.max_res
else:
downscale_coef = shape_hw_temp[1] / self.max_res
shape_hw_temp[0] = int(shape_hw_temp[0] // downscale_coef)
shape_hw_temp[1] = int(shape_hw_temp[1] // downscale_coef)
shape_hw_list.append(shape_hw_temp)
self.file_list.append(image_name)
file_list_full.append(img_name)
if self.low_memory:
self.mask_u2n_list, self.x_u2n_list, self.y_u2n_list, self.h_u2n_list, self.w_u2n_list = \
super(dogs, self).generic_preprocess_lowMem(self.file_list,
file_list_full,
shape_hw_list
)
del shape_hw_list
del file_list_full
gc.collect()
else:
self.img, self.mask = \
super(dogs, self).generic_preprocess(self.file_list,
file_list_full,
shape_hw_list,
data_len
)
        # The train/test split is already encoded in self._flat_breed_images,
        # so the label list is built the same way for both modes
        self.label = [x for i, x in self._flat_breed_images][:data_len]
self.imgname = [x for x in self.file_list[:data_len]]
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target character class.
"""
if self.sm_vit:
if self.low_memory:
target, imgname = self.label[index], self.imgname[index]
img, mask = super(dogs, self).generic_getitem_lowMem(index)
else:
img, target, imgname, mask = self.img[index], self.label[index], self.imgname[index], self.mask[index]
img, mask = super(dogs, self).generic_getitem(index, img, mask)
return img, target, mask
else:
image_name, target = self._flat_breed_images[index]
image_path = join(self.images_folder, image_name)
img = Image.open(image_path).convert('RGB')
if self.cropped:
img = img.crop(self._flat_breed_annotations[index][1])
if self.transform:
img = self.transform(img)
if self.target_transform:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self._flat_breed_images)
def download(self):
import tarfile
if os.path.exists(join(self.root, 'Images')) and os.path.exists(join(self.root, 'Annotation')):
if len(os.listdir(join(self.root, 'Images'))) == len(os.listdir(join(self.root, 'Annotation'))) == 120:
print('Files already downloaded and verified')
return
for filename in ['images', 'annotation', 'lists']:
tar_filename = filename + '.tar'
url = self.download_url_prefix + '/' + tar_filename
download_url(url, self.root, tar_filename, None)
print('Extracting downloaded file: ' + join(self.root, tar_filename))
with tarfile.open(join(self.root, tar_filename), 'r') as tar_file:
tar_file.extractall(self.root)
os.remove(join(self.root, tar_filename))
@staticmethod
def get_boxes(path):
import xml.etree.ElementTree
e = xml.etree.ElementTree.parse(path).getroot()
boxes = []
for objs in e.iter('object'):
boxes.append([int(objs.find('bndbox').find('xmin').text),
int(objs.find('bndbox').find('ymin').text),
int(objs.find('bndbox').find('xmax').text),
int(objs.find('bndbox').find('ymax').text)])
return boxes
def load_split(self):
if self.is_train:
# split = scipy.io.loadmat(join(self.root, 'train_list.mat'))['annotation_list']
# labels = scipy.io.loadmat(join(self.root, 'train_list.mat'))['labels']
split = scipy.io.loadmat(join(self.root, 'splits/train_list.mat'))['annotation_list']
labels = scipy.io.loadmat(join(self.root, 'splits/train_list.mat'))['labels']
else:
# split = scipy.io.loadmat(join(self.root, 'test_list.mat'))['annotation_list']
# labels = scipy.io.loadmat(join(self.root, 'test_list.mat'))['labels']
split = scipy.io.loadmat(join(self.root, 'splits/test_list.mat'))['annotation_list']
labels = scipy.io.loadmat(join(self.root, 'splits/test_list.mat'))['labels']
split = [item[0][0] for item in split]
labels = [item[0]-1 for item in labels]
return list(zip(split, labels))
def stats(self):
counts = {}
for index in range(len(self._flat_breed_images)):
image_name, target_class = self._flat_breed_images[index]
if target_class not in counts.keys():
counts[target_class] = 1
else:
counts[target_class] += 1
print("%d samples spanning %d classes (avg %f per class)"%(len(self._flat_breed_images), len(counts.keys()), float(len(self._flat_breed_images))/float(len(counts.keys()))))
return counts
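# --- Usage sketch (not part of the original file) ---
# A hedged example of batching the Stanford Dogs split with a DataLoader;
# paths and batch size are illustrative assumptions. With sm_vit=True each
# batch yields (images, targets, masks).
def _dogs_loader_demo():
    from torch.utils.data import DataLoader
    from torchvision import transforms as T
    tf = T.Compose([T.Resize((400, 400)), T.ToTensor()])
    ds = dogs('dogs', root='./data/stanford_dogs', is_train=True,
              transform=tf, sm_vit=True, low_memory=True, img_size=400)
    loader = DataLoader(ds, batch_size=4, shuffle=True)
    imgs, targets, masks = next(iter(loader))
    return imgs.shape, targets.shape, masks.shape # (4,3,400,400), (4,), (4,625)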
class NABirds(Generic_smvit_DS): #(Dataset):
"""`NABirds <https://dl.allaboutbirds.org/nabirds>`_ Dataset.
Args:
root (string): Root directory of the dataset.
train (bool, optional): If True, creates dataset from training set, otherwise
creates from test set.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
#base_folder = 'nabirds/images'
def __init__(self, ds_name, root, is_train=True, data_len=None, transform=None, sm_vit=True, low_memory=True, img_size=448):
self.ds_name = ds_name
self.img_size = img_size
self.max_res = int(self.img_size * 1.25) # 1.5
self.full_ds = True # pre-processing full dataset
self.padding = True # image and mask padding
self.rand_crop = False # if no other cropping
self.flip_mask_as_image = True # if False - turn on RandomHorizontalFlip in data_utils !!!
self.rand_crop_im_mask = False # randomly crop both image and mask
self.crop_mask = False # randomly crop mask only
self.mid_val = False # 3-state mask
self.debug = False # for debug info
if self.debug:
os.makedirs("./test")
self.sm_vit = sm_vit
self.low_memory = low_memory
dataset_path = os.path.join(root)
self.root = root
self.base_folder = "images"
self.loader = default_loader
self.transform = transform
self.is_train = is_train
if self.is_train:
self.mode = "Train"
else:
self.mode = "Test"
image_paths = pd.read_csv(os.path.join(dataset_path, 'images.txt'),
sep=' ', names=['img_id', 'filepath'])
image_class_labels = pd.read_csv(os.path.join(dataset_path, 'image_class_labels.txt'),
sep=' ', names=['img_id', 'target'])
# Since the raw labels are non-continuous, map them to new ones
self.label_map = get_continuous_class_map(image_class_labels['target'])
train_test_split = pd.read_csv(os.path.join(dataset_path, 'train_test_split.txt'),
sep=' ', names=['img_id', 'is_training_img'])
data = image_paths.merge(image_class_labels, on='img_id')
self.data = data.merge(train_test_split, on='img_id')
# Load in the train / test split
if self.is_train:
self.data = self.data[self.data.is_training_img == 1]
else:
self.data = self.data[self.data.is_training_img == 0]
# Load in the class data
self.class_names = load_class_names(dataset_path)
self.class_hierarchy = load_hierarchy(dataset_path)
self.data_len = None
if self.sm_vit:
print(f"[INFO] Preparing {self.mode} shape_hw list...")
shape_hw_list = []
self.file_list = []
file_list_full = []
for sample in self.data.iloc:
image_name = sample.filepath
img_name_full = join(self.root, self.base_folder, image_name)
img_temp = scipy.misc.imread(os.path.join(img_name_full))
shape_hw_temp = [img_temp.shape[0], img_temp.shape[1]] # h_max (y), w_max (x)
if (shape_hw_temp[0] > self.max_res) or (shape_hw_temp[1] > self.max_res):
if shape_hw_temp[0] > shape_hw_temp[1]:
downscale_coef = shape_hw_temp[0] / self.max_res
else:
downscale_coef = shape_hw_temp[1] / self.max_res
shape_hw_temp[0] = int(shape_hw_temp[0] // downscale_coef)
shape_hw_temp[1] = int(shape_hw_temp[1] // downscale_coef)
shape_hw_list.append(shape_hw_temp)
self.file_list.append(image_name)
file_list_full.append(img_name_full)
if self.low_memory:
self.mask_u2n_list, self.x_u2n_list, self.y_u2n_list, self.h_u2n_list, self.w_u2n_list = \
super(NABirds, self).generic_preprocess_lowMem(self.file_list,
file_list_full,
shape_hw_list
)
del shape_hw_list
del file_list_full
gc.collect()
else:
self.img, self.mask = \
super(NABirds, self).generic_preprocess(self.file_list,
file_list_full,
shape_hw_list,
self.data_len
)
        # self.data was already filtered by is_training_img above,
        # so the label list is built the same way for both modes
        self.label = [ (self.label_map[x.target]) for x in self.data.iloc ][:self.data_len]
self.imgname = [x for x in self.file_list[:self.data_len]]
def __getitem__(self, index):
if self.sm_vit:
if self.low_memory:
target, imgname = self.label[index], self.imgname[index]
img, mask = super(NABirds, self).generic_getitem_lowMem(index)
else:
img, target, imgname, mask = self.img[index], self.label[index], self.imgname[index], self.mask[index]
img, mask = super(NABirds, self).generic_getitem(index, img, mask)
return img, target, mask
else:
sample = self.data.iloc[index]
path = os.path.join(self.root, self.base_folder, sample.filepath)
target = self.label_map[sample.target]
img = self.loader(path)
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
return len(self.data)
def get_continuous_class_map(class_labels):
label_set = set(class_labels)
return {k: i for i, k in enumerate(label_set)}
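# --- Sketch (not part of the original file) ---
# Hedged example: non-contiguous raw labels are remapped onto 0..N-1.
# Note that key order follows set iteration, not sorted order.
def _class_map_demo():
    m = get_continuous_class_map([7, 7, 42, 305])
    return sorted(m.values()) # [0, 1, 2]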
def load_class_names(dataset_path=''):
names = {}
with open(os.path.join(dataset_path, 'classes.txt')) as f:
for line in f:
pieces = line.strip().split()
class_id = pieces[0]
names[class_id] = ' '.join(pieces[1:])
return names
def load_hierarchy(dataset_path=''):
parents = {}
with open(os.path.join(dataset_path, 'hierarchy.txt')) as f:
for line in f:
pieces = line.strip().split()
child_id, parent_id = pieces
parents[child_id] = parent_id
return parents
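# --- Sketch (not part of the original file) ---
# load_hierarchy returns a child -> parent map over NABirds class ids; a
# hedged helper for climbing from a class toward the root of the taxonomy
# (the self-parent guard assumes a root may map to itself).
def _ancestors(class_id, parents):
    chain = []
    while class_id in parents and parents[class_id] != class_id:
        class_id = parents[class_id]
        chain.append(class_id)
    return chain # root id is last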
### Not optimised datasets:
class INat2017(VisionDataset):
"""`iNaturalist 2017 <https://github.com/visipedia/inat_comp/blob/master/2017/README.md>`_ Dataset.
Args:
root (string): Root directory of the dataset.
split (string, optional): The dataset split, supports ``train``, or ``val``.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
base_folder = 'train_val_images/'
file_list = {
'imgs': ('https://storage.googleapis.com/asia_inat_data/train_val/train_val_images.tar.gz',
'train_val_images.tar.gz',
'7c784ea5e424efaec655bd392f87301f'),
'annos': ('https://storage.googleapis.com/asia_inat_data/train_val/train_val2017.zip',
'train_val2017.zip',
'444c835f6459867ad69fcb36478786e7')
}
def __init__(self, root, split='train', transform=None, target_transform=None, download=False):
super(INat2017, self).__init__(root, transform=transform, target_transform=target_transform)
self.loader = default_loader
self.split = verify_str_arg(split, "split", ("train", "val",))
if self._check_exists():
print('Files already downloaded and verified.')
elif download:
if not (os.path.exists(os.path.join(self.root, self.file_list['imgs'][1]))
and os.path.exists(os.path.join(self.root, self.file_list['annos'][1]))):
print('Downloading...')
self._download()
print('Extracting...')
extract_archive(os.path.join(self.root, self.file_list['imgs'][1]))
extract_archive(os.path.join(self.root, self.file_list['annos'][1]))
else:
raise RuntimeError(
'Dataset not found. You can use download=True to download it.')
anno_filename = split + '2017.json'
with open(os.path.join(self.root, anno_filename), 'r') as fp:
all_annos = json.load(fp)
self.annos = all_annos['annotations']
self.images = all_annos['images']
def __getitem__(self, index):
path = os.path.join(self.root, self.images[index]['file_name'])
target = self.annos[index]['category_id']
image = self.loader(path)
if self.transform is not None:
image = self.transform(image)
if self.target_transform is not None:
target = self.target_transform(target)
return image, target
def __len__(self):
return len(self.images)
def _check_exists(self):
return os.path.exists(os.path.join(self.root, self.base_folder))
def _download(self):
for url, filename, md5 in self.file_list.values():
download_url(url, root=self.root, filename=filename)
if not check_integrity(os.path.join(self.root, filename), md5):
raise RuntimeError("File not found or corrupted.")
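# Minimal usage sketch for INat2017, assuming the archives were already
# downloaded and extracted under `root` (paths and sizes are hypothetical).
def _demo_inat2017():
    from torchvision import transforms
    ds = INat2017(root='./inat2017', split='val',
                  transform=transforms.Compose([transforms.Resize((304, 304)),
                                                transforms.ToTensor()]))
    image, target = ds[0]
    print(len(ds), image.shape, target)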
class CarsDataset(Dataset):
def __init__(self, mat_anno, data_dir, car_names, cleaned=None, transform=None):
"""
Args:
mat_anno (string): Path to the MATLAB annotation file.
data_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.full_data_set = io.loadmat(mat_anno)
self.car_annotations = self.full_data_set['annotations']
self.car_annotations = self.car_annotations[0]
if cleaned is not None:
cleaned_annos = []
print("Cleaning up data set (only take pics with rgb chans)...")
clean_files = np.loadtxt(cleaned, dtype=str)
for c in self.car_annotations:
if c[-1][0] in clean_files:
cleaned_annos.append(c)
self.car_annotations = cleaned_annos
self.car_names = scipy.io.loadmat(car_names)['class_names']
self.car_names = np.array(self.car_names[0])
self.data_dir = data_dir
self.transform = transform
def __len__(self):
return len(self.car_annotations)
def __getitem__(self, idx):
img_name = os.path.join(self.data_dir, self.car_annotations[idx][-1][0])
image = Image.open(img_name).convert('RGB')
car_class = self.car_annotations[idx][-2][0][0]
car_class = torch.from_numpy(np.array(car_class.astype(np.float32))).long() - 1
assert car_class < 196
if self.transform:
image = self.transform(image)
# return image, car_class, img_name
return image, car_class
def map_class(self, id):
id = np.ravel(id)
ret = self.car_names[id - 1][0][0]
return ret
def show_batch(self, img_batch, class_batch):
for i in range(img_batch.shape[0]):
ax = plt.subplot(1, img_batch.shape[0], i + 1)
title_str = self.map_class(int(class_batch[i]))
img = np.transpose(img_batch[i, ...], (1, 2, 0))
ax.imshow(img)
ax.set_title(title_str.__str__(), {'fontsize': 5})
plt.tight_layout()
def make_dataset(dir, image_ids, targets):
assert(len(image_ids) == len(targets))
images = []
dir = os.path.expanduser(dir)
for i in range(len(image_ids)):
item = (os.path.join(dir, 'data', 'images',
'%s.jpg' % image_ids[i]), targets[i])
images.append(item)
return images
def find_classes(classes_file):
# read classes file, separating out image IDs and class names
image_ids = []
targets = []
f = open(classes_file, 'r')
for line in f:
split_line = line.split(' ')
image_ids.append(split_line[0])
targets.append(' '.join(split_line[1:]))
f.close()
# index class names
classes = np.unique(targets)
class_to_idx = {classes[i]: i for i in range(len(classes))}
targets = [class_to_idx[c] for c in targets]
return (image_ids, targets, classes, class_to_idx)
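# Format sketch for the classes file consumed by find_classes above: one
# "<image_id> <class name ...>" entry per line. Note that class names keep a
# trailing newline because lines are not stripped. The file below is a
# hypothetical stand-in written to a temporary location.
def _demo_find_classes():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('0034309 Boeing 707-320\n0034958 Boeing 727-200\n')
        path = f.name
    image_ids, targets, classes, class_to_idx = find_classes(path)
    print(image_ids, targets, class_to_idx)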
class FGVC_aircraft():
def __init__(self, root, is_train=True, data_len=None, transform=None):
self.root = root
self.is_train = is_train
self.transform = transform
train_img_path = os.path.join(self.root, 'data', 'images')
test_img_path = os.path.join(self.root, 'data', 'images')
train_label_file = open(os.path.join(self.root, 'data', 'train.txt'))
test_label_file = open(os.path.join(self.root, 'data', 'test.txt'))
train_img_label = []
test_img_label = []
for line in train_label_file:
train_img_label.append([os.path.join(train_img_path,line[:-1].split(' ')[0]), int(line[:-1].split(' ')[1])-1])
for line in test_label_file:
test_img_label.append([os.path.join(test_img_path,line[:-1].split(' ')[0]), int(line[:-1].split(' ')[1])-1])
self.train_img_label = train_img_label[:data_len]
self.test_img_label = test_img_label[:data_len]
def __getitem__(self, index):
if self.is_train:
            # scipy.misc.imread was removed in SciPy >= 1.2; load with PIL instead
            img = np.array(Image.open(self.train_img_label[index][0]))
            target = self.train_img_label[index][1]
if len(img.shape) == 2:
img = np.stack([img] * 3, 2)
img = Image.fromarray(img, mode='RGB')
if self.transform is not None:
img = self.transform(img)
else:
            # scipy.misc.imread was removed in SciPy >= 1.2; load with PIL instead
            img = np.array(Image.open(self.test_img_label[index][0]))
            target = self.test_img_label[index][1]
if len(img.shape) == 2:
img = np.stack([img] * 3, 2)
img = Image.fromarray(img, mode='RGB')
if self.transform is not None:
img = self.transform(img)
return img, target
def __len__(self):
if self.is_train:
return len(self.train_img_label)
else:
return len(self.test_img_label)
| 67,209 | 39.659407 | 180 |
py
|
sm-vit
|
sm-vit-main/utils/scheduler.py
|
import logging
import math
from torch.optim.lr_scheduler import LambdaLR
logger = logging.getLogger(__name__)
class ConstantLRSchedule(LambdaLR):
""" Constant learning rate schedule.
"""
def __init__(self, optimizer, last_epoch=-1):
super(ConstantLRSchedule, self).__init__(optimizer, lambda _: 1.0, last_epoch=last_epoch)
class WarmupConstantSchedule(LambdaLR):
""" Linear warmup and then constant.
        Linearly increases the learning rate from 0 to 1 over `warmup_steps` training steps.
        Keeps the learning rate equal to 1. after warmup_steps.
"""
def __init__(self, optimizer, warmup_steps, last_epoch=-1):
self.warmup_steps = warmup_steps
super(WarmupConstantSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
return 1.
class WarmupLinearSchedule(LambdaLR):
""" Linear warmup and then linear decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Linearly decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps.
"""
def __init__(self, optimizer, warmup_steps, t_total, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
super(WarmupLinearSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1, self.warmup_steps))
return max(0.0, float(self.t_total - step) / float(max(1.0, self.t_total - self.warmup_steps)))
class WarmupCosineSchedule(LambdaLR):
""" Linear warmup and then cosine decay.
Linearly increases learning rate from 0 to 1 over `warmup_steps` training steps.
Decreases learning rate from 1. to 0. over remaining `t_total - warmup_steps` steps following a cosine curve.
        If `cycles` differs from the default 0.5, the learning rate follows `cycles` full cosine periods over the decay phase.
"""
def __init__(self, optimizer, warmup_steps, t_total, cycles=.5, last_epoch=-1):
self.warmup_steps = warmup_steps
self.t_total = t_total
self.cycles = cycles
super(WarmupCosineSchedule, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup_steps:
return float(step) / float(max(1.0, self.warmup_steps))
# progress after warmup
progress = float(step - self.warmup_steps) / float(max(1, self.t_total - self.warmup_steps))
return max(0.0, 0.5 * (1. + math.cos(math.pi * float(self.cycles) * 2.0 * progress)))
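# Minimal sketch of the schedules above in action: a dummy optimizer warmed up
# for 5 steps, then cosine-decayed towards 0 by step 20. The parameter and
# hyperparameters are hypothetical.
def _demo_warmup_cosine():
    import torch
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=0.1)
    scheduler = WarmupCosineSchedule(optimizer, warmup_steps=5, t_total=20)
    for step in range(20):
        optimizer.step()
        scheduler.step()
        print(step, optimizer.param_groups[0]['lr'])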
| 2,799 | 42.75 | 117 |
py
|
sm-vit
|
sm-vit-main/utils/autoaugment.py
|
"""
Copy from https://github.com/DeepVoltaire/AutoAugment/blob/master/autoaugment.py
"""
from PIL import Image, ImageEnhance, ImageOps
import numpy as np
import random
__all__ = ['AutoAugImageNetPolicy', 'AutoAugCIFAR10Policy', 'AutoAugSVHNPolicy']
class AutoAugImageNetPolicy(object):
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy(0.4, "posterize", 8, 0.6, "rotate", 9, fillcolor),
SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor),
SubPolicy(0.8, "equalize", 8, 0.6, "equalize", 3, fillcolor),
SubPolicy(0.6, "posterize", 7, 0.6, "posterize", 6, fillcolor),
SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor),
SubPolicy(0.4, "equalize", 4, 0.8, "rotate", 8, fillcolor),
SubPolicy(0.6, "solarize", 3, 0.6, "equalize", 7, fillcolor),
SubPolicy(0.8, "posterize", 5, 1.0, "equalize", 2, fillcolor),
SubPolicy(0.2, "rotate", 3, 0.6, "solarize", 8, fillcolor),
SubPolicy(0.6, "equalize", 8, 0.4, "posterize", 6, fillcolor),
SubPolicy(0.8, "rotate", 8, 0.4, "color", 0, fillcolor),
SubPolicy(0.4, "rotate", 9, 0.6, "equalize", 2, fillcolor),
SubPolicy(0.0, "equalize", 7, 0.8, "equalize", 8, fillcolor),
SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor),
SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor),
SubPolicy(0.8, "rotate", 8, 1.0, "color", 2, fillcolor),
SubPolicy(0.8, "color", 8, 0.8, "solarize", 7, fillcolor),
SubPolicy(0.4, "sharpness", 7, 0.6, "invert", 8, fillcolor),
SubPolicy(0.6, "shearX", 5, 1.0, "equalize", 9, fillcolor),
SubPolicy(0.4, "color", 0, 0.6, "equalize", 3, fillcolor),
SubPolicy(0.4, "equalize", 7, 0.2, "solarize", 4, fillcolor),
SubPolicy(0.6, "solarize", 5, 0.6, "autocontrast", 5, fillcolor),
SubPolicy(0.6, "invert", 4, 1.0, "equalize", 8, fillcolor),
SubPolicy(0.6, "color", 4, 1.0, "contrast", 8, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment ImageNet Policy"
class AutoAugCIFAR10Policy(object):
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy(0.1, "invert", 7, 0.2, "contrast", 6, fillcolor),
SubPolicy(0.7, "rotate", 2, 0.3, "translateX", 9, fillcolor),
SubPolicy(0.8, "sharpness", 1, 0.9, "sharpness", 3, fillcolor),
SubPolicy(0.5, "shearY", 8, 0.7, "translateY", 9, fillcolor),
SubPolicy(0.5, "autocontrast", 8, 0.9, "equalize", 2, fillcolor),
SubPolicy(0.2, "shearY", 7, 0.3, "posterize", 7, fillcolor),
SubPolicy(0.4, "color", 3, 0.6, "brightness", 7, fillcolor),
SubPolicy(0.3, "sharpness", 9, 0.7, "brightness", 9, fillcolor),
SubPolicy(0.6, "equalize", 5, 0.5, "equalize", 1, fillcolor),
SubPolicy(0.6, "contrast", 7, 0.6, "sharpness", 5, fillcolor),
SubPolicy(0.7, "color", 7, 0.5, "translateX", 8, fillcolor),
SubPolicy(0.3, "equalize", 7, 0.4, "autocontrast", 8, fillcolor),
SubPolicy(0.4, "translateY", 3, 0.2, "sharpness", 6, fillcolor),
SubPolicy(0.9, "brightness", 6, 0.2, "color", 8, fillcolor),
SubPolicy(0.5, "solarize", 2, 0.0, "invert", 3, fillcolor),
SubPolicy(0.2, "equalize", 0, 0.6, "autocontrast", 0, fillcolor),
SubPolicy(0.2, "equalize", 8, 0.8, "equalize", 4, fillcolor),
SubPolicy(0.9, "color", 9, 0.6, "equalize", 6, fillcolor),
SubPolicy(0.8, "autocontrast", 4, 0.2, "solarize", 8, fillcolor),
SubPolicy(0.1, "brightness", 3, 0.7, "color", 0, fillcolor),
SubPolicy(0.4, "solarize", 5, 0.9, "autocontrast", 3, fillcolor),
SubPolicy(0.9, "translateY", 9, 0.7, "translateY", 9, fillcolor),
SubPolicy(0.9, "autocontrast", 2, 0.8, "solarize", 3, fillcolor),
SubPolicy(0.8, "equalize", 8, 0.1, "invert", 3, fillcolor),
SubPolicy(0.7, "translateY", 9, 0.9, "autocontrast", 1, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment CIFAR10 Policy"
class AutoAugSVHNPolicy(object):
def __init__(self, fillcolor=(128, 128, 128)):
self.policies = [
SubPolicy(0.9, "shearX", 4, 0.2, "invert", 3, fillcolor),
SubPolicy(0.9, "shearY", 8, 0.7, "invert", 5, fillcolor),
SubPolicy(0.6, "equalize", 5, 0.6, "solarize", 6, fillcolor),
SubPolicy(0.9, "invert", 3, 0.6, "equalize", 3, fillcolor),
SubPolicy(0.6, "equalize", 1, 0.9, "rotate", 3, fillcolor),
SubPolicy(0.9, "shearX", 4, 0.8, "autocontrast", 3, fillcolor),
SubPolicy(0.9, "shearY", 8, 0.4, "invert", 5, fillcolor),
SubPolicy(0.9, "shearY", 5, 0.2, "solarize", 6, fillcolor),
SubPolicy(0.9, "invert", 6, 0.8, "autocontrast", 1, fillcolor),
SubPolicy(0.6, "equalize", 3, 0.9, "rotate", 3, fillcolor),
SubPolicy(0.9, "shearX", 4, 0.3, "solarize", 3, fillcolor),
SubPolicy(0.8, "shearY", 8, 0.7, "invert", 4, fillcolor),
SubPolicy(0.9, "equalize", 5, 0.6, "translateY", 6, fillcolor),
SubPolicy(0.9, "invert", 4, 0.6, "equalize", 7, fillcolor),
SubPolicy(0.3, "contrast", 3, 0.8, "rotate", 4, fillcolor),
SubPolicy(0.8, "invert", 5, 0.0, "translateY", 2, fillcolor),
SubPolicy(0.7, "shearY", 6, 0.4, "solarize", 8, fillcolor),
SubPolicy(0.6, "invert", 4, 0.8, "rotate", 4, fillcolor),
SubPolicy(0.3, "shearY", 7, 0.9, "translateX", 3, fillcolor),
SubPolicy(0.1, "shearX", 6, 0.6, "invert", 5, fillcolor),
SubPolicy(0.7, "solarize", 2, 0.6, "translateY", 7, fillcolor),
SubPolicy(0.8, "shearY", 4, 0.8, "invert", 8, fillcolor),
SubPolicy(0.7, "shearX", 9, 0.8, "translateY", 3, fillcolor),
SubPolicy(0.8, "shearY", 5, 0.7, "autocontrast", 3, fillcolor),
SubPolicy(0.7, "shearX", 2, 0.1, "invert", 5, fillcolor)
]
def __call__(self, img):
policy_idx = random.randint(0, len(self.policies) - 1)
return self.policies[policy_idx](img)
def __repr__(self):
return "AutoAugment SVHN Policy"
class SubPolicy(object):
def __init__(self, p1, operation1, magnitude_idx1, p2, operation2, magnitude_idx2, fillcolor=(128, 128, 128)):
ranges = {
"shearX": np.linspace(0, 0.3, 10),
"shearY": np.linspace(0, 0.3, 10),
"translateX": np.linspace(0, 150 / 331, 10),
"translateY": np.linspace(0, 150 / 331, 10),
"rotate": np.linspace(0, 30, 10),
"color": np.linspace(0.0, 0.9, 10),
"posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
"solarize": np.linspace(256, 0, 10),
"contrast": np.linspace(0.0, 0.9, 10),
"sharpness": np.linspace(0.0, 0.9, 10),
"brightness": np.linspace(0.0, 0.9, 10),
"autocontrast": [0] * 10,
"equalize": [0] * 10,
"invert": [0] * 10
}
def rotate_with_fill(img, magnitude):
rot = img.convert("RGBA").rotate(magnitude)
return Image.composite(rot, Image.new("RGBA", rot.size, (128,) * 4), rot).convert(img.mode)
func = {
"shearX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, magnitude * random.choice([-1, 1]), 0, 0, 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"shearY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, magnitude * random.choice([-1, 1]), 1, 0),
Image.BICUBIC, fillcolor=fillcolor),
"translateX": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice([-1, 1]), 0, 1, 0),
fillcolor=fillcolor),
"translateY": lambda img, magnitude: img.transform(
img.size, Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.choice([-1, 1])),
fillcolor=fillcolor),
"rotate": lambda img, magnitude: rotate_with_fill(img, magnitude),
# "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
"color": lambda img, magnitude: ImageEnhance.Color(img).enhance(1 + magnitude * random.choice([-1, 1])),
"posterize": lambda img, magnitude: ImageOps.posterize(img, magnitude),
"solarize": lambda img, magnitude: ImageOps.solarize(img, magnitude),
"contrast": lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"sharpness": lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"brightness": lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
1 + magnitude * random.choice([-1, 1])),
"autocontrast": lambda img, magnitude: ImageOps.autocontrast(img),
"equalize": lambda img, magnitude: ImageOps.equalize(img),
"invert": lambda img, magnitude: ImageOps.invert(img)
}
# self.name = "{}_{:.2f}_and_{}_{:.2f}".format(
# operation1, ranges[operation1][magnitude_idx1],
# operation2, ranges[operation2][magnitude_idx2])
self.p1 = p1
self.operation1 = func[operation1]
self.magnitude1 = ranges[operation1][magnitude_idx1]
self.p2 = p2
self.operation2 = func[operation2]
self.magnitude2 = ranges[operation2][magnitude_idx2]
def __call__(self, img):
if random.random() < self.p1:
img = self.operation1(img, self.magnitude1)
if random.random() < self.p2:
img = self.operation2(img, self.magnitude2)
return img
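# Usage sketch: plugging a policy into a torchvision pipeline. The crop size
# is hypothetical; one randomly chosen SubPolicy is applied per image.
def _demo_autoaugment_pipeline():
    from torchvision import transforms
    train_transform = transforms.Compose([
        transforms.RandomResizedCrop(224),
        AutoAugImageNetPolicy(),
        transforms.ToTensor(),
    ])
    print(train_transform)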
| 10,387 | 49.673171 | 116 |
py
|
sm-vit
|
sm-vit-main/utils/dist_util.py
|
import torch.distributed as dist
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def is_main_process():
return get_rank() == 0
def format_step(step):
if isinstance(step, str):
return step
s = ""
if len(step) > 0:
s += "Training Epoch: {} ".format(step[0])
if len(step) > 1:
s += "Training Iteration: {} ".format(step[1])
if len(step) > 2:
s += "Validation Iteration: {} ".format(step[2])
return s
| 711 | 21.967742 | 56 |
py
|
sm-vit
|
sm-vit-main/U2Net/data_loader.py
|
# data loader
from __future__ import print_function, division
import glob
import torch
from skimage import io, transform, color
import numpy as np
import random
import math
import matplotlib.pyplot as plt
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms, utils
from PIL import Image
#==========================dataset load==========================
class RescaleT(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'],sample['label']
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
        # resize image and label to output_size x output_size;
        # transform.resize also converts the image from range [0,255] to [0,1]
img = transform.resize(image,(self.output_size,self.output_size),mode='constant', anti_aliasing=True, anti_aliasing_sigma=None)
lbl = transform.resize(label,(self.output_size,self.output_size),mode='constant', order=0, preserve_range=True, anti_aliasing=True, anti_aliasing_sigma=None)
return {'imidx':imidx, 'image':img,'label':lbl}
class Rescale(object):
def __init__(self,output_size):
assert isinstance(output_size,(int,tuple))
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'],sample['label']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
h, w = image.shape[:2]
if isinstance(self.output_size,int):
if h > w:
new_h, new_w = self.output_size*h/w,self.output_size
else:
new_h, new_w = self.output_size,self.output_size*w/h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
        # resize the image to new_h x new_w; transform.resize also converts
        # the image from range [0,255] to [0,1]
img = transform.resize(image,(new_h,new_w),mode='constant', anti_aliasing=True, anti_aliasing_sigma=None)
lbl = transform.resize(label,(new_h,new_w),mode='constant', order=0, preserve_range=True, anti_aliasing=True, anti_aliasing_sigma=None)
return {'imidx':imidx, 'image':img,'label':lbl}
class RandomCrop(object):
def __init__(self,output_size):
assert isinstance(output_size, (int, tuple))
if isinstance(output_size, int):
self.output_size = (output_size, output_size)
else:
assert len(output_size) == 2
self.output_size = output_size
def __call__(self,sample):
imidx, image, label = sample['imidx'], sample['image'], sample['label']
if random.random() >= 0.5:
image = image[::-1]
label = label[::-1]
h, w = image.shape[:2]
new_h, new_w = self.output_size
top = np.random.randint(0, h - new_h)
left = np.random.randint(0, w - new_w)
image = image[top: top + new_h, left: left + new_w]
label = label[top: top + new_h, left: left + new_w]
return {'imidx':imidx,'image':image, 'label':label}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
imidx, image, label = sample['imidx'], sample['image'], sample['label']
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
tmpLbl = np.zeros(label.shape)
image = image/np.max(image)
        # normalize the label, guarding against all-zero (empty) labels
        if np.max(label) >= 1e-6:
            label = label/np.max(label)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl)}
class ToTensorLab(object):
"""Convert ndarrays in sample to Tensors."""
def __init__(self,flag=0):
self.flag = flag
def __call__(self, sample):
imidx, image, label =sample['imidx'], sample['image'], sample['label']
tmpLbl = np.zeros(label.shape)
        # normalize the label, guarding against all-zero (empty) labels
        if np.max(label) >= 1e-6:
            label = label/np.max(label)
# change the color space
if self.flag == 2: # with rgb and Lab colors
tmpImg = np.zeros((image.shape[0],image.shape[1],6))
tmpImgt = np.zeros((image.shape[0],image.shape[1],3))
if image.shape[2]==1:
tmpImgt[:,:,0] = image[:,:,0]
tmpImgt[:,:,1] = image[:,:,0]
tmpImgt[:,:,2] = image[:,:,0]
else:
tmpImgt = image
tmpImgtl = color.rgb2lab(tmpImgt)
            # normalize image to range [0,1]
tmpImg[:,:,0] = (tmpImgt[:,:,0]-np.min(tmpImgt[:,:,0]))/(np.max(tmpImgt[:,:,0])-np.min(tmpImgt[:,:,0]))
tmpImg[:,:,1] = (tmpImgt[:,:,1]-np.min(tmpImgt[:,:,1]))/(np.max(tmpImgt[:,:,1])-np.min(tmpImgt[:,:,1]))
tmpImg[:,:,2] = (tmpImgt[:,:,2]-np.min(tmpImgt[:,:,2]))/(np.max(tmpImgt[:,:,2])-np.min(tmpImgt[:,:,2]))
tmpImg[:,:,3] = (tmpImgtl[:,:,0]-np.min(tmpImgtl[:,:,0]))/(np.max(tmpImgtl[:,:,0])-np.min(tmpImgtl[:,:,0]))
tmpImg[:,:,4] = (tmpImgtl[:,:,1]-np.min(tmpImgtl[:,:,1]))/(np.max(tmpImgtl[:,:,1])-np.min(tmpImgtl[:,:,1]))
tmpImg[:,:,5] = (tmpImgtl[:,:,2]-np.min(tmpImgtl[:,:,2]))/(np.max(tmpImgtl[:,:,2])-np.min(tmpImgtl[:,:,2]))
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
tmpImg[:,:,3] = (tmpImg[:,:,3]-np.mean(tmpImg[:,:,3]))/np.std(tmpImg[:,:,3])
tmpImg[:,:,4] = (tmpImg[:,:,4]-np.mean(tmpImg[:,:,4]))/np.std(tmpImg[:,:,4])
tmpImg[:,:,5] = (tmpImg[:,:,5]-np.mean(tmpImg[:,:,5]))/np.std(tmpImg[:,:,5])
elif self.flag == 1: #with Lab color
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
if image.shape[2]==1:
tmpImg[:,:,0] = image[:,:,0]
tmpImg[:,:,1] = image[:,:,0]
tmpImg[:,:,2] = image[:,:,0]
else:
tmpImg = image
tmpImg = color.rgb2lab(tmpImg)
# tmpImg = tmpImg/(np.max(tmpImg)-np.min(tmpImg))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.min(tmpImg[:,:,0]))/(np.max(tmpImg[:,:,0])-np.min(tmpImg[:,:,0]))
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.min(tmpImg[:,:,1]))/(np.max(tmpImg[:,:,1])-np.min(tmpImg[:,:,1]))
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.min(tmpImg[:,:,2]))/(np.max(tmpImg[:,:,2])-np.min(tmpImg[:,:,2]))
tmpImg[:,:,0] = (tmpImg[:,:,0]-np.mean(tmpImg[:,:,0]))/np.std(tmpImg[:,:,0])
tmpImg[:,:,1] = (tmpImg[:,:,1]-np.mean(tmpImg[:,:,1]))/np.std(tmpImg[:,:,1])
tmpImg[:,:,2] = (tmpImg[:,:,2]-np.mean(tmpImg[:,:,2]))/np.std(tmpImg[:,:,2])
else: # with rgb color
tmpImg = np.zeros((image.shape[0],image.shape[1],3))
image = image/np.max(image)
if image.shape[2]==1:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,2] = (image[:,:,0]-0.485)/0.229
else:
tmpImg[:,:,0] = (image[:,:,0]-0.485)/0.229
tmpImg[:,:,1] = (image[:,:,1]-0.456)/0.224
tmpImg[:,:,2] = (image[:,:,2]-0.406)/0.225
tmpLbl[:,:,0] = label[:,:,0]
tmpImg = tmpImg.transpose((2, 0, 1))
tmpLbl = label.transpose((2, 0, 1))
return {'imidx':torch.from_numpy(imidx), 'image': torch.from_numpy(tmpImg), 'label': torch.from_numpy(tmpLbl)}
class SalObjDataset(Dataset):
def __init__(self,img_name_list,lbl_name_list,transform=None):
# self.root_dir = root_dir
# self.image_name_list = glob.glob(image_dir+'*.png')
# self.label_name_list = glob.glob(label_dir+'*.png')
self.image_name_list = img_name_list
self.label_name_list = lbl_name_list
self.transform = transform
def __len__(self):
return len(self.image_name_list)
def __getitem__(self,idx):
# image = Image.open(self.image_name_list[idx])#io.imread(self.image_name_list[idx])
# label = Image.open(self.label_name_list[idx])#io.imread(self.label_name_list[idx])
image = io.imread(self.image_name_list[idx])
imname = self.image_name_list[idx]
imidx = np.array([idx])
if(0==len(self.label_name_list)):
label_3 = np.zeros(image.shape)
else:
label_3 = io.imread(self.label_name_list[idx])
label = np.zeros(label_3.shape[0:2])
if(3==len(label_3.shape)):
label = label_3[:,:,0]
elif(2==len(label_3.shape)):
label = label_3
if(3==len(image.shape) and 2==len(label.shape)):
label = label[:,:,np.newaxis]
elif(2==len(image.shape) and 2==len(label.shape)):
image = image[:,:,np.newaxis]
label = label[:,:,np.newaxis]
sample = {'imidx':imidx, 'image':image, 'label':label}
if self.transform:
sample = self.transform(sample)
return sample
| 9,327 | 32.797101 | 159 |
py
|
sm-vit
|
sm-vit-main/U2Net/u2net_test.py
|
import os
from skimage import io, transform
import torch
import torchvision
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms#, utils
# import torch.optim as optim
import numpy as np
from PIL import Image
import glob
from .data_loader import RescaleT
from .data_loader import ToTensor
from .data_loader import ToTensorLab
from .data_loader import SalObjDataset
from .model import U2NET # full size version 173.6 MB
from .model import U2NETP # small version u2net 4.7 MB
#import cv2
import copy
from torchvision.utils import save_image
# normalize the predicted SOD probability map
def normPRED(d):
ma = torch.max(d)
mi = torch.min(d)
dn = (d-mi)/(ma-mi)
return dn
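# Quick self-check sketch for normPRED: min-max normalization maps any
# prediction tensor into the [0, 1] range.
def _demo_normPRED():
    d = torch.tensor([[-2.0, 0.0, 6.0]])
    print(normPRED(d))  # tensor([[0.0000, 0.2500, 1.0000]])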
def save_output(image_name,pred,d_dir):
predict = pred
predict = predict.squeeze()
predict_np = predict.cpu().data.numpy()
im = Image.fromarray(predict_np*255).convert('RGB')
img_name = image_name.split(os.sep)[-1]
image = io.imread(image_name)
imo = im.resize((image.shape[1],image.shape[0]),resample=Image.BILINEAR)
pb_np = np.array(imo)
aaa = img_name.split(".")
bbb = aaa[0:-1]
imidx = bbb[0]
for i in range(1,len(bbb)):
imidx = imidx + "." + bbb[i]
imo.save(d_dir+imidx+'.png')
def mask_hw(full_ds=True, img_path=None, shape_hw=None):
print_info = False
# --------- 1. get image path and name ---------
model_name='u2net' #u2netp
if img_path is None:
image_dir = os.path.join(os.getcwd(), 'U2Net/images')
img_name_list = glob.glob(image_dir + os.sep + '*')
if print_info: print("local image")
if print_info: print(img_name_list)
else:
if full_ds:
img_name_list = img_path
shape_hw_list = shape_hw
else:
img_name_list = glob.glob(img_path)
if print_info: print(img_path)
model_dir = os.path.join(os.getcwd(), 'U2Net/model/pre_trained', model_name + '.pth')
# --------- 2. dataloader ---------
test_salobj_dataset = SalObjDataset(img_name_list = img_name_list,
lbl_name_list = [],
transform=transforms.Compose([RescaleT(320),
ToTensorLab(flag=0)])
)
test_salobj_dataloader = DataLoader(test_salobj_dataset,
batch_size=1,
shuffle=False,
num_workers=4) # 1
# --------- 3. model define ---------
if(model_name=='u2net'):
net = U2NET(3,1)
elif(model_name=='u2netp'):
net = U2NETP(3,1)
if torch.cuda.is_available():
net.load_state_dict(torch.load(model_dir))
net.cuda()
else:
net.load_state_dict(torch.load(model_dir, map_location='cpu'))
net.eval()
# --------- 4. inference for each image ---------
mask_out_np_list = []
start_x_list = []
start_y_list = []
h_list = []
w_list = []
bad_mask_count = 0
refined_mask_count = 0
for i_test, data_test in enumerate(test_salobj_dataloader):
if print_info: print("U2N:", i_test, img_name_list[i_test])
inputs_test = data_test['image']
inputs_test = inputs_test.type(torch.FloatTensor)
if full_ds:
shape_hw_i = shape_hw_list[i_test]
if torch.cuda.is_available():
inputs_test = Variable(inputs_test.cuda())
else:
inputs_test = Variable(inputs_test)
with torch.no_grad():
d1,d2,d3,d4,d5,d6,d7= net(inputs_test)
# normalization
pred = d1[:,0,:,:]
pred = normPRED(pred)
        THRESHOLD = 0.8         # for the original mask (values below ~0.7 tend to produce artifacts)
        THRESHOLD_resize = 0.2  # for the resized mask
        THRESHOLD_deep = 0.1    # fallback threshold when no mask was detected
pred = pred[0, :, :]
pred_cpu = pred.cpu()
out_img = pred_cpu.detach().numpy()
out_img_refine = copy.deepcopy(out_img)
# BACKGROUND REMOVAL
out_img[out_img > THRESHOLD] = 1
out_img[out_img <= THRESHOLD] = 0
out_img = (out_img * 255).astype(np.uint8)
out_img = Image.fromarray(out_img, mode='L')
# BOUNDING BOX CREATION
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(mode='L'), #(mode='1'),
#transforms.Resize((image.shape[1],image.shape[0]), Image.BILINEAR),
transforms.Resize((shape_hw_i[0], shape_hw_i[1]), Image.BILINEAR), # shape_hw (0 - height, 1 - width)
transforms.ToTensor(),
])
out_img = transform_mask(out_img)
out_img = out_img[0, :, :]
mask_out = out_img
mask_out = mask_out.cpu()
mask_out = torch.where( mask_out > THRESHOLD_resize, torch.tensor(0.),
torch.tensor(1.))
mask_out_np = mask_out.detach().numpy()
out_layer = out_img
out_layer = out_layer.cpu()
out_layer = torch.where( out_layer > THRESHOLD_resize, torch.tensor(1.),
torch.tensor(0.))
out_layer = out_layer.detach().numpy()
x_starts = [np.where(out_layer[i]==1)[0][0] if len(np.where(out_layer[i]==1)[0])!=0 \
else out_layer.shape[0]+1 for i in range(out_layer.shape[0])]
x_ends = [np.where(out_layer[i]==1)[0][-1] if len(np.where(out_layer[i]==1)[0])!=0 \
else 0 for i in range(out_layer.shape[0])]
y_starts = [np.where(out_layer.T[i]==1)[0][0] if len(np.where(out_layer.T[i]==1)[0])!=0 \
else out_layer.T.shape[0]+1 for i in range(out_layer.T.shape[0])]
y_ends = [np.where(out_layer.T[i]==1)[0][-1] if len(np.where(out_layer.T[i]==1)[0])!=0 \
else 0 for i in range(out_layer.T.shape[0])]
startx = min(x_starts)
endx = max(x_ends)
starty = min(y_starts)
endy = max(y_ends)
start = (startx,starty)
end = (endx,endy)
        ## Fallbacks for cases when U2Net could not detect a mask:
        # 1.1 if (end - start) < 30-50 px -> decrease the threshold
        # 1.2 if start > end, or end == 0 -> decrease the threshold
        # 2.1 if still no mask is found -> create a center-crop mask at (x, y) +-10%
        # 2.2 and restore h, w from (0, 0) to the (x, y) +-10% box
w_temp = end[0] - start[0]
h_temp = end[1] - start[1]
mask_px = np.count_nonzero(out_layer > 0.9) # (expected to be == 1.0)
if print_info: print("Mask px old:", mask_px)
if (end[0] <= start[0]) or (end[1] <= start[1]) or (mask_px < 5000) or (w_temp < 50) or (h_temp < 50) :
if print_info: print("[WARNING] Mask was not detected by U2N for image", img_name_list[i_test])
if print_info: print("Trying to refine image and then detect mask again.")
if print_info: print("Old x (start, end):", startx, endx)
if print_info: print("Old y (start, end):", starty, endy)
# img_dir = ("test/" + str(i_test))
# if not os.path.exists(img_dir):
# os.makedirs(img_dir, exist_ok=True)
# img_name = ("test/" + str(i_test) + "/1mask_init" + str(i_test) + ".png")
# img_temp = transforms.ToTensor()(out_img_refine)
# save_image(img_temp, img_name)
# img_name = ("test/" + str(i_test) + "/2mask_old" + str(i_test) + ".png")
# img_temp = transforms.ToTensor()(mask_out_np)
# save_image(img_temp, img_name)
out_img_refine[out_img_refine > THRESHOLD_deep] = 1
out_img_refine[out_img_refine <= THRESHOLD_deep] = 0
out_img_refine = (out_img_refine * 255).astype(np.uint8)
out_img_refine = Image.fromarray(out_img_refine, mode='L')
transform_mask = transforms.Compose([
transforms.ToTensor(),
transforms.ToPILImage(mode='L'), #(mode='1'),
#transforms.Resize((image.shape[1],image.shape[0]), Image.BILINEAR),
transforms.Resize((shape_hw_i[0], shape_hw_i[1]), Image.BILINEAR), # shape_hw (0 - height, 1 - width)
transforms.ToTensor(),
])
out_img_refine = transform_mask(out_img_refine)
out_img_refine = out_img_refine[0, :, :]
out_layer_refine = out_img_refine
out_layer_refine = out_layer_refine.cpu()
out_layer_refine = torch.where( out_img_refine > THRESHOLD_resize, torch.tensor(1.),
torch.tensor(0.))
out_layer_refine = out_layer_refine.detach().numpy()
x_starts = [np.where(out_layer_refine[i]==1)[0][0] if len(np.where(out_layer_refine[i]==1)[0])!=0 \
else out_layer_refine.shape[0]+1 for i in range(out_layer_refine.shape[0])]
x_ends = [np.where(out_layer_refine[i]==1)[0][-1] if len(np.where(out_layer_refine[i]==1)[0])!=0 \
else 0 for i in range(out_layer_refine.shape[0])]
y_starts = [np.where(out_layer_refine.T[i]==1)[0][0] if len(np.where(out_layer_refine.T[i]==1)[0])!=0 \
else out_layer_refine.T.shape[0]+1 for i in range(out_layer_refine.T.shape[0])]
y_ends = [np.where(out_layer_refine.T[i]==1)[0][-1] if len(np.where(out_layer_refine.T[i]==1)[0])!=0 \
else 0 for i in range(out_layer_refine.T.shape[0])]
startx = min(x_starts)
endx = max(x_ends)
starty = min(y_starts)
endy = max(y_ends)
start = (startx,starty)
end = (endx,endy)
if print_info: print("New x (start, end):", startx, endx)
if print_info: print("New y (start, end):", starty, endy)
w_temp = end[0] - start[0]
h_temp = end[1] - start[1]
mask_px = np.count_nonzero(out_layer_refine > 0.9) # (expected to be == 1.0)
if print_info: print("Mask px new:", mask_px)
if (end[0] <= start[0]) or (end[1] <= start[1]) or (mask_px < 5000) or (w_temp < 50) or (h_temp < 50) :
if print_info: print("[WARNING] Mask was not deteted by U2N even after refining.")
if print_info: print("Changing mask size (0, 0) to img size (", shape_hw_i[1], shape_hw_i[0], ") -10 p/c boundaries: ")
if print_info: print("Old x (start, end):", startx, endx)
startx = shape_hw_i[1] * 0.1
endx = shape_hw_i[1] * 0.9 # w -> x
if print_info: print("New x (start, end):", startx, endx)
if print_info: print("Old y (start, end):", starty, endy)
starty = shape_hw_i[0] * 0.1
endy = shape_hw_i[0] * 0.9 # h -> y
if print_info: print("New y (start, end):", starty, endy)
start = (startx,starty)
end = (endx,endy)
mask_out_np = np.ones((int(shape_hw_i[0]), int(shape_hw_i[1])))
mask_out_np[int(starty):int(endy), int(startx):int(endx)] = 0
# img_name = ("test/" + str(i_test) + "/4mask_new2_" + str(i_test) + ".png")
# img_temp = transforms.ToTensor()(mask_out_np)
# save_image(img_temp, img_name)
bad_mask_count+=1
else:
mask_out_refine = out_img_refine
mask_out_refine = mask_out_refine.cpu()
mask_out_refine = torch.where( mask_out_refine > THRESHOLD_resize, torch.tensor(0.),
torch.tensor(1.))
# img_name = ("test/" + str(i_test) + "/3mask_new1_" + str(i_test) + ".png")
# #mask_tem = transforms.ToTensor()(mask)
# save_image(mask_out_refine, img_name)
mask_out_np = mask_out_refine.detach().numpy()
refined_mask_count+=1
w = end[0] - start[0]
h = end[1] - start[1]
# save results to test_results folder
# if not os.path.exists(prediction_dir):
# os.makedirs(prediction_dir, exist_ok=True)
# save_output(img_name_list[i_test], mask_out, prediction_dir)
del d1,d2,d3,d4,d5,d6,d7
if print_info: print(start[0], start[1], h, w)
mask_out_np_list.append(mask_out_np)
start_x_list.append(start[0])
start_y_list.append(start[1])
h_list.append(h)
w_list.append(w)
if i_test % 1000 == 0:
print(i_test)
print("Refined masks total:", refined_mask_count)
print("Bad masks total:", bad_mask_count)
return mask_out_np_list, start_x_list, start_y_list, h_list, w_list
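# Toy sketch of the row/column scan used above to recover a bounding box from
# a binary mask (values here are synthetic).
def _demo_bbox_from_mask():
    layer = np.zeros((6, 6))
    layer[2:4, 1:5] = 1.
    x_starts = [np.where(layer[i] == 1)[0][0] if len(np.where(layer[i] == 1)[0]) != 0
                else layer.shape[0] + 1 for i in range(layer.shape[0])]
    x_ends = [np.where(layer[i] == 1)[0][-1] if len(np.where(layer[i] == 1)[0]) != 0
              else 0 for i in range(layer.shape[0])]
    print(min(x_starts), max(x_ends))  # 1 4 -> mask spans columns 1..4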
if __name__ == "__main__":
mask_hw()
| 13,512 | 37.719198 | 139 |
py
|
sm-vit
|
sm-vit-main/U2Net/model/u2net.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class REBNCONV(nn.Module):
def __init__(self,in_ch=3,out_ch=3,dirate=1):
super(REBNCONV,self).__init__()
self.conv_s1 = nn.Conv2d(in_ch,out_ch,3,padding=1*dirate,dilation=1*dirate)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self,x):
hx = x
xout = self.relu_s1(self.bn_s1(self.conv_s1(hx)))
return xout
## upsample tensor 'src' to have the same spatial size with tensor 'tar'
def _upsample_like(src,tar):
    src = F.interpolate(src,size=tar.shape[2:],mode='bilinear',align_corners=False) # F.upsample is deprecated
return src
### RSU-7 ###
class RSU7(nn.Module):#UNet07DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU7,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool5 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv7 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv6d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx = self.pool5(hx5)
hx6 = self.rebnconv6(hx)
hx7 = self.rebnconv7(hx6)
hx6d = self.rebnconv6d(torch.cat((hx7,hx6),1))
hx6dup = _upsample_like(hx6d,hx5)
hx5d = self.rebnconv5d(torch.cat((hx6dup,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-6 ###
class RSU6(nn.Module):#UNet06DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU6,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool4 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv6 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv5d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx = self.pool4(hx4)
hx5 = self.rebnconv5(hx)
hx6 = self.rebnconv6(hx5)
hx5d = self.rebnconv5d(torch.cat((hx6,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.rebnconv4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-5 ###
class RSU5(nn.Module):#UNet05DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU5,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool3 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv5 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv4d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx = self.pool3(hx3)
hx4 = self.rebnconv4(hx)
hx5 = self.rebnconv5(hx4)
hx4d = self.rebnconv4d(torch.cat((hx5,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.rebnconv3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4 ###
class RSU4(nn.Module):#UNet04DRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.pool1 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.pool2 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=1)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=1)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx = self.pool1(hx1)
hx2 = self.rebnconv2(hx)
hx = self.pool2(hx2)
hx3 = self.rebnconv3(hx)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.rebnconv2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.rebnconv1d(torch.cat((hx2dup,hx1),1))
return hx1d + hxin
### RSU-4F ###
class RSU4F(nn.Module):#UNet04FRES(nn.Module):
def __init__(self, in_ch=3, mid_ch=12, out_ch=3):
super(RSU4F,self).__init__()
self.rebnconvin = REBNCONV(in_ch,out_ch,dirate=1)
self.rebnconv1 = REBNCONV(out_ch,mid_ch,dirate=1)
self.rebnconv2 = REBNCONV(mid_ch,mid_ch,dirate=2)
self.rebnconv3 = REBNCONV(mid_ch,mid_ch,dirate=4)
self.rebnconv4 = REBNCONV(mid_ch,mid_ch,dirate=8)
self.rebnconv3d = REBNCONV(mid_ch*2,mid_ch,dirate=4)
self.rebnconv2d = REBNCONV(mid_ch*2,mid_ch,dirate=2)
self.rebnconv1d = REBNCONV(mid_ch*2,out_ch,dirate=1)
def forward(self,x):
hx = x
hxin = self.rebnconvin(hx)
hx1 = self.rebnconv1(hxin)
hx2 = self.rebnconv2(hx1)
hx3 = self.rebnconv3(hx2)
hx4 = self.rebnconv4(hx3)
hx3d = self.rebnconv3d(torch.cat((hx4,hx3),1))
hx2d = self.rebnconv2d(torch.cat((hx3d,hx2),1))
hx1d = self.rebnconv1d(torch.cat((hx2d,hx1),1))
return hx1d + hxin
##### U^2-Net ####
class U2NET(nn.Module):
def __init__(self,in_ch=3,out_ch=1):
super(U2NET,self).__init__()
self.stage1 = RSU7(in_ch,32,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,32,128)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(128,64,256)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(256,128,512)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(512,256,512)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(512,256,512)
# decoder
self.stage5d = RSU4F(1024,256,512)
self.stage4d = RSU4(1024,128,256)
self.stage3d = RSU5(512,64,128)
self.stage2d = RSU6(256,32,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(128,out_ch,3,padding=1)
self.side4 = nn.Conv2d(256,out_ch,3,padding=1)
self.side5 = nn.Conv2d(512,out_ch,3,padding=1)
self.side6 = nn.Conv2d(512,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def forward(self,x):
hx = x
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#-------------------- decoder --------------------
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
        return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)  # F.sigmoid is deprecated
### U^2-Net small ###
class U2NETP(nn.Module):
def __init__(self,in_ch=3,out_ch=1):
super(U2NETP,self).__init__()
self.stage1 = RSU7(in_ch,16,64)
self.pool12 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage2 = RSU6(64,16,64)
self.pool23 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage3 = RSU5(64,16,64)
self.pool34 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage4 = RSU4(64,16,64)
self.pool45 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage5 = RSU4F(64,16,64)
self.pool56 = nn.MaxPool2d(2,stride=2,ceil_mode=True)
self.stage6 = RSU4F(64,16,64)
# decoder
self.stage5d = RSU4F(128,16,64)
self.stage4d = RSU4(128,16,64)
self.stage3d = RSU5(128,16,64)
self.stage2d = RSU6(128,16,64)
self.stage1d = RSU7(128,16,64)
self.side1 = nn.Conv2d(64,out_ch,3,padding=1)
self.side2 = nn.Conv2d(64,out_ch,3,padding=1)
self.side3 = nn.Conv2d(64,out_ch,3,padding=1)
self.side4 = nn.Conv2d(64,out_ch,3,padding=1)
self.side5 = nn.Conv2d(64,out_ch,3,padding=1)
self.side6 = nn.Conv2d(64,out_ch,3,padding=1)
self.outconv = nn.Conv2d(6*out_ch,out_ch,1)
def forward(self,x):
hx = x
#stage 1
hx1 = self.stage1(hx)
hx = self.pool12(hx1)
#stage 2
hx2 = self.stage2(hx)
hx = self.pool23(hx2)
#stage 3
hx3 = self.stage3(hx)
hx = self.pool34(hx3)
#stage 4
hx4 = self.stage4(hx)
hx = self.pool45(hx4)
#stage 5
hx5 = self.stage5(hx)
hx = self.pool56(hx5)
#stage 6
hx6 = self.stage6(hx)
hx6up = _upsample_like(hx6,hx5)
#decoder
hx5d = self.stage5d(torch.cat((hx6up,hx5),1))
hx5dup = _upsample_like(hx5d,hx4)
hx4d = self.stage4d(torch.cat((hx5dup,hx4),1))
hx4dup = _upsample_like(hx4d,hx3)
hx3d = self.stage3d(torch.cat((hx4dup,hx3),1))
hx3dup = _upsample_like(hx3d,hx2)
hx2d = self.stage2d(torch.cat((hx3dup,hx2),1))
hx2dup = _upsample_like(hx2d,hx1)
hx1d = self.stage1d(torch.cat((hx2dup,hx1),1))
#side output
d1 = self.side1(hx1d)
d2 = self.side2(hx2d)
d2 = _upsample_like(d2,d1)
d3 = self.side3(hx3d)
d3 = _upsample_like(d3,d1)
d4 = self.side4(hx4d)
d4 = _upsample_like(d4,d1)
d5 = self.side5(hx5d)
d5 = _upsample_like(d5,d1)
d6 = self.side6(hx6)
d6 = _upsample_like(d6,d1)
d0 = self.outconv(torch.cat((d1,d2,d3,d4,d5,d6),1))
        return torch.sigmoid(d0), torch.sigmoid(d1), torch.sigmoid(d2), torch.sigmoid(d3), torch.sigmoid(d4), torch.sigmoid(d5), torch.sigmoid(d6)  # F.sigmoid is deprecated
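# Smoke-test sketch: both variants emit seven saliency maps (fused + six side
# outputs) at the input resolution. The input size below is arbitrary.
if __name__ == "__main__":
    x = torch.randn(1, 3, 320, 320)
    outs = U2NETP()(x)
    print(len(outs), outs[0].shape)  # 7 torch.Size([1, 1, 320, 320])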
| 14,719 | 26.984791 | 118 |
py
|
sm-vit
|
sm-vit-main/U2Net/model/u2net_refactor.py
|
import torch
import torch.nn as nn
import math
__all__ = ['U2NET_full', 'U2NET_lite']
def _upsample_like(x, size):
return nn.Upsample(size=size, mode='bilinear', align_corners=False)(x)
def _size_map(x, height):
# {height: size} for Upsample
size = list(x.shape[-2:])
sizes = {}
for h in range(1, height):
sizes[h] = size
size = [math.ceil(w / 2) for w in size]
return sizes
class REBNCONV(nn.Module):
def __init__(self, in_ch=3, out_ch=3, dilate=1):
super(REBNCONV, self).__init__()
self.conv_s1 = nn.Conv2d(in_ch, out_ch, 3, padding=1 * dilate, dilation=1 * dilate)
self.bn_s1 = nn.BatchNorm2d(out_ch)
self.relu_s1 = nn.ReLU(inplace=True)
def forward(self, x):
return self.relu_s1(self.bn_s1(self.conv_s1(x)))
class RSU(nn.Module):
def __init__(self, name, height, in_ch, mid_ch, out_ch, dilated=False):
super(RSU, self).__init__()
self.name = name
self.height = height
self.dilated = dilated
self._make_layers(height, in_ch, mid_ch, out_ch, dilated)
def forward(self, x):
sizes = _size_map(x, self.height)
x = self.rebnconvin(x)
# U-Net like symmetric encoder-decoder structure
def unet(x, height=1):
if height < self.height:
x1 = getattr(self, f'rebnconv{height}')(x)
if not self.dilated and height < self.height - 1:
x2 = unet(getattr(self, 'downsample')(x1), height + 1)
else:
x2 = unet(x1, height + 1)
x = getattr(self, f'rebnconv{height}d')(torch.cat((x2, x1), 1))
return _upsample_like(x, sizes[height - 1]) if not self.dilated and height > 1 else x
else:
return getattr(self, f'rebnconv{height}')(x)
return x + unet(x)
def _make_layers(self, height, in_ch, mid_ch, out_ch, dilated=False):
self.add_module('rebnconvin', REBNCONV(in_ch, out_ch))
self.add_module('downsample', nn.MaxPool2d(2, stride=2, ceil_mode=True))
self.add_module(f'rebnconv1', REBNCONV(out_ch, mid_ch))
self.add_module(f'rebnconv1d', REBNCONV(mid_ch * 2, out_ch))
for i in range(2, height):
dilate = 1 if not dilated else 2 ** (i - 1)
self.add_module(f'rebnconv{i}', REBNCONV(mid_ch, mid_ch, dilate=dilate))
self.add_module(f'rebnconv{i}d', REBNCONV(mid_ch * 2, mid_ch, dilate=dilate))
dilate = 2 if not dilated else 2 ** (height - 1)
self.add_module(f'rebnconv{height}', REBNCONV(mid_ch, mid_ch, dilate=dilate))
class U2NET(nn.Module):
def __init__(self, cfgs, out_ch):
super(U2NET, self).__init__()
self.out_ch = out_ch
self._make_layers(cfgs)
def forward(self, x):
sizes = _size_map(x, self.height)
maps = [] # storage for maps
# side saliency map
def unet(x, height=1):
if height < 6:
x1 = getattr(self, f'stage{height}')(x)
x2 = unet(getattr(self, 'downsample')(x1), height + 1)
x = getattr(self, f'stage{height}d')(torch.cat((x2, x1), 1))
side(x, height)
return _upsample_like(x, sizes[height - 1]) if height > 1 else x
else:
x = getattr(self, f'stage{height}')(x)
side(x, height)
return _upsample_like(x, sizes[height - 1])
def side(x, h):
# side output saliency map (before sigmoid)
x = getattr(self, f'side{h}')(x)
x = _upsample_like(x, sizes[1])
maps.append(x)
def fuse():
# fuse saliency probability maps
maps.reverse()
x = torch.cat(maps, 1)
x = getattr(self, 'outconv')(x)
maps.insert(0, x)
return [torch.sigmoid(x) for x in maps]
unet(x)
maps = fuse()
return maps
def _make_layers(self, cfgs):
self.height = int((len(cfgs) + 1) / 2)
self.add_module('downsample', nn.MaxPool2d(2, stride=2, ceil_mode=True))
for k, v in cfgs.items():
# build rsu block
self.add_module(k, RSU(v[0], *v[1]))
if v[2] > 0:
# build side layer
self.add_module(f'side{v[0][-1]}', nn.Conv2d(v[2], self.out_ch, 3, padding=1))
# build fuse layer
self.add_module('outconv', nn.Conv2d(int(self.height * self.out_ch), self.out_ch, 1))
def U2NET_full():
full = {
# cfgs for building RSUs and sides
# {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}
'stage1': ['En_1', (7, 3, 32, 64), -1],
'stage2': ['En_2', (6, 64, 32, 128), -1],
'stage3': ['En_3', (5, 128, 64, 256), -1],
'stage4': ['En_4', (4, 256, 128, 512), -1],
'stage5': ['En_5', (4, 512, 256, 512, True), -1],
'stage6': ['En_6', (4, 512, 256, 512, True), 512],
'stage5d': ['De_5', (4, 1024, 256, 512, True), 512],
'stage4d': ['De_4', (4, 1024, 128, 256), 256],
'stage3d': ['De_3', (5, 512, 64, 128), 128],
'stage2d': ['De_2', (6, 256, 32, 64), 64],
'stage1d': ['De_1', (7, 128, 16, 64), 64],
}
return U2NET(cfgs=full, out_ch=1)
def U2NET_lite():
lite = {
# cfgs for building RSUs and sides
# {stage : [name, (height(L), in_ch, mid_ch, out_ch, dilated), side]}
'stage1': ['En_1', (7, 3, 16, 64), -1],
'stage2': ['En_2', (6, 64, 16, 64), -1],
'stage3': ['En_3', (5, 64, 16, 64), -1],
'stage4': ['En_4', (4, 64, 16, 64), -1],
'stage5': ['En_5', (4, 64, 16, 64, True), -1],
'stage6': ['En_6', (4, 64, 16, 64, True), 64],
'stage5d': ['De_5', (4, 128, 16, 64, True), 64],
'stage4d': ['De_4', (4, 128, 16, 64), 64],
'stage3d': ['De_3', (5, 128, 16, 64), 64],
'stage2d': ['De_2', (6, 128, 16, 64), 64],
'stage1d': ['De_1', (7, 128, 16, 64), 64],
}
return U2NET(cfgs=lite, out_ch=1)
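# Comparison sketch: the lite config trades capacity for size; the parameter
# counts are computed rather than hard-coded.
if __name__ == "__main__":
    for name, net in [('full', U2NET_full()), ('lite', U2NET_lite())]:
        n_params = sum(p.numel() for p in net.parameters())
        print(name, '%.1fM params' % (n_params / 1e6))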
| 6,097 | 35.08284 | 101 |
py
|
sm-vit
|
sm-vit-main/U2Net/model/__init__.py
|
from .u2net import U2NET
from .u2net import U2NETP
| 51 | 16.333333 | 25 |
py
|
sm-vit
|
sm-vit-main/U2Net/model/setup_model_weights.py
|
import os
import gdown
os.makedirs('./saved_models/u2net', exist_ok=True)
os.makedirs('./saved_models/u2net_portrait', exist_ok=True)
gdown.download('https://drive.google.com/uc?id=1ao1ovG1Qtx4b7EoskHXmi2E9rp5CHLcZ',
'./saved_models/u2net/u2net.pth',
quiet=False)
gdown.download('https://drive.google.com/uc?id=1IG3HdpcRiDoWNookbncQjeaPN28t90yW',
'./saved_models/u2net_portrait/u2net_portrait.pth',
quiet=False)
| 431 | 29.857143 | 82 |
py
|
HighOrderAtten
|
HighOrderAtten-master/image_model/download_model.py
|
"""
Download the VGG and deep residual model to extract image features.
Version: 1.0
Contributor: Jiasen Lu
"""
import os
import argparse
import json
def download_VGG():
print('Downloading VGG model from http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel')
os.system('wget http://www.robots.ox.ac.uk/~vgg/software/very_deep/caffe/VGG_ILSVRC_19_layers.caffemodel')
os.system('wget https://gist.githubusercontent.com/ksimonyan/3785162f95cd2d5fee77/raw/bb2b4fe0a9bb0669211cf3d0bc949dfdda173e9e/VGG_ILSVRC_19_layers_deploy.prototxt')
def download_deep_residual():
print('Downloading deep residual model from https://d2j0dndfm35trm.cloudfront.net/resnet-200.t7')
os.system('wget https://d2j0dndfm35trm.cloudfront.net/resnet-200.t7')
os.system('wget https://raw.githubusercontent.com/facebook/fb.resnet.torch/master/datasets/transforms.lua')
def main(params):
if params['download'] == 'VGG':
download_VGG()
else:
download_deep_residual()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--download', default='VGG', help='VGG or Residual')
# input json
args = parser.parse_args()
params = vars(args)
    print('parsed input parameters:')
    print(json.dumps(params, indent=2))
main(params)
| 1,337 | 35.162162 | 169 |
py
|
HighOrderAtten
|
HighOrderAtten-master/data/vqa_preprocess.py
|
"""
Download the vqa data and preprocessing.
Version: 1.0
Contributor: Jiasen Lu
"""
# Download the VQA Questions from http://www.visualqa.org/download.html
import json
import os
import argparse
def download_vqa():
os.system('wget http://visualqa.org/data/mscoco/vqa/Questions_Train_mscoco.zip -P zip/')
os.system('wget http://visualqa.org/data/mscoco/vqa/Questions_Val_mscoco.zip -P zip/')
os.system('wget http://visualqa.org/data/mscoco/vqa/Questions_Test_mscoco.zip -P zip/')
# Download the VQA Annotations
os.system('wget http://visualqa.org/data/mscoco/vqa/Annotations_Train_mscoco.zip -P zip/')
os.system('wget http://visualqa.org/data/mscoco/vqa/Annotations_Val_mscoco.zip -P zip/')
# Unzip the annotations
os.system('unzip zip/Questions_Train_mscoco.zip -d annotations/')
os.system('unzip zip/Questions_Val_mscoco.zip -d annotations/')
os.system('unzip zip/Questions_Test_mscoco.zip -d annotations/')
os.system('unzip zip/Annotations_Train_mscoco.zip -d annotations/')
os.system('unzip zip/Annotations_Val_mscoco.zip -d annotations/')
def main(params):
if params['download'] == 1:
download_vqa()
    '''
    Put the VQA data into a single json file, as a list of records of the form
    {'ques_id': ..., 'img_path': ..., 'question': ..., 'MC_ans': ..., 'ans': ...}
    (records built from the test2015/test-dev2015 questions carry no 'ans').
    '''
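    # e.g. one raw record as built below (a sketch; note imdir zero-pads the
    # image id to 12 digits):
    # {'ques_id': 1, 'img_path': 'train2014/COCO_train2014_000000000009.jpg',
    #  'question': '...', 'MC_ans': [...18 choices...], 'ans': 'yes'}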
train = []
test = []
imdir='%s/COCO_%s_%012d.jpg'
if params['split'] == 1:
print 'Loading annotations and questions...'
train_anno = json.load(open('annotations/mscoco_train2014_annotations.json', 'r'))
val_anno = json.load(open('annotations/mscoco_val2014_annotations.json', 'r'))
train_ques = json.load(open('annotations/MultipleChoice_mscoco_train2014_questions.json', 'r'))
val_ques = json.load(open('annotations/MultipleChoice_mscoco_val2014_questions.json', 'r'))
subtype = 'train2014'
for i in range(len(train_anno['annotations'])):
ans = train_anno['annotations'][i]['multiple_choice_answer']
question_id = train_anno['annotations'][i]['question_id']
image_path = imdir%(subtype, subtype, train_anno['annotations'][i]['image_id'])
question = train_ques['questions'][i]['question']
mc_ans = train_ques['questions'][i]['multiple_choices']
train.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'MC_ans': mc_ans, 'ans': ans})
subtype = 'val2014'
for i in range(len(val_anno['annotations'])):
ans = val_anno['annotations'][i]['multiple_choice_answer']
question_id = val_anno['annotations'][i]['question_id']
image_path = imdir%(subtype, subtype, val_anno['annotations'][i]['image_id'])
question = val_ques['questions'][i]['question']
mc_ans = val_ques['questions'][i]['multiple_choices']
test.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'MC_ans': mc_ans, 'ans': ans})
elif params['split'] == 2:
print 'Loading annotations and questions...'
train_anno = json.load(open('annotations/mscoco_train2014_annotations.json', 'r'))
val_anno = json.load(open('annotations/mscoco_val2014_annotations.json', 'r'))
train_ques = json.load(open('annotations/MultipleChoice_mscoco_train2014_questions.json', 'r'))
val_ques = json.load(open('annotations/MultipleChoice_mscoco_val2014_questions.json', 'r'))
test_ques = json.load(open('annotations/MultipleChoice_mscoco_test-dev2015_questions.json', 'r'))
subtype = 'train2014'
for i in range(len(train_anno['annotations'])):
ans = train_anno['annotations'][i]['multiple_choice_answer']
question_id = train_anno['annotations'][i]['question_id']
image_path = imdir%(subtype, subtype, train_anno['annotations'][i]['image_id'])
question = train_ques['questions'][i]['question']
mc_ans = train_ques['questions'][i]['multiple_choices']
train.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'MC_ans': mc_ans, 'ans': ans})
subtype = 'val2014'
for i in range(len(val_anno['annotations'])):
ans = val_anno['annotations'][i]['multiple_choice_answer']
question_id = val_anno['annotations'][i]['question_id']
image_path = imdir%(subtype, subtype, val_anno['annotations'][i]['image_id'])
question = val_ques['questions'][i]['question']
mc_ans = val_ques['questions'][i]['multiple_choices']
train.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'MC_ans': mc_ans, 'ans': ans})
subtype = 'test2015'
for i in range(len(test_ques['questions'])):
question_id = test_ques['questions'][i]['question_id']
image_path = imdir%(subtype, subtype, test_ques['questions'][i]['image_id'])
question = test_ques['questions'][i]['question']
mc_ans = test_ques['questions'][i]['multiple_choices']
test.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'MC_ans': mc_ans})
elif params['split'] == 3:
print 'Loading annotations and questions...'
train_anno = json.load(open('annotations/mscoco_train2014_annotations.json', 'r'))
val_anno = json.load(open('annotations/mscoco_val2014_annotations.json', 'r'))
train_ques = json.load(open('annotations/MultipleChoice_mscoco_train2014_questions.json', 'r'))
val_ques = json.load(open('annotations/MultipleChoice_mscoco_val2014_questions.json', 'r'))
test_ques = json.load(open('annotations/MultipleChoice_mscoco_test2015_questions.json', 'r'))
subtype = 'train2014'
for i in range(len(train_anno['annotations'])):
ans = train_anno['annotations'][i]['multiple_choice_answer']
question_id = train_anno['annotations'][i]['question_id']
image_path = imdir%(subtype, subtype, train_anno['annotations'][i]['image_id'])
question = train_ques['questions'][i]['question']
mc_ans = train_ques['questions'][i]['multiple_choices']
train.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'MC_ans': mc_ans, 'ans': ans})
subtype = 'val2014'
for i in range(len(val_anno['annotations'])):
ans = val_anno['annotations'][i]['multiple_choice_answer']
question_id = val_anno['annotations'][i]['question_id']
image_path = imdir%(subtype, subtype, val_anno['annotations'][i]['image_id'])
question = val_ques['questions'][i]['question']
mc_ans = val_ques['questions'][i]['multiple_choices']
train.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'MC_ans': mc_ans, 'ans': ans})
subtype = 'test2015'
for i in range(len(test_ques['questions'])):
question_id = test_ques['questions'][i]['question_id']
image_path = imdir%(subtype, subtype, test_ques['questions'][i]['image_id'])
question = test_ques['questions'][i]['question']
mc_ans = test_ques['questions'][i]['multiple_choices']
test.append({'ques_id': question_id, 'img_path': image_path, 'question': question, 'MC_ans': mc_ans})
print 'Training sample %d, Testing sample %d...' %(len(train), len(test))
json.dump(train, open('vqa_raw_train.json', 'w'))
json.dump(test, open('vqa_raw_test.json', 'w'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--download', default=0, type=int, help='Download and extract data from VQA server')
    parser.add_argument('--split', default=1, type=int, help='1: train on Train and test on Val, 2: train on Train+Val and test on Test-dev, 3: train on Train+Val and test on Test')
args = parser.parse_args()
params = vars(args)
print 'parsed input parameters:'
print json.dumps(params, indent = 2)
main(params)
| 8,123 | 43.393443 | 137 |
py
|
HighOrderAtten
|
HighOrderAtten-master/data/prepro_vqa.py
|
'''
Preprocess a raw json dataset into hdf5/json files.
Caption: Use NLTK or split function to get tokens.
'''
from random import shuffle, seed
import sys
import os.path
import argparse
import numpy as np
import scipy.io
import pdb
import h5py
from nltk.tokenize import word_tokenize
import json
import re
import math
def tokenize(sentence):
return [i for i in re.split(r"([-.\"',:? !\$#@~()*&\^%;\[\]/\\\+<>\n=])", sentence) if i!='' and i!=' ' and i!='\n'];
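# e.g. tokenize("Is it raining?") -> ['Is', 'it', 'raining', '?']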
def prepro_question(dataset, params):
# preprocess all the question
print 'example processed tokens:'
for i,ques in enumerate(dataset):
s = ques['question']
if params['token_method'] == 'nltk':
txt = word_tokenize(str(s).lower())
else:
txt = tokenize(s)
ques['processed_tokens'] = txt
if i < 10: print txt
if i % 1000 == 0:
sys.stdout.write("processing %d/%d (%.2f%% done) \r" % (i, len(dataset), i*100.0/len(dataset)) )
sys.stdout.flush()
return dataset
def build_vocab_question(dataset, params):
# build vocabulary for question and answers.
count_thr = params['word_count_threshold']
# count up the number of words
counts = {}
for ques in dataset:
for w in ques['processed_tokens']:
counts[w] = counts.get(w, 0) + 1
cw = sorted([(count,w) for w,count in counts.iteritems()], reverse=True)
print 'top words and their counts:'
print '\n'.join(map(str,cw[:20]))
# print some stats
total_words = sum(counts.itervalues())
print 'total words:', total_words
bad_words = [w for w,n in counts.iteritems() if n <= count_thr]
vocab = [w for w,n in counts.iteritems() if n > count_thr]
bad_count = sum(counts[w] for w in bad_words)
print 'number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words)*100.0/len(counts))
print 'number of words in vocab would be %d' % (len(vocab), )
print 'number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count*100.0/total_words)
# lets now produce the final annotation
# additional special UNK token we will use below to map infrequent words to
print 'inserting the special UNK token'
vocab.append('UNK')
for ques in dataset:
txt = ques['processed_tokens']
question = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt]
ques['final_question'] = question
return dataset, vocab
def apply_vocab_question(dataset, wtoi):
# apply the vocab on test.
for ques in dataset:
txt = ques['processed_tokens']
question = [w if w in wtoi else 'UNK' for w in txt]
ques['final_question'] = question
return dataset
def get_top_answers(dataset, params):
counts = {}
for ques in dataset:
ans = ques['ans']
counts[ans] = counts.get(ans, 0) + 1
cw = sorted([(count,w) for w,count in counts.iteritems()], reverse=True)
print 'top answer and their counts:'
print '\n'.join(map(str,cw[:20]))
vocab = []
for i in range(params['num_ans']):
vocab.append(cw[i][1])
return vocab[:params['num_ans']]
def encode_question(dataset, params, wtoi):
max_length = params['max_length']
N = len(dataset)
label_arrays = np.zeros((N, max_length), dtype='uint32')
label_length = np.zeros(N, dtype='uint32')
question_id = np.zeros(N, dtype='uint32')
question_counter = 0
for i,ques in enumerate(dataset):
question_id[question_counter] = ques['ques_id']
label_length[question_counter] = min(max_length, len(ques['final_question'])) # record the length of this sequence
question_counter += 1
for k,w in enumerate(ques['final_question']):
if k < max_length:
label_arrays[i,k] = wtoi[w]
return label_arrays, label_length, question_id
def encode_answer(dataset, atoi, num_ans):
N = len(dataset)
ans_arrays = np.zeros(N, dtype='uint32')
for i, ques in enumerate(dataset):
    ans_arrays[i] = atoi.get(ques['ans'], num_ans+1) # num_ans+1 marks an answer outside the top-answer vocab.
return ans_arrays
def encode_mc_answer(dataset, atoi, num_ans):
N = len(dataset)
mc_ans_arrays = np.zeros((N, 18), dtype='uint32')
for i, ques in enumerate(dataset):
for j, ans in enumerate(ques['MC_ans']):
mc_ans_arrays[i,j] = atoi.get(ans, num_ans+1)
return mc_ans_arrays
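# e.g. each VQA question ships 18 multiple-choice candidates, hence the fixed
# (N, 18) shape above; candidates outside the top-answer vocab map to num_ans+1.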
def filter_question(dataset, atoi):
new_dataset = []
for i, ques in enumerate(dataset):
if ques['ans'] in atoi:
new_dataset.append(ques)
print 'question number reduce from %d to %d '%(len(dataset), len(new_dataset))
return new_dataset
def get_unqiue_img(dataset):
count_img = {}
N = len(dataset)
img_pos = np.zeros(N, dtype='uint32')
ques_pos_tmp = {}
for ques in dataset:
count_img[ques['img_path']] = count_img.get(ques['img_path'], 0) + 1
unique_img = [w for w,n in count_img.iteritems()]
  imgtoi = {w:i+1 for i,w in enumerate(unique_img)} # add one for torch, since torch starts from 1.
for i, ques in enumerate(dataset):
idx = imgtoi.get(ques['img_path'])
img_pos[i] = idx
if idx-1 not in ques_pos_tmp:
ques_pos_tmp[idx-1] = []
ques_pos_tmp[idx-1].append(i+1)
img_N = len(ques_pos_tmp)
ques_pos = np.zeros((img_N,3), dtype='uint32')
ques_pos_len = np.zeros(img_N, dtype='uint32')
for idx, ques_list in ques_pos_tmp.iteritems():
ques_pos_len[idx] = len(ques_list)
for j in range(len(ques_list)):
ques_pos[idx][j] = ques_list[j]
return unique_img, img_pos, ques_pos, ques_pos_len
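# e.g. (a sketch) two questions over the same image yield:
#   unique_img == ['train2014/COCO_train2014_000000000009.jpg']
#   img_pos == [1, 1]        # 1-indexed image slot per question (for torch)
#   ques_pos == [[1, 2, 0]]  # up to 3 question slots per image
#   ques_pos_len == [2]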
def main(params):
# create output h5 file for training set.
f = h5py.File(params['output_h5'], "w")
if params['input_json']=='':
dataset_train = json.load(open(params['input_train_json'], 'r'))
#dataset_train = dataset_train[:5000]
#dataset_test = dataset_test[:5000]
# get top answers
top_ans = get_top_answers(dataset_train, params)
atoi = {w:i+1 for i,w in enumerate(top_ans)}
atoi['error'] = params['num_ans']+1
itoa = {i+1:w for i,w in enumerate(top_ans)}
itoa[params['num_ans']+1] = 'error'
# filter question, which isn't in the top answers.
dataset_train = filter_question(dataset_train, atoi)
# tokenization and preprocessing training question
dataset_train = prepro_question(dataset_train, params)
# create the vocab for question
dataset_train, vocab = build_vocab_question(dataset_train, params)
itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
ques_train, ques_length_train, question_id_train = encode_question(dataset_train, params, wtoi)
# get the unique image for train
unique_img_train, img_pos_train, ques_pos_train, ques_pos_len_train = get_unqiue_img(dataset_train)
# get the answer encoding.
ans_train = encode_answer(dataset_train, atoi, params['num_ans'])
MC_ans_train = encode_mc_answer(dataset_train, atoi, params['num_ans'])
N_train = len(dataset_train)
split_train = np.zeros(N_train)
f.create_dataset("ques_train", dtype='uint32', data=ques_train)
f.create_dataset("answers", dtype='uint32', data=ans_train)
f.create_dataset("ques_id_train", dtype='uint32', data=question_id_train)
f.create_dataset("img_pos_train", dtype='uint32', data=img_pos_train)
f.create_dataset("ques_pos_train", dtype='uint32', data=ques_pos_train)
f.create_dataset("ques_pos_len_train", dtype='uint32', data=ques_pos_len_train)
f.create_dataset("split_train", dtype='uint32', data=split_train)
f.create_dataset("ques_len_train", dtype='uint32', data=ques_length_train)
f.create_dataset("MC_ans_train", dtype='uint32', data=MC_ans_train)
else:
loaded_train_data = json.load(open(params['input_json'], 'r'))
itow = loaded_train_data['ix_to_word']
wtoi = {v: k for k, v in itow.iteritems()}
itoa = loaded_train_data['ix_to_ans']
atoi = {v: k for k, v in itoa.iteritems()}
unique_img_train = loaded_train_data['unique_img_train']
dataset_test = json.load(open(params['input_test_json'], 'r'))
# tokenization and preprocessing testing question
dataset_test = prepro_question(dataset_test, params)
dataset_test = apply_vocab_question(dataset_test, wtoi)
ques_test, ques_length_test, question_id_test = encode_question(dataset_test, params, wtoi)
# get the unique image for test
unique_img_test, img_pos_test, ques_pos_test, ques_pos_len_test = get_unqiue_img(dataset_test)
if not params['test']:
ans_test = encode_answer(dataset_test, atoi, params['num_ans']) #also comment line 238
MC_ans_test = encode_mc_answer(dataset_test, atoi, params['num_ans'])
# get the split
N_test = len(dataset_test)
# since the train image is already suffled, we just use the last val_num image as validation
# train = 0, val = 1, test = 2
#split_train[N_train - params['val_num']: N_train] = 1
split_test = np.zeros(N_test)
split_test[:] = 2
f.create_dataset("ques_test", dtype='uint32', data=ques_test)
if not params['test']:
f.create_dataset("ans_test", dtype='uint32', data=ans_test)
f.create_dataset("ques_id_test", dtype='uint32', data=question_id_test)
f.create_dataset("img_pos_test", dtype='uint32', data=img_pos_test)
f.create_dataset("ques_pos_test", dtype='uint32', data=ques_pos_test)
f.create_dataset("ques_pos_len_test", dtype='uint32', data=ques_pos_len_test)
f.create_dataset("split_test", dtype='uint32', data=split_test)
f.create_dataset("ques_len_test", dtype='uint32', data=ques_length_test)
f.create_dataset("MC_ans_test", dtype='uint32', data=MC_ans_test)
f.close()
print 'wrote ', params['output_h5']
# create output json file
out = {}
out['ix_to_word'] = itow # encode the (1-indexed) vocab
out['ix_to_ans'] = itoa
out['unique_img_train'] = unique_img_train
  out['unique_img_test'] = unique_img_test
json.dump(out, open(params['output_json'], 'w'))
print 'wrote ', params['output_json']
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_train_json', default='vqa_raw_train.json', help='input json file to process into hdf5')
parser.add_argument('--input_test_json', default='vqa_raw_test.json', help='input json file to process into hdf5')
parser.add_argument('--num_ans', default=3000, type=int, help='number of top answers for the final classifications.')
parser.add_argument('--input_json', default='' ,help='input existing train perprocess, usefull to process a new test file')
parser.add_argument('--output_json', default='vqa_data_prepro.json', help='output json file')
parser.add_argument('--output_h5', default='vqa_data_prepro.h5', help='output h5 file')
# options
parser.add_argument('--max_length', default=15, type=int, help='max length of a caption, in number of words. captions longer than this get clipped.')
parser.add_argument('--word_count_threshold', default=0, type=int, help='only words that occur more than this number of times will be put in vocab')
parser.add_argument('--token_method', default='nltk', help='token method, nltk is much more slower.')
parser.add_argument('--test', default=0 ,type=int, help='token method, nltk is much more slower.')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print 'parsed input parameters:'
print json.dumps(params, indent = 2)
main(params)
| 11,897 | 37.882353 | 153 |
py
|
MCEdit-Unified
|
MCEdit-Unified-master/setuptest.py
|
try:
from pymclevel import _nbt
print "Succesfully imported _nbt"
except ImportError as err:
print "An error occurred while importing _nbt.c ({0})".format(err)
| 172 | 27.833333 | 70 |
py
|
MCEdit-Unified
|
MCEdit-Unified-master/renderer.py
|
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
"""
renderer.py
What is going on in this file?
Here is an attempt to show the relationships between classes and
their responsibilities
MCRenderer:
has "position", "origin", optionally "viewFrustum"
Loads chunks near position+origin, draws chunks offset by origin
Calls visible on viewFrustum to exclude chunks
(+) ChunkRenderer
Has "chunkPosition", "invalidLayers", "lists"
One per chunk and detail level.
Creates display lists from BlockRenderers
(*) BlockRenderer
Has "vertexArrays"
One per block type, plus one for low detail and one for Entity
BlockRenderer documentation
Each block renderer renders particular block types or entities.
The block renderer chosen to draw a given block type (by ID) is the
block renderer class that is lowest in the list within the
makeRenderstates method. Each blockRenderer is assigned a materialIndex,
and the blockMaterial parameter gives the material index of each block
in the chunk, and therefore which block renderer is used to render it.
Vertex arrays are arrays of vertices (groups of six elements), and
every group of 4 vertices is a quad that will be drawn.
Before the vertex arrays are drawn, `.ravel()` is called
(flattening them to one-dimensional arrays).
The vertex arrays draw quads; each vertex holds elements
with the format:
0:3 index - xyz values
3:5 index - st(texture coordinates) values
5 index - rgba(colour) value
Note: each element of the rgba value is a uint8 (the 4 colour
elements make up 32 bits); to view or change the values, use
`.view('uint8')` to reinterpret the array as uint8.
To implement a block renderer either makeVertices or makeFaceVertices
needs to be implemented. The base class BlockRenderer implements
makeVertices in terms of makeFaceVertices, by iterating over the different
face directions.
The makeVertices function is called on the block renderer to get a
list of vertexArrays that will draw the blocks for a 16x16x16 chunk.
parameters:
all parameters are in xzy order
facingBlockIndices:
list of 6, (16, 16, 16) numpy boolean arrays
        each array corresponds to the blocks within the chunk that
        have their face exposed in that direction. The direction is the
index into the list defined by the constants in pymclevel/faces.py
This is used to only draw exposed faces
blocks:
(16, 16, 16) numpy array of the id of blocks in the chunk
blockMaterials:
(16, 16, 16) numpy array of the material index of each block in the chunk
each material refers to a different block renderer to get the
material index for this block renderer `self.materialIndex`
blockData:
(16, 16, 16) numpy array of the metadata value of each block
in the chunk
areaBlockLights:
        (18, 18, 18) numpy array of light values (max of block light and
        skylight) of the chunk and a 1-block 'border' around it.
texMap:
function that takes id, data value and directions
and returns texture coordinates
returns a list of vertex arrays, in the form of float32 numpy arrays,
for this chunk.
The makeFaceVertices function gets a vertex array for a particular face.
parameters:
all parameters are in xzy order
direction:
the face defined by constants in pymclevel/faces.py
materialIndices:
        list of (x, z, y) indices of blocks in this chunk that
        are of this material (in blocktypes).
exposedFaceIndices:
        list of (x, z, y) indices of blocks that have an exposed face
in the direction `direction`.
blocks:
(16, 16, 16) numpy array of the id of blocks in the chunk
blockData:
(16, 16, 16) numpy array of the metadata value of each block
in the chunk
blockLights:
        (16, 16, 16) numpy array of light values (max of block light and
        skylight) of the blocks in the chunk.
facingBlockLight:
(16, 16, 16) numpy array of light values(max of block light and
skylight) of the blocks just in front of the face.
i.e.
if direction = pymclevel.faces.FaceXDecreasing
facingBlockLight[1, 0, 0] refers to the light level
at position (0, 0, 0) within the chunk.
texMap:
function that takes id, data value and directions
and returns texture coordinates
returns a vertex array, as a float32 numpy array, for this face.
Fields
blocktypes / getBlocktypes(mats)
list of block ids the block renderer handles
detailLevels
    what detail levels the renderer renders at
layer
    what layer this block renderer is in
renderstate
the render state this block renderer uses
Models:
There are also several functions that make it easy to translate
json models to block renderers.
makeVertexTemplatesFromJsonModel:
creates a template from information that is in json models
rotateTemplate:
rotate templates. This is equivalent to the rotation in block states files.
makeVerticesFromModel:
creates function based on templates to be used for makeVertices function in block renderer.
Helper functions:
self.getMaterialIndices(blockMaterials):
    Given blockMaterials (a parameter of makeVertices) it returns a
    boolean mask of the blocks in the chunk that are of this block
    renderer's material (blocktypes).
self.makeTemplate(direction, blockIndices):
get a vertex array filled with default values for face `direction`
and for the block relating to `blockIndices`
makeVertexTemplates(xmin=0, ymin=0, zmin=0, xmax=1, ymax=1, zmax=1):
returns a numpy array with dimensions (6, 4, 6) filled with values to create
a vertex array for a cube.
For Entities:
renderers for entities are similar to blocks but:
- they extend EntityRendererGeneric class
- they are added to the list in calcFacesForChunkRenderer method
- makeChunkVertices(chunk) where chunk is a chunk object
is called rather than makeVertices
there is also a helper method _computeVertices(positions, colors, offset, chunkPosition):
parameters:
positions
locations of entity
colors
colors of entity boxes
offset
whether to offset the box
chunkPosition
chunk position of the chunk
creates a vertex array that draws entity boxes
"""
from collections import defaultdict, deque
from datetime import datetime, timedelta
from depths import DepthOffset
from glutils import gl, Texture
from albow.resource import _2478aq_heot
import logging
import numpy
from OpenGL import GL
import pymclevel
from pymclevel.materials import alphaMaterials, pocketMaterials
import sys
import traceback
from config import config
# import time
def get_materials():
    # Rebind the module-level shortcuts once the level's real materials exist;
    # without the global statement this function would only set locals.
    global alphaMaterials, pocketMaterials
    alphaMaterials = pymclevel.materials.alphaMaterials
    pocketMaterials = pymclevel.materials.pocketMaterials
def chunkMarkers(chunkSet):
""" Returns a mapping { size: [position, ...] } for different powers of 2
as size.
"""
sizedChunks = defaultdict(list)
size = 1
def all4(cx, cz):
cx &= ~size
cz &= ~size
return [(cx, cz), (cx + size, cz), (cx + size, cz + size), (cx, cz + size)]
# lastsize = 6
size = 1
while True:
nextsize = size << 1
chunkSet = set(chunkSet)
while len(chunkSet):
cx, cz = chunkSet.pop()
chunkSet.add((cx, cz))
o = all4(cx, cz)
others = set(o).intersection(chunkSet)
if len(others) == 4:
sizedChunks[nextsize].append(o[0]) # Possibly cache append?
for c in others:
chunkSet.discard(c)
else:
for c in others:
sizedChunks[size].append(c) # Possibly cache append?
chunkSet.discard(c)
if len(sizedChunks[nextsize]):
chunkSet = set(sizedChunks[nextsize])
sizedChunks[nextsize] = []
size <<= 1
else:
break
return sizedChunks
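# e.g. (a sketch) four chunks forming an aligned 2x2 square collapse into a
# single size-2 marker at their even corner:
#   chunkMarkers([(0, 0), (0, 1), (1, 0), (1, 1)])[2] == [(0, 0)]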
class ChunkRenderer(object):
maxlod = 2
minlod = 0
def __init__(self, renderer, chunkPosition):
self.renderer = renderer
self.blockRenderers = []
self.detailLevel = 0
self.invalidLayers = set(Layer.AllLayers)
self.chunkPosition = chunkPosition
self.bufferSize = 0
self.renderstateLists = None
@property
def visibleLayers(self):
return self.renderer.visibleLayers
def forgetDisplayLists(self, states=None):
if self.renderstateLists is not None:
# print "Discarded {0}, gained {1} bytes".format(self.chunkPosition,self.bufferSize)
for k in states or self.renderstateLists.iterkeys():
a = self.renderstateLists.get(k, [])
# print a
for i in a:
gl.glDeleteLists(i, 1)
            if states:
                for k in states:
                    del self.renderstateLists[k]
else:
self.renderstateLists = None
self.needsRedisplay = True
self.renderer.discardMasterList()
def debugDraw(self):
for blockRenderer in self.blockRenderers:
blockRenderer.drawArrays(self.chunkPosition, False)
def makeDisplayLists(self):
if not self.needsRedisplay:
return
self.forgetDisplayLists()
if not self.blockRenderers:
return
lists = defaultdict(list)
showRedraw = self.renderer.showRedraw
if not (showRedraw and self.needsBlockRedraw):
GL.glEnableClientState(GL.GL_COLOR_ARRAY)
renderers = self.blockRenderers
for blockRenderer in renderers:
if self.detailLevel not in blockRenderer.detailLevels:
continue
if blockRenderer.layer not in self.visibleLayers:
continue
l = blockRenderer.makeArrayList(self.chunkPosition, self.needsBlockRedraw and showRedraw)
lists[blockRenderer.renderstate].append(l)
if not (showRedraw and self.needsBlockRedraw):
GL.glDisableClientState(GL.GL_COLOR_ARRAY)
self.needsRedisplay = False
self.renderstateLists = lists
@property
def needsBlockRedraw(self):
return Layer.Blocks in self.invalidLayers
def invalidate(self, layers=None):
if layers is None:
layers = Layer.AllLayers
if layers:
layers = set(layers)
self.invalidLayers.update(layers)
blockRenderers = [br for br in self.blockRenderers
if br.layer is Layer.Blocks
or br.layer not in layers]
if len(blockRenderers) < len(self.blockRenderers):
self.forgetDisplayLists()
self.blockRenderers = blockRenderers
if self.renderer.showRedraw and Layer.Blocks in layers:
self.needsRedisplay = True
def calcFaces(self):
minlod = self.renderer.detailLevelForChunk(self.chunkPosition)
minlod = min(minlod, self.maxlod)
if self.detailLevel != minlod:
self.forgetDisplayLists()
self.detailLevel = minlod
self.invalidLayers.add(Layer.Blocks)
# discard the standard detail renderers
if minlod > 0:
blockRenderers = []
append = blockRenderers.append
for br in self.blockRenderers:
if br.detailLevels != (0,):
append(br)
self.blockRenderers = blockRenderers
if self.renderer.chunkCalculator:
for _ in self.renderer.chunkCalculator.calcFacesForChunkRenderer(self):
yield
else:
raise StopIteration
def vertexArraysDone(self):
bufferSize = 0
for br in self.blockRenderers:
bufferSize += br.bufferSize()
if self.renderer.alpha != 0xff:
br.setAlpha(self.renderer.alpha)
self.bufferSize = bufferSize
self.invalidLayers = set()
self.needsRedisplay = True
self.renderer.invalidateMasterList()
needsRedisplay = False
@property
def done(self):
return not self.invalidLayers
_XYZ = numpy.s_[..., 0:3]
_ST = numpy.s_[..., 3:5]
_XYZST = numpy.s_[..., :5]
_RGBA = numpy.s_[..., 20:24]
_RGB = numpy.s_[..., 20:23]
_A = numpy.s_[..., 23]
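# Convenience slices into the (..., 6) float32 vertex layout documented above:
# floats 0:3 are xyz and 3:5 are st, while float 5 packs the four rgba bytes,
# hence byte offsets 20:24 (20:23 rgb, 23 alpha) when viewed as uint8.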
def makeVertexTemplatesFromJsonModel(fromVertices, toVertices, uv):
"""
This is similar to makeVertexTemplates but is a more convenient
when reading off of the json model files.
:param fromVertices: from
:param toVertices: to
:param uv: keywords uv map
:return: template for a cube
"""
xmin = fromVertices[0] / 16.
xmax = toVertices[0] / 16.
ymin = fromVertices[1] / 16.
ymax = toVertices[1] / 16.
zmin = fromVertices[2] / 16.
zmax = toVertices[2] / 16.
return numpy.array([
# FaceXIncreasing:
[[xmax, ymin, zmax, uv["east"][0], uv["east"][3], 0x0b],
[xmax, ymin, zmin, uv["east"][2], uv["east"][3], 0x0b],
[xmax, ymax, zmin, uv["east"][2], uv["east"][1], 0x0b],
[xmax, ymax, zmax, uv["east"][0], uv["east"][1], 0x0b],
],
# FaceXDecreasing:
[[xmin, ymin, zmin, uv["west"][0], uv["west"][3], 0x0b],
[xmin, ymin, zmax, uv["west"][2], uv["west"][3], 0x0b],
[xmin, ymax, zmax, uv["west"][2], uv["west"][1], 0x0b],
[xmin, ymax, zmin, uv["west"][0], uv["west"][1], 0x0b]],
# FaceYIncreasing:
[[xmin, ymax, zmin, uv["up"][0], uv["up"][1], 0x11], # ne
[xmin, ymax, zmax, uv["up"][0], uv["up"][3], 0x11], # nw
[xmax, ymax, zmax, uv["up"][2], uv["up"][3], 0x11], # sw
[xmax, ymax, zmin, uv["up"][2], uv["up"][1], 0x11]], # se
# FaceYDecreasing:
[[xmin, ymin, zmin, uv["down"][0], uv["down"][3], 0x08],
[xmax, ymin, zmin, uv["down"][2], uv["down"][3], 0x08],
[xmax, ymin, zmax, uv["down"][2], uv["down"][1], 0x08],
[xmin, ymin, zmax, uv["down"][0], uv["down"][1], 0x08]],
# FaceZIncreasing:
[[xmin, ymin, zmax, uv["south"][0], uv["south"][3], 0x0d],
[xmax, ymin, zmax, uv["south"][2], uv["south"][3], 0x0d],
[xmax, ymax, zmax, uv["south"][2], uv["south"][1], 0x0d],
[xmin, ymax, zmax, uv["south"][0], uv["south"][1], 0x0d]],
# FaceZDecreasing:
[[xmax, ymin, zmin, uv["north"][0], uv["north"][3], 0x0d],
[xmin, ymin, zmin, uv["north"][2], uv["north"][3], 0x0d],
[xmin, ymax, zmin, uv["north"][2], uv["north"][1], 0x0d],
[xmax, ymax, zmin, uv["north"][0], uv["north"][1], 0x0d],
],
])
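# Example (a sketch): a full cube whose faces each map the whole 16x16 tile:
#   uv = dict((f, (0, 0, 16, 16)) for f in
#             ("north", "south", "east", "west", "up", "down"))
#   makeVertexTemplatesFromJsonModel((0, 0, 0), (16, 16, 16), uv).shape == (6, 4, 6)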
def rotateTemplate(template, x=0, y=0):
"""
Rotate template around x-axis and then around
    y-axis. Both angles must be multiples of 90.
TODO: Add ability for multiples of 45
"""
template = template.copy()
for _ in xrange(0, x, 90):
# y -> -z and z -> y
template[..., (1, 2)] = template[..., (2, 1)]
template[..., 2] -= 0.5
template[..., 2] *= -1
template[..., 2] += 0.5
for _ in xrange(0, y, 90):
# z -> -x and x -> z
template[..., (0, 2)] = template[..., (2, 0)]
template[..., 0] -= 0.5
template[..., 0] *= -1
template[..., 0] += 0.5
return template
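# e.g. rotateTemplate(template, y=90) maps each vertex (x, z) -> (1 - z, x),
# a quarter turn about the vertical axis, keeping coordinates on the unit cube.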
def makeVerticesFromModel(templates, dataMask=0):
"""
Returns a function that creates vertex arrays.
This produces vertex arrays based on the passed
templates. This doesn't cull any faces based on
    whether they are exposed.
:param templates: list of templates to draw
:param dataMask: mask to mask the data
"""
if isinstance(templates, list):
templates = numpy.array(templates)
if templates.shape == (6, 4, 6):
templates = numpy.array([templates])
if len(templates.shape) == 4:
templates = templates[numpy.newaxis, ...]
elements = templates.shape[0]
def makeVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
mask = self.getMaterialIndices(blockMaterials)
blockIndices = mask.nonzero()
yield
data = blockData[mask]
data &= dataMask
self.vertexArrays = []
append = self.vertexArrays.append
for i in xrange(elements):
vertexArray = numpy.zeros((len(blockIndices[0]), 6, 4, 6), dtype='float32')
for indicies in xrange(3):
dimension = (0, 2, 1)[indicies]
vertexArray[..., indicies] = blockIndices[dimension][:, numpy.newaxis,
numpy.newaxis] # xxx swap z with y using ^
vertexArray[..., 0:5] += templates[i, data][..., 0:5]
vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices] & 15)[..., numpy.newaxis, :]
vertexArray.view('uint8')[_RGB] = templates[i, data][..., 5][..., numpy.newaxis]
vertexArray.view('uint8')[_A] = 0xFF
vertexArray.view('uint8')[_RGB] *= areaBlockLights[1:-1, 1:-1, 1:-1][blockIndices][
..., numpy.newaxis, numpy.newaxis, numpy.newaxis]
vertexArray.shape = (vertexArray.shape[0] * 6, 4, 6)
yield
append(vertexArray)
return makeVertices
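# Typical wiring (a sketch with hypothetical names): build templates per data
# value and install the generated function on a renderer subclass:
#   class MyJsonBlockRenderer(BlockRenderer):
#       blocktypes = [someBlockID]  # hypothetical id list
#       makeVertices = makeVerticesFromModel([template0, template1], dataMask=0x1)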
def makeVertexTemplates(xmin=0, ymin=0, zmin=0, xmax=1, ymax=1, zmax=1):
return numpy.array([
# FaceXIncreasing:
[[xmax, ymin, zmax, (zmin * 16), 16 - (ymin * 16), 0x0b],
[xmax, ymin, zmin, (zmax * 16), 16 - (ymin * 16), 0x0b],
[xmax, ymax, zmin, (zmax * 16), 16 - (ymax * 16), 0x0b],
[xmax, ymax, zmax, (zmin * 16), 16 - (ymax * 16), 0x0b],
],
# FaceXDecreasing:
[[xmin, ymin, zmin, (zmin * 16), 16 - (ymin * 16), 0x0b],
[xmin, ymin, zmax, (zmax * 16), 16 - (ymin * 16), 0x0b],
[xmin, ymax, zmax, (zmax * 16), 16 - (ymax * 16), 0x0b],
[xmin, ymax, zmin, (zmin * 16), 16 - (ymax * 16), 0x0b]],
# FaceYIncreasing:
[[xmin, ymax, zmin, xmin * 16, 16 - (zmax * 16), 0x11], # ne
[xmin, ymax, zmax, xmin * 16, 16 - (zmin * 16), 0x11], # nw
[xmax, ymax, zmax, xmax * 16, 16 - (zmin * 16), 0x11], # sw
[xmax, ymax, zmin, xmax * 16, 16 - (zmax * 16), 0x11]], # se
# FaceYDecreasing:
[[xmin, ymin, zmin, xmin * 16, 16 - (zmax * 16), 0x08],
[xmax, ymin, zmin, xmax * 16, 16 - (zmax * 16), 0x08],
[xmax, ymin, zmax, xmax * 16, 16 - (zmin * 16), 0x08],
[xmin, ymin, zmax, xmin * 16, 16 - (zmin * 16), 0x08]],
# FaceZIncreasing:
[[xmin, ymin, zmax, xmin * 16, 16 - (ymin * 16), 0x0d],
[xmax, ymin, zmax, xmax * 16, 16 - (ymin * 16), 0x0d],
[xmax, ymax, zmax, xmax * 16, 16 - (ymax * 16), 0x0d],
[xmin, ymax, zmax, xmin * 16, 16 - (ymax * 16), 0x0d]],
# FaceZDecreasing:
[[xmax, ymin, zmin, xmin * 16, 16 - (ymin * 16), 0x0d],
[xmin, ymin, zmin, xmax * 16, 16 - (ymin * 16), 0x0d],
[xmin, ymax, zmin, xmax * 16, 16 - (ymax * 16), 0x0d],
[xmax, ymax, zmin, xmin * 16, 16 - (ymax * 16), 0x0d],
],
])
elementByteLength = 24
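# 24 bytes per vertex = 6 float32 fields (x, y, z, s, t, packed rgba) * 4 bytes.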
def createPrecomputedVertices():
height = 16
precomputedVertices = [numpy.zeros(shape=(16, 16, height, 4, 6), # x,y,z,s,t,rg, ba
dtype='float32') for d in faceVertexTemplates]
xArray = numpy.arange(16)[:, numpy.newaxis, numpy.newaxis, numpy.newaxis]
zArray = numpy.arange(16)[numpy.newaxis, :, numpy.newaxis, numpy.newaxis]
yArray = numpy.arange(height)[numpy.newaxis, numpy.newaxis, :, numpy.newaxis]
for dir in xrange(len(faceVertexTemplates)):
precomputedVertices[dir][_XYZ][..., 0] = xArray
precomputedVertices[dir][_XYZ][..., 1] = yArray
precomputedVertices[dir][_XYZ][..., 2] = zArray
precomputedVertices[dir][_XYZ] += faceVertexTemplates[dir][..., 0:3] # xyz
precomputedVertices[dir][_ST] = faceVertexTemplates[dir][..., 3:5] # s
precomputedVertices[dir].view('uint8')[_RGB] = faceVertexTemplates[dir][..., 5, numpy.newaxis]
precomputedVertices[dir].view('uint8')[_A] = 0xff
return precomputedVertices
faceVertexTemplates = makeVertexTemplates()
class ChunkCalculator(object):
cachedTemplate = None
cachedTemplateHeight = 0
whiteLight = numpy.array([[[15] * 16] * 16] * 16, numpy.uint8)
precomputedVertices = createPrecomputedVertices()
def __init__(self, level):
if not hasattr(alphaMaterials, 'Stone'):
get_materials()
self.stoneid = stoneid = alphaMaterials.Stone.ID
self.hiddenOreMaterials[alphaMaterials.Dirt.ID] = stoneid
self.hiddenOreMaterials[alphaMaterials.Grass.ID] = stoneid
self.hiddenOreMaterials[alphaMaterials.Sand.ID] = stoneid
self.hiddenOreMaterials[alphaMaterials.Gravel.ID] = stoneid
self.hiddenOreMaterials[alphaMaterials.Netherrack.ID] = stoneid
self.level = level
self.makeRenderstates(level.materials)
# del xArray, zArray, yArray
self.nullVertices = numpy.zeros((0,) * len(self.precomputedVertices[0].shape),
dtype=self.precomputedVertices[0].dtype)
config.settings.fastLeaves.addObserver(self)
config.settings.roughGraphics.addObserver(self)
class renderstatePlain(object):
@classmethod
def bind(cls):
pass
@classmethod
def release(cls):
pass
class renderstateVines(object):
@classmethod
def bind(cls):
GL.glDisable(GL.GL_CULL_FACE)
GL.glEnable(GL.GL_ALPHA_TEST)
@classmethod
def release(cls):
GL.glEnable(GL.GL_CULL_FACE)
GL.glDisable(GL.GL_ALPHA_TEST)
class renderstateLowDetail(object):
@classmethod
def bind(cls):
GL.glDisable(GL.GL_CULL_FACE)
GL.glDisable(GL.GL_TEXTURE_2D)
@classmethod
def release(cls):
GL.glEnable(GL.GL_CULL_FACE)
GL.glEnable(GL.GL_TEXTURE_2D)
class renderstateAlphaTest(object):
@classmethod
def bind(cls):
GL.glEnable(GL.GL_ALPHA_TEST)
@classmethod
def release(cls):
GL.glDisable(GL.GL_ALPHA_TEST)
class _renderstateAlphaBlend(object):
@classmethod
def bind(cls):
GL.glEnable(GL.GL_BLEND)
@classmethod
def release(cls):
GL.glDisable(GL.GL_BLEND)
class renderstateWater(_renderstateAlphaBlend):
pass
class renderstateIce(_renderstateAlphaBlend):
pass
class renderstateEntity(object):
@classmethod
def bind(cls):
GL.glDisable(GL.GL_DEPTH_TEST)
# GL.glDisable(GL.GL_CULL_FACE)
GL.glDisable(GL.GL_TEXTURE_2D)
GL.glEnable(GL.GL_BLEND)
@classmethod
def release(cls):
GL.glEnable(GL.GL_DEPTH_TEST)
# GL.glEnable(GL.GL_CULL_FACE)
GL.glEnable(GL.GL_TEXTURE_2D)
GL.glDisable(GL.GL_BLEND)
renderstates = (
renderstatePlain,
renderstateVines,
renderstateLowDetail,
renderstateAlphaTest,
renderstateIce,
renderstateWater,
renderstateEntity,
)
def makeRenderstates(self, materials):
self.blockRendererClasses = [
GenericBlockRenderer,
LeafBlockRenderer,
PlantBlockRenderer,
TorchBlockRenderer,
WaterBlockRenderer,
SlabBlockRenderer,
]
if materials.name in ("Alpha", "Pocket"):
self.blockRendererClasses += [
RailBlockRenderer,
LadderBlockRenderer,
SnowBlockRenderer,
CarpetBlockRenderer,
CactusBlockRenderer,
PaneBlockRenderer,
CakeBlockRenderer,
DaylightBlockRenderer,
StandingSignRenderer,
WallSignBlockRenderer,
LeverBlockRenderer,
BedBlockRenderer,
EnchantingBlockRenderer,
RedstoneBlockRenderer,
IceBlockRenderer,
DoorRenderer,
ButtonRenderer,
TrapDoorRenderer,
FenceBlockRenderer,
FenceGateBlockRenderer,
StairBlockRenderer,
RepeaterBlockRenderer,
VineBlockRenderer,
PlateBlockRenderer,
EndRodRenderer,
# button, floor plate, door -> 1-cube features
# lever, sign, wall sign, stairs -> 2-cube features
# fence
# portal
]
self.materialMap = materialMap = numpy.zeros((pymclevel.materials.id_limit,), 'uint8')
materialMap[1:] = 1 # generic blocks
materialCount = 2
for br in self.blockRendererClasses[1:]: # skip generic blocks
# materialMap[br.getBlocktypes(materials)] = materialCount
materialMap[br(self).getBlocktypes(materials)] = materialCount
br.materialIndex = materialCount
materialCount += 1
self.exposedMaterialMap = numpy.array(materialMap)
self.addTransparentMaterials(self.exposedMaterialMap, materialCount)
def addTransparentMaterials(self, mats, materialCount):
logging.debug("renderer::ChunkCalculator: Dynamically adding transparent materials.")
for b in self.level.materials:
yaml = getattr(b, 'yaml', None)
if yaml is not None and yaml.get('opacity', 1) < 1:
logging.debug("Adding '%s'" % b)
mats[b.ID] = materialCount
materialCount += 1
logging.debug("renderer::ChunkCalculator: Transparent materials added.")
# don't show boundaries between dirt,grass,sand,gravel,or stone.
    # This hiddenOreMaterial definition is delayed until after the level is loaded, in order to get the exact ones from the game's versioned data.
hiddenOreMaterials = numpy.arange(pymclevel.materials.id_limit, dtype='uint16')
roughMaterials = numpy.ones((pymclevel.materials.id_limit,), dtype='uint8')
roughMaterials[0] = 0
def calcFacesForChunkRenderer(self, cr):
if not cr.invalidLayers:
return
lod = cr.detailLevel
cx, cz = cr.chunkPosition
level = cr.renderer.level
try:
chunk = level.getChunk(cx, cz)
except Exception as e:
if "Session lock lost" in e.message:
yield
return
logging.warn(u"Error reading chunk: %s", e)
traceback.print_exc()
yield
return
yield
brs = []
append = brs.append
classes = (
TileEntityRenderer,
MonsterRenderer,
ItemRenderer,
TileTicksRenderer,
TerrainPopulatedRenderer,
ChunkBorderRenderer,
LowDetailBlockRenderer,
OverheadBlockRenderer,
)
existingBlockRenderers = dict(((type(b), b) for b in cr.blockRenderers))
for blockRendererClass in classes:
if cr.detailLevel not in blockRendererClass.detailLevels:
continue
if blockRendererClass.layer not in cr.visibleLayers:
continue
if blockRendererClass.layer not in cr.invalidLayers:
if blockRendererClass in existingBlockRenderers:
append(existingBlockRenderers[blockRendererClass])
continue
br = blockRendererClass(self)
br.detailLevel = cr.detailLevel
for _ in br.makeChunkVertices(chunk):
yield
append(br)
blockRenderers = []
# Recalculate high detail blocks if needed, otherwise retain the high detail renderers
if lod == 0 and Layer.Blocks in cr.invalidLayers:
for _ in self.calcHighDetailFaces(cr, blockRenderers):
yield
else:
blockRenderers.extend(br for br in cr.blockRenderers if not isinstance(br, classes))
# Add the layer renderers
blockRenderers.extend(brs)
cr.blockRenderers = blockRenderers
cr.vertexArraysDone()
raise StopIteration
@staticmethod
def getNeighboringChunks(chunk):
cx, cz = chunk.chunkPosition
level = chunk.world
neighboringChunks = {}
for dir, dx, dz in ((pymclevel.faces.FaceXDecreasing, -1, 0),
(pymclevel.faces.FaceXIncreasing, 1, 0),
(pymclevel.faces.FaceZDecreasing, 0, -1),
(pymclevel.faces.FaceZIncreasing, 0, 1)):
if not level.containsChunk(cx + dx, cz + dz):
neighboringChunks[dir] = pymclevel.infiniteworld.ZeroChunk(level.Height)
else:
try:
neighboringChunks[dir] = level.getChunk(cx + dx, cz + dz)
except (EnvironmentError, pymclevel.mclevelbase.ChunkNotPresent, pymclevel.mclevelbase.ChunkMalformed):
neighboringChunks[dir] = pymclevel.infiniteworld.ZeroChunk(level.Height)
return neighboringChunks
@staticmethod
def getAreaBlocks(chunk, neighboringChunks):
chunkWidth, chunkLength, chunkHeight = chunk.Blocks.shape
areaBlocks = numpy.zeros((chunkWidth + 2, chunkLength + 2, chunkHeight + 2), numpy.uint16)
areaBlocks[1:-1, 1:-1, 1:-1] = chunk.Blocks
zeros = numpy.zeros((16, 16, 128), dtype=areaBlocks.dtype)
nb_fxd = neighboringChunks[pymclevel.faces.FaceXDecreasing].Blocks
if nb_fxd.shape[2] == chunkHeight / 2:
nb_fxd = numpy.concatenate((nb_fxd, zeros), axis=2)
areaBlocks[:1, 1:-1, 1:-1] = nb_fxd[-1:, :chunkLength,
:chunkHeight]
nb_fxi = neighboringChunks[pymclevel.faces.FaceXIncreasing].Blocks
if nb_fxi.shape[2] == chunkHeight / 2:
nb_fxi = numpy.concatenate((nb_fxi, zeros), axis=2)
areaBlocks[-1:, 1:-1, 1:-1] = nb_fxi[:1, :chunkLength,
:chunkHeight]
nb_fzd = neighboringChunks[pymclevel.faces.FaceZDecreasing].Blocks
if nb_fzd.shape[2] == chunkHeight / 2:
nb_fzd = numpy.concatenate((nb_fzd, zeros), axis=2)
areaBlocks[1:-1, :1, 1:-1] = nb_fzd[:chunkWidth, -1:,
:chunkHeight]
nb_fzi = neighboringChunks[pymclevel.faces.FaceZIncreasing].Blocks
if nb_fzi.shape[2] == chunkHeight / 2:
nb_fzi = numpy.concatenate((nb_fzi, zeros), axis=2)
areaBlocks[1:-1, -1:, 1:-1] = nb_fzi[:chunkWidth, :1,
:chunkHeight]
return areaBlocks
@staticmethod
def getFacingBlockIndices(areaBlocks, areaBlockMats):
facingBlockIndices = [None] * 6
exposedFacesX = (areaBlockMats[:-1, 1:-1, 1:-1] != areaBlockMats[1:, 1:-1, 1:-1])
facingBlockIndices[pymclevel.faces.FaceXDecreasing] = exposedFacesX[:-1]
facingBlockIndices[pymclevel.faces.FaceXIncreasing] = exposedFacesX[1:]
exposedFacesZ = (areaBlockMats[1:-1, :-1, 1:-1] != areaBlockMats[1:-1, 1:, 1:-1])
facingBlockIndices[pymclevel.faces.FaceZDecreasing] = exposedFacesZ[:, :-1]
facingBlockIndices[pymclevel.faces.FaceZIncreasing] = exposedFacesZ[:, 1:]
exposedFacesY = (areaBlockMats[1:-1, 1:-1, :-1] != areaBlockMats[1:-1, 1:-1, 1:])
facingBlockIndices[pymclevel.faces.FaceYDecreasing] = exposedFacesY[:, :, :-1]
facingBlockIndices[pymclevel.faces.FaceYIncreasing] = exposedFacesY[:, :, 1:]
return facingBlockIndices
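    # e.g. a block's FaceXDecreasing face is exposed exactly when its material
    # index differs from the block one step in -X (and symmetrically for the
    # other five directions).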
def getAreaBlockLights(self, chunk, neighboringChunks):
chunkWidth, chunkLength, chunkHeight = chunk.Blocks.shape
lights = chunk.BlockLight
skyLight = chunk.SkyLight
finalLight = self.whiteLight
if lights is not None:
finalLight = lights
if skyLight is not None:
finalLight = numpy.maximum(skyLight, lights)
areaBlockLights = numpy.ones((chunkWidth + 2, chunkLength + 2, chunkHeight + 2), numpy.uint8)
areaBlockLights[:] = 15
areaBlockLights[1:-1, 1:-1, 1:-1] = finalLight
zeros = numpy.zeros((16, 16, 128), dtype=areaBlockLights.dtype)
        # Fold each neighbouring chunk's max(skyLight, blockLight) into the
        # border cells of areaBlockLights below; half-height neighbours are
        # padded with zeros first.
fxd = neighboringChunks[pymclevel.faces.FaceXDecreasing]
fxi = neighboringChunks[pymclevel.faces.FaceXIncreasing]
fzd = neighboringChunks[pymclevel.faces.FaceZDecreasing]
fzi = neighboringChunks[pymclevel.faces.FaceZIncreasing]
fxd_skyLight = fxd.SkyLight
fxi_skyLight = fxi.SkyLight
fzd_skyLight = fzd.SkyLight
fzi_skyLight = fzi.SkyLight
fxd_blockLight = fxd.BlockLight
fxi_blockLight = fxi.BlockLight
fzd_blockLight = fzd.BlockLight
fzi_blockLight = fzi.BlockLight
if fxd_skyLight.shape[2] == chunkHeight / 2:
fxd_skyLight = numpy.concatenate((fxd_skyLight, zeros), axis=2)
fxd_blockLight = numpy.concatenate((fxd_blockLight, zeros), axis=2)
if fxi_skyLight.shape[2] == chunkHeight / 2:
fxi_skyLight = numpy.concatenate((fxi_skyLight, zeros), axis=2)
fxi_blockLight = numpy.concatenate((fxi_blockLight, zeros), axis=2)
if fzd_skyLight.shape[2] == chunkHeight / 2:
fzd_skyLight = numpy.concatenate((fzd_skyLight, zeros), axis=2)
fzd_blockLight = numpy.concatenate((fzd_blockLight, zeros), axis=2)
if fzi_skyLight.shape[2] == chunkHeight / 2:
fzi_skyLight = numpy.concatenate((fzi_skyLight, zeros), axis=2)
fzi_blockLight = numpy.concatenate((fzi_blockLight, zeros), axis=2)
numpy.maximum(fxd_skyLight[-1:, :chunkLength, :chunkHeight],
fxd_blockLight[-1:, :chunkLength, :chunkHeight],
areaBlockLights[0:1, 1:-1, 1:-1])
numpy.maximum(fxi_skyLight[:1, :chunkLength, :chunkHeight],
fxi_blockLight[:1, :chunkLength, :chunkHeight],
areaBlockLights[-1:, 1:-1, 1:-1])
numpy.maximum(fzd_skyLight[:chunkWidth, -1:, :chunkHeight],
fzd_blockLight[:chunkWidth, -1:, :chunkHeight],
areaBlockLights[1:-1, 0:1, 1:-1])
numpy.maximum(fzi_skyLight[:chunkWidth, :1, :chunkHeight],
fzi_blockLight[:chunkWidth, :1, :chunkHeight],
areaBlockLights[1:-1, -1:, 1:-1])
minimumLight = 4
numpy.clip(areaBlockLights, minimumLight, 16, areaBlockLights)
return areaBlockLights
def calcHighDetailFaces(self, cr, blockRenderers):
""" calculate the geometry for a chunk renderer from its blockMats, data,
and lighting array. fills in the cr's blockRenderers with verts
for each block facing and material"""
# chunkBlocks and chunkLights shall be indexed [x,z,y] to follow infdev's convention
cx, cz = cr.chunkPosition
level = cr.renderer.level
chunk = level.getChunk(cx, cz)
# if isinstance(chunk, pymclevel.level.FakeChunk):
# return
neighboringChunks = self.getNeighboringChunks(chunk)
areaBlocks = self.getAreaBlocks(chunk, neighboringChunks)
yield
areaBlockLights = self.getAreaBlockLights(chunk, neighboringChunks)
yield
allSlabs = set([b.ID for b in alphaMaterials.allBlocks if "Slab" in b.name])
for slab in allSlabs:
slabs = areaBlocks == slab
if slabs.any():
areaBlockLights[slabs] = areaBlockLights[:, :, 1:][slabs[:, :, :-1]]
yield
showHiddenOres = cr.renderer.showHiddenOres
if showHiddenOres:
facingMats = self.hiddenOreMaterials[areaBlocks]
else:
facingMats = self.exposedMaterialMap[areaBlocks]
yield
if self.roughGraphics:
areaBlockMats = self.roughMaterials[areaBlocks]
else:
areaBlockMats = self.materialMap[areaBlocks]
facingBlockIndices = self.getFacingBlockIndices(areaBlocks, facingMats)
yield
for _ in self.computeGeometry(chunk, areaBlockMats, facingBlockIndices, areaBlockLights, cr, blockRenderers):
yield
def computeGeometry(self, chunk, areaBlockMats, facingBlockIndices, areaBlockLights, chunkRenderer, blockRenderers):
blocks, blockData = chunk.Blocks, chunk.Data
blockData &= 0xf
blockMaterials = areaBlockMats[1:-1, 1:-1, 1:-1]
if self.roughGraphics:
blockMaterials.clip(0, 1, blockMaterials)
else:
# Special case for doors
#
# Each part of a door itself does not have all of the information required
# to render, as direction/whether its open is on the lower part and the hinge
# side is on the upper part. So here we combine the metadata of the bottom part
# with the top to form 0-32 metadata(which would be used in door renderer).
#
copied = False
for door in DoorRenderer.blocktypes:
doors = blocks == door
if doors.any():
if not copied:
# copy if required but only once
blockData = blockData.copy()
copied = True
# only accept lower part one block below upper part
valid = doors[:, :, :-1] & doors[:, :, 1:] & (blockData[:, :, :-1] < 8) & (blockData[:, :, 1:] >= 8)
mask = valid.nonzero()
upper_mask = (mask[0], mask[1], mask[2]+1)
blockData[mask] += (blockData[upper_mask] - 8) * 16
blockData[upper_mask] = blockData[mask] + 8
sx = sz = slice(0, 16)
asx = asz = slice(0, 18)
for y in xrange(0, chunk.world.Height, 16):
sy = slice(y, y + 16)
asy = slice(y, y + 18)
for _ in self.computeCubeGeometry(
y,
blockRenderers,
blocks[sx, sz, sy],
blockData[sx, sz, sy],
chunk.materials,
blockMaterials[sx, sz, sy],
[f[sx, sz, sy] for f in facingBlockIndices],
areaBlockLights[asx, asz, asy],
chunkRenderer):
yield
def computeCubeGeometry(self, y, blockRenderers, blocks, blockData, materials, blockMaterials, facingBlockIndices,
areaBlockLights, chunkRenderer):
materialCounts = numpy.bincount(blockMaterials.ravel())
append = blockRenderers.append
def texMap(blocks, blockData=0, direction=slice(None)):
return materials.blockTextures[blocks, blockData, direction] # xxx slow
for blockRendererClass in self.blockRendererClasses:
mi = blockRendererClass.materialIndex
if mi >= len(materialCounts) or materialCounts[mi] == 0:
continue
blockRenderer = blockRendererClass(self)
blockRenderer.y = y
blockRenderer.materials = materials
for _ in blockRenderer.makeVertices(facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights,
texMap):
yield
append(blockRenderer)
yield
def makeTemplate(self, direction, blockIndices):
return self.precomputedVertices[direction][numpy.where(blockIndices)]
class Layer:
Blocks = "Blocks"
Entities = "Entities"
Monsters = "Monsters"
Items = "Items"
TileEntities = "TileEntities"
TileTicks = "TileTicks"
TerrainPopulated = "TerrainPopulated"
ChunkBorder = "ChunkBorder"
AllLayers = (Blocks, Entities, Monsters, Items, TileEntities, TileTicks, TerrainPopulated, ChunkBorder)
class BlockRenderer(object):
detailLevels = (0,)
layer = Layer.Blocks
directionOffsets = {
pymclevel.faces.FaceXDecreasing: numpy.s_[:-2, 1:-1, 1:-1],
pymclevel.faces.FaceXIncreasing: numpy.s_[2:, 1:-1, 1:-1],
pymclevel.faces.FaceYDecreasing: numpy.s_[1:-1, 1:-1, :-2],
pymclevel.faces.FaceYIncreasing: numpy.s_[1:-1, 1:-1, 2:],
pymclevel.faces.FaceZDecreasing: numpy.s_[1:-1, :-2, 1:-1],
pymclevel.faces.FaceZIncreasing: numpy.s_[1:-1, 2:, 1:-1],
}
renderstate = ChunkCalculator.renderstateAlphaTest
used = False
def __init__(self, cc):
self.makeTemplate = cc.makeTemplate
self.chunkCalculator = cc
self.vertexArrays = []
self.materials = cc.level.materials
pass
def getBlocktypes(self, mats):
return self.blocktypes
def setAlpha(self, alpha):
"alpha is an unsigned byte value"
for a in self.vertexArrays:
a.view('uint8')[_RGBA][..., 3] = alpha
def bufferSize(self):
return sum(a.size for a in self.vertexArrays) * 4
def getMaterialIndices(self, blockMaterials):
return blockMaterials == self.materialIndex
def makeVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
arrays = []
append = arrays.append
materialIndices = self.getMaterialIndices(blockMaterials)
yield
blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
for (direction, exposedFaceIndices) in enumerate(facingBlockIndices):
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
vertexArray = self.makeFaceVertices(direction, materialIndices, exposedFaceIndices, blocks, blockData,
blockLight, facingBlockLight, texMap)
yield
if len(vertexArray):
append(vertexArray)
self.vertexArrays = arrays
def makeArrayList(self, chunkPosition, showRedraw):
l = gl.glGenLists(1)
GL.glNewList(l, GL.GL_COMPILE)
self.drawArrays(chunkPosition, showRedraw)
GL.glEndList()
return l
def drawArrays(self, chunkPosition, showRedraw):
cx, cz = chunkPosition
y = getattr(self, "y", 0)
with gl.glPushMatrix(GL.GL_MODELVIEW):
GL.glTranslate(cx << 4, y, cz << 4)
if showRedraw:
GL.glColor(1.0, 0.25, 0.25, 1.0)
self.drawVertices()
def drawVertices(self):
if self.vertexArrays:
for buf in self.vertexArrays:
self.drawFaceVertices(buf)
def drawFaceVertices(self, buf):
if not len(buf):
return
stride = elementByteLength
GL.glVertexPointer(3, GL.GL_FLOAT, stride, (buf.ravel()))
GL.glTexCoordPointer(2, GL.GL_FLOAT, stride, (buf.ravel()[3:]))
GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype=numpy.uint8).ravel()[20:]))
GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
class EntityRendererGeneric(BlockRenderer):
renderstate = ChunkCalculator.renderstateEntity
detailLevels = (0, 1, 2)
def drawFaceVertices(self, buf):
if not len(buf):
return
stride = elementByteLength
GL.glVertexPointer(3, GL.GL_FLOAT, stride, (buf.ravel()))
GL.glTexCoordPointer(2, GL.GL_FLOAT, stride, (buf.ravel()[3:]))
GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype=numpy.uint8).ravel()[20:]))
GL.glDepthMask(False)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GL.glLineWidth(2.0)
GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glPolygonOffset(DepthOffset.TerrainWire, DepthOffset.TerrainWire)
with gl.glEnable(GL.GL_POLYGON_OFFSET_FILL, GL.GL_DEPTH_TEST):
GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
GL.glDepthMask(True)
@staticmethod
def _computeVertices(positions, colors, offset=False, chunkPosition=(0, 0)):
cx, cz = chunkPosition
x = cx << 4
z = cz << 4
vertexArray = numpy.zeros(shape=(len(positions), 6, 4, 6), dtype='float32')
if positions:
positions = numpy.array(positions)
positions[:, (0, 2)] -= (x, z)
if offset:
positions -= 0.5
vertexArray.view('uint8')[_RGBA] = colors
vertexArray[_XYZ] = positions[:, numpy.newaxis, numpy.newaxis, :]
vertexArray[_XYZ] += faceVertexTemplates[_XYZ]
vertexArray.shape = (len(positions) * 6, 4, 6)
return vertexArray
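# e.g. for _computeVertices above (a sketch): one entity at world (20, 64, 9)
# in chunk (1, 0) becomes a unit cube of 6 quads anchored at local (4, 64, 9),
# shifted by -0.5 on every axis when offset is True.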
class TileEntityRenderer(EntityRendererGeneric):
layer = Layer.TileEntities
def makeChunkVertices(self, chunk):
tilePositions = []
append = tilePositions.append
for i, ent in enumerate(chunk.TileEntities):
if i % 10 == 0:
yield
if 'x' not in ent:
continue
append(pymclevel.TileEntity.pos(ent))
tiles = self._computeVertices(tilePositions, (0xff, 0xff, 0x33, 0x44), chunkPosition=chunk.chunkPosition)
yield
self.vertexArrays = [tiles]
class BaseEntityRenderer(EntityRendererGeneric):
pass
class MonsterRenderer(BaseEntityRenderer):
layer = Layer.Entities # xxx Monsters
notMonsters = {"Item", "XPOrb", "Painting", "ItemFrame", "ArmorStand"}
def makeChunkVertices(self, chunk):
monsterPositions = []
append = monsterPositions.append
notMonsters = self.chunkCalculator.level.defsIds.mcedit_defs.get('notMonsters', self.notMonsters)
for i, ent in enumerate(chunk.Entities):
if i % 10 == 0:
yield
id = ent["id"].value
if id in notMonsters:
continue
pos = pymclevel.Entity.pos(ent)
pos[1] += 0.5
append(pos)
monsters = self._computeVertices(monsterPositions,
(0xff, 0x22, 0x22, 0x44),
offset=True,
chunkPosition=chunk.chunkPosition)
yield
self.vertexArrays = [monsters]
class EntityRenderer(BaseEntityRenderer):
@staticmethod
def makeChunkVertices(chunk):
yield
class ItemRenderer(BaseEntityRenderer):
layer = Layer.Items
def makeChunkVertices(self, chunk):
entityPositions = []
entityColors = []
colorMap = {
"Item": (0x22, 0xff, 0x22, 0x5f),
"XPOrb": (0x88, 0xff, 0x88, 0x5f),
"Painting": (134, 96, 67, 0x5f),
"ItemFrame": (134, 96, 67, 0x5f),
"ArmorStand": (0x22, 0xff, 0x22, 0x5f),
}
pos_append = entityPositions.append
color_append = entityColors.append
defsIds = self.chunkCalculator.level.defsIds
mcedit_defs = defsIds.mcedit_defs
mcedit_ids = defsIds.mcedit_ids
for i, ent in enumerate(chunk.Entities):
if i % 10 == 0:
yield
# Get the color from the versioned data, and use the 'old' way as fallback
color = mcedit_defs.get(mcedit_ids.get(ent["id"].value), {}).get("mapcolor")
if color is None:
color = colorMap.get(ent["id"].value)
if color is None:
continue
pos = pymclevel.Entity.pos(ent)
noRenderDelta = mcedit_defs.get('noRenderDelta', ("Painting", "ItemFrame"))
if ent["id"].value not in noRenderDelta:
pos[1] += 0.5
pos_append(pos)
color_append(color)
entities = self._computeVertices(entityPositions,
numpy.array(entityColors, dtype='uint8')[:, numpy.newaxis, numpy.newaxis],
offset=True, chunkPosition=chunk.chunkPosition)
yield
self.vertexArrays = [entities]
class TileTicksRenderer(EntityRendererGeneric):
layer = Layer.TileTicks
def makeChunkVertices(self, chunk):
if hasattr(chunk, "TileTicks"):
self.vertexArrays.append(self._computeVertices([[tick[j].value for j in "xyz"] for i, tick in enumerate(chunk.TileTicks)],
(0xff, 0xff, 0xff, 0x44),
chunkPosition=chunk.chunkPosition))
yield
class TerrainPopulatedRenderer(EntityRendererGeneric):
layer = Layer.TerrainPopulated
vertexTemplate = numpy.zeros((6, 4, 6), 'float32')
vertexTemplate[_XYZ] = faceVertexTemplates[_XYZ]
vertexTemplate[_XYZ] *= (16, 256, 16)
color = (255, 200, 155)
vertexTemplate.view('uint8')[_RGBA] = color + (72,)
def drawFaceVertices(self, buf):
if not len(buf):
return
stride = elementByteLength
GL.glVertexPointer(3, GL.GL_FLOAT, stride, (buf.ravel()))
GL.glTexCoordPointer(2, GL.GL_FLOAT, stride, (buf.ravel()[3:]))
GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype=numpy.uint8).ravel()[20:]))
GL.glDepthMask(False)
GL.glDisable(GL.GL_CULL_FACE)
with gl.glEnable(GL.GL_DEPTH_TEST):
GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
GL.glEnable(GL.GL_CULL_FACE)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GL.glLineWidth(1.0)
GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
GL.glLineWidth(2.0)
with gl.glEnable(GL.GL_DEPTH_TEST):
GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
GL.glLineWidth(1.0)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
GL.glDepthMask(True)
def makeChunkVertices(self, chunk):
neighbors = self.chunkCalculator.getNeighboringChunks(chunk)
def getpop(ch):
return getattr(ch, "TerrainPopulated", True)
pop = getpop(chunk)
yield
if pop:
return
visibleFaces = [
getpop(neighbors[pymclevel.faces.FaceXIncreasing]),
getpop(neighbors[pymclevel.faces.FaceXDecreasing]),
True,
True,
getpop(neighbors[pymclevel.faces.FaceZIncreasing]),
getpop(neighbors[pymclevel.faces.FaceZDecreasing]),
]
visibleFaces = numpy.array(visibleFaces, dtype='bool')
verts = self.vertexTemplate[visibleFaces]
self.vertexArrays.append(verts)
yield
class ChunkBorderRenderer(EntityRendererGeneric):
layer = Layer.ChunkBorder
color = (0, 210, 225)
vertexTemplate = numpy.zeros((6, 4, 6), 'float32')
vertexTemplate[_XYZ] = faceVertexTemplates[_XYZ]
vertexTemplate[_XYZ] *= (16, 256, 16)
vertexTemplate.view('uint8')[_RGBA] = color + (150,)
def makeChunkVertices(self, chunk):
visibleFaces = [
True,
True,
True,
True,
True,
True,
]
yield
visibleFaces = numpy.array(visibleFaces, dtype='bool')
verts = self.vertexTemplate[visibleFaces]
self.vertexArrays.append(verts)
yield
def drawFaceVertices(self, buf):
if not len(buf):
return
stride = elementByteLength
GL.glVertexPointer(3, GL.GL_FLOAT, stride, (buf.ravel()))
GL.glTexCoordPointer(2, GL.GL_FLOAT, stride, (buf.ravel()[3:]))
GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype=numpy.uint8).ravel()[20:]))
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_LINE)
GL.glLineWidth(1)
with gl.glEnable(GL.GL_DEPTH_TEST):
GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
GL.glLineWidth(2.0)
with gl.glEnable(GL.GL_DEPTH_TEST):
GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
GL.glLineWidth(1.0)
GL.glPolygonMode(GL.GL_FRONT_AND_BACK, GL.GL_FILL)
class LowDetailBlockRenderer(BlockRenderer):
renderstate = ChunkCalculator.renderstateLowDetail
detailLevels = (1,)
def drawFaceVertices(self, buf):
if not len(buf):
return
stride = 16
GL.glVertexPointer(3, GL.GL_FLOAT, stride, numpy.ravel(buf.ravel()))
GL.glColorPointer(4, GL.GL_UNSIGNED_BYTE, stride, (buf.view(dtype='uint8').ravel()[12:]))
GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
GL.glDrawArrays(GL.GL_QUADS, 0, len(buf) * 4)
GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
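# Low-detail vertices use a tighter 16-byte layout than the 24-byte one above:
# 3 float32 for position (12 bytes) followed by 4 uint8 RGBA (4 bytes) and no
# texture coordinates -- hence stride 16, the color offset of 12, and the local
# re-binding of _RGBA to numpy.s_[..., 12:16] in makeChunkVertices below.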
def setAlpha(self, alpha):
for va in self.vertexArrays:
va.view('uint8')[..., -1] = alpha
def makeChunkVertices(self, ch):
step = 1
level = ch.world
vertexArrays = []
blocks = ch.Blocks
heightMap = ch.HeightMap
heightMap = heightMap[::step, ::step]
blocks = blocks[::step, ::step]
if 0 in blocks.shape:
return
chunkWidth, chunkLength, chunkHeight = blocks.shape
blockIndices = numpy.zeros((chunkWidth, chunkLength, chunkHeight), bool)
gridaxes = list(numpy.indices((chunkWidth, chunkLength)))
h = numpy.swapaxes(heightMap - 1, 0, 1)[:chunkWidth, :chunkLength]
numpy.clip(h, 0, chunkHeight - 1, out=h)
gridaxes = (gridaxes[0], gridaxes[1], h)
depths = numpy.zeros((chunkWidth, chunkLength), dtype='uint16')
depths[1:-1, 1:-1] = reduce(numpy.minimum, (h[1:-1, :-2], h[1:-1, 2:], h[:-2, 1:-1]), h[2:, 1:-1])
yield
try:
topBlocks = blocks[gridaxes]
nonAirBlocks = (topBlocks != 0)
blockIndices[gridaxes] = nonAirBlocks
h += 1
numpy.clip(h, 0, chunkHeight - 1, out=h)
overblocks = blocks[gridaxes][nonAirBlocks].ravel()
except ValueError as e:
raise ValueError(str(e.args) + "Chunk shape: {0}".format(blockIndices.shape), sys.exc_info()[-1])
if nonAirBlocks.any():
blockTypes = blocks[blockIndices]
flatcolors = level.materials.flatColors[blockTypes, ch.Data[blockIndices] & 0xf][:, numpy.newaxis, :]
x, z, y = blockIndices.nonzero()
yield
vertexArray = numpy.zeros((len(x), 4, 4), dtype='float32')
vertexArray[_XYZ][..., 0] = x[:, numpy.newaxis]
vertexArray[_XYZ][..., 1] = y[:, numpy.newaxis]
vertexArray[_XYZ][..., 2] = z[:, numpy.newaxis]
va0 = numpy.array(vertexArray)
va0[..., :3] += faceVertexTemplates[pymclevel.faces.FaceYIncreasing, ..., :3]
overmask = overblocks > 0
flatcolors[overmask] = level.materials.flatColors[:, 0][overblocks[overmask]][:, numpy.newaxis]
if self.detailLevel == 2:
heightfactor = (y / float(2.0 * ch.world.Height)) + 0.5
flatcolors[..., :3] = flatcolors[..., :3].astype(float) * heightfactor[:, numpy.newaxis, numpy.newaxis]
_RGBA = numpy.s_[..., 12:16]
va0.view('uint8')[_RGBA] = flatcolors
va0[_XYZ][:, :, 0] *= step
va0[_XYZ][:, :, 2] *= step
yield
if self.detailLevel == 2:
self.vertexArrays = [va0]
return
va1 = numpy.array(vertexArray)
va1[..., :3] += faceVertexTemplates[pymclevel.faces.FaceXIncreasing, ..., :3]
va1[_XYZ][:, (0, 1), 1] = depths[nonAirBlocks].ravel()[:, numpy.newaxis] # stretch to floor
va1[_XYZ][:, (1, 2), 0] -= 1.0 # turn diagonally
va1[_XYZ][:, (2, 3), 1] -= 0.5 # drop down to prevent intersection pixels
va1[_XYZ][:, :, 0] *= step
va1[_XYZ][:, :, 2] *= step
flatcolors = flatcolors.astype(float) * 0.8
va1.view('uint8')[_RGBA] = flatcolors
grassmask = topBlocks[nonAirBlocks] == 2
# color grass sides with dirt's color
va1.view('uint8')[_RGBA][grassmask] = level.materials.flatColors[:, 0][[3]][:, numpy.newaxis]
va2 = numpy.array(va1)
va2[_XYZ][:, (1, 2), 0] += step
va2[_XYZ][:, (0, 3), 0] -= step
vertexArrays = [va1, va2, va0]
self.vertexArrays = vertexArrays
class OverheadBlockRenderer(LowDetailBlockRenderer):
detailLevels = (2,)
class GenericBlockRenderer(BlockRenderer):
renderstate = ChunkCalculator.renderstateAlphaTest
materialIndex = 1
def makeGenericVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
vertexArrays = []
append = vertexArrays.append
materialIndices = self.getMaterialIndices(blockMaterials)
yield
for (direction, exposedFaceIndices) in enumerate(facingBlockIndices):
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
blockIndices = materialIndices & exposedFaceIndices
theseBlocks = blocks[blockIndices]
bdata = blockData[blockIndices]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texMap(theseBlocks, bdata, direction)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
if self.materials.name in ("Alpha", "Pocket"):
if direction == pymclevel.faces.FaceYIncreasing:
grass = theseBlocks == alphaMaterials.Grass.ID
vertexArray.view('uint8')[_RGB][grass] = vertexArray.view('uint8')[_RGB][grass].astype(float) * self.grassColor
yield
append(vertexArray)
self.vertexArrays = vertexArrays
grassColor = grassColorDefault = [0.39, 0.71, 0.23] # 62C743
makeVertices = makeGenericVertices
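# texMap is assumed throughout these renderers to be a callable of the form
# texMap(blockIDs, blockData, direction) returning per-block (s, t) offsets
# into the terrain texture atlas; each renderer adds its result to the
# template texture coordinates to select the tile for a given block face.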
class LeafBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [block.ID for block in mats.blocksByType["LEAVES"]]
@property
def renderstate(self):
if self.chunkCalculator.fastLeaves:
return ChunkCalculator.renderstatePlain
else:
return ChunkCalculator.renderstateAlphaTest
def makeLeafVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
arrays = []
append = arrays.append
materialIndices = self.getMaterialIndices(blockMaterials)
yield
if self.materials.name in ("Alpha", "Pocket"):
if not self.chunkCalculator.fastLeaves:
blockIndices = materialIndices
data = blockData[blockIndices]
data &= 0x3 # ignore decay states
leaves = (data == alphaMaterials.Leaves.blockData)
pines = (data == alphaMaterials.PineLeaves.blockData)
birches = (data == alphaMaterials.BirchLeaves.blockData)
jungle = (data == alphaMaterials.JungleLeaves.blockData)
acacia = (data == alphaMaterials.AcaciaLeaves.blockData)
darkoak = (data == alphaMaterials.DarkOakLeaves.blockData)
texes = texMap(blocks[blockIndices], [0], 0)
else:
blockIndices = materialIndices
texes = texMap(blocks[blockIndices], [0], 0)
for (direction, exposedFaceIndices) in enumerate(facingBlockIndices):
if self.materials.name in ("Alpha", "Pocket"):
if self.chunkCalculator.fastLeaves:
blockIndices = materialIndices & exposedFaceIndices
data = blockData[blockIndices]
data &= 0x3 # ignore decay states
leaves = (data == alphaMaterials.Leaves.blockData)
pines = (data == alphaMaterials.PineLeaves.blockData)
birches = (data == alphaMaterials.BirchLeaves.blockData)
jungle = (data == alphaMaterials.JungleLeaves.blockData)
acacia = (data == alphaMaterials.AcaciaLeaves.blockData)
darkoak = (data == alphaMaterials.DarkOakLeaves.blockData)
texes = texMap(blocks[blockIndices], data, 0)
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texes[:, numpy.newaxis]
if not self.chunkCalculator.fastLeaves:
vertexArray[_ST] -= (0x10, 0x0)
vertexArray.view('uint8')[_RGB] *= facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
if self.materials.name in ("Alpha", "Pocket"):
vertexArray.view('uint8')[_RGB][leaves] = vertexArray.view('uint8')[_RGB][leaves].astype(float) * self.leafColor
vertexArray.view('uint8')[_RGB][pines] = vertexArray.view('uint8')[_RGB][pines].astype(float) * self.pineLeafColor
vertexArray.view('uint8')[_RGB][birches] = vertexArray.view('uint8')[_RGB][birches].astype(float) * self.birchLeafColor
vertexArray.view('uint8')[_RGB][jungle] = vertexArray.view('uint8')[_RGB][jungle].astype(float) * self.jungleLeafColor
vertexArray.view('uint8')[_RGB][acacia] = vertexArray.view('uint8')[_RGB][acacia].astype(float) * self.acaciaLeafColor
vertexArray.view('uint8')[_RGB][darkoak] = vertexArray.view('uint8')[_RGB][darkoak].astype(float) * self.darkoakLeafColor
yield
append(vertexArray)
self.vertexArrays = arrays
leafColor = leafColorDefault = [0x48 / 255., 0xb5 / 255., 0x18 / 255.] # 48b518
pineLeafColor = pineLeafColorDefault = [0x61 / 255., 0x99 / 255., 0x61 / 255.] # 0x619961
birchLeafColor = birchLeafColorDefault = [0x80 / 255., 0xa7 / 255., 0x55 / 255.] # 0x80a755
jungleLeafColor = jungleLeafColorDefault = [0x48 / 255., 0xb5 / 255., 0x18 / 255.] # 48b518
acaciaLeafColor = acaciaLeafColorDefault = [0x48 / 255., 0xb5 / 255., 0x18 / 255.] # 48b518
darkoakLeafColor = darkoakLeafColorDefault = [0x48 / 255., 0xb5 / 255., 0x18 / 255.] # 48b518
makeVertices = makeLeafVertices
class PlantBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
# blocktypes = [6, 37, 38, 39, 40, 59, 83]
# if mats.name != "Classic": blocktypes += [31, 32] # shrubs, tall grass
# if mats.name == "Alpha": blocktypes += [115] # nether wart
blocktypes = [b.ID for b in mats if b.type in ("DECORATION_CROSS", "NETHER_WART", "CROPS", "STEM")]
return blocktypes
renderstate = ChunkCalculator.renderstateAlphaTest
def makePlantVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
arrays = []
append = arrays.append
blockIndices = self.getMaterialIndices(blockMaterials)
yield
theseBlocks = blocks[blockIndices]
bdata = blockData[blockIndices]
texes = texMap(blocks[blockIndices], bdata, 0)
blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
lights = blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
colorize = None
if self.materials.name != "Classic": #so hacky, someone more competent fix this
colorize = (theseBlocks == self.materials.TallGrass.ID) & (bdata != 0)
colorize2 = (theseBlocks == self.materials.TallFlowers.ID) & (bdata != 0) & (
bdata != 1) & (bdata != 4) & (bdata != 5)
for direction in (
pymclevel.faces.FaceXIncreasing, pymclevel.faces.FaceXDecreasing, pymclevel.faces.FaceZIncreasing,
pymclevel.faces.FaceZDecreasing):
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
return
if direction == pymclevel.faces.FaceXIncreasing:
vertexArray[_XYZ][..., 1:3, 0] -= 1
if direction == pymclevel.faces.FaceXDecreasing:
vertexArray[_XYZ][..., 1:3, 0] += 1
if direction == pymclevel.faces.FaceZIncreasing:
vertexArray[_XYZ][..., 1:3, 2] -= 1
if direction == pymclevel.faces.FaceZDecreasing:
vertexArray[_XYZ][..., 1:3, 2] += 1
vertexArray[_ST] += texes[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] = 0xf # ignore precomputed directional light
vertexArray.view('uint8')[_RGB] *= lights
if colorize is not None:
vertexArray.view('uint8')[_RGB][colorize] = vertexArray.view('uint8')[_RGB][colorize].astype(float) * LeafBlockRenderer.leafColor
vertexArray.view('uint8')[_RGB][colorize2] = vertexArray.view('uint8')[_RGB][colorize2].astype(float) * LeafBlockRenderer.leafColor
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makePlantVertices
class TorchBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [block.ID for block in mats.blocksByType["TORCH"]]
renderstate = ChunkCalculator.renderstateAlphaTest
torchOffsetsStraight = [
[ # FaceXIncreasing
(-7 / 16., 0, 0),
(-7 / 16., 0, 0),
(-7 / 16., 0, 0),
(-7 / 16., 0, 0),
],
[ # FaceXDecreasing
(7 / 16., 0, 0),
(7 / 16., 0, 0),
(7 / 16., 0, 0),
(7 / 16., 0, 0),
],
[ # FaceYIncreasing
(7 / 16., -6 / 16., 7 / 16.),
(7 / 16., -6 / 16., -7 / 16.),
(-7 / 16., -6 / 16., -7 / 16.),
(-7 / 16., -6 / 16., 7 / 16.),
],
[ # FaceYDecreasing
(7 / 16., 0., 7 / 16.),
(-7 / 16., 0., 7 / 16.),
(-7 / 16., 0., -7 / 16.),
(7 / 16., 0., -7 / 16.),
],
[ # FaceZIncreasing
(0, 0, -7 / 16.),
(0, 0, -7 / 16.),
(0, 0, -7 / 16.),
(0, 0, -7 / 16.)
],
[ # FaceZDecreasing
(0, 0, 7 / 16.),
(0, 0, 7 / 16.),
(0, 0, 7 / 16.),
(0, 0, 7 / 16.)
],
]
torchOffsetsSouth = [
[ # FaceXIncreasing
(-7 / 16., 3 / 16., 0),
(-7 / 16., 3 / 16., 0),
(-7 / 16., 3 / 16., 0),
(-7 / 16., 3 / 16., 0),
],
[ # FaceXDecreasing
(7 / 16., 3 / 16., 0),
(7 / 16., 3 / 16., 0),
(7 / 16., 3 / 16., 0),
(7 / 16., 3 / 16., 0),
],
[ # FaceYIncreasing
(7 / 16., -3 / 16., 7 / 16.),
(7 / 16., -3 / 16., -7 / 16.),
(-7 / 16., -3 / 16., -7 / 16.),
(-7 / 16., -3 / 16., 7 / 16.),
],
[ # FaceYDecreasing
(7 / 16., 3 / 16., 7 / 16.),
(-7 / 16., 3 / 16., 7 / 16.),
(-7 / 16., 3 / 16., -7 / 16.),
(7 / 16., 3 / 16., -7 / 16.),
],
[ # FaceZIncreasing
(0, 3 / 16., -7 / 16.),
(0, 3 / 16., -7 / 16.),
(0, 3 / 16., -7 / 16.),
(0, 3 / 16., -7 / 16.)
],
[ # FaceZDecreasing
(0, 3 / 16., 7 / 16.),
(0, 3 / 16., 7 / 16.),
(0, 3 / 16., 7 / 16.),
(0, 3 / 16., 7 / 16.),
],
]
torchOffsetsNorth = torchOffsetsWest = torchOffsetsEast = torchOffsetsSouth
torchOffsets = [
torchOffsetsStraight,
torchOffsetsSouth,
torchOffsetsNorth,
torchOffsetsWest,
torchOffsetsEast,
torchOffsetsStraight,
] + [torchOffsetsStraight] * 10
torchOffsets = numpy.array(torchOffsets, dtype='float32')
torchOffsets[1][..., 3, :, 0] -= 0.5
torchOffsets[1][..., 0:2, 0:2, 0] -= 0.5
torchOffsets[1][..., 4:6, 0:2, 0] -= 0.5
torchOffsets[1][..., 0:2, 2:4, 0] -= 0.1
torchOffsets[1][..., 4:6, 2:4, 0] -= 0.1
torchOffsets[1][..., 2, :, 0] -= 0.25
torchOffsets[2][..., 3, :, 0] += 0.5
torchOffsets[2][..., 0:2, 0:2, 0] += 0.5
torchOffsets[2][..., 4:6, 0:2, 0] += 0.5
torchOffsets[2][..., 0:2, 2:4, 0] += 0.1
torchOffsets[2][..., 4:6, 2:4, 0] += 0.1
torchOffsets[2][..., 2, :, 0] += 0.25
torchOffsets[3][..., 3, :, 2] -= 0.5
torchOffsets[3][..., 0:2, 0:2, 2] -= 0.5
torchOffsets[3][..., 4:6, 0:2, 2] -= 0.5
torchOffsets[3][..., 0:2, 2:4, 2] -= 0.1
torchOffsets[3][..., 4:6, 2:4, 2] -= 0.1
torchOffsets[3][..., 2, :, 2] -= 0.25
torchOffsets[4][..., 3, :, 2] += 0.5
torchOffsets[4][..., 0:2, 0:2, 2] += 0.5
torchOffsets[4][..., 4:6, 0:2, 2] += 0.5
torchOffsets[4][..., 0:2, 2:4, 2] += 0.1
torchOffsets[4][..., 4:6, 2:4, 2] += 0.1
torchOffsets[4][..., 2, :, 2] += 0.25
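# The slice assignments above (an interpretation; the source does not document
# them): torchOffsets[1]..[4] are the four wall-mounted orientations. For each,
# the bottom face (index 3) shifts a full 0.5 toward the supporting wall, the
# top face (index 2) shifts 0.25, and the side faces' lower vertices (0:2)
# shift 0.5 while their upper vertices (2:4) shift only 0.1 -- shearing the
# straight torch template so wall torches render leaning out of the wall.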
upCoords = ((7, 6), (7, 8), (9, 8), (9, 6))
downCoords = ((7, 14), (7, 16), (9, 16), (9, 14))
def makeTorchVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
blockIndices = self.getMaterialIndices(blockMaterials)
torchOffsets = self.torchOffsets[blockData[blockIndices]]
texes = texMap(blocks[blockIndices], blockData[blockIndices])
yield
arrays = []
append = arrays.append
for direction in xrange(6):
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
return
vertexArray.view('uint8')[_RGBA] = 0xff
vertexArray[_XYZ] += torchOffsets[:, direction]
if direction == pymclevel.faces.FaceYIncreasing:
vertexArray[_ST] = self.upCoords
if direction == pymclevel.faces.FaceYDecreasing:
vertexArray[_ST] = self.downCoords
vertexArray[_ST] += texes[:, numpy.newaxis, direction]
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makeTorchVertices
class LeverBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:lever"].ID]
leverBaseTemplate = makeVertexTemplatesFromJsonModel((5, 0, 4), (11, 3, 12), {
"down": (10, 0, 16, 8),
"up": (10, 0, 16, 8),
"north": (10, 8, 16, 11),
"south": (10, 8, 16, 11),
"west": (2, 0, 10, 3),
"east": (2, 0, 10, 3)
})
leverBaseTemplates = numpy.array([
rotateTemplate(leverBaseTemplate, x=180, y=90),
rotateTemplate(leverBaseTemplate, x=90, y=90),
rotateTemplate(leverBaseTemplate, x=90, y=270),
rotateTemplate(leverBaseTemplate, x=90, y=180),
rotateTemplate(leverBaseTemplate, x=270, y=180),
leverBaseTemplate,
rotateTemplate(leverBaseTemplate, y=90),
rotateTemplate(leverBaseTemplate, x=180),
rotateTemplate(leverBaseTemplate, x=180, y=90),
rotateTemplate(leverBaseTemplate, x=90, y=90),
rotateTemplate(leverBaseTemplate, x=270, y=90),
rotateTemplate(leverBaseTemplate, x=270),
rotateTemplate(leverBaseTemplate, x=270, y=180),
leverBaseTemplate,
rotateTemplate(leverBaseTemplate, y=90),
numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
])
leverTemplate = makeVertexTemplatesFromJsonModel((7, 1, 7), (9, 11, 9), {
"down": (7, 6, 9, 8),
"up": (7, 6, 9, 8),
"north": (7, 6, 9, 16),
"south": (7, 6, 9, 16),
"west": (7, 6, 9, 16),
"east": (7, 6, 9, 16)
})
leverTemplates = numpy.array([
rotateTemplate(leverTemplate, x=180),
rotateTemplate(leverTemplate, x=90, y=90),
rotateTemplate(leverTemplate, x=90, y=270),
rotateTemplate(leverTemplate, x=90, y=180),
rotateTemplate(leverTemplate, x=270, y=180),
leverTemplate,
rotateTemplate(leverTemplate, y=90),
rotateTemplate(leverTemplate, x=180),
rotateTemplate(leverTemplate, x=180),
rotateTemplate(leverTemplate, x=90, y=90),
rotateTemplate(leverTemplate, x=270, y=90),
rotateTemplate(leverTemplate, x=270),
rotateTemplate(leverTemplate, x=270, y=180),
leverTemplate,
leverTemplate,
numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
])
makeVertices = makeVerticesFromModel([leverBaseTemplates, leverTemplates], 15)
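# The trailing 15 passed to makeVerticesFromModel is presumably a blockData
# mask: lever data & 15 indexes into the 16-slot template tables above, which
# fold both the attachment orientation (low bits) and the flipped-on state
# (bit 0x8) into precomputed rotations of the same base model.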
class RailBlockRenderer(BlockRenderer):
renderstate = ChunkCalculator.renderstateAlphaTest
def __init__(self, *args, **kwargs):
BlockRenderer.__init__(self, *args, **kwargs)
self.railTextures = numpy.array([
[(0, 128), (0, 144), (16, 144), (16, 128)], # east-west
[(0, 128), (16, 128), (16, 144), (0, 144)], # north-south
[(0, 128), (16, 128), (16, 144), (0, 144)], # south-ascending
[(0, 128), (16, 128), (16, 144), (0, 144)], # north-ascending
[(0, 128), (0, 144), (16, 144), (16, 128)], # east-ascending
[(0, 128), (0, 144), (16, 144), (16, 128)], # west-ascending
[(0, 112), (0, 128), (16, 128), (16, 112)], # northeast corner
[(0, 128), (16, 128), (16, 112), (0, 112)], # southeast corner
[(16, 128), (16, 112), (0, 112), (0, 128)], # southwest corner
[(16, 112), (0, 112), (0, 128), (16, 128)], # northwest corner
[(0, 192), (0, 208), (16, 208), (16, 192)], # unknown
[(0, 192), (0, 208), (16, 208), (16, 192)], # unknown
[(0, 192), (0, 208), (16, 208), (16, 192)], # unknown
[(0, 192), (0, 208), (16, 208), (16, 192)], # unknown
[(0, 192), (0, 208), (16, 208), (16, 192)], # unknown
[(0, 192), (0, 208), (16, 208), (16, 192)], # unknown
], dtype='float32')
self.railTextures -= self.materials.blockTextures[self.materials.Rail.ID, 0, 0]
@classmethod
def getBlocktypes(cls, mats):
return [block.ID for block in mats.blocksByType["SIMPLE_RAIL"]]
railOffsets = numpy.array([
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 1, 1], # south-ascending
[1, 1, 0, 0], # north-ascending
[1, 0, 0, 1], # east-ascending
[0, 1, 1, 0], # west-ascending
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
], dtype='float32')
def makeRailVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
direction = pymclevel.faces.FaceYIncreasing
blockIndices = self.getMaterialIndices(blockMaterials)
yield
bdata = blockData[blockIndices]
railBlocks = blocks[blockIndices]
tex = texMap(railBlocks, bdata, pymclevel.faces.FaceYIncreasing)[:, numpy.newaxis, :]
# disable 'powered' or 'pressed' bit for powered and detector rails
bdata[railBlocks != self.materials.Rail.ID] = bdata[railBlocks != self.materials.Rail.ID].astype(int) & ~0x8
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
return
vertexArray[_ST] = self.railTextures[bdata]
vertexArray[_ST] += tex
vertexArray[_XYZ][..., 1] -= 0.9
vertexArray[_XYZ][..., 1] += self.railOffsets[bdata]
blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
vertexArray.view('uint8')[_RGB] *= blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
yield
self.vertexArrays = [vertexArray]
makeVertices = makeRailVertices
class LadderBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:ladder"].ID]
ladderOffsets = numpy.array([
[(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)],
[(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)],
[(0, -1, 0.9), (0, 0, -0.1), (0, 0, -0.1), (0, -1, 0.9)], # facing east
[(0, 0, 0.1), (0, -1, -.9), (0, -1, -.9), (0, 0, 0.1)], # facing west
[(.9, -1, 0), (.9, -1, 0), (-.1, 0, 0), (-.1, 0, 0)], # north
[(0.1, 0, 0), (0.1, 0, 0), (-.9, -1, 0), (-.9, -1, 0)], # south
] + [[(0, 0, 0), (0, 0, 0), (0, 0, 0), (0, 0, 0)]] * 10, dtype='float32')
ladderTextures = numpy.array([
[(0, 192), (0, 208), (16, 208), (16, 192)], # unknown
[(0, 192), (0, 208), (16, 208), (16, 192)], # unknown
[(64, 96), (64, 80), (48, 80), (48, 96), ], # e
[(48, 80), (48, 96), (64, 96), (64, 80), ], # w
[(48, 96), (64, 96), (64, 80), (48, 80), ], # n
[(64, 80), (48, 80), (48, 96), (64, 96), ], # s
] + [[(0, 192), (0, 208), (16, 208), (16, 192)]] * 10, dtype='float32')
def ladderVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
blockIndices = self.getMaterialIndices(blockMaterials)
blockLight = areaBlockLights[1:-1, 1:-1, 1:-1]
yield
bdata = blockData[blockIndices]
vertexArray = self.makeTemplate(pymclevel.faces.FaceYIncreasing, blockIndices)
if not len(vertexArray):
return
vertexArray[_ST] = self.ladderTextures[bdata]
vertexArray[_XYZ] += self.ladderOffsets[bdata]
vertexArray.view('uint8')[_RGB] *= blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
yield
self.vertexArrays = [vertexArray]
makeVertices = ladderVertices
class WallSignBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:wall_sign"].ID]
wallSignTemplate = makeVertexTemplatesFromJsonModel((0, 4.5, 0), (16, 13.5, 2), {
"down": (0, 11, 18, 13),
"up": (0, 6, 16, 8),
"north": (0, 4, 16, 13),
"south": (0, 4, 16, 13),
"west": (0, 4, 2, 13),
"east": (10, 4, 12, 13)
})
# I don't know how this system works or how it should be structured, but this seems to do the job
wallSignTemplates = numpy.array([
wallSignTemplate,
wallSignTemplate,
rotateTemplate(wallSignTemplate, y=180),
wallSignTemplate,
rotateTemplate(wallSignTemplate, y=90),
rotateTemplate(wallSignTemplate, y=270),
numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
])
makeVertices = makeVerticesFromModel(wallSignTemplates, 7)
class StandingSignRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:standing_sign"].ID]
signTemplate = makeVertexTemplatesFromJsonModel((0, 7, 7), (16, 16, 9), {
"down": (0, 14, 16, 16),
"up": (0, 12, 16, 14),
"north": (0, 7, 16, 16),
"south": (0, 7, 16, 16),
"west": (0, 7, 2, 16),
"east": (14, 7, 16, 16)
})
signTemplates = numpy.array([
signTemplate,
numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
])
postTemplate = makeVertexTemplatesFromJsonModel((7, 0, 7), (9, 7, 9), {
"down": (7, 0, 9, 6),
"up": (7, 0, 9, 6),
"north": (7, 0, 9, 6),
"south": (7, 0, 9, 6),
"west": (7, 0, 9, 6),
"east": (7, 0, 9, 6),
})
postTemplates = numpy.array([
postTemplate,
numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
])
makeVertices = makeVerticesFromModel([signTemplates, postTemplates])
class SnowBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:snow_layer"].ID]
def makeSnowVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
materialIndices = self.getMaterialIndices(blockMaterials)
#snowIndices = self.getMaterialIndices(blockMaterials)
arrays = []
append = arrays.append
yield
for direction, exposedFaceIndices in enumerate(facingBlockIndices):
if direction != pymclevel.faces.FaceYIncreasing:
blockIndices = materialIndices & exposedFaceIndices
else:
blockIndices = materialIndices
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], 0)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= lights
if direction == pymclevel.faces.FaceYIncreasing:
vertexArray[_XYZ][..., 1] -= 0.875
if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
vertexArray[_XYZ][..., 2:4, 1] -= 0.875
vertexArray[_ST][..., 2:4, 1] += 14
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makeSnowVertices
class CarpetBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:carpet"].ID, mats["minecraft:waterlily"].ID] #Separate before implementing layers
def makeCarpetVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
materialIndices = self.getMaterialIndices(blockMaterials)
#snowIndices = self.getMaterialIndices(blockMaterials)
arrays = []
append = arrays.append
yield
for direction, exposedFaceIndices in enumerate(facingBlockIndices):
if direction != pymclevel.faces.FaceYIncreasing:
blockIndices = materialIndices & exposedFaceIndices
else:
blockIndices = materialIndices
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], 0)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= lights
if direction == pymclevel.faces.FaceYIncreasing:
vertexArray[_XYZ][..., 1] -= 0.937
if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
vertexArray[_XYZ][..., 2:4, 1] -= 0.937
vertexArray[_ST][..., 2:4, 1] += 15
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makeCarpetVertices
class CactusBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:cactus"].ID]
def makeCactusVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
materialIndices = self.getMaterialIndices(blockMaterials)
arrays = []
append = arrays.append
yield
for direction, exposedFaceIndices in enumerate(facingBlockIndices):
blockIndices = materialIndices
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], direction)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= lights
if direction == pymclevel.faces.FaceXIncreasing:
vertexArray[_XYZ][..., 0] -= 0.063
if direction == pymclevel.faces.FaceXDecreasing:
vertexArray[_XYZ][..., 0] += 0.063
if direction == pymclevel.faces.FaceZIncreasing:
vertexArray[_XYZ][..., 2] -= 0.063
if direction == pymclevel.faces.FaceZDecreasing:
vertexArray[_XYZ][..., 2] += 0.063
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makeCactusVertices
class PaneBlockRenderer(BlockRenderer): # Basic zero-thickness panes; add more faces to widen.
@classmethod
def getBlocktypes(cls, mats):
return [block.ID for block in mats.blocksByType["SOLID_PANE"]]
def makePaneVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
materialIndices = self.getMaterialIndices(blockMaterials)
arrays = []
append = arrays.append
yield
for direction, exposedFaceIndices in enumerate(facingBlockIndices):
blockIndices = materialIndices
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], direction)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= lights
if direction == pymclevel.faces.FaceXIncreasing:
vertexArray[_XYZ][..., 0] -= 0.5
if direction == pymclevel.faces.FaceXDecreasing:
vertexArray[_XYZ][..., 0] += 0.5
if direction == pymclevel.faces.FaceZIncreasing:
vertexArray[_XYZ][..., 2] -= 0.5
if direction == pymclevel.faces.FaceZDecreasing:
vertexArray[_XYZ][..., 2] += 0.5
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makePaneVertices
class PlateBlockRenderer(BlockRenderer): # suggestions to make this the proper shape are appreciated
@classmethod
def getBlocktypes(cls, mats):
return [block.ID for block in mats.blocksByType["PRESSURE_PLATE"]]
def makePlateVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
materialIndices = self.getMaterialIndices(blockMaterials)
arrays = []
append = arrays.append
yield
for direction, exposedFaceIndices in enumerate(facingBlockIndices):
blockIndices = materialIndices
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], 0)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= lights
if direction == pymclevel.faces.FaceYIncreasing:
vertexArray[_XYZ][..., 1] -= 0.937
if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
vertexArray[_XYZ][..., 2:4, 1] -= 0.937
vertexArray[_ST][..., 2:4, 1] += 15
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makePlateVertices
class EnchantingBlockRenderer(BlockRenderer): # Note: the End Portal Frame side sprite has been lowered 1 pixel to use this renderer; the eye will need a separate renderer.
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:enchanting_table"].ID, mats["minecraft:end_portal_frame"].ID]
def makeEnchantingVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
materialIndices = self.getMaterialIndices(blockMaterials)
arrays = []
append = arrays.append
yield
for direction, exposedFaceIndices in enumerate(facingBlockIndices):
if direction != pymclevel.faces.FaceYIncreasing:
blockIndices = materialIndices & exposedFaceIndices
else:
blockIndices = materialIndices
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], direction)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= lights
if direction == pymclevel.faces.FaceYIncreasing:
vertexArray[_XYZ][..., 1] -= 0.25
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makeEnchantingVertices
class DaylightBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:daylight_detector"].ID, mats.DaylightSensorOn.ID]
def makeDaylightVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
materialIndices = self.getMaterialIndices(blockMaterials)
arrays = []
append = arrays.append
yield
for direction, exposedFaceIndices in enumerate(facingBlockIndices):
if direction != pymclevel.faces.FaceYIncreasing:
blockIndices = materialIndices & exposedFaceIndices
else:
blockIndices = materialIndices
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], direction)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= lights
if direction == pymclevel.faces.FaceYIncreasing:
vertexArray[_XYZ][..., 1] -= 0.625
if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
vertexArray[_XYZ][..., 2:4, 1] -= 0.625
vertexArray[_ST][..., 2:4, 1] += 10
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makeDaylightVertices
class BedBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:bed"].ID]
def makeBedVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
materialIndices = self.getMaterialIndices(blockMaterials)
arrays = []
append = arrays.append
yield
for direction, exposedFaceIndices in enumerate(facingBlockIndices):
if direction != pymclevel.faces.FaceYIncreasing:
blockIndices = materialIndices & exposedFaceIndices
else:
blockIndices = materialIndices
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], direction)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= lights
if direction == pymclevel.faces.FaceYIncreasing:
vertexArray[_XYZ][..., 1] -= 0.438
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makeBedVertices
class CakeBlockRenderer(BlockRenderer): #Only shows whole cakes
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:cake"].ID]
def makeCakeVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
materialIndices = self.getMaterialIndices(blockMaterials)
arrays = []
append = arrays.append
yield
for direction, exposedFaceIndices in enumerate(facingBlockIndices):
blockIndices = materialIndices
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], direction)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= lights
if direction == pymclevel.faces.FaceYIncreasing:
vertexArray[_XYZ][..., 1] -= 0.5
if direction == pymclevel.faces.FaceXIncreasing:
vertexArray[_XYZ][..., 0] -= 0.063
if direction == pymclevel.faces.FaceXDecreasing:
vertexArray[_XYZ][..., 0] += 0.063
if direction == pymclevel.faces.FaceZIncreasing:
vertexArray[_XYZ][..., 2] -= 0.063
if direction == pymclevel.faces.FaceZDecreasing:
vertexArray[_XYZ][..., 2] += 0.063
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makeCakeVertices
class RepeaterBlockRenderer(BlockRenderer): #Sticks would be nice
@classmethod
def getBlocktypes(cls, mats):
return [block.ID for block in mats.blocksByType["THINSLICE"]]
def makeRepeaterVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
materialIndices = self.getMaterialIndices(blockMaterials)
arrays = []
append = arrays.append
yield
for direction, exposedFaceIndices in enumerate(facingBlockIndices):
blockIndices = materialIndices
facingBlockLight = areaBlockLights[self.directionOffsets[direction]]
lights = facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
continue
vertexArray[_ST] += texMap(blocks[blockIndices], blockData[blockIndices], direction)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= lights
if direction == pymclevel.faces.FaceYIncreasing:
vertexArray[_XYZ][..., 1] -= 0.875
if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
vertexArray[_XYZ][..., 2:4, 1] -= 0.875
vertexArray[_ST][..., 2:4, 1] += 14
append(vertexArray)
yield
self.vertexArrays = arrays
makeVertices = makeRepeaterVertices
class RedstoneBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [mats["minecraft:redstone_wire"].ID]
def redstoneVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
blockIndices = self.getMaterialIndices(blockMaterials)
yield
vertexArray = self.makeTemplate(pymclevel.faces.FaceYIncreasing, blockIndices)
if not len(vertexArray):
return
vertexArray[_ST] += self.materials.blockTextures[55, 0, 0]
vertexArray[_XYZ][..., 1] -= 0.9
bdata = blockData[blockIndices]
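# Signal strength (0-15) is packed into the red channel below (a reading of
# the bit-twiddling; undocumented in the source): << 3 scales it to 0-120,
# |= 0x80 keeps any powered wire visibly bright, and the final multiply by
# [1, 0, 0] zeroes green and blue so wire brightness tracks signal level in
# pure red.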
bdata <<= 3
# bdata &= 0xe0
bdata[bdata > 0] |= 0x80
vertexArray.view('uint8')[_RGBA][..., 0] = bdata[..., numpy.newaxis]
vertexArray.view('uint8')[_RGBA][..., 0:3] = vertexArray.view('uint8')[_RGBA][..., 0:3] * [1, 0, 0]
yield
self.vertexArrays = [vertexArray]
makeVertices = redstoneVertices
# button, floor plate, door -> 1-cube features
class DoorRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
cls.blocktypes = [block.ID for block in mats.blocksByType["DOOR"]]
return cls.blocktypes
doorTemplate = makeVertexTemplatesFromJsonModel(
(0, 0, 0), (3, 16, 16),
{
"down": (13, 0, 16, 16),
# TODO handle faces that should not appear
"up": (13, 0, 16, 16),
"north": (3, 0, 0, 16),
"south": (0, 0, 3, 16),
"west": (0, 0, 16, 16),
"east": (16, 0, 0, 16)
}
)
doorRHTemplate = makeVertexTemplatesFromJsonModel(
(0, 0, 0), (3, 16, 16),
{
"down": (13, 0, 16, 16),
# TODO handle faces that should not appear
"up": (13, 0, 16, 16),
"north": (3, 0, 0, 16),
"south": (0, 0, 3, 16),
"west": (16, 0, 0, 16),
"east": (0, 0, 16, 16)
}
)
doorTemplates = numpy.array([
# lower hinge left
doorTemplate,
rotateTemplate(doorTemplate, y=90),
rotateTemplate(doorTemplate, y=180),
rotateTemplate(doorTemplate, y=270),
rotateTemplate(doorRHTemplate, y=90),
rotateTemplate(doorRHTemplate, y=180),
rotateTemplate(doorRHTemplate, y=270),
doorRHTemplate,
# upper hinge left
doorTemplate,
rotateTemplate(doorTemplate, y=90),
rotateTemplate(doorTemplate, y=180),
rotateTemplate(doorTemplate, y=270),
rotateTemplate(doorRHTemplate, y=90),
rotateTemplate(doorRHTemplate, y=180),
rotateTemplate(doorRHTemplate, y=270),
doorRHTemplate,
# lower hinge right
doorRHTemplate,
rotateTemplate(doorRHTemplate, y=90),
rotateTemplate(doorRHTemplate, y=180),
rotateTemplate(doorRHTemplate, y=270),
rotateTemplate(doorTemplate, y=270),
doorTemplate,
rotateTemplate(doorTemplate, y=90),
rotateTemplate(doorTemplate, y=180),
# upper hinge right
doorRHTemplate,
rotateTemplate(doorRHTemplate, y=90),
rotateTemplate(doorRHTemplate, y=180),
rotateTemplate(doorRHTemplate, y=270),
rotateTemplate(doorTemplate, y=270),
doorTemplate,
rotateTemplate(doorTemplate, y=90),
rotateTemplate(doorTemplate, y=180),
])
makeVertices = makeVerticesFromModel(doorTemplates, 31)
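# The mask 31 passed above is presumably applied to blockData so that
# data & 31 indexes the 32-entry doorTemplates table: four groups of eight
# orientations covering lower/upper door halves with left/right hinges, each
# open state prebuilt as a y-rotation of the two base templates.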
class ButtonRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [a.ID for a in mats.blocksByType["BUTTON"]]
buttonTemplate = makeVertexTemplatesFromJsonModel((5, 0, 6), (11, 2, 10), {
"down": (5, 6, 11, 10),
"up": (5, 10, 11, 6),
"north": (5, 14, 11, 16),
"south": (5, 14, 11, 16),
"west": (6, 14, 10, 16),
"east": (6, 14, 10, 16)
})
buttonTemplatePressed = makeVertexTemplatesFromJsonModel((5, 0, 6), (11, 1, 10), {
"down": (5, 6, 11, 10),
"up": (5, 10, 11, 6),
"north": (5, 15, 11, 16),
"south": (5, 15, 11, 16),
"west": (6, 15, 10, 16),
"east": (6, 15, 10, 16)
})
buttonTemplates = numpy.array([
rotateTemplate(buttonTemplate, 180, 0),
rotateTemplate(buttonTemplate, 90, 90),
rotateTemplate(buttonTemplate, 90, 270),
rotateTemplate(buttonTemplate, 90, 180),
rotateTemplate(buttonTemplate, 90, 0),
buttonTemplate,
numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6)),
rotateTemplate(buttonTemplatePressed, 180, 0),
rotateTemplate(buttonTemplatePressed, 90, 90),
rotateTemplate(buttonTemplatePressed, 90, 270),
rotateTemplate(buttonTemplatePressed, 90, 180),
rotateTemplate(buttonTemplatePressed, 90, 0),
buttonTemplatePressed,
numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6)),
])
makeVertices = makeVerticesFromModel(buttonTemplates, 15)
class TrapDoorRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [a.ID for a in mats.blocksByType["TRAPDOOR"]]
openTemplate = makeVertexTemplatesFromJsonModel((0, 0, 13), (16, 16, 16), {
"down": (0, 13, 16, 16),
"up": (0, 16, 16, 13),
"north": (0, 0, 16, 16),
"south": (0, 0, 16, 16),
"west": (16, 0, 13, 16),
"east": (13, 0, 16, 16)
})
topTemplate = makeVertexTemplatesFromJsonModel((0, 13, 0), (16, 16, 16), {
"down": (0, 0, 16, 16),
"up": (0, 0, 16, 16),
"north": (0, 16, 16, 13),
"south": (0, 16, 16, 13),
"west": (0, 16, 16, 13),
"east": (0, 16, 16, 13)
})
bottomTemplate = makeVertexTemplatesFromJsonModel((0, 0, 0), (16, 3, 16), {
"down": (0, 0, 16, 16),
"up": (0, 0, 16, 16),
"north": (0, 16, 16, 13),
"south": (0, 16, 16, 13),
"west": (0, 16, 16, 13),
"east": (0, 16, 16, 13)
})
trapDoorTemplates = numpy.array([
bottomTemplate,
bottomTemplate,
bottomTemplate,
bottomTemplate,
openTemplate,
rotateTemplate(openTemplate, y=180),
rotateTemplate(openTemplate, y=270),
rotateTemplate(openTemplate, y=90),
topTemplate,
topTemplate,
topTemplate,
topTemplate,
openTemplate,
rotateTemplate(openTemplate, y=180),
rotateTemplate(openTemplate, y=270),
rotateTemplate(openTemplate, y=90),
])
makeVertices = makeVerticesFromModel(trapDoorTemplates, 15)
class FenceBlockRenderer(BlockRenderer):
# def __init__(self, *args, **kwargs):
# BlockRenderer.__init__(self, *args, **kwargs)
# self.blocktypes = [block.ID for block in self.materials.blocksByType["FENCE"]]
fenceTemplates = makeVertexTemplates(3 / 8., 0, 3 / 8., 5 / 8., 1, 5 / 8.)
makeVertices = makeVerticesFromModel(fenceTemplates)
@classmethod
def getBlocktypes(cls, mats):
# if mats.name == "Pocket":
# cls.blocktypes = cls.blocktypes_pocket
# else:
# cls.blocktypes = cls.blocktypes_alpha
# return cls.blocktypes
return [block.ID for block in mats.blocksByType["FENCE"]]
class FenceGateBlockRenderer(BlockRenderer):
closedFenceTemplates = numpy.array([
makeVertexTemplates(0, 0, 3 / 8., 1, .8, 5 / 8.),
makeVertexTemplates(3 / 8., 0, 0, 5 / 8., .8, 1)])
openFenceTemplates = numpy.array([
[makeVertexTemplates(0, 0, 3 / 8., 1 / 8., .8, 1),
makeVertexTemplates(7 / 8., 0, 3 / 8., 1, .8, 1)],
[makeVertexTemplates(0, 0, 0, 5 / 8., .8, 1 / 8.),
makeVertexTemplates(0, 0, 7 / 8., 5 / 8., .8, 1)],
[makeVertexTemplates(0, 0, 0, 1 / 8., .8, 5 / 8.),
makeVertexTemplates(7 / 8., 0, 0, 1, .8, 5 / 8.)],
[makeVertexTemplates(3 / 8., 0, 0, 1, .8, 1 / 8.),
makeVertexTemplates(3 / 8., 0, 7 / 8., 1, .8, 1)]])
@classmethod
def getBlocktypes(cls, mats):
# The original returned mats.AllStairs here, an apparent copy-paste from
# StairBlockRenderer; look fence gates up by block type instead (assuming the
# materials tables expose a "FENCE_GATE" type like the other types used here).
return [block.ID for block in mats.blocksByType["FENCE_GATE"]]
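# Data layout assumed by fenceGateVertices below (matching its bit tests on
# blockData): bit 0x4 distinguishes open gates from closed ones, the low bit
# picks one of the two closed orientations, and the low two bits pick one of
# the four open orientations.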
def fenceGateVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
fenceMask = self.getMaterialIndices(blockMaterials)
closedGateMask = fenceMask.copy()
closedGateMask[blockData & 4 == 4] = 0
openGateMask = fenceMask.copy()
openGateMask[blockData & 4 == 0] = 0
closedGateIndices = closedGateMask.nonzero()
openGateIndices = openGateMask.nonzero()
closedGateData = blockData[closedGateMask]
closedGateData &= 1
openGateData = blockData[openGateMask]
openGateData &= 3
yield
# closed gate
vertexArray = numpy.zeros((len(closedGateIndices[0]), 6, 4, 6), dtype='float32')
for indices in xrange(3):
dimension = (0, 2, 1)[indices]
vertexArray[..., indices] = closedGateIndices[dimension][:, numpy.newaxis, numpy.newaxis] # xxx swap z with y using ^
vertexArray[..., 0:5] += self.closedFenceTemplates[closedGateData][..., 0:5]
vertexArray[_ST] += texMap(blocks[closedGateIndices], 0)[..., numpy.newaxis, :]
vertexArray.view('uint8')[_RGB] = self.closedFenceTemplates[closedGateData][..., 5][..., numpy.newaxis]
vertexArray.view('uint8')[_A] = 0xFF
vertexArray.view('uint8')[_RGB] *= areaBlockLights[1:-1, 1:-1, 1:-1][closedGateIndices][
..., numpy.newaxis, numpy.newaxis, numpy.newaxis]
vertexArray.shape = (vertexArray.shape[0] * 6, 4, 6)
yield
self.vertexArrays = [vertexArray]
append = self.vertexArrays.append
# open gate
for i in xrange(2):
vertexArray = numpy.zeros((len(openGateIndices[0]), 6, 4, 6), dtype='float32')
for indices in xrange(3):
dimension = (0, 2, 1)[indices]
vertexArray[..., indices] = openGateIndices[dimension][:, numpy.newaxis, numpy.newaxis] # xxx swap z with y using ^
vertexArray[..., 0:5] += self.openFenceTemplates[openGateData, i][..., 0:5]
vertexArray[_ST] += texMap(blocks[openGateIndices], 0)[..., numpy.newaxis, :]
vertexArray.view('uint8')[_RGB] = self.openFenceTemplates[openGateData, i][..., 5][..., numpy.newaxis]
vertexArray.view('uint8')[_A] = 0xFF
vertexArray.view('uint8')[_RGB] *= areaBlockLights[1:-1, 1:-1, 1:-1][openGateIndices][
..., numpy.newaxis, numpy.newaxis, numpy.newaxis]
vertexArray.shape = (vertexArray.shape[0] * 6, 4, 6)
yield
append(vertexArray)
makeVertices = fenceGateVertices
class StairBlockRenderer(BlockRenderer):
@classmethod
def getBlocktypes(cls, mats):
return [a.ID for a in mats.AllStairs]
# South - FaceXIncreasing
# North - FaceXDecreasing
# West - FaceZIncreasing
# East - FaceZDecreasing
stairTemplates = numpy.array([makeVertexTemplates(**kw) for kw in [
# South - FaceXIncreasing
{"xmin": 0.5},
# North - FaceXDecreasing
{"xmax": 0.5},
# West - FaceZIncreasing
{"zmin": 0.5},
# East - FaceZDecreasing
{"zmax": 0.5},
# Slabtype
{"ymax": 0.5},
]
])
def stairVertices(self, facingBlockIndices, blocks, blockMaterials, blockData, areaBlockLights, texMap):
arrays = []
append = arrays.append
materialIndices = self.getMaterialIndices(blockMaterials)
yield
stairBlocks = blocks[materialIndices]
stairData = blockData[materialIndices]
stairTop = (stairData >> 2).astype(bool)
stairData &= 3
x, z, y = materialIndices.nonzero()
for _ in ("slab", "step"):
vertexArray = numpy.zeros((len(x), 6, 4, 6), dtype='float32')
for i in xrange(3):
vertexArray[_XYZ][..., i] = (x, y, z)[i][:, numpy.newaxis, numpy.newaxis]
if _ == "step":
vertexArray[_XYZST] += self.stairTemplates[4][..., :5]
vertexArray[_XYZ][..., 1][stairTop] += 0.5
else:
vertexArray[_XYZST] += self.stairTemplates[stairData][..., :5]
vertexArray[_ST] += texMap(stairBlocks, 0)[..., numpy.newaxis, :]
vertexArray.view('uint8')[_RGB] = self.stairTemplates[4][numpy.newaxis, ..., 5, numpy.newaxis]
vertexArray.view('uint8')[_RGB] *= 0xf
vertexArray.view('uint8')[_A] = 0xff
vertexArray.shape = (len(x) * 6, 4, 6)
yield
append(vertexArray)
self.vertexArrays = arrays
makeVertices = stairVertices
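# stairVertices composes each stair from two boxes in one pass (note the
# branch names read swapped relative to the geometry they draw): the "step"
# branch draws the horizontal half-slab (stairTemplates[4], raised 0.5 when
# data bit 0x4 marks an upside-down stair), while the "slab" branch draws the
# full-height riser picked from the four facing templates by data & 3.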
class VineBlockRenderer(BlockRenderer):
SouthBit = 1 #FaceZIncreasing
WestBit = 2 #FaceXDecreasing
NorthBit = 4 #FaceZDecreasing
EastBit = 8 #FaceXIncreasing
renderstate = ChunkCalculator.renderstateVines
def __init__(self, *args, **kwargs):
BlockRenderer.__init__(self, *args, **kwargs)
self.blocktypes = [self.materials["minecraft:vine"].ID]
def vineFaceVertices(self, direction, blockIndices, exposedFaceIndices, blocks, blockData, blockLight,
facingBlockLight, texMap):
bdata = blockData[blockIndices]
blockIndices = numpy.array(blockIndices)
if direction == pymclevel.faces.FaceZIncreasing:
blockIndices[blockIndices] = (bdata & 1).astype(bool)
elif direction == pymclevel.faces.FaceXDecreasing:
blockIndices[blockIndices] = (bdata & 2).astype(bool)
elif direction == pymclevel.faces.FaceZDecreasing:
blockIndices[blockIndices] = (bdata & 4).astype(bool)
elif direction == pymclevel.faces.FaceXIncreasing:
blockIndices[blockIndices] = (bdata & 8).astype(bool)
else:
return []
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
return vertexArray
vertexArray[_ST] += texMap(self.blocktypes[0], [0], direction)[:, numpy.newaxis, 0:2]
lights = blockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
vertexArray.view('uint8')[_RGB] *= lights
vertexArray.view('uint8')[_RGB] = vertexArray.view('uint8')[_RGB].astype(float) * LeafBlockRenderer.leafColor
if direction == pymclevel.faces.FaceZIncreasing:
vertexArray[_XYZ][..., 2] -= 0.0625
if direction == pymclevel.faces.FaceXDecreasing:
vertexArray[_XYZ][..., 0] += 0.0625
if direction == pymclevel.faces.FaceZDecreasing:
vertexArray[_XYZ][..., 2] += 0.0625
if direction == pymclevel.faces.FaceXIncreasing:
vertexArray[_XYZ][..., 0] -= 0.0625
return vertexArray
makeFaceVertices = vineFaceVertices
class SlabBlockRenderer(BlockRenderer):
def __init__(self, *args, **kwargs):
BlockRenderer.__init__(self, *args, **kwargs)
materials = self.materials
# self.blocktypes = [materials["minecraft:wooden_slab"].ID,
# materials["minecraft:stone_slab"].ID,
# materials["minecraft:stone_slab2"].ID,
# materials["minecraft:purpur_slab"].ID]
# print "self.blocktypes", self.blocktypes
# print "self.materials.AllSlabs", list(set(a.ID for a in self.materials.AllSlabs if "double" not in a.name.lower()))
# print list(set(a for a in self.materials.AllSlabs if "double" not in a.name.lower()))
self.blocktypes = list(set(a.ID for a in materials.AllSlabs if "double" not in a.name.lower()))
def slabFaceVertices(self, direction, blockIndices, facingBlockLight, blocks, blockData, blockLight,
areaBlockLights, texMap):
lights = areaBlockLights[blockIndices][..., numpy.newaxis, numpy.newaxis]
bdata = blockData[blockIndices]
top = (bdata >> 3).astype(bool)
bdata &= 7
vertexArray = self.makeTemplate(direction, blockIndices)
if not len(vertexArray):
return vertexArray
vertexArray[_ST] += texMap(blocks[blockIndices], bdata, direction)[:, numpy.newaxis, 0:2]
vertexArray.view('uint8')[_RGB] *= lights
if direction == pymclevel.faces.FaceYIncreasing:
vertexArray[_XYZ][..., 1] -= 0.5
if direction != pymclevel.faces.FaceYIncreasing and direction != pymclevel.faces.FaceYDecreasing:
vertexArray[_XYZ][..., 2:4, 1] -= 0.5
vertexArray[_ST][..., 2:4, 1] += 8
vertexArray[_XYZ][..., 1][top] += 0.5
return vertexArray
makeFaceVertices = slabFaceVertices
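# Slab data layout used above: bit 0x8 marks a top-half slab (the geometry is
# built as a bottom slab, then shifted up 0.5), while the remaining low three
# bits select the material variant fed to texMap.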
# 1.9 renderers
class EndRodRenderer(BlockRenderer):
def __init__(self, *args, **kwargs):
BlockRenderer.__init__(self, *args, **kwargs)
self.blocktypes = [self.materials["minecraft:end_rod"].ID]
rodTemplate = makeVertexTemplatesFromJsonModel((7, 1, 7), (9, 16, 9), {
"down": (4, 2, 2, 0),
"up": (2, 0, 4, 2),
"north": (0, 0, 2, 15),
"south": (0, 0, 2, 15),
"west": (0, 0, 2, 15),
"east": (0, 0, 2, 15)
})
rodTemplates = numpy.array([
rotateTemplate(rodTemplate, x=180),
rodTemplate,
rotateTemplate(rodTemplate, x=90),
rotateTemplate(rodTemplate, y=180, x=90),
rotateTemplate(rodTemplate, y=270, x=90),
rotateTemplate(rodTemplate, y=90, x=90),
numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
])
handleTemplate = makeVertexTemplatesFromJsonModel((6, 0, 6), (10, 1, 10), {
"down": (6, 6, 2, 2),
"up": (2, 2, 6, 6),
"north": (2, 6, 6, 7),
"south": (2, 6, 6, 7),
"west": (2, 6, 6, 7),
"east": (2, 6, 6, 7)
})
handleTemplates = numpy.array([
rotateTemplate(handleTemplate, x=180),
handleTemplate,
rotateTemplate(handleTemplate, x=90),
rotateTemplate(handleTemplate, y=180, x=90),
rotateTemplate(handleTemplate, y=270, x=90),
rotateTemplate(handleTemplate, y=90, x=90),
numpy.zeros((6, 4, 6)), numpy.zeros((6, 4, 6))
])
makeVertices = makeVerticesFromModel([rodTemplates, handleTemplates], 7)
class WaterBlockRenderer(BlockRenderer):
renderstate = ChunkCalculator.renderstateWater
def __init__(self, *args, **kwargs):
BlockRenderer.__init__(self, *args, **kwargs)
materials = self.materials
self.waterID = materials["minecraft:water"].ID
self.blocktypes = [materials["minecraft:flowing_water"].ID, self.waterID]
@classmethod
def getBlocktypes(cls, mats):
cls.waterID = mats["minecraft:water"].ID
return [mats["minecraft:flowing_water"].ID, cls.waterID]
def waterFaceVertices(self, direction, blockIndices, exposedFaceIndices, blocks, blockData, blockLight,
facingBlockLight, texMap):
blockIndices = blockIndices & exposedFaceIndices
vertexArray = self.makeTemplate(direction, blockIndices)
vertexArray[_ST] += texMap(self.waterID, 0, 0)[numpy.newaxis, numpy.newaxis]
vertexArray.view('uint8')[_RGB] *= facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
return vertexArray
makeFaceVertices = waterFaceVertices
class IceBlockRenderer(BlockRenderer):
renderstate = ChunkCalculator.renderstateIce
@classmethod
def getBlocktypes(cls, mats):
cls.iceID = mats["minecraft:ice"].ID
return [cls.iceID]
def iceFaceVertices(self, direction, blockIndices, exposedFaceIndices, blocks, blockData, blockLight,
facingBlockLight, texMap):
blockIndices = blockIndices & exposedFaceIndices
vertexArray = self.makeTemplate(direction, blockIndices)
vertexArray[_ST] += texMap(self.iceID, 0, 0)[numpy.newaxis, numpy.newaxis]
vertexArray.view('uint8')[_RGB] *= facingBlockLight[blockIndices][..., numpy.newaxis, numpy.newaxis]
return vertexArray
makeFaceVertices = iceFaceVertices
from glutils import DisplayList
class MCRenderer(object):
isPreviewer = False
def __init__(self, level=None, alpha=1.0):
self.render = True
self.origin = (0, 0, 0)
self.rotation = 0
self.bufferUsage = 0
self.invalidChunkQueue = deque()
self._chunkWorker = None
self.chunkRenderers = {}
self.loadableChunkMarkers = DisplayList()
self.visibleLayers = set(Layer.AllLayers)
self.masterLists = None
alpha *= 255
self.alpha = (int(alpha) & 0xff)
self.chunkStartTime = datetime.now()
self.oldChunkStartTime = self.chunkStartTime
self.oldPosition = None
self.chunkSamples = [timedelta(0, 0, 0)] * 15
self.chunkIterator = None
config.settings.fastLeaves.addObserver(self)
config.settings.roughGraphics.addObserver(self)
config.settings.showHiddenOres.addObserver(self)
config.settings.vertexBufferLimit.addObserver(self)
config.settings.drawEntities.addObserver(self)
config.settings.drawTileEntities.addObserver(self)
config.settings.drawTileTicks.addObserver(self)
config.settings.drawUnpopulatedChunks.addObserver(self, "drawTerrainPopulated")
config.settings.drawChunkBorders.addObserver(self, "drawChunkBorder")
config.settings.drawMonsters.addObserver(self)
config.settings.drawItems.addObserver(self)
config.settings.showChunkRedraw.addObserver(self, "showRedraw")
config.settings.spaceHeight.addObserver(self)
config.settings.targetFPS.addObserver(self, "targetFPS")
for ore in config.settings.hiddableOres.get():
config.settings["showOre{}".format(ore)].addObserver(self, callback=lambda x, id=ore: self.showOre(id, x))
self.level = level
if self.level.__class__.__name__ in ("FakeLevel", "MCSchematic"):
self.toggleLayer(False, 'ChunkBorder')
chunkClass = ChunkRenderer
calculatorClass = ChunkCalculator
minViewDistance = 2
_viewDistance = 8
needsRedraw = True
def toggleLayer(self, val, layer):
if val:
self.visibleLayers.add(layer)
else:
self.visibleLayers.discard(layer)
for cr in self.chunkRenderers.itervalues():
cr.invalidLayers.add(layer)
self.loadNearbyChunks()
def layerProperty(layer, default=True): # @NoSelf
attr = intern("_draw" + layer)
def _get(self):
return getattr(self, attr, default)
def _set(self, val):
if val != _get(self):
setattr(self, attr, val)
self.toggleLayer(val, layer)
return property(_get, _set)
drawEntities = layerProperty(Layer.Entities)
drawTileEntities = layerProperty(Layer.TileEntities)
drawTileTicks = layerProperty(Layer.TileTicks)
drawMonsters = layerProperty(Layer.Monsters)
drawItems = layerProperty(Layer.Items)
drawTerrainPopulated = layerProperty(Layer.TerrainPopulated)
drawChunkBorder = layerProperty(Layer.ChunkBorder)
def inSpace(self):
if self.level is None:
return True
h = self.position[1]
if self.level.dimNo == 1:
_2478aq_heot(h)
return ((h > self.level.Height + self.spaceHeight) or
(h <= -self.spaceHeight))
def chunkDistance(self, cpos):
camx, camy, camz = self.position
# if the renderer is offset into the world somewhere, adjust for that
ox, oy, oz = self.origin
camx -= ox
camz -= oz
camcx = int(numpy.floor(camx)) >> 4
camcz = int(numpy.floor(camz)) >> 4
cx, cz = cpos
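        # Chebyshev (chessboard) distance, in chunks, from the camera to cpos.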
return max(abs(cx - camcx), abs(cz - camcz))
overheadMode = False
def detailLevelForChunk(self, cpos):
if self.overheadMode:
return 2
if self.isPreviewer:
w, l, h = self.level.bounds.size
if w + l < 256:
return 0
distance = self.chunkDistance(cpos) - self.viewDistance
if distance > 0 or self.inSpace():
return 1
return 0
def getViewDistance(self):
return self._viewDistance
def setViewDistance(self, vd):
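        # Mask off the low bit so the view distance is always an even number
        # of chunks, then clamp it to the allowed range.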
vd = int(vd) & 0xfffe
vd = min(max(vd, self.minViewDistance), config.settings.maxViewDistance.get())
if vd != self._viewDistance:
self._viewDistance = vd
self.viewDistanceChanged()
viewDistance = property(getViewDistance, setViewDistance, None, "View Distance")
@property
def effectiveViewDistance(self):
if self.inSpace():
return self.viewDistance * 4
else:
return self.viewDistance * 2
def viewDistanceChanged(self):
self.oldPosition = None # xxx
self.discardMasterList()
self.loadNearbyChunks()
self.discardChunksOutsideViewDistance()
maxWorkFactor = 64
minWorkFactor = 1
workFactor = 2
chunkCalculator = None
_level = None
@property
def level(self):
return self._level
@level.setter
def level(self, level):
""" this probably warrants creating a new renderer """
self.stopWork()
self._level = level
self.oldPosition = None
self.position = (0, 0, 0)
self.chunkCalculator = None
self.invalidChunkQueue = deque()
self.discardAllChunks()
self.loadableChunkMarkers.invalidate()
if level:
self.chunkCalculator = self.calculatorClass(self.level)
self.oldPosition = None
self.loadNearbyChunks()
position = (0, 0, 0)
def loadChunksStartingFrom(self, wx, wz, distance=None): # world position
if None is self.level:
return
if self.level.saving:
return
if distance is None:
d = self.effectiveViewDistance
else:
d = distance
self.chunkIterator = self.iterateChunks(wx, wz, d * 2)
def iterateChunks(self, x, z, d):
cx = x >> 4
cz = z >> 4
yield (cx, cz)
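        # Expanding square spiral around the start chunk: the leg length grows
        # by one every two legs, so chunks nearest the camera come first.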
step = dir = 1
while True:
for i in xrange(step):
cx += dir
yield (cx, cz)
for i in xrange(step):
cz += dir
yield (cx, cz)
step += 1
if step > d and not self.overheadMode:
raise StopIteration
dir = -dir
chunkIterator = None
@property
def chunkWorker(self):
if self._chunkWorker is None:
self._chunkWorker = self.makeWorkIterator()
return self._chunkWorker
def stopWork(self):
self._chunkWorker = None
def discardAllChunks(self):
self.bufferUsage = 0
self.forgetAllDisplayLists()
self.chunkRenderers = {}
self.oldPosition = None # xxx force reload
def discardChunksInBox(self, box):
self.discardChunks(box.chunkPositions)
def discardChunksOutsideViewDistance(self):
if self.overheadMode:
return
# print "discardChunksOutsideViewDistance"
d = self.effectiveViewDistance
cx = (self.position[0] - self.origin[0]) / 16
cz = (self.position[2] - self.origin[2]) / 16
origin = (cx - d, cz - d)
size = d * 2
if not len(self.chunkRenderers):
return
(ox, oz) = origin
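        # Gather the (cx, cz) keys into a structured array, then reinterpret
        # it as a plain (N, 2) int32 array for the vectorised bounds tests.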
chunks = numpy.fromiter(self.chunkRenderers.iterkeys(), dtype='i,i', count=len(self.chunkRenderers))
chunks.dtype = 'int32'
chunks.shape = len(self.chunkRenderers), 2
if size:
outsideChunks = chunks[:, 0] < ox - 1
outsideChunks |= chunks[:, 0] > ox + size
outsideChunks |= chunks[:, 1] < oz - 1
outsideChunks |= chunks[:, 1] > oz + size
chunks = chunks[outsideChunks]
self.discardChunks(chunks)
def discardChunks(self, chunks):
for cx, cz in chunks:
self.discardChunk(cx, cz)
self.oldPosition = None # xxx force reload
def discardChunk(self, cx, cz):
" discards the chunk renderer for this chunk and compresses the chunk "
if (cx, cz) in self.chunkRenderers:
self.bufferUsage -= self.chunkRenderers[cx, cz].bufferSize
self.chunkRenderers[cx, cz].forgetDisplayLists()
del self.chunkRenderers[cx, cz]
_fastLeaves = False
@property
def fastLeaves(self):
return self._fastLeaves
@fastLeaves.setter
def fastLeaves(self, val):
if self._fastLeaves != bool(val):
self.discardAllChunks()
self._fastLeaves = bool(val)
_roughGraphics = False
@property
def roughGraphics(self):
return self._roughGraphics
@roughGraphics.setter
def roughGraphics(self, val):
if self._roughGraphics != bool(val):
self.discardAllChunks()
self._roughGraphics = bool(val)
_showHiddenOres = False
@property
def showHiddenOres(self):
return self._showHiddenOres
@showHiddenOres.setter
def showHiddenOres(self, val):
if self._showHiddenOres != bool(val):
self.discardAllChunks()
self._showHiddenOres = bool(val)
def showOre(self, ore, show):
ChunkCalculator.hiddenOreMaterials[ore] = ore if show else 1
if self.showHiddenOres:
self.discardAllChunks()
def invalidateChunk(self, cx, cz, layers=None):
" marks the chunk for regenerating vertex data and display lists "
if (cx, cz) in self.chunkRenderers:
self.chunkRenderers[(cx, cz)].invalidate(layers)
self.invalidChunkQueue.append((cx, cz)) # xxx encapsulate
def invalidateChunksInBox(self, box, layers=None):
# If the box is at the edge of any chunks, expanding by 1 makes sure the neighboring chunk gets redrawn.
box = box.expand(1)
self.invalidateChunks(box.chunkPositions, layers)
def invalidateEntitiesInBox(self, box):
self.invalidateChunks(box.chunkPositions, [Layer.Entities])
def invalidateTileTicksInBox(self, box):
self.invalidateChunks(box.chunkPositions, [Layer.TileTicks])
def invalidateChunks(self, chunks, layers=None):
for (cx, cz) in chunks:
self.invalidateChunk(cx, cz, layers)
self.stopWork()
self.discardMasterList()
self.loadNearbyChunks()
def invalidateAllChunks(self, layers=None):
self.invalidateChunks(self.chunkRenderers.iterkeys(), layers)
def forgetAllDisplayLists(self):
for cr in self.chunkRenderers.itervalues():
cr.forgetDisplayLists()
def invalidateMasterList(self):
self.discardMasterList()
shouldRecreateMasterList = True
def discardMasterList(self):
self.shouldRecreateMasterList = True
@property
def shouldDrawAll(self):
box = self.level.bounds
return self.isPreviewer and box.width + box.length < 256
distanceToChunkReload = 32.0
def cameraMovedFarEnough(self):
if self.shouldDrawAll:
return False
if self.oldPosition is None:
return True
cPos = self.position
oldPos = self.oldPosition
cameraDelta = self.distanceToChunkReload
return any([abs(x - y) > cameraDelta for x, y in zip(cPos, oldPos)])
def loadVisibleChunks(self):
""" loads nearby chunks if the camera has moved beyond a certain distance """
# print "loadVisibleChunks"
if self.cameraMovedFarEnough():
if datetime.now() - self.lastVisibleLoad > timedelta(0, 0.5):
self.discardChunksOutsideViewDistance()
self.loadNearbyChunks()
self.oldPosition = self.position
self.lastVisibleLoad = datetime.now()
lastVisibleLoad = datetime.now()
def loadNearbyChunks(self):
if None is self.level:
return
# print "loadNearbyChunks"
cameraPos = self.position
if self.shouldDrawAll:
self.loadAllChunks()
else:
# subtract self.origin to load nearby chunks correctly for preview renderers
self.loadChunksStartingFrom(int(cameraPos[0]) - self.origin[0], int(cameraPos[2]) - self.origin[2])
def loadAllChunks(self):
box = self.level.bounds
self.loadChunksStartingFrom(box.origin[0] + box.width / 2, box.origin[2] + box.length / 2,
max(box.width, box.length))
_floorTexture = None
@property
def floorTexture(self):
if self._floorTexture is None:
self._floorTexture = Texture(self.makeFloorTex)
return self._floorTexture
@staticmethod
def makeFloorTex():
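        # 2x2 checker of two translucent whites; GL_NEAREST filtering keeps
        # the pattern crisp when the chunk-marker quads are scaled up.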
color0 = (0xff, 0xff, 0xff, 0x22)
color1 = (0xff, 0xff, 0xff, 0x44)
img = numpy.array([color0, color1, color1, color0], dtype='uint8')
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER, GL.GL_NEAREST)
GL.glTexParameter(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER, GL.GL_NEAREST)
GL.glTexImage2D(GL.GL_TEXTURE_2D, 0, GL.GL_RGBA, 2, 2, 0, GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, img)
def invalidateChunkMarkers(self):
self.loadableChunkMarkers.invalidate()
def _drawLoadableChunkMarkers(self):
if self.level.chunkCount:
chunkSet = set(self.level.allChunks)
sizedChunks = chunkMarkers(chunkSet)
GL.glPushAttrib(GL.GL_FOG_BIT)
GL.glDisable(GL.GL_FOG)
GL.glEnable(GL.GL_BLEND)
GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)
GL.glPolygonOffset(DepthOffset.ChunkMarkers, DepthOffset.ChunkMarkers)
GL.glEnable(GL.GL_DEPTH_TEST)
GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
GL.glEnable(GL.GL_TEXTURE_2D)
GL.glColor(1.0, 1.0, 1.0, 1.0)
self.floorTexture.bind()
for size, chunks in sizedChunks.iteritems():
if not len(chunks):
continue
chunks = numpy.array(chunks, dtype='float32')
chunkPosition = numpy.zeros(shape=(chunks.shape[0], 4, 3), dtype='float32')
chunkPosition[:, :, (0, 2)] = numpy.array(((0, 0), (0, 1), (1, 1), (1, 0)), dtype='float32')
chunkPosition[:, :, (0, 2)] *= size
chunkPosition[:, :, (0, 2)] += chunks[:, numpy.newaxis, :]
chunkPosition *= 16
GL.glVertexPointer(3, GL.GL_FLOAT, 0, chunkPosition.ravel())
GL.glTexCoordPointer(2, GL.GL_FLOAT, 0, (chunkPosition[..., (0, 2)] * 16).ravel())
GL.glDrawArrays(GL.GL_QUADS, 0, len(chunkPosition) * 4)
GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
GL.glDisable(GL.GL_TEXTURE_2D)
GL.glDisable(GL.GL_BLEND)
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)
GL.glPopAttrib()
def drawLoadableChunkMarkers(self):
if not self.isPreviewer or isinstance(self.level, pymclevel.MCInfdevOldLevel):
self.loadableChunkMarkers.call(self._drawLoadableChunkMarkers)
needsImmediateRedraw = False
viewingFrustum = None
if "-debuglists" in sys.argv:
def createMasterLists(self):
pass
def callMasterLists(self):
for cr in self.chunkRenderers.itervalues():
cr.debugDraw()
else:
def createMasterLists(self):
if self.shouldRecreateMasterList:
lists = {}
chunkLists = defaultdict(list)
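                # Rebuild at most 80 dirty chunk display lists per frame; any
                # left over flag the master list for another rebuild pass.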
chunksPerFrame = 80
shouldRecreateAgain = False
for ch in self.chunkRenderers.itervalues():
if chunksPerFrame:
if ch.needsRedisplay:
chunksPerFrame -= 1
ch.makeDisplayLists()
else:
shouldRecreateAgain = True
if ch.renderstateLists:
for rs in ch.renderstateLists:
chunkLists[rs] += ch.renderstateLists[rs]
for rs in chunkLists:
if len(chunkLists[rs]):
lists[rs] = numpy.array(chunkLists[rs], dtype='uint32').ravel()
self.masterLists = lists
self.shouldRecreateMasterList = shouldRecreateAgain
self.needsImmediateRedraw = shouldRecreateAgain
def callMasterLists(self):
for renderstate in self.chunkCalculator.renderstates:
if renderstate not in self.masterLists:
continue
if self.alpha != 0xff and renderstate is not ChunkCalculator.renderstateLowDetail:
GL.glEnable(GL.GL_BLEND)
renderstate.bind()
GL.glCallLists(self.masterLists[renderstate])
renderstate.release()
if self.alpha != 0xff and renderstate is not ChunkCalculator.renderstateLowDetail:
GL.glDisable(GL.GL_BLEND)
errorLimit = 10
def draw(self):
self.needsRedraw = False
if not self.level:
return
if not self.chunkCalculator:
return
if not self.render:
return
if self.level.materials.name in ("Pocket", "Alpha"):
GL.glMatrixMode(GL.GL_TEXTURE)
GL.glScalef(1 / 2., 1 / 2., 1 / 2.)
with gl.glPushMatrix(GL.GL_MODELVIEW):
dx, dy, dz = self.origin
GL.glTranslate(dx, dy, dz)
GL.glEnable(GL.GL_CULL_FACE)
GL.glEnable(GL.GL_DEPTH_TEST)
self.level.materials.terrainTexture.bind()
GL.glEnable(GL.GL_TEXTURE_2D)
GL.glEnableClientState(GL.GL_TEXTURE_COORD_ARRAY)
offset = DepthOffset.PreviewRenderer if self.isPreviewer else DepthOffset.Renderer
GL.glPolygonOffset(offset, offset)
GL.glEnable(GL.GL_POLYGON_OFFSET_FILL)
self.createMasterLists()
try:
self.callMasterLists()
except GL.GLError as e:
if self.errorLimit:
self.errorLimit -= 1
traceback.print_exc()
print e
GL.glDisable(GL.GL_POLYGON_OFFSET_FILL)
GL.glDisable(GL.GL_CULL_FACE)
GL.glDisable(GL.GL_DEPTH_TEST)
GL.glDisable(GL.GL_TEXTURE_2D)
GL.glDisableClientState(GL.GL_TEXTURE_COORD_ARRAY)
self.drawLoadableChunkMarkers()
if self.level.materials.name in ("Pocket", "Alpha"):
GL.glMatrixMode(GL.GL_TEXTURE)
GL.glScalef(2., 2., 2.)
renderErrorHandled = False
def addDebugInfo(self, addDebugString):
addDebugString("BU: {0} MB, ".format(
self.bufferUsage / 1000000,
))
addDebugString("WQ: {0}, ".format(len(self.invalidChunkQueue)))
if self.chunkIterator:
addDebugString("[LR], ")
addDebugString("CR: {0}, ".format(len(self.chunkRenderers), ))
def next(self):
self.chunkWorker.next()
def makeWorkIterator(self):
        '''Does chunk face and vertex calculation work. Returns a generator that can be
        iterated over in smaller work units.'''
try:
while True:
if self.level is None:
raise StopIteration
if len(self.invalidChunkQueue) > 1024:
self.invalidChunkQueue.clear()
if len(self.invalidChunkQueue):
c = self.invalidChunkQueue[0]
for _ in self.workOnChunk(c):
yield
self.invalidChunkQueue.popleft()
elif self.chunkIterator is None:
raise StopIteration
else:
c = self.chunkIterator.next()
if self.vertexBufferLimit:
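                        # Over ~90% of the vertex-buffer budget: evict loaded
                        # chunks farther from the camera than the candidate;
                        # if none are, the while/else skips building the
                        # candidate this pass.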
while self.bufferUsage > (0.9 * (self.vertexBufferLimit << 20)):
deadChunk = None
deadDistance = self.chunkDistance(c)
for cr in self.chunkRenderers.itervalues():
dist = self.chunkDistance(cr.chunkPosition)
if dist > deadDistance:
deadChunk = cr
deadDistance = dist
if deadChunk is not None:
self.discardChunk(*deadChunk.chunkPosition)
else:
break
else:
for _ in self.workOnChunk(c):
yield
else:
for _ in self.workOnChunk(c):
yield
yield
finally:
self._chunkWorker = None
if self.chunkIterator:
self.chunkIterator = None
vertexBufferLimit = 384
def getChunkRenderer(self, c):
if c not in self.chunkRenderers:
return self.chunkClass(self, c)
return self.chunkRenderers[c]
def calcFacesForChunkRenderer(self, cr):
self.bufferUsage -= cr.bufferSize
calc = cr.calcFaces()
work = 0
for _ in calc:
yield
work += 1
self.chunkDone(cr, work)
def workOnChunk(self, c):
work = 0
if self.level.containsChunk(*c):
cr = self.getChunkRenderer(c)
if self.viewingFrustum:
if not self.viewingFrustum.visible1([c[0] * 16 + 8, self.level.Height / 2, c[1] * 16 + 8, 1.0],
self.level.Height / 2):
raise StopIteration
faceInfoCalculator = self.calcFacesForChunkRenderer(cr)
try:
for _ in faceInfoCalculator:
work += 1
if (work % MCRenderer.workFactor) == 0:
yield
self.invalidateMasterList()
except Exception as e:
traceback.print_exc()
fn = c
logging.info(u"Skipped chunk {f}: {e}".format(e=e, f=fn))
redrawChunks = 0
def chunkDone(self, chunkRenderer, work):
self.chunkRenderers[chunkRenderer.chunkPosition] = chunkRenderer
self.bufferUsage += chunkRenderer.bufferSize
# print "Chunk {0} used {1} work units".format(chunkRenderer.chunkPosition, work)
if not self.needsRedraw:
if self.redrawChunks:
self.redrawChunks -= 1
if not self.redrawChunks:
self.needsRedraw = True
else:
self.redrawChunks = 2
if work > 0:
self.oldChunkStartTime = self.chunkStartTime
self.chunkStartTime = datetime.now()
self.chunkSamples.pop(0)
self.chunkSamples.append(self.chunkStartTime - self.oldChunkStartTime)
class PreviewRenderer(MCRenderer):
isPreviewer = True
def rendermain():
renderer = MCRenderer()
renderer.level = pymclevel.mclevel.loadWorld("World1")
renderer.viewDistance = 6
renderer.detailLevelForChunk = lambda *x: 0
start = datetime.now()
renderer.loadVisibleChunks()
try:
while True:
# for i in range(100):
renderer.next()
except StopIteration:
pass
except Exception as e:
traceback.print_exc()
print repr(e)
duration = datetime.now() - start
perchunk = duration / len(renderer.chunkRenderers)
print "Duration: {0} ({1} chunks per second, {2} per chunk, {3} chunks)".format(duration,
1000000.0 / perchunk.microseconds,
perchunk,
len(renderer.chunkRenderers))
# display.init( (640, 480), OPENGL | DOUBLEBUF )
from utilities.gl_display_context import GLDisplayContext
from OpenGL import GLU
import pygame
# distance = 4000
GL.glMatrixMode(GL.GL_PROJECTION)
GL.glLoadIdentity()
GLU.gluPerspective(35, 640.0 / 480.0, 0.5, 4000.0)
h = 366
pos = (0, h, 0)
look = (0.0001, h - 1, 0.0001)
up = (0, 1, 0)
GL.glMatrixMode(GL.GL_MODELVIEW)
GL.glLoadIdentity()
GLU.gluLookAt(pos[0], pos[1], pos[2],
look[0], look[1], look[2],
up[0], up[1], up[2])
GL.glClearColor(0.0, 0.0, 0.0, 1.0)
framestart = datetime.now()
frames = 200
for i in xrange(frames):
GL.glClear(GL.GL_COLOR_BUFFER_BIT | GL.GL_DEPTH_BUFFER_BIT)
renderer.draw()
pygame.display.flip()
delta = datetime.now() - framestart
seconds = delta.seconds + delta.microseconds / 1000000.0
print "{0} frames in {1} ({2} per frame, {3} FPS)".format(frames, delta, delta / frames, frames / seconds)
while True:
evt = pygame.event.poll()
if evt.type == pygame.MOUSEBUTTONDOWN:
break
# time.sleep(3.0)
import traceback
if __name__ == "__main__":
import cProfile
cProfile.run("rendermain()", "mcedit.profile")
| 146,617 | 35.572213 | 153 |
py
|
MCEdit-Unified
|
MCEdit-Unified-master/glbackground.py
|
"""Copyright (c) 2010-2012 David Rio Vierra
Permission to use, copy, modify, and/or distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE."""
"""
glbackground.py
A UI element that only draws a single OpenGL quad.
"""
from albow.openglwidgets import GLOrtho
from albow import unparented
from OpenGL.GL import glEnable, glColor, glVertexPointer, glDrawArrays, glDisable, GL_BLEND, GL_FLOAT, GL_QUADS
from numpy import array
class GLBackground(GLOrtho):
margin = 8
bg_color = (0.0, 0.0, 0.0, 0.6)
# bg_color = (30/255.0,0,255/255.0, 100/255.0)
def gl_draw(self):
#if hasattr(self, 'highlight_bg_color') and self in self.get_root().find_widget(mouse.get_pos()).all_parents():
# color = self.highlight_bg_color
#else:
color = tuple(self.bg_color) + (1.0,)
glEnable(GL_BLEND)
glColor(color[0], color[1], color[2], color[3])
glVertexPointer(2, GL_FLOAT, 0, array([-1, -1, -1, 1, 1, 1, 1, -1], dtype='float32'))
glDrawArrays(GL_QUADS, 0, 4)
glDisable(GL_BLEND)
class Panel(GLBackground):
def __init__(self, *args, **kwargs):
GLBackground.__init__(self, *args, **kwargs)
if not self.parent:
name = kwargs.get('name', repr(self))
# Destroy the former widget with the same name.
w = unparented.get(name, None)
if w:
del w
unparented[name] = self
| 2,034 | 36 | 119 |
py
|
MCEdit-Unified
|
MCEdit-Unified-master/player_cache.py
|
import json
import urllib2
from directories import userCachePath
import os
import time
from PIL import Image
import atexit
import threading
import logging
from uuid import UUID
import httplib
import base64
import datetime
import traceback
log = logging.getLogger(__name__)
class ThreadRS(threading.Thread):
# This class comes from: http://stackoverflow.com/questions/6893968/how-to-get-the-return-value-from-a-thread-in-python
# And may have been tweaked ;)
"""
    This class uses a _return instance member to store the result of the underlying Thread target.
    If 'callbacks' objects are sent to the constructor, the '_return' object is passed to each of them
    at the end of the 'run' and 'join' methods. The latter also returns the '_return' object.
"""
def __init__(self, group=None, target=None, name=None, callbacks=[],
args=(), kwargs={}, Verbose=None):
"""
:callbacks: list: callable objects to send the thread result to.
For other arguments, see threading.Thread documentation.
"""
self.target = target
self.callbacks = callbacks
threading.Thread.__init__(self, group, target, name, args, kwargs, Verbose)
self._return = None
def run(self):
if self._Thread__target is not None:
self._return = self._Thread__target(*self._Thread__args,
**self._Thread__kwargs)
for callback in self.callbacks:
callback(self._return)
def join(self):
try:
threading.Thread.join(self)
except Exception as e:
print e
for callback in self.callbacks:
callback(self._return)
return self._return
def __repr__(self, *args, **kwargs):
return '%s::%s' % (ThreadRS, self.target)
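# A minimal usage sketch (hypothetical names): the thread's return value is
# handed to each registered callback and is also returned by join().
#   def fetch_name():
#       return "Notch"
#   t = ThreadRS(target=fetch_name, callbacks=[lambda result: log.info(result)])
#   t.start()
#   name = t.join()  # name == "Notch"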
def threadable(func):
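    # Decorator: run 'func' on a background ThreadRS, fishing the PlayerCache
    # instance out of the positional args so its lock and registered callback
    # targets can be used.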
def wrapper(*args, **kwargs):
instance = None
for arg in args:
if isinstance(arg, PlayerCache):
instance = arg
break
with instance.cache_lock:
t = ThreadRS(target=func, args=args, kwargs=kwargs, callbacks=instance.targets)
t.daemon = True
t.start()
return t
return wrapper
#@Singleton
class PlayerCache:
"""
    Used to cache player names and UUIDs; provides a small API to interface with it
"""
_PATH = userCachePath
TIMEOUT = 2.5
    targets = [] # Used to send data updates when background threads have finished.
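    # Borg pattern: every instance shares one __dict__, so PlayerCache()
    # behaves like a singleton without restricting instantiation.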
__shared_state = {}
def __init__(self):
self.__dict__ = self.__shared_state
self.last_error = None
self.error_count = 0
# --- Utility Functions ---
def add_target(self, target):
global targets
if target not in self.targets:
self.targets.append(target)
@staticmethod
def insertSeperators(uuid):
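        # 32-char hex string -> canonical 8-4-4-4-12 UUID form.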
return uuid[:8] + "-" + uuid[8:12] + "-" + uuid[12:16] + "-" + uuid[16:20] + "-" + uuid[20:]
@staticmethod
def getDeltaTime(timestamp, unit):
t = time.time()
old = datetime.datetime.fromtimestamp(timestamp)
current = datetime.datetime.fromtimestamp(t)
delta = current - old
        if unit == "hours":
            # timedelta has no 'hours' attribute (the old getattr fallback
            # returned the string "hours", which only compared sanely under
            # Python 2); derive hours from the days and seconds components.
            return delta.days * 24 + delta.seconds // 3600
        return getattr(delta, unit, 0)
def __convert(self, json_in):
for player in json_in:
new_dict = {
"Name": player["Playername"],
"Timestamp": player["Timestamp"],
"Successful": player["WasSuccessful"]
}
self._cache["Cache"][player["UUID (No Separator)"]] = new_dict
def save(self):
if hasattr(self, "_cache"):
fp = open(self._PATH, 'w')
json.dump(self._cache, fp, indent=4, separators=(',', ':'))
fp.close()
def load(self):
"""
        Loads from the usercache.json file if it exists; if not, an empty one is generated
"""
self._cache = {"Version": 2, "Connection Timeout": 10, "Cache": {}}
if not os.path.exists(self._PATH):
fp = open(self._PATH, 'w')
json.dump(self._cache, fp)
fp.close()
fp = open(self._PATH, 'r')
try:
json_in = json.load(fp)
if "Version" not in json_in or json_in.get("Version", 0) != 2:
self.__convert(json_in)
else:
self._cache = json_in
except:
log.warning("Usercache.json may be corrupted")
finally:
fp.close()
self.temp_skin_cache = {}
self.TIMEOUT = self._cache.get("Connection Timeout", 2.5)
self.cache_lock = threading.RLock()
self.player_refeshing = threading.Thread(target=self._batchRefreshPlayers)
#self.player_refeshing.daemon(True) # No idea whether to use the property setter function or the attribute, so I'll use both
self.player_refeshing.daemon = True
self.player_refeshing.start()
# --- Refreshing ---
def _batchRefreshPlayers(self):
to_refresh_successful = []
to_refresh_failed = []
to_refresh = []
with self.cache_lock:
            # TODO: Put this into a thread, since it could take a lot of time to run
# TODO: Handle entries that weren't successful last time the cache was modified
for uuid in self._cache["Cache"].keys():
player = self._cache["Cache"][uuid]
if player["Successful"]:
if self.getDeltaTime(player["Timestamp"], "hours") > 6:
to_refresh_successful.append(uuid)
else:
to_refresh_failed.append(uuid)
to_refresh = to_refresh_successful + to_refresh_failed
for uuid in to_refresh:
if self.error_count >= 4:
break
self._getPlayerInfoUUID(uuid)
self.save()
def force_refresh(self):
for uuid in self._cache["Cache"].keys():
self.getPlayerInfo(uuid, force=True)
self.save()
# --- Checking if supplied data is in the Cache ---
def UUIDInCache(self, uuid):
"""
Checks to see if the UUID is already in the cache
:param uuid: The UUID of the player
:type uuid: str
:rtype: bool
"""
return uuid.replace("-", "") in self._cache["Cache"]
def nameInCache(self, name):
"""
Checks to see if the name is already in the cache
:param name: The name of the player
:type name: str
:rtype: bool
"""
for uuid in self._cache["Cache"].keys():
if self._cache["Cache"][uuid].get("Name", "") == name:
return True
return False
# --- Getting data from the Cache ---
def _getDataFromCacheUUID(self, uuid):
"""
Checks if the UUID is already in the cache
:param uuid: The UUID that might be in the cache
:type uuid: str
:return: The player data that is in the cache for the specified UUID, same format as getPlayerInfo()
:rtype: tuple
"""
clean_uuid = uuid.replace("-","")
player = self._cache["Cache"].get(clean_uuid, {})
return self.insertSeperators(clean_uuid), player.get("Name", "<Unknown Name>"), clean_uuid
def _getDataFromCacheName(self, name):
"""
Checks if the Player name is already in the cache
:param name: The name of the Player that might be in the cache
:return: The player data that is in the cache for the specified Player name, same format as getPlayerInfo()
:rtype: tuple
"""
for uuid in self._cache["Cache"].keys():
clean_uuid = uuid.replace("-","")
player = self._cache["Cache"][uuid]
if player.get("Name", "") == name and player.get("Successful", False):
return (self.insertSeperators(clean_uuid), player["Name"], clean_uuid)
return ("<Unknown UUID>", name, "<Unknown UUID>")
def _wasSuccessfulUUID(self, uuid):
"""
Returns whether retrieving the player data was Successful
:param uuid: The UUID of the player to check
:return: True if the last time the player data retrieval from Mojang's API was successful, False otherwise
:rtype: bool
"""
clean_uuid = uuid.replace("-","")
player = self._cache["Cache"].get(clean_uuid, {})
return player.get("Successful", False)
def _wasSuccessfulName(self, name):
"""
Returns whether retrieving the player data was Successful
:param name: The name of the player to check
:return: True if the last time the player data retrieval from Mojang's API was successful, False otherwise
:rtype: bool
"""
for uuid in self._cache["Cache"].keys():
player = self._cache["Cache"][uuid]
if player.get("Name", "") == name:
return player.get("Successful", False)
return False
def getPlayerInfo(self, arg, force=False, use_old_data=False):
"""
        Recommended method for getting player data. Determines heuristically whether 'arg' is a UUID or a player name
:param arg: Either a UUID or Player name to retrieve from the cache/Mojang's API
:type arg: str
:param force: True if the Player name should be forcefully fetched from Mojang's API
:type force: bool
:param use_old_data: Fallback to old data even if force is True
:type use_old_data: bool
:return: A tuple with the data in this order: (UUID with separator, Player name, UUID without separator)
:rtype: tuple
"""
try:
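            # If 'arg' parses as a v4 UUID, look it up by UUID; the ValueError
            # raised otherwise means 'arg' is a player name.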
UUID(arg, version=4)
if self.UUIDInCache(arg) and self._wasSuccessfulUUID(arg) and not force:
return self._getDataFromCacheUUID(arg)
else:
r = self._getPlayerInfoUUID(arg, use_old_data)
if r.__class__ == ThreadRS:
c = arg.replace('-', '')
return self.insertSeperators(c), 'Server not ready', c
except ValueError:
if self.nameInCache(arg) and self._wasSuccessfulName(arg) and not force:
return self._getDataFromCacheName(arg)
else:
r = self._getPlayerInfoName(arg)
if r.__class__ == ThreadRS:
return 'Server not ready', arg, 'Server not ready'
else:
return r
# --- Player Data Getters ---
@threadable
def _getPlayerInfoUUID(self, uuid, use_old_data=False):
clean_uuid = uuid.replace("-", "")
player = self._cache["Cache"].get(clean_uuid, {})
        # If the delta between the player timestamp and the current time is
        # less than 30 seconds, return the cached player data to avoid a 429
        # (rate limit) error.
delta = self.getDeltaTime(player.get("Timestamp", 0), 'seconds')
if delta < 30:
return self.insertSeperators(clean_uuid), player.get("Name", "<Unknown Name>"), clean_uuid
player["Timestamp"] = time.time()
response = self._getDataFromURL("https://sessionserver.mojang.com/session/minecraft/profile/{}".format(clean_uuid))
if response:
try:
data = response
response = json.loads(response)
player["Name"] = response.get("name", player.get("Name", "<Unknown Name>"))
player["Successful"] = True
self._cache["Cache"][clean_uuid] = player
self.temp_skin_cache[clean_uuid] = data
self.save()
return self.insertSeperators(clean_uuid), player["Name"], clean_uuid
except:
player["Successful"] = False
self._cache["Cache"][clean_uuid] = player
if use_old_data and player.get("Name", "<Unknown Name>") != "<Unknown Name>":
return self.insertSeperators(clean_uuid), player["Name"], clean_uuid
else:
return self.insertSeperators(clean_uuid), "<Unknown Name>", clean_uuid
else:
player["Successful"] = False
self._cache["Cache"][clean_uuid] = player
if use_old_data and player.get("Name", "<Unknown Name>") != "<Unknown Name>":
return self.insertSeperators(clean_uuid), player["Name"], clean_uuid
else:
return self.insertSeperators(clean_uuid), "<Unknown Name>", clean_uuid
@threadable
def _getPlayerInfoName(self, name):
response = self._getDataFromURL("https://api.mojang.com/users/profiles/minecraft/{}".format(name))
if response:
try:
response = json.loads(response)
uuid = response["id"]
player = self._cache["Cache"].get(uuid,{})
player["Name"] = response.get("name", player.get("Name", "<Unknown Name>"))
player["Timestamp"] = time.time()
player["Successful"] = True
self._cache["Cache"][uuid] = player
self.save()
return self.insertSeperators(uuid), player["Name"], uuid
except:
return "<Unknown UUID>", name, "<Unknown UUID>"
else:
return "<Unknown UUID>", name, "<Unknown UUID>"
# --- Skin Getting ---
def _parseSkinResponse(self, response):
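        # The session server wraps its payload in base64: properties[0].value
        # decodes to JSON whose textures.SKIN.url points at the skin image.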
try:
resp = json.loads(response)
decoded = base64.b64decode(resp["properties"][0]["value"])
resp = json.loads(decoded)
if "SKIN" in resp["textures"]:
resp = self._getDataFromURL(resp["textures"]["SKIN"]["url"])
return resp
except:
print "Couldn't parse skin response JSON"
print traceback.format_exc()
return None
@threadable
def getPlayerSkin(self, arg, force_download=True, instance=None):
"""
Gets the player's skin from Mojang's skin servers
:param arg: The UUID of the player
:type arg: str
:param force_download: Should the skin be re-downloaded even if it has already been downloaded
:type force_download: bool
:param instance: The instance of the PlayerTool
:type instance: PlayerTool
:return: The path to the player skin
:rtype: str
"""
toReturn = 'char.png'
raw_data = self.getPlayerInfo(arg)
if raw_data.__class__ != ThreadRS:
uuid_sep, name, uuid = raw_data
if uuid == "<Unknown UUID>" or "Server not ready" in raw_data:
return toReturn
player = self._cache["Cache"][uuid]
skin_path = os.path.join("player-skins", uuid_sep.replace("-","_") + ".png")
try:
if not force_download and os.path.exists(skin_path):
skin = Image.open(skin_path)
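                    # 1.8+ skins are 64x64; crop down to the legacy 64x32
                    # layout the rest of the code expects.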
if skin.size == (64,64):
skin = skin.crop((0,0,64,32))
skin.save(skin_path)
toReturn = skin_path
elif force_download or not os.path.exists(skin_path):
if uuid in self.temp_skin_cache:
parsed = self._parseSkinResponse(self.temp_skin_cache[uuid])
if parsed is not None:
self._saveSkin(uuid, parsed)
toReturn = skin_path
player["Skin"] = { "Timestamp": time.time() }
self._cache["Cache"][uuid] = player
del self.temp_skin_cache[uuid]
self.save()
else:
                        # If the delta between the player timestamp and the
                        # current time is less than 30 seconds, set 'response'
                        # to None to avoid a 429 (rate limit) error.
delta = self.getDeltaTime(player.get("Timestamp", 0), 'seconds')
if delta < 30:
response = None
else:
response = self._getDataFromURL("https://sessionserver.mojang.com/session/minecraft/profile/{}".format(uuid))
if response is not None:
parsed = self._parseSkinResponse(response)
if parsed is not None:
self._saveSkin(uuid, parsed)
toReturn = skin_path
player["Skin"] = { "Timestamp": time.time() }
self._cache["Cache"][uuid] = player
self.save()
except IOError:
print "Couldn't find Image file ("+skin_path+") or the file may be corrupted"
if instance is not None:
instance.delete_skin(uuid_sep.replace("-","_"))
os.remove(skin_path)
print "Something happened, retrying"
toReturn = self.getPlayerSkin(arg, True, instance)
except Exception:
print "Unknown error occurred while reading/downloading skin for "+str(uuid.replace("-","_")+".png")
print traceback.format_exc()
else:
toReturn = raw_data.join()
return toReturn
def _saveSkin(self, uuid, data):
if "-" not in uuid:
uuid = self.insertSeperators(uuid)
try:
os.mkdir("player-skins")
except OSError:
pass
skin_path = os.path.join("player-skins", uuid.replace("-","_") + ".png")
with open(skin_path, 'wb') as fp:
fp.write(data)
skin = Image.open(skin_path)
if skin.size == (64,64):
skin = skin.crop((0,0,64,32))
skin.save(skin_path)
def _getDataFromURL(self, url):
conn = None
try:
conn = urllib2.urlopen(url, timeout=self.TIMEOUT)
response = conn.read()
self.last_error = False
return response
except urllib2.HTTPError, e:
log.warn("Encountered a HTTPError while trying to access \"" + url + "\"")
log.warn("Error: " + str(e.code))
self.error_count += 1
except urllib2.URLError, e:
log.warn("Encountered an URLError while trying to access \"" + url + "\"")
log.warn("Error: " + str(e.reason))
self.error_count += 1
except httplib.HTTPException:
log.warn("Encountered a HTTPException while trying to access \"" + url + "\"")
self.error_count += 1
except Exception:
log.warn("Unknown error occurred while trying to get data from URL: " + url)
log.warn(traceback.format_exc())
self.error_count += 1
finally:
if conn:
conn.close()
return None
def _postDataToURL(self, url, payload, headers):
conn = None
try:
request = urllib2.Request(url, payload, headers)
conn = urllib2.urlopen(request, timeout=self.TIMEOUT)
response = conn.read()
return response
except urllib2.HTTPError, e:
log.warn("Encountered a HTTPError while trying to POST to \"" + url + "\"")
log.warn("Error: " + str(e.code))
except urllib2.URLError, e:
log.warn("Encountered an URLError while trying to POST to \"" + url + "\"")
log.warn("Error: " + str(e.reason))
except httplib.HTTPException:
log.warn("Encountered a HTTPException while trying to POST to \"" + url + "\"")
except Exception:
log.warn("Unknown error occurred while trying to POST data to URL: " + url)
log.warn(traceback.format_exc())
finally:
if conn: conn.close()
return None
def _cleanup():
if os.path.exists("player-skins"):
for image_file in os.listdir("player-skins"):
fp = None
try:
fp = open(os.path.join("player-skins", image_file), 'rb')
im = Image.open(fp)
if hasattr(im, 'close'):
im.close()
except IOError:
os.remove(os.path.join("player-skins", image_file))
except AttributeError:
pass # I have no idea why an Attribute Error is thrown on .close(), but this fixes it
finally:
if fp and not fp.closed:
fp.close()
atexit.register(_cleanup)
atexit.register(PlayerCache().save)
| 20,783 | 38.739962 | 137 |
py
|
MCEdit-Unified
|
MCEdit-Unified-master/bresenham.py
|
def bresenham(p1, p2):
"""Bresenham line algorithm
adapted for 3d. slooooow."""
coords = []
x, y, z = p1
x2, y2, z2 = p2
dx = abs(x2 - x)
if (x2 - x) > 0:
sx = 1
else:
sx = -1
dy = abs(y2 - y)
if (y2 - y) > 0:
sy = 1
else:
sy = -1
dz = abs(z2 - z)
if (z2 - z) > 0:
sz = 1
else:
sz = -1
dl = [dx, dy, dz]
longestAxis = dl.index(max(dl))
d = [2 * a - dl[longestAxis] for a in dl]
# if dy > dx:
# steep = 1
#d = (2 * dy) + (2 * dz) - dx
otherAxes = [0, 1, 2]
otherAxes.remove(longestAxis)
p = [x, y, z]
sp = [sx, sy, sz]
for i in xrange(0, int(dl[longestAxis])):
coords.append(tuple(p))
for j in otherAxes:
while d[j] >= 0:
p[j] += sp[j]
d[j] -= 2 * dl[longestAxis]
p[longestAxis] += sp[longestAxis]
d = map(lambda a, b: a + 2 * b, d, dl)
return coords # added by me
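# Worked example: bresenham((0, 0, 0), (4, 2, 0)) yields
# [(0, 0, 0), (1, 1, 0), (2, 1, 0), (3, 2, 0)]: x (the longest axis) advances
# every step, y advances whenever its error term crosses zero, and the
# endpoint itself is not emitted (the loop runs dl[longestAxis] times).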
| 1,005 | 19.958333 | 46 |
py
|
MCEdit-Unified
|
MCEdit-Unified-master/fileEdits.py
|
from editortools.operation import Operation
import itertools
from albow import alert
class fileEdit:
def __init__(self, filename, timeChanged, box, editor, level):
self.filename = filename
self.timeChanged = timeChanged
self.box = box
self.editor = editor
self.level = level
self.order = []
def makeChanges(self):
try:
f = open(self.filename, 'rb')
except:
alert("Couldn't open the file")
return
lines = []
for line in f.readlines():
line = line.replace("\r", "")
if line != "\n":
lines.append(line.replace("\n", ""))
f.close()
tileEntities = []
for (x, y, z) in self.order:
blockAtXYZ = self.level.blockAt(x, y, z)
if blockAtXYZ in (137, 210, 211, 188, 189):
tileEntities.append(self.level.tileEntityAt(x, y, z))
else:
alert("The blocks are different now!")
return
if len(lines) != len(tileEntities):
alert("You have %d lines and %d command blocks, it should be the same." % (len(lines), len(tileEntities)))
return
op = FileEditsOperation(self.editor, self.level, self.box, lines, tileEntities)
self.editor.addOperation(op)
if op.canUndo:
self.editor.addUnsavedEdit()
def writeCommandInFile(self, first, space, (x, y, z), fileTemp, skip, chain, done, order):
block = self.editor.level.tileEntityAt(x, y, z)
if chain:
if not block or (x, y, z) in done:
return
if not first:
if space:
fileTemp.write("\n\n")
else:
fileTemp.write("\n")
text = block["Command"].value
if text == "":
text = "\"\""
order.append((x, y, z))
fileTemp.write(text.encode('utf-8'))
if chain:
done.append((x, y, z))
blockData = self.editor.level.blockDataAt(x, y, z)
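            # blockData encodes the command block's facing: 0=down, 1=up,
            # 2=north(-z), 3=south(+z), 4=west(-x), 5=east(+x). Follow the
            # chain into the neighbouring chain/repeating command block
            # (block IDs 211 and 189) it points at.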
if blockData == 0 and self.level.blockAt(x, y-1, z) in (211, 189):
skip.append((x, y-1, z))
self.writeCommandInFile(False, space, (x, y-1, z), fileTemp, skip, True, done, order)
elif blockData == 1 and self.level.blockAt(x, y+1, z) in (211, 189):
skip.append((x, y+1, z))
self.writeCommandInFile(False, space, (x, y+1, z), fileTemp, skip, True, done, order)
elif blockData == 2 and self.level.blockAt(x, y, z-1) in (211, 189):
skip.append((x, y, z-1))
self.writeCommandInFile(False, space, (x, y, z-1), fileTemp, skip, True, done, order)
elif blockData == 3 and self.level.blockAt(x, y, z+1) in (211, 189):
skip.append((x, y, z+1))
self.writeCommandInFile(False, space, (x, y, z+1), fileTemp, skip, True, done, order)
elif blockData == 4 and self.level.blockAt(x-1, y, z) in (211, 189):
skip.append((x-1, y, z))
self.writeCommandInFile(False, space, (x-1, y, z), fileTemp, skip, True, done, order)
elif blockData == 5 and self.level.blockAt(x+1, y, z) in (211, 189):
skip.append((x+1, y, z))
self.writeCommandInFile(False, space, (x+1, y, z), fileTemp, skip, True, done, order)
class FileEditsOperation(Operation):
def __init__(self, editor, level, box, lines, tileEntities):
self.editor = editor
self.level = level
self.box = box
self.lines = lines
self.tileEntities = tileEntities
self.undoLevel = None
self.canUndo = False
def perform(self, recordUndo=True):
if self.level.saving:
alert("Cannot perform action while saving is taking place")
return
if recordUndo:
self.undoLevel = self.extractUndo(self.level, self.box)
for i, line in enumerate(self.lines):
tileEntity = self.tileEntities[i]
line = line.decode('utf-8')
line = line.replace(u"\u201c\u202a", "\"")
line = line.replace(u"\u201d\u202c", "\"")
if line == "\"\"":
line = ""
if tileEntity["Command"].value != line:
tileEntity["Command"].value = line
self.level.addTileEntity(tileEntity)
if not self.canUndo and recordUndo:
self.canUndo = True
def dirtyBox(self):
return self.box
def GetSort(box, sorting):
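    # "xz"/"chain" iterates x-major and yields (x, y, z) triples; any other
    # sorting iterates z-major and yields (z, y, x) triples.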
if sorting == "xz" or sorting == "chain":
return itertools.product(
xrange(box.minx, box.maxx),
xrange(box.miny, box.maxy),
xrange(box.minz, box.maxz)
)
else:
return itertools.product(
xrange(box.minz, box.maxz),
xrange(box.miny, box.maxy),
xrange(box.minx, box.maxx)
)
| 4,986 | 37.068702 | 118 |
py
|
MCEdit-Unified
|
MCEdit-Unified-master/keys.py
|
# -*- coding: utf_8 -*-
#.# Marks the layout modifications. -- D.C.-G.
from config import config
import albow
from albow.dialogs import Dialog
from albow.translate import _
from glbackground import Panel
from albow import Button, Column
ESCAPE = '\033'
def remapMouseButton(button):
buttons = [0, 1, 3, 2, 4, 5, 6, 7] # mouse2 is right button, mouse3 is middle
if button < len(buttons):
return buttons[button]
return button
class KeyConfigPanel(Dialog):
keyConfigKeys = [
"<Movement>",
"Forward",
"Back",
"Left",
"Right",
"Up",
"Down",
"Brake",
"Sprint",
"",
"<Camera>",
"Pan Up",
"Pan Down",
"Pan Left",
"Pan Right",
"Toggle View",
"Goto Panel",
"View Distance",
"Toggle Renderer",
"Fly Mode",
"",
"<Selection>",
"Increase Reach",
"Decrease Reach",
"Reset Reach",
"Show Block Info",
"Pick Block",
"Snap Clone to Axis",
"Long-Distance Mode",
"Blocks-Only Modifier",
"",
"<Brush Tool>",
"Rotate (Brush)",
"Roll (Brush)",
"Increase Brush",
"Decrease Brush",
"Brush Line Tool",
"",
"<Clone Tool>",
"Rotate (Clone)",
"Roll (Clone)",
"Flip",
"Mirror",
"",
"<Fill and Replace Tool>",
"Replace Shortcut",
"Swap",
"",
"<Chunk Control Tool>",
"Select Chunks",
"Deselect Chunks",
"",
"<Function>",
"Delete Blocks",
"Select All",
"Deselect",
"Undo",
"Redo",
"Cut",
"Copy",
"Paste",
"Export Selection",
"",
"<Menu>",
"New World",
"Quick Load",
"Open",
"Save",
"Save As",
"Reload World",
"Close World",
"World Info",
"Quit",
"",
"<Miscellaneous>",
"Take a Screenshot",
"Debug Overlay",
"Fast Nudge",
"Fast Increment Modifier (Hold)",
"",
"<Toolbar>",
"Select",
"Brush",
"Clone",
"Fill and Replace",
"Filter",
"Import Key",
"Players",
"World Spawnpoint",
"Chunk Control",
"NBT Explorer"
]
otherNames = {
"Goto Panel": "Goto Position",
"Show Block Info": "Show Block Info (Hold)",
"Pick Block": "Pick Block (Hold + Click)",
"Snap Clone to Axis": "Snap Clone to Axis (Hold)",
"Blocks-Only Modifier": "Blocks-Only Modifier (Hold)",
"Rotate (Brush)": "Rotate",
"Roll (Brush)": "Roll",
"Increase Brush": "Increase Size",
"Decrease Brush": "Decrease Size",
"Brush Line Tool": "Line Tool (Hold)",
"Rotate (Clone)": "Rotate",
"Roll (Clone)": "Roll",
"Replace Shortcut": "Replace",
"Select Chunks": "Select Chunks (Hold)",
"Deselect Chunks": "Deselect Chunks (Hold)",
"Delete Blocks": "Delete",
"Export Selection": "Export",
"Take a Screenshot": "Take Screenshot",
"Import Key": "Import"
}
presets = {
"WASD": [
("Forward", "W"),
("Back", "S"),
("Left", "A"),
("Right", "D"),
("Up", "Space"),
("Down", "Shift"),
("Brake", "C"),
("Sprint", "None"),
("Pan Up", "I"),
("Pan Down", "K"),
("Pan Left", "J"),
("Pan Right", "L"),
("Toggle View", "Tab"),
("Goto Panel", "Ctrl-G"),
("View Distance", "Ctrl-F"),
("Toggle Renderer", "Ctrl-M"),
("Fly Mode", "None"),
("Increase Reach", "Scroll Up"),
("Decrease Reach", "Scroll Down"),
("Reset Reach", "Button 3"),
("Show Block Info", "Alt"),
("Pick Block", "Alt"),
("Snap Clone to Axis", "Ctrl"),
("Long-Distance Mode", "Alt-Z"),
("Blocks-Only Modifier", "Alt"),
("Rotate (Brush)", "E"),
("Roll (Brush)", "G"),
("Increase Brush", "R"),
("Decrease Brush", "F"),
("Brush Line Tool", "Z"),
("Rotate (Clone)", "E"),
("Roll (Clone)", "R"),
("Flip", "F"),
("Mirror", "G"),
("Replace Shortcut", "R"),
("Swap", "X"),
("Select Chunks", "Z"),
("Deselect Chunks", "Alt"),
("Delete Blocks", "Delete"),
("Select All", "Ctrl-A"),
("Deselect", "Ctrl-D"),
("Undo", "Ctrl-Z"),
("Redo", "Ctrl-Y"),
("Cut", "Ctrl-X"),
("Copy", "Ctrl-C"),
("Paste", "Ctrl-V"),
("Export Selection", "Ctrl-E"),
("New World", "Ctrl-N"),
("Quick Load", "Ctrl-L"),
("Open", "Ctrl-O"),
("Save", "Ctrl-S"),
("Save As", "Ctrl-Alt-S"),
("Reload World", "Ctrl-R"),
("Close World", "Ctrl-W"),
("World Info", "Ctrl-I"),
("Quit", "Ctrl-Q"),
("Take a Screenshot", "F6"),
("Debug Overlay", "0"),
("Fast Nudge", "None"),
("Fast Increment Modifier (Hold)", "Ctrl"),
("Select", "1"),
("Brush", "2"),
("Clone", "3"),
("Fill and Replace", "4"),
("Filter", "5"),
("Import Key", "6"),
("Players", "7"),
("World Spawnpoint", "8"),
("Chunk Control", "9"),
("NBT Explorer", "None")
],
"ESDF": [
("Forward", "E"),
("Back", "D"),
("Left", "S"),
("Right", "F"),
("Up", "Space"),
("Down", "Shift"),
("Brake", "C"),
("Sprint", "None"),
("Pan Up", "I"),
("Pan Down", "K"),
("Pan Left", "J"),
("Pan Right", "L"),
("Toggle View", "Tab"),
("Goto Panel", "Ctrl-G"),
("View Distance", "Ctrl-F"),
("Toggle Renderer", "Ctrl-M"),
("Fly Mode", "None"),
("Increase Reach", "Scroll Up"),
("Decrease Reach", "Scroll Down"),
("Reset Reach", "Button 3"),
("Show Block Info", "Alt"),
("Pick Block", "Alt"),
("Snap Clone to Axis", "Ctrl"),
("Long-Distance Mode", "Alt-Z"),
("Blocks-Only Modifier", "Alt"),
("Rotate (Brush)", "R"),
("Roll (Brush)", "H"),
("Increase Brush", "T"),
("Decrease Brush", "G"),
("Brush Line Tool", "Z"),
("Rotate (Clone)", "R"),
("Roll (Clone)", "T"),
("Flip", "G"),
("Mirror", "H"),
("Replace Shortcut", "R"),
("Swap", "X"),
("Select Chunks", "Z"),
("Deselect Chunks", "Alt"),
("Delete Blocks", "Delete"),
("Select All", "Ctrl-A"),
("Deselect", "Ctrl-D"),
("Undo", "Ctrl-Z"),
("Redo", "Ctrl-Y"),
("Cut", "Ctrl-X"),
("Copy", "Ctrl-C"),
("Paste", "Ctrl-V"),
("Export Selection", "Ctrl-E"),
("New World", "Ctrl-N"),
("Quick Load", "Ctrl-L"),
("Open", "Ctrl-O"),
("Save", "Ctrl-S"),
("Save As", "Ctrl-Alt-S"),
("Reload World", "Ctrl-R"),
("Close World", "Ctrl-W"),
("World Info", "Ctrl-I"),
("Quit", "Ctrl-Q"),
("Take a Screenshot", "F6"),
("Debug Overlay", "0"),
("Fast Nudge", "None"),
("Fast Increment Modifier (Hold)", "Ctrl"),
("Select", "1"),
("Brush", "2"),
("Clone", "3"),
("Fill and Replace", "4"),
("Filter", "5"),
("Import Key", "6"),
("Players", "7"),
("World Spawnpoint", "8"),
("Chunk Control", "9"),
("NBT Explorer", "None")
],
"Arrows": [
("Forward", "Up"),
("Back", "Down"),
("Left", "Left"),
("Right", "Right"),
("Up", "Page Up"),
("Down", "Page Down"),
("Brake", "Space"),
("Sprint", "None"),
("Pan Up", "I"),
("Pan Down", "K"),
("Pan Left", "J"),
("Pan Right", "L"),
("Toggle View", "Tab"),
("Goto Panel", "Ctrl-G"),
("View Distance", "Ctrl-F"),
("Toggle Renderer", "Ctrl-M"),
("Fly Mode", "None"),
("Increase Reach", "Scroll Up"),
("Decrease Reach", "Scroll Down"),
("Reset Reach", "Button 3"),
("Show Block Info", "Alt"),
("Pick Block", "Alt"),
("Snap Clone to Axis", "Ctrl"),
("Long-Distance Mode", "Alt-Z"),
("Blocks-Only Modifier", "Alt"),
("Rotate (Brush)", "Home"),
("Roll (Brush)", "Delete"),
("Increase Brush", "End"),
("Decrease Brush", "Insert"),
("Brush Line Tool", "Z"),
("Rotate (Clone)", "Home"),
("Roll (Clone)", "End"),
("Flip", "Insert"),
("Mirror", "Delete"),
("Replace Shortcut", "R"),
("Swap", "\\"),
("Select Chunks", "Z"),
("Deselect Chunks", "Alt"),
("Delete Blocks", "Backspace"),
("Select All", "Ctrl-A"),
("Deselect", "Ctrl-D"),
("Undo", "Ctrl-Z"),
("Redo", "Ctrl-Y"),
("Cut", "Ctrl-X"),
("Copy", "Ctrl-C"),
("Paste", "Ctrl-V"),
("Export Selection", "Ctrl-E"),
("New World", "Ctrl-N"),
("Quick Load", "Ctrl-L"),
("Open", "Ctrl-O"),
("Save", "Ctrl-S"),
("Save As", "Ctrl-Alt-S"),
("Reload World", "Ctrl-R"),
("Close World", "Ctrl-W"),
("World Info", "Ctrl-I"),
("Quit", "Ctrl-Q"),
("Take a Screenshot", "F6"),
("Debug Overlay", "0"),
("Fast Nudge", "None"),
("Fast Increment Modifier (Hold)", "Ctrl"),
("Select", "1"),
("Brush", "2"),
("Clone", "3"),
("Fill and Replace", "4"),
("Filter", "5"),
("Import Key", "6"),
("Players", "7"),
("World Spawnpoint", "8"),
("Chunk Control", "9"),
("NBT Explorer", "None")
],
"Numpad": [
("Forward", "[8]"),
("Back", "[5]"),
("Left", "[4]"),
("Right", "[6]"),
("Up", "[7]"),
("Down", "[1]"),
("Brake", "[0]"),
("Sprint", "None"),
("Pan Up", "I"),
("Pan Down", "K"),
("Pan Left", "J"),
("Pan Right", "L"),
("Toggle View", "Tab"),
("Goto Panel", "Ctrl-G"),
("View Distance", "Ctrl-F"),
("Toggle Renderer", "Ctrl-M"),
("Fly Mode", "None"),
("Increase Reach", "Scroll Up"),
("Decrease Reach", "Scroll Down"),
("Reset Reach", "Button 3"),
("Show Block Info", "Alt"),
("Pick Block", "Alt"),
("Snap Clone to Axis", "Ctrl"),
("Long-Distance Mode", "Alt-Z"),
("Blocks-Only Modifier", "Alt"),
("Rotate (Brush)", "[-]"),
("Roll (Brush)", "[*]"),
("Increase Brush", "[+]"),
("Decrease Brush", "[/]"),
("Brush Line Tool", "Z"),
("Rotate (Clone)", "[-]"),
("Roll (Clone)", "[+]"),
("Flip", "[/]"),
("Mirror", "[*]"),
("Replace Shortcut", "R"),
("Swap", "[.]"),
("Select Chunks", "Z"),
("Deselect Chunks", "Alt"),
("Delete Blocks", "Delete"),
("Select All", "Ctrl-A"),
("Deselect", "Ctrl-D"),
("Undo", "Ctrl-Z"),
("Redo", "Ctrl-Y"),
("Cut", "Ctrl-X"),
("Copy", "Ctrl-C"),
("Paste", "Ctrl-V"),
("Export Selection", "Ctrl-E"),
("New World", "Ctrl-N"),
("Quick Load", "Ctrl-L"),
("Open", "Ctrl-O"),
("Save", "Ctrl-S"),
("Save As", "Ctrl-Alt-S"),
("Reload World", "Ctrl-R"),
("Close World", "Ctrl-W"),
("World Info", "Ctrl-I"),
("Quit", "Ctrl-Q"),
("Take a Screenshot", "F6"),
("Debug Overlay", "0"),
("Fast Nudge", "None"),
("Fast Increment Modifier (Hold)", "Ctrl"),
("Select", "1"),
("Brush", "2"),
("Clone", "3"),
("Fill and Replace", "4"),
("Filter", "5"),
("Import Key", "6"),
("Players", "7"),
("World Spawnpoint", "8"),
("Chunk Control", "9"),
("NBT Explorer", "None")
],
"WASD Old": [
("Forward", "W"),
("Back", "S"),
("Left", "A"),
("Right", "D"),
("Up", "Q"),
("Down", "Z"),
("Brake", "Space"),
("Sprint", "None"),
("Pan Up", "I"),
("Pan Down", "K"),
("Pan Left", "J"),
("Pan Right", "L"),
("Toggle View", "Tab"),
("Goto Panel", "Ctrl-G"),
("View Distance", "Ctrl-F"),
("Toggle Renderer", "Ctrl-M"),
("Fly Mode", "None"),
("Increase Reach", "Scroll Up"),
("Decrease Reach", "Scroll Down"),
("Reset Reach", "Button 3"),
("Show Block Info", "Alt"),
("Pick Block", "Alt"),
("Snap Clone to Axis", "Shift"),
("Long-Distance Mode", "Alt-Z"),
("Blocks-Only Modifier", "Alt"),
("Rotate (Brush)", "E"),
("Roll (Brush)", "G"),
("Increase Brush", "R"),
("Decrease Brush", "F"),
("Brush Line Tool", "Shift"),
("Rotate (Clone)", "E"),
("Roll (Clone)", "R"),
("Flip", "F"),
("Mirror", "G"),
("Replace Shortcut", "R"),
("Swap", "X"),
("Select Chunks", "Ctrl"),
("Deselect Chunks", "Shift"),
("Delete Blocks", "Delete"),
("Select All", "Ctrl-A"),
("Deselect", "Ctrl-D"),
("Undo", "Ctrl-Z"),
("Redo", "Ctrl-Y"),
("Cut", "Ctrl-X"),
("Copy", "Ctrl-C"),
("Paste", "Ctrl-V"),
("Export Selection", "Ctrl-E"),
("New World", "Ctrl-N"),
("Quick Load", "Ctrl-L"),
("Open", "Ctrl-O"),
("Save", "Ctrl-S"),
("Save As", "Ctrl-Alt-S"),
("Reload World", "Ctrl-R"),
("Close World", "Ctrl-W"),
("World Info", "Ctrl-I"),
("Quit", "Ctrl-Q"),
("Take a Screenshot", "F6"),
("Debug Overlay", "0"),
("Fast Nudge", "Shift"),
("Fast Increment Modifier (Hold)", "Shift"),
("Select", "1"),
("Brush", "2"),
("Clone", "3"),
("Fill and Replace", "4"),
("Filter", "5"),
("Import Key", "6"),
("Players", "7"),
("World Spawnpoint", "8"),
("Chunk Control", "9"),
("NBT Explorer", "None")
]}
selectedKeyIndex = 0
def __init__(self, mcedit):
Dialog.__init__(self)
self.changes = {}
self.changesNum = False
self.enter = 0
self.root = None
self.editor = None
buttonRow = (albow.Button("Assign Key...", action=self.askAssignSelectedKey),
albow.Button("Done", action=self.done), albow.Button("Cancel", action=self.cancel))
buttonRow = albow.Row(buttonRow)
resetToDefaultRow = albow.Row((albow.Button("Reset to default", action=self.resetDefault),))
choiceButton = albow.ChoiceButton(["WASD", "ESDF", "Arrows", "Numpad", "WASD Old"], choose=self.choosePreset)
if config.keys.forward.get() == "E":
choiceButton.selectedChoice = "ESDF"
elif config.keys.forward.get() == "Up":
choiceButton.selectedChoice = "Arrows"
elif config.keys.forward.get() == "[8]":
choiceButton.selectedChoice = "Numpad"
elif config.keys.brake.get() == "Space":
choiceButton.selectedChoice = "WASD Old"
self.oldChoice = choiceButton.selectedChoice
choiceRow = albow.Row((albow.Label("Keybind Presets:"), choiceButton))
self.choiceButton = choiceButton
#.#
spacing = 0
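        # Create a throwaway TableView just to measure the font line height;
        # 581 px of rows then determines how many rows the real table shows.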
tb = albow.TableView()
self.nrows = 581 / tb.font.get_linesize()
keyConfigTable = albow.TableView(nrows=581 / tb.font.get_linesize(),
columns=[albow.TableColumn("Command", 200, "l"), albow.TableColumn("Assigned Key", 150, "r")])
del tb
keyConfigTable.num_rows = lambda: len(self.keyConfigKeys)
keyConfigTable.row_data = self.getRowData
keyConfigTable.row_is_selected = lambda x: x == self.selectedKeyIndex
keyConfigTable.click_row = self.selectTableRow
keyConfigTable.key_down = self.key_down
keyConfigTable.key_up = self.key_up
tableWidget = albow.Widget()
tableWidget.add(keyConfigTable)
tableWidget.shrink_wrap()
self.keyConfigTable = keyConfigTable
#.#
col = albow.Column((tableWidget, choiceRow, buttonRow, resetToDefaultRow), spacing=spacing, margin=0)
self.add(col)
self.shrink_wrap()
def presentControls(self):
self.present()
self.oldChoice = self.choiceButton.selectedChoice
def done(self):
self.changesNum = False
self.changes = {}
config.save()
self.editor.movements = [
config.keys.left.get(),
config.keys.right.get(),
config.keys.forward.get(),
config.keys.back.get(),
config.keys.up.get(),
config.keys.down.get()
]
self.editor.toolbarKeys = [
config.keys.select.get(),
config.keys.brush.get(),
config.keys.clone.get(),
config.keys.fillAndReplace.get(),
config.keys.filter.get(),
config.keys.importKey.get(),
config.keys.players.get(),
config.keys.worldSpawnpoint.get(),
config.keys.chunkControl.get(),
config.keys.nbtExplorer.get()
]
self.editor.cameraPan = [
config.keys.panLeft.get(),
config.keys.panRight.get(),
config.keys.panUp.get(),
config.keys.panDown.get()
]
self.editor.sprintKey = config.keys.sprint.get()
self.root.movementLabel.text = _("{0}/{1}/{2}/{3}/{4}/{5} to move").format(
_(config.keys.forward.get()),
_(config.keys.left.get()),
_(config.keys.back.get()),
_(config.keys.right.get()),
_(config.keys.up.get()),
_(config.keys.down.get()),
)
self.root.slowDownLabel.text = _("{0} to slow down").format(_(config.keys.brake.get()))
self.root.detailsLabel.text = _("Hold {0} for details").format(_(config.keys.showBlockInfo.get()))
self.root.commandRow.labels[0].text = config.keys.newWorld.get()
self.root.commandRow.labels[1].text = config.keys.quickLoad.get()
self.root.commandRow.labels[2].text = config.keys.open.get()
self.dismiss()
def choosePreset(self):
preset = self.choiceButton.selectedChoice
keypairs = self.presets[preset]
for configKey, k in keypairs:
oldOne = config.keys[config.convert(configKey)].get()
if k != oldOne:
self.changesNum = True
if configKey not in self.changes:
self.changes[configKey] = oldOne
config.keys[config.convert(configKey)].set(k)
def getRowData(self, i):
if self.root is None:
self.root = self.get_root()
if self.editor is None:
self.editor = self.root.editor
configKey = self.keyConfigKeys[i]
if self.isConfigKey(configKey):
key = config.keys[config.convert(configKey)].get()
try:
oldKey = key
key = self.editor.different_keys[key]
if key != oldKey:
config.keys[config.convert(configKey)].set(key)
config.save()
except:
pass
if configKey in self.otherNames.keys():
configKey = self.otherNames[configKey]
else:
key = ""
return configKey, key
@staticmethod
def isConfigKey(configKey):
return not (len(configKey) == 0 or configKey[0] == "<")
def selectTableRow(self, i, evt):
self.selectedKeyIndex = i
if evt.num_clicks == 2:
self.askAssignSelectedKey()
def resetDefault(self):
self.choiceButton.selectedChoice = "WASD"
self.choosePreset()
def cancel(self):
if self.changesNum:
result = albow.ask("Do you want to save your changes?", ["Save", "Don't Save", "Cancel"])
if result == "Save":
self.done()
elif result == "Don't Save":
for k in self.changes.keys():
config.keys[config.convert(k)].set(self.changes[k])
self.changesNum = False
self.changes = {}
self.choiceButton.selectedChoice = self.oldChoice
config.save()
self.dismiss()
else:
self.dismiss()
def unbind(self):
configKey = self.keyConfigKeys[self.selectedKeyIndex]
if config.keys[config.convert(configKey)].get() != "None":
self.changesNum = True
config.keys[config.convert(configKey)].set("None")
self.panel.dismiss()
def key_down(self, evt):
keyname = self.root.getKey(evt)
if keyname == 'Escape':
self.cancel()
elif keyname == 'Up' and self.selectedKeyIndex > 0:
self.selectedKeyIndex -= 1
elif keyname == 'Down' and self.selectedKeyIndex < len(self.keyConfigKeys) - 1:
self.selectedKeyIndex += 1
elif keyname == 'Return':
self.enter += 1
self.askAssignSelectedKey()
elif keyname == 'Page down':
self.selectedKeyIndex = min(len(self.keyConfigKeys) - 1, self.selectedKeyIndex + self.nrows)
elif keyname == 'Page up':
self.selectedKeyIndex = max(0, self.selectedKeyIndex - self.nrows)
        # Keep the selected row visible: scroll whenever it is not the last
        # row currently shown in the table.
        lastVisible = self.keyConfigTable.rows.cell_to_item_no(0, 0) + self.keyConfigTable.rows.num_rows() - 1
        if lastVisible != self.selectedKeyIndex:
            self.keyConfigTable.rows.scroll_to_item(self.selectedKeyIndex)
def key_up(self, evt):
pass
def askAssignSelectedKey(self):
self.askAssignKey(self.keyConfigKeys[self.selectedKeyIndex])
def askAssignKey(self, configKey, labelString=None):
if not self.isConfigKey(configKey):
self.enter = 0
return
panel = Panel(name='Panel.KeyConfigPanel')
panel.bg_color = (0.3, 0.3, 0.3, 1.0)
if labelString is None and configKey != "Fast Nudge":
labelString = _("Press a key to assign to the action \"{0}\"\n\nPress ESC to cancel.").format(_(configKey))
elif labelString is None:
labelString = _("Press a key to assign to the action \"{0}\"\nNo key means right click to fast nudge.\nPress ESC to cancel.").format(_(configKey))
label = albow.Label(labelString)
unbind_button = Button("Press to unbind", action=self.unbind)
column = Column((label, unbind_button))
panel.add(column)
panel.shrink_wrap()
def panelKeyUp(evt):
keyname = self.root.getKey(evt)
panel.dismiss(keyname)
def panelMouseUp(evt):
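            # Map bindable mouse buttons to key names; left and middle clicks
            # (button <= 2 after remapping) cannot be assigned.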
button = remapMouseButton(evt.button)
if button == 3:
keyname = "Button 3"
elif button == 4:
keyname = "Scroll Up"
elif button == 5:
keyname = "Scroll Down"
elif button == 6:
keyname = "Button 4"
elif button == 7:
keyname = "Button 5"
if button > 2:
panel.dismiss(keyname)
panel.key_up = panelKeyUp
panel.mouse_up = panelMouseUp
self.panel = panel
keyname = panel.present()
if isinstance(keyname, bool):
return True
if keyname == "Return" and self.enter == 1:
self.enter = 0
self.askAssignKey(configKey)
return True
self.enter = 0
_keyname = _(keyname)
if keyname != "Escape" and keyname not in ("Alt-F4","F1","F2","F3","F4","F5","1","2","3","4","5","6","7","8","9","Ctrl-Alt-F9","Ctrl-Alt-F10"):
if "Modifier" in configKey and keyname != "Ctrl" and keyname != "Alt" and keyname != "Shift":
self.askAssignKey(configKey,
_("{0} is not a modifier. Press a new key.\n\nPress ESC to cancel.")
.format(_keyname))
return True
if configKey in ('Down','Up','Back','Forward','Left','Right','Pan Down','Pan Up','Pan Left','Pan Right'):
if 'Ctrl' in keyname or '-' in keyname:
self.askAssignKey(configKey,
"Movement keys can't use Ctrl or be with modifiers. Press a new key.\n\nPress ESC to cancel.")
return True
filter_keys = [i for (i, j) in config.config._sections["Filter Keys"].items() if j == _keyname]
if filter_keys:
self.askAssignKey(configKey,
_("Can't bind. {0} is already used by the \"{1}\" filter.\n Press a new key.\n\nPress ESC to cancel.").format(_keyname, filter_keys[0]))
return True
oldkey = config.keys[config.convert(configKey)].get()
config.keys[config.convert(configKey)].set(keyname)
if oldkey != keyname and configKey not in self.changes:
self.changes[configKey] = oldkey
self.changesNum = True
elif keyname != "Escape":
self.askAssignKey(configKey,
_("You can't use the key {0}. Press a new key.\n\nPress ESC to cancel.")
.format(_keyname))
return True
else:
return True
| 30,426 | 35.395933 | 238 |
py
|
MCEdit-Unified
|
MCEdit-Unified-master/hook-updater4pyi.py
|
# hook for pyinstaller
import updater4pyi
import updater4pyi.util
import os.path
def locpath(x):
return os.path.realpath(os.path.join(os.path.dirname(updater4pyi.__file__), x))
datas = [
(locpath('cacert.pem'), 'updater4pyi'),
]
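# PyInstaller hook convention: each 'datas' entry is a tuple of
# (source file on disk, destination directory inside the bundle).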
if updater4pyi.util.is_linux() or updater4pyi.util.is_macosx():
datas += [
(locpath('installers/unix/do_install.sh'), 'updater4pyi/installers/unix'),
]
elif updater4pyi.util.is_win():
datas += [
(locpath('installers/win/do_install.exe.zip'), 'updater4pyi/installers/win'),
]
# from hookutils import collect_data_files
#datas = collect_data_files('updater4pyi')
print "DATAS IS\n\t%r" % datas
| 668 | 22.892857 | 85 |
py
|
MCEdit-Unified
|
MCEdit-Unified-master/setup.py
|
import sys
import os
import platform
import distutils.file_util
from setuptools import setup
from Cython.Build import cythonize
# Output annotated .html
import Cython.Compiler.Options
Cython.Compiler.Options.annotate = True
modules_map = {
"png": {"source": "cpngfilters.pyx",
"description": "Build the accelerator to work with PNG images."},
"nbt": {"source": "pymclevel/_nbt.pyx",
"description": "Build the accelerator to work with NBT data."}
}
__help__ = """setup.py
Build Cython extensions for MCEdit-Unified.
You have to use at least one argument on the command line to build extensions.
Valid arguments are:
help Print this message.
all Build all extensions.
%s
--   Makes this script send all remaining arguments to setuptools.
If setuptools commands like 'build' or 'install' are given after a double
dash ('--'), you can expect unwanted behaviour, because 'build_ext' and
'--inplace' are forced (added dynamically to the command line).
""" % "\n".join(["%s %s" % (k, v["description"]) for k, v in modules_map.items()])
# Let people choose what to build.
# If no argument is given on the command line, display help message.
# If a wrong argument is given, break.
if len(sys.argv) == 1:
print __help__
sys.exit(0)
else:
ext_modules = []
args = sys.argv[1:]
msg = "Following extensions will be built: %s."
ext_list = []
for arg in args:
        # Allow setuptools arguments to be passed through on the command line.
print arg
if arg == '--':
sys.argv.remove(arg)
break
if not arg.startswith('-'):
if arg == 'help':
print __help__
sys.exit(0)
elif arg == 'all':
ext_list = list(modules_map.keys())
ext_modules = [v["source"] for v in modules_map.values()]
elif arg not in modules_map.keys():
print "'%s' is not a valid argument. Use 'help' one for information." % arg
sys.exit(1)
else:
src = modules_map[arg]["source"]
if src not in ext_modules:
ext_list.append(arg)
ext_modules.append(src)
sys.argv.remove(arg)
print msg % ", ".join(ext_list)
sys.argv.insert(1, '--inplace')
sys.argv.insert(1, 'build_ext')
setup(
ext_modules=cythonize(ext_modules)
)
# On Linux, we want _nbt.so in pymclevel.
if platform.system() == "Linux":
if os.path.isfile("_nbt.so"):
nbt_dest = os.path.join("pymclevel", "_nbt.so")
if os.path.isfile(nbt_dest):
os.remove(nbt_dest)
distutils.file_util.move_file("_nbt.so", nbt_dest)
| 2,709 | 30.511628 | 91 |
py
|
MCEdit-Unified
|
MCEdit-Unified-master/pyperclip.py
|
"""
Pyperclip
A cross-platform clipboard module for Python. (only handles plain text for now)
By Al Sweigart [email protected]
BSD License
Usage:
import pyperclip
pyperclip.copy('The text to be copied to the clipboard.')
spam = pyperclip.paste()
On Windows, no additional modules are needed.
On Mac, this module makes use of the pbcopy and pbpaste commands, which should come with the OS.
On Linux, this module makes use of the xclip or xsel commands, which should come with the OS. Otherwise run "sudo apt-get install xclip" or "sudo apt-get install xsel"
Otherwise on Linux, you will need the gtk or PyQt4 modules installed.
The gtk module is not available for Python 3, and this module does not work with PyGObject yet.
"""
# Modified by Podshot to work with MCEdit-Unified's python environment
# * Removed Python 3 compatibility
# * Removed PyQt4 support
__version__ = '1.5.6'
import platform, os
from subprocess import call, Popen, PIPE
def _pasteWindows():
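    # CF_UNICODETEXT (13) is the Win32 clipboard format ID for UTF-16 text.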
CF_UNICODETEXT = 13
d = ctypes.windll
d.user32.OpenClipboard(None)
handle = d.user32.GetClipboardData(CF_UNICODETEXT)
data = ctypes.c_wchar_p(handle).value
d.user32.CloseClipboard()
return data
def _copyWindows(text):
GMEM_DDESHARE = 0x2000
CF_UNICODETEXT = 13
d = ctypes.windll # cdll expects 4 more bytes in user32.OpenClipboard(None)
try: # Python 2
if not isinstance(text, unicode):
text = text.decode('mbcs')
except NameError:
if not isinstance(text, str):
text = text.decode('mbcs')
d.user32.OpenClipboard(None)
d.user32.EmptyClipboard()
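    # Allocate a movable global memory block, copy the text in as UTF-16,
    # then hand ownership of the buffer to the clipboard via SetClipboardData.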
hCd = d.kernel32.GlobalAlloc(GMEM_DDESHARE, len(text.encode('utf-16-le')) + 2)
pchData = d.kernel32.GlobalLock(hCd)
ctypes.cdll.msvcrt.wcscpy(ctypes.c_wchar_p(pchData), text)
d.kernel32.GlobalUnlock(hCd)
d.user32.SetClipboardData(CF_UNICODETEXT, hCd)
d.user32.CloseClipboard()
def _pasteCygwin():
CF_UNICODETEXT = 13
d = ctypes.cdll
d.user32.OpenClipboard(None)
handle = d.user32.GetClipboardData(CF_UNICODETEXT)
data = ctypes.c_wchar_p(handle).value
d.user32.CloseClipboard()
return data
def _copyCygwin(text):
GMEM_DDESHARE = 0x2000
CF_UNICODETEXT = 13
d = ctypes.cdll
try: # Python 2
if not isinstance(text, unicode):
text = text.decode('mbcs')
except NameError:
if not isinstance(text, str):
text = text.decode('mbcs')
d.user32.OpenClipboard(None)
d.user32.EmptyClipboard()
hCd = d.kernel32.GlobalAlloc(GMEM_DDESHARE, len(text.encode('utf-16-le')) + 2)
pchData = d.kernel32.GlobalLock(hCd)
ctypes.cdll.msvcrt.wcscpy(ctypes.c_wchar_p(pchData), text)
d.kernel32.GlobalUnlock(hCd)
d.user32.SetClipboardData(CF_UNICODETEXT, hCd)
d.user32.CloseClipboard()
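# The extra 'w'/'r' arguments passed to pbcopy/pbpaste below look vestigial
# (likely left over from an os.popen()-based implementation); both commands
# ignore stray positional arguments, so they are harmless.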
def _copyOSX(text):
text = str(text)
p = Popen(['pbcopy', 'w'], stdin=PIPE)
p.communicate(input=bytes(text))
def _pasteOSX():
p = Popen(['pbpaste', 'r'], stdout=PIPE)
stdout, stderr = p.communicate()
return bytes.decode(stdout)
def _pasteGtk():
return gtk.Clipboard().wait_for_text()
def _copyGtk(text):
global cb
text = str(text)
cb = gtk.Clipboard()
cb.set_text(text)
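    # store() hands the text to the clipboard manager so it can outlive
    # this process.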
cb.store()
def _pasteQt():
return str(cb.text())
def _copyQt(text):
text = str(text)
cb.setText(text)
def _copyXclip(text):
p = Popen(['xclip', '-selection', 'c'], stdin=PIPE)
p.communicate(input=bytes(text))
def _pasteXclip():
p = Popen(['xclip', '-selection', 'c', '-o'], stdout=PIPE)
stdout, stderr = p.communicate()
return bytes.decode(stdout)
def _copyXsel(text):
p = Popen(['xsel', '-i'], stdin=PIPE)
p.communicate(input=bytes(text))
def _pasteXsel():
p = Popen(['xsel', '-o'], stdout=PIPE)
stdout, stderr = p.communicate()
return bytes.decode(stdout)
# Determine the OS/platform and set the copy() and paste() functions accordingly.
if 'cygwin' in platform.system().lower():
_functions = 'Cygwin' # for debugging
import ctypes
paste = _pasteCygwin
copy = _copyCygwin
elif os.name == 'nt' or platform.system() == 'Windows':
_functions = 'Windows' # for debugging
import ctypes # @Reimport
paste = _pasteWindows
copy = _copyWindows
elif os.name == 'mac' or platform.system() == 'Darwin':
_functions = 'OS X pbcopy/pbpaste' # for debugging
paste = _pasteOSX
copy = _copyOSX
elif os.name == 'posix' or platform.system() == 'Linux':
# Determine which command/module is installed, if any.
xclipExists = call(['which', 'xclip'],
stdout=PIPE, stderr=PIPE) == 0
xselExists = call(['which', 'xsel'],
stdout=PIPE, stderr=PIPE) == 0
gtkInstalled = False
try:
        # Check if gtk is installed.
import gtk
gtkInstalled = True
except ImportError:
pass
    # Only fail outright if no backend at all is available (xsel counts too).
    if not gtkInstalled and not xclipExists and not xselExists:
        raise Exception('Pyperclip requires the gtk module installed, or the xclip or xsel command.')
# Set one of the copy & paste functions.
if xclipExists:
_functions = 'xclip command' # for debugging
paste = _pasteXclip
copy = _copyXclip
elif gtkInstalled:
_functions = 'gtk module' # for debugging
paste = _pasteGtk
copy = _copyGtk
elif xselExists:
# TODO: xsel doesn't seem to work on Raspberry Pi (my test Linux environment). Putting this as the last method tried.
_functions = 'xsel command' # for debugging
paste = _pasteXsel
copy = _copyXsel
else:
raise Exception('Pyperclip requires the xclip or xsel application, or the gtk module.')
else:
raise RuntimeError('pyperclip does not support your system.')
| 5,760 | 27.805 | 167 |
py
|