repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
---|---|---|---|---|---|---|
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/algorithms/funksvd.py | """
FunkSVD (biased MF).
"""
import logging
import time
import pandas as pd
import numpy as np
import numba as n
from . import basic
from .mf_common import BiasMFPredictor
from .. import util
_logger = logging.getLogger(__name__)
@n.jitclass([
('user_features', n.double[:, :]),
('item_features', n.double[:, :]),
('feature_count', n.int32),
('user_count', n.int32),
('item_count', n.int32),
('initial_value', n.double)
])
class Model:
"Internal model class for training SGD MF."
def __init__(self, umat, imat):
self.user_features = umat
self.item_features = imat
self.feature_count = umat.shape[1]
assert imat.shape[1] == self.feature_count
self.user_count = umat.shape[0]
self.item_count = imat.shape[0]
self.initial_value = np.nan
def _fresh_model(nfeatures, nusers, nitems, init=0.1):
umat = np.full([nusers, nfeatures], init, dtype=np.float_)
imat = np.full([nitems, nfeatures], init, dtype=np.float_)
model = Model(umat, imat)
model.initial_value = init
assert model.feature_count == nfeatures
assert model.user_count == nusers
assert model.item_count == nitems
return model
@n.jitclass([
('iter_count', n.int32),
('lrate', n.double),
('reg_term', n.double),
('rmin', n.double),
('rmax', n.double)
])
class _Params:
def __init__(self, niters, lrate, reg, rmin, rmax):
self.iter_count = niters
self.lrate = lrate
self.reg_term = reg
self.rmin = rmin
self.rmax = rmax
def make_params(niters, lrate, reg, range):
if range is None:
rmin = -np.inf
rmax = np.inf
else:
rmin, rmax = range
return _Params(niters, lrate, reg, rmin, rmax)
@n.jitclass([
('est', n.double[:]),
('feature', n.int32),
('trail', n.double)
])
class _FeatContext:
def __init__(self, est, feature, trail):
self.est = est
self.feature = feature
self.trail = trail
@n.jitclass([
('users', n.int32[:]),
('items', n.int32[:]),
('ratings', n.double[:]),
('bias', n.double[:]),
('n_samples', n.uint64)
])
class Context:
def __init__(self, users, items, ratings, bias):
self.users = users
self.items = items
self.ratings = ratings
self.bias = bias
self.n_samples = users.shape[0]
assert items.shape[0] == self.n_samples
assert ratings.shape[0] == self.n_samples
assert bias.shape[0] == self.n_samples
@n.njit
def _feature_loop(ctx: Context, params: _Params, model: Model, fc: _FeatContext):
users = ctx.users
items = ctx.items
ratings = ctx.ratings
umat = model.user_features
imat = model.item_features
est = fc.est
f = fc.feature
trail = fc.trail
sse = 0.0
acc_ud = 0.0
acc_id = 0.0
for s in range(ctx.n_samples):
user = users[s]
item = items[s]
ufv = umat[user, f]
ifv = imat[item, f]
pred = est[s] + ufv * ifv + trail
if pred < params.rmin:
pred = params.rmin
elif pred > params.rmax:
pred = params.rmax
error = ratings[s] - pred
sse += error * error
# compute deltas
ufd = error * ifv - params.reg_term * ufv
ufd = ufd * params.lrate
acc_ud += ufd * ufd
ifd = error * ufv - params.reg_term * ifv
ifd = ifd * params.lrate
acc_id += ifd * ifd
umat[user, f] += ufd
imat[item, f] += ifd
return np.sqrt(sse / ctx.n_samples)
@n.njit
def _train_feature(ctx, params, model, fc):
for epoch in range(params.iter_count):
rmse = _feature_loop(ctx, params, model, fc)
return rmse
def train(ctx: Context, params: _Params, model: Model, timer):
est = ctx.bias
for f in range(model.feature_count):
start = time.perf_counter()
trail = model.initial_value * model.initial_value * (model.feature_count - f - 1)
fc = _FeatContext(est, f, trail)
rmse = _train_feature(ctx, params, model, fc)
end = time.perf_counter()
_logger.info('[%s] finished feature %d (RMSE=%f) in %.2fs',
timer, f, rmse, end - start)
est = est + model.user_features[ctx.users, f] * model.item_features[ctx.items, f]
est = np.maximum(est, params.rmin)
est = np.minimum(est, params.rmax)
def _align_add_bias(bias, index, keys, series):
"Realign a bias series with an index, and add to a series"
# realign bias to make sure it matches
bias = bias.reindex(index, fill_value=0)
assert len(bias) == len(index)
# look up bias for each key
ibs = bias.loc[keys]
# change index
ibs.index = keys.index
# and add
series = series + ibs
return bias, series
class FunkSVD(BiasMFPredictor):
"""
Algorithm class implementing FunkSVD matrix factorization.
Args:
features(int): the number of features to train
iterations(int): the number of iterations to train each feature
lrate(double): the learning rate
reg(double): the regularization factor
damping(double): damping factor for the underlying mean
bias(Predictor): the underlying bias model to fit. If ``True``, then a
:py:class:`.basic.Bias` model is fit with ``damping``.
range(tuple):
the ``(min, max)`` rating values to clamp ratings, or ``None`` to leave
predictions unclamped.
"""
def __init__(self, features, iterations=100, *, lrate=0.001, reg=0.015,
damping=5, range=None, bias=True):
self.features = features
self.iterations = iterations
self.lrate = lrate
self.reg = reg
self.damping = damping
self.range = range
if bias is True:
self.bias = basic.Bias(damping=damping)
else:
self.bias = bias
def fit(self, ratings):
"""
Train a FunkSVD model.
Args:
ratings: the ratings data frame.
"""
timer = util.Stopwatch()
if self.bias is not None:
_logger.info('[%s] fitting bias model', timer)
self.bias.fit(ratings)
_logger.info('[%s] preparing rating data for %d samples', timer, len(ratings))
_logger.debug('shuffling rating data')
shuf = np.arange(len(ratings), dtype=np.int_)
np.random.shuffle(shuf)
ratings = ratings.iloc[shuf, :]
_logger.debug('[%s] indexing users and items', timer)
uidx = pd.Index(ratings.user.unique())
iidx = pd.Index(ratings.item.unique())
users = uidx.get_indexer(ratings.user).astype(np.int32)
assert np.all(users >= 0)
items = iidx.get_indexer(ratings.item).astype(np.int32)
assert np.all(items >= 0)
_logger.debug('[%s] computing initial estimates', timer)
if self.bias is not None:
initial = pd.Series(self.bias.mean_, index=ratings.index, dtype=np.float_)
ibias, initial = _align_add_bias(self.bias.item_offsets_, iidx, ratings.item, initial)
ubias, initial = _align_add_bias(self.bias.user_offsets_, uidx, ratings.user, initial)
else:
initial = pd.Series(0.0, index=ratings.index)
ibias = ubias = None
_logger.debug('have %d estimates for %d ratings', len(initial), len(ratings))
assert len(initial) == len(ratings)
_logger.debug('[%s] initializing data structures', timer)
context = Context(users, items, ratings.rating.astype(np.float_).values,
initial.values)
params = make_params(self.iterations, self.lrate, self.reg, self.range)
model = _fresh_model(self.features, len(uidx), len(iidx))
_logger.info('[%s] training biased MF model with %d features', timer, self.features)
train(context, params, model, timer)
_logger.info('finished model training in %s', timer)
self.user_index_ = uidx
self.item_index_ = iidx
self.global_bias_ = self.bias.mean_ if self.bias is not None else 0
self.user_bias_ = ubias.values if ubias is not None else None
self.item_bias_ = ibias.values if ibias is not None else None
self.user_features_ = model.user_features
self.item_features_ = model.item_features
return self
def predict_for_user(self, user, items, ratings=None):
# look up user index
uidx = self.lookup_user(user)
if uidx < 0:
_logger.debug('user %s not in model', user)
return pd.Series(np.nan, index=items)
# get item index & limit to valid ones
items = np.array(items)
iidx = self.lookup_items(items)
good = iidx >= 0
good_items = items[good]
good_iidx = iidx[good]
# multiply
_logger.debug('scoring %d items for user %s', len(good_items), user)
rv = self.score(uidx, good_iidx)
# clamp if suitable
if self.range is not None:
rmin, rmax = self.range
rv = np.maximum(rv, rmin)
rv = np.minimum(rv, rmax)
res = pd.Series(rv, index=good_items)
res = res.reindex(items)
return res
def __str__(self):
return 'FunkSVD(features={}, reg={})'.\
format(self.features, self.reg)
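# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# It assumes a hypothetical toy ratings frame with the LensKit (user, item, rating)
# columns; the hyperparameter values are illustrative, not recommended settings.
if __name__ == '__main__':
    toy = pd.DataFrame({
        'user': [1, 1, 2, 2, 3],
        'item': [10, 20, 10, 30, 20],
        'rating': [4.0, 3.0, 5.0, 2.0, 4.5],
    })
    algo = FunkSVD(features=5, iterations=10, lrate=0.001, reg=0.015)
    algo.fit(toy)
    # unknown items (99) come back as NaN in the returned Series
    print(algo.predict_for_user(1, [10, 20, 30, 99]))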
| 9,416 | 29.377419 | 98 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/algorithms/mf_common.py | """
Common utilities & implementations for matrix factorization.
"""
import pathlib
import logging
import numpy as np
import pandas as pd
from .. import util
from . import Predictor
_logger = logging.getLogger(__name__)
class MFPredictor(Predictor):
"""
Common predictor for matrix factorization.
Attributes:
user_index_(pandas.Index): Users in the model (length=:math:`m`).
item_index_(pandas.Index): Items in the model (length=:math:`n`).
user_features_(numpy.ndarray): The :math:`m \\times k` user-feature matrix.
item_features_(numpy.ndarray): The :math:`n \\times k` item-feature matrix.
"""
@property
def n_features(self):
"The number of features."
return self.user_features_.shape[1]
@property
def n_users(self):
"The number of users."
return len(self.user_index_)
@property
def n_items(self):
"The number of items."
return len(self.item_index_)
def lookup_user(self, user):
"""
Look up the index for a user.
Args:
user: the user ID to look up
Returns:
int: the user index.
"""
try:
return self.user_index_.get_loc(user)
except KeyError:
return -1
def lookup_items(self, items):
"""
Look up the indices for a set of items.
Args:
items(array-like): the item IDs to look up.
Returns:
numpy.ndarray: the item indices. Unknown items will have negative indices.
"""
return self.item_index_.get_indexer(items)
def score(self, user, items):
"""
Score a set of items for a user. User and item parameters must be indices
into the matrices.
Args:
user(int): the user index
items(array-like of int): the item indices
Returns:
numpy.ndarray: the scores for the items.
"""
# get user vector
uv = self.user_features_[user, :]
# get item matrix
im = self.item_features_[items, :]
rv = np.matmul(im, uv)
assert rv.shape[0] == len(items)
assert len(rv.shape) == 1
return rv
def score_by_ids(self, user, items):
uidx = self.lookup_user(user)
if uidx < 0:
_logger.debug('user %s not in model', user)
return pd.Series(np.nan, index=items)
# get item index & limit to valid ones
items = np.array(items)
iidx = self.lookup_items(items)
good = iidx >= 0
good_items = items[good]
good_iidx = iidx[good]
# multiply
_logger.debug('scoring %d items for user %s', len(good_items), user)
rv = self.score(uidx, good_iidx)
res = pd.Series(rv, index=good_items)
res = res.reindex(items)
return res
class BiasMFPredictor(MFPredictor):
"""
Common model for biased matrix factorization.
Attributes:
user_index_(pandas.Index): Users in the model (length=:math:`m`).
item_index_(pandas.Index): Items in the model (length=:math:`n`).
global_bias_(double): The global bias term.
user_bias_(numpy.ndarray): The user bias terms.
item_bias_(numpy.ndarray): The item bias terms.
user_features_(numpy.ndarray): The :math:`m \\times k` user-feature matrix.
item_features_(numpy.ndarray): The :math:`n \\times k` item-feature matrix.
"""
def score(self, user, items, raw=False):
"""
Score a set of items for a user. User and item parameters must be indices
into the matrices.
Args:
user(int): the user index
items(array-like of int): the item indices
raw(bool): if ``True``, return the raw scores without the biases added back.
Returns:
numpy.ndarray: the scores for the items.
"""
rv = super().score(user, items)
if not raw:
# add bias back in
rv = rv + self.global_bias_
if self.user_bias_ is not None:
rv = rv + self.user_bias_[user]
if self.item_bias_ is not None:
rv = rv + self.item_bias_[items]
return rv
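# --- Editor's note: an illustrative sketch, not part of the original module. ---
# MFPredictor.score is a dot product between one row of the user-feature matrix
# and the selected rows of the item-feature matrix; BiasMFPredictor.score then
# adds the global, user, and item bias terms back.  The matrices and bias values
# below are hypothetical stand-ins for the fitted attributes.
if __name__ == '__main__':
    umat = np.random.randn(3, 4)               # user_features_ (m x k)
    imat = np.random.randn(5, 4)               # item_features_ (n x k)
    user, items = 1, np.array([0, 2, 4])
    raw = imat[items, :] @ umat[user, :]       # what MFPredictor.score returns
    gbias, ubias, ibias = 3.5, np.zeros(3), np.zeros(5)
    scores = raw + gbias + ubias[user] + ibias[items]   # BiasMFPredictor.score
    print(scores)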
| 4,358 | 27.122581 | 86 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/algorithms/als.py | import logging
from collections import namedtuple
import numpy as np
from numba import njit, prange
from . import basic
from .mf_common import BiasMFPredictor, MFPredictor
from ..matrix import sparse_ratings, _CSR
from .. import util
from ..math.solve import _dposv
_logger = logging.getLogger(__name__)
Context = namedtuple('Context', [
'users', 'items',
'user_matrix', 'item_matrix'
])
@njit(parallel=True, nogil=True)
def _train_matrix(mat: _CSR, other: np.ndarray, reg: float):
"One half of an explicit ALS training round."
nr = mat.nrows
nf = other.shape[1]
regI = np.identity(nf) * reg
assert mat.ncols == other.shape[0]
result = np.zeros((nr, nf))
for i in prange(nr):
cols = mat.row_cs(i)
if len(cols) == 0:
continue
vals = mat.row_vs(i)
M = other[cols, :]
MMT = M.T @ M
# assert MMT.shape[0] == ctx.n_features
# assert MMT.shape[1] == ctx.n_features
A = MMT + regI * len(cols)
V = M.T @ vals
# and solve
_dposv(A, V, True)
result[i, :] = V
return result
@njit(parallel=True, nogil=True)
def _train_implicit_matrix(mat: _CSR, other: np.ndarray, reg: float):
"One half of an implicit ALS training round."
nr = mat.nrows
nc = other.shape[0]
nf = other.shape[1]
assert mat.ncols == nc
regmat = np.identity(nf) * reg
Ot = other.T
OtO = Ot @ other
OtOr = OtO + regmat
assert OtO.shape[0] == OtO.shape[1]
assert OtO.shape[0] == nf
result = np.zeros((nr, nf))
for i in prange(nr):
cols = mat.row_cs(i)
if len(cols) == 0:
continue
rates = mat.row_vs(i)
# we can optimize by only considering the nonzero entries of Cu-I
# this means we only need the corresponding matrix columns
M = other[cols, :]
# Compute M^T C_u M, restricted to these nonzero entries
MMT = (M.T.copy() * rates) @ M
# assert MMT.shape[0] == ctx.n_features
# assert MMT.shape[1] == ctx.n_features
# Build the matrix for solving
A = OtOr + MMT
# Compute RHS - only used columns (p_ui != 0) values needed
# Cu is rates + 1 for the cols, so just trim Ot
y = Ot[:, cols] @ (rates + 1.0)
# and solve
_dposv(A, y, True)
# assert len(uv) == ctx.n_features
result[i, :] = y
return result
class BiasedMF(BiasMFPredictor):
"""
Biased matrix factorization trained with alternating least squares [ZWSP2008]_. This is a
prediction-oriented algorithm suitable for explicit feedback data.
.. [ZWSP2008] Yunhong Zhou, Dennis Wilkinson, Robert Schreiber, and Rong Pan. 2008.
Large-Scale Parallel Collaborative Filtering for the Netflix Prize.
In _Algorithmic Aspects in Information and Management_, LNCS 5034, 337–348.
DOI `10.1007/978-3-540-68880-8_32 <http://dx.doi.org/10.1007/978-3-540-68880-8_32>`_.
Args:
features(int): the number of features to train
iterations(int): the number of iterations to train
reg(double): the regularization factor
damping(double): damping factor for the underlying mean
"""
timer = None
def __init__(self, features, *, iterations=20, reg=0.1, damping=5, bias=True):
self.features = features
self.iterations = iterations
self.regularization = reg
self.damping = damping
if bias is True:
self.bias = basic.Bias(damping=damping)
else:
self.bias = bias
def fit(self, ratings):
"""
Run ALS to train a model.
Args:
ratings: the ratings data frame.
Returns:
The algorithm (for chaining).
"""
self.timer = util.Stopwatch()
if self.bias is not None:
_logger.info('[%s] fitting bias model', self.timer)
self.bias.fit(ratings)
current, bias, uctx, ictx = self._initial_model(ratings)
_logger.info('[%s] training biased MF model with ALS for %d features',
self.timer, self.features)
for epoch, model in enumerate(self._train_iters(current, uctx, ictx)):
current = model
_logger.info('trained model in %s', self.timer)
# unpack and de-Series bias
gb, ub, ib = bias
self.global_bias_ = gb
self.user_bias_ = np.require(ub.values, None, 'C') if ub is not None else None
self.item_bias_ = np.require(ib.values, None, 'C') if ib is not None else None
self.item_index_ = current.items
self.user_index_ = current.users
self.item_features_ = current.item_matrix
self.user_features_ = current.user_matrix
return self
def _initial_model(self, ratings, bias=None):
"Initialize a model and build contexts."
rmat, users, items = sparse_ratings(ratings)
n_users = len(users)
n_items = len(items)
rmat, bias = self._normalize(rmat, users, items)
_logger.debug('setting up contexts')
trmat = rmat.transpose()
_logger.debug('initializing item matrix')
imat = np.random.randn(n_items, self.features) * 0.01
umat = np.full((n_users, self.features), np.nan)
return Context(users, items, umat, imat), bias, rmat, trmat
def _normalize(self, ratings, users, items):
"Apply bias normalization to the data in preparation for training."
n_users = len(users)
n_items = len(items)
assert ratings.nrows == n_users
assert ratings.ncols == n_items
if self.bias is not None:
gbias = self.bias.mean_
ibias = self.bias.item_offsets_
ubias = self.bias.user_offsets_
else:
gbias = 0
ibias = ubias = None
_logger.info('[%s] normalizing %dx%d matrix (%d nnz)',
self.timer, n_users, n_items, ratings.nnz)
ratings.values = ratings.values - gbias
if ibias is not None:
ibias = ibias.reindex(items, fill_value=0)
ratings.values = ratings.values - ibias.values[ratings.colinds]
if ubias is not None:
ubias = ubias.reindex(users, fill_value=0)
# create a user index array the size of the data
reps = np.repeat(np.arange(len(users), dtype=np.int32),
ratings.row_nnzs())
assert len(reps) == ratings.nnz
# subtract user means
ratings.values = ratings.values - ubias.values[reps]
del reps
return ratings, (gbias, ubias, ibias)
def _train_iters(self, current, uctx, ictx):
"Generator of training iterations."
for epoch in range(self.iterations):
umat = _train_matrix(uctx.N, current.item_matrix, self.regularization)
_logger.debug('[%s] finished user epoch %d', self.timer, epoch)
imat = _train_matrix(ictx.N, umat, self.regularization)
_logger.debug('[%s] finished item epoch %d', self.timer, epoch)
di = np.linalg.norm(imat - current.item_matrix, 'fro')
du = np.linalg.norm(umat - current.user_matrix, 'fro')
_logger.info('[%s] finished epoch %d (|ΔI|=%.3f, |ΔU|=%.3f)', self.timer, epoch, di, du)
current = current._replace(user_matrix=umat, item_matrix=imat)
yield current
def predict_for_user(self, user, items, ratings=None):
# look up user index
return self.score_by_ids(user, items)
def __str__(self):
return 'als.BiasedMF(features={}, regularization={})'.\
format(self.features, self.regularization)
class ImplicitMF(MFPredictor):
"""
Implicit matrix factorization trained with alternating least squares [HKV2008]_. This
algorithm outputs 'predictions', but they are not on a meaningful scale. If its input
data contains ``rating`` values, these will be used as the 'confidence' values; otherwise,
confidence will be 1 for every rated item.
.. [HKV2008] Y. Hu, Y. Koren, and C. Volinsky. 2008.
Collaborative Filtering for Implicit Feedback Datasets.
In _Proceedings of the 2008 Eighth IEEE International Conference on Data Mining_, 263–272.
DOI `10.1109/ICDM.2008.22 <http://dx.doi.org/10.1109/ICDM.2008.22>`_
Args:
features(int): the number of features to train
iterations(int): the number of iterations to train
reg(double): the regularization factor
weight(double): the scaling weight for positive samples (:math:`\\alpha` in [HKV2008]_).
"""
timer = None
def __init__(self, features, *, iterations=20, reg=0.1, weight=40):
self.features = features
self.iterations = iterations
self.reg = reg
self.weight = weight
def fit(self, ratings):
self.timer = util.Stopwatch()
current, uctx, ictx = self._initial_model(ratings)
_logger.info('[%s] training implicit MF model with ALS for %d features',
self.timer, self.features)
_logger.info('have %d observations for %d users and %d items',
uctx.nnz, uctx.nrows, ictx.nrows)
for model in self._train_iters(current, uctx, ictx):
current = model
_logger.info('[%s] finished training model with %d features',
self.timer, self.features)
self.item_index_ = current.items
self.user_index_ = current.users
self.item_features_ = current.item_matrix
self.user_features_ = current.user_matrix
return self
def _train_iters(self, current, uctx, ictx):
"Generator of training iterations."
for epoch in range(self.iterations):
umat = _train_implicit_matrix(uctx.N, current.item_matrix,
self.reg)
_logger.debug('[%s] finished user epoch %d', self.timer, epoch)
imat = _train_implicit_matrix(ictx.N, umat, self.reg)
_logger.debug('[%s] finished item epoch %d', self.timer, epoch)
di = np.linalg.norm(imat - current.item_matrix, 'fro')
du = np.linalg.norm(umat - current.user_matrix, 'fro')
_logger.info('[%s] finished epoch %d (|ΔI|=%.3f, |ΔU|=%.3f)', self.timer, epoch, di, du)
current = current._replace(user_matrix=umat, item_matrix=imat)
yield current
def _initial_model(self, ratings):
"Initialize a model and build contexts."
rmat, users, items = sparse_ratings(ratings)
n_users = len(users)
n_items = len(items)
_logger.debug('setting up contexts')
# force values to exist
if rmat.values is None:
rmat.values = np.ones(rmat.nnz)
rmat.values *= self.weight
trmat = rmat.transpose()
imat = np.random.randn(n_items, self.features) * 0.01
imat = np.square(imat)
umat = np.full((n_users, self.features), np.nan)
return Context(users, items, umat, imat), rmat, trmat
def predict_for_user(self, user, items, ratings=None):
# look up user index
return self.score_by_ids(user, items)
def __str__(self):
return 'als.ImplicitMF(features={}, reg={}, w={})'.\
format(self.features, self.reg, self.weight)
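# --- Editor's note: an illustrative sketch, not part of the original module. ---
# Each row handled by _train_matrix solves a small ridge-regression system
# (M^T M + reg * |I_u| * I) x = M^T r, where M stacks the feature vectors of the
# items one user rated and r holds that user's normalized ratings.  The same
# update with plain NumPy (instead of the in-place _dposv solver), on
# hypothetical data:
if __name__ == '__main__':
    rng = np.random.default_rng(0)
    other = rng.normal(size=(6, 4))        # current item-feature matrix (n x k)
    cols = np.array([0, 2, 5])             # items rated by one user
    vals = np.array([1.0, -0.5, 0.25])     # that user's normalized ratings
    reg = 0.1
    M = other[cols, :]
    A = M.T @ M + np.identity(4) * reg * len(cols)
    x = np.linalg.solve(A, M.T @ vals)     # the user's new feature vector
    print(x)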
| 11,409 | 35.222222 | 100 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/algorithms/user_knn.py | """
User-based k-NN collaborative filtering.
"""
from sys import intern
import pathlib
import logging
import pandas as pd
import numpy as np
from scipy import stats
from .. import util, matrix
from . import Predictor
_logger = logging.getLogger(__name__)
class UserUser(Predictor):
"""
User-user nearest-neighbor collaborative filtering with ratings. This user-user implementation
is not terribly configurable; it hard-codes design decisions found to work well in the previous
Java-based LensKit code.
Attributes:
user_index_(pandas.Index): User index.
item_index_(pandas.Index): Item index.
user_means_(numpy.ndarray): User mean ratings.
rating_matrix_(matrix.CSR): Normalized user-item rating matrix.
transpose_matrix_(matrix.CSR): Transposed un-normalized rating matrix.
"""
AGG_SUM = intern('sum')
AGG_WA = intern('weighted-average')
def __init__(self, nnbrs, min_nbrs=1, min_sim=0, center=True, aggregate='weighted-average'):
"""
Args:
nnbrs(int):
the maximum number of neighbors for scoring each item (``None`` for unlimited)
min_nbrs(int): the minimum number of neighbors for scoring each item
min_sim(double): minimum similarity threshold for considering a neighbor
center(bool):
whether to normalize (mean-center) rating vectors. Turn this off when working
with unary data and other data types that don't respond well to centering.
aggregate:
the type of aggregation to do. Can be ``weighted-average`` or ``sum``.
"""
self.nnbrs = nnbrs
self.min_nbrs = min_nbrs
self.min_sim = min_sim
self.center = center
self.aggregate = intern(aggregate)
def fit(self, ratings):
"""
"Train" a user-user CF model. This memorizes the rating data in a format that is usable
for future computations.
Args:
ratings(pandas.DataFrame): (user, item, rating) data for collaborative filtering.
Returns:
UUModel: a memorized model for efficient user-based CF computation.
"""
uir, users, items = matrix.sparse_ratings(ratings)
# mean-center ratings
if self.center:
umeans = np.zeros(len(users))
for u in range(uir.nrows):
sp, ep = uir.row_extent(u)
v = uir.values[sp:ep]
umeans[u] = m = v.mean()
uir.values[sp:ep] -= m
else:
umeans = None
# compute centered transpose
iur = uir.transpose()
# L2-normalize ratings
if uir.values is None:
uir.values = np.full(uir.nnz, 1.0)
for u in range(uir.nrows):
sp, ep = uir.row_extent(u)
v = uir.values[sp:ep]
n = np.linalg.norm(v)
uir.values[sp:ep] /= n
mkl = matrix.mkl_ops()
mkl_m = mkl.SparseM.from_csr(uir) if mkl else None
self.rating_matrix_ = uir
self.user_index_ = users
self.user_means_ = umeans
self.item_index_ = items
self.transpose_matrix_ = iur
self._mkl_m_ = mkl_m
return self
def predict_for_user(self, user, items, ratings=None):
"""
Compute predictions for a user and items.
Args:
user: the user ID
items (array-like): the items to predict
ratings (pandas.Series):
the user's ratings (indexed by item id); if provided, will be used to
recompute the user's bias at prediction time.
Returns:
pandas.Series: scores for the items, indexed by item id.
"""
watch = util.Stopwatch()
items = pd.Index(items, name='item')
ratings, umean = self._get_user_data(user, ratings)
if ratings is None:
return pd.Series(index=items)
assert len(ratings) == len(self.item_index_) # ratings is a dense vector
# now ratings is normalized to be a mean-centered unit vector
# this means we can dot product to score neighbors
# score the neighbors!
if self._mkl_m_:
nsims = np.zeros(len(self.user_index_))
nsims = self._mkl_m_.mult_vec(1, ratings, 0, nsims)
else:
rmat = self.rating_matrix_.to_scipy()
nsims = rmat @ ratings
assert len(nsims) == len(self.user_index_)
if user in self.user_index_:
nsims[self.user_index_.get_loc(user)] = 0
_logger.debug('computed user similarities')
results = np.full(len(items), np.nan, dtype=np.float_)
ri_pos = self.item_index_.get_indexer(items.values)
for i in range(len(results)):
ipos = ri_pos[i]
if ipos < 0:
continue
# get the item's users & ratings
i_users = self.transpose_matrix_.row_cs(ipos)
# find and limit the neighbors
i_sims = nsims[i_users]
mask = np.abs(i_sims) >= 1.0e-10
if self.nnbrs is not None and self.nnbrs > 0:
rank = stats.rankdata(-i_sims, 'ordinal')
mask = np.logical_and(mask, rank <= self.nnbrs)
if self.min_sim is not None:
mask = np.logical_and(mask, i_sims >= self.min_sim)
if np.sum(mask) < self.min_nbrs:
continue
# now we have picked weights, take a dot product
ism = i_sims[mask]
if self.aggregate == self.AGG_WA:
i_rates = self.transpose_matrix_.row_vs(ipos)
v = np.dot(i_rates[mask], ism)
v = v / np.sum(ism)
elif self.aggregate == self.AGG_SUM:
v = np.sum(ism)
else:
raise ValueError('invalid aggregate ' + self.aggregate)
results[i] = v + umean
results = pd.Series(results, index=items, name='prediction')
_logger.debug('scored %d of %d items for %s in %s',
results.notna().sum(), len(items), user, watch)
return results
def _get_user_data(self, user, ratings):
"Get a user's data for user-user CF"
rmat = self.rating_matrix_
if ratings is None:
try:
upos = self.user_index_.get_loc(user)
ratings = rmat.row(upos)
umean = self.user_means_[upos] if self.user_means_ is not None else 0
except KeyError:
_logger.warning('user %d has no ratings and none provided', user)
return None, 0
else:
_logger.debug('using provided ratings for user %d', user)
if self.center:
umean = ratings.mean()
ratings = ratings - umean
else:
umean = 0
unorm = np.linalg.norm(ratings)
ratings = ratings / unorm
ratings = ratings.reindex(self.item_index_, fill_value=0).values
return ratings, umean
def __getstate__(self):
state = dict(self.__dict__)
if '_mkl_m_' in state:
del state['_mkl_m_']
return state
def __setstate__(self, state):
self.__dict__.update(state)
self.aggregate = intern(self.aggregate)
mkl = matrix.mkl_ops()
self._mkl_m_ = mkl.SparseM.from_csr(self.rating_matrix_) if mkl else None
def __str__(self):
return 'UserUser(nnbrs={}, min_sim={})'.format(self.nnbrs, self.min_sim)
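# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# It fits the user-user model on a hypothetical toy frame; min_nbrs=1 keeps the
# tiny neighborhoods from being filtered out.  Unknown items score as NaN.
if __name__ == '__main__':
    toy = pd.DataFrame({
        'user': [1, 1, 2, 2, 3, 3],
        'item': [10, 20, 10, 30, 20, 30],
        'rating': [4.0, 3.0, 5.0, 2.0, 4.5, 3.5],
    })
    uu = UserUser(nnbrs=2, min_nbrs=1)
    uu.fit(toy)
    print(uu.predict_for_user(1, [10, 30, 99]))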
| 7,625 | 33.506787 | 99 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/algorithms/hpf.py | import logging
import pandas as pd
from .mf_common import MFPredictor
_logger = logging.getLogger(__name__)
class HPF(MFPredictor):
"""
Hierarchical Poisson factorization, provided by hpfrec_.
.. _hpfrec: https://hpfrec.readthedocs.io/en/latest/
Args:
features(int): the number of features
**kwargs: arguments passed to :py:class:`hpfrec.HPF`.
"""
def __init__(self, features, **kwargs):
self.features = features
self._kwargs = kwargs
def fit(self, ratings):
import hpfrec
users = pd.Index(ratings.user.unique())
items = pd.Index(ratings.item.unique())
hpfdf = pd.DataFrame({
'UserId': users.get_indexer(ratings.user),
'ItemId': items.get_indexer(ratings.item),
'Count': ratings.rating.values.copy()
})
hpf = hpfrec.HPF(self.features, reindex=False, **self._kwargs)
_logger.info('fitting HPF model with %d features', self.features)
hpf.fit(hpfdf)
self.user_index_ = users
self.item_index_ = items
self.user_features_ = hpf.Theta
self.item_features_ = hpf.Beta
return self
def predict_for_user(self, user, items, ratings=None):
# look up user index
return self.score_by_ids(user, items)
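# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# It requires the external hpfrec package and uses a hypothetical toy frame; in
# practice HPF is intended for much larger count/implicit-feedback data sets.
if __name__ == '__main__':
    ratings = pd.DataFrame({
        'user': [1, 1, 2, 2, 3],
        'item': [10, 20, 10, 30, 20],
        'rating': [1.0, 2.0, 3.0, 1.0, 2.0],
    })
    algo = HPF(10)
    algo.fit(ratings)
    print(algo.predict_for_user(1, [10, 20, 30]))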
| 1,322 | 24.442308 | 73 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/algorithms/__init__.py | """
LensKit algorithms.
The `lenskit.algorithms` package contains several example algorithms for carrying out recommender
experiments. These algorithm implementations are designed to mimic the characteristics of the
implementations provided by the original LensKit Java package. It also provides abstract base
classes (:py:mod:`abc`) representing different algorithm capabilities.
"""
from abc import ABCMeta, abstractmethod
import inspect
import pandas as pd
import numpy as np
class Algorithm(metaclass=ABCMeta):
"""
Base class for LensKit algorithms. These algorithms follow the SciKit design pattern
for estimators.
"""
@abstractmethod
def fit(self, ratings, *args, **kwargs):
"""
Train a model using the specified ratings (or similar) data.
Args:
ratings(pandas.DataFrame): The ratings data.
args: Additional training data the algorithm may require.
kwargs: Additional training data the algorithm may require.
Returns:
The algorithm object.
"""
raise NotImplementedError()
def get_params(self, deep=True):
"""
Get the parameters for this algorithm (as in scikit-learn). Algorithm parameters
should match constructor argument names.
The default implementation returns all attributes that match a constructor parameter
name. It should be compatible with the :py:meth:`sklearn.base.BaseEstimator.get_params`
method so that LensKit algorithms can be cloned with :py:func:`sklearn.base.clone`
as well as :py:func:`lenskit.util.clone`.
Returns:
dict: the algorithm parameters.
"""
sig = inspect.signature(self.__class__)
names = list(sig.parameters.keys())
params = {}
for name in names:
if hasattr(self, name):
value = getattr(self, name)
params[name] = value
if deep and hasattr(value, 'get_params'):
sps = value.get_params(deep)
for k, sv in sps.items():
params[name + '__' + k] = sv
return params
class Predictor(Algorithm, metaclass=ABCMeta):
"""
Predicts user ratings of items. Predictions are really estimates of the user's like or
dislike, and the ``Predictor`` interface makes no guarantees about their scale or
granularity.
"""
def predict(self, pairs, ratings=None):
"""
Compute predictions for user-item pairs. This method is designed to be compatible with the
general SciKit paradigm; applications typically want to use :py:meth:`predict_for_user`.
Args:
pairs(pandas.DataFrame): The user-item pairs, as ``user`` and ``item`` columns.
ratings(pandas.DataFrame): user-item rating data to replace memorized data.
Returns:
pandas.Series: The predicted scores for each user-item pair.
"""
if ratings is not None:
raise NotImplementedError()
def upred(df):
user, = df['user'].unique()
items = df['item']
preds = self.predict_for_user(user, items)
preds.name = 'prediction'
res = df.join(preds, on='item', how='left')
return res.prediction
res = pairs.loc[:, ['user', 'item']].groupby('user', sort=False).apply(upred)
res.reset_index(level='user', inplace=True, drop=True)
res.name = 'prediction'
return res.loc[pairs.index.values]
@abstractmethod
def predict_for_user(self, user, items, ratings=None):
"""
Compute predictions for a user and items.
Args:
user: the user ID
items (array-like): the items to predict
ratings (pandas.Series):
the user's ratings (indexed by item id); if provided, they may be used to
override or augment the model's notion of a user's preferences.
Returns:
pandas.Series: scores for the items, indexed by item id.
"""
raise NotImplementedError()
class Recommender(Algorithm, metaclass=ABCMeta):
"""
Recommends lists of items for users.
"""
@abstractmethod
def recommend(self, user, n=None, candidates=None, ratings=None):
"""
Compute recommendations for a user.
Args:
user: the user ID
n(int): the number of recommendations to produce (``None`` for unlimited)
candidates (array-like):
The set of valid candidate items; if ``None``, a default set will be used.
For many algorithms, this is their :py:class:`CandidateSelector`.
ratings (pandas.Series):
the user's ratings (indexed by item id); if provided, they may be used to
override or augment the model's notion of a user's preferences.
Returns:
pandas.DataFrame:
a frame with an ``item`` column; if the recommender also produces scores,
they will be in a ``score`` column.
"""
raise NotImplementedError()
@classmethod
def adapt(cls, algo):
"""
Ensure that an algorithm is a :class:`Recommender`. If it is not a recommender,
it is wrapped in a :class:`lenskit.basic.TopN` with a default candidate selector.
.. note::
Since 0.6.0, since algorithms are fit directly, you should call this method
**before** calling :meth:`Algorithm.fit`, unless you will always be passing
explicit candidate sets to :meth:`recommend`.
Args:
algo(Predictor): the underlying rating predictor.
"""
from .basic import TopN
if isinstance(algo, Recommender):
return algo
else:
return TopN(algo)
class CandidateSelector(Algorithm, metaclass=ABCMeta):
"""
Select candidates for recommendation for a user, possibly with some
additional ratings.
"""
@abstractmethod
def candidates(self, user, ratings=None):
"""
Select candidates for the user.
Args:
user:
The user key or ID.
ratings(pandas.Series or array-like):
Ratings or items to use instead of whatever ratings were memorized
for this user. If a :py:class:`pandas.Series`, the series index
is used; if it is another array-like it is assumed to be an array
of items.
"""
raise NotImplementedError()
@staticmethod
def rated_items(ratings):
"""
Utility function for converting a series or array into an array of item
IDs. Useful in implementations of :py:meth:`candidates`.
"""
if isinstance(ratings, pd.Series):
return ratings.index.values
elif isinstance(ratings, np.ndarray):
return ratings
else:
return np.array(ratings)
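# --- Editor's note: an illustrative sketch, not part of the original module. ---
# A toy Predictor subclass showing the fit / predict_for_user contract and how
# get_params picks up constructor-argument attributes; the class name, offset
# parameter, and data below are hypothetical.
if __name__ == '__main__':
    class MeanOffset(Predictor):
        "Toy predictor: global mean rating plus a fixed offset."
        def __init__(self, offset=0.0):
            self.offset = offset

        def fit(self, ratings, *args, **kwargs):
            self.mean_ = ratings['rating'].mean()
            return self

        def predict_for_user(self, user, items, ratings=None):
            return pd.Series(self.mean_ + self.offset, index=pd.Index(items))

    toy = pd.DataFrame({'user': [1, 2], 'item': [10, 20], 'rating': [3.0, 5.0]})
    algo = MeanOffset(offset=0.5).fit(toy)
    print(algo.get_params())                   # {'offset': 0.5}
    print(algo.predict_for_user(1, [10, 99]))  # 4.5 for every requested item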
| 7,065 | 34.154229 | 99 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/algorithms/implicit.py | import logging
import inspect
import pandas as pd
from ..matrix import sparse_ratings
from . import Recommender
_logger = logging.getLogger(__name__)
class BaseRec(Recommender):
"""
Base class for Implicit-backed recommenders.
Args:
delegate(implicit.RecommenderBase):
The delegate algorithm.
Attributes:
delegate(implicit.RecommenderBase):
The :py:mod:`implicit` delegate algorithm.
matrix_(scipy.sparse.csr_matrix):
The user-item rating matrix.
user_index_(pandas.Index):
The user index.
item_index_(pandas.Index):
The item index.
"""
def __init__(self, delegate):
self.delegate = delegate
def fit(self, ratings):
matrix, users, items = sparse_ratings(ratings, scipy=True)
iur = matrix.T.tocsr()
_logger.info('training %s on %s matrix (%d nnz)', self.delegate, iur.shape, iur.nnz)
self.delegate.fit(iur)
self.matrix_ = matrix
self.user_index_ = users
self.item_index_ = items
return self
def recommend(self, user, n=None, candidates=None, ratings=None):
try:
uid = self.user_index_.get_loc(user)
except KeyError:
return pd.DataFrame({'item': []})
if candidates is None:
recs = self.delegate.recommend(uid, self.matrix_, N=n)
else:
cands = self.item_index_.get_indexer(candidates)
cands = cands[cands >= 0]
recs = self.delegate.rank_items(uid, self.matrix_, cands)
if n is not None:
recs = recs[:n]
rec_df = pd.DataFrame.from_records(recs, columns=['item_pos', 'score'])
rec_df['item'] = self.item_index_[rec_df.item_pos]
return rec_df.loc[:, ['item', 'score']]
def __getattr__(self, name):
if 'delegate' not in self.__dict__:
raise AttributeError()
dd = self.delegate.__dict__
if name in dd:
return dd[name]
else:
raise AttributeError()
def get_params(self, deep=True):
dd = self.delegate.__dict__
sig = inspect.signature(self.delegate.__class__)
names = list(sig.parameters.keys())
return dict([(k, dd.get(k)) for k in names])
def __str__(self):
return 'Implicit({})'.format(self.delegate)
class ALS(BaseRec):
"""
LensKit interface to :py:mod:`implicit.als`.
"""
def __init__(self, *args, **kwargs):
"""
Construct an ALS recommender. The arguments are passed as-is to
:py:class:`implicit.als.AlternatingLeastSquares`.
"""
from implicit.als import AlternatingLeastSquares
super().__init__(AlternatingLeastSquares(*args, **kwargs))
class BPR(BaseRec):
"""
LensKit interface to :py:mod:`implicit.bpr`.
"""
def __init__(self, *args, **kwargs):
"""
Construct a BPR recommender. The arguments are passed as-is to
:py:class:`implicit.bpr.BayesianPersonalizedRanking`.
"""
from implicit.bpr import BayesianPersonalizedRanking
super().__init__(BayesianPersonalizedRanking(*args, **kwargs))
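# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# Both wrappers require the external `implicit` package; the factor count,
# iteration count, and toy frame below are hypothetical.  recommend() returns a
# data frame with `item` and `score` columns.
if __name__ == '__main__':
    ratings = pd.DataFrame({
        'user': [1, 1, 2, 2, 3],
        'item': [10, 20, 10, 30, 20],
        'rating': [1.0, 1.0, 1.0, 1.0, 1.0],
    })
    algo = ALS(factors=8, iterations=5)   # kwargs go to AlternatingLeastSquares
    algo.fit(ratings)
    print(algo.recommend(1, n=2))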
| 3,209 | 28.449541 | 92 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/algorithms/basic.py | """
Basic utility algorithms and combiners.
"""
import logging
from collections.abc import Iterable, Sequence
import pandas as pd
import numpy as np
from .. import check
from . import Predictor, Recommender, CandidateSelector
_logger = logging.getLogger(__name__)
class Bias(Predictor):
"""
A user-item bias rating prediction algorithm. This implements the following
predictor algorithm:
.. math::
s(u,i) = \\mu + b_i + b_u
where :math:`\\mu` is the global mean rating, :math:`b_i` is item bias, and
:math:`b_u` is the user bias. With the provided damping values
:math:`\\beta_{\\mathrm{u}}` and :math:`\\beta_{\\mathrm{i}}`, they are computed
as follows:
.. math::
\\begin{align*}
\\mu & = \\frac{\\sum_{r_{ui} \\in R} r_{ui}}{|R|} &
b_i & = \\frac{\\sum_{r_{ui} \\in R_i} (r_{ui} - \\mu)}{|R_i| + \\beta_{\\mathrm{i}}} &
b_u & = \\frac{\\sum_{r_{ui} \\in R_u} (r_{ui} - \\mu - b_i)}{|R_u| + \\beta_{\\mathrm{u}}}
\\end{align*}
The damping values can be interpreted as the number of default (mean) ratings to assume
*a priori* for each user or item, damping low-information users and items towards a mean instead
of permitting them to take on extreme values based on few ratings.
Args:
items: whether to compute item biases
users: whether to compute user biases
damping(number or tuple):
Bayesian damping to apply to computed biases. Either a number, to
damp both user and item biases the same amount, or a (user,item) tuple
providing separate damping values.
Attributes:
mean_(double): The global mean rating.
item_offsets_(pandas.Series): The item offsets (:math:`b_i` values)
user_offsets_(pandas.Series): The user offsets (:math:`b_u` values)
"""
def __init__(self, items=True, users=True, damping=0.0):
self.items = items
self.users = users
if isinstance(damping, tuple):
self.damping = damping
self.user_damping, self.item_damping = damping
else:
self.damping = damping
self.user_damping = damping
self.item_damping = damping
check.check_value(self.user_damping >= 0, "user damping value {} must be nonnegative",
self.user_damping)
check.check_value(self.item_damping >= 0, "item damping value {} must be nonnegative",
self.item_damping)
def fit(self, data):
"""
Train the bias model on some rating data.
Args:
data (DataFrame): a data frame of ratings. Must have at least `user`,
`item`, and `rating` columns.
Returns:
Bias: the fit bias object.
"""
_logger.info('building bias model for %d ratings', len(data))
self.mean_ = data.rating.mean()
_logger.info('global mean: %.3f', self.mean_)
nrates = data.assign(rating=lambda df: df.rating - self.mean_)
if self.items:
group = nrates.groupby('item').rating
self.item_offsets_ = self._mean(group, self.item_damping)
_logger.info('computed means for %d items', len(self.item_offsets_))
else:
self.item_offsets_ = None
if self.users:
if self.item_offsets_ is not None:
nrates = nrates.join(pd.DataFrame(self.item_offsets_), on='item', how='inner',
rsuffix='_im')
nrates = nrates.assign(rating=lambda df: df.rating - df.rating_im)
self.user_offsets_ = self._mean(nrates.groupby('user').rating, self.user_damping)
_logger.info('computed means for %d users', len(self.user_offsets_))
else:
self.user_offsets_ = None
return self
def predict_for_user(self, user, items, ratings=None):
"""
Compute predictions for a user and items. Unknown users and items
are assumed to have zero bias.
Args:
user: the user ID
items (array-like): the items to predict
ratings (pandas.Series): the user's ratings (indexed by item id); if
provided, will be used to recompute the user's
bias at prediction time.
Returns:
pandas.Series: scores for the items, indexed by item id.
"""
idx = pd.Index(items)
preds = pd.Series(self.mean_, idx)
if self.item_offsets_ is not None:
preds = preds + self.item_offsets_.reindex(items, fill_value=0)
if self.users and ratings is not None:
uoff = ratings - self.mean_
if self.item_offsets_ is not None:
uoff = uoff - self.item_offsets_
umean = uoff.mean()
preds = preds + umean
elif self.user_offsets_ is not None:
umean = self.user_offsets_.get(user, 0.0)
_logger.debug('using mean(user %s) = %.3f', user, umean)
preds = preds + umean
return preds
def _mean(self, series, damping):
if damping is not None and damping > 0:
return series.sum() / (series.count() + damping)
else:
return series.mean()
def __str__(self):
return 'Bias(ud={}, id={})'.format(self.user_damping, self.item_damping)
class Popular(Recommender):
"""
Recommend the most popular items.
Args:
selector(CandidateSelector):
The candidate selector to use. If ``None``, uses a new
:class:`UnratedItemCandidateSelector`.
"""
def __init__(self, selector=None):
if selector is None:
self.selector = UnratedItemCandidateSelector()
else:
self.selector = selector
def fit(self, ratings):
pop = ratings.groupby('item').user.count()
pop.name = 'score'
self.item_pop_ = pop
self.selector.fit(ratings)
return self
def recommend(self, user, n=None, candidates=None, ratings=None):
scores = self.item_pop_
if candidates is None:
candidates = self.selector.candidates(user, ratings)
idx = scores.index.get_indexer(candidates)
idx = idx[idx >= 0]
scores = scores.iloc[idx]
if n is None:
return scores.sort_values(ascending=False).reset_index()
else:
return scores.nlargest(n).reset_index()
def __str__(self):
return 'Popular'
class Memorized(Predictor):
"""
The Memorized algorithm memorizes scores provided at construction time.
"""
def __init__(self, scores):
"""
Args:
scores(pandas.DataFrame): the scores to memorize.
"""
self.scores = scores
def fit(self, *args, **kwargs):
return self
def predict_for_user(self, user, items, ratings=None):
uscores = self.scores[self.scores.user == user]
urates = uscores.set_index('item').rating
return urates.reindex(items)
class Fallback(Predictor):
"""
The Fallback algorithm predicts with its first component, uses the second to fill in
missing values, and so forth.
"""
def __init__(self, algorithms, *others):
"""
Args:
algorithms: a list of component algorithms. Each one will be trained.
others:
additional algorithms, in which case ``algorithms`` is taken to be
a single algorithm.
"""
if others:
self.algorithms = [algorithms] + list(others)
elif isinstance(algorithms, Iterable) or isinstance(algorithms, Sequence):
self.algorithms = algorithms
else:
self.algorithms = [algorithms]
def fit(self, ratings, *args, **kwargs):
for algo in self.algorithms:
algo.fit(ratings, *args, **kwargs)
return self
def predict_for_user(self, user, items, ratings=None):
remaining = pd.Index(items)
preds = None
for algo in self.algorithms:
_logger.debug('predicting for %d items for user %s', len(remaining), user)
aps = algo.predict_for_user(user, remaining, ratings=ratings)
aps = aps[aps.notna()]
if preds is None:
preds = aps
else:
preds = pd.concat([preds, aps])
remaining = remaining.difference(preds.index)
if len(remaining) == 0:
break
return preds.reindex(items)
def __str__(self):
str_algos = [str(algo) for algo in self.algorithms]
return 'Fallback([{}])'.format(', '.join(str_algos))
class TopN(Recommender):
"""
Basic recommender that implements top-N recommendation using a predictor.
.. note::
This class does not do anything of its own in :meth:`fit`. If its
predictor and candidate selector are both fit, the top-N recommender
does not need to be fit.
Args:
predictor(Predictor):
The underlying predictor.
selector(CandidateSelector):
The candidate selector. If ``None``, uses :class:`UnratedItemCandidateSelector`.
"""
def __init__(self, predictor, selector=None):
self.predictor = predictor
self.selector = selector if selector is not None else UnratedItemCandidateSelector()
def fit(self, ratings, *args, **kwargs):
"""
Fit the recommender.
Args:
ratings(pandas.DataFrame):
The rating or interaction data. Passed unchanged to the predictor and
candidate selector.
args, kwargs:
Additional arguments for the predictor to use in its training process.
"""
self.predictor.fit(ratings, *args, **kwargs)
self.selector.fit(ratings)
return self
def recommend(self, user, n=None, candidates=None, ratings=None):
if candidates is None:
candidates = self.selector.candidates(user, ratings)
scores = self.predictor.predict_for_user(user, candidates, ratings)
scores = scores[scores.notna()]
scores = scores.sort_values(ascending=False)
if n is not None:
scores = scores.iloc[:n]
scores.name = 'score'
scores.index.name = 'item'
return scores.reset_index()
def __str__(self):
return 'TopN/' + str(self.predictor)
class UnratedItemCandidateSelector(CandidateSelector):
"""
:class:`CandidateSelector` that selects items a user has not rated as
candidates. When this selector is fit, it memorizes the rated items.
Attributes:
items_(pandas.Index): All known items.
user_items_(dict):
Items rated by each known user, as positions in the ``items`` index.
"""
items_ = None
user_items_ = None
def fit(self, ratings):
self.items_ = pd.Index(np.unique(ratings['item']))
uimap = {}
for u, g in ratings.groupby('user'):
uimap[u] = self.items_.get_indexer(np.unique(g['item']))
self.user_items_ = uimap
return self
def candidates(self, user, ratings=None):
if ratings is None:
uis = self.user_items_.get(user, None)
else:
uis = self.items_.get_indexer(self.rated_items(ratings))
uis = uis[uis >= 0]
if uis is not None:
mask = np.full(len(self.items_), True)
mask[uis] = False
return self.items_.values[mask]
else:
return self.items_.values
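# --- Editor's note: a minimal usage sketch, not part of the original module. ---
# It wires the pieces above together: a damped Bias predictor inside TopN, which
# defaults to UnratedItemCandidateSelector so users are only recommended items
# they have not rated.  The toy frame is hypothetical; user 1 has only item 30
# left as a candidate.
if __name__ == '__main__':
    toy = pd.DataFrame({
        'user': [1, 1, 2, 2, 3],
        'item': [10, 20, 10, 30, 20],
        'rating': [4.0, 3.0, 5.0, 2.0, 4.5],
    })
    rec = TopN(Bias(damping=5))
    rec.fit(toy)
    print(rec.recommend(1, n=2))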
| 11,732 | 32.144068 | 100 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/metrics/topn.py | """
Top-N evaluation metrics.
"""
import numpy as np
def precision(recs, truth):
"""
Compute recommendation precision.
"""
nrecs = len(recs)
if nrecs == 0:
return None
ngood = recs['item'].isin(truth.index).sum()
return ngood / nrecs
def recall(recs, truth):
"""
Compute recommendation recall.
"""
nrel = len(truth)
if nrel == 0:
return None
ngood = recs['item'].isin(truth.index).sum()
return ngood / nrel
def recip_rank(recs, truth):
"""
Compute the reciprocal rank of the first relevant item in a list of recommendations.
If no elements are relevant, the reciprocal rank is 0.
"""
good = recs['item'].isin(truth.index)
npz, = np.nonzero(good)
if len(npz):
return 1.0 / (npz[0] + 1.0)
else:
return 0.0
def _dcg(scores, discount=np.log2):
"""
Compute the Discounted Cumulative Gain of a series of recommended items with rating scores.
These should be relevance scores; they can be :math:`{0,1}` for binary relevance data.
This is not a true top-N metric, but is a utility function for other metrics.
Args:
scores(array-like):
The utility scores of a list of recommendations, in recommendation order.
discount(ufunc):
the rank discount function. Each item's score will be divided by the discount of its rank,
if the discount is greater than 1.
Returns:
double: the DCG of the scored items.
"""
scores = np.nan_to_num(scores)
ranks = np.arange(1, len(scores) + 1)
disc = discount(ranks)
np.maximum(disc, 1, out=disc)
np.reciprocal(disc, out=disc)
return np.dot(scores, disc)
def ndcg(recs, truth, discount=np.log2):
"""
Compute the normalized discounted cumulative gain.
Discounted cumulative gain is computed as:
.. math::
\\begin{align*}
\\mathrm{DCG}(L,u) & = \\sum_{i=1}^{|L|} \\frac{r_{ui}}{d(i)}
\\end{align*}
This is then normalized as follows:
.. math::
\\begin{align*}
\\mathrm{nDCG}(L, u) & = \\frac{\\mathrm{DCG}(L,u)}{\\mathrm{DCG}(L_{\\mathrm{ideal}}, u)}
\\end{align*}
Args:
recs: The recommendation list.
truth: The user's test data.
discount(ufunc):
The rank discount function. Each item's score will be divided by the discount of its rank,
if the discount is greater than 1.
"""
if 'rating' in truth.columns:
ideal = _dcg(truth.rating.sort_values(ascending=False), discount)
merged = recs[['item']].join(truth[['rating']], on='item', how='left')
achieved = _dcg(merged.rating, discount)
else:
ideal = _dcg(np.ones(len(truth)), discount)
achieved = _dcg(recs.item.isin(truth.index), discount)
return achieved / ideal
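# --- Editor's note: an illustrative sketch, not part of the original module. ---
# Scoring one hypothetical recommendation list against a user's test items: the
# metrics above expect `recs` to have an 'item' column and `truth` to be indexed
# by item id.
if __name__ == '__main__':
    import pandas as pd
    recs = pd.DataFrame({'item': [10, 20, 30]})
    truth = pd.DataFrame({'rating': [4.0, 3.0]}, index=[20, 40])
    print(precision(recs, truth))    # 1 of 3 recommended items is relevant
    print(recall(recs, truth))       # 1 of 2 relevant items was recommended
    print(recip_rank(recs, truth))   # first hit at rank 2 -> 0.5
    print(ndcg(recs, truth))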
| 2,864 | 26.285714 | 100 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/metrics/__init__.py | """
Metrics for evaluating recommendations.
"""
| 48 | 11.25 | 39 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/metrics/predict.py | """
Prediction accuracy metrics.
"""
import numpy as np
import pandas as pd
def _check_missing(truth, missing):
"""
Check for missing truth values.
Args:
truth: the series of truth values
missing: what to do with missing values
"""
if missing == 'error' and truth.isna().any():
nmissing = truth.isna().sum()
raise ValueError('missing truth for {} predictions'.format(nmissing))
def rmse(predictions, truth, missing='error'):
"""
Compute RMSE (root mean squared error).
Args:
predictions(pandas.Series): the predictions
truth(pandas.Series): the ground truth ratings from data
missing(string):
how to handle predictions without truth. Can be one of
``'error'`` or ``'ignore'``.
Returns:
double: the root mean squared approximation error
"""
# force into series (basically no-op if already a series)
predictions = pd.Series(predictions)
truth = pd.Series(truth)
# realign
predictions, truth = predictions.align(truth, join='left')
_check_missing(truth, missing)
diff = predictions - truth
sqdiff = diff.apply(np.square)
msq = sqdiff.mean()
return np.sqrt(msq)
def mae(predictions, truth, missing='error'):
"""
Compute MAE (mean absolute error).
Args:
predictions(pandas.Series): the predictions
truth(pandas.Series): the ground truth ratings from data
missing(string):
how to handle predictions without truth. Can be one of
``'error'`` or ``'ignore'``.
Returns:
double: the mean absolute approximation error
"""
# force into series
predictions = pd.Series(predictions)
truth = pd.Series(truth)
predictions, truth = predictions.align(truth, join='left')
_check_missing(truth, missing)
diff = predictions - truth
adiff = diff.apply(np.abs)
return adiff.mean()
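# --- Editor's note: an illustrative sketch, not part of the original module. ---
# Predictions and truth are aligned on their shared index before the error is
# computed; with missing='ignore', predictions that have no truth value are
# simply skipped.  The values below are hypothetical.
if __name__ == '__main__':
    preds = pd.Series([3.0, 4.0, 5.0], index=[1, 2, 3])
    truth = pd.Series([3.5, 4.0], index=[1, 2])
    print(rmse(preds, truth, missing='ignore'))   # sqrt(mean([0.25, 0.0])) ~= 0.354
    print(mae(preds, truth, missing='ignore'))    # mean([0.5, 0.0]) = 0.25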
| 1,952 | 24.038462 | 77 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/batch/_multi.py | import logging
import pathlib
import collections.abc
import json
from copy import copy
import pandas as pd
from ..algorithms import Predictor
from .. import topn, util
from ._recommend import recommend
from ._predict import predict
try:
import fastparquet
except ImportError:
fastparquet = None
_logger = logging.getLogger(__name__)
_AlgoRec = collections.namedtuple('_AlgoRec', [
'algorithm',
'parallel',
'attributes'
])
_DSRec = collections.namedtuple('_DSRec', [
'dataset',
'candidates',
'attributes'
])
class MultiEval:
"""
A runner for carrying out multiple evaluations, such as parameter sweeps.
Args:
path(str or :py:class:`pathlib.Path`):
the working directory for this evaluation.
It will be created if it does not exist.
predict(bool):
whether to generate rating predictions.
recommend(int):
the number of recommendations to generate per user. Any false-y value (``None``,
``False``, ``0``) will disable top-n. The literal value ``True`` will generate
recommendation lists of unlimited size.
candidates(function):
the default candidate set generator for recommendations. It should take the
training data and return a candidate generator, itself a function mapping user
IDs to candidate sets.
combine(bool):
whether to combine output; if ``False``, output will be left in separate files, if
``True``, it will be in a single set of files (runs, recommendations, and predictions).
"""
def __init__(self, path, predict=True,
recommend=100, candidates=topn.UnratedCandidates,
nprocs=None, combine=True):
self.workdir = pathlib.Path(path)
self.predict = predict
self.recommend = recommend
self.candidate_generator = candidates
self.algorithms = []
self.datasets = []
self.nprocs = nprocs
self.combine_output = combine
self._is_flat = True
@property
def run_csv(self):
return self.workdir / 'runs.csv'
@property
def run_file(self):
return self.workdir / 'runs.parquet'
@property
def preds_file(self):
return self.workdir / 'predictions.parquet'
@property
def recs_file(self):
return self.workdir / 'recommendations.parquet'
def add_algorithms(self, algos, parallel=False, attrs=[], **kwargs):
"""
Add one or more algorithms to the run.
Args:
algos(algorithm or list): the algorithm(s) to add.
parallel(bool):
if ``True``, allow this algorithm to be trained in parallel with others.
attrs(list of str):
a list of attributes to extract from the algorithm objects and include in
the run descriptions.
kwargs:
additional attributes to include in the run descriptions.
"""
if not isinstance(algos, collections.abc.Iterable):
algos = [algos]
for algo in algos:
aa = {'AlgoClass': algo.__class__.__name__, 'AlgoStr': str(algo)}
aa.update(kwargs)
for an in attrs:
aa[an] = getattr(algo, an, None)
self.algorithms.append(_AlgoRec(algo, parallel, aa))
def add_datasets(self, data, name=None, candidates=None, **kwargs):
"""
Add one or more datasets to the run.
Args:
data:
The input data set(s) to run. Can be one of the following:
* A tuple of (train, test) data.
* An iterable of (train, test) pairs, in which case the iterable
is not consumed until it is needed.
* A function yielding either of the above, to defer data load
until it is needed.
Data can be either data frames or paths; paths are loaded after
detection using :py:func:`util.read_df_detect`.
kwargs:
additional attributes pertaining to these data sets.
"""
attrs = {}
if name is not None:
attrs['DataSet'] = name
attrs.update(kwargs)
# special-case lists to keep multis flat
if isinstance(data, list):
for part, e in enumerate(data):
self.add_datasets(e, name, candidates,
Partition=part+1, **kwargs)
return
if not isinstance(data, tuple):
self._is_flat = False
self.datasets.append(_DSRec(data, candidates, attrs))
def persist_data(self):
"""
Persist the data for an experiment, replacing in-memory data sets with file names.
Once this has been called, the sweep can be pickled.
"""
self.workdir.mkdir(parents=True, exist_ok=True)
ds2 = []
for i, (ds, cand_f, ds_attrs) in enumerate(self._flat_datasets()):
train, test = ds
if isinstance(train, pd.DataFrame):
fn = self.workdir / 'ds{}-train.parquet'.format(i+1)
_logger.info('serializing to %s', fn)
train.to_parquet(fn)
train = fn
if isinstance(test, pd.DataFrame):
fn = self.workdir / 'ds{}-test.parquet'.format(i+1)
_logger.info('serializing to %s', fn)
test.to_parquet(fn)
test = fn
ds2.append(((train, test), cand_f, ds_attrs))
self.datasets = ds2
self._is_flat = True
def _normalize_ds_entry(self, entry):
# normalize data set to be an iterable of tuples
ds, cand_f, attrs = entry
if callable(ds):
ds = ds()
if isinstance(ds, tuple):
yield _DSRec(ds, cand_f, attrs)
else:
yield from (_DSRec(dse, cand_f, dict(Partition=part+1, **attrs))
for (part, dse) in enumerate(ds))
def _flat_datasets(self):
for entry in self.datasets:
yield from self._normalize_ds_entry(entry)
def _read_data(self, df):
if isinstance(df, str) or isinstance(df, pathlib.Path):
_logger.info('reading from %s', df)
return util.read_df_detect(df)
else:
return df
def _flat_runs(self):
for dse in self._flat_datasets():
for arec in self.algorithms:
yield (dse, arec)
def run_count(self):
"Get the number of runs in this evaluation."
if self._is_flat:
nds = len(self.datasets)
else:
_logger.warning('attempting to count runs in a non-flattened evaluation')
nds = len(list(self._flat_datasets()))
return nds * len(self.algorithms)
def run(self, runs=None, *, progress=None):
"""
Run the evaluation.
Args:
runs(int or set-like):
If provided, a specific set of runs to run. Useful for splitting
an experiment into individual runs. This is a set of 1-based run
IDs, not 0-based indexes.
progress:
A :py:func:`tqdm.tqdm`-compatible progress function.
"""
if runs is not None and self.combine_output:
raise ValueError('Cannot select runs with combined output')
if runs is not None and not isinstance(runs, collections.abc.Iterable):
runs = [runs]
self.workdir.mkdir(parents=True, exist_ok=True)
run_id = 0
run_data = []
train_load = util.LastMemo(self._read_data)
test_load = util.LastMemo(self._read_data)
iter = self._flat_runs()
if progress is not None:
n = self.run_count() if self._is_flat else None
iter = progress(iter, total=n)
for i, (dsrec, arec) in enumerate(iter):
run_id = i + 1
if runs is not None and run_id not in runs:
_logger.info('skipping deselected run %d', run_id)
continue
ds, cand_f, ds_attrs = dsrec
if cand_f is None:
cand_f = self.candidate_generator
train, test = ds
train = train_load(train)
test = test_load(test)
ds_name = ds_attrs.get('DataSet', None)
ds_part = ds_attrs.get('Partition', None)
cand = cand_f(train)
_logger.info('starting run %d: %s on %s:%s', run_id, arec.algorithm,
ds_name, ds_part)
run = self._run_algo(run_id, arec, (train, test, ds_attrs, cand))
_logger.info('finished run %d: %s on %s:%s', run_id, arec.algorithm,
ds_name, ds_part)
run_data.append(run)
self._write_run(run, run_data)
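    # Example of splitting an experiment across invocations (a sketch; selecting
    # runs requires the sweep to be created with ``combine=False``, and ``tqdm``
    # is just one possible progress function):
    #
    #   n = sweep.run_count()
    #   sweep.run(runs=range(1, n // 2 + 1), progress=tqdm)      # first half
    #   sweep.run(runs=range(n // 2 + 1, n + 1), progress=tqdm)  # second half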
def _run_algo(self, run_id, arec, data):
train, test, dsp_attrs, cand = data
run = {'RunId': run_id}
run.update(dsp_attrs)
run.update(arec.attributes)
algo, train_time = self._train_algo(arec.algorithm, train)
run['TrainTime'] = train_time
preds, pred_time = self._predict(run_id, algo, test)
run['PredTime'] = pred_time
self._write_results('predictions', preds, run_id)
recs, rec_time = self._recommend(run_id, algo, test, cand)
run['RecTime'] = rec_time
self._write_results('recommendations', recs, run_id)
return run
def _train_algo(self, algo, train):
watch = util.Stopwatch()
_logger.info('training algorithm %s on %d ratings', algo, len(train))
        # clone the algorithm so state from a previous run is never reused
clone = util.clone(algo)
clone.fit(train)
watch.stop()
_logger.info('trained algorithm %s in %s', algo, watch)
return clone, watch.elapsed()
def _predict(self, rid, algo, test):
if not self.predict:
return None, None
if not isinstance(algo, Predictor):
return None, None
watch = util.Stopwatch()
_logger.info('generating %d predictions for %s', len(test), algo)
preds = predict(algo, test, nprocs=self.nprocs)
watch.stop()
_logger.info('generated predictions in %s', watch)
preds['RunId'] = rid
preds = preds[['RunId', 'user', 'item', 'rating', 'prediction']]
return preds, watch.elapsed()
def _recommend(self, rid, algo, test, candidates):
        if not self.recommend:  # any falsy value (0, None, False) turns off recommendations
return None, None
elif self.recommend is True: # special value True means unlimited
nrecs = None
else: # recommend has rec size
nrecs = self.recommend
watch = util.Stopwatch()
users = test.user.unique()
_logger.info('generating recommendations for %d users for %s', len(users), algo)
recs = recommend(algo, users, nrecs, candidates,
nprocs=self.nprocs)
watch.stop()
_logger.info('generated recommendations in %s', watch)
recs['RunId'] = rid
return recs, watch.elapsed()
def _write_run(self, run, run_data):
if self.combine_output:
run_df = pd.DataFrame(run_data)
# overwrite files to show progress
run_df.to_csv(self.run_csv, index=False)
run_df.to_parquet(self.run_file, compression=None)
else:
rf = self.workdir / 'run-{}.json'.format(run['RunId'])
with rf.open('w') as f:
json.dump(run, f)
def _write_results(self, name, df, run_id):
if df is None:
return
if self.combine_output:
out = self.workdir / '{}.parquet'.format(name)
_logger.info('run %d: writing results to %s', run_id, out)
append = run_id > 1
util.write_parquet(out, df, append=append)
else:
out = self.workdir / '{}-{}.parquet'.format(name, run_id)
_logger.info('run %d: writing results to %s', run_id, out)
df.to_parquet(out)
def collect_results(self):
"""
Collect the results from non-combined runs into combined output files.
"""
oc = self.combine_output
try:
self.combine_output = True
n = self.run_count()
runs = (self._read_json('run-{}.json', i+1) for i in range(n))
runs = pd.DataFrame.from_records(runs)
runs.to_parquet(self.run_file)
runs.to_csv(self.run_csv, index=False)
for i in range(n):
preds = self._read_parquet('predictions-{}.parquet', i+1)
self._write_results('predictions', preds, i+1)
recs = self._read_parquet('recommendations-{}.parquet', i+1)
self._write_results('recommendations', recs, i+1)
finally:
self.combine_output = oc
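    # After non-combined runs (``combine=False``), a sketch of merging and then
    # reading the results (``pd`` is pandas, already imported in this module):
    #
    #   sweep.collect_results()
    #   runs = pd.read_parquet(sweep.run_file)
    #   preds = pd.read_parquet(sweep.workdir / 'predictions.parquet')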
def _read_parquet(self, name, *args):
fn = self.workdir / name.format(*args)
if not fn.exists():
_logger.warning('file %s does not exist', fn)
return None
return pd.read_parquet(fn)
def _read_json(self, name, *args):
fn = self.workdir / name.format(*args)
if not fn.exists():
_logger.warning('file %s does not exist', fn)
return {}
with fn.open('r') as f:
return json.load(f)
def __getstate__(self):
if not self._is_flat:
_logger.warning('attempting to pickle non-flattened experiment')
state = copy(self.__dict__)
# clone the algorithms to only pickle their parameters
state['algorithms'] = [a._replace(algorithm=util.clone(a.algorithm))
for a in self.algorithms]
return state
def __setstate__(self, state):
self.__dict__.update(state)
| 14,040 | 33.927861 | 99 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/batch/_recommend.py | import logging
import warnings
import multiprocessing as mp
from multiprocessing.pool import Pool
import pandas as pd
import numpy as np
from ..algorithms import Recommender
from .. import util
_logger = logging.getLogger(__name__)
_rec_context = None
class MPRecContext:
def __init__(self, algo, candidates, size):
self.algo = algo
self.candidates = candidates
self.size = size
def __enter__(self):
global _rec_context
_logger.debug('installing context for %s', self.algo)
_rec_context = self
return self
def __exit__(self, *args, **kwargs):
global _rec_context
_logger.debug('uninstalling context for %s', self.algo)
_rec_context = None
def _recommend_user(algo, user, n, candidates):
_logger.debug('generating recommendations for %s', user)
watch = util.Stopwatch()
res = algo.recommend(user, n, candidates)
    _logger.debug('%s recommended %d/%s items for %s in %s', algo, len(res), n, user, watch)
res['user'] = user
res['rank'] = np.arange(1, len(res) + 1)
return res
def _recommend_seq(algo, users, n, candidates):
if isinstance(candidates, dict):
candidates = candidates.get
if candidates is None:
candidates = lambda u: None
results = [_recommend_user(algo, user, n, candidates(user))
for user in users]
return results
def _recommend_worker(user):
candidates = _rec_context.candidates(user) if _rec_context.candidates is not None else None
res = _recommend_user(_rec_context.algo, user, _rec_context.size, candidates)
return res.to_msgpack()
def recommend(algo, users, n, candidates=None, *, nprocs=None, **kwargs):
"""
Batch-recommend for multiple users. The provided algorithm should be a
:py:class:`algorithms.Recommender`.
Args:
algo: the algorithm
users(array-like): the users to recommend for
n(int): the number of recommendations to generate (None for unlimited)
candidates:
the users' candidate sets. This can be a function, in which case it will
be passed each user ID; it can also be a dictionary, in which case user
IDs will be looked up in it. Pass ``None`` to use the recommender's
built-in candidate selector (usually recommended).
nprocs(int):
The number of processes to use for parallel recommendations.
Returns:
A frame with at least the columns ``user``, ``rank``, and ``item``; possibly also
``score``, and any other columns returned by the recommender.
"""
rec_algo = Recommender.adapt(algo)
if candidates is None and rec_algo is not algo:
warnings.warn('no candidates provided and algo is not a recommender, unlikely to work')
if 'ratings' in kwargs:
warnings.warn('Providing ratings to recommend is not supported', DeprecationWarning)
if nprocs and nprocs > 1 and mp.get_start_method() == 'fork':
_logger.info('starting recommend process with %d workers', nprocs)
with MPRecContext(rec_algo, candidates, n), Pool(nprocs) as pool:
results = pool.map(_recommend_worker, users)
results = [pd.read_msgpack(r) for r in results]
else:
_logger.info('starting sequential recommend process')
results = _recommend_seq(rec_algo, users, n, candidates)
results = pd.concat(results, ignore_index=True)
return results
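# Usage sketch (illustrative; ``train`` and ``test`` are placeholder frames and
# Popular is just one example of a Recommender):
#
#   from lenskit.algorithms.basic import Popular
#   algo = Popular()
#   algo.fit(train)
#   recs = recommend(algo, test.user.unique(), 10)
#   recs.groupby('user').item.count()   # up to 10 recommendations per user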
| 3,470 | 33.366337 | 95 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/batch/__init__.py | """
Batch-run predictors and recommenders for evaluation.
"""
from ._predict import predict
from ._recommend import recommend
from ._multi import MultiEval
| 157 | 18.75 | 53 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/batch/_predict.py | import logging
import multiprocessing as mp
from multiprocessing.pool import Pool
import pandas as pd
from .. import util
from .. import crossfold
_logger = logging.getLogger(__name__)
_rec_context = None
class MPRecContext:
def __init__(self, algo):
self.algo = algo
def __enter__(self):
global _rec_context
_logger.debug('installing context for %s', self.algo)
_rec_context = self
return self
def __exit__(self, *args, **kwargs):
global _rec_context
_logger.debug('uninstalling context for %s', self.algo)
_rec_context = None
def _predict_user(algo, user, udf):
watch = util.Stopwatch()
res = algo.predict_for_user(user, udf['item'])
res = pd.DataFrame({'user': user, 'item': res.index, 'prediction': res.values})
    _logger.debug('%s produced %d/%d predictions for %s in %s',
                  algo, res.prediction.notna().sum(), len(udf), user, watch)
return res
def _predict_worker(job):
user, udf = job
res = _predict_user(_rec_context.algo, user, udf)
return res.to_msgpack()
def predict(algo, pairs, *, nprocs=None):
"""
Generate predictions for user-item pairs. The provided algorithm should be a
:py:class:`algorithms.Predictor` or a function of two arguments: the user ID and
a list of item IDs. It should return a dictionary or a :py:class:`pandas.Series`
mapping item IDs to predictions.
To use this function, provide a pre-fit algorithm::
>>> from lenskit.algorithms.basic import Bias
>>> from lenskit.metrics.predict import rmse
>>> ratings = util.load_ml_ratings()
>>> bias = Bias()
>>> bias.fit(ratings[:-1000])
<lenskit.algorithms.basic.Bias object at ...>
>>> preds = predict(bias, ratings[-1000:])
>>> preds.head()
user item rating timestamp prediction
99004 664 8361 3.0 1393891425 3.288286
99005 664 8528 3.5 1393891047 3.559119
99006 664 8529 4.0 1393891173 3.573008
99007 664 8636 4.0 1393891175 3.846268
99008 664 8641 4.5 1393890852 3.710635
>>> rmse(preds['prediction'], preds['rating'])
0.8326992222...
Args:
algo(lenskit.algorithms.Predictor):
A rating predictor function or algorithm.
pairs(pandas.DataFrame):
A data frame of (``user``, ``item``) pairs to predict for. If this frame also
contains a ``rating`` column, it will be included in the result.
nprocs(int):
The number of processes to use for parallel batch prediction.
Returns:
pandas.DataFrame:
a frame with columns ``user``, ``item``, and ``prediction`` containing
the prediction results. If ``pairs`` contains a `rating` column, this
result will also contain a `rating` column.
"""
if nprocs and nprocs > 1 and mp.get_start_method() == 'fork':
_logger.info('starting predict process with %d workers', nprocs)
with MPRecContext(algo), Pool(nprocs) as pool:
results = pool.map(_predict_worker, pairs.groupby('user'))
results = [pd.read_msgpack(r) for r in results]
_logger.info('finished predictions')
else:
results = []
for user, udf in pairs.groupby('user'):
res = _predict_user(algo, user, udf)
results.append(res)
results = pd.concat(results)
if 'rating' in pairs:
return pairs.join(results.set_index(['user', 'item']), on=('user', 'item'))
return results
| 3,635 | 34.300971 | 89 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/math/solve.py | """
Efficient solver routines.
"""
import numpy as np
import cffi
import numba as n
from numba.extending import get_cython_function_address
__ffi = cffi.FFI()
__uplo_U = np.array([ord('U')], dtype=np.int8)
__uplo_L = np.array([ord('L')], dtype=np.int8)
__trans_N = np.array([ord('N')], dtype=np.int8)
__trans_T = np.array([ord('T')], dtype=np.int8)
__trans_C = np.array([ord('C')], dtype=np.int8)
__diag_U = np.array([ord('U')], dtype=np.int8)
__diag_N = np.array([ord('N')], dtype=np.int8)
__inc_1 = np.ones(1, dtype=np.int32)
__dtrsv = __ffi.cast("void (*) (char*, char*, char*, int*, double*, int*, double*, int*)",
get_cython_function_address("scipy.linalg.cython_blas", "dtrsv"))
__dposv = __ffi.cast("void (*) (char*, int*, int*, double*, int*, double*, int*, int*)",
get_cython_function_address("scipy.linalg.cython_lapack", "dposv"))
@n.njit(n.void(n.boolean, n.boolean, n.double[:, ::1], n.double[::1]), nogil=True)
def _dtrsv(lower, trans, a, x):
inc1 = __ffi.from_buffer(__inc_1)
# dtrsv uses Fortran-layout arrays. Because we use C-layout arrays, we will
# invert the meaning of 'lower' and 'trans', and the function will work fine.
# We also need to swap index orders
uplo = __uplo_U if lower else __uplo_L
tspec = __trans_N if trans else __trans_T
n_p = np.array([a.shape[0]], dtype=np.intc)
n_p = __ffi.from_buffer(n_p)
lda_p = np.array([a.shape[1]], dtype=np.intc)
lda_p = __ffi.from_buffer(lda_p)
__dtrsv(__ffi.from_buffer(uplo), __ffi.from_buffer(tspec), __ffi.from_buffer(__diag_N),
n_p, __ffi.from_buffer(a), lda_p,
__ffi.from_buffer(x), inc1)
def solve_tri(A, b, transpose=False, lower=True):
"""
Solve the system :math:`Ax = b`, where :math:`A` is triangular.
This is equivalent to :py:func:`scipy.linalg.solve_triangular`, but does *not*
check for non-singularity. It is a thin wrapper around the BLAS ``dtrsv``
function.
Args:
A(ndarray): the matrix.
        b(ndarray): the target vector.
transpose(bool): whether to solve :math:`Ax = b` or :math:`A^T x = b`.
lower(bool): whether :math:`A` is lower- or upper-triangular.
"""
x = b.copy()
_dtrsv(lower, transpose, A, x)
return x
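# Verification sketch for solve_tri (illustrative only; scipy is already a
# dependency via the cython_blas/cython_lapack bindings above):
#
#   import scipy.linalg as sla
#   A = np.tril(np.random.randn(5, 5)) + np.eye(5) * 5   # C-contiguous lower triangle
#   b = np.random.randn(5)
#   x = solve_tri(A, b, lower=True)
#   assert np.allclose(x, sla.solve_triangular(A, b, lower=True))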
@n.njit(n.intc(n.float64[:, ::1], n.float64[::1], n.boolean), nogil=True)
def _dposv(A, b, lower):
if A.shape[0] != A.shape[1]:
return -11
if A.shape[0] != b.shape[0]:
return -12
# dposv uses Fortran-layout arrays. Because we use C-layout arrays, we will
# invert the meaning of 'lower' and 'trans', and the function will work fine.
# We also need to swap index orders
uplo = __uplo_U if lower else __uplo_L
n_p = __ffi.from_buffer(np.array([A.shape[0]], dtype=np.intc))
nrhs_p = __ffi.from_buffer(np.ones(1, dtype=np.intc))
info = np.zeros(1, dtype=np.intc)
info_p = __ffi.from_buffer(info)
__dposv(__ffi.from_buffer(uplo), n_p, nrhs_p,
__ffi.from_buffer(A), n_p,
__ffi.from_buffer(b), n_p,
info_p)
return info[0]
def dposv(A, b, lower=False):
"""
    Interface to the LAPACK dposv function, which solves :math:`Ax = b` for
    symmetric positive-definite :math:`A`. A Numba-accessible version without
    error checking is exposed as :py:func:`_dposv`.
"""
info = _dposv(A, b, lower)
if info < 0:
raise ValueError('invalid args to dposv, code ' + str(info))
elif info > 0:
raise RuntimeError('error in dposv, code ' + str(info))
| 3,490 | 33.564356 | 91 | py |
MachineUnlearningPy | MachineUnlearningPy-master/lenskit/math/__init__.py | """
Mathematical helper routines.
"""
| 38 | 8.75 | 29 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_topn_recall.py | import numpy as np
import pandas as pd
from pytest import approx
from lenskit.topn import recall
def _test_recall(items, rel):
recs = pd.DataFrame({'item': items})
truth = pd.DataFrame({'item': rel}).set_index('item')
return recall(recs, truth)
def test_recall_empty_zero():
prec = _test_recall([], [1, 3])
assert prec == approx(0)
def test_recall_norel_na():
prec = _test_recall([1, 3], [])
assert prec is None
def test_recall_simple_cases():
prec = _test_recall([1, 3], [1, 3])
assert prec == approx(1.0)
prec = _test_recall([1], [1, 3])
assert prec == approx(0.5)
prec = _test_recall([1, 2, 3, 4], [1, 3])
assert prec == approx(1.0)
prec = _test_recall([1, 2, 3, 4], [1, 3, 5])
assert prec == approx(2.0 / 3)
prec = _test_recall([1, 2, 3, 4], range(5, 10))
assert prec == approx(0.0)
prec = _test_recall([1, 2, 3, 4], range(4, 9))
assert prec == approx(0.2)
def test_recall_series():
prec = _test_recall(pd.Series([1, 3]), pd.Series([1, 3]))
assert prec == approx(1.0)
prec = _test_recall(pd.Series([1, 2, 3]), pd.Series([1, 3, 5, 7]))
assert prec == approx(0.5)
prec = _test_recall(pd.Series([1, 2, 3, 4]), pd.Series(range(4, 9)))
assert prec == approx(0.2)
def test_recall_series_set():
prec = _test_recall(pd.Series([1, 2, 3, 4]), [1, 3, 5, 7])
assert prec == approx(0.5)
prec = _test_recall(pd.Series([1, 2, 3, 4]), range(4, 9))
assert prec == approx(0.2)
def test_recall_series_index():
prec = _test_recall(pd.Series([1, 3]), pd.Index([1, 3]))
assert prec == approx(1.0)
prec = _test_recall(pd.Series([1, 2, 3, 4]), pd.Index([1, 3, 5, 7]))
assert prec == approx(0.5)
prec = _test_recall(pd.Series([1, 2, 3, 4]), pd.Index(range(4, 9)))
assert prec == approx(0.2)
def test_recall_series_array():
prec = _test_recall(pd.Series([1, 3]), np.array([1, 3]))
assert prec == approx(1.0)
prec = _test_recall(pd.Series([1, 2, 3, 4]), np.array([1, 3, 5, 7]))
assert prec == approx(0.5)
prec = _test_recall(pd.Series([1, 2, 3, 4]), np.arange(4, 9, 1, 'u4'))
assert prec == approx(0.2)
def test_recall_array():
prec = _test_recall(np.array([1, 3]), np.array([1, 3]))
assert prec == approx(1.0)
prec = _test_recall(np.array([1, 2, 3, 4]), np.array([1, 3, 5, 7]))
assert prec == approx(0.5)
prec = _test_recall(np.array([1, 2, 3, 4]), np.arange(4, 9, 1, 'u4'))
assert prec == approx(0.2)
| 2,502 | 25.347368 | 74 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_batch_sweep.py | import pathlib
import json
import pickle
import pandas as pd
import numpy as np
from lk_test_utils import ml_pandas, norm_path
from lenskit import batch, crossfold as xf
from lenskit.algorithms import Predictor
from lenskit.algorithms.basic import Bias, Popular
from pytest import mark
@mark.slow
@mark.parametrize('ncpus', [None, 2])
def test_sweep_bias(tmp_path, ncpus):
tmp_path = norm_path(tmp_path)
work = pathlib.Path(tmp_path)
sweep = batch.MultiEval(tmp_path, nprocs=ncpus)
ratings = ml_pandas.renamed.ratings
folds = xf.partition_users(ratings, 5, xf.SampleN(5))
sweep.add_datasets(folds, DataSet='ml-small')
sweep.add_algorithms([Bias(damping=0), Bias(damping=5), Bias(damping=10)],
attrs=['damping'])
sweep.add_algorithms(Popular())
try:
sweep.run()
finally:
if (work / 'runs.csv').exists():
runs = pd.read_csv(work / 'runs.csv')
print(runs)
assert (work / 'runs.csv').exists()
assert (work / 'runs.parquet').exists()
assert (work / 'predictions.parquet').exists()
assert (work / 'recommendations.parquet').exists()
runs = pd.read_parquet(work / 'runs.parquet')
# 4 algorithms by 5 partitions
assert len(runs) == 20
assert all(np.sort(runs.AlgoClass.unique()) == ['Bias', 'Popular'])
bias_runs = runs[runs.AlgoClass == 'Bias']
assert all(bias_runs.damping.notna())
pop_runs = runs[runs.AlgoClass == 'Popular']
assert all(pop_runs.damping.isna())
preds = pd.read_parquet(work / 'predictions.parquet')
assert all(preds.RunId.isin(bias_runs.RunId))
recs = pd.read_parquet(work / 'recommendations.parquet')
assert all(recs.RunId.isin(runs.RunId))
@mark.slow
def test_sweep_norecs(tmp_path):
tmp_path = norm_path(tmp_path)
work = pathlib.Path(tmp_path)
sweep = batch.MultiEval(tmp_path, recommend=None)
ratings = ml_pandas.renamed.ratings
folds = xf.partition_users(ratings, 5, xf.SampleN(5))
sweep.add_datasets(folds, DataSet='ml-small')
sweep.add_algorithms([Bias(damping=0), Bias(damping=5), Bias(damping=10)],
attrs=['damping'])
sweep.add_algorithms(Popular())
try:
sweep.run()
finally:
if (work / 'runs.csv').exists():
runs = pd.read_csv(work / 'runs.csv')
print(runs)
assert (work / 'runs.csv').exists()
assert (work / 'runs.parquet').exists()
assert (work / 'predictions.parquet').exists()
assert not (work / 'recommendations.parquet').exists()
runs = pd.read_parquet(work / 'runs.parquet')
# 4 algorithms by 5 partitions
assert len(runs) == 20
assert all(np.sort(runs.AlgoClass.unique()) == ['Bias', 'Popular'])
bias_runs = runs[runs.AlgoClass == 'Bias']
assert all(bias_runs.damping.notna())
pop_runs = runs[runs.AlgoClass == 'Popular']
assert all(pop_runs.damping.isna())
preds = pd.read_parquet(work / 'predictions.parquet')
assert all(preds.RunId.isin(bias_runs.RunId))
@mark.slow
def test_sweep_allrecs(tmp_path):
tmp_path = norm_path(tmp_path)
work = pathlib.Path(tmp_path)
sweep = batch.MultiEval(tmp_path, recommend=True)
ratings = ml_pandas.renamed.ratings
folds = xf.partition_users(ratings, 5, xf.SampleN(5))
sweep.add_datasets(folds, DataSet='ml-small')
sweep.add_algorithms([Bias(damping=0), Bias(damping=5), Bias(damping=10)],
attrs=['damping'])
sweep.add_algorithms(Popular())
try:
sweep.run()
finally:
if (work / 'runs.csv').exists():
runs = pd.read_csv(work / 'runs.csv')
print(runs)
assert (work / 'runs.csv').exists()
assert (work / 'runs.parquet').exists()
assert (work / 'predictions.parquet').exists()
assert (work / 'recommendations.parquet').exists()
runs = pd.read_parquet(work / 'runs.parquet')
# 4 algorithms by 5 partitions
assert len(runs) == 20
assert all(np.sort(runs.AlgoClass.unique()) == ['Bias', 'Popular'])
bias_runs = runs[runs.AlgoClass == 'Bias']
assert all(bias_runs.damping.notna())
pop_runs = runs[runs.AlgoClass == 'Popular']
assert all(pop_runs.damping.isna())
preds = pd.read_parquet(work / 'predictions.parquet')
assert all(preds.RunId.isin(bias_runs.RunId))
recs = pd.read_parquet(work / 'recommendations.parquet')
assert all(recs.RunId.isin(runs.RunId))
@mark.slow
def test_sweep_filenames(tmp_path):
tmp_path = norm_path(tmp_path)
work = pathlib.Path(tmp_path)
sweep = batch.MultiEval(tmp_path)
ratings = ml_pandas.renamed.ratings
folds = []
for part, (train, test) in enumerate(xf.partition_users(ratings, 2, xf.SampleN(5))):
trfn = work / 'p{}-train.csv'.format(part)
tefn = work / 'p{}-test.csv'.format(part)
train.to_csv(trfn)
test.to_csv(tefn)
folds.append((trfn, tefn))
sweep.add_datasets(folds, DataSet='ml-small')
sweep.add_algorithms([Bias(damping=0), Bias(damping=5), Bias(damping=10)],
attrs=['damping'])
sweep.add_algorithms(Popular())
def progress(iter, total=None):
assert total == len(folds) * 4
return iter
try:
sweep.run(progress=progress)
finally:
if (work / 'runs.csv').exists():
runs = pd.read_csv(work / 'runs.csv')
print(runs)
assert (work / 'runs.csv').exists()
assert (work / 'runs.parquet').exists()
assert (work / 'predictions.parquet').exists()
assert (work / 'recommendations.parquet').exists()
runs = pd.read_parquet(work / 'runs.parquet')
# 4 algorithms by 2 partitions
assert len(runs) == 8
@mark.slow
def test_sweep_persist(tmp_path):
tmp_path = norm_path(tmp_path)
work = pathlib.Path(tmp_path)
sweep = batch.MultiEval(tmp_path)
ratings = ml_pandas.renamed.ratings
sweep.add_datasets(lambda: xf.partition_users(ratings, 5, xf.SampleN(5)), name='ml-small')
sweep.persist_data()
for i in range(1, 6):
assert (work / 'ds{}-train.parquet'.format(i)).exists()
assert (work / 'ds{}-test.parquet'.format(i)).exists()
for ds, cf, dsa in sweep.datasets:
assert isinstance(ds, tuple)
train, test = ds
assert isinstance(train, pathlib.Path)
assert isinstance(test, pathlib.Path)
sweep.add_algorithms([Bias(damping=0), Bias(damping=5), Bias(damping=10)],
attrs=['damping'])
sweep.add_algorithms(Popular())
try:
sweep.run()
finally:
if (work / 'runs.csv').exists():
runs = pd.read_csv(work / 'runs.csv')
print(runs)
assert (work / 'runs.csv').exists()
assert (work / 'runs.parquet').exists()
assert (work / 'predictions.parquet').exists()
assert (work / 'recommendations.parquet').exists()
runs = pd.read_parquet(work / 'runs.parquet')
# 4 algorithms by 5 partitions
assert len(runs) == 20
@mark.slow
def test_sweep_oneshot(tmp_path):
tmp_path = norm_path(tmp_path)
work = pathlib.Path(tmp_path)
sweep = batch.MultiEval(tmp_path, combine=False)
ratings = ml_pandas.renamed.ratings
sweep.add_datasets(lambda: xf.partition_users(ratings, 5, xf.SampleN(5)), name='ml-small')
sweep.add_algorithms(Bias(damping=5))
try:
sweep.run(3)
finally:
if (work / 'runs.csv').exists():
runs = pd.read_csv(work / 'runs.csv')
print(runs)
assert not (work / 'runs.csv').exists()
assert not (work / 'runs.parquet').exists()
assert not (work / 'predictions.parquet').exists()
assert not (work / 'recommendations.parquet').exists()
assert (work / 'run-3.json').exists()
assert (work / 'predictions-3.parquet').exists()
assert (work / 'recommendations-3.parquet').exists()
with (work / 'run-3.json').open() as f:
run = json.load(f)
assert run['RunId'] == 3
@mark.slow
def test_sweep_save(tmp_path):
tmp_path = norm_path(tmp_path)
work = pathlib.Path(tmp_path)
sweep = batch.MultiEval(tmp_path)
ratings = ml_pandas.renamed.ratings
sweep.add_datasets(lambda: xf.partition_users(ratings, 5, xf.SampleN(5)), name='ml-small')
sweep.add_algorithms(Bias(damping=5))
sweep.persist_data()
pf = work / 'sweep.dat'
with pf.open('wb') as f:
pickle.dump(sweep, f)
with pf.open('rb') as f:
sweep = pickle.load(f)
try:
sweep.run()
finally:
if (work / 'runs.csv').exists():
runs = pd.read_csv(work / 'runs.csv')
print(runs)
assert (work / 'runs.csv').exists()
assert (work / 'runs.parquet').exists()
assert (work / 'predictions.parquet').exists()
assert (work / 'recommendations.parquet').exists()
runs = pd.read_parquet(work / 'runs.parquet')
    # 1 algorithm by 5 partitions
assert len(runs) == 5
@mark.slow
def test_sweep_combine(tmp_path):
tmp_path = norm_path(tmp_path)
work = pathlib.Path(tmp_path)
sweep = batch.MultiEval(tmp_path, combine=False)
ratings = ml_pandas.renamed.ratings
sweep.add_datasets(lambda: xf.partition_users(ratings, 5, xf.SampleN(5)), name='ml-small')
sweep.add_algorithms([Bias(damping=0), Bias(damping=5)],
attrs=['damping'])
sweep.add_algorithms(Popular())
sweep.persist_data()
for i in range(1, 6):
assert (work / 'ds{}-train.parquet'.format(i)).exists()
assert (work / 'ds{}-test.parquet'.format(i)).exists()
for ds, cf, dsa in sweep.datasets:
assert isinstance(ds, tuple)
train, test = ds
assert isinstance(train, pathlib.Path)
assert isinstance(test, pathlib.Path)
assert sweep.run_count() == 5 * 3
try:
sweep.run()
finally:
if (work / 'runs.csv').exists():
runs = pd.read_csv(work / 'runs.csv')
print(runs)
assert not (work / 'runs.csv').exists()
assert not (work / 'runs.parquet').exists()
assert not (work / 'predictions.parquet').exists()
assert not (work / 'recommendations.parquet').exists()
for i, (ds, a) in enumerate(sweep._flat_runs()):
run = i + 1
assert (work / 'run-{}.json'.format(run)).exists()
if isinstance(a.algorithm, Predictor):
assert (work / 'predictions-{}.parquet'.format(run)).exists()
assert (work / 'recommendations-{}.parquet'.format(run)).exists()
sweep.collect_results()
assert (work / 'runs.csv').exists()
assert (work / 'runs.parquet').exists()
assert (work / 'predictions.parquet').exists()
assert (work / 'recommendations.parquet').exists()
runs = pd.read_parquet(work / 'runs.parquet')
assert len(runs) == 5 * 3
| 10,792 | 30.651026 | 94 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_check.py | from pytest import raises
import numpy as np
from lenskit.check import check_value, check_dimension
def test_check_value_passes():
check_value(True, "value should be true")
# it should complete successfully!
def test_check_value_fails():
with raises(ValueError):
check_value(False, "value should be true")
def test_check_meaningful_value_fails():
with raises(ValueError):
check_value(5 < 4, "five should be less than four")
def test_check_meaningful_value_succeeds():
check_value(3 < 4, "three should be less than four")
# it should complete successfully
def test_check_dim_len():
check_dimension([], [])
with raises(ValueError):
check_dimension([], [3])
with raises(ValueError):
check_dimension([1], [])
check_dimension(range(10), range(10, 20))
def test_check_dim_shape():
check_dimension(np.arange(5), np.arange(5), d1=0, d2=0)
with raises(ValueError):
check_dimension(np.arange(5), np.arange(6), d1=0, d2=0)
with raises(ValueError):
check_dimension(np.random.randn(8, 10),
np.random.randn(8, 9), d1=1, d2=1)
check_dimension(np.random.randn(8, 10),
np.random.randn(23, 8), d1=0, d2=1)
| 1,251 | 25.638298 | 63 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_funksvd.py | import logging
import pickle
from pathlib import Path
import lenskit.algorithms.funksvd as svd
import pandas as pd
import numpy as np
from pytest import approx, mark
import lk_test_utils as lktu
_log = logging.getLogger(__name__)
simple_df = pd.DataFrame({'item': [1, 1, 2, 3],
'user': [10, 12, 10, 13],
'rating': [4.0, 3.0, 5.0, 2.0]})
def test_fsvd_basic_build():
algo = svd.FunkSVD(20, iterations=20)
algo.fit(simple_df)
assert algo.global_bias_ == approx(simple_df.rating.mean())
assert algo.item_features_.shape == (3, 20)
assert algo.user_features_.shape == (3, 20)
def test_fsvd_clamp_build():
algo = svd.FunkSVD(20, iterations=20, range=(1, 5))
algo.fit(simple_df)
assert algo.global_bias_ == approx(simple_df.rating.mean())
assert algo.item_features_.shape == (3, 20)
assert algo.user_features_.shape == (3, 20)
def test_fsvd_predict_basic():
algo = svd.FunkSVD(20, iterations=20)
algo.fit(simple_df)
assert algo.global_bias_ == approx(simple_df.rating.mean())
assert algo.item_features_.shape == (3, 20)
assert algo.user_features_.shape == (3, 20)
preds = algo.predict_for_user(10, [3])
assert len(preds) == 1
assert preds.index[0] == 3
assert preds.loc[3] >= 0
assert preds.loc[3] <= 5
def test_fsvd_predict_clamp():
algo = svd.FunkSVD(20, iterations=20, range=(1, 5))
algo.fit(simple_df)
assert algo.global_bias_ == approx(simple_df.rating.mean())
assert algo.item_features_.shape == (3, 20)
assert algo.user_features_.shape == (3, 20)
preds = algo.predict_for_user(10, [3])
assert len(preds) == 1
assert preds.index[0] == 3
assert preds.loc[3] >= 1
assert preds.loc[3] <= 5
def test_fsvd_no_bias():
algo = svd.FunkSVD(20, iterations=20, bias=None)
algo.fit(simple_df)
assert algo.global_bias_ == 0
assert algo.item_bias_ is None
assert algo.user_bias_ is None
assert algo.item_features_.shape == (3, 20)
assert algo.user_features_.shape == (3, 20)
preds = algo.predict_for_user(10, [3])
assert len(preds) == 1
assert preds.index[0] == 3
assert all(preds.notna())
def test_fsvd_predict_bad_item():
algo = svd.FunkSVD(20, iterations=20)
algo.fit(simple_df)
assert algo.global_bias_ == approx(simple_df.rating.mean())
assert algo.item_features_.shape == (3, 20)
assert algo.user_features_.shape == (3, 20)
preds = algo.predict_for_user(10, [4])
assert len(preds) == 1
assert preds.index[0] == 4
assert np.isnan(preds.loc[4])
def test_fsvd_predict_bad_item_clamp():
algo = svd.FunkSVD(20, iterations=20, range=(1, 5))
algo.fit(simple_df)
assert algo.global_bias_ == approx(simple_df.rating.mean())
assert algo.item_features_.shape == (3, 20)
assert algo.user_features_.shape == (3, 20)
preds = algo.predict_for_user(10, [4])
assert len(preds) == 1
assert preds.index[0] == 4
assert np.isnan(preds.loc[4])
def test_fsvd_predict_bad_user():
algo = svd.FunkSVD(20, iterations=20)
algo.fit(simple_df)
assert algo.global_bias_ == approx(simple_df.rating.mean())
assert algo.item_features_.shape == (3, 20)
assert algo.user_features_.shape == (3, 20)
preds = algo.predict_for_user(50, [3])
assert len(preds) == 1
assert preds.index[0] == 3
assert np.isnan(preds.loc[3])
@lktu.wantjit
@mark.slow
def test_fsvd_save_load():
ratings = lktu.ml_pandas.renamed.ratings
original = svd.FunkSVD(20, iterations=20)
original.fit(ratings)
assert original.global_bias_ == approx(ratings.rating.mean())
assert original.item_features_.shape == (ratings.item.nunique(), 20)
assert original.user_features_.shape == (ratings.user.nunique(), 20)
mod = pickle.dumps(original)
_log.info('serialized to %d bytes', len(mod))
algo = pickle.loads(mod)
assert algo.global_bias_ == original.global_bias_
assert np.all(algo.user_bias_ == original.user_bias_)
assert np.all(algo.item_bias_ == original.item_bias_)
assert np.all(algo.user_features_ == original.user_features_)
assert np.all(algo.item_features_ == original.item_features_)
assert np.all(algo.item_index_ == original.item_index_)
assert np.all(algo.user_index_ == original.user_index_)
@lktu.wantjit
@mark.slow
def test_fsvd_known_preds():
algo = svd.FunkSVD(15, iterations=125, lrate=0.001)
_log.info('training %s on ml data', algo)
algo.fit(lktu.ml_pandas.renamed.ratings)
dir = Path(__file__).parent
pred_file = dir / 'funksvd-preds.csv'
_log.info('reading known predictions from %s', pred_file)
known_preds = pd.read_csv(str(pred_file))
pairs = known_preds.loc[:, ['user', 'item']]
preds = algo.predict(pairs)
known_preds.rename(columns={'prediction': 'expected'}, inplace=True)
merged = known_preds.assign(prediction=preds)
merged['error'] = merged.expected - merged.prediction
assert not any(merged.prediction.isna() & merged.expected.notna())
err = merged.error
err = err[err.notna()]
try:
assert all(err.abs() < 0.01)
except AssertionError as e:
bad = merged[merged.error.notna() & (merged.error.abs() >= 0.01)]
_log.error('erroneous predictions:\n%s', bad)
raise e
@lktu.wantjit
@mark.slow
@mark.eval
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_fsvd_batch_accuracy():
from lenskit.algorithms import basic
import lenskit.crossfold as xf
from lenskit import batch
import lenskit.metrics.predict as pm
ratings = lktu.ml100k.load_ratings()
svd_algo = svd.FunkSVD(25, 125, damping=10)
algo = basic.Fallback(svd_algo, basic.Bias(damping=10))
def eval(train, test):
_log.info('running training')
algo.fit(train)
_log.info('testing %d users', test.user.nunique())
return batch.predict(algo, test)
folds = xf.partition_users(ratings, 5, xf.SampleFrac(0.2))
preds = pd.concat(eval(train, test) for (train, test) in folds)
mae = pm.mae(preds.prediction, preds.rating)
assert mae == approx(0.74, abs=0.025)
user_rmse = preds.groupby('user').apply(lambda df: pm.rmse(df.prediction, df.rating))
assert user_rmse.mean() == approx(0.92, abs=0.05)
| 6,346 | 29.514423 | 89 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_batch_predict.py | import pytest
import logging
from collections import namedtuple
import pandas as pd
import numpy as np
import lk_test_utils as lktu
from lenskit.algorithms.basic import Bias
import lenskit.batch as lkb
_log = logging.getLogger(__name__)
MLB = namedtuple('MLB', ['ratings', 'algo'])
@pytest.fixture
def mlb():
ratings = lktu.ml_pandas.renamed.ratings
algo = Bias()
algo.fit(ratings)
return MLB(ratings, algo)
def test_predict_single(mlb):
tf = pd.DataFrame({'user': [1], 'item': [31]})
res = lkb.predict(mlb.algo, tf)
assert len(res) == 1
assert all(res.user == 1)
assert set(res.columns) == set(['user', 'item', 'prediction'])
assert all(res.item == 31)
expected = mlb.algo.mean_ + mlb.algo.item_offsets_.loc[31] + mlb.algo.user_offsets_.loc[1]
assert res.prediction.iloc[0] == pytest.approx(expected)
def test_predict_user(mlb):
uid = 5
urates = mlb.ratings[mlb.ratings.user == uid]
test_rated = urates.item.sample(5)
unrated = np.setdiff1d(mlb.ratings.item.unique(), urates.item.values)
test_unrated = np.random.choice(unrated, 10, replace=False)
test_items = pd.concat([test_rated, pd.Series(test_unrated)])
tf = pd.DataFrame({'user': uid, 'item': test_items})
res = lkb.predict(mlb.algo, tf)
assert len(res) == 15
assert set(res.columns) == set(['user', 'item', 'prediction'])
assert all(res.user == uid)
assert set(res.item) == set(test_items)
# did we get the right predictions?
preds = res.set_index(['user', 'item'])
preds['rating'] = mlb.algo.mean_
preds['rating'] += mlb.algo.item_offsets_
preds['rating'] += mlb.algo.user_offsets_.loc[uid]
assert preds.prediction.values == pytest.approx(preds.rating.values)
def test_predict_two_users(mlb):
uids = [5, 10]
tf = None
# make sure we get both UIDs
while tf is None or len(set(tf.user)) < 2:
tf = mlb.ratings[mlb.ratings.user.isin(uids)].loc[:, ('user', 'item')].sample(10)
res = lkb.predict(mlb.algo, tf)
assert len(res) == 10
assert set(res.user) == set(uids)
preds = res.set_index(['user', 'item'])
preds['rating'] = mlb.algo.mean_
preds['rating'] += mlb.algo.item_offsets_
preds['rating'] += mlb.algo.user_offsets_
assert preds.prediction.values == pytest.approx(preds.rating.values)
def test_predict_include_rating(mlb):
uids = [5, 10]
tf = None
# make sure we get both UIDs
while tf is None or len(set(tf.user)) < 2:
tf = mlb.ratings[mlb.ratings.user.isin(uids)].loc[:, ('user', 'item', 'rating')].sample(10)
res = lkb.predict(mlb.algo, tf)
assert len(res) == 10
assert set(res.user) == set(uids)
preds = res.set_index(['user', 'item'])
preds['expected'] = mlb.algo.mean_
preds['expected'] += mlb.algo.item_offsets_
preds['expected'] += mlb.algo.user_offsets_
assert preds.prediction.values == pytest.approx(preds.expected.values)
urv = mlb.ratings.set_index(['user', 'item'])
assert all(preds.rating.values == urv.loc[preds.index, :].rating.values)
@pytest.mark.skipif(not lktu.ml100k.available, reason='ML-100K required')
@pytest.mark.eval
@pytest.mark.parametrize('ncpus', [None, 2])
def test_bias_batch_predict(ncpus):
from lenskit.algorithms import basic
import lenskit.crossfold as xf
from lenskit import batch
import lenskit.metrics.predict as pm
ratings = lktu.ml100k.load_ratings()
algo = basic.Bias(damping=5)
def eval(train, test):
_log.info('running training')
algo.fit(train)
_log.info('testing %d users', test.user.nunique())
recs = batch.predict(algo, test, nprocs=ncpus)
return recs
preds = pd.concat((eval(train, test)
for (train, test)
in xf.partition_users(ratings, 5, xf.SampleFrac(0.2))))
_log.info('analyzing predictions')
rmse = pm.rmse(preds.prediction, preds.rating)
_log.info('RMSE is %f', rmse)
assert rmse == pytest.approx(0.95, abs=0.1)
| 4,039 | 29.37594 | 99 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/conftest.py | import logging
from pytest import fixture
_log = logging.getLogger('lenskit.tests')
@fixture(autouse=True)
def log_test(request):
_log.info('running test %s:%s', request.module.__name__, request.function.__name__)
def pytest_collection_modifyitems(items):
# add 'slow' to all 'eval' tests
for item in items:
evm = item.get_closest_marker('eval')
slm = item.get_closest_marker('slow')
if evm is not None and slm is None:
_log.debug('adding slow mark to %s', item)
item.add_marker('slow')
| 552 | 26.65 | 87 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_matrix_csr.py | import pickle
import numpy as np
import scipy.sparse as sps
import lenskit.matrix as lm
import lk_test_utils as lktu
from pytest import mark, approx, raises
@mark.parametrize('copy', [True, False])
def test_csr_from_sps(copy):
# initialize sparse matrix
mat = np.random.randn(10, 5)
mat[mat <= 0] = 0
smat = sps.csr_matrix(mat)
# make sure it's sparse
assert smat.nnz == np.sum(mat > 0)
csr = lm.CSR.from_scipy(smat, copy=copy)
assert csr.nnz == smat.nnz
assert csr.nrows == smat.shape[0]
assert csr.ncols == smat.shape[1]
assert all(csr.rowptrs == smat.indptr)
assert all(csr.colinds == smat.indices)
assert all(csr.values == smat.data)
assert isinstance(csr.rowptrs, np.ndarray)
assert isinstance(csr.colinds, np.ndarray)
assert isinstance(csr.values, np.ndarray)
def test_csr_is_numpy_compatible():
# initialize sparse matrix
mat = np.random.randn(10, 5)
mat[mat <= 0] = 0
smat = sps.csr_matrix(mat)
# make sure it's sparse
assert smat.nnz == np.sum(mat > 0)
csr = lm.CSR.from_scipy(smat)
d2 = csr.values * 10
assert d2 == approx(smat.data * 10)
def test_csr_from_coo():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
assert csr.nrows == 4
assert csr.ncols == 3
assert csr.nnz == 4
assert csr.values == approx(vals)
def test_csr_rowinds():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
ris = csr.rowinds()
assert all(ris == rows)
def test_csr_set_values():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
v2 = np.random.randn(4)
csr.values = v2
assert all(csr.values == v2)
def test_csr_set_values_oversize():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
v2 = np.random.randn(6)
csr.values = v2
assert all(csr.values == v2[:4])
def test_csr_set_values_undersize():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
v2 = np.random.randn(3)
with raises(ValueError):
csr.values = v2
assert all(csr.values == vals)
def test_csr_set_values_none():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
csr.values = None
assert csr.values is None
assert all(csr.row(0) == [0, 1, 1])
assert all(csr.row(1) == [1, 0, 0])
assert all(csr.row(3) == [0, 1, 0])
def test_csr_str():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
assert '4x3' in str(csr)
def test_csr_row():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_) + 1
csr = lm.CSR.from_coo(rows, cols, vals)
assert all(csr.row(0) == np.array([0, 1, 2], dtype=np.float_))
assert all(csr.row(1) == np.array([3, 0, 0], dtype=np.float_))
assert all(csr.row(2) == np.array([0, 0, 0], dtype=np.float_))
assert all(csr.row(3) == np.array([0, 4, 0], dtype=np.float_))
def test_csr_sparse_row():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
assert all(csr.row_cs(0) == np.array([1, 2], dtype=np.int32))
assert all(csr.row_cs(1) == np.array([0], dtype=np.int32))
assert all(csr.row_cs(2) == np.array([], dtype=np.int32))
assert all(csr.row_cs(3) == np.array([1], dtype=np.int32))
assert all(csr.row_vs(0) == np.array([0, 1], dtype=np.float_))
assert all(csr.row_vs(1) == np.array([2], dtype=np.float_))
assert all(csr.row_vs(2) == np.array([], dtype=np.float_))
assert all(csr.row_vs(3) == np.array([3], dtype=np.float_))
def test_csr_transpose():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
csc = csr.transpose()
assert csc.nrows == csr.ncols
assert csc.ncols == csr.nrows
assert all(csc.rowptrs == [0, 1, 3, 4])
assert csc.colinds.max() == 3
assert csc.values.sum() == approx(vals.sum())
for r, c, v in zip(rows, cols, vals):
row = csc.row(c)
assert row[r] == v
def test_csr_transpose_coords():
rows = np.array([0, 0, 1, 3], dtype=np.int32)
cols = np.array([1, 2, 0, 1], dtype=np.int32)
vals = np.arange(4, dtype=np.float_)
csr = lm.CSR.from_coo(rows, cols, vals)
csc = csr.transpose(False)
assert csc.nrows == csr.ncols
assert csc.ncols == csr.nrows
assert all(csc.rowptrs == [0, 1, 3, 4])
assert csc.colinds.max() == 3
assert csc.values is None
for r, c, v in zip(rows, cols, vals):
row = csc.row(c)
assert row[r] == 1
def test_csr_row_nnzs():
# initialize sparse matrix
mat = np.random.randn(10, 5)
mat[mat <= 0] = 0
smat = sps.csr_matrix(mat)
# make sure it's sparse
assert smat.nnz == np.sum(mat > 0)
csr = lm.CSR.from_scipy(smat)
nnzs = csr.row_nnzs()
assert nnzs.sum() == csr.nnz
for i in range(10):
row = mat[i, :]
assert nnzs[i] == np.sum(row > 0)
def test_csr_from_coo_rand():
for i in range(100):
coords = np.random.choice(np.arange(50 * 100, dtype=np.int32), 1000, False)
rows = np.mod(coords, 100, dtype=np.int32)
cols = np.floor_divide(coords, 100, dtype=np.int32)
vals = np.random.randn(1000)
csr = lm.CSR.from_coo(rows, cols, vals, (100, 50))
rowinds = csr.rowinds()
assert csr.nrows == 100
assert csr.ncols == 50
assert csr.nnz == 1000
for i in range(100):
sp = csr.rowptrs[i]
ep = csr.rowptrs[i+1]
assert ep - sp == np.sum(rows == i)
points, = np.nonzero(rows == i)
assert len(points) == ep - sp
po = np.argsort(cols[points])
points = points[po]
assert all(np.sort(csr.colinds[sp:ep]) == cols[points])
assert all(np.sort(csr.row_cs(i)) == cols[points])
assert all(csr.values[np.argsort(csr.colinds[sp:ep]) + sp] == vals[points])
assert all(rowinds[sp:ep] == i)
row = np.zeros(50)
row[cols[points]] = vals[points]
assert np.sum(csr.row(i)) == approx(np.sum(vals[points]))
assert all(csr.row(i) == row)
def test_csr_from_coo_novals():
for i in range(50):
coords = np.random.choice(np.arange(50 * 100, dtype=np.int32), 1000, False)
rows = np.mod(coords, 100, dtype=np.int32)
cols = np.floor_divide(coords, 100, dtype=np.int32)
csr = lm.CSR.from_coo(rows, cols, None, (100, 50))
assert csr.nrows == 100
assert csr.ncols == 50
assert csr.nnz == 1000
for i in range(100):
sp = csr.rowptrs[i]
ep = csr.rowptrs[i+1]
assert ep - sp == np.sum(rows == i)
points, = np.nonzero(rows == i)
po = np.argsort(cols[points])
points = points[po]
assert all(np.sort(csr.colinds[sp:ep]) == cols[points])
assert np.sum(csr.row(i)) == len(points)
def test_csr_to_sps():
# initialize sparse matrix
mat = np.random.randn(10, 5)
mat[mat <= 0] = 0
# get COO
smat = sps.coo_matrix(mat)
# make sure it's sparse
assert smat.nnz == np.sum(mat > 0)
csr = lm.CSR.from_coo(smat.row, smat.col, smat.data, shape=smat.shape)
assert csr.nnz == smat.nnz
assert csr.nrows == smat.shape[0]
assert csr.ncols == smat.shape[1]
smat2 = csr.to_scipy()
assert sps.isspmatrix(smat2)
assert sps.isspmatrix_csr(smat2)
for i in range(csr.nrows):
assert smat2.indptr[i] == csr.rowptrs[i]
assert smat2.indptr[i+1] == csr.rowptrs[i+1]
sp = smat2.indptr[i]
ep = smat2.indptr[i+1]
assert all(smat2.indices[sp:ep] == csr.colinds[sp:ep])
assert all(smat2.data[sp:ep] == csr.values[sp:ep])
@mark.parametrize("values", [True, False])
def test_csr_pickle(values):
coords = np.random.choice(np.arange(50 * 100, dtype=np.int32), 1000, False)
rows = np.mod(coords, 100, dtype=np.int32)
cols = np.floor_divide(coords, 100, dtype=np.int32)
if values:
vals = np.random.randn(1000)
else:
vals = None
csr = lm.CSR.from_coo(rows, cols, vals, (100, 50))
assert csr.nrows == 100
assert csr.ncols == 50
assert csr.nnz == 1000
data = pickle.dumps(csr)
csr2 = pickle.loads(data)
assert csr2.nrows == csr.nrows
assert csr2.ncols == csr.ncols
assert csr2.nnz == csr.nnz
assert all(csr2.rowptrs == csr.rowptrs)
assert all(csr2.colinds == csr.colinds)
if values:
assert all(csr2.values == csr.values)
else:
assert csr2.values is None
| 9,667 | 28.747692 | 87 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_als_explicit.py | import logging
import pickle
from lenskit.algorithms import als
import pandas as pd
import numpy as np
from pytest import approx, mark
import lk_test_utils as lktu
_log = logging.getLogger(__name__)
simple_df = pd.DataFrame({'item': [1, 1, 2, 3],
'user': [10, 12, 10, 13],
'rating': [4.0, 3.0, 5.0, 2.0]})
def test_als_basic_build():
algo = als.BiasedMF(20, iterations=10)
algo.fit(simple_df)
assert algo.global_bias_ == approx(simple_df.rating.mean())
assert set(algo.user_index_) == set([10, 12, 13])
assert set(algo.item_index_) == set([1, 2, 3])
assert algo.user_features_.shape == (3, 20)
assert algo.item_features_.shape == (3, 20)
assert algo.n_features == 20
assert algo.n_users == 3
assert algo.n_items == 3
def test_als_no_bias():
algo = als.BiasedMF(20, iterations=10, bias=None)
algo.fit(simple_df)
assert algo.bias is None
assert algo.global_bias_ == 0
assert algo.item_bias_ is None
assert algo.user_bias_ is None
assert set(algo.user_index_) == set([10, 12, 13])
assert set(algo.item_index_) == set([1, 2, 3])
assert algo.user_features_.shape == (3, 20)
assert algo.item_features_.shape == (3, 20)
preds = algo.predict_for_user(10, [3])
assert len(preds) == 1
def test_als_predict_basic():
algo = als.BiasedMF(20, iterations=10)
algo.fit(simple_df)
assert algo.global_bias_ == approx(simple_df.rating.mean())
preds = algo.predict_for_user(10, [3])
assert len(preds) == 1
assert preds.index[0] == 3
assert preds.loc[3] >= -0.1
assert preds.loc[3] <= 5.1
def test_als_predict_bad_item():
algo = als.BiasedMF(20, iterations=10)
algo.fit(simple_df)
assert algo.global_bias_ == approx(simple_df.rating.mean())
preds = algo.predict_for_user(10, [4])
assert len(preds) == 1
assert preds.index[0] == 4
assert np.isnan(preds.loc[4])
def test_als_predict_bad_user():
algo = als.BiasedMF(20, iterations=10)
algo.fit(simple_df)
assert algo.global_bias_ == approx(simple_df.rating.mean())
preds = algo.predict_for_user(50, [3])
assert len(preds) == 1
assert preds.index[0] == 3
assert np.isnan(preds.loc[3])
@lktu.wantjit
@mark.slow
def test_als_train_large():
algo = als.BiasedMF(20, iterations=10)
ratings = lktu.ml_pandas.renamed.ratings
algo.fit(ratings)
assert algo.global_bias_ == approx(ratings.rating.mean())
assert algo.n_features == 20
assert algo.n_items == ratings.item.nunique()
assert algo.n_users == ratings.user.nunique()
icounts = ratings.groupby('item').rating.count()
isums = ratings.groupby('item').rating.sum()
is2 = isums - icounts * ratings.rating.mean()
imeans = is2 / (icounts + 5)
ibias = pd.Series(algo.item_bias_, index=algo.item_index_)
imeans, ibias = imeans.align(ibias)
assert ibias.values == approx(imeans.values)
# don't use wantjit, use this to do a non-JIT test
def test_als_save_load():
original = als.BiasedMF(20, iterations=5)
ratings = lktu.ml_pandas.renamed.ratings
original.fit(ratings)
assert original.global_bias_ == approx(ratings.rating.mean())
mod = pickle.dumps(original)
_log.info('serialized to %d bytes', len(mod))
algo = pickle.loads(mod)
assert algo.global_bias_ == original.global_bias_
assert np.all(algo.user_bias_ == original.user_bias_)
assert np.all(algo.item_bias_ == original.item_bias_)
assert np.all(algo.user_features_ == original.user_features_)
assert np.all(algo.item_features_ == original.item_features_)
assert np.all(algo.item_index_ == original.item_index_)
assert np.all(algo.user_index_ == original.user_index_)
@mark.slow
@mark.eval
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_als_batch_accuracy():
from lenskit.algorithms import basic
import lenskit.crossfold as xf
import lenskit.metrics.predict as pm
ratings = lktu.ml100k.load_ratings()
svd_algo = als.BiasedMF(25, iterations=20, damping=5)
algo = basic.Fallback(svd_algo, basic.Bias(damping=5))
def eval(train, test):
_log.info('running training')
algo.fit(train)
_log.info('testing %d users', test.user.nunique())
return test.assign(prediction=algo.predict(test))
folds = xf.partition_users(ratings, 5, xf.SampleFrac(0.2))
preds = pd.concat(eval(train, test) for (train, test) in folds)
mae = pm.mae(preds.prediction, preds.rating)
assert mae == approx(0.73, abs=0.025)
user_rmse = preds.groupby('user').apply(lambda df: pm.rmse(df.prediction, df.rating))
assert user_rmse.mean() == approx(0.91, abs=0.05)
| 4,743 | 29.216561 | 89 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_util.py | import time
import re
import pathlib
import numpy as np
import pandas as pd
from lenskit import util as lku
def test_stopwatch_instant():
w = lku.Stopwatch()
assert w.elapsed() > 0
def test_stopwatch_sleep():
w = lku.Stopwatch()
time.sleep(0.5)
assert w.elapsed() >= 0.45
def test_stopwatch_stop():
w = lku.Stopwatch()
time.sleep(0.5)
w.stop()
time.sleep(0.5)
assert w.elapsed() >= 0.45
def test_stopwatch_str():
w = lku.Stopwatch()
time.sleep(0.5)
s = str(w)
assert s.endswith('ms')
def test_stopwatch_long_str():
w = lku.Stopwatch()
time.sleep(1.2)
s = str(w)
assert s.endswith('s')
def test_stopwatch_minutes():
w = lku.Stopwatch()
w.stop()
w.start_time = w.stop_time - 62
s = str(w)
p = re.compile(r'1m2.\d\ds')
assert p.match(s)
def test_stopwatch_hours():
w = lku.Stopwatch()
w.stop()
w.start_time = w.stop_time - 3663
s = str(w)
p = re.compile(r'1h1m3.\d\ds')
assert p.match(s)
def test_accum_init_empty():
values = np.empty(0)
acc = lku.Accumulator(values, 10)
assert acc is not None
assert acc.size == 0
assert acc.peek() < 0
assert acc.remove() < 0
assert len(acc.top_keys()) == 0
def test_accum_add_get():
values = np.array([1.5])
acc = lku.Accumulator(values, 10)
assert acc is not None
assert acc.size == 0
assert acc.peek() < 0
assert acc.remove() < 0
acc.add(0)
assert acc.size == 1
assert acc.peek() == 0
assert acc.remove() == 0
assert acc.size == 0
assert acc.peek() == -1
def test_accum_add_a_few():
values = np.array([1.5, 2, -1])
acc = lku.Accumulator(values, 10)
assert acc is not None
assert acc.size == 0
acc.add(1)
acc.add(0)
acc.add(2)
assert acc.size == 3
assert acc.peek() == 2
assert acc.remove() == 2
assert acc.size == 2
assert acc.remove() == 0
assert acc.remove() == 1
assert acc.size == 0
def test_accum_add_a_few_lim():
values = np.array([1.5, 2, -1])
acc = lku.Accumulator(values, 2)
assert acc is not None
assert acc.size == 0
acc.add(1)
acc.add(0)
acc.add(2)
assert acc.size == 2
assert acc.remove() == 0
assert acc.size == 1
assert acc.remove() == 1
assert acc.size == 0
def test_accum_add_more_lim():
for run in range(10):
values = np.random.randn(100)
acc = lku.Accumulator(values, 10)
order = np.arange(len(values), dtype=np.int_)
np.random.shuffle(order)
for i in order:
acc.add(i)
assert acc.size <= 10
topn = []
        # remove() pops the smallest remaining value first, so keys come out in ascending value order
while acc.size > 0:
topn.append(acc.remove())
topn = np.array(topn)
xs = np.argsort(values)
assert all(topn == xs[-10:])
def test_accum_top_indices():
for run in range(10):
values = np.random.randn(100)
acc = lku.Accumulator(values, 10)
order = np.arange(len(values), dtype=np.int_)
np.random.shuffle(order)
for i in order:
acc.add(i)
assert acc.size <= 10
topn = acc.top_keys()
xs = np.argsort(values)
# should be top N values in decreasing order
assert all(topn == np.flip(xs[-10:], axis=0))
def test_last_memo():
history = []
def func(foo):
history.append(foo)
cache = lku.LastMemo(func)
cache("foo")
assert len(history) == 1
# string literals are interned
cache("foo")
assert len(history) == 1
cache("bar")
assert len(history) == 2
def test_fspath():
path = pathlib.Path('lenskit')
fn = lku.fspath(path)
assert fn == 'lenskit'
def test_write_parquet(tmp_path):
assert tmp_path.exists()
fn = tmp_path / 'out.parquet'
frame = pd.DataFrame({'n': np.arange(10), 'x': np.random.randn(10) + 5})
lku.write_parquet(fn, frame)
f2 = pd.read_parquet(fn)
assert all(f2.n == frame.n)
assert all(f2.x == frame.x)
def test_append_parquet(tmp_path):
fn = tmp_path / 'out.parquet'
frame = pd.DataFrame({'n': np.arange(10), 'x': np.random.randn(10) + 5})
lku.write_parquet(fn, frame.iloc[:5], True)
lku.write_parquet(fn, frame.iloc[5:], True)
f2 = pd.read_parquet(fn)
assert all(f2.n == frame.n)
assert all(f2.x == frame.x)
def test_read_ml():
ratings = lku.load_ml_ratings()
assert len(ratings) > 100000
assert set(ratings.columns) == set(['user', 'item', 'rating', 'timestamp'])
| 4,581 | 20.613208 | 79 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_implicit.py | import logging
import pickle
import pandas as pd
import numpy as np
from pytest import mark
try:
import implicit
have_implicit = True
except ImportError:
have_implicit = False
import lk_test_utils as lktu
from lenskit.algorithms.implicit import ALS, BPR
from lenskit import util
_log = logging.getLogger(__name__)
simple_df = pd.DataFrame({'item': [1, 1, 2, 3],
'user': [10, 12, 10, 13],
'rating': [4.0, 3.0, 5.0, 2.0]})
@mark.slow
@mark.skipif(not have_implicit, reason='implicit not installed')
def test_implicit_als_train_rec():
algo = ALS(25)
assert algo.factors == 25
ratings = lktu.ml_pandas.renamed.ratings
ret = algo.fit(ratings)
assert ret is algo
recs = algo.recommend(100, n=20)
assert len(recs) == 20
_log.info('serializing implicit model')
mod = pickle.dumps(algo)
_log.info('serialized to %d bytes')
a2 = pickle.loads(mod)
r2 = a2.recommend(100, n=20)
assert len(r2) == 20
assert all(r2 == recs)
@mark.slow
@mark.eval
@mark.skipif(not have_implicit, reason='implicit not installed')
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_implicit_als_batch_accuracy():
import lenskit.crossfold as xf
from lenskit import batch, topn
ratings = lktu.ml100k.load_ratings()
algo_t = ALS(25)
def eval(train, test):
_log.info('running training')
train['rating'] = train.rating.astype(np.float_)
algo = util.clone(algo_t)
algo.fit(train)
users = test.user.unique()
_log.info('testing %d users', len(users))
candidates = topn.UnratedCandidates(train)
recs = batch.recommend(algo, users, 100, candidates)
return recs
folds = list(xf.partition_users(ratings, 5, xf.SampleFrac(0.2)))
test = pd.concat(f.test for f in folds)
recs = pd.concat(eval(train, test) for (train, test) in folds)
_log.info('analyzing recommendations')
rla = topn.RecListAnalysis()
rla.add_metric(topn.ndcg)
results = rla.compute(recs, test)
dcg = results.ndcg
_log.info('nDCG for %d users is %.4f', len(dcg), dcg.mean())
assert dcg.mean() > 0
@mark.slow
@mark.skipif(not have_implicit, reason='implicit not installed')
def test_implicit_bpr_train_rec():
algo = BPR(25)
assert algo.factors == 25
ratings = lktu.ml_pandas.renamed.ratings
algo.fit(ratings)
recs = algo.recommend(100, n=20)
assert len(recs) == 20
_log.info('serializing implicit model')
mod = pickle.dumps(algo)
_log.info('serialized to %d bytes')
a2 = pickle.loads(mod)
r2 = a2.recommend(100, n=20)
assert len(r2) == 20
assert all(r2 == recs)
@mark.skipif(not have_implicit, reason='implicit not installed')
def test_implicit_pickle_untrained(tmp_path):
mf = tmp_path / 'bpr.dat'
algo = BPR(25)
with mf.open('wb') as f:
pickle.dump(algo, f)
with mf.open('rb') as f:
a2 = pickle.load(f)
assert a2 is not algo
assert a2.factors == 25
| 3,075 | 24.421488 | 73 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_basic.py | from lenskit.algorithms import basic
from lenskit import util as lku
import pandas as pd
import numpy as np
import pickle
import lk_test_utils as lktu
from pytest import approx
simple_df = pd.DataFrame({'item': [1, 1, 2, 3],
'user': [10, 12, 10, 13],
'rating': [4.0, 3.0, 5.0, 2.0]})
def test_memorized():
algo = basic.Memorized(simple_df)
preds = algo.predict_for_user(10, [1, 2])
assert set(preds.index) == set([1, 2])
assert all(preds == pd.Series({1: 4.0, 2: 5.0}))
preds = algo.predict_for_user(12, [1, 3])
assert set(preds.index) == set([1, 3])
assert preds.loc[1] == 3.0
assert np.isnan(preds.loc[3])
def test_memorized_batch():
algo = basic.Memorized(simple_df)
preds = algo.predict(pd.DataFrame({'user': [10, 10, 12], 'item': [1, 2, 1]}))
assert isinstance(preds, pd.Series)
assert preds.name == 'prediction'
assert set(preds.index) == set([0, 1, 2])
assert all(preds == [4.0, 5.0, 3.0])
def test_memorized_batch_ord():
algo = basic.Memorized(simple_df)
preds = algo.predict(pd.DataFrame({'user': [10, 12, 10], 'item': [1, 1, 2]}))
assert set(preds.index) == set([0, 1, 2])
assert all(preds == [4.0, 3.0, 5.0])
def test_memorized_batch_missing():
algo = basic.Memorized(simple_df)
preds = algo.predict(pd.DataFrame({'user': [10, 12, 12], 'item': [1, 1, 3]}))
assert set(preds.index) == set([0, 1, 2])
assert all(preds.iloc[:2] == [4.0, 3.0])
assert np.isnan(preds.iloc[2])
def test_memorized_batch_keep_index():
algo = basic.Memorized(simple_df)
query = pd.DataFrame({'user': [10, 10, 12], 'item': [1, 2, 1]},
index=np.random.choice(np.arange(10), 3, False))
preds = algo.predict(query)
assert all(preds.index == query.index)
assert all(preds == [4.0, 5.0, 3.0])
def test_fallback_train_one():
algo = basic.Fallback(basic.Bias())
algo.fit(lktu.ml_pandas.renamed.ratings)
assert len(algo.algorithms) == 1
assert isinstance(algo.algorithms[0], basic.Bias)
assert algo.algorithms[0].mean_ == approx(lktu.ml_pandas.ratings.rating.mean())
def test_fallback_train_one_pred_impossible():
algo = basic.Fallback(basic.Memorized(simple_df))
algo.fit(lktu.ml_pandas.renamed.ratings)
preds = algo.predict_for_user(10, [1, 2])
assert set(preds.index) == set([1, 2])
assert all(preds == pd.Series({1: 4.0, 2: 5.0}))
preds = algo.predict_for_user(12, [1, 3])
assert set(preds.index) == set([1, 3])
assert preds.loc[1] == 3.0
assert np.isnan(preds.loc[3])
def test_fallback_list():
algo = basic.Fallback([basic.Memorized(simple_df), basic.Bias()])
algo.fit(lktu.ml_pandas.renamed.ratings)
assert len(algo.algorithms) == 2
params = algo.get_params()
assert list(params.keys()) == ['algorithms']
assert len(params['algorithms']) == 2
assert isinstance(params['algorithms'][0], basic.Memorized)
assert isinstance(params['algorithms'][1], basic.Bias)
def test_fallback_string():
algo = basic.Fallback([basic.Memorized(simple_df), basic.Bias()])
assert 'Fallback' in str(algo)
def test_fallback_clone():
algo = basic.Fallback([basic.Memorized(simple_df), basic.Bias()])
algo.fit(lktu.ml_pandas.renamed.ratings)
assert len(algo.algorithms) == 2
clone = lku.clone(algo)
assert clone is not algo
for a1, a2 in zip(algo.algorithms, clone.algorithms):
assert a1 is not a2
assert type(a2) == type(a1)
def test_fallback_predict():
algo = basic.Fallback(basic.Memorized(simple_df), basic.Bias())
algo.fit(lktu.ml_pandas.renamed.ratings)
assert len(algo.algorithms) == 2
bias = algo.algorithms[1]
assert isinstance(bias, basic.Bias)
assert bias.mean_ == approx(lktu.ml_pandas.ratings.rating.mean())
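    # expected fallback score: the bias model's global mean plus user and item offsets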
def exp_val(user, item):
v = bias.mean_
if user is not None:
v += bias.user_offsets_.loc[user]
if item is not None:
v += bias.item_offsets_.loc[item]
return v
# first user + item
preds = algo.predict_for_user(10, [1])
assert preds.loc[1] == 4.0
# second user + first item
preds = algo.predict_for_user(15, [1])
assert preds.loc[1] == approx(exp_val(15, 1))
    # second user + second item
preds = algo.predict_for_user(12, [2])
assert preds.loc[2] == approx(exp_val(12, 2))
# blended
preds = algo.predict_for_user(10, [1, 5])
assert preds.loc[1] == 4.0
assert preds.loc[5] == approx(exp_val(10, 5))
# blended unknown
preds = algo.predict_for_user(10, [5, 1, -23081])
assert len(preds) == 3
assert preds.loc[1] == 4.0
assert preds.loc[5] == approx(exp_val(10, 5))
assert preds.loc[-23081] == approx(exp_val(10, None))
def test_fallback_save_load(tmp_path):
original = basic.Fallback(basic.Memorized(simple_df), basic.Bias())
original.fit(lktu.ml_pandas.renamed.ratings)
fn = tmp_path / 'fb.mod'
with fn.open('wb') as f:
pickle.dump(original, f)
with fn.open('rb') as f:
algo = pickle.load(f)
bias = algo.algorithms[1]
assert bias.mean_ == approx(lktu.ml_pandas.ratings.rating.mean())
def exp_val(user, item):
v = bias.mean_
if user is not None:
v += bias.user_offsets_.loc[user]
if item is not None:
v += bias.item_offsets_.loc[item]
return v
# first user + item
preds = algo.predict_for_user(10, [1])
assert preds.loc[1] == 4.0
# second user + first item
preds = algo.predict_for_user(15, [1])
assert preds.loc[1] == approx(exp_val(15, 1))
    # second user + second item
preds = algo.predict_for_user(12, [2])
assert preds.loc[2] == approx(exp_val(12, 2))
# blended
preds = algo.predict_for_user(10, [1, 5])
assert preds.loc[1] == 4.0
assert preds.loc[5] == approx(exp_val(10, 5))
# blended unknown
preds = algo.predict_for_user(10, [5, 1, -23081])
assert len(preds) == 3
assert preds.loc[1] == 4.0
assert preds.loc[5] == approx(exp_val(10, 5))
assert preds.loc[-23081] == approx(exp_val(10, None))
def test_topn_recommend():
pred = basic.Memorized(simple_df)
rec = basic.TopN(pred)
rec.fit(simple_df)
rec10 = rec.recommend(10, candidates=[1, 2])
assert all(rec10.item == [2, 1])
assert all(rec10.score == [5, 4])
rec2 = rec.recommend(12, candidates=[1, 2])
assert len(rec2) == 1
assert all(rec2.item == [1])
assert all(rec2.score == [3])
rec10 = rec.recommend(10, n=1, candidates=[1, 2])
assert len(rec10) == 1
assert all(rec10.item == [2])
assert all(rec10.score == [5])
def test_topn_config():
pred = basic.Memorized(simple_df)
rec = basic.TopN(pred)
rs = str(rec)
assert rs.startswith('TopN/')
def test_topn_big():
ratings = lktu.ml_pandas.renamed.ratings
users = ratings.user.unique()
items = ratings.item.unique()
user_items = ratings.set_index('user').item
algo = basic.TopN(basic.Bias())
a2 = algo.fit(ratings)
assert a2 is algo
# test 100 random users
for u in np.random.choice(users, 100, False):
recs = algo.recommend(u, 100)
assert len(recs) == 100
rated = user_items.loc[u]
assert all(~recs['item'].isin(rated))
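        # the recommender's scores should match the predictor's top-100 scores over unrated items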
unrated = np.setdiff1d(items, rated)
scores = algo.predictor.predict_for_user(u, unrated)
top = scores.nlargest(100)
assert top.values == approx(recs.score.values)
def test_popular():
algo = basic.Popular()
algo.fit(lktu.ml_pandas.renamed.ratings)
counts = lktu.ml_pandas.renamed.ratings.groupby('item').user.count()
counts = counts.nlargest(100)
assert algo.item_pop_.max() == counts.max()
recs = algo.recommend(2038, 100)
assert len(recs) == 100
assert all(np.diff(recs.score) <= 0)
assert recs.score.iloc[0] == counts.max()
# the 10 most popular should be the same
assert all(counts.index[:10] == recs.item[:10])
def test_popular_excludes_rated():
algo = basic.Popular()
algo.fit(lktu.ml_pandas.renamed.ratings)
counts = lktu.ml_pandas.renamed.ratings.groupby('item').user.count()
counts = counts.nlargest(100)
recs = algo.recommend(100, 100)
assert len(recs) == 100
assert all(np.diff(recs.score) <= 0)
# make sure we didn't recommend anything the user likes
ratings = lktu.ml_pandas.renamed.ratings
urates = ratings.set_index(['user', 'item'])
urates = urates.loc[100, :]
match = recs.join(urates, on='item', how='inner')
assert len(match) == 0
def test_pop_candidates():
algo = basic.Popular()
algo.fit(lktu.ml_pandas.renamed.ratings)
counts = lktu.ml_pandas.renamed.ratings.groupby('item').user.count()
items = lktu.ml_pandas.renamed.ratings.item.unique()
assert algo.item_pop_.max() == counts.max()
candidates = np.random.choice(items, 500, replace=False)
recs = algo.recommend(2038, 100, candidates)
assert len(recs) == 100
assert all(np.diff(recs.score) <= 0)
ccs = counts.loc[candidates]
ccs = ccs.sort_values(ascending=False)
assert recs.score.iloc[0] == ccs.max()
equiv = ccs[ccs == ccs.max()]
assert recs.item.iloc[0] in equiv.index
def test_pop_save_load():
original = basic.Popular()
original.fit(lktu.ml_pandas.renamed.ratings)
mod = pickle.dumps(original)
algo = pickle.loads(mod)
counts = lktu.ml_pandas.renamed.ratings.groupby('item').user.count()
counts = counts.nlargest(100)
assert algo.item_pop_.max() == counts.max()
recs = algo.recommend(2038, 100)
assert len(recs) == 100
assert all(np.diff(recs.score) <= 0)
assert recs.score.iloc[0] == counts.max()
# the 10 most popular should be the same
assert all(counts.index[:10] == recs.item[:10])
def test_unrated_selector():
sel = basic.UnratedItemCandidateSelector()
s2 = sel.fit(simple_df)
assert s2 is sel
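    # in simple_df, user 10 rated items 1 and 2, user 12 rated item 1, and user 11 rated nothing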
assert set(sel.candidates(10)) == set([3])
assert set(sel.candidates(12)) == set([3, 2])
assert set(sel.candidates(11)) == set([1, 2, 3])
def test_unrated_override():
sel = basic.UnratedItemCandidateSelector()
sel.fit(simple_df)
assert set(sel.candidates(10, [2])) == set([1, 3])
def test_unrated_big():
ratings = lktu.ml_pandas.renamed.ratings
users = ratings.user.unique()
items = ratings.item.unique()
user_items = ratings.set_index('user').item
sel = basic.UnratedItemCandidateSelector()
s2 = sel.fit(ratings)
assert s2 is sel
# test 100 random users
for u in np.random.choice(users, 100, False):
candidates = sel.candidates(u)
candidates = pd.Series(candidates)
uis = user_items.loc[u]
assert len(uis) + len(candidates) == len(items)
assert candidates.nunique() == len(candidates)
assert all(~candidates.isin(uis))
| 10,950 | 28.758152 | 83 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_topn_analysis.py | from pathlib import Path
import logging
import numpy as np
import pandas as pd
from pytest import approx
from lenskit.metrics.topn import _dcg
from lenskit import topn
_log = logging.getLogger(__name__)
def test_run_one():
rla = topn.RecListAnalysis()
rla.add_metric(topn.precision)
rla.add_metric(topn.recall)
recs = pd.DataFrame({'user': 1, 'item': [2]})
truth = pd.DataFrame({'user': 1, 'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
res = rla.compute(recs, truth)
assert res.index.name == 'user'
assert res.index.is_unique
assert len(res) == 1
assert all(res.index == 1)
assert all(res.precision == 1.0)
assert res.recall.values == approx(1/3)
def test_run_two():
rla = topn.RecListAnalysis()
rla.add_metric(topn.precision)
rla.add_metric(topn.recall)
rla.add_metric(topn.ndcg)
recs = pd.DataFrame({
'data': 'a',
'user': ['a', 'a', 'a', 'b', 'b'],
'item': [2, 3, 1, 4, 5],
'rank': [1, 2, 3, 1, 2]
})
truth = pd.DataFrame({
'user': ['a', 'a', 'a', 'b', 'b', 'b'],
'item': [1, 2, 3, 1, 5, 6],
'rating': [3.0, 5.0, 4.0, 3.0, 5.0, 4.0]
})
def prog(inner):
assert len(inner) == 2
return inner
res = rla.compute(recs, truth, progress=prog)
print(res)
assert len(res) == 2
assert res.index.nlevels == 2
assert res.index.names == ['data', 'user']
assert all(res.index.levels[0] == 'a')
assert all(res.index.levels[1] == ['a', 'b'])
assert all(res.reset_index().user == ['a', 'b'])
partial_ndcg = _dcg([0.0, 5.0]) / _dcg([5, 4, 3])
assert res.ndcg.values == approx([1.0, partial_ndcg])
assert res.precision.values == approx([1.0, 1/2])
assert res.recall.values == approx([1.0, 1/3])
def test_inner_format():
rla = topn.RecListAnalysis()
recs = pd.DataFrame({
'data': 'a',
'user': ['a', 'a', 'a', 'b', 'b'],
'item': [2, 3, 1, 4, 5],
'rank': [1, 2, 3, 1, 2]
})
truth = pd.DataFrame({
'user': ['a', 'a', 'a', 'b', 'b', 'b'],
'item': [1, 2, 3, 1, 5, 6],
'rating': [3.0, 5.0, 4.0, 3.0, 5.0, 4.0]
})
def inner(recs, truth, foo='a'):
assert foo == 'b'
assert set(recs.columns) == set(['data', 'user', 'item', 'rank'])
assert len(recs[['data', 'user']].drop_duplicates()) == 1
assert truth.index.name == 'item'
assert truth.index.is_unique
assert all(truth.columns == ['rating'])
return len(recs.join(truth, on='item', how='inner'))
rla.add_metric(inner, name='bob', foo='b')
res = rla.compute(recs, truth)
print(res)
assert len(res) == 2
assert res.index.nlevels == 2
assert res.index.names == ['data', 'user']
assert all(res.index.levels[0] == 'a')
assert all(res.index.levels[1] == ['a', 'b'])
assert all(res.reset_index().user == ['a', 'b'])
assert all(res['bob'] == [3, 1])
def test_spec_group_cols():
rla = topn.RecListAnalysis(group_cols=['data', 'user'])
rla.add_metric(topn.precision)
rla.add_metric(topn.recall)
rla.add_metric(topn.ndcg)
recs = pd.DataFrame({
'data': 'a',
'user': ['a', 'a', 'a', 'b', 'b'],
'item': [2, 3, 1, 4, 5],
'rank': [1, 2, 3, 1, 2],
'wombat': np.random.randn(5)
})
truth = pd.DataFrame({
'user': ['a', 'a', 'a', 'b', 'b', 'b'],
'item': [1, 2, 3, 1, 5, 6],
'rating': [3.0, 5.0, 4.0, 3.0, 5.0, 4.0]
})
res = rla.compute(recs, truth)
print(res)
assert len(res) == 2
assert res.index.nlevels == 2
assert res.index.names == ['data', 'user']
assert all(res.index.levels[0] == 'a')
assert all(res.index.levels[1] == ['a', 'b'])
assert all(res.reset_index().user == ['a', 'b'])
partial_ndcg = _dcg([0.0, 5.0]) / _dcg([5, 4, 3])
assert res.ndcg.values == approx([1.0, partial_ndcg])
assert res.precision.values == approx([1.0, 1/2])
assert res.recall.values == approx([1.0, 1/3])
def test_java_equiv():
dir = Path(__file__).parent
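    # reference nDCG values from the Java implementation are stored alongside the tests (topn-java-*.csv)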
metrics = pd.read_csv(str(dir / 'topn-java-metrics.csv'))
recs = pd.read_csv(str(dir / 'topn-java-recs.csv'))
truth = pd.read_csv(str(dir / 'topn-java-truth.csv'))
rla = topn.RecListAnalysis()
rla.add_metric(topn.ndcg)
res = rla.compute(recs, truth)
umm = pd.merge(metrics, res.reset_index())
umm['err'] = umm['ndcg'] - umm['Java.nDCG']
_log.info('merged: \n%s', umm)
assert umm['err'].values == approx(0, abs=1.0e-6)
| 4,560 | 28.425806 | 83 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_knn_item_item.py | from lenskit import DataWarning
import lenskit.algorithms.item_knn as knn
from pathlib import Path
import logging
import os.path
import pickle
import pandas as pd
import numpy as np
from scipy import linalg as la
import pytest
from pytest import approx, mark
import lk_test_utils as lktu
_log = logging.getLogger(__name__)
ml_ratings = lktu.ml_pandas.renamed.ratings
simple_ratings = pd.DataFrame.from_records([
(1, 6, 4.0),
(2, 6, 2.0),
(1, 7, 3.0),
(2, 7, 2.0),
(3, 7, 5.0),
(4, 7, 2.0),
(1, 8, 3.0),
(2, 8, 4.0),
(3, 8, 3.0),
(4, 8, 2.0),
(5, 8, 3.0),
(6, 8, 2.0),
(1, 9, 3.0),
(3, 9, 4.0)
], columns=['user', 'item', 'rating'])
def test_ii_train():
algo = knn.ItemItem(30, save_nbrs=500)
algo.fit(simple_ratings)
assert isinstance(algo.item_index_, pd.Index)
assert isinstance(algo.item_means_, np.ndarray)
assert isinstance(algo.item_counts_, np.ndarray)
matrix = algo.sim_matrix_.to_scipy()
# 6 is a neighbor of 7
six, seven = algo.item_index_.get_indexer([6, 7])
_log.info('six: %d', six)
_log.info('seven: %d', seven)
_log.info('matrix: %s', algo.sim_matrix_)
assert matrix[six, seven] > 0
# and has the correct score
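    # (mean-centered cosine similarity, recomputed by hand from the raw ratings)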
six_v = simple_ratings[simple_ratings.item == 6].set_index('user').rating
six_v = six_v - six_v.mean()
seven_v = simple_ratings[simple_ratings.item == 7].set_index('user').rating
seven_v = seven_v - seven_v.mean()
denom = la.norm(six_v.values) * la.norm(seven_v.values)
six_v, seven_v = six_v.align(seven_v, join='inner')
num = six_v.dot(seven_v)
assert matrix[six, seven] == approx(num / denom, 0.01)
assert all(np.logical_not(np.isnan(algo.sim_matrix_.values)))
assert all(algo.sim_matrix_.values > 0)
# a little tolerance
assert all(algo.sim_matrix_.values < 1 + 1.0e-6)
def test_ii_train_unbounded():
algo = knn.ItemItem(30)
algo.fit(simple_ratings)
assert all(np.logical_not(np.isnan(algo.sim_matrix_.values)))
assert all(algo.sim_matrix_.values > 0)
# a little tolerance
assert all(algo.sim_matrix_.values < 1 + 1.0e-6)
# 6 is a neighbor of 7
matrix = algo.sim_matrix_.to_scipy()
six, seven = algo.item_index_.get_indexer([6, 7])
assert matrix[six, seven] > 0
# and has the correct score
six_v = simple_ratings[simple_ratings.item == 6].set_index('user').rating
six_v = six_v - six_v.mean()
seven_v = simple_ratings[simple_ratings.item == 7].set_index('user').rating
seven_v = seven_v - seven_v.mean()
denom = la.norm(six_v.values) * la.norm(seven_v.values)
six_v, seven_v = six_v.align(seven_v, join='inner')
num = six_v.dot(seven_v)
assert matrix[six, seven] == approx(num / denom, 0.01)
def test_ii_simple_predict():
algo = knn.ItemItem(30, save_nbrs=500)
algo.fit(simple_ratings)
res = algo.predict_for_user(3, [6])
assert res is not None
assert len(res) == 1
assert 6 in res.index
assert not np.isnan(res.loc[6])
def test_ii_simple_implicit_predict():
algo = knn.ItemItem(30, center=False, aggregate='sum')
algo.fit(simple_ratings.loc[:, ['user', 'item']])
res = algo.predict_for_user(3, [6])
assert res is not None
assert len(res) == 1
assert 6 in res.index
assert not np.isnan(res.loc[6])
assert res.loc[6] > 0
def test_ii_warn_duplicates():
extra = pd.DataFrame.from_records([
(3, 7, 4.5)
], columns=['user', 'item', 'rating'])
ratings = pd.concat([simple_ratings, extra])
algo = knn.ItemItem(5)
algo.fit(ratings)
try:
with pytest.warns(DataWarning):
algo.predict_for_user(3, [6])
except AssertionError:
pass # this is fine
@lktu.wantjit
@mark.skip("redundant with large_models")
def test_ii_train_big():
"Simple tests for bounded models"
algo = knn.ItemItem(30, save_nbrs=500)
algo.fit(ml_ratings)
assert all(np.logical_not(np.isnan(algo.sim_matrix_.values)))
assert all(algo.sim_matrix_.values > 0)
# a little tolerance
assert all(algo.sim_matrix_.values < 1 + 1.0e-6)
assert algo.item_counts_.sum() == algo.sim_matrix_.nnz
means = ml_ratings.groupby('item').rating.mean()
assert means[algo.item_index_].values == approx(algo.item_means_)
@lktu.wantjit
@mark.skip("redundant with large_models")
def test_ii_train_big_unbounded():
"Simple tests for unbounded models"
algo = knn.ItemItem(30)
algo.fit(ml_ratings)
assert all(np.logical_not(np.isnan(algo.sim_matrix_.values)))
assert all(algo.sim_matrix_.values > 0)
# a little tolerance
assert all(algo.sim_matrix_.values < 1 + 1.0e-6)
assert algo.item_counts_.sum() == algo.sim_matrix_.nnz
means = ml_ratings.groupby('item').rating.mean()
assert means[algo.item_index_].values == approx(algo.item_means_)
@lktu.wantjit
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_ii_train_ml100k(tmp_path):
"Test an unbounded model on ML-100K"
ratings = lktu.ml100k.load_ratings()
algo = knn.ItemItem(30)
_log.info('training model')
algo.fit(ratings)
_log.info('testing model')
assert all(np.logical_not(np.isnan(algo.sim_matrix_.values)))
assert all(algo.sim_matrix_.values > 0)
# a little tolerance
assert all(algo.sim_matrix_.values < 1 + 1.0e-6)
assert algo.item_counts_.sum() == algo.sim_matrix_.nnz
means = ratings.groupby('item').rating.mean()
assert means[algo.item_index_].values == approx(algo.item_means_)
# save
fn = tmp_path / 'ii.mod'
_log.info('saving model to %s', fn)
with fn.open('wb') as modf:
pickle.dump(algo, modf)
_log.info('reloading model')
with fn.open('rb') as modf:
restored = pickle.load(modf)
assert all(restored.sim_matrix_.values > 0)
r_mat = restored.sim_matrix_
o_mat = algo.sim_matrix_
assert all(r_mat.rowptrs == o_mat.rowptrs)
for i in range(len(restored.item_index_)):
sp = r_mat.rowptrs[i]
ep = r_mat.rowptrs[i + 1]
# everything is in decreasing order
assert all(np.diff(r_mat.values[sp:ep]) <= 0)
assert all(r_mat.values[sp:ep] == o_mat.values[sp:ep])
@lktu.wantjit
def test_ii_large_models():
"Several tests of large trained I-I models"
_log.info('training limited model')
MODEL_SIZE = 100
algo_lim = knn.ItemItem(30, save_nbrs=MODEL_SIZE)
algo_lim.fit(ml_ratings)
_log.info('training unbounded model')
algo_ub = knn.ItemItem(30)
algo_ub.fit(ml_ratings)
_log.info('testing models')
assert all(np.logical_not(np.isnan(algo_lim.sim_matrix_.values)))
assert all(algo_lim.sim_matrix_.values > 0)
# a little tolerance
assert all(algo_lim.sim_matrix_.values < 1 + 1.0e-6)
means = ml_ratings.groupby('item').rating.mean()
assert means[algo_lim.item_index_].values == approx(algo_lim.item_means_)
assert all(np.logical_not(np.isnan(algo_ub.sim_matrix_.values)))
assert all(algo_ub.sim_matrix_.values > 0)
# a little tolerance
assert all(algo_ub.sim_matrix_.values < 1 + 1.0e-6)
means = ml_ratings.groupby('item').rating.mean()
assert means[algo_ub.item_index_].values == approx(algo_ub.item_means_)
mc_rates = ml_ratings.set_index('item')\
.join(pd.DataFrame({'item_mean': means}))\
.assign(rating=lambda df: df.rating - df.item_mean)
mat_lim = algo_lim.sim_matrix_.to_scipy()
mat_ub = algo_ub.sim_matrix_.to_scipy()
_log.info('checking a sample of neighborhoods')
items = pd.Series(algo_ub.item_index_)
items = items[algo_ub.item_counts_ > 0]
for i in items.sample(50):
ipos = algo_ub.item_index_.get_loc(i)
_log.debug('checking item %d at position %d', i, ipos)
assert ipos == algo_lim.item_index_.get_loc(i)
irates = mc_rates.loc[[i], :].set_index('user').rating
ub_row = mat_ub.getrow(ipos)
b_row = mat_lim.getrow(ipos)
assert b_row.nnz <= MODEL_SIZE
assert all(pd.Series(b_row.indices).isin(ub_row.indices))
# it should be sorted !
# check this by diffing the row values, and make sure they're negative
assert all(np.diff(b_row.data) < 1.0e-6)
assert all(np.diff(ub_row.data) < 1.0e-6)
# spot-check some similarities
for n in pd.Series(ub_row.indices).sample(min(10, len(ub_row.indices))):
n_id = algo_ub.item_index_[n]
n_rates = mc_rates.loc[n_id, :].set_index('user').rating
ir, nr = irates.align(n_rates, fill_value=0)
cor = ir.corr(nr)
assert mat_ub[ipos, n] == approx(cor)
# short rows are equal
if b_row.nnz < MODEL_SIZE:
_log.debug('short row of length %d', b_row.nnz)
assert b_row.nnz == ub_row.nnz
ub_row.sort_indices()
b_row.sort_indices()
assert b_row.data == approx(ub_row.data)
continue
# row is truncated - check that truncation is correct
ub_nbrs = pd.Series(ub_row.data, algo_ub.item_index_[ub_row.indices])
b_nbrs = pd.Series(b_row.data, algo_lim.item_index_[b_row.indices])
assert len(ub_nbrs) >= len(b_nbrs)
assert len(b_nbrs) <= MODEL_SIZE
assert all(b_nbrs.index.isin(ub_nbrs.index))
# the similarities should be equal!
b_match, ub_match = b_nbrs.align(ub_nbrs, join='inner')
assert all(b_match == b_nbrs)
assert b_match.values == approx(ub_match.values)
assert b_nbrs.max() == approx(ub_nbrs.max())
if len(ub_nbrs) > MODEL_SIZE:
assert len(b_nbrs) == MODEL_SIZE
ub_shrink = ub_nbrs.nlargest(MODEL_SIZE)
# the minimums should be equal
assert ub_shrink.min() == approx(b_nbrs.min())
# everything above minimum value should be the same set of items
ubs_except_min = ub_shrink[ub_shrink > b_nbrs.min()]
assert all(ubs_except_min.index.isin(b_nbrs.index))
@lktu.wantjit
def test_ii_save_load(tmp_path):
"Save and load a model"
original = knn.ItemItem(30, save_nbrs=500)
_log.info('building model')
original.fit(lktu.ml_sample())
fn = tmp_path / 'ii.mod'
_log.info('saving model to %s', fn)
with fn.open('wb') as modf:
pickle.dump(original, modf)
_log.info('reloading model')
with fn.open('rb') as modf:
algo = pickle.load(modf)
_log.info('checking model')
assert all(np.logical_not(np.isnan(algo.sim_matrix_.values)))
assert all(algo.sim_matrix_.values > 0)
# a little tolerance
assert all(algo.sim_matrix_.values < 1 + 1.0e-6)
assert all(algo.item_counts_ == original.item_counts_)
assert algo.item_counts_.sum() == algo.sim_matrix_.nnz
assert algo.sim_matrix_.nnz == original.sim_matrix_.nnz
assert all(algo.sim_matrix_.rowptrs == original.sim_matrix_.rowptrs)
assert algo.sim_matrix_.values == approx(original.sim_matrix_.values)
r_mat = algo.sim_matrix_
o_mat = original.sim_matrix_
assert all(r_mat.rowptrs == o_mat.rowptrs)
for i in range(len(algo.item_index_)):
sp = r_mat.rowptrs[i]
ep = r_mat.rowptrs[i + 1]
# everything is in decreasing order
assert all(np.diff(r_mat.values[sp:ep]) <= 0)
assert all(r_mat.values[sp:ep] == o_mat.values[sp:ep])
means = ml_ratings.groupby('item').rating.mean()
assert means[algo.item_index_].values == approx(original.item_means_)
matrix = algo.sim_matrix_.to_scipy()
items = pd.Series(algo.item_index_)
items = items[algo.item_counts_ > 0]
for i in items.sample(50):
ipos = algo.item_index_.get_loc(i)
_log.debug('checking item %d at position %d', i, ipos)
row = matrix.getrow(ipos)
# it should be sorted !
# check this by diffing the row values, and make sure they're negative
assert all(np.diff(row.data) < 1.0e-6)
def test_ii_implicit_save_load(tmp_path):
"Save and load a model"
original = knn.ItemItem(30, save_nbrs=500, center=False, aggregate='sum')
_log.info('building model')
original.fit(lktu.ml_sample().loc[:, ['user', 'item']])
fn = tmp_path / 'ii.mod'
_log.info('saving model to %s', fn)
with fn.open('wb') as modf:
pickle.dump(original, modf)
_log.info('reloading model')
with fn.open('rb') as modf:
algo = pickle.load(modf)
_log.info('checking model')
assert all(np.logical_not(np.isnan(algo.sim_matrix_.values)))
assert all(algo.sim_matrix_.values > 0)
# a little tolerance
assert all(algo.sim_matrix_.values < 1 + 1.0e-6)
assert all(algo.item_counts_ == original.item_counts_)
assert algo.item_counts_.sum() == algo.sim_matrix_.nnz
assert algo.sim_matrix_.nnz == original.sim_matrix_.nnz
assert all(algo.sim_matrix_.rowptrs == original.sim_matrix_.rowptrs)
assert algo.sim_matrix_.values == approx(original.sim_matrix_.values)
assert algo.rating_matrix_.values is None
r_mat = algo.sim_matrix_
o_mat = original.sim_matrix_
assert all(r_mat.rowptrs == o_mat.rowptrs)
for i in range(len(algo.item_index_)):
sp = r_mat.rowptrs[i]
ep = r_mat.rowptrs[i + 1]
# everything is in decreasing order
assert all(np.diff(r_mat.values[sp:ep]) <= 0)
assert all(r_mat.values[sp:ep] == o_mat.values[sp:ep])
assert algo.item_means_ is None
matrix = algo.sim_matrix_.to_scipy()
items = pd.Series(algo.item_index_)
items = items[algo.item_counts_ > 0]
for i in items.sample(50):
ipos = algo.item_index_.get_loc(i)
_log.debug('checking item %d at position %d', i, ipos)
row = matrix.getrow(ipos)
# it should be sorted !
# check this by diffing the row values, and make sure they're negative
assert all(np.diff(row.data) < 1.0e-6)
@lktu.wantjit
@mark.slow
def test_ii_implicit():
algo = knn.ItemItem(20, save_nbrs=100, center=False, aggregate='sum')
data = ml_ratings.loc[:, ['user', 'item']]
algo.fit(data)
assert algo.item_counts_.sum() == algo.sim_matrix_.nnz
assert all(algo.sim_matrix_.values > 0)
assert all(algo.item_counts_ <= 100)
preds = algo.predict_for_user(50, [1, 2, 42])
assert all(preds[preds.notna()] > 0)
@mark.slow
@mark.eval
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_ii_batch_accuracy():
from lenskit.algorithms import basic
import lenskit.crossfold as xf
from lenskit import batch
import lenskit.metrics.predict as pm
ratings = lktu.ml100k.load_ratings()
ii_algo = knn.ItemItem(30)
algo = basic.Fallback(ii_algo, basic.Bias())
def eval(train, test):
_log.info('running training')
algo.fit(train)
_log.info('testing %d users', test.user.nunique())
return batch.predict(algo, test)
preds = pd.concat((eval(train, test)
for (train, test)
in xf.partition_users(ratings, 5, xf.SampleFrac(0.2))))
mae = pm.mae(preds.prediction, preds.rating)
assert mae == approx(0.70, abs=0.025)
user_rmse = preds.groupby('user').apply(lambda df: pm.rmse(df.prediction, df.rating))
assert user_rmse.mean() == approx(0.90, abs=0.05)
@lktu.wantjit
def test_ii_known_preds():
from lenskit import batch
algo = knn.ItemItem(20, min_sim=1.0e-6)
_log.info('training %s on ml data', algo)
algo.fit(lktu.ml_pandas.renamed.ratings)
assert algo.center
assert algo.item_means_ is not None
_log.info('model means: %s', algo.item_means_)
dir = Path(__file__).parent
pred_file = dir / 'item-item-preds.csv'
_log.info('reading known predictions from %s', pred_file)
known_preds = pd.read_csv(str(pred_file))
pairs = known_preds.loc[:, ['user', 'item']]
preds = batch.predict(algo, pairs)
merged = pd.merge(known_preds.rename(columns={'prediction': 'expected'}), preds)
assert len(merged) == len(preds)
merged['error'] = merged.expected - merged.prediction
assert not any(merged.prediction.isna() & merged.expected.notna())
err = merged.error
err = err[err.notna()]
try:
assert all(err.abs() < 0.03) # FIXME this threshold is too high
except AssertionError as e:
bad = merged[merged.error.notna() & (merged.error.abs() >= 0.01)]
_log.error('erroneous predictions:\n%s', bad)
raise e
@lktu.wantjit
@mark.slow
@mark.eval
@mark.parametrize('ncpus', [1, 2])
def test_ii_batch_recommend(ncpus):
import lenskit.crossfold as xf
from lenskit import batch, topn
if not os.path.exists('ml-100k/u.data'):
raise pytest.skip()
ratings = pd.read_csv('ml-100k/u.data', sep='\t', names=['user', 'item', 'rating', 'timestamp'])
algo = knn.ItemItem(30)
def eval(train, test):
_log.info('running training')
algo.fit(train)
_log.info('testing %d users', test.user.nunique())
cand_fun = topn.UnratedCandidates(train)
recs = batch.recommend(algo, test.user.unique(), 100, cand_fun, nprocs=ncpus)
return recs
test_frames = []
recs = []
for train, test in xf.partition_users(ratings, 5, xf.SampleFrac(0.2)):
test_frames.append(test)
recs.append(eval(train, test))
test = pd.concat(test_frames)
recs = pd.concat(recs)
_log.info('analyzing recommendations')
rla = topn.RecListAnalysis()
rla.add_metric(topn.ndcg)
results = rla.compute(recs, test)
dcg = results.ndcg
_log.info('nDCG for %d users is %f', len(dcg), dcg.mean())
assert dcg.mean() > 0.03
| 17,725 | 31.704797 | 100 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_knn_user_user.py | import lenskit.algorithms.user_knn as knn
from pathlib import Path
import logging
import pickle
import pandas as pd
import numpy as np
from scipy import sparse as sps
from pytest import approx, mark
import lk_test_utils as lktu
_log = logging.getLogger(__name__)
ml_ratings = lktu.ml_pandas.renamed.ratings
def test_uu_train():
algo = knn.UserUser(30)
ret = algo.fit(ml_ratings)
assert ret is algo
# it should have computed correct means
umeans = ml_ratings.groupby('user').rating.mean()
mlmeans = pd.Series(algo.user_means_, index=algo.user_index_, name='mean')
umeans, mlmeans = umeans.align(mlmeans)
assert mlmeans.values == approx(umeans.values)
# we should be able to reconstruct rating values
uir = ml_ratings.set_index(['user', 'item']).rating
r_items = algo.transpose_matrix_.rowinds()
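    # transpose_matrix_ is item-major: rows are items, column indices are users, values are mean-centered ratings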
ui_rbdf = pd.DataFrame({
'user': algo.user_index_[algo.transpose_matrix_.colinds],
'item': algo.item_index_[r_items],
'nrating': algo.transpose_matrix_.values
}).set_index(['user', 'item'])
ui_rbdf = ui_rbdf.join(mlmeans)
ui_rbdf['rating'] = ui_rbdf['nrating'] + ui_rbdf['mean']
ui_rbdf['orig_rating'] = uir
assert ui_rbdf.rating.values == approx(ui_rbdf.orig_rating.values)
def test_uu_predict_one():
algo = knn.UserUser(30)
algo.fit(ml_ratings)
preds = algo.predict_for_user(4, [1016])
assert len(preds) == 1
assert preds.index == [1016]
assert preds.values == approx([3.62221550680778])
def test_uu_predict_too_few():
algo = knn.UserUser(30, min_nbrs=2)
algo.fit(ml_ratings)
preds = algo.predict_for_user(4, [2091])
assert len(preds) == 1
assert preds.index == [2091]
assert all(preds.isna())
def test_uu_predict_too_few_blended():
algo = knn.UserUser(30, min_nbrs=2)
algo.fit(ml_ratings)
preds = algo.predict_for_user(4, [1016, 2091])
assert len(preds) == 2
assert np.isnan(preds.loc[2091])
assert preds.loc[1016] == approx(3.62221550680778)
def test_uu_predict_live_ratings():
algo = knn.UserUser(30, min_nbrs=2)
no4 = ml_ratings[ml_ratings.user != 4]
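    # train without user 4, then supply that user's ratings at prediction time under a fresh user id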
algo.fit(no4)
ratings = ml_ratings[ml_ratings.user == 4].set_index('item').rating
preds = algo.predict_for_user(20381, [1016, 2091], ratings)
assert len(preds) == 2
assert np.isnan(preds.loc[2091])
assert preds.loc[1016] == approx(3.62221550680778)
def test_uu_save_load(tmp_path):
orig = knn.UserUser(30)
_log.info('training model')
orig.fit(ml_ratings)
fn = tmp_path / 'uu.model'
_log.info('saving to %s', fn)
with fn.open('wb') as f:
pickle.dump(orig, f)
_log.info('reloading model')
with fn.open('rb') as f:
algo = pickle.load(f)
_log.info('checking model')
# it should have computed correct means
umeans = ml_ratings.groupby('user').rating.mean()
mlmeans = pd.Series(algo.user_means_, index=algo.user_index_, name='mean')
umeans, mlmeans = umeans.align(mlmeans)
assert mlmeans.values == approx(umeans.values)
# we should be able to reconstruct rating values
uir = ml_ratings.set_index(['user', 'item']).rating
r_items = algo.transpose_matrix_.rowinds()
ui_rbdf = pd.DataFrame({
'user': algo.user_index_[algo.transpose_matrix_.colinds],
'item': algo.item_index_[r_items],
'nrating': algo.transpose_matrix_.values
}).set_index(['user', 'item'])
ui_rbdf = ui_rbdf.join(mlmeans)
ui_rbdf['rating'] = ui_rbdf['nrating'] + ui_rbdf['mean']
ui_rbdf['orig_rating'] = uir
assert ui_rbdf.rating.values == approx(ui_rbdf.orig_rating.values)
# running the predictor should work
preds = algo.predict_for_user(4, [1016])
assert len(preds) == 1
assert preds.index == [1016]
assert preds.values == approx([3.62221550680778])
def test_uu_predict_unknown_empty():
algo = knn.UserUser(30, min_nbrs=2)
algo.fit(ml_ratings)
preds = algo.predict_for_user(-28018, [1016, 2091])
assert len(preds) == 2
assert all(preds.isna())
def test_uu_implicit():
"Train and use user-user on an implicit data set."
algo = knn.UserUser(20, center=False, aggregate='sum')
data = ml_ratings.loc[:, ['user', 'item']]
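    # drop the rating column so the algorithm is trained on implicit (unary) feedback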
algo.fit(data)
assert algo.user_means_ is None
mat = algo.rating_matrix_.to_scipy()
norms = sps.linalg.norm(mat, 2, 1)
assert norms == approx(1.0)
preds = algo.predict_for_user(50, [1, 2, 42])
assert all(preds[preds.notna()] > 0)
@mark.slow
def test_uu_save_load_implicit(tmp_path):
"Save and load user-user on an implicit data set."
orig = knn.UserUser(20, center=False, aggregate='sum')
data = ml_ratings.loc[:, ['user', 'item']]
orig.fit(data)
ser = pickle.dumps(orig)
algo = pickle.loads(ser)
assert algo.user_means_ is None
assert all(algo.user_index_ == orig.user_index_)
assert all(algo.item_index_ == orig.item_index_)
assert all(algo.rating_matrix_.rowptrs == orig.rating_matrix_.rowptrs)
assert all(algo.rating_matrix_.colinds == orig.rating_matrix_.colinds)
assert all(algo.rating_matrix_.values == orig.rating_matrix_.values)
assert all(algo.transpose_matrix_.rowptrs == orig.transpose_matrix_.rowptrs)
assert all(algo.transpose_matrix_.colinds == orig.transpose_matrix_.colinds)
assert algo.transpose_matrix_.values is None
@mark.slow
def test_uu_known_preds():
from lenskit import batch
algo = knn.UserUser(30, min_sim=1.0e-6)
_log.info('training %s on ml data', algo)
algo.fit(lktu.ml_pandas.renamed.ratings)
dir = Path(__file__).parent
pred_file = dir / 'user-user-preds.csv'
_log.info('reading known predictions from %s', pred_file)
known_preds = pd.read_csv(str(pred_file))
pairs = known_preds.loc[:, ['user', 'item']]
preds = batch.predict(algo, pairs)
merged = pd.merge(known_preds.rename(columns={'prediction': 'expected'}), preds)
assert len(merged) == len(preds)
merged['error'] = merged.expected - merged.prediction
assert not any(merged.prediction.isna() & merged.expected.notna())
err = merged.error
err = err[err.notna()]
try:
assert all(err.abs() < 0.01)
except AssertionError as e:
bad = merged[merged.error.notna() & (merged.error.abs() >= 0.01)]
_log.error('erroneous predictions:\n%s', bad)
raise e
def __batch_eval(job):
from lenskit import batch
algo, train, test = job
_log.info('running training')
algo.fit(train)
_log.info('testing %d users', test.user.nunique())
return batch.predict(algo, test)
@mark.slow
@mark.eval
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_uu_batch_accuracy():
from lenskit.algorithms import basic
import lenskit.crossfold as xf
import lenskit.metrics.predict as pm
ratings = lktu.ml100k.load_ratings()
uu_algo = knn.UserUser(30)
algo = basic.Fallback(uu_algo, basic.Bias())
folds = xf.partition_users(ratings, 5, xf.SampleFrac(0.2))
preds = [__batch_eval((algo, train, test)) for (train, test) in folds]
preds = pd.concat(preds)
mae = pm.mae(preds.prediction, preds.rating)
assert mae == approx(0.71, abs=0.028)
user_rmse = preds.groupby('user').apply(lambda df: pm.rmse(df.prediction, df.rating))
assert user_rmse.mean() == approx(0.91, abs=0.055)
@mark.slow
@mark.eval
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_uu_implicit_batch_accuracy():
from lenskit import batch, topn
import lenskit.crossfold as xf
ratings = lktu.ml100k.load_ratings()
algo = knn.UserUser(30, center=False, aggregate='sum')
folds = list(xf.partition_users(ratings, 5, xf.SampleFrac(0.2)))
all_test = pd.concat(f.test for f in folds)
rec_lists = []
for train, test in folds:
_log.info('running training')
algo.fit(train.loc[:, ['user', 'item']])
cands = topn.UnratedCandidates(train)
_log.info('testing %d users', test.user.nunique())
recs = batch.recommend(algo, test.user.unique(), 100, cands, nprocs=2)
rec_lists.append(recs)
recs = pd.concat(rec_lists)
rla = topn.RecListAnalysis()
rla.add_metric(topn.ndcg)
results = rla.compute(recs, all_test)
user_dcg = results.ndcg
dcg = user_dcg.mean()
assert dcg >= 0.03
| 8,382 | 29.933579 | 89 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_batch_recommend.py | import pytest
import os
import os.path
from collections import namedtuple
import logging
import pandas as pd
import numpy as np
import lk_test_utils as lktu
from lenskit.algorithms.basic import Bias, TopN
import lenskit.batch as lkb
MLB = namedtuple('MLB', ['ratings', 'algo'])
_log = logging.getLogger(__name__)
@pytest.fixture
def mlb():
ratings = lktu.ml_pandas.renamed.ratings
algo = TopN(Bias())
algo.fit(ratings)
return MLB(ratings, algo)
def test_recommend_single(mlb):
res = lkb.recommend(mlb.algo, [1], None, {1: [31]})
assert len(res) == 1
assert all(res['user'] == 1)
assert all(res['rank'] == 1)
assert set(res.columns) == set(['user', 'rank', 'item', 'score'])
algo = mlb.algo.predictor
expected = algo.mean_ + algo.item_offsets_.loc[31] + algo.user_offsets_.loc[1]
assert res.score.iloc[0] == pytest.approx(expected)
def test_recommend_user(mlb):
uid = 5
items = mlb.ratings.item.unique()
def candidates(user):
urs = mlb.ratings[mlb.ratings.user == user]
return np.setdiff1d(items, urs.item.unique())
res = lkb.recommend(mlb.algo, [5], 10, candidates)
assert len(res) == 10
assert set(res.columns) == set(['user', 'rank', 'item', 'score'])
assert all(res['user'] == uid)
assert all(res['rank'] == np.arange(10) + 1)
# they should be in decreasing order
assert all(np.diff(res.score) <= 0)
def test_recommend_two_users(mlb):
items = mlb.ratings.item.unique()
def candidates(user):
urs = mlb.ratings[mlb.ratings.user == user]
return np.setdiff1d(items, urs.item.unique())
res = lkb.recommend(mlb.algo, [5, 10], 10, candidates)
assert len(res) == 20
assert set(res.user) == set([5, 10])
assert all(res.groupby('user').item.count() == 10)
assert all(res.groupby('user')['rank'].max() == 10)
assert all(np.diff(res[res.user == 5].score) <= 0)
assert all(np.diff(res[res.user == 5]['rank']) == 1)
assert all(np.diff(res[res.user == 10].score) <= 0)
assert all(np.diff(res[res.user == 10]['rank']) == 1)
def test_recommend_no_cands(mlb):
res = lkb.recommend(mlb.algo, [5, 10], 10)
assert len(res) == 20
assert set(res.user) == set([5, 10])
assert all(res.groupby('user').item.count() == 10)
assert all(res.groupby('user')['rank'].max() == 10)
assert all(np.diff(res[res.user == 5].score) <= 0)
assert all(np.diff(res[res.user == 5]['rank']) == 1)
assert all(np.diff(res[res.user == 10].score) <= 0)
assert all(np.diff(res[res.user == 10]['rank']) == 1)
idx_rates = mlb.ratings.set_index(['user', 'item'])
merged = res.join(idx_rates, on=['user', 'item'], how='inner')
assert len(merged) == 0
@pytest.mark.eval
def test_bias_batch_recommend():
from lenskit.algorithms import basic
import lenskit.crossfold as xf
from lenskit import batch, topn
if not os.path.exists('ml-100k/u.data'):
raise pytest.skip()
ratings = pd.read_csv('ml-100k/u.data', sep='\t', names=['user', 'item', 'rating', 'timestamp'])
algo = basic.Bias(damping=5)
algo = TopN(algo)
def eval(train, test):
_log.info('running training')
algo.fit(train)
_log.info('testing %d users', test.user.nunique())
recs = batch.recommend(algo, test.user.unique(), 100)
return recs
folds = list(xf.partition_users(ratings, 5, xf.SampleFrac(0.2)))
test = pd.concat(y for (x, y) in folds)
recs = pd.concat(eval(train, test) for (train, test) in folds)
_log.info('analyzing recommendations')
rla = topn.RecListAnalysis()
rla.add_metric(topn.ndcg)
results = rla.compute(recs, test)
dcg = results.ndcg
_log.info('nDCG for %d users is %f (max=%f)', len(dcg), dcg.mean(), dcg.max())
assert dcg.mean() > 0
@pytest.mark.parametrize('ncpus', [None, 2])
@pytest.mark.eval
def test_pop_batch_recommend(ncpus):
from lenskit.algorithms import basic
import lenskit.crossfold as xf
from lenskit import batch, topn
if not os.path.exists('ml-100k/u.data'):
raise pytest.skip()
ratings = pd.read_csv('ml-100k/u.data', sep='\t', names=['user', 'item', 'rating', 'timestamp'])
algo = basic.Popular()
def eval(train, test):
_log.info('running training')
algo.fit(train)
_log.info('testing %d users', test.user.nunique())
recs = batch.recommend(algo, test.user.unique(), 100,
nprocs=ncpus)
return recs
folds = list(xf.partition_users(ratings, 5, xf.SampleFrac(0.2)))
test = pd.concat(f.test for f in folds)
recs = pd.concat(eval(train, test) for (train, test) in folds)
_log.info('analyzing recommendations')
rla = topn.RecListAnalysis()
rla.add_metric(topn.ndcg)
results = rla.compute(recs, test)
dcg = results.ndcg
_log.info('NDCG for %d users is %f (max=%f)', len(dcg), dcg.mean(), dcg.max())
assert dcg.mean() > 0
| 4,975 | 29.527607 | 100 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_hpf.py | import logging
import pickle
from lenskit.algorithms import hpf, basic
import pandas as pd
import numpy as np
from pytest import mark
import lk_test_utils as lktu
try:
import hpfrec
have_hpfrec = True
except ImportError:
have_hpfrec = False
_log = logging.getLogger(__name__)
simple_df = pd.DataFrame({'item': [1, 1, 2, 3],
'user': [10, 12, 10, 13],
'rating': [4.0, 3.0, 5.0, 2.0]})
@mark.slow
@mark.skipif(not have_hpfrec, reason='hpfrec not installed')
def test_hpf_train_large(tmp_path):
algo = hpf.HPF(20)
ratings = lktu.ml_pandas.renamed.ratings
ratings = ratings.assign(rating=ratings.rating + 0.5)
algo.fit(ratings)
assert algo.n_users == ratings.user.nunique()
assert algo.n_items == ratings.item.nunique()
mfile = tmp_path / 'hpf.dat'
with mfile.open('wb') as mf:
pickle.dump(algo, mf)
with mfile.open('rb') as mf:
a2 = pickle.load(mf)
assert a2.n_users == algo.n_users
assert a2.n_items == algo.n_items
csel = basic.UnratedItemCandidateSelector()
csel.fit(ratings)
rec = basic.TopN(algo, csel)
for u in np.random.choice(ratings.user.unique(), size=50, replace=False):
recs = rec.recommend(u, 50)
assert len(recs) == 50
assert recs.item.nunique() == 50
| 1,341 | 23.4 | 77 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_matrix.py | import scipy.sparse as sps
import scipy.linalg as sla
import numpy as np
import lenskit.matrix as lm
import lk_test_utils as lktu
from pytest import approx
def test_sparse_matrix():
ratings = lktu.ml_pandas.renamed.ratings
mat, uidx, iidx = lm.sparse_ratings(ratings)
assert mat.nrows == len(uidx)
assert mat.nrows == ratings.user.nunique()
assert mat.ncols == len(iidx)
assert mat.ncols == ratings.item.nunique()
# user indicators should correspond to user item counts
ucounts = ratings.groupby('user').item.count()
ucounts = ucounts.loc[uidx].cumsum()
assert all(mat.rowptrs[1:] == ucounts.values)
def test_sparse_matrix_implicit():
ratings = lktu.ml_pandas.renamed.ratings
ratings = ratings.loc[:, ['user', 'item']]
mat, uidx, iidx = lm.sparse_ratings(ratings)
assert mat.nrows == len(uidx)
assert mat.nrows == ratings.user.nunique()
assert mat.ncols == len(iidx)
assert mat.ncols == ratings.item.nunique()
assert mat.values is None
def test_sparse_matrix_scipy():
ratings = lktu.ml_pandas.renamed.ratings
mat, uidx, iidx = lm.sparse_ratings(ratings, scipy=True)
assert sps.issparse(mat)
assert sps.isspmatrix_csr(mat)
assert len(uidx) == ratings.user.nunique()
assert len(iidx) == ratings.item.nunique()
# user indicators should correspond to user item counts
ucounts = ratings.groupby('user').item.count()
ucounts = ucounts.loc[uidx].cumsum()
assert all(mat.indptr[1:] == ucounts.values)
def test_sparse_matrix_scipy_implicit():
ratings = lktu.ml_pandas.renamed.ratings
ratings = ratings.loc[:, ['user', 'item']]
mat, uidx, iidx = lm.sparse_ratings(ratings, scipy=True)
assert sps.issparse(mat)
assert sps.isspmatrix_csr(mat)
assert len(uidx) == ratings.user.nunique()
assert len(iidx) == ratings.item.nunique()
assert all(mat.data == 1.0)
| 1,907 | 28.353846 | 60 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_matrix_mkl.py | import numpy as np
import scipy.sparse as sps
from pytest import mark, approx
import lenskit.matrix as lm
mkl_ops = lm.mkl_ops()
@mark.skipif(mkl_ops is None, reason='MKL not available')
def test_mkl_mult_vec():
for i in range(50):
m = np.random.randint(5, 100)
n = np.random.randint(5, 100)
M = np.random.randn(m, n)
M[M <= 0] = 0
s = sps.csr_matrix(M)
assert s.nnz == np.sum(M > 0)
csr = lm.CSR.from_scipy(s)
mklM = mkl_ops.SparseM.from_csr(csr)
x = np.random.randn(n)
y = np.zeros(m)
y = mklM.mult_vec(1, x, 0, y)
assert len(y) == m
y2 = s @ x
assert y == approx(y2)
@mark.skipif(mkl_ops is None, reason='MKL not available')
def test_mkl_syrk():
for i in range(50):
M = np.random.randn(10, 5)
M[M <= 0] = 0
s = sps.csr_matrix(M)
assert s.nnz == np.sum(M > 0)
csr = lm.CSR.from_scipy(s)
ctc = mkl_ops.csr_syrk(csr)
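        # the product holds a single triangle of M^T M; symmetrize it and undo the doubled diagonal before comparing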
res = ctc.to_scipy().toarray()
res = res.T + res
rd = np.diagonal(res)
res = res - np.diagflat(rd) * 0.5
mtm = M.T @ M
assert res == approx(mtm)
| 1,193 | 21.528302 | 57 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_crossfold.py | import itertools as it
import functools as ft
import pytest
import math
import numpy as np
import lk_test_utils as lktu
import lenskit.crossfold as xf
def test_partition_rows():
ratings = lktu.ml_pandas.renamed.ratings
splits = xf.partition_rows(ratings, 5)
splits = list(splits)
assert len(splits) == 5
for s in splits:
assert len(s.test) + len(s.train) == len(ratings)
assert all(s.test.index.union(s.train.index) == ratings.index)
test_idx = s.test.set_index(['user', 'item']).index
train_idx = s.train.set_index(['user', 'item']).index
assert len(test_idx.intersection(train_idx)) == 0
# we should partition!
for s1, s2 in it.product(splits, splits):
if s1 is s2:
continue
i1 = s1.test.set_index(['user', 'item']).index
i2 = s2.test.set_index(['user', 'item']).index
inter = i1.intersection(i2)
assert len(inter) == 0
union = ft.reduce(lambda i1, i2: i1.union(i2), (s.test.index for s in splits))
assert len(union.unique()) == len(ratings)
def test_sample_rows():
ratings = lktu.ml_pandas.renamed.ratings
splits = xf.sample_rows(ratings, partitions=5, size=1000)
splits = list(splits)
assert len(splits) == 5
for s in splits:
assert len(s.test) == 1000
assert len(s.test) + len(s.train) == len(ratings)
test_idx = s.test.set_index(['user', 'item']).index
train_idx = s.train.set_index(['user', 'item']).index
assert len(test_idx.intersection(train_idx)) == 0
for s1, s2 in it.product(splits, splits):
if s1 is s2:
continue
i1 = s1.test.set_index(['user', 'item']).index
i2 = s2.test.set_index(['user', 'item']).index
inter = i1.intersection(i2)
assert len(inter) == 0
def test_sample_rows_more_smaller_parts():
ratings = lktu.ml_pandas.renamed.ratings
splits = xf.sample_rows(ratings, partitions=10, size=500)
splits = list(splits)
assert len(splits) == 10
for s in splits:
assert len(s.test) == 500
assert len(s.test) + len(s.train) == len(ratings)
test_idx = s.test.set_index(['user', 'item']).index
train_idx = s.train.set_index(['user', 'item']).index
assert len(test_idx.intersection(train_idx)) == 0
for s1, s2 in it.product(splits, splits):
if s1 is s2:
continue
i1 = s1.test.set_index(['user', 'item']).index
i2 = s2.test.set_index(['user', 'item']).index
inter = i1.intersection(i2)
assert len(inter) == 0
def test_sample_non_disjoint():
ratings = lktu.ml_pandas.renamed.ratings
splits = xf.sample_rows(ratings, partitions=10, size=1000, disjoint=False)
splits = list(splits)
assert len(splits) == 10
for s in splits:
assert len(s.test) == 1000
assert len(s.test) + len(s.train) == len(ratings)
test_idx = s.test.set_index(['user', 'item']).index
train_idx = s.train.set_index(['user', 'item']).index
assert len(test_idx.intersection(train_idx)) == 0
# There are enough splits & items we should pick at least one duplicate
    ipairs = ((s1.test.set_index(['user', 'item']).index, s2.test.set_index(['user', 'item']).index)
for (s1, s2) in it.product(splits, splits))
isizes = [len(i1.intersection(i2)) for (i1, i2) in ipairs]
assert any(n > 0 for n in isizes)
@pytest.mark.slow
def test_sample_oversize():
ratings = lktu.ml_pandas.renamed.ratings
splits = xf.sample_rows(ratings, 150, 1000)
splits = list(splits)
assert len(splits) == 150
for s in splits:
assert len(s.test) + len(s.train) == len(ratings)
assert all(s.test.index.union(s.train.index) == ratings.index)
test_idx = s.test.set_index(['user', 'item']).index
train_idx = s.train.set_index(['user', 'item']).index
assert len(test_idx.intersection(train_idx)) == 0
def test_sample_n():
ratings = lktu.ml_pandas.renamed.ratings
users = np.random.choice(ratings.user.unique(), 5, replace=False)
s5 = xf.SampleN(5)
for u in users:
udf = ratings[ratings.user == u]
tst = s5(udf)
trn = udf.loc[udf.index.difference(tst.index), :]
assert len(tst) == 5
assert len(tst) + len(trn) == len(udf)
s10 = xf.SampleN(10)
for u in users:
udf = ratings[ratings.user == u]
tst = s10(udf)
trn = udf.loc[udf.index.difference(tst.index), :]
assert len(tst) == 10
assert len(tst) + len(trn) == len(udf)
def test_sample_frac():
ratings = lktu.ml_pandas.renamed.ratings
users = np.random.choice(ratings.user.unique(), 5, replace=False)
samp = xf.SampleFrac(0.2)
for u in users:
udf = ratings[ratings.user == u]
tst = samp(udf)
trn = udf.loc[udf.index.difference(tst.index), :]
assert len(tst) + len(trn) == len(udf)
assert len(tst) >= math.floor(len(udf) * 0.2)
assert len(tst) <= math.ceil(len(udf) * 0.2)
samp = xf.SampleFrac(0.5)
for u in users:
udf = ratings[ratings.user == u]
tst = samp(udf)
trn = udf.loc[udf.index.difference(tst.index), :]
assert len(tst) + len(trn) == len(udf)
assert len(tst) >= math.floor(len(udf) * 0.5)
assert len(tst) <= math.ceil(len(udf) * 0.5)
def test_last_n():
ratings = lktu.ml_pandas.renamed.ratings
users = np.random.choice(ratings.user.unique(), 5, replace=False)
samp = xf.LastN(5)
for u in users:
udf = ratings[ratings.user == u]
tst = samp(udf)
trn = udf.loc[udf.index.difference(tst.index), :]
assert len(tst) == 5
assert len(tst) + len(trn) == len(udf)
assert tst.timestamp.min() >= trn.timestamp.max()
samp = xf.LastN(7)
for u in users:
udf = ratings[ratings.user == u]
tst = samp(udf)
trn = udf.loc[udf.index.difference(tst.index), :]
assert len(tst) == 7
assert len(tst) + len(trn) == len(udf)
assert tst.timestamp.min() >= trn.timestamp.max()
def test_last_frac():
ratings = lktu.ml_pandas.renamed.ratings
users = np.random.choice(ratings.user.unique(), 5, replace=False)
samp = xf.LastFrac(0.2, 'timestamp')
for u in users:
udf = ratings[ratings.user == u]
tst = samp(udf)
trn = udf.loc[udf.index.difference(tst.index), :]
assert len(tst) + len(trn) == len(udf)
assert len(tst) >= math.floor(len(udf) * 0.2)
assert len(tst) <= math.ceil(len(udf) * 0.2)
assert tst.timestamp.min() >= trn.timestamp.max()
samp = xf.LastFrac(0.5, 'timestamp')
for u in users:
udf = ratings[ratings.user == u]
tst = samp(udf)
trn = udf.loc[udf.index.difference(tst.index), :]
assert len(tst) + len(trn) == len(udf)
assert len(tst) >= math.floor(len(udf) * 0.5)
assert len(tst) <= math.ceil(len(udf) * 0.5)
assert tst.timestamp.min() >= trn.timestamp.max()
def test_partition_users():
ratings = lktu.ml_pandas.renamed.ratings
splits = xf.partition_users(ratings, 5, xf.SampleN(5))
splits = list(splits)
assert len(splits) == 5
for s in splits:
ucounts = s.test.groupby('user').agg('count')
assert all(ucounts == 5)
assert all(s.test.index.union(s.train.index) == ratings.index)
assert len(s.test) + len(s.train) == len(ratings)
users = ft.reduce(lambda us1, us2: us1 | us2,
(set(s.test.user) for s in splits))
assert len(users) == ratings.user.nunique()
assert users == set(ratings.user)
def test_partition_users_frac():
ratings = lktu.ml_pandas.renamed.ratings
splits = xf.partition_users(ratings, 5, xf.SampleFrac(0.2))
splits = list(splits)
assert len(splits) == 5
ucounts = ratings.groupby('user').item.count()
uss = ucounts * 0.2
for s in splits:
tucs = s.test.groupby('user').item.count()
assert all(tucs >= uss.loc[tucs.index] - 1)
assert all(tucs <= uss.loc[tucs.index] + 1)
assert all(s.test.index.union(s.train.index) == ratings.index)
assert len(s.test) + len(s.train) == len(ratings)
# we have all users
users = ft.reduce(lambda us1, us2: us1 | us2,
(set(s.test.user) for s in splits))
assert len(users) == ratings.user.nunique()
assert users == set(ratings.user)
def test_sample_users():
ratings = lktu.ml_pandas.renamed.ratings
splits = xf.sample_users(ratings, 5, 100, xf.SampleN(5))
splits = list(splits)
assert len(splits) == 5
for s in splits:
ucounts = s.test.groupby('user').agg('count')
assert len(s.test) == 5 * 100
assert len(ucounts) == 100
assert all(ucounts == 5)
assert all(s.test.index.union(s.train.index) == ratings.index)
assert len(s.test) + len(s.train) == len(ratings)
# no overlapping users
for s1, s2 in it.product(splits, splits):
if s1 is s2:
continue
us1 = s1.test.user.unique()
us2 = s2.test.user.unique()
assert len(np.intersect1d(us1, us2)) == 0
def test_sample_users_frac():
ratings = lktu.ml_pandas.renamed.ratings
splits = xf.sample_users(ratings, 5, 100, xf.SampleFrac(0.2))
splits = list(splits)
assert len(splits) == 5
ucounts = ratings.groupby('user').item.count()
uss = ucounts * 0.2
for s in splits:
tucs = s.test.groupby('user').item.count()
assert len(tucs) == 100
assert all(tucs >= uss.loc[tucs.index] - 1)
assert all(tucs <= uss.loc[tucs.index] + 1)
assert all(s.test.index.union(s.train.index) == ratings.index)
assert len(s.test) + len(s.train) == len(ratings)
# no overlapping users
for s1, s2 in it.product(splits, splits):
if s1 is s2:
continue
us1 = s1.test.user.unique()
us2 = s2.test.user.unique()
assert len(np.intersect1d(us1, us2)) == 0
@pytest.mark.slow
def test_sample_users_frac_oversize():
ratings = lktu.ml_pandas.renamed.ratings
splits = xf.sample_users(ratings, 20, 100, xf.SampleN(5))
splits = list(splits)
assert len(splits) == 20
for s in splits:
ucounts = s.test.groupby('user').agg('count')
assert len(ucounts) < 100
assert all(ucounts == 5)
assert all(s.test.index.union(s.train.index) == ratings.index)
assert len(s.test) + len(s.train) == len(ratings)
users = ft.reduce(lambda us1, us2: us1 | us2,
(set(s.test.user) for s in splits))
assert len(users) == ratings.user.nunique()
assert users == set(ratings.user)
for s1, s2 in it.product(splits, splits):
if s1 is s2:
continue
us1 = s1.test.user.unique()
us2 = s2.test.user.unique()
assert len(np.intersect1d(us1, us2)) == 0
def test_sample_users_frac_oversize_ndj():
ratings = lktu.ml_pandas.renamed.ratings
splits = xf.sample_users(ratings, 20, 100, xf.SampleN(5), disjoint=False)
splits = list(splits)
assert len(splits) == 20
for s in splits:
ucounts = s.test.groupby('user').agg('count')
assert len(ucounts) == 100
assert len(s.test) == 5 * 100
assert all(ucounts == 5)
assert all(s.test.index.union(s.train.index) == ratings.index)
assert len(s.test) + len(s.train) == len(ratings)
| 11,496 | 32.616959 | 96 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_math_solve.py | import os
import numpy as np
import scipy.linalg as sla
from pytest import approx
from lenskit.math.solve import solve_tri, dposv
_runs = int(os.environ.get('RAND_TEST_ITERS', 10))
def test_solve_ltri():
for i in range(_runs):
size = np.random.randint(5, 50)
Af = np.random.randn(size, size)
b = np.random.randn(size)
A = np.tril(Af)
x = solve_tri(A, b)
assert len(x) == size
xexp = sla.solve_triangular(A, b, lower=True)
assert x == approx(xexp, rel=1.0e-6)
def test_solve_ltri_transpose():
for i in range(_runs):
size = np.random.randint(5, 50)
Af = np.random.randn(size, size)
b = np.random.randn(size)
A = np.tril(Af)
x = solve_tri(A, b, True)
assert len(x) == size
xexp = sla.solve_triangular(A.T, b, lower=False)
assert x == approx(xexp, rel=1.0e-6)
def test_solve_utri():
for i in range(_runs):
size = np.random.randint(5, 50)
Af = np.random.randn(size, size)
b = np.random.randn(size)
A = np.triu(Af)
x = solve_tri(A, b, lower=False)
assert len(x) == size
xexp = sla.solve_triangular(A, b, lower=False)
assert x == approx(xexp, rel=1.0e-6)
def test_solve_utri_transpose():
for i in range(_runs):
size = np.random.randint(5, 50)
Af = np.random.randn(size, size)
b = np.random.randn(size)
A = np.triu(Af)
x = solve_tri(A, b, True, lower=False)
assert len(x) == size
xexp = sla.solve_triangular(A.T, b, lower=True)
assert x == approx(xexp, rel=1.0e-6)
def test_solve_cholesky():
for i in range(_runs):
size = np.random.randint(5, 50)
A = np.random.randn(size, size)
b = np.random.randn(size)
# square values of A
A = A * A
# and solve
xexp, resid, rank, s = np.linalg.lstsq(A, b)
# chol solve
L = np.linalg.cholesky(A.T @ A)
w = solve_tri(L, A.T @ b)
x = solve_tri(L, w, transpose=True)
assert x == approx(xexp, abs=1.0e-3)
def test_solve_dposv():
for i in range(_runs):
size = np.random.randint(5, 50)
A = np.random.randn(size, size)
b = np.random.randn(size)
# square values of A
A = A * A
# and solve
xexp, resid, rank, s = np.linalg.lstsq(A, b)
F = A.T @ A
x = A.T @ b
dposv(F, x, True)
assert x == approx(xexp, rel=1.0e-3)
| 2,527 | 23.307692 | 56 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_topn_ndcg.py | import numpy as np
import pandas as pd
from pytest import approx
from lenskit.metrics.topn import _dcg, ndcg
import lk_test_utils as lktu
def test_dcg_empty():
"empty should be zero"
assert _dcg(np.array([])) == approx(0)
def test_dcg_zeros():
assert _dcg(np.zeros(10)) == approx(0)
def test_dcg_single():
"a single element should be scored at the right place"
assert _dcg(np.array([0.5])) == approx(0.5)
assert _dcg(np.array([0, 0.5])) == approx(0.5)
assert _dcg(np.array([0, 0, 0.5])) == approx(0.5 / np.log2(3))
assert _dcg(np.array([0, 0, 0.5, 0])) == approx(0.5 / np.log2(3))
def test_dcg_mult():
"multiple elements should score correctly"
assert _dcg(np.array([np.e, np.pi])) == approx(np.e + np.pi)
assert _dcg(np.array([np.e, 0, 0, np.pi])) == approx(np.e + np.pi / np.log2(4))
def test_dcg_empty2():
"empty should be zero"
assert _dcg(np.array([])) == approx(0)
def test_dcg_zeros2():
assert _dcg(np.zeros(10)) == approx(0)
def test_dcg_single2():
"a single element should be scored at the right place"
assert _dcg(np.array([0.5])) == approx(0.5)
assert _dcg(np.array([0, 0.5])) == approx(0.5)
assert _dcg(np.array([0, 0, 0.5])) == approx(0.5 / np.log2(3))
assert _dcg(np.array([0, 0, 0.5, 0])) == approx(0.5 / np.log2(3))
def test_dcg_nan():
"NANs should be 0"
assert _dcg(np.array([np.nan, 0.5])) == approx(0.5)
def test_dcg_series():
"The DCG function should work on a series"
assert _dcg(pd.Series([np.e, 0, 0, np.pi])) == \
approx((np.e + np.pi / np.log2(4)))
def test_dcg_mult2():
"multiple elements should score correctly"
assert _dcg(np.array([np.e, np.pi])) == approx(np.e + np.pi)
assert _dcg(np.array([np.e, 0, 0, np.pi])) == \
approx((np.e + np.pi / np.log2(4)))
def test_ndcg_empty():
recs = pd.DataFrame({'item': []})
truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
truth = truth.set_index('item')
assert ndcg(recs, truth) == approx(0.0)
def test_ndcg_no_match():
recs = pd.DataFrame({'item': [4]})
truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
truth = truth.set_index('item')
assert ndcg(recs, truth) == approx(0.0)
def test_ndcg_perfect():
recs = pd.DataFrame({'item': [2, 3, 1]})
truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
truth = truth.set_index('item')
assert ndcg(recs, truth) == approx(1.0)
def test_ndcg_wrong():
recs = pd.DataFrame({'item': [1, 2]})
truth = pd.DataFrame({'item': [1, 2, 3], 'rating': [3.0, 5.0, 4.0]})
truth = truth.set_index('item')
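    # Expected: the DCG of the matched ratings in recommendation order, divided
    # by the ideal DCG over all relevant ratings.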
    assert ndcg(recs, truth) == approx(_dcg([3.0, 5.0]) / _dcg([5.0, 4.0, 3.0]))
| 2,744 | 28.202128 | 83 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_als_implicit.py | import logging
import pickle
from lenskit import topn
from lenskit.algorithms import als
import pandas as pd
import numpy as np
from pytest import mark
import lk_test_utils as lktu
_log = logging.getLogger(__name__)
simple_df = pd.DataFrame({'item': [1, 1, 2, 3],
'user': [10, 12, 10, 13],
'rating': [4.0, 3.0, 5.0, 2.0]})
def test_als_basic_build():
algo = als.ImplicitMF(20, iterations=10)
algo.fit(simple_df)
assert set(algo.user_index_) == set([10, 12, 13])
assert set(algo.item_index_) == set([1, 2, 3])
assert algo.user_features_.shape == (3, 20)
assert algo.item_features_.shape == (3, 20)
def test_als_predict_basic():
algo = als.ImplicitMF(20, iterations=10)
algo.fit(simple_df)
preds = algo.predict_for_user(10, [3])
assert len(preds) == 1
assert preds.index[0] == 3
assert preds.loc[3] >= -0.1
assert preds.loc[3] <= 5
def test_als_predict_bad_item():
algo = als.ImplicitMF(20, iterations=10)
algo.fit(simple_df)
preds = algo.predict_for_user(10, [4])
assert len(preds) == 1
assert preds.index[0] == 4
assert np.isnan(preds.loc[4])
def test_als_predict_bad_user():
algo = als.ImplicitMF(20, iterations=10)
algo.fit(simple_df)
preds = algo.predict_for_user(50, [3])
assert len(preds) == 1
assert preds.index[0] == 3
assert np.isnan(preds.loc[3])
@lktu.wantjit
def test_als_train_large():
algo = als.ImplicitMF(20, iterations=20)
ratings = lktu.ml_pandas.renamed.ratings
algo.fit(ratings)
assert len(algo.user_index_) == ratings.user.nunique()
assert len(algo.item_index_) == ratings.item.nunique()
assert algo.user_features_.shape == (ratings.user.nunique(), 20)
assert algo.item_features_.shape == (ratings.item.nunique(), 20)
def test_als_save_load():
algo = als.ImplicitMF(20, iterations=5)
ratings = lktu.ml_pandas.renamed.ratings
algo.fit(ratings)
mod = pickle.dumps(algo)
_log.info('serialized to %d bytes', len(mod))
restored = pickle.loads(mod)
assert np.all(restored.user_features_ == algo.user_features_)
assert np.all(restored.item_features_ == algo.item_features_)
assert np.all(restored.item_index_ == algo.item_index_)
assert np.all(restored.user_index_ == algo.user_index_)
@lktu.wantjit
def test_als_train_large_noratings():
algo = als.ImplicitMF(20, iterations=20)
ratings = lktu.ml_pandas.renamed.ratings
ratings = ratings.loc[:, ['user', 'item']]
algo.fit(ratings)
assert len(algo.user_index_) == ratings.user.nunique()
assert len(algo.item_index_) == ratings.item.nunique()
assert algo.user_features_.shape == (ratings.user.nunique(), 20)
assert algo.item_features_.shape == (ratings.item.nunique(), 20)
@mark.slow
@mark.eval
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_als_implicit_batch_accuracy():
import lenskit.crossfold as xf
from lenskit import batch
from lenskit import topn
ratings = lktu.ml100k.load_ratings()
algo = als.ImplicitMF(25, iterations=20)
def eval(train, test):
_log.info('running training')
train['rating'] = train.rating.astype(np.float_)
algo.fit(train)
users = test.user.unique()
_log.info('testing %d users', len(users))
candidates = topn.UnratedCandidates(train)
recs = batch.recommend(algo, users, 100, candidates)
return recs
folds = list(xf.partition_users(ratings, 5, xf.SampleFrac(0.2)))
test = pd.concat(te for (tr, te) in folds)
recs = pd.concat(eval(train, test) for (train, test) in folds)
_log.info('analyzing recommendations')
rla = topn.RecListAnalysis()
rla.add_metric(topn.ndcg)
results = rla.compute(recs, test)
_log.info('nDCG for users is %.4f', results.ndcg.mean())
assert results.ndcg.mean() > 0
| 3,923 | 28.283582 | 73 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_predict_metrics.py | import numpy as np
import pandas as pd
import os.path
from pytest import approx, raises, mark, skip
import lenskit.metrics.predict as pm
import lk_test_utils as lktu
def test_check_missing_empty():
pm._check_missing(pd.Series([]), 'error')
# should pass
assert True
def test_check_missing_has_values():
pm._check_missing(pd.Series([1, 3, 2]), 'error')
# should pass
assert True
def test_check_missing_nan_raises():
with raises(ValueError):
pm._check_missing(pd.Series([1, np.nan, 3]), 'error')
def test_check_missing_raises():
data = pd.Series([1, 7, 3], ['a', 'b', 'd'])
ref = pd.Series([3, 2, 4], ['b', 'c', 'd'])
ref, data = ref.align(data, join='left')
with raises(ValueError):
pm._check_missing(data, 'error')
def test_check_joined_ok():
data = pd.Series([1, 7, 3], ['a', 'b', 'd'])
ref = pd.Series([3, 2, 4], ['b', 'c', 'd'])
ref, data = ref.align(data, join='inner')
pm._check_missing(ref, 'error')
# should get here
assert True
def test_check_missing_ignore():
data = pd.Series([1, 7, 3], ['a', 'b', 'd'])
ref = pd.Series([3, 2, 4], ['b', 'c', 'd'])
ref, data = ref.align(data, join='left')
pm._check_missing(data, 'ignore')
# should get here
assert True
def test_rmse_one():
rmse = pm.rmse([1], [1])
assert isinstance(rmse, float)
assert rmse == approx(0)
rmse = pm.rmse([1], [2])
assert rmse == approx(1)
rmse = pm.rmse([1], [0.5])
assert rmse == approx(0.5)
def test_rmse_two():
rmse = pm.rmse([1, 2], [1, 2])
assert isinstance(rmse, float)
assert rmse == approx(0)
rmse = pm.rmse([1, 1], [2, 2])
assert rmse == approx(1)
rmse = pm.rmse([1, 3], [3, 1])
assert rmse == approx(2)
rmse = pm.rmse([1, 3], [3, 2])
assert rmse == approx(np.sqrt(5 / 2))
def test_rmse_array_two():
rmse = pm.rmse(np.array([1, 2]), np.array([1, 2]))
assert isinstance(rmse, float)
assert rmse == approx(0)
rmse = pm.rmse(np.array([1, 1]), np.array([2, 2]))
assert rmse == approx(1)
rmse = pm.rmse(np.array([1, 3]), np.array([3, 1]))
assert rmse == approx(2)
def test_rmse_series_two():
rmse = pm.rmse(pd.Series([1, 2]), pd.Series([1, 2]))
assert isinstance(rmse, float)
assert rmse == approx(0)
rmse = pm.rmse(pd.Series([1, 1]), pd.Series([2, 2]))
assert rmse == approx(1)
rmse = pm.rmse(pd.Series([1, 3]), pd.Series([3, 1]))
assert rmse == approx(2)
def test_rmse_series_subset_axis():
rmse = pm.rmse(pd.Series([1, 3], ['a', 'c']), pd.Series([3, 4, 1], ['a', 'b', 'c']))
assert rmse == approx(2)
def test_rmse_series_missing_value_error():
with raises(ValueError):
pm.rmse(pd.Series([1, 3], ['a', 'd']), pd.Series([3, 4, 1], ['a', 'b', 'c']))
def test_rmse_series_missing_value_ignore():
rmse = pm.rmse(pd.Series([1, 3], ['a', 'd']), pd.Series([3, 4, 1], ['a', 'b', 'c']),
missing='ignore')
assert rmse == approx(2)
def test_mae_two():
mae = pm.mae([1, 2], [1, 2])
assert isinstance(mae, float)
assert mae == approx(0)
mae = pm.mae([1, 1], [2, 2])
assert mae == approx(1)
mae = pm.mae([1, 3], [3, 1])
assert mae == approx(2)
mae = pm.mae([1, 3], [3, 2])
assert mae == approx(1.5)
def test_mae_array_two():
mae = pm.mae(np.array([1, 2]), np.array([1, 2]))
assert isinstance(mae, float)
assert mae == approx(0)
mae = pm.mae(np.array([1, 1]), np.array([2, 2]))
assert mae == approx(1)
mae = pm.mae(np.array([1, 3]), np.array([3, 1]))
assert mae == approx(2)
def test_mae_series_two():
mae = pm.mae(pd.Series([1, 2]), pd.Series([1, 2]))
assert isinstance(mae, float)
assert mae == approx(0)
mae = pm.mae(pd.Series([1, 1]), pd.Series([2, 2]))
assert mae == approx(1)
mae = pm.mae(pd.Series([1, 3]), pd.Series([3, 1]))
assert mae == approx(2)
@mark.slow
@mark.eval
@mark.skipif(not lktu.ml100k.available, reason='ML100K data not present')
def test_batch_rmse():
import lenskit.crossfold as xf
import lenskit.batch as batch
import lenskit.algorithms.basic as bl
ratings = lktu.ml100k.load_ratings()
algo = bl.Bias(damping=5)
def eval(train, test):
algo.fit(train)
preds = batch.predict(algo, test)
return preds.set_index(['user', 'item'])
results = pd.concat((eval(train, test)
for (train, test)
in xf.partition_users(ratings, 5, xf.SampleN(5))))
user_rmse = results.groupby('user').apply(lambda df: pm.rmse(df.prediction, df.rating))
# we should have all users
users = ratings.user.unique()
assert len(user_rmse) == len(users)
missing = np.setdiff1d(users, user_rmse.index)
assert len(missing) == 0
# we should not have any missing values
assert all(user_rmse.notna())
# we should have a reasonable mean
assert user_rmse.mean() == approx(0.93, abs=0.05)
| 5,003 | 24.927461 | 91 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_topn_mrr.py | import numpy as np
import pandas as pd
from pytest import approx
from lenskit.topn import recip_rank
def _test_rr(items, rel):
recs = pd.DataFrame({'item': items})
truth = pd.DataFrame({'item': rel}).set_index('item')
return recip_rank(recs, truth)
def test_mrr_empty_zero():
rr = _test_rr([], [1, 3])
assert rr == approx(0)
def test_mrr_norel_zero():
"no relevant items -> zero"
rr = _test_rr([1, 2, 3], [4, 5])
assert rr == approx(0)
def test_mrr_first_one():
"first relevant -> one"
rr = _test_rr([1, 2, 3], [1, 4])
assert rr == approx(1.0)
def test_mrr_second_one_half():
"second relevant -> 0.5"
rr = _test_rr([1, 2, 3], [5, 2, 3])
assert rr == approx(0.5)
def test_mrr_series():
"second relevant -> 0.5 in pd series"
rr = _test_rr(pd.Series([1, 2, 3]), pd.Series([5, 2, 3]))
assert rr == approx(0.5)
def test_mrr_series_idx():
"second relevant -> 0.5 in pd series w/ index"
rr = _test_rr(pd.Series([1, 2, 3]), pd.Index([5, 2, 3]))
assert rr == approx(0.5)
def test_mrr_array_late():
"deep -> 0.1"
rr = _test_rr(np.arange(1, 21, 1, 'u4'), [20, 10])
assert rr == approx(0.1)
| 1,189 | 21.037037 | 61 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_baselines.py | import lenskit.algorithms.basic as bl
from lenskit import util as lku
import logging
import pickle
import pandas as pd
import numpy as np
from pytest import approx
import lk_test_utils as lktu
from lk_test_utils import ml_pandas
_log = logging.getLogger(__name__)
simple_df = pd.DataFrame({'item': [1, 1, 2, 3],
'user': [10, 12, 10, 13],
'rating': [4.0, 3.0, 5.0, 2.0]})
def test_bias_full():
algo = bl.Bias()
algo.fit(simple_df)
assert algo.mean_ == approx(3.5)
assert algo.item_offsets_ is not None
assert algo.item_offsets_.index.name == 'item'
assert set(algo.item_offsets_.index) == set([1, 2, 3])
assert algo.item_offsets_.loc[1:3].values == approx(np.array([0, 1.5, -1.5]))
assert algo.user_offsets_ is not None
assert algo.user_offsets_.index.name == 'user'
assert set(algo.user_offsets_.index) == set([10, 12, 13])
assert algo.user_offsets_.loc[[10, 12, 13]].values == approx(np.array([0.25, -0.5, 0]))
def test_bias_clone():
algo = bl.Bias()
algo.fit(simple_df)
params = algo.get_params()
assert sorted(params.keys()) == ['damping', 'items', 'users']
a2 = lku.clone(algo)
assert a2 is not algo
assert getattr(a2, 'mean_', None) is None
assert getattr(a2, 'item_offsets_', None) is None
assert getattr(a2, 'user_offsets_', None) is None
def test_bias_global_only():
algo = bl.Bias(users=False, items=False)
algo.fit(simple_df)
assert algo.mean_ == approx(3.5)
assert algo.item_offsets_ is None
assert algo.user_offsets_ is None
def test_bias_no_user():
algo = bl.Bias(users=False)
algo.fit(simple_df)
assert algo.mean_ == approx(3.5)
assert algo.item_offsets_ is not None
assert algo.item_offsets_.index.name == 'item'
assert set(algo.item_offsets_.index) == set([1, 2, 3])
assert algo.item_offsets_.loc[1:3].values == approx(np.array([0, 1.5, -1.5]))
assert algo.user_offsets_ is None
def test_bias_no_item():
algo = bl.Bias(items=False)
algo.fit(simple_df)
assert algo.mean_ == approx(3.5)
assert algo.item_offsets_ is None
assert algo.user_offsets_ is not None
assert algo.user_offsets_.index.name == 'user'
assert set(algo.user_offsets_.index) == set([10, 12, 13])
assert algo.user_offsets_.loc[[10, 12, 13]].values == approx(np.array([1.0, -0.5, -1.5]))
def test_bias_global_predict():
algo = bl.Bias(items=False, users=False)
algo.fit(simple_df)
p = algo.predict_for_user(10, [1, 2, 3])
assert len(p) == 3
assert (p == algo.mean_).all()
assert p.values == approx(algo.mean_)
def test_bias_item_predict():
algo = bl.Bias(users=False)
algo.fit(simple_df)
p = algo.predict_for_user(10, [1, 2, 3])
assert len(p) == 3
assert p.values == approx((algo.item_offsets_ + algo.mean_).values)
def test_bias_user_predict():
algo = bl.Bias(items=False)
algo.fit(simple_df)
p = algo.predict_for_user(10, [1, 2, 3])
assert len(p) == 3
assert p.values == approx(algo.mean_ + 1.0)
p = algo.predict_for_user(12, [1, 3])
assert len(p) == 2
assert p.values == approx(algo.mean_ - 0.5)
def test_bias_new_user_predict():
algo = bl.Bias()
algo.fit(simple_df)
ratings = pd.DataFrame({'item': [1, 2, 3], 'rating': [1.5, 2.5, 3.5]})
ratings = ratings.set_index('item').rating
p = algo.predict_for_user(None, [1, 3], ratings=ratings)
offs = ratings - algo.mean_ - algo.item_offsets_
umean = offs.mean()
_log.info('user mean is %f', umean)
assert len(p) == 2
assert p.values == approx((algo.mean_ + algo.item_offsets_ + umean).loc[[1, 3]].values)
def test_bias_predict_unknown_item():
algo = bl.Bias()
algo.fit(simple_df)
p = algo.predict_for_user(10, [1, 3, 4])
assert len(p) == 3
intended = algo.item_offsets_.loc[[1, 3]] + algo.mean_ + 0.25
assert p.loc[[1, 3]].values == approx(intended.values)
assert p.loc[4] == approx(algo.mean_ + 0.25)
def test_bias_predict_unknown_user():
algo = bl.Bias()
algo.fit(simple_df)
p = algo.predict_for_user(15, [1, 3])
assert len(p) == 2
assert p.values == approx((algo.item_offsets_.loc[[1, 3]] + algo.mean_).values)
def test_bias_train_ml_ratings():
algo = bl.Bias()
ratings = ml_pandas.ratings.rename(columns={'userId': 'user', 'movieId': 'item'})
algo.fit(ratings)
assert algo.mean_ == approx(ratings.rating.mean())
imeans_data = ratings.groupby('item').rating.mean()
imeans_algo = algo.item_offsets_ + algo.mean_
ares, data = imeans_algo.align(imeans_data)
assert ares.values == approx(data.values)
urates = ratings.set_index('user').loc[2].set_index('item').rating
umean = (urates - imeans_data[urates.index]).mean()
p = algo.predict_for_user(2, [10, 11, -1])
assert len(p) == 3
assert p.iloc[0] == approx(imeans_data.loc[10] + umean)
assert p.iloc[1] == approx(imeans_data.loc[11] + umean)
assert p.iloc[2] == approx(ratings.rating.mean() + umean)
def test_bias_item_damp():
algo = bl.Bias(users=False, damping=5)
algo.fit(simple_df)
assert algo.mean_ == approx(3.5)
assert algo.item_offsets_ is not None
assert algo.item_offsets_.index.name == 'item'
assert set(algo.item_offsets_.index) == set([1, 2, 3])
assert algo.item_offsets_.loc[1:3].values == approx(np.array([0, 0.25, -0.25]))
assert algo.user_offsets_ is None
def test_bias_user_damp():
algo = bl.Bias(items=False, damping=5)
algo.fit(simple_df)
assert algo.mean_ == approx(3.5)
assert algo.item_offsets_ is None
assert algo.user_offsets_ is not None
assert algo.user_offsets_.index.name == 'user'
assert set(algo.user_offsets_.index) == set([10, 12, 13])
assert algo.user_offsets_.loc[[10, 12, 13]].values == \
approx(np.array([0.2857, -0.08333, -0.25]), abs=1.0e-4)
def test_bias_damped():
algo = bl.Bias(damping=5)
algo.fit(simple_df)
assert algo.mean_ == approx(3.5)
assert algo.item_offsets_ is not None
assert algo.item_offsets_.index.name == 'item'
assert set(algo.item_offsets_.index) == set([1, 2, 3])
assert algo.item_offsets_.loc[1:3].values == approx(np.array([0, 0.25, -0.25]))
assert algo.user_offsets_ is not None
assert algo.user_offsets_.index.name == 'user'
assert set(algo.user_offsets_.index) == set([10, 12, 13])
assert algo.user_offsets_.loc[[10, 12, 13]].values == \
approx(np.array([0.25, -00.08333, -0.20833]), abs=1.0e-4)
def test_bias_separate_damping():
algo = bl.Bias(damping=(5, 10))
algo.fit(simple_df)
assert algo.mean_ == approx(3.5)
assert algo.item_offsets_ is not None
assert algo.item_offsets_.index.name == 'item'
assert set(algo.item_offsets_.index) == set([1, 2, 3])
assert algo.item_offsets_.loc[1:3].values == \
approx(np.array([0, 0.136364, -0.13636]), abs=1.0e-4)
assert algo.user_offsets_ is not None
assert algo.user_offsets_.index.name == 'user'
assert set(algo.user_offsets_.index) == set([10, 12, 13])
assert algo.user_offsets_.loc[[10, 12, 13]].values == \
approx(np.array([0.266234, -0.08333, -0.22727]), abs=1.0e-4)
def test_bias_save():
original = bl.Bias(damping=5)
original.fit(simple_df)
assert original.mean_ == approx(3.5)
_log.info('saving baseline model')
mod = pickle.dumps(original)
_log.info('serialized to %d bytes', len(mod))
algo = pickle.loads(mod)
assert algo.mean_ == original.mean_
assert algo.item_offsets_ is not None
assert algo.item_offsets_.index.name == 'item'
assert set(algo.item_offsets_.index) == set([1, 2, 3])
assert algo.item_offsets_.loc[1:3].values == approx(np.array([0, 0.25, -0.25]))
assert algo.user_offsets_ is not None
assert algo.user_offsets_.index.name == 'user'
assert set(algo.user_offsets_.index) == set([10, 12, 13])
assert algo.user_offsets_.loc[[10, 12, 13]].values == \
approx(np.array([0.25, -00.08333, -0.20833]), abs=1.0e-4)
| 8,090 | 30.119231 | 93 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/lk_test_utils.py | """
Test utilities for LKPY tests.
"""
import os
import os.path
import tempfile
import pathlib
import logging
from contextlib import contextmanager
import pandas as pd
import pytest
_log = logging.getLogger('lktu')
ml_dir = os.path.join(os.path.dirname(__file__), '../ml-latest-small')
class Renamer:
def __init__(self, dl):
self._dl = dl
def __getattribute__(self, name):
dl = object.__getattribute__(self, '_dl')
df = getattr(dl, name)
return df.rename(columns={'userId': 'user', 'movieId': 'item'})
class MLDataLoader:
_ratings = None
_movies = None
_tags = None
def __init__(self, reader):
self._read = reader
@property
def ratings(self):
if self._ratings is None:
self._ratings = self._read(os.path.join(ml_dir, 'ratings.csv'))
return self._ratings
@property
def movies(self):
if self._movies is None:
self._movies = self._read(os.path.join(ml_dir, 'movies.csv'))
return self._movies
@property
def tags(self):
        if self._tags is None:
            self._tags = self._read(os.path.join(ml_dir, 'tags.csv'))
        return self._tags
@property
def renamed(self):
return Renamer(self)
class _ML100K:
@property
def location(self):
return os.path.expanduser(os.environ.get('ML100K_DIR', 'ml-100k'))
@property
def rating_file(self):
return os.path.join(self.location, 'u.data')
@property
def available(self):
return os.path.exists(self.rating_file)
def load_ratings(self):
return pd.read_csv(self.rating_file, sep='\t',
names=['user', 'item', 'rating', 'timestamp'])
def ml_sample():
ratings = ml_pandas.renamed.ratings
icounts = ratings.groupby('item').rating.count()
top = icounts.nlargest(500)
ratings = ratings.set_index('item')
top_rates = ratings.loc[top.index, :]
_log.info('top 500 items yield %d of %d ratings', len(top_rates), len(ratings))
return top_rates.reset_index()
ml100k = _ML100K()
wantjit = pytest.mark.skipif('NUMBA_DISABLE_JIT' in os.environ,
reason='JIT required')
@contextmanager
def envvars(**vars):
save = {}
for k in vars.keys():
if k in os.environ:
save[k] = os.environ[k]
else:
save[k] = None
os.environ[k] = vars[k]
try:
yield
finally:
for k, v in save.items():
if v is None:
del os.environ[k]
else:
os.environ[k] = v
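# Illustrative usage (not part of the original tests): temporarily set an
# environment variable and restore or remove it on exit, e.g.
#
#     with envvars(NUMBA_DISABLE_JIT='1'):
#         ...  # run a test with the Numba JIT disabled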
ml_pandas = MLDataLoader(pd.read_csv)
def norm_path(path):
if isinstance(path, pathlib.Path):
return path
elif hasattr(path, '__fspath__'):
return pathlib.Path(path.__fspath__())
elif isinstance(path, str):
        return pathlib.Path(path)
else:
raise ValueError('invalid path: ' + repr(path))
| 2,954 | 22.267717 | 83 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_topn_utils.py | from lenskit import topn
from lenskit.algorithms import CandidateSelector
import pandas as pd
import numpy as np
import lk_test_utils as lktu
def test_unrated():
ratings = lktu.ml_pandas.renamed.ratings
unrate = topn.UnratedCandidates(ratings)
cs = unrate(100)
items = ratings.item.unique()
rated = ratings[ratings.user == 100].item.unique()
assert len(cs) == len(items) - len(rated)
assert len(np.intersect1d(cs, rated)) == 0
def test_cs_rated_items_series():
"rated_items should de-index series"
items = ['a', 'b', 'wombat']
series = pd.Series(np.random.randn(3), index=items)
i2 = CandidateSelector.rated_items(series)
assert isinstance(i2, np.ndarray)
assert all(i2 == items)
def test_cs_rated_items():
"rated_items should return list as array"
items = ['a', 'b', 'wombat']
i2 = CandidateSelector.rated_items(items)
assert isinstance(i2, np.ndarray)
assert all(i2 == items)
def test_cs_rated_items_array():
"rated_items should return array as itself"
items = ['a', 'b', 'wombat']
items = np.array(items)
i2 = CandidateSelector.rated_items(items)
assert isinstance(i2, np.ndarray)
assert all(i2 == items)
assert i2 is items
| 1,241 | 24.346939 | 55 | py |
MachineUnlearningPy | MachineUnlearningPy-master/tests/test_topn_precision.py | import numpy as np
import pandas as pd
from pytest import approx
from lenskit.topn import precision
def _test_prec(items, rel):
recs = pd.DataFrame({'item': items})
truth = pd.DataFrame({'item': rel}).set_index('item')
return precision(recs, truth)
def test_precision_empty_none():
prec = _test_prec([], [1, 3])
assert prec is None
def test_precision_simple_cases():
prec = _test_prec([1, 3], [1, 3])
assert prec == approx(1.0)
prec = _test_prec([1], [1, 3])
assert prec == approx(1.0)
prec = _test_prec([1, 2, 3, 4], [1, 3])
assert prec == approx(0.5)
prec = _test_prec([1, 2, 3, 4], [1, 3, 5])
assert prec == approx(0.5)
prec = _test_prec([1, 2, 3, 4], range(5, 10))
assert prec == approx(0.0)
prec = _test_prec([1, 2, 3, 4], range(4, 10))
assert prec == approx(0.25)
def test_precision_series():
prec = _test_prec(pd.Series([1, 3]), pd.Series([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Series([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Series(range(4, 10)))
assert prec == approx(0.25)
def test_precision_series_set():
prec = _test_prec(pd.Series([1, 2, 3, 4]), [1, 3, 5])
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), range(4, 10))
assert prec == approx(0.25)
def test_precision_series_index():
prec = _test_prec(pd.Series([1, 3]), pd.Index([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Index([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), pd.Index(range(4, 10)))
assert prec == approx(0.25)
def test_precision_series_array():
prec = _test_prec(pd.Series([1, 3]), np.array([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(pd.Series([1, 2, 3, 4]), np.array([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(pd.Series([1, 2, 3, 4]), np.arange(4, 10, 1, 'u4'))
assert prec == approx(0.25)
def test_precision_array():
prec = _test_prec(np.array([1, 3]), np.array([1, 3]))
assert prec == approx(1.0)
prec = _test_prec(np.array([1, 2, 3, 4]), np.array([1, 3, 5]))
assert prec == approx(0.5)
prec = _test_prec(np.array([1, 2, 3, 4]), np.arange(4, 10, 1, 'u4'))
assert prec == approx(0.25)
| 2,386 | 25.522222 | 73 | py |
MachineUnlearningPy | MachineUnlearningPy-master/unlearn/visualization.py | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
df = pd.read_csv('output_matrix.csv')
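# Assumption: output_matrix.csv holds a column 'n' (number of ratings) plus three
# timing columns, in order: native learning, unlearn-supported learning, unlearning,
# with one row per repetition.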
time_stats = df.groupby("n").mean()
time_mean = time_stats.values
ns = time_stats.index.get_level_values(0)
plt.plot(ns,time_mean[:,0],label="native learning")
plt.plot(ns,time_mean[:,1],label="unlearn supported learning")
plt.plot(ns,time_mean[:,2],label="unlearn")
plt.legend()
plt.xlabel("Number of Rating")
plt.ylabel("Time in second")
plt.title("Time Cost, Vectorized Unlearn supported Learning, Nested Loop Unlearn with CSR Indexing")
plt.show() | 550 | 33.4375 | 100 | py |
MachineUnlearningPy | MachineUnlearningPy-master/unlearn/basic.py | import sys
sys.path.insert(0,'../.')
from lenskit import batch, topn, util
from lenskit import crossfold as xf
from lenskit.algorithms import Recommender, als, item_knn as knn
import pandas as pd
import matplotlib
import time
ratings = pd.read_csv('../ml-100k/u.data', sep='\t',
names=['user','item','rating','timestamp'])
print(ratings.head())
#Define Algorithms
alg_li = knn.ItemItem(20)
alg_als = als.BiasedMF(50)
#Evaluation
def eval(aname, algo, train, test):
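    # Fit a fresh clone of the algorithm on the training set; the fitted model is
    # not used afterwards, so nothing is returned (only training cost matters here).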
fittable = util.clone(algo)
fittable = Recommender.adapt(fittable)
results = fittable.fit(train)
return
print(ratings.shape)
#############################################################################
# Run Algorithms with different input Size
#for n in range(200,10000,100):
for n in range(10000,100000,1000):
#for n in [200]:
print(n)
for i in range(10):
train = ratings[['user','item','rating']][:n]
eval('ItemItem',alg_li,train,train)
| 984 | 21.386364 | 77 | py |
MachineUnlearningPy | MachineUnlearningPy-master/doc/conf.py | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'LensKit'
copyright = '2018 Boise State University'
author = 'Michael D. Ekstrand'
# The short X.Y version
version = '0.6.1'
# The full version, including alpha/beta/rc tags
release = '0.6.1'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'nbsphinx',
'recommonmark',
'sphinx.ext.napoleon',
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'python3'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'github_user': 'lenskit',
'github_repo': 'lkpy',
'travis_button': False,
'canonical_url': 'https://lkpy.lenskit.org/',
'font_family': 'Charter, serif'
# 'font_family': '"Source Sans Pro", "Georgia Pro", Georgia, serif',
# 'font_size': '15px',
# 'head_font_family': '"Merriweather Sans", "Arial", sans-serif',
# 'code_font_size': '1em',
# 'code_font_family': '"Source Code Pro", "Consolas", "Menlo", sans-serif'
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'LensKitdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'LensKit.tex', 'LensKit Documentation',
'Michael D. Ekstrand', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'lenskit', 'LensKit Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'LensKit', 'LensKit Documentation',
author, 'LensKit', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'pandas': ('http://pandas.pydata.org/pandas-docs/stable/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
'hpfrec': ('https://hpfrec.readthedocs.io/en/latest/', None),
'implicit': ('https://implicit.readthedocs.io/en/latest/', None),
'scikit': ('https://scikit-learn.org/stable/', None),
'tqdm': ('https://tqdm.github.io/', None)
}
| 6,129 | 31.263158 | 79 | py |
GWE | GWE-master/convAE/make_char_feat_dict_nopool.py | import cPickle as pkl
import pdb
import sys
import numpy as np
import tensorflow as tf
sys.path.insert(0, './models/')
from conv_ae_char_nopool import Model
checkpoint_filename = './checkpoints/conv_ae_char_nopool.ckpt-100'
char_bitmap_dict_pkl_filename = '../data/char_dict.pkl'
char_feat_dict_filename = '../data/char_feat_dict.pkl'
char_feat_dict = {}
# load char bitmap data
with open(char_bitmap_dict_pkl_filename, 'rb') as f:
char_bitmap_dict = pkl.load(f)
bitmaps = []
chars = []
for k in char_bitmap_dict:
chars.append(k)
bitmaps.append(char_bitmap_dict[k])
BATCH_SIZE = 20
with tf.Graph().as_default(), tf.Session() as sess:
model = Model('', sess, layerwise=True)
model.load_model(checkpoint_filename)
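  # Note: with this loop bound the remainder-handling branch below never triggers,
  # so any trailing len(chars) % BATCH_SIZE characters get no feature vector.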
for batch_num in xrange(0, int(len(chars)/BATCH_SIZE)):
start = batch_num * BATCH_SIZE
end = (batch_num+1) * BATCH_SIZE
if end > len(chars):
start = len(chars)-BATCH_SIZE
end = len(chars)
bitmap_batch_list = [ bitmaps[idx] / 255.0 for idx in xrange(start,end) ]
X = np.stack(bitmap_batch_list, axis=0)
#embs = model.get_embs(X)
_, _, _, _, embs = model.get_layers_n_args(X)
for idx in xrange(BATCH_SIZE):
char_feat_dict[chars[start+idx]] = embs[idx,0,0,:]
sys.stdout.write('\rbatch #{0} '.format(batch_num))
sys.stdout.flush()
#pdb.set_trace()
with open(char_feat_dict_filename, 'wb') as f:
pkl.dump(char_feat_dict, f, pkl.HIGHEST_PROTOCOL)
| 1,455 | 25.472727 | 77 | py |
GWE | GWE-master/convAE/tsne_feature_nopool.py | # -*- coding: utf-8 -*-
import cPickle as pkl
import pdb
import sys
import numpy as np
#from PIL import Image
import tensorflow as tf
from tsne import bh_sne
sys.path.insert(0, './models/')
from conv_ae_char_nopool import Model
def save_collection_img(img_filename, n_row, n_col, img_size, offset, imgs):
image=Image.new("RGB", (n_col*img_size + (n_col+1)*offset,
n_row*img_size + (n_row+1)*offset), 'black')
pixels = image.load()
offset_h = offset
offset_w = offset
for n_h in xrange(n_row):
offset_w = offset
for n_w in xrange(n_col):
feat_idx = n_col * n_h + n_w
bitmap = imgs[feat_idx]
for p_h in xrange(img_size):
for p_w in xrange(img_size):
mag = bitmap[p_h, p_w] * 255
pixels[offset_w+p_w, offset_h+p_h] = (mag,mag,mag)
offset_w += offset + img_size
offset_h += offset + img_size
image.save(img_filename)
#checkpoint_filename = './checkpoints/conv_ae_char_switch_var.ckpt-100'
#checkpoint_filename = './log/conv_ae_char_switch_var.ckpt-100'
checkpoint_filename = './checkpoints/conv_ae_char_nopool.ckpt-100'
char_dict_pkl_filename = '../data/char_dict.pkl'
# load char bitmap data
with open(char_dict_pkl_filename, 'rb') as f:
char_bitmap_dict = pkl.load(f)
feats = []
bitmaps = []
chars = []
for k in char_bitmap_dict:
chars.append(k)
bitmaps.append(char_bitmap_dict[k])
BATCH_SIZE = 20
with tf.Graph().as_default(), tf.Session() as sess:
model = Model('', sess, True)
model.load_model(checkpoint_filename)
for batch_num in xrange(0, int(len(chars)/BATCH_SIZE)):
start = batch_num * BATCH_SIZE
end = (batch_num+1) * BATCH_SIZE
if end > len(chars):
start = len(chars)-BATCH_SIZE
end = len(chars)
bitmap_batch_list = [ bitmaps[idx] / 255.0 for idx in xrange(start,end) ]
X = np.stack(bitmap_batch_list, axis=0)
conv1, conv2, conv3, conv4, conv5 = model.get_layers_n_args(X)
for idx in xrange(BATCH_SIZE):
#char_feat_dict[chars[start+idx]] = embs[idx,0,0,:]
feats.append(conv5[idx,0,0,:])
#feats.append(np.reshape(conv4[idx], (-1,)))
#embs = model.get_embs(X)
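# bh_sne projects the layer activations to 2-D; it is given a float64 array
# (hence the explicit cast below) and returns one embedding row per character.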
vis_data = bh_sne(np.asarray(feats, dtype='float64'))
vis_x = vis_data[:, 0]
vis_y = vis_data[:, 1]
print vis_data.shape
#vis_x *= 3
#vis_y *= 3
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
img_filename = 'tsne_conv_ae_feat_nopool_layerwise_conv5.png'
fig = plt.gcf()
fig.clf()
fig.set_size_inches(40, 40)
fig.set_dpi(80)
ax = plt.subplot(111)
ax.set_xlim([np.min(vis_x) - 5, np.max(vis_x) + 5])
ax.set_ylim([np.min(vis_y) - 5, np.max(vis_y) + 5])
for idx in xrange(8780):
bitmap = np.tile(bitmaps[idx], (1,1,3))
imagebox = OffsetImage(bitmap, zoom=0.5)
xy = [vis_x[idx], vis_y[idx]]
#pdb.set_trace()
#xy = vis_data[idx]
ab = AnnotationBbox(imagebox, xy,
#xybox=(10., -10.),
xycoords='data',
boxcoords="offset points",
pad=0)
ax.add_artist(ab)
if idx % 100 == 0:
print idx
#if idx == 10:
# print idx
# break
ax.grid(True)
plt.draw()
plt.savefig(img_filename)
#plt.show()
'''
img_size = 60
image=Image.new("RGB", (int(np.max(vis_x) - np.min(vis_x)) + 2*img_size,
int(np.max(vis_y) - np.min(vis_y)) + 2*img_size), 'white')
pixels = image.load()
for idx in xrange(4000):
bitmap = bitmaps[idx]
offset_w, offset_h = int(vis_x[idx] - img_size), int(vis_y[idx] - img_size)
for p_h in xrange(img_size):
for p_w in xrange(img_size):
mag = 255 - bitmap[p_h, p_w] * 255
pixels[offset_w+p_w, offset_h+p_h] = (mag,mag,mag)
image.save(img_filename)
'''
| 3,809 | 25.643357 | 82 | py |
GWE | GWE-master/convAE/train_conv_ae_char_nopool.py | import cPickle as pkl
import pdb
import random
import sys
import time
import numpy as np
from PIL import Image
import tensorflow as tf
sys.path.insert(0, './models/')
from conv_ae_char_nopool import Model
def save_collection_img(img_filename, n_row, n_col, img_size, offset, imgs):
image=Image.new("RGB", (n_col*img_size + (n_col+1)*offset,
n_row*img_size + (n_row+1)*offset), 'black')
pixels = image.load()
offset_h = offset
offset_w = offset
for n_h in xrange(n_row):
offset_w = offset
for n_w in xrange(n_col):
feat_idx = n_col * n_h + n_w
bitmap = imgs[feat_idx]
for p_h in xrange(img_size):
for p_w in xrange(img_size):
mag = bitmap[p_h, p_w] * 255
pixels[offset_w+p_w, offset_h+p_h] = (mag,mag,mag)
offset_w += offset + img_size
offset_h += offset + img_size
image.save(img_filename)
log_dir = './log'
dict_pkl_filename = '../data/char_word_dicts.pkl'
char_dict_pkl_filename = '../data/char_dict.pkl'
# load char bitmap data
with open(char_dict_pkl_filename, 'rb') as f:
char_bitmap_dict = pkl.load(f)
#char_bitmap_dict.pop(u'UNK')
chars = []
bitmaps = []
for k in char_bitmap_dict:
chars.append(k)
bitmaps.append(char_bitmap_dict[k])
pickList = range(0, len(bitmaps))
random.shuffle(pickList)
MAX_EPOCH = 100
BATCH_SIZE = 20
def mask_activation(act, n):
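  # Zero out all but (roughly) the n largest-magnitude activations of each example;
  # used by the commented-out analysis below to see what the strongest units of a
  # layer reconstruct.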
mask = np.zeros_like(act)
for b in xrange(BATCH_SIZE):
values = np.reshape(np.abs(act[b]), (-1,))
values = np.sort(values)[::-1]
threshold = values[n]
indices = np.where(np.abs(act[b]) > threshold)
mask[b,indices[0],indices[1],indices[2]] = 1
return act * mask
with tf.Graph().as_default(), tf.Session() as sess:
# initialize model.
model = Model(log_dir, sess, True)
n_activations = [75, 50, 25, 5, 1]
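  # Number of activations to keep per layer when reconstructing from a single layer
  # (only referenced in the commented-out analysis block below).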
for layer_idx in xrange(5):
for epoch in xrange(MAX_EPOCH):
tStart = time.time()
print "Epoch:",epoch+1
cost = 0
l1_loss = 0
random.shuffle(pickList)
for batch_num in xrange(0, int(len(pickList)/BATCH_SIZE)):
tStart_batch = time.time()
start = batch_num * BATCH_SIZE
end = (batch_num+1) * BATCH_SIZE
if end > len(pickList):
start = len(pickList)-BATCH_SIZE
end = len(pickList)
bitmap_batch_list = [ bitmaps[idx] / 255.0 for idx in pickList[start:end] ]
X = np.stack(bitmap_batch_list, axis=0)
batch_cost, batch_l1_loss = model.train_layer(layer_idx, X)
batch_time = time.time() - tStart_batch
cost += batch_cost
l1_loss += batch_l1_loss
sys.stdout.write(
('\rbatch #{0}, '
'loss_val: {1}, '
'l1_loss: {2}, '
'total_batch_loss: {3}, '
'epoch_time: {4} ').format(
batch_num+1, batch_cost, batch_l1_loss, cost, batch_time))
sys.stdout.flush()
average_cost = cost/float(int(len(pickList)/BATCH_SIZE))
print "Total cost =",cost," ,Average cost =",average_cost
print "Total l1_loss = ",l1_loss
tEnd = time.time()
print "Time used:", tEnd-tStart
print "Finished training layer {0}!".format(layer_idx+1)
'''
The following code is used to generate reconstruction of character glyphs.
test_bitmap_batch = [ bitmaps[idx] / 255.0 for idx in pickList[:BATCH_SIZE] ]
X = np.stack(test_bitmap_batch, axis=0)
X_hat = model.test_layer(layer_idx, X)
selected_imgs = np.zeros((20,60,60,1))
selected_imgs[0:20:2, :,:,:] = X[:10,:,:,:]
selected_imgs[1:20:2, :,:,:] = X_hat[:10,:,:,:]
save_collection_img("conv_ae_char_nopool_layerwise_l{0}.png".format(layer_idx+1),
n_row=2, n_col=10,
img_size=60, offset=20,
imgs=selected_imgs)
l1, l2, l3, l4, l5 = model.get_layers_n_args(X)
acts = [l1, l2, l3, l4, l5]
ma = mask_activation(acts[layer_idx], n_activations[layer_idx])
images_hat = model.reconstruct_from_layer(layer_idx, ma)
img_filename = "analyze_conv_ae_nopool_layerwise_l{0}_{1}.png".format(layer_idx+1, n_activations[layer_idx])
save_collection_img(img_filename,
n_row=2, n_col=10,
img_size=60, offset=20,
imgs=images_hat)
print "Finished analyzing layer {0}!".format(layer_idx+1)
'''
model.save_model(100)
'''
embs = model.get_embs(X)
val = np.mean(embs)
embs = np.zeros((512,1,1,512))
for i in xrange(512):
embs[i,0,0,i] = val
reconstructed_imgs = np.zeros((512,60,60,1))
imgs = model.reconstruct_from_embs(embs[:100])
reconstructed_imgs[:100,:,:,:] = imgs
imgs = model.reconstruct_from_embs(embs[100:200])
reconstructed_imgs[100:200,:,:,:] = imgs
imgs = model.reconstruct_from_embs(embs[200:300])
reconstructed_imgs[200:300,:,:,:] = imgs
imgs = model.reconstruct_from_embs(embs[300:400])
reconstructed_imgs[300:400,:,:,:] = imgs
imgs = model.reconstruct_from_embs(embs[400:500])
reconstructed_imgs[400:500,:,:,:] = imgs
imgs = model.reconstruct_from_embs(embs[412:])
reconstructed_imgs[412:,:,:,:] = imgs
save_collection_img("conv_ae_nopool_emb.png",
n_row=16, n_col=32,
img_size=60, offset=15,
imgs=reconstructed_imgs)
'''
| 5,361 | 29.99422 | 112 | py |
GWE | GWE-master/convAE/models/conv_ae_char_nopool.py | import os
import numpy as np
import tensorflow as tf
def unpool(updates, ksize=[1, 2, 2, 1]):
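  # Nearest-neighbour upsampling of each feature map by ksize[1] x ksize[2];
  # a cheap inverse of pooling that keeps no pooling switches.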
original_shape = updates.get_shape()
original_shape = tuple([i.__int__() for i in original_shape])
new_size = tf.shape(updates)[1:3]
new_size *= tf.constant(np.array([ksize[1], ksize[2]]).astype('int32'))
#input_shape = updates.get_shape().as_list()
#new_size = tf.to_int32(tf.pack([input_shape[1]*ksize[1], input_shape[2]*ksize[2]]))
ret = tf.image.resize_nearest_neighbor(updates, new_size)
ret.set_shape((original_shape[0],
original_shape[1] * ksize[1] if original_shape[1] is not None else None,
original_shape[2] * ksize[2] if original_shape[2] is not None else None,
original_shape[3]))
return ret
BATCH_SIZE = 20
class Model:
def __init__(self,
log_dir,
session,
layerwise=False):
self._log_dir = log_dir
self._session = session
if layerwise:
self.build_layer_wise_graph()
else:
self.build_graph()
def conv_n_kernel(self, x, kernel_shape, scope_name, strides=[1,1,1,1], padding='VALID', act=tf.nn.relu6):
with tf.variable_scope(scope_name) as scope:
kernel = tf.get_variable("kernel",
kernel_shape,
initializer=tf.contrib.layers.xavier_initializer_conv2d())
biases = tf.get_variable("biases",[kernel_shape[-1]],initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(x, kernel, strides, padding=padding)
bias = tf.nn.bias_add(conv, biases)
x = act(bias, name=scope.name)
return x, kernel
def conv_transpose(self, x, kernel, scope_name, strides=[1,1,1,1], padding='VALID', act=tf.nn.relu6):
x_shape = x.get_shape().as_list()
W_shape = kernel.get_shape().as_list()
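    # Transposed-convolution output shape: 'SAME' keeps the input's spatial size here,
    # while 'VALID' gives (input_size - 1) * stride + kernel_size per spatial axis.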
if padding == 'SAME':
out_shape = tf.pack([x_shape[0], x_shape[1], x_shape[2], W_shape[2]])
elif padding == 'VALID':
out_shape = tf.pack([x_shape[0],
(x_shape[1] - 1)*strides[1]+W_shape[0],
(x_shape[2] - 1)*strides[2]+W_shape[1],
W_shape[2]])
with tf.variable_scope(scope_name) as scope:
biases = tf.get_variable("biases",[W_shape[2]],initializer=tf.constant_initializer(0.0))
deconv = tf.nn.conv2d_transpose(x,
kernel,
output_shape=out_shape,
strides=strides,
padding=padding)
bias = tf.nn.bias_add(deconv, biases)
x = act(bias, name=scope.name)
return x
def forward(self, images):
#x = 60
# conv1
x, kernel1 = self.conv_n_kernel(images, [5,5,1,16], "conv1", strides=[1,1,1,1])
feat_map_l1_norm = tf.reduce_sum(tf.abs(x))
#x = 56
# conv2
x, kernel2 = self.conv_n_kernel(x, [4,4,16,64], 'conv2', strides=[1,2,2,1])
feat_map_l1_norm += tf.reduce_sum(tf.abs(x))
# x = 27
# conv3
x, kernel3 = self.conv_n_kernel(x, [5,5,64,64], 'conv3', strides=[1,2,2,1])
feat_map_l1_norm += tf.reduce_sum(tf.abs(x))
# x = 12
# conv4
x, kernel4 = self.conv_n_kernel(x, [4,4,64,128], 'conv4', strides=[1,2,2,1])
feat_map_l1_norm += tf.reduce_sum(tf.abs(x))
# x = 5
# conv5
x, kernel5 = self.conv_n_kernel(x, [5,5,128,512], 'conv5', strides=[1,1,1,1])
feat_map_l1_norm += tf.reduce_sum(tf.abs(x))
# x = 1
self._embs = x
#deconv5
x = self.conv_transpose(x, kernel5, 'deconv5', strides=[1,1,1,1])
# x = 5
#deconv4
#x, _ = self.conv_n_kernel(x, [5,5,128,64], "deconv4")
x = self.conv_transpose(x, kernel4, 'deconv4', strides=[1,2,2,1])
# x = 12
#deconv3
#x, _ = self.conv_n_kernel(x, [5,5,256,32], "deconv3")
x = self.conv_transpose(x, kernel3, 'deconv3', strides=[1,2,2,1])
# x = 27
#deconv2
#x, kernel2 = self.conv_n_kernel(x, [5,5,32,16], "deconv2")
x = self.conv_transpose(x, kernel2, 'deconv2', strides=[1,2,2,1])
# x = 56
#deconv1
#images_hat, _ = self.conv_n_kernel(x, [5,5,16,1], "deconv1", act=tf.nn.sigmoid)
images_hat = self.conv_transpose(x, kernel1, 'deconv1', strides=[1,1,1,1], act=tf.nn.sigmoid)
# x = 60
return images_hat, feat_map_l1_norm
def layer_wise_forward(self, images):
# conv1
x, kernel1 = self.conv_n_kernel(images, [5,5,1,16], "conv1", strides=[1,1,1,1])
l1_norm = tf.reduce_sum(tf.abs(x))
self._conv1 = x
#l1_deconv1
l1_img_hat = self.conv_transpose(x, kernel1, 'l1_deconv1', strides=[1,1,1,1])
# conv2
x, kernel2 = self.conv_n_kernel(x, [4,4,16,16], 'conv2', strides=[1,2,2,1])
l2_norm = tf.reduce_sum(tf.abs(x))
self._conv2 = x
#l2_deconv2
l2_x = self.conv_transpose(x, kernel2, 'l2_deconv2', strides=[1,2,2,1])
#l2_deconv1
l2_img_hat = self.conv_transpose(l2_x, kernel1, 'l2_deconv1', strides=[1,1,1,1])
# conv3
x, kernel3 = self.conv_n_kernel(x, [5,5,16,256], 'conv3', strides=[1,2,2,1])
l3_norm = tf.reduce_sum(tf.abs(x))
self._conv3 = x
#l3_deconv3
l3_x = self.conv_transpose(x, kernel3, 'l3_deconv3', strides=[1,2,2,1])
#l3_deconv2
l3_x = self.conv_transpose(l3_x, kernel2, 'l3_deconv2', strides=[1,2,2,1])
#l3_deconv1
l3_img_hat = self.conv_transpose(l3_x, kernel1, 'l3_deconv1', strides=[1,1,1,1])
# conv4
x, kernel4 = self.conv_n_kernel(x, [4,4,256,256], 'conv4', strides=[1,2,2,1])
l4_norm = tf.reduce_sum(tf.abs(x))
self._conv4 = x
#l4_deconv4
l4_x = self.conv_transpose(x, kernel4, 'l4_deconv4', strides=[1,2,2,1])
#l4_deconv3
l4_x = self.conv_transpose(l4_x, kernel3, 'l4_deconv3', strides=[1,2,2,1])
#l4_deconv2
l4_x = self.conv_transpose(l4_x, kernel2, 'l4_deconv2', strides=[1,2,2,1])
#l4_deconv1
l4_img_hat = self.conv_transpose(l4_x, kernel1, 'l4_deconv1', strides=[1,1,1,1])
# conv5
x, kernel5 = self.conv_n_kernel(x, [5,5,256,512], 'conv5', strides=[1,1,1,1])
l5_norm = tf.reduce_sum(tf.abs(x))
self._conv5 = x
#l5_deconv5
l5_x = self.conv_transpose(x, kernel5, 'l5_deconv5', strides=[1,1,1,1])
#l5_deconv4
l5_x = self.conv_transpose(l5_x, kernel4, 'l5_deconv4', strides=[1,2,2,1])
#l5_deconv3
l5_x = self.conv_transpose(l5_x, kernel3, 'l5_deconv3', strides=[1,2,2,1])
#l5_deconv2
l5_x = self.conv_transpose(l5_x, kernel2, 'l5_deconv2', strides=[1,2,2,1])
#l5_deconv1
l5_img_hat = self.conv_transpose(l5_x, kernel1, 'l5_deconv1', strides=[1,1,1,1])
return l1_img_hat, l1_norm, l2_img_hat, l2_norm, \
l3_img_hat, l3_norm, l4_img_hat, l4_norm, l5_img_hat, l5_norm
def loss(self, images, images_hat):
loss = tf.reduce_sum( tf.square(images - images_hat) )
#loss = tf.reduce_sum( images_hat * tf.log(images) )
return loss
def optimize(self, loss, var_list=None):
optimizer = tf.train.AdagradOptimizer(0.001)
#optimizer = tf.train.AdamOptimizer(learning_rate=0.001,
# beta1=0.9,
# beta2=0.999,
# epsilon=1e-08)
global_step = tf.Variable(0, name='global_step', trainable=False)
if var_list:
train_op = optimizer.minimize(loss, global_step=global_step, var_list=var_list)
else:
train_op = optimizer.minimize(loss, global_step=global_step)
return train_op
def build_graph(self):
print "conv_ae_char_nopool.py :: build_graph()"
images = tf.placeholder(tf.float32, (BATCH_SIZE, 60, 60, 1))
keep_prob = tf.placeholder(tf.float32)
self._images = images
self._keep_prob = keep_prob
images_drop = tf.nn.dropout(images, keep_prob)
images_hat, l1_loss = self.forward(images_drop)
self._images_hat = images_hat
loss = self.loss(images, images_hat)
self._loss = loss
self._l1_loss = l1_loss
self._train_op = self.optimize(loss+0.002*l1_loss)
tf.global_variables_initializer().run()
if self._log_dir != '':
self._summary_writer = tf.summary.FileWriter(self._log_dir,
self._session.graph)
self._summary_writer.flush()
self._saver = tf.train.Saver()
def build_layer_wise_graph(self):
print "conv_ae_char_nopool.py :: build_layer_wise_graph()"
images = tf.placeholder(tf.float32, (BATCH_SIZE, 60, 60, 1))
self._images = images
self._l1_img_hat, self._l1_norm, self._l2_img_hat, self._l2_norm, \
self._l3_img_hat, self._l3_norm, self._l4_img_hat, self._l4_norm, \
self._l5_img_hat, self._l5_norm = self.layer_wise_forward(images)
self._l1_loss = self.loss(images, self._l1_img_hat)
self._l2_loss = self.loss(images, self._l2_img_hat)
self._l3_loss = self.loss(images, self._l3_img_hat)
self._l4_loss = self.loss(images, self._l4_img_hat)
self._l5_loss = self.loss(images, self._l5_img_hat)
l1_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='conv1')
l1_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l1_deconv1')
l2_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='conv2')
l2_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l2_deconv2')
l2_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l2_deconv1')
l3_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='conv3')
l3_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l3_deconv3')
l3_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l3_deconv2')
l3_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l3_deconv1')
l4_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='conv4')
l4_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l4_deconv4')
l4_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l4_deconv3')
l4_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l4_deconv2')
l4_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l4_deconv1')
l5_var_list = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='conv5')
l5_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l5_deconv5')
l5_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l5_deconv4')
l5_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l5_deconv3')
l5_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l5_deconv2')
l5_var_list += tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='l5_deconv1')
self._train_l1 = self.optimize(self._l1_loss + 0.002*self._l1_norm, var_list=l1_var_list)
self._train_l2 = self.optimize(self._l2_loss + 0.001*self._l2_norm, var_list=l2_var_list)
self._train_l3 = self.optimize(self._l3_loss + 0.01*self._l3_norm, var_list=l3_var_list)
self._train_l4 = self.optimize(self._l4_loss + 0.1*self._l4_norm, var_list=l4_var_list)
self._train_l5 = self.optimize(self._l5_loss + 0.1*self._l5_norm, var_list=l5_var_list)
tf.global_variables_initializer().run()
if self._log_dir != '':
self._summary_writer = tf.summary.FileWriter(self._log_dir,
self._session.graph)
self._summary_writer.flush()
self._saver = tf.train.Saver()
def add_summary(self, tag, value, step):
summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
self._summary_writer.add_summary(summary, step)
self._summary_writer.flush()
def save_model(self, step):
self._saver.save(self._session,
os.path.join(self._log_dir, 'conv_ae_char_nopool.ckpt'),
global_step=step)
def load_model(self, model_path):
self._saver.restore(self._session, model_path)
def train_layer(self, layer_idx, images):
if layer_idx == 0:
_, loss_val, norm = self._session.run([self._train_l1, self._l1_loss, self._l1_norm], {
self._images: images})
elif layer_idx == 1:
_, loss_val, norm = self._session.run([self._train_l2, self._l2_loss, self._l2_norm], {
self._images: images})
elif layer_idx == 2:
_, loss_val, norm = self._session.run([self._train_l3, self._l3_loss, self._l3_norm], {
self._images: images})
elif layer_idx == 3:
_, loss_val, norm = self._session.run([self._train_l4, self._l4_loss, self._l4_norm], {
self._images: images})
elif layer_idx == 4:
_, loss_val, norm = self._session.run([self._train_l5, self._l5_loss, self._l5_norm], {
self._images: images})
return loss_val, norm
def train_batch(self, images, keep_prob):
_, loss_val, l1_norm = self._session.run([self._train_op, self._loss, self._l1_loss], {
self._images: images,
self._keep_prob: keep_prob})
return loss_val, l1_norm
def test_layer(self, layer_idx, images):
if layer_idx == 0:
images_hat = self._session.run(self._l1_img_hat, {self._images: images})
elif layer_idx == 1:
images_hat = self._session.run(self._l2_img_hat, {self._images: images})
elif layer_idx == 2:
images_hat = self._session.run(self._l3_img_hat, {self._images: images})
elif layer_idx == 3:
images_hat = self._session.run(self._l4_img_hat, {self._images: images})
elif layer_idx == 4:
images_hat = self._session.run(self._l5_img_hat, {self._images: images})
return images_hat
def test_batch(self, images):
images_hat = self._session.run(self._images_hat, {
self._images: images,
self._keep_prob: 1})
return images_hat
def get_embs(self, images):
embs_val = self._session.run(self._embs, {
self._images: images,
self._keep_prob: 1})
return embs_val
def get_layers_n_args(self, images):
l1, l2, l3, l4, l5 = self._session.run(
[self._conv1, self._conv2, self._conv3, self._conv4, self._conv5],
{self._images: images})
return l1, l2, l3, l4, l5
def reconstruct_from_layer(self, layer_idx, act):
if layer_idx == 0:
images_hat = self._session.run(self._l1_img_hat, {self._conv1: act})
elif layer_idx == 1:
images_hat = self._session.run(self._l2_img_hat, {self._conv2: act})
elif layer_idx == 2:
images_hat = self._session.run(self._l3_img_hat, {self._conv3: act})
elif layer_idx == 3:
images_hat = self._session.run(self._l4_img_hat, {self._conv4: act})
elif layer_idx == 4:
images_hat = self._session.run(self._l5_img_hat, {self._conv5: act})
return images_hat
def reconstruct_from_embs(self, embs):
images_hat = self._session.run(self._images_hat, {
#self._images: images,
self._embs: embs})
return images_hat
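    # Illustrative round trip through the bottleneck (sketch; `model` and `batch`
    # are assumed to exist and to match the `images` placeholder shape):
    #   embs = model.get_embs(batch)                # encode images to embeddings
    #   recon = model.reconstruct_from_embs(embs)   # decode embeddings back to images
    #   recon_direct = model.test_batch(batch)      # full encode/decode in one call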
| 14,915 | 38.356201 | 108 | py |
battery-historian | battery-historian-master/scripts/historian.py | #!/usr/bin/python
"""Legacy Historian script for analyzing Android bug reports."""
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TO USE: (see also usage() below)
# adb shell dumpsys batterystats --enable full-wake-history (post-KitKat only)
# adb shell dumpsys batterystats --reset
# Optionally start powermonitor logging:
# For example, if using a Monsoon:
# if device/host clocks are not synced, run historian.py -v
# cts/tools/utils/monsoon.py --serialno 2294 --hz 1 --samples 100000 \
# -timestamp | tee monsoon.out
# ...let device run a while...
# stop monsoon.py
# adb bugreport > bugreport.txt
# ./historian.py -p monsoon.out bugreport.txt
import collections
import datetime
import fileinput
import getopt
import re
import StringIO
import subprocess
import sys
import time
POWER_DATA_FILE_TIME_OFFSET = 0 # deal with any clock mismatch.
BLAME_CATEGORY = "wake_lock_in" # category to assign power blame to.
ROWS_TO_SUMMARIZE = ["wake_lock", "running"] # -s: summarize these rows
getopt_debug = 0
getopt_bill_extra_secs = 0
getopt_power_quanta = 15 # slice powermonitor data this many seconds,
# to avoid crashing visualizer
getopt_power_data_file = False
getopt_proc_name = ""
getopt_highlight_category = ""
getopt_show_all_wakelocks = False
getopt_sort_by_power = True
getopt_summarize_pct = -1
getopt_report_filename = ""
getopt_generate_chart_only = False
getopt_disable_chart_drawing = False
def usage():
"""Print usage of the script."""
print "\nUsage: %s [OPTIONS] [FILE]\n" % sys.argv[0]
print " -a: show all wakelocks (don't abbreviate system wakelocks)"
print " -c: disable drawing of chart"
print " -d: debug mode, output debugging info for this program"
print (" -e TIME: extend billing an extra TIME seconds after each\n"
" wakelock, or until the next wakelock is seen. Useful for\n"
" accounting for modem power overhead.")
print " -h: print this message."
print (" -m: generate output that can be embedded in an existing page.\n"
" HTML header and body tags are not outputted.")
print (" -n [CATEGORY=]PROC: output another row containing only processes\n"
" whose name matches uid of PROC in CATEGORY.\n"
" If CATEGORY is not specified, search in wake_lock_in.")
print (" -p FILE: analyze FILE containing power data. Format per\n"
" line: <timestamp in epoch seconds> <amps>")
print (" -q TIME: quantize data on power row in buckets of TIME\n"
" seconds (default %d)" % getopt_power_quanta)
print " -r NAME: report input file name as NAME in HTML."
print (" -s PCT: summarize certain useful rows with additional rows\n"
" showing percent time spent over PCT% in each.")
print " -t: sort power report by wakelock duration instead of charge"
print " -v: synchronize device time before collecting power data"
print "\n"
sys.exit(1)
def parse_time(s, fmt):
"""Parses a human readable duration string into milliseconds.
Takes a human readable duration string like '1d2h3m4s5ms' and returns
the equivalent in milliseconds.
Args:
s: Duration string
fmt: A re object to parse the string
Returns:
A number indicating the duration in milliseconds.
"""
if s == "0": return 0.0
p = re.compile(fmt)
match = p.search(s)
try:
d = match.groupdict()
  except (AttributeError, IndexError):
    # p.search() returns None when s does not match fmt.
    return -1.0
ret = 0.0
if d["day"]: ret += float(d["day"])*60*60*24
if d["hrs"]: ret += float(d["hrs"])*60*60
if d["min"]: ret += float(d["min"])*60
if d["sec"]: ret += float(d["sec"])
if d["ms"]: ret += float(d["ms"])/1000
return ret
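# Illustrative values (not executed): with the forward "+" time format built in
# main() below, parse_time("+1d2h3m4s5ms", fmt) evaluates to 93784.005 seconds,
# and parse_time("0", fmt) short-circuits to 0.0.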
def time_float_to_human(t, show_complete_time):
if show_complete_time:
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(t))
else:
return time.strftime("%H:%M:%S", time.localtime(t))
def abbrev_timestr(s):
"""Chop milliseconds off of a time string, if present."""
arr = s.split("s")
if len(arr) < 3: return "0s"
return arr[0]+"s"
def timestr_to_jsdate(timestr):
return "new Date(%s * 1000)" % timestr
def format_time(delta_time):
"""Return a time string representing time past since initial event."""
if not delta_time:
return str(0)
timestr = "+"
datet = datetime.datetime.utcfromtimestamp(delta_time)
if delta_time > 24 * 60 * 60:
timestr += str(datet.day - 1) + datet.strftime("d%Hh%Mm%Ss")
elif delta_time > 60 * 60:
timestr += datet.strftime("%Hh%Mm%Ss").lstrip("0")
elif delta_time > 60:
timestr += datet.strftime("%Mm%Ss").lstrip("0")
elif delta_time > 1:
timestr += datet.strftime("%Ss").lstrip("0")
ms = datet.microsecond / 1000.0
timestr += "%03dms" % ms
return timestr
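# Illustrative values (not executed): format_time(65.0) returns "+1m05s000ms"
# and format_time(0) returns "0".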
def format_duration(dur_ms):
"""Return a time string representing the duration in human readable format."""
if not dur_ms:
return "0ms"
ms = dur_ms % 1000
dur_ms = (dur_ms - ms) / 1000
secs = dur_ms % 60
dur_ms = (dur_ms - secs) / 60
mins = dur_ms % 60
hrs = (dur_ms - mins) / 60
out = ""
if hrs > 0:
out += "%dh" % hrs
if mins > 0:
out += "%dm" % mins
if secs > 0:
out += "%ds" % secs
if ms > 0 or not out:
out += "%dms" % ms
return out
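# Illustrative values (not executed): format_duration(3723004) returns
# "1h2m3s4ms" and format_duration(0) returns "0ms"; the argument is a duration
# in milliseconds.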
def get_event_category(e):
e = e.lstrip("+-")
earr = e.split("=")
return earr[0]
def get_quoted_region(e):
e = e.split("\"")[1]
return e
def get_after_equal(e):
e = e.split("=")[1]
return e
def get_wifi_suppl_state(e):
try:
e = get_after_equal(e)
return e.split("(")[0]
except IndexError:
return ""
def get_event_subcat(cat, e):
"""Get subcategory of an category from an event string.
Subcategory can be use to distinguish simultaneous entities
within one category. To track possible concurrent instances,
add category name to concurrent_cat. Default is to track
events using only category name.
Args:
cat: Category name
e: Event name
Returns:
A string that is the subcategory of the event. Returns
the substring after category name if not empty and cat
is one of the categories tracked by concurrent_cat.
Default subcategory is the empty string.
"""
concurrent_cat = {"wake_lock_in", "sync", "top", "job", "conn"}
if cat in concurrent_cat:
try:
return get_after_equal(e)
except IndexError:
pass
return ""
def get_proc_pair(e):
if ":" in e:
proc_pair = get_after_equal(e)
return proc_pair.split(":", 1)
else:
return ("", "")
def as_to_mah(a):
return a * 1000 / 60 / 60
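# Illustrative value (not executed): as_to_mah(3.6) == 1.0, since one
# milliamp-hour equals 3.6 ampere-seconds (coulombs).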
def apply_fn_over_range(fn, start_time, end_time, arglist):
"""Apply a given function per second quanta over a time range.
Args:
fn: The function to apply
start_time: The starting time of the whole duration
end_time: The ending time of the whole duration
arglist: Additional argument list
Returns:
A list of results generated by applying the function
over the time range.
"""
results = []
cursor = start_time
while cursor < end_time:
cursor_int = int(cursor)
next_cursor = float(cursor_int + 1)
if next_cursor > end_time: next_cursor = end_time
time_this_quanta = next_cursor - cursor
results.append(fn(cursor_int, time_this_quanta, *arglist))
cursor = next_cursor
return results
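# Illustrative call (not executed): apply_fn_over_range(fn, 10.5, 12.2, [d])
# invokes fn(10, 0.5, d), fn(11, 1.0, d) and fn(12, 0.2, d): the range is cut at
# whole-second boundaries and the partial quanta at both ends keep their
# fractional durations.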
def space_escape(match):
value = match.group()
p = re.compile(r"\s+")
return p.sub("_", value)
def parse_reset_time(line):
line = line.strip()
line = line.split("RESET:TIME: ", 1)[1]
st = time.strptime(line, "%Y-%m-%d-%H-%M-%S")
return time.mktime(st)
def is_file_legacy_mode(input_file):
"""Autodetect legacy (K and earlier) format."""
detection_on = False
for line in fileinput.input(input_file):
if not detection_on and line.startswith("Battery History"):
detection_on = True
if not detection_on:
continue
split_line = line.split()
if not split_line:
continue
line_time = split_line[0]
if "+" not in line_time and "-" not in line_time:
continue
fileinput.close()
return line_time[0] == "-"
return False
def is_emit_event(e):
return e[0] != "+"
def is_standalone_event(e):
return not (e[0] == "+" or e[0] == "-")
def is_proc_event(e):
return e.startswith("+proc")
def autovivify():
"""Returns a multidimensional dict."""
return collections.defaultdict(autovivify)
def swap(swap_list, first, second):
swap_list[first], swap_list[second] = swap_list[second], swap_list[first]
def add_emit_event(emit_dict, cat, name, start, end):
"""Saves a new event into the dictionary that will be visualized."""
newevent = (name, int(start), int(end))
if end < start:
print "BUG: end time before start time: %s %s %s<br>" % (name,
start,
end)
else:
if getopt_debug:
print "Stored emitted event: %s<br>" % str(newevent)
if cat in emit_dict:
emit_dict[cat].append(newevent)
else:
emit_dict[cat] = [newevent]
def sync_time():
subprocess.call(["adb", "root"])
subprocess.call(["sleep", "3"])
start_time = int(time.time())
while int(time.time()) == start_time:
pass
curr_time = time.strftime("%Y%m%d.%H%M%S", time.localtime())
subprocess.call(["adb", "shell", "date", "-s", curr_time])
sys.exit(0)
def parse_search_option(cmd):
global getopt_proc_name, getopt_highlight_category
if "=" in cmd:
getopt_highlight_category = cmd.split("=")[0]
getopt_proc_name = cmd.split("=")[1]
else:
getopt_highlight_category = "wake_lock_in"
getopt_proc_name = cmd
def parse_argv():
"""Parse argument and set up globals."""
global getopt_debug, getopt_bill_extra_secs, getopt_power_quanta
global getopt_sort_by_power, getopt_power_data_file
global getopt_summarize_pct, getopt_show_all_wakelocks
global getopt_report_filename
global getopt_generate_chart_only
global getopt_disable_chart_drawing
try:
opts, argv_rest = getopt.getopt(sys.argv[1:],
"acde:hmn:p:q:r:s:tv", ["help"])
except getopt.GetoptError as err:
print "<pre>\n"
print str(err)
usage()
try:
for o, a in opts:
if o == "-a": getopt_show_all_wakelocks = True
if o == "-c": getopt_disable_chart_drawing = True
if o == "-d": getopt_debug = True
if o == "-e": getopt_bill_extra_secs = int(a)
if o in ("-h", "--help"): usage()
if o == "-m": getopt_generate_chart_only = True
if o == "-n": parse_search_option(a)
if o == "-p": getopt_power_data_file = a
if o == "-q": getopt_power_quanta = int(a)
if o == "-r": getopt_report_filename = str(a)
if o == "-s": getopt_summarize_pct = int(a)
if o == "-t": getopt_sort_by_power = False
if o == "-v": sync_time()
except ValueError as err:
print str(err)
usage()
if not argv_rest:
usage()
return argv_rest
class Printer(object):
"""Organize and render the visualizer."""
_default_color = "#4070cf"
# -n option is represented by "highlight". All the other names specified
# in _print_setting are the same as category names.
_print_setting = [
("battery_level", "#4070cf"),
("plugged", "#2e8b57"),
("screen", "#cbb69d"),
("top", "#dc3912"),
("sync", "#9900aa"),
("wake_lock_pct", "#6fae11"),
("wake_lock", "#cbb69d"),
("highlight", "#4070cf"),
("running_pct", "#6fae11"),
("running", "#990099"),
("wake_reason", "#b82e2e"),
("wake_lock_in", "#ff33cc"),
("job", "#cbb69d"),
("mobile_radio", "#aa0000"),
("data_conn", "#4070cf"),
("conn", "#ff6a19"),
("activepower", "#dd4477"),
("device_idle", "#37ff64"),
("motion", "#4070cf"),
("active", "#119fc8"),
("power_save", "#ff2222"),
("wifi", "#119fc8"),
("wifi_full_lock", "#888888"),
("wifi_scan", "#888888"),
("wifi_multicast", "#888888"),
("wifi_radio", "#888888"),
("wifi_running", "#109618"),
("wifi_suppl", "#119fc8"),
("wifi_signal_strength", "#9900aa"),
("phone_signal_strength", "#dc3912"),
("phone_scanning", "#dda0dd"),
("audio", "#990099"),
("phone_in_call", "#cbb69d"),
("bluetooth", "#cbb69d"),
("phone_state", "#dc3912"),
("signal_strength", "#119fc8"),
("video", "#cbb69d"),
("flashlight", "#cbb69d"),
("low_power", "#109618"),
("fg", "#dda0dd"),
("gps", "#ff9900"),
("reboot", "#ddff77"),
("power", "#ff2222"),
("status", "#9ac658"),
("health", "#888888"),
("plug", "#888888"),
("charging", "#888888"),
("pkginst", "#cbb69d"),
("pkgunin", "#cbb69d")]
_ignore_categories = ["user", "userfg"]
def __init__(self):
self._print_setting_cats = set()
for cat in self._print_setting:
self._print_setting_cats.add(cat[0])
def combine_wifi_states(self, event_list, start_time):
"""Discard intermediate states and combine events chronologically."""
tracking_states = ["disconn", "completed", "disabled", "scanning"]
selected_event_list = []
for event in event_list:
state = get_wifi_suppl_state(event[0])
if state in tracking_states:
selected_event_list.append(event)
if len(selected_event_list) <= 1:
return set(selected_event_list)
event_name = "wifi_suppl="
for e in selected_event_list:
state = get_wifi_suppl_state(e[0])
event_name += (state + "->")
event_name = event_name[:-2]
sample_event = selected_event_list[0][0]
timestr_start = sample_event.find("(")
event_name += sample_event[timestr_start:]
return set([(event_name, start_time, start_time)])
def aggregate_events(self, emit_dict):
"""Combine events with the same name occurring during the same second.
Aggregate events to keep visualization from being so noisy.
Args:
emit_dict: A dict containing events.
Returns:
A dict with repeated events happening within one sec removed.
"""
output_dict = {}
for cat, events in emit_dict.iteritems():
output_dict[cat] = []
start_dict = {}
for event in events:
start_time = event[1]
if start_time in start_dict:
start_dict[start_time].append(event)
else:
start_dict[start_time] = [event]
for start_time, event_list in start_dict.iteritems():
if cat == "wifi_suppl":
event_set = self.combine_wifi_states(event_list, start_time)
else:
event_set = set(event_list) # uniqify
for event in event_set:
output_dict[cat].append(event)
return output_dict
def print_emit_dict(self, cat, emit_dict):
for e in emit_dict[cat]:
if cat == "wake_lock":
cat_name = "wake_lock *"
else:
cat_name = cat
print "['%s', '%s', %s, %s]," % (cat_name, e[0],
timestr_to_jsdate(e[1]),
timestr_to_jsdate(e[2]))
def print_highlight_dict(self, highlight_dict):
catname = getopt_proc_name + " " + getopt_highlight_category
if getopt_highlight_category in highlight_dict:
for e in highlight_dict[getopt_highlight_category]:
print "['%s', '%s', %s, %s]," % (catname, e[0],
timestr_to_jsdate(e[1]),
timestr_to_jsdate(e[2]))
def print_events(self, emit_dict, highlight_dict):
"""print category data in the order of _print_setting.
Args:
emit_dict: Major event dict.
highlight_dict: Additional event information for -n option.
"""
emit_dict = self.aggregate_events(emit_dict)
highlight_dict = self.aggregate_events(highlight_dict)
cat_count = 0
for i in range(0, len(self._print_setting)):
cat = self._print_setting[i][0]
if cat in emit_dict:
self.print_emit_dict(cat, emit_dict)
cat_count += 1
if cat == "highlight":
self.print_highlight_dict(highlight_dict)
# handle category that is not included in _print_setting
if cat_count < len(emit_dict):
for cat in emit_dict:
if (cat not in self._print_setting_cats and
cat not in self._ignore_categories):
sys.stderr.write("event category not found: %s\n" % cat)
self.print_emit_dict(cat, emit_dict)
def print_chart_options(self, emit_dict, highlight_dict, width, height):
"""Print Options provided to the visualizater."""
color_string = ""
cat_count = 0
# construct color string following the order of _print_setting
for i in range(0, len(self._print_setting)):
cat = self._print_setting[i][0]
if cat in emit_dict:
color_string += "'%s', " % self._print_setting[i][1]
cat_count += 1
if cat == "highlight" and highlight_dict:
color_string += "'%s', " % self._print_setting[i][1]
cat_count += 1
if cat_count % 4 == 0:
color_string += "\n\t"
# handle category that is not included in _print_setting
if cat_count < len(emit_dict):
for cat in emit_dict:
if cat not in self._print_setting_cats:
color_string += "'%s', " % self._default_color
print("\toptions = {\n"
"\ttimeline: { colorByRowLabel: true},\n"
"\t'width': %s,\n"
"\t'height': %s, \n"
"\tcolors: [%s]\n"
"\t};" % (width, height, color_string))
class LegacyFormatConverter(object):
"""Convert Kit-Kat bugreport format to latest format support."""
_TIME_FORMAT = (r"\-((?P<day>\d+)d)?((?P<hrs>\d+)h)?((?P<min>\d+)m)?"
r"((?P<sec>\d+)s)?((?P<ms>\d+)ms)?$")
def __init__(self):
self._end_time = 0
self._total_duration = 0
def parse_end_time(self, line):
line = line.strip()
try:
line = line.split("dumpstate: ", 1)[1]
st = time.strptime(line, "%Y-%m-%d %H:%M:%S")
self._end_time = time.mktime(st)
except IndexError:
pass
def get_timestr(self, line_time):
"""Convert backward time string in Kit-Kat to forward time string."""
delta = self._total_duration - parse_time(line_time, self._TIME_FORMAT)
datet = datetime.datetime.utcfromtimestamp(delta)
if delta == 0:
return "0"
timestr = "+"
if delta > 24 * 60 * 60:
timestr += str(datet.day - 1) + datet.strftime("d%Hh%Mm%Ss")
elif delta > 60 * 60:
timestr += datet.strftime("%Hh%Mm%Ss").lstrip("0")
elif delta > 60:
timestr += datet.strftime("%Mm%Ss").lstrip("0")
elif delta > 1:
timestr += datet.strftime("%Ss").lstrip("0")
ms = datet.microsecond / 1000.0
timestr += "%03dms" % ms
return timestr
def get_header(self, line_time):
self._total_duration = parse_time(line_time, self._TIME_FORMAT)
start_time = self._end_time - self._total_duration
header = "Battery History\n"
header += "RESET:TIME: %s\n" % time.strftime("%Y-%m-%d-%H-%M-%S",
time.localtime(start_time))
return header
def convert(self, input_file):
"""Convert legacy format file into string that fits latest format."""
output_string = ""
history_start = False
for line in fileinput.input(input_file):
if "dumpstate:" in line:
self.parse_end_time(line)
if self._end_time:
break
fileinput.close()
if not self._end_time:
print "cannot find end time"
sys.exit(1)
for line in fileinput.input(input_file):
if not history_start and line.startswith("Battery History"):
history_start = True
continue
elif not history_start:
continue
if line.isspace(): break
line = line.strip()
arr = line.split()
if len(arr) < 4: continue
p = re.compile('"[^"]+"')
line = p.sub(space_escape, line)
split_line = line.split()
(line_time, line_battery_level, line_state) = split_line[:3]
line_events = split_line[3:]
if not self._total_duration:
output_string += self.get_header(line_time)
timestr = self.get_timestr(line_time)
event_string = " ".join(line_events)
newline = "%s _ %s %s %s\n" % (timestr, line_battery_level,
line_state, event_string)
output_string += newline
fileinput.close()
return output_string
class BHEmitter(object):
"""Process battery history section from bugreport.txt."""
_omit_cats = ["temp", "volt", "brightness", "sensor", "proc"]
# categories that have "+" and "-" events. If we see an event in these
# categories starting at time 0 without +/- sign, treat it as a "+" event.
_transitional_cats = ["plugged", "running", "wake_lock", "gps", "sensor",
"phone_in_call", "mobile_radio", "phone_scanning",
"proc", "fg", "top", "sync", "wifi", "wifi_full_lock",
"wifi_scan", "wifi_multicast", "wifi_running", "conn",
"bluetooth", "audio", "video", "wake_lock_in", "job",
"device_idle", "wifi_radio"]
_in_progress_dict = autovivify() # events that are currently in progress
_proc_dict = {} # mapping of "proc" uid to human-readable name
_search_proc_id = -1 # proc id of the getopt_proc_name
match_list = [] # list of package names that match search string
cat_list = [] # BLAME_CATEGORY summary data
def store_event(self, cat, subcat, event_str, event_time, timestr):
self._in_progress_dict[cat][subcat] = (event_str, event_time, timestr)
if getopt_debug:
print "store_event: %s in %s/%s<br>" % (event_str, cat, subcat)
def retrieve_event(self, cat, subcat):
"""Pop event from in-progress event dict if match exists."""
if cat in self._in_progress_dict:
try:
result = self._in_progress_dict[cat].pop(subcat)
if getopt_debug:
print "retrieve_event: found %s/%s<br>" % (cat, subcat)
return (True, result)
except KeyError:
pass
if getopt_debug:
print "retrieve_event: no match for event %s/%s<br>" % (cat, subcat)
return (False, (None, None, None))
def store_proc(self, e, highlight_dict):
proc_pair = get_after_equal(e)
(proc_id, proc_name) = proc_pair.split(":", 1)
self._proc_dict[proc_id] = proc_name # may overwrite
if getopt_proc_name and getopt_proc_name in proc_name and proc_id:
if proc_pair not in self.match_list:
self.match_list.append(proc_pair)
if self._search_proc_id == -1:
self._search_proc_id = proc_id
elif self._search_proc_id != proc_id:
if (proc_name[1:-1] == getopt_proc_name or
proc_name == getopt_proc_name):
# reinitialize
highlight_dict.clear()
# replace default match with complete match
self._search_proc_id = proc_id
swap(self.match_list, 0, -1)
def procs_to_str(self):
l = sorted(self._proc_dict.items(), key=lambda x: x[0])
result = ""
for i in l:
result += "%s: %s\n" % (i[0], i[1])
return result
def get_proc_name(self, proc_id):
if proc_id in self._proc_dict:
return self._proc_dict[proc_id]
else:
return ""
def annotate_event_name(self, name):
"""Modifies the event name to make it more understandable."""
if "*alarm*" in name:
try:
proc_pair = get_after_equal(name)
except IndexError:
return name
proc_id = proc_pair.split(":", 1)[0]
name = name + ":" + self.get_proc_name(proc_id)
if getopt_debug:
print "annotate_event_name: %s" % name
return name
def abbreviate_event_name(self, name):
"""Abbreviate location-related event name."""
if not getopt_show_all_wakelocks:
if "wake_lock" in name:
if "LocationManagerService" in name or "NlpWakeLock" in name:
return "LOCATION"
if "UlrDispatching" in name:
return "LOCATION"
if "GCoreFlp" in name or "GeofencerStateMachine" in name:
return "LOCATION"
if "NlpCollectorWakeLock" in name or "WAKEUP_LOCATOR" in name:
return "LOCATION"
if "GCM" in name or "C2DM" in name:
return "GCM"
return name
def process_wakelock_event_name(self, start_name, start_id, end_name, end_id):
start_name = self.process_event_name(start_name)
end_name = self.process_event_name(end_name)
event_name = "first=%s:%s, last=%s:%s" % (start_id, start_name,
end_id, end_name)
return event_name
def process_event_timestr(self, start_timestr, end_timestr):
return "(%s-%s)" % (abbrev_timestr(start_timestr),
abbrev_timestr(end_timestr))
def process_event_name(self, event_name):
event_name = self.annotate_event_name(event_name)
event_name = self.abbreviate_event_name(event_name)
return event_name.replace("'", r"\'")
def track_event_parallelism_fn(self, start_time, time_this_quanta, time_dict):
if start_time in time_dict:
time_dict[start_time] += time_this_quanta
else:
time_dict[start_time] = time_this_quanta
if getopt_debug:
print "time_dict[%d] now %f added %f" % (start_time,
time_dict[start_time],
time_this_quanta)
# track total amount of event time held per second quanta
def track_event_parallelism(self, start_time, end_time, time_dict):
apply_fn_over_range(self.track_event_parallelism_fn,
start_time, end_time, [time_dict])
def emit_event(self, cat, event_name, start_time, start_timestr,
end_event_name, end_time, end_timestr,
emit_dict, time_dict, highlight_dict):
"""Saves an event to be later visualized."""
(start_pid, start_pname) = get_proc_pair(event_name)
(end_pid, end_pname) = get_proc_pair(end_event_name)
if cat == "wake_lock" and end_pname and end_pname != start_pname:
short_event_name = self.process_wakelock_event_name(
start_pname, start_pid, end_pname, end_pid)
else:
short_event_name = self.process_event_name(event_name)
event_name = short_event_name + self.process_event_timestr(start_timestr,
end_timestr)
if getopt_highlight_category == cat:
if start_pid == self._search_proc_id or end_pid == self._search_proc_id:
add_emit_event(highlight_dict, cat,
event_name, start_time, end_time)
if cat == BLAME_CATEGORY:
self.cat_list.append((short_event_name, start_time, end_time))
end_time += getopt_bill_extra_secs
self.track_event_parallelism(start_time, end_time, time_dict)
if end_time - start_time < 1:
# HACK: visualizer library doesn't always render sub-second events
end_time += 1
add_emit_event(emit_dict, cat, event_name, start_time, end_time)
def handle_event(self, event_time, time_str, event_str,
emit_dict, time_dict, highlight_dict):
"""Handle an individual event.
Args:
event_time: Event time
time_str: Event time as string
event_str: Event string
emit_dict: A dict tracking events to draw in the timeline, by row
time_dict: A dict tracking BLAME_CATEGORY duration, by seconds
highlight_dict: A separate event dict for -n option
"""
if getopt_debug:
print "<p>handle_event: %s at %s<br>" % (event_str, time_str)
cat = get_event_category(event_str)
subcat = get_event_subcat(cat, event_str)
# events already in progress are treated as starting at time 0
if (time_str == "0" and is_standalone_event(event_str)
and cat in self._transitional_cats):
event_str = "+" + event_str
if is_proc_event(event_str): self.store_proc(event_str, highlight_dict)
if cat in self._omit_cats: return
if not is_emit_event(event_str):
# "+" event, save it until we find a matching "-"
self.store_event(cat, subcat, event_str, event_time, time_str)
return
else:
# "-" or standalone event such as "wake_reason"
start_time = 0.0
(found, event) = self.retrieve_event(cat, subcat)
if found:
(event_name, start_time, start_timestr) = event
else:
event_name = event_str
start_time = event_time
start_timestr = time_str
# Events that were still going on at the time of reboot
# should be marked as ending at the time of reboot.
if event_str == "reboot":
self.emit_remaining_events(event_time, time_str, emit_dict,
time_dict, highlight_dict)
self.emit_event(cat, event_name, start_time, start_timestr,
event_str, event_time, time_str,
emit_dict, time_dict, highlight_dict)
def generate_summary_row(self, row_to_summarize, emit_dict, start_time,
end_time):
"""Generate additional data row showing % time covered by another row."""
summarize_quanta = 60
row_name = row_to_summarize + "_pct"
if row_to_summarize not in emit_dict: return
summarize_list = emit_dict[row_to_summarize]
seconds_dict = {}
# Generate dict of seconds where the row to summarize is seen.
for i in summarize_list:
self.track_event_parallelism(i[1], i[2], seconds_dict)
# Traverse entire range of time we care about and generate % events.
for summary_start_time in range(int(start_time), int(end_time),
summarize_quanta):
summary_end_time = summary_start_time + summarize_quanta
found_ctr = 0
for second_cursor in range(summary_start_time, summary_end_time):
if second_cursor in seconds_dict:
found_ctr += 1
if found_ctr:
pct = int(found_ctr * 100 / summarize_quanta)
if pct > getopt_summarize_pct:
add_emit_event(emit_dict, row_name, "%s=%d" % (row_name, pct),
summary_start_time, summary_end_time)
def generate_summary_rows(self, emit_dict, start_time, end_time):
if getopt_summarize_pct < 0:
return
for i in ROWS_TO_SUMMARIZE:
self.generate_summary_row(i, emit_dict, start_time, end_time)
def emit_remaining_events(self, end_time, end_timestr, emit_dict, time_dict,
highlight_dict):
for cat in self._in_progress_dict:
for subcat in self._in_progress_dict[cat]:
(event_name, s_time, s_timestr) = self._in_progress_dict[cat][subcat]
self.emit_event(cat, event_name, s_time, s_timestr,
event_name, end_time, end_timestr,
emit_dict, time_dict, highlight_dict)
class BlameSynopsis(object):
"""Summary data of BLAME_CATEGORY instance used for power accounting."""
def __init__(self):
self.name = ""
self.mah = 0
self.timestr = ""
self._duration_list = []
def add(self, name, duration, mah, t):
self.name = name
self._duration_list.append(duration)
self.mah += mah
if not self.timestr:
self.timestr = time_float_to_human(t, False)
def get_count(self):
return len(self._duration_list)
def get_median_duration(self):
return sorted(self._duration_list)[int(self.get_count() / 2)]
def get_total_duration(self):
return sum(self._duration_list)
def to_str(self, total_mah, show_power):
"""Returns a summary string."""
if total_mah:
pct = self.mah * 100 / total_mah
else:
pct = 0
avg = self.get_total_duration() / self.get_count()
ret = ""
if show_power:
ret += "%.3f mAh (%.1f%%), " % (self.mah, pct)
ret += "%3s events, " % str(self.get_count())
ret += "%6.3fs total " % self.get_total_duration()
ret += "%6.3fs avg " % avg
ret += "%6.3fs median: " % self.get_median_duration()
ret += self.name
ret += " (first at %s)" % self.timestr
return ret
class PowerEmitter(object):
"""Give power accounting and bill to wake lock."""
_total_amps = 0
_total_top_amps = 0
_line_ctr = 0
_TOP_THRESH = .01
_quanta_amps = 0
_start_secs = 0
_power_dict = {}
_synopsis_dict = {}
def __init__(self, cat_list):
self._cat_list = cat_list
def get_range_power_fn(self, start_time, time_this_quanta, time_dict):
"""Assign proportional share of blame.
During any second, this event might have been held for
less than the second, and others might have been held during
that time. Here we try to assign the proportional share of the
blame.
Args:
start_time: Starting time of this quanta
time_this_quanta: Duration of this quanta
time_dict: A dict tracking total time at different starting time
Returns:
A proportional share of blame for the quanta.
"""
if start_time in self._power_dict:
total_time_held = time_dict[start_time]
multiplier = time_this_quanta / total_time_held
result = self._power_dict[start_time] * multiplier
if getopt_debug:
print("get_range_power: distance %f total time %f "
"base power %f, multiplier %f<br>" %
(time_this_quanta, total_time_held,
self._power_dict[start_time], multiplier))
assert multiplier <= 1.0
else:
if getopt_debug:
print "get_range_power: no power data available"
result = 0.0
return result
def get_range_power(self, start, end, time_dict):
power_results = apply_fn_over_range(self.get_range_power_fn,
start, end, [time_dict])
result = 0.0
for i in power_results:
result += i
return result
def bill(self, time_dict):
for _, e in enumerate(self._cat_list):
(event_name, start_time, end_time) = e
if event_name in self._synopsis_dict:
sd = self._synopsis_dict[event_name]
else:
sd = BlameSynopsis()
amps = self.get_range_power(start_time,
end_time + getopt_bill_extra_secs,
time_dict)
mah = as_to_mah(amps)
sd.add(event_name, end_time - start_time, mah, start_time)
if getopt_debug:
print "billed range %f %f at %fAs to %s<br>" % (start_time, end_time,
amps, event_name)
self._synopsis_dict[event_name] = sd
def handle_line(self, secs, amps, emit_dict):
"""Handle a power data file line."""
self._line_ctr += 1
if not self._start_secs:
self._start_secs = secs
self._quanta_amps += amps
self._total_amps += amps
self._power_dict[secs] = amps
if secs % getopt_power_quanta:
return
avg = self._quanta_amps / getopt_power_quanta
event_name = "%.3f As (%.3f A avg)" % (self._quanta_amps, avg)
add_emit_event(emit_dict, "power", event_name, self._start_secs, secs)
if self._quanta_amps > self._TOP_THRESH * getopt_power_quanta:
self._total_top_amps += self._quanta_amps
add_emit_event(emit_dict, "activepower", event_name,
self._start_secs, secs)
self._quanta_amps = 0
self._start_secs = secs
def report(self):
"""Report bill of BLAME_CATEGORY."""
mah = as_to_mah(self._total_amps)
report_power = self._line_ctr
if report_power:
avg_ma = self._total_amps/self._line_ctr
print "<p>Total power: %.3f mAh, avg %.3f" % (mah, avg_ma)
top_mah = as_to_mah(self._total_top_amps)
print ("<br>Total power above awake "
"threshold (%.1fmA): %.3f mAh %.3f As" % (self._TOP_THRESH * 1000,
top_mah,
self._total_top_amps))
print "<br>%d samples, %d min<p>" % (self._line_ctr, self._line_ctr / 60)
if report_power and getopt_bill_extra_secs:
print("<b>Power seen during each history event, including %d "
"seconds after each event:" % getopt_bill_extra_secs)
elif report_power:
print "<b>Power seen during each history event:"
else:
print "<b>Event summary:"
print "</b><br><pre>"
report_list = []
total_mah = 0.0
total_count = 0
for _, v in self._synopsis_dict.iteritems():
total_mah += v.mah
total_count += v.get_count()
if getopt_sort_by_power and report_power:
sort_term = v.mah
else:
sort_term = v.get_total_duration()
report_list.append((sort_term, v.to_str(mah, report_power)))
report_list.sort(key=lambda tup: tup[0], reverse=True)
for i in report_list:
print i[1]
print "total: %.3f mAh, %d events" % (total_mah, total_count)
print "</pre>\n"
def adjust_reboot_time(line, event_time):
# Line delta time is not reset after reboot, but wall time will
# be printed after reboot finishes. This function returns how much
# we are off and actual reboot event time.
line = line.strip()
line = line.split("TIME: ", 1)[1]
st = time.strptime(line, "%Y-%m-%d-%H-%M-%S")
wall_time = time.mktime(st)
return wall_time - event_time, wall_time
def get_app_id(uid):
"""Returns the app ID from a string.
Reverses and uses the methods defined in UserHandle.java to get
only the app ID.
Args:
uid: a string representing the uid printed in the history output
Returns:
An integer representing the specific app ID.
"""
abr_uid_re = re.compile(r"u(?P<userId>\d+)(?P<aidType>[ias])(?P<appId>\d+)")
if not uid:
return 0
if uid.isdigit():
# 100000 is the range of uids allocated for a user.
return int(uid) % 100000
if abr_uid_re.match(uid):
match = abr_uid_re.search(uid)
try:
d = match.groupdict()
if d["aidType"] == "i": # first isolated uid
return int(d["appId"]) + 99000
if d["aidType"] == "a": # first application uid
return int(d["appId"]) + 10000
return int(d["appId"]) # app id wasn't modified
except IndexError:
sys.stderr.write("Abbreviated app UID didn't match properly")
return uid
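# Illustrative values (not executed): get_app_id("1010005") == 10005 (uid modulo
# the per-user range of 100000) and get_app_id("u0a123") == 10123 (abbreviated
# appId plus the first application uid 10000); a string that matches neither
# form is returned unchanged.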
usr_time = "usrTime"
sys_time = "sysTime"
# A map of app uid to their total CPU usage in terms of user
# and system time (in ms).
app_cpu_usage = {}
def save_app_cpu_usage(uid, usr_cpu_time, sys_cpu_time):
uid = get_app_id(uid)
if uid in app_cpu_usage:
app_cpu_usage[uid][usr_time] += usr_cpu_time
app_cpu_usage[uid][sys_time] += sys_cpu_time
else:
app_cpu_usage[uid] = {usr_time: usr_cpu_time, sys_time: sys_cpu_time}
# Constants defined in android.net.ConnectivityManager
conn_constants = {
"0": "TYPE_MOBILE",
"1": "TYPE_WIFI",
"2": "TYPE_MOBILE_MMS",
"3": "TYPE_MOBILE_SUPL",
"4": "TYPE_MOBILE_DUN",
"5": "TYPE_MOBILE_HIPRI",
"6": "TYPE_WIMAX",
"7": "TYPE_BLUETOOTH",
"8": "TYPE_DUMMY",
"9": "TYPE_ETHERNET",
"17": "TYPE_VPN",
}
def main():
details_re = re.compile(r"^Details:\scpu=\d+u\+\d+s\s*(\((?P<appCpu>.*)\))?")
app_cpu_usage_re = re.compile(
r"(?P<uid>\S+)=(?P<userTime>\d+)u\+(?P<sysTime>\d+)s")
proc_stat_re = re.compile((r"^/proc/stat=(?P<usrTime>-?\d+)\s+usr,\s+"
r"(?P<sysTime>-?\d+)\s+sys,\s+"
r"(?P<ioTime>-?\d+)\s+io,\s+"
r"(?P<irqTime>-?\d+)\s+irq,\s+"
r"(?P<sirqTime>-?\d+)\s+sirq,\s+"
r"(?P<idleTime>-?\d+)\s+idle.*")
)
data_start_time = 0.0
data_stop_time = 0
data_stop_timestr = ""
on_mode = False
time_offset = 0.0
overflowed = False
reboot = False
prev_battery_level = -1
bhemitter = BHEmitter()
emit_dict = {} # maps event categories to events
time_dict = {} # total event time held per second
highlight_dict = {} # search result for -n option
is_first_data_line = True
is_dumpsys_format = False
argv_remainder = parse_argv()
input_file = argv_remainder[0]
legacy_mode = is_file_legacy_mode(input_file)
# A map of /proc/stat names to total times (in ms).
proc_stat_summary = {
"usr": 0,
"sys": 0,
"io": 0,
"irq": 0,
"sirq": 0,
"idle": 0,
}
if legacy_mode:
input_string = LegacyFormatConverter().convert(input_file)
input_file = StringIO.StringIO(input_string)
else:
input_file = open(input_file, "r")
while True:
line = input_file.readline()
if not line: break
if not on_mode and line.startswith("Battery History"):
on_mode = True
continue
elif not on_mode:
continue
if line.isspace(): break
line = line.strip()
if "RESET:TIME: " in line:
data_start_time = parse_reset_time(line)
continue
if "OVERFLOW" in line:
overflowed = True
break
if "START" in line:
reboot = True
continue
if "TIME: " in line:
continue
# escape spaces within quoted regions
p = re.compile('"[^"]+"')
line = p.sub(space_escape, line)
if details_re.match(line):
match = details_re.search(line)
try:
d = match.groupdict()
if d["appCpu"]:
for app in d["appCpu"].split(", "):
app_match = app_cpu_usage_re.search(app)
try:
a = app_match.groupdict()
save_app_cpu_usage(a["uid"],
int(a["userTime"]), int(a["sysTime"]))
except IndexError:
sys.stderr.write("App CPU usage line didn't match properly")
except IndexError:
sys.stderr.write("Details line didn't match properly")
continue
elif proc_stat_re.match(line):
match = proc_stat_re.search(line)
try:
d = match.groupdict()
if d["usrTime"]:
proc_stat_summary["usr"] += int(d["usrTime"])
if d["sysTime"]:
proc_stat_summary["sys"] += int(d["sysTime"])
if d["ioTime"]:
proc_stat_summary["io"] += int(d["ioTime"])
if d["irqTime"]:
proc_stat_summary["irq"] += int(d["irqTime"])
if d["sirqTime"]:
proc_stat_summary["sirq"] += int(d["sirqTime"])
if d["idleTime"]:
proc_stat_summary["idle"] += int(d["idleTime"])
except IndexError:
sys.stderr.write("proc/stat line didn't match properly")
continue
# pull apart input line by spaces
split_line = line.split()
if len(split_line) < 4: continue
(line_time, _, line_battery_level, fourth_field) = split_line[:4]
# "bugreport" output has an extra hex field vs "dumpsys", detect here.
if is_first_data_line:
is_first_data_line = False
try:
int(fourth_field, 16)
except ValueError:
is_dumpsys_format = True
if is_dumpsys_format:
line_events = split_line[3:]
else:
line_events = split_line[4:]
fmt = (r"\+((?P<day>\d+)d)?((?P<hrs>\d+)h)?((?P<min>\d+)m)?"
r"((?P<sec>\d+)s)?((?P<ms>\d+)ms)?$")
time_delta_s = parse_time(line_time, fmt) + time_offset
if time_delta_s < 0:
print "Warning: time went backwards: %s" % line
continue
event_time = data_start_time + time_delta_s
if reboot and "TIME:" in line:
# adjust offset using wall time
offset, event_time = adjust_reboot_time(line, event_time)
if offset < 0:
print "Warning: time went backwards: %s" % line
continue
time_offset += offset
time_delta_s = event_time - data_start_time
reboot = False
line_events = {"reboot"}
if line_battery_level != prev_battery_level:
# battery_level is not an actual event, it's on every line
if line_battery_level.isdigit():
bhemitter.handle_event(event_time, format_time(time_delta_s),
"battery_level=" + line_battery_level,
emit_dict, time_dict, highlight_dict)
for event in line_events:
# conn events need to be parsed in order to be useful
if event.startswith("conn"):
num, ev = get_after_equal(event).split(":")
if ev == "\"CONNECTED\"":
event = "+conn="
else:
event = "-conn="
if num in conn_constants:
event += conn_constants[num]
else:
event += "UNKNOWN"
bhemitter.handle_event(event_time, format_time(time_delta_s), event,
emit_dict, time_dict, highlight_dict)
prev_battery_level = line_battery_level
data_stop_time = event_time
data_stop_timestr = format_time(time_delta_s)
input_file.close()
if not on_mode:
print "Battery history not present in bugreport."
return
bhemitter.emit_remaining_events(data_stop_time, data_stop_timestr,
emit_dict, time_dict, highlight_dict)
bhemitter.generate_summary_rows(emit_dict, data_start_time,
data_stop_time)
power_emitter = PowerEmitter(bhemitter.cat_list)
if getopt_power_data_file:
for line in fileinput.input(getopt_power_data_file):
data = line.split(" ")
secs = float(data[0]) + POWER_DATA_FILE_TIME_OFFSET
amps = float(data[1])
power_emitter.handle_line(secs, amps, emit_dict)
power_emitter.bill(time_dict)
printer = Printer()
if not getopt_generate_chart_only:
print "<!DOCTYPE html>\n<html><head>\n"
report_filename = argv_remainder[0]
if getopt_report_filename:
report_filename = getopt_report_filename
header = "Battery Historian analysis for %s" % report_filename
print "<title>" + header + "</title>"
if overflowed:
print ('<font size="5" color="red">Warning: History overflowed at %s, '
'many events may be missing.</font>' %
time_float_to_human(data_stop_time, True))
print "<p>" + header + "</p>"
if legacy_mode:
print("<p><b>WARNING:</b> legacy format detected; "
"history information is limited</p>\n")
if not getopt_generate_chart_only:
print """
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.1/jquery.min.js"></script>
<script type="text/javascript" src="https://www.google.com/jsapi?autoload={'modules':[{'name':'visualization','version':'1','packages':['timeline']}]}"></script>
"""
print "<script type=\"text/javascript\">"
if not getopt_disable_chart_drawing:
print "google.setOnLoadCallback(drawChart);\n"
print """
var dataTable;
var chart;
var options;
var default_width = 3000
function drawChart() {
container = document.getElementById('chart');
chart = new google.visualization.Timeline(container);
dataTable = new google.visualization.DataTable();
dataTable.addColumn({ type: 'string', id: 'Position' });
dataTable.addColumn({ type: 'string', id: 'Name' });
dataTable.addColumn({ type: 'date', id: 'Start' });
dataTable.addColumn({ type: 'date', id: 'End' });
dataTable.addRows([
"""
printer.print_events(emit_dict, highlight_dict)
print "]);"
width = 3000 # default width
  height = 3000 # initial height
printer.print_chart_options(emit_dict, highlight_dict, width, height)
print """
//make sure allocate enough vertical space
options['height'] = dataTable.getNumberOfRows() * 40;
chart.draw(dataTable, options);
//get vertical coordinate of scale bar
var svg = document.getElementById('chart').getElementsByTagName('svg')[0];
var label = svg.children[2].children[0];
var y = label.getAttribute('y');
//plus height of scale bar
var chart_div_height = parseInt(y) + 50;
var chart_height = chart_div_height;
//set chart height to exact height
options['height'] = chart_height;
$('#chart').css('height', chart_div_height);
svg.setAttribute('height', chart_height);
var content = $('#chart').children()[0];
$(content).css('height', chart_height);
var inner = $(content).children()[0];
$(inner).css('height', chart_height);
}
function redrawChart() {
var scale = document.getElementById("scale").value;
scale = scale.replace('%', '') / 100
options['width'] = scale * default_width;
chart.draw(dataTable, options);
}
</script>
<style>
#redrawButton{
width:100px;
}
</style>
"""
if not getopt_generate_chart_only:
print "</head>\n<body>\n"
show_complete_time = False
if data_stop_time - data_start_time > 24 * 60 * 60:
show_complete_time = True
start_localtime = time_float_to_human(data_start_time, show_complete_time)
stop_localtime = time_float_to_human(data_stop_time, show_complete_time)
print "<div id=\"chart\">"
if not getopt_generate_chart_only:
print ("<b>WARNING: Visualizer disabled. "
"If you see this message, download the HTML then open it.</b>")
print "</div>"
print("<p><b>WARNING:</b>\n"
"<br>*: wake_lock field only shows the first/last wakelock held \n"
"when the system is awake. For more detail, use wake_lock_in."
"<br>To enable full wakelock reporting (post-KitKat only) : \n"
"<br>adb shell dumpsys batterystats "
"--enable full-wake-history</p>")
if getopt_proc_name:
if len(bhemitter.match_list) > 1:
print("<p><b>WARNING:</b>\n"
"<br>Multiple match found on -n option <b>%s</b>"
"<ul>" % getopt_proc_name)
for match in bhemitter.match_list:
print "<li>%s</li>" % match
print ("</ul>Showing search result for %s</p>"
% bhemitter.match_list[0].split(":", 1)[0])
elif not bhemitter.match_list:
print("<p><b>WARNING:</b>\n"
"<br>No match on -n option <b>%s</b></p>" % getopt_proc_name)
if not highlight_dict:
print ("Search - <b>%s</b> in <b>%s</b> - did not match any event"
% (getopt_proc_name, getopt_highlight_category))
print ("<pre>(Local time %s - %s, %dm elapsed)</pre>"
% (start_localtime, stop_localtime,
(data_stop_time-data_start_time) / 60))
print ("<p>\n"
"Zoom: <input id=\"scale\" type=\"text\" value=\"100%\"></input>"
"<button type=\"button\" id=\"redrawButton\""
"onclick=\"redrawChart()\">redraw</button></p>\n"
"</p>\n")
power_emitter.report()
if app_cpu_usage:
print "<b>App CPU usage:</b><br />"
print "In user time:<br />"
print "<table border=\"1\"><tr><td>UID</td><td>Duration</td></tr>"
for (uid, use) in sorted(app_cpu_usage.items(),
key=lambda x: -x[1][usr_time]):
print "<tr><td>%s</td>" % uid
print "<td>%s</td></tr>" % format_duration(use[usr_time])
print "</table>"
print "<br />In system time:<br />"
print "<table border=\"1\"><tr><td>UID</td><td>Duration</td></tr>"
for (uid, use) in sorted(app_cpu_usage.items(),
key=lambda x: -x[1][sys_time]):
print "<tr><td>%s</td>" % uid
print "<td>%s</td></tr>" % format_duration(use[sys_time])
print "</table>"
print "<br /><b>Proc/stat summary</b><ul>"
print "<li>Total User Time: %s</li>" % format_duration(
proc_stat_summary["usr"])
print "<li>Total System Time: %s</li>" % format_duration(
proc_stat_summary["sys"])
print "<li>Total IO Time: %s</li>" % format_duration(
proc_stat_summary["io"])
print "<li>Total Irq Time: %s</li>" % format_duration(
proc_stat_summary["irq"])
print "<li>Total Soft Irq Time: %s</li>" % format_duration(
proc_stat_summary["sirq"])
print "<li>Total Idle Time: %s</li>" % format_duration(
proc_stat_summary["idle"])
print "</ul>"
print "<pre>Process table:"
print bhemitter.procs_to_str()
print "</pre>\n"
if not getopt_generate_chart_only:
print "</body>\n</html>"
if __name__ == "__main__":
main()
| 52,562 | 31.87242 | 167 | py |
battery-historian | battery-historian-master/scripts/kernel_trace.py | #!/usr/bin/python
"""Historian script for converting the timestamps in kernel trace to UTC.
TO USE:
kernel_trace.py --bugreport=<path to bugreport> --trace=<path to trace file>
--device=<device type hammerhead/shamu/flounder/flounder_lte>
"""
# Copyright 2016 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import csv
import datetime
import getopt
import re
import sys
flag_bugreport = None
flag_trace = None
flag_device = None
def read_dmesg(bugreport, suspend_exit, suspend_enter, device):
"""Extracts the suspend exit/entries times from the bugreport."""
read_first_suspend_entry = True
first_jiffy = 0
first_utc = 0
if device == "flounder" or device == "flounder_lte":
device_suspend_pattern = "(.*)tegra124-pinctrl tegra124-pinctrl:(.*)"
elif device == "shamu" or device == "hammerhead":
device_suspend_pattern = "(.*)Suspending console(.*)"
else:
return (0, 0)
for line in bugreport:
m = re.match(r"(.*)\[(.*)\] PM: suspend ([a-z]+) (.*?) UTC", line)
if m:
if "exit" in m.group(3):
jiffy = float(m.group(2))
utc = m.group(4)
utc = utc[:-3]
utc = datetime.datetime.strptime(utc, "%Y-%m-%d %H:%M:%S.%f")
suspend_exit[jiffy] = utc
elif read_first_suspend_entry and "entry" in m.group(3):
jiffy = float(re.search(r"\[([ 0-9.]+)\]", line).group(1))
utc = re.search("PM: suspend entry (.*) UTC", line).group(1)
first_jiffy = jiffy
utc = utc[:-3]
utc = datetime.datetime.strptime(utc, "%Y-%m-%d %H:%M:%S.%f")
first_utc = utc
read_first_suspend_entry = False
elif re.match(device_suspend_pattern, line):
jiffy = float(re.search(r"\[([ 0-9.]+)\]", line).group(1))
suspend_enter.append(jiffy)
return (first_jiffy, first_utc)
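# The dmesg lines this parser expects look roughly like the following (the
# timestamps and jiffy values are made up, shown only to document the regexes
# above):
#   [  123.456789] PM: suspend entry 2015-06-01 10:20:30.123456789 UTC
#   [  125.000000] PM: suspend exit 2015-06-01 10:21:00.987654321 UTC
#   [  124.000000] Suspending console(s) (use no_console_suspend to debug)
# The last three digits of the kernel timestamp are dropped (utc[:-3]) so the
# fractional part fits strptime's %f field.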
def convert_timestamps(trace_file, file_handle, time_dict, first_jiffy,
first_utc):
"""Converts all the valid jiffies to UTC time in the trace file."""
line_number = 0
trace_start = 0
keys = sorted(time_dict)
# Find the point where the stats for all the cores start.
for row in trace_file:
if len(row) > 4 and ("buffer" in row[3]) and ("started" in row[4]):
trace_start = line_number
line_number += 1
file_handle.seek(0)
line_number = 0
curr_jiffy = keys[0]
next_jiffy = keys[1]
index = 1
for row in trace_file:
# Skip trace rows which contain incomplete data.
if line_number < trace_start:
line_number += 1
continue
row_no = 3
if "#" in row[0]:
continue
for row_no in range(row_no, len(row)):
if ":" in row[row_no]:
break
if row_no == len(row):
continue
jiffy = float(row[row_no][:-1])
# Skip trace points for which we do not have timestamp conversion.
if ((first_jiffy != 0 and jiffy < first_jiffy) or
(first_jiffy == 0 and jiffy < keys[0])):
continue
elif first_jiffy != 0 and jiffy < keys[0]:
diff = jiffy - first_jiffy
us = (diff - int(diff))*1000000
utc = first_utc + datetime.timedelta(seconds=int(diff),
microseconds=us)
row[row_no] = str(utc)
elif jiffy > curr_jiffy and jiffy < next_jiffy:
diff = jiffy - curr_jiffy
us = (diff - int(diff))*1000000
utc = time_dict[curr_jiffy] + datetime.timedelta(seconds=int(diff),
microseconds=us)
row[row_no] = str(utc)
else:
index += 1
curr_jiffy = next_jiffy
if index < len(keys):
next_jiffy = keys[index]
else:
next_jiffy = float("inf")
while next_jiffy < jiffy and index < len(keys):
curr_jiffy = next_jiffy
next_jiffy = keys[index]
index += 1
diff = jiffy - curr_jiffy
us = (diff - int(diff))*1000000
utc = time_dict[curr_jiffy] + datetime.timedelta(seconds=int(diff),
microseconds=us)
row[row_no] = '"' + str(utc) + '"'
for each_column in row:
sys.stdout.write(str(each_column) + " ")
sys.stdout.write("\n")
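# Conversion rule used above, with illustrative numbers: if a suspend entry at
# jiffy 1000.0 was anchored to 10:00:00 UTC, a trace row stamped 1002.5 is
# rewritten as 10:00:02.500000; rows stamped before the first usable anchor are
# skipped because no conversion is available for them.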
def usage():
"""Print usage of the script."""
print ("\nUsage: %s --bugreport=<path to bugreport>"
" --trace=<path to trace file>"
" --device=<device type"
" hammerhead/shamu/flounder/flounder_lte>\n") % sys.argv[0]
sys.exit(1)
def parse_argv(argv):
"""Parse arguments and set up globals."""
global flag_bugreport, flag_trace, flag_device
try:
opts, unused_args = getopt.getopt(argv,
"", ["bugreport=", "trace=", "device="])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt == "--bugreport":
flag_bugreport = arg
elif opt == "--trace":
flag_trace = arg
elif opt == "--device":
flag_device = arg
else:
usage()
sys.exit(2)
def main(argv):
parse_argv(argv)
if not flag_bugreport:
print "Bug report not valid"
usage()
sys.exit(1)
if not flag_trace:
print "Trace file not valid"
usage()
sys.exit(1)
if not flag_device:
print "Device not valid"
usage()
sys.exit(1)
try:
bugreport = open(flag_bugreport)
except IOError:
print "Unable to open bug report"
sys.exit(1)
suspend_exit = {}
suspend_enter = []
first_jiffy, first_utc = read_dmesg(bugreport, suspend_exit, suspend_enter,
flag_device)
if not (len(suspend_enter) and len(suspend_exit)):
return
if suspend_enter and (first_jiffy > suspend_enter[0]):
first_jiffy = 0
time_dict = {}
  timestamps = sorted(suspend_exit)
  index = 0
  for timestamp in timestamps:
if index >= len(suspend_enter) or timestamp < suspend_enter[index]:
continue
utc = suspend_exit[timestamp]
diff = timestamp - float(suspend_enter[index])
utc -= datetime.timedelta(seconds=int(diff),
microseconds=(diff - int(diff))*1000000)
time_dict[suspend_enter[index]] = utc
index += 1
try:
file_handle = open(flag_trace, "r")
trace_file = csv.reader(file_handle, delimiter=" ", skipinitialspace=True)
except IOError:
print "Unable to open trace file"
sys.exit(1)
convert_timestamps(trace_file, file_handle, time_dict, first_jiffy,
first_utc)
if __name__ == "__main__":
main(sys.argv[1:])
| 6,942 | 29.186957 | 78 | py |
3DG-STFM | 3DG-STFM-master/train_rgbd_t_s.py | import math
import argparse
import pprint
from distutils.util import strtobool
from pathlib import Path
from loguru import logger as loguru_logger
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from pytorch_lightning.plugins import DDPPlugin
from src.config.default import get_cfg_defaults
from src.utils.misc import get_rank_zero_only_logger, setup_gpus
from src.utils.profiler import build_profiler
from src.lightning.data import RGBDDataModule
from src.lightning.lightning_loftr import PL_LoFTR_RGB_teacher_student
loguru_logger = get_rank_zero_only_logger(loguru_logger)
def parse_args():
# init a costum parser which will be added into pl.Trainer parser
# check documentation: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'data_cfg_path', type=str, help='data config path')
parser.add_argument(
'main_cfg_path', type=str, help='main config path')
parser.add_argument(
'--exp_name', type=str, default='default_exp_name')
parser.add_argument(
'--batch_size', type=int, default=4, help='batch_size per gpu')
parser.add_argument(
'--num_workers', type=int, default=4)
parser.add_argument(
'--pin_memory', type=lambda x: bool(strtobool(x)),
nargs='?', default=True, help='whether loading data to pinned memory or not')
parser.add_argument(
'--ckpt_path', type=str, default=None,
help='pretrained checkpoint path, helpful for using a pre-trained coarse-only LoFTR')
parser.add_argument(
'--disable_ckpt', action='store_true',
help='disable checkpoint saving (useful for debugging).')
parser.add_argument(
'--profiler_name', type=str, default=None,
help='options: [inference, pytorch], or leave it unset')
parser.add_argument(
'--parallel_load_data', action='store_true',
help='load datasets in with multiple processes.')
parser = pl.Trainer.add_argparse_args(parser)
return parser.parse_args()
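# Illustrative invocation (the config paths and checkpoint name are placeholders,
# not files documented by this script):
#   python train_rgbd_t_s.py configs/data/scannet_trainval.py \
#       configs/loftr/indoor/loftr_ds.py --exp_name rgbd_teacher_student \
#       --gpus 4 --num_nodes 1 --batch_size 2 --ckpt_path weights/teacher_rgbd.ckpt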
def main():
# parse arguments
args = parse_args()
rank_zero_only(pprint.pprint)(vars(args))
# init default-cfg and merge it with the main- and data-cfg
config = get_cfg_defaults()
config.merge_from_file(args.main_cfg_path)
config.merge_from_file(args.data_cfg_path)
pl.seed_everything(config.TRAINER.SEED) # reproducibility
    # TODO: use a different seed for each dataloader worker; this is needed
    # for proper data augmentation
# scale lr and warmup-step automatically
args.gpus = _n_gpus = setup_gpus(args.gpus)
config.TRAINER.WORLD_SIZE = _n_gpus * args.num_nodes
config.TRAINER.TRUE_BATCH_SIZE = config.TRAINER.WORLD_SIZE * args.batch_size
_scaling = config.TRAINER.TRUE_BATCH_SIZE / config.TRAINER.CANONICAL_BS
config.TRAINER.SCALING = _scaling
config.TRAINER.TRUE_LR = config.TRAINER.CANONICAL_LR * _scaling
config.TRAINER.WARMUP_STEP = math.floor(config.TRAINER.WARMUP_STEP / _scaling)
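    # Linear scaling rule, with assumed numbers for illustration: 4 GPUs x 1 node
    # x batch_size 2 gives TRUE_BATCH_SIZE = 8; if CANONICAL_BS were 64, the
    # scaling factor is 0.125, so TRUE_LR = 0.125 * CANONICAL_LR and WARMUP_STEP
    # is stretched to 8x its canonical value.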
# lightning module
profiler = build_profiler(args.profiler_name)
model = PL_LoFTR_RGB_teacher_student(config, pretrained_ckpt=args.ckpt_path, profiler=profiler)
#model = PL_LoFTR_RGB_teacher_student_share(config, pretrained_ckpt=args.ckpt_path, profiler=profiler)
loguru_logger.info(f"LoFTR LightningModule initialized!")
# lightning data
data_module = RGBDDataModule(args, config)
loguru_logger.info(f"LoFTR DataModule initialized!")
# TensorBoard Logger
logger = TensorBoardLogger(save_dir='logs/tb_logs', name=args.exp_name, default_hp_metric=False)
ckpt_dir = Path(logger.log_dir) / 'checkpoints'
# Callbacks
# TODO: update ModelCheckpoint to monitor multiple metrics
ckpt_callback = ModelCheckpoint(monitor='auc@10', verbose=True, save_top_k=5, mode='max',
save_last=True,
dirpath=str(ckpt_dir),
filename='{epoch}-{auc@5:.3f}-{auc@10:.3f}-{auc@20:.3f}')
lr_monitor = LearningRateMonitor(logging_interval='step')
callbacks = [lr_monitor]
if not args.disable_ckpt:
callbacks.append(ckpt_callback)
# Lightning Trainer
trainer = pl.Trainer.from_argparse_args(
args,
plugins=DDPPlugin(find_unused_parameters=True,
num_nodes=args.num_nodes,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0),
gradient_clip_val=config.TRAINER.GRADIENT_CLIPPING,
callbacks=callbacks,
logger=logger,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0,
replace_sampler_ddp=False, # use custom sampler
reload_dataloaders_every_epoch=False, # avoid repeated samples!
weights_summary='full',
profiler=profiler)
loguru_logger.info(f"Trainer initialized!")
loguru_logger.info(f"Start training!")
trainer.fit(model, datamodule=data_module)
if __name__ == '__main__':
main()
| 5,270 | 41.168 | 111 | py |
3DG-STFM | 3DG-STFM-master/train_rgb.py | import math
import argparse
import pprint
from distutils.util import strtobool
from pathlib import Path
from loguru import logger as loguru_logger
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from pytorch_lightning.plugins import DDPPlugin
from src.config.default import get_cfg_defaults
from src.utils.misc import get_rank_zero_only_logger, setup_gpus
from src.utils.profiler import build_profiler
from src.lightning.data import RGBDataModule
from src.lightning.lightning_loftr import PL_LoFTR_RGB
loguru_logger = get_rank_zero_only_logger(loguru_logger)
def parse_args():
# init a costum parser which will be added into pl.Trainer parser
# check documentation: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'data_cfg_path', type=str, help='data config path')
parser.add_argument(
'main_cfg_path', type=str, help='main config path')
parser.add_argument(
'--exp_name', type=str, default='default_exp_name')
parser.add_argument(
'--batch_size', type=int, default=4, help='batch_size per gpu')
parser.add_argument(
'--num_workers', type=int, default=4)
parser.add_argument(
'--pin_memory', type=lambda x: bool(strtobool(x)),
nargs='?', default=True, help='whether loading data to pinned memory or not')
parser.add_argument(
'--ckpt_path', type=str, default=None,
help='pretrained checkpoint path, helpful for using a pre-trained coarse-only LoFTR')
parser.add_argument(
'--disable_ckpt', action='store_true',
help='disable checkpoint saving (useful for debugging).')
parser.add_argument(
'--profiler_name', type=str, default=None,
help='options: [inference, pytorch], or leave it unset')
parser.add_argument(
'--parallel_load_data', action='store_true',
help='load datasets in with multiple processes.')
parser = pl.Trainer.add_argparse_args(parser)
return parser.parse_args()
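# Example invocation (config paths are placeholders; any pl.Trainer flag such as --gpus,
# --num_nodes or --max_epochs is also accepted because the parser is extended with
# pl.Trainer.add_argparse_args above):
#   python train_rgb.py <data_cfg>.py <main_cfg>.py --exp_name <exp_name> \
#       --gpus 8 --batch_size 4 --num_workers 4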
def main():
# parse arguments
args = parse_args()
rank_zero_only(pprint.pprint)(vars(args))
# init default-cfg and merge it with the main- and data-cfg
config = get_cfg_defaults()
config.merge_from_file(args.main_cfg_path)
config.merge_from_file(args.data_cfg_path)
pl.seed_everything(config.TRAINER.SEED) # reproducibility
    # TODO: use a different seed for each dataloader worker (needed for data augmentation)
# scale lr and warmup-step automatically
args.gpus = _n_gpus = setup_gpus(args.gpus)
config.TRAINER.WORLD_SIZE = _n_gpus * args.num_nodes
config.TRAINER.TRUE_BATCH_SIZE = config.TRAINER.WORLD_SIZE * args.batch_size
_scaling = config.TRAINER.TRUE_BATCH_SIZE / config.TRAINER.CANONICAL_BS
config.TRAINER.SCALING = _scaling
config.TRAINER.TRUE_LR = config.TRAINER.CANONICAL_LR * _scaling
config.TRAINER.WARMUP_STEP = math.floor(config.TRAINER.WARMUP_STEP / _scaling)
# lightning module
profiler = build_profiler(args.profiler_name)
model = PL_LoFTR_RGB(config, pretrained_ckpt=args.ckpt_path, profiler=profiler)
loguru_logger.info(f"LoFTR LightningModule initialized!")
# lightning data
data_module = RGBDataModule(args, config)
loguru_logger.info(f"LoFTR DataModule initialized!")
# TensorBoard Logger
logger = TensorBoardLogger(save_dir='logs/tb_logs', name=args.exp_name, default_hp_metric=False)
ckpt_dir = Path(logger.log_dir) / 'checkpoints'
# Callbacks
ckpt_callback = ModelCheckpoint(monitor='auc@10', verbose=True, save_top_k=5, mode='max',
save_last=True,
dirpath=str(ckpt_dir),
filename='{epoch}-{auc@5:.3f}-{auc@10:.3f}-{auc@20:.3f}')
lr_monitor = LearningRateMonitor(logging_interval='step')
callbacks = [lr_monitor]
if not args.disable_ckpt:
callbacks.append(ckpt_callback)
# Lightning Trainer
trainer = pl.Trainer.from_argparse_args(
args,
plugins=DDPPlugin(find_unused_parameters=False,
num_nodes=args.num_nodes,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0),
gradient_clip_val=config.TRAINER.GRADIENT_CLIPPING,
callbacks=callbacks,
logger=logger,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0,
replace_sampler_ddp=False, # use custom sampler
reload_dataloaders_every_epoch=False, # avoid repeated samples!
weights_summary='full',
profiler=profiler)
loguru_logger.info(f"Trainer initialized!")
loguru_logger.info(f"Start training!")
trainer.fit(model, datamodule=data_module)
if __name__ == '__main__':
main()
| 5,007 | 40.04918 | 111 | py |
3DG-STFM | 3DG-STFM-master/test_rgbd.py | import pytorch_lightning as pl
import argparse
import pprint
from loguru import logger as loguru_logger
from src.config.default import get_cfg_defaults
from src.utils.profiler import build_profiler
from src.lightning.data import MultiSceneDataModule, RGBDDataModule
from src.lightning.lightning_loftr import PL_LoFTR,PL_LoFTR_RGBD
def parse_args():
    # init a custom parser which will be added into the pl.Trainer parser
# check documentation: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'data_cfg_path', type=str, help='data config path')
parser.add_argument(
'main_cfg_path', type=str, help='main config path')
parser.add_argument(
'--ckpt_path', type=str, default="weights/indoor_ds.ckpt", help='path to the checkpoint')
parser.add_argument(
'--dump_dir', type=str, default=None, help="if set, the matching results will be dump to dump_dir")
parser.add_argument(
'--profiler_name', type=str, default=None, help='options: [inference, pytorch], or leave it unset')
parser.add_argument(
'--batch_size', type=int, default=1, help='batch_size per gpu')
parser.add_argument(
'--num_workers', type=int, default=2)
parser.add_argument(
'--thr', type=float, default=None, help='modify the coarse-level matching threshold.')
parser = pl.Trainer.add_argparse_args(parser)
return parser.parse_args()
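# Example invocation (config paths are placeholders):
#   python test_rgbd.py <data_cfg>.py <main_cfg>.py --ckpt_path <rgbd_checkpoint>.ckpt --gpus 1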
if __name__ == '__main__':
# parse arguments
args = parse_args()
pprint.pprint(vars(args))
# init default-cfg and merge it with the main- and data-cfg
config = get_cfg_defaults()
config.merge_from_file(args.main_cfg_path)
config.merge_from_file(args.data_cfg_path)
pl.seed_everything(config.TRAINER.SEED) # reproducibility
# tune when testing
if args.thr is not None:
config.LOFTR.MATCH_COARSE.THR = args.thr
loguru_logger.info(f"Args and config initialized!")
# lightning module
profiler = build_profiler(args.profiler_name)
model = PL_LoFTR_RGBD(config, pretrained_ckpt=args.ckpt_path, profiler=profiler, dump_dir=args.dump_dir)
loguru_logger.info(f"LoFTR-lightning initialized!")
# lightning data
data_module = RGBDDataModule(args, config)
loguru_logger.info(f"DataModule initialized!")
# lightning trainer
trainer = pl.Trainer.from_argparse_args(args, replace_sampler_ddp=False, logger=False)
loguru_logger.info(f"Start testing!")
trainer.test(model, datamodule=data_module, verbose=False)
| 2,657 | 37.521739 | 111 | py |
3DG-STFM | 3DG-STFM-master/test_rgb.py | import pytorch_lightning as pl
import argparse
import pprint
from loguru import logger as loguru_logger
from src.config.default import get_cfg_defaults
from src.utils.profiler import build_profiler
from src.lightning.data import RGBDataModule
from src.lightning.lightning_loftr import PL_LoFTR_RGB
def parse_args():
    # init a custom parser which will be added into the pl.Trainer parser
# check documentation: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'data_cfg_path', type=str, help='data config path')
parser.add_argument(
'main_cfg_path', type=str, help='main config path')
parser.add_argument(
'--ckpt_path', type=str, default="weights/indoor_ds.ckpt", help='path to the checkpoint')
parser.add_argument(
'--dump_dir', type=str, default=None, help="if set, the matching results will be dump to dump_dir")
parser.add_argument(
'--profiler_name', type=str, default=None, help='options: [inference, pytorch], or leave it unset')
parser.add_argument(
'--batch_size', type=int, default=1, help='batch_size per gpu')
parser.add_argument(
'--num_workers', type=int, default=2)
parser.add_argument(
'--thr', type=float, default=None, help='modify the coarse-level matching threshold.')
parser = pl.Trainer.add_argparse_args(parser)
return parser.parse_args()
if __name__ == '__main__':
# parse arguments
args = parse_args()
pprint.pprint(vars(args))
# init default-cfg and merge it with the main- and data-cfg
config = get_cfg_defaults()
config.merge_from_file(args.main_cfg_path)
config.merge_from_file(args.data_cfg_path)
pl.seed_everything(config.TRAINER.SEED) # reproducibility
# tune when testing
if args.thr is not None:
config.LOFTR.MATCH_COARSE.THR = args.thr
loguru_logger.info(f"Args and config initialized!")
# lightning module
profiler = build_profiler(args.profiler_name)
model = PL_LoFTR_RGB(config, pretrained_ckpt=args.ckpt_path, profiler=profiler, dump_dir=args.dump_dir)
loguru_logger.info(f"LoFTR-lightning initialized!")
# lightning data
data_module = RGBDataModule(args, config)
loguru_logger.info(f"DataModule initialized!")
# lightning trainer
trainer = pl.Trainer.from_argparse_args(args, replace_sampler_ddp=False, logger=False)
loguru_logger.info(f"Start testing!")
trainer.test(model, datamodule=data_module, verbose=False)
| 2,622 | 37.014493 | 111 | py |
3DG-STFM | 3DG-STFM-master/demo.py | import os
import torch
import cv2
import numpy as np
import matplotlib.cm as cm
import matplotlib.colors
from src.loftr import default_cfg, LoFTR_RGBD, LoFTR_RGB
import matplotlib.pyplot as plt
def make_matching_figure(
img0, img1, mkpts0, mkpts1, color,
kpts0=None, kpts1=None, text=[], dpi=75, path=None):
# draw image pair
assert mkpts0.shape[0] == mkpts1.shape[0], f'mkpts0: {mkpts0.shape[0]} v.s. mkpts1: {mkpts1.shape[0]}'
fig, axes = plt.subplots(1, 2, figsize=(10, 6), dpi=dpi)
axes[0].imshow(img0)
axes[1].imshow(img1)
for i in range(2): # clear all frames
axes[i].get_yaxis().set_ticks([])
axes[i].get_xaxis().set_ticks([])
for spine in axes[i].spines.values():
spine.set_visible(False)
plt.tight_layout(pad=1)
if kpts0 is not None:
assert kpts1 is not None
axes[0].scatter(kpts0[:, 0], kpts0[:, 1], c='w', s=2)
axes[1].scatter(kpts1[:, 0], kpts1[:, 1], c='w', s=2)
# draw matches
if mkpts0.shape[0] != 0 and mkpts1.shape[0] != 0:
fig.canvas.draw()
transFigure = fig.transFigure.inverted()
fkpts0 = transFigure.transform(axes[0].transData.transform(mkpts0))
fkpts1 = transFigure.transform(axes[1].transData.transform(mkpts1))
fig.lines = [matplotlib.lines.Line2D((fkpts0[i, 0], fkpts1[i, 0]),
(fkpts0[i, 1], fkpts1[i, 1]),
transform=fig.transFigure, c='b', linewidth=0.5,alpha=0.3)
for i in range(len(mkpts0))]
axes[0].scatter(mkpts0[:, 0], mkpts0[:, 1], c=color, s=5)
axes[1].scatter(mkpts1[:, 0], mkpts1[:, 1], c=color, s=5)
    # put the text overlay: black text on bright backgrounds, white otherwise
    txt_color = 'k' if img0[:100, :200].mean() > 200 else 'w'
    fig.text(
        0.01, 0.99, '\n'.join(text), transform=fig.axes[0].transAxes,
        fontsize=15, va='top', ha='left', color=txt_color)
# save or return figure
if path:
plt.savefig(str(path), bbox_inches='tight', pad_inches=0)
print('saved', os.getcwd(), path)
plt.close()
else:
return fig
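# pose_filter normalizes the matched keypoints by the camera intrinsics and estimates an
# essential matrix with RANSAC (cv2.findEssentialMat); the returned mask marks geometric inliers.
# It is unused in the demo below (the call is commented out), but can be enabled to prune
# outlier matches when the intrinsics K0/K1 are available.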
def pose_filter(mkpts0, mkpts1, K0, K1):
mkpts0 = (mkpts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]
mkpts1 = (mkpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]
ransac_thr = 0.5 / np.mean([K0[0, 0], K1[1, 1], K0[0, 0], K1[1, 1]])
if len(mkpts0) < 6:
E = None
mask = None
else:
E, mask = cv2.findEssentialMat(
mkpts0, mkpts1, np.eye(3), threshold=ransac_thr, prob=0.99999, method=cv2.RANSAC)
return E, mask
root_dir = 'inference/'
pretrained_ckpt = "weights/indoor_student.ckpt"
matcher = LoFTR_RGB(config=default_cfg)
img0_pth, img1_pth = 'demo1.jpg','demo2.jpg'
img0_pth, img1_pth = root_dir + img0_pth, root_dir + img1_pth
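# The Lightning checkpoint stores the matcher weights under a `matcher.` prefix (it was saved
# from a LightningModule), so the prefix is stripped below before loading into the bare
# LoFTR_RGB module.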
sd = torch.load(pretrained_ckpt, map_location='cpu')['state_dict']
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in sd.items():
name = k[8:] # remove `matcher.`
new_state_dict[name] = v
matcher.load_state_dict(new_state_dict, strict=False)
matcher = matcher.eval().cuda()
img0_raw = cv2.imread(img0_pth, cv2.IMREAD_COLOR)
img1_raw = cv2.imread(img1_pth, cv2.IMREAD_COLOR)
img0_raw = cv2.resize(img0_raw, (640, 480))
img1_raw = cv2.resize(img1_raw, (640, 480))
img0 = cv2.cvtColor(img0_raw, cv2.COLOR_BGR2RGB)
img1 = cv2.cvtColor(img1_raw, cv2.COLOR_BGR2RGB)
img0 = np.ascontiguousarray(img0)
img1 = np.ascontiguousarray(img1)
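# Per-channel normalization with the standard ImageNet mean/std (the statistics commonly used
# with torchvision-pretrained backbones).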
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
img0 = img0.astype(float)
img1 = img1.astype(float)
img0[:, :, 0] = (img0[:, :, 0] / 255. - mean[0]) / std[0]
img0[:, :, 1] = (img0[:, :, 1] / 255. - mean[1]) / std[1]
img0[:, :, 2] = (img0[:, :, 2] / 255. - mean[2]) / std[2]
img1[:, :, 0] = (img1[:, :, 0] / 255. - mean[0]) / std[0]
img1[:, :, 1] = (img1[:, :, 1] / 255. - mean[1]) / std[1]
img1[:, :, 2] = (img1[:, :, 2] / 255. - mean[2]) / std[2]
img0 = torch.from_numpy(img0).float()[None].cuda()
img1 = torch.from_numpy(img1).float()[None].cuda()
img0 = img0.permute(0, 3, 1, 2)
img1 = img1.permute(0, 3, 1, 2)
batch = {'image0': img0, 'image1': img1}
# Inference with LoFTR and get prediction
with torch.no_grad():
matcher(batch)
mkpts0 = batch['mkpts0_f'].cpu().numpy()
mkpts1 = batch['mkpts1_f'].cpu().numpy()
mconf = batch['mconf'].cpu().numpy()
#_, mask = pose_filter(mkpts0, mkpts1, K0, K1)
# ind_mask = np.where(mask == 1)
# mkpts0 = mkpts0[ind_mask[0], :]
# mkpts1 = mkpts1[ind_mask[0], :]
# mconf = mconf[ind_mask[0]]
# Draw
if mconf.size > 0:
    mconf = (mconf - mconf.min()) / (mconf.max() - mconf.min())
color = cm.jet(mconf)
text = [
'3DG-STFM',
'Matches: {}'.format(len(mkpts0)),
]
fig = make_matching_figure(img0_raw, img1_raw, mkpts0, mkpts1, color, text=text,
path='demo.png')
| 4,874 | 33.574468 | 106 | py |
3DG-STFM | 3DG-STFM-master/train_rgbd.py | import math
import argparse
import pprint
from distutils.util import strtobool
from pathlib import Path
from loguru import logger as loguru_logger
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.loggers import TensorBoardLogger
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateMonitor
from pytorch_lightning.plugins import DDPPlugin
from src.config.default import get_cfg_defaults
from src.utils.misc import get_rank_zero_only_logger, setup_gpus
from src.utils.profiler import build_profiler
from src.lightning.data import RGBDDataModule
from src.lightning.lightning_loftr import PL_LoFTR_RGBD
loguru_logger = get_rank_zero_only_logger(loguru_logger)
def parse_args():
    # init a custom parser which will be added into the pl.Trainer parser
# check documentation: https://pytorch-lightning.readthedocs.io/en/latest/common/trainer.html#trainer-flags
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
'data_cfg_path', type=str, help='data config path')
parser.add_argument(
'main_cfg_path', type=str, help='main config path')
parser.add_argument(
'--exp_name', type=str, default='default_exp_name')
parser.add_argument(
'--batch_size', type=int, default=4, help='batch_size per gpu')
parser.add_argument(
'--num_workers', type=int, default=4)
parser.add_argument(
'--pin_memory', type=lambda x: bool(strtobool(x)),
nargs='?', default=True, help='whether loading data to pinned memory or not')
parser.add_argument(
'--ckpt_path', type=str, default=None,
help='pretrained checkpoint path, helpful for using a pre-trained coarse-only LoFTR')
parser.add_argument(
'--disable_ckpt', action='store_true',
help='disable checkpoint saving (useful for debugging).')
parser.add_argument(
'--profiler_name', type=str, default=None,
help='options: [inference, pytorch], or leave it unset')
parser.add_argument(
'--parallel_load_data', action='store_true',
help='load datasets in with multiple processes.')
parser = pl.Trainer.add_argparse_args(parser)
return parser.parse_args()
def main():
# parse arguments
args = parse_args()
rank_zero_only(pprint.pprint)(vars(args))
# init default-cfg and merge it with the main- and data-cfg
config = get_cfg_defaults()
config.merge_from_file(args.main_cfg_path)
config.merge_from_file(args.data_cfg_path)
pl.seed_everything(config.TRAINER.SEED) # reproducibility
    # TODO: use a different seed for each dataloader worker
    # (this is needed for data augmentation)
# scale lr and warmup-step automatically
args.gpus = _n_gpus = setup_gpus(args.gpus)
config.TRAINER.WORLD_SIZE = _n_gpus * args.num_nodes
config.TRAINER.TRUE_BATCH_SIZE = config.TRAINER.WORLD_SIZE * args.batch_size
_scaling = config.TRAINER.TRUE_BATCH_SIZE / config.TRAINER.CANONICAL_BS
config.TRAINER.SCALING = _scaling
config.TRAINER.TRUE_LR = config.TRAINER.CANONICAL_LR * _scaling
config.TRAINER.WARMUP_STEP = math.floor(config.TRAINER.WARMUP_STEP / _scaling)
# lightning module
profiler = build_profiler(args.profiler_name)
model = PL_LoFTR_RGBD(config, pretrained_ckpt=args.ckpt_path, profiler=profiler)
loguru_logger.info(f"LoFTR LightningModule initialized!")
# lightning data
data_module = RGBDDataModule(args, config)
loguru_logger.info(f"LoFTR DataModule initialized!")
# TensorBoard Logger
logger = TensorBoardLogger(save_dir='logs/tb_logs', name=args.exp_name, default_hp_metric=False)
ckpt_dir = Path(logger.log_dir) / 'checkpoints'
# Callbacks
# TODO: update ModelCheckpoint to monitor multiple metrics
ckpt_callback = ModelCheckpoint(monitor='auc@10', verbose=True, save_top_k=5, mode='max',
save_last=True,
dirpath=str(ckpt_dir),
filename='{epoch}-{auc@5:.3f}-{auc@10:.3f}-{auc@20:.3f}')
lr_monitor = LearningRateMonitor(logging_interval='step')
callbacks = [lr_monitor]
if not args.disable_ckpt:
callbacks.append(ckpt_callback)
# Lightning Trainer
trainer = pl.Trainer.from_argparse_args(
args,
plugins=DDPPlugin(find_unused_parameters=False,
num_nodes=args.num_nodes,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0),
gradient_clip_val=config.TRAINER.GRADIENT_CLIPPING,
callbacks=callbacks,
logger=logger,
sync_batchnorm=config.TRAINER.WORLD_SIZE > 0,
replace_sampler_ddp=False, # use custom sampler
reload_dataloaders_every_epoch=False, # avoid repeated samples!
weights_summary='full',
profiler=profiler)
loguru_logger.info(f"Trainer initialized!")
loguru_logger.info(f"Start training!")
trainer.fit(model, datamodule=data_module)
if __name__ == '__main__':
main()
| 5,134 | 40.41129 | 111 | py |
3DG-STFM | 3DG-STFM-master/src/__init__.py | 0 | 0 | 0 | py |
|
3DG-STFM | 3DG-STFM-master/src/config/default.py | from yacs.config import CfgNode as CN
_CN = CN()
############## ↓ LoFTR Pipeline ↓ ##############
_CN.LOFTR = CN()
_CN.LOFTR.BACKBONE_TYPE = 'ResNetFPN'
_CN.LOFTR.RESOLUTION = (8, 2) # options: [(8, 2), (16, 4)]
_CN.LOFTR.FINE_WINDOW_SIZE = 5 # window_size in fine_level, must be odd
_CN.LOFTR.FINE_CONCAT_COARSE_FEAT = True
# 1. LoFTR-backbone (local feature CNN) config
_CN.LOFTR.RESNETFPN = CN()
_CN.LOFTR.RESNETFPN.INITIAL_DIM = 128
_CN.LOFTR.RESNETFPN.BLOCK_DIMS = [128, 196, 256] # s1, s2, s3
# 2. LoFTR-coarse module config
_CN.LOFTR.COARSE = CN()
_CN.LOFTR.COARSE.D_MODEL = 256
_CN.LOFTR.COARSE.D_FFN = 256
_CN.LOFTR.COARSE.NHEAD = 8
_CN.LOFTR.COARSE.LAYER_NAMES = ['self', 'cross'] * 4
_CN.LOFTR.COARSE.ATTENTION = 'linear' # options: ['linear', 'full']
# 3. Coarse-Matching config
_CN.LOFTR.MATCH_COARSE = CN()
_CN.LOFTR.MATCH_COARSE.THR = 0.2
_CN.LOFTR.MATCH_COARSE.BORDER_RM = 2
_CN.LOFTR.MATCH_COARSE.MATCH_TYPE = 'dual_softmax' # options: ['dual_softmax, 'sinkhorn']
_CN.LOFTR.MATCH_COARSE.DSMAX_TEMPERATURE = 0.1
_CN.LOFTR.MATCH_COARSE.SKH_ITERS = 3
_CN.LOFTR.MATCH_COARSE.SKH_INIT_BIN_SCORE = 1.0
_CN.LOFTR.MATCH_COARSE.SKH_PREFILTER = False
_CN.LOFTR.MATCH_COARSE.TRAIN_COARSE_PERCENT = 0.2 # training tricks: save GPU memory
_CN.LOFTR.MATCH_COARSE.TRAIN_PAD_NUM_GT_MIN = 200 # training tricks: avoid DDP deadlock
_CN.LOFTR.MATCH_COARSE.SPARSE_SPVS = True
# 4. LoFTR-fine module config
_CN.LOFTR.FINE = CN()
_CN.LOFTR.FINE.D_MODEL = 128
_CN.LOFTR.FINE.D_FFN = 128
_CN.LOFTR.FINE.NHEAD = 8
_CN.LOFTR.FINE.LAYER_NAMES = ['self', 'cross'] * 1
_CN.LOFTR.FINE.ATTENTION = 'linear'
# 5. LoFTR Losses
# -- # coarse-level
_CN.LOFTR.LOSS = CN()
_CN.LOFTR.LOSS.COARSE_TYPE = 'focal' # ['focal', 'cross_entropy']
_CN.LOFTR.LOSS.COARSE_WEIGHT = 1.0
# _CN.LOFTR.LOSS.SPARSE_SPVS = False
# -- - -- # focal loss (coarse)
_CN.LOFTR.LOSS.FOCAL_ALPHA = 0.25
_CN.LOFTR.LOSS.FOCAL_GAMMA = 2.0
_CN.LOFTR.LOSS.POS_WEIGHT = 1.0
_CN.LOFTR.LOSS.NEG_WEIGHT = 1.0
# _CN.LOFTR.LOSS.DUAL_SOFTMAX = False # whether coarse-level use dual-softmax or not.
# use `_CN.LOFTR.MATCH_COARSE.MATCH_TYPE`
# -- # fine-level
_CN.LOFTR.LOSS.FINE_TYPE = 'l2_with_std' # ['l2_with_std', 'l2']
_CN.LOFTR.LOSS.FINE_WEIGHT = 1.0
_CN.LOFTR.LOSS.FINE_CORRECT_THR = 1.0 # for filtering valid fine-level gts (some gt matches might fall out of the fine-level window)
############## Dataset ##############
_CN.DATASET = CN()
# 1. data config
# training and validating
_CN.DATASET.TRAINVAL_DATA_SOURCE = None # options: ['ScanNet', 'MegaDepth']
_CN.DATASET.TRAIN_DATA_ROOT = None
_CN.DATASET.TRAIN_POSE_ROOT = None # (optional directory for poses)
_CN.DATASET.TRAIN_NPZ_ROOT = None
_CN.DATASET.TRAIN_LIST_PATH = None
_CN.DATASET.TRAIN_INTRINSIC_PATH = None
_CN.DATASET.VAL_DATA_ROOT = None
_CN.DATASET.VAL_POSE_ROOT = None # (optional directory for poses)
_CN.DATASET.VAL_NPZ_ROOT = None
_CN.DATASET.VAL_LIST_PATH = None # None if val data from all scenes are bundled into a single npz file
_CN.DATASET.VAL_INTRINSIC_PATH = None
# testing
_CN.DATASET.TEST_DATA_SOURCE = None
_CN.DATASET.TEST_DATA_ROOT = None
_CN.DATASET.TEST_POSE_ROOT = None # (optional directory for poses)
_CN.DATASET.TEST_NPZ_ROOT = None
_CN.DATASET.TEST_LIST_PATH = None # None if test data from all scenes are bundled into a single npz file
_CN.DATASET.TEST_INTRINSIC_PATH = None
# 2. dataset config
# general options
_CN.DATASET.MIN_OVERLAP_SCORE_TRAIN = 0.4 # discard data with overlap_score < min_overlap_score
_CN.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0
_CN.DATASET.AUGMENTATION_TYPE = None # options: [None, 'dark', 'mobile']
# MegaDepth options
_CN.DATASET.MGDPT_IMG_RESIZE = 640 # resize the longer side, zero-pad bottom-right to square.
_CN.DATASET.MGDPT_IMG_PAD = True # pad img to square with size = MGDPT_IMG_RESIZE
_CN.DATASET.MGDPT_DEPTH_PAD = True # pad depthmap to square with size = 2000
_CN.DATASET.MGDPT_DF = 8
############## Trainer ##############
_CN.TRAINER = CN()
_CN.TRAINER.WORLD_SIZE = 1
_CN.TRAINER.CANONICAL_BS = 64
_CN.TRAINER.CANONICAL_LR = 6e-3 #6e-3
_CN.TRAINER.SCALING = None # this will be calculated automatically
_CN.TRAINER.FIND_LR = False # use learning rate finder from pytorch-lightning
# optimizer
_CN.TRAINER.OPTIMIZER = "adamw" # [adam, adamw]
_CN.TRAINER.TRUE_LR = None # this will be calculated automatically at runtime
_CN.TRAINER.ADAM_DECAY = 0. # ADAM: for adam
_CN.TRAINER.ADAMW_DECAY = 0.1
# step-based warm-up
_CN.TRAINER.WARMUP_TYPE = 'linear' # [linear, constant]
_CN.TRAINER.WARMUP_RATIO = 0.
_CN.TRAINER.WARMUP_STEP = 4800#4800
# learning rate scheduler
_CN.TRAINER.SCHEDULER = 'MultiStepLR' # [MultiStepLR, CosineAnnealing, ExponentialLR]
_CN.TRAINER.SCHEDULER_INTERVAL = 'epoch' # [epoch, step]
_CN.TRAINER.MSLR_MILESTONES = [3, 6, 9, 12] # MSLR: MultiStepLR
_CN.TRAINER.MSLR_GAMMA = 0.5
_CN.TRAINER.COSA_TMAX = 30 # COSA: CosineAnnealing
_CN.TRAINER.ELR_GAMMA = 0.999992 # ELR: ExponentialLR, this value for 'step' interval
# plotting related
_CN.TRAINER.ENABLE_PLOTTING = True
_CN.TRAINER.N_VAL_PAIRS_TO_PLOT = 32 # number of val/test paris for plotting
_CN.TRAINER.PLOT_MODE = 'evaluation' # ['evaluation', 'confidence']
_CN.TRAINER.PLOT_MATCHES_ALPHA = 'dynamic'
# geometric metrics and pose solver
_CN.TRAINER.EPI_ERR_THR = 5e-4 # recommendation: 5e-4 for ScanNet, 1e-4 for MegaDepth (from SuperGlue)
_CN.TRAINER.POSE_GEO_MODEL = 'E' # ['E', 'F', 'H']
_CN.TRAINER.POSE_ESTIMATION_METHOD = 'RANSAC' # [RANSAC, DEGENSAC, MAGSAC]
_CN.TRAINER.RANSAC_PIXEL_THR = 0.5
_CN.TRAINER.RANSAC_CONF = 0.99999
_CN.TRAINER.RANSAC_MAX_ITERS = 10000
_CN.TRAINER.USE_MAGSACPP = False
# data sampler for train_dataloader
_CN.TRAINER.DATA_SAMPLER = 'scene_balance' # options: ['scene_balance', 'random', 'normal']
# 'scene_balance' config
_CN.TRAINER.N_SAMPLES_PER_SUBSET = 200
_CN.TRAINER.SB_SUBSET_SAMPLE_REPLACEMENT = True # whether sample each scene with replacement or not
_CN.TRAINER.SB_SUBSET_SHUFFLE = True # after sampling from scenes, whether shuffle within the epoch or not
_CN.TRAINER.SB_REPEAT = 1 # repeat N times for training the sampled data
# 'random' config
_CN.TRAINER.RDM_REPLACEMENT = True
_CN.TRAINER.RDM_NUM_SAMPLES = None
# gradient clipping
_CN.TRAINER.GRADIENT_CLIPPING = 0.5
# reproducibility
# This seed affects the data sampling. With the same seed, the data sampling is guaranteed
# to be the same. When resuming training from a checkpoint, it is better to use a different
# seed; otherwise the sampled data will be exactly the same as before resuming, which
# results in fewer unique data items being sampled over the entire training run.
# Different seed values might affect the final training result, since not all data items
# are used during training on ScanNet (60M image pairs are sampled during training out of 230M pairs in total).
_CN.TRAINER.SEED = 66
def get_cfg_defaults():
"""Get a yacs CfgNode object with default values for my_project."""
# Return a clone so that the defaults will not be altered
# This is for the "local variable" use pattern
return _CN.clone()
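# Typical usage (mirroring the train/test scripts in this repo):
#   config = get_cfg_defaults()
#   config.merge_from_file(args.main_cfg_path)  # model / trainer overrides
#   config.merge_from_file(args.data_cfg_path)  # dataset overrides
#   pl.seed_everything(config.TRAINER.SEED)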
| 7,068 | 40.339181 | 133 | py |
3DG-STFM | 3DG-STFM-master/src/datasets/sampler.py | import torch
from torch.utils.data import Sampler, ConcatDataset
class RandomConcatSampler(Sampler):
""" Random sampler for ConcatDataset. At each epoch, `n_samples_per_subset` samples will be draw from each subset
in the ConcatDataset. If `subset_replacement` is ``True``, sampling within each subset will be done with replacement.
However, it is impossible to sample data without replacement between epochs, unless bulding a stateful sampler lived along the entire training phase.
For current implementation, the randomness of sampling is ensured no matter the sampler is recreated across epochs or not and call `torch.manual_seed()` or not.
Args:
shuffle (bool): shuffle the random sampled indices across all sub-datsets.
repeat (int): repeatedly use the sampled indices multiple times for training.
[arXiv:1902.05509, arXiv:1901.09335]
NOTE: Don't re-initialize the sampler between epochs (will lead to repeated samples)
NOTE: This sampler behaves differently with DistributedSampler.
It assume the dataset is splitted across ranks instead of replicated.
TODO: Add a `set_epoch()` method to fullfill sampling without replacement across epochs.
ref: https://github.com/PyTorchLightning/pytorch-lightning/blob/e9846dd758cfb1500eb9dba2d86f6912eb487587/pytorch_lightning/trainer/training_loop.py#L373
"""
def __init__(self,
data_source: ConcatDataset,
n_samples_per_subset: int,
subset_replacement: bool=True,
shuffle: bool=True,
repeat: int=1,
seed: int=None):
if not isinstance(data_source, ConcatDataset):
raise TypeError("data_source should be torch.utils.data.ConcatDataset")
self.data_source = data_source
self.n_subset = len(self.data_source.datasets)
self.n_samples_per_subset = n_samples_per_subset
self.n_samples = self.n_subset * self.n_samples_per_subset * repeat
self.subset_replacement = subset_replacement
self.repeat = repeat
self.shuffle = shuffle
self.generator = torch.manual_seed(seed)
assert self.repeat >= 1
def __len__(self):
return self.n_samples
def __iter__(self):
indices = []
# sample from each sub-dataset
for d_idx in range(self.n_subset):
low = 0 if d_idx==0 else self.data_source.cumulative_sizes[d_idx-1]
high = self.data_source.cumulative_sizes[d_idx]
if self.subset_replacement:
rand_tensor = torch.randint(low, high, (self.n_samples_per_subset, ),
generator=self.generator, dtype=torch.int64)
else: # sample without replacement
len_subset = len(self.data_source.datasets[d_idx])
rand_tensor = torch.randperm(len_subset, generator=self.generator) + low
if len_subset >= self.n_samples_per_subset:
rand_tensor = rand_tensor[:self.n_samples_per_subset]
else: # padding with replacement
rand_tensor_replacement = torch.randint(low, high, (self.n_samples_per_subset - len_subset, ),
generator=self.generator, dtype=torch.int64)
rand_tensor = torch.cat([rand_tensor, rand_tensor_replacement])
indices.append(rand_tensor)
indices = torch.cat(indices)
if self.shuffle: # shuffle the sampled dataset (from multiple subsets)
rand_tensor = torch.randperm(len(indices), generator=self.generator)
indices = indices[rand_tensor]
# repeat the sampled indices (can be used for RepeatAugmentation or pure RepeatSampling)
if self.repeat > 1:
repeat_indices = [indices.clone() for _ in range(self.repeat - 1)]
if self.shuffle:
_choice = lambda x: x[torch.randperm(len(x), generator=self.generator)]
repeat_indices = map(_choice, repeat_indices)
indices = torch.cat([indices, *repeat_indices], 0)
assert indices.shape[0] == self.n_samples
return iter(indices.tolist())
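# Minimal usage sketch (assumes `concat_ds` is a torch.utils.data.ConcatDataset of per-scene
# datasets; the numeric values mirror the defaults in src/config/default.py):
#   sampler = RandomConcatSampler(concat_ds, n_samples_per_subset=200,
#                                 subset_replacement=True, shuffle=True, repeat=1, seed=66)
#   loader = torch.utils.data.DataLoader(concat_ds, sampler=sampler, batch_size=1)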
| 4,293 | 54.051282 | 164 | py |
3DG-STFM | 3DG-STFM-master/src/datasets/megadepth.py | import os.path as osp
import numpy as np
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset
from loguru import logger
import cv2
from src.utils.dataset import read_megadepth_gray, read_megadepth_depth, read_megadepth_rgb
class MegaDepth_RGB_Dataset(Dataset):
def __init__(self,
root_dir,
npz_path,
mode='train',
min_overlap_score=0.4,
img_resize=None,
df=None,
img_padding=False,
depth_padding=False,
augment_fn=None,
**kwargs):
"""
Manage one scene(npz_path) of MegaDepth dataset.
Args:
root_dir (str): megadepth root directory that has `phoenix`.
npz_path (str): {scene_id}.npz path. This contains image pair information of a scene.
mode (str): options are ['train', 'val', 'test']
min_overlap_score (float): how much a pair should have in common. In range of [0, 1]. Set to 0 when testing.
img_resize (int, optional): the longer edge of resized images. None for no resize. 640 is recommended.
This is useful during training with batches and testing with memory intensive algorithms.
df (int, optional): image size division factor. NOTE: this will change the final image size after img_resize.
img_padding (bool): If set to 'True', zero-pad the image to squared size. This is useful during training.
depth_padding (bool): If set to 'True', zero-pad depthmap to (2000, 2000). This is useful during training.
augment_fn (callable, optional): augments images with pre-defined visual effects.
"""
super().__init__()
self.root_dir = root_dir
self.mode = mode
self.scene_id = npz_path.split('.')[0]
# prepare scene_info and pair_info
if mode == 'test' and min_overlap_score != 0:
logger.warning("You are using `min_overlap_score`!=0 in test mode. Set to 0.")
min_overlap_score = 0
self.scene_info = np.load(npz_path, allow_pickle=True)
self.pair_infos = self.scene_info['pair_infos'].copy()
del self.scene_info['pair_infos']
self.pair_infos = [pair_info for pair_info in self.pair_infos if pair_info[1] > min_overlap_score]
# parameters for image resizing, padding and depthmap padding
if mode == 'train':
assert img_resize is not None and img_padding and depth_padding
self.img_resize = img_resize
self.df = df
self.img_padding = img_padding
self.depth_max_size = 2000 if depth_padding else None # the upperbound of depthmaps size in megadepth.
# for training LoFTR
self.augment_fn = augment_fn if mode == 'train' else None
        self.coarse_scale = kwargs.get('coarse_scale', 0.125)  # kwargs is a dict, so use .get() (getattr() would always return the default)
def __len__(self):
return len(self.pair_infos)
def __getitem__(self, idx):
(idx0, idx1), overlap_score, central_matches = self.pair_infos[idx]
# read grayscale image and mask. (1, h, w) and (h, w)
img_name0 = osp.join(self.root_dir, self.scene_info['image_paths'][idx0])
img_name1 = osp.join(self.root_dir, self.scene_info['image_paths'][idx1])
# TODO: Support augmentation & handle seeds for each worker correctly.
image0, mask0, scale0 = read_megadepth_rgb(
img_name0, self.img_resize, self.df, self.img_padding, None)
# np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
image1, mask1, scale1 = read_megadepth_rgb(
img_name1, self.img_resize, self.df, self.img_padding, None)
# np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
# read depth. shape: (h, w)
if self.mode in ['train', 'val']:
depth0 = read_megadepth_depth(
osp.join(self.root_dir, self.scene_info['depth_paths'][idx0]), pad_to=self.depth_max_size)
depth1 = read_megadepth_depth(
osp.join(self.root_dir, self.scene_info['depth_paths'][idx1]), pad_to=self.depth_max_size)
else:
depth0 = depth1 = torch.tensor([])
# read intrinsics of original size
K_0 = torch.tensor(self.scene_info['intrinsics'][idx0].copy(), dtype=torch.float).reshape(3, 3)
K_1 = torch.tensor(self.scene_info['intrinsics'][idx1].copy(), dtype=torch.float).reshape(3, 3)
# read and compute relative poses
T0 = self.scene_info['poses'][idx0]
T1 = self.scene_info['poses'][idx1]
T_0to1 = torch.tensor(np.matmul(T1, np.linalg.inv(T0)), dtype=torch.float)[:4, :4] # (4, 4)
T_1to0 = T_0to1.inverse()
data = {
'image0': image0, # (3, h, w)
'depth0': depth0, # (h, w)
'image1': image1,
'depth1': depth1,
'T_0to1': T_0to1, # (4, 4)
'T_1to0': T_1to0,
'K0': K_0, # (3, 3)
'K1': K_1,
'scale0': scale0, # [scale_w, scale_h]
'scale1': scale1,
'dataset_name': 'MegaDepth',
'scene_id': self.scene_id,
'pair_id': idx,
'pair_names': (self.scene_info['image_paths'][idx0], self.scene_info['image_paths'][idx1]),
}
# for LoFTR training
if mask0 is not None: # img_padding is True
if self.coarse_scale:
[ts_mask_0, ts_mask_1] = F.interpolate(torch.stack([mask0[0], mask1[0]], dim=0)[None].float(),
scale_factor=self.coarse_scale,
mode='nearest',
recompute_scale_factor=False)[0].bool()
data.update({'mask0': ts_mask_0, 'mask1': ts_mask_1})
return data
class MegaDepth_RGBD_Dataset(Dataset):
def __init__(self,
root_dir,
npz_path,
mode='train',
min_overlap_score=0.4,
img_resize=None,
df=None,
img_padding=False,
depth_padding=False,
augment_fn=None,
**kwargs):
"""
Manage one scene(npz_path) of MegaDepth dataset.
Args:
root_dir (str): megadepth root directory that has `phoenix`.
npz_path (str): {scene_id}.npz path. This contains image pair information of a scene.
mode (str): options are ['train', 'val', 'test']
min_overlap_score (float): how much a pair should have in common. In range of [0, 1]. Set to 0 when testing.
img_resize (int, optional): the longer edge of resized images. None for no resize. 640 is recommended.
This is useful during training with batches and testing with memory intensive algorithms.
df (int, optional): image size division factor. NOTE: this will change the final image size after img_resize.
img_padding (bool): If set to 'True', zero-pad the image to squared size. This is useful during training.
depth_padding (bool): If set to 'True', zero-pad depthmap to (2000, 2000). This is useful during training.
augment_fn (callable, optional): augments images with pre-defined visual effects.
"""
super().__init__()
self.root_dir = root_dir
self.mode = mode
self.scene_id = npz_path.split('.')[0]
# prepare scene_info and pair_info
if mode == 'test' and min_overlap_score != 0:
logger.warning("You are using `min_overlap_score`!=0 in test mode. Set to 0.")
min_overlap_score = 0
self.scene_info = np.load(npz_path, allow_pickle=True)
self.pair_infos = self.scene_info['pair_infos'].copy()
del self.scene_info['pair_infos']
self.pair_infos = [pair_info for pair_info in self.pair_infos if pair_info[1] > min_overlap_score]
# parameters for image resizing, padding and depthmap padding
if mode == 'train':
assert img_resize is not None and img_padding and depth_padding
self.img_resize = img_resize
self.df = df
self.img_padding = img_padding
self.depth_max_size = 2000 if depth_padding else None # the upperbound of depthmaps size in megadepth.
# for training LoFTR
self.augment_fn = augment_fn if mode == 'train' else None
        self.coarse_scale = kwargs.get('coarse_scale', 0.125)  # kwargs is a dict, so use .get() (getattr() would always return the default)
def __len__(self):
return len(self.pair_infos)
def __getitem__(self, idx):
(idx0, idx1), overlap_score, central_matches = self.pair_infos[idx]
# read grayscale image and mask. (1, h, w) and (h, w)
img_name0 = osp.join(self.root_dir, self.scene_info['image_paths'][idx0])
img_name1 = osp.join(self.root_dir, self.scene_info['image_paths'][idx1])
# TODO: Support augmentation & handle seeds for each worker correctly.
image0, mask0, scale0 = read_megadepth_rgb(
img_name0, self.img_resize, self.df, self.img_padding, None)
# np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
image1, mask1, scale1 = read_megadepth_rgb(
img_name1, self.img_resize, self.df, self.img_padding, None)
# np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
# read depth. shape: (h, w)
if self.mode in ['train', 'val','test']:
depth0 = read_megadepth_depth(
osp.join(self.root_dir, self.scene_info['depth_paths'][idx0]), pad_to=self.depth_max_size)
depth1 = read_megadepth_depth(
osp.join(self.root_dir, self.scene_info['depth_paths'][idx1]), pad_to=self.depth_max_size)
else:
depth0 = depth1 = torch.tensor([])
# read intrinsics of original size
K_0 = torch.tensor(self.scene_info['intrinsics'][idx0].copy(), dtype=torch.float).reshape(3, 3)
K_1 = torch.tensor(self.scene_info['intrinsics'][idx1].copy(), dtype=torch.float).reshape(3, 3)
# read and compute relative poses
T0 = self.scene_info['poses'][idx0]
T1 = self.scene_info['poses'][idx1]
T_0to1 = torch.tensor(np.matmul(T1, np.linalg.inv(T0)), dtype=torch.float)[:4, :4] # (4, 4)
T_1to0 = T_0to1.inverse()
        resize_x, resize_y = image0.shape[-2:]
#print(osp.join(self.root_dir, self.scene_info['depth_paths'][idx1]))
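        # Resize the raw depth maps to the (padded) image resolution and jointly min-max
        # normalize them to [0, 1] so both views share one depth scale before being appended
        # as a 4th input channel below. Note: cv2.resize expects (width, height); the argument
        # order only matters for non-square inputs (training images are zero-padded to a square).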
resized_dpt0 = cv2.resize(np.float32(depth0), (resize_x, resize_y), interpolation=cv2.INTER_NEAREST)
resized_dpt1 = cv2.resize(np.float32(depth1), (resize_x, resize_y), interpolation=cv2.INTER_NEAREST)
        resized_dpt0 = np.clip(resized_dpt0, 0, 3e8)
        resized_dpt1 = np.clip(resized_dpt1, 0, 3e8)
max_ele = max(resized_dpt0.max(),resized_dpt1.max())
min_ele = min(resized_dpt0.min(),resized_dpt1.min())
resized_dpt0 = (resized_dpt0-min_ele)/(max_ele-min_ele)
resized_dpt1 = (resized_dpt1-min_ele)/(max_ele-min_ele)
#resized_dpt0 = np.clip(resized_dpt0, 0.6, 350)
#resized_dpt1 = np.clip(resized_dpt1, 0.6, 350)
#resized_dpt0 = np.log(resized_dpt0+1)
#resized_dpt1 = np.log(resized_dpt1+1)
resized_dpt0 = torch.from_numpy(resized_dpt0).float()
resized_dpt1 = torch.from_numpy(resized_dpt1).float()
image0 = torch.cat((image0, resized_dpt0[None, ...]/1.), dim = 0)
image1 = torch.cat((image1, resized_dpt1[None, ...]/1.), dim = 0)
data = {
'image0': image0, # (3, h, w)
'depth0': depth0, # (h, w)
'image1': image1,
'depth1': depth1,
'T_0to1': T_0to1, # (4, 4)
'T_1to0': T_1to0,
'K0': K_0, # (3, 3)
'K1': K_1,
'scale0': scale0, # [scale_w, scale_h]
'scale1': scale1,
'dataset_name': 'MegaDepth',
'scene_id': self.scene_id,
'pair_id': idx,
'pair_names': (self.scene_info['image_paths'][idx0], self.scene_info['image_paths'][idx1]),
}
# for LoFTR training
if mask0 is not None: # img_padding is True
if self.coarse_scale:
[ts_mask_0, ts_mask_1] = F.interpolate(torch.stack([mask0[0], mask1[0]], dim=0)[None].float(),
scale_factor=self.coarse_scale,
mode='nearest',
recompute_scale_factor=False)[0].bool()
data.update({'mask0': ts_mask_0, 'mask1': ts_mask_1})
return data | 12,808 | 46.6171 | 129 | py |
3DG-STFM | 3DG-STFM-master/src/datasets/scannet.py | from os import path as osp
from typing import Dict
from unicodedata import name
import numpy as np
import torch
import torch.utils as utils
from numpy.linalg import inv
from src.utils.dataset import (
read_scannet_rgb,
read_scannet_gray,
read_scannet_depth,
read_scannet_pose,
read_scannet_intrinsic
)
class ScanNet_RGB_Dataset(utils.data.Dataset):
def __init__(self,
root_dir,
npz_path,
intrinsic_path,
mode='train',
min_overlap_score=0.4,
augment_fn=None,
pose_dir=None,
**kwargs):
"""Manage one scene of ScanNet Dataset.
Args:
root_dir (str): ScanNet root directory that contains scene folders.
npz_path (str): {scene_id}.npz path. This contains image pair information of a scene.
intrinsic_path (str): path to depth-camera intrinsic file.
mode (str): options are ['train', 'val', 'test'].
augment_fn (callable, optional): augments images with pre-defined visual effects.
pose_dir (str): ScanNet root directory that contains all poses.
(we use a separate (optional) pose_dir since we store images and poses separately.)
"""
super().__init__()
self.root_dir = root_dir
self.pose_dir = pose_dir if pose_dir is not None else root_dir
self.mode = mode
# prepare data_names, intrinsics and extrinsics(T)
with np.load(npz_path) as data:
self.data_names = data['name']
            if 'score' in data.keys() and mode not in ['val', 'test']:
kept_mask = data['score'] > min_overlap_score
self.data_names = self.data_names[kept_mask]
self.intrinsics = dict(np.load(intrinsic_path))
# for training LoFTR
self.augment_fn = augment_fn if mode == 'train' else None
def __len__(self):
return len(self.data_names)
def _read_abs_pose(self, scene_name, name):
pth = osp.join(self.pose_dir,
scene_name,
'pose', f'{name}.txt')
return read_scannet_pose(pth)
def _compute_rel_pose(self, scene_name, name0, name1):
pose0 = self._read_abs_pose(scene_name, name0)
pose1 = self._read_abs_pose(scene_name, name1)
return np.matmul(pose1, inv(pose0)) # (4, 4)
def __getitem__(self, idx):
data_name = self.data_names[idx]
scene_name, scene_sub_name, stem_name_0, stem_name_1 = data_name
scene_name = f'scene{scene_name:04d}_{scene_sub_name:02d}'
        # read the RGB image which will be resized to (1, 480, 640, 3)
img_name0 = osp.join(self.root_dir, scene_name, 'color', f'{stem_name_0}.jpg')
img_name1 = osp.join(self.root_dir, scene_name, 'color', f'{stem_name_1}.jpg')
# img_name0 = osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png')#depth image as color for inference--Runyu
# img_name1 = osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png')#depth image as color for inference--Runyu
# TODO: Support augmentation & handle seeds for each worker correctly.
#print(img_name0,img_name1)
image0 = read_scannet_rgb(img_name0, resize=(640, 480), augment_fn=None)
# augment_fn=np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
image1 = read_scannet_rgb(img_name1, resize=(640, 480), augment_fn=None)
# augment_fn=np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
# read the depthmap which is stored as (480, 640)
        if self.mode in ['train', 'val', 'test']:  # the original loader did not include 'test' mode
depth0 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png'))
depth1 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png'))
else:
depth0 = depth1 = torch.tensor([])
image0 = image0.permute(0, 3, 1, 2)
image1 = image1.permute(0, 3, 1, 2)
# depth0 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png'))
# depth1 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png'))
# image0 = depth0/10.
# image0 = image0[None]
# image1 = depth1/10.
# image1 = image1[None]
# read the intrinsic of depthmap
K_0 = K_1 = torch.tensor(self.intrinsics[scene_name].copy(), dtype=torch.float).reshape(3, 3)
# read and compute relative poses
T_0to1 = torch.tensor(self._compute_rel_pose(scene_name, stem_name_0, stem_name_1),
dtype=torch.float32)
T_1to0 = T_0to1.inverse()
#image0 = torch.cat((image0, depth0[None, ...]/10.), dim = 0)
#image1 = torch.cat((image1, depth1[None, ...]/10.), dim = 0)
data = {
            'image0': image0[0], # (3, h, w)
'depth0': depth0, # (h, w)
'image1': image1[0],
'depth1': depth1,
'T_0to1': T_0to1, # (4, 4)
'T_1to0': T_1to0,
'K0': K_0, # (3, 3)
'K1': K_1,
'dataset_name': 'ScanNet',
'scene_id': scene_name,
'pair_id': idx,
'pair_names': (osp.join(scene_name, 'color', f'{stem_name_0}.jpg'),
osp.join(scene_name, 'color', f'{stem_name_1}.jpg'))
}
return data
class ScanNet_RGBD_Dataset(utils.data.Dataset):
def __init__(self,
root_dir,
npz_path,
intrinsic_path,
mode='train',
min_overlap_score=0.4,
augment_fn=None,
pose_dir=None,
**kwargs):
"""Manage one scene of ScanNet Dataset.
Args:
root_dir (str): ScanNet root directory that contains scene folders.
npz_path (str): {scene_id}.npz path. This contains image pair information of a scene.
intrinsic_path (str): path to depth-camera intrinsic file.
mode (str): options are ['train', 'val', 'test'].
augment_fn (callable, optional): augments images with pre-defined visual effects.
pose_dir (str): ScanNet root directory that contains all poses.
(we use a separate (optional) pose_dir since we store images and poses separately.)
"""
super().__init__()
self.root_dir = root_dir
self.pose_dir = pose_dir if pose_dir is not None else root_dir
self.mode = mode
# prepare data_names, intrinsics and extrinsics(T)
with np.load(npz_path) as data:
self.data_names = data['name']
            if 'score' in data.keys() and mode not in ['val', 'test']:
kept_mask = data['score'] > min_overlap_score
self.data_names = self.data_names[kept_mask]
self.intrinsics = dict(np.load(intrinsic_path))
# for training LoFTR
self.augment_fn = augment_fn if mode == 'train' else None
def __len__(self):
return len(self.data_names)
def _read_abs_pose(self, scene_name, name):
pth = osp.join(self.pose_dir,
scene_name,
'pose', f'{name}.txt')
return read_scannet_pose(pth)
def _compute_rel_pose(self, scene_name, name0, name1):
pose0 = self._read_abs_pose(scene_name, name0)
pose1 = self._read_abs_pose(scene_name, name1)
return np.matmul(pose1, inv(pose0)) # (4, 4)
def __getitem__(self, idx):
data_name = self.data_names[idx]
scene_name, scene_sub_name, stem_name_0, stem_name_1 = data_name
scene_name = f'scene{scene_name:04d}_{scene_sub_name:02d}'
        # read the RGB image which will be resized to (1, 480, 640, 3)
img_name0 = osp.join(self.root_dir, scene_name, 'color', f'{stem_name_0}.jpg')
img_name1 = osp.join(self.root_dir, scene_name, 'color', f'{stem_name_1}.jpg')
# img_name0 = osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png')#depth image as color for inference--Runyu
# img_name1 = osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png')#depth image as color for inference--Runyu
# TODO: Support augmentation & handle seeds for each worker correctly.
image0 = read_scannet_rgb(img_name0, resize=(640, 480), augment_fn=None)
# augment_fn=np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
image1 = read_scannet_rgb(img_name1, resize=(640, 480), augment_fn=None)
# augment_fn=np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
gray0 = read_scannet_gray(img_name0, resize=(640, 480), augment_fn=None)
# augment_fn=np.random.choice([self.augment_fn, None], p=[0.5, 0.5]))
gray1 = read_scannet_gray(img_name1, resize=(640, 480), augment_fn=None)
# read the depthmap which is stored as (480, 640)
        if self.mode in ['train', 'val', 'test']:  # the original loader did not include 'test' mode
depth0 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png'))##changed
depth1 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png'))##changed
else:
depth0 = depth1 = torch.tensor([])
image0 = image0.permute(0, 3, 1, 2)
image0 = image0[0]
image1 = image1.permute(0, 3, 1, 2)
image1 = image1[0]
# depth0 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_0}.png'))
# depth1 = read_scannet_depth(osp.join(self.root_dir, scene_name, 'depth', f'{stem_name_1}.png'))
# image0 = depth0/10.
# image0 = image0[None]
# image1 = depth1/10.
# image1 = image1[None]
# read the intrinsic of depthmap
K_0 = K_1 = torch.tensor(self.intrinsics[scene_name].copy(), dtype=torch.float).reshape(3, 3)
# read and compute relative poses
T_0to1 = torch.tensor(self._compute_rel_pose(scene_name, stem_name_0, stem_name_1),
dtype=torch.float32)
T_1to0 = T_0to1.inverse()
#depth0 = depth0*10000./255.##changed
#depth1 = depth1*10000./255.##changed
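        # Append the depth map as a 4th input channel. The division by 10 roughly rescales
        # indoor ScanNet depth (a few metres) into [0, 1]; this is an empirical normalization
        # rather than an exact bound.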
image0 = torch.cat((image0, depth0[None, ...]/10.), dim = 0)
image1 = torch.cat((image1, depth1[None, ...]/10.), dim = 0)
data = {
'image0': image0, # (4, h, w)
'depth0': depth0, # (h, w)
'gray0':gray0,
'image1': image1,
'depth1': depth1,
'gray1': gray1,
'T_0to1': T_0to1, # (4, 4)
'T_1to0': T_1to0,
'K0': K_0, # (3, 3)
'K1': K_1,
'dataset_name': 'ScanNet',
'scene_id': scene_name,
'pair_id': idx,
'pair_names': (osp.join(scene_name, 'color', f'{stem_name_0}.jpg'),
osp.join(scene_name, 'color', f'{stem_name_1}.jpg'))
}
return data | 11,203 | 43.995984 | 130 | py |
3DG-STFM | 3DG-STFM-master/src/lightning/lightning_loftr.py |
from collections import defaultdict
import pprint
from loguru import logger
from pathlib import Path
import torch
import numpy as np
import pytorch_lightning as pl
from matplotlib import pyplot as plt
from src.loftr import LoFTR_RGB,LoFTR_RGBD,LoFTR_RGBD_teacher,LoFTR_RGB_student
from src.loftr.utils.supervision import compute_supervision_coarse, compute_supervision_fine
from src.losses.loftr_loss import LoFTRLoss,LoFTRLoss_t_s
from src.optimizers import build_optimizer, build_scheduler
from src.utils.metrics import (
compute_symmetrical_epipolar_errors,
compute_pose_errors,
compute_homo_errors,
aggregate_metrics_homo,
aggregate_metrics,
filter_depth_inconsist_point,
filter_unsampled_point
)
from src.utils.plotting import make_matching_figures
from src.utils.comm import gather, all_gather
from src.utils.misc import lower_config, flattenList
from src.utils.profiler import PassThroughProfiler
import torch.nn as nn
class PL_LoFTR_RGB(pl.LightningModule):
def __init__(self, config, pretrained_ckpt=None, profiler=None, dump_dir=None):
"""
TODO:
- use the new version of PL logging API.
"""
super().__init__()
# Misc
self.config = config # full config
_config = lower_config(self.config)
self.loftr_cfg = lower_config(_config['loftr'])
self.profiler = profiler or PassThroughProfiler()
self.n_vals_plot = max(config.TRAINER.N_VAL_PAIRS_TO_PLOT // config.TRAINER.WORLD_SIZE, 1)
# Matcher: LoFTR
self.matcher = LoFTR_RGB(config=_config['loftr'])
self.loss = LoFTRLoss(_config)
# Pretrained weights
if pretrained_ckpt:
#self.matcher.load_state_dict(torch.load(pretrained_ckpt, map_location='cpu')['state_dict'])
sd = torch.load(pretrained_ckpt, map_location='cpu')['state_dict']
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in sd.items():
name = k[8:] # remove `matcher.`
new_state_dict[name] = v
self.matcher.load_state_dict(new_state_dict, strict=False)
logger.info(f"Load \'{pretrained_ckpt}\' as pretrained checkpoint")
# Testing
self.dump_dir = dump_dir
def configure_optimizers(self):
# FIXME: The scheduler did not work properly when `--resume_from_checkpoint`
optimizer = build_optimizer(self, self.config)
scheduler = build_scheduler(self.config, optimizer)
return [optimizer], [scheduler]
def optimizer_step(
self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# learning rate warm up
warmup_step = self.config.TRAINER.WARMUP_STEP
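        # Linear warm-up: the lr is ramped from WARMUP_RATIO * TRUE_LR up to TRUE_LR over the
        # first WARMUP_STEP steps (with the default WARMUP_RATIO = 0. the ramp starts from 0).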
if self.trainer.global_step < warmup_step:
if self.config.TRAINER.WARMUP_TYPE == 'linear':
base_lr = self.config.TRAINER.WARMUP_RATIO * self.config.TRAINER.TRUE_LR
lr = base_lr + \
(self.trainer.global_step / self.config.TRAINER.WARMUP_STEP) * \
abs(self.config.TRAINER.TRUE_LR - base_lr)
for pg in optimizer.param_groups:
pg['lr'] = lr
elif self.config.TRAINER.WARMUP_TYPE == 'constant':
pass
else:
raise ValueError(f'Unknown lr warm-up strategy: {self.config.TRAINER.WARMUP_TYPE}')
# update params
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
def _trainval_inference(self, batch):
with self.profiler.profile("Compute coarse supervision"):
compute_supervision_coarse(batch, self.config)
with self.profiler.profile("LoFTR"):
self.matcher(batch)
with self.profiler.profile("Compute fine supervision"):
compute_supervision_fine(batch, self.config)
with self.profiler.profile("Compute losses"):
self.loss(batch)
def _compute_metrics(self, batch):
with self.profiler.profile("Copmute metrics"):
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'][batch['m_bids'] == b].cpu().numpy() for b in range(bs)],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom_sample(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def training_step(self, batch, batch_idx):
self._trainval_inference(batch)
# logging
if self.trainer.global_rank == 0 and self.global_step % self.trainer.log_every_n_steps == 0:
# scalars
for k, v in batch['loss_scalars'].items():
self.logger.experiment.add_scalar(f'train/{k}', v, self.global_step)
# net-params
if self.config.LOFTR.MATCH_COARSE.MATCH_TYPE == 'sinkhorn':
self.logger.experiment.add_scalar(
f'skh_bin_score', self.matcher.coarse_matching.bin_score.clone().detach().cpu().data,
self.global_step)
# figures
if self.config.TRAINER.ENABLE_PLOTTING:
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
figures = make_matching_figures(batch, self.config, self.config.TRAINER.PLOT_MODE)
for k, v in figures.items():
self.logger.experiment.add_figure(f'train_match/{k}', v, self.global_step)
return {'loss': batch['loss']}
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
if self.trainer.global_rank == 0:
self.logger.experiment.add_scalar(
'train/avg_loss_on_epoch', avg_loss,
global_step=self.current_epoch)
def validation_step(self, batch, batch_idx):
self._trainval_inference(batch)
ret_dict, _ = self._compute_metrics(batch)
val_plot_interval = max(self.trainer.num_val_batches[0] // self.n_vals_plot, 1)
figures = {self.config.TRAINER.PLOT_MODE: []}
if batch_idx % val_plot_interval == 0:
figures = make_matching_figures(batch, self.config, mode=self.config.TRAINER.PLOT_MODE)
return {
**ret_dict,
'loss_scalars': batch['loss_scalars'],
'figures': figures,
}
def validation_epoch_end(self, outputs):
# handle multiple validation sets
multi_outputs = [outputs] if not isinstance(outputs[0], (list, tuple)) else outputs
multi_val_metrics = defaultdict(list)
for valset_idx, outputs in enumerate(multi_outputs):
            # since pl performs a sanity check at the very beginning of training
cur_epoch = self.trainer.current_epoch
if not self.trainer.resume_from_checkpoint and self.trainer.running_sanity_check:
cur_epoch = -1
# 1. loss_scalars: dict of list, on cpu
_loss_scalars = [o['loss_scalars'] for o in outputs]
loss_scalars = {k: flattenList(all_gather([_ls[k] for _ls in _loss_scalars])) for k in _loss_scalars[0]}
# 2. val metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(all_gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
# NOTE: all ranks need to `aggregate_merics`, but only log at rank-0
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
for thr in [5, 10, 20]:
multi_val_metrics[f'auc@{thr}'].append(val_metrics_4tb[f'auc@{thr}'])
# 3. figures
_figures = [o['figures'] for o in outputs]
figures = {k: flattenList(gather(flattenList([_me[k] for _me in _figures]))) for k in _figures[0]}
# tensorboard records only on rank 0
if self.trainer.global_rank == 0:
for k, v in loss_scalars.items():
mean_v = torch.stack(v).mean()
self.logger.experiment.add_scalar(f'val_{valset_idx}/avg_{k}', mean_v, global_step=cur_epoch)
for k, v in val_metrics_4tb.items():
self.logger.experiment.add_scalar(f"metrics_{valset_idx}/{k}", v, global_step=cur_epoch)
for k, v in figures.items():
if self.trainer.global_rank == 0:
for plot_idx, fig in enumerate(v):
self.logger.experiment.add_figure(
f'val_match_{valset_idx}/{k}/pair-{plot_idx}', fig, cur_epoch, close=True)
plt.close('all')
for thr in [5, 10, 20]:
# log on all ranks for ModelCheckpoint callback to work properly
self.log(f'auc@{thr}', torch.tensor(np.mean(multi_val_metrics[f'auc@{thr}']))) # ckpt monitors on this
def test_step(self, batch, batch_idx):
with self.profiler.profile("LoFTR"):
self.matcher(batch)
setting = 'Normal'
if setting == 'Normal':
ret_dict, rel_pair_names = self._compute_metrics(batch)
elif setting == 'depth_check':
# print("Using the depth information to remove the matching outliers")
ret_dict, rel_pair_names = self._compute_metrics_custom(batch)
elif setting == 'depth_sample':
ret_dict, rel_pair_names = self._compute_metrics_custom_sample(batch)
with self.profiler.profile("dump_results"):
if self.dump_dir is not None:
# dump results for further analysis
keys_to_save = {'mkpts0_f', 'mkpts1_f', 'mconf', 'epi_errs'}
pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].shape[0]
dumps = []
for b_id in range(bs):
item = {}
mask = batch['m_bids'] == b_id
item['pair_names'] = pair_names[b_id]
item['identifier'] = '#'.join(rel_pair_names[b_id])
for key in keys_to_save:
if setting == 'depth_check':
item[key] = batch[key][:].cpu().numpy()
elif setting == 'Normal':
item[key] = batch[key][mask].cpu().numpy()
elif setting == 'depth_sample':
print('here')
for key in ['R_errs', 't_errs', 'inliers']:
item[key] = batch[key][b_id]
dumps.append(item)
ret_dict['dumps'] = dumps
return ret_dict
def test_epoch_end(self, outputs):
# metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
# [{key: [{...}, *#bs]}, *#batch]
if self.dump_dir is not None:
Path(self.dump_dir).mkdir(parents=True, exist_ok=True)
_dumps = flattenList([o['dumps'] for o in outputs]) # [{...}, #bs*#batch]
dumps = flattenList(gather(_dumps)) # [{...}, #proc*#bs*#batch]
logger.info(f'Prediction and evaluation results will be saved to: {self.dump_dir}')
if self.trainer.global_rank == 0:
print(self.profiler.summary())
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
logger.info('\n' + pprint.pformat(val_metrics_4tb))
if self.dump_dir is not None:
np.save(Path(self.dump_dir) / 'LoFTR_pred_eval_our', dumps)
class PL_LoFTR_RGBD(pl.LightningModule):
def __init__(self, config, pretrained_ckpt=None, profiler=None, dump_dir=None):
"""
TODO:
- use the new version of PL logging API.
"""
super().__init__()
# Misc
self.config = config # full config
_config = lower_config(self.config)
self.loftr_cfg = lower_config(_config['loftr'])
self.profiler = profiler or PassThroughProfiler()
self.n_vals_plot = max(config.TRAINER.N_VAL_PAIRS_TO_PLOT // config.TRAINER.WORLD_SIZE, 1)
# Matcher: LoFTR
self.matcher = LoFTR_RGBD(config=_config['loftr'])
self.loss = LoFTRLoss(_config)
# Pretrained weights
if pretrained_ckpt:
#self.matcher.load_state_dict(torch.load(pretrained_ckpt, map_location='cpu')['state_dict'])
sd = torch.load(pretrained_ckpt, map_location='cpu')['state_dict']
from collections import OrderedDict
new_state_dict = OrderedDict()
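            # checkpoint keys were saved from the Lightning module, so they carry a "matcher." prefix that has to be stripped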
for k, v in sd.items():
name = k[8:] # remove `matcher.`
new_state_dict[name] = v
self.matcher.load_state_dict(new_state_dict, strict=False)
logger.info(f"Load \'{pretrained_ckpt}\' as pretrained checkpoint")
# Testing
self.dump_dir = dump_dir
def configure_optimizers(self):
# FIXME: The scheduler did not work properly when `--resume_from_checkpoint`
optimizer = build_optimizer(self, self.config)
scheduler = build_scheduler(self.config, optimizer)
return [optimizer], [scheduler]
def optimizer_step(
self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# learning rate warm up
warmup_step = self.config.TRAINER.WARMUP_STEP
#print(self.trainer.global_step, self.config.TRAINER.WARMUP_STEP)
if self.trainer.global_step < warmup_step:
if self.config.TRAINER.WARMUP_TYPE == 'linear':
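                # linear warm-up: ramp the lr from WARMUP_RATIO * TRUE_LR up to TRUE_LR over the first WARMUP_STEP optimizer steps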
base_lr = self.config.TRAINER.WARMUP_RATIO * self.config.TRAINER.TRUE_LR
lr = base_lr + \
(self.trainer.global_step / self.config.TRAINER.WARMUP_STEP) * \
abs(self.config.TRAINER.TRUE_LR - base_lr)
for pg in optimizer.param_groups:
pg['lr'] = lr
elif self.config.TRAINER.WARMUP_TYPE == 'constant':
pass
else:
raise ValueError(f'Unknown lr warm-up strategy: {self.config.TRAINER.WARMUP_TYPE}')
# update params
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
def _trainval_inference(self, batch):
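        # shared train/val pipeline: coarse GT supervision -> LoFTR forward -> fine GT supervision -> loss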
with self.profiler.profile("Compute coarse supervision"):
compute_supervision_coarse(batch, self.config)
with self.profiler.profile("LoFTR"):
self.matcher(batch)
with self.profiler.profile("Compute fine supervision"):
compute_supervision_fine(batch, self.config)
with self.profiler.profile("Compute losses"):
self.loss(batch)
def _compute_metrics(self, batch):
with self.profiler.profile("Copmute metrics"):
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'][batch['m_bids'] == b].cpu().numpy() for b in range(bs)],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom_sample(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def training_step(self, batch, batch_idx):
self._trainval_inference(batch)
# logging
if self.trainer.global_rank == 0 and self.global_step % self.trainer.log_every_n_steps == 0:
# scalars
for k, v in batch['loss_scalars'].items():
self.logger.experiment.add_scalar(f'train/{k}', v, self.global_step)
# net-params
if self.config.LOFTR.MATCH_COARSE.MATCH_TYPE == 'sinkhorn':
self.logger.experiment.add_scalar(
f'skh_bin_score', self.matcher.coarse_matching.bin_score.clone().detach().cpu().data,
self.global_step)
# figures
if self.config.TRAINER.ENABLE_PLOTTING:
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
figures = make_matching_figures(batch, self.config, self.config.TRAINER.PLOT_MODE)
for k, v in figures.items():
self.logger.experiment.add_figure(f'train_match/{k}', v, self.global_step)
return {'loss': batch['loss']}
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
if self.trainer.global_rank == 0:
self.logger.experiment.add_scalar(
'train/avg_loss_on_epoch', avg_loss,
global_step=self.current_epoch)
def validation_step(self, batch, batch_idx):
self._trainval_inference(batch)
ret_dict, _ = self._compute_metrics(batch)
val_plot_interval = max(self.trainer.num_val_batches[0] // self.n_vals_plot, 1)
figures = {self.config.TRAINER.PLOT_MODE: []}
if batch_idx % val_plot_interval == 0:
figures = make_matching_figures(batch, self.config, mode=self.config.TRAINER.PLOT_MODE)
return {
**ret_dict,
'loss_scalars': batch['loss_scalars'],
'figures': figures,
}
def validation_epoch_end(self, outputs):
# handle multiple validation sets
multi_outputs = [outputs] if not isinstance(outputs[0], (list, tuple)) else outputs
multi_val_metrics = defaultdict(list)
for valset_idx, outputs in enumerate(multi_outputs):
            # since pl performs sanity_check at the very beginning of training
cur_epoch = self.trainer.current_epoch
if not self.trainer.resume_from_checkpoint and self.trainer.running_sanity_check:
cur_epoch = -1
# 1. loss_scalars: dict of list, on cpu
_loss_scalars = [o['loss_scalars'] for o in outputs]
loss_scalars = {k: flattenList(all_gather([_ls[k] for _ls in _loss_scalars])) for k in _loss_scalars[0]}
# 2. val metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(all_gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
            # NOTE: all ranks need to run `aggregate_metrics`, but only rank-0 logs
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
for thr in [5, 10, 20]:
multi_val_metrics[f'auc@{thr}'].append(val_metrics_4tb[f'auc@{thr}'])
# 3. figures
_figures = [o['figures'] for o in outputs]
figures = {k: flattenList(gather(flattenList([_me[k] for _me in _figures]))) for k in _figures[0]}
# tensorboard records only on rank 0
if self.trainer.global_rank == 0:
for k, v in loss_scalars.items():
mean_v = torch.stack(v).mean()
self.logger.experiment.add_scalar(f'val_{valset_idx}/avg_{k}', mean_v, global_step=cur_epoch)
for k, v in val_metrics_4tb.items():
self.logger.experiment.add_scalar(f"metrics_{valset_idx}/{k}", v, global_step=cur_epoch)
for k, v in figures.items():
if self.trainer.global_rank == 0:
for plot_idx, fig in enumerate(v):
self.logger.experiment.add_figure(
f'val_match_{valset_idx}/{k}/pair-{plot_idx}', fig, cur_epoch, close=True)
plt.close('all')
for thr in [5, 10, 20]:
# log on all ranks for ModelCheckpoint callback to work properly
self.log(f'auc@{thr}', torch.tensor(np.mean(multi_val_metrics[f'auc@{thr}']))) # ckpt monitors on this
def test_step(self, batch, batch_idx):
with self.profiler.profile("LoFTR"):
self.matcher(batch)
setting = 'Normal'
if setting == 'Normal':
ret_dict, rel_pair_names = self._compute_metrics(batch)
elif setting == 'depth_check':
# print("Using the depth information to remove the matching outliers")
ret_dict, rel_pair_names = self._compute_metrics_custom(batch)
elif setting == 'depth_sample':
ret_dict, rel_pair_names = self._compute_metrics_custom_sample(batch)
with self.profiler.profile("dump_results"):
if self.dump_dir is not None:
# dump results for further analysis
keys_to_save = {'mkpts0_f', 'mkpts1_f', 'mconf', 'epi_errs'}
pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].shape[0]
dumps = []
for b_id in range(bs):
item = {}
mask = batch['m_bids'] == b_id
item['pair_names'] = pair_names[b_id]
item['identifier'] = '#'.join(rel_pair_names[b_id])
for key in keys_to_save:
if setting == 'depth_check':
item[key] = batch[key][:].cpu().numpy()
elif setting == 'Normal':
item[key] = batch[key][mask].cpu().numpy()
elif setting == 'depth_sample':
print('here')
for key in ['R_errs', 't_errs', 'inliers']:
item[key] = batch[key][b_id]
dumps.append(item)
ret_dict['dumps'] = dumps
return ret_dict
def test_epoch_end(self, outputs):
# metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
# [{key: [{...}, *#bs]}, *#batch]
if self.dump_dir is not None:
Path(self.dump_dir).mkdir(parents=True, exist_ok=True)
_dumps = flattenList([o['dumps'] for o in outputs]) # [{...}, #bs*#batch]
dumps = flattenList(gather(_dumps)) # [{...}, #proc*#bs*#batch]
logger.info(f'Prediction and evaluation results will be saved to: {self.dump_dir}')
if self.trainer.global_rank == 0:
print(self.profiler.summary())
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
logger.info('\n' + pprint.pformat(val_metrics_4tb))
if self.dump_dir is not None:
np.save(Path(self.dump_dir) / 'LoFTR_pred_eval', dumps)
class PL_LoFTR_RGB_teacher_student(pl.LightningModule):
def __init__(self, config, pretrained_ckpt=None, profiler=None, dump_dir=None):
"""
TODO:
- use the new version of PL logging API.
"""
super().__init__()
# Misc
self.config = config # full config
_config = lower_config(self.config)
self.loftr_cfg = lower_config(_config['loftr'])
self.profiler = profiler or PassThroughProfiler()
self.n_vals_plot = max(config.TRAINER.N_VAL_PAIRS_TO_PLOT // config.TRAINER.WORLD_SIZE, 1)
# Matcher: LoFTR
self.matcher = LoFTR_RGB_student(config=_config['loftr'])
self.loss = LoFTRLoss_t_s(_config)
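        # teacher-student distillation loss; the frozen RGB-D teacher embedded in LoFTR_RGB_student provides the targets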
#pretrained_rgb = "./logs/tb_logs/4gpu_mini_rgb_rgbd/rgb/checkpoints/epoch=28-auc@5=0.151-auc@10=0.313-auc@20=0.484.ckpt"
#sd = torch.load(pretrained_rgb, map_location='cpu')['state_dict']
#from collections import OrderedDict
#new_state_dict = OrderedDict()
#for k, v in sd.items():
# name = k[8:] # remove `matcher.`
# new_state_dict[name] = v
#self.matcher.load_state_dict(new_state_dict, strict=False)
# Pretrained weights
if pretrained_ckpt:
self.matcher.load_state_dict(torch.load(pretrained_ckpt, map_location='cpu')['state_dict'])
logger.info(f"Load \'{pretrained_ckpt}\' as pretrained checkpoint")
# Testing
self.dump_dir = dump_dir
def configure_optimizers(self):
# FIXME: The scheduler did not work properly when `--resume_from_checkpoint`
optimizer = build_optimizer(self, self.config)
scheduler = build_scheduler(self.config, optimizer)
return [optimizer], [scheduler]
def optimizer_step(
self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
# learning rate warm up
warmup_step = self.config.TRAINER.WARMUP_STEP
#print(self.trainer.global_step,self.config.TRAINER.WARMUP_STEP)
if self.trainer.global_step < warmup_step:
if self.config.TRAINER.WARMUP_TYPE == 'linear':
base_lr = self.config.TRAINER.WARMUP_RATIO * self.config.TRAINER.TRUE_LR
lr = base_lr + \
(self.trainer.global_step / self.config.TRAINER.WARMUP_STEP) * \
abs(self.config.TRAINER.TRUE_LR - base_lr)
for pg in optimizer.param_groups:
pg['lr'] = lr
elif self.config.TRAINER.WARMUP_TYPE == 'constant':
pass
else:
raise ValueError(f'Unknown lr warm-up strategy: {self.config.TRAINER.WARMUP_TYPE}')
# update params
optimizer.step(closure=optimizer_closure)
optimizer.zero_grad()
def _trainval_inference(self, batch):
with self.profiler.profile("Compute coarse supervision"):
compute_supervision_coarse(batch, self.config)
with self.profiler.profile("LoFTR"):
self.matcher(batch)
with self.profiler.profile("Compute fine supervision"):
compute_supervision_fine(batch, self.config)
with self.profiler.profile("Compute losses"):
self.loss(batch)
def _compute_metrics(self, batch):
with self.profiler.profile("Copmute metrics"):
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'][batch['m_bids'] == b].cpu().numpy() for b in range(bs)],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def _compute_metrics_custom_sample(self, batch):
with self.profiler.profile("Copmute metrics"):
filter_depth_inconsist_point(batch, self.config)
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
compute_pose_errors(batch, self.config) # compute R_errs, t_errs, pose_errs for each pair
rel_pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].size(0)
metrics = {
# to filter duplicate pairs caused by DistributedSampler
'identifiers': ['#'.join(rel_pair_names[b]) for b in range(bs)],
'epi_errs': [batch['epi_errs'].cpu().numpy()],
'R_errs': batch['R_errs'],
't_errs': batch['t_errs'],
'inliers': batch['inliers']}
ret_dict = {'metrics': metrics}
return ret_dict, rel_pair_names
def training_step(self, batch, batch_idx):
self._trainval_inference(batch)
# logging
if self.trainer.global_rank == 0 and self.global_step % self.trainer.log_every_n_steps == 0:
# scalars
for k, v in batch['loss_scalars'].items():
self.logger.experiment.add_scalar(f'train/{k}', v, self.global_step)
# net-params
if self.config.LOFTR.MATCH_COARSE.MATCH_TYPE == 'sinkhorn':
self.logger.experiment.add_scalar(
f'skh_bin_score', self.matcher.coarse_matching.bin_score.clone().detach().cpu().data,
self.global_step)
# figures
if self.config.TRAINER.ENABLE_PLOTTING:
compute_symmetrical_epipolar_errors(batch) # compute epi_errs for each match
figures = make_matching_figures(batch, self.config, self.config.TRAINER.PLOT_MODE)
for k, v in figures.items():
self.logger.experiment.add_figure(f'train_match/{k}', v, self.global_step)
return {'loss': batch['loss']}
def training_epoch_end(self, outputs):
avg_loss = torch.stack([x['loss'] for x in outputs]).mean()
if self.trainer.global_rank == 0:
self.logger.experiment.add_scalar(
'train/avg_loss_on_epoch', avg_loss,
global_step=self.current_epoch)
def validation_step(self, batch, batch_idx):
self._trainval_inference(batch)
ret_dict, _ = self._compute_metrics(batch)
val_plot_interval = max(self.trainer.num_val_batches[0] // self.n_vals_plot, 1)
figures = {self.config.TRAINER.PLOT_MODE: []}
if batch_idx % val_plot_interval == 0:
figures = make_matching_figures(batch, self.config, mode=self.config.TRAINER.PLOT_MODE)
return {
**ret_dict,
'loss_scalars': batch['loss_scalars'],
'figures': figures,
}
def validation_epoch_end(self, outputs):
# handle multiple validation sets
multi_outputs = [outputs] if not isinstance(outputs[0], (list, tuple)) else outputs
multi_val_metrics = defaultdict(list)
for valset_idx, outputs in enumerate(multi_outputs):
            # since pl performs sanity_check at the very beginning of training
cur_epoch = self.trainer.current_epoch
if not self.trainer.resume_from_checkpoint and self.trainer.running_sanity_check:
cur_epoch = -1
# 1. loss_scalars: dict of list, on cpu
_loss_scalars = [o['loss_scalars'] for o in outputs]
loss_scalars = {k: flattenList(all_gather([_ls[k] for _ls in _loss_scalars])) for k in _loss_scalars[0]}
# 2. val metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(all_gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
            # NOTE: all ranks need to run `aggregate_metrics`, but only rank-0 logs
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
for thr in [5, 10, 20]:
multi_val_metrics[f'auc@{thr}'].append(val_metrics_4tb[f'auc@{thr}'])
# 3. figures
_figures = [o['figures'] for o in outputs]
figures = {k: flattenList(gather(flattenList([_me[k] for _me in _figures]))) for k in _figures[0]}
# tensorboard records only on rank 0
if self.trainer.global_rank == 0:
for k, v in loss_scalars.items():
mean_v = torch.stack(v).mean()
self.logger.experiment.add_scalar(f'val_{valset_idx}/avg_{k}', mean_v, global_step=cur_epoch)
for k, v in val_metrics_4tb.items():
self.logger.experiment.add_scalar(f"metrics_{valset_idx}/{k}", v, global_step=cur_epoch)
for k, v in figures.items():
if self.trainer.global_rank == 0:
for plot_idx, fig in enumerate(v):
self.logger.experiment.add_figure(
f'val_match_{valset_idx}/{k}/pair-{plot_idx}', fig, cur_epoch, close=True)
plt.close('all')
for thr in [5, 10, 20]:
# log on all ranks for ModelCheckpoint callback to work properly
self.log(f'auc@{thr}', torch.tensor(np.mean(multi_val_metrics[f'auc@{thr}']))) # ckpt monitors on this
def test_step(self, batch, batch_idx):
with self.profiler.profile("LoFTR"):
self.matcher(batch)
setting = 'Normal'
if setting == 'Normal':
ret_dict, rel_pair_names = self._compute_metrics(batch)
elif setting == 'depth_check':
# print("Using the depth information to remove the matching outliers")
ret_dict, rel_pair_names = self._compute_metrics_custom(batch)
elif setting == 'depth_sample':
ret_dict, rel_pair_names = self._compute_metrics_custom_sample(batch)
with self.profiler.profile("dump_results"):
if self.dump_dir is not None:
# dump results for further analysis
keys_to_save = {'mkpts0_f', 'mkpts1_f', 'mconf', 'epi_errs'}
pair_names = list(zip(*batch['pair_names']))
bs = batch['image0'].shape[0]
dumps = []
for b_id in range(bs):
item = {}
mask = batch['m_bids'] == b_id
item['pair_names'] = pair_names[b_id]
item['identifier'] = '#'.join(rel_pair_names[b_id])
for key in keys_to_save:
if setting == 'depth_check':
item[key] = batch[key][:].cpu().numpy()
elif setting == 'Normal':
item[key] = batch[key][mask].cpu().numpy()
elif setting == 'depth_sample':
print('here')
for key in ['R_errs', 't_errs', 'inliers']:
item[key] = batch[key][b_id]
dumps.append(item)
ret_dict['dumps'] = dumps
return ret_dict
def test_epoch_end(self, outputs):
# metrics: dict of list, numpy
_metrics = [o['metrics'] for o in outputs]
metrics = {k: flattenList(gather(flattenList([_me[k] for _me in _metrics]))) for k in _metrics[0]}
# [{key: [{...}, *#bs]}, *#batch]
if self.dump_dir is not None:
Path(self.dump_dir).mkdir(parents=True, exist_ok=True)
_dumps = flattenList([o['dumps'] for o in outputs]) # [{...}, #bs*#batch]
dumps = flattenList(gather(_dumps)) # [{...}, #proc*#bs*#batch]
logger.info(f'Prediction and evaluation results will be saved to: {self.dump_dir}')
if self.trainer.global_rank == 0:
print(self.profiler.summary())
val_metrics_4tb = aggregate_metrics(metrics, self.config.TRAINER.EPI_ERR_THR)
logger.info('\n' + pprint.pformat(val_metrics_4tb))
if self.dump_dir is not None:
np.save(Path(self.dump_dir) / 'LoFTR_pred_eval', dumps)
| 40,636 | 45.021518 | 129 | py |
3DG-STFM | 3DG-STFM-master/src/lightning/data.py | import os
import math
from collections import abc
from loguru import logger
from torch.utils.data.dataset import Dataset
from tqdm import tqdm
from os import path as osp
from pathlib import Path
from joblib import Parallel, delayed
import pytorch_lightning as pl
from torch import distributed as dist
from torch.utils.data import (
Dataset,
DataLoader,
ConcatDataset,
DistributedSampler,
RandomSampler,
dataloader
)
from src.utils.augment import build_augmentor
from src.utils.dataloader import get_local_split
from src.utils.misc import tqdm_joblib
from src.utils import comm
from src.datasets.megadepth import MegaDepth_RGB_Dataset, MegaDepth_RGBD_Dataset
from src.datasets.scannet import ScanNet_RGB_Dataset, ScanNet_RGBD_Dataset
from src.datasets.sampler import RandomConcatSampler
class RGBDataModule(pl.LightningDataModule):
"""
    For distributed training, each training process is assigned
only a part of the training scenes to reduce memory overhead.
"""
def __init__(self, args, config):
super().__init__()
# 1. data config
        # Train and Val should come from the same data source
self.trainval_data_source = config.DATASET.TRAINVAL_DATA_SOURCE
self.test_data_source = config.DATASET.TEST_DATA_SOURCE
# training and validating
self.train_data_root = config.DATASET.TRAIN_DATA_ROOT
self.train_pose_root = config.DATASET.TRAIN_POSE_ROOT # (optional)
self.train_npz_root = config.DATASET.TRAIN_NPZ_ROOT
self.train_list_path = config.DATASET.TRAIN_LIST_PATH
self.train_intrinsic_path = config.DATASET.TRAIN_INTRINSIC_PATH
self.val_data_root = config.DATASET.VAL_DATA_ROOT
self.val_pose_root = config.DATASET.VAL_POSE_ROOT # (optional)
self.val_npz_root = config.DATASET.VAL_NPZ_ROOT
self.val_list_path = config.DATASET.VAL_LIST_PATH
self.val_intrinsic_path = config.DATASET.VAL_INTRINSIC_PATH
# testing
self.test_data_root = config.DATASET.TEST_DATA_ROOT
self.test_pose_root = config.DATASET.TEST_POSE_ROOT # (optional)
self.test_npz_root = config.DATASET.TEST_NPZ_ROOT
self.test_list_path = config.DATASET.TEST_LIST_PATH
self.test_intrinsic_path = config.DATASET.TEST_INTRINSIC_PATH
# 2. dataset config
# general options
self.min_overlap_score_test = config.DATASET.MIN_OVERLAP_SCORE_TEST # 0.4, omit data with overlap_score < min_overlap_score
self.min_overlap_score_train = config.DATASET.MIN_OVERLAP_SCORE_TRAIN
self.augment_fn = build_augmentor(config.DATASET.AUGMENTATION_TYPE) # None, options: [None, 'dark', 'mobile']
# MegaDepth options
self.mgdpt_img_resize = config.DATASET.MGDPT_IMG_RESIZE # 840
self.mgdpt_img_pad = config.DATASET.MGDPT_IMG_PAD # True
self.mgdpt_depth_pad = config.DATASET.MGDPT_DEPTH_PAD # True
self.mgdpt_df = config.DATASET.MGDPT_DF # 8
self.coarse_scale = 1 / config.LOFTR.RESOLUTION[0] # 0.125. for training loftr.
# 3.loader parameters
self.train_loader_params = {
'batch_size': args.batch_size,
'num_workers': args.num_workers,
'pin_memory': getattr(args, 'pin_memory', True)
}
self.val_loader_params = {
'batch_size': 1,
'shuffle': False,
'num_workers': args.num_workers,
'pin_memory': getattr(args, 'pin_memory', True)
}
self.test_loader_params = {
'batch_size': 1,
'shuffle': False,
'num_workers': args.num_workers,
'pin_memory': True
}
# 4. sampler
self.data_sampler = config.TRAINER.DATA_SAMPLER
self.n_samples_per_subset = config.TRAINER.N_SAMPLES_PER_SUBSET
self.subset_replacement = config.TRAINER.SB_SUBSET_SAMPLE_REPLACEMENT
self.shuffle = config.TRAINER.SB_SUBSET_SHUFFLE
self.repeat = config.TRAINER.SB_REPEAT
# (optional) RandomSampler for debugging
# misc configurations
self.parallel_load_data = getattr(args, 'parallel_load_data', False)
self.seed = config.TRAINER.SEED # 66
def setup(self, stage=None):
"""
Setup train / val / test dataset. This method will be called by PL automatically.
Args:
stage (str): 'fit' in training phase, and 'test' in testing phase.
"""
assert stage in ['fit', 'test'], "stage must be either fit or test"
try:
self.world_size = dist.get_world_size()
self.rank = dist.get_rank()
logger.info(f"[rank:{self.rank}] world_size: {self.world_size}")
except AssertionError as ae:
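            # torch.distributed is not initialized (e.g. single-process debugging): fall back to world_size=1 / rank=0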
self.world_size = 1
self.rank = 0
            logger.warning(str(ae) + " (set world_size=1 and rank=0)")
if stage == 'fit':
self.train_dataset = self._setup_dataset(
self.train_data_root,
self.train_npz_root,
self.train_list_path,
self.train_intrinsic_path,
mode='train',
min_overlap_score=self.min_overlap_score_train,
pose_dir=self.train_pose_root)
# setup multiple (optional) validation subsets
if isinstance(self.val_list_path, (list, tuple)):
self.val_dataset = []
if not isinstance(self.val_npz_root, (list, tuple)):
self.val_npz_root = [self.val_npz_root for _ in range(len(self.val_list_path))]
for npz_list, npz_root in zip(self.val_list_path, self.val_npz_root):
self.val_dataset.append(self._setup_dataset(
self.val_data_root,
npz_root,
npz_list,
self.val_intrinsic_path,
mode='val',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.val_pose_root))
else:
self.val_dataset = self._setup_dataset(
self.val_data_root,
self.val_npz_root,
self.val_list_path,
self.val_intrinsic_path,
mode='val',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.val_pose_root)
logger.info(f'[rank:{self.rank}] Train & Val Dataset loaded!')
        else:  # stage == 'test'
self.test_dataset = self._setup_dataset(
self.test_data_root,
self.test_npz_root,
self.test_list_path,
self.test_intrinsic_path,
mode='test',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.test_pose_root)
logger.info(f'[rank:{self.rank}]: Test Dataset loaded!')
def _setup_dataset(self,
data_root,
split_npz_root,
scene_list_path,
intri_path,
mode='train',
min_overlap_score=0.,
pose_dir=None):
""" Setup train / val / test set"""
with open(scene_list_path, 'r') as f:
npz_names = [name.split()[0] for name in f.readlines()]
if mode == 'train':
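            # each rank only keeps its own share of the training scenes to reduce memory overhead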
local_npz_names = get_local_split(npz_names, self.world_size, self.rank, self.seed)
else:
local_npz_names = npz_names
logger.info(f'[rank {self.rank}]: {len(local_npz_names)} scene(s) assigned.')
dataset_builder = self._build_concat_dataset_parallel \
if self.parallel_load_data \
else self._build_concat_dataset
return dataset_builder(data_root, local_npz_names, split_npz_root, intri_path,
mode=mode, min_overlap_score=min_overlap_score, pose_dir=pose_dir)
def _build_concat_dataset(
self,
data_root,
npz_names,
npz_dir,
intrinsic_path,
mode,
min_overlap_score=0.,
pose_dir=None
):
datasets = []
augment_fn = self.augment_fn if mode == 'train' else None
data_source = self.trainval_data_source if mode in ['train', 'val'] else self.test_data_source
if str(data_source).lower() == 'megadepth':
npz_names = [f'{n}.npz' for n in npz_names]
for npz_name in tqdm(npz_names,
desc=f'[rank:{self.rank}] loading {mode} datasets',
disable=int(self.rank) != 0):
# `ScanNetDataset`/`MegaDepthDataset` load all data from npz_path when initialized, which might take time.
npz_path = osp.join(npz_dir, npz_name)
if data_source == 'ScanNet':
datasets.append(
ScanNet_RGB_Dataset(data_root,
npz_path,
intrinsic_path,
mode=mode,
min_overlap_score=min_overlap_score,
augment_fn=augment_fn,
pose_dir=pose_dir))
elif data_source == 'MegaDepth':
datasets.append(
MegaDepth_RGB_Dataset(data_root,
npz_path,
mode=mode,
min_overlap_score=min_overlap_score,
img_resize=self.mgdpt_img_resize,
df=self.mgdpt_df,
img_padding=self.mgdpt_img_pad,
depth_padding=self.mgdpt_depth_pad,
augment_fn=augment_fn,
coarse_scale=self.coarse_scale))
else:
raise NotImplementedError()
return ConcatDataset(datasets)
def _build_concat_dataset_parallel(
self,
data_root,
npz_names,
npz_dir,
intrinsic_path,
mode,
min_overlap_score=0.,
pose_dir=None,
):
augment_fn = self.augment_fn if mode == 'train' else None
data_source = self.trainval_data_source if mode in ['train', 'val'] else self.test_data_source
if str(data_source).lower() == 'megadepth':
npz_names = [f'{n}.npz' for n in npz_names]
with tqdm_joblib(tqdm(desc=f'[rank:{self.rank}] loading {mode} datasets',
total=len(npz_names), disable=int(self.rank) != 0)):
if data_source == 'ScanNet':
datasets = Parallel(n_jobs=math.floor(len(os.sched_getaffinity(0)) * 0.9 / comm.get_local_size()))(
delayed(lambda x: _build_dataset(
ScanNet_RGB_Dataset,
data_root,
osp.join(npz_dir, x),
intrinsic_path,
mode=mode,
min_overlap_score=min_overlap_score,
augment_fn=augment_fn,
pose_dir=pose_dir))(name)
for name in npz_names)
elif data_source == 'MegaDepth':
# TODO: _pickle.PicklingError: Could not pickle the task to send it to the workers.
raise NotImplementedError()
datasets = Parallel(n_jobs=math.floor(len(os.sched_getaffinity(0)) * 0.9 / comm.get_local_size()))(
delayed(lambda x: _build_dataset(
MegaDepth_RGB_Dataset,
data_root,
osp.join(npz_dir, x),
mode=mode,
min_overlap_score=min_overlap_score,
img_resize=self.mgdpt_img_resize,
df=self.mgdpt_df,
img_padding=self.mgdpt_img_pad,
depth_padding=self.mgdpt_depth_pad,
augment_fn=augment_fn,
coarse_scale=self.coarse_scale))(name)
for name in npz_names)
else:
raise ValueError(f'Unknown dataset: {data_source}')
return ConcatDataset(datasets)
def train_dataloader(self):
""" Build training dataloader for ScanNet / MegaDepth. """
assert self.data_sampler in ['scene_balance']
logger.info(
f'[rank:{self.rank}/{self.world_size}]: Train Sampler and DataLoader re-init (should not re-init between epochs!).')
if self.data_sampler == 'scene_balance':
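            # scene-balanced sampling: draw N_SAMPLES_PER_SUBSET pairs from every scene subset each epoch (optionally with replacement / shuffling / repetition)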
sampler = RandomConcatSampler(self.train_dataset,
self.n_samples_per_subset,
self.subset_replacement,
self.shuffle, self.repeat, self.seed)
else:
sampler = None
dataloader = DataLoader(self.train_dataset, sampler=sampler, **self.train_loader_params)
return dataloader
def val_dataloader(self):
""" Build validation dataloader for ScanNet / MegaDepth. """
logger.info(f'[rank:{self.rank}/{self.world_size}]: Val Sampler and DataLoader re-init.')
if not isinstance(self.val_dataset, abc.Sequence):
sampler = DistributedSampler(self.val_dataset, shuffle=False)
return DataLoader(self.val_dataset, sampler=sampler, **self.val_loader_params)
else:
dataloaders = []
for dataset in self.val_dataset:
sampler = DistributedSampler(dataset, shuffle=False)
dataloaders.append(DataLoader(dataset, sampler=sampler, **self.val_loader_params))
return dataloaders
def test_dataloader(self, *args, **kwargs):
logger.info(f'[rank:{self.rank}/{self.world_size}]: Test Sampler and DataLoader re-init.')
sampler = DistributedSampler(self.test_dataset, shuffle=False)
return DataLoader(self.test_dataset, sampler=sampler, **self.test_loader_params)
class RGBDDataModule(pl.LightningDataModule):
"""
    For distributed training, each training process is assigned
only a part of the training scenes to reduce memory overhead.
"""
def __init__(self, args, config):
super().__init__()
# 1. data config
        # Train and Val should come from the same data source
self.trainval_data_source = config.DATASET.TRAINVAL_DATA_SOURCE
self.test_data_source = config.DATASET.TEST_DATA_SOURCE
# training and validating
self.train_data_root = config.DATASET.TRAIN_DATA_ROOT
self.train_pose_root = config.DATASET.TRAIN_POSE_ROOT # (optional)
self.train_npz_root = config.DATASET.TRAIN_NPZ_ROOT
self.train_list_path = config.DATASET.TRAIN_LIST_PATH
self.train_intrinsic_path = config.DATASET.TRAIN_INTRINSIC_PATH
self.val_data_root = config.DATASET.VAL_DATA_ROOT
self.val_pose_root = config.DATASET.VAL_POSE_ROOT # (optional)
self.val_npz_root = config.DATASET.VAL_NPZ_ROOT
self.val_list_path = config.DATASET.VAL_LIST_PATH
self.val_intrinsic_path = config.DATASET.VAL_INTRINSIC_PATH
# testing
self.test_data_root = config.DATASET.TEST_DATA_ROOT
self.test_pose_root = config.DATASET.TEST_POSE_ROOT # (optional)
self.test_npz_root = config.DATASET.TEST_NPZ_ROOT
self.test_list_path = config.DATASET.TEST_LIST_PATH
self.test_intrinsic_path = config.DATASET.TEST_INTRINSIC_PATH
# 2. dataset config
# general options
self.min_overlap_score_test = config.DATASET.MIN_OVERLAP_SCORE_TEST # 0.4, omit data with overlap_score < min_overlap_score
self.min_overlap_score_train = config.DATASET.MIN_OVERLAP_SCORE_TRAIN
self.augment_fn = build_augmentor(config.DATASET.AUGMENTATION_TYPE) # None, options: [None, 'dark', 'mobile']
# MegaDepth options
self.mgdpt_img_resize = config.DATASET.MGDPT_IMG_RESIZE # 840
self.mgdpt_img_pad = config.DATASET.MGDPT_IMG_PAD # True
self.mgdpt_depth_pad = config.DATASET.MGDPT_DEPTH_PAD # True
self.mgdpt_df = config.DATASET.MGDPT_DF # 8
self.coarse_scale = 1 / config.LOFTR.RESOLUTION[0] # 0.125. for training loftr.
# 3.loader parameters
self.train_loader_params = {
'batch_size': args.batch_size,
'num_workers': args.num_workers,
'pin_memory': getattr(args, 'pin_memory', True)
}
self.val_loader_params = {
'batch_size': 1,
'shuffle': False,
'num_workers': args.num_workers,
'pin_memory': getattr(args, 'pin_memory', True)
}
self.test_loader_params = {
'batch_size': 1,
'shuffle': False,
'num_workers': args.num_workers,
'pin_memory': True
}
# 4. sampler
self.data_sampler = config.TRAINER.DATA_SAMPLER
self.n_samples_per_subset = config.TRAINER.N_SAMPLES_PER_SUBSET
self.subset_replacement = config.TRAINER.SB_SUBSET_SAMPLE_REPLACEMENT
self.shuffle = config.TRAINER.SB_SUBSET_SHUFFLE
self.repeat = config.TRAINER.SB_REPEAT
# (optional) RandomSampler for debugging
# misc configurations
self.parallel_load_data = getattr(args, 'parallel_load_data', False)
self.seed = config.TRAINER.SEED # 66
def setup(self, stage=None):
"""
Setup train / val / test dataset. This method will be called by PL automatically.
Args:
stage (str): 'fit' in training phase, and 'test' in testing phase.
"""
assert stage in ['fit', 'test'], "stage must be either fit or test"
try:
self.world_size = dist.get_world_size()
self.rank = dist.get_rank()
logger.info(f"[rank:{self.rank}] world_size: {self.world_size}")
except AssertionError as ae:
self.world_size = 1
self.rank = 0
            logger.warning(str(ae) + " (set world_size=1 and rank=0)")
if stage == 'fit':
self.train_dataset = self._setup_dataset(
self.train_data_root,
self.train_npz_root,
self.train_list_path,
self.train_intrinsic_path,
mode='train',
min_overlap_score=self.min_overlap_score_train,
pose_dir=self.train_pose_root)
# setup multiple (optional) validation subsets
if isinstance(self.val_list_path, (list, tuple)):
self.val_dataset = []
if not isinstance(self.val_npz_root, (list, tuple)):
self.val_npz_root = [self.val_npz_root for _ in range(len(self.val_list_path))]
for npz_list, npz_root in zip(self.val_list_path, self.val_npz_root):
self.val_dataset.append(self._setup_dataset(
self.val_data_root,
npz_root,
npz_list,
self.val_intrinsic_path,
mode='val',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.val_pose_root))
else:
self.val_dataset = self._setup_dataset(
self.val_data_root,
self.val_npz_root,
self.val_list_path,
self.val_intrinsic_path,
mode='val',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.val_pose_root)
logger.info(f'[rank:{self.rank}] Train & Val Dataset loaded!')
        else:  # stage == 'test'
self.test_dataset = self._setup_dataset(
self.test_data_root,
self.test_npz_root,
self.test_list_path,
self.test_intrinsic_path,
mode='test',
min_overlap_score=self.min_overlap_score_test,
pose_dir=self.test_pose_root)
logger.info(f'[rank:{self.rank}]: Test Dataset loaded!')
def _setup_dataset(self,
data_root,
split_npz_root,
scene_list_path,
intri_path,
mode='train',
min_overlap_score=0.,
pose_dir=None):
""" Setup train / val / test set"""
with open(scene_list_path, 'r') as f:
npz_names = [name.split()[0] for name in f.readlines()]
if mode == 'train':
local_npz_names = get_local_split(npz_names, self.world_size, self.rank, self.seed)
else:
local_npz_names = npz_names
logger.info(f'[rank {self.rank}]: {len(local_npz_names)} scene(s) assigned.')
dataset_builder = self._build_concat_dataset_parallel \
if self.parallel_load_data \
else self._build_concat_dataset
return dataset_builder(data_root, local_npz_names, split_npz_root, intri_path,
mode=mode, min_overlap_score=min_overlap_score, pose_dir=pose_dir)
def _build_concat_dataset(
self,
data_root,
npz_names,
npz_dir,
intrinsic_path,
mode,
min_overlap_score=0.,
pose_dir=None
):
datasets = []
augment_fn = self.augment_fn if mode == 'train' else None
data_source = self.trainval_data_source if mode in ['train', 'val'] else self.test_data_source
if str(data_source).lower() == 'megadepth':
npz_names = [f'{n}.npz' for n in npz_names]
for npz_name in tqdm(npz_names,
desc=f'[rank:{self.rank}] loading {mode} datasets',
disable=int(self.rank) != 0):
# `ScanNetDataset`/`MegaDepthDataset` load all data from npz_path when initialized, which might take time.
npz_path = osp.join(npz_dir, npz_name)
if data_source == 'ScanNet':
datasets.append(
ScanNet_RGBD_Dataset(data_root,
npz_path,
intrinsic_path,
mode=mode,
min_overlap_score=min_overlap_score,
augment_fn=augment_fn,
pose_dir=pose_dir))
elif data_source == 'MegaDepth':
datasets.append(
MegaDepth_RGBD_Dataset(data_root,
npz_path,
mode=mode,
min_overlap_score=min_overlap_score,
img_resize=self.mgdpt_img_resize,
df=self.mgdpt_df,
img_padding=self.mgdpt_img_pad,
depth_padding=self.mgdpt_depth_pad,
augment_fn=augment_fn,
coarse_scale=self.coarse_scale))
else:
raise NotImplementedError()
return ConcatDataset(datasets)
def _build_concat_dataset_parallel(
self,
data_root,
npz_names,
npz_dir,
intrinsic_path,
mode,
min_overlap_score=0.,
pose_dir=None,
):
augment_fn = self.augment_fn if mode == 'train' else None
data_source = self.trainval_data_source if mode in ['train', 'val'] else self.test_data_source
if str(data_source).lower() == 'megadepth':
npz_names = [f'{n}.npz' for n in npz_names]
with tqdm_joblib(tqdm(desc=f'[rank:{self.rank}] loading {mode} datasets',
total=len(npz_names), disable=int(self.rank) != 0)):
if data_source == 'ScanNet':
datasets = Parallel(n_jobs=math.floor(len(os.sched_getaffinity(0)) * 0.9 / comm.get_local_size()))(
delayed(lambda x: _build_dataset(
ScanNet_RGBD_Dataset,
data_root,
osp.join(npz_dir, x),
intrinsic_path,
mode=mode,
min_overlap_score=min_overlap_score,
augment_fn=augment_fn,
pose_dir=pose_dir))(name)
for name in npz_names)
elif data_source == 'MegaDepth':
# TODO: _pickle.PicklingError: Could not pickle the task to send it to the workers.
raise NotImplementedError()
datasets = Parallel(n_jobs=math.floor(len(os.sched_getaffinity(0)) * 0.9 / comm.get_local_size()))(
delayed(lambda x: _build_dataset(
                        MegaDepth_RGBD_Dataset,
data_root,
osp.join(npz_dir, x),
mode=mode,
min_overlap_score=min_overlap_score,
img_resize=self.mgdpt_img_resize,
df=self.mgdpt_df,
img_padding=self.mgdpt_img_pad,
depth_padding=self.mgdpt_depth_pad,
augment_fn=augment_fn,
coarse_scale=self.coarse_scale))(name)
for name in npz_names)
else:
raise ValueError(f'Unknown dataset: {data_source}')
return ConcatDataset(datasets)
def train_dataloader(self):
""" Build training dataloader for ScanNet / MegaDepth. """
assert self.data_sampler in ['scene_balance']
logger.info(
f'[rank:{self.rank}/{self.world_size}]: Train Sampler and DataLoader re-init (should not re-init between epochs!).')
if self.data_sampler == 'scene_balance':
sampler = RandomConcatSampler(self.train_dataset,
self.n_samples_per_subset,
self.subset_replacement,
self.shuffle, self.repeat, self.seed)
else:
sampler = None
dataloader = DataLoader(self.train_dataset, sampler=sampler, **self.train_loader_params)
return dataloader
def val_dataloader(self):
""" Build validation dataloader for ScanNet / MegaDepth. """
logger.info(f'[rank:{self.rank}/{self.world_size}]: Val Sampler and DataLoader re-init.')
if not isinstance(self.val_dataset, abc.Sequence):
sampler = DistributedSampler(self.val_dataset, shuffle=False)
return DataLoader(self.val_dataset, sampler=sampler, **self.val_loader_params)
else:
dataloaders = []
for dataset in self.val_dataset:
sampler = DistributedSampler(dataset, shuffle=False)
dataloaders.append(DataLoader(dataset, sampler=sampler, **self.val_loader_params))
return dataloaders
def test_dataloader(self, *args, **kwargs):
logger.info(f'[rank:{self.rank}/{self.world_size}]: Test Sampler and DataLoader re-init.')
sampler = DistributedSampler(self.test_dataset, shuffle=False)
return DataLoader(self.test_dataset, sampler=sampler, **self.test_loader_params)
def _build_dataset(dataset: Dataset, *args, **kwargs):
return dataset(*args, **kwargs)
| 27,968 | 44.626427 | 132 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/__init__.py | from .loftr import LoFTR_RGB,LoFTR_RGBD,LoFTR_RGBD_teacher,LoFTR_RGB_student
from .utils.cvpr_ds_config import default_cfg
| 123 | 40.333333 | 76 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/loftr.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.einops import rearrange, repeat
from .backbone import build_backbone_rgb, build_backbone_rgbd
from .utils.position_encoding import PositionEncodingSine
from .loftr_module import LocalFeatureTransformer, FinePreprocess
from .utils.coarse_matching import CoarseMatching, CoarseMatching_t
from .utils.fine_matching import FineMatching, FineMatching_t
class LoFTR_RGB(nn.Module):
def __init__(self, config):
super().__init__()
# Misc
self.config = config
# Modules
self.backbone = build_backbone_rgb(config)
self.pos_encoding = PositionEncodingSine(config['coarse']['d_model'])
self.loftr_coarse = LocalFeatureTransformer(config['coarse'])
self.coarse_matching = CoarseMatching(config['match_coarse'])
self.fine_preprocess = FinePreprocess(config)
self.loftr_fine = LocalFeatureTransformer(config["fine"])
self.fine_matching = FineMatching()
def forward(self, data):
"""
Update:
data (dict): {
                'image0': (torch.Tensor): (N, 3, H, W)
                'image1': (torch.Tensor): (N, 3, H, W)
'mask0'(optional) : (torch.Tensor): (N, H, W) '0' indicates a padded position
'mask1'(optional) : (torch.Tensor): (N, H, W)
}
"""
# 1. Local Feature CNN
data.update({
'bs': data['image0'].size(0),
'hw0_i': data['image0'].shape[2:], 'hw1_i': data['image1'].shape[2:]
})
if data['hw0_i'] == data['hw1_i']: # faster & better BN convergence
feats_c, feats_f = self.backbone(torch.cat([data['image0'], data['image1']], dim=0))
(feat_c0, feat_c1), (feat_f0, feat_f1) = feats_c.split(data['bs']), feats_f.split(data['bs'])
else: # handle different input shapes
(feat_c0, feat_f0), (feat_c1, feat_f1) = self.backbone(data['image0']), self.backbone(data['image1'])
data.update({
'hw0_c': feat_c0.shape[2:], 'hw1_c': feat_c1.shape[2:],
'hw0_f': feat_f0.shape[2:], 'hw1_f': feat_f1.shape[2:]
})
# 2. coarse-level loftr module
# add featmap with positional encoding, then flatten it to sequence [N, HW, C]
feat_c0 = rearrange(self.pos_encoding(feat_c0), 'n c h w -> n (h w) c')
feat_c1 = rearrange(self.pos_encoding(feat_c1), 'n c h w -> n (h w) c')
mask_c0 = mask_c1 = None # mask is useful in training
if 'mask0' in data:
mask_c0, mask_c1 = data['mask0'].flatten(-2), data['mask1'].flatten(-2)
feat_c0, feat_c1 = self.loftr_coarse(feat_c0, feat_c1, mask_c0, mask_c1)
# 3. match coarse-level
self.coarse_matching(feat_c0, feat_c1, data, mask_c0=mask_c0, mask_c1=mask_c1)
# 4. fine-level refinement
feat_f0_unfold, feat_f1_unfold = self.fine_preprocess(feat_f0, feat_f1, feat_c0, feat_c1, data)
if feat_f0_unfold.size(0) != 0: # at least one coarse level predicted
feat_f0_unfold, feat_f1_unfold = self.loftr_fine(feat_f0_unfold, feat_f1_unfold)
# 5. match fine-level
self.fine_matching(feat_f0_unfold, feat_f1_unfold, data)
class LoFTR_RGBD(nn.Module):
def __init__(self, config):
super().__init__()
# Misc
self.config = config
# Modules
self.backbone = build_backbone_rgbd(config)
self.pos_encoding = PositionEncodingSine(config['coarse']['d_model'])
self.loftr_coarse = LocalFeatureTransformer(config['coarse'])
self.coarse_matching = CoarseMatching(config['match_coarse'])
self.fine_preprocess = FinePreprocess(config)
self.loftr_fine = LocalFeatureTransformer(config["fine"])
self.fine_matching = FineMatching()
def forward(self, data):
"""
Update:
data (dict): {
                'image0': (torch.Tensor): (N, 4, H, W)
                'image1': (torch.Tensor): (N, 4, H, W)
'mask0'(optional) : (torch.Tensor): (N, H, W) '0' indicates a padded position
'mask1'(optional) : (torch.Tensor): (N, H, W)
}
"""
# 1. Local Feature CNN
data.update({
'bs': data['image0'].size(0),
'hw0_i': data['image0'].shape[2:], 'hw1_i': data['image1'].shape[2:]
})
if data['hw0_i'] == data['hw1_i']: # faster & better BN convergence
feats_c, feats_f = self.backbone(torch.cat([data['image0'], data['image1']], dim=0))
(feat_c0, feat_c1), (feat_f0, feat_f1) = feats_c.split(data['bs']), feats_f.split(data['bs'])
else: # handle different input shapes
(feat_c0, feat_f0), (feat_c1, feat_f1) = self.backbone(data['image0']), self.backbone(data['image1'])
data.update({
'hw0_c': feat_c0.shape[2:], 'hw1_c': feat_c1.shape[2:],
'hw0_f': feat_f0.shape[2:], 'hw1_f': feat_f1.shape[2:]
})
# 2. coarse-level loftr module
# add featmap with positional encoding, then flatten it to sequence [N, HW, C]
feat_c0 = rearrange(self.pos_encoding(feat_c0), 'n c h w -> n (h w) c')
feat_c1 = rearrange(self.pos_encoding(feat_c1), 'n c h w -> n (h w) c')
mask_c0 = mask_c1 = None # mask is useful in training
if 'mask0' in data:
mask_c0, mask_c1 = data['mask0'].flatten(-2), data['mask1'].flatten(-2)
feat_c0, feat_c1 = self.loftr_coarse(feat_c0, feat_c1, mask_c0, mask_c1)
# 3. match coarse-level
self.coarse_matching(feat_c0, feat_c1, data, mask_c0=mask_c0, mask_c1=mask_c1)
# 4. fine-level refinement
feat_f0_unfold, feat_f1_unfold = self.fine_preprocess(feat_f0, feat_f1, feat_c0, feat_c1, data)
if feat_f0_unfold.size(0) != 0: # at least one coarse level predicted
feat_f0_unfold, feat_f1_unfold = self.loftr_fine(feat_f0_unfold, feat_f1_unfold)
# 5. match fine-level
self.fine_matching(feat_f0_unfold, feat_f1_unfold, data)
class LoFTR_RGBD_teacher(nn.Module):
def __init__(self, config):
super().__init__()
# Misc
self.config = config
# Modules
self.backbone = build_backbone_rgbd(config)
self.pos_encoding = PositionEncodingSine(config['coarse']['d_model'])
self.loftr_coarse = LocalFeatureTransformer(config['coarse'])
self.coarse_matching = CoarseMatching_t(config['match_coarse'])
self.fine_preprocess = FinePreprocess(config)
self.loftr_fine = LocalFeatureTransformer(config["fine"])
self.fine_matching = FineMatching_t()
def forward(self, data):
"""
Update:
data (dict): {
'image0': (torch.Tensor): (N, 1, H, W)
'image1': (torch.Tensor): (N, 1, H, W)
'mask0'(optional) : (torch.Tensor): (N, H, W) '0' indicates a padded position
'mask1'(optional) : (torch.Tensor): (N, H, W)
}
"""
# 1. Local Feature CNN
if data['hw0_i'] == data['hw1_i']: # faster & better BN convergence
feats_c, feats_f = self.backbone(torch.cat([data['image0'], data['image1']], dim=0))
(feat_c0, feat_c1), (feat_f0, feat_f1) = feats_c.split(data['bs']), feats_f.split(data['bs'])
else: # handle different input shapes
(feat_c0, feat_f0), (feat_c1, feat_f1) = self.backbone(data['image0']), self.backbone(data['image1'])
data.update({
'hw0_c_t': feat_c0, 'hw1_c_t': feat_c1,
'hw0_f_t': feat_f0, 'hw1_f_t': feat_f1
})
class LoFTR_RGB_student(nn.Module):
def __init__(self, config):
super().__init__()
# Misc
self.config = config
# Modules
self.backbone = build_backbone_rgb(config)
self.pos_encoding = PositionEncodingSine(config['coarse']['d_model'])
self.loftr_coarse = LocalFeatureTransformer(config['coarse'])
self.coarse_matching = CoarseMatching(config['match_coarse'])
self.fine_preprocess = FinePreprocess(config)
self.loftr_fine = LocalFeatureTransformer(config["fine"])
self.fine_matching = FineMatching()
self.teacher = LoFTR_RGBD_teacher(config)
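        # frozen RGB-D teacher, restored from a pretrained checkpoint below and used only to produce distillation targets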
        pretrained_t = "./logs/tb_logs/indoor/indoor_rgbd_teacher.ckpt"  # hard-coded path to the pretrained RGB-D teacher checkpoint; TODO: make configurable
sd = torch.load(pretrained_t, map_location='cpu')['state_dict']
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in sd.items():
name = k[8:] # remove `matcher.`
new_state_dict[name] = v
self.teacher.load_state_dict(new_state_dict,strict=True)
self.teacher.eval()
for param in self.teacher.parameters():
param.requires_grad = False
def fine_preprocess_teacher_branch(self, feat_f0, feat_f1, feat_c0, feat_c1, data,b_ids,i_ids,j_ids):
W = data['W']
stride = data['hw0_f'][0] // data['hw0_c'][0]
d_model_f = 128
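        # NOTE: the fine-level feature dimension is hard-coded; it is assumed to equal the FPN's fine output dim (block_dims[0] in the default config)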
if b_ids.shape[0] == 0:
feat0 = torch.empty(0, W**2, d_model_f, device=feat_f0.device)
feat1 = torch.empty(0, W**2, d_model_f, device=feat_f0.device)
return feat0, feat1
# 1. unfold(crop) all local windows
feat_f0_unfold = F.unfold(feat_f0, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f0_unfold = rearrange(feat_f0_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
feat_f1_unfold = F.unfold(feat_f1, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f1_unfold = rearrange(feat_f1_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
# 2. select only the predicted matches
feat_f0_unfold = feat_f0_unfold[b_ids, i_ids] # [n, ww, cf]
feat_f1_unfold = feat_f1_unfold[b_ids, j_ids]
# option: use coarse-level loftr feature as context: concat and linear
        if True:  # self.cat_c_feat: coarse-level LoFTR features are always used as fine-level context here
feat_c_win = self.teacher.fine_preprocess.down_proj(torch.cat([feat_c0[b_ids, i_ids],
feat_c1[b_ids, j_ids]], 0)) # [2n, c]
feat_cf_win = self.teacher.fine_preprocess.merge_feat(torch.cat([
torch.cat([feat_f0_unfold, feat_f1_unfold], 0), # [2n, ww, cf]
repeat(feat_c_win, 'n c -> n ww c', ww=W**2), # [2n, ww, cf]
], -1))
feat_f0_unfold, feat_f1_unfold = torch.chunk(feat_cf_win, 2, dim=0)
return feat_f0_unfold, feat_f1_unfold
def forward(self, data):
"""
Update:
data (dict): {
                'image0': (torch.Tensor): (N, 4, H, W) RGB-D; the student uses channels 0-2 (RGB), the teacher the full tensor
                'image1': (torch.Tensor): (N, 4, H, W)
'mask0'(optional) : (torch.Tensor): (N, H, W) '0' indicates a padded position
'mask1'(optional) : (torch.Tensor): (N, H, W)
}
"""
# 1. Local Feature CNN
data.update({
'bs': data['image0'].size(0),
'hw0_i': data['image0'].shape[2:], 'hw1_i': data['image1'].shape[2:]
})
image0 = data['image0'][:, :3, :, :].clone()
image1 = data['image1'][:, :3, :, :].clone()
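        # the student branch only sees the RGB channels; the full RGB-D tensors in data feed the frozen teacher below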
if data['hw0_i'] == data['hw1_i']: # faster & better BN convergence
feats_c, feats_f = self.backbone(torch.cat([image0, image1], dim=0))
(feat_c0, feat_c1), (feat_f0, feat_f1) = feats_c.split(data['bs']), feats_f.split(data['bs'])
else: # handle different input shapes
(feat_c0, feat_f0), (feat_c1, feat_f1) = self.backbone(image0), self.backbone(image1)
data.update({
'hw0_c': feat_c0.shape[2:], 'hw1_c': feat_c1.shape[2:],
'hw0_f': feat_f0.shape[2:], 'hw1_f': feat_f1.shape[2:]
})
# 2. coarse-level loftr module
# add featmap with positional encoding, then flatten it to sequence [N, HW, C]
feat_c0 = rearrange(self.pos_encoding(feat_c0), 'n c h w -> n (h w) c')
feat_c1 = rearrange(self.pos_encoding(feat_c1), 'n c h w -> n (h w) c')
mask_c0 = mask_c1 = None # mask is useful in training
if 'mask0' in data:
mask_c0, mask_c1 = data['mask0'].flatten(-2), data['mask1'].flatten(-2)
feat_c0, feat_c1 = self.loftr_coarse(feat_c0, feat_c1, mask_c0, mask_c1)
# 3. match coarse-level
self.coarse_matching(feat_c0, feat_c1, data, mask_c0=mask_c0, mask_c1=mask_c1)
# 4. fine-level refinement
feat_f0_unfold, feat_f1_unfold = self.fine_preprocess(feat_f0, feat_f1, feat_c0, feat_c1, data)
        save_bi, save_ii, save_ji = data['b_ids'], data['i_ids'], data['j_ids']
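        # keep the student's coarse match indices so the teacher's fine-level windows are cropped at the same locations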
#feat_f0_unfold_t, feat_f1_unfold_t = feat_f0_unfold.clone(), feat_f1_unfold.clone()
if feat_f0_unfold.size(0) != 0: # at least one coarse level predicted
feat_f0_unfold, feat_f1_unfold = self.loftr_fine(feat_f0_unfold, feat_f1_unfold)
# 5. match fine-level
self.fine_matching(feat_f0_unfold, feat_f1_unfold, data)
## teacher inference
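        # the frozen RGB-D teacher repeats the full pipeline (backbone, coarse attention, coarse/fine matching) on the 4-channel inputs; the *_t matching modules store its predictions in the batch dict so the distillation loss can compare them with the student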
if data['hw0_i'] == data['hw1_i']: # faster & better BN convergence
feats_c_t, feats_f_t = self.teacher.backbone(torch.cat([data['image0'], data['image1']], dim=0))
(feat_c0_t, feat_c1_t), (feat_f0_t, feat_f1_t) = feats_c_t.split(data['bs']), feats_f_t.split(data['bs'])
else: # handle different input shapes
(feat_c0_t, feat_f0_t), (feat_c1_t, feat_f1_t) = self.teacher.backbone(data['image0']), self.teacher.backbone(data['image1'])
feat_c0_t = rearrange(self.teacher.pos_encoding(feat_c0_t), 'n c h w -> n (h w) c')
feat_c1_t = rearrange(self.teacher.pos_encoding(feat_c1_t), 'n c h w -> n (h w) c')
mask_c0_t = mask_c1_t = None # mask is useful in training
#if 'mask0' in data:
# mask_c0_t, mask_c1_t = data['mask0'].flatten(-2), data['mask1'].flatten(-2)
feat_c0_t, feat_c1_t = self.teacher.loftr_coarse(feat_c0_t, feat_c1_t, mask_c0_t, mask_c1_t)
self.teacher.coarse_matching(feat_c0_t, feat_c1_t, data, mask_c0=None, mask_c1=None)
feat_f0_unfold_t, feat_f1_unfold_t = self.fine_preprocess_teacher_branch(feat_f0_t, feat_f1_t, feat_c0_t, feat_c1_t, data,save_bi,save_ii,save_ji)
if feat_f0_unfold.size(0) != 0: # at least one coarse level predicted
feat_f0_unfold_t, feat_f1_unfold_t = self.teacher.loftr_fine(feat_f0_unfold_t, feat_f1_unfold_t)
else:
feat_f0_unfold_t, feat_f1_unfold_t = feat_f0_unfold.clone(), feat_f1_unfold.clone()
#feat_f0_unfold_t, feat_f1_unfold_t = self.teacher.loftr_fine(feat_f0_unfold_t, feat_f1_unfold_t)
self.teacher.fine_matching(feat_f0_unfold_t, feat_f1_unfold_t, data)
| 14,794 | 45.671924 | 154 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/backbone/__init__.py | from .resnet_fpn import ResNetFPN_8_2_RGB,ResNetFPN_8_2_RGBD
def build_backbone_rgb(config):
if config['backbone_type'] == 'ResNetFPN':
if config['resolution'] == (8, 2):
return ResNetFPN_8_2_RGB(config['resnetfpn'])
else:
raise ValueError(f"LOFTR.BACKBONE_TYPE {config['backbone_type']} not supported.")
def build_backbone_rgbd(config):
if config['backbone_type'] == 'ResNetFPN':
if config['resolution'] == (8, 2):
return ResNetFPN_8_2_RGBD(config['resnetfpn'])
else:
raise ValueError(f"LOFTR.BACKBONE_TYPE {config['backbone_type']} not supported.") | 624 | 40.666667 | 89 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/backbone/resnet_fpn.py | import torch.nn as nn
import torch.nn.functional as F
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution without padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, padding=0, bias=False)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
class BasicBlock(nn.Module):
def __init__(self, in_planes, planes, stride=1):
super().__init__()
self.conv1 = conv3x3(in_planes, planes, stride)
self.conv2 = conv3x3(planes, planes)
self.bn1 = nn.BatchNorm2d(planes)
self.bn2 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
if stride == 1:
self.downsample = None
else:
self.downsample = nn.Sequential(
conv1x1(in_planes, planes, stride=stride),
nn.BatchNorm2d(planes)
)
def forward(self, x):
y = x
y = self.relu(self.bn1(self.conv1(y)))
y = self.bn2(self.conv2(y))
if self.downsample is not None:
x = self.downsample(x)
return self.relu(x+y)
class ResNetFPN_8_2_RGB(nn.Module):
"""
ResNet+FPN, output resolution are 1/8 and 1/2.
Each block has 2 layers.
"""
def __init__(self, config):
super().__init__()
# Config
block = BasicBlock
initial_dim = config['initial_dim']
block_dims = config['block_dims']
# Class Variable
self.in_planes = initial_dim
# Networks
self.conv1 = nn.Conv2d(3, initial_dim, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(initial_dim)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, block_dims[0], stride=1) # 1/2
self.layer2 = self._make_layer(block, block_dims[1], stride=2) # 1/4
self.layer3 = self._make_layer(block, block_dims[2], stride=2) # 1/8
# 3. FPN upsample
self.layer3_outconv = conv1x1(block_dims[2], block_dims[2])
self.layer2_outconv = conv1x1(block_dims[1], block_dims[2])
self.layer2_outconv2 = nn.Sequential(
conv3x3(block_dims[2], block_dims[2]),
nn.BatchNorm2d(block_dims[2]),
nn.LeakyReLU(),
conv3x3(block_dims[2], block_dims[1]),
)
self.layer1_outconv = conv1x1(block_dims[0], block_dims[1])
self.layer1_outconv2 = nn.Sequential(
conv3x3(block_dims[1], block_dims[1]),
nn.BatchNorm2d(block_dims[1]),
nn.LeakyReLU(),
conv3x3(block_dims[1], block_dims[0]),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, dim, stride=1):
layer1 = block(self.in_planes, dim, stride=stride)
layer2 = block(dim, dim, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
# ResNet Backbone
x0 = self.relu(self.bn1(self.conv1(x)))
x1 = self.layer1(x0) # 1/2
x2 = self.layer2(x1) # 1/4
x3 = self.layer3(x2) # 1/8
# FPN
x3_out = self.layer3_outconv(x3)
x3_out_2x = F.interpolate(x3_out, scale_factor=2., mode='bilinear', align_corners=True)
x2_out = self.layer2_outconv(x2)
x2_out = self.layer2_outconv2(x2_out+x3_out_2x)
x2_out_2x = F.interpolate(x2_out, scale_factor=2., mode='bilinear', align_corners=True)
x1_out = self.layer1_outconv(x1)
x1_out = self.layer1_outconv2(x1_out+x2_out_2x)
return [x3_out, x1_out]
class ResNetFPN_8_2_RGBD(nn.Module):
"""
ResNet+FPN, output resolution are 1/8 and 1/2.
Each block has 2 layers.
"""
def __init__(self, config):
super().__init__()
# Config
block = BasicBlock
initial_dim = config['initial_dim']
block_dims = config['block_dims']
# Class Variable
self.in_planes = initial_dim
# Networks
self.conv1 = nn.Conv2d(4, initial_dim, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(initial_dim)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, block_dims[0], stride=1) # 1/2
self.layer2 = self._make_layer(block, block_dims[1], stride=2) # 1/4
self.layer3 = self._make_layer(block, block_dims[2], stride=2) # 1/8
# 3. FPN upsample
self.layer3_outconv = conv1x1(block_dims[2], block_dims[2])
self.layer2_outconv = conv1x1(block_dims[1], block_dims[2])
self.layer2_outconv2 = nn.Sequential(
conv3x3(block_dims[2], block_dims[2]),
nn.BatchNorm2d(block_dims[2]),
nn.LeakyReLU(),
conv3x3(block_dims[2], block_dims[1]),
)
self.layer1_outconv = conv1x1(block_dims[0], block_dims[1])
self.layer1_outconv2 = nn.Sequential(
conv3x3(block_dims[1], block_dims[1]),
nn.BatchNorm2d(block_dims[1]),
nn.LeakyReLU(),
conv3x3(block_dims[1], block_dims[0]),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, dim, stride=1):
layer1 = block(self.in_planes, dim, stride=stride)
layer2 = block(dim, dim, stride=1)
layers = (layer1, layer2)
self.in_planes = dim
return nn.Sequential(*layers)
def forward(self, x):
# ResNet Backbone
x0 = self.relu(self.bn1(self.conv1(x)))
x1 = self.layer1(x0) # 1/2
x2 = self.layer2(x1) # 1/4
x3 = self.layer3(x2) # 1/8
# FPN
x3_out = self.layer3_outconv(x3)
x3_out_2x = F.interpolate(x3_out, scale_factor=2., mode='bilinear', align_corners=True)
x2_out = self.layer2_outconv(x2)
x2_out = self.layer2_outconv2(x2_out+x3_out_2x)
x2_out_2x = F.interpolate(x2_out, scale_factor=2., mode='bilinear', align_corners=True)
x1_out = self.layer1_outconv(x1)
x1_out = self.layer1_outconv2(x1_out+x2_out_2x)
return [x3_out, x1_out]
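# ---------------------------------------------------------------------------
# Minimal shape-check sketch (not part of the original module): builds the RGB
# backbone with the block dims used elsewhere in this repo and feeds a random
# 480x640 image to confirm the advertised 1/8 and 1/2 output resolutions.
if __name__ == "__main__":
    import torch
    _cfg = {'initial_dim': 128, 'block_dims': [128, 196, 256]}
    _net = ResNetFPN_8_2_RGB(_cfg).eval()
    with torch.no_grad():
        _coarse, _fine = _net(torch.randn(1, 3, 480, 640))
    print(_coarse.shape)  # torch.Size([1, 256, 60, 80])   -> 1/8 resolution
    print(_fine.shape)    # torch.Size([1, 128, 240, 320]) -> 1/2 resolution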
| 6,772 | 33.380711 | 96 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/loftr_module/linear_attention.py | """
Linear Transformer proposed in "Transformers are RNNs: Fast Autoregressive Transformers with Linear Attention"
Modified from: https://github.com/idiap/fast-transformers/blob/master/fast_transformers/attention/linear_attention.py
"""
import torch
from torch.nn import Module, Dropout
def elu_feature_map(x):
return torch.nn.functional.elu(x) + 1
class LinearAttention(Module):
def __init__(self, eps=1e-6):
super().__init__()
self.feature_map = elu_feature_map
self.eps = eps
def forward(self, queries, keys, values, q_mask=None, kv_mask=None):
""" Multi-Head linear attention proposed in "Transformers are RNNs"
Args:
queries: [N, L, H, D]
keys: [N, S, H, D]
values: [N, S, H, D]
q_mask: [N, L]
kv_mask: [N, S]
Returns:
queried_values: (N, L, H, D)
"""
Q = self.feature_map(queries)
K = self.feature_map(keys)
# set padded position to zero
if q_mask is not None:
Q = Q * q_mask[:, :, None, None]
if kv_mask is not None:
K = K * kv_mask[:, :, None, None]
values = values * kv_mask[:, :, None, None]
v_length = values.size(1)
values = values / v_length # prevent fp16 overflow
KV = torch.einsum("nshd,nshv->nhdv", K, values) # (S,D)' @ S,V
Z = 1 / (torch.einsum("nlhd,nhd->nlh", Q, K.sum(dim=1)) + self.eps)
queried_values = torch.einsum("nlhd,nhdv,nlh->nlhv", Q, KV, Z) * v_length
return queried_values.contiguous()
class FullAttention(Module):
def __init__(self, use_dropout=False, attention_dropout=0.1):
super().__init__()
self.use_dropout = use_dropout
self.dropout = Dropout(attention_dropout)
def forward(self, queries, keys, values, q_mask=None, kv_mask=None):
""" Multi-head scaled dot-product attention, a.k.a full attention.
Args:
queries: [N, L, H, D]
keys: [N, S, H, D]
values: [N, S, H, D]
q_mask: [N, L]
kv_mask: [N, S]
Returns:
queried_values: (N, L, H, D)
"""
# Compute the unnormalized attention and apply the masks
QK = torch.einsum("nlhd,nshd->nlsh", queries, keys)
if kv_mask is not None:
QK.masked_fill_(~(q_mask[:, :, None, None] * kv_mask[:, None, :, None]), float('-inf'))
# Compute the attention and the weighted average
softmax_temp = 1. / queries.size(3)**.5 # sqrt(D)
A = torch.softmax(softmax_temp * QK, dim=2)
if self.use_dropout:
A = self.dropout(A)
queried_values = torch.einsum("nlsh,nshd->nlhd", A, values)
return queried_values.contiguous()
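# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module): run both attention variants
# on random queries/keys/values; the sizes below are arbitrary and only
# illustrate that the output keeps the query length L and the head layout.
if __name__ == "__main__":
    N, L, S, H, D = 2, 100, 120, 8, 32
    q = torch.randn(N, L, H, D)
    k = torch.randn(N, S, H, D)
    v = torch.randn(N, S, H, D)
    print(LinearAttention()(q, k, v).shape)  # torch.Size([2, 100, 8, 32])
    print(FullAttention()(q, k, v).shape)    # torch.Size([2, 100, 8, 32])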
| 2,794 | 33.085366 | 117 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/loftr_module/fine_preprocess.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.einops import rearrange, repeat
class FinePreprocess(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.cat_c_feat = config['fine_concat_coarse_feat']
self.W = self.config['fine_window_size']
d_model_c = self.config['coarse']['d_model']
d_model_f = self.config['fine']['d_model']
self.d_model_f = d_model_f
if self.cat_c_feat:
self.down_proj = nn.Linear(d_model_c, d_model_f, bias=True)
self.merge_feat = nn.Linear(2*d_model_f, d_model_f, bias=True)
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.kaiming_normal_(p, mode="fan_out", nonlinearity="relu")
def forward(self, feat_f0, feat_f1, feat_c0, feat_c1, data):
W = self.W
stride = data['hw0_f'][0] // data['hw0_c'][0]
data.update({'W': W})
if data['b_ids'].shape[0] == 0:
feat0 = torch.empty(0, self.W**2, self.d_model_f, device=feat_f0.device)
feat1 = torch.empty(0, self.W**2, self.d_model_f, device=feat_f0.device)
return feat0, feat1
# 1. unfold(crop) all local windows
feat_f0_unfold = F.unfold(feat_f0, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f0_unfold = rearrange(feat_f0_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
feat_f1_unfold = F.unfold(feat_f1, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f1_unfold = rearrange(feat_f1_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
# 2. select only the predicted matches
feat_f0_unfold = feat_f0_unfold[data['b_ids'], data['i_ids']] # [n, ww, cf]
feat_f1_unfold = feat_f1_unfold[data['b_ids'], data['j_ids']]
# option: use coarse-level loftr feature as context: concat and linear
if self.cat_c_feat:
feat_c_win = self.down_proj(torch.cat([feat_c0[data['b_ids'], data['i_ids']],
feat_c1[data['b_ids'], data['j_ids']]], 0)) # [2n, c]
feat_cf_win = self.merge_feat(torch.cat([
torch.cat([feat_f0_unfold, feat_f1_unfold], 0), # [2n, ww, cf]
repeat(feat_c_win, 'n c -> n ww c', ww=W**2), # [2n, ww, cf]
], -1))
feat_f0_unfold, feat_f1_unfold = torch.chunk(feat_cf_win, 2, dim=0)
return feat_f0_unfold, feat_f1_unfold
class FinePreprocess_t(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.cat_c_feat = config['fine_concat_coarse_feat']
self.W = self.config['fine_window_size']
d_model_c = self.config['coarse']['d_model']
d_model_f = self.config['fine']['d_model']
self.d_model_f = d_model_f
if self.cat_c_feat:
self.down_proj = nn.Linear(d_model_c, d_model_f, bias=True)
self.merge_feat = nn.Linear(2*d_model_f, d_model_f, bias=True)
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.kaiming_normal_(p, mode="fan_out", nonlinearity="relu")
def forward(self, feat_f0, feat_f1, feat_c0, feat_c1, data):
W = self.W
stride = data['hw0_f'][0] // data['hw0_c'][0]
data.update({'W': W})
if data['b_ids_t'].shape[0] == 0:
feat0 = torch.empty(0, self.W**2, self.d_model_f, device=feat_f0.device)
feat1 = torch.empty(0, self.W**2, self.d_model_f, device=feat_f0.device)
return feat0, feat1
# 1. unfold(crop) all local windows
feat_f0_unfold = F.unfold(feat_f0, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f0_unfold = rearrange(feat_f0_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
feat_f1_unfold = F.unfold(feat_f1, kernel_size=(W, W), stride=stride, padding=W//2)
feat_f1_unfold = rearrange(feat_f1_unfold, 'n (c ww) l -> n l ww c', ww=W**2)
# 2. select only the predicted matches
feat_f0_unfold = feat_f0_unfold[data['b_ids_t'], data['i_ids_t']] # [n, ww, cf]
feat_f1_unfold = feat_f1_unfold[data['b_ids_t'], data['j_ids_t']]
# option: use coarse-level loftr feature as context: concat and linear
if self.cat_c_feat:
feat_c_win = self.down_proj(torch.cat([feat_c0[data['b_ids_t'], data['i_ids_t']],
feat_c1[data['b_ids_t'], data['j_ids_t']]], 0)) # [2n, c]
feat_cf_win = self.merge_feat(torch.cat([
torch.cat([feat_f0_unfold, feat_f1_unfold], 0), # [2n, ww, cf]
repeat(feat_c_win, 'n c -> n ww c', ww=W**2), # [2n, ww, cf]
], -1))
feat_f0_unfold, feat_f1_unfold = torch.chunk(feat_cf_win, 2, dim=0)
return feat_f0_unfold, feat_f1_unfold
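# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module): the config keys and tensor
# shapes below are made up for illustration (1/2-resolution fine maps, a
# 1/8-resolution coarse grid of 60x80 cells, and two fake coarse matches) to
# show that each match yields one W x W window of fine features per image.
if __name__ == "__main__":
    _cfg = {'fine_concat_coarse_feat': True, 'fine_window_size': 5,
            'coarse': {'d_model': 256}, 'fine': {'d_model': 128}}
    _fp = FinePreprocess(_cfg).eval()
    _data = {'hw0_f': (240, 320), 'hw0_c': (60, 80),
             'b_ids': torch.tensor([0, 0]),
             'i_ids': torch.tensor([0, 10]),
             'j_ids': torch.tensor([5, 20])}
    _f0, _f1 = torch.randn(1, 128, 240, 320), torch.randn(1, 128, 240, 320)
    _c0, _c1 = torch.randn(1, 60 * 80, 256), torch.randn(1, 60 * 80, 256)
    with torch.no_grad():
        _w0, _w1 = _fp(_f0, _f1, _c0, _c1, _data)
    print(_w0.shape, _w1.shape)  # torch.Size([2, 25, 128]) torch.Size([2, 25, 128])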
| 5,006 | 43.705357 | 109 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/loftr_module/transformer.py | import copy
import torch
import torch.nn as nn
from .linear_attention import LinearAttention, FullAttention
class LoFTREncoderLayer(nn.Module):
def __init__(self,
d_model,
nhead,
attention='linear'):
super(LoFTREncoderLayer, self).__init__()
self.dim = d_model // nhead
self.nhead = nhead
# multi-head attention
self.q_proj = nn.Linear(d_model, d_model, bias=False)
self.k_proj = nn.Linear(d_model, d_model, bias=False)
self.v_proj = nn.Linear(d_model, d_model, bias=False)
self.attention = LinearAttention() if attention == 'linear' else FullAttention()
self.merge = nn.Linear(d_model, d_model, bias=False)
# feed-forward network
self.mlp = nn.Sequential(
nn.Linear(d_model*2, d_model*2, bias=False),
nn.ReLU(True),
nn.Linear(d_model*2, d_model, bias=False),
)
# norm and dropout
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
def forward(self, x, source, x_mask=None, source_mask=None):
"""
Args:
x (torch.Tensor): [N, L, C]
source (torch.Tensor): [N, S, C]
x_mask (torch.Tensor): [N, L] (optional)
source_mask (torch.Tensor): [N, S] (optional)
"""
bs = x.size(0)
query, key, value = x, source, source
# multi-head attention
query = self.q_proj(query).view(bs, -1, self.nhead, self.dim) # [N, L, (H, D)]
key = self.k_proj(key).view(bs, -1, self.nhead, self.dim) # [N, S, (H, D)]
value = self.v_proj(value).view(bs, -1, self.nhead, self.dim)
message = self.attention(query, key, value, q_mask=x_mask, kv_mask=source_mask) # [N, L, (H, D)]
message = self.merge(message.view(bs, -1, self.nhead*self.dim)) # [N, L, C]
message = self.norm1(message)
# feed-forward network
message = self.mlp(torch.cat([x, message], dim=2))
message = self.norm2(message)
return x + message
class LocalFeatureTransformer(nn.Module):
"""A Local Feature Transformer (LoFTR) module."""
def __init__(self, config):
super(LocalFeatureTransformer, self).__init__()
self.config = config
self.d_model = config['d_model']
self.nhead = config['nhead']
self.layer_names = config['layer_names']
encoder_layer = LoFTREncoderLayer(config['d_model'], config['nhead'], config['attention'])
self.layers = nn.ModuleList([copy.deepcopy(encoder_layer) for _ in range(len(self.layer_names))])
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, feat0, feat1, mask0=None, mask1=None):
"""
Args:
feat0 (torch.Tensor): [N, L, C]
feat1 (torch.Tensor): [N, S, C]
mask0 (torch.Tensor): [N, L] (optional)
mask1 (torch.Tensor): [N, S] (optional)
"""
        assert self.d_model == feat0.size(2), "the feature dimension of the inputs must match the transformer's d_model"
for layer, name in zip(self.layers, self.layer_names):
if name == 'self':
feat0 = layer(feat0, feat0, mask0, mask0)
feat1 = layer(feat1, feat1, mask1, mask1)
elif name == 'cross':
feat0 = layer(feat0, feat1, mask0, mask1)
feat1 = layer(feat1, feat0, mask1, mask0)
else:
raise KeyError
return feat0, feat1
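# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module): a 2-block self/cross stack
# with made-up sizes; run it as a module from the repo root (python -m ...) so
# the relative import above resolves.
if __name__ == "__main__":
    _cfg = {'d_model': 256, 'nhead': 8,
            'layer_names': ['self', 'cross'] * 2, 'attention': 'linear'}
    _net = LocalFeatureTransformer(_cfg).eval()
    _f0, _f1 = torch.randn(1, 4800, 256), torch.randn(1, 4800, 256)
    with torch.no_grad():
        _f0, _f1 = _net(_f0, _f1)
    print(_f0.shape, _f1.shape)  # torch.Size([1, 4800, 256]) x 2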
| 3,657 | 34.514563 | 105 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/loftr_module/__init__.py | from .transformer import LocalFeatureTransformer
from .fine_preprocess import FinePreprocess
| 93 | 30.333333 | 48 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/utils/supervision.py | from math import log
from loguru import logger
import torch
from einops import repeat
from kornia.utils import create_meshgrid
from .geometry import warp_kpts
############## ↓ Coarse-Level supervision ↓ ##############
@torch.no_grad()
def mask_pts_at_padded_regions(grid_pt, mask):
"""For megadepth dataset, zero-padding exists in images"""
mask = repeat(mask, 'n h w -> n (h w) c', c=2)
grid_pt[~mask.bool()] = 0
return grid_pt
@torch.no_grad()
def spvs_coarse(data, config):
"""
Update:
data (dict): {
"conf_matrix_gt": [N, hw0, hw1],
'spv_b_ids': [M]
'spv_i_ids': [M]
'spv_j_ids': [M]
'spv_w_pt0_i': [N, hw0, 2], in original image resolution
'spv_pt1_i': [N, hw1, 2], in original image resolution
}
NOTE:
- for scannet dataset, there're 3 kinds of resolution {i, c, f}
- for megadepth dataset, there're 4 kinds of resolution {i, i_resize, c, f}
"""
# 1. misc
device = data['image0'].device
N, _, H0, W0 = data['image0'].shape
_, _, H1, W1 = data['image1'].shape
scale = config['LOFTR']['RESOLUTION'][0]
scale0 = scale * data['scale0'][:, None] if 'scale0' in data else scale
scale1 = scale * data['scale1'][:, None] if 'scale0' in data else scale
h0, w0, h1, w1 = map(lambda x: x // scale, [H0, W0, H1, W1])
# 2. warp grids
# create kpts in meshgrid and resize them to image resolution
grid_pt0_c = create_meshgrid(h0, w0, False, device).reshape(1, h0*w0, 2).repeat(N, 1, 1) # [N, hw, 2]
grid_pt0_i = scale0 * grid_pt0_c
grid_pt1_c = create_meshgrid(h1, w1, False, device).reshape(1, h1*w1, 2).repeat(N, 1, 1)
grid_pt1_i = scale1 * grid_pt1_c
# mask padded region to (0, 0), so no need to manually mask conf_matrix_gt
if 'mask0' in data:
grid_pt0_i = mask_pts_at_padded_regions(grid_pt0_i, data['mask0'])
grid_pt1_i = mask_pts_at_padded_regions(grid_pt1_i, data['mask1'])
# warp kpts bi-directionally and resize them to coarse-level resolution
# (no depth consistency check, since it leads to worse results experimentally)
# (unhandled edge case: points with 0-depth will be warped to the left-up corner)
_, w_pt0_i = warp_kpts(grid_pt0_i, data['depth0'], data['depth1'], data['T_0to1'], data['K0'], data['K1'])
_, w_pt1_i = warp_kpts(grid_pt1_i, data['depth1'], data['depth0'], data['T_1to0'], data['K1'], data['K0'])
w_pt0_c = w_pt0_i / scale1
w_pt1_c = w_pt1_i / scale0
# 3. check if mutual nearest neighbor
w_pt0_c_round = w_pt0_c[:, :, :].round().long()
nearest_index1 = w_pt0_c_round[..., 0] + w_pt0_c_round[..., 1] * w1
w_pt1_c_round = w_pt1_c[:, :, :].round().long()
nearest_index0 = w_pt1_c_round[..., 0] + w_pt1_c_round[..., 1] * w0
# corner case: out of boundary
def out_bound_mask(pt, w, h):
return (pt[..., 0] < 0) + (pt[..., 0] >= w) + (pt[..., 1] < 0) + (pt[..., 1] >= h)
nearest_index1[out_bound_mask(w_pt0_c_round, w1, h1)] = 0
nearest_index0[out_bound_mask(w_pt1_c_round, w0, h0)] = 0
loop_back = torch.stack([nearest_index0[_b][_i] for _b, _i in enumerate(nearest_index1)], dim=0)
correct_0to1 = loop_back == torch.arange(h0*w0, device=device)[None].repeat(N, 1)
correct_0to1[:, 0] = False # ignore the top-left corner
# 4. construct a gt conf_matrix
conf_matrix_gt = torch.zeros(N, h0*w0, h1*w1, device=device)
b_ids, i_ids = torch.where(correct_0to1 != 0)
j_ids = nearest_index1[b_ids, i_ids]
conf_matrix_gt[b_ids, i_ids, j_ids] = 1
data.update({'conf_matrix_gt': conf_matrix_gt})
# 5. save coarse matches(gt) for training fine level
if len(b_ids) == 0:
logger.warning(f"No groundtruth coarse match found for: {data['pair_names']}")
# this won't affect fine-level loss calculation
b_ids = torch.tensor([0], device=device)
i_ids = torch.tensor([0], device=device)
j_ids = torch.tensor([0], device=device)
data.update({
'spv_b_ids': b_ids,
'spv_i_ids': i_ids,
'spv_j_ids': j_ids
})
# 6. save intermediate results (for fast fine-level computation)
data.update({
'spv_w_pt0_i': w_pt0_i,
'spv_pt1_i': grid_pt1_i
})
def compute_supervision_coarse(data, config):
    assert len(set(data['dataset_name'])) == 1, "Mixed-dataset training is not supported!"
data_source = data['dataset_name'][0]
if data_source.lower() in ['scannet', 'megadepth']:
spvs_coarse(data, config)
else:
raise ValueError(f'Unknown data source: {data_source}')
############## ↓ Fine-Level supervision ↓ ##############
@torch.no_grad()
def spvs_fine(data, config):
"""
Update:
data (dict):{
"expec_f_gt": [M, 2]}
"""
# 1. misc
# w_pt0_i, pt1_i = data.pop('spv_w_pt0_i'), data.pop('spv_pt1_i')
w_pt0_i, pt1_i = data['spv_w_pt0_i'], data['spv_pt1_i']
scale = config['LOFTR']['RESOLUTION'][1]
radius = config['LOFTR']['FINE_WINDOW_SIZE'] // 2
# 2. get coarse prediction
b_ids, i_ids, j_ids = data['b_ids'], data['i_ids'], data['j_ids']
# 3. compute gt
scale = scale * data['scale1'][b_ids] if 'scale0' in data else scale
# `expec_f_gt` might exceed the window, i.e. abs(*) > 1, which would be filtered later
expec_f_gt = (w_pt0_i[b_ids, i_ids] - pt1_i[b_ids, j_ids]) / scale / radius # [M, 2]
data.update({"expec_f_gt": expec_f_gt})
def compute_supervision_fine(data, config):
data_source = data['dataset_name'][0]
if data_source.lower() in ['scannet', 'megadepth']:
spvs_fine(data, config)
else:
raise NotImplementedError
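# ---------------------------------------------------------------------------
# Minimal sanity-check sketch (not part of the original module): with identity
# intrinsics, identity pose and unit depth every coarse cell warps onto itself,
# so the ground-truth confidence matrix is (almost) the identity assignment.
# Run as a module from the repo root so the relative `.geometry` import resolves.
if __name__ == "__main__":
    _N, _H, _W = 1, 64, 64
    _data = {
        'image0': torch.zeros(_N, 1, _H, _W), 'image1': torch.zeros(_N, 1, _H, _W),
        'depth0': torch.ones(_N, _H, _W), 'depth1': torch.ones(_N, _H, _W),
        'T_0to1': torch.eye(4)[None], 'T_1to0': torch.eye(4)[None],
        'K0': torch.eye(3)[None], 'K1': torch.eye(3)[None],
    }
    _config = {'LOFTR': {'RESOLUTION': (8, 2)}}
    spvs_coarse(_data, _config)
    print(int(_data['conf_matrix_gt'].sum()))  # 63 == 8*8 - 1 (top-left cell is always ignored)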
| 5,724 | 36.418301 | 110 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/utils/cvpr_ds_config.py | from yacs.config import CfgNode as CN
def lower_config(yacs_cfg):
if not isinstance(yacs_cfg, CN):
return yacs_cfg
return {k.lower(): lower_config(v) for k, v in yacs_cfg.items()}
_CN = CN()
_CN.BACKBONE_TYPE = 'ResNetFPN'
_CN.RESOLUTION = (8, 2) # options: [(8, 2), (16, 4)]
_CN.FINE_WINDOW_SIZE = 5 # window_size in fine_level, must be odd
_CN.FINE_CONCAT_COARSE_FEAT = True
# 1. LoFTR-backbone (local feature CNN) config
_CN.RESNETFPN = CN()
_CN.RESNETFPN.INITIAL_DIM = 128
_CN.RESNETFPN.BLOCK_DIMS = [128, 196, 256] # s1, s2, s3
# 2. LoFTR-coarse module config
_CN.COARSE = CN()
_CN.COARSE.D_MODEL = 256
_CN.COARSE.D_FFN = 256
_CN.COARSE.NHEAD = 8
_CN.COARSE.LAYER_NAMES = ['self', 'cross'] * 4
_CN.COARSE.ATTENTION = 'linear' # options: ['linear', 'full']
# 3. Coarse-Matching config
_CN.MATCH_COARSE = CN()
_CN.MATCH_COARSE.THR = 0.2
_CN.MATCH_COARSE.BORDER_RM = 2
_CN.MATCH_COARSE.MATCH_TYPE = 'dual_softmax' # options: ['dual_softmax', 'sinkhorn']
_CN.MATCH_COARSE.DSMAX_TEMPERATURE = 0.1
_CN.MATCH_COARSE.SKH_ITERS = 3
_CN.MATCH_COARSE.SKH_INIT_BIN_SCORE = 1.0
_CN.MATCH_COARSE.SKH_PREFILTER = True
_CN.MATCH_COARSE.TRAIN_COARSE_PERCENT = 0.4 # training tricks: save GPU memory
_CN.MATCH_COARSE.TRAIN_PAD_NUM_GT_MIN = 200 # training tricks: avoid DDP deadlock
# 4. LoFTR-fine module config
_CN.FINE = CN()
_CN.FINE.D_MODEL = 128
_CN.FINE.D_FFN = 128
_CN.FINE.NHEAD = 8
_CN.FINE.LAYER_NAMES = ['self', 'cross'] * 1
_CN.FINE.ATTENTION = 'linear'
default_cfg = lower_config(_CN)
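# Minimal sketch (not part of the original file): `lower_config` turns the yacs
# tree above into plain nested dicts with lowercase keys, which is how the rest
# of the code consumes it.
if __name__ == "__main__":
    print(default_cfg['resolution'])           # (8, 2)
    print(default_cfg['match_coarse']['thr'])  # 0.2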
| 1,516 | 29.34 | 84 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/utils/position_encoding.py | import math
import torch
from torch import nn
class PositionEncodingSine(nn.Module):
"""
This is a sinusoidal position encoding that generalized to 2-dimensional images
"""
def __init__(self, d_model, max_shape=(256, 256)):
"""
Args:
max_shape (tuple): for 1/8 featmap, the max length of 256 corresponds to 2048 pixels
"""
super().__init__()
pe = torch.zeros((d_model, *max_shape))
y_position = torch.ones(max_shape).cumsum(0).float().unsqueeze(0)
x_position = torch.ones(max_shape).cumsum(1).float().unsqueeze(0)
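        # NOTE: Python parses `-math.log(10000.0) / d_model//2` as
        # `(-log(1e4) / d_model) // 2`, which is -1.0 for any realistic d_model,
        # so div_term is effectively exp(-arange(0, d_model//2, 2)) rather than
        # the classic 10000 ** (-i / (d_model//2)). The reference LoFTR code
        # later added a corrected variant (dividing by `(d_model//2)`) behind a
        # `temp_bug_fix` flag.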
div_term = torch.exp(torch.arange(0, d_model//2, 2).float() * (-math.log(10000.0) / d_model//2))
div_term = div_term[:, None, None] # [C//4, 1, 1]
pe[0::4, :, :] = torch.sin(x_position * div_term)
pe[1::4, :, :] = torch.cos(x_position * div_term)
pe[2::4, :, :] = torch.sin(y_position * div_term)
pe[3::4, :, :] = torch.cos(y_position * div_term)
self.register_buffer('pe', pe.unsqueeze(0), persistent=False) # [1, C, H, W]
def forward(self, x):
"""
Args:
x: [N, C, H, W]
"""
return x + self.pe[:, :, :x.size(2), :x.size(3)]
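# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module): the encoding is purely
# additive, so applying it to a dummy 1/8-resolution feature map leaves the
# shape untouched.
if __name__ == "__main__":
    _pe = PositionEncodingSine(256)
    _x = torch.randn(2, 256, 60, 80)
    print(_pe(_x).shape)  # torch.Size([2, 256, 60, 80])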
| 1,235 | 33.333333 | 104 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/utils/fine_matching.py | import math
import torch
import torch.nn as nn
from kornia.geometry.subpix import dsnt
from kornia.utils.grid import create_meshgrid
class FineMatching(nn.Module):
"""FineMatching with s2d paradigm"""
def __init__(self):
super().__init__()
def forward(self, feat_f0, feat_f1, data):
"""
Args:
feat0 (torch.Tensor): [M, WW, C]
feat1 (torch.Tensor): [M, WW, C]
data (dict)
Update:
data (dict):{
'expec_f' (torch.Tensor): [M, 3],
'mkpts0_f' (torch.Tensor): [M, 2],
'mkpts1_f' (torch.Tensor): [M, 2]}
"""
M, WW, C = feat_f0.shape
W = int(math.sqrt(WW))
scale = data['hw0_i'][0] / data['hw0_f'][0]
self.M, self.W, self.WW, self.C, self.scale = M, W, WW, C, scale
# corner case: if no coarse matches found
if M == 0:
            assert not self.training, "M is always > 0 during training, see coarse_matching.py"
# logger.warning('No matches found in coarse-level.')
data.update({
'expec_f': torch.empty(0, 3, device=feat_f0.device),
'mkpts0_f': data['mkpts0_c'],
'mkpts1_f': data['mkpts1_c'],
})
return
        feat_f0_picked = feat_f0[:, WW//2, :]
sim_matrix = torch.einsum('mc,mrc->mr', feat_f0_picked, feat_f1)
softmax_temp = 1. / C**.5
heatmap = torch.softmax(softmax_temp * sim_matrix, dim=1).view(-1, W, W)
# compute coordinates from heatmap
coords_normalized = dsnt.spatial_expectation2d(heatmap[None], True)[0] # [M, 2]
grid_normalized = create_meshgrid(W, W, True, heatmap.device).reshape(1, -1, 2) # [1, WW, 2]
# compute std over <x, y>
var = torch.sum(grid_normalized**2 * heatmap.view(-1, WW, 1), dim=1) - coords_normalized**2 # [M, 2]
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1) # [M] clamp needed for numerical stability
# for fine-level supervision
data.update({'expec_f': torch.cat([coords_normalized, std.unsqueeze(1)], -1)})
# compute absolute kpt coords
self.get_fine_match(coords_normalized, data)
@torch.no_grad()
def get_fine_match(self, coords_normed, data):
W, WW, C, scale = self.W, self.WW, self.C, self.scale
# mkpts0_f and mkpts1_f
mkpts0_f = data['mkpts0_c']
scale1 = scale * data['scale1'][data['b_ids']] if 'scale0' in data else scale
mkpts1_f = data['mkpts1_c'] + (coords_normed * (W // 2) * scale1)[:len(data['mconf'])]
data.update({
"mkpts0_f": mkpts0_f,
"mkpts1_f": mkpts1_f
})
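# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module) of the soft-argmax used
# above: a one-hot 5x5 heatmap yields normalized coordinates at the peak
# (x to the right, y downwards, both in [-1, 1]).
if __name__ == "__main__":
    _hm = torch.zeros(1, 1, 5, 5)
    _hm[0, 0, 1, 3] = 1.0
    print(dsnt.spatial_expectation2d(_hm, True))  # tensor([[[ 0.5000, -0.5000]]])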
class FineMatching_t(nn.Module):
"""FineMatching with s2d paradigm"""
def __init__(self):
super().__init__()
def forward(self, feat_f0, feat_f1, data):
"""
Args:
feat0 (torch.Tensor): [M, WW, C]
feat1 (torch.Tensor): [M, WW, C]
data (dict)
Update:
data (dict):{
'expec_f' (torch.Tensor): [M, 3],
'mkpts0_f' (torch.Tensor): [M, 2],
'mkpts1_f' (torch.Tensor): [M, 2]}
"""
M, WW, C = feat_f0.shape
W = int(math.sqrt(WW))
scale = data['hw0_i'][0] / data['hw0_f'][0]
self.M, self.W, self.WW, self.C, self.scale = M, W, WW, C, scale
# corner case: if no coarse matches found
if M == 0:
            assert not self.training, "M is always > 0 during training, see coarse_matching.py"
# logger.warning('No matches found in coarse-level.')
data.update({
'expec_f_t': torch.empty(0, 3, device=feat_f0.device),
'mkpts0_f_t': data['mkpts0_c'],
                'mkpts1_f_t': data['mkpts1_c'],
})
return
        feat_f0_picked = feat_f0[:, WW // 2, :]
sim_matrix = torch.einsum('mc,mrc->mr', feat_f0_picked, feat_f1)
softmax_temp = 1. / C ** .5
heatmap = torch.softmax(softmax_temp * sim_matrix, dim=1).view(-1, W, W)
# compute coordinates from heatmap
coords_normalized = dsnt.spatial_expectation2d(heatmap[None], True)[0] # [M, 2]
grid_normalized = create_meshgrid(W, W, True, heatmap.device).reshape(1, -1, 2) # [1, WW, 2]
# compute std over <x, y>
var = torch.sum(grid_normalized ** 2 * heatmap.view(-1, WW, 1), dim=1) - coords_normalized ** 2 # [M, 2]
std = torch.sum(torch.sqrt(torch.clamp(var, min=1e-10)), -1) # [M] clamp needed for numerical stability
# for fine-level supervision
data.update({'expec_f_t': torch.cat([coords_normalized, std.unsqueeze(1)], -1)})
# compute absolute kpt coords
self.get_fine_match(coords_normalized, data)
@torch.no_grad()
def get_fine_match(self, coords_normed, data):
W, WW, C, scale = self.W, self.WW, self.C, self.scale
# mkpts0_f and mkpts1_f
mkpts0_f = data['mkpts0_c']
scale1 = scale * data['scale1'][data['b_ids']] if 'scale0' in data else scale
mkpts1_f = data['mkpts1_c'] + (coords_normed * (W // 2) * scale1)[:len(data['mconf'])]
data.update({
"mkpts0_f": mkpts0_f,
"mkpts1_f": mkpts1_f
}) | 5,385 | 37.471429 | 113 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/utils/supervision_homography.py | from math import log
from loguru import logger
import torch
from einops import repeat
from kornia.utils import create_meshgrid
from .geometry import warp_kpts,warp_kpts_homo
############## ↓ Coarse-Level supervision ↓ ##############
@torch.no_grad()
def mask_pts_at_padded_regions(grid_pt, mask):
"""For megadepth dataset, zero-padding exists in images"""
mask = repeat(mask, 'n h w -> n (h w) c', c=2)
grid_pt[~mask.bool()] = 0
return grid_pt
@torch.no_grad()
def spvs_coarse(data, config):
"""
Update:
data (dict): {
"conf_matrix_gt": [N, hw0, hw1],
'spv_b_ids': [M]
'spv_i_ids': [M]
'spv_j_ids': [M]
'spv_w_pt0_i': [N, hw0, 2], in original image resolution
'spv_pt1_i': [N, hw1, 2], in original image resolution
}
NOTE:
- for scannet dataset, there're 3 kinds of resolution {i, c, f}
- for megadepth dataset, there're 4 kinds of resolution {i, i_resize, c, f}
"""
# 1. misc
device = data['image0'].device
N, _, H0, W0 = data['image0'].shape
_, _, H1, W1 = data['image1'].shape
scale = config['LOFTR']['RESOLUTION'][0]
scale0 = scale * data['scale0'][:, None] if 'scale0' in data else scale
scale1 = scale * data['scale1'][:, None] if 'scale0' in data else scale
h0, w0, h1, w1 = map(lambda x: x // scale, [H0, W0, H1, W1])
# 2. warp grids
# create kpts in meshgrid and resize them to image resolution
grid_pt0_c = create_meshgrid(h0, w0, False, device).reshape(1, h0 * w0, 2).repeat(N, 1, 1) # [N, hw, 2]
grid_pt0_i = scale0 * grid_pt0_c
grid_pt1_c = create_meshgrid(h1, w1, False, device).reshape(1, h1 * w1, 2).repeat(N, 1, 1)
grid_pt1_i = scale1 * grid_pt1_c
# mask padded region to (0, 0), so no need to manually mask conf_matrix_gt
if 'mask0' in data:
grid_pt0_i = mask_pts_at_padded_regions(grid_pt0_i, data['mask0'])
grid_pt1_i = mask_pts_at_padded_regions(grid_pt1_i, data['mask1'])
# warp kpts bi-directionally and resize them to coarse-level resolution
# (no depth consistency check, since it leads to worse results experimentally)
# (unhandled edge case: points with 0-depth will be warped to the left-up corner)
w_pt0_i = warp_kpts_homo(grid_pt0_i, data['M'])
inv_M = torch.inverse(data['M'])
w_pt1_i = warp_kpts_homo(grid_pt1_i, inv_M)
#_, w_pt0_i = warp_kpts(grid_pt0_i, data['depth0'], data['depth1'], data['T_0to1'], data['K0'], data['K1'])
#_, w_pt1_i = warp_kpts(grid_pt1_i, data['depth1'], data['depth0'], data['T_1to0'], data['K1'], data['K0'])
w_pt0_c = w_pt0_i / scale1
w_pt1_c = w_pt1_i / scale0
# 3. check if mutual nearest neighbor
w_pt0_c_round = w_pt0_c[:, :, :].round().long()
nearest_index1 = w_pt0_c_round[..., 0] + w_pt0_c_round[..., 1] * w1
w_pt1_c_round = w_pt1_c[:, :, :].round().long()
nearest_index0 = w_pt1_c_round[..., 0] + w_pt1_c_round[..., 1] * w0
# corner case: out of boundary
def out_bound_mask(pt, w, h):
return (pt[..., 0] < 0) + (pt[..., 0] >= w) + (pt[..., 1] < 0) + (pt[..., 1] >= h)
nearest_index1[out_bound_mask(w_pt0_c_round, w1, h1)] = 0
nearest_index0[out_bound_mask(w_pt1_c_round, w0, h0)] = 0
loop_back = torch.stack([nearest_index0[_b][_i] for _b, _i in enumerate(nearest_index1)], dim=0)
correct_0to1 = loop_back == torch.arange(h0 * w0, device=device)[None].repeat(N, 1)
correct_0to1[:, 0] = False # ignore the top-left corner
# 4. construct a gt conf_matrix
conf_matrix_gt = torch.zeros(N, h0 * w0, h1 * w1, device=device)
b_ids, i_ids = torch.where(correct_0to1 != 0)
j_ids = nearest_index1[b_ids, i_ids]
conf_matrix_gt[b_ids, i_ids, j_ids] = 1
data.update({'conf_matrix_gt': conf_matrix_gt})
# 5. save coarse matches(gt) for training fine level
if len(b_ids) == 0:
logger.warning(f"No groundtruth coarse match found for: {data['pair_names']}")
# this won't affect fine-level loss calculation
b_ids = torch.tensor([0], device=device)
i_ids = torch.tensor([0], device=device)
j_ids = torch.tensor([0], device=device)
data.update({
'spv_b_ids': b_ids,
'spv_i_ids': i_ids,
'spv_j_ids': j_ids
})
# 6. save intermediate results (for fast fine-level computation)
data.update({
'spv_w_pt0_i': w_pt0_i,
'spv_pt1_i': grid_pt1_i
})
def compute_supervision_coarse_homo(data, config):
spvs_coarse(data, config)
#assert len(set(data['dataset_name'])) == 1, "Do not support mixed datasets training!"
#data_source = data['dataset_name'][0]
#if data_source.lower() in ['scannet', 'megadepth']:
# spvs_coarse(data, config)
#else:
# raise ValueError(f'Unknown data source: {data_source}')
############## ↓ Fine-Level supervision ↓ ##############
@torch.no_grad()
def spvs_fine(data, config):
"""
Update:
data (dict):{
"expec_f_gt": [M, 2]}
"""
# 1. misc
# w_pt0_i, pt1_i = data.pop('spv_w_pt0_i'), data.pop('spv_pt1_i')
w_pt0_i, pt1_i = data['spv_w_pt0_i'], data['spv_pt1_i']
scale = config['LOFTR']['RESOLUTION'][1]
radius = config['LOFTR']['FINE_WINDOW_SIZE'] // 2
# 2. get coarse prediction
b_ids, i_ids, j_ids = data['b_ids'], data['i_ids'], data['j_ids']
# 3. compute gt
scale = scale * data['scale1'][b_ids] if 'scale0' in data else scale
# `expec_f_gt` might exceed the window, i.e. abs(*) > 1, which would be filtered later
expec_f_gt = (w_pt0_i[b_ids, i_ids] - pt1_i[b_ids, j_ids]) / scale / radius # [M, 2]
data.update({"expec_f_gt": expec_f_gt})
def compute_supervision_fine_homo(data, config):
spvs_fine(data, config)
#data_source = data['dataset_name'][0]
#if data_source.lower() in ['scannet', 'megadepth']:
# spvs_fine(data, config)
#else:
# raise NotImplementedError
| 5,957 | 36.708861 | 111 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/utils/geometry.py | import torch
import cv2
@torch.no_grad()
def warp_kpts_homo(kpts0, M):
""" Warp kpts0 from I0 to I1 with Homography M
Args:
kpts0 (torch.Tensor): [N, L, 2] - <x, y>,
M (torch.Tensor):
Returns:
warped_keypoints0 (torch.Tensor): [N, L, 2] <x0_hat, y1_hat>
"""
#kpts0_long = kpts0.round().long()
#print(kpts0_long.cpu().numpy()[None].shape)
#print(M.cpu().numpy().shape)
#print(kpts0.size())
#kpts1 = cv2.perspectiveTransform(kpts0_long.cpu().numpy()[None], M.cpu().numpy())
#project
device = kpts0.device
w_kpts0 = cv2.perspectiveTransform(kpts0.cpu().numpy(), M.cpu().numpy()[0])
w_kpts0 = torch.from_numpy(w_kpts0)
w_kpts0 = w_kpts0.to(device)
#print(device,M)
return w_kpts0
@torch.no_grad()
def warp_kpts(kpts0, depth0, depth1, T_0to1, K0, K1):
""" Warp kpts0 from I0 to I1 with depth, K and Rt
Also check covisibility and depth consistency.
Depth is consistent if relative error < 0.2 (hard-coded).
Args:
kpts0 (torch.Tensor): [N, L, 2] - <x, y>,
depth0 (torch.Tensor): [N, H, W],
depth1 (torch.Tensor): [N, H, W],
T_0to1 (torch.Tensor): [N, 3, 4],
K0 (torch.Tensor): [N, 3, 3],
K1 (torch.Tensor): [N, 3, 3],
Returns:
calculable_mask (torch.Tensor): [N, L]
warped_keypoints0 (torch.Tensor): [N, L, 2] <x0_hat, y1_hat>
"""
kpts0_long = kpts0.round().long()
# Sample depth, get calculable_mask on depth != 0
kpts0_depth = torch.stack(
[depth0[i, kpts0_long[i, :, 1], kpts0_long[i, :, 0]] for i in range(kpts0.shape[0])], dim=0
) # (N, L)
nonzero_mask = kpts0_depth != 0
# Unproject
kpts0_h = torch.cat([kpts0, torch.ones_like(kpts0[:, :, [0]])], dim=-1) * kpts0_depth[..., None] # (N, L, 3)
kpts0_cam = K0.inverse() @ kpts0_h.transpose(2, 1) # (N, 3, L)
# Rigid Transform
w_kpts0_cam = T_0to1[:, :3, :3] @ kpts0_cam + T_0to1[:, :3, [3]] # (N, 3, L)
w_kpts0_depth_computed = w_kpts0_cam[:, 2, :]
# Project
w_kpts0_h = (K1 @ w_kpts0_cam).transpose(2, 1) # (N, L, 3)
w_kpts0 = w_kpts0_h[:, :, :2] / (w_kpts0_h[:, :, [2]] + 1e-4) # (N, L, 2), +1e-4 to avoid zero depth
# Covisible Check
h, w = depth1.shape[1:3]
covisible_mask = (w_kpts0[:, :, 0] > 0) * (w_kpts0[:, :, 0] < w-1) * \
(w_kpts0[:, :, 1] > 0) * (w_kpts0[:, :, 1] < h-1)
w_kpts0_long = w_kpts0.long()
w_kpts0_long[~covisible_mask, :] = 0
w_kpts0_depth = torch.stack(
[depth1[i, w_kpts0_long[i, :, 1], w_kpts0_long[i, :, 0]] for i in range(w_kpts0_long.shape[0])], dim=0
) # (N, L)
consistent_mask = ((w_kpts0_depth - w_kpts0_depth_computed) / w_kpts0_depth).abs() < 0.2
valid_mask = nonzero_mask * covisible_mask * consistent_mask
return valid_mask, w_kpts0
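# ---------------------------------------------------------------------------
# Minimal sanity-check sketch (not part of the original module): with identity
# intrinsics, identity pose and unit depth, keypoints warp (almost) onto
# themselves and pass the covisibility / depth-consistency checks.
if __name__ == "__main__":
    _kpts = torch.tensor([[[8., 6.], [20., 14.]]])  # [1, 2, 2] in pixel coords
    _depth = torch.ones(1, 32, 32)
    _T = torch.eye(4)[None]
    _K = torch.eye(3)[None]
    _valid, _warped = warp_kpts(_kpts, _depth, _depth, _T, _K, _K)
    print(_valid)   # tensor([[True, True]])
    print(_warped)  # ~= the input keypoints (up to the 1e-4 stabiliser)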
| 2,838 | 34.936709 | 113 | py |
3DG-STFM | 3DG-STFM-master/src/loftr/utils/coarse_matching.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from einops.einops import rearrange
INF = 1e9
def mask_border(m, b: int, v):
""" Mask borders with value
Args:
m (torch.Tensor): [N, H0, W0, H1, W1]
b (int)
v (m.dtype)
"""
if b <= 0:
return
m[:, :b] = v
m[:, :, :b] = v
m[:, :, :, :b] = v
m[:, :, :, :, :b] = v
m[:, -b:] = v
m[:, :, -b:] = v
m[:, :, :, -b:] = v
m[:, :, :, :, -b:] = v
def mask_border_with_padding(m, bd, v, p_m0, p_m1):
if bd <= 0:
return
m[:, :bd] = v
m[:, :, :bd] = v
m[:, :, :, :bd] = v
m[:, :, :, :, :bd] = v
h0s, w0s = p_m0.sum(1).max(-1)[0].int(), p_m0.sum(-1).max(-1)[0].int()
h1s, w1s = p_m1.sum(1).max(-1)[0].int(), p_m1.sum(-1).max(-1)[0].int()
for b_idx, (h0, w0, h1, w1) in enumerate(zip(h0s, w0s, h1s, w1s)):
m[b_idx, h0 - bd:] = v
m[b_idx, :, w0 - bd:] = v
m[b_idx, :, :, h1 - bd:] = v
m[b_idx, :, :, :, w1 - bd:] = v
def compute_max_candidates(p_m0, p_m1):
"""Compute the max candidates of all pairs within a batch
Args:
p_m0, p_m1 (torch.Tensor): padded masks
"""
h0s, w0s = p_m0.sum(1).max(-1)[0], p_m0.sum(-1).max(-1)[0]
h1s, w1s = p_m1.sum(1).max(-1)[0], p_m1.sum(-1).max(-1)[0]
max_cand = torch.sum(
torch.min(torch.stack([h0s * w0s, h1s * w1s], -1), -1)[0])
return max_cand
class CoarseMatching(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
# general config
self.thr = config['thr']
self.border_rm = config['border_rm']
        # -- # for training fine-level LoFTR
self.train_coarse_percent = config['train_coarse_percent']
self.train_pad_num_gt_min = config['train_pad_num_gt_min']
# we provide 2 options for differentiable matching
self.match_type = config['match_type']
if self.match_type == 'dual_softmax':
self.temperature = config['dsmax_temperature']
elif self.match_type == 'sinkhorn':
try:
from .superglue import log_optimal_transport
except ImportError:
raise ImportError("download superglue.py first!")
self.log_optimal_transport = log_optimal_transport
self.bin_score = nn.Parameter(
torch.tensor(config['skh_init_bin_score'], requires_grad=True))
self.skh_iters = config['skh_iters']
self.skh_prefilter = config['skh_prefilter']
else:
raise NotImplementedError()
def forward(self, feat_c0, feat_c1, data, mask_c0=None, mask_c1=None):
"""
Args:
feat0 (torch.Tensor): [N, L, C]
feat1 (torch.Tensor): [N, S, C]
data (dict)
mask_c0 (torch.Tensor): [N, L] (optional)
mask_c1 (torch.Tensor): [N, S] (optional)
Update:
data (dict): {
'b_ids' (torch.Tensor): [M'],
'i_ids' (torch.Tensor): [M'],
'j_ids' (torch.Tensor): [M'],
'gt_mask' (torch.Tensor): [M'],
'mkpts0_c' (torch.Tensor): [M, 2],
'mkpts1_c' (torch.Tensor): [M, 2],
'mconf' (torch.Tensor): [M]}
NOTE: M' != M during training.
"""
N, L, S, C = feat_c0.size(0), feat_c0.size(1), feat_c1.size(1), feat_c0.size(2)
# normalize
feat_c0, feat_c1 = map(lambda feat: feat / feat.shape[-1]**.5,
[feat_c0, feat_c1])
if self.match_type == 'dual_softmax':
sim_matrix = torch.einsum("nlc,nsc->nls", feat_c0,
feat_c1) / self.temperature
if mask_c0 is not None:
sim_matrix.masked_fill_(
~(mask_c0[..., None] * mask_c1[:, None]).bool(),
-INF)
data.update({'sim_matrix': sim_matrix})
conf_matrix = F.softmax(sim_matrix, 1) * F.softmax(sim_matrix, 2)
elif self.match_type == 'sinkhorn':
# sinkhorn, dustbin included
sim_matrix = torch.einsum("nlc,nsc->nls", feat_c0, feat_c1)
if mask_c0 is not None:
sim_matrix[:, :L, :S].masked_fill_(
~(mask_c0[..., None] * mask_c1[:, None]).bool(),
-INF)
# build uniform prior & use sinkhorn
log_assign_matrix = self.log_optimal_transport(
sim_matrix, self.bin_score, self.skh_iters)
assign_matrix = log_assign_matrix.exp()
conf_matrix = assign_matrix[:, :-1, :-1]
# filter prediction with dustbin score (only in evaluation mode)
if not self.training and self.skh_prefilter:
filter0 = (assign_matrix.max(dim=2)[1] == S)[:, :-1] # [N, L]
filter1 = (assign_matrix.max(dim=1)[1] == L)[:, :-1] # [N, S]
conf_matrix[filter0[..., None].repeat(1, 1, S)] = 0
conf_matrix[filter1[:, None].repeat(1, L, 1)] = 0
if self.config['sparse_spvs']:
data.update({'conf_matrix_with_bin': assign_matrix.clone()})
data.update({'conf_matrix': conf_matrix})
# predict coarse matches from conf_matrix
data.update(**self.get_coarse_match(conf_matrix, data))
@torch.no_grad()
def get_coarse_match(self, conf_matrix, data):
"""
Args:
conf_matrix (torch.Tensor): [N, L, S]
data (dict): with keys ['hw0_i', 'hw1_i', 'hw0_c', 'hw1_c']
Returns:
coarse_matches (dict): {
'b_ids' (torch.Tensor): [M'],
'i_ids' (torch.Tensor): [M'],
'j_ids' (torch.Tensor): [M'],
'gt_mask' (torch.Tensor): [M'],
'm_bids' (torch.Tensor): [M],
'mkpts0_c' (torch.Tensor): [M, 2],
'mkpts1_c' (torch.Tensor): [M, 2],
'mconf' (torch.Tensor): [M]}
"""
axes_lengths = {
'h0c': data['hw0_c'][0],
'w0c': data['hw0_c'][1],
'h1c': data['hw1_c'][0],
'w1c': data['hw1_c'][1]
}
_device = conf_matrix.device
# 1. confidence thresholding
mask = conf_matrix > self.thr
mask = rearrange(mask, 'b (h0c w0c) (h1c w1c) -> b h0c w0c h1c w1c',
**axes_lengths)
if 'mask0' not in data:
mask_border(mask, self.border_rm, False)
else:
mask_border_with_padding(mask, self.border_rm, False,
data['mask0'], data['mask1'])
mask = rearrange(mask, 'b h0c w0c h1c w1c -> b (h0c w0c) (h1c w1c)',
**axes_lengths)
# 2. mutual nearest
mask = mask \
* (conf_matrix == conf_matrix.max(dim=2, keepdim=True)[0]) \
* (conf_matrix == conf_matrix.max(dim=1, keepdim=True)[0])
# 3. find all valid coarse matches
# this only works when at most one `True` in each row
mask_v, all_j_ids = mask.max(dim=2)
b_ids, i_ids = torch.where(mask_v)
j_ids = all_j_ids[b_ids, i_ids]
mconf = conf_matrix[b_ids, i_ids, j_ids]
# 4. Random sampling of training samples for fine-level LoFTR
# (optional) pad samples with gt coarse-level matches
if self.training:
# NOTE:
# The sampling is performed across all pairs in a batch without manually balancing
# #samples for fine-level increases w.r.t. batch_size
if 'mask0' not in data:
num_candidates_max = mask.size(0) * max(
mask.size(1), mask.size(2))
else:
num_candidates_max = compute_max_candidates(
data['mask0'], data['mask1'])
num_matches_train = int(num_candidates_max *
self.train_coarse_percent)
num_matches_pred = len(b_ids)
assert self.train_pad_num_gt_min < num_matches_train, "min-num-gt-pad should be less than num-train-matches"
# pred_indices is to select from prediction
if num_matches_pred <= num_matches_train - self.train_pad_num_gt_min:
pred_indices = torch.arange(num_matches_pred, device=_device)
else:
pred_indices = torch.randint(
num_matches_pred,
(num_matches_train - self.train_pad_num_gt_min, ),
device=_device)
# gt_pad_indices is to select from gt padding. e.g. max(3787-4800, 200)
gt_pad_indices = torch.randint(
len(data['spv_b_ids']),
(max(num_matches_train - num_matches_pred,
self.train_pad_num_gt_min), ),
device=_device)
mconf_gt = torch.zeros(len(data['spv_b_ids']), device=_device) # set conf of gt paddings to all zero
#print(len(data['spv_b_ids']),b_ids.size(),mconf.size(),gt_pad_indices.size(),pred_indices.size())
#print([j_ids, data['spv_j_ids']])
#print(j_ids.size(), data['spv_j_ids'].size())
b_ids, i_ids, j_ids, mconf = map(
lambda x, y: torch.cat([x[pred_indices], y[gt_pad_indices]],
dim=0),
*zip([b_ids, data['spv_b_ids']], [i_ids, data['spv_i_ids']],
[j_ids, data['spv_j_ids']], [mconf, mconf_gt]))
#print(b_ids.size(),'---------here')
# These matches select patches that feed into fine-level network
coarse_matches = {'b_ids': b_ids, 'i_ids': i_ids, 'j_ids': j_ids}
        # 5. Update with matches in original image resolution
scale = data['hw0_i'][0] / data['hw0_c'][0]
scale0 = scale * data['scale0'][b_ids] if 'scale0' in data else scale
scale1 = scale * data['scale1'][b_ids] if 'scale1' in data else scale
mkpts0_c = torch.stack(
[i_ids % data['hw0_c'][1], i_ids // data['hw0_c'][1]],
dim=1) * scale0
mkpts1_c = torch.stack(
[j_ids % data['hw1_c'][1], j_ids // data['hw1_c'][1]],
dim=1) * scale1
        # These matches are the current prediction (for visualization)
coarse_matches.update({
'gt_mask': mconf == 0,
'm_bids': b_ids[mconf != 0], # mconf == 0 => gt matches
'mkpts0_c': mkpts0_c[mconf != 0],
'mkpts1_c': mkpts1_c[mconf != 0],
'mconf': mconf[mconf != 0]
})
return coarse_matches
class CoarseMatching_t(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
# general config
self.thr = config['thr']
self.border_rm = config['border_rm']
        # -- # for training fine-level LoFTR
self.train_coarse_percent = config['train_coarse_percent']
self.train_pad_num_gt_min = config['train_pad_num_gt_min']
# we provide 2 options for differentiable matching
self.match_type = config['match_type']
if self.match_type == 'dual_softmax':
self.temperature = config['dsmax_temperature']
elif self.match_type == 'sinkhorn':
try:
from .superglue import log_optimal_transport
except ImportError:
raise ImportError("download superglue.py first!")
self.log_optimal_transport = log_optimal_transport
self.bin_score = nn.Parameter(
torch.tensor(config['skh_init_bin_score'], requires_grad=True))
self.skh_iters = config['skh_iters']
self.skh_prefilter = config['skh_prefilter']
else:
raise NotImplementedError()
def forward(self, feat_c0, feat_c1, data, mask_c0=None, mask_c1=None):
"""
Args:
feat0 (torch.Tensor): [N, L, C]
feat1 (torch.Tensor): [N, S, C]
data (dict)
mask_c0 (torch.Tensor): [N, L] (optional)
mask_c1 (torch.Tensor): [N, S] (optional)
Update:
data (dict): {
'b_ids' (torch.Tensor): [M'],
'i_ids' (torch.Tensor): [M'],
'j_ids' (torch.Tensor): [M'],
'gt_mask' (torch.Tensor): [M'],
'mkpts0_c' (torch.Tensor): [M, 2],
'mkpts1_c' (torch.Tensor): [M, 2],
'mconf' (torch.Tensor): [M]}
NOTE: M' != M during training.
"""
N, L, S, C = feat_c0.size(0), feat_c0.size(1), feat_c1.size(1), feat_c0.size(2)
# normalize
feat_c0, feat_c1 = map(lambda feat: feat / feat.shape[-1]**.5,
[feat_c0, feat_c1])
if self.match_type == 'dual_softmax':
sim_matrix = torch.einsum("nlc,nsc->nls", feat_c0,
feat_c1) / self.temperature
data.update({'teacher_matrix': sim_matrix})
#if mask_c0 is not None:
# sim_matrix.masked_fill_(
# ~(mask_c0[..., None] * mask_c1[:, None]).bool(),
# -INF)
conf_matrix = F.softmax(sim_matrix, 1) * F.softmax(sim_matrix, 2)
elif self.match_type == 'sinkhorn':
# sinkhorn, dustbin included
sim_matrix = torch.einsum("nlc,nsc->nls", feat_c0, feat_c1)
#if mask_c0 is not None:
# sim_matrix[:, :L, :S].masked_fill_(
# ~(mask_c0[..., None] * mask_c1[:, None]).bool(),
# -INF)
# build uniform prior & use sinkhorn
log_assign_matrix = self.log_optimal_transport(
sim_matrix, self.bin_score, self.skh_iters)
assign_matrix = log_assign_matrix.exp()
conf_matrix = assign_matrix[:, :-1, :-1]
# filter prediction with dustbin score (only in evaluation mode)
if not self.training and self.skh_prefilter:
filter0 = (assign_matrix.max(dim=2)[1] == S)[:, :-1] # [N, L]
filter1 = (assign_matrix.max(dim=1)[1] == L)[:, :-1] # [N, S]
conf_matrix[filter0[..., None].repeat(1, 1, S)] = 0
conf_matrix[filter1[:, None].repeat(1, L, 1)] = 0
if self.config['sparse_spvs']:
data.update({'conf_matrix_with_bin': assign_matrix.clone()})
data.update({'conf_matrix_t': conf_matrix})
# predict coarse matches from conf_matrix
#data.update(**self.get_coarse_match(conf_matrix, data))
@torch.no_grad()
def get_coarse_match(self, conf_matrix, data):
"""
Args:
conf_matrix (torch.Tensor): [N, L, S]
data (dict): with keys ['hw0_i', 'hw1_i', 'hw0_c', 'hw1_c']
Returns:
coarse_matches (dict): {
'b_ids' (torch.Tensor): [M'],
'i_ids' (torch.Tensor): [M'],
'j_ids' (torch.Tensor): [M'],
'gt_mask' (torch.Tensor): [M'],
'm_bids' (torch.Tensor): [M],
'mkpts0_c' (torch.Tensor): [M, 2],
'mkpts1_c' (torch.Tensor): [M, 2],
'mconf' (torch.Tensor): [M]}
"""
axes_lengths = {
'h0c': data['hw0_c'][0],
'w0c': data['hw0_c'][1],
'h1c': data['hw1_c'][0],
'w1c': data['hw1_c'][1]
}
_device = conf_matrix.device
# 1. confidence thresholding
mask = conf_matrix > self.thr
mask = rearrange(mask, 'b (h0c w0c) (h1c w1c) -> b h0c w0c h1c w1c',
**axes_lengths)
if 'mask0' not in data:
mask_border(mask, self.border_rm, False)
else:
mask_border_with_padding(mask, self.border_rm, False,
data['mask0'], data['mask1'])
mask = rearrange(mask, 'b h0c w0c h1c w1c -> b (h0c w0c) (h1c w1c)',
**axes_lengths)
# 2. mutual nearest
mask = mask \
* (conf_matrix == conf_matrix.max(dim=2, keepdim=True)[0]) \
* (conf_matrix == conf_matrix.max(dim=1, keepdim=True)[0])
# 3. find all valid coarse matches
# this only works when at most one `True` in each row
mask_v, all_j_ids = mask.max(dim=2)
b_ids, i_ids = torch.where(mask_v)
j_ids = all_j_ids[b_ids, i_ids]
mconf = conf_matrix[b_ids, i_ids, j_ids]
# 4. Random sampling of training samples for fine-level LoFTR
# (optional) pad samples with gt coarse-level matches
if self.training:
# NOTE:
# The sampling is performed across all pairs in a batch without manually balancing
# #samples for fine-level increases w.r.t. batch_size
if 'mask0' not in data:
num_candidates_max = mask.size(0) * max(
mask.size(1), mask.size(2))
else:
num_candidates_max = compute_max_candidates(
data['mask0'], data['mask1'])
num_matches_train = int(num_candidates_max *
self.train_coarse_percent)
num_matches_pred = len(b_ids)
assert self.train_pad_num_gt_min < num_matches_train, "min-num-gt-pad should be less than num-train-matches"
# pred_indices is to select from prediction
if num_matches_pred <= num_matches_train - self.train_pad_num_gt_min:
pred_indices = torch.arange(num_matches_pred, device=_device)
else:
pred_indices = torch.randint(
num_matches_pred,
(num_matches_train - self.train_pad_num_gt_min, ),
device=_device)
# gt_pad_indices is to select from gt padding. e.g. max(3787-4800, 200)
gt_pad_indices = torch.randint(
len(data['spv_b_ids']),
(max(num_matches_train - num_matches_pred,
self.train_pad_num_gt_min), ),
device=_device)
mconf_gt = torch.zeros(len(data['spv_b_ids']), device=_device) # set conf of gt paddings to all zero
#print(len(data['spv_b_ids']),b_ids.size(),mconf.size(),gt_pad_indices.size(),pred_indices.size())
#print([j_ids, data['spv_j_ids']])
#print(j_ids.size(), data['spv_j_ids'].size())
b_ids, i_ids, j_ids, mconf = map(
lambda x, y: torch.cat([x[pred_indices], y[gt_pad_indices]],
dim=0),
*zip([b_ids, data['spv_b_ids']], [i_ids, data['spv_i_ids']],
[j_ids, data['spv_j_ids']], [mconf, mconf_gt]))
#print(b_ids.size(),'---------here')
# These matches select patches that feed into fine-level network
coarse_matches = {'b_ids': b_ids, 'i_ids': i_ids, 'j_ids': j_ids}
        # 5. Update with matches in original image resolution
scale = data['hw0_i'][0] / data['hw0_c'][0]
scale0 = scale * data['scale0'][b_ids] if 'scale0' in data else scale
scale1 = scale * data['scale1'][b_ids] if 'scale1' in data else scale
mkpts0_c = torch.stack(
[i_ids % data['hw0_c'][1], i_ids // data['hw0_c'][1]],
dim=1) * scale0
mkpts1_c = torch.stack(
[j_ids % data['hw1_c'][1], j_ids // data['hw1_c'][1]],
dim=1) * scale1
        # These matches are the current prediction (for visualization)
coarse_matches.update({
'gt_mask': mconf == 0,
'm_bids': b_ids[mconf != 0], # mconf == 0 => gt matches
'mkpts0_c': mkpts0_c[mconf != 0],
'mkpts1_c': mkpts1_c[mconf != 0],
'mconf': mconf[mconf != 0]
})
return coarse_matches
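# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module): match a random 30x40 coarse
# feature map against a copy of itself with the dual-softmax matcher; the config
# values below just mirror the project defaults. Self-similarity dominates, so
# the surviving matches lie (mostly) on the diagonal away from the border.
if __name__ == "__main__":
    _cfg = {'thr': 0.2, 'border_rm': 2, 'train_coarse_percent': 0.4,
            'train_pad_num_gt_min': 200, 'match_type': 'dual_softmax',
            'dsmax_temperature': 0.1}
    _matcher = CoarseMatching(_cfg).eval()
    _feat = torch.randn(1, 30 * 40, 256)
    _data = {'hw0_i': (240, 320), 'hw1_i': (240, 320),
             'hw0_c': (30, 40), 'hw1_c': (30, 40)}
    with torch.no_grad():
        _matcher(_feat, _feat.clone(), _data)
    print(_data['mkpts0_c'].shape, _data['mconf'].shape)  # [M, 2] and [M]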
| 20,002 | 41.289641 | 120 | py |
3DG-STFM | 3DG-STFM-master/src/optimizers/__init__.py | import torch
from torch.optim.lr_scheduler import MultiStepLR, CosineAnnealingLR, ExponentialLR
def build_optimizer(model, config):
name = config.TRAINER.OPTIMIZER
lr = config.TRAINER.TRUE_LR
if name == "adam":
return torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, weight_decay=config.TRAINER.ADAM_DECAY)
elif name == "adamw":
        # debug helper: list every parameter that will actually be optimized
        for param_name, p in model.named_parameters():
            if p.requires_grad:
                print(param_name)
return torch.optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=lr, weight_decay=config.TRAINER.ADAMW_DECAY)
else:
raise ValueError(f"TRAINER.OPTIMIZER = {name} is not a valid optimizer!")
def build_scheduler(config, optimizer):
"""
Returns:
scheduler (dict):{
'scheduler': lr_scheduler,
'interval': 'step', # or 'epoch'
'monitor': 'val_f1', (optional)
'frequency': x, (optional)
}
"""
scheduler = {'interval': config.TRAINER.SCHEDULER_INTERVAL}
name = config.TRAINER.SCHEDULER
if name == 'MultiStepLR':
scheduler.update(
{'scheduler': MultiStepLR(optimizer, config.TRAINER.MSLR_MILESTONES, gamma=config.TRAINER.MSLR_GAMMA)})
elif name == 'CosineAnnealing':
scheduler.update(
{'scheduler': CosineAnnealingLR(optimizer, config.TRAINER.COSA_TMAX)})
elif name == 'ExponentialLR':
scheduler.update(
{'scheduler': ExponentialLR(optimizer, config.TRAINER.ELR_GAMMA)})
else:
raise NotImplementedError()
return scheduler
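# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module): drive both builders with a
# stand-in namespace instead of the project's full yacs config; the attribute
# names mirror the TRAINER keys read above.
if __name__ == "__main__":
    from types import SimpleNamespace
    import torch.nn as nn
    _cfg = SimpleNamespace(TRAINER=SimpleNamespace(
        OPTIMIZER='adam', TRUE_LR=1e-3, ADAM_DECAY=0.0,
        SCHEDULER='MultiStepLR', SCHEDULER_INTERVAL='epoch',
        MSLR_MILESTONES=[3, 6, 9], MSLR_GAMMA=0.5))
    _model = nn.Linear(8, 2)
    _opt = build_optimizer(_model, _cfg)
    _sched = build_scheduler(_cfg, _opt)
    print(_sched['interval'], type(_sched['scheduler']).__name__)  # epoch MultiStepLR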
| 1,665 | 35.217391 | 135 | py |
3DG-STFM | 3DG-STFM-master/src/utils/plotting.py | import bisect
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import os
def _compute_conf_thresh(data):
dataset_name = data['dataset_name'][0].lower()
if dataset_name == 'scannet':
thr = 5e-4
elif dataset_name == 'megadepth':
thr = 1e-4
else:
raise ValueError(f'Unknown dataset: {dataset_name}')
return thr
# --- VISUALIZATION --- #
def make_matching_figure(
img0, img1, mkpts0, mkpts1, color,
kpts0=None, kpts1=None, text=[], dpi=75, path=None):
# draw image pair
assert mkpts0.shape[0] == mkpts1.shape[0], f'mkpts0: {mkpts0.shape[0]} v.s. mkpts1: {mkpts1.shape[0]}'
fig, axes = plt.subplots(1, 2, figsize=(10, 6), dpi=dpi)
axes[0].imshow(img0, cmap='gray')
axes[1].imshow(img1, cmap='gray')
for i in range(2): # clear all frames
axes[i].get_yaxis().set_ticks([])
axes[i].get_xaxis().set_ticks([])
for spine in axes[i].spines.values():
spine.set_visible(False)
plt.tight_layout(pad=1)
if kpts0 is not None:
assert kpts1 is not None
axes[0].scatter(kpts0[:, 0], kpts0[:, 1], c='w', s=2)
axes[1].scatter(kpts1[:, 0], kpts1[:, 1], c='w', s=2)
# draw matches
if mkpts0.shape[0] != 0 and mkpts1.shape[0] != 0:
fig.canvas.draw()
transFigure = fig.transFigure.inverted()
fkpts0 = transFigure.transform(axes[0].transData.transform(mkpts0))
fkpts1 = transFigure.transform(axes[1].transData.transform(mkpts1))
fig.lines = [matplotlib.lines.Line2D((fkpts0[i, 0], fkpts1[i, 0]),
(fkpts0[i, 1], fkpts1[i, 1]),
transform=fig.transFigure, c=color[i], linewidth=1)
for i in range(len(mkpts0))]
axes[0].scatter(mkpts0[:, 0], mkpts0[:, 1], c=color, s=4)
axes[1].scatter(mkpts1[:, 0], mkpts1[:, 1], c=color, s=4)
# put txts
txt_color = 'k' if img0[:100, :200].mean() > 200 else 'w'
fig.text(
0.01, 0.99, '\n'.join(text), transform=fig.axes[0].transAxes,
fontsize=15, va='top', ha='left', color=txt_color)
# save or return figure
if path:
plt.savefig(str(path), bbox_inches='tight', pad_inches=0)
print('saved',os.getcwd(),path)
plt.close()
else:
return fig
def _make_evaluation_figure(data, b_id, alpha='dynamic'):
b_mask = data['m_bids'] == b_id
conf_thr = _compute_conf_thresh(data)
img0 = (data['image0'][b_id][0].cpu().numpy() * 255).round().astype(np.int32)
img1 = (data['image1'][b_id][0].cpu().numpy() * 255).round().astype(np.int32)
kpts0 = data['mkpts0_f'][b_mask].cpu().numpy()
kpts1 = data['mkpts1_f'][b_mask].cpu().numpy()
# for megadepth, we visualize matches on the resized image
if 'scale0' in data:
kpts0 = kpts0 / data['scale0'][b_id].cpu().numpy()[[1, 0]]
kpts1 = kpts1 / data['scale1'][b_id].cpu().numpy()[[1, 0]]
epi_errs = data['epi_errs'][b_mask].cpu().numpy()
correct_mask = epi_errs < conf_thr
precision = np.mean(correct_mask) if len(correct_mask) > 0 else 0
n_correct = np.sum(correct_mask)
n_gt_matches = int(data['conf_matrix_gt'][b_id].sum().cpu())
recall = 0 if n_gt_matches == 0 else n_correct / (n_gt_matches)
# recall might be larger than 1, since the calculation of conf_matrix_gt
# uses groundtruth depths and camera poses, but epipolar distance is used here.
# matching info
if alpha == 'dynamic':
alpha = dynamic_alpha(len(correct_mask))
color = error_colormap(epi_errs, conf_thr, alpha=alpha)
text = [
f'#Matches {len(kpts0)}',
f'Precision({conf_thr:.2e}) ({100 * precision:.1f}%): {n_correct}/{len(kpts0)}',
f'Recall({conf_thr:.2e}) ({100 * recall:.1f}%): {n_correct}/{n_gt_matches}'
]
# make the figure
figure = make_matching_figure(img0, img1, kpts0, kpts1,
color, text=text)
return figure
def _make_confidence_figure(data, b_id):
# TODO: Implement confidence figure
raise NotImplementedError()
def make_matching_figures(data, config, mode='evaluation'):
""" Make matching figures for a batch.
Args:
data (Dict): a batch updated by PL_LoFTR.
config (Dict): matcher config
Returns:
figures (Dict[str, List[plt.figure]]
"""
assert mode in ['evaluation', 'confidence'] # 'confidence'
figures = {mode: []}
for b_id in range(data['image0'].size(0)):
if mode == 'evaluation':
fig = _make_evaluation_figure(
data, b_id,
alpha=config.TRAINER.PLOT_MATCHES_ALPHA)
elif mode == 'confidence':
fig = _make_confidence_figure(data, b_id)
else:
raise ValueError(f'Unknown plot mode: {mode}')
figures[mode].append(fig)
return figures
def dynamic_alpha(n_matches,
milestones=[0, 300, 1000, 2000],
alphas=[1.0, 0.8, 0.4, 0.2]):
if n_matches == 0:
return 1.0
ranges = list(zip(alphas, alphas[1:] + [None]))
loc = bisect.bisect_right(milestones, n_matches) - 1
_range = ranges[loc]
if _range[1] is None:
return _range[0]
return _range[1] + (milestones[loc + 1] - n_matches) / (
milestones[loc + 1] - milestones[loc]) * (_range[0] - _range[1])
def error_colormap(err, thr, alpha=1.0):
    assert 0 < alpha <= 1.0, f"Invalid alpha value: {alpha}"
x = 1 - np.clip(err / (thr * 2), 0, 1)
return np.clip(
np.stack([2-x*2, x*2, np.zeros_like(x), np.ones_like(x)*alpha], -1), 0, 1)
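# ---------------------------------------------------------------------------
# Minimal sketch (not part of the original module): `dynamic_alpha` interpolates
# the match-line transparency between the milestones defined above.
if __name__ == "__main__":
    print(dynamic_alpha(0))     # 1.0  (no matches)
    print(dynamic_alpha(650))   # 0.6  (halfway between the 300 and 1000 milestones)
    print(dynamic_alpha(5000))  # 0.2  (beyond the last milestone)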
| 5,723 | 35.227848 | 106 | py |
3DG-STFM | 3DG-STFM-master/src/utils/comm.py | # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
[Copied from detectron2]
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import functools
import logging
import numpy as np
import pickle
import torch
import torch.distributed as dist
_LOCAL_PROCESS_GROUP = None
"""
A torch process group which only includes processes that on the same machine as the current process.
This variable is set when processes are spawned by `launch()` in "engine/launch.py".
"""
def get_world_size() -> int:
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank() -> int:
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def get_local_rank() -> int:
"""
Returns:
The rank of the current process within the local (per-machine) process group.
"""
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
assert _LOCAL_PROCESS_GROUP is not None
return dist.get_rank(group=_LOCAL_PROCESS_GROUP)
def get_local_size() -> int:
"""
Returns:
The size of the per-machine process group,
i.e. the number of processes per machine.
"""
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size(group=_LOCAL_PROCESS_GROUP)
def is_main_process() -> bool:
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
@functools.lru_cache()
def _get_global_gloo_group():
"""
Return a process group based on gloo backend, containing all the ranks
The result is cached.
"""
if dist.get_backend() == "nccl":
return dist.new_group(backend="gloo")
else:
return dist.group.WORLD
def _serialize_to_tensor(data, group):
backend = dist.get_backend(group)
assert backend in ["gloo", "nccl"]
device = torch.device("cpu" if backend == "gloo" else "cuda")
buffer = pickle.dumps(data)
if len(buffer) > 1024 ** 3:
logger = logging.getLogger(__name__)
logger.warning(
"Rank {} trying to all-gather {:.2f} GB of data on device {}".format(
get_rank(), len(buffer) / (1024 ** 3), device
)
)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def _pad_to_largest_tensor(tensor, group):
"""
Returns:
list[int]: size of the tensor, on each rank
Tensor: padded tensor that has the max size
"""
world_size = dist.get_world_size(group=group)
assert (
world_size >= 1
), "comm.gather/all_gather must be called from ranks within the given group!"
local_size = torch.tensor([tensor.numel()], dtype=torch.int64, device=tensor.device)
size_list = [
torch.zeros([1], dtype=torch.int64, device=tensor.device) for _ in range(world_size)
]
dist.all_gather(size_list, local_size, group=group)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
if local_size != max_size:
padding = torch.zeros((max_size - local_size,), dtype=torch.uint8, device=tensor.device)
tensor = torch.cat((tensor, padding), dim=0)
return size_list, tensor
def all_gather(data, group=None):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: list of data gathered from each rank
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group) == 1:
return [data]
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
max_size = max(size_list)
# receiving Tensor from all ranks
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list
]
dist.all_gather(tensor_list, tensor, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def gather(data, dst=0, group=None):
"""
Run gather on arbitrary picklable data (not necessarily tensors).
Args:
data: any picklable object
dst (int): destination rank
group: a torch process group. By default, will use a group which
contains all ranks on gloo backend.
Returns:
list[data]: on dst, a list of data gathered from each rank. Otherwise,
an empty list.
"""
if get_world_size() == 1:
return [data]
if group is None:
group = _get_global_gloo_group()
if dist.get_world_size(group=group) == 1:
return [data]
rank = dist.get_rank(group=group)
tensor = _serialize_to_tensor(data, group)
size_list, tensor = _pad_to_largest_tensor(tensor, group)
# receiving Tensor from all ranks
if rank == dst:
max_size = max(size_list)
tensor_list = [
torch.empty((max_size,), dtype=torch.uint8, device=tensor.device) for _ in size_list
]
dist.gather(tensor, tensor_list, dst=dst, group=group)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
else:
dist.gather(tensor, [], dst=dst, group=group)
return []
def shared_random_seed():
"""
Returns:
int: a random number that is the same across all workers.
If workers need a shared RNG, they can use this shared seed to
create one.
All workers must call this function, otherwise it will deadlock.
"""
ints = np.random.randint(2 ** 31)
all_ints = all_gather(ints)
return all_ints[0]
def reduce_dict(input_dict, average=True):
"""
Reduce the values in the dictionary from all processes so that process with rank
0 has the reduced results.
Args:
input_dict (dict): inputs to be reduced. All the values must be scalar CUDA Tensor.
average (bool): whether to do average or sum
Returns:
a dict with the same keys as input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
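# Usage sketch (illustrative only): in a single-process run (no torch.distributed process
# group initialized) these helpers degrade gracefully to local no-ops.
def _demo_comm_single_process():
    gathered = all_gather({'rank': get_rank()})  # -> [{'rank': 0}] with one process
    seed = shared_random_seed()                  # identical across processes when distributed
    return gathered, seed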
| 7,776 | 28.236842 | 100 | py |
3DG-STFM | 3DG-STFM-master/src/utils/dataloader.py | import numpy as np
# --- PL-DATAMODULE ---
def get_local_split(items: list, world_size: int, rank: int, seed: int):
""" The local rank only loads a split of the dataset. """
n_items = len(items)
items_permute = np.random.RandomState(seed).permutation(items)
if n_items % world_size == 0:
padded_items = items_permute
else:
padding = np.random.RandomState(seed).choice(
items,
world_size - (n_items % world_size),
replace=True)
padded_items = np.concatenate([items_permute, padding])
assert len(padded_items) % world_size == 0, \
f'len(padded_items): {len(padded_items)}; world_size: {world_size}; len(padding): {len(padding)}'
n_per_rank = len(padded_items) // world_size
local_items = padded_items[n_per_rank * rank: n_per_rank * (rank+1)]
return local_items
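# Usage sketch (illustrative only, with hypothetical scene names): every rank receives an
# equally sized, seed-deterministic slice; non-divisible lists are padded by resampling.
def _demo_get_local_split():
    scenes = [f'scene{i:04d}_00' for i in range(10)]
    world_size, seed = 4, 66
    splits = [get_local_split(scenes, world_size, rank, seed) for rank in range(world_size)]
    assert all(len(s) == 3 for s in splits)  # ceil(10 / 4) = 3 items per rank
    return splits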
| 876 | 35.541667 | 109 | py |
3DG-STFM | 3DG-STFM-master/src/utils/misc.py | import os
import contextlib
import joblib
from typing import Union
from loguru import _Logger, logger
from itertools import chain
import torch
from yacs.config import CfgNode as CN
from pytorch_lightning.utilities import rank_zero_only
def lower_config(yacs_cfg):
if not isinstance(yacs_cfg, CN):
return yacs_cfg
return {k.lower(): lower_config(v) for k, v in yacs_cfg.items()}
def upper_config(dict_cfg):
if not isinstance(dict_cfg, dict):
return dict_cfg
return {k.upper(): upper_config(v) for k, v in dict_cfg.items()}
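# Usage sketch (illustrative only): round-trip between an upper-case yacs config and the
# lower-case plain dict expected by the model code (uses the CfgNode import at the top of this file).
def _demo_config_case_conversion():
    cfg = CN()
    cfg.LOFTR = CN()
    cfg.LOFTR.RESOLUTION = (8, 2)
    lowered = lower_config(cfg)       # {'loftr': {'resolution': (8, 2)}}
    restored = upper_config(lowered)  # {'LOFTR': {'RESOLUTION': (8, 2)}}
    return lowered, restored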
def log_on(condition, message, level):
if condition:
assert level in ['INFO', 'DEBUG', 'WARNING', 'ERROR', 'CRITICAL']
logger.log(level, message)
def get_rank_zero_only_logger(logger: _Logger):
if rank_zero_only.rank == 0:
return logger
else:
for _level in logger._core.levels.keys():
level = _level.lower()
setattr(logger, level,
lambda x: None)
logger._log = lambda x: None
return logger
def setup_gpus(gpus: Union[str, int]) -> int:
""" A temporary fix for pytorch-lighting 1.3.x """
gpus = str(gpus)
gpu_ids = []
if ',' not in gpus:
n_gpus = int(gpus)
return n_gpus if n_gpus != -1 else torch.cuda.device_count()
else:
gpu_ids = [i.strip() for i in gpus.split(',') if i != '']
# setup environment variables
visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
if visible_devices is None:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(i) for i in gpu_ids)
visible_devices = os.getenv('CUDA_VISIBLE_DEVICES')
logger.warning(f'[Temporary Fix] manually set CUDA_VISIBLE_DEVICES when specifying gpus to use: {visible_devices}')
else:
logger.warning('[Temporary Fix] CUDA_VISIBLE_DEVICES already set by user or the main process.')
return len(gpu_ids)
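# Usage sketch (illustrative only): how the `gpus` argument is interpreted. The comma-separated
# form may set CUDA_VISIBLE_DEVICES as a side effect (see the warnings above).
def _demo_setup_gpus():
    n_two = setup_gpus(2)      # -> 2
    n_all = setup_gpus(-1)     # -> torch.cuda.device_count()
    n_ids = setup_gpus('0,2')  # -> 2 (GPU ids 0 and 2)
    return n_two, n_all, n_ids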
def flattenList(x):
return list(chain(*x))
@contextlib.contextmanager
def tqdm_joblib(tqdm_object):
"""Context manager to patch joblib to report into tqdm progress bar given as argument
Usage:
with tqdm_joblib(tqdm(desc="My calculation", total=10)) as progress_bar:
Parallel(n_jobs=16)(delayed(sqrt)(i**2) for i in range(10))
    When iterating over a generator, using tqdm directly is also a solution (but it monitors task queuing instead of task completion)
ret_vals = Parallel(n_jobs=args.world_size)(
delayed(lambda x: _compute_cov_score(pid, *x))(param)
for param in tqdm(combinations(image_ids, 2),
desc=f'Computing cov_score of [{pid}]',
total=len(image_ids)*(len(image_ids)-1)/2))
Src: https://stackoverflow.com/a/58936697
"""
class TqdmBatchCompletionCallback(joblib.parallel.BatchCompletionCallBack):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __call__(self, *args, **kwargs):
tqdm_object.update(n=self.batch_size)
return super().__call__(*args, **kwargs)
old_batch_callback = joblib.parallel.BatchCompletionCallBack
joblib.parallel.BatchCompletionCallBack = TqdmBatchCompletionCallback
try:
yield tqdm_object
finally:
joblib.parallel.BatchCompletionCallBack = old_batch_callback
tqdm_object.close()
| 3,512 | 33.441176 | 128 | py |
3DG-STFM | 3DG-STFM-master/src/utils/augment.py | import albumentations as A
class DarkAug(object):
"""
Extreme dark augmentation aiming at Aachen Day-Night
"""
def __init__(self) -> None:
self.augmentor = A.Compose([
A.RandomBrightnessContrast(p=0.75, brightness_limit=(-0.6, 0.0), contrast_limit=(-0.5, 0.3)),
A.Blur(p=0.1, blur_limit=(3, 9)),
A.MotionBlur(p=0.2, blur_limit=(3, 25)),
A.RandomGamma(p=0.1, gamma_limit=(15, 65)),
A.HueSaturationValue(p=0.1, val_shift_limit=(-100, -40))
], p=0.75)
def __call__(self, x):
return self.augmentor(image=x)['image']
class MobileAug(object):
"""
Random augmentations aiming at images of mobile/handhold devices.
"""
def __init__(self):
self.augmentor = A.Compose([
A.MotionBlur(p=0.25),
A.ColorJitter(p=0.5),
A.RandomRain(p=0.1), # random occlusion
A.RandomSunFlare(p=0.1),
A.JpegCompression(p=0.25),
A.ISONoise(p=0.25)
], p=1.0)
def __call__(self, x):
return self.augmentor(image=x)['image']
def build_augmentor(method=None, **kwargs):
if method is not None:
        raise NotImplementedError('Using augmentation functions is not supported yet!')
if method == 'dark':
return DarkAug()
elif method == 'mobile':
return MobileAug()
elif method is None:
return None
else:
raise ValueError(f'Invalid augmentation method: {method}')
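# Usage sketch (illustrative only): the augmentors operate on HWC uint8 RGB arrays and keep
# the input shape. Note that build_augmentor() currently only accepts method=None, so the
# classes are instantiated directly here.
def _demo_augmentors():
    import numpy as np  # local import; this module does not import numpy at the top
    image = np.random.randint(0, 256, (480, 640, 3), dtype=np.uint8)  # dummy RGB image
    dark, mobile = DarkAug(), MobileAug()
    return dark(image).shape, mobile(image).shape  # both remain (480, 640, 3)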
if __name__ == '__main__':
augmentor = build_augmentor('FDA')
| 1,578 | 27.196429 | 105 | py |
3DG-STFM | 3DG-STFM-master/src/utils/dataset.py | import io
from loguru import logger
import cv2
import numpy as np
import h5py
import torch
from numpy.linalg import inv
import os
try:
# for internel use only
from .client import MEGADEPTH_CLIENT, SCANNET_CLIENT
except Exception:
MEGADEPTH_CLIENT = SCANNET_CLIENT = None
# --- DATA IO ---
def load_array_from_s3(
path, client, cv_type,
use_h5py=False,
):
byte_str = client.Get(path)
try:
if not use_h5py:
            raw_array = np.frombuffer(byte_str, np.uint8)
data = cv2.imdecode(raw_array, cv_type)
else:
f = io.BytesIO(byte_str)
data = np.array(h5py.File(f, 'r')['/depth'])
except Exception as ex:
print(f"==> Data loading failure: {path}")
raise ex
assert data is not None
return data
def imread_gray(path, augment_fn=None, client=SCANNET_CLIENT):
cv_type = cv2.IMREAD_GRAYSCALE if augment_fn is None \
else cv2.IMREAD_COLOR
if str(path).startswith('s3://'):
image = load_array_from_s3(str(path), client, cv_type)
else:
image = cv2.imread(str(path), cv_type)
#image =image*12.
#image=image/float(image.max())
#image*=255.
if augment_fn is not None:
image = cv2.imread(str(path), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = augment_fn(image)
image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
return image # (h, w)
def get_resized_wh(w, h, resize=None):
if resize is not None: # resize the longer edge
scale = resize / max(h, w)
w_new, h_new = int(round(w*scale)), int(round(h*scale))
else:
w_new, h_new = w, h
return w_new, h_new
def get_divisible_wh(w, h, df=None):
if df is not None:
w_new, h_new = map(lambda x: int(x // df * df), [w, h])
else:
w_new, h_new = w, h
return w_new, h_new
def pad_bottom_right(inp, pad_size, ret_mask=False):
assert isinstance(pad_size, int) and pad_size >= max(inp.shape[-2:]), f"{pad_size} < {max(inp.shape[-2:])}"
mask = None
if inp.ndim == 2:
padded = np.zeros((pad_size, pad_size), dtype=inp.dtype)
padded[:inp.shape[0], :inp.shape[1]] = inp
if ret_mask:
mask = np.zeros((pad_size, pad_size), dtype=bool)
mask[:inp.shape[0], :inp.shape[1]] = True
elif inp.ndim == 3:
padded = np.zeros((inp.shape[0], pad_size, pad_size), dtype=inp.dtype)
padded[:, :inp.shape[1], :inp.shape[2]] = inp
if ret_mask:
mask = np.zeros((inp.shape[0], pad_size, pad_size), dtype=bool)
mask[:, :inp.shape[1], :inp.shape[2]] = True
else:
raise NotImplementedError()
return padded, mask
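# Usage sketch (illustrative only): pad a small map to a square canvas and obtain the
# validity mask that marks the original (un-padded) region.
def _demo_pad_bottom_right():
    dummy = np.ones((3, 5), dtype=np.float32)  # dummy 3x5 map
    padded, mask = pad_bottom_right(dummy, 8, ret_mask=True)
    assert padded.shape == (8, 8) and mask.shape == (8, 8)
    assert mask[:3, :5].all() and not mask[3:, :].any()
    return padded, mask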
# --- MEGADEPTH ---
def read_megadepth_gray(path, resize=None, df=None, padding=False, augment_fn=None):
"""
Args:
resize (int, optional): the longer edge of resized images. None for no resize.
padding (bool): If set to 'True', zero-pad resized images to squared size.
augment_fn (callable, optional): augments images with pre-defined visual effects
Returns:
image (torch.tensor): (1, h, w)
mask (torch.tensor): (h, w)
scale (torch.tensor): [w/w_new, h/h_new]
"""
# read image
image = imread_gray(path, augment_fn, client=MEGADEPTH_CLIENT)
# resize image
w, h = image.shape[1], image.shape[0]
w_new, h_new = get_resized_wh(w, h, resize)
w_new, h_new = get_divisible_wh(w_new, h_new, df)
image = cv2.resize(image, (w_new, h_new))
scale = torch.tensor([w/w_new, h/h_new], dtype=torch.float)
if padding: # padding
pad_to = max(h_new, w_new)
image, mask = pad_bottom_right(image, pad_to, ret_mask=True)
else:
mask = None
image = torch.from_numpy(image).float()[None] / 255 # (h, w) -> (1, h, w) and normalized
    if mask is not None:
        mask = torch.from_numpy(mask)
return image, mask, scale
def read_megadepth_rgb(path, resize=None, df=None, padding=False, augment_fn=None):
"""
Args:
resize (int, optional): the longer edge of resized images. None for no resize.
padding (bool): If set to 'True', zero-pad resized images to squared size.
augment_fn (callable, optional): augments images with pre-defined visual effects
Returns:
image (torch.tensor): (1, h, w)
mask (torch.tensor): (h, w)
scale (torch.tensor): [w/w_new, h/h_new]
"""
image = cv2.imread(str(path), cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#image = cv2.resize(image, resize)
image = np.ascontiguousarray(image)
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
image= image.astype(float)
image[:, :, 0] = (image[:, :, 0]/255.-mean[0]) / std[0]
image[:, :, 1] = (image[:, :, 1]/255.-mean[1]) / std[1]
image[:, :, 2] = (image[:, :, 2]/255.-mean[2]) / std[2]
# (h, w) -> (1, h, w) and normalized
#image = torch.from_numpy(image).float()[None]
#return image
# read image
#image = imread_gray(path, augment_fn, client=MEGADEPTH_CLIENT)
# resize image
w, h = image.shape[1], image.shape[0]
w_new, h_new = get_resized_wh(w, h, resize)
w_new, h_new = get_divisible_wh(w_new, h_new, df)
image = cv2.resize(image, (w_new, h_new))
scale = torch.tensor([w/w_new, h/h_new], dtype=torch.float)
image = image.transpose(2, 0, 1)
if padding: # padding
pad_to = max(h_new, w_new)
image, mask = pad_bottom_right(image, pad_to, ret_mask=True)
else:
mask = None
image = torch.from_numpy(image).float() # (3, h, w) and normalized
    if mask is not None:
        mask = torch.from_numpy(mask)
return image, mask, scale
def read_megadepth_depth(path, pad_to=None):
if str(path).startswith('s3://'):
depth = load_array_from_s3(path, MEGADEPTH_CLIENT, None, use_h5py=True)
else:
depth = np.array(h5py.File(path, 'r')['depth'])
if pad_to is not None:
depth, _ = pad_bottom_right(depth, pad_to, ret_mask=False)
depth = torch.from_numpy(depth).float() # (h, w)
return depth
# --- ScanNet ---
def read_scannet_gray(path, resize=(640, 480), augment_fn=None):
"""
Args:
resize (tuple): align image to depthmap, in (w, h).
augment_fn (callable, optional): augments images with pre-defined visual effects
Returns:
image (torch.tensor): (1, h, w)
mask (torch.tensor): (h, w)
scale (torch.tensor): [w/w_new, h/h_new]
"""
# read and resize image
image = imread_gray(path, augment_fn)
image = cv2.resize(image, resize)
# (h, w) -> (1, h, w) and normalized
image = torch.from_numpy(image).float()[None] / 255
return image
def read_scannet_rgb(path, resize=(640, 480), augment_fn=None):
"""
Args:
resize (tuple): align image to depthmap, in (w, h).
augment_fn (callable, optional): augments images with pre-defined visual effects
Returns:
image (torch.tensor): (1, h, w)
mask (torch.tensor): (h, w)
scale (torch.tensor): [w/w_new, h/h_new]
"""
# read and resize image
image = cv2.imread(str(path), cv2.IMREAD_COLOR)
#if os.path.exists(str(path)):
# print('yes')
#if os.path.exists(path):
# print('no')
#print(str(path))
#print(image)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = cv2.resize(image, resize)
image = np.ascontiguousarray(image)
mean = (0.485, 0.456, 0.406)
std = (0.229, 0.224, 0.225)
image= image.astype(float)
image[:, :, 0] = (image[:, :, 0]/255.-mean[0]) / std[0]
image[:, :, 1] = (image[:, :, 1]/255.-mean[1]) / std[1]
image[:, :, 2] = (image[:, :, 2]/255.-mean[2]) / std[2]
# (h, w) -> (1, h, w) and normalized
image = torch.from_numpy(image).float()[None]
return image
def read_scannet_depth(path):
if str(path).startswith('s3://'):
depth = load_array_from_s3(str(path), SCANNET_CLIENT, cv2.IMREAD_UNCHANGED)
else:
depth = cv2.imread(str(path), cv2.IMREAD_UNCHANGED)
depth = depth / 1000
depth = torch.from_numpy(depth).float() # (h, w)
return depth
def read_scannet_pose(path):
""" Read ScanNet's Camera2World pose and transform it to World2Camera.
Returns:
pose_w2c (np.ndarray): (4, 4)
"""
cam2world = np.loadtxt(path, delimiter=' ')
world2cam = inv(cam2world)
return world2cam
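# Usage sketch (illustrative only, pose paths supplied by the caller): composing the
# cam0 -> cam1 relative pose from the two world-to-camera extrinsics returned above.
def _demo_scannet_relative_pose(pose_path0, pose_path1):
    T_w2c_0 = read_scannet_pose(pose_path0)  # (4, 4), world -> cam0
    T_w2c_1 = read_scannet_pose(pose_path1)  # (4, 4), world -> cam1
    T_0to1 = T_w2c_1 @ inv(T_w2c_0)          # cam0 -> cam1
    T_1to0 = inv(T_0to1)                     # cam1 -> cam0
    return T_0to1, T_1to0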
def read_scannet_intrinsic(path):
""" Read ScanNet's intrinsic matrix and return the 3x3 matrix.
"""
intrinsic = np.loadtxt(path, delimiter=' ')
return intrinsic[:-1, :-1]
| 8,671 | 31.479401 | 111 | py |
3DG-STFM | 3DG-STFM-master/src/utils/metrics.py | import torch
import cv2
import numpy as np
from collections import OrderedDict
from loguru import logger
from kornia.geometry.epipolar import numeric
from kornia.geometry.conversions import convert_points_to_homogeneous
import random
# --- METRICS ---
def relative_pose_error(T_0to1, R, t, ignore_gt_t_thr=0.0):
# angle error between 2 vectors
t_gt = T_0to1[:3, 3]
n = np.linalg.norm(t) * np.linalg.norm(t_gt)
t_err = np.rad2deg(np.arccos(np.clip(np.dot(t, t_gt) / n, -1.0, 1.0)))
t_err = np.minimum(t_err, 180 - t_err) # handle E ambiguity
if np.linalg.norm(t_gt) < ignore_gt_t_thr: # pure rotation is challenging
t_err = 0
# angle error between 2 rotation matrices
R_gt = T_0to1[:3, :3]
cos = (np.trace(np.dot(R.T, R_gt)) - 1) / 2
cos = np.clip(cos, -1., 1.) # handle numercial errors
R_err = np.rad2deg(np.abs(np.arccos(cos)))
return t_err, R_err
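# Sanity-check sketch (illustrative only): comparing a ground-truth pose against itself
# should give (near-)zero translation-angle and rotation errors.
def _demo_relative_pose_error():
    T_0to1 = np.eye(4)
    T_0to1[:3, 3] = [0.0, 0.0, 1.0]  # dummy 1 m forward translation
    t_err, R_err = relative_pose_error(T_0to1, T_0to1[:3, :3], T_0to1[:3, 3])
    assert t_err < 1e-3 and R_err < 1e-3
    return t_err, R_err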
def symmetric_epipolar_distance(pts0, pts1, E, K0, K1):
"""Squared symmetric epipolar distance.
This can be seen as a biased estimation of the reprojection error.
Args:
pts0 (torch.Tensor): [N, 2]
E (torch.Tensor): [3, 3]
"""
pts0 = (pts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]
pts1 = (pts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]
pts0 = convert_points_to_homogeneous(pts0)
pts1 = convert_points_to_homogeneous(pts1)
Ep0 = pts0 @ E.T # [N, 3]
p1Ep0 = torch.sum(pts1 * Ep0, -1) # [N,]
Etp1 = pts1 @ E # [N, 3]
d = p1Ep0**2 * (1.0 / (Ep0[:, 0]**2 + Ep0[:, 1]**2) + 1.0 / (Etp1[:, 0]**2 + Etp1[:, 1]**2)) # N
return d
def compute_symmetrical_epipolar_errors(data):
"""
Update:
data (dict):{"epi_errs": [M]}
"""
Tx = numeric.cross_product_matrix(data['T_0to1'][:, :3, 3])
E_mat = Tx @ data['T_0to1'][:, :3, :3]
m_bids = data['m_bids']
pts0 = data['mkpts0_f']
pts1 = data['mkpts1_f']
epi_errs = []
for bs in range(Tx.size(0)):
mask = m_bids == bs
epi_errs.append(
symmetric_epipolar_distance(pts0[mask], pts1[mask], E_mat[bs], data['K0'][bs], data['K1'][bs]))
epi_errs = torch.cat(epi_errs, dim=0)
data.update({'epi_errs': epi_errs})
def estimate_homo(kpts0, kpts1, M, thresh, conf=0.99999):
    """Estimate a planar homography between the matched keypoints with RANSAC.

    Note: this is a minimal sketch; the ground-truth homography `M` is kept in the
    signature for interface consistency but is not used by the estimation itself.
    """
    if len(kpts0) < 4:
        return None
    H, mask = cv2.findHomography(kpts0, kpts1, method=cv2.RANSAC,
                                 ransacReprojThreshold=thresh, confidence=conf)
    if H is None:
        print("\nH is None while trying to estimate the homography.\n")
        return None
    return H, mask.ravel() > 0
def estimate_pose(kpts0, kpts1, K0, K1, thresh, conf=0.99999):
if len(kpts0) < 5:
return None
# normalize keypoints
kpts0 = (kpts0 - K0[[0, 1], [2, 2]][None]) / K0[[0, 1], [0, 1]][None]
kpts1 = (kpts1 - K1[[0, 1], [2, 2]][None]) / K1[[0, 1], [0, 1]][None]
# normalize ransac threshold
ransac_thr = thresh / np.mean([K0[0, 0], K1[1, 1], K0[0, 0], K1[1, 1]])
# compute pose with cv2
E, mask = cv2.findEssentialMat(
kpts0, kpts1, np.eye(3), threshold=ransac_thr, prob=conf, method=cv2.RANSAC)
#E, mask = cv2.findEssentialMat(
# kpts0, kpts1, np.eye(3), prob=conf, method=None)
if E is None:
print("\nE is None while trying to recover pose.\n")
return None
# recover pose from E
best_num_inliers = 0
ret = None
for _E in np.split(E, len(E) / 3):
n, R, t, _ = cv2.recoverPose(_E, kpts0, kpts1, np.eye(3), 1e9, mask=mask)
if n > best_num_inliers:
ret = (R, t[:, 0], mask.ravel() > 0)
best_num_inliers = n
return ret
def compute_homo_errors(data, config):
"""
Update:
data (dict):{
"inliers" List[np.ndarray]: [N]
}
"""
pixel_thr = config.TRAINER.RANSAC_PIXEL_THR # 0.5
conf = config.TRAINER.RANSAC_CONF # 0.99999
data.update({'inliers': []})
data.update({'epi_errs': []})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu().numpy()
pts1 = data['mkpts1_f'].cpu().numpy()
M=data['M']
#print(data)
#K0 = data['K0'].cpu().numpy()
#K1 = data['K1'].cpu().numpy()
#T_0to1 = data['T_0to1'].cpu().numpy()
for bs in range(data['image0'].shape[0]):
mask = m_bids == bs
kpts0 = pts0[mask]
kpts1 = pts1[mask]
M_b = M[bs]
if kpts0.shape[0]==0:
            data['inliers'].append(np.array([]).astype(bool))
            data['epi_errs'].append(np.array([]).astype(bool))
else:
kpts0 = kpts0.reshape((1, -1, 2))
kpts0 = cv2.perspectiveTransform(kpts0, M_b.cpu().numpy())
inliers=0
epi_errs = []
for ii,cord in enumerate(kpts0[0]):
diff = cord-kpts1[ii]
if (diff[0]**2+diff[1]**2)<=4:
inliers+=1
epi_errs.append(np.sqrt(diff[0]**2+diff[1]**2))
data['epi_errs'].append(np.array(epi_errs))
data['inliers'].append(inliers)
def filter_based_on_depth(depth0,depth1,coordinates0,coordinates1,K0,K1,T_0to1):
coordinates0=coordinates0[None,...]
coordinates1 = coordinates1[None, ...]
coordinates0 =coordinates0.long()
coordinates1 =coordinates1.long()
kpts0_depth = torch.stack([depth0[coordinates0[0,:, 1], coordinates0[0,:, 0]]], dim=0)
nonzero_mask = (kpts0_depth != 0)*float('inf')
kpts0_h = torch.cat([coordinates0, torch.ones_like(coordinates0[:, :, [0]])], dim=-1) * kpts0_depth[
..., None] # (N, L, 3)
kpts0_cam = K0.inverse() @ kpts0_h.transpose(2, 1) # (N, 3, L)
# Rigid Transform
w_kpts0_cam = T_0to1[:3, :3] @ kpts0_cam + T_0to1[:3, [3]] # (N, 3, L)
w_kpts0_depth_computed = w_kpts0_cam[:, 2, :]
# Project
w_kpts0_h = (K1 @ w_kpts0_cam).transpose(2, 1) # (N, L, 3)
w_kpts0 = w_kpts0_h[:, :, :2] / (w_kpts0_h[:, :, [2]] + 1e-4) # (N, L, 2), +1e-4 to avoid zero depth
# Covisible Check
h, w = depth1.shape[0:2]
covisible_mask = (w_kpts0[:, :, 0] > 0) * (w_kpts0[:, :, 0] < w - 1) * \
(w_kpts0[:, :, 1] > 0) * (w_kpts0[:, :, 1] < h - 1)
w_kpts0_long = w_kpts0.long()
w_kpts0_long[~covisible_mask, :] = 0
w_kpts0_depth = torch.stack([depth1[coordinates1[0, :, 1], coordinates1[0, :, 0]]], dim=0)
# consistent_mask = ((w_kpts0_depth - w_kpts0_depth_computed) / w_kpts0_depth).abs() < 0.2
#diff = (abs(w_kpts0_depth - w_kpts0_depth_computed)/(w_kpts0_depth+1e-4))
diff = abs((w_kpts0_depth - w_kpts0_depth_computed)/(w_kpts0_depth+1e-4))
#diff *= nonzero_mask
indice = torch.where(diff>0.15)
#print(diff.size())
#print(len(indice[1]))
new_cor0 = coordinates0[indice[0],indice[1]]
new_cor1 = coordinates1[indice[0],indice[1]]
return indice[1]#new_cor0,new_cor1
def filter_depth_inconsist_point(data, config):
"""
Update:
data (dict):{
"R_errs" List[float]: [N]
"t_errs" List[float]: [N]
"inliers" List[np.ndarray]: [N]
}
"""
pixel_thr = config.TRAINER.RANSAC_PIXEL_THR # 0.5
conf = config.TRAINER.RANSAC_CONF # 0.99999
data.update({'R_errs': [], 't_errs': [], 'inliers': []})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu()#.numpy()
pts1 = data['mkpts1_f'].cpu()#.numpy()
depth0 = data['depth0'].cpu()#.numpy()
depth1 = data['depth1'].cpu()#.numpy()# shape (1,480,640)
K0 = data['K0'].cpu()#.numpy()
K1 = data['K1'].cpu()#.numpy()
T_0to1 = data['T_0to1'].cpu()#.numpy()
for bs in range(K0.shape[0]):
mask = m_bids == bs
#ret = estimate_pose(pts0[mask], pts1[mask], K0[bs], K1[bs], pixel_thr, conf=conf)
ind=filter_based_on_depth(depth0[bs],depth1[bs],pts0, pts1, K0[bs], K1[bs],T_0to1[bs])
m_bids_new = data['m_bids']
m_bids_new[ind]=-1
data.update({'m_bids': m_bids_new.cuda()})
#data.update({'mkpts0_f': new_cor0.cuda(), 'mkpts1_f': new_cor1.cuda(),'m_bids': m_bids_new.cuda()})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu().numpy()
mask = m_bids == bs
pts1 = data['mkpts1_f'].cpu().numpy()
K0 = data['K0'].cpu().numpy()
K1 = data['K1'].cpu().numpy()
T_0to1 = data['T_0to1'].cpu().numpy()
ret = estimate_pose(pts0[mask], pts1[mask], K0[bs], K1[bs], pixel_thr, conf=conf)
if ret is None:
data['R_errs'].append(np.inf)
data['t_errs'].append(np.inf)
            data['inliers'].append(np.array([]).astype(bool))
else:
R, t, inliers = ret
t_err, R_err = relative_pose_error(T_0to1[bs], R, t, ignore_gt_t_thr=0.0)
data['R_errs'].append(R_err)
data['t_errs'].append(t_err)
data['inliers'].append(inliers)
def filter_based_random_sample(depth0,depth1,pts0, pts1):
max_depth = depth0.max()
h, w = depth0.shape[0:2]
scale =8
h = h//8
w = w//8
uni_pb = 1./float(h*w*10000)
total = pts0.size(0)
rest = 1 - uni_pb*total
set_ind = np.arange(total+1)
pb_ind = [uni_pb]*total+[rest]
np.random.seed()
ind = np.random.choice(set_ind,size = (int(total/5)),replace=False, p = pb_ind)
dust_bin = np.where(ind==total)[0]
    try:
        ind = list(ind)
        ind.pop(dust_bin[0])  # drop the dust-bin draw if it was sampled
        return ind
    except IndexError:  # the dust bin was not sampled
        return ind
def filter_unsampled_point(data, config):
"""
Update:
data (dict):{
"R_errs" List[float]: [N]
"t_errs" List[float]: [N]
"inliers" List[np.ndarray]: [N]
}
"""
pixel_thr = config.TRAINER.RANSAC_PIXEL_THR # 0.5
conf = config.TRAINER.RANSAC_CONF # 0.99999
data.update({'R_errs': [], 't_errs': [], 'inliers': []})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu()#.numpy()
pts1 = data['mkpts1_f'].cpu()#.numpy()
depth0 = data['depth0'].cpu()#.numpy()
depth1 = data['depth1'].cpu()#.numpy()# shape (1,480,640)
K0 = data['K0'].cpu()#.numpy()
K1 = data['K1'].cpu()#.numpy()
T_0to1 = data['T_0to1'].cpu()#.numpy()
for bs in range(K0.shape[0]):
mask = m_bids == bs
#ret = estimate_pose(pts0[mask], pts1[mask], K0[bs], K1[bs], pixel_thr, conf=conf)
ind=filter_based_random_sample(depth0[bs],depth1[bs],pts0, pts1)
m_bids_new = data['m_bids']
m_bids_new[ind]=-1
data.update({'m_bids': m_bids_new.cuda()})
#data.update({'mkpts0_f': new_cor0.cuda(), 'mkpts1_f': new_cor1.cuda(),'m_bids': m_bids_new.cuda()})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu().numpy()
mask = m_bids == bs
pts1 = data['mkpts1_f'].cpu().numpy()
K0 = data['K0'].cpu().numpy()
K1 = data['K1'].cpu().numpy()
T_0to1 = data['T_0to1'].cpu().numpy()
ret = estimate_pose(pts0[mask], pts1[mask], K0[bs], K1[bs], pixel_thr, conf=conf)
if ret is None:
data['R_errs'].append(np.inf)
data['t_errs'].append(np.inf)
            data['inliers'].append(np.array([]).astype(bool))
else:
R, t, inliers = ret
t_err, R_err = relative_pose_error(T_0to1[bs], R, t, ignore_gt_t_thr=0.0)
data['R_errs'].append(R_err)
data['t_errs'].append(t_err)
data['inliers'].append(inliers)
def compute_pose_errors(data, config):
"""
Update:
data (dict):{
"R_errs" List[float]: [N]
"t_errs" List[float]: [N]
"inliers" List[np.ndarray]: [N]
}
"""
pixel_thr = config.TRAINER.RANSAC_PIXEL_THR # 0.5
conf = config.TRAINER.RANSAC_CONF # 0.99999
data.update({'R_errs': [], 't_errs': [], 'inliers': []})
m_bids = data['m_bids'].cpu().numpy()
pts0 = data['mkpts0_f'].cpu().numpy()
pts1 = data['mkpts1_f'].cpu().numpy()
K0 = data['K0'].cpu().numpy()
K1 = data['K1'].cpu().numpy()
T_0to1 = data['T_0to1'].cpu().numpy()
for bs in range(K0.shape[0]):
mask = m_bids == bs
ret = estimate_pose(pts0[mask], pts1[mask], K0[bs], K1[bs], pixel_thr, conf=conf)
if ret is None:
data['R_errs'].append(np.inf)
data['t_errs'].append(np.inf)
            data['inliers'].append(np.array([]).astype(bool))
else:
R, t, inliers = ret
t_err, R_err = relative_pose_error(T_0to1[bs], R, t, ignore_gt_t_thr=0.0)
data['R_errs'].append(R_err)
data['t_errs'].append(t_err)
data['inliers'].append(inliers)
# --- METRIC AGGREGATION ---
def error_auc(errors, thresholds):
"""
Args:
errors (list): [N,]
thresholds (list)
"""
errors = [0] + sorted(list(errors))
recall = list(np.linspace(0, 1, len(errors)))
aucs = []
thresholds = [5, 10, 20]
for thr in thresholds:
last_index = np.searchsorted(errors, thr)
y = recall[:last_index] + [recall[last_index-1]]
x = errors[:last_index] + [thr]
aucs.append(np.trapz(y, x) / thr)
return {f'auc@{t}': auc for t, auc in zip(thresholds, aucs)}
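# Usage sketch (illustrative only): AUC of the pose-error CDF at the 5/10/20-degree thresholds
# used throughout this file.
def _demo_error_auc():
    pose_errors = [0.5, 1.0, 2.0, 4.0, 15.0, 90.0]  # dummy per-pair max(R_err, t_err) in degrees
    aucs = error_auc(pose_errors, thresholds=[5, 10, 20])
    assert set(aucs) == {'auc@5', 'auc@10', 'auc@20'}  # each value lies in [0, 1]
    return aucs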
def epidist_prec(errors, thresholds, ret_dict=False):
precs = []
for thr in thresholds:
prec_ = []
for errs in errors:
correct_mask = errs < thr
prec_.append(np.mean(correct_mask) if len(correct_mask) > 0 else 0)
precs.append(np.mean(prec_) if len(prec_) > 0 else 0)
if ret_dict:
return {f'prec@{t:.0e}': prec for t, prec in zip(thresholds, precs)}
else:
return precs
def aggregate_metrics(metrics, epi_err_thr=5e-4):
""" Aggregate metrics for the whole dataset:
(This method should be called once per dataset)
1. AUC of the pose error (angular) at the threshold [5, 10, 20]
2. Mean matching precision at the threshold 5e-4(ScanNet), 1e-4(MegaDepth)
"""
# filter duplicates
unq_ids = OrderedDict((iden, id) for id, iden in enumerate(metrics['identifiers']))
unq_ids = list(unq_ids.values())
logger.info(f'Aggregating metrics over {len(unq_ids)} unique items...')
# pose auc
angular_thresholds = [5, 10, 20]
pose_errors = np.max(np.stack([metrics['R_errs'], metrics['t_errs']]), axis=0)[unq_ids]
aucs = error_auc(pose_errors, angular_thresholds) # (auc@5, auc@10, auc@20)
# matching precision
dist_thresholds = [epi_err_thr]
precs = epidist_prec(np.array(metrics['epi_errs'], dtype=object)[unq_ids], dist_thresholds, True) # (prec@err_thr)
return {**aucs, **precs}
def aggregate_metrics_homo(metrics, epi_err_thr=5e-4):
""" Aggregate metrics for the whole dataset:
(This method should be called once per dataset)
1. AUC of the pose error (angular) at the threshold [5, 10, 20]
2. Mean matching precision at the threshold 5e-4(ScanNet), 1e-4(MegaDepth)
"""
# filter duplicates
#unq_ids = OrderedDict((iden, id) for id, iden in enumerate(metrics['identifiers']))
#unq_ids = list(unq_ids.values())
#logger.info(f'Aggregating metrics over {len(unq_ids)} unique items...')
# pose auc
#angular_thresholds = [5, 10, 20]
#pose_errors = np.max(np.stack([metrics['R_errs'], metrics['t_errs']]), axis=0)#[unq_ids]
#aucs = error_auc(pose_errors, angular_thresholds) # (auc@5, auc@10, auc@20)
# matching precision
dist_thresholds = [epi_err_thr]
precs = epidist_prec(np.array(metrics['epi_errs'], dtype=object), dist_thresholds, True) # (prec@err_thr)
    return {**precs}
 | 16,290 | 35.042035 | 119 | py |
3DG-STFM | 3DG-STFM-master/src/utils/profiler.py | import torch
from pytorch_lightning.profiler import SimpleProfiler, PassThroughProfiler
from contextlib import contextmanager
from pytorch_lightning.utilities import rank_zero_only
class InferenceProfiler(SimpleProfiler):
"""
This profiler records duration of actions with cuda.synchronize()
Use this in test time.
"""
def __init__(self):
super().__init__()
self.start = rank_zero_only(self.start)
self.stop = rank_zero_only(self.stop)
self.summary = rank_zero_only(self.summary)
@contextmanager
def profile(self, action_name: str) -> None:
try:
torch.cuda.synchronize()
self.start(action_name)
yield action_name
finally:
torch.cuda.synchronize()
self.stop(action_name)
def build_profiler(name):
if name == 'inference':
return InferenceProfiler()
elif name == 'pytorch':
from pytorch_lightning.profiler import PyTorchProfiler
return PyTorchProfiler(use_cuda=True, profile_memory=True, row_limit=100)
elif name is None:
return PassThroughProfiler()
else:
raise ValueError(f'Invalid profiler: {name}')
| 1,199 | 29 | 81 | py |
3DG-STFM | 3DG-STFM-master/src/losses/loftr_loss.py | from loguru import logger
import torch
import torch.nn as nn
import torch.nn.functional as F
class LoFTRLoss(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config # config under the global namespace
self.loss_config = config['loftr']['loss']
self.match_type = self.config['loftr']['match_coarse']['match_type']
self.sparse_spvs = self.config['loftr']['match_coarse']['sparse_spvs']
# coarse-level
self.correct_thr = self.loss_config['fine_correct_thr']
self.c_pos_w = self.loss_config['pos_weight']
self.c_neg_w = self.loss_config['neg_weight']
# fine-level
self.fine_type = self.loss_config['fine_type']
def compute_coarse_loss(self, conf, conf_gt, weight=None):
""" Point-wise CE / Focal Loss with 0 / 1 confidence as gt.
Args:
conf (torch.Tensor): (N, HW0, HW1) / (N, HW0+1, HW1+1)
conf_gt (torch.Tensor): (N, HW0, HW1)
weight (torch.Tensor): (N, HW0, HW1)
"""
pos_mask, neg_mask = conf_gt == 1, conf_gt == 0
c_pos_w, c_neg_w = self.c_pos_w, self.c_neg_w
# corner case: no gt coarse-level match at all
if not pos_mask.any(): # assign a wrong gt
pos_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
c_pos_w = 0.
if not neg_mask.any():
neg_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
c_neg_w = 0.
if self.loss_config['coarse_type'] == 'cross_entropy':
assert not self.sparse_spvs, 'Sparse Supervision for cross-entropy not implemented!'
conf = torch.clamp(conf, 1e-6, 1-1e-6)
loss_pos = - torch.log(conf[pos_mask])
loss_neg = - torch.log(1 - conf[neg_mask])
if weight is not None:
loss_pos = loss_pos * weight[pos_mask]
loss_neg = loss_neg * weight[neg_mask]
return c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean()
elif self.loss_config['coarse_type'] == 'focal':
conf = torch.clamp(conf, 1e-6, 1-1e-6)
alpha = self.loss_config['focal_alpha']
gamma = self.loss_config['focal_gamma']
if self.sparse_spvs:
pos_conf = conf[:, :-1, :-1][pos_mask] \
if self.match_type == 'sinkhorn' \
else conf[pos_mask]
loss_pos = - alpha * torch.pow(1 - pos_conf, gamma) * pos_conf.log()
# calculate losses for negative samples
if self.match_type == 'sinkhorn':
neg0, neg1 = conf_gt.sum(-1) == 0, conf_gt.sum(1) == 0
neg_conf = torch.cat([conf[:, :-1, -1][neg0], conf[:, -1, :-1][neg1]], 0)
loss_neg = - alpha * torch.pow(1 - neg_conf, gamma) * neg_conf.log()
else:
                    # There is no dustbin for dual_softmax, so we leave unmatchable patches without supervision.
                    # we could also add 'pseudo negative-samples'
pass
# handle loss weights
if weight is not None:
# Different from dense-spvs, the loss w.r.t. padded regions aren't directly zeroed out,
# but only through manually setting corresponding regions in sim_matrix to '-inf'.
loss_pos = loss_pos * weight[pos_mask]
if self.match_type == 'sinkhorn':
neg_w0 = (weight.sum(-1) != 0)[neg0]
neg_w1 = (weight.sum(1) != 0)[neg1]
neg_mask = torch.cat([neg_w0, neg_w1], 0)
loss_neg = loss_neg[neg_mask]
loss = c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean() \
if self.match_type == 'sinkhorn' \
else c_pos_w * loss_pos.mean()
return loss
                # positive and negative elements occupy similar proportions. => more balanced loss weights needed
else: # dense supervision (in the case of match_type=='sinkhorn', the dustbin is not supervised.)
loss_pos = - alpha * torch.pow(1 - conf[pos_mask], gamma) * (conf[pos_mask]).log()
loss_neg = - alpha * torch.pow(conf[neg_mask], gamma) * (1 - conf[neg_mask]).log()
if weight is not None:
loss_pos = loss_pos * weight[pos_mask]
loss_neg = loss_neg * weight[neg_mask]
return c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean()
                # each negative element occupies a smaller proportion than the positive elements. => higher negative loss weight needed
else:
raise ValueError('Unknown coarse loss: {type}'.format(type=self.loss_config['coarse_type']))
def compute_fine_loss(self, expec_f, expec_f_gt):
if self.fine_type == 'l2_with_std':
return self._compute_fine_loss_l2_std(expec_f, expec_f_gt)
elif self.fine_type == 'l2':
return self._compute_fine_loss_l2(expec_f, expec_f_gt)
else:
raise NotImplementedError()
def _compute_fine_loss_l2(self, expec_f, expec_f_gt):
"""
Args:
expec_f (torch.Tensor): [M, 2] <x, y>
expec_f_gt (torch.Tensor): [M, 2] <x, y>
"""
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
if correct_mask.sum() == 0:
            if self.training:  # this seldom happens during training, since we pad predictions with gt
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
else:
return None
offset_l2 = ((expec_f_gt[correct_mask] - expec_f[correct_mask]) ** 2).sum(-1)
return offset_l2.mean()
def _compute_fine_loss_l2_std(self, expec_f, expec_f_gt):
"""
Args:
expec_f (torch.Tensor): [M, 3] <x, y, std>
expec_f_gt (torch.Tensor): [M, 2] <x, y>
"""
# correct_mask tells you which pair to compute fine-loss
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
# use std as weight that measures uncertainty
std = expec_f[:, 2]
inverse_std = 1. / torch.clamp(std, min=1e-10)
        weight = (inverse_std / torch.mean(inverse_std)).detach()  # avoid minimizing the loss by increasing std
# corner case: no correct coarse match found
if not correct_mask.any():
            if self.training:  # this seldom happens during training, since we pad predictions with gt
                # sometimes there is no coarse-level gt at all.
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
weight[0] = 0.
else:
return None
# l2 loss with std
offset_l2 = ((expec_f_gt[correct_mask] - expec_f[correct_mask, :2]) ** 2).sum(-1)
loss = (offset_l2 * weight[correct_mask]).mean()
return loss
@torch.no_grad()
def compute_c_weight(self, data):
""" compute element-wise weights for computing coarse-level loss. """
if 'mask0' in data:
c_weight = (data['mask0'].flatten(-2)[..., None] * data['mask1'].flatten(-2)[:, None]).float()
else:
c_weight = None
return c_weight
def forward(self, data):
"""
Update:
data (dict): update{
'loss': [1] the reduced loss across a batch,
'loss_scalars' (dict): loss scalars for tensorboard_record
}
"""
loss_scalars = {}
# 0. compute element-wise loss weight
c_weight = self.compute_c_weight(data)
# 1. coarse-level loss
loss_c = self.compute_coarse_loss(
data['conf_matrix_with_bin'] if self.sparse_spvs and self.match_type == 'sinkhorn' \
else data['conf_matrix'],
data['conf_matrix_gt'],
weight=c_weight)
loss = loss_c * self.loss_config['coarse_weight']
loss_scalars.update({"loss_c": loss_c.clone().detach().cpu()})
# 2. fine-level loss
loss_f = self.compute_fine_loss(data['expec_f'], data['expec_f_gt'])
if loss_f is not None:
loss += loss_f * self.loss_config['fine_weight']
loss_scalars.update({"loss_f": loss_f.clone().detach().cpu()})
else:
assert self.training is False
loss_scalars.update({'loss_f': torch.tensor(1.)}) # 1 is the upper bound
loss_scalars.update({'loss': loss.clone().detach().cpu()})
data.update({"loss": loss, "loss_scalars": loss_scalars})
class LoFTRLoss_t_s(nn.Module):
## Student teacher learning loss
def __init__(self, config):
super().__init__()
self.config = config # config under the global namespace
self.loss_config = config['loftr']['loss']
self.match_type = self.config['loftr']['match_coarse']['match_type']
self.sparse_spvs = self.config['loftr']['match_coarse']['sparse_spvs']
# coarse-level
self.correct_thr = self.loss_config['fine_correct_thr']
self.c_pos_w = self.loss_config['pos_weight']
self.c_neg_w = self.loss_config['neg_weight']
# fine-level
self.fine_type = self.loss_config['fine_type']
def compute_coarse_loss(self, conf, conf_gt, sim_s,sim_t,weight=None):
""" Point-wise CE / Focal Loss with 0 / 1 confidence as gt.
Args:
conf (torch.Tensor): (N, HW0, HW1) / (N, HW0+1, HW1+1)
conf_gt (torch.Tensor): (N, HW0, HW1)
weight (torch.Tensor): (N, HW0, HW1)
"""
T =2.
loss_fea = nn.KLDivLoss(reduction='none')
N = sim_s.size(0)
c_loss1 = loss_fea(F.log_softmax(sim_s / T, dim=2), F.softmax(sim_t / T, dim=2)) * T * T
c_loss2 = loss_fea(F.log_softmax(sim_s / T, dim=1), F.softmax(sim_t / T, dim=1)) * T * T
pos_mask, neg_mask = conf_gt == 1, conf_gt == 0
c_pos_w, c_neg_w = self.c_pos_w, self.c_neg_w
# corner case: no gt coarse-level match at all
if not pos_mask.any(): # assign a wrong gt
pos_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
c_pos_w = 0.
if not neg_mask.any():
neg_mask[0, 0, 0] = True
if weight is not None:
weight[0, 0, 0] = 0.
c_neg_w = 0.
gamma = self.loss_config['focal_gamma']
alpha = self.loss_config['focal_alpha']
loss_pos_1 = torch.pow(1 - conf[pos_mask], gamma) * c_loss1[pos_mask]
loss_neg_1 = torch.pow(conf[neg_mask], gamma) * (1 - c_loss1[neg_mask])
loss_pos_2 = torch.pow(1 - conf[pos_mask], gamma) * c_loss2[pos_mask]
loss_neg_2 = torch.pow(conf[neg_mask], gamma) * (1 - c_loss2[neg_mask])
        c_loss_kd = (loss_pos_1.mean() + loss_neg_1.mean() + loss_pos_2.mean() + loss_neg_2.mean()) / 2. * alpha * 16.  # hard-coded: alpha = 0.25, total weight = 0.25 * 16 = 4; to be made configurable in the future
if self.loss_config['coarse_type'] == 'cross_entropy':
assert not self.sparse_spvs, 'Sparse Supervision for cross-entropy not implemented!'
conf = torch.clamp(conf, 1e-6, 1 - 1e-6)
loss_pos = - torch.log(conf[pos_mask])
loss_neg = - torch.log(1 - conf[neg_mask])
if weight is not None:
loss_pos = loss_pos * weight[pos_mask]
loss_neg = loss_neg * weight[neg_mask]
return (c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean())*0.5+0.5*c_loss_kd
elif self.loss_config['coarse_type'] == 'focal':
conf = torch.clamp(conf, 1e-6, 1 - 1e-6)
alpha = self.loss_config['focal_alpha']
gamma = self.loss_config['focal_gamma']
if self.sparse_spvs:
pos_conf = conf[:, :-1, :-1][pos_mask] \
if self.match_type == 'sinkhorn' \
else conf[pos_mask]
loss_pos = - alpha * torch.pow(1 - pos_conf, gamma) * pos_conf.log()
# calculate losses for negative samples
if self.match_type == 'sinkhorn':
neg0, neg1 = conf_gt.sum(-1) == 0, conf_gt.sum(1) == 0
neg_conf = torch.cat([conf[:, :-1, -1][neg0], conf[:, -1, :-1][neg1]], 0)
loss_neg = - alpha * torch.pow(1 - neg_conf, gamma) * neg_conf.log()
else:
                    # There is no dustbin for dual_softmax, so we leave unmatchable patches without supervision.
                    # we could also add 'pseudo negative-samples'
pass
# handle loss weights
if weight is not None:
# Different from dense-spvs, the loss w.r.t. padded regions aren't directly zeroed out,
# but only through manually setting corresponding regions in sim_matrix to '-inf'.
loss_pos = loss_pos * weight[pos_mask]
if self.match_type == 'sinkhorn':
neg_w0 = (weight.sum(-1) != 0)[neg0]
neg_w1 = (weight.sum(1) != 0)[neg1]
neg_mask = torch.cat([neg_w0, neg_w1], 0)
loss_neg = loss_neg[neg_mask]
loss = c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean() \
if self.match_type == 'sinkhorn' \
else c_pos_w * loss_pos.mean()
return loss*0.5+0.5*c_loss_kd
                # positive and negative elements occupy similar proportions. => more balanced loss weights needed
else: # dense supervision (in the case of match_type=='sinkhorn', the dustbin is not supervised.)
loss_pos = - alpha * torch.pow(1 - conf[pos_mask], gamma) * (conf[pos_mask]).log()
loss_neg = - alpha * torch.pow(conf[neg_mask], gamma) * (1 - conf[neg_mask]).log()
if weight is not None:
loss_pos = loss_pos * weight[pos_mask]
loss_neg = loss_neg * weight[neg_mask]
return (c_pos_w * loss_pos.mean() + c_neg_w * loss_neg.mean()) * 0.5 + 0.5 * c_loss_kd
else:
raise ValueError('Unknown coarse loss: {type}'.format(type=self.loss_config['coarse_type']))
def compute_fine_loss(self, expec_f, expec_f_gt):
if self.fine_type == 'l2_with_std':
return self._compute_fine_loss_l2_std(expec_f, expec_f_gt)
elif self.fine_type == 'l2':
return self._compute_fine_loss_l2(expec_f, expec_f_gt)
else:
raise NotImplementedError()
def _compute_fine_loss_l2(self, expec_f, expec_f_gt):
"""
Args:
expec_f (torch.Tensor): [M, 2] <x, y>
expec_f_gt (torch.Tensor): [M, 2] <x, y>
"""
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
if correct_mask.sum() == 0:
            if self.training:  # this seldom happens during training, since we pad predictions with gt
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
else:
return None
offset_l2 = ((expec_f_gt[correct_mask] - expec_f[correct_mask]) ** 2).sum(-1)
return offset_l2.mean()
def _compute_fine_loss_t_s_l2(self, expec_f, expec_f_gt,expec_f_t):
"""
Args:
expec_f (torch.Tensor): [M, 2] <x, y>
expec_f_gt (torch.Tensor): [M, 2] <x, y>
"""
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
if correct_mask.sum() == 0:
            if self.training:  # this seldom happens during training, since we pad predictions with gt
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
else:
return None
offset_l2 = ((expec_f_t[correct_mask,:2] - expec_f[correct_mask,:2]) ** 2).sum(-1)
return offset_l2.mean()
def _compute_fine_loss_t_s_kld(self, expec_f, expec_f_gt,expec_f_t):
"""
Attentive loss
"""
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
if correct_mask.sum() == 0:
            if self.training:  # this seldom happens during training, since we pad predictions with gt
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
else:
return None
# use std as weight that measures uncertainty
std1 = expec_f[correct_mask, 2].detach()
std2 = expec_f_t[correct_mask, 2].detach()
tmp =((expec_f_t[correct_mask,:2] - expec_f[correct_mask,:2]) ** 2).sum(-1)
        # per-match KL(N(mu_s, std1^2) || N(mu_t, std2^2)), with tmp = ||mu_s - mu_t||^2
        loss = (torch.log(std2) - torch.log(std1)) + (std1 ** 2 + tmp) / (2. * std2 ** 2) - 0.5
loss = loss.mean()
return loss
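    # Reference sketch (illustrative only): closed-form KL divergence between two univariate
    # Gaussians, KL(N(mu1, s1^2) || N(mu2, s2^2)) = log(s2/s1) + (s1^2 + (mu1-mu2)^2) / (2*s2^2) - 1/2,
    # given here for comparison with the attentive fine-level loss above.
    @staticmethod
    def _gaussian_kld_1d(mu1, s1, mu2, s2):
        return torch.log(s2 / s1) + (s1 ** 2 + (mu1 - mu2) ** 2) / (2. * s2 ** 2) - 0.5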
def _compute_fine_loss_l2_std(self, expec_f, expec_f_gt):
"""
Args:
expec_f (torch.Tensor): [M, 3] <x, y, std>
expec_f_gt (torch.Tensor): [M, 2] <x, y>
"""
# correct_mask tells you which pair to compute fine-loss
correct_mask = torch.linalg.norm(expec_f_gt, ord=float('inf'), dim=1) < self.correct_thr
# use std as weight that measures uncertainty
std = expec_f[:, 2]
inverse_std = 1. / torch.clamp(std, min=1e-10)
        weight = (inverse_std / torch.mean(inverse_std)).detach()  # avoid minimizing the loss by increasing std
# corner case: no correct coarse match found
if not correct_mask.any():
            if self.training:  # this seldom happens during training, since we pad predictions with gt
                # sometimes there is no coarse-level gt at all.
logger.warning("assign a false supervision to avoid ddp deadlock")
correct_mask[0] = True
weight[0] = 0.
else:
return None
# l2 loss with std
offset_l2 = ((expec_f_gt[correct_mask] - expec_f[correct_mask, :2]) ** 2).sum(-1)
loss = (offset_l2 * weight[correct_mask]).mean()
return loss
@torch.no_grad()
def compute_c_weight(self, data):
""" compute element-wise weights for computing coarse-level loss. """
if 'mask0' in data:
c_weight = (data['mask0'].flatten(-2)[..., None] * data['mask1'].flatten(-2)[:, None]).float()
else:
c_weight = None
return c_weight
def forward(self, data):
"""
Update:
data (dict): update{
'loss': [1] the reduced loss across a batch,
'loss_scalars' (dict): loss scalars for tensorboard_record
}
"""
loss_scalars = {}
# 0. compute element-wise loss weight
c_weight = self.compute_c_weight(data)
# 1. coarse-level loss
loss_c = self.compute_coarse_loss(data['conf_matrix'],data['conf_matrix_gt'],data['sim_matrix'],
data['teacher_matrix'],weight=c_weight)
loss = loss_c * self.loss_config['coarse_weight']
loss_scalars.update({"loss_c": loss_c.clone().detach().cpu()})
# 2. fine-level loss
loss_f= self.compute_fine_loss(data['expec_f'], data['expec_f_gt'])
loss_f_2 = self._compute_fine_loss_t_s_kld(data['expec_f'], data['expec_f_gt'], data['expec_f_t'])
if loss_f is not None and loss_f_2 is not None:
loss_f =loss_f*0.5+0.5*loss_f_2
loss += loss_f * self.loss_config['fine_weight']
loss_scalars.update({"loss_f": loss_f.clone().detach().cpu()})
else:
assert self.training is False
loss_scalars.update({'loss_f': torch.tensor(1.)}) # 1 is the upper bound
loss_scalars.update({'loss': loss.clone().detach().cpu()})
data.update({"loss": loss, "loss_scalars": loss_scalars})
| 20,436 | 46.30787 | 179 | py |
3DG-STFM | 3DG-STFM-master/configs/loftr/indoor/loftr_ds_dense.py | from src.config.default import _CN as cfg
cfg.LOFTR.MATCH_COARSE.MATCH_TYPE = 'dual_softmax'
cfg.LOFTR.MATCH_COARSE.SPARSE_SPVS = False
cfg.TRAINER.MSLR_MILESTONES = [3, 6, 9, 12, 17, 20, 23, 26, 29]
| 203 | 24.5 | 63 | py |
3DG-STFM | 3DG-STFM-master/configs/loftr/indoor/scannet/loftr_ds_eval.py | """ A config only for reproducing the ScanNet evaluation results.
We remove border matches by default, but the originally implemented
`remove_border()` has a bug: only two of the four borders are actually removed. However,
the [bug fix](https://github.com/zju3dv/LoFTR/commit/e9146c8144dea5f3cbdd98b225f3e147a171c216)
makes the ScanNet evaluation results worse (auc@10 = 40.8 => 39.5), which is likely
caused by tiny result fluctuations on a few image pairs. This config sets `BORDER_RM` to 0
to be consistent with the results in our paper.
"""
from src.config.default import _CN as cfg
cfg.LOFTR.MATCH_COARSE.MATCH_TYPE = 'dual_softmax'
cfg.LOFTR.MATCH_COARSE.BORDER_RM = 0
| 685 | 41.875 | 137 | py |
3DG-STFM | 3DG-STFM-master/configs/loftr/outdoor/loftr_ds_dense.py | from src.config.default import _CN as cfg
cfg.LOFTR.MATCH_COARSE.MATCH_TYPE = 'dual_softmax'
cfg.LOFTR.MATCH_COARSE.SPARSE_SPVS = False
cfg.TRAINER.CANONICAL_LR = 8e-3
cfg.TRAINER.WARMUP_STEP = 1875 # 3 epochs
cfg.TRAINER.WARMUP_RATIO = 0.1
cfg.TRAINER.MSLR_MILESTONES = [8, 12, 16, 20, 24]
# pose estimation
cfg.TRAINER.RANSAC_PIXEL_THR = 0.5
cfg.TRAINER.OPTIMIZER = "adamw"
cfg.TRAINER.ADAMW_DECAY = 0.1
cfg.LOFTR.MATCH_COARSE.TRAIN_COARSE_PERCENT = 0.3
| 461 | 26.176471 | 50 | py |
3DG-STFM | 3DG-STFM-master/configs/data/scannet_mini_trainval.py | from configs.data.base import cfg
TRAIN_BASE_PATH = "data/scannet_mini/index"
cfg.DATASET.TRAINVAL_DATA_SOURCE = "ScanNet"
cfg.DATASET.TRAIN_DATA_ROOT = "data/scannet_mini/train"
cfg.DATASET.TRAIN_NPZ_ROOT = f"{TRAIN_BASE_PATH}/scene_data/train"
cfg.DATASET.TRAIN_LIST_PATH = f"{TRAIN_BASE_PATH}/scene_data/train_list/scannet_all.txt"
cfg.DATASET.TRAIN_INTRINSIC_PATH = f"{TRAIN_BASE_PATH}/intrinsics.npz"
TEST_BASE_PATH = "inference/scannet_test_1500"
cfg.DATASET.TEST_DATA_SOURCE = "ScanNet"
cfg.DATASET.VAL_DATA_ROOT = cfg.DATASET.TEST_DATA_ROOT = "data/scannet/test"
cfg.DATASET.VAL_NPZ_ROOT = cfg.DATASET.TEST_NPZ_ROOT = TEST_BASE_PATH
cfg.DATASET.VAL_LIST_PATH = cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/scannet_test.txt"
cfg.DATASET.VAL_INTRINSIC_PATH = cfg.DATASET.TEST_INTRINSIC_PATH = f"{TEST_BASE_PATH}/intrinsics.npz"
cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0 # for both test and val
| 907 | 49.444444 | 101 | py |
3DG-STFM | 3DG-STFM-master/configs/data/base.py | """
The data config will be the last one merged into the main config.
Setups in data configs will override all existed setups!
"""
from yacs.config import CfgNode as CN
_CN = CN()
_CN.DATASET = CN()
_CN.TRAINER = CN()
# training data config
_CN.DATASET.TRAIN_DATA_ROOT = None
_CN.DATASET.TRAIN_POSE_ROOT = None
_CN.DATASET.TRAIN_NPZ_ROOT = None
_CN.DATASET.TRAIN_LIST_PATH = None
_CN.DATASET.TRAIN_INTRINSIC_PATH = None
# validation set config
_CN.DATASET.VAL_DATA_ROOT = None
_CN.DATASET.VAL_POSE_ROOT = None
_CN.DATASET.VAL_NPZ_ROOT = None
_CN.DATASET.VAL_LIST_PATH = None
_CN.DATASET.VAL_INTRINSIC_PATH = None
# testing data config
_CN.DATASET.TEST_DATA_ROOT = None
_CN.DATASET.TEST_POSE_ROOT = None
_CN.DATASET.TEST_NPZ_ROOT = None
_CN.DATASET.TEST_LIST_PATH = None
_CN.DATASET.TEST_INTRINSIC_PATH = None
# dataset config
_CN.DATASET.MIN_OVERLAP_SCORE_TRAIN = 0.4
_CN.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0 # for both test and val
cfg = _CN
| 949 | 25.388889 | 65 | py |
3DG-STFM | 3DG-STFM-master/configs/data/megadepth_trainval_640.py | from configs.data.base import cfg
TRAIN_BASE_PATH = "data/megadepth/index"
cfg.DATASET.TRAINVAL_DATA_SOURCE = "MegaDepth"
cfg.DATASET.TRAIN_DATA_ROOT = "data/megadepth/train"
cfg.DATASET.TRAIN_NPZ_ROOT = f"{TRAIN_BASE_PATH}/scene_info_0.1_0.7"
cfg.DATASET.TRAIN_LIST_PATH = f"{TRAIN_BASE_PATH}/trainvaltest_list/train_list.txt"
cfg.DATASET.MIN_OVERLAP_SCORE_TRAIN = 0.0
TEST_BASE_PATH = "data/megadepth/index"
cfg.DATASET.TEST_DATA_SOURCE = "MegaDepth"
cfg.DATASET.VAL_DATA_ROOT = cfg.DATASET.TEST_DATA_ROOT = "data/megadepth/test"
cfg.DATASET.VAL_NPZ_ROOT = cfg.DATASET.TEST_NPZ_ROOT = f"{TEST_BASE_PATH}/scene_info_val_1500"
cfg.DATASET.VAL_LIST_PATH = cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/trainvaltest_list/val_list.txt"
cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0 # for both test and val
# 368 scenes in total for MegaDepth
# (with difficulty balanced (further split each scene to 3 sub-scenes))
cfg.TRAINER.N_SAMPLES_PER_SUBSET = 100
cfg.DATASET.MGDPT_IMG_RESIZE = 640 # for training on 11GB mem GPUs
| 1,022 | 43.478261 | 107 | py |
3DG-STFM | 3DG-STFM-master/configs/data/scannet_test_1500.py | from configs.data.base import cfg
TEST_BASE_PATH = "inference/scannet_test_1500"
cfg.DATASET.TEST_DATA_SOURCE = "ScanNet"
cfg.DATASET.TEST_DATA_ROOT = "data/scannet/test"
cfg.DATASET.TEST_NPZ_ROOT = f"{TEST_BASE_PATH}"
cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/scannet_test.txt"
cfg.DATASET.TEST_INTRINSIC_PATH = f"{TEST_BASE_PATH}/intrinsics.npz"
cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.0
| 398 | 32.25 | 68 | py |
3DG-STFM | 3DG-STFM-master/configs/data/scannet_test_mini.py | from configs.data.base import cfg
TEST_BASE_PATH = "data/scannet_mini/index"
cfg.DATASET.TEST_DATA_SOURCE = "ScanNet"
cfg.DATASET.TEST_DATA_ROOT = "data/scannet_mini/train"
cfg.DATASET.TEST_NPZ_ROOT = f"{TEST_BASE_PATH}/scene_data/train"
cfg.DATASET.TEST_LIST_PATH = f"{TEST_BASE_PATH}/scene_data/train_list/scannet_all.txt"
cfg.DATASET.TEST_INTRINSIC_PATH = f"{TEST_BASE_PATH}/intrinsics.npz"
cfg.DATASET.MIN_OVERLAP_SCORE_TEST = 0.4 | 437 | 38.818182 | 86 | py |