# File: matscipy-master/tests/test_eam_calculator_forces_and_hessian.py

#
# Copyright 2020-2021 Lars Pastewka (U. Freiburg)
# 2020 Jan Griesser (U. Freiburg)
# 2019-2020 Wolfram G. Nöhring (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ======================================================================
# matscipy - Python materials science tools
# https://github.com/libAtoms/matscipy
#
# Copyright (2014) James Kermode, King's College London
# Lars Pastewka, Karlsruhe Institute of Technology
#
# ======================================================================
import unittest
import gzip
import numpy as np
import ase.io as io
import matscipytest
from matscipy.calculators.eam import EAM
from ase.phonons import Phonons
from ase.lattice.compounds import B1, B2, L1_0, L1_2
from ase.lattice.cubic import FaceCenteredCubic
from scipy.sparse import bsr_matrix
###
class TestEAMForcesHessian(matscipytest.MatSciPyTestCase):
    # Tolerances for the force and Hessian comparisons below
force_tolerance = 1e-3
hessian_tolerance = 1e-6
def test_forces_CuZr_glass(self):
"""Calculate interatomic forces in CuZr glass
Reference: tabulated forces from a calculation
        with Lammps (git version patch_29Mar2019-2-g585403d65)
The forces can be re-calculated using the following
Lammps commands:
units metal
atom_style atomic
boundary p p p
read_data CuZr_glass_460_atoms.lammps.data.gz
pair_style eam/alloy
pair_coeff * * ZrCu.onecolumn.eam.alloy Zr Cu
# The initial configuration is in equilibrium
# and the remaining forces are small
# Swap atom types to bring system out of
# equilibrium and create nonzero forces
group originally_Zr type 1
group originally_Cu type 2
set group originally_Zr type 2
set group originally_Cu type 1
run 0
write_dump all custom &
CuZr_glass_460_atoms_forces.lammps.dump.gz &
id type x y z fx fy fz &
modify sort id format float "%.14g"
"""
format = "lammps-dump" if "lammps-dump" in io.formats.all_formats.keys() else "lammps-dump-text"
atoms = io.read("CuZr_glass_460_atoms_forces.lammps.dump.gz", format=format)
old_atomic_numbers = atoms.get_atomic_numbers()
sel, = np.where(old_atomic_numbers == 1)
new_atomic_numbers = np.zeros_like(old_atomic_numbers)
new_atomic_numbers[sel] = 40 # Zr
sel, = np.where(old_atomic_numbers == 2)
new_atomic_numbers[sel] = 29 # Cu
atoms.set_atomic_numbers(new_atomic_numbers)
calculator = EAM('ZrCu.onecolumn.eam.alloy')
atoms.set_calculator(calculator)
atoms.pbc = [True, True, True]
forces = atoms.get_forces()
# Read tabulated forces and compare
with gzip.open("CuZr_glass_460_atoms_forces.lammps.dump.gz") as file:
for line in file:
if line.startswith(b"ITEM: ATOMS "): # ignore header
break
dump = np.loadtxt(file)
forces_dump = dump[:, 5:8]
self.assertArrayAlmostEqual(forces, forces_dump, tol=self.force_tolerance)
def test_hessian_monoatomic(self):
"""Calculate Hessian matrix of pure Cu
Reference: finite difference approximation of
Hessian from ASE
"""
def _test_for_size(size):
atoms = FaceCenteredCubic('Cu', size=size)
calculator = EAM('CuAg.eam.alloy')
self._test_hessian(atoms, calculator)
_test_for_size(size=[1, 1, 1])
_test_for_size(size=[2, 2, 2])
_test_for_size(size=[1, 4, 4])
_test_for_size(size=[4, 1, 4])
_test_for_size(size=[4, 4, 1])
_test_for_size(size=[4, 4, 4])
def test_hessian_monoatomic_with_duplicate_pairs(self):
"""Calculate Hessian matrix of pure Cu
In a small system, the same pair (i,j) will
appear multiple times in the neighbor list,
        with different pair distances.
Reference: finite difference approximation of
Hessian from ASE
"""
atoms = FaceCenteredCubic('Cu', size=[2, 2, 2])
calculator = EAM('CuAg.eam.alloy')
self._test_hessian(atoms, calculator)
def test_hessian_crystalline_alloy(self):
"""Calculate Hessian matrix of crystalline alloy
Reference: finite difference approximation of
Hessian from ASE
"""
calculator = EAM('ZrCu.onecolumn.eam.alloy')
lattice_size = [4, 4, 4]
# The lattice parameters are not correct, but that should be irrelevant
# CuZr3
atoms = L1_2(['Cu', 'Zr'], size=lattice_size, latticeconstant=4.0)
self._test_hessian(atoms, calculator)
# Cu3Zr
atoms = L1_2(['Zr', 'Cu'], size=lattice_size, latticeconstant=4.0)
self._test_hessian(atoms, calculator)
# CuZr
atoms = B2(['Zr', 'Cu'], size=lattice_size, latticeconstant=3.3)
self._test_hessian(atoms, calculator)
def test_hessian_amorphous_alloy(self):
"""Calculate Hessian matrix of amorphous alloy
Reference: finite difference approximation of
Hessian from ASE
"""
atoms = io.read('CuZr_glass_460_atoms.gz')
atoms.pbc = [True, True, True]
calculator = EAM('ZrCu.onecolumn.eam.alloy')
self._test_hessian(atoms, calculator)
def test_dynamical_matrix(self):
"""Test dynamical matrix construction
        To obtain the dynamical matrix, one can either divide by the
        masses immediately while constructing the matrix, or first form
        the complete Hessian and then divide by the masses. The former
        is implemented in the calculator; this test checks it against
        the latter.
"""
atoms = io.read('CuZr_glass_460_atoms.gz')
atoms.pbc = [True, True, True]
calculator = EAM('ZrCu.onecolumn.eam.alloy')
dynamical_matrix = calculator.calculate_hessian_matrix(
atoms, divide_by_masses=True
)
        # The second method requires a copy of the Hessian, since the
        # sparse matrix does not properly support the *= operator
hessian = calculator.calculate_hessian_matrix(atoms)
masses = atoms.get_masses()
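        # The Hessian is stored as a BSR matrix of 3x3 blocks, indexed per
        # atom: np.diff(indptr) counts the blocks in each block-row, so the
        # repeat below assigns every block the mass of its row atom, while
        # hessian.indices gives the column atom of each block. The transpose
        # trick then broadcasts 1/sqrt(m_i * m_j) onto each 3x3 block.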
mass_row = np.repeat(masses, np.diff(hessian.indptr))
mass_col = masses[hessian.indices]
inverse_mass = np.sqrt(mass_row * mass_col)**-1.0
blocks = (inverse_mass * np.ones((inverse_mass.size, 3, 3), dtype=inverse_mass.dtype).T).T
nat = len(atoms)
dynamical_matrix_ref = hessian.multiply(
bsr_matrix((blocks, hessian.indices, hessian.indptr), shape=(3*nat, 3*nat))
)
dynamical_matrix = dynamical_matrix.todense()
dynamical_matrix_ref = dynamical_matrix_ref.todense()
self.assertArrayAlmostEqual(
dynamical_matrix, dynamical_matrix.T, tol=self.hessian_tolerance
)
self.assertArrayAlmostEqual(
dynamical_matrix_ref, dynamical_matrix_ref.T, tol=self.hessian_tolerance
)
self.assertArrayAlmostEqual(
dynamical_matrix, dynamical_matrix_ref, tol=self.hessian_tolerance
)
def _test_hessian(self, atoms, calculator):
H_analytical = calculator.calculate_hessian_matrix(atoms)
H_analytical = H_analytical.todense()
# Hessian is symmetric:
self.assertArrayAlmostEqual(H_analytical, H_analytical.T, tol=self.hessian_tolerance)
H_numerical = self._calculate_finite_difference_hessian(atoms, calculator)
self.assertArrayAlmostEqual(H_numerical, H_numerical.T, tol=self.hessian_tolerance)
self.assertArrayAlmostEqual(H_analytical, H_numerical, tol=self.hessian_tolerance)
def _calculate_finite_difference_hessian(self, atoms, calculator):
"""Calcualte the Hessian matrix using finite differences."""
ph = Phonons(atoms, calculator, supercell=(1, 1, 1), delta=1e-6)
ph.clean()
ph.run()
ph.read(acoustic=False)
ph.clean()
H_numerical = ph.get_force_constant()[0, :, :]
return H_numerical
if __name__ == '__main__':
unittest.main()

# File: matscipy-master/tests/test_committee.py

import os
import re
import copy
import pathlib
import pytest
import numpy as np
import ase.io
import ase.calculators.emt
import ase.calculators.lj
import matscipy.calculators.committee
@pytest.fixture
def committeemember():
member = matscipy.calculators.committee.CommitteeMember(calculator=ase.calculators.emt.EMT())
training_data = os.path.join(f'{os.path.dirname(__file__)}/committee_data/training_data_minimal.xyz')
member.set_training_data(training_data)
return member
@pytest.fixture
def committee_minimal(committeemember):
committee = matscipy.calculators.committee.Committee(
members=[copy.deepcopy(committeemember), copy.deepcopy(committeemember)]
)
return committee
@pytest.fixture
def committee():
committee = matscipy.calculators.committee.Committee()
committee_training_data = ase.io.read(f'{os.path.dirname(__file__)}/committee_data/training_data.xyz', ':')
num_members = 10
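    # Each member carries a slightly perturbed LJ epsilon, mimicking an
    # ensemble of independently fitted potentials; shuffling with a fixed
    # seed keeps the member-to-epsilon assignment reproducible.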
epsilons = np.linspace(0.98, 1.01, num_members)
np.random.seed(123)
np.random.shuffle(epsilons)
for idx_i in range(num_members):
committee += matscipy.calculators.committee.CommitteeMember(
calculator=ase.calculators.lj.LennardJones(sigma=1, epsilon=epsilons[idx_i]),
training_data=[atoms_i for atoms_i in committee_training_data
if idx_i in atoms_i.info['appears_in_committee']]
)
return committee
@pytest.fixture
def committee_calibrated(committee):
committee.set_internal_validation_set(appearance_threshold=5)
committee.calibrate(prop='energy', key='E_lj', location='info')
committee.calibrate(prop='forces', key='F_lj', location='arrays')
return committee
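# Typical calibration workflow exercised by the fixtures above: build the
# committee, hold out an internal validation set of samples that appear in
# enough members, then calibrate one scale factor (alpha) per property so
# that the committee spread becomes a quantitative error estimate.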
def test_committeemember_initialize():
matscipy.calculators.committee.CommitteeMember(calculator=ase.calculators.emt.EMT())
training_data = os.path.join(f'{os.path.dirname(__file__)}/committee_data/training_data_minimal.xyz')
matscipy.calculators.committee.CommitteeMember(calculator=ase.calculators.emt.EMT(),
training_data=training_data)
matscipy.calculators.committee.CommitteeMember(calculator=ase.calculators.emt.EMT(),
training_data=ase.io.read(training_data, ':'))
def test_committeemember_set_training_data(committeemember):
training_data = os.path.join(f'{os.path.dirname(__file__)}/committee_data/training_data_minimal.xyz')
with pytest.warns(Warning, match=re.escape('Overwriting current training data.')):
committeemember.set_training_data(training_data)
with pytest.warns(Warning, match=re.escape('Overwriting current training data.')):
committeemember.set_training_data(pathlib.Path(training_data))
with pytest.warns(Warning, match=re.escape('Overwriting current training data.')):
committeemember.set_training_data(ase.io.read(training_data, ':'))
def test_committeemember_is_sample_in_atoms(committeemember):
training_data = ase.io.read(os.path.join(f'{os.path.dirname(__file__)}/committee_data/training_data_minimal.xyz'), ':')
test_data = ase.io.read(os.path.join(f'{os.path.dirname(__file__)}/committee_data/test_data.xyz'), ':')
assert committeemember.is_sample_in_atoms(sample=training_data[0])
with pytest.raises(RuntimeError,
match=re.escape('Can\'t test if `sample` is in `atoms`. '
'`sample` has no Atoms.info[\'_Index_FullTrainingSet\']')):
assert not committeemember.is_sample_in_atoms(sample=test_data[0])
test_data[0].info['_Index_FullTrainingSet'] = -1
assert not committeemember.is_sample_in_atoms(sample=test_data[0])
def test_committeemember_setter(committeemember):
with pytest.raises(RuntimeError, match=re.escape('Use `set_training_data()` to modify the committee member')):
committeemember.filename = ''
with pytest.raises(RuntimeError, match=re.escape('Use `set_training_data()` to modify the committee member')):
committeemember.atoms = []
with pytest.raises(RuntimeError, match=re.escape('Use `set_training_data()` to modify the committee member')):
committeemember.ids = []
def test_committee_initialize(committeemember):
committee = matscipy.calculators.committee.Committee()
expected_status = [
('members', []),
('number', 0),
('atoms', []),
('ids', []),
('alphas', {}),
('calibrated_for', set()),
]
for attribute_i, value_i in expected_status:
assert getattr(committee, attribute_i) == value_i
with pytest.warns(Warning, match=re.escape('`Committee.set_internal_validation_set()` has not been called or '
'`Committee`-instance has been altered since last call.')):
assert getattr(committee, 'validation_set') == []
member_0 = copy.deepcopy(committeemember)
member_1 = copy.deepcopy(committeemember)
committee = matscipy.calculators.committee.Committee(
members=[member_0, member_1]
)
expected_status = [
('members', [member_0, member_1]),
('number', 2),
('atoms', member_0.atoms + member_1.atoms),
('ids', member_0.ids + member_1.ids),
('alphas', {}),
('calibrated_for', set()),
]
for attribute_i, value_i in expected_status:
assert getattr(committee, attribute_i) == value_i
with pytest.warns(Warning, match=re.escape('`Committee.set_internal_validation_set()` has not been called or '
'`Committee`-instance has been altered since last call.')):
assert getattr(committee, 'validation_set') == []
def test_committee_member(committee_minimal):
with pytest.raises(AssertionError,
match=re.escape('Members of `Committee` need to be of type `CommitteeMember`. Found ')):
matscipy.calculators.committee.Committee(members=[0, 1])
with pytest.raises(AssertionError,
match=re.escape('Members of `Committee` need to be of type `CommitteeMember`. Found ')):
committee_minimal.members = [0, 1]
with pytest.raises(AssertionError,
match=re.escape('Members of `Committee` need to be of type `CommitteeMember`. Found ')):
committee_minimal.add_member(0)
with pytest.raises(AssertionError,
match=re.escape('Members of `Committee` need to be of type `CommitteeMember`. Found ')):
committee_minimal += 0
def test_committee_set_internal_validation_set(committee):
with pytest.raises(AssertionError):
committee.set_internal_validation_set(appearance_threshold=0)
with pytest.raises(AssertionError):
committee.set_internal_validation_set(appearance_threshold=committee.number - 1)
committee.set_internal_validation_set(appearance_threshold=5)
obtained = set([atoms_i.info['_Index_FullTrainingSet'] for atoms_i
in committee.validation_set])
expected = set([atoms_i.info['_Index_FullTrainingSet'] for atoms_i
in ase.io.read(os.path.join(f'{os.path.dirname(__file__)}/committee_data/validation_set.xyz'), ':')])
assert obtained == expected
def test_committee_calibrate(committee):
committee.set_internal_validation_set(appearance_threshold=5)
committee.calibrate(prop='energy', key='E_lj', location='info')
assert committee.calibrated_for == set(['energy'])
np.testing.assert_array_almost_equal(committee.alphas['energy'], 0.6295416920992463, decimal=6)
committee.calibrate(prop='forces', key='F_lj', location='arrays')
assert committee.calibrated_for == set(['energy', 'forces'])
np.testing.assert_array_almost_equal(committee.alphas['forces'], 0.6195847443699875, decimal=6)
with pytest.warns(Warning,
match=re.escape('`alphas` will be reset to avoid inconsistencies with new validation set.')):
committee.set_internal_validation_set(appearance_threshold=4)
assert committee.alphas == {}
def test_committee__calculate_alpha(committee):
vals_ref = np.array([1.01, 1.02, 1.03])
vals_pred = np.array([2.01, 1.02, 1.03])
var_pred = np.array([1.01, 0.02, 0.03])
obtained = committee._calculate_alpha(vals_ref, vals_pred, var_pred)
np.testing.assert_array_almost_equal(obtained, 0.39584382766472004, decimal=6)
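# The alpha obtained above is a linear rescaling of the raw committee
# uncertainty, chosen such that the scaled uncertainties are statistically
# consistent with the residuals on the validation set; it is applied via
# `scale_uncertainty` (tested below).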
def test_committee_scale_uncertainty(committee):
committee._alphas = {'energy': 2.5}
assert committee.scale_uncertainty(2, 'energy') == 5.0
def test_committeeuncertainty_initialize(committee_calibrated):
matscipy.calculators.committee.CommitteeUncertainty(committee=committee_calibrated)
def test_committeeuncertainty_calculate(committee_calibrated):
calculator = matscipy.calculators.committee.CommitteeUncertainty(committee=committee_calibrated)
test_data = ase.io.read(os.path.join(f'{os.path.dirname(__file__)}/committee_data/test_data.xyz'), ':')
for atoms_i in test_data:
calculator.calculate(atoms=atoms_i, properties=['energy', 'forces'])
for prop_j in ['energy', 'energy_uncertainty']:
np.testing.assert_array_almost_equal(calculator.results[prop_j], atoms_i.info[prop_j], decimal=6,
                                                 err_msg=f'Mismatch in property \'{prop_j}\'')
for prop_j in ['forces', 'forces_uncertainty']:
np.testing.assert_array_almost_equal(calculator.results[prop_j], atoms_i.arrays[prop_j], decimal=6,
                                                 err_msg=f'Mismatch in property \'{prop_j}\'')

# File: matscipy-master/tests/test_crack.py

#
# Copyright 2020 James Kermode (Warwick U.)
# 2017, 2020 Lars Pastewka (U. Freiburg)
# 2016 Punit Patel (Warwick U.)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import unittest
import numpy as np
import ase.io
import ase.units as units
from ase.build import bulk
from ase.constraints import FixAtoms, UnitCellFilter
from ase.md.verlet import VelocityVerlet
from ase.md.velocitydistribution import MaxwellBoltzmannDistribution
from ase.optimize import FIRE
import matscipy.fracture_mechanics.crack as crack
from matscipy.elasticity import fit_elastic_constants
from matscipy.fracture_mechanics.crack import ConstantStrainRate
from matscipy.fracture_mechanics.clusters import diamond, set_groups
from matscipy.neighbours import neighbour_list
try:
import atomistica
from atomistica import TersoffScr, Tersoff_PRB_39_5566_Si_C__Scr
from atomistica import Tersoff, Tersoff_PRB_39_5566_Si_C
have_atomistica = True
except ImportError:
have_atomistica = False
if have_atomistica:
class TestConstantStrain(unittest.TestCase):
def test_apply_strain(self):
calc = TersoffScr(**Tersoff_PRB_39_5566_Si_C__Scr)
timestep = 1.0*units.fs
atoms = ase.io.read('cryst_rot_mod.xyz')
atoms.set_calculator(calc)
# constraints
top = atoms.positions[:, 1].max()
bottom = atoms.positions[:, 1].min()
fixed_mask = ((abs(atoms.positions[:, 1] - top) < 1.0) |
(abs(atoms.positions[:, 1] - bottom) < 1.0))
fix_atoms = FixAtoms(mask=fixed_mask)
# strain
orig_height = (atoms.positions[:, 1].max() - atoms.positions[:, 1].min())
delta_strain = timestep*1e-5*(1/units.fs)
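            # i.e. a strain increment per MD step corresponding to a
            # constant engineering strain rate of 1e-5 / fs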
rigid_constraints = False
strain_atoms = ConstantStrainRate(orig_height, delta_strain)
atoms.set_constraint(fix_atoms)
# dynamics
np.random.seed(0)
simulation_temperature = 300*units.kB
MaxwellBoltzmannDistribution(atoms, 2.0*simulation_temperature)
dynamics = VelocityVerlet(atoms, timestep)
def apply_strain(atoms, ConstantStrainRate, rigid_constraints):
ConstantStrainRate.apply_strain(atoms, rigid_constraints)
dynamics.attach(apply_strain, 1, atoms, strain_atoms, rigid_constraints)
dynamics.run(100)
# tests
if rigid_constraints:
answer = 0
temp_answer = 238.2066417638124
else:
answer = 0.013228150080099255
temp_answer = 236.76904696481486
newpos = atoms.get_positions()
current_height = newpos[:, 1].max() - newpos[:, 1].min()
diff_height = (current_height - orig_height)
self.assertAlmostEqual(diff_height, answer, places=3)
temperature = (atoms.get_kinetic_energy()/(1.5*units.kB*len(atoms)))
self.assertAlmostEqual(temperature, temp_answer, places=2)
def test_embedding_size_convergence(self):
calc = Tersoff(**Tersoff_PRB_39_5566_Si_C)
el = 'C'
a0 = 3.566
surface_energy = 2.7326 * 10
crack_surface = [1, 1, 1]
crack_front = [1, -1, 0]
skin_x, skin_y = 1, 1
cryst = bulk(el, cubic=True)
cryst.set_calculator(calc)
FIRE(UnitCellFilter(cryst), logfile=None).run(fmax=1e-6)
a0 = cryst.cell.diagonal().mean()
bondlength = cryst.get_distance(0, 1)
#print('a0 =', a0, ', bondlength =', bondlength)
cryst = diamond(el, a0, [1,1,1], crack_surface, crack_front)
cryst.set_pbc(True)
cryst.set_calculator(calc)
cryst.set_cell(cryst.cell.diagonal(), scale_atoms=True)
C, C_err = fit_elastic_constants(cryst, verbose=False,
symmetry='cubic',
optimizer=FIRE,
fmax=1e-6)
#print('Measured elastic constants (in GPa):')
#print(np.round(C*10/units.GPa)/10)
bondlengths = []
refcell = None
reftip_x = None
reftip_y = None
#[41, 39, 1],
for i, n in enumerate([[21, 19, 1], [11, 9, 1], [6, 5, 1]]):
#print(n)
cryst = diamond(el, a0, n, crack_surface, crack_front)
set_groups(cryst, n, skin_x, skin_y)
cryst.set_pbc(True)
cryst.set_calculator(calc)
FIRE(UnitCellFilter(cryst), logfile=None).run(fmax=1e-6)
cryst.set_cell(cryst.cell.diagonal(), scale_atoms=True)
ase.io.write('cryst_{}.xyz'.format(i), cryst, format='extxyz')
crk = crack.CubicCrystalCrack(crack_surface,
crack_front,
Crot=C/units.GPa)
k1g = crk.k1g(surface_energy)
tip_x = cryst.cell.diagonal()[0]/2
tip_y = cryst.cell.diagonal()[1]/2
a = cryst.copy()
a.set_pbc([False, False, True])
k1 = 1.0
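                # Superimpose the anisotropic linear-elastic near-tip
                # (K-field) displacements, which scale as k1 * sqrt(r);
                # k1 is the mode-I stress intensity factor measured in
                # units of the Griffith load k1g from the surface energy.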
ux, uy = crk.displacements(cryst.positions[:,0], cryst.positions[:,1],
tip_x, tip_y, k1*k1g)
a.positions[:, 0] += ux
a.positions[:, 1] += uy
# Center notched configuration in simulation cell and ensure enough vacuum.
oldr = a[0].position.copy()
if refcell is None:
a.center(vacuum=10.0, axis=0)
a.center(vacuum=10.0, axis=1)
refcell = a.cell.copy()
tip_x += a[0].x - oldr[0]
tip_y += a[0].y - oldr[1]
reftip_x = tip_x
reftip_y = tip_y
else:
a.set_cell(refcell)
# Shift tip position so all systems are exactly centered at the same spot
a.positions[:, 0] += reftip_x - tip_x
a.positions[:, 1] += reftip_y - tip_y
refpositions = a.positions.copy()
# Move reference crystal by same amount
cryst.set_cell(a.cell)
cryst.set_pbc([False, False, True])
cryst.translate(a[0].position - oldr)
bond1, bond2 = crack.find_tip_coordination(a, bondlength=bondlength*1.2)
                # Groups mark the fixed region and the region used for fitting the crack tip.
g = a.get_array('groups')
gcryst = cryst.get_array('groups')
ase.io.write('cryst_{}.xyz'.format(i), cryst)
a.set_calculator(calc)
a.set_constraint(FixAtoms(mask=g==0))
FIRE(a, logfile=None).run(fmax=1e-6)
dpos = np.sqrt(((a.positions[:, 0]-refpositions[:, 0])/ux)**2 + ((a.positions[:, 1]-refpositions[:, 1])/uy)**2)
a.set_array('dpos', dpos)
distance_from_tip = np.sqrt((a.positions[:, 0]-reftip_x)**2 + (a.positions[:, 1]-reftip_y)**2)
ase.io.write('crack_{}.xyz'.format(i), a)
# Compute average bond length per atom
neighi, neighj, neighd = neighbour_list('ijd', a, cutoff=bondlength*1.2)
coord = np.bincount(neighi)
assert coord.max() == 4
np.savetxt('dpos_{}.out'.format(i), np.transpose([distance_from_tip[coord==4], dpos[coord==4]]))
# Compute distances from tipcenter
neighdist = np.sqrt(((a.positions[neighi,0]+a.positions[neighj,0])/2-reftip_x)**2 +
((a.positions[neighi,1]+a.positions[neighj,1])/2-reftip_y)**2)
np.savetxt('bl_{}.out'.format(i), np.transpose([neighdist, neighd]))
bondlengths += [a.get_distance(bond1, bond2)]
print(bondlengths, np.diff(bondlengths), bondlengths/bondlengths[-1]-1)
assert np.all(np.diff(bondlengths) > 0)
assert np.max(bondlengths/bondlengths[0]-1) < 0.01
if __name__ == '__main__':
unittest.main()

# File: matscipy-master/tests/manybody/manybody_fixtures.py

"""Fixtures for Manybody potentials."""
import inspect
import pytest
import matscipy.calculators.manybody.potentials as potentials
import numpy as np
_classes = inspect.getmembers(potentials, inspect.isclass)
_impl_potentials = {
mother: [cls for _, cls in _classes if issubclass(cls, mother)]
for mother in (potentials.Manybody.Phi, potentials.Manybody.Theta)
}
# Default arguments for classes that do not define a default constructor
_default_arguments = {
potentials.StillingerWeberPair: [{
"__ref__": "",
"el": 1,
"epsilon": 1,
"sigma": 0.9,
"costheta0": 1,
"A": 1,
"B": 1,
"p": 1,
"q": 1,
"a": 2,
"lambda1": 1,
"gamma": 1,
}, np.inf],
potentials.StillingerWeberAngle: [{
"__ref__": "",
"el": 1,
"epsilon": 1,
"sigma": 0.9,
"costheta0": 1,
"A": 1,
"B": 1,
"p": 1,
"q": 1,
"a": 2,
"lambda1": 1,
"gamma": 1,
}],
potentials.KumagaiPair: [{
'__ref__': 'T. Kumagai et. al., Comp. Mat. Sci. 39 (2007)',
'el': 1.0,
'A': 1.0,
'B': 1.0,
'lambda_1': 1.0,
'lambda_2': 1.0,
'eta': 1.0,
'delta': 1.0,
'alpha': 1.0,
'beta': 1.0,
'c_1': 1.0,
'c_2': 1.0,
'c_3': 1.0,
'c_4': 1.0,
'c_5': 1.0,
'h': 1.0,
'R_1': 1.0,
'R_2': 4.0
}],
potentials.KumagaiAngle: [{
'__ref__': 'T. Kumagai et. al., Comp. Mat. Sci. 39 (2007)',
'el': 1.0,
'A': 1.0,
'B': 1.0,
'lambda_1': 1.0,
'lambda_2': 1.0,
'eta': 1.0,
'delta': 1.0,
'alpha': 1.0,
'beta': 1.0,
'c_1': 1.0,
'c_2': 1.0,
'c_3': 1.0,
'c_4': 1.0,
'c_5': 1.0,
'h': 1.0,
'R_1': 1.0,
'R_2': 4.0
}],
potentials.TersoffBrennerPair: [{
'__ref__': 'Tersoff J., Phys. Rev. B 39, 5566 (1989)',
'style': 'tersoff',
'el': 1.0,
'c': 1.0,
'd': 1.0,
'h': 1.0,
'R1': 2.7,
'R2': 3.0,
'A': 1.0,
'B': 1.0,
'lambda1': 1.0,
'mu': 1.0,
'beta': 1.0,
'lambda3': 1.0,
'chi': 1.0,
'n': 1.0
}],
potentials.TersoffBrennerAngle: [{
'__ref__': 'Tersoff J., Phys. Rev. B 39, 5566 (1989)',
'style': 'tersoff',
'el': 1.0,
'c': 1.0,
'd': 1.0,
'h': 1.0,
'R1': 2.7,
'R2': 3.0,
'A': 1.0,
'B': 1.0,
'lambda1': 1.0,
'mu': 1.0,
'beta': 1.0,
'lambda3': 1.0,
'chi': 1.0,
'n': 1.0
}],
potentials.LennardJones: [
1,
1e-3, # so that we test where FD is good
np.inf,
],
}
# Filter out sympy wrapper classes (rebuilding the lists avoids the
# classic bug of deleting from a list while iterating over it)
if getattr(potentials, 'SymPhi', None) is not None:
    for m in _impl_potentials:
        _impl_potentials[m] = [
            c for c in _impl_potentials[m]
            if not issubclass(c, (potentials.SymPhi, potentials.SymTheta))
        ]
# Marking expected failures / TODO fix the following classes
xfails = [
potentials.KumagaiPair, potentials.KumagaiAngle,
potentials.TersoffBrennerPair, potentials.BornMayerCut,
]
for fail in xfails:
for m in _impl_potentials:
classes = _impl_potentials[m]
if fail in classes:
classes[classes.index(fail)] = \
pytest.param(fail,
marks=pytest.mark.xfail(reason="Not implemented"))
class FiniteDiff:
"""Helper class for finite difference tests."""
hessian_ordering = {
2: [(0, 0), (1, 1), (0, 1)], # R, xi
3: [(0, 0), (1, 1), (2, 2), (1, 2), (0, 2), (0, 1)], # R1, R2, R3
}
def __init__(self, pot, coords):
self.pot = pot
self.coords = coords
def __call__(self, *args):
return self.pot(*args)
def gradient(self, *args):
E = self.pot(*args)
return np.gradient(E, *self.coords, edge_order=2)
def hessian(self, *args):
G = self.pot.gradient(*args)
return np.stack([
np.gradient(G[i], self.coords[j], axis=j, edge_order=2)
for i, j in self.hessian_ordering[len(args)]
])
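# Minimal usage sketch of FiniteDiff (assuming a pair-style potential `pot`
# taking (R, xi) arguments), mirroring the fixtures below:
#
#   rsq = xi = np.linspace(0.5, 5, 10)
#   fd = FiniteDiff(pot, (rsq, xi))
#   args = np.meshgrid(rsq, xi, indexing='ij')
#   assert np.allclose(pot.gradient(*args), fd.gradient(*args), atol=1e-4)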
@pytest.fixture(params=_impl_potentials[potentials.Manybody.Phi])
def pair_potential(request):
"""Fixture for pair potentials."""
if request.param in _default_arguments:
return request.param(*_default_arguments[request.param])
return request.param()
@pytest.fixture(params=_impl_potentials[potentials.Manybody.Theta])
def three_body_potential(request):
"""Fixture for three-body potentials."""
if request.param in _default_arguments:
return request.param(*_default_arguments[request.param])
return request.param()
try:
from matscipy.calculators.manybody.potentials import (
SymPhi,
SymTheta,
HarmonicPair,
HarmonicAngle,
LennardJones,
ZeroPair,
BornMayerCut,
TersoffBrennerPair,
TersoffBrennerAngle,
)
from sympy import symbols, acos, sqrt, pi, exp, cos
from sympy.abc import R, xi
from sympy import Piecewise
has_sympy = True
R1, R2, R3 = symbols("R_{1:4}")
cos_angle = (R1 + R2 - R3) / (2 * sqrt(R1 * R2))
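    # Potentials are parametrised in squared distances R_i = r_i**2. By the
    # law of cosines, r3**2 = r1**2 + r2**2 - 2*r1*r2*cos(theta), hence
    # cos(theta) = (R1 + R2 - R3) / (2*sqrt(R1*R2)); the sqrt(R) factors in
    # the reference expressions below undo the squared-distance convention.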
_analytical_pair_potentials = [
(HarmonicPair(1, 1), SymPhi(0.5 * (sqrt(R) - 1)**2 + xi, (R, xi))),
(ZeroPair(), SymPhi(xi, (R, xi))),
(LennardJones(1, 1, np.inf),
SymPhi(4 * ((1 / sqrt(R))**12 - (1 / sqrt(R))**6) + xi, (R, xi))),
(BornMayerCut(),
SymPhi(exp((1-sqrt(R))) - 1/R**3 + 1/R**4 + xi, (R, xi))),
(TersoffBrennerPair(_default_arguments[potentials.TersoffBrennerAngle][0]),
SymPhi((
# fc
Piecewise(
(1, sqrt(R) < 2.7),
(0.5 * (1 + cos(pi * (sqrt(R) - 2.7) / (3 - 2.7))),
                 (sqrt(R) >= 2.7) & (sqrt(R) < 3)),
(0, sqrt(R) >= 3)
) * (
exp(- sqrt(R)) - 1 / sqrt(1 + xi) * exp(- sqrt(R))
)), (R, xi))),
]
_analytical_triplet_potentials = [
(HarmonicAngle(1, np.pi / 2),
SymTheta(
0.5 * (acos(cos_angle) - pi / 2)**2,
(R1, R2, R3))),
(TersoffBrennerAngle(_default_arguments[potentials.TersoffBrennerAngle][0]),
SymTheta(
(2 - 1 / (1 + (1 - cos_angle)**2)) * Piecewise(
(1, sqrt(R2) < 2.7),
(0.5 * (1 + cos(pi * (sqrt(R2) - 2.7) / (3 - 2.7))),
                (sqrt(R2) >= 2.7) & (sqrt(R2) < 3)),
(0, sqrt(R2) >= 3)
), (R1, R2, R3))),
]
def _pot_names(potlist):
return [type(pot).__name__ for pot, _ in potlist]
@pytest.fixture(params=_analytical_pair_potentials,
ids=_pot_names(_analytical_pair_potentials))
def analytical_pair(request):
return request.param
@pytest.fixture(params=_analytical_triplet_potentials,
ids=_pot_names(_analytical_triplet_potentials))
def analytical_triplet(request):
return request.param
except ImportError:
has_sympy = False

# File: matscipy-master/tests/manybody/reference_params.py

Stillinger_Weber_PRB_31_5262_Si = {
'__ref__': 'F. Stillinger and T. Weber, Phys. Rev. B 31, 5262 (1985)',
'el': 'Si',
'epsilon': 2.1683,
'sigma': 2.0951,
'costheta0': 0.333333333333,
'A': 7.049556277,
'B': 0.6022245584,
'p': 4,
'q': 0,
'a': 1.80,
'lambda1': 21.0,
'gamma': 1.20
}
Kumagai_Comp_Mat_Sci_39_Si = {
'__ref__': 'T. Kumagai et. al., Comp. Mat. Sci. 39 (2007)',
'el': 'Si',
'A': 3281.5905,
'B': 121.00047,
'lambda_1': 3.2300135,
'lambda_2': 1.3457970,
'eta': 1.0000000,
'delta': 0.53298909,
'alpha': 2.3890327,
'beta': 1.0000000,
'c_1': 0.20173476,
'c_2': 730418.72,
'c_3': 1000000.0,
'c_4': 1.0000000,
'c_5': 26.000000,
'h': -0.36500000,
'R_1': 2.70,
'R_2': 3.30
}
# Cut down to Si only for testing
Tersoff_PRB_39_5566_Si_C = {
'__ref__': 'Tersoff J., Phys. Rev. B 39, 5566 (1989)',
'style': 'Tersoff',
'el': 'Si',
'A': 1.8308e3,
'B': 4.7118e2,
'chi': 1.0,
'lambda1': 2.4799e0,
'mu': 1.7322e0,
'lambda3': 0.0,
'beta': 1.1000e-6,
'n': 7.8734e-1,
'c': 1.0039e5,
'd': 1.6217e1,
'h': -5.9825e-1,
'R1': 2.70,
'R2': 3.00,
}

# File: matscipy-master/tests/manybody/test_manybody_potentials.py

"""Tests for implementation of potentials and derivatives."""
import pytest
import numpy as np
import numpy.testing as nt
from types import SimpleNamespace
from manybody_fixtures import (
pair_potential,
three_body_potential,
has_sympy,
analytical_pair,
analytical_triplet,
FiniteDiff,
)
def evaluate(pot, *args):
data = SimpleNamespace()
data.E = pot(*args)
data.gradient = pot.gradient(*args)
data.hessian = pot.hessian(*args)
return data
@pytest.fixture
def fd_evaluated_pair(pair_potential):
"""
Compute derivatives and finite differences.
"""
N = 10
L1, L2 = 0.5, 5
rsq, xi = np.linspace(L1, L2, N), np.linspace(L1, L2, N)
fd_pot = FiniteDiff(pair_potential, (rsq, xi))
args = np.meshgrid(rsq, xi, indexing='ij')
return evaluate(pair_potential, *args), evaluate(fd_pot, *args)
@pytest.fixture
def fd_evaluated_three_body(three_body_potential):
"""
Compute derivatives and finite differences.
"""
N = 10
L1, L2 = 1.0, 1.15
rij, rik, rjk = (
np.linspace(L1, L2, N),
np.linspace(L1, L2, N),
np.linspace(L1, L2, N),
)
fd_pot = FiniteDiff(three_body_potential, (rij, rik, rjk))
args = np.meshgrid(rij, rik, rjk, indexing='ij')
return evaluate(three_body_potential, *args), evaluate(fd_pot, *args)
def test_fd_pair(fd_evaluated_pair):
pot, ref = fd_evaluated_pair
nt.assert_allclose(pot.gradient, ref.gradient, rtol=1e-10, atol=1e-14)
nt.assert_allclose(pot.hessian, ref.hessian, rtol=1e-10, atol=1e-14)
def test_fd_three_body(fd_evaluated_three_body):
pot, ref = fd_evaluated_three_body
nt.assert_allclose(pot.gradient, ref.gradient, rtol=1e-4, atol=1e-4)
nt.assert_allclose(pot.hessian, ref.hessian, rtol=1e-4, atol=1e-4)
@pytest.fixture
def analytical_evaluated_pair(analytical_pair):
N = 10
L1, L2 = 0.5, 5
rsq, xi = np.linspace(L1, L2, N), np.linspace(L1, L2, N)
args = np.meshgrid(rsq, xi, indexing='ij')
pot, analytical = analytical_pair
return evaluate(pot, *args), evaluate(analytical, *args)
@pytest.fixture
def analytical_evaluated_three_body(analytical_triplet):
"""
Compute derivatives and finite differences.
"""
N = 10
L1, L2 = 1.0, 3.1
rij, rik, rjk = (
np.linspace(L1, L2, N),
np.linspace(L1, L2, N),
np.linspace(L1, L2, N),
)
args = np.meshgrid(rij, rik, rjk, indexing='ij')
pot, analytical = analytical_triplet
return evaluate(pot, *args), evaluate(analytical, *args)
@pytest.mark.skipif(not has_sympy, reason="Sympy not installed")
def test_analytical_pairs(analytical_evaluated_pair):
pot, ref = analytical_evaluated_pair
# Checking all computed fields
for k in pot.__dict__:
f, f_ref = getattr(pot, k), getattr(ref, k)
nt.assert_allclose(f, f_ref, rtol=1e-10, atol=1e-14)
@pytest.mark.skipif(not has_sympy, reason="Sympy not installed")
def test_analytical_triplets(analytical_evaluated_three_body):
pot, ref = analytical_evaluated_three_body
# Checking all computed fields
for k in pot.__dict__:
f, f_ref = getattr(pot, k), getattr(ref, k)
nt.assert_allclose(f, f_ref, rtol=1e-10, atol=1e-14)

# File: matscipy-master/tests/manybody/test_newmb.py

#
# Copyright 2022 Lucas Frérot (U. Freiburg)
# 2022 Jan Griesser (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import pytest
import numpy as np
import numpy.testing as nt
from ase import Atoms
from matscipy.numerical import (
numerical_forces,
numerical_stress,
numerical_hessian,
numerical_nonaffine_forces,
numerical_nonaffine_forces_reference,
)
from matscipy.calculators.calculator import MatscipyCalculator
from matscipy.calculators.manybody.newmb import Manybody
from matscipy.calculators.pair_potential import PairPotential, LennardJonesCut
from ase.lattice.cubic import Diamond
from ase.optimize import FIRE
from matscipy.calculators.manybody.potentials import (
distance_defined,
ZeroPair,
ZeroAngle,
HarmonicPair,
HarmonicAngle,
KumagaiPair,
KumagaiAngle,
LennardJones,
StillingerWeberPair,
StillingerWeberAngle,
TersoffBrennerPair,
TersoffBrennerAngle,
)
from reference_params import (
Kumagai_Comp_Mat_Sci_39_Si,
Stillinger_Weber_PRB_31_5262_Si,
Tersoff_PRB_39_5566_Si_C,
)
from matscipy.elasticity import (
measure_triclinic_elastic_constants,
)
from matscipy.molecules import Molecules
from matscipy.neighbours import MolecularNeighbourhood, CutoffNeighbourhood
def tetrahedron(distance, rattle):
atoms = Atoms(
"H" * 4,
positions=[(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)],
cell=[10, 10, 10],
)
atoms.positions *= distance
atoms.rattle(rattle)
return atoms
def diamond(distance, rattle):
atoms = Diamond("Si", size=[1, 1, 1], latticeconstant=distance)
atoms.rattle(rattle)
return atoms
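# The Manybody calculator decomposes the energy into a pair term Phi(R, xi)
# fed by a sum of triplet contributions xi = sum_t Theta(R1, R2, R3), with
# the arguments expressed as (squared) distances; exact prefactors and
# summation conventions live in the calculator. The toy Phi/Theta classes
# below exercise individual pieces of this decomposition.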
class SimpleAngle(Manybody.Theta):
"""Implementation of a zero three-body interaction."""
def __call__(self, R1, R2, R3):
return 0.5 * (R1**2 + R2**2 + R3**2)
def gradient(self, R1, R2, R3):
return np.stack([
R1,
R2,
R3,
])
def hessian(self, R1, R2, R3):
return np.stack([
np.ones(list(R1.shape)),
np.ones(list(R1.shape)),
np.ones(list(R1.shape)),
np.zeros(list(R1.shape)),
np.zeros(list(R1.shape)),
np.zeros(list(R1.shape)),
])
class MixPair(Manybody.Phi):
"""
Implementation of a harmonic pair interaction.
"""
def __call__(self, r_p, xi_p):
return xi_p * r_p
def gradient(self, r_p, xi_p):
return np.stack([
xi_p,
r_p,
])
def hessian(self, r_p, xi_p):
return np.stack([
np.zeros_like(r_p),
np.zeros_like(xi_p),
np.ones_like(r_p),
])
class LinearPair(Manybody.Phi):
"""
Implementation of a harmonic pair interaction.
"""
def __call__(self, r_p, xi_p):
return r_p + xi_p
def gradient(self, r_p, xi_p):
return np.stack([
np.ones_like(r_p),
np.ones_like(xi_p),
])
def hessian(self, r_p, xi_p):
return np.stack([
np.zeros_like(r_p),
np.zeros_like(xi_p),
np.zeros_like(xi_p),
])
@distance_defined
class SimplePairNoMix(Manybody.Phi):
"""
Implementation of a harmonic pair interaction.
"""
def __call__(self, r_p, xi_p):
return 0.5 * r_p**2 + 0.5 * xi_p**2
def gradient(self, r_p, xi_p):
return np.stack([
r_p,
xi_p,
])
def hessian(self, r_p, xi_p):
return np.stack([
np.ones_like(r_p),
np.ones_like(xi_p),
np.zeros_like(r_p),
])
@distance_defined
class SimplePairNoMixNoSecond(Manybody.Phi):
"""
Implementation of a harmonic pair interaction.
"""
def __call__(self, r_p, xi_p):
return 0.5 * r_p**2 + xi_p
def gradient(self, r_p, xi_p):
return np.stack([
r_p,
np.ones_like(xi_p),
])
def hessian(self, r_p, xi_p):
return np.stack([
np.ones_like(r_p),
np.zeros_like(xi_p),
np.zeros_like(r_p),
])
def molecule():
"""Return a molecule setup involing all 4 atoms."""
# Get all combinations of eight atoms
bonds = np.array(
np.meshgrid([np.arange(4)] * 2),
).T.reshape(-1, 2)
# Get all combinations of eight atoms
angles = np.array(np.meshgrid([np.arange(4)] * 3)).T.reshape(-1, 3)
# Delete degenerate pairs and angles
bonds = bonds[bonds[:, 0] != bonds[:, 1]]
    angles = angles[
        (angles[:, 0] != angles[:, 1])
        & (angles[:, 0] != angles[:, 2])
        & (angles[:, 1] != angles[:, 2])
    ]
#angles = angles[:, (1, 0, 2)]
print(angles)
return MolecularNeighbourhood(
Molecules(bonds_connectivity=bonds, angles_connectivity=angles)
)
def carbon_silicon_pair_types(i, j):
i, j = np.asarray(i), np.asarray(j)
types = np.ones_like(i)
types[i != j] = 2
return types
def carbon_silicon_triplet_types(i, j, k):
i, j, k = np.asarray(i), np.asarray(j), np.asarray(k)
types = np.ones_like(i)
types[(i != j) | (i != k) | (k != j)] = 2
return types
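# The neighbourhood maps every pair/triplet to an integer type and the
# potential dictionaries dispatch one Phi per pair type and one Theta per
# triplet type; here type 1 means a single-species (Si-Si or C-C)
# interaction and type 2 a mixed C/Si one.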
# Potentials to be tested
potentials = {
"Zero(Pair+Angle)~molecule": (
{1: ZeroPair()}, {1: ZeroAngle()}, molecule()
),
"Harmonic(Pair+Angle)~molecule": (
{1: HarmonicPair(1, 1)}, {1: HarmonicAngle(1, np.pi / 4)}, molecule()
),
"HarmonicPair+ZeroAngle~molecule": (
{1: HarmonicPair(1, 1)}, {1: ZeroAngle()}, molecule()
),
"ZeroPair+HarmonicAngle~molecule": (
{1: ZeroPair()}, {1: HarmonicAngle(1, np.pi / 4)}, molecule()
),
"SimpleAngle~cutoff": (
{1: ZeroPair()}, {1: SimpleAngle()}, CutoffNeighbourhood(cutoff=3.0),
),
"SimpleAngle~molecule": (
{1: HarmonicPair(1, 5)}, {1: SimpleAngle()}, molecule(),
),
"KumagaiPair+ZeroAngle": (
{1: KumagaiPair(Kumagai_Comp_Mat_Sci_39_Si)},
{1: ZeroAngle()},
CutoffNeighbourhood(cutoff=Kumagai_Comp_Mat_Sci_39_Si["R_2"]),
),
"HarmonicPair+HarmonicAngle~cutoff~heterogeneous": (
{1: HarmonicPair(1, 1), 2: HarmonicPair(2, 1)},
{1: ZeroAngle(), 2: HarmonicAngle(1, np.pi/3)},
CutoffNeighbourhood(cutoff=3.0,
pair_types=carbon_silicon_pair_types,
triplet_types=carbon_silicon_triplet_types),
),
"LinearPair+HarmonicAngle": (
{1: LinearPair()},
{1: HarmonicAngle(1, np.pi/3)},
CutoffNeighbourhood(cutoff=3.3),
),
"LinearPair+KumagaiAngle": (
{1: LinearPair()},
{1: KumagaiAngle(Kumagai_Comp_Mat_Sci_39_Si)},
CutoffNeighbourhood(cutoff=Kumagai_Comp_Mat_Sci_39_Si["R_2"]),
),
"MixPair+KumagaiAngle": (
{1: MixPair()},
{1: KumagaiAngle(Kumagai_Comp_Mat_Sci_39_Si)},
CutoffNeighbourhood(cutoff=Kumagai_Comp_Mat_Sci_39_Si["R_2"]),
),
"ZeroPair+KumagaiAngle": (
{1: ZeroPair()},
{1: KumagaiAngle(Kumagai_Comp_Mat_Sci_39_Si)},
CutoffNeighbourhood(cutoff=Kumagai_Comp_Mat_Sci_39_Si["R_2"]),
),
"SimplePairNoMix+KumagaiAngle": (
{1: SimplePairNoMix()},
{1: KumagaiAngle(Kumagai_Comp_Mat_Sci_39_Si)},
CutoffNeighbourhood(cutoff=Kumagai_Comp_Mat_Sci_39_Si["R_2"]),
),
"SimplePairNoMixNoSecond+HarmonicAngle": (
{1: SimplePairNoMixNoSecond()},
{1: HarmonicAngle()},
CutoffNeighbourhood(cutoff=3.3),
),
"SimplePairNoMixNoSecond+KumagaiAngle": (
{1: SimplePairNoMixNoSecond()},
{1: KumagaiAngle(Kumagai_Comp_Mat_Sci_39_Si)},
CutoffNeighbourhood(cutoff=Kumagai_Comp_Mat_Sci_39_Si["R_2"]),
),
"StillingerWeber": (
{1: StillingerWeberPair(Stillinger_Weber_PRB_31_5262_Si)},
{1: StillingerWeberAngle(Stillinger_Weber_PRB_31_5262_Si)},
CutoffNeighbourhood(cutoff=Stillinger_Weber_PRB_31_5262_Si["a"]
* Stillinger_Weber_PRB_31_5262_Si["sigma"]),
),
"Tersoff3": (
{1: TersoffBrennerPair(Tersoff_PRB_39_5566_Si_C)},
{1: TersoffBrennerAngle(Tersoff_PRB_39_5566_Si_C)},
CutoffNeighbourhood(cutoff=Tersoff_PRB_39_5566_Si_C["R2"]),
),
"KumagaiPair+KumagaiAngle": (
{1: KumagaiPair(Kumagai_Comp_Mat_Sci_39_Si)},
{1: KumagaiAngle(Kumagai_Comp_Mat_Sci_39_Si)},
CutoffNeighbourhood(cutoff=Kumagai_Comp_Mat_Sci_39_Si["R_2"]),
),
}
# TODO fix molecule tests
for test_name in potentials:
if "~molecule" in test_name:
potentials[test_name] =\
pytest.param(
potentials[test_name],
marks=pytest.mark.xfail(reason="Molecules do not work"))
@pytest.fixture(params=potentials.values(), ids=potentials.keys())
def potential(request):
return request.param
# @pytest.fixture(params=[5.3, 5.431])
@pytest.fixture(params=[5.431])
def distance(request):
return request.param
@pytest.fixture(params=[0, 1e-3])
def rattle(request):
return request.param
@pytest.fixture(params=[diamond])
def configuration(distance, rattle, potential, request):
atoms = request.param(distance, rattle)
atoms.symbols[0:2] = 'C' # making a heterogeneous system
atoms.calc = Manybody(*potential)
atoms.calc.atoms = atoms
atoms.new_array('rattle', np.full(8, rattle))
return atoms
###############################################################################
def test_forces(configuration):
f_ana = configuration.get_forces()
f_num = numerical_forces(configuration, d=1e-6)
nt.assert_allclose(f_ana, f_num, rtol=1e-6, atol=1e-6)
def test_stresses(configuration):
s_ana = configuration.get_stress()
s_num = numerical_stress(configuration, d=1e-6)
nt.assert_allclose(s_ana, s_num, rtol=1e-6, atol=1e-8)
def test_nonaffine_forces(configuration):
naf_ana = configuration.calc.get_property('nonaffine_forces')
naf_num = numerical_nonaffine_forces_reference(configuration, d=1e-6)
    # atol here is relaxed to accommodate finite-difference error
nt.assert_allclose(naf_ana, naf_num, rtol=1e-6, atol=1e-5)
def test_hessian(configuration):
H_ana = configuration.calc.get_property('hessian').todense()
H_num = numerical_hessian(configuration, d=1e-6).todense()
    # For complex potentials (Kumagai, Tersoff), FD struggles out of equilibrium
atol = np.max([configuration.arrays['rattle'][0] * 3.5, 1e-6])
nt.assert_allclose(H_ana, H_num, atol=atol, rtol=1e-6)
def test_dynamical_matrix(configuration):
# Maybe restrict this test to a single potential to reduce testing ?
D_ana = configuration.calc.get_property('dynamical_matrix').todense()
H_ana = configuration.calc.get_property('hessian').todense()
mass = np.repeat(configuration.get_masses(), 3)
H_ana /= np.sqrt(mass.reshape(-1, 1) * mass.reshape(1, -1))
nt.assert_allclose(D_ana, H_ana, atol=1e-10, rtol=1e-10)
def test_birch_constants(configuration):
B_ana = configuration.calc.get_property("birch_coefficients", configuration)
C_num = measure_triclinic_elastic_constants(configuration, delta=1e-8)
nt.assert_allclose(B_ana, C_num, rtol=1e-7, atol=3e-6)
def test_elastic_constants(configuration):
# Needed since zero-temperature elastic constants defined in local minimum
FIRE(configuration, logfile=None).run(fmax=1e-6, steps=400)
C_ana = configuration.calc.get_property("elastic_constants", configuration)
C_num = measure_triclinic_elastic_constants(
configuration,
delta=1e-3,
optimizer=FIRE,
fmax=1e-6,
steps=500,
)
nt.assert_allclose(np.where(C_ana < 1e-6, 0.0, C_ana),
np.where(C_num < 1e-6, 0.0, C_num),
rtol=1e-3, atol=1e-3)
@pytest.mark.parametrize('cutoff', np.linspace(1.1, 20, 10))
def test_pair_compare(cutoff):
atoms = Atoms(
"H" * 4,
positions=[(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)],
cell=[10, 10, 10],
)
atoms.positions[:] *= 1
atoms.calc = Manybody(
{1: LennardJones(1, 1, cutoff)},
{1: ZeroAngle()},
CutoffNeighbourhood(cutoff=cutoff)
)
newmb_e = atoms.get_potential_energy()
pair = PairPotential({(1, 1): LennardJonesCut(1, 1, cutoff)})
pair_e = pair.get_property('energy', atoms)
assert np.abs(newmb_e - pair_e) / pair_e < 1e-10
@pytest.mark.parametrize('cutoff', [1.4, 1.5])
def test_energy_cutoff(cutoff):
atoms = Atoms(
"H" * 4,
positions=[(0, 0, 0), (1, 0, 0), (0, 1, 0), (0, 0, 1)],
cell=[10, 10, 10],
)
atoms.calc = Manybody(
{1: HarmonicPair(1, 1)},
{1: HarmonicAngle(1, 0)},
CutoffNeighbourhood(cutoff=cutoff)
)
newmb_e = atoms.get_potential_energy()
def harmonic(t):
return 0.5 * (t)**2
    # 90-degree angles with next-neighbor cutoff
    # next-neighbor pairs have 0 energy (r = r0 = 1)
e = 3 * harmonic(np.pi / 2)
# cutoff large enough for longer distance interactions
# adds all 45 and 60 angles
# adds longer pairs
if cutoff > np.sqrt(2):
e += (
+ 6 * harmonic(np.pi / 4)
+ 3 * harmonic(np.pi / 3)
+ 3 * harmonic(np.sqrt(2) - 1)
)
assert np.abs(e - newmb_e) / e < 1e-10
def test_pair_nonaffine():
atoms = Atoms(
"H" * 2,
positions=[(0, 0, 0), (1, 0, 0)],
cell=[10, 10, 10],
)
atoms.calc = Manybody(
{1: HarmonicPair(1, 0.1)},
{1: ZeroAngle()},
CutoffNeighbourhood(cutoff=2.)
)
naf = atoms.calc.get_property('nonaffine_forces', atoms)
naf_ref = numerical_nonaffine_forces_reference(atoms, d=1e-8)
nt.assert_allclose(naf, naf_ref, atol=1e-6)
# naf_ref = MatscipyCalculator.get_nonaffine_forces(atoms.calc, atoms)
# nt.assert_allclose(naf, naf_ref, atol=1e-6)

# File: matscipy-master/tests/manybody/test_manybody_molecules.py

#
# Copyright 2022 Lucas Frérot (U. Freiburg)
#
# matscipy - Materials science with Python at the atomic-scale
# https://github.com/libAtoms/matscipy
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import numpy as np
import numpy.testing as nt
from ase import Atoms
from ase.optimize import FIRE
from matscipy.calculators.manybody.newmb import Manybody
from matscipy.calculators.manybody.potentials import (
ZeroAngle, ZeroPair,
HarmonicAngle, HarmonicPair
)
from matscipy.molecules import Molecules
from matscipy.neighbours import MolecularNeighbourhood
from matscipy.numerical import (
numerical_forces, numerical_hessian, numerical_stress,
numerical_nonaffine_forces_reference
)
import pytest
@pytest.fixture(params=[1, 1.5, 2])
def length(request):
return request.param
@pytest.fixture(params=[np.pi / 6, np.pi / 3, np.pi / 2])
def angle(request):
return request.param
@pytest.fixture
def co2(length, angle):
s = 1.1
atoms = Atoms(
"CO2",
# Not symmetric on purpose
positions=[
[-1, 0, 0],
[0, 0, 0],
[s * np.cos(angle), s * np.sin(angle), 0],
],
cell=[5, 5, 5],
)
atoms.pbc[:] = False
atoms.positions[:] *= length
return atoms
@pytest.fixture
def molecule():
return MolecularNeighbourhood(
Molecules(
bonds_connectivity=[[0, 1], [1, 2]],
angles_connectivity=[[1, 0, 2]]
)
)
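# Note the angle triplet [1, 0, 2] lists the apex atom first: it describes
# the angle at atom 1 (the atom at the origin) spanned by atoms 0 and 2,
# which is compared against co2.get_angle(0, 1, 2) in the tests below.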
def test_harmonic_bond(co2, molecule):
k, r0 = 1, 0.5
co2.calc = Manybody({1: HarmonicPair(k, r0)}, {1: ZeroAngle()}, molecule)
pair_vectors = np.array([
co2.get_distance(0, 1, vector=True),
co2.get_distance(1, 2, vector=True),
])
pair_distances = np.linalg.norm(pair_vectors, axis=-1)
# Testing potential energy
epot = co2.get_potential_energy()
epot_ref = np.sum(0.5 * k * (pair_distances - r0)**2)
nt.assert_allclose(epot, epot_ref, rtol=1e-15)
# Testing force on first atom
f = co2.get_forces()
f_ref = k * (pair_distances[0] - r0) * pair_vectors[0] / pair_distances[0]
nt.assert_allclose(f[0], f_ref, rtol=1e-15)
# Testing all forces with finite differences
f_ref = numerical_forces(co2, d=1e-6)
nt.assert_allclose(f, f_ref, rtol=1e-9, atol=1e-7)
# Testing stress with finite differences
s_ref = numerical_stress(co2, d=1e-6)
nt.assert_allclose(co2.get_stress(), s_ref, rtol=1e-8, atol=1e-7)
# Testing nonaffine forces with finite differences
nf_ref = numerical_nonaffine_forces_reference(co2, d=1e-8)
nf = co2.calc.get_property('nonaffine_forces', co2)
nt.assert_allclose(nf, nf_ref, rtol=1e-8, atol=1e-6)
# Testing hessian
h = co2.calc.get_property('hessian', co2).todense()
h_ref = numerical_hessian(co2, d=1e-6).todense()
print(h, h_ref)
nt.assert_allclose(h, h_ref, atol=1e-4)
@pytest.mark.xfail(reason="Hessian not properly implemented")
def test_harmonic_angle(co2, molecule):
kt, theta0 = 1, np.pi / 4
calc = Manybody({1: ZeroPair()}, {1: HarmonicAngle(kt, theta0)}, molecule)
co2.calc = calc
angle = np.radians(co2.get_angle(0, 1, 2))
# Testing potential energy
epot = co2.get_potential_energy()
epot_ref = 0.5 * kt * (angle - theta0)**2
nt.assert_allclose(epot, epot_ref, rtol=1e-14,
err_msg="Wrong energy")
# Testing forces
f = co2.get_forces()
f_ref = numerical_forces(co2, d=1e-6)
nt.assert_allclose(f, f_ref, rtol=1e-6, atol=2e-9,
err_msg="Wrong forces")
# Checking zeros
nt.assert_allclose(np.abs(f.sum()), 0, atol=1e-13)
# Testing stress
s = co2.get_stress()
s_ref = numerical_stress(co2, d=1e-6)
nt.assert_allclose(s, s_ref, rtol=1e-6, atol=2e-9)
# Testing nonaffine forces with finite differences
nf_ref = numerical_nonaffine_forces_reference(co2, d=1e-6)
nf = co2.calc.get_property('nonaffine_forces', co2)
nt.assert_allclose(nf, nf_ref, rtol=1e-8, atol=1e-6)
# Testing hessian
h = co2.calc.get_property('hessian', co2)
h_ref = numerical_hessian(co2, d=1e-4)
# print(f"{calc.get_block_sparse_hessian(co2)[0]}")
# print(f"{h.todense()}\n\n{h_ref.todense()}")
nt.assert_allclose(h.todense(), h_ref.todense(), atol=1e-5)

# File: matscipy-master/tests/manybody/heterogeneous/polyphosphate.py

import pytest
import numpy as np
from ase.io import read
from ase.optimize import FIRE
from ase.calculators.mixing import SumCalculator
from ase.calculators.calculator import PropertyNotImplementedError
from matscipy.numerical import (numerical_forces, numerical_stress,
numerical_nonaffine_forces, numerical_hessian)
from matscipy.elasticity import \
measure_triclinic_elastic_constants as numerical_birch
from matscipy.molecules import Molecules
from matscipy.neighbours import MolecularNeighbourhood
from matscipy.calculators.pair_potential import PairPotential, LennardJonesCut
from matscipy.calculators.ewald import Ewald
from matscipy.calculators.manybody.newmb import Manybody
from matscipy.calculators.manybody.potentials import \
ZeroPair, HarmonicAngle
NUM_PROPERTIES = {
"forces": numerical_forces,
"stress": numerical_stress,
"nonaffine_forces": lambda a: numerical_nonaffine_forces(a, d=1e-8),
"hessian": numerical_hessian,
"birch_coefficients": numerical_birch,
}
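# Numerical reference implementations for cross-checking each analytical
# property; where available, LAMMPS results serve as an additional
# reference (see lammps_prop below).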
def here(path):
from pathlib import Path
return Path(__file__).parent / path
def set_lammps(atoms, lammps_datafile):
"""Set LAMMPS calculator to Atoms object."""
from ase.calculators.lammpslib import LAMMPSlib
from ase.geometry import wrap_positions
atom_symbol_to_lammps = {
"O": 1,
"P": 2,
"Zn": 3,
}
atoms.positions = wrap_positions(atoms.positions, atoms.cell, atoms.pbc)
header = f"""
boundary p p p
units metal
atom_style full
pair_style lj/cut/coul/long 10 10
bond_style zero
angle_style harmonic
special_bonds lj/coul 1 1 1
read_data {lammps_datafile}
# These commands to accomodate for ASE
change_box all triclinic
kspace_style ewald 1e-12
# Deactivate pair and coulomb
kspace_style none
pair_style lj/cut 10
pair_coeff * * 0 1
""".split("\n")
calc = LAMMPSlib(
lmpcmds=[],
atom_types=atom_symbol_to_lammps,
lammps_header=header,
create_atoms=False,
create_box=False,
boundary=False,
keep_alive=True,
log_file='log.lammps')
atoms.calc = calc
def set_legacy_manybody(atoms, molecules):
"""Set matscipy calculators for system."""
from matscipy.calculators.manybody.calculator import NiceManybody
from matscipy.calculators.manybody.explicit_forms import \
HarmonicAngle, ZeroPair
lj_interactions = {
(8, 8): LennardJonesCut(0.012185, 2.9170696728, 10),
(8, 15): LennardJonesCut(0.004251, 1.9198867376, 10),
(8, 30): LennardJonesCut(8.27e-4, 2.792343852, 10),
}
pair = PairPotential(lj_interactions)
triplet = NiceManybody(
ZeroPair(), HarmonicAngle(np.radians(109.47), 2 * 1.77005, atoms),
MolecularNeighbourhood(molecules))
ewald = Ewald()
ewald.set(accuracy=1e-4, cutoff=10., verbose=False)
atoms.arrays['charge'] = atoms.get_initial_charges()
atoms.calc = SumCalculator([triplet])
def set_manybody(atoms, molecules):
lj_interactions = {
(8, 8): LennardJonesCut(0.012185, 2.9170696728, 10),
(8, 15): LennardJonesCut(0.004251, 1.9198867376, 10),
(8, 30): LennardJonesCut(8.27e-4, 2.792343852, 10),
}
neigh = MolecularNeighbourhood(molecules)
pair = PairPotential(lj_interactions)
triplet = Manybody({1: ZeroPair()}, {
1: HarmonicAngle(2 * 1.77005, np.radians(109.47)),
2: HarmonicAngle(2 * 10.4663, np.radians(135.58)),
}, neigh)
ewald = Ewald()
ewald.set(accuracy=1e-12, cutoff=10., verbose=False)
atoms.arrays['charge'] = atoms.get_initial_charges()
atoms.calc = SumCalculator([pair, ewald, triplet])
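# Note the factor 2 on the angle spring constants: LAMMPS' `angle_style
# harmonic` uses E = K*(theta - theta0)**2, while HarmonicAngle follows the
# 0.5*k*(theta - theta0)**2 convention (cf. test_manybody_molecules), so
# k = 2*K.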
def map_ase_types(atoms):
"""Convert atom types to atomic numbers."""
lammps_to_atom_num = {
1: 8, # O
2: 15, # P
3: 30, # Zn
}
for lammps_type, atom_num in lammps_to_atom_num.items():
atoms.numbers[atoms.numbers == lammps_type] = atom_num
return atoms
@pytest.fixture
def polyphosphate():
atoms = read(
here('polyphosphate.data'),
format='lammps-data',
style='full',
units='metal')
atoms = map_ase_types(atoms)
mol = Molecules.from_atoms(atoms)
atoms.calc = Manybody({1: ZeroPair()}, {
1: HarmonicAngle(2 * 1.77005, np.radians(109.47)),
2: HarmonicAngle(2 * 10.4663, np.radians(135.58)),
}, MolecularNeighbourhood(mol))
# So that Cauchy stress is non-zero
atoms.cell *= 0.8
atoms.positions *= 0.8
return atoms, mol
def lammps_prop(atoms, prop):
"""Return property computed with LAMMPS if installed."""
atoms = atoms.copy()
try:
set_lammps(atoms, here('polyphosphate.data'))
return atoms.calc.get_property(prop, atoms)
except PropertyNotImplementedError:
return NUM_PROPERTIES[prop](atoms)
except Exception as e:
print(type(e), e)
return None
def test_angles_energy(polyphosphate):
atoms, mol = polyphosphate
epot = atoms.get_potential_energy()
epot_ref = 0
for t, k, theta in zip([1, 2], [1.77005, 10.4663], [109.47, 135.58]):
angles = mol.get_angles(atoms)[mol.angles['type'] == t]
epot_ref += sum(k * np.radians(angles - theta)**2)
assert np.abs(epot_ref - epot) / epot_ref < 1e-14
epot_ref = lammps_prop(atoms, 'energy')
if epot_ref is not None:
assert np.abs(epot_ref - epot) / epot_ref < 1e-13
PROPERTIES = [
"forces",
"stress",
"nonaffine_forces",
"birch_coefficients",
"hessian",
]
@pytest.mark.parametrize("prop", PROPERTIES)
def test_properties(polyphosphate, prop):
atoms, _ = polyphosphate
atol, rtol = 4e-6, 1e-7
data = atoms.calc.get_property(prop, atoms)
ref = NUM_PROPERTIES[prop](atoms)
lref = lammps_prop(atoms, prop)
def dense_cast(x):
from scipy.sparse import issparse
return x.todense() if issparse(x) else x
data = dense_cast(data)
    ref = dense_cast(ref)
lref = dense_cast(lref)
if lref is not None:
np.testing.assert_allclose(data, lref, atol=atol, rtol=rtol)
np.testing.assert_allclose(data, ref, atol=atol, rtol=rtol)

# File: matscipy-master/docs/conf.py

# -*- coding: utf-8 -*-
#
# matscipy documentation build configuration file, created by
# sphinx-quickstart on Sun May 17 16:40:20 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Fix for readthedocs which does not support numpy
#import mock
#
#MOCK_MODULES = ['numpy', 'numpy.linalg']
#for mod_name in MOCK_MODULES:
# sys.modules[mod_name] = mock.Mock()
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'myst_nb',
'numpydoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'matscipy'
copyright = u'2015, James Kermode, Lars Pastewka'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = 'devel'
# The full version, including alpha/beta/rc tags.
release = 'devel'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'matscipydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'matscipy.tex', u'matscipy Documentation',
u'James Kermode, Lars Pastewka', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'matscipy', u'matscipy Documentation',
[u'James Kermode, Lars Pastewka'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'matscipy', u'matscipy Documentation',
u'James Kermode, Lars Pastewka', 'matscipy', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Extension configuration -------------------------------------------------
autodoc_default_options = {
'members': True,
'member-order': 'bysource',
'undoc-members': True,
'show-inheritance': True,
'inherited-members': True,
'special-members': '__init__',
}
myst_enable_extensions = [
    "dollarmath",
]
| 8,929 | 29.793103 | 83 | py |
DLBFoam-1.1_OF9 | DLBFoam-1.1_OF9/tutorials/reactingFoam/shearlayer_DLB/solve_buffer.py | import matplotlib.pyplot as plt
import numpy as np
import glob
import matplotlib
fig,ax = plt.subplots()
mean = 0
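# Column layout of loadBal/cpu_solve.out, as inferred from the parsing
# below (one header line is skipped): time, get_problem, update_state,
# balance, solve_buffer, unbalance.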
for rankid,rank in enumerate(sorted(glob.glob('processor*'))):
get_problem = []
update_state = []
balance = []
solve_buffer = []
unbalance = []
time = []
cpu_solve = open(rank+'/loadBal/cpu_solve.out')
lines_solve = cpu_solve.readlines()[1:]
for x in lines_solve:
time.append(x.split()[0])
get_problem.append(x.split()[1])
update_state.append(x.split()[2])
balance.append(x.split()[3])
solve_buffer.append(x.split()[4])
unbalance.append(x.split()[5])
if(rankid==0):
size = np.size(time)-2
time = np.array([float(i) for i in time])
get_problem = np.array([float(i) for i in get_problem])
update_state = np.array([float(i) for i in update_state])
balance = np.array([float(i) for i in balance])
solve_buffer = np.array([float(i) for i in solve_buffer])
unbalance = np.array([float(i) for i in unbalance])
total = get_problem[:size] + update_state[:size] + balance[:size] + solve_buffer[:size] + unbalance[:size]
mean += solve_buffer[:size]
ax.plot(solve_buffer,linewidth=0.6,label='Processor ID' if rankid==0 else "")
ax.plot(mean/np.size(glob.glob('processor*')),'k--',label='Mean',linewidth=0.9)
ax.tick_params(bottom=True,top=False,left=True,right=True,labeltop=False,labelright=False,length=2,direction='in')
ax.set_ylabel('Chemistry CPU time [s]')
ax.set_xlim([0,None])
ax.legend(loc=1,frameon=False)
fig.text(0.5, -0.01, 'Number of iterations', ha='center')
fig.tight_layout()
fig.savefig('rankbased_solve.png',bbox_inches='tight',pad_inches=0,dpi=600)
plt.show()
| 1,734 | 33.7 | 114 | py |
DLBFoam-1.1_OF9 | DLBFoam-1.1_OF9/tutorials/reactingFoam/shearlayer_DLB_pyJac/solve_buffer.py | import matplotlib.pyplot as plt
import numpy as np
import glob
import matplotlib
fig,ax = plt.subplots()
mean = 0
for rankid,rank in enumerate(sorted(glob.glob('processor*'))):
get_problem = []
update_state = []
balance = []
solve_buffer = []
unbalance = []
time = []
cpu_solve = open(rank+'/loadBal/cpu_solve.out')
lines_solve = cpu_solve.readlines()[1:]
for x in lines_solve:
time.append(x.split()[0])
get_problem.append(x.split()[1])
update_state.append(x.split()[2])
balance.append(x.split()[3])
solve_buffer.append(x.split()[4])
unbalance.append(x.split()[5])
if(rankid==0):
size = np.size(time)-2
time = np.array([float(i) for i in time])
get_problem = np.array([float(i) for i in get_problem])
update_state = np.array([float(i) for i in update_state])
balance = np.array([float(i) for i in balance])
solve_buffer = np.array([float(i) for i in solve_buffer])
unbalance = np.array([float(i) for i in unbalance])
total = get_problem[:size] + update_state[:size] + balance[:size] + solve_buffer[:size] + unbalance[:size]
mean += solve_buffer[:size]
ax.plot(solve_buffer,linewidth=0.6,label='Processor ID' if rankid==0 else "")
ax.plot(mean/np.size(glob.glob('processor*')),'k--',label='Mean',linewidth=0.9)
ax.tick_params(bottom=True,top=False,left=True,right=True,labeltop=False,labelright=False,length=2,direction='in')
ax.set_ylabel('Chemistry CPU time [s]')
ax.set_xlim([0,None])
ax.legend(loc=1,frameon=False)
fig.text(0.5, -0.01, 'Number of iterations', ha='center')
fig.tight_layout()
fig.savefig('rankbased_solve.png',bbox_inches='tight',pad_inches=0,dpi=600)
plt.show()
| 1,734 | 33.7 | 114 | py |
DLBFoam-1.1_OF9 | DLBFoam-1.1_OF9/tests/validation/pyjacTests/PSRTest/computeReferenceValues.py | # Note:
# - This python script computes reference results used in the OpenFOAM validation tests.
# - Cantera environment must be created to run this python script.
# - See https://cantera.org/index.html for further information
# - The utilised mechanism is a modified GRI30 to achieve thermodynamic consistency with openfoam
import time
import cantera as ct
import numpy as np
mechRelPath = "../pyjacTestMechanism/mechanism.cti"
# Enthalpy at standard conditions
gasStd = ct.Solution(mechRelPath)
gasStd.TPX = 298.15, ct.one_atm, 'CH4:0.5,O2:1,N2:3.76'
r = ct.IdealGasConstPressureReactor(gasStd)
dh0 = np.sum(gasStd.standard_enthalpies_RT*gasStd.T*ct.gas_constant*(1/gasStd.molecular_weights)*gasStd.Y)
print("\nsum(Hf*Yi): " + repr(dh0) + "\n")
gas = ct.Solution(mechRelPath)
gas.TPX = 1000.0, 1.36789e+06, 'CH4:0.5,O2:1,N2:3.76'
r = ct.IdealGasConstPressureReactor(gas)
dh0 = np.sum(gas.standard_enthalpies_RT*gas.T*ct.gas_constant*(1/gas.molecular_weights)*gas.Y)
print("\nsum(Hf*Yi): " + repr(dh0) + "\n")
sim = ct.ReactorNet([r])
sim.verbose = False
# limit advance when temperature difference is exceeded
delta_T_max = 20.
r.set_advance_limit('temperature', delta_T_max)
states = ct.SolutionArray(gas, extra=['t'])
print('{:10s} {:10s} {:10s} {:14s}'.format(
    't [s]', 'T [K]', 'P [Pa]', 'Y_CH4 [-]'))
sim.rtol = 1e-12
sim.atol = 1e-12
tEnd = 0.07
t = time.time()
sim.advance(tEnd)
elapsed = time.time() - t
states.append(r.thermo.state, t=sim.time*1e3)
print('{:10.3e} {:10.6f} {:10.3f} {:14.10f}'.format(
    sim.time, r.T, r.thermo.P, gas.Y[gas.species_index('CH4')]))  # was: r.thermo.u
print("\n Wall clock time: " + repr(elapsed))
| 1,669 | 32.4 | 106 | py |
EfficientDet | EfficientDet-master/inference.py | import cv2
import json
import numpy as np
import os
import time
import glob
from model import efficientdet
from utils import preprocess_image, postprocess_boxes
from utils.draw_boxes import draw_boxes
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
phi = 1
weighted_bifpn = True
model_path = 'efficientdet-d1.h5'
image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
image_size = image_sizes[phi]
# coco classes
classes = {value['id'] - 1: value['name'] for value in json.load(open('coco_90.json', 'r')).values()}
num_classes = 90
score_threshold = 0.3
colors = [np.random.randint(0, 256, 3).tolist() for _ in range(num_classes)]
_, model = efficientdet(phi=phi,
weighted_bifpn=weighted_bifpn,
num_classes=num_classes,
score_threshold=score_threshold)
model.load_weights(model_path, by_name=True)
for image_path in glob.glob('datasets/VOC2007/JPEGImages/*.jpg'):
image = cv2.imread(image_path)
src_image = image.copy()
# BGR -> RGB
image = image[:, :, ::-1]
h, w = image.shape[:2]
image, scale = preprocess_image(image, image_size=image_size)
# run network
start = time.time()
boxes, scores, labels = model.predict_on_batch([np.expand_dims(image, axis=0)])
boxes, scores, labels = np.squeeze(boxes), np.squeeze(scores), np.squeeze(labels)
print(time.time() - start)
boxes = postprocess_boxes(boxes=boxes, scale=scale, height=h, width=w)
# select indices which have a score above the threshold
indices = np.where(scores[:] > score_threshold)[0]
# select those detections
boxes = boxes[indices]
labels = labels[indices]
draw_boxes(src_image, boxes, scores, labels, colors, classes)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', src_image)
cv2.waitKey(0)
if __name__ == '__main__':
main()
| 2,032 | 31.269841 | 105 | py |
EfficientDet | EfficientDet-master/inference_frozen_graph.py | import tensorflow as tf
import numpy as np
import cv2
import os
import time
from utils import preprocess_image
from tensorflow.python.platform import gfile
from utils.anchors import anchors_for_shape
from utils.draw_boxes import draw_boxes
from utils.post_process_boxes import post_process_boxes
def get_frozen_graph(graph_file):
with tf.gfile.FastGFile(graph_file, "rb") as f:
graph_def = tf.compat.v1.GraphDef()
graph_def.ParseFromString(f.read())
return graph_def
def main():
phi = 1
model_path = 'checkpoints/2019-12-03/pascal_05.pb'
image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
image_size = image_sizes[phi]
classes = [
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train',
'tvmonitor',
]
num_classes = len(classes)
score_threshold = 0.5
colors = [np.random.randint(0, 256, 3).tolist() for i in range(num_classes)]
output_names = {
'output_boxes': 'filtered_detections/map/TensorArrayStack/TensorArrayGatherV3:0',
'output_scores': 'filtered_detections/map/TensorArrayStack_1/TensorArrayGatherV3:0',
'output_labels': 'filtered_detections/map/TensorArrayStack_2/TensorArrayGatherV3:0'
}
    # The original bare `tf.Graph(); graph.as_default()` pair was a no-op
    # (the context manager was never entered); the graph def is imported
    # into the default graph that the session uses.
    sess = tf.Session()
    graph_def = get_frozen_graph(model_path)
    tf.import_graph_def(graph_def, name='')
output_boxes = sess.graph.get_tensor_by_name(output_names["output_boxes"])
output_scores = sess.graph.get_tensor_by_name(output_names['output_scores'])
output_labels = sess.graph.get_tensor_by_name(output_names['output_labels'])
image_path = 'datasets/VOC2007/JPEGImages/000002.jpg'
image = cv2.imread(image_path)
src_image = image.copy()
image = image[:, :, ::-1]
h, w = image.shape[:2]
image, scale, offset_h, offset_w = preprocess_image(image, image_size=image_size)
anchors = anchors_for_shape((image_size, image_size))
# run network
start = time.time()
image_batch = np.expand_dims(image, axis=0)
anchors_batch = np.expand_dims(anchors, axis=0)
feed_dict = {"input_1:0": image_batch, "input_4:0": anchors_batch}
boxes, scores, labels = sess.run([output_boxes, output_scores, output_labels], feed_dict)
boxes, scores, labels = np.squeeze(boxes), np.squeeze(scores), np.squeeze(labels)
print(time.time() - start)
boxes = post_process_boxes(boxes=boxes,
scale=scale,
offset_h=offset_h,
offset_w=offset_w,
height=h,
width=w)
# select indices which have a score above the threshold
indices = np.where(scores[:] > score_threshold)[0]
# select those detections
boxes = boxes[indices]
labels = labels[indices]
draw_boxes(src_image, boxes, scores, labels, colors, classes)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', src_image)
cv2.waitKey(0)
if __name__ == '__main__':
main()
| 3,187 | 33.652174 | 109 | py |
EfficientDet | EfficientDet-master/initializers.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import keras
from tensorflow import keras
import numpy as np
import math
class PriorProbability(keras.initializers.Initializer):
""" Apply a prior probability to the weights.
"""
def __init__(self, probability=0.01):
self.probability = probability
def get_config(self):
return {
'probability': self.probability
}
def __call__(self, shape, dtype=None):
# set bias to -log((1 - p)/p) for foreground
result = np.ones(shape, dtype=np.float32) * -math.log((1 - self.probability) / self.probability)
return result
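

if __name__ == '__main__':
    # Quick sanity check (illustrative, not part of the library): the
    # bias -log((1 - p) / p) makes the initial sigmoid output equal p,
    # so with the default p = 0.01 every anchor starts out predicting
    # foreground with probability 0.01 (the RetinaNet prior trick).
    init = PriorProbability(probability=0.01)
    bias = init((1,))[0]                       # approx -4.595
    print(bias, 1.0 / (1.0 + np.exp(-bias)))   # sigmoid(bias) approx 0.01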
| 1,177 | 27.731707 | 104 | py |
EfficientDet | EfficientDet-master/losses.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import keras
from tensorflow import keras
import tensorflow as tf
def focal(alpha=0.25, gamma=1.5):
"""
Create a functor for computing the focal loss.
Args
alpha: Scale the focal weight with alpha.
gamma: Take the power of the focal weight with gamma.
Returns
A functor that computes the focal loss using the alpha and gamma.
"""
def _focal(y_true, y_pred):
"""
Compute the focal loss given the target tensor and the predicted tensor.
As defined in https://arxiv.org/abs/1708.02002
Args
y_true: Tensor of target data from the generator with shape (B, N, num_classes).
y_pred: Tensor of predicted data from the network with shape (B, N, num_classes).
Returns
The focal loss of y_pred w.r.t. y_true.
"""
labels = y_true[:, :, :-1]
# -1 for ignore, 0 for background, 1 for object
anchor_state = y_true[:, :, -1]
classification = y_pred
# filter out "ignore" anchors
indices = tf.where(keras.backend.not_equal(anchor_state, -1))
labels = tf.gather_nd(labels, indices)
classification = tf.gather_nd(classification, indices)
# compute the focal loss
alpha_factor = keras.backend.ones_like(labels) * alpha
alpha_factor = tf.where(keras.backend.equal(labels, 1), alpha_factor, 1 - alpha_factor)
# (1 - 0.99) ** 2 = 1e-4, (1 - 0.9) ** 2 = 1e-2
focal_weight = tf.where(keras.backend.equal(labels, 1), 1 - classification, classification)
focal_weight = alpha_factor * focal_weight ** gamma
cls_loss = focal_weight * keras.backend.binary_crossentropy(labels, classification)
# compute the normalizer: the number of positive anchors
normalizer = tf.where(keras.backend.equal(anchor_state, 1))
normalizer = keras.backend.cast(keras.backend.shape(normalizer)[0], keras.backend.floatx())
normalizer = keras.backend.maximum(keras.backend.cast_to_floatx(1.0), normalizer)
return keras.backend.sum(cls_loss) / normalizer
return _focal
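# Worked example of the focal weighting (illustrative): for a positive
# anchor with alpha = 0.25 and gamma = 1.5, a confident prediction of 0.9
# is weighted by 0.25 * (1 - 0.9)**1.5 ~= 7.9e-3, while an unsure
# prediction of 0.5 gets 0.25 * 0.5**1.5 ~= 8.8e-2 (roughly 11x more), so
# hard examples dominate the sum before it is normalized by the number of
# positive anchors.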
def smooth_l1(sigma=3.0):
"""
Create a smooth L1 loss functor.
Args
sigma: This argument defines the point where the loss changes from L2 to L1.
Returns
A functor for computing the smooth L1 loss given target data and predicted data.
"""
sigma_squared = sigma ** 2
def _smooth_l1(y_true, y_pred):
""" Compute the smooth L1 loss of y_pred w.r.t. y_true.
Args
y_true: Tensor from the generator of shape (B, N, 5). The last value for each box is the state of the anchor (ignore, negative, positive).
y_pred: Tensor from the network of shape (B, N, 4).
Returns
The smooth L1 loss of y_pred w.r.t. y_true.
"""
# separate target and state
regression = y_pred
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
# filter out "ignore" anchors
indices = tf.where(keras.backend.equal(anchor_state, 1))
regression = tf.gather_nd(regression, indices)
regression_target = tf.gather_nd(regression_target, indices)
# compute smooth L1 loss
# f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
# |x| - 0.5 / sigma / sigma otherwise
regression_diff = regression - regression_target
regression_diff = keras.backend.abs(regression_diff)
regression_loss = tf.where(
keras.backend.less(regression_diff, 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff, 2),
regression_diff - 0.5 / sigma_squared
)
# compute the normalizer: the number of positive anchors
normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])
normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())
return keras.backend.sum(regression_loss) / normalizer
return _smooth_l1
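# Continuity check at the branch point (illustrative): with sigma = 3 the
# switch happens at |x| = 1 / sigma**2 = 1/9. There the quadratic branch
# gives 0.5 * 9 * (1/9)**2 = 1/18 and the linear branch gives
# 1/9 - 0.5/9 = 1/18, so the two pieces of the loss meet exactly.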
def smooth_l1_quad(sigma=3.0):
"""
Create a smooth L1 loss functor.
Args
sigma: This argument defines the point where the loss changes from L2 to L1.
Returns
A functor for computing the smooth L1 loss given target data and predicted data.
"""
sigma_squared = sigma ** 2
def _smooth_l1(y_true, y_pred):
""" Compute the smooth L1 loss of y_pred w.r.t. y_true.
Args
            y_true: Tensor from the generator of shape (B, N, 10): 4 box values, 4 quadrilateral alphas and 1 ratio, with the anchor state (ignore, negative, positive) as the last value.
            y_pred: Tensor from the network of shape (B, N, 9).
Returns
The smooth L1 loss of y_pred w.r.t. y_true.
"""
# separate target and state
regression = y_pred
regression = tf.concat([regression[..., :4], tf.sigmoid(regression[..., 4:9])], axis=-1)
regression_target = y_true[:, :, :-1]
anchor_state = y_true[:, :, -1]
# filter out "ignore" anchors
indices = tf.where(keras.backend.equal(anchor_state, 1))
regression = tf.gather_nd(regression, indices)
regression_target = tf.gather_nd(regression_target, indices)
# compute smooth L1 loss
# f(x) = 0.5 * (sigma * x)^2 if |x| < 1 / sigma / sigma
# |x| - 0.5 / sigma / sigma otherwise
regression_diff = regression - regression_target
regression_diff = keras.backend.abs(regression_diff)
box_regression_loss = tf.where(
keras.backend.less(regression_diff[..., :4], 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff[..., :4], 2),
regression_diff[..., :4] - 0.5 / sigma_squared
)
alpha_regression_loss = tf.where(
keras.backend.less(regression_diff[..., 4:8], 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff[..., 4:8], 2),
regression_diff[..., 4:8] - 0.5 / sigma_squared
)
ratio_regression_loss = tf.where(
keras.backend.less(regression_diff[..., 8], 1.0 / sigma_squared),
0.5 * sigma_squared * keras.backend.pow(regression_diff[..., 8], 2),
regression_diff[..., 8] - 0.5 / sigma_squared
)
# compute the normalizer: the number of positive anchors
normalizer = keras.backend.maximum(1, keras.backend.shape(indices)[0])
normalizer = keras.backend.cast(normalizer, dtype=keras.backend.floatx())
box_regression_loss = tf.reduce_sum(box_regression_loss) / normalizer
alpha_regression_loss = tf.reduce_sum(alpha_regression_loss) / normalizer
ratio_regression_loss = tf.reduce_sum(ratio_regression_loss) / normalizer
return box_regression_loss + alpha_regression_loss + 16 * ratio_regression_loss
return _smooth_l1
| 7,520 | 39.005319 | 150 | py |
EfficientDet | EfficientDet-master/callbacks.py | from tensorflow.keras.callbacks import Callback
import tensorflow.keras.backend as K
import numpy as np
class CosineAnnealingScheduler(Callback):
def __init__(self, cycle_iterations, min_lr, t_mu=2, start_iteration=0):
self.iteration_id = 0
self.start_iteration = start_iteration
self.cycle_iteration_id = 0
self.lrs = []
self.min_lr = min_lr
self.cycle_iterations = cycle_iterations
self.t_mu = t_mu
super(CosineAnnealingScheduler, self).__init__()
def on_batch_end(self, batch, logs):
if self.iteration_id > self.start_iteration:
            # cosine decay goes from 1 at the start of a cycle to 0 at its end
            cosine_decay = 0.5 * (1 + np.cos(np.pi * (self.cycle_iteration_id / self.cycle_iterations)))
decayed_lr = (self.max_lr - self.min_lr) * cosine_decay + self.min_lr
K.set_value(self.model.optimizer.lr, decayed_lr)
if self.cycle_iteration_id == self.cycle_iterations:
self.cycle_iteration_id = 0
self.cycle_iterations = int(self.cycle_iterations * self.t_mu)
else:
self.cycle_iteration_id = self.cycle_iteration_id + 1
self.lrs.append(decayed_lr)
elif self.iteration_id == self.start_iteration:
self.max_lr = K.get_value(self.model.optimizer.lr)
self.iteration_id += 1
def on_train_begin(self, logs={}):
self.max_lr = K.get_value(self.model.optimizer.lr)
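# Minimal usage sketch (the model and generator names are illustrative):
# restart the cosine schedule every 1000 iterations and double the cycle
# length after each restart (t_mu=2 is the default):
#
#   scheduler = CosineAnnealingScheduler(cycle_iterations=1000, min_lr=1e-5)
#   model.fit_generator(train_generator, epochs=50, callbacks=[scheduler])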
class ExponentialScheduler(Callback):
def __init__(self, min_lr, max_lr, iterations):
self.factor = np.exp(np.log(max_lr / min_lr) / iterations)
self.min_lr = min_lr
self.max_lr = max_lr
# debug
self.lrs = []
self.losses = []
def on_batch_end(self, batch, logs):
lr = K.get_value(self.model.optimizer.lr)
self.lrs.append(lr)
self.losses.append(logs["loss"])
K.set_value(self.model.optimizer.lr, lr * self.factor)
def on_train_begin(self, logs={}):
K.set_value(self.model.optimizer.lr, self.min_lr)
class LinearWarmUpScheduler(Callback):
def __init__(self, iterations, min_lr):
self.iterations = iterations
self.min_lr = min_lr
self.iteration_id = 0
# debug
self.lrs = []
def on_batch_begin(self, batch, logs):
if self.iteration_id < self.iterations:
lr = (self.max_lr - self.min_lr) / self.iterations * (self.iteration_id + 1) + self.min_lr
K.set_value(self.model.optimizer.lr, lr)
self.iteration_id += 1
self.lrs.append(K.get_value(self.model.optimizer.lr))
def on_train_begin(self, logs={}):
self.max_lr = K.get_value(self.model.optimizer.lr)
K.set_value(self.model.optimizer.lr, self.min_lr)
self.lrs.append(K.get_value(self.model.optimizer.lr))
| 2,830 | 36.746667 | 104 | py |
EfficientDet | EfficientDet-master/efficientnet.py | # Copyright 2019 The TensorFlow Authors, Pavel Yakubovskiy, Björn Barz. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions for EfficientNet model.
[1] Mingxing Tan, Quoc V. Le
EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks.
ICML'19, https://arxiv.org/abs/1905.11946
"""
# Code of this model implementation is mostly written by
# Björn Barz ([@Callidior](https://github.com/Callidior))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import math
import string
import collections
import numpy as np
from six.moves import xrange
from keras_applications.imagenet_utils import _obtain_input_shape
from keras_applications.imagenet_utils import decode_predictions
from keras_applications.imagenet_utils import preprocess_input as _preprocess_input
from utils import get_submodules_from_kwargs
from layers import BatchNormalization
backend = None
layers = None
models = None
keras_utils = None
BASE_WEIGHTS_PATH = (
'https://github.com/Callidior/keras-applications/'
'releases/download/efficientnet/')
WEIGHTS_HASHES = {
'efficientnet-b0': ('163292582f1c6eaca8e7dc7b51b01c61'
'5b0dbc0039699b4dcd0b975cc21533dc',
'c1421ad80a9fc67c2cc4000f666aa507'
'89ce39eedb4e06d531b0c593890ccff3'),
'efficientnet-b1': ('d0a71ddf51ef7a0ca425bab32b7fa7f1'
'6043ee598ecee73fc674d9560c8f09b0',
'75de265d03ac52fa74f2f510455ba64f'
'9c7c5fd96dc923cd4bfefa3d680c4b68'),
'efficientnet-b2': ('bb5451507a6418a574534aa76a91b106'
'f6b605f3b5dde0b21055694319853086',
'433b60584fafba1ea3de07443b74cfd3'
'2ce004a012020b07ef69e22ba8669333'),
'efficientnet-b3': ('03f1fba367f070bd2545f081cfa7f3e7'
'6f5e1aa3b6f4db700f00552901e75ab9',
'c5d42eb6cfae8567b418ad3845cfd63a'
'a48b87f1bd5df8658a49375a9f3135c7'),
'efficientnet-b4': ('98852de93f74d9833c8640474b2c698d'
'b45ec60690c75b3bacb1845e907bf94f',
'7942c1407ff1feb34113995864970cd4'
'd9d91ea64877e8d9c38b6c1e0767c411'),
'efficientnet-b5': ('30172f1d45f9b8a41352d4219bf930ee'
'3339025fd26ab314a817ba8918fefc7d',
'9d197bc2bfe29165c10a2af8c2ebc675'
'07f5d70456f09e584c71b822941b1952'),
'efficientnet-b6': ('f5270466747753485a082092ac9939ca'
'a546eb3f09edca6d6fff842cad938720',
'1d0923bb038f2f8060faaf0a0449db4b'
'96549a881747b7c7678724ac79f427ed'),
'efficientnet-b7': ('876a41319980638fa597acbbf956a82d'
'10819531ff2dcb1a52277f10c7aefa1a',
'60b56ff3a8daccc8d96edfd40b204c11'
'3e51748da657afd58034d54d3cec2bac')
}
BlockArgs = collections.namedtuple('BlockArgs', [
'kernel_size', 'num_repeat', 'input_filters', 'output_filters',
'expand_ratio', 'id_skip', 'strides', 'se_ratio'
])
# defaults will be a public argument for namedtuple in Python 3.7
# https://docs.python.org/3/library/collections.html#collections.namedtuple
BlockArgs.__new__.__defaults__ = (None,) * len(BlockArgs._fields)
DEFAULT_BLOCKS_ARGS = [
BlockArgs(kernel_size=3, num_repeat=1, input_filters=32, output_filters=16,
expand_ratio=1, id_skip=True, strides=[1, 1], se_ratio=0.25),
BlockArgs(kernel_size=3, num_repeat=2, input_filters=16, output_filters=24,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=5, num_repeat=2, input_filters=24, output_filters=40,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=3, num_repeat=3, input_filters=40, output_filters=80,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=5, num_repeat=3, input_filters=80, output_filters=112,
expand_ratio=6, id_skip=True, strides=[1, 1], se_ratio=0.25),
BlockArgs(kernel_size=5, num_repeat=4, input_filters=112, output_filters=192,
expand_ratio=6, id_skip=True, strides=[2, 2], se_ratio=0.25),
BlockArgs(kernel_size=3, num_repeat=1, input_filters=192, output_filters=320,
expand_ratio=6, id_skip=True, strides=[1, 1], se_ratio=0.25)
]
CONV_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 2.0,
'mode': 'fan_out',
# EfficientNet actually uses an untruncated normal distribution for
# initializing conv layers, but keras.initializers.VarianceScaling use
# a truncated distribution.
# We decided against a custom initializer for better serializability.
'distribution': 'normal'
}
}
DENSE_KERNEL_INITIALIZER = {
'class_name': 'VarianceScaling',
'config': {
'scale': 1. / 3.,
'mode': 'fan_out',
'distribution': 'uniform'
}
}
def preprocess_input(x, **kwargs):
kwargs = {k: v for k, v in kwargs.items() if k in ['backend', 'layers', 'models', 'utils']}
return _preprocess_input(x, mode='torch', **kwargs)
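# Note: mode='torch' in keras_applications scales pixels to [0, 1] and
# then normalizes each channel with the ImageNet statistics
# (mean [0.485, 0.456, 0.406], std [0.229, 0.224, 0.225]).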
def get_swish(**kwargs):
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
def swish(x):
"""Swish activation function: x * sigmoid(x).
Reference: [Searching for Activation Functions](https://arxiv.org/abs/1710.05941)
"""
if backend.backend() == 'tensorflow':
try:
# The native TF implementation has a more
# memory-efficient gradient implementation
return backend.tf.nn.swish(x)
except AttributeError:
pass
return x * backend.sigmoid(x)
return swish
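# Numeric intuition (illustrative): swish(0) = 0, swish(1) ~= 0.731 and
# swish(-1) ~= -0.269; for large positive x the sigmoid gate saturates to
# 1, so swish approaches the identity while staying smooth and slightly
# non-monotonic around the origin.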
def get_dropout(**kwargs):
"""Wrapper over custom dropout. Fix problem of ``None`` shape for tf.keras.
It is not possible to define FixedDropout class as global object,
because we do not have modules for inheritance at first time.
Issue:
https://github.com/tensorflow/tensorflow/issues/30946
"""
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
class FixedDropout(layers.Dropout):
def _get_noise_shape(self, inputs):
if self.noise_shape is None:
return self.noise_shape
symbolic_shape = backend.shape(inputs)
noise_shape = [symbolic_shape[axis] if shape is None else shape
for axis, shape in enumerate(self.noise_shape)]
return tuple(noise_shape)
return FixedDropout
def round_filters(filters, width_coefficient, depth_divisor):
"""Round number of filters based on width multiplier."""
filters *= width_coefficient
new_filters = int(filters + depth_divisor / 2) // depth_divisor * depth_divisor
new_filters = max(depth_divisor, new_filters)
# Make sure that round down does not go down by more than 10%.
if new_filters < 0.9 * filters:
new_filters += depth_divisor
return int(new_filters)
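# Worked examples (illustrative): round_filters(32, 1.0, 8) -> 32, while
# round_filters(32, 1.4, 8) -> 48, since 32 * 1.4 = 44.8 rounds to the
# nearest multiple of 8 (48), which is not more than 10% below 44.8, so
# the correction branch is not taken.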
def round_repeats(repeats, depth_coefficient):
"""Round number of repeats based on depth multiplier."""
return int(math.ceil(depth_coefficient * repeats))
def mb_conv_block(inputs, block_args, activation, drop_rate=None, prefix='', freeze_bn=False):
"""Mobile Inverted Residual Bottleneck."""
has_se = (block_args.se_ratio is not None) and (0 < block_args.se_ratio <= 1)
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
# workaround over non working dropout with None in noise_shape in tf.keras
Dropout = get_dropout(
backend=backend,
layers=layers,
models=models,
utils=keras_utils
)
# Expansion phase
filters = block_args.input_filters * block_args.expand_ratio
if block_args.expand_ratio != 1:
x = layers.Conv2D(filters, 1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'expand_conv')(inputs)
# x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name=prefix + 'expand_bn')(x)
x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'expand_bn')(x)
x = layers.Activation(activation, name=prefix + 'expand_activation')(x)
else:
x = inputs
# Depthwise Convolution
x = layers.DepthwiseConv2D(block_args.kernel_size,
strides=block_args.strides,
padding='same',
use_bias=False,
depthwise_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'dwconv')(x)
# x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name=prefix + 'bn')(x)
x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'bn')(x)
x = layers.Activation(activation, name=prefix + 'activation')(x)
# Squeeze and Excitation phase
if has_se:
num_reduced_filters = max(1, int(
block_args.input_filters * block_args.se_ratio
))
se_tensor = layers.GlobalAveragePooling2D(name=prefix + 'se_squeeze')(x)
target_shape = (1, 1, filters) if backend.image_data_format() == 'channels_last' else (filters, 1, 1)
se_tensor = layers.Reshape(target_shape, name=prefix + 'se_reshape')(se_tensor)
se_tensor = layers.Conv2D(num_reduced_filters, 1,
activation=activation,
padding='same',
use_bias=True,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'se_reduce')(se_tensor)
se_tensor = layers.Conv2D(filters, 1,
activation='sigmoid',
padding='same',
use_bias=True,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'se_expand')(se_tensor)
if backend.backend() == 'theano':
# For the Theano backend, we have to explicitly make
# the excitation weights broadcastable.
pattern = ([True, True, True, False] if backend.image_data_format() == 'channels_last'
else [True, False, True, True])
se_tensor = layers.Lambda(
lambda x: backend.pattern_broadcast(x, pattern),
name=prefix + 'se_broadcast')(se_tensor)
x = layers.multiply([x, se_tensor], name=prefix + 'se_excite')
# Output phase
x = layers.Conv2D(block_args.output_filters, 1,
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name=prefix + 'project_conv')(x)
# x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name=prefix + 'project_bn')(x)
x = layers.BatchNormalization(axis=bn_axis, name=prefix + 'project_bn')(x)
if block_args.id_skip and all(
s == 1 for s in block_args.strides
) and block_args.input_filters == block_args.output_filters:
if drop_rate and (drop_rate > 0):
x = Dropout(drop_rate,
noise_shape=(None, 1, 1, 1),
name=prefix + 'drop')(x)
x = layers.add([x, inputs], name=prefix + 'add')
return x
def EfficientNet(width_coefficient,
depth_coefficient,
default_resolution,
dropout_rate=0.2,
drop_connect_rate=0.2,
depth_divisor=8,
blocks_args=DEFAULT_BLOCKS_ARGS,
model_name='efficientnet',
include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
freeze_bn=False,
**kwargs):
"""Instantiates the EfficientNet architecture using given scaling coefficients.
Optionally loads weights pre-trained on ImageNet.
Note that the data format convention used by the model is
the one specified in your Keras config at `~/.keras/keras.json`.
# Arguments
width_coefficient: float, scaling coefficient for network width.
depth_coefficient: float, scaling coefficient for network depth.
default_resolution: int, default input image size.
dropout_rate: float, dropout rate before final classifier layer.
drop_connect_rate: float, dropout rate at skip connections.
depth_divisor: int.
blocks_args: A list of BlockArgs to construct block modules.
model_name: string, model name.
include_top: whether to include the fully-connected
layer at the top of the network.
weights: one of `None` (random initialization),
'imagenet' (pre-training on ImageNet),
or the path to the weights file to be loaded.
input_tensor: optional Keras tensor
(i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: optional shape tuple, only to be specified
if `include_top` is False.
It should have exactly 3 inputs channels.
pooling: optional pooling mode for feature extraction
when `include_top` is `False`.
- `None` means that the output of the model will be
the 4D tensor output of the
last convolutional layer.
- `avg` means that global average pooling
will be applied to the output of the
last convolutional layer, and thus
the output of the model will be a 2D tensor.
- `max` means that global max pooling will
be applied.
classes: optional number of classes to classify images
into, only to be specified if `include_top` is True, and
if no `weights` argument is specified.
# Returns
        A list of five backbone feature tensors (at strides 2, 4, 8, 16
        and 32) rather than a full Keras model; this EfficientNet variant
        is trimmed down to serve as the EfficientDet backbone.
# Raises
ValueError: in case of invalid argument for `weights`,
or invalid input shape.
"""
global backend, layers, models, keras_utils
backend, layers, models, keras_utils = get_submodules_from_kwargs(kwargs)
features = []
if not (weights in {'imagenet', None} or os.path.exists(weights)):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `imagenet` '
'(pre-training on ImageNet), '
'or the path to the weights file to be loaded.')
if weights == 'imagenet' and include_top and classes != 1000:
raise ValueError('If using `weights` as `"imagenet"` with `include_top`'
' as true, `classes` should be 1000')
# Determine proper input shape
input_shape = _obtain_input_shape(input_shape,
default_size=default_resolution,
min_size=32,
data_format=backend.image_data_format(),
require_flatten=include_top,
weights=weights)
if input_tensor is None:
img_input = layers.Input(shape=input_shape)
else:
if backend.backend() == 'tensorflow':
from tensorflow.python.keras.backend import is_keras_tensor
else:
is_keras_tensor = backend.is_keras_tensor
if not is_keras_tensor(input_tensor):
img_input = layers.Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1
activation = get_swish(**kwargs)
# Build stem
x = img_input
x = layers.Conv2D(round_filters(32, width_coefficient, depth_divisor), 3,
strides=(2, 2),
padding='same',
use_bias=False,
kernel_initializer=CONV_KERNEL_INITIALIZER,
name='stem_conv')(x)
# x = BatchNormalization(freeze=freeze_bn, axis=bn_axis, name='stem_bn')(x)
x = layers.BatchNormalization(axis=bn_axis, name='stem_bn')(x)
x = layers.Activation(activation, name='stem_activation')(x)
# Build blocks
num_blocks_total = sum(block_args.num_repeat for block_args in blocks_args)
block_num = 0
for idx, block_args in enumerate(blocks_args):
assert block_args.num_repeat > 0
# Update block input and output filters based on depth multiplier.
block_args = block_args._replace(
input_filters=round_filters(block_args.input_filters,
width_coefficient, depth_divisor),
output_filters=round_filters(block_args.output_filters,
width_coefficient, depth_divisor),
num_repeat=round_repeats(block_args.num_repeat, depth_coefficient))
# The first block needs to take care of stride and filter size increase.
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
x = mb_conv_block(x, block_args,
activation=activation,
drop_rate=drop_rate,
prefix='block{}a_'.format(idx + 1),
freeze_bn=freeze_bn
)
block_num += 1
if block_args.num_repeat > 1:
# pylint: disable=protected-access
block_args = block_args._replace(
input_filters=block_args.output_filters, strides=[1, 1])
# pylint: enable=protected-access
for bidx in xrange(block_args.num_repeat - 1):
drop_rate = drop_connect_rate * float(block_num) / num_blocks_total
block_prefix = 'block{}{}_'.format(
idx + 1,
string.ascii_lowercase[bidx + 1]
)
x = mb_conv_block(x, block_args,
activation=activation,
drop_rate=drop_rate,
prefix=block_prefix,
freeze_bn=freeze_bn
)
block_num += 1
if idx < len(blocks_args) - 1 and blocks_args[idx + 1].strides[0] == 2:
features.append(x)
elif idx == len(blocks_args) - 1:
features.append(x)
return features
def EfficientNetB0(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.0, 1.0, 224, 0.2,
model_name='efficientnet-b0',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB1(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.0, 1.1, 240, 0.2,
model_name='efficientnet-b1',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB2(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.1, 1.2, 260, 0.3,
model_name='efficientnet-b2',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB3(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.2, 1.4, 300, 0.3,
model_name='efficientnet-b3',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB4(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.4, 1.8, 380, 0.4,
model_name='efficientnet-b4',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB5(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.6, 2.2, 456, 0.4,
model_name='efficientnet-b5',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB6(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(1.8, 2.6, 528, 0.5,
model_name='efficientnet-b6',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
def EfficientNetB7(include_top=True,
weights='imagenet',
input_tensor=None,
input_shape=None,
pooling=None,
classes=1000,
**kwargs):
return EfficientNet(2.0, 3.1, 600, 0.5,
model_name='efficientnet-b7',
include_top=include_top, weights=weights,
input_tensor=input_tensor, input_shape=input_shape,
pooling=pooling, classes=classes,
**kwargs)
setattr(EfficientNetB0, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB1, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB2, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB3, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB4, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB5, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB6, '__doc__', EfficientNet.__doc__)
setattr(EfficientNetB7, '__doc__', EfficientNet.__doc__)
| 24,768 | 42.001736 | 109 | py |
EfficientDet | EfficientDet-master/setup.py | from distutils.core import setup
from Cython.Build import cythonize
import numpy
setup(
ext_modules=cythonize("utils/compute_overlap.pyx"),
include_dirs=[numpy.get_include()]
)
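
# Typical invocation (standard distutils usage): build the Cython
# extension in place so that `utils.compute_overlap` becomes importable:
#
#   python setup.py build_ext --inplace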
| 186 | 19.777778 | 55 | py |
EfficientDet | EfficientDet-master/model.py | from functools import reduce
# from keras import layers
# from keras import initializers
# from keras import models
# from keras_ import EfficientNetB0, EfficientNetB1, EfficientNetB2
# from keras_ import EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import initializers
from tensorflow.keras import models
from tfkeras import EfficientNetB0, EfficientNetB1, EfficientNetB2
from tfkeras import EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6
from layers import ClipBoxes, RegressBoxes, FilterDetections, wBiFPNAdd, BatchNormalization
from initializers import PriorProbability
from utils.anchors import anchors_for_shape
import numpy as np
w_bifpns = [64, 88, 112, 160, 224, 288, 384]
d_bifpns = [3, 4, 5, 6, 7, 7, 8]
d_heads = [3, 3, 3, 4, 4, 4, 5]
image_sizes = [512, 640, 768, 896, 1024, 1280, 1408]
backbones = [EfficientNetB0, EfficientNetB1, EfficientNetB2,
EfficientNetB3, EfficientNetB4, EfficientNetB5, EfficientNetB6]
MOMENTUM = 0.997
EPSILON = 1e-4
def SeparableConvBlock(num_channels, kernel_size, strides, name, freeze_bn=False):
f1 = layers.SeparableConv2D(num_channels, kernel_size=kernel_size, strides=strides, padding='same',
use_bias=True, name=f'{name}/conv')
f2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{name}/bn')
# f2 = BatchNormalization(freeze=freeze_bn, name=f'{name}/bn')
return reduce(lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)), (f1, f2))
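# The `reduce` above is plain function composition: the returned callable
# is equivalent to `lambda *args, **kwargs: f2(f1(*args, **kwargs))`,
# i.e. the separable convolution followed by batch normalization.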
def ConvBlock(num_channels, kernel_size, strides, name, freeze_bn=False):
f1 = layers.Conv2D(num_channels, kernel_size=kernel_size, strides=strides, padding='same',
use_bias=True, name='{}_conv'.format(name))
f2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name='{}_bn'.format(name))
# f2 = BatchNormalization(freeze=freeze_bn, name='{}_bn'.format(name))
f3 = layers.ReLU(name='{}_relu'.format(name))
return reduce(lambda f, g: lambda *args, **kwargs: g(f(*args, **kwargs)), (f1, f2, f3))
def build_wBiFPN(features, num_channels, id, freeze_bn=False):
if id == 0:
_, _, C3, C4, C5 = features
P3_in = C3
P4_in = C4
P5_in = C5
P6_in = layers.Conv2D(num_channels, kernel_size=1, padding='same', name='resample_p6/conv2d')(C5)
P6_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name='resample_p6/bn')(P6_in)
# P6_in = BatchNormalization(freeze=freeze_bn, name='resample_p6/bn')(P6_in)
P6_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p6/maxpool')(P6_in)
P7_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p7/maxpool')(P6_in)
P7_U = layers.UpSampling2D()(P7_in)
P6_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P5_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/conv2d')(P5_in)
P5_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
# P5_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in_1, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P4_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/conv2d')(P4_in)
P4_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
# P4_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in_1, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P3_in = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/conv2d')(P3_in)
P3_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
# P3_in = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P4_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/conv2d')(P4_in)
P4_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
# P4_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in_2, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P5_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/conv2d')(P5_in)
P5_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
# P5_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in_2, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
else:
P3_in, P4_in, P5_in, P6_in, P7_in = features
P7_U = layers.UpSampling2D()(P7_in)
P6_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = wBiFPNAdd(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
return P3_out, P4_td, P5_td, P6_td, P7_out
def build_BiFPN(features, num_channels, id, freeze_bn=False):
if id == 0:
_, _, C3, C4, C5 = features
P3_in = C3
P4_in = C4
P5_in = C5
P6_in = layers.Conv2D(num_channels, kernel_size=1, padding='same', name='resample_p6/conv2d')(C5)
P6_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name='resample_p6/bn')(P6_in)
# P6_in = BatchNormalization(freeze=freeze_bn, name='resample_p6/bn')(P6_in)
P6_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p6/maxpool')(P6_in)
P7_in = layers.MaxPooling2D(pool_size=3, strides=2, padding='same', name='resample_p7/maxpool')(P6_in)
P7_U = layers.UpSampling2D()(P7_in)
P6_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P5_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/conv2d')(P5_in)
P5_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
# P5_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode1/resample_0_2_6/bn')(P5_in_1)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in_1, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P4_in_1 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/conv2d')(P4_in)
P4_in_1 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
# P4_in_1 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode2/resample_0_1_7/bn')(P4_in_1)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in_1, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P3_in = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/conv2d')(P3_in)
P3_in = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
# P3_in = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode3/resample_0_0_8/bn')(P3_in)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P4_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/conv2d')(P4_in)
P4_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
# P4_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode4/resample_0_1_9/bn')(P4_in_2)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in_2, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P5_in_2 = layers.Conv2D(num_channels, kernel_size=1, padding='same',
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/conv2d')(P5_in)
P5_in_2 = layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON,
name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
# P5_in_2 = BatchNormalization(freeze=freeze_bn, name=f'fpn_cells/cell_{id}/fnode5/resample_0_2_10/bn')(P5_in_2)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in_2, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
else:
P3_in, P4_in, P5_in, P6_in, P7_in = features
P7_U = layers.UpSampling2D()(P7_in)
P6_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode0/add')([P6_in, P7_U])
P6_td = layers.Activation(lambda x: tf.nn.swish(x))(P6_td)
P6_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode0/op_after_combine5')(P6_td)
P6_U = layers.UpSampling2D()(P6_td)
P5_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode1/add')([P5_in, P6_U])
P5_td = layers.Activation(lambda x: tf.nn.swish(x))(P5_td)
P5_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode1/op_after_combine6')(P5_td)
P5_U = layers.UpSampling2D()(P5_td)
P4_td = layers.Add(name=f'fpn_cells/cell_{id}/fnode2/add')([P4_in, P5_U])
P4_td = layers.Activation(lambda x: tf.nn.swish(x))(P4_td)
P4_td = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode2/op_after_combine7')(P4_td)
P4_U = layers.UpSampling2D()(P4_td)
P3_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode3/add')([P3_in, P4_U])
P3_out = layers.Activation(lambda x: tf.nn.swish(x))(P3_out)
P3_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode3/op_after_combine8')(P3_out)
P3_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P3_out)
P4_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode4/add')([P4_in, P4_td, P3_D])
P4_out = layers.Activation(lambda x: tf.nn.swish(x))(P4_out)
P4_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode4/op_after_combine9')(P4_out)
P4_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P4_out)
P5_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode5/add')([P5_in, P5_td, P4_D])
P5_out = layers.Activation(lambda x: tf.nn.swish(x))(P5_out)
P5_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode5/op_after_combine10')(P5_out)
P5_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P5_out)
P6_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode6/add')([P6_in, P6_td, P5_D])
P6_out = layers.Activation(lambda x: tf.nn.swish(x))(P6_out)
P6_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode6/op_after_combine11')(P6_out)
P6_D = layers.MaxPooling2D(pool_size=3, strides=2, padding='same')(P6_out)
P7_out = layers.Add(name=f'fpn_cells/cell_{id}/fnode7/add')([P7_in, P6_D])
P7_out = layers.Activation(lambda x: tf.nn.swish(x))(P7_out)
P7_out = SeparableConvBlock(num_channels=num_channels, kernel_size=3, strides=1,
name=f'fpn_cells/cell_{id}/fnode7/op_after_combine12')(P7_out)
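    # note: as in build_wBiFPN above, the mid levels return the top-down
    # (P*_td) features instead of the fused P*_out ones.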
return P3_out, P4_td, P5_td, P6_td, P7_out
class BoxNet(models.Model):
def __init__(self, width, depth, num_anchors=9, separable_conv=True, freeze_bn=False, detect_quadrangle=False, **kwargs):
super(BoxNet, self).__init__(**kwargs)
self.width = width
self.depth = depth
self.num_anchors = num_anchors
self.separable_conv = separable_conv
self.detect_quadrangle = detect_quadrangle
        # 4 box offsets, plus 4 quadrangle alphas and 1 area ratio when detecting quadrangles
        num_values = 9 if detect_quadrangle else 4
options = {
'kernel_size': 3,
'strides': 1,
'padding': 'same',
'bias_initializer': 'zeros',
}
if separable_conv:
kernel_initializer = {
'depthwise_initializer': initializers.VarianceScaling(),
'pointwise_initializer': initializers.VarianceScaling(),
}
options.update(kernel_initializer)
self.convs = [layers.SeparableConv2D(filters=width, name=f'{self.name}/box-{i}', **options) for i in
range(depth)]
self.head = layers.SeparableConv2D(filters=num_anchors * num_values,
name=f'{self.name}/box-predict', **options)
else:
kernel_initializer = {
'kernel_initializer': initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
}
options.update(kernel_initializer)
self.convs = [layers.Conv2D(filters=width, name=f'{self.name}/box-{i}', **options) for i in range(depth)]
self.head = layers.Conv2D(filters=num_anchors * num_values, name=f'{self.name}/box-predict', **options)
self.bns = [
[layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/box-{i}-bn-{j}') for j in
range(3, 8)]
for i in range(depth)]
# self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/box-{i}-bn-{j}') for j in range(3, 8)]
# for i in range(depth)]
self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
self.reshape = layers.Reshape((-1, num_values))
self.level = 0
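    # note: `call` below ignores the passed-in `level` and instead advances the
    # `self.level` counter, so the head must be called exactly once per pyramid
    # level, in order; this selects a distinct BatchNormalization per level
    # while sharing the conv weights across levels.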
def call(self, inputs, **kwargs):
feature, level = inputs
for i in range(self.depth):
feature = self.convs[i](feature)
feature = self.bns[i][self.level](feature)
feature = self.relu(feature)
outputs = self.head(feature)
outputs = self.reshape(outputs)
self.level += 1
return outputs
class ClassNet(models.Model):
def __init__(self, width, depth, num_classes=20, num_anchors=9, separable_conv=True, freeze_bn=False, **kwargs):
super(ClassNet, self).__init__(**kwargs)
self.width = width
self.depth = depth
self.num_classes = num_classes
self.num_anchors = num_anchors
self.separable_conv = separable_conv
options = {
'kernel_size': 3,
'strides': 1,
'padding': 'same',
}
if self.separable_conv:
kernel_initializer = {
'depthwise_initializer': initializers.VarianceScaling(),
'pointwise_initializer': initializers.VarianceScaling(),
}
options.update(kernel_initializer)
self.convs = [layers.SeparableConv2D(filters=width, bias_initializer='zeros', name=f'{self.name}/class-{i}',
**options)
for i in range(depth)]
self.head = layers.SeparableConv2D(filters=num_classes * num_anchors,
bias_initializer=PriorProbability(probability=0.01),
name=f'{self.name}/class-predict', **options)
else:
kernel_initializer = {
'kernel_initializer': initializers.RandomNormal(mean=0.0, stddev=0.01, seed=None)
}
options.update(kernel_initializer)
self.convs = [layers.Conv2D(filters=width, bias_initializer='zeros', name=f'{self.name}/class-{i}',
**options)
for i in range(depth)]
self.head = layers.Conv2D(filters=num_classes * num_anchors,
bias_initializer=PriorProbability(probability=0.01),
                                      # name scoped like the separable branch so by-name weight loading matches
                                      name=f'{self.name}/class-predict', **options)
self.bns = [
[layers.BatchNormalization(momentum=MOMENTUM, epsilon=EPSILON, name=f'{self.name}/class-{i}-bn-{j}') for j
in range(3, 8)]
for i in range(depth)]
# self.bns = [[BatchNormalization(freeze=freeze_bn, name=f'{self.name}/class-{i}-bn-{j}') for j in range(3, 8)]
# for i in range(depth)]
self.relu = layers.Lambda(lambda x: tf.nn.swish(x))
self.reshape = layers.Reshape((-1, num_classes))
self.activation = layers.Activation('sigmoid')
self.level = 0
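    # note: like BoxNet, `call` relies on the `self.level` counter (one call
    # per pyramid level, in order) to select the per-level BatchNormalization.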
def call(self, inputs, **kwargs):
feature, level = inputs
for i in range(self.depth):
feature = self.convs[i](feature)
feature = self.bns[i][self.level](feature)
feature = self.relu(feature)
outputs = self.head(feature)
outputs = self.reshape(outputs)
outputs = self.activation(outputs)
self.level += 1
return outputs
def efficientdet(phi, num_classes=20, num_anchors=9, weighted_bifpn=False, freeze_bn=False,
score_threshold=0.01, detect_quadrangle=False, anchor_parameters=None, separable_conv=True):
assert phi in range(7)
input_size = image_sizes[phi]
input_shape = (input_size, input_size, 3)
image_input = layers.Input(input_shape)
w_bifpn = w_bifpns[phi]
d_bifpn = d_bifpns[phi]
w_head = w_bifpn
d_head = d_heads[phi]
backbone_cls = backbones[phi]
features = backbone_cls(input_tensor=image_input, freeze_bn=freeze_bn)
if weighted_bifpn:
fpn_features = features
for i in range(d_bifpn):
fpn_features = build_wBiFPN(fpn_features, w_bifpn, i, freeze_bn=freeze_bn)
else:
fpn_features = features
for i in range(d_bifpn):
fpn_features = build_BiFPN(fpn_features, w_bifpn, i, freeze_bn=freeze_bn)
box_net = BoxNet(w_head, d_head, num_anchors=num_anchors, separable_conv=separable_conv, freeze_bn=freeze_bn,
detect_quadrangle=detect_quadrangle, name='box_net')
class_net = ClassNet(w_head, d_head, num_classes=num_classes, num_anchors=num_anchors,
separable_conv=separable_conv, freeze_bn=freeze_bn, name='class_net')
classification = [class_net([feature, i]) for i, feature in enumerate(fpn_features)]
classification = layers.Concatenate(axis=1, name='classification')(classification)
regression = [box_net([feature, i]) for i, feature in enumerate(fpn_features)]
regression = layers.Concatenate(axis=1, name='regression')(regression)
model = models.Model(inputs=[image_input], outputs=[classification, regression], name='efficientdet')
# apply predicted regression to anchors
anchors = anchors_for_shape((input_size, input_size), anchor_params=anchor_parameters)
anchors_input = np.expand_dims(anchors, axis=0)
boxes = RegressBoxes(name='boxes')([anchors_input, regression[..., :4]])
boxes = ClipBoxes(name='clipped_boxes')([image_input, boxes])
# filter detections (apply NMS / score threshold / select top-k)
if detect_quadrangle:
detections = FilterDetections(
name='filtered_detections',
score_threshold=score_threshold,
detect_quadrangle=True
)([boxes, classification, regression[..., 4:8], regression[..., 8]])
else:
detections = FilterDetections(
name='filtered_detections',
score_threshold=score_threshold
)([boxes, classification])
prediction_model = models.Model(inputs=[image_input], outputs=detections, name='efficientdet_p')
return model, prediction_model
if __name__ == '__main__':
x, y = efficientdet(1)
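    # x is the training model, y the prediction model with box decoding and
    # NMS attached; e.g. x.summary() prints the full EfficientDet-D1 graph.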
| 29,621 | 61.362105 | 125 | py |
EfficientDet | EfficientDet-master/layers.py | # import keras
from tensorflow import keras
import tensorflow as tf
class BatchNormalization(keras.layers.BatchNormalization):
"""
Identical to keras.layers.BatchNormalization, but adds the option to freeze parameters.
"""
def __init__(self, freeze, *args, **kwargs):
self.freeze = freeze
super(BatchNormalization, self).__init__(*args, **kwargs)
# set to non-trainable if freeze is true
self.trainable = not self.freeze
def call(self, inputs, training=None, **kwargs):
        # delegate to the parent call, but override the `training` flag; note
        # that `not training` also covers training=None and training=0, so
        # inference mode is the default unless `training` is truthy
        if not training:
return super(BatchNormalization, self).call(inputs, training=False)
else:
return super(BatchNormalization, self).call(inputs, training=(not self.freeze))
def get_config(self):
config = super(BatchNormalization, self).get_config()
config.update({'freeze': self.freeze})
return config
class wBiFPNAdd(keras.layers.Layer):
def __init__(self, epsilon=1e-4, **kwargs):
super(wBiFPNAdd, self).__init__(**kwargs)
self.epsilon = epsilon
def build(self, input_shape):
num_in = len(input_shape)
self.w = self.add_weight(name=self.name,
shape=(num_in,),
initializer=keras.initializers.constant(1 / num_in),
trainable=True,
dtype=tf.float32)
def call(self, inputs, **kwargs):
w = keras.activations.relu(self.w)
x = tf.reduce_sum([w[i] * inputs[i] for i in range(len(inputs))], axis=0)
x = x / (tf.reduce_sum(w) + self.epsilon)
return x
def compute_output_shape(self, input_shape):
return input_shape[0]
def get_config(self):
config = super(wBiFPNAdd, self).get_config()
config.update({
'epsilon': self.epsilon
})
return config
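# Minimal fusion sketch (illustrative, not part of the original file): shows the
# "fast normalized fusion" that wBiFPNAdd implements; the input shapes here are
# arbitrary assumptions for the demo.
def _demo_wbifpn_add():
    a = tf.ones((1, 8, 8, 4))
    b = tf.zeros((1, 8, 8, 4))
    # freshly initialized weights are 1 / num_inputs each, so the fused map is
    # roughly (0.5 * a + 0.5 * b) / (0.5 + 0.5 + 1e-4), i.e. ~0.5 everywhere
    return wBiFPNAdd()([a, b])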
def bbox_transform_inv(boxes, deltas, scale_factors=None):
cxa = (boxes[..., 0] + boxes[..., 2]) / 2
cya = (boxes[..., 1] + boxes[..., 3]) / 2
wa = boxes[..., 2] - boxes[..., 0]
ha = boxes[..., 3] - boxes[..., 1]
ty, tx, th, tw = deltas[..., 0], deltas[..., 1], deltas[..., 2], deltas[..., 3]
if scale_factors:
ty *= scale_factors[0]
tx *= scale_factors[1]
th *= scale_factors[2]
tw *= scale_factors[3]
w = tf.exp(tw) * wa
h = tf.exp(th) * ha
cy = ty * ha + cya
cx = tx * wa + cxa
ymin = cy - h / 2.
xmin = cx - w / 2.
ymax = cy + h / 2.
xmax = cx + w / 2.
return tf.stack([xmin, ymin, xmax, ymax], axis=-1)
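# Sanity-check sketch (illustrative): with all-zero deltas the decoded box is
# the anchor itself, since cx = cxa, cy = cya, w = wa and h = ha above.
def _demo_bbox_transform_inv():
    anchors = tf.constant([[0., 0., 10., 10.]])
    deltas = tf.zeros((1, 4))
    return bbox_transform_inv(anchors, deltas)  # -> [[0., 0., 10., 10.]]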
class ClipBoxes(keras.layers.Layer):
def call(self, inputs, **kwargs):
image, boxes = inputs
shape = keras.backend.cast(keras.backend.shape(image), keras.backend.floatx())
height = shape[1]
width = shape[2]
x1 = tf.clip_by_value(boxes[:, :, 0], 0, width - 1)
y1 = tf.clip_by_value(boxes[:, :, 1], 0, height - 1)
x2 = tf.clip_by_value(boxes[:, :, 2], 0, width - 1)
y2 = tf.clip_by_value(boxes[:, :, 3], 0, height - 1)
return keras.backend.stack([x1, y1, x2, y2], axis=2)
def compute_output_shape(self, input_shape):
return input_shape[1]
class RegressBoxes(keras.layers.Layer):
def __init__(self, *args, **kwargs):
super(RegressBoxes, self).__init__(*args, **kwargs)
def call(self, inputs, **kwargs):
anchors, regression = inputs
return bbox_transform_inv(anchors, regression)
def compute_output_shape(self, input_shape):
return input_shape[0]
def get_config(self):
config = super(RegressBoxes, self).get_config()
return config
def filter_detections(
boxes,
classification,
alphas=None,
ratios=None,
class_specific_filter=True,
nms=True,
score_threshold=0.01,
max_detections=100,
nms_threshold=0.5,
detect_quadrangle=False,
):
"""
Filter detections using the boxes and classification values.
Args
boxes: Tensor of shape (num_boxes, 4) containing the boxes in (x1, y1, x2, y2) format.
classification: Tensor of shape (num_boxes, num_classes) containing the classification scores.
        alphas: Optional tensor of shape (num_boxes, 4) with quadrangle vertex offsets; only used when detect_quadrangle is True.
        ratios: Optional tensor of shape (num_boxes,) with quadrangle area ratios; only used when detect_quadrangle is True.
        class_specific_filter: Whether to perform filtering per class, or take the best scoring class and filter those.
        nms: Flag to enable/disable non maximum suppression.
        score_threshold: Threshold used to prefilter the boxes with.
        max_detections: Maximum number of detections to keep.
        nms_threshold: Threshold for the IoU value to determine when a box should be suppressed.
        detect_quadrangle: If True, also filter and return the quadrangle outputs (alphas and ratios).
    Returns
        A list of [boxes, scores, labels], or [boxes, scores, alphas, ratios, labels] when detect_quadrangle is True.
        boxes is shaped (max_detections, 4) and contains the (x1, y1, x2, y2) of the non-suppressed boxes.
        scores is shaped (max_detections,) and contains the scores of the predicted class.
        labels is shaped (max_detections,) and contains the predicted label.
        In case there are fewer than max_detections detections, the tensors are padded with -1's.
"""
def _filter_detections(scores_, labels_):
# threshold based on score
# (num_score_keeps, 1)
indices_ = tf.where(keras.backend.greater(scores_, score_threshold))
if nms:
# (num_score_keeps, 4)
filtered_boxes = tf.gather_nd(boxes, indices_)
# In [4]: scores = np.array([0.1, 0.5, 0.4, 0.2, 0.7, 0.2])
# In [5]: tf.greater(scores, 0.4)
# Out[5]: <tf.Tensor: id=2, shape=(6,), dtype=bool, numpy=array([False, True, False, False, True, False])>
# In [6]: tf.where(tf.greater(scores, 0.4))
# Out[6]:
# <tf.Tensor: id=7, shape=(2, 1), dtype=int64, numpy=
# array([[1],
# [4]])>
#
# In [7]: tf.gather(scores, tf.where(tf.greater(scores, 0.4)))
# Out[7]:
# <tf.Tensor: id=15, shape=(2, 1), dtype=float64, numpy=
# array([[0.5],
# [0.7]])>
filtered_scores = keras.backend.gather(scores_, indices_)[:, 0]
# perform NMS
# filtered_boxes = tf.concat([filtered_boxes[..., 1:2], filtered_boxes[..., 0:1],
# filtered_boxes[..., 3:4], filtered_boxes[..., 2:3]], axis=-1)
nms_indices = tf.image.non_max_suppression(filtered_boxes, filtered_scores, max_output_size=max_detections,
iou_threshold=nms_threshold)
# filter indices based on NMS
# (num_score_nms_keeps, 1)
indices_ = keras.backend.gather(indices_, nms_indices)
# add indices to list of all indices
# (num_score_nms_keeps, )
labels_ = tf.gather_nd(labels_, indices_)
# (num_score_nms_keeps, 2)
indices_ = keras.backend.stack([indices_[:, 0], labels_], axis=1)
return indices_
if class_specific_filter:
all_indices = []
# perform per class filtering
for c in range(int(classification.shape[1])):
scores = classification[:, c]
labels = c * tf.ones((keras.backend.shape(scores)[0],), dtype='int64')
all_indices.append(_filter_detections(scores, labels))
# concatenate indices to single tensor
# (concatenated_num_score_nms_keeps, 2)
indices = keras.backend.concatenate(all_indices, axis=0)
else:
scores = keras.backend.max(classification, axis=1)
labels = keras.backend.argmax(classification, axis=1)
indices = _filter_detections(scores, labels)
# select top k
scores = tf.gather_nd(classification, indices)
labels = indices[:, 1]
scores, top_indices = tf.nn.top_k(scores, k=keras.backend.minimum(max_detections, keras.backend.shape(scores)[0]))
# filter input using the final set of indices
indices = keras.backend.gather(indices[:, 0], top_indices)
boxes = keras.backend.gather(boxes, indices)
labels = keras.backend.gather(labels, top_indices)
# zero pad the outputs
pad_size = keras.backend.maximum(0, max_detections - keras.backend.shape(scores)[0])
boxes = tf.pad(boxes, [[0, pad_size], [0, 0]], constant_values=-1)
scores = tf.pad(scores, [[0, pad_size]], constant_values=-1)
labels = tf.pad(labels, [[0, pad_size]], constant_values=-1)
labels = keras.backend.cast(labels, 'int32')
# set shapes, since we know what they are
boxes.set_shape([max_detections, 4])
scores.set_shape([max_detections])
labels.set_shape([max_detections])
if detect_quadrangle:
alphas = keras.backend.gather(alphas, indices)
ratios = keras.backend.gather(ratios, indices)
alphas = tf.pad(alphas, [[0, pad_size], [0, 0]], constant_values=-1)
ratios = tf.pad(ratios, [[0, pad_size]], constant_values=-1)
alphas.set_shape([max_detections, 4])
ratios.set_shape([max_detections])
return [boxes, scores, alphas, ratios, labels]
else:
return [boxes, scores, labels]
class FilterDetections(keras.layers.Layer):
"""
Keras layer for filtering detections using score threshold and NMS.
"""
def __init__(
self,
nms=True,
class_specific_filter=True,
nms_threshold=0.5,
score_threshold=0.01,
max_detections=100,
parallel_iterations=32,
detect_quadrangle=False,
**kwargs
):
"""
Filters detections using score threshold, NMS and selecting the top-k detections.
Args
nms: Flag to enable/disable NMS.
class_specific_filter: Whether to perform filtering per class, or take the best scoring class and filter those.
nms_threshold: Threshold for the IoU value to determine when a box should be suppressed.
score_threshold: Threshold used to prefilter the boxes with.
max_detections: Maximum number of detections to keep.
            parallel_iterations: Number of batch items to process in parallel.
            detect_quadrangle: If True, expect and filter quadrangle tensors (alphas and ratios) as well.
"""
self.nms = nms
self.class_specific_filter = class_specific_filter
self.nms_threshold = nms_threshold
self.score_threshold = score_threshold
self.max_detections = max_detections
self.parallel_iterations = parallel_iterations
self.detect_quadrangle = detect_quadrangle
super(FilterDetections, self).__init__(**kwargs)
def call(self, inputs, **kwargs):
"""
Constructs the NMS graph.
Args
inputs : List of [boxes, classification, other[0], other[1], ...] tensors.
"""
boxes = inputs[0]
classification = inputs[1]
if self.detect_quadrangle:
alphas = inputs[2]
ratios = inputs[3]
# wrap nms with our parameters
def _filter_detections(args):
boxes_ = args[0]
classification_ = args[1]
alphas_ = args[2] if self.detect_quadrangle else None
ratios_ = args[3] if self.detect_quadrangle else None
return filter_detections(
boxes_,
classification_,
alphas_,
ratios_,
nms=self.nms,
class_specific_filter=self.class_specific_filter,
score_threshold=self.score_threshold,
max_detections=self.max_detections,
nms_threshold=self.nms_threshold,
detect_quadrangle=self.detect_quadrangle,
)
# call filter_detections on each batch item
if self.detect_quadrangle:
outputs = tf.map_fn(
_filter_detections,
elems=[boxes, classification, alphas, ratios],
dtype=['float32', 'float32', 'float32', 'float32', 'int32'],
parallel_iterations=self.parallel_iterations
)
else:
outputs = tf.map_fn(
_filter_detections,
elems=[boxes, classification],
dtype=['float32', 'float32', 'int32'],
parallel_iterations=self.parallel_iterations
)
return outputs
def compute_output_shape(self, input_shape):
"""
Computes the output shapes given the input shapes.
Args
input_shape : List of input shapes [boxes, classification].
Returns
List of tuples representing the output shapes:
[filtered_boxes.shape, filtered_scores.shape, filtered_labels.shape, filtered_other[0].shape, filtered_other[1].shape, ...]
"""
if self.detect_quadrangle:
return [
(input_shape[0][0], self.max_detections, 4),
(input_shape[1][0], self.max_detections),
(input_shape[1][0], self.max_detections, 4),
(input_shape[1][0], self.max_detections),
(input_shape[1][0], self.max_detections),
]
else:
return [
(input_shape[0][0], self.max_detections, 4),
(input_shape[1][0], self.max_detections),
(input_shape[1][0], self.max_detections),
]
def compute_mask(self, inputs, mask=None):
"""
This is required in Keras when there is more than 1 output.
"""
return (len(inputs) + 1) * [None]
def get_config(self):
"""
Gets the configuration of this layer.
Returns
Dictionary containing the parameters of this layer.
"""
config = super(FilterDetections, self).get_config()
config.update({
'nms': self.nms,
'class_specific_filter': self.class_specific_filter,
'nms_threshold': self.nms_threshold,
'score_threshold': self.score_threshold,
'max_detections': self.max_detections,
'parallel_iterations': self.parallel_iterations,
})
return config
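# Usage sketch (illustrative, hypothetical shapes): FilterDetections maps
# batched boxes plus per-class scores to fixed-size, -1 padded detections.
def _demo_filter_detections():
    boxes = tf.zeros((1, 100, 4))
    classification = tf.random.uniform((1, 100, 20))
    # returns [boxes (1, 10, 4), scores (1, 10), labels (1, 10)]
    return FilterDetections(max_detections=10)([boxes, classification])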
| 14,393 | 36.978892 | 135 | py |
EfficientDet | EfficientDet-master/inference_quad.py | from model import efficientdet
import cv2
import os
import numpy as np
import time
from utils import preprocess_image
from utils.anchors import anchors_for_shape, AnchorParameters
import os.path as osp
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
phi = 3
weighted_bifpn = False
model_path = 'checkpoints/2020-02-20/csv_02_1.6506_2.5878_w.h5'
image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
image_size = image_sizes[phi]
# classes = [
# 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair',
# 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor',
# ]
classes = ['text']
num_classes = len(classes)
anchor_parameters = AnchorParameters(
ratios=(0.25, 0.5, 1., 2.),
sizes=(16, 32, 64, 128, 256))
score_threshold = 0.4
colors = [np.random.randint(0, 256, 3).tolist() for i in range(num_classes)]
model, prediction_model = efficientdet(phi=phi,
weighted_bifpn=weighted_bifpn,
num_classes=num_classes,
num_anchors=anchor_parameters.num_anchors(),
score_threshold=score_threshold,
detect_quadrangle=True,
anchor_parameters=anchor_parameters,
)
prediction_model.load_weights(model_path, by_name=True)
import glob
for image_path in glob.glob('datasets/ic15/test_images/*.jpg'):
image = cv2.imread(image_path)
src_image = image.copy()
image = image[:, :, ::-1]
h, w = image.shape[:2]
image, scale, offset_h, offset_w = preprocess_image(image, image_size=image_size)
inputs = np.expand_dims(image, axis=0)
anchors = anchors_for_shape((image_size, image_size), anchor_params=anchor_parameters)
# run network
start = time.time()
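    # note: this passes anchors as a second model input; the efficientdet() in
    # model.py embeds anchors as constants, so this call assumes a model
    # variant that exposes anchors as an explicit input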
boxes, scores, alphas, ratios, labels = prediction_model.predict_on_batch([np.expand_dims(image, axis=0),
np.expand_dims(anchors, axis=0)])
# alphas = np.exp(alphas)
alphas = 1 / (1 + np.exp(-alphas))
ratios = 1 / (1 + np.exp(-ratios))
quadrangles = np.zeros(boxes.shape[:2] + (8,))
quadrangles[:, :, 0] = boxes[:, :, 0] + (boxes[:, :, 2] - boxes[:, :, 0]) * alphas[:, :, 0]
quadrangles[:, :, 1] = boxes[:, :, 1]
quadrangles[:, :, 2] = boxes[:, :, 2]
quadrangles[:, :, 3] = boxes[:, :, 1] + (boxes[:, :, 3] - boxes[:, :, 1]) * alphas[:, :, 1]
quadrangles[:, :, 4] = boxes[:, :, 2] - (boxes[:, :, 2] - boxes[:, :, 0]) * alphas[:, :, 2]
quadrangles[:, :, 5] = boxes[:, :, 3]
quadrangles[:, :, 6] = boxes[:, :, 0]
quadrangles[:, :, 7] = boxes[:, :, 3] - (boxes[:, :, 3] - boxes[:, :, 1]) * alphas[:, :, 3]
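    # the eight coordinates above invert the training-time parametrization in
    # generators/common.py: each sigmoid-ed alpha slides one quadrangle vertex
    # along its bounding-box edge (top, right, bottom, left, in that order)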
print(time.time() - start)
boxes[0, :, [0, 2]] = boxes[0, :, [0, 2]] - offset_w
boxes[0, :, [1, 3]] = boxes[0, :, [1, 3]] - offset_h
boxes /= scale
boxes[0, :, 0] = np.clip(boxes[0, :, 0], 0, w - 1)
boxes[0, :, 1] = np.clip(boxes[0, :, 1], 0, h - 1)
boxes[0, :, 2] = np.clip(boxes[0, :, 2], 0, w - 1)
boxes[0, :, 3] = np.clip(boxes[0, :, 3], 0, h - 1)
quadrangles[0, :, [0, 2, 4, 6]] = quadrangles[0, :, [0, 2, 4, 6]] - offset_w
quadrangles[0, :, [1, 3, 5, 7]] = quadrangles[0, :, [1, 3, 5, 7]] - offset_h
quadrangles /= scale
quadrangles[0, :, [0, 2, 4, 6]] = np.clip(quadrangles[0, :, [0, 2, 4, 6]], 0, w - 1)
quadrangles[0, :, [1, 3, 5, 7]] = np.clip(quadrangles[0, :, [1, 3, 5, 7]], 0, h - 1)
# select indices which have a score above the threshold
indices = np.where(scores[0, :] > score_threshold)[0]
# select those detections
boxes = boxes[0, indices]
scores = scores[0, indices]
labels = labels[0, indices]
quadrangles = quadrangles[0, indices]
ratios = ratios[0, indices]
for bbox, score, label, quadrangle, ratio in zip(boxes, scores, labels, quadrangles, ratios):
xmin = int(round(bbox[0]))
ymin = int(round(bbox[1]))
xmax = int(round(bbox[2]))
ymax = int(round(bbox[3]))
score = '{:.4f}'.format(score)
class_id = int(label)
color = colors[class_id]
class_name = classes[class_id]
label = '-'.join([class_name, score])
ret, baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
cv2.rectangle(src_image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 1)
# cv2.rectangle(src_image, (xmin, ymax - ret[1] - baseline), (xmin + ret[0], ymax), color, -1)
# cv2.putText(src_image, label, (xmin, ymax - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
cv2.putText(src_image, f'{ratio:.2f}', (xmin + (xmax - xmin) // 3, (ymin + ymax) // 2),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), 1)
cv2.drawContours(src_image, [quadrangle.astype(np.int32).reshape((4, 2))], -1, (0, 0, 255), 1)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', src_image)
cv2.waitKey(0)
| 5,103 | 44.981982 | 120 | py |
EfficientDet | EfficientDet-master/tfkeras.py | from utils import inject_tfkeras_modules, init_tfkeras_custom_objects
import efficientnet as model
EfficientNetB0 = inject_tfkeras_modules(model.EfficientNetB0)
EfficientNetB1 = inject_tfkeras_modules(model.EfficientNetB1)
EfficientNetB2 = inject_tfkeras_modules(model.EfficientNetB2)
EfficientNetB3 = inject_tfkeras_modules(model.EfficientNetB3)
EfficientNetB4 = inject_tfkeras_modules(model.EfficientNetB4)
EfficientNetB5 = inject_tfkeras_modules(model.EfficientNetB5)
EfficientNetB6 = inject_tfkeras_modules(model.EfficientNetB6)
EfficientNetB7 = inject_tfkeras_modules(model.EfficientNetB7)
preprocess_input = inject_tfkeras_modules(model.preprocess_input)
init_tfkeras_custom_objects()
| 694 | 42.4375 | 69 | py |
EfficientDet | EfficientDet-master/freeze_model.py | from model import efficientdet
import cv2
import os
import numpy as np
import time
from utils import preprocess_image
import tensorflow as tf
from tensorflow.keras import backend as K
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def freeze_session(session, keep_var_names=None, output_names=None, clear_devices=True):
graph = session.graph
with graph.as_default():
freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
output_names = output_names or []
output_names += [v.op.name for v in tf.global_variables()]
input_graph_def = graph.as_graph_def()
if clear_devices:
for node in input_graph_def.node:
node.device = ""
frozen_graph = tf.graph_util.convert_variables_to_constants(
session, input_graph_def, output_names, freeze_var_names)
return frozen_graph
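# Loading sketch (illustrative, TF 1.x API; the path is the one main() writes):
# with tf.gfile.GFile('./checkpoints/2019-12-03/pascal_05.pb', 'rb') as f:
#     graph_def = tf.GraphDef()
#     graph_def.ParseFromString(f.read())
# tf.import_graph_def(graph_def, name='')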
def main():
phi = 1
weighted_bifpn = False
model_path = 'checkpoints/2019-12-03/pascal_05_0.6283_1.1975_0.8029.h5'
image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
image_size = image_sizes[phi]
classes = [
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor',
]
num_classes = len(classes)
score_threshold = 0.5
model, prediction_model = efficientdet(phi=phi,
weighted_bifpn=weighted_bifpn,
num_classes=num_classes,
score_threshold=score_threshold)
prediction_model.load_weights(model_path, by_name=True)
frozen_graph = freeze_session(K.get_session(), output_names=[out.op.name for out in prediction_model.outputs])
tf.train.write_graph(frozen_graph, "./checkpoints/2019-12-03/", "pascal_05.pb", as_text=False)
if __name__ == '__main__':
main()
| 2,011 | 38.45098 | 122 | py |
EfficientDet | EfficientDet-master/train.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
from datetime import date
import os
import sys
import tensorflow as tf
# import keras
# import keras.preprocessing.image
# import keras.backend as K
# from keras.optimizers import Adam, SGD
from tensorflow import keras
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam, SGD
from augmentor.color import VisualEffect
from augmentor.misc import MiscEffect
from model import efficientdet
from losses import smooth_l1, focal, smooth_l1_quad
from efficientnet import BASE_WEIGHTS_PATH, WEIGHTS_HASHES
def makedirs(path):
# Intended behavior: try to create the directory,
    # pass if the directory already exists, and fail otherwise.
# Meant for Python 2.7/3.n compatibility.
try:
os.makedirs(path)
except OSError:
if not os.path.isdir(path):
raise
def get_session():
"""
Construct a modified tf session.
"""
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
return tf.Session(config=config)
def create_callbacks(training_model, prediction_model, validation_generator, args):
"""
Creates the callbacks to use during training.
Args
training_model: The model that is used for training.
prediction_model: The model that should be used for validation.
validation_generator: The generator for creating validation data.
args: parseargs args object.
Returns:
A list of callbacks used for training.
"""
callbacks = []
tensorboard_callback = None
if args.tensorboard_dir:
if tf.version.VERSION > '2.0.0':
file_writer = tf.summary.create_file_writer(args.tensorboard_dir)
file_writer.set_as_default()
tensorboard_callback = keras.callbacks.TensorBoard(
log_dir=args.tensorboard_dir,
histogram_freq=0,
batch_size=args.batch_size,
write_graph=True,
write_grads=False,
write_images=False,
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None
)
callbacks.append(tensorboard_callback)
if args.evaluation and validation_generator:
if args.dataset_type == 'coco':
from eval.coco import Evaluate
# use prediction model for evaluation
evaluation = Evaluate(validation_generator, prediction_model, tensorboard=tensorboard_callback)
else:
from eval.pascal import Evaluate
evaluation = Evaluate(validation_generator, prediction_model, tensorboard=tensorboard_callback)
callbacks.append(evaluation)
# save the model
if args.snapshots:
# ensure directory created first; otherwise h5py will error after epoch.
makedirs(args.snapshot_path)
checkpoint = keras.callbacks.ModelCheckpoint(
os.path.join(
args.snapshot_path,
f'{args.dataset_type}_{{epoch:02d}}_{{loss:.4f}}_{{val_loss:.4f}}.h5' if args.compute_val_loss
else f'{args.dataset_type}_{{epoch:02d}}_{{loss:.4f}}.h5'
),
verbose=1,
save_weights_only=True,
# save_best_only=True,
# monitor="mAP",
# mode='max'
)
callbacks.append(checkpoint)
# callbacks.append(keras.callbacks.ReduceLROnPlateau(
# monitor='loss',
# factor=0.1,
# patience=2,
# verbose=1,
# mode='auto',
# min_delta=0.0001,
# cooldown=0,
# min_lr=0
# ))
return callbacks
def create_generators(args):
"""
Create generators for training and validation.
Args
args: parseargs object containing configuration for generators.
preprocess_image: Function that preprocesses an image for the network.
"""
common_args = {
'batch_size': args.batch_size,
'phi': args.phi,
'detect_text': args.detect_text,
'detect_quadrangle': args.detect_quadrangle
}
# create random transform generator for augmenting training data
if args.random_transform:
misc_effect = MiscEffect()
visual_effect = VisualEffect()
else:
misc_effect = None
visual_effect = None
if args.dataset_type == 'pascal':
from generators.pascal import PascalVocGenerator
train_generator = PascalVocGenerator(
args.pascal_path,
'trainval',
skip_difficult=True,
misc_effect=misc_effect,
visual_effect=visual_effect,
**common_args
)
validation_generator = PascalVocGenerator(
args.pascal_path,
'val',
skip_difficult=True,
shuffle_groups=False,
**common_args
)
elif args.dataset_type == 'csv':
from generators.csv_ import CSVGenerator
train_generator = CSVGenerator(
args.annotations_path,
args.classes_path,
misc_effect=misc_effect,
visual_effect=visual_effect,
**common_args
)
if args.val_annotations_path:
validation_generator = CSVGenerator(
args.val_annotations_path,
args.classes_path,
shuffle_groups=False,
**common_args
)
else:
validation_generator = None
elif args.dataset_type == 'coco':
# import here to prevent unnecessary dependency on cocoapi
from generators.coco import CocoGenerator
train_generator = CocoGenerator(
args.coco_path,
'train2017',
misc_effect=misc_effect,
visual_effect=visual_effect,
group_method='random',
**common_args
)
validation_generator = CocoGenerator(
args.coco_path,
'val2017',
shuffle_groups=False,
**common_args
)
else:
raise ValueError('Invalid data type received: {}'.format(args.dataset_type))
return train_generator, validation_generator
def check_args(parsed_args):
"""
Function to check for inherent contradictions within parsed arguments.
For example, batch_size < num_gpus
Intended to raise errors prior to backend initialisation.
Args
parsed_args: parser.parse_args()
Returns
parsed_args
"""
if parsed_args.gpu and parsed_args.batch_size < len(parsed_args.gpu.split(',')):
        raise ValueError(
            "Batch size ({}) must be equal to or higher than the number of GPUs ({})".format(
                parsed_args.batch_size, len(parsed_args.gpu.split(','))))
return parsed_args
def parse_args(args):
"""
Parse the arguments.
"""
today = str(date.today())
parser = argparse.ArgumentParser(description='Simple training script for training a RetinaNet network.')
subparsers = parser.add_subparsers(help='Arguments for specific dataset types.', dest='dataset_type')
subparsers.required = True
coco_parser = subparsers.add_parser('coco')
coco_parser.add_argument('coco_path', help='Path to dataset directory (ie. /tmp/COCO).')
pascal_parser = subparsers.add_parser('pascal')
pascal_parser.add_argument('pascal_path', help='Path to dataset directory (ie. /tmp/VOCdevkit).')
csv_parser = subparsers.add_parser('csv')
csv_parser.add_argument('annotations_path', help='Path to CSV file containing annotations for training.')
csv_parser.add_argument('classes_path', help='Path to a CSV file containing class label mapping.')
csv_parser.add_argument('--val-annotations-path',
help='Path to CSV file containing annotations for validation (optional).')
parser.add_argument('--detect-quadrangle', help='If to detect quadrangle.', action='store_true', default=False)
parser.add_argument('--detect-text', help='If is text detection task.', action='store_true', default=False)
parser.add_argument('--snapshot', help='Resume training from a snapshot.')
parser.add_argument('--freeze-backbone', help='Freeze training of backbone layers.', action='store_true')
parser.add_argument('--freeze-bn', help='Freeze training of BatchNormalization layers.', action='store_true')
parser.add_argument('--weighted-bifpn', help='Use weighted BiFPN', action='store_true')
parser.add_argument('--batch-size', help='Size of the batches.', default=1, type=int)
parser.add_argument('--phi', help='Hyper parameter phi', default=0, type=int, choices=(0, 1, 2, 3, 4, 5, 6))
parser.add_argument('--gpu', help='Id of the GPU to use (as reported by nvidia-smi).')
parser.add_argument('--epochs', help='Number of epochs to train.', type=int, default=50)
parser.add_argument('--steps', help='Number of steps per epoch.', type=int, default=10000)
parser.add_argument('--snapshot-path',
help='Path to store snapshots of models during training',
default='checkpoints/{}'.format(today))
parser.add_argument('--tensorboard-dir', help='Log directory for Tensorboard output',
default='logs/{}'.format(today))
parser.add_argument('--no-snapshots', help='Disable saving snapshots.', dest='snapshots', action='store_false')
parser.add_argument('--no-evaluation', help='Disable per epoch evaluation.', dest='evaluation',
action='store_false')
parser.add_argument('--random-transform', help='Randomly transform image and annotations.', action='store_true')
parser.add_argument('--compute-val-loss', help='Compute validation loss during training', dest='compute_val_loss',
action='store_true')
# Fit generator arguments
parser.add_argument('--multiprocessing', help='Use multiprocessing in fit_generator.', action='store_true')
parser.add_argument('--workers', help='Number of generator workers.', type=int, default=1)
parser.add_argument('--max-queue-size', help='Queue length for multiprocessing workers in fit_generator.', type=int,
default=10)
print(vars(parser.parse_args(args)))
return check_args(parser.parse_args(args))
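# Example invocation (illustrative; the dataset path is a placeholder):
# python train.py --snapshot imagenet --phi 0 --weighted-bifpn --gpu 0 \
#     --random-transform --compute-val-loss pascal /path/to/VOCdevkit/VOC2007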
def main(args=None):
# parse arguments
if args is None:
args = sys.argv[1:]
args = parse_args(args)
# create the generators
train_generator, validation_generator = create_generators(args)
num_classes = train_generator.num_classes()
num_anchors = train_generator.num_anchors
# optionally choose specific GPU
if args.gpu:
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
# K.set_session(get_session())
model, prediction_model = efficientdet(args.phi,
num_classes=num_classes,
num_anchors=num_anchors,
weighted_bifpn=args.weighted_bifpn,
freeze_bn=args.freeze_bn,
detect_quadrangle=args.detect_quadrangle
)
# load pretrained weights
if args.snapshot:
if args.snapshot == 'imagenet':
model_name = 'efficientnet-b{}'.format(args.phi)
file_name = '{}_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'.format(model_name)
file_hash = WEIGHTS_HASHES[model_name][1]
weights_path = keras.utils.get_file(file_name,
BASE_WEIGHTS_PATH + file_name,
cache_subdir='models',
file_hash=file_hash)
model.load_weights(weights_path, by_name=True)
else:
print('Loading model, this may take a second...')
model.load_weights(args.snapshot, by_name=True)
# freeze backbone layers
if args.freeze_backbone:
# 227, 329, 329, 374, 464, 566, 656
for i in range(1, [227, 329, 329, 374, 464, 566, 656][args.phi]):
model.layers[i].trainable = False
if args.gpu and len(args.gpu.split(',')) > 1:
model = keras.utils.multi_gpu_model(model, gpus=list(map(int, args.gpu.split(','))))
# compile model
model.compile(optimizer=Adam(lr=1e-3), loss={
'regression': smooth_l1_quad() if args.detect_quadrangle else smooth_l1(),
'classification': focal()
}, )
# print(model.summary())
# create the callbacks
callbacks = create_callbacks(
model,
prediction_model,
validation_generator,
args,
)
if not args.compute_val_loss:
validation_generator = None
elif args.compute_val_loss and validation_generator is None:
raise ValueError('When you have no validation data, you should not specify --compute-val-loss.')
# start training
return model.fit_generator(
generator=train_generator,
steps_per_epoch=args.steps,
initial_epoch=0,
epochs=args.epochs,
verbose=1,
callbacks=callbacks,
workers=args.workers,
use_multiprocessing=args.multiprocessing,
max_queue_size=args.max_queue_size,
validation_data=validation_generator
)
if __name__ == '__main__':
main()
| 14,180 | 36.123037 | 120 | py |
EfficientDet | EfficientDet-master/inference_video.py | import cv2
import json
import numpy as np
import os
import time
import glob
from model import efficientdet
from utils import preprocess_image, postprocess_boxes
from utils.draw_boxes import draw_boxes
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
phi = 1
weighted_bifpn = True
model_path = 'd1.h5'
image_sizes = (512, 640, 768, 896, 1024, 1280, 1408)
image_size = image_sizes[phi]
# coco classes
classes = {value['id'] - 1: value['name'] for value in json.load(open('coco_90.json', 'r')).values()}
num_classes = 90
score_threshold = 0.5
colors = [np.random.randint(0, 256, 3).tolist() for _ in range(num_classes)]
_, model = efficientdet(phi=phi,
weighted_bifpn=weighted_bifpn,
num_classes=num_classes,
score_threshold=score_threshold)
model.load_weights(model_path, by_name=True)
video_path = 'datasets/video.mp4'
cap = cv2.VideoCapture(video_path)
while True:
ret, image = cap.read()
if not ret:
break
src_image = image.copy()
# BGR -> RGB
image = image[:, :, ::-1]
h, w = image.shape[:2]
image, scale = preprocess_image(image, image_size=image_size)
# run network
start = time.time()
boxes, scores, labels = model.predict_on_batch([np.expand_dims(image, axis=0)])
boxes, scores, labels = np.squeeze(boxes), np.squeeze(scores), np.squeeze(labels)
print(time.time() - start)
boxes = postprocess_boxes(boxes=boxes, scale=scale, height=h, width=w)
# select indices which have a score above the threshold
indices = np.where(scores[:] > score_threshold)[0]
# select those detections
boxes = boxes[indices]
labels = labels[indices]
draw_boxes(src_image, boxes, scores, labels, colors, classes)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', src_image)
cv2.waitKey(0)
if __name__ == '__main__':
main()
| 2,074 | 29.514706 | 105 | py |
EfficientDet | EfficientDet-master/keras_.py | from utils import inject_keras_modules, init_keras_custom_objects
import efficientnet as model
EfficientNetB0 = inject_keras_modules(model.EfficientNetB0)
EfficientNetB1 = inject_keras_modules(model.EfficientNetB1)
EfficientNetB2 = inject_keras_modules(model.EfficientNetB2)
EfficientNetB3 = inject_keras_modules(model.EfficientNetB3)
EfficientNetB4 = inject_keras_modules(model.EfficientNetB4)
EfficientNetB5 = inject_keras_modules(model.EfficientNetB5)
EfficientNetB6 = inject_keras_modules(model.EfficientNetB6)
EfficientNetB7 = inject_keras_modules(model.EfficientNetB7)
preprocess_input = inject_keras_modules(model.preprocess_input)
init_keras_custom_objects()
| 670 | 40.9375 | 65 | py |
EfficientDet | EfficientDet-master/generators/common.py | import numpy as np
import random
import warnings
import cv2
from tensorflow import keras
from utils.anchors import anchors_for_shape, anchor_targets_bbox, AnchorParameters
class Generator(keras.utils.Sequence):
"""
Abstract generator class.
"""
def __init__(
self,
phi=0,
image_sizes=(512, 640, 768, 896, 1024, 1280, 1408),
misc_effect=None,
visual_effect=None,
batch_size=1,
group_method='random', # one of 'none', 'random', 'ratio'
shuffle_groups=True,
detect_text=False,
detect_quadrangle=False,
):
"""
Initialize Generator object.
Args:
batch_size: The size of the batches to generate.
            group_method: Determines how images are grouped together (defaults to 'random', one of ('none', 'random', 'ratio')).
            shuffle_groups: If True, shuffles the groups each epoch.
            image_sizes: Tuple of square input resolutions, indexed by the scaling coefficient phi.
"""
self.misc_effect = misc_effect
self.visual_effect = visual_effect
self.batch_size = int(batch_size)
self.group_method = group_method
self.shuffle_groups = shuffle_groups
self.detect_text = detect_text
self.detect_quadrangle = detect_quadrangle
self.image_size = image_sizes[phi]
self.groups = None
self.anchor_parameters = AnchorParameters.default if not self.detect_text else AnchorParameters(
ratios=(0.25, 0.5, 1., 2.),
sizes=(16, 32, 64, 128, 256))
self.anchors = anchors_for_shape((self.image_size, self.image_size), anchor_params=self.anchor_parameters)
self.num_anchors = self.anchor_parameters.num_anchors()
# Define groups
self.group_images()
# Shuffle when initializing
if self.shuffle_groups:
random.shuffle(self.groups)
def on_epoch_end(self):
if self.shuffle_groups:
random.shuffle(self.groups)
def size(self):
"""
Size of the dataset.
"""
raise NotImplementedError('size method not implemented')
def get_anchors(self):
"""
loads the anchors from a txt file
"""
with open(self.anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
# (N, 2), wh
return np.array(anchors).reshape(-1, 2)
def num_classes(self):
"""
Number of classes in the dataset.
"""
raise NotImplementedError('num_classes method not implemented')
def has_label(self, label):
"""
Returns True if label is a known label.
"""
raise NotImplementedError('has_label method not implemented')
def has_name(self, name):
"""
Returns True if name is a known class.
"""
raise NotImplementedError('has_name method not implemented')
def name_to_label(self, name):
"""
Map name to label.
"""
raise NotImplementedError('name_to_label method not implemented')
def label_to_name(self, label):
"""
Map label to name.
"""
raise NotImplementedError('label_to_name method not implemented')
def image_aspect_ratio(self, image_index):
"""
Compute the aspect ratio for an image with image_index.
"""
raise NotImplementedError('image_aspect_ratio method not implemented')
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
raise NotImplementedError('load_image method not implemented')
def load_annotations(self, image_index):
"""
Load annotations for an image_index.
"""
raise NotImplementedError('load_annotations method not implemented')
def load_annotations_group(self, group):
"""
Load annotations for all images in group.
"""
annotations_group = [self.load_annotations(image_index) for image_index in group]
for annotations in annotations_group:
assert (isinstance(annotations,
dict)), '\'load_annotations\' should return a list of dictionaries, received: {}'.format(
type(annotations))
assert (
'labels' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
assert (
'bboxes' in annotations), '\'load_annotations\' should return a list of dictionaries that contain \'labels\' and \'bboxes\'.'
return annotations_group
def filter_annotations(self, image_group, annotations_group, group):
"""
Filter annotations by removing those that are outside of the image bounds or whose width/height < 0.
"""
# test all annotations
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
# test x2 < x1 | y2 < y1 | x1 < 0 | y1 < 0 | x2 <= 0 | y2 <= 0 | x2 >= image.shape[1] | y2 >= image.shape[0]
invalid_indices = np.where(
(annotations['bboxes'][:, 2] <= annotations['bboxes'][:, 0]) |
(annotations['bboxes'][:, 3] <= annotations['bboxes'][:, 1]) |
(annotations['bboxes'][:, 0] < 0) |
(annotations['bboxes'][:, 1] < 0) |
(annotations['bboxes'][:, 2] <= 0) |
(annotations['bboxes'][:, 3] <= 0) |
(annotations['bboxes'][:, 2] > image.shape[1]) |
(annotations['bboxes'][:, 3] > image.shape[0])
)[0]
# delete invalid indices
if len(invalid_indices):
warnings.warn('Image with id {} (shape {}) contains the following invalid boxes: {}.'.format(
group[index],
image.shape,
annotations['bboxes'][invalid_indices, :]
))
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], invalid_indices, axis=0)
# if annotations['bboxes'].shape[0] == 0:
# warnings.warn('Image with id {} (shape {}) contains no valid boxes before transform'.format(
# group[index],
# image.shape,
# ))
return image_group, annotations_group
def clip_transformed_annotations(self, image_group, annotations_group, group):
"""
        Clip annotation boxes to the image bounds and drop those whose width or height becomes smaller than 3 pixels.
"""
# test all annotations
filtered_image_group = []
filtered_annotations_group = []
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
image_height = image.shape[0]
image_width = image.shape[1]
# x1
annotations['bboxes'][:, 0] = np.clip(annotations['bboxes'][:, 0], 0, image_width - 2)
# y1
annotations['bboxes'][:, 1] = np.clip(annotations['bboxes'][:, 1], 0, image_height - 2)
# x2
annotations['bboxes'][:, 2] = np.clip(annotations['bboxes'][:, 2], 1, image_width - 1)
# y2
annotations['bboxes'][:, 3] = np.clip(annotations['bboxes'][:, 3], 1, image_height - 1)
            # test for boxes that became degenerate after clipping (width or height < 3 px)
small_indices = np.where(
(annotations['bboxes'][:, 2] - annotations['bboxes'][:, 0] < 3) |
(annotations['bboxes'][:, 3] - annotations['bboxes'][:, 1] < 3)
)[0]
# delete invalid indices
if len(small_indices):
for k in annotations_group[index].keys():
annotations_group[index][k] = np.delete(annotations[k], small_indices, axis=0)
# import cv2
# for invalid_index in small_indices:
# x1, y1, x2, y2 = annotations['bboxes'][invalid_index]
# label = annotations['labels'][invalid_index]
# class_name = self.labels[label]
# print('width: {}'.format(x2 - x1))
# print('height: {}'.format(y2 - y1))
# cv2.rectangle(image, (int(round(x1)), int(round(y1))), (int(round(x2)), int(round(y2))), (0, 255, 0), 2)
# cv2.putText(image, class_name, (int(round(x1)), int(round(y1))), cv2.FONT_HERSHEY_SIMPLEX, 1.0, (0, 0, 255), 1)
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', image)
# cv2.waitKey(0)
filtered_image_group.append(image)
filtered_annotations_group.append(annotations_group[index])
return filtered_image_group, filtered_annotations_group
def load_image_group(self, group):
"""
Load images for all images in a group.
"""
return [self.load_image(image_index) for image_index in group]
def random_visual_effect_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
# apply visual effect
image = self.visual_effect(image)
return image, annotations
def random_visual_effect_group(self, image_group, annotations_group):
"""
Randomly apply visual effect on each image.
"""
assert (len(image_group) == len(annotations_group))
if self.visual_effect is None:
# do nothing
return image_group, annotations_group
for index in range(len(image_group)):
# apply effect on a single group entry
image_group[index], annotations_group[index] = self.random_visual_effect_group_entry(
image_group[index], annotations_group[index]
)
return image_group, annotations_group
def random_misc_group_entry(self, image, annotations):
"""
Randomly transforms image and annotation.
"""
# randomly transform both image and annotations
image, annotations = self.misc_effect(image, annotations)
return image, annotations
def random_misc_group(self, image_group, annotations_group):
"""
Randomly transforms each image and its annotations.
"""
assert (len(image_group) == len(annotations_group))
if self.misc_effect is None:
return image_group, annotations_group
for index in range(len(image_group)):
# transform a single group entry
image_group[index], annotations_group[index] = self.random_misc_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def preprocess_group_entry(self, image, annotations):
"""
Preprocess image and its annotations.
"""
# preprocess the image
image, scale = self.preprocess_image(image)
# apply resizing to annotations too
annotations['bboxes'] *= scale
if self.detect_quadrangle:
annotations['quadrangles'] *= scale
return image, annotations
def preprocess_group(self, image_group, annotations_group):
"""
Preprocess each image and its annotations in its group.
"""
assert (len(image_group) == len(annotations_group))
for index in range(len(image_group)):
# preprocess a single group entry
image_group[index], annotations_group[index] = self.preprocess_group_entry(image_group[index],
annotations_group[index])
return image_group, annotations_group
def group_images(self):
"""
Order the images according to self.order and makes groups of self.batch_size.
"""
# determine the order of the images
order = list(range(self.size()))
if self.group_method == 'random':
random.shuffle(order)
elif self.group_method == 'ratio':
order.sort(key=lambda x: self.image_aspect_ratio(x))
# divide into groups, one group = one batch
self.groups = [[order[x % len(order)] for x in range(i, i + self.batch_size)] for i in
range(0, len(order), self.batch_size)]
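    # Example (illustrative): with 5 images, batch_size=2 and group_method
    # 'none', self.groups becomes [[0, 1], [2, 3], [4, 0]] -- the modulo
    # indexing wraps around so the final batch is always filled to batch_size.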
def compute_inputs(self, image_group, annotations_group):
"""
Compute inputs for the network using an image_group.
"""
batch_images = np.array(image_group).astype(np.float32)
return [batch_images]
def compute_alphas_and_ratios(self, annotations_group):
for i, annotations in enumerate(annotations_group):
quadrangles = annotations['quadrangles']
alphas = np.zeros((quadrangles.shape[0], 4), dtype=np.float32)
xmin = np.min(quadrangles, axis=1)[:, 0]
ymin = np.min(quadrangles, axis=1)[:, 1]
xmax = np.max(quadrangles, axis=1)[:, 0]
ymax = np.max(quadrangles, axis=1)[:, 1]
# alpha1, alpha2, alpha3, alpha4
alphas[:, 0] = (quadrangles[:, 0, 0] - xmin) / (xmax - xmin)
alphas[:, 1] = (quadrangles[:, 1, 1] - ymin) / (ymax - ymin)
alphas[:, 2] = (xmax - quadrangles[:, 2, 0]) / (xmax - xmin)
alphas[:, 3] = (ymax - quadrangles[:, 3, 1]) / (ymax - ymin)
annotations['alphas'] = alphas
# ratio
area1 = 0.5 * alphas[:, 0] * (1 - alphas[:, 3])
area2 = 0.5 * alphas[:, 1] * (1 - alphas[:, 0])
area3 = 0.5 * alphas[:, 2] * (1 - alphas[:, 1])
area4 = 0.5 * alphas[:, 3] * (1 - alphas[:, 2])
annotations['ratios'] = 1 - area1 - area2 - area3 - area4
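# Geometry note with a worked example (illustrative only): the vertices are
# ordered (top, right, bottom, left), and each alpha locates one vertex along
# its edge of the enclosing bbox, normalized to [0, 1]. In these normalized
# coordinates area1..area4 are the four corner triangles cut off by the
# quadrangle, so 'ratios' equals quadrangle area / bbox area. For the
# quadrangle [[2, 0], [4, 1], [1, 3], [0, 2]] the bbox is (0, 0, 4, 3),
# alphas = [0.5, 1/3, 0.75, 1/3] and
# ratios = 1 - (1/6 + 1/12 + 1/4 + 1/24) = 11/24 ~= 0.458.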
def compute_targets(self, image_group, annotations_group):
"""
Compute target outputs for the network using images and their annotations.
"""
"""
Compute target outputs for the network using images and their annotations.
"""
batches_targets = anchor_targets_bbox(
self.anchors,
image_group,
annotations_group,
num_classes=self.num_classes(),
detect_quadrangle=self.detect_quadrangle
)
return list(batches_targets)
def compute_inputs_targets(self, group, debug=False):
"""
Compute inputs and target outputs for the network.
"""
# load images and annotations
# list
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly apply visual effect
image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)
# randomly transform data
# image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
# randomly apply misc effect
image_group, annotations_group = self.random_misc_group(image_group, annotations_group)
# perform preprocessing steps
image_group, annotations_group = self.preprocess_group(image_group, annotations_group)
# check validity of annotations
image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)
assert len(image_group) != 0
assert len(image_group) == len(annotations_group)
if self.detect_quadrangle:
# compute alphas and ratio for targets
self.compute_alphas_and_ratios(annotations_group)
# compute network inputs
inputs = self.compute_inputs(image_group, annotations_group)
# compute network targets
targets = self.compute_targets(image_group, annotations_group)
if debug:
return inputs, targets, annotations_group
return inputs, targets
def __len__(self):
"""
Number of batches for generator.
"""
return len(self.groups)
def __getitem__(self, index):
"""
Keras sequence method for generating batches.
"""
group = self.groups[index]
inputs, targets = self.compute_inputs_targets(group)
return inputs, targets
def preprocess_image(self, image):
# image, RGB
image_height, image_width = image.shape[:2]
if image_height > image_width:
scale = self.image_size / image_height
resized_height = self.image_size
resized_width = int(image_width * scale)
else:
scale = self.image_size / image_width
resized_height = int(image_height * scale)
resized_width = self.image_size
image = cv2.resize(image, (resized_width, resized_height))
image = image.astype(np.float32)
image /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
image -= mean
image /= std
pad_h = self.image_size - resized_height
pad_w = self.image_size - resized_width
image = np.pad(image, [(0, pad_h), (0, pad_w), (0, 0)], mode='constant')
return image, scale
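# Rough worked example (illustrative only): with self.image_size == 512, a
# 480x640 (h, w) image gives scale = 512 / 640 = 0.8, is resized to 384x512,
# normalized with the ImageNet mean/std and padded at the bottom to 512x512.
# The caller must multiply annotation boxes by the returned scale, which is
# exactly what preprocess_group_entry() does.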
def get_augmented_data(self, group):
"""
Compute inputs and target outputs for the network.
"""
# load images and annotations
# list
image_group = self.load_image_group(group)
annotations_group = self.load_annotations_group(group)
# check validity of annotations
image_group, annotations_group = self.filter_annotations(image_group, annotations_group, group)
# randomly apply visual effect
# image_group, annotations_group = self.random_visual_effect_group(image_group, annotations_group)
# randomly transform data
# image_group, annotations_group = self.random_transform_group(image_group, annotations_group)
# randomly apply misc effect
# image_group, annotations_group = self.random_misc_group(image_group, annotations_group)
# perform preprocessing steps
image_group, annotations_group = self.preprocess_group(image_group, annotations_group)
# check validity of annotations
image_group, annotations_group = self.clip_transformed_annotations(image_group, annotations_group, group)
assert len(image_group) != 0
assert len(image_group) == len(annotations_group)
# compute alphas and ratios for targets
self.compute_alphas_and_ratios(annotations_group)
return image_group, annotations_group
| 18,975 | 38.045267 | 145 | py |
EfficientDet | EfficientDet-master/generators/pascal.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from generators.common import Generator
import os
import numpy as np
from six import raise_from
import cv2
import xml.etree.ElementTree as ET
voc_classes = {
'aeroplane': 0,
'bicycle': 1,
'bird': 2,
'boat': 3,
'bottle': 4,
'bus': 5,
'car': 6,
'cat': 7,
'chair': 8,
'cow': 9,
'diningtable': 10,
'dog': 11,
'horse': 12,
'motorbike': 13,
'person': 14,
'pottedplant': 15,
'sheep': 16,
'sofa': 17,
'train': 18,
'tvmonitor': 19
}
def _findNode(parent, name, debug_name=None, parse=None):
if debug_name is None:
debug_name = name
result = parent.find(name)
if result is None:
raise ValueError('missing element \'{}\''.format(debug_name))
if parse is not None:
try:
return parse(result.text)
except ValueError as e:
raise_from(ValueError('illegal value for \'{}\': {}'.format(debug_name, e)), None)
return result
class PascalVocGenerator(Generator):
"""
Generate data for a Pascal VOC dataset.
See http://host.robots.ox.ac.uk/pascal/VOC/ for more information.
"""
def __init__(
self,
data_dir,
set_name,
classes=voc_classes,
image_extension='.jpg',
skip_truncated=False,
skip_difficult=False,
**kwargs
):
"""
Initialize a Pascal VOC data generator.
Args:
data_dir: the path of directory which contains ImageSets directory
set_name: test|trainval|train|val
classes: class name to id mapping
image_extension: image filename ext
skip_truncated: if True, skip objects marked as truncated
skip_difficult: if True, skip objects marked as difficult
**kwargs:
"""
self.data_dir = data_dir
self.set_name = set_name
self.classes = classes
self.image_names = [l.strip().split(None, 1)[0] for l in
open(os.path.join(data_dir, 'ImageSets', 'Main', set_name + '.txt')).readlines()]
self.image_extension = image_extension
self.skip_truncated = skip_truncated
self.skip_difficult = skip_difficult
# class ids to names mapping
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
super(PascalVocGenerator, self).__init__(**kwargs)
def size(self):
"""
Size of the dataset.
"""
return len(self.image_names)
def num_classes(self):
"""
Number of classes in the dataset.
"""
return len(self.classes)
def has_label(self, label):
"""
Return True if label is a known label.
"""
return label in self.labels
def has_name(self, name):
"""
Returns True if name is a known class.
"""
return name in self.classes
def name_to_label(self, name):
"""
Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
"""
Map label to name.
"""
return self.labels[label]
def image_aspect_ratio(self, image_index):
"""
Compute the aspect ratio for an image with image_index.
"""
path = os.path.join(self.data_dir, 'JPEGImages', self.image_names[image_index] + self.image_extension)
image = cv2.imread(path)
h, w = image.shape[:2]
return float(w) / float(h)
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
path = os.path.join(self.data_dir, 'JPEGImages', self.image_names[image_index] + self.image_extension)
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def __parse_annotation(self, element):
"""
Parse an annotation given an XML element.
"""
truncated = _findNode(element, 'truncated', parse=int)
difficult = _findNode(element, 'difficult', parse=int)
class_name = _findNode(element, 'name').text
if class_name not in self.classes:
raise ValueError('class name \'{}\' not found in classes: {}'.format(class_name, list(self.classes.keys())))
box = np.zeros((4,))
label = self.name_to_label(class_name)
bndbox = _findNode(element, 'bndbox')
box[0] = _findNode(bndbox, 'xmin', 'bndbox.xmin', parse=float) - 1
box[1] = _findNode(bndbox, 'ymin', 'bndbox.ymin', parse=float) - 1
box[2] = _findNode(bndbox, 'xmax', 'bndbox.xmax', parse=float) - 1
box[3] = _findNode(bndbox, 'ymax', 'bndbox.ymax', parse=float) - 1
return truncated, difficult, box, label
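# Example of the XML fragment this method expects (illustrative only):
#   <object>
#     <name>dog</name>
#     <truncated>0</truncated>
#     <difficult>0</difficult>
#     <bndbox>
#       <xmin>48</xmin><ymin>240</ymin><xmax>195</xmax><ymax>371</ymax>
#     </bndbox>
#   </object>
# The -1 offsets above convert Pascal VOC's 1-based pixel coordinates into
# the 0-based coordinates used by the rest of the pipeline.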
def __parse_annotations(self, xml_root):
"""
Parse all annotations under the xml_root.
"""
annotations = {'labels': np.empty((0,), dtype=np.int32),
'bboxes': np.empty((0, 4))}
for i, element in enumerate(xml_root.iter('object')):
try:
truncated, difficult, box, label = self.__parse_annotation(element)
except ValueError as e:
raise_from(ValueError('could not parse object #{}: {}'.format(i, e)), None)
if truncated and self.skip_truncated:
continue
if difficult and self.skip_difficult:
continue
annotations['bboxes'] = np.concatenate([annotations['bboxes'], [box]])
annotations['labels'] = np.concatenate([annotations['labels'], [label]])
return annotations
def load_annotations(self, image_index):
"""
Load annotations for an image_index.
"""
filename = self.image_names[image_index] + '.xml'
try:
tree = ET.parse(os.path.join(self.data_dir, 'Annotations', filename))
return self.__parse_annotations(tree.getroot())
except ET.ParseError as e:
raise_from(ValueError('invalid annotations file: {}: {}'.format(filename, e)), None)
except ValueError as e:
raise_from(ValueError('invalid annotations file: {}: {}'.format(filename, e)), None)
if __name__ == '__main__':
train_generator = PascalVocGenerator(
'datasets/voc_trainval/VOC2007',
'train',
phi=0,
skip_difficult=True,
batch_size=1,
misc_effect=None,
visual_effect=None,
)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
anchors = train_generator.anchors
for batch_inputs, batch_targets in train_generator:
image = batch_inputs[0][0]
image[..., 0] *= std[0]
image[..., 1] *= std[1]
image[..., 2] *= std[2]
image[..., 0] += mean[0]
image[..., 1] += mean[1]
image[..., 2] += mean[2]
image *= 255.
regression = batch_targets[0][0]
valid_ids = np.where(regression[:, -1] == 1)[0]
boxes = anchors[valid_ids]
deltas = regression[valid_ids]
class_ids = np.argmax(batch_targets[1][0][valid_ids], axis=-1)
mean_ = [0, 0, 0, 0]
std_ = [0.2, 0.2, 0.2, 0.2]
width = boxes[:, 2] - boxes[:, 0]
height = boxes[:, 3] - boxes[:, 1]
x1 = boxes[:, 0] + (deltas[:, 0] * std_[0] + mean_[0]) * width
y1 = boxes[:, 1] + (deltas[:, 1] * std_[1] + mean_[1]) * height
x2 = boxes[:, 2] + (deltas[:, 2] * std_[2] + mean_[2]) * width
y2 = boxes[:, 3] + (deltas[:, 3] * std_[3] + mean_[3]) * height
for x1_, y1_, x2_, y2_, class_id in zip(x1, y1, x2, y2, class_ids):
x1_, y1_, x2_, y2_ = int(x1_), int(y1_), int(x2_), int(y2_)
cv2.rectangle(image, (x1_, y1_), (x2_, y2_), (0, 255, 0), 2)
class_name = train_generator.labels[class_id]
label = class_name
ret, baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.3, 1)
cv2.rectangle(image, (x1_, y2_ - ret[1] - baseline), (x1_ + ret[0], y2_), (255, 255, 255), -1)
cv2.putText(image, label, (x1_, y2_ - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
cv2.imshow('image', image.astype(np.uint8)[..., ::-1])
cv2.waitKey(0)
# 36864, 46080, 48384, 48960, 49104
# if first_valid_id < 36864:
# stride = 8
# elif 36864 <= first_valid_id < 46080:
# stride = 16
# elif 46080 <= first_valid_id < 48384:
# stride = 32
# elif 48384 <= first_valid_id < 48960:
# stride = 64
# else:
# stride = 128
pass
| 9,220 | 31.814947 | 120 | py |
EfficientDet | EfficientDet-master/generators/csv_.py | """
Copyright 2017-2018 yhenon (https://github.com/yhenon/)
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from generators.common import Generator
import cv2
import numpy as np
from PIL import Image
from six import raise_from
import csv
import sys
import os.path as osp
from collections import OrderedDict
def _parse(value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _read_classes(csv_reader):
"""
Parse the classes file given by csv_reader.
"""
result = OrderedDict()
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
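# Rough usage sketch (illustrative only, not called by the pipeline): the
# classes CSV holds one 'class_name,class_id' pair per line, e.g.
#   cow,0
#   bird,1
def _example_read_classes():
    classes = _read_classes(csv.reader(['cow,0', 'bird,1']))
    assert classes == OrderedDict([('cow', 0), ('bird', 1)])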
def _read_quadrangle_annotations(csv_reader, classes, detect_text=False):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2, x3, y3, x4, y4, class_name = row[:10]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, x3, y3, x4, y4, class_name) == ('', '', '', '', '', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
x3 = _parse(x3, int, 'line {}: malformed x3: {{}}'.format(line))
y3 = _parse(y3, int, 'line {}: malformed y3: {{}}'.format(line))
x4 = _parse(x4, int, 'line {}: malformed x4: {{}}'.format(line))
y4 = _parse(y4, int, 'line {}: malformed y4: {{}}'.format(line))
# check if the current class name is correctly present
if detect_text:
if class_name == '###':
continue
else:
class_name = 'text'
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,x3,y3,x4,y4,class_name\' or \'img_file,,,,,\''),
None)
return result
def _read_annotations(csv_reader, classes):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2, class_name = row[:6]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''),
None)
return result
def _open_for_csv(path):
"""
Open a file with flags suitable for csv.reader.
This is different for python2 it means with mode 'rb', for python3 this means 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
class CSVGenerator(Generator):
"""
Generate data for a custom CSV dataset.
See https://github.com/fizyr/keras-retinanet#csv-datasets for more information.
"""
def __init__(
self,
csv_data_file,
csv_class_file,
base_dir=None,
detect_quadrangle=False,
detect_text=False,
**kwargs
):
"""
Initialize a CSV data generator.
Args
csv_data_file: Path to the CSV annotations file.
csv_class_file: Path to the CSV classes file.
detect_text: if do text detection
base_dir: Directory w.r.t. where the files are to be searched (defaults to the directory containing the csv_data_file).
"""
self.image_names = []
self.image_data = {}
self.base_dir = base_dir
self.detect_quadrangle = detect_quadrangle
self.detect_text = detect_text
# Take base_dir from annotations file if not explicitly specified.
if self.base_dir is None:
if osp.exists(csv_data_file):
self.base_dir = ''
else:
self.base_dir = osp.dirname(csv_data_file)
# parse the provided class file
try:
with _open_for_csv(csv_class_file) as file:
# class_name --> class_id
self.classes = _read_classes(csv.reader(file, delimiter=','))
except ValueError as e:
raise_from(ValueError('invalid CSV class file: {}: {}'.format(csv_class_file, e)), None)
self.labels = {}
# class_id --> class_name
for key, value in self.classes.items():
self.labels[value] = key
# csv with img_path, x1, y1, x2, y2, x3, y3, x4, y4, class_name
try:
with _open_for_csv(csv_data_file) as file:
# {'img_path1':[{'x1':xx,'y1':xx,'x2':xx,'y2':xx,'x3':xx,'y3':xx,'x4':xx,'y4':xx, 'class':xx}...],...}
if self.detect_quadrangle:
self.image_data = _read_quadrangle_annotations(csv.reader(file, delimiter=','), self.classes,
self.detect_text)
else:
self.image_data = _read_annotations(csv.reader(file, delimiter=','), self.classes)
except ValueError as e:
raise_from(ValueError('invalid CSV annotations file: {}: {}'.format(csv_data_file, e)), None)
self.image_names = list(self.image_data.keys())
super(CSVGenerator, self).__init__(detect_text=detect_text, detect_quadrangle=detect_quadrangle, **kwargs)
def size(self):
"""
Size of the dataset.
"""
return len(self.image_names)
def num_classes(self):
"""
Number of classes in the dataset.
"""
return max(self.classes.values()) + 1
def has_label(self, label):
"""
Return True if label is a known label.
"""
return label in self.labels
def has_name(self, name):
"""
Returns True if name is a known class.
"""
return name in self.classes
def name_to_label(self, name):
"""
Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
"""
Map label to name.
"""
return self.labels[label]
def image_path(self, image_index):
"""
Returns the image path for image_index.
"""
return osp.join(self.base_dir, self.image_names[image_index])
def image_aspect_ratio(self, image_index):
"""
Compute the aspect ratio for an image with image_index.
"""
# PIL is fast for metadata
image = Image.open(self.image_path(image_index))
return float(image.width) / float(image.height)
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
image = cv2.imread(self.image_path(image_index))
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def load_annotations(self, image_index):
"""
Load annotations for an image_index.
"""
path = self.image_names[image_index]
annotations = {'labels': np.empty((0,), dtype=np.int32),
'bboxes': np.empty((0, 4), dtype=np.float32),
'quadrangles': np.empty((0, 4, 2), dtype=np.float32),
}
for idx, annot in enumerate(self.image_data[path]):
annotations['labels'] = np.concatenate((annotations['labels'], [self.name_to_label(annot['class'])]))
if self.detect_quadrangle:
quadrangle = np.array([[float(annot['x1']), float(annot['y1'])],
[float(annot['x2']), float(annot['y2'])],
[float(annot['x3']), float(annot['y3'])],
[float(annot['x4']), float(annot['y4'])]])
ordered_quadrangle = self.reorder_vertexes(quadrangle)
annotations['quadrangles'] = np.concatenate((annotations['quadrangles'], ordered_quadrangle[None]))
annotations['bboxes'] = np.concatenate((annotations['bboxes'], [[
float(min(annot['x1'], annot['x2'], annot['x3'], annot['x4'])),
float(min(annot['y1'], annot['y2'], annot['y3'], annot['y4'])),
float(max(annot['x1'], annot['x2'], annot['x3'], annot['x4'])),
float(max(annot['y1'], annot['y2'], annot['y3'], annot['y4'])),
]]))
else:
annotations['bboxes'] = np.concatenate((annotations['bboxes'], [[
float(annot['x1']),
float(annot['y1']),
float(annot['x2']),
float(annot['y2']),
]]))
return annotations
def reorder_vertexes(self, vertexes):
"""
Reorder vertexes as the paper shows: (top, right, bottom, left).
Args:
vertexes: a (4, 2) array of the quadrangle's vertices, assumed to be given in clockwise order
Returns:
the (4, 2) array reordered to start from the topmost vertex, with each vertex snapped onto its edge of the axis-aligned bounding box
"""
assert vertexes.shape == (4, 2)
xmin, ymin = np.min(vertexes, axis=0)
xmax, ymax = np.max(vertexes, axis=0)
# determine the first point with the smallest y,
# if two vertexes have the same y, choose the one with the smaller x,
ordered_idxes = np.argsort(vertexes, axis=0)
ymin1_idx = ordered_idxes[0, 1]
ymin2_idx = ordered_idxes[1, 1]
if vertexes[ymin1_idx, 1] == vertexes[ymin2_idx, 1]:
if vertexes[ymin1_idx, 0] <= vertexes[ymin2_idx, 0]:
first_vertex_idx = ymin1_idx
else:
first_vertex_idx = ymin2_idx
else:
first_vertex_idx = ymin1_idx
ordered_idxes = [(first_vertex_idx + i) % 4 for i in range(4)]
ordered_vertexes = vertexes[ordered_idxes]
# drag the point to the corresponding edge
ordered_vertexes[0, 1] = ymin
ordered_vertexes[1, 0] = xmax
ordered_vertexes[2, 1] = ymax
ordered_vertexes[3, 0] = xmin
return ordered_vertexes
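# Rough worked example (illustrative only): vertices given clockwise starting
# from the leftmost point, [[0, 2], [2, 0], [4, 1], [1, 3]], are rotated so
# the topmost vertex comes first, yielding
# (top, right, bottom, left) = [[2, 0], [4, 1], [1, 3], [0, 2]];
# each vertex is then snapped onto its edge of the bbox (0, 0, 4, 3)
# (here they already lie on those edges).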
| 13,347 | 36.389356 | 131 | py |
EfficientDet | EfficientDet-master/generators/__init__.py | 0 | 0 | 0 | py |
|
EfficientDet | EfficientDet-master/generators/coco.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from generators.common import Generator
import os
import numpy as np
from pycocotools.coco import COCO
import cv2
class CocoGenerator(Generator):
"""
Generate data from the COCO dataset.
See https://github.com/cocodataset/cocoapi/tree/master/PythonAPI for more information.
"""
def __init__(self, data_dir, set_name, **kwargs):
"""
Initialize a COCO data generator.
Args
data_dir: Path to where the COCO dataset is stored.
set_name: Name of the set to parse.
"""
self.data_dir = data_dir
self.set_name = set_name
if set_name in ['train2017', 'val2017']:
self.coco = COCO(os.path.join(data_dir, 'annotations', 'instances_' + set_name + '.json'))
else:
self.coco = COCO(os.path.join(data_dir, 'annotations', 'image_info_' + set_name + '.json'))
self.image_ids = self.coco.getImgIds()
self.load_classes()
super(CocoGenerator, self).__init__(**kwargs)
def load_classes(self):
"""
Loads the class to label mapping (and inverse) for COCO.
"""
# load class names (name -> label)
categories = self.coco.loadCats(self.coco.getCatIds())
categories.sort(key=lambda x: x['id'])
self.classes = {}
self.coco_labels = {}
self.coco_labels_inverse = {}
for c in categories:
self.coco_labels[len(self.classes)] = c['id']
self.coco_labels_inverse[c['id']] = len(self.classes)
self.classes[c['name']] = len(self.classes)
# also load the reverse (label -> name)
self.labels = {}
for key, value in self.classes.items():
self.labels[value] = key
def size(self):
""" Size of the COCO dataset.
"""
return len(self.image_ids)
def num_classes(self):
""" Number of classes in the dataset. For COCO this is 80.
"""
return 90
def has_label(self, label):
""" Return True if label is a known label.
"""
return label in self.labels
def has_name(self, name):
""" Returns True if name is a known class.
"""
return name in self.classes
def name_to_label(self, name):
""" Map name to label.
"""
return self.classes[name]
def label_to_name(self, label):
""" Map label to name.
"""
return self.labels[label]
def coco_label_to_label(self, coco_label):
""" Map COCO label to the label as used in the network.
COCO has some gaps in the order of labels. The highest label is 90, but there are 80 classes.
"""
return self.coco_labels_inverse[coco_label]
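# Illustrative example: in the standard 2017 annotations category id 12 is
# unused while id 11 is 'fire hydrant' and id 13 is 'stop sign', so
# coco_label_to_label(13) returns the contiguous network label 11 and
# label_to_coco_label(11) maps it back to 13.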
def coco_label_to_name(self, coco_label):
""" Map COCO label to name.
"""
return self.label_to_name(self.coco_label_to_label(coco_label))
def label_to_coco_label(self, label):
""" Map label as used by the network to labels as used by COCO.
"""
return self.coco_labels[label]
def image_aspect_ratio(self, image_index):
""" Compute the aspect ratio for an image with image_index.
"""
image = self.coco.loadImgs(self.image_ids[image_index])[0]
return float(image['width']) / float(image['height'])
def load_image(self, image_index):
"""
Load an image at the image_index.
"""
# {'license': 2, 'file_name': '000000259765.jpg', 'coco_url': 'http://images.cocodataset.org/test2017/000000259765.jpg', 'height': 480, 'width': 640, 'date_captured': '2013-11-21 04:02:31', 'id': 259765}
image_info = self.coco.loadImgs(self.image_ids[image_index])[0]
path = os.path.join(self.data_dir, 'images', self.set_name, image_info['file_name'])
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
def load_annotations(self, image_index):
""" Load annotations for an image_index.
"""
# get ground truth annotations
annotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)
annotations = {'labels': np.empty((0,), dtype=np.float32), 'bboxes': np.empty((0, 4), dtype=np.float32)}
# some images appear to miss annotations (like image with id 257034)
if len(annotations_ids) == 0:
return annotations
# parse annotations
coco_annotations = self.coco.loadAnns(annotations_ids)
for idx, a in enumerate(coco_annotations):
# some annotations have basically no width / height, skip them
if a['bbox'][2] < 1 or a['bbox'][3] < 1:
continue
annotations['labels'] = np.concatenate(
[annotations['labels'], [a['category_id'] - 1]], axis=0)
annotations['bboxes'] = np.concatenate([annotations['bboxes'], [[
a['bbox'][0],
a['bbox'][1],
a['bbox'][0] + a['bbox'][2],
a['bbox'][1] + a['bbox'][3],
]]], axis=0)
return annotations
| 5,710 | 34.253086 | 211 | py |
EfficientDet | EfficientDet-master/eval/common.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from utils.compute_overlap import compute_overlap
from utils.visualization import draw_detections, draw_annotations
import numpy as np
import cv2
import progressbar
assert (callable(progressbar.progressbar)), "Using wrong progressbar module, install 'progressbar2' instead."
def _compute_ap(recall, precision):
"""
Compute the average precision, given the recall and precision curves.
Code originally from https://github.com/rbgirshick/py-faster-rcnn.
Args:
recall: The recall curve (list).
precision: The precision curve (list).
Returns:
The average precision as computed in py-faster-rcnn.
"""
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], recall, [1.]))
mpre = np.concatenate(([0.], precision, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
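# Rough worked example (illustrative only, not called by the pipeline): for
# recall [0.5, 1.0] with precision [1.0, 0.5], the interpolated precision
# envelope is 1.0 up to recall 0.5 and 0.5 afterwards, so
# AP = 0.5 * 1.0 + 0.5 * 0.5 = 0.75.
def _example_compute_ap():
    assert abs(_compute_ap([0.5, 1.0], [1.0, 0.5]) - 0.75) < 1e-9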
def _get_detections(generator, model, score_threshold=0.05, max_detections=100, visualize=False):
"""
Get the detections from the model using the generator.
The result is a list of lists such that the size is:
all_detections[num_images][num_classes] = detections[num_class_detections, 5]
Args:
generator: The generator used to run images through the model.
model: The model to run on the images.
score_threshold: The score confidence threshold to use.
max_detections: The maximum number of detections to use per image.
visualize: If True, show each image with its annotations and detections drawn.
Returns:
A list of lists containing the detections for each image in the generator.
"""
# one inner slot per class id so detections can be indexed directly by label
all_detections = [[None for i in range(generator.num_classes())] for j in
range(generator.size())]
for i in progressbar.progressbar(range(generator.size()), prefix='Running network: '):
image = generator.load_image(i)
src_image = image.copy()
h, w = image.shape[:2]
anchors = generator.anchors
image, scale = generator.preprocess_image(image)
# run network
boxes, scores, *_, labels = model.predict_on_batch([np.expand_dims(image, axis=0)])
boxes /= scale
boxes[:, :, 0] = np.clip(boxes[:, :, 0], 0, w - 1)
boxes[:, :, 1] = np.clip(boxes[:, :, 1], 0, h - 1)
boxes[:, :, 2] = np.clip(boxes[:, :, 2], 0, w - 1)
boxes[:, :, 3] = np.clip(boxes[:, :, 3], 0, h - 1)
# select indices which have a score above the threshold
indices = np.where(scores[0, :] > score_threshold)[0]
# select those scores
scores = scores[0][indices]
# find the order with which to sort the scores
scores_sort = np.argsort(-scores)[:max_detections]
# select detections
# (n, 4)
image_boxes = boxes[0, indices[scores_sort], :]
# (n, )
image_scores = scores[scores_sort]
# (n, )
image_labels = labels[0, indices[scores_sort]]
# (n, 6)
detections = np.concatenate(
[image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)], axis=1)
if visualize:
draw_annotations(src_image, generator.load_annotations(i), label_to_name=generator.label_to_name)
draw_detections(src_image, detections[:5, :4], detections[:5, 4], detections[:5, 5].astype(np.int32),
label_to_name=generator.label_to_name,
score_threshold=score_threshold)
# cv2.imwrite(os.path.join(save_path, '{}.png'.format(i)), raw_image)
cv2.namedWindow('{}'.format(i), cv2.WINDOW_NORMAL)
cv2.imshow('{}'.format(i), src_image)
cv2.waitKey(0)
# copy detections to all_detections
for class_id in range(generator.num_classes()):
all_detections[i][class_id] = detections[detections[:, -1] == class_id, :-1]
return all_detections
def _get_annotations(generator):
"""
Get the ground truth annotations from the generator.
The result is a list of lists such that the size is:
all_annotations[num_images][num_classes] = annotations[num_class_annotations, 5]
Args:
generator: The generator used to retrieve ground truth annotations.
Returns:
A list of lists containing the annotations for each image in the generator.
"""
all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
for i in progressbar.progressbar(range(generator.size()), prefix='Parsing annotations: '):
# load the annotations
annotations = generator.load_annotations(i)
# copy detections to all_annotations
for label in range(generator.num_classes()):
if not generator.has_label(label):
continue
all_annotations[i][label] = annotations['bboxes'][annotations['labels'] == label, :].copy()
return all_annotations
def evaluate(
generator,
model,
iou_threshold=0.5,
score_threshold=0.01,
max_detections=100,
visualize=False,
epoch=0
):
"""
Evaluate a given dataset using a given model.
Args:
generator: The generator that represents the dataset to evaluate.
model: The model to evaluate.
iou_threshold: The threshold used to consider when a detection is positive or negative.
score_threshold: The score confidence threshold to use for detections.
max_detections: The maximum number of detections to use per image.
visualize: Show the visualized detections or not.
Returns:
A dict mapping class names to mAP scores.
"""
# gather all detections and annotations
all_detections = _get_detections(generator, model, score_threshold=score_threshold, max_detections=max_detections,
visualize=visualize)
all_annotations = _get_annotations(generator)
average_precisions = {}
num_tp = 0
num_fp = 0
# process detections and annotations
for label in range(generator.num_classes()):
if not generator.has_label(label):
continue
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(generator.size()):
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for d in detections:
scores = np.append(scores, d[4])
if annotations.shape[0] == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
# no annotations -> AP for this class is 0 (is this correct?)
if num_annotations == 0:
average_precisions[label] = 0, 0
continue
# sort by score
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
if false_positives.shape[0] == 0:
num_fp += 0
else:
num_fp += false_positives[-1]
if true_positives.shape[0] == 0:
num_tp += 0
else:
num_tp += true_positives[-1]
# compute recall and precision
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
# compute average precision
average_precision = _compute_ap(recall, precision)
average_precisions[label] = average_precision, num_annotations
print('num_fp={}, num_tp={}'.format(num_fp, num_tp))
return average_precisions
if __name__ == '__main__':
from generators.pascal import PascalVocGenerator
from model import efficientdet
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
phi = 1
weighted_bifpn = False
common_args = {
'batch_size': 1,
'phi': phi,
}
test_generator = PascalVocGenerator(
'datasets/VOC2007',
'test',
shuffle_groups=False,
skip_truncated=False,
skip_difficult=True,
**common_args
)
model_path = 'checkpoints/2019-12-03/pascal_05_0.6283_1.1975_0.8029.h5'
input_shape = (test_generator.image_size, test_generator.image_size)
anchors = test_generator.anchors
num_classes = test_generator.num_classes()
model, prediction_model = efficientdet(phi=phi, num_classes=num_classes, weighted_bifpn=weighted_bifpn)
prediction_model.load_weights(model_path, by_name=True)
average_precisions = evaluate(test_generator, prediction_model, visualize=False)
# compute per class average precision
total_instances = []
precisions = []
for label, (average_precision, num_annotations) in average_precisions.items():
print('{:.0f} instances of class'.format(num_annotations), test_generator.label_to_name(label),
'with average precision: {:.4f}'.format(average_precision))
total_instances.append(num_annotations)
precisions.append(average_precision)
mean_ap = sum(precisions) / sum(x > 0 for x in total_instances)
print('mAP: {:.4f}'.format(mean_ap))
| 11,199 | 35.842105 | 118 | py |
EfficientDet | EfficientDet-master/eval/pascal.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import keras
from tensorflow import keras
import tensorflow as tf
from eval.common import evaluate
class Evaluate(keras.callbacks.Callback):
"""
Evaluation callback for arbitrary datasets.
"""
def __init__(
self,
generator,
model,
iou_threshold=0.5,
score_threshold=0.01,
max_detections=100,
save_path=None,
tensorboard=None,
weighted_average=False,
verbose=1
):
"""
Evaluate a given dataset using a given model at the end of every epoch during training.
Args:
generator: The generator that represents the dataset to evaluate.
iou_threshold: The threshold used to consider when a detection is positive or negative.
score_threshold: The score confidence threshold to use for detections.
max_detections: The maximum number of detections to use per image.
save_path: The path to save images with visualized detections to.
tensorboard: Instance of keras.callbacks.TensorBoard used to log the mAP value.
weighted_average: Compute the mAP using the weighted average of precisions among classes.
verbose: Set the verbosity level, by default this is set to 1.
"""
self.generator = generator
self.iou_threshold = iou_threshold
self.score_threshold = score_threshold
self.max_detections = max_detections
self.save_path = save_path
self.tensorboard = tensorboard
self.weighted_average = weighted_average
self.verbose = verbose
self.active_model = model
super(Evaluate, self).__init__()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
# run evaluation
average_precisions = evaluate(
self.generator,
self.active_model,
iou_threshold=self.iou_threshold,
score_threshold=self.score_threshold,
max_detections=self.max_detections,
visualize=False
)
# compute per class average precision
total_instances = []
precisions = []
for label, (average_precision, num_annotations) in average_precisions.items():
if self.verbose == 1:
print('{:.0f} instances of class'.format(num_annotations),
self.generator.label_to_name(label), 'with average precision: {:.4f}'.format(average_precision))
total_instances.append(num_annotations)
precisions.append(average_precision)
if self.weighted_average:
self.mean_ap = sum([a * b for a, b in zip(total_instances, precisions)]) / sum(total_instances)
else:
self.mean_ap = sum(precisions) / sum(x > 0 for x in total_instances)
if self.tensorboard is not None:
if tf.version.VERSION < '2.0.0' and self.tensorboard.writer is not None:
summary = tf.Summary()
summary_value = summary.value.add()
summary_value.simple_value = self.mean_ap
summary_value.tag = "mAP"
self.tensorboard.writer.add_summary(summary, epoch)
else:
tf.summary.scalar('mAP', self.mean_ap, epoch)
logs['mAP'] = self.mean_ap
if self.verbose == 1:
print('mAP: {:.4f}'.format(self.mean_ap))
| 3,989 | 36.641509 | 118 | py |
EfficientDet | EfficientDet-master/eval/__init__.py | 0 | 0 | 0 | py |
|
EfficientDet | EfficientDet-master/eval/coco.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import keras
from tensorflow import keras
import tensorflow as tf
from pycocotools.cocoeval import COCOeval
import numpy as np
import json
from tqdm import trange
import cv2
from generators.coco import CocoGenerator
def evaluate(generator, model, threshold=0.01):
"""
Use the pycocotools to evaluate a COCO model on a dataset.
Args
generator: The generator for generating the evaluation data.
model: The model to evaluate.
threshold: The score threshold to use.
"""
# start collecting results
results = []
image_ids = []
for index in trange(generator.size(), desc='COCO evaluation: '):
image = generator.load_image(index)
src_image = image.copy()
h, w = image.shape[:2]
image, scale = generator.preprocess_image(image)
# run network
boxes, scores, labels = model.predict_on_batch([np.expand_dims(image, axis=0)])
boxes /= scale
boxes[:, :, 0] = np.clip(boxes[:, :, 0], 0, w - 1)
boxes[:, :, 1] = np.clip(boxes[:, :, 1], 0, h - 1)
boxes[:, :, 2] = np.clip(boxes[:, :, 2], 0, w - 1)
boxes[:, :, 3] = np.clip(boxes[:, :, 3], 0, h - 1)
# change to (x, y, w, h) (MS COCO standard)
boxes[:, :, 2] -= boxes[:, :, 0]
boxes[:, :, 3] -= boxes[:, :, 1]
# select indices which have a score above the threshold
indices = np.where(scores[0, :] > threshold)[0]
boxes = boxes[0, indices]
scores = scores[0, indices]
class_ids = labels[0, indices]
# compute predicted labels and scores
for box, score, class_id in zip(boxes, scores, class_ids):
# append detection for each positively labeled class
image_result = {
'image_id': generator.image_ids[index],
'category_id': int(class_id) + 1,
'score': float(score),
'bbox': box.tolist(),
}
# append detection to results
results.append(image_result)
# box = np.round(box).astype(np.int32)
# class_name = generator.label_to_name(generator.coco_label_to_label(class_id + 1))
# ret, baseline = cv2.getTextSize(class_name, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
# cv2.rectangle(src_image, (box[0], box[1]), (box[0] + box[2], box[1] + box[3]), (0, 255, 0), 1)
# cv2.putText(src_image, class_name, (box[0], box[1] + box[3] - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
# (0, 0, 0), 1)
# cv2.namedWindow('image', cv2.WINDOW_NORMAL)
# cv2.imshow('image', src_image)
# cv2.waitKey(0)
# append image to list of processed images
image_ids.append(generator.image_ids[index])
if not len(results):
return
# write output
json.dump(results, open('{}_bbox_results.json'.format(generator.set_name), 'w'), indent=4)
json.dump(image_ids, open('{}_processed_image_ids.json'.format(generator.set_name), 'w'), indent=4)
# # load results in COCO evaluation tool
# coco_true = generator.coco
# coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(generator.set_name))
#
# # run COCO evaluation
# coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
# coco_eval.params.imgIds = image_ids
# coco_eval.evaluate()
# coco_eval.accumulate()
# coco_eval.summarize()
# return coco_eval.stats
class Evaluate(keras.callbacks.Callback):
""" Performs COCO evaluation on each epoch.
"""
def __init__(self, generator, model, tensorboard=None, threshold=0.01):
""" Evaluate callback initializer.
Args
generator : The generator used for creating validation data.
model: prediction model
tensorboard : If given, the results will be written to tensorboard.
threshold : The score threshold to use.
"""
self.generator = generator
self.active_model = model
self.threshold = threshold
self.tensorboard = tensorboard
super(Evaluate, self).__init__()
def on_epoch_end(self, epoch, logs=None):
logs = logs or {}
coco_tag = ['AP @[ IoU=0.50:0.95 | area= all | maxDets=100 ]',
'AP @[ IoU=0.50 | area= all | maxDets=100 ]',
'AP @[ IoU=0.75 | area= all | maxDets=100 ]',
'AP @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
'AP @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
'AP @[ IoU=0.50:0.95 | area= large | maxDets=100 ]',
'AR @[ IoU=0.50:0.95 | area= all | maxDets= 1 ]',
'AR @[ IoU=0.50:0.95 | area= all | maxDets= 10 ]',
'AR @[ IoU=0.50:0.95 | area= all | maxDets=100 ]',
'AR @[ IoU=0.50:0.95 | area= small | maxDets=100 ]',
'AR @[ IoU=0.50:0.95 | area=medium | maxDets=100 ]',
'AR @[ IoU=0.50:0.95 | area= large | maxDets=100 ]']
coco_eval_stats = evaluate(self.generator, self.active_model, self.threshold)
if coco_eval_stats is not None and self.tensorboard is not None:
if tf.version.VERSION < '2.0.0' and self.tensorboard.writer is not None:
summary = tf.Summary()
for index, result in enumerate(coco_eval_stats):
summary_value = summary.value.add()
summary_value.simple_value = result
summary_value.tag = '{}. {}'.format(index + 1, coco_tag[index])
self.tensorboard.writer.add_summary(summary, epoch)
logs[coco_tag[index]] = result
else:
for index, result in enumerate(coco_eval_stats):
tag = '{}. {}'.format(index + 1, coco_tag[index])
tf.summary.scalar(tag, result, epoch)
if __name__ == '__main__':
from model import efficientdet
import os
from generators.coco import CocoGenerator
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
phi = 2
weighted_bifpn = True
model_path = 'efficientdet-d2.h5'
common_args = {
'batch_size': 1,
'phi': phi,
}
test_generator = CocoGenerator(
'datasets/coco',
'test-dev2017',
shuffle_groups=False,
**common_args
)
num_classes = test_generator.num_classes()
model, prediction_model = efficientdet(phi=phi, num_classes=num_classes, weighted_bifpn=weighted_bifpn,
score_threshold=0.01)
prediction_model.load_weights(model_path, by_name=True)
evaluate(test_generator, prediction_model, threshold=0.01)
| 7,327 | 37.772487 | 117 | py |
EfficientDet | EfficientDet-master/augmentor/color.py | import numpy as np
from PIL import Image, ImageEnhance, ImageOps
def autocontrast(image, prob=0.5):
random_prob = np.random.uniform()
if random_prob > prob:
return image
image = Image.fromarray(image[..., ::-1])
image = ImageOps.autocontrast(image)
image = np.array(image)[..., ::-1]
return image
def equalize(image, prob=0.5):
random_prob = np.random.uniform()
if random_prob > prob:
return image
image = Image.fromarray(image[..., ::-1])
image = ImageOps.equalize(image)
image = np.array(image)[..., ::-1]
return image
def solarize(image, prob=0.5, threshold=128.):
random_prob = np.random.uniform()
if random_prob > prob:
return image
image = Image.fromarray(image[..., ::-1])
image = ImageOps.solarize(image, threshold=threshold)
image = np.array(image)[..., ::-1]
return image
def sharpness(image, prob=0.5, min=0, max=2, factor=None):
random_prob = np.random.uniform()
if random_prob > prob:
return image
if factor is None:
# factor=0 gives a blurred image, factor=1 the original, factor=2 a sharpened image
factor = np.random.uniform(min, max)
image = Image.fromarray(image[..., ::-1])
enhancer = ImageEnhance.Sharpness(image)
image = enhancer.enhance(factor=factor)
return np.array(image)[..., ::-1]
def color(image, prob=0.5, min=0., max=1., factor=None):
random_prob = np.random.uniform()
if random_prob > prob:
return image
if factor is None:
# factor=0 returns a black-and-white image, factor=1 returns the original image
factor = np.random.uniform(min, max)
image = Image.fromarray(image[..., ::-1])
enhancer = ImageEnhance.Color(image)
image = enhancer.enhance(factor=factor)
return np.array(image)[..., ::-1]
def contrast(image, prob=0.5, min=0.2, max=1., factor=None):
random_prob = np.random.uniform()
if random_prob > prob:
return image
if factor is None:
# factor=0 returns a solid gray image, factor=1 returns the original image
factor = np.random.uniform(min, max)
image = Image.fromarray(image[..., ::-1])
enhancer = ImageEnhance.Contrast(image)
image = enhancer.enhance(factor=factor)
return np.array(image)[..., ::-1]
def brightness(image, prob=0.5, min=0.8, max=1., factor=None):
random_prob = np.random.uniform()
if random_prob > prob:
return image
if factor is None:
# factor=0 returns a completely black image, factor=1 returns the original image
factor = np.random.uniform(min, max)
image = Image.fromarray(image[..., ::-1])
enhancer = ImageEnhance.Brightness(image)
image = enhancer.enhance(factor=factor)
return np.array(image)[..., ::-1]
class VisualEffect:
"""
Struct holding parameters and applying image color transformation.
Args
solarize_threshold:
color_factor: A factor for adjusting color.
contrast_factor: A factor for adjusting contrast.
brightness_factor: A factor for adjusting brightness.
sharpness_factor: A factor for adjusting sharpness.
"""
def __init__(
self,
color_factor=None,
contrast_factor=None,
brightness_factor=None,
sharpness_factor=None,
color_prob=0.5,
contrast_prob=0.5,
brightness_prob=0.5,
sharpness_prob=0.5,
autocontrast_prob=0.5,
equalize_prob=0.5,
solarize_prob=0.1,
solarize_threshold=128.,
):
self.color_factor = color_factor
self.contrast_factor = contrast_factor
self.brightness_factor = brightness_factor
self.sharpness_factor = sharpness_factor
self.color_prob = color_prob
self.contrast_prob = contrast_prob
self.brightness_prob = brightness_prob
self.sharpness_prob = sharpness_prob
self.autocontrast_prob = autocontrast_prob
self.equalize_prob = equalize_prob
self.solarize_prob = solarize_prob
self.solarize_threshold = solarize_threshold
def __call__(self, image):
"""
Apply a visual effect on the image.
Args
image: Image to adjust
"""
random_enhance_id = np.random.randint(0, 4)
if random_enhance_id == 0:
image = color(image, prob=self.color_prob, factor=self.color_factor)
elif random_enhance_id == 1:
image = contrast(image, prob=self.contrast_prob, factor=self.contrast_factor)
elif random_enhance_id == 2:
image = brightness(image, prob=self.brightness_prob, factor=self.brightness_factor)
else:
image = sharpness(image, prob=self.sharpness_prob, factor=self.sharpness_factor)
random_ops_id = np.random.randint(0, 3)
if random_ops_id == 0:
image = autocontrast(image, prob=self.autocontrast_prob)
elif random_ops_id == 1:
image = equalize(image, prob=self.equalize_prob)
else:
image = solarize(image, prob=self.solarize_prob, threshold=self.solarize_threshold)
return image
if __name__ == '__main__':
from generators.pascal import PascalVocGenerator
import cv2
train_generator = PascalVocGenerator(
'datasets/VOC0712',
'trainval',
skip_difficult=True,
anchors_path='voc_anchors_416.txt',
batch_size=1
)
visual_effect = VisualEffect()
for i in range(train_generator.size()):
image = train_generator.load_image(i)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
annotations = train_generator.load_annotations(i)
boxes = annotations['bboxes']
for box in boxes.astype(np.int32):
cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 2)
src_image = image.copy()
image = visual_effect(image)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', np.concatenate([src_image, image], axis=1))
cv2.waitKey(0)
| 5,921 | 32.083799 | 95 | py |
EfficientDet | EfficientDet-master/augmentor/transform.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
identity_matrix = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
def colvec(*args):
"""
Create a numpy array representing a column vector.
"""
return np.array([args]).T
def transform_aabb(transform_matrix, aabb):
"""
Apply a transformation to an axis aligned bounding box.
The result is a new AABB in the same coordinate system as the original AABB.
The new AABB contains all corner points of the original AABB after applying the given transformation.
Args
transform_matrix: the transformation matrix to apply.
aabb: the axis aligned bounding box (x1, y1, x2, y2), where (x1, y1) is the minimum corner and (x2, y2) the maximum corner.
Returns
The new AABB as a list [x1, y1, x2, y2]
"""
x1, y1, x2, y2 = aabb
# Transform all 4 corners of the AABB.
points = transform_matrix.dot([
[x1, x2, x1, x2],
[y1, y2, y2, y1],
[1, 1, 1, 1],
])
# Extract the min and max corners again.
# (3, ) (min_x, min_y, 1)
min_corner = points.min(axis=1)
# (3, ) (max_x, max_y, 1)
max_corner = points.max(axis=1)
return [min_corner[0], min_corner[1], max_corner[0], max_corner[1]]
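# Rough usage sketch (illustrative only, not called by the pipeline):
# applying an X flip to the box (10, 20, 30, 40) negates the x coordinates
# of all four corners, so the tightest enclosing box becomes
# [-30, 20, -10, 40]. To keep a flipped box inside the image you would
# compose the flip with a translation by the image width.
def _example_transform_aabb():
    flip = np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])
    assert transform_aabb(flip, [10, 20, 30, 40]) == [-30, 20, -10, 40]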
def random_value(min, max):
return np.random.uniform(min, max)
def random_vector(min, max):
"""
Construct a random vector between min and max.
Args
min: the minimum value for each component, (n, )
max: the maximum value for each component, (n, )
"""
min = np.array(min)
max = np.array(max)
assert min.shape == max.shape
assert len(min.shape) == 1
return np.random.uniform(min, max)
def rotation(min=0, max=0, prob=0.5):
"""
Construct a homogeneous 2D rotation matrix.
Args
min: a scalar for the minimum absolute angle in radians
max: a scalar for the maximum absolute angle in radians
Returns
the rotation matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# angle: the angle in radians
angle = random_value(min=min, max=max)
return np.array([
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]
])
else:
return identity_matrix
def translation_x(min=0, max=0, prob=0.5):
"""
Construct a homogeneous 2D translation matrix.
Args:
min: a scalar for the minimum translation for x axis
max: a scalar for the maximum translation for x axis
Returns:
the translation matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# translation: the translation along the x axis
translation = random_value(min=min, max=max)
return np.array([
[1, 0, translation],
[0, 1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def translation_y(min=0, max=0, prob=0.5):
"""
Construct a homogeneous 2D translation matrix.
Args:
min: a scalar for the minimum translation for y axis
max: a scalar for the maximum translation for y axis
Returns:
the translation matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# translation: the translation along the y axis
translation = random_value(min=min, max=max)
return np.array([
[1, 0, 0],
[0, 1, translation],
[0, 0, 1]
])
else:
return identity_matrix
def translation_xy(min=(0, 0), max=(0, 0), prob=0.5):
"""
Construct a homogeneous 2D translation matrix.
Args:
min: a 2D vector (x, y) of the minimum translations along the x and y axes
max: a 2D vector (x, y) of the maximum translations along the x and y axes
Returns:
the translation matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
# note: unlike the other transforms in this module, this one applies the
# translation when random_prob < prob
if random_prob < prob:
# translation: the translation 2D vector
dx = np.random.randint(min[0], max[0])
dy = np.random.randint(min[1], max[1])
return np.array([
[1, 0, dx],
[0, 1, dy],
[0, 0, 1]
])
else:
return identity_matrix
def shear_x(min=0, max=0, prob=0.5):
"""
Construct a homogeneous 2D shear matrix.
Args
min: the minimum shear angle in radians.
max: the maximum shear angle in radians.
Returns
the shear matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# angle: the shear angle in radians
angle = random_value(min=min, max=max)
return np.array([
[1, np.tan(angle), 0],
[0, 1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def shear_y(min=0, max=0, prob=0.5):
"""
Construct a homogeneous 2D shear matrix.
Args
min: the minimum shear angle in radians.
max: the maximum shear angle in radians.
Returns
the shear matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# angle: the shear angle in radians
angle = random_value(min=min, max=max)
return np.array([
[1, 0, 0],
[np.tan(angle), 1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def scaling_x(min=0.9, max=1.1, prob=0.5):
"""
Construct a homogeneous 2D scaling matrix.
Args
        min: a scalar for the minimum scaling factor along the x axis
        max: a scalar for the maximum scaling factor along the x axis
    Returns
        the scaling matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
        # factor: the scaling factor along the x axis
factor = random_value(min=min, max=max)
return np.array([
[factor, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def scaling_y(min=0.9, max=1.1, prob=0.5):
"""
Construct a homogeneous 2D scaling matrix.
Args
        min: a scalar for the minimum scaling factor along the y axis
        max: a scalar for the maximum scaling factor along the y axis
    Returns
        the scaling matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
        # factor: the scaling factor along the y axis
factor = random_value(min=min, max=max)
return np.array([
[1, 0, 0],
[0, factor, 0],
[0, 0, 1]
])
else:
return identity_matrix
def scaling_xy(min=(0.9, 0.9), max=(1.1, 1.1), prob=0.5):
"""
Construct a homogeneous 2D scaling matrix.
Args
min: a 2D vector containing the minimum scaling factor for X and Y.
        max: a 2D vector containing the maximum scaling factor for X and Y.
    Returns
        the scaling matrix as 3 by 3 numpy array
"""
random_prob = np.random.uniform()
if random_prob > prob:
# factor: a 2D vector for X and Y scaling
factor = random_vector(min=min, max=max)
return np.array([
[factor[0], 0, 0],
[0, factor[1], 0],
[0, 0, 1]
])
else:
return identity_matrix
def flip_x(prob=0.8):
"""
    Construct a transformation that flips along the X axis with probability (1 - prob).
    Args
        prob: the chance that the identity (no flip) is returned instead
Returns
a homogeneous 3 by 3 transformation matrix
"""
random_prob = np.random.uniform()
if random_prob > prob:
        # Reflect along the X axis: negate the x coordinate.
return np.array([
[-1, 0, 0],
[0, 1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def flip_y(prob=0.8):
"""
    Construct a transformation that flips along the Y axis with probability (1 - prob).
    Args
        prob: the chance that the identity (no flip) is returned instead
Returns
a homogeneous 3 by 3 transformation matrix
"""
random_prob = np.random.uniform()
if random_prob > prob:
        # Reflect along the Y axis: negate the y coordinate.
return np.array([
[1, 0, 0],
[0, -1, 0],
[0, 0, 1]
])
else:
return identity_matrix
def change_transform_origin(transform, center):
"""
Create a new transform representing the same transformation, only with the origin of the linear part changed.
Args
transform: the transformation matrix
center: the new origin of the transformation
Returns
translate(center) * transform * translate(-center)
"""
center = np.array(center)
return np.linalg.multi_dot([np.array([[1, 0, center[0]], [0, 1, center[1]], [0, 0, 1]]),
transform,
np.array([[1, 0, -center[0]], [0, 1, -center[1]], [0, 0, 1]])])
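# --- Illustrative usage sketch (editor's addition, not from the upstream file;
# it assumes the module-level `identity_matrix` referenced above exists). Run
# the module directly to try it.
if __name__ == '__main__':
    # prob=0.0 makes the identity branch of rotation() practically unreachable,
    # and min == max pins the sampled angle to exactly pi / 2.
    deterministic_rotation = rotation(min=np.pi / 2, max=np.pi / 2, prob=0.0)
    # Re-origin the rotation so it spins around (50, 50) instead of (0, 0).
    about_center = change_transform_origin(deterministic_rotation, (50, 50))
    # The chosen center is the fixed point of the re-origined transform.
    print(about_center.dot([50, 50, 1]))  # -> approximately [50. 50. 1.]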
def random_transform(
min_rotation=0,
max_rotation=0,
min_translation=(0, 0),
max_translation=(0, 0),
min_shear=0,
max_shear=0,
min_scaling=(1, 1),
max_scaling=(1, 1),
):
"""
Create a random transformation.
The transformation consists of the following operations in this order (from left to right):
* rotation
* translation
* shear
* scaling
* flip x (if applied)
* flip y (if applied)
Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation
    as a factor of the image size. So an X translation of 0.1 would translate the image by 10% of its width.
Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret
the translation directly as pixel distances instead.
Args
min_rotation: The minimum rotation in radians for the transform as scalar.
max_rotation: The maximum rotation in radians for the transform as scalar.
min_translation: The minimum translation for the transform as 2D column vector.
max_translation: The maximum translation for the transform as 2D column vector.
min_shear: The minimum shear angle for the transform in radians.
max_shear: The maximum shear angle for the transform in radians.
min_scaling: The minimum scaling for the transform as 2D column vector.
max_scaling: The maximum scaling for the transform as 2D column vector.
"""
return np.linalg.multi_dot([
rotation(min_rotation, max_rotation),
translation_xy(min_translation, max_translation),
shear_x(min_shear, max_shear) if np.random.uniform() > 0.5 else shear_y(min_shear, max_shear),
scaling_xy(min_scaling, max_scaling),
flip_x() if np.random.uniform() > 0.5 else flip_y(),
])
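# --- Illustrative usage sketch (editor's addition, not from the upstream file).
# Sampling one jittered transform: every component helper returns a homogeneous
# 3x3 matrix, so the composition is always 3x3 even when some components fall
# back to the identity. Explicit translation bounds are passed because
# translation_xy() samples integer offsets with np.random.randint.
if __name__ == '__main__':
    sample = random_transform(
        min_rotation=-0.1, max_rotation=0.1,
        min_translation=(-10, -10), max_translation=(10, 10),
        min_scaling=(0.9, 0.9), max_scaling=(1.1, 1.1),
    )
    assert sample.shape == (3, 3)
    print(sample)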
def random_transform_generator(**kwargs):
"""
Create a random transform generator.
The transformation consists of the following operations in this order (from left to right):
* rotation
* translation
* shear
* scaling
* flip x (if applied)
* flip y (if applied)
Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation
    as a factor of the image size. So an X translation of 0.1 would translate the image by 10% of its width.
Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret
the translation directly as pixel distances instead.
Args
min_rotation: The minimum rotation in radians for the transform as scalar.
max_rotation: The maximum rotation in radians for the transform as scalar.
min_translation: The minimum translation for the transform as 2D column vector.
max_translation: The maximum translation for the transform as 2D column vector.
min_shear: The minimum shear angle for the transform in radians.
max_shear: The maximum shear angle for the transform in radians.
min_scaling: The minimum scaling for the transform as 2D column vector.
max_scaling: The maximum scaling for the transform as 2D column vector.
"""
while True:
yield random_transform(**kwargs)
def adjust_transform_for_image(transform, image, relative_translation):
"""
Adjust a transformation for a specific image.
The translation of the matrix will be scaled with the size of the image.
The linear part of the transformation will adjusted so that the origin of the transformation will be at the center of the image.
"""
height, width, channels = image.shape
result = transform
# Scale the translation with the image size if specified.
if relative_translation:
result[0:2, 2] *= [width, height]
    # Move the origin of transformation to the image center. Note that `result`
    # aliases `transform`, so the in-place translation scaling above carries over.
    result = change_transform_origin(result, (0.5 * width, 0.5 * height))
return result
class TransformParameters:
"""
Struct holding parameters determining how to apply a transformation to an image.
Args
fill_mode: One of: 'constant', 'nearest', 'reflect', 'wrap'
interpolation: One of: 'nearest', 'linear', 'cubic', 'area', 'lanczos4'
cval: Fill value to use with fill_mode='constant'
relative_translation: If true (the default), interpret translation as a factor of the image size.
If false, interpret it as absolute pixels.
"""
def __init__(
self,
fill_mode='nearest',
interpolation='linear',
cval=0,
relative_translation=True,
):
self.fill_mode = fill_mode
self.cval = cval
self.interpolation = interpolation
self.relative_translation = relative_translation
def cv_border_mode(self):
if self.fill_mode == 'constant':
return cv2.BORDER_CONSTANT
if self.fill_mode == 'nearest':
return cv2.BORDER_REPLICATE
if self.fill_mode == 'reflect':
return cv2.BORDER_REFLECT_101
if self.fill_mode == 'wrap':
return cv2.BORDER_WRAP
def cv_interpolation(self):
if self.interpolation == 'nearest':
return cv2.INTER_NEAREST
if self.interpolation == 'linear':
return cv2.INTER_LINEAR
if self.interpolation == 'cubic':
return cv2.INTER_CUBIC
if self.interpolation == 'area':
return cv2.INTER_AREA
if self.interpolation == 'lanczos4':
return cv2.INTER_LANCZOS4
def apply_transform(matrix, image, params):
"""
Apply a transformation to an image.
The origin of transformation is at the top left corner of the image.
The matrix is interpreted such that a point (x, y) on the original image is moved to transform * (x, y) in the generated image.
Mathematically speaking, that means that the matrix is a transformation from the transformed image space to the original image space.
Args
        matrix: A homogeneous 3 by 3 matrix holding the transformation to apply.
image: The image to transform.
params: The transform parameters (see TransformParameters)
"""
output = cv2.warpAffine(
image,
matrix[:2, :],
dsize=(image.shape[1], image.shape[0]),
        flags=params.cv_interpolation(),
        borderMode=params.cv_border_mode(),
borderValue=params.cval,
)
return output
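# --- Illustrative usage sketch (editor's addition, not from the upstream file;
# it relies on the snake_case cv_interpolation / cv_border_mode accessors
# defined on TransformParameters above). A pure translation matrix moves a
# single white pixel 5 columns to the right.
if __name__ == '__main__':
    dummy = np.zeros((20, 20, 3), dtype=np.uint8)
    dummy[10, 10] = 255
    shift_right = np.array([
        [1, 0, 5],
        [0, 1, 0],
        [0, 0, 1]
    ], dtype=np.float64)
    warped = apply_transform(shift_right, dummy, TransformParameters())
    print(warped[10, 15])  # -> [255 255 255]: the pixel moved 5 columns right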
| 16,261 | 30.034351 | 137 | py |
EfficientDet | EfficientDet-master/augmentor/misc.py | import cv2
import numpy as np
from augmentor.transform import translation_xy, change_transform_origin, scaling_xy
from utils import reorder_vertexes
def rotate(image, annotations, prob=0.5, border_value=(128, 128, 128)):
assert 'bboxes' in annotations, 'annotations should contain bboxes even if it is empty'
random_prob = np.random.uniform()
if random_prob < (1 - prob):
return image, annotations
rotate_degree = np.random.uniform(low=-10, high=10)
h, w = image.shape[:2]
# Compute the rotation matrix.
M = cv2.getRotationMatrix2D(center=(w / 2, h / 2),
angle=rotate_degree,
scale=1)
# Get the sine and cosine from the rotation matrix.
abs_cos_angle = np.abs(M[0, 0])
abs_sin_angle = np.abs(M[0, 1])
# Compute the new bounding dimensions of the image.
new_w = int(h * abs_sin_angle + w * abs_cos_angle)
new_h = int(h * abs_cos_angle + w * abs_sin_angle)
# Adjust the rotation matrix to take into account the translation.
M[0, 2] += new_w // 2 - w // 2
M[1, 2] += new_h // 2 - h // 2
# Rotate the image.
image = cv2.warpAffine(image, M=M, dsize=(new_w, new_h), flags=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_CONSTANT,
borderValue=border_value)
bboxes = annotations['bboxes']
if bboxes.shape[0] != 0:
new_bboxes = []
for bbox in bboxes:
x1, y1, x2, y2 = bbox
points = M.dot([
[x1, x2, x1, x2],
[y1, y2, y2, y1],
[1, 1, 1, 1],
])
            # Recover an axis-aligned box from the rotated corners by averaging
            # the two smallest and the two largest projected coordinates
            # (slightly tighter than a plain min/max over the corners).
min_xy = np.sort(points, axis=1)[:, :2]
min_x = np.mean(min_xy[0])
min_y = np.mean(min_xy[1])
max_xy = np.sort(points, axis=1)[:, 2:]
max_x = np.mean(max_xy[0])
max_y = np.mean(max_xy[1])
new_bboxes.append([min_x, min_y, max_x, max_y])
annotations['bboxes'] = np.array(new_bboxes, dtype=np.float32)
if 'quadrangles' in annotations and annotations['quadrangles'].shape[0] != 0:
quadrangles = annotations['quadrangles']
rotated_quadrangles = []
for quadrangle in quadrangles:
quadrangle = np.concatenate([quadrangle, np.ones((4, 1))], axis=-1)
rotated_quadrangle = M.dot(quadrangle.T).T[:, :2]
quadrangle = reorder_vertexes(rotated_quadrangle)
rotated_quadrangles.append(quadrangle)
quadrangles = np.stack(rotated_quadrangles)
annotations['quadrangles'] = quadrangles
xmin = np.min(quadrangles, axis=1)[:, 0]
ymin = np.min(quadrangles, axis=1)[:, 1]
xmax = np.max(quadrangles, axis=1)[:, 0]
ymax = np.max(quadrangles, axis=1)[:, 1]
bboxes = np.stack([xmin, ymin, xmax, ymax], axis=1)
annotations['bboxes'] = bboxes
return image, annotations
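# --- Illustrative usage sketch (editor's addition, not from the upstream file;
# the _demo_rotate name and its inputs are demo-only). Call _demo_rotate()
# manually: with prob=1. the augmentation always fires, the canvas grows to
# hold the rotated content, and the bboxes are re-derived from the corners.
def _demo_rotate():
    demo_image = np.full((100, 100, 3), 128, dtype=np.uint8)
    demo_annotations = {'bboxes': np.array([[20., 20., 60., 50.]], dtype=np.float32)}
    rotated_image, rotated_annotations = rotate(demo_image, demo_annotations, prob=1.)
    print(rotated_image.shape, rotated_annotations['bboxes'])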
def crop(image, annotations, prob=0.5):
assert 'bboxes' in annotations, 'annotations should contain bboxes even if it is empty'
random_prob = np.random.uniform()
if random_prob < (1 - prob):
return image, annotations
h, w = image.shape[:2]
bboxes = annotations['bboxes']
if bboxes.shape[0] != 0:
min_x1, min_y1 = np.min(bboxes, axis=0)[:2]
max_x2, max_y2 = np.max(bboxes, axis=0)[2:]
random_x1 = np.random.randint(0, max(min_x1 // 2, 1))
random_y1 = np.random.randint(0, max(min_y1 // 2, 1))
random_x2 = np.random.randint(max_x2 + 1, max(min(w, max_x2 + (w - max_x2) // 2), max_x2 + 2))
random_y2 = np.random.randint(max_y2 + 1, max(min(h, max_y2 + (h - max_y2) // 2), max_y2 + 2))
image = image[random_y1:random_y2, random_x1:random_x2]
bboxes[:, [0, 2]] = bboxes[:, [0, 2]] - random_x1
bboxes[:, [1, 3]] = bboxes[:, [1, 3]] - random_y1
if 'quadrangles' in annotations and annotations['quadrangles'].shape[0] != 0:
quadrangles = annotations['quadrangles']
quadrangles[:, :, 0] = quadrangles[:, :, 0] - random_x1
quadrangles[:, :, 1] = quadrangles[:, :, 1] - random_y1
else:
random_x1 = np.random.randint(0, max(w // 8, 1))
random_y1 = np.random.randint(0, max(h // 8, 1))
random_x2 = np.random.randint(7 * w // 8, w - 1)
random_y2 = np.random.randint(7 * h // 8, h - 1)
image = image[random_y1:random_y2, random_x1:random_x2]
return image, annotations
def flipx(image, annotations, prob=0.5):
assert 'bboxes' in annotations, 'annotations should contain bboxes even if it is empty'
random_prob = np.random.uniform()
if random_prob < (1 - prob):
return image, annotations
bboxes = annotations['bboxes']
h, w = image.shape[:2]
image = image[:, ::-1]
if bboxes.shape[0] != 0:
tmp = bboxes.copy()
bboxes[:, 0] = w - 1 - bboxes[:, 2]
bboxes[:, 2] = w - 1 - tmp[:, 0]
if 'quadrangles' in annotations and annotations['quadrangles'].shape[0] != 0:
quadrangles = annotations['quadrangles']
tmp = quadrangles.copy()
quadrangles[:, 0, 0] = w - 1 - quadrangles[:, 0, 0]
quadrangles[:, 1, 0] = w - 1 - tmp[:, 3, 0]
quadrangles[:, 1, 1] = tmp[:, 3, 1]
quadrangles[:, 2, 0] = w - 1 - quadrangles[:, 2, 0]
quadrangles[:, 3, 0] = w - 1 - tmp[:, 1, 0]
quadrangles[:, 3, 1] = tmp[:, 1, 1]
return image, annotations
def multi_scale(image, annotations, prob=1.):
assert 'bboxes' in annotations, 'annotations should contain bboxes even if it is empty'
random_prob = np.random.uniform()
if random_prob < (1 - prob):
return image, annotations
h, w = image.shape[:2]
scale = np.random.choice(np.arange(0.7, 1.4, 0.1))
nh, nw = int(round(h * scale)), int(round(w * scale))
image = cv2.resize(image, (nw, nh), interpolation=cv2.INTER_LINEAR)
bboxes = annotations['bboxes']
if bboxes.shape[0] != 0:
annotations['bboxes'] = np.round(bboxes * scale)
if 'quadrangles' in annotations and annotations['quadrangles'].shape[0] != 0:
quadrangles = annotations['quadrangles']
annotations['quadrangles'] = np.round(quadrangles * scale)
return image, annotations
def translate(image, annotations, prob=0.5, border_value=(128, 128, 128)):
assert 'bboxes' in annotations, 'annotations should contain bboxes even if it is empty'
random_prob = np.random.uniform()
if random_prob < (1 - prob):
return image, annotations
h, w = image.shape[:2]
bboxes = annotations['bboxes']
if bboxes.shape[0] != 0:
min_x1, min_y1 = np.min(bboxes, axis=0)[:2].astype(np.int32)
max_x2, max_y2 = np.max(bboxes, axis=0)[2:].astype(np.int32)
translation_matrix = translation_xy(min=(min(-(min_x1 // 2), 0), min(-(min_y1 // 2), 0)),
max=(max((w - 1 - max_x2) // 2, 1), max((h - 1 - max_y2) // 2, 1)),
prob=1.)
else:
translation_matrix = translation_xy(min=(min(-w // 8, 0), min(-h // 8, 0)),
max=(max(w // 8, 1), max(h // 8, 1)))
translation_matrix = change_transform_origin(translation_matrix, (w / 2, h / 2))
image = cv2.warpAffine(
image,
translation_matrix[:2, :],
dsize=(w, h),
flags=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_CONSTANT,
borderValue=border_value,
)
if bboxes.shape[0] != 0:
new_bboxes = []
for bbox in bboxes:
x1, y1, x2, y2 = bbox
points = translation_matrix.dot([
[x1, x2, x1, x2],
[y1, y2, y2, y1],
[1, 1, 1, 1],
])
min_x, min_y = np.min(points, axis=1)[:2]
max_x, max_y = np.max(points, axis=1)[:2]
new_bboxes.append([min_x, min_y, max_x, max_y])
annotations['bboxes'] = np.array(new_bboxes).astype(np.float32)
if 'quadrangles' in annotations and annotations['quadrangles'].shape[0] != 0:
quadrangles = annotations['quadrangles']
translated_quadrangles = []
for quadrangle in quadrangles:
quadrangle = np.concatenate([quadrangle, np.ones((4, 1))], axis=-1)
translated_quadrangle = translation_matrix.dot(quadrangle.T).T[:, :2]
quadrangle = reorder_vertexes(translated_quadrangle)
translated_quadrangles.append(quadrangle)
quadrangles = np.stack(translated_quadrangles)
annotations['quadrangles'] = quadrangles
xmin = np.min(quadrangles, axis=1)[:, 0]
ymin = np.min(quadrangles, axis=1)[:, 1]
xmax = np.max(quadrangles, axis=1)[:, 0]
ymax = np.max(quadrangles, axis=1)[:, 1]
bboxes = np.stack([xmin, ymin, xmax, ymax], axis=1)
annotations['bboxes'] = bboxes
return image, annotations
class MiscEffect:
def __init__(self, multi_scale_prob=0.5, rotate_prob=0.05, flip_prob=0.5, crop_prob=0.5, translate_prob=0.5,
border_value=(128, 128, 128)):
self.multi_scale_prob = multi_scale_prob
self.rotate_prob = rotate_prob
self.flip_prob = flip_prob
self.crop_prob = crop_prob
self.translate_prob = translate_prob
self.border_value = border_value
def __call__(self, image, annotations):
image, annotations = multi_scale(image, annotations, prob=self.multi_scale_prob)
image, annotations = rotate(image, annotations, prob=self.rotate_prob, border_value=self.border_value)
image, annotations = flipx(image, annotations, prob=self.flip_prob)
image, annotations = crop(image, annotations, prob=self.crop_prob)
image, annotations = translate(image, annotations, prob=self.translate_prob, border_value=self.border_value)
return image, annotations
if __name__ == '__main__':
from generators.csv_ import CSVGenerator
train_generator = CSVGenerator('datasets/ic15/train.csv',
'datasets/ic15/classes.csv',
detect_text=True,
batch_size=1,
phi=5,
shuffle_groups=False)
misc_effect = MiscEffect()
for i in range(train_generator.size()):
image = train_generator.load_image(i)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
annotations = train_generator.load_annotations(i)
boxes = annotations['bboxes'].astype(np.int32)
quadrangles = annotations['quadrangles'].astype(np.int32)
for box in boxes:
cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 0, 255), 1)
cv2.drawContours(image, quadrangles, -1, (0, 255, 255), 1)
src_image = image.copy()
# cv2.namedWindow('src_image', cv2.WINDOW_NORMAL)
cv2.imshow('src_image', src_image)
# image, annotations = misc_effect(image, annotations)
image, annotations = multi_scale(image, annotations, prob=1.)
image = image.copy()
boxes = annotations['bboxes'].astype(np.int32)
quadrangles = annotations['quadrangles'].astype(np.int32)
for box in boxes:
cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 1)
cv2.drawContours(image, quadrangles, -1, (255, 255, 0), 1)
cv2.namedWindow('image', cv2.WINDOW_NORMAL)
cv2.imshow('image', image)
cv2.waitKey(0)
| 11,766 | 43.57197 | 116 | py |
EfficientDet | EfficientDet-master/augmentor/__init__.py | 0 | 0 | 0 | py |
|
EfficientDet | EfficientDet-master/utils/anchors.py | # import keras
import numpy as np
from tensorflow import keras
from utils.compute_overlap import compute_overlap
class AnchorParameters:
"""
The parameters that define how anchors are generated.
Args
sizes : List of sizes to use. Each size corresponds to one feature level.
        strides : List of strides to use. Each stride corresponds to one feature level.
ratios : List of ratios to use per location in a feature map.
scales : List of scales to use per location in a feature map.
"""
def __init__(self, sizes=(32, 64, 128, 256, 512),
strides=(8, 16, 32, 64, 128),
ratios=(1, 0.5, 2),
scales=(2 ** 0, 2 ** (1. / 3.), 2 ** (2. / 3.))):
self.sizes = sizes
self.strides = strides
self.ratios = np.array(ratios, dtype=keras.backend.floatx())
self.scales = np.array(scales, dtype=keras.backend.floatx())
def num_anchors(self):
return len(self.ratios) * len(self.scales)
"""
The default anchor parameters.
"""
AnchorParameters.default = AnchorParameters(
sizes=[32, 64, 128, 256, 512],
strides=[8, 16, 32, 64, 128],
# ratio=h/w
ratios=np.array([1, 0.5, 2], keras.backend.floatx()),
scales=np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)], keras.backend.floatx()),
)
def anchor_targets_bbox(
anchors,
image_group,
annotations_group,
num_classes,
negative_overlap=0.4,
positive_overlap=0.5,
detect_quadrangle=False
):
"""
Generate anchor targets for bbox detection.
Args
anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
image_group: List of BGR images.
annotations_group: List of annotations (np.array of shape (N, 5) for (x1, y1, x2, y2, label)).
num_classes: Number of classes to predict.
        detect_quadrangle: If True, also generate quadrangle regression targets (alphas and ratios).
negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).
Returns
labels_batch: batch that contains labels & anchor states (np.array of shape (batch_size, N, num_classes + 1),
where N is the number of anchors for an image and the last column defines the anchor state
(-1 for ignore, 0 for bg, 1 for fg).
regression_batch: batch that contains bounding-box regression targets for an image & anchor states
(np.array of shape (batch_size, N, 4 + 1), where N is the number of anchors for an image,
the first 4 columns define regression targets for (x1, y1, x2, y2) and the last column defines
anchor states (-1 for ignore, 0 for bg, 1 for fg).
"""
assert (len(image_group) == len(annotations_group)), "The length of the images and annotations need to be equal."
assert (len(annotations_group) > 0), "No data received to compute anchor targets for."
for annotations in annotations_group:
assert ('bboxes' in annotations), "Annotations should contain bboxes."
assert ('labels' in annotations), "Annotations should contain labels."
batch_size = len(image_group)
if detect_quadrangle:
regression_batch = np.zeros((batch_size, anchors.shape[0], 9 + 1), dtype=np.float32)
else:
regression_batch = np.zeros((batch_size, anchors.shape[0], 4 + 1), dtype=np.float32)
labels_batch = np.zeros((batch_size, anchors.shape[0], num_classes + 1), dtype=np.float32)
# compute labels and regression targets
for index, (image, annotations) in enumerate(zip(image_group, annotations_group)):
if annotations['bboxes'].shape[0]:
# obtain indices of gt annotations with the greatest overlap
# argmax_overlaps_inds: id of ground truth box has greatest overlap with anchor
# (N, ), (N, ), (N, ) N is num_anchors
positive_indices, ignore_indices, argmax_overlaps_inds = compute_gt_annotations(anchors,
annotations['bboxes'],
negative_overlap,
positive_overlap)
labels_batch[index, ignore_indices, -1] = -1
labels_batch[index, positive_indices, -1] = 1
regression_batch[index, ignore_indices, -1] = -1
regression_batch[index, positive_indices, -1] = 1
# compute target class labels
labels_batch[
index, positive_indices, annotations['labels'][argmax_overlaps_inds[positive_indices]].astype(int)] = 1
regression_batch[index, :, :4] = bbox_transform(anchors, annotations['bboxes'][argmax_overlaps_inds, :])
if detect_quadrangle:
regression_batch[index, :, 4:8] = annotations['alphas'][argmax_overlaps_inds, :]
regression_batch[index, :, 8] = annotations['ratios'][argmax_overlaps_inds]
# ignore anchors outside of image
if image.shape:
anchors_centers = np.vstack([(anchors[:, 0] + anchors[:, 2]) / 2, (anchors[:, 1] + anchors[:, 3]) / 2]).T
indices = np.logical_or(anchors_centers[:, 0] >= image.shape[1], anchors_centers[:, 1] >= image.shape[0])
labels_batch[index, indices, -1] = -1
regression_batch[index, indices, -1] = -1
return labels_batch, regression_batch
def compute_gt_annotations(
anchors,
annotations,
negative_overlap=0.4,
positive_overlap=0.5
):
"""
Obtain indices of gt annotations with the greatest overlap.
Args
anchors: np.array of annotations of shape (N, 4) for (x1, y1, x2, y2).
annotations: np.array of shape (K, 5) for (x1, y1, x2, y2, label).
negative_overlap: IoU overlap for negative anchors (all anchors with overlap < negative_overlap are negative).
        positive_overlap: IoU overlap for positive anchors (all anchors with overlap > positive_overlap are positive).
Returns
positive_indices: indices of positive anchors, (N, )
ignore_indices: indices of ignored anchors, (N, )
        argmax_overlaps_inds: for each anchor, the index of the ground-truth box with the greatest overlap, (N, )
"""
# (N, K)
overlaps = compute_overlap(anchors.astype(np.float64), annotations.astype(np.float64))
# (N, )
argmax_overlaps_inds = np.argmax(overlaps, axis=1)
# (N, )
max_overlaps = overlaps[np.arange(overlaps.shape[0]), argmax_overlaps_inds]
# assign "dont care" labels
# (N, )
positive_indices = max_overlaps >= positive_overlap
# adam: in case of there are gt boxes has no matched positive anchors
# nonzero_inds = np.nonzero(overlaps == np.max(overlaps, axis=0))
# positive_indices[nonzero_inds[0]] = 1
# (N, )
ignore_indices = (max_overlaps > negative_overlap) & ~positive_indices
return positive_indices, ignore_indices, argmax_overlaps_inds
def layer_shapes(image_shape, model):
"""
Compute layer shapes given input image shape and the model.
Args
image_shape: The shape of the image.
model: The model to use for computing how the image shape is transformed in the pyramid.
Returns
A dictionary mapping layer names to image shapes.
"""
shape = {
model.layers[0].name: (None,) + image_shape,
}
for layer in model.layers[1:]:
nodes = layer._inbound_nodes
for node in nodes:
input_shapes = [shape[inbound_layer.name] for inbound_layer in node.inbound_layers]
if not input_shapes:
continue
shape[layer.name] = layer.compute_output_shape(input_shapes[0] if len(input_shapes) == 1 else input_shapes)
return shape
def make_shapes_callback(model):
"""
Make a function for getting the shape of the pyramid levels.
"""
def get_shapes(image_shape, pyramid_levels):
shape = layer_shapes(image_shape, model)
image_shapes = [shape["P{}".format(level)][1:3] for level in pyramid_levels]
return image_shapes
return get_shapes
def guess_shapes(image_shape, pyramid_levels):
"""
Guess shapes based on pyramid levels.
Args
image_shape: The shape of the image.
pyramid_levels: A list of what pyramid levels are used.
Returns
A list of image shapes at each pyramid level.
"""
image_shape = np.array(image_shape[:2])
image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in pyramid_levels]
return image_shapes
def anchors_for_shape(
image_shape,
pyramid_levels=None,
anchor_params=None,
shapes_callback=None,
):
"""
    Generate anchors for a given shape.
Args
image_shape: The shape of the image.
pyramid_levels: List of ints representing which pyramids to use (defaults to [3, 4, 5, 6, 7]).
anchor_params: Struct containing anchor parameters. If None, default values are used.
shapes_callback: Function to call for getting the shape of the image at different pyramid levels.
Returns
np.array of shape (N, 4) containing the (x1, y1, x2, y2) coordinates for the anchors.
"""
if pyramid_levels is None:
pyramid_levels = [3, 4, 5, 6, 7]
if anchor_params is None:
anchor_params = AnchorParameters.default
if shapes_callback is None:
shapes_callback = guess_shapes
feature_map_shapes = shapes_callback(image_shape, pyramid_levels)
# compute anchors over all pyramid levels
all_anchors = np.zeros((0, 4), dtype=np.float32)
for idx, p in enumerate(pyramid_levels):
anchors = generate_anchors(
base_size=anchor_params.sizes[idx],
ratios=anchor_params.ratios,
scales=anchor_params.scales
)
shifted_anchors = shift(feature_map_shapes[idx], anchor_params.strides[idx], anchors)
all_anchors = np.append(all_anchors, shifted_anchors, axis=0)
return all_anchors.astype(np.float32)
def shift(feature_map_shape, stride, anchors):
"""
Produce shifted anchors based on shape of the map and stride size.
Args
feature_map_shape : Shape to shift the anchors over.
stride : Stride to shift the anchors with over the shape.
anchors: The anchors to apply at each location.
"""
# create a grid starting from half stride from the top left corner
shift_x = (np.arange(0, feature_map_shape[1]) + 0.5) * stride
shift_y = (np.arange(0, feature_map_shape[0]) + 0.5) * stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((
shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel()
)).transpose()
A = anchors.shape[0]
K = shifts.shape[0]
all_anchors = (anchors.reshape((1, A, 4)) + shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
return all_anchors
def generate_anchors(base_size=16, ratios=None, scales=None):
"""
Generate anchor (reference) windows by enumerating aspect ratios X scales w.r.t. a reference window.
Args:
        base_size: the reference anchor size for this pyramid level
        ratios: the aspect ratios (h/w) to enumerate per location
        scales: the scales to enumerate per location
    Returns:
        np.array of shape (len(ratios) * len(scales), 4) with anchors in (x1, y1, x2, y2) form, centered at the origin
"""
if ratios is None:
ratios = AnchorParameters.default.ratios
if scales is None:
scales = AnchorParameters.default.scales
num_anchors = len(ratios) * len(scales)
# initialize output anchors
anchors = np.zeros((num_anchors, 4))
anchors[:, 2:] = base_size * np.tile(np.repeat(scales, len(ratios))[None], (2, 1)).T
areas = anchors[:, 2] * anchors[:, 3]
# correct for ratios
anchors[:, 2] = np.sqrt(areas / np.tile(ratios, len(scales)))
anchors[:, 3] = anchors[:, 2] * np.tile(ratios, len(scales))
anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T
return anchors
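# --- Illustrative usage sketch (editor's addition, not from the upstream file).
# generate_anchors() enumerates ratios x scales around the origin, and
# anchors_for_shape() shifts those base anchors across every pyramid level.
if __name__ == '__main__':
    base_anchors = generate_anchors(base_size=32)
    print(base_anchors.shape)  # (9, 4): 3 ratios x 3 scales
    all_anchors = anchors_for_shape((512, 512, 3))
    print(all_anchors.shape)   # (49104, 4): levels 3-7 of a 512x512 input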
def bbox_transform(anchors, gt_boxes, scale_factors=None):
wa = anchors[:, 2] - anchors[:, 0]
ha = anchors[:, 3] - anchors[:, 1]
cxa = anchors[:, 0] + wa / 2.
cya = anchors[:, 1] + ha / 2.
w = gt_boxes[:, 2] - gt_boxes[:, 0]
h = gt_boxes[:, 3] - gt_boxes[:, 1]
cx = gt_boxes[:, 0] + w / 2.
cy = gt_boxes[:, 1] + h / 2.
# Avoid NaN in division and log below.
ha += 1e-7
wa += 1e-7
h += 1e-7
w += 1e-7
tx = (cx - cxa) / wa
ty = (cy - cya) / ha
tw = np.log(w / wa)
th = np.log(h / ha)
if scale_factors:
ty /= scale_factors[0]
tx /= scale_factors[1]
th /= scale_factors[2]
tw /= scale_factors[3]
targets = np.stack([ty, tx, th, tw], axis=1)
return targets
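# --- Illustrative usage sketch (editor's addition, not from the upstream file).
# bbox_transform() encodes ground-truth boxes relative to anchors as
# (ty, tx, th, tw); an anchor that matches its ground truth exactly encodes
# to (numerically) zero offsets.
if __name__ == '__main__':
    demo_anchors = np.array([[0., 0., 32., 32.]])
    demo_gt = np.array([[0., 0., 32., 32.]])
    print(bbox_transform(demo_anchors, demo_gt))  # -> [[0. 0. 0. 0.]]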
| 13,034 | 35.615169 | 119 | py |
EfficientDet | EfficientDet-master/utils/visualization.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
from .colors import label_color
def draw_box(image, box, color, thickness=2):
""" Draws a box on an image with a given color.
# Arguments
image : The image to draw on.
box : A list of 4 elements (x1, y1, x2, y2).
color : The color of the box.
thickness : The thickness of the lines to draw a box with.
"""
b = np.array(box).astype(np.int32)
cv2.rectangle(image, (b[0], b[1]), (b[2], b[3]), color, thickness, cv2.LINE_AA)
def draw_caption(image, box, caption):
""" Draws a caption above the box in an image.
# Arguments
image : The image to draw on.
box : A list of 4 elements (x1, y1, x2, y2).
caption : String containing the text to draw.
"""
b = np.array(box).astype(int)
cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 2)
cv2.putText(image, caption, (b[0], b[1] - 10), cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1)
def draw_boxes(image, boxes, color, thickness=2):
""" Draws boxes on an image with a given color.
# Arguments
image : The image to draw on.
boxes : A [N, 4] matrix (x1, y1, x2, y2).
color : The color of the boxes.
thickness : The thickness of the lines to draw boxes with.
"""
for b in boxes:
draw_box(image, b, color, thickness=thickness)
def draw_detections(image, boxes, scores, labels, colors, label_to_name=None, score_threshold=0.5):
""" Draws detections in an image.
# Arguments
image : The image to draw on.
boxes : A [N, 4] matrix (x1, y1, x2, y2).
scores : A list of N classification scores.
labels : A list of N labels.
colors : The colors of the boxes.
label_to_name : (optional) Functor for mapping a label to a name.
score_threshold : Threshold used for determining what detections to draw.
"""
selection = np.where(scores > score_threshold)[0]
for i in selection:
c = colors[int(labels[i])]
draw_box(image, boxes[i, :], color=c)
# draw labels
        # Format via str.format so numeric labels also work when no label_to_name is given.
        caption = '{}: {:.2f}'.format(label_to_name(labels[i]) if label_to_name else labels[i], scores[i])
draw_caption(image, boxes[i, :], caption)
def draw_annotations(image, annotations, color=(0, 255, 0), label_to_name=None):
""" Draws annotations in an image.
# Arguments
image : The image to draw on.
annotations : A [N, 5] matrix (x1, y1, x2, y2, label) or dictionary containing bboxes (shaped [N, 4]) and labels (shaped [N]).
color : The color of the boxes. By default the color from keras_retinanet.utils.colors.label_color will be used.
label_to_name : (optional) Functor for mapping a label to a name.
"""
if isinstance(annotations, np.ndarray):
annotations = {'bboxes': annotations[:, :4], 'labels': annotations[:, 4]}
assert('bboxes' in annotations)
assert('labels' in annotations)
assert(annotations['bboxes'].shape[0] == annotations['labels'].shape[0])
for i in range(annotations['bboxes'].shape[0]):
label = annotations['labels'][i]
c = color if color is not None else label_color(label)
caption = '{}'.format(label_to_name(label) if label_to_name else label)
draw_caption(image, annotations['bboxes'][i], caption)
draw_box(image, annotations['bboxes'][i], color=c)
| 4,112 | 37.439252 | 136 | py |
EfficientDet | EfficientDet-master/utils/image.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import division
import numpy as np
import cv2
from PIL import Image
from .transform import change_transform_origin
def read_image_bgr(path):
"""
Read an image in BGR format.
Args
path: Path to the image.
"""
# We deliberately don't use cv2.imread here, since it gives no feedback on errors while reading the image.
image = np.asarray(Image.open(path).convert('RGB'))
return image[:, :, ::-1].copy()
def preprocess_image(x, mode='caffe'):
"""
Preprocess an image by subtracting the ImageNet mean.
Args
x: np.array of shape (None, None, 3) or (3, None, None).
mode: One of "caffe" or "tf".
- caffe: will zero-center each color channel with
respect to the ImageNet dataset, without scaling.
- tf: will scale pixels between -1 and 1, sample-wise.
Returns
The input with the ImageNet mean subtracted.
"""
# mostly identical to "https://github.com/keras-team/keras-applications/blob/master/keras_applications/imagenet_utils.py"
# except for converting RGB -> BGR since we assume BGR already
    # always convert to float32 to keep compatibility with opencv
x = x.astype(np.float32)
if mode == 'tf':
x /= 127.5
x -= 1.
elif mode == 'caffe':
x[..., 0] -= 103.939
x[..., 1] -= 116.779
x[..., 2] -= 123.68
return x
def adjust_transform_for_image(transform, image, relative_translation):
"""
Adjust a transformation for a specific image.
The translation of the matrix will be scaled with the size of the image.
The linear part of the transformation will adjusted so that the origin of the transformation will be at the center of the image.
"""
height, width, channels = image.shape
result = transform
# Scale the translation with the image size if specified.
if relative_translation:
result[0:2, 2] *= [width, height]
    # Move the origin of transformation to the image center. Note that `result`
    # aliases `transform`, so the in-place translation scaling above carries over.
    result = change_transform_origin(result, (0.5 * width, 0.5 * height))
return result
class TransformParameters:
"""
Struct holding parameters determining how to apply a transformation to an image.
Args
fill_mode: One of: 'constant', 'nearest', 'reflect', 'wrap'
interpolation: One of: 'nearest', 'linear', 'cubic', 'area', 'lanczos4'
cval: Fill value to use with fill_mode='constant'
relative_translation: If true (the default), interpret translation as a factor of the image size.
If false, interpret it as absolute pixels.
"""
def __init__(
self,
fill_mode='nearest',
interpolation='linear',
cval=0,
relative_translation=True,
):
self.fill_mode = fill_mode
self.cval = cval
self.interpolation = interpolation
self.relative_translation = relative_translation
def cvBorderMode(self):
if self.fill_mode == 'constant':
return cv2.BORDER_CONSTANT
if self.fill_mode == 'nearest':
return cv2.BORDER_REPLICATE
if self.fill_mode == 'reflect':
return cv2.BORDER_REFLECT_101
if self.fill_mode == 'wrap':
return cv2.BORDER_WRAP
def cvInterpolation(self):
if self.interpolation == 'nearest':
return cv2.INTER_NEAREST
if self.interpolation == 'linear':
return cv2.INTER_LINEAR
if self.interpolation == 'cubic':
return cv2.INTER_CUBIC
if self.interpolation == 'area':
return cv2.INTER_AREA
if self.interpolation == 'lanczos4':
return cv2.INTER_LANCZOS4
def apply_transform(matrix, image, params):
"""
Apply a transformation to an image.
The origin of transformation is at the top left corner of the image.
The matrix is interpreted such that a point (x, y) on the original image is moved to transform * (x, y) in the generated image.
Mathematically speaking, that means that the matrix is a transformation from the transformed image space to the original image space.
Args
        matrix: A homogeneous 3 by 3 matrix holding the transformation to apply.
image: The image to transform.
params: The transform parameters (see TransformParameters)
"""
output = cv2.warpAffine(
image,
matrix[:2, :],
dsize=(image.shape[1], image.shape[0]),
flags=params.cvInterpolation(),
borderMode=params.cvBorderMode(),
borderValue=params.cval,
)
return output
def compute_resize_scale(image_shape, min_side=800, max_side=1333):
"""
Compute an image scale such that the image size is constrained to min_side and max_side.
    Args
        image_shape: The (rows, cols, channels) shape of the image.
        min_side: The image's min side will be equal to min_side after resizing.
max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side.
Returns
A resizing scale.
"""
(rows, cols, _) = image_shape
smallest_side = min(rows, cols)
# rescale the image so the smallest side is min_side
scale = min_side / smallest_side
# check if the largest side is now greater than max_side, which can happen
# when images have a large aspect ratio
largest_side = max(rows, cols)
if largest_side * scale > max_side:
scale = max_side / largest_side
return scale
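# --- Illustrative worked example (editor's addition, not from the upstream file).
# A 480x640 image has smallest side 480, so the initial scale is 800 / 480
# (about 1.667); the rescaled largest side 640 * 1.667 (about 1067) stays below
# max_side=1333, so no second correction is needed.
if __name__ == '__main__':
    print(compute_resize_scale((480, 640, 3)))  # -> 1.666...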
def resize_image(img, min_side=800, max_side=1333):
"""
Resize an image such that the size is constrained to min_side and max_side.
    Args
        img: The image to resize.
        min_side: The image's min side will be equal to min_side after resizing.
max_side: If after resizing the image's max side is above max_side, resize until the max side is equal to max_side.
Returns
A resized image.
"""
# compute scale to resize the image
scale = compute_resize_scale(img.shape, min_side=min_side, max_side=max_side)
# resize the image with the computed scale
img = cv2.resize(img, None, fx=scale, fy=scale)
return img, scale
def _uniform(val_range):
"""
Uniformly sample from the given range.
Args
val_range: A pair of lower and upper bound.
"""
return np.random.uniform(val_range[0], val_range[1])
def _check_range(val_range, min_val=None, max_val=None):
"""
Check whether the range is a valid range.
Args
val_range: A pair of lower and upper bound.
min_val: Minimal value for the lower bound.
max_val: Maximal value for the upper bound.
"""
if val_range[0] > val_range[1]:
raise ValueError('interval lower bound > upper bound')
if min_val is not None and val_range[0] < min_val:
raise ValueError('invalid interval lower bound')
if max_val is not None and val_range[1] > max_val:
raise ValueError('invalid interval upper bound')
def _clip(image):
"""
Clip and convert an image to np.uint8.
Args
image: Image to clip.
"""
return np.clip(image, 0, 255).astype(np.uint8)
class VisualEffect:
"""
Struct holding parameters and applying image color transformation.
Args
contrast_factor: A factor for adjusting contrast. Should be between 0 and 3.
brightness_delta: Brightness offset between -1 and 1 added to the pixel values.
hue_delta: Hue offset between -1 and 1 added to the hue channel.
saturation_factor: A factor multiplying the saturation values of each pixel.
"""
def __init__(
self,
contrast_factor,
brightness_delta,
hue_delta,
saturation_factor,
):
self.contrast_factor = contrast_factor
self.brightness_delta = brightness_delta
self.hue_delta = hue_delta
self.saturation_factor = saturation_factor
def __call__(self, image):
"""
Apply a visual effect on the image.
Args
image: Image to adjust
"""
if self.contrast_factor:
image = adjust_contrast(image, self.contrast_factor)
if self.brightness_delta:
image = adjust_brightness(image, self.brightness_delta)
if self.hue_delta or self.saturation_factor:
image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
if self.hue_delta:
image = adjust_hue(image, self.hue_delta)
if self.saturation_factor:
image = adjust_saturation(image, self.saturation_factor)
image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR)
return image
def random_visual_effect_generator(
contrast_range=(0.9, 1.1),
brightness_range=(-.1, .1),
hue_range=(-0.05, 0.05),
saturation_range=(0.95, 1.05)
):
"""
Generate visual effect parameters uniformly sampled from the given intervals.
Args
        contrast_range: A factor interval for adjusting contrast. Should be between 0 and 3.
        brightness_range: An interval between -1 and 1 for the amount added to the pixels.
        hue_range: An interval between -1 and 1 for the amount added to the hue channel.
            The values are rotated if they exceed 180.
        saturation_range: An interval for the factor multiplying the saturation values of each
            pixel.
"""
_check_range(contrast_range, 0)
_check_range(brightness_range, -1, 1)
_check_range(hue_range, -1, 1)
_check_range(saturation_range, 0)
def _generate():
while True:
yield VisualEffect(
contrast_factor=_uniform(contrast_range),
brightness_delta=_uniform(brightness_range),
hue_delta=_uniform(hue_range),
saturation_factor=_uniform(saturation_range),
)
return _generate()
def adjust_contrast(image, factor):
"""
Adjust contrast of an image.
Args
image: Image to adjust.
factor: A factor for adjusting contrast.
"""
mean = image.mean(axis=0).mean(axis=0)
return _clip((image - mean) * factor + mean)
def adjust_brightness(image, delta):
"""
Adjust brightness of an image
Args
image: Image to adjust.
delta: Brightness offset between -1 and 1 added to the pixel values.
"""
return _clip(image + delta * 255)
def adjust_hue(image, delta):
"""
Adjust hue of an image.
Args
image: Image to adjust.
delta: An interval between -1 and 1 for the amount added to the hue channel.
The values are rotated if they exceed 180.
"""
image[..., 0] = np.mod(image[..., 0] + delta * 180, 180)
return image
def adjust_saturation(image, factor):
"""
Adjust saturation of an image.
Args
image: Image to adjust.
factor: An interval for the factor multiplying the saturation values of each pixel.
"""
image[..., 1] = np.clip(image[..., 1] * factor, 0, 255)
return image
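# --- Illustrative usage sketch (editor's addition, not from the upstream file).
# Draw one VisualEffect from the generator and apply it to a dummy BGR image;
# the chained adjustments keep the output uint8 in [0, 255].
if __name__ == '__main__':
    effect = next(random_visual_effect_generator())
    dummy_bgr = np.full((8, 8, 3), 127, dtype=np.uint8)
    augmented = effect(dummy_bgr)
    print(augmented.dtype, augmented.shape)  # uint8 (8, 8, 3)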
| 11,667 | 30.281501 | 137 | py |
EfficientDet | EfficientDet-master/utils/draw_boxes.py | import cv2
def draw_boxes(image, boxes, scores, labels, colors, classes):
for b, l, s in zip(boxes, labels, scores):
class_id = int(l)
class_name = classes[class_id]
xmin, ymin, xmax, ymax = list(map(int, b))
score = '{:.4f}'.format(s)
color = colors[class_id]
label = '-'.join([class_name, score])
ret, baseline = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
cv2.rectangle(image, (xmin, ymin), (xmax, ymax), color, 1)
cv2.rectangle(image, (xmin, ymax - ret[1] - baseline), (xmin + ret[0], ymax), color, -1)
cv2.putText(image, label, (xmin, ymax - baseline), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0), 1)
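# --- Illustrative usage sketch (editor's addition, not from the upstream file;
# the canvas, colors, and class names below are demo-only values).
if __name__ == '__main__':
    import numpy as np
    canvas = np.zeros((128, 128, 3), dtype=np.uint8)
    draw_boxes(canvas, boxes=[[10, 10, 90, 90]], scores=[0.97], labels=[0],
               colors=[(0, 255, 0)], classes=['object'])
    print(canvas[10, 10])  # -> [  0 255   0]: a green box outline was drawn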
| 712 | 38.611111 | 103 | py |
EfficientDet | EfficientDet-master/utils/transform.py | """
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
DEFAULT_PRNG = np.random
def colvec(*args):
"""
Create a numpy array representing a column vector.
"""
return np.array([args]).T
def transform_aabb(transform, aabb):
"""
Apply a transformation to an axis aligned bounding box.
The result is a new AABB in the same coordinate system as the original AABB.
The new AABB contains all corner points of the original AABB after applying the given transformation.
Args
transform: The transformation to apply.
x1: The minimum x value of the AABB.
y1: The minimum y value of the AABB.
x2: The maximum x value of the AABB.
y2: The maximum y value of the AABB.
Returns
The new AABB as tuple (x1, y1, x2, y2)
"""
x1, y1, x2, y2 = aabb
# Transform all 4 corners of the AABB.
points = transform.dot([
[x1, x2, x1, x2],
[y1, y2, y2, y1],
[1, 1, 1, 1],
])
# Extract the min and max corners again.
# (3, ) (min_x, min_y, 1)
min_corner = points.min(axis=1)
# (3, ) (max_x, max_y, 1)
max_corner = points.max(axis=1)
return [min_corner[0], min_corner[1], max_corner[0], max_corner[1]]
def _random_vector(min, max, prng=DEFAULT_PRNG):
"""
Construct a random vector between min and max.
Args
min: the minimum value for each component, (n, )
max: the maximum value for each component, (n, )
"""
min = np.array(min)
max = np.array(max)
assert min.shape == max.shape
assert len(min.shape) == 1
return prng.uniform(min, max)
def rotation(angle):
"""
Construct a homogeneous 2D rotation matrix.
Args
angle: the angle in radians
Returns
the rotation matrix as 3 by 3 numpy array
"""
return np.array([
[np.cos(angle), -np.sin(angle), 0],
[np.sin(angle), np.cos(angle), 0],
[0, 0, 1]
])
def random_rotation(min, max, prng=DEFAULT_PRNG):
"""
Construct a random rotation between -max and max.
Args
min: a scalar for the minimum absolute angle in radians
max: a scalar for the maximum absolute angle in radians
prng: the pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 rotation matrix
"""
return rotation(prng.uniform(min, max))
def translation(translation):
"""
Construct a homogeneous 2D translation matrix.
Args:
translation: the translation 2D vector
Returns:
the translation matrix as 3 by 3 numpy array
"""
return np.array([
[1, 0, translation[0]],
[0, 1, translation[1]],
[0, 0, 1]
])
def random_translation(min, max, prng=DEFAULT_PRNG):
"""
Construct a random 2D translation between min and max.
Args
min: a 2D vector with the minimum translation for each dimension
max: a 2D vector with the maximum translation for each dimension
prng: the pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 translation matrix
"""
return translation(_random_vector(min, max, prng))
def shear(angle):
"""
Construct a homogeneous 2D shear matrix.
Args
angle: the shear angle in radians
Returns
the shear matrix as 3 by 3 numpy array
"""
return np.array([
[1, -np.sin(angle), 0],
[0, np.cos(angle), 0],
[0, 0, 1]
])
def random_shear(min, max, prng=DEFAULT_PRNG):
"""
Construct a random 2D shear matrix with shear angle between -max and max.
Args
min: the minimum shear angle in radians.
max: the maximum shear angle in radians.
prng: the pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 shear matrix
"""
return shear(prng.uniform(min, max))
def scaling(factor):
"""
Construct a homogeneous 2D scaling matrix.
Args
factor: a 2D vector for X and Y scaling
Returns
the zoom matrix as 3 by 3 numpy array
"""
return np.array([
[factor[0], 0, 0],
[0, factor[1], 0],
[0, 0, 1]
])
def random_scaling(min, max, prng=DEFAULT_PRNG):
"""
Construct a random 2D scale matrix between -max and max.
Args
min: a 2D vector containing the minimum scaling factor for X and Y.
        max: a 2D vector containing the maximum scaling factor for X and Y.
prng: the pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 scaling matrix
"""
return scaling(_random_vector(min, max, prng))
def random_flip(flip_x_chance, flip_y_chance, prng=DEFAULT_PRNG):
"""
Construct a transformation randomly containing X/Y flips (or not).
Args
flip_x_chance: The chance that the result will contain a flip along the X axis.
flip_y_chance: The chance that the result will contain a flip along the Y axis.
prng: The pseudo-random number generator to use.
Returns
a homogeneous 3 by 3 transformation matrix
"""
flip_x = prng.uniform(0, 1) < flip_x_chance
flip_y = prng.uniform(0, 1) < flip_y_chance
# 1 - 2 * bool gives 1 for False and -1 for True.
return scaling((1 - 2 * flip_x, 1 - 2 * flip_y))
def change_transform_origin(transform, center):
"""
Create a new transform representing the same transformation, only with the origin of the linear part changed.
Args
transform: the transformation matrix
center: the new origin of the transformation
Returns
translate(center) * transform * translate(-center)
"""
center = np.array(center)
return np.linalg.multi_dot([translation(center), transform, translation(-center)])
def random_transform(
min_rotation=0,
max_rotation=0,
min_translation=(0, 0),
max_translation=(0, 0),
min_shear=0,
max_shear=0,
min_scaling=(1, 1),
max_scaling=(1, 1),
flip_x_chance=0,
flip_y_chance=0,
prng=DEFAULT_PRNG
):
"""
Create a random transformation.
The transformation consists of the following operations in this order (from left to right):
* rotation
* translation
* shear
* scaling
* flip x (if applied)
* flip y (if applied)
Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation
    as a factor of the image size. So an X translation of 0.1 would translate the image by 10% of its width.
Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret
the translation directly as pixel distances instead.
Args
min_rotation: The minimum rotation in radians for the transform as scalar.
max_rotation: The maximum rotation in radians for the transform as scalar.
min_translation: The minimum translation for the transform as 2D column vector.
max_translation: The maximum translation for the transform as 2D column vector.
min_shear: The minimum shear angle for the transform in radians.
max_shear: The maximum shear angle for the transform in radians.
min_scaling: The minimum scaling for the transform as 2D column vector.
max_scaling: The maximum scaling for the transform as 2D column vector.
flip_x_chance: The chance (0 to 1) that a transform will contain a flip along X direction.
flip_y_chance: The chance (0 to 1) that a transform will contain a flip along Y direction.
prng: The pseudo-random number generator to use.
"""
return np.linalg.multi_dot([
random_rotation(min_rotation, max_rotation, prng),
random_translation(min_translation, max_translation, prng),
random_shear(min_shear, max_shear, prng),
random_scaling(min_scaling, max_scaling, prng),
random_flip(flip_x_chance, flip_y_chance, prng)
])
def random_transform_generator(prng=None, **kwargs):
"""
Create a random transform generator.
Uses a dedicated, newly created, properly seeded PRNG by default instead of the global DEFAULT_PRNG.
The transformation consists of the following operations in this order (from left to right):
* rotation
* translation
* shear
* scaling
* flip x (if applied)
* flip y (if applied)
Note that by default, the data generators in `keras_retinanet.preprocessing.generators` interpret the translation
    as a factor of the image size. So an X translation of 0.1 would translate the image by 10% of its width.
Set `relative_translation` to `False` in the `TransformParameters` of a data generator to have it interpret
the translation directly as pixel distances instead.
Args
min_rotation: The minimum rotation in radians for the transform as scalar.
max_rotation: The maximum rotation in radians for the transform as scalar.
min_translation: The minimum translation for the transform as 2D column vector.
max_translation: The maximum translation for the transform as 2D column vector.
min_shear: The minimum shear angle for the transform in radians.
max_shear: The maximum shear angle for the transform in radians.
min_scaling: The minimum scaling for the transform as 2D column vector.
max_scaling: The maximum scaling for the transform as 2D column vector.
flip_x_chance: The chance (0 to 1) that a transform will contain a flip along X direction.
flip_y_chance: The chance (0 to 1) that a transform will contain a flip along Y direction.
prng: The pseudo-random number generator to use.
"""
if prng is None:
# RandomState automatically seeds using the best available method.
prng = np.random.RandomState()
while True:
yield random_transform(prng=prng, **kwargs)
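# --- Illustrative usage sketch (editor's addition, not from the upstream file).
# Passing an explicitly seeded RandomState makes the sampled augmentations
# reproducible across runs.
if __name__ == '__main__':
    gen = random_transform_generator(
        prng=np.random.RandomState(42),
        min_rotation=-0.1, max_rotation=0.1,
        flip_x_chance=0.5,
    )
    first, second = next(gen), next(gen)
    print(first.shape, second.shape)  # both (3, 3)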
| 10,565 | 32.01875 | 117 | py |
EfficientDet | EfficientDet-master/utils/__init__.py | # Copyright 2019 The TensorFlow Authors, Pavel Yakubovskiy. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import functools
import cv2
import numpy as np
_KERAS_BACKEND = None
_KERAS_LAYERS = None
_KERAS_MODELS = None
_KERAS_UTILS = None
def get_submodules_from_kwargs(kwargs):
backend = kwargs.get('backend', _KERAS_BACKEND)
layers = kwargs.get('layers', _KERAS_LAYERS)
models = kwargs.get('models', _KERAS_MODELS)
utils = kwargs.get('utils', _KERAS_UTILS)
for key in kwargs.keys():
if key not in ['backend', 'layers', 'models', 'utils']:
            raise TypeError('Invalid keyword argument: %s' % key)
return backend, layers, models, utils
def inject_keras_modules(func):
import keras
@functools.wraps(func)
def wrapper(*args, **kwargs):
kwargs['backend'] = keras.backend
kwargs['layers'] = keras.layers
kwargs['models'] = keras.models
kwargs['utils'] = keras.utils
return func(*args, **kwargs)
return wrapper
def inject_tfkeras_modules(func):
import tensorflow.keras as tfkeras
@functools.wraps(func)
def wrapper(*args, **kwargs):
kwargs['backend'] = tfkeras.backend
kwargs['layers'] = tfkeras.layers
kwargs['models'] = tfkeras.models
kwargs['utils'] = tfkeras.utils
return func(*args, **kwargs)
return wrapper
def init_keras_custom_objects():
import keras
import efficientnet as model
custom_objects = {
'swish': inject_keras_modules(model.get_swish)(),
'FixedDropout': inject_keras_modules(model.get_dropout)()
}
keras.utils.generic_utils.get_custom_objects().update(custom_objects)
def init_tfkeras_custom_objects():
import tensorflow.keras as tfkeras
import efficientnet as model
custom_objects = {
'swish': inject_tfkeras_modules(model.get_swish)(),
'FixedDropout': inject_tfkeras_modules(model.get_dropout)()
}
tfkeras.utils.get_custom_objects().update(custom_objects)
def preprocess_image(image, image_size):
# image, RGB
image_height, image_width = image.shape[:2]
if image_height > image_width:
scale = image_size / image_height
resized_height = image_size
resized_width = int(image_width * scale)
else:
scale = image_size / image_width
resized_height = int(image_height * scale)
resized_width = image_size
image = cv2.resize(image, (resized_width, resized_height))
image = image.astype(np.float32)
image /= 255.
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
image -= mean
image /= std
pad_h = image_size - resized_height
pad_w = image_size - resized_width
image = np.pad(image, [(0, pad_h), (0, pad_w), (0, 0)], mode='constant')
return image, scale
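# --- Illustrative usage sketch (editor's addition, not from the upstream file).
# preprocess_image() letterboxes to a square canvas: the long side is resized
# to image_size, the short side is scaled proportionally, the result is
# normalized with ImageNet statistics and zero-padded to a square.
if __name__ == '__main__':
    demo = np.zeros((300, 600, 3), dtype=np.uint8)
    processed, scale = preprocess_image(demo, image_size=512)
    print(processed.shape, round(scale, 4))  # -> (512, 512, 3) 0.8533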
def rotate_image(image):
rotate_degree = np.random.uniform(low=-45, high=45)
h, w = image.shape[:2]
# Compute the rotation matrix.
M = cv2.getRotationMatrix2D(center=(w / 2, h / 2),
angle=rotate_degree,
scale=1)
# Get the sine and cosine from the rotation matrix.
abs_cos_angle = np.abs(M[0, 0])
abs_sin_angle = np.abs(M[0, 1])
# Compute the new bounding dimensions of the image.
new_w = int(h * abs_sin_angle + w * abs_cos_angle)
new_h = int(h * abs_cos_angle + w * abs_sin_angle)
# Adjust the rotation matrix to take into account the translation.
M[0, 2] += new_w // 2 - w // 2
M[1, 2] += new_h // 2 - h // 2
# Rotate the image.
image = cv2.warpAffine(image, M=M, dsize=(new_w, new_h), flags=cv2.INTER_CUBIC,
borderMode=cv2.BORDER_CONSTANT,
borderValue=(128, 128, 128))
return image
def reorder_vertexes(vertexes):
"""
reorder vertexes as the paper shows, (top, right, bottom, left)
Args:
vertexes: np.array (4, 2), should be in clockwise
Returns:
"""
assert vertexes.shape == (4, 2)
xmin, ymin = np.min(vertexes, axis=0)
xmax, ymax = np.max(vertexes, axis=0)
    # Determine the first vertex: the one with the smallest y;
    # if two vertexes share the same y, choose the one with the smaller x.
ordered_idxes = np.argsort(vertexes, axis=0)
ymin1_idx = ordered_idxes[0, 1]
ymin2_idx = ordered_idxes[1, 1]
if vertexes[ymin1_idx, 1] == vertexes[ymin2_idx, 1]:
if vertexes[ymin1_idx, 0] <= vertexes[ymin2_idx, 0]:
first_vertex_idx = ymin1_idx
else:
first_vertex_idx = ymin2_idx
else:
first_vertex_idx = ymin1_idx
ordered_idxes = [(first_vertex_idx + i) % 4 for i in range(4)]
ordered_vertexes = vertexes[ordered_idxes]
# drag the point to the corresponding edge
ordered_vertexes[0, 1] = ymin
ordered_vertexes[1, 0] = xmax
ordered_vertexes[2, 1] = ymax
ordered_vertexes[3, 0] = xmin
return ordered_vertexes
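# A small worked example (values are made up): for the clockwise square
#   vertexes = np.array([[2, 0], [4, 2], [2, 4], [0, 2]])
# the top point (2, 0) has the smallest y and becomes index 0, so the result
# is already ordered (top, right, bottom, left); the final "drag" step then
# snaps each point onto the bounding-box edge (ymin, xmax, ymax, xmin).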
def postprocess_boxes(boxes, scale, height, width):
boxes /= scale
boxes[:, 0] = np.clip(boxes[:, 0], 0, width - 1)
boxes[:, 1] = np.clip(boxes[:, 1], 0, height - 1)
boxes[:, 2] = np.clip(boxes[:, 2], 0, width - 1)
boxes[:, 3] = np.clip(boxes[:, 3], 0, height - 1)
return boxes
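# Usage sketch (names are illustrative): `scale` is the value returned by
# preprocess_image above and (height, width) is the original image shape, e.g.
#   boxes = postprocess_boxes(pred_boxes, scale, img.shape[0], img.shape[1])
# maps network-space (x1, y1, x2, y2) boxes back into original image bounds.
# Note that `boxes` is modified in place by the division above.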
| 5,843 | 30.934426 | 83 | py |
EfficientDet | EfficientDet-master/utils/colors.py | import warnings
def label_color(label):
""" Return a color from a set of predefined colors. Contains 80 colors in total.
Args
label: The label to get the color for.
Returns
A list of three values representing a RGB color.
If no color is defined for a certain label, the color green is returned and a warning is printed.
"""
if label < len(colors):
return colors[label]
else:
warnings.warn('Label {} has no color, returning default.'.format(label))
return (0, 255, 0)
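# For example, label_color(0) returns [31, 0, 255] (the first entry of the
# table below), while label_color(1000) warns and falls back to (0, 255, 0).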
"""
Generated using:
```
colors = [list((matplotlib.colors.hsv_to_rgb([x, 1.0, 1.0]) * 255).astype(int)) for x in np.arange(0, 1, 1.0 / 80)]
shuffle(colors)
pprint(colors)
```
"""
colors = [
[31 , 0 , 255] ,
[0 , 159 , 255] ,
[255 , 95 , 0] ,
[255 , 19 , 0] ,
[255 , 0 , 0] ,
[255 , 38 , 0] ,
[0 , 255 , 25] ,
[255 , 0 , 133] ,
[255 , 172 , 0] ,
[108 , 0 , 255] ,
[0 , 82 , 255] ,
[0 , 255 , 6] ,
[255 , 0 , 152] ,
[223 , 0 , 255] ,
[12 , 0 , 255] ,
[0 , 255 , 178] ,
[108 , 255 , 0] ,
[184 , 0 , 255] ,
[255 , 0 , 76] ,
[146 , 255 , 0] ,
[51 , 0 , 255] ,
[0 , 197 , 255] ,
[255 , 248 , 0] ,
[255 , 0 , 19] ,
[255 , 0 , 38] ,
[89 , 255 , 0] ,
[127 , 255 , 0] ,
[255 , 153 , 0] ,
[0 , 255 , 255] ,
[0 , 255 , 216] ,
[0 , 255 , 121] ,
[255 , 0 , 248] ,
[70 , 0 , 255] ,
[0 , 255 , 159] ,
[0 , 216 , 255] ,
[0 , 6 , 255] ,
[0 , 63 , 255] ,
[31 , 255 , 0] ,
[255 , 57 , 0] ,
[255 , 0 , 210] ,
[0 , 255 , 102] ,
[242 , 255 , 0] ,
[255 , 191 , 0] ,
[0 , 255 , 63] ,
[255 , 0 , 95] ,
[146 , 0 , 255] ,
[184 , 255 , 0] ,
[255 , 114 , 0] ,
[0 , 255 , 235] ,
[255 , 229 , 0] ,
[0 , 178 , 255] ,
[255 , 0 , 114] ,
[255 , 0 , 57] ,
[0 , 140 , 255] ,
[0 , 121 , 255] ,
[12 , 255 , 0] ,
[255 , 210 , 0] ,
[0 , 255 , 44] ,
[165 , 255 , 0] ,
[0 , 25 , 255] ,
[0 , 255 , 140] ,
[0 , 101 , 255] ,
[0 , 255 , 82] ,
[223 , 255 , 0] ,
[242 , 0 , 255] ,
[89 , 0 , 255] ,
[165 , 0 , 255] ,
[70 , 255 , 0] ,
[255 , 0 , 172] ,
[255 , 76 , 0] ,
[203 , 255 , 0] ,
[204 , 0 , 255] ,
[255 , 0 , 229] ,
[255 , 133 , 0] ,
[127 , 0 , 255] ,
[0 , 235 , 255] ,
[0 , 255 , 197] ,
[255 , 0 , 191] ,
[0 , 44 , 255] ,
[50 , 255 , 0]
]
| 2,656 | 22.513274 | 115 | py |
MNC | MNC-master/tools/test_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# Standard module
import argparse
import sys
import os
import time
import pprint
# User-defined module
import _init_paths
import caffe
from mnc_config import cfg, cfg_from_file
from db.imdb import get_imdb
from caffeWrapper.TesterWrapper import TesterWrapper
def parse_args():
"""
Parse input arguments
"""
    parser = argparse.ArgumentParser(description='Test an MNC network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU id to use',
default=0, type=int)
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--comp', dest='comp_mode', help='competition mode',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
parser.add_argument('--task', dest='task_name',
help='set task name', default='sds',
type=str)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
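# Example invocation (all paths are placeholders for your own files):
# python tools/test_net.py --gpu 0 \
#     --def models/VGG16/mnc_5stage/test.prototxt \
#     --net output/mnc/voc_2012_seg_train/mnc_iter_25000.caffemodel \
#     --imdb voc_2012_seg_val --task sds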
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print('Waiting for {} to exist...'.format(args.caffemodel))
time.sleep(10)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
imdb = get_imdb(args.imdb_name)
_tester = TesterWrapper(args.prototxt, imdb, args.caffemodel, args.task_name)
_tester.get_result()
| 2,755 | 31.423529 | 81 | py |
MNC | MNC-master/tools/_init_paths.py |
import os.path
import sys
"""
Add lib paths and caffe path to system search path
"""
def add_path(path):
if path not in sys.path:
sys.path.insert(0, path)
cur_dir = os.path.dirname(__file__)
# Add caffe python to PYTHONPATH
caffe_path = os.path.join(cur_dir, '..', 'caffe-mnc', 'python')
add_path(caffe_path)
# Add lib to PYTHONPATH
lib_path = os.path.join(cur_dir, '..', 'lib')
add_path(lib_path)
| 417 | 17.173913 | 63 | py |
MNC | MNC-master/tools/demo.py | #!/usr/bin/python
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# Standard module
import os
import argparse
import time
import cv2
import numpy as np
# User-defined module
import _init_paths
import caffe
from mnc_config import cfg
from transform.bbox_transform import clip_boxes
from utils.blob import prep_im_for_blob, im_list_to_blob
from transform.mask_transform import gpu_mask_voting
import matplotlib.pyplot as plt
from utils.vis_seg import _convert_pred_to_image, _get_voc_color_map
from PIL import Image
# VOC 20 classes
CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='MNC demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default='./models/VGG16/mnc_5stage/test.prototxt', type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default='./data/mnc_model/mnc_model.caffemodel.h5', type=str)
args = parser.parse_args()
return args
def prepare_mnc_args(im, net):
# Prepare image data blob
blobs = {'data': None}
processed_ims = []
im, im_scale_factors = \
prep_im_for_blob(im, cfg.PIXEL_MEANS, cfg.TEST.SCALES[0], cfg.TRAIN.MAX_SIZE)
processed_ims.append(im)
blobs['data'] = im_list_to_blob(processed_ims)
# Prepare image info blob
im_scales = [np.array(im_scale_factors)]
assert len(im_scales) == 1, 'Only single-image batch implemented'
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# Reshape network inputs and do forward
net.blobs['data'].reshape(*blobs['data'].shape)
net.blobs['im_info'].reshape(*blobs['im_info'].shape)
forward_kwargs = {
'data': blobs['data'].astype(np.float32, copy=False),
'im_info': blobs['im_info'].astype(np.float32, copy=False)
}
return forward_kwargs, im_scales
def im_detect(im, net):
forward_kwargs, im_scales = prepare_mnc_args(im, net)
blobs_out = net.forward(**forward_kwargs)
# output we need to collect:
    # 1. output from phase 1
rois_phase1 = net.blobs['rois'].data.copy()
masks_phase1 = net.blobs['mask_proposal'].data[...]
scores_phase1 = net.blobs['seg_cls_prob'].data[...]
    # 2. output from phase 2
rois_phase2 = net.blobs['rois_ext'].data[...]
masks_phase2 = net.blobs['mask_proposal_ext'].data[...]
scores_phase2 = net.blobs['seg_cls_prob_ext'].data[...]
# Boxes are in resized space, we un-scale them back
rois_phase1 = rois_phase1[:, 1:5] / im_scales[0]
rois_phase2 = rois_phase2[:, 1:5] / im_scales[0]
rois_phase1, _ = clip_boxes(rois_phase1, im.shape)
rois_phase2, _ = clip_boxes(rois_phase2, im.shape)
# concatenate two stages to get final network output
masks = np.concatenate((masks_phase1, masks_phase2), axis=0)
boxes = np.concatenate((rois_phase1, rois_phase2), axis=0)
scores = np.concatenate((scores_phase1, scores_phase2), axis=0)
return boxes, masks, scores
def get_vis_dict(result_box, result_mask, img_name, cls_names, vis_thresh=0.5):
box_for_img = []
mask_for_img = []
cls_for_img = []
for cls_ind, cls_name in enumerate(cls_names):
det_for_img = result_box[cls_ind]
seg_for_img = result_mask[cls_ind]
keep_inds = np.where(det_for_img[:, -1] >= vis_thresh)[0]
for keep in keep_inds:
box_for_img.append(det_for_img[keep])
mask_for_img.append(seg_for_img[keep][0])
cls_for_img.append(cls_ind + 1)
res_dict = {'image_name': img_name,
'cls_name': cls_for_img,
'boxes': box_for_img,
'masks': mask_for_img}
return res_dict
if __name__ == '__main__':
args = parse_args()
test_prototxt = args.prototxt
test_model = args.caffemodel
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
cfg.GPU_ID = args.gpu_id
net = caffe.Net(test_prototxt, test_model, caffe.TEST)
# Warm up for the first two images
im = 128 * np.ones((300, 500, 3), dtype=np.float32)
for i in xrange(2):
_, _, _ = im_detect(im, net)
im_names = ['2008_000533.jpg', '2008_000910.jpg', '2008_001602.jpg',
'2008_001717.jpg', '2008_008093.jpg']
demo_dir = './data/demo'
for im_name in im_names:
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/{}'.format(im_name)
gt_image = os.path.join(demo_dir, im_name)
im = cv2.imread(gt_image)
start = time.time()
boxes, masks, seg_scores = im_detect(im, net)
end = time.time()
print 'forward time %f' % (end-start)
result_mask, result_box = gpu_mask_voting(masks, boxes, seg_scores, len(CLASSES) + 1,
100, im.shape[1], im.shape[0])
pred_dict = get_vis_dict(result_box, result_mask, 'data/demo/' + im_name, CLASSES)
img_width = im.shape[1]
img_height = im.shape[0]
inst_img, cls_img = _convert_pred_to_image(img_width, img_height, pred_dict)
color_map = _get_voc_color_map()
target_cls_file = os.path.join(demo_dir, 'cls_' + im_name)
cls_out_img = np.zeros((img_height, img_width, 3))
for i in xrange(img_height):
for j in xrange(img_width):
cls_out_img[i][j] = color_map[cls_img[i][j]][::-1]
cv2.imwrite(target_cls_file, cls_out_img)
background = Image.open(gt_image)
mask = Image.open(target_cls_file)
background = background.convert('RGBA')
mask = mask.convert('RGBA')
superimpose_image = Image.blend(background, mask, 0.8)
superimpose_name = os.path.join(demo_dir, 'final_' + im_name)
superimpose_image.save(superimpose_name, 'JPEG')
im = cv2.imread(superimpose_name)
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
classes = pred_dict['cls_name']
for i in xrange(len(classes)):
score = pred_dict['boxes'][i][-1]
bbox = pred_dict['boxes'][i][:4]
cls_ind = classes[i] - 1
ax.text(bbox[0], bbox[1] - 8,
'{:s} {:.4f}'.format(CLASSES[cls_ind], score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
plt.axis('off')
plt.tight_layout()
plt.draw()
fig.savefig(os.path.join(demo_dir, im_name[:-4]+'.png'))
os.remove(superimpose_name)
os.remove(target_cls_file)
| 7,538 | 38.265625 | 93 | py |
MNC | MNC-master/tools/prepare_mcg_maskdb.py | # --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# System modules
import argparse
import os
import cPickle
import numpy as np
import scipy.io as sio
import cv2
from multiprocessing import Process
import time
import PIL
# User-defined module
import _init_paths
from mnc_config import cfg
from utils.cython_bbox import bbox_overlaps
from transform.mask_transform import mask_overlap, intersect_mask
from datasets.pascal_voc_seg import PascalVOCSeg
def parse_args():
""" Parse input arguments
"""
parser = argparse.ArgumentParser(description='Prepare MCG roidb')
parser.add_argument('--input', dest='input_dir',
help='folder contain input mcg proposals',
default='data/MCG-raw/', type=str)
parser.add_argument('--output', dest='output_dir',
help='folder contain output roidb', required=True,
type=str)
parser.add_argument('--gt_roi', dest='roidb', help='roidb',
default='data/cache/voc_2012_train_gt_roidb.pkl', type=str)
parser.add_argument('--gt_mask', dest='maskdb', help='maskdb',
default='data/cache/voc_2012_train_gt_maskdb.pkl', type=str)
parser.add_argument('-mask_sz', dest='mask_size',
help='compressed mask resolution',
default=21, type=int)
parser.add_argument('--top_k', dest='top_k',
help='number of generated proposal',
default=-1, type=int)
parser.add_argument('--db', dest='db_name',
help='train or validation',
default='train', type=str)
parser.add_argument('--para_job', dest='para_job',
help='launch several process',
default='1', type=int)
return parser.parse_args()
def process_roidb(file_start, file_end, db):
for cnt in xrange(file_start, file_end):
f = file_list[cnt]
full_file = os.path.join(input_dir, f)
output_cache = os.path.join(output_dir, f.split('.')[0] + '.mat')
timer_tic = time.time()
if os.path.exists(output_cache):
continue
mcg_mat = sio.loadmat(full_file)
mcg_mask_label = mcg_mat['labels']
mcg_superpixels = mcg_mat['superpixels']
num_proposal = len(mcg_mask_label)
mcg_boxes = np.zeros((num_proposal, 4))
mcg_masks = np.zeros((num_proposal, mask_size, mask_size), dtype=np.bool)
for ind_proposal in xrange(num_proposal):
label = mcg_mask_label[ind_proposal][0][0]
proposal = np.in1d(mcg_superpixels, label).reshape(mcg_superpixels.shape)
[r, c] = np.where(proposal == 1)
y1 = np.min(r)
x1 = np.min(c)
y2 = np.max(r)
x2 = np.max(c)
box = np.array([x1, y1, x2, y2])
proposal = proposal[y1:y2+1, x1:x2+1]
proposal = cv2.resize(proposal.astype(np.float), (mask_size, mask_size), interpolation=cv2.INTER_NEAREST)
mcg_masks[ind_proposal, :, :] = proposal
mcg_boxes[ind_proposal, :] = box
if top_k != -1:
mcg_boxes = mcg_boxes[:top_k, :]
mcg_masks = mcg_masks[:top_k, :]
if db == 'val':
# if we prepare validation data, we only need its masks and boxes
roidb = {
'masks': (mcg_masks >= cfg.BINARIZE_THRESH).astype(bool),
'boxes': mcg_boxes
}
sio.savemat(output_cache, roidb)
use_time = time.time() - timer_tic
print '%d/%d use time %f' % (cnt, len(file_list), use_time)
else:
# Otherwise we need to prepare other information like overlaps
num_mcg = mcg_boxes.shape[0]
gt_roidb = gt_roidbs[cnt]
gt_maskdb = gt_maskdbs[cnt]
gt_boxes = gt_roidb['boxes']
gt_masks = gt_maskdb['gt_masks']
gt_classes = gt_roidb['gt_classes']
num_gt = gt_boxes.shape[0]
num_all = num_gt + num_mcg
# define output structure
det_overlaps = np.zeros((num_all, 1))
seg_overlaps = np.zeros((num_all, 1))
seg_assignment = np.zeros((num_all, 1))
mask_targets = np.zeros((num_all, mask_size, mask_size))
# ------------------------------------------------------
all_boxes = np.vstack((gt_boxes[:, :4], mcg_boxes)).astype(int)
all_masks = np.zeros((num_all, mask_size, mask_size))
for i in xrange(num_gt):
all_masks[i, :, :] = (cv2.resize(gt_masks[i].astype(np.float),
(mask_size, mask_size)))
assert all_masks[num_gt:, :, :].shape == mcg_masks.shape
all_masks[num_gt:, :, :] = mcg_masks
# record bounding box overlaps
cur_overlap = bbox_overlaps(all_boxes.astype(np.float), gt_boxes.astype(np.float))
seg_assignment = cur_overlap.argmax(axis=1)
det_overlaps = cur_overlap.max(axis=1)
seg_assignment[det_overlaps == 0] = -1
# record mask region overlaps
seg_overlaps[:num_gt] = 1.0
for i in xrange(num_gt, num_all):
cur_mask = cv2.resize(all_masks[i, :, :].astype(np.float),
(all_boxes[i, 2] - all_boxes[i, 0] + 1,
all_boxes[i, 3] - all_boxes[i, 1] + 1)) >= cfg.BINARIZE_THRESH
for mask_ind in xrange(len(gt_masks)):
gt_mask = gt_masks[mask_ind]
gt_roi = gt_roidb['boxes'][mask_ind]
cur_ov = mask_overlap(all_boxes[i, :], gt_roi, cur_mask, gt_mask)
seg_overlaps[i] = max(seg_overlaps[i], cur_ov)
output_label = np.zeros((num_all, 1))
for i in xrange(num_all):
if seg_assignment[i] == -1:
continue
cur_ind = seg_assignment[i]
output_label[i] = gt_classes[seg_assignment[i]]
mask_targets[i, :, :] = intersect_mask(all_boxes[i, :], gt_roidb['boxes'][cur_ind], gt_masks[cur_ind])
            # Some of the arrays need a new axis to be consistent with the savemat method
roidb = {
'masks': (all_masks >= cfg.BINARIZE_THRESH).astype(bool),
'boxes': all_boxes,
'det_overlap': det_overlaps[:, np.newaxis],
'seg_overlap': seg_overlaps,
'mask_targets': (mask_targets >= cfg.BINARIZE_THRESH).astype(bool),
'gt_classes': gt_classes[:, np.newaxis],
'output_label': output_label,
'gt_assignment': seg_assignment[:, np.newaxis],
'Flip': False
}
sio.savemat(output_cache, roidb)
use_time = time.time() - timer_tic
print '%d/%d use time %f' % (cnt, len(file_list), use_time)
def process_flip_masks(image_names, im_start, im_end):
widths = [PIL.Image.open('data/VOCdevkitSDS/img/' + im_name + '.jpg').size[0] for im_name in image_names]
cache_dir = output_dir
if not os.path.isdir(cache_dir):
os.makedirs(cache_dir)
for index in xrange(im_start, im_end):
output_cache = os.path.join(cache_dir, image_names[index] + '_flip.mat')
if os.path.exists(output_cache):
continue
image_cache = os.path.join(cache_dir, image_names[index] + '.mat')
orig_maskdb = sio.loadmat(image_cache)
# Flip mask and mask regression targets
masks = orig_maskdb['masks']
mask_targets = orig_maskdb['mask_targets']
mask_flip = masks[:, :, ::-1]
mask_target_flip = mask_targets[:, :, ::-1]
# Flip boxes
boxes = orig_maskdb['boxes']
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = widths[index] - oldx2 - 1
boxes[:, 2] = widths[index] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
# Other maskdb values are identical with original maskdb
flip_maskdb = {
'masks': (mask_flip >= cfg.BINARIZE_THRESH).astype(bool),
'boxes': boxes,
'det_overlap': orig_maskdb['det_overlap'],
'seg_overlap': orig_maskdb['seg_overlap'],
'mask_targets': (mask_target_flip >= cfg.BINARIZE_THRESH).astype(bool),
'gt_classes': orig_maskdb['gt_classes'],
'gt_assignment': orig_maskdb['gt_assignment'],
'Flip': True,
'output_label': orig_maskdb['output_label']
}
sio.savemat(output_cache, flip_maskdb)
if __name__ == '__main__':
args = parse_args()
input_dir = args.input_dir
assert os.path.exists(input_dir), 'Path does not exist: {}'.format(input_dir)
output_dir = args.output_dir
if not os.path.isdir(output_dir):
os.makedirs(output_dir)
mask_size = args.mask_size
list_name = 'data/VOCdevkitSDS/train.txt' if args.db_name == 'train' else 'data/VOCdevkitSDS/val.txt'
with open(list_name) as f:
file_list = f.read().splitlines()
# If we want to prepare training maskdb, first try to load gts
if args.db_name == 'train':
if os.path.exists(args.roidb) and os.path.exists(args.maskdb):
with open(args.roidb, 'rb') as f:
gt_roidbs = cPickle.load(f)
with open(args.maskdb, 'rb') as f:
gt_maskdbs = cPickle.load(f)
else:
db = PascalVOCSeg('train', '2012', 'data/VOCdevkitSDS/')
gt_roidbs = db.gt_roidb()
gt_maskdbs = db.gt_maskdb()
top_k = args.top_k
num_process = args.para_job
# Prepare train/val maskdb use multi-process
processes = []
file_start = 0
file_offset = int(np.ceil(len(file_list) / float(num_process)))
for process_id in xrange(num_process):
file_end = min(file_start + file_offset, len(file_list))
p = Process(target=process_roidb, args=(file_start, file_end, args.db_name))
p.start()
processes.append(p)
file_start += file_offset
for p in processes:
p.join()
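    # e.g. with 1000 files and --para_job 8, file_offset = ceil(1000/8) = 125,
    # so the processes cover [0, 125), [125, 250), ..., and the final chunk
    # is clipped to len(file_list) by the min() above.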
# If db_name == 'train', we still need to add flipped maskdb into output folder
# Add flipped mask and mask regression targets after prepare the original mcg proposals
if args.db_name == 'train':
print 'Appending flipped MCG to ROI'
processes = []
file_start = 0
file_offset = int(np.ceil(len(file_list) / float(num_process)))
for process_id in xrange(num_process):
file_end = min(file_start + file_offset, len(file_list))
p = Process(target=process_flip_masks, args=(file_list, file_start, file_end))
p.start()
processes.append(p)
file_start += file_offset
for p in processes:
p.join()
| 11,222 | 42 | 118 | py |
MNC | MNC-master/tools/train_net.py | #!/usr/bin/env python
# --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# Standard module
import argparse
import sys
import pprint
import numpy as np
# User-defined module
import _init_paths
from mnc_config import cfg, cfg_from_file, get_output_dir # config mnc
from db.roidb import attach_roidb
from db.maskdb import attach_maskdb
from caffeWrapper.SolverWrapper import SolverWrapper
import caffe
def parse_args():
""" Parse input arguments
"""
    parser = argparse.ArgumentParser(description='Train an MNC network')
parser.add_argument('--gpu', dest='gpu_id',
help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--solver', dest='solver',
help='solver prototxt',
default=None, type=str)
parser.add_argument('--iters', dest='max_iters',
help='number of iterations to train',
default=40000, type=int)
parser.add_argument('--weights', dest='pretrained_model',
help='initialize with pretrained model weights',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default=None, type=str)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--rand', dest='randomize',
help='randomize (do not use a fixed seed)',
action='store_true')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
return parser.parse_args()
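# Example invocation (solver/weights paths are placeholders):
# python tools/train_net.py --gpu 0 \
#     --solver models/VGG16/mnc_5stage/solver.prototxt \
#     --weights data/imagenet_models/VGG16.v2.caffemodel \
#     --imdb voc_2012_seg_train --iters 25000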
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
cfg.GPU_ID = args.gpu_id
print('Using config:')
pprint.pprint(cfg)
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
if not args.randomize:
# fix the random seeds (numpy and caffe) for reproducibility
np.random.seed(cfg.RNG_SEED)
caffe.set_random_seed(cfg.RNG_SEED)
# get imdb and roidb from specified imdb_name
imdb, roidb = attach_roidb(args.imdb_name)
    # Faster R-CNN doesn't need a maskdb; MNC/CFM modes do
if cfg.MNC_MODE or cfg.CFM_MODE:
imdb, maskdb = attach_maskdb(args.imdb_name)
else:
maskdb = None
print '{:d} roidb entries'.format(len(roidb))
output_dir = get_output_dir(imdb, None)
print 'Output will be saved to `{:s}`'.format(output_dir)
_solver = SolverWrapper(args.solver, roidb, maskdb, output_dir, imdb,
pretrained_model=args.pretrained_model)
print 'Solving...'
_solver.train_model(args.max_iters)
print 'done solving'
| 3,331 | 32.656566 | 78 | py |
MNC | MNC-master/lib/mnc_config.py |
"""MNC config system
"""
import numpy as np
import os.path
from easydict import EasyDict as edict
__C = edict()
cfg = __C
# MNC/CFM mode
__C.MNC_MODE = True
__C.CFM_MODE = False
__C.EXP_DIR = 'default'
__C.USE_GPU_NMS = True
__C.GPU_ID = 0
__C.RNG_SEED = 3
__C.EPS = 1e-14
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# Root directory of project
__C.ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
# Data directory
__C.DATA_DIR = os.path.abspath(os.path.join(__C.ROOT_DIR, 'data'))
# Related to mask resizing and binarize predicted masks
__C.BINARIZE_THRESH = 0.4
# Mask estimation (if any) size (may be different from CFM input size)
__C.MASK_SIZE = 21
# Training options
__C.TRAIN = edict()
# ------- General setting ----
__C.TRAIN.IMS_PER_BATCH = 1
# Batch size for training Region CNN (not RPN)
__C.TRAIN.BATCH_SIZE = 64
# Make minibatches from images that have similar aspect ratios (i.e. both
# tall and thin or both short and wide) in order to avoid wasting computation
# on zero-padding.
__C.TRAIN.ASPECT_GROUPING = True
# Use flipped image for augmentation
__C.TRAIN.USE_FLIPPED = True
# Resize shortest side to 600
__C.TRAIN.SCALES = (600,)
__C.TRAIN.MAX_SIZE = 1000
__C.TRAIN.SNAPSHOT_ITERS = 5000
__C.TRAIN.SNAPSHOT_INFIX = ''
# Sample FG
__C.TRAIN.FG_FRACTION = [0.3]
__C.TRAIN.FG_THRESH_HI = [1.0]
__C.TRAIN.FG_THRESH_LO = [0.5]
# Sample BF according to remaining samples
__C.TRAIN.BG_FRACTION = [0.85, 0.15]
__C.TRAIN.BG_THRESH_HI = [0.5, 0.1]
__C.TRAIN.BG_THRESH_LO = [0.1, 0.0]
# ------- Proposal -------
__C.TRAIN.PROPOSAL_METHOD = 'gt'
# ------- BBOX Regression ---------
# Train bounding-box regressors
__C.TRAIN.BBOX_REG = True
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = False
__C.TRAIN.BBOX_THRESH = 0.5
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
# weight of smooth L1 loss
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# -------- RPN ----------
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IoU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
# Note this is class-agnostic anchors' FG_FRACTION
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TRAIN.RPN_MIN_SIZE = 16
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Mix anchors used for RPN and later layer
__C.TRAIN.MIX_INDEX = True
# -------- CFM ----------
__C.TRAIN.CFM_INPUT_MASK_SIZE = 14
__C.TRAIN.FG_DET_THRESH = 0.5
__C.TRAIN.FG_SEG_THRESH = 0.5
__C.TRAIN.FRACTION_SAMPLE = [0.3, 0.5, 0.2]
__C.TRAIN.THRESH_LO_SAMPLE = [0.5, 0.1, 0.0]
__C.TRAIN.THRESH_HI_SAMPLE = [1.0, 0.5, 0.1]
# Test option
__C.TEST = edict()
# Scales to use during testing (can list multiple scales)
# Each scale is the pixel size of an image's shortest side
__C.TEST.SCALES = (600,)
# Max pixel size of the longest side of a scaled input image
__C.TEST.MAX_SIZE = 1000
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
# Set this true in the yml file to specify proposed RPN
__C.TEST.HAS_RPN = True
# NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Proposal height and width both need to be greater than RPN_MIN_SIZE (at orig image scale)
__C.TEST.RPN_MIN_SIZE = 16
__C.TEST.BBOX_REG = True
# Aggregate nearby masks inside box, the box_IOU threshold
__C.TEST.MASK_MERGE_IOU_THRESH = 0.5
__C.TEST.MASK_MERGE_NMS_THRESH = 0.3
__C.TEST.CFM_INPUT_MASK_SIZE = 14
# Used for multi-scale testing, since naive implementation
# will waste a lot of on zero-padding, so we group each
# $GROUP_SCALE scales to feed in gpu. And max rois for
# each group is specified in MAX_ROIS_GPU
__C.TEST.MAX_ROIS_GPU = [2000]
__C.TEST.GROUP_SCALE = 1
# 0 means use all the MCG proposals
__C.TEST.USE_TOP_K_MCG = 0
# threshold for binarize a mask
__C.TEST.USE_MASK_MERGE = True
__C.TEST.USE_GPU_MASK_MERGE = True
def get_output_dir(imdb, net):
""" Return the directory where experimental artifacts are placed.
A canonical path is built using the name from an imdb and a network
(if not None).
"""
path = os.path.abspath(os.path.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
if net is None:
return path
else:
return os.path.join(path, net.name)
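# For instance (a sketch, not executed here): with the defaults above,
# get_output_dir(imdb, None) for an imdb named 'voc_2012_seg_train' yields
# <ROOT_DIR>/output/default/voc_2012_seg_train.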
def _merge_two_config(user_cfg, default_cfg):
""" Merge user's config into default config dictionary, clobbering the
options in b whenever they are also specified in a.
Need to ensure the type of two val under same key are the same
Do recursive merge when encounter hierarchical dictionary
"""
if type(user_cfg) is not edict:
return
for key, val in user_cfg.iteritems():
# Since user_cfg is a sub-file of default_cfg
if not default_cfg.has_key(key):
raise KeyError('{} is not a valid config key'.format(key))
if type(default_cfg[key]) is not type(val):
if isinstance(default_cfg[key], np.ndarray):
val = np.array(val, dtype=default_cfg[key].dtype)
else:
raise ValueError(
'Type mismatch ({} vs. {}) '
'for config key: {}'.format(type(default_cfg[key]),
type(val), key))
# Recursive merge config
if type(val) is edict:
try:
_merge_two_config(user_cfg[key], default_cfg[key])
except:
print 'Error under config key: {}'.format(key)
raise
else:
default_cfg[key] = val
def cfg_from_file(file_name):
""" Load a config file and merge it into the default options.
"""
import yaml
with open(file_name, 'r') as f:
yaml_cfg = edict(yaml.load(f))
_merge_two_config(yaml_cfg, __C)
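# A minimal example YAML file (values are illustrative) that cfg_from_file
# can merge; every key must already exist in the defaults above with a
# matching type:
#
#   EXP_DIR: mnc_5stage
#   MNC_MODE: true
#   TRAIN:
#     BATCH_SIZE: 32
#   TEST:
#     NMS: 0.3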
| 6,958 | 32.618357 | 91 | py |
MNC | MNC-master/lib/setup.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os
from os.path import join as pjoin
from setuptools import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
import numpy as np
def find_in_path(name, path):
"Find a file in a search path"
    # Adapted from
# http://code.activestate.com/recipes/52224-find-a-file-given-a-search-path/
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
def locate_cuda():
"""Locate the CUDA environment on the system
Returns a dict with keys 'home', 'nvcc', 'include', and 'lib64'
and values giving the absolute path to each directory.
Starts by looking for the CUDAHOME env variable. If not found, everything
is based on finding 'nvcc' in the PATH.
"""
# first check if the CUDAHOME env variable is in use
if 'CUDAHOME' in os.environ:
home = os.environ['CUDAHOME']
nvcc = pjoin(home, 'bin', 'nvcc')
else:
# otherwise, search the PATH for NVCC
default_path = pjoin(os.sep, 'usr', 'local', 'cuda', 'bin')
nvcc = find_in_path('nvcc', os.environ['PATH'] + os.pathsep + default_path)
if nvcc is None:
raise EnvironmentError('The nvcc binary could not be '
'located in your $PATH. Either add it to your path, or set $CUDAHOME')
home = os.path.dirname(os.path.dirname(nvcc))
cudaconfig = {'home':home, 'nvcc':nvcc,
'include': pjoin(home, 'include'),
'lib64': pjoin(home, 'lib64')}
for k, v in cudaconfig.iteritems():
if not os.path.exists(v):
raise EnvironmentError('The CUDA %s path could not be located in %s' % (k, v))
return cudaconfig
CUDA = locate_cuda()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = np.get_include()
except AttributeError:
numpy_include = np.get_numpy_include()
def customize_compiler_for_nvcc(self):
"""inject deep into distutils to customize how the dispatch
to gcc/nvcc works.
If you subclass UnixCCompiler, it's not trivial to get your subclass
injected in, and still have the right customizations (i.e.
distutils.sysconfig.customize_compiler) run on it. So instead of going
    the OO route, I have this. Note, it's kind of like a weird functional
subclassing going on."""
# tell the compiler it can processes .cu
self.src_extensions.append('.cu')
    # save references to the default compiler_so and _compile methods
default_compiler_so = self.compiler_so
super = self._compile
# now redefine the _compile method. This gets executed for each
# object but distutils doesn't have the ability to change compilers
# based on source extension: we add it.
def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
if os.path.splitext(src)[1] == '.cu':
# use the cuda for .cu files
self.set_executable('compiler_so', CUDA['nvcc'])
# use only a subset of the extra_postargs, which are 1-1 translated
# from the extra_compile_args in the Extension class
postargs = extra_postargs['nvcc']
else:
postargs = extra_postargs['gcc']
super(obj, src, ext, cc_args, postargs, pp_opts)
# reset the default compiler_so, which we might have changed for cuda
self.compiler_so = default_compiler_so
# inject our redefined _compile method into the class
self._compile = _compile
# Run the compiler customization above via a custom build_ext
class custom_build_ext(build_ext):
def build_extensions(self):
customize_compiler_for_nvcc(self.compiler)
build_ext.build_extensions(self)
ext_modules = [
Extension(
"utils.cython_bbox",
["utils/bbox.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension(
"nms.cpu_nms",
["nms/cpu_nms.pyx"],
extra_compile_args={'gcc': ["-Wno-cpp", "-Wno-unused-function"]},
include_dirs = [numpy_include]
),
Extension('nms.gpu_nms',
['nms/nms_kernel.cu', 'nms/gpu_nms.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with
# gcc the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
),
Extension('nms.mv',
['nms/mv_kernel.cu', 'nms/gpu_mv.pyx'],
library_dirs=[CUDA['lib64']],
libraries=['cudart'],
language='c++',
runtime_library_dirs=[CUDA['lib64']],
# this syntax is specific to this build system
# we're only going to use certain compiler args with nvcc and not with
# gcc the implementation of this trick is in customize_compiler() below
extra_compile_args={'gcc': ["-Wno-unused-function"],
'nvcc': ['-arch=sm_35',
'--ptxas-options=-v',
'-c',
'--compiler-options',
"'-fPIC'"]},
include_dirs = [numpy_include, CUDA['include']]
),
]
setup(
name='MNC',
ext_modules=ext_modules,
# inject our custom trigger
cmdclass={'build_ext': custom_build_ext},
) | 6,247 | 36.413174 | 90 | py |
MNC | MNC-master/lib/db/roidb.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import PIL
import numpy as np
import os
import cPickle
import scipy.io
from db.imdb import get_imdb
from mnc_config import cfg
from transform.bbox_transform import compute_targets
def prepare_roidb(imdb):
""" Enrich the imdb's roidb by adding some derived quantities that
are useful for training. This function pre-computes the maximum
overlap, taken over ground-truth boxes, between each ROI and
each ground-truth box. The class with maximum overlap is also
recorded.
"""
sizes = [PIL.Image.open(imdb.image_path_at(i)).size
for i in xrange(imdb.num_images)]
roidb = imdb.roidb
for i in xrange(len(imdb.image_index)):
roidb[i]['image'] = imdb.image_path_at(i)
roidb[i]['width'] = sizes[i][0]
roidb[i]['height'] = sizes[i][1]
# need gt_overlaps as a dense array for argmax
gt_overlaps = roidb[i]['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
roidb[i]['max_classes'] = max_classes
roidb[i]['max_overlaps'] = max_overlaps
# sanity checks
# max overlap of 0 => class should be zero (background)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# max overlap > 0 => class should not be zero (must be a fg class)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
def add_bbox_regression_targets(roidb):
"""Add information needed to train bounding-box regressors."""
assert len(roidb) > 0
assert 'max_classes' in roidb[0], 'Did you call prepare_roidb first?'
num_images = len(roidb)
# Infer number of classes from the number of columns in gt_overlaps
num_classes = roidb[0]['gt_overlaps'].shape[1]
for im_i in xrange(num_images):
rois = roidb[im_i]['boxes']
max_overlaps = roidb[im_i]['max_overlaps']
max_classes = roidb[im_i]['max_classes']
roidb[im_i]['bbox_targets'] = \
compute_targets(rois, max_overlaps, max_classes)
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED:
# Use fixed / precomputed "means" and "stds" instead of empirical values
means = np.tile(
np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS), (num_classes, 1))
stds = np.tile(
np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS), (num_classes, 1))
else:
# Compute values needed for means and stds
# var(x) = E(x^2) - E(x)^2
class_counts = np.zeros((num_classes, 1)) + cfg.EPS
sums = np.zeros((num_classes, 4))
squared_sums = np.zeros((num_classes, 4))
for im_i in xrange(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in xrange(1, num_classes):
cls_inds = np.where(targets[:, 0] == cls)[0]
if cls_inds.size > 0:
class_counts[cls] += cls_inds.size
sums[cls, :] += targets[cls_inds, 1:].sum(axis=0)
squared_sums[cls, :] += \
(targets[cls_inds, 1:] ** 2).sum(axis=0)
means = sums / class_counts
stds = np.sqrt(squared_sums / class_counts - means ** 2)
print 'bbox target means:'
print means
print means[1:, :].mean(axis=0) # ignore bg class
print 'bbox target stdevs:'
print stds
print stds[1:, :].mean(axis=0) # ignore bg class
# Normalize targets
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS:
print "Normalizing targets"
for im_i in xrange(num_images):
targets = roidb[im_i]['bbox_targets']
for cls in xrange(1, num_classes):
cls_inds = np.where(targets[:, 0] == cls)[0]
roidb[im_i]['bbox_targets'][cls_inds, 1:] -= means[cls, :]
roidb[im_i]['bbox_targets'][cls_inds, 1:] /= stds[cls, :]
else:
print "NOT normalizing targets"
# These values will be needed for making predictions
    # (the predictions will need to be unnormalized and uncentered)
return means.ravel(), stds.ravel()
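# Worked example of the normalization above (illustrative numbers): if a
# class-c target is t = [0.1, -0.2, 0.05, 0.0] with means[c] = [0, 0, 0, 0]
# and stds[c] = [0.1, 0.1, 0.2, 0.2], the stored target becomes
# [1.0, -2.0, 0.25, 0.0]; at test time predictions are mapped back with
# pred * stds[c] + means[c].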
def get_roidb(imdb_name):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# Here set handler function. (e.g. gt_roidb in faster RCNN)
imdb.set_roi_handler(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_rois()
print 'done'
print 'Preparing training data...'
prepare_roidb(imdb)
print 'done'
return imdb.roidb
def attach_roidb(imdb_names):
"""
only implement single roidb now
"""
roidbs = [get_roidb(s) for s in imdb_names.split('+')]
roidb = roidbs[0]
if len(roidbs) > 1:
raise NotImplementedError
else:
imdb = get_imdb(imdb_names)
return imdb, roidb
def compute_mcg_mean_std(roidb_dir, num_classes):
"""
Compute bbox mean and stds for mcg proposals
Since mcg proposal are stored on disk, so we precomputed it here once
and save them to disk to avoid disk I/O next time
Args:
roidb_dir: directory contain all the mcg proposals
"""
file_list = sorted(os.listdir(roidb_dir))
target_list = []
cnt = 0
for file_name in file_list:
roidb_cache = os.path.join(roidb_dir, file_name)
roidb = scipy.io.loadmat(roidb_cache)
target_list.append(compute_targets(roidb['boxes'], roidb['det_overlap'], roidb['output_label'].ravel()))
cnt += 1
class_counts = np.zeros((num_classes, 1)) + cfg.EPS
sums = np.zeros((num_classes, 4))
squared_sums = np.zeros((num_classes, 4))
for im_i in xrange(len(target_list)):
targets = target_list[im_i]
for cls in xrange(1, num_classes):
cls_inds = np.where(targets[:, 0] == cls)[0]
if cls_inds.size > 0:
class_counts[cls] += cls_inds.size
sums[cls, :] += targets[cls_inds, 1:].sum(axis=0)
squared_sums[cls, :] += \
(targets[cls_inds, 1:] ** 2).sum(axis=0)
means = sums / class_counts
stds = np.sqrt(squared_sums / class_counts - means ** 2)
np.save('data/cache/mcg_bbox_mean.npy', means)
np.save('data/cache/mcg_bbox_std.npy', stds)
return means, stds
| 6,862 | 37.55618 | 112 | py |
MNC | MNC-master/lib/db/imdb.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from datasets.pascal_voc_det import PascalVOCDet
from datasets.pascal_voc_seg import PascalVOCSeg
__sets = {
'voc_2012_seg_train': (lambda: PascalVOCSeg('train', '2012', 'data/VOCdevkitSDS/')),
'voc_2012_seg_val': (lambda: PascalVOCSeg('val', '2012', 'data/VOCdevkitSDS/')),
'voc_2007_trainval': (lambda: PascalVOCDet('trainval', '2007')),
'voc_2007_test': (lambda: PascalVOCDet('test', '2007'))
}
def get_imdb(name):
""" Get an imdb (image database) by name.
"""
if not __sets.has_key(name):
raise KeyError('Unknown dataset: {}'.format(name))
return __sets[name]()
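# e.g. get_imdb('voc_2012_seg_train') returns a PascalVOCSeg instance, while
# get_imdb('voc_2007_trainval') returns a PascalVOCDet instance.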
def list_imdbs():
return __sets.keys()
| 970 | 32.482759 | 88 | py |
MNC | MNC-master/lib/db/__init__.py | 0 | 0 | 0 | py |
|
MNC | MNC-master/lib/db/maskdb.py | # --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from mnc_config import cfg
from db.imdb import get_imdb
def get_maskdb(imdb_name):
imdb = get_imdb(imdb_name)
print 'Loaded dataset `{:s}` for training'.format(imdb.name)
# Here set handler function. (e.g. gt_roidb in faster RCNN)
imdb.set_roi_handler(cfg.TRAIN.PROPOSAL_METHOD)
imdb.set_mask_handler(cfg.TRAIN.PROPOSAL_METHOD)
print 'Set proposal method: {:s}'.format(cfg.TRAIN.PROPOSAL_METHOD)
if cfg.TRAIN.USE_FLIPPED:
print 'Appending horizontally-flipped training examples...'
imdb.append_flipped_masks()
print 'done'
return imdb.maskdb
def attach_maskdb(imdb_names):
"""
only implement single maskdb now
"""
maskdbs = [get_maskdb(s) for s in imdb_names.split('+')]
maskdb = maskdbs[0]
if len(maskdbs) > 1:
raise NotImplementedError
else:
imdb = get_imdb(imdb_names)
return imdb, maskdb
| 1,162 | 29.605263 | 71 | py |
MNC | MNC-master/lib/datasets/pascal_voc.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os
import numpy as np
import scipy.sparse
from mnc_config import cfg
class PascalVOC(object):
""" A base class for image database."""
def __init__(self, name):
self._name = name
self._num_classes = 0
self._classes = []
self._image_index = []
self._obj_proposer = 'selective_search'
self._roidb = None
self._roidb_handler = self.default_roidb
self._maskdb = None
self._maskdb_handler = self.default_maskdb
# Use this dict for storing dataset specific config options
self.config = {}
@property
def name(self):
return self._name
@property
def num_classes(self):
return len(self._classes)
@property
def classes(self):
return self._classes
@property
def image_index(self):
return self._image_index
@property
def roidb_handler(self):
return self._roidb_handler
@roidb_handler.setter
def roidb_handler(self, val):
self._roidb_handler = val
    @property
    def maskdb_handler(self):
        return self._maskdb_handler
    @maskdb_handler.setter
    def maskdb_handler(self, val):
        self._maskdb_handler = val
@property
def roidb(self):
# A roidb is a 'list of dictionaries', each with the following keys:
# boxes: the numpy array for boxes coordinate
# gt_overlaps: overlap ratio for ground truth
# gt_classes: ground truth class for that box
# flipped: whether get flipped
if self._roidb is not None:
return self._roidb
self._roidb = self.roidb_handler()
return self._roidb
@property
def maskdb(self):
if self._maskdb is not None:
return self._maskdb
else:
self._maskdb = self.maskdb_handler()
return self._maskdb
@property
def cache_path(self):
cache_path = os.path.abspath(os.path.join(cfg.DATA_DIR, 'cache'))
if not os.path.exists(cache_path):
os.makedirs(cache_path)
return cache_path
@property
def num_images(self):
return len(self.image_index)
def set_roi_handler(self, method):
        method = getattr(self, method + '_roidb')
self.roidb_handler = method
def set_mask_handler(self, method):
        method = getattr(self, method + '_maskdb')
self.maskdb_handler = method
def image_path_at(self, i):
raise NotImplementedError
def default_roidb(self):
raise NotImplementedError
def default_maskdb(self):
raise NotImplementedError
def competition_mode(self, on):
"""Turn competition mode on or off."""
pass
@staticmethod
def merge_roidbs(a, b):
assert len(a) == len(b)
for i in xrange(len(a)):
a[i]['boxes'] = np.vstack((a[i]['boxes'], b[i]['boxes']))
a[i]['gt_classes'] = np.hstack((a[i]['gt_classes'],
b[i]['gt_classes']))
a[i]['gt_overlaps'] = scipy.sparse.vstack([a[i]['gt_overlaps'],
b[i]['gt_overlaps']])
return a
| 3,522 | 27.642276 | 77 | py |
MNC | MNC-master/lib/datasets/pascal_voc_det.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os
import uuid
import cPickle
import numpy as np
import scipy.sparse
import scipy.io
import PIL
import xml.etree.ElementTree as xmlET
from datasets.pascal_voc import PascalVOC
from mnc_config import cfg
from utils.voc_eval import voc_eval
class PascalVOCDet(PascalVOC):
"""
A subclass for PascalVOC
"""
def __init__(self, image_set, year, devkit_path=None):
PascalVOC.__init__(self, 'voc_' + year + '_' + image_set)
self._year = year
self._image_set = image_set
self._devkit_path = self._get_default_path() if devkit_path is None else devkit_path
self._data_path = os.path.join(self._devkit_path, 'VOC' + self._year) if 'SDS' not in self._devkit_path else self._devkit_path
self._classes = ('__background__', # always index 0
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
self._class_to_ind = dict(zip(self.classes, xrange(self.num_classes)))
self._image_ext = '.jpg'
self._image_index = self._load_image_set_index()
# Default to roidb handler
# self._roidb_handler = self.selective_search_roidb
self._salt = str(uuid.uuid4())
self._comp_id = 'comp4'
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'top_k': 2000,
'use_diff': False,
'matlab_eval': False,
'rpn_file': None}
assert os.path.exists(self._devkit_path), \
'VOCdevkit path does not exist: {}'.format(self._devkit_path)
assert os.path.exists(self._data_path), \
'Path does not exist: {}'.format(self._data_path)
def image_path_at(self, i):
return self.image_path_from_index(self._image_index[i])
def image_path_from_index(self, index):
image_path = os.path.join(self._data_path, 'JPEGImages',
index + self._image_ext)
assert os.path.exists(image_path), \
'Path does not exist: {}'.format(image_path)
return image_path
def gt_roidb(self):
"""
Return the database of ground-truth regions of interest.
This function loads/saves from/to a cache file to speed up future calls.
"""
cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
roidb = cPickle.load(fid)
print '{} gt roidb loaded from {}'.format(self.name, cache_file)
return roidb
num_image = len(self.image_index)
if cfg.MNC_MODE:
gt_roidb = [self._load_sbd_annotations(index) for index in xrange(num_image)]
else:
gt_roidb = [self._load_pascal_annotations(index) for index in xrange(num_image)]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt roidb to {}'.format(cache_file)
return gt_roidb
def _load_image_set_index(self):
"""
Load the indexes listed in this dataset's image set file.
Examples
path is: self._devkit_path + /VOCdevkit2007/VOC2007/ImageSets/Main/val.txt
--------
"""
image_set_file = os.path.join(self._data_path, 'ImageSets', 'Main',
self._image_set + '.txt')
assert os.path.exists(image_set_file), \
'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def append_flipped_rois(self):
"""
This method is irrelevant with database, so implement here
Append flipped images to ROI database
Note this method doesn't actually flip the 'image', it flip
boxes instead
"""
cache_file = os.path.join(self.cache_path, self.name + '_' + cfg.TRAIN.PROPOSAL_METHOD + '_roidb_flip.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
flip_roidb = cPickle.load(fid)
print '{} gt flipped roidb loaded from {}'.format(self.name, cache_file)
else:
num_images = self.num_images
widths = [PIL.Image.open(self.image_path_at(i)).size[0]
for i in xrange(num_images)]
flip_roidb = []
for i in xrange(num_images):
boxes = self.roidb[i]['boxes'].copy()
oldx1 = boxes[:, 0].copy()
oldx2 = boxes[:, 2].copy()
boxes[:, 0] = widths[i] - oldx2 - 1
boxes[:, 2] = widths[i] - oldx1 - 1
assert (boxes[:, 2] >= boxes[:, 0]).all()
entry = {'boxes': boxes,
'gt_overlaps': self.roidb[i]['gt_overlaps'],
'gt_classes': self.roidb[i]['gt_classes'],
'flipped': True}
flip_roidb.append(entry)
with open(cache_file, 'wb') as fid:
cPickle.dump(flip_roidb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt flipped roidb to {}'.format(cache_file)
self.roidb.extend(flip_roidb)
self._image_index *= 2
def _get_default_path(self):
"""
Return the default path where PASCAL VOC is expected to be installed.
"""
return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)
def _load_pascal_annotations(self, index):
"""
Load image and bounding boxes info from XML file
in the PASCAL VOC format according to image index
"""
image_name = self._image_index[index]
filename = os.path.join(self._data_path, 'Annotations', image_name + '.xml')
tree = xmlET.parse(filename)
objs = tree.findall('object')
if not self.config['use_diff']:
# Exclude the samples labeled as difficult
non_diff_objs = [
obj for obj in objs if int(obj.find('difficult').text) == 0]
if len(non_diff_objs) != len(objs):
print 'Removed {} difficult objects'.format(len(objs) - len(non_diff_objs))
objs = non_diff_objs
num_objs = len(objs)
boxes = np.zeros((num_objs, 4), dtype=np.uint16)
gt_classes = np.zeros(num_objs, dtype=np.int32)
overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)
# Load object bounding boxes into a data frame.
# boxes[ind, :] will be boxes
# gt_classes[ind] will be the associated class name for this box
# overlaps[ind, class] will assign 1.0 to ground truth
for ix, obj in enumerate(objs):
bbox = obj.find('bndbox')
# Make pixel indexes 0-based
x1 = float(bbox.find('xmin').text) - 1
y1 = float(bbox.find('ymin').text) - 1
x2 = float(bbox.find('xmax').text) - 1
y2 = float(bbox.find('ymax').text) - 1
cls = self._class_to_ind[obj.find('name').text.lower().strip()]
boxes[ix, :] = [x1, y1, x2, y2]
gt_classes[ix] = cls
overlaps[ix, cls] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False}
def _load_sbd_annotations(self, index):
if index % 1000 == 0: print '%d / %d' % (index, len(self._image_index))
image_name = self._image_index[index]
inst_file_name = os.path.join(self._data_path, 'inst', image_name + '.mat')
gt_inst_mat = scipy.io.loadmat(inst_file_name)
gt_inst_data = gt_inst_mat['GTinst']['Segmentation'][0][0]
unique_inst = np.unique(gt_inst_data)
background_ind = np.where(unique_inst == 0)[0]
unique_inst = np.delete(unique_inst, background_ind)
cls_file_name = os.path.join(self._data_path, 'cls', image_name + '.mat')
gt_cls_mat = scipy.io.loadmat(cls_file_name)
gt_cls_data = gt_cls_mat['GTcls']['Segmentation'][0][0]
boxes = np.zeros((len(unique_inst), 4), dtype=np.uint16)
gt_classes = np.zeros(len(unique_inst), dtype=np.int32)
overlaps = np.zeros((len(unique_inst), self.num_classes), dtype=np.float32)
for ind, inst_mask in enumerate(unique_inst):
im_mask = (gt_inst_data == inst_mask)
im_cls_mask = np.multiply(gt_cls_data, im_mask)
unique_cls_inst = np.unique(im_cls_mask)
background_ind = np.where(unique_cls_inst == 0)[0]
unique_cls_inst = np.delete(unique_cls_inst, background_ind)
assert len(unique_cls_inst) == 1
gt_classes[ind] = unique_cls_inst[0]
[r, c] = np.where(im_mask > 0)
boxes[ind, 0] = np.min(c)
boxes[ind, 1] = np.min(r)
boxes[ind, 2] = np.max(c)
boxes[ind, 3] = np.max(r)
overlaps[ind, unique_cls_inst[0]] = 1.0
overlaps = scipy.sparse.csr_matrix(overlaps)
return {'boxes': boxes,
'gt_classes': gt_classes,
'gt_overlaps': overlaps,
'flipped': False}
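    # A worked sketch of the loop above (toy values): if gt_inst_data contains
    # pixel labels {0, 1, 2}, unique_inst becomes [1, 2] after dropping the
    # background. For instance id 1, im_cls_mask keeps the class map only at
    # that instance's pixels, so its unique nonzero value is the instance's
    # class, and the min/max row/column of the mask give its bounding box.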
"""-----------------Evaluation--------------------"""
def evaluate_detections(self, all_boxes, output_dir):
self._write_voc_results_file(all_boxes)
self._do_python_eval(output_dir)
if self.config['matlab_eval']:
raise NotImplementedError
if self.config['cleanup']:
for cls in self._classes:
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
os.remove(filename)
def _get_comp_id(self):
comp_id = (self._comp_id + '_' + self._salt if self.config['use_salt']
else self._comp_id)
return comp_id
def _get_voc_results_file_template(self):
# VOCdevkit/results/VOC2007/Main/<comp_id>_det_test_aeroplane.txt
filename = self._get_comp_id() + '_det_' + self._image_set + '_{:s}.txt'
path = os.path.join(
self._devkit_path,
'results',
'VOC' + self._year,
'Main',
filename)
return path
def _write_voc_results_file(self, all_boxes):
for cls_ind, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Writing {} VOC results file'.format(cls)
filename = self._get_voc_results_file_template().format(cls)
with open(filename, 'wt') as f:
for im_ind, index in enumerate(self.image_index):
dets = all_boxes[cls_ind][im_ind]
                    if len(dets) == 0:
continue
# the VOCdevkit expects 1-based indices
for k in xrange(dets.shape[0]):
f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.
format(index, dets[k, -1],
dets[k, 0] + 1, dets[k, 1] + 1,
dets[k, 2] + 1, dets[k, 3] + 1))
    def _do_python_eval(self, output_dir='output'):
print '--------------------------------------------------------------'
print 'Computing results with **unofficial** Python eval code.'
print 'Results should be very close to the official MATLAB eval code.'
print 'Recompute with `./tools/reval.py --matlab ...` for your paper.'
print '--------------------------------------------------------------'
annopath = os.path.join(
self._devkit_path,
'VOC' + self._year,
'Annotations',
'{:s}.xml')
imagesetfile = os.path.join(
self._devkit_path,
'VOC' + self._year,
'ImageSets',
'Main',
self._image_set + '.txt')
cachedir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# The PASCAL VOC metric changed in 2010
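        # VOC07 uses the 11-point interpolated AP (precision sampled at
        # recall 0.0, 0.1, ..., 1.0), while later years use the area under
        # the full precision/recall curve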
        use_07_metric = int(self._year) < 2010
print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
filename = self._get_voc_results_file_template().format(cls)
rec, prec, ap = voc_eval(
filename, annopath, imagesetfile, cls, cachedir, ovthresh=0.5,
use_07_metric=use_07_metric)
aps += [ap]
print('AP for {} = {:.4f}'.format(cls, ap))
            with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
cPickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
print('Mean AP = {:.4f}'.format(np.mean(aps)))
print('~~~~~~~~')
print('Results:')
for ap in aps:
print('{:.3f}'.format(ap))
print('{:.3f}'.format(np.mean(aps)))
print('~~~~~~~~')
| 13,855 | 42.435737 | 134 | py |
MNC | MNC-master/lib/datasets/__init__.py | 0 | 0 | 0 | py |
|
MNC | MNC-master/lib/datasets/pascal_voc_seg.py | # --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import cPickle
import os
import scipy.io
import numpy as np
from datasets.pascal_voc_det import PascalVOCDet
from mnc_config import cfg
from utils.vis_seg import vis_seg
from utils.voc_eval import voc_eval_sds
class PascalVOCSeg(PascalVOCDet):
"""
A subclass for datasets.imdb.imdb
This class contains information of ROIDB and MaskDB
This class implements roidb and maskdb related functions
"""
def __init__(self, image_set, year, devkit_path=None):
PascalVOCDet.__init__(self, image_set, year, devkit_path)
self._ori_image_num = len(self._image_index)
self._comp_id = 'comp6'
# PASCAL specific config options
self.config = {'cleanup': True,
'use_salt': True,
'top_k': 2000,
'use_diff': False,
'matlab_eval': False,
'rpn_file': None}
self._data_path = os.path.join(self._devkit_path)
self._roidb_path = os.path.join(self.cache_path, 'voc_2012_' + image_set + '_mcg_maskdb')
def image_path_at(self, i):
image_path = os.path.join(self._data_path, 'img', self._image_index[i] + self._image_ext)
assert os.path.exists(image_path), 'Path does not exist: {}'.format(image_path)
return image_path
def roidb_path_at(self, i):
if i >= self._ori_image_num:
return os.path.join(self._roidb_path,
self.image_index[i % self._ori_image_num] + '_flip.mat')
else:
return os.path.join(self._roidb_path,
self.image_index[i] + '.mat')
def gt_maskdb(self):
cache_file = os.path.join(self.cache_path, self.name + '_gt_maskdb.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
gt_maskdb = cPickle.load(fid)
print '{} gt maskdb loaded from {}'.format(self.name, cache_file)
else:
num_image = len(self.image_index)
gt_roidbs = self.gt_roidb()
gt_maskdb = [self._load_sbd_mask_annotations(index, gt_roidbs)
for index in xrange(num_image)]
with open(cache_file, 'wb') as fid:
cPickle.dump(gt_maskdb, fid, cPickle.HIGHEST_PROTOCOL)
            print 'wrote gt maskdb to {}'.format(cache_file)
return gt_maskdb
def _load_image_set_index(self):
image_set_file = os.path.join(self._data_path, self._image_set + '.txt')
assert os.path.exists(image_set_file), 'Path does not exist: {}'.format(image_set_file)
with open(image_set_file) as f:
image_index = [x.strip() for x in f.readlines()]
return image_index
def _load_sbd_mask_annotations(self, index, gt_roidbs):
"""
Load gt_masks information from SBD's additional data
"""
if index % 1000 == 0:
print '%d / %d' % (index, len(self._image_index))
image_name = self._image_index[index]
inst_file_name = os.path.join(self._data_path, 'inst', image_name + '.mat')
gt_inst_mat = scipy.io.loadmat(inst_file_name)
gt_inst_data = gt_inst_mat['GTinst']['Segmentation'][0][0]
unique_inst = np.unique(gt_inst_data)
background_ind = np.where(unique_inst == 0)[0]
unique_inst = np.delete(unique_inst, background_ind)
gt_roidb = gt_roidbs[index]
cls_file_name = os.path.join(self._data_path, 'cls', image_name + '.mat')
gt_cls_mat = scipy.io.loadmat(cls_file_name)
gt_cls_data = gt_cls_mat['GTcls']['Segmentation'][0][0]
gt_masks = []
for ind, inst_mask in enumerate(unique_inst):
box = gt_roidb['boxes'][ind]
im_mask = (gt_inst_data == inst_mask)
im_cls_mask = np.multiply(gt_cls_data, im_mask)
unique_cls_inst = np.unique(im_cls_mask)
background_ind = np.where(unique_cls_inst == 0)[0]
unique_cls_inst = np.delete(unique_cls_inst, background_ind)
assert len(unique_cls_inst) == 1
assert unique_cls_inst[0] == gt_roidb['gt_classes'][ind]
mask = im_mask[box[1]: box[3]+1, box[0]:box[2]+1]
gt_masks.append(mask)
# Also record the maximum dimension to create fixed dimension array when do forwarding
mask_max_x = max(gt_masks[i].shape[1] for i in xrange(len(gt_masks)))
mask_max_y = max(gt_masks[i].shape[0] for i in xrange(len(gt_masks)))
return {
'gt_masks': gt_masks,
'mask_max': [mask_max_x, mask_max_y],
'flipped': False
}
def append_flipped_masks(self):
"""
This method is only accessed when we use maskdb, so implement here
Append flipped images to mask database
Note this method doesn't actually flip the 'image', it flip masks instead
"""
cache_file = os.path.join(self.cache_path, self.name + '_' + cfg.TRAIN.PROPOSAL_METHOD + '_maskdb_flip.pkl')
if os.path.exists(cache_file):
with open(cache_file, 'rb') as fid:
flip_maskdb = cPickle.load(fid)
            print '{} gt flipped maskdb loaded from {}'.format(self.name, cache_file)
self.maskdb.extend(flip_maskdb)
            # Double the image index only if it has not been doubled yet,
            # otherwise we may occasionally *4
            if len(self._image_index) == self._ori_image_num:
                self._image_index *= 2
else:
# pure image number hold for future development
# this is useless since append flip mask will only be called once
num_images = self._ori_image_num
flip_maskdb = []
for i in xrange(num_images):
masks = self.maskdb[i]['gt_masks']
masks_flip = []
for mask_ind in xrange(len(masks)):
mask_flip = np.fliplr(masks[mask_ind])
masks_flip.append(mask_flip)
entry = {'gt_masks': masks_flip,
'mask_max': self.maskdb[i]['mask_max'],
'flipped': True}
flip_maskdb.append(entry)
with open(cache_file, 'wb') as fid:
cPickle.dump(flip_maskdb, fid, cPickle.HIGHEST_PROTOCOL)
print 'wrote gt flipped maskdb to {}'.format(cache_file)
self.maskdb.extend(flip_maskdb)
            # Double the image index only if it has not been doubled yet,
            # otherwise we may occasionally *4
            if len(self._image_index) == self._ori_image_num:
                self._image_index *= 2
def visualization_segmentation(self, output_dir):
vis_seg(self.image_index, self.classes, output_dir, self._data_path)
# --------------------------- Evaluation ---------------------------
def evaluate_segmentation(self, all_boxes, all_masks, output_dir):
self._write_voc_seg_results_file(all_boxes, all_masks, output_dir)
self._py_evaluate_segmentation(output_dir)
def _write_voc_seg_results_file(self, all_boxes, all_masks, output_dir):
"""
Write results as a pkl file, note this is different from
detection task since it's difficult to write masks to txt
"""
        # Always reformat results since masks may not be binary, or may
        # be of shape (n, sz*sz) instead of (n, sz, sz)
all_boxes, all_masks = self._reformat_result(all_boxes, all_masks)
for cls_inds, cls in enumerate(self.classes):
if cls == '__background__':
continue
print 'Writing {} VOC results file'.format(cls)
filename = os.path.join(output_dir, cls + '_det.pkl')
            with open(filename, 'wb') as f:
cPickle.dump(all_boxes[cls_inds], f, cPickle.HIGHEST_PROTOCOL)
filename = os.path.join(output_dir, cls + '_seg.pkl')
            with open(filename, 'wb') as f:
cPickle.dump(all_masks[cls_inds], f, cPickle.HIGHEST_PROTOCOL)
def _reformat_result(self, boxes, masks):
num_images = len(self.image_index)
num_class = len(self.classes)
reformat_masks = [[[] for _ in xrange(num_images)]
for _ in xrange(num_class)]
for cls_inds in xrange(1, num_class):
for img_inds in xrange(num_images):
if len(masks[cls_inds][img_inds]) == 0:
continue
num_inst = masks[cls_inds][img_inds].shape[0]
reformat_masks[cls_inds][img_inds] = masks[cls_inds][img_inds]\
.reshape(num_inst, cfg.MASK_SIZE, cfg.MASK_SIZE)
reformat_masks[cls_inds][img_inds] = reformat_masks[cls_inds][img_inds] >= cfg.BINARIZE_THRESH
all_masks = reformat_masks
return boxes, all_masks
def _py_evaluate_segmentation(self, output_dir):
gt_dir = self._data_path
imageset_file = os.path.join(gt_dir, self._image_set + '.txt')
cache_dir = os.path.join(self._devkit_path, 'annotations_cache')
aps = []
# define this as true according to SDS's evaluation protocol
use_07_metric = True
print 'VOC07 metric? ' + ('Yes' if use_07_metric else 'No')
if not os.path.isdir(output_dir):
os.mkdir(output_dir)
print '~~~~~~ Evaluation use min overlap = 0.5 ~~~~~~'
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
det_filename = os.path.join(output_dir, cls + '_det.pkl')
seg_filename = os.path.join(output_dir, cls + '_seg.pkl')
ap = voc_eval_sds(det_filename, seg_filename, gt_dir,
imageset_file, cls, cache_dir, self._classes, ov_thresh=0.5)
aps += [ap]
print('AP for {} = {:.2f}'.format(cls, ap*100))
print('Mean [email protected] = {:.2f}'.format(np.mean(aps)*100))
print '~~~~~~ Evaluation use min overlap = 0.7 ~~~~~~'
aps = []
for i, cls in enumerate(self._classes):
if cls == '__background__':
continue
det_filename = os.path.join(output_dir, cls + '_det.pkl')
seg_filename = os.path.join(output_dir, cls + '_seg.pkl')
ap = voc_eval_sds(det_filename, seg_filename, gt_dir,
imageset_file, cls, cache_dir, self._classes, ov_thresh=0.7)
aps += [ap]
print('AP for {} = {:.2f}'.format(cls, ap*100))
print('Mean [email protected] = {:.2f}'.format(np.mean(aps)*100))
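
# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original MNC code): how an SBD-style
# pair of label images is decoded in _load_sbd_mask_annotations above.
# 'inst' assigns every pixel an instance id (0 = background) and 'cls' a
# class id; each instance gets a tight box from its pixel coordinates and a
# single class from the class image restricted to its pixels. Toy arrays only.
def _demo_decode_sbd(inst, cls):
    boxes, classes = [], []
    for inst_id in np.unique(inst):
        if inst_id == 0:
            continue
        mask = (inst == inst_id)
        labels = np.unique(cls[mask])
        assert len(labels) == 1  # one class per instance
        rows, cols = np.where(mask)
        boxes.append([cols.min(), rows.min(), cols.max(), rows.max()])
        classes.append(labels[0])
    return boxes, classes


if __name__ == '__main__':
    inst = np.array([[0, 1, 1], [0, 2, 2], [0, 0, 0]])
    cls = np.array([[0, 15, 15], [0, 7, 7], [0, 0, 0]])
    print _demo_decode_sbd(inst, cls)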
| 10,828 | 46.28821 | 116 | py |
MNC | MNC-master/lib/caffeWrapper/TesterWrapper.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os
import cPickle
import scipy.io
import numpy as np
import cv2
import heapq
import caffe
from utils.timer import Timer
from nms.nms_wrapper import apply_nms, apply_nms_mask_single
from mnc_config import cfg, get_output_dir
from utils.blob import prep_im_for_blob, im_list_to_blob, prep_im_for_blob_cfm, pred_rois_for_blob
from transform.bbox_transform import clip_boxes, bbox_transform_inv, filter_small_boxes
from transform.mask_transform import cpu_mask_voting, gpu_mask_voting
class TesterWrapper(object):
"""
A simple wrapper around Caffe's test forward
"""
def __init__(self, test_prototxt, imdb, test_model, task_name):
        # Load the test prototxt and the trained weights into a Caffe test net
self.net = caffe.Net(test_prototxt, test_model, caffe.TEST)
self.net.name = os.path.splitext(os.path.basename(test_model))[0]
self.imdb = imdb
self.output_dir = get_output_dir(imdb, self.net)
self.task_name = task_name
# We define some class variables here to avoid defining them many times in every method
self.num_images = len(self.imdb.image_index)
self.num_classes = self.imdb.num_classes
# heuristic: keep an average of 40 detections per class per images prior to nms
self.max_per_set = 40 * self.num_images
# heuristic: keep at most 100 detection per class per image prior to NMS
self.max_per_image = 100
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
def get_result(self):
output_dir = self.output_dir
det_file = os.path.join(output_dir, 'res_boxes.pkl')
seg_file = os.path.join(output_dir, 'res_masks.pkl')
if self.task_name == 'det':
self.get_detection_result()
elif self.task_name == 'vis_seg':
self.vis_segmentation_result()
elif self.task_name == 'seg':
if os.path.isfile(det_file) and os.path.isfile(seg_file):
with open(det_file, 'rb') as f:
seg_box = cPickle.load(f)
with open(seg_file, 'rb') as f:
seg_mask = cPickle.load(f)
else:
seg_box, seg_mask = self.get_segmentation_result()
with open(det_file, 'wb') as f:
cPickle.dump(seg_box, f, cPickle.HIGHEST_PROTOCOL)
with open(seg_file, 'wb') as f:
cPickle.dump(seg_mask, f, cPickle.HIGHEST_PROTOCOL)
print 'Evaluating segmentation using MNC 5 stage inference'
self.imdb.evaluate_segmentation(seg_box, seg_mask, output_dir)
elif self.task_name == 'cfm':
if os.path.isfile(det_file) and os.path.isfile(seg_file):
with open(det_file, 'rb') as f:
cfm_boxes = cPickle.load(f)
with open(seg_file, 'rb') as f:
cfm_masks = cPickle.load(f)
else:
cfm_boxes, cfm_masks = self.get_cfm_result()
with open(det_file, 'wb') as f:
cPickle.dump(cfm_boxes, f, cPickle.HIGHEST_PROTOCOL)
with open(seg_file, 'wb') as f:
cPickle.dump(cfm_masks, f, cPickle.HIGHEST_PROTOCOL)
print 'Evaluating segmentation using convolutional feature masking'
self.imdb.evaluate_segmentation(cfm_boxes, cfm_masks, output_dir)
else:
            print 'task name only supports \'det\', \'seg\', \'cfm\' and \'vis_seg\''
raise NotImplementedError
def get_detection_result(self):
output_dir = self.output_dir
# heuristic: keep an average of 40 detections per class per images prior to NMS
max_per_set = 40 * self.num_images
# heuristic: keep at most 100 detection per class per image prior to NMS
max_per_image = 100
# detection threshold for each class (this is adaptively set based on the
# max_per_set constraint)
thresh = -np.inf * np.ones(self.num_classes)
# top_scores will hold one min heap of scores per class (used to enforce
# the max_per_set constraint)
top_scores = [[] for _ in xrange(self.num_classes)]
# all detections are collected into:
# all_boxes[cls][image] = N x 5 array of detections in
# (x1, y1, x2, y2, score)
all_boxes = [[[] for _ in xrange(self.num_images)]
for _ in xrange(self.num_classes)]
_t = {'im_detect': Timer(), 'misc': Timer()}
for i in xrange(self.num_images):
im = cv2.imread(self.imdb.image_path_at(i))
_t['im_detect'].tic()
scores, boxes = self._detection_forward(im)
_t['im_detect'].toc()
for j in xrange(1, self.num_classes):
inds = np.where(scores[:, j] > thresh[j])[0]
cls_scores = scores[inds, j]
cls_boxes = boxes[inds, j*4:(j+1)*4]
top_inds = np.argsort(-cls_scores)[:max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds, :]
# push new scores onto the min heap
for val in cls_scores:
heapq.heappush(top_scores[j], val)
                # if we've collected more than the max number of detections,
                # then pop items off the min heap and update the class threshold
if len(top_scores[j]) > max_per_set:
while len(top_scores[j]) > max_per_set:
heapq.heappop(top_scores[j])
thresh[j] = top_scores[j][0]
all_boxes[j][i] = np.hstack((cls_boxes, cls_scores[:, np.newaxis]))\
.astype(np.float32, copy=False)
print 'process image %d/%d, forward average time %f' % (i, self.num_images,
_t['im_detect'].average_time)
for j in xrange(1, self.num_classes):
for i in xrange(self.num_images):
inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
all_boxes[j][i] = all_boxes[j][i][inds, :]
det_file = os.path.join(output_dir, 'detections.pkl')
with open(det_file, 'wb') as f:
cPickle.dump(all_boxes, f, cPickle.HIGHEST_PROTOCOL)
print 'Applying NMS to all detections'
nms_dets = apply_nms(all_boxes, cfg.TEST.NMS)
print 'Evaluating detections'
self.imdb.evaluate_detections(nms_dets, output_dir)
def vis_segmentation_result(self):
self.imdb.visualization_segmentation(self.output_dir)
def get_segmentation_result(self):
# detection threshold for each class
# (this is adaptively set based on the max_per_set constraint)
thresh = -np.inf * np.ones(self.num_classes)
# top_scores will hold one min heap of scores per class (used to enforce
# the max_per_set constraint)
top_scores = [[] for _ in xrange(self.num_classes)]
# all detections and segmentation are collected into a list:
# Since the number of dets/segs are of variable size
all_boxes = [[[] for _ in xrange(self.num_images)]
for _ in xrange(self.num_classes)]
all_masks = [[[] for _ in xrange(self.num_images)]
for _ in xrange(self.num_classes)]
_t = {'im_detect': Timer(), 'misc': Timer()}
for i in xrange(self.num_images):
im = cv2.imread(self.imdb.image_path_at(i))
_t['im_detect'].tic()
masks, boxes, seg_scores = self._segmentation_forward(im)
_t['im_detect'].toc()
if not cfg.TEST.USE_MASK_MERGE:
for j in xrange(1, self.num_classes):
inds = np.where(seg_scores[:, j] > thresh[j])[0]
cls_scores = seg_scores[inds, j]
cls_boxes = boxes[inds, :]
cls_masks = masks[inds, :]
top_inds = np.argsort(-cls_scores)[:self.max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds, :]
cls_masks = cls_masks[top_inds, :]
# push new scores onto the min heap
for val in cls_scores:
heapq.heappush(top_scores[j], val)
                    # if we've collected more than the max number of detections,
                    # then pop items off the min heap and update the class threshold
if len(top_scores[j]) > self.max_per_set:
while len(top_scores[j]) > self.max_per_set:
heapq.heappop(top_scores[j])
thresh[j] = top_scores[j][0]
# Add new boxes into record
box_before_nms = np.hstack((cls_boxes, cls_scores[:, np.newaxis]))\
.astype(np.float32, copy=False)
mask_before_nms = cls_masks.astype(np.float32, copy=False)
all_boxes[j][i], all_masks[j][i] = apply_nms_mask_single(box_before_nms, mask_before_nms, cfg.TEST.NMS)
else:
if cfg.TEST.USE_GPU_MASK_MERGE:
result_mask, result_box = gpu_mask_voting(masks, boxes, seg_scores, self.num_classes,
self.max_per_image, im.shape[1], im.shape[0])
else:
result_box, result_mask = cpu_mask_voting(masks, boxes, seg_scores, self.num_classes,
self.max_per_image, im.shape[1], im.shape[0])
# no need to create a min heap since the output will not exceed max number of detection
for j in xrange(1, self.num_classes):
all_boxes[j][i] = result_box[j-1]
all_masks[j][i] = result_mask[j-1]
print 'process image %d/%d, forward average time %f' % (i, self.num_images,
_t['im_detect'].average_time)
for j in xrange(1, self.num_classes):
for i in xrange(self.num_images):
inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
all_boxes[j][i] = all_boxes[j][i][inds, :]
all_masks[j][i] = all_masks[j][i][inds]
return all_boxes, all_masks
def _detection_forward(self, im):
""" Detect object classes in an image given object proposals.
Arguments:
im (ndarray): color image to test (in BGR order)
Returns:
box_scores (ndarray): R x K array of object class scores (K includes
background as object category 0)
all_boxes (ndarray): R x (4*K) array of predicted bounding boxes
"""
forward_kwargs, im_scales = self._prepare_mnc_args(im)
blobs_out = self.net.forward(**forward_kwargs)
# There are some data we need to get:
# 1. ROIS (with bbox regression)
rois = self.net.blobs['rois'].data.copy()
# un-scale back to raw image space
boxes = rois[:, 1:5] / im_scales[0]
box_deltas = blobs_out['bbox_pred']
pred_boxes = bbox_transform_inv(boxes, box_deltas)
pred_boxes, _ = clip_boxes(pred_boxes, im.shape)
# 2. Detection score
scores = blobs_out['cls_prob']
return scores, pred_boxes
def _segmentation_forward(self, im):
forward_kwargs, im_scales = self._prepare_mnc_args(im)
blobs_out = self.net.forward(**forward_kwargs)
# output we need to collect:
        # 1. output from phase 1
rois_phase1 = self.net.blobs['rois'].data.copy()
masks_phase1 = self.net.blobs['mask_proposal'].data[...]
scores_phase1 = self.net.blobs['seg_cls_prob'].data[...]
# 2. output from phase2
rois_phase2 = self.net.blobs['rois_ext'].data[...]
masks_phase2 = self.net.blobs['mask_proposal_ext'].data[...]
scores_phase2 = self.net.blobs['seg_cls_prob_ext'].data[...]
# Boxes are in resized space, we un-scale them back
rois_phase1 = rois_phase1[:, 1:5] / im_scales[0]
rois_phase2 = rois_phase2[:, 1:5] / im_scales[0]
rois_phase1, _ = clip_boxes(rois_phase1, im.shape)
rois_phase2, _ = clip_boxes(rois_phase2, im.shape)
# concatenate two stages to get final network output
masks = np.concatenate((masks_phase1, masks_phase2), axis=0)
boxes = np.concatenate((rois_phase1, rois_phase2), axis=0)
scores = np.concatenate((scores_phase1, scores_phase2), axis=0)
return masks, boxes, scores
def _prepare_mnc_args(self, im):
# Prepare image data blob
blobs = {'data': None}
processed_ims = []
im, im_scale_factors = \
prep_im_for_blob(im, cfg.PIXEL_MEANS, cfg.TEST.SCALES[0], cfg.TRAIN.MAX_SIZE)
processed_ims.append(im)
blobs['data'] = im_list_to_blob(processed_ims)
# Prepare image info blob
im_scales = [np.array(im_scale_factors)]
assert len(im_scales) == 1, 'Only single-image batch implemented'
im_blob = blobs['data']
blobs['im_info'] = np.array(
[[im_blob.shape[2], im_blob.shape[3], im_scales[0]]],
dtype=np.float32)
# Reshape network inputs and do forward
self.net.blobs['data'].reshape(*blobs['data'].shape)
self.net.blobs['im_info'].reshape(*blobs['im_info'].shape)
forward_kwargs = {
'data': blobs['data'].astype(np.float32, copy=False),
'im_info': blobs['im_info'].astype(np.float32, copy=False)
}
return forward_kwargs, im_scales
def get_cfm_result(self):
# detection threshold for each class
# (this is adaptively set based on the max_per_set constraint)
thresh = -np.inf * np.ones(self.num_classes)
# top_scores will hold one min heap of scores per class (used to enforce
# the max_per_set constraint)
top_scores = [[] for _ in xrange(self.num_classes)]
# all detections and segmentation are collected into a list:
# Since the number of dets/segs are of variable size
all_boxes = [[[] for _ in xrange(self.num_images)]
for _ in xrange(self.num_classes)]
all_masks = [[[] for _ in xrange(self.num_images)]
for _ in xrange(self.num_classes)]
_t = {'im_detect': Timer(), 'misc': Timer()}
for i in xrange(self.num_images):
_t['im_detect'].tic()
masks, boxes, seg_scores = self.cfm_network_forward(i)
for j in xrange(1, self.num_classes):
inds = np.where(seg_scores[:, j] > thresh[j])[0]
cls_scores = seg_scores[inds, j]
cls_boxes = boxes[inds, :]
cls_masks = masks[inds, :]
top_inds = np.argsort(-cls_scores)[:self.max_per_image]
cls_scores = cls_scores[top_inds]
cls_boxes = cls_boxes[top_inds, :]
cls_masks = cls_masks[top_inds, :]
# push new scores onto the min heap
for val in cls_scores:
heapq.heappush(top_scores[j], val)
                # if we've collected more than the max number of detections,
                # then pop items off the min heap and update the class threshold
if len(top_scores[j]) > self.max_per_set:
while len(top_scores[j]) > self.max_per_set:
heapq.heappop(top_scores[j])
thresh[j] = top_scores[j][0]
box_before_nms = np.hstack((cls_boxes, cls_scores[:, np.newaxis]))\
.astype(np.float32, copy=False)
mask_before_nms = cls_masks.astype(np.float32, copy=False)
all_boxes[j][i], all_masks[j][i] = apply_nms_mask_single(box_before_nms, mask_before_nms, cfg.TEST.NMS)
_t['im_detect'].toc()
print 'process image %d/%d, forward average time %f' % (i, self.num_images,
_t['im_detect'].average_time)
for j in xrange(1, self.num_classes):
for i in xrange(self.num_images):
inds = np.where(all_boxes[j][i][:, -1] > thresh[j])[0]
all_boxes[j][i] = all_boxes[j][i][inds, :]
all_masks[j][i] = all_masks[j][i][inds]
return all_boxes, all_masks
def cfm_network_forward(self, im_i):
im = cv2.imread(self.imdb.image_path_at(im_i))
roidb_cache = os.path.join('data/cache/voc_2012_val_mcg_maskdb/', self.imdb._image_index[im_i] + '.mat')
roidb = scipy.io.loadmat(roidb_cache)
boxes = roidb['boxes']
filter_keep = filter_small_boxes(boxes, min_size=16)
boxes = boxes[filter_keep, :]
masks = roidb['masks']
masks = masks[filter_keep, :, :]
assert boxes.shape[0] == masks.shape[0]
# Resize input mask, make it the same as CFM's input size
mask_resize = np.zeros((masks.shape[0], cfg.TEST.CFM_INPUT_MASK_SIZE, cfg.TEST.CFM_INPUT_MASK_SIZE))
for i in xrange(masks.shape[0]):
mask_resize[i, :, :] = cv2.resize(masks[i, :, :].astype(np.float),
(cfg.TEST.CFM_INPUT_MASK_SIZE, cfg.TEST.CFM_INPUT_MASK_SIZE))
masks = mask_resize
# Get top-k proposals from MCG
if cfg.TEST.USE_TOP_K_MCG:
num_keep = min(boxes.shape[0], cfg.TEST.USE_TOP_K_MCG)
boxes = boxes[:num_keep, :]
masks = masks[:num_keep, :, :]
assert boxes.shape[0] == masks.shape[0]
# deal with multi-scale test
# we group several adjacent scales to do forward
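        # e.g. with 5 test scales and cfg.TEST.GROUP_SCALE = 2 (illustrative
        # values only), the loop below runs ceil(5 / 2.0) = 3 times, covering
        # scale index ranges [0, 2), [2, 4) and [4, 5)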
_, im_scale_factors = prep_im_for_blob_cfm(im, cfg.TEST.SCALES)
orig_boxes = boxes.copy()
boxes = pred_rois_for_blob(boxes, im_scale_factors)
num_scale_iter = int(np.ceil(len(cfg.TEST.SCALES) / float(cfg.TEST.GROUP_SCALE)))
LO_SCALE = 0
MAX_ROIS_GPU = cfg.TEST.MAX_ROIS_GPU
# set up return results
res_boxes = np.zeros((0, 4), dtype=np.float32)
res_masks = np.zeros((0, 1, cfg.MASK_SIZE, cfg.MASK_SIZE), dtype=np.float32)
res_seg_scores = np.zeros((0, self.num_classes), dtype=np.float32)
for scale_iter in xrange(num_scale_iter):
HI_SCALE = min(LO_SCALE + cfg.TEST.GROUP_SCALE, len(cfg.TEST.SCALES))
inds_this_scale = np.where((boxes[:, 0] >= LO_SCALE) & (boxes[:, 0] < HI_SCALE))[0]
if len(inds_this_scale) == 0:
LO_SCALE += cfg.TEST.GROUP_SCALE
continue
max_rois_this_scale = MAX_ROIS_GPU[scale_iter]
boxes_this_scale = boxes[inds_this_scale, :]
masks_this_scale = masks[inds_this_scale, :, :]
num_iter_this_scale = int(np.ceil(boxes_this_scale.shape[0] / float(max_rois_this_scale)))
# make the batch index of input box start from 0
boxes_this_scale[:, 0] -= min(boxes_this_scale[:, 0])
# re-prepare im blob for this_scale
input_blobs = {}
input_blobs['data'], _ = prep_im_for_blob_cfm(im, cfg.TEST.SCALES[LO_SCALE:HI_SCALE])
input_blobs['data'] = input_blobs['data'].astype(np.float32, copy=False)
input_start = 0
for test_iter in xrange(num_iter_this_scale):
input_end = min(input_start + max_rois_this_scale, boxes_this_scale.shape[0])
input_box = boxes_this_scale[input_start:input_end, :]
input_mask = masks_this_scale[input_start:input_end, :, :]
input_blobs['rois'] = input_box.astype(np.float32, copy=False)
input_blobs['masks'] = input_mask.reshape(input_box.shape[0], 1,
cfg.TEST.CFM_INPUT_MASK_SIZE, cfg.TEST.CFM_INPUT_MASK_SIZE
).astype(np.float32, copy=False)
input_blobs['masks'] = (input_blobs['masks'] >= cfg.BINARIZE_THRESH).astype(np.float32, copy=False)
self.net.blobs['data'].reshape(*input_blobs['data'].shape)
self.net.blobs['rois'].reshape(*input_blobs['rois'].shape)
self.net.blobs['masks'].reshape(*input_blobs['masks'].shape)
blobs_out = self.net.forward(**input_blobs)
output_mask = blobs_out['mask_prob'].copy()
output_score = blobs_out['seg_cls_prob'].copy()
res_masks = np.vstack((res_masks,
output_mask.reshape(
input_box.shape[0], 1, cfg.MASK_SIZE, cfg.MASK_SIZE
).astype(np.float32, copy=False)))
res_seg_scores = np.vstack((res_seg_scores, output_score))
input_start += max_rois_this_scale
res_boxes = np.vstack((res_boxes, orig_boxes[inds_this_scale, :]))
LO_SCALE += cfg.TEST.GROUP_SCALE
return res_masks, res_boxes, res_seg_scores
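
# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original MNC code): the adaptive
# per-class threshold used in the methods above keeps a min-heap of the best
# scores seen so far; once the heap holds max_per_set scores its root is the
# smallest score still worth keeping, so later detections below it can be
# discarded. Uses np and heapq, both imported at the top of this file.
def _demo_adaptive_threshold(score_batches, max_per_set):
    top_scores = []
    thresh = -np.inf
    for scores in score_batches:
        for val in scores:
            heapq.heappush(top_scores, val)
        while len(top_scores) > max_per_set:
            heapq.heappop(top_scores)
        if len(top_scores) == max_per_set:
            thresh = top_scores[0]
    return thresh


if __name__ == '__main__':
    batches = [np.random.rand(100) for _ in xrange(5)]
    # keep the overall top 50: the threshold converges to the 50th best score
    print 'adaptive threshold:', _demo_adaptive_threshold(batches, 50)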
| 21,633 | 51.13012 | 123 | py |
MNC | MNC-master/lib/caffeWrapper/__init__.py | 0 | 0 | 0 | py |
|
MNC | MNC-master/lib/caffeWrapper/SolverWrapper.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import os
import numpy as np
from utils.timer import Timer
from mnc_config import cfg
from db.roidb import add_bbox_regression_targets, compute_mcg_mean_std
import caffe
from caffe.proto import caffe_pb2
from google.protobuf import text_format
class SolverWrapper(object):
""" A simple wrapper around Caffe's solver.
    This wrapper gives us control over the snapshotting process, which we
use to unnormalize the learned bounding-box regression weights.
"""
def __init__(self, solver_prototxt, roidb, maskdb, output_dir, imdb,
pretrained_model=None):
self.output_dir = output_dir
if (cfg.TRAIN.HAS_RPN and cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS):
# RPN can only use precomputed normalization because there are no
# fixed statistics to compute a priori
assert cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED
if cfg.TRAIN.BBOX_REG:
if not cfg.CFM_MODE:
print 'Computing bounding-box regression targets...'
self.bbox_means, self.bbox_stds = add_bbox_regression_targets(roidb)
print 'done'
else:
# Pre-defined mcg bbox_mean and bbox_std
# We store them on disk to avoid disk level IO
# multiple times (mcg boxes are stored on disk)
mean_cache = './data/cache/mcg_bbox_mean.npy'
std_cache = './data/cache/mcg_bbox_std.npy'
roidb_dir = imdb._roidb_path
if os.path.exists(mean_cache) and os.path.exists(std_cache):
self.bbox_means = np.load(mean_cache)
self.bbox_stds = np.load(std_cache)
else:
self.bbox_means, self.bbox_stds = compute_mcg_mean_std(roidb_dir, imdb.num_classes)
self.solver = caffe.SGDSolver(solver_prototxt)
if pretrained_model is not None:
print 'Loading pretrained model weights from {:s}'.format(pretrained_model)
self.solver.net.copy_from(pretrained_model)
self.solver_param = caffe_pb2.SolverParameter()
with open(solver_prototxt, 'rt') as f:
            text_format.Merge(f.read(), self.solver_param)
if not cfg.CFM_MODE:
self.solver.net.layers[0].set_roidb(roidb)
if cfg.MNC_MODE:
self.solver.net.layers[0].set_maskdb(maskdb)
else:
self.solver.net.layers[0].set_image_info(imdb, self.bbox_means, self.bbox_stds)
def snapshot(self):
""" Take a snapshot of the network after unnormalizing the learned
bounding-box regression weights. This enables easy use at test-time.
"""
net = self.solver.net
        # Only needed when the bbox regression targets were normalized during training
scale_bbox_params = (cfg.TRAIN.BBOX_REG and
cfg.TRAIN.BBOX_NORMALIZE_TARGETS and
'bbox_pred' in net.params)
if scale_bbox_params:
# save original values
orig_0 = net.params['bbox_pred'][0].data.copy()
orig_1 = net.params['bbox_pred'][1].data.copy()
if cfg.CFM_MODE:
cfm_mean = self.bbox_means.ravel()
cfm_std = self.bbox_stds.ravel()
net.params['bbox_pred'][0].data[...] = \
(net.params['bbox_pred'][0].data * cfm_std[:, np.newaxis])
net.params['bbox_pred'][1].data[...] = \
(net.params['bbox_pred'][1].data * cfm_std + cfm_mean)
else:
# scale and shift with transform reg unnormalization; then save snapshot
net.params['bbox_pred'][0].data[...] = \
(net.params['bbox_pred'][0].data *
self.bbox_stds[:, np.newaxis])
net.params['bbox_pred'][1].data[...] = \
(net.params['bbox_pred'][1].data *
self.bbox_stds + self.bbox_means)
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
# If we specify an infix in the configuration
infix = ('_' + cfg.TRAIN.SNAPSHOT_INFIX
if cfg.TRAIN.SNAPSHOT_INFIX != '' else '')
filename = (self.solver_param.snapshot_prefix + infix +
'_iter_{:d}'.format(self.solver.iter) + '.caffemodel')
        # For the snapshot caffemodel: since MNC uses shared parameters but
        # caffe saves parameters according to layer names instead of
        # parameter names, the file would exceed 2GB, which makes the
        # program crash. Luckily, we can save it to HDF5 to avoid this issue
if not cfg.MNC_MODE:
filename = os.path.join(self.output_dir, filename)
net.save(str(filename))
else:
filename = os.path.join(self.output_dir, filename + '.h5')
net.save_to_hdf5(str(filename), False)
print 'Wrote snapshot to: {:s}'.format(filename)
if scale_bbox_params:
# restore net to original state
net.params['bbox_pred'][0].data[...] = orig_0
net.params['bbox_pred'][1].data[...] = orig_1
def train_model(self, max_iters):
last_snapshot_iter = -1
timer = Timer()
while self.solver.iter < max_iters:
timer.tic()
self.solver.step(1)
timer.toc()
if self.solver.iter % (10 * self.solver_param.display) == 0:
print 'speed: {:.3f}s / iter'.format(timer.average_time)
if self.solver.iter % cfg.TRAIN.SNAPSHOT_ITERS == 0:
last_snapshot_iter = self.solver.iter
self.snapshot()
if last_snapshot_iter != self.solver.iter:
self.snapshot()
| 6,147 | 43.230216 | 103 | py |
MNC | MNC-master/lib/pylayer/proposal_layer.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import caffe
import numpy as np
import yaml
from mnc_config import cfg
from transform.anchors import generate_anchors
from transform.bbox_transform import clip_boxes, bbox_transform_inv, filter_small_boxes
from nms.nms_wrapper import nms
DEBUG = False
PRINT_GRADIENT = 1
class ProposalLayer(caffe.Layer):
"""
Outputs object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str_)
self._feat_stride = layer_params['feat_stride']
self._anchors = generate_anchors()
self._num_anchors = self._anchors.shape[0]
self._use_clip = layer_params.get('use_clip', 0)
self._clip_denominator = float(layer_params.get('clip_base', 256))
self._clip_thresh = 1.0 / self._clip_denominator
# rois blob: holds R regions of interest, each is a 5-tuple
# (n, x1, y1, x2, y2) specifying an image batch index n and a
# rectangle (x1, y1, x2, y2)
self._top_name_map = {}
top[0].reshape(1, 5)
self._top_name_map['rois'] = 0
        # For MNC, we force the output proposals to also be used to train RPN;
        # this is achieved by passing proposal_index to anchor_target_layer
if str(self.phase) == 'TRAIN':
if cfg.TRAIN.MIX_INDEX:
top[1].reshape(1, 1)
self._top_name_map['proposal_index'] = 1
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def forward(self, bottom, top):
# Algorithm:
#
# for each (H, W) location i
# generate A anchor boxes centered on cell i
# apply predicted transform deltas at cell i to each of the A anchors
# clip predicted boxes to image
# remove predicted boxes with either height or width < threshold
# sort all (proposal, score) pairs by score from highest to lowest
# take top pre_nms_topN proposals before NMS
# apply NMS with threshold 0.7 to remaining proposals
# take after_nms_topN proposals after NMS
# return the top proposals (-> RoIs top, scores top)
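        #
        # Concretely (illustrative numbers only, typical Faster R-CNN style
        # settings): a ~600 x 1000 input with feat_stride 16 gives a
        # ~38 x 63 score map; with A = 9 anchors per cell that is ~21k
        # candidate boxes, cut to pre_nms_topN (e.g. 6000) by score and
        # then to post_nms_topN (e.g. 300) by NMS.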
assert bottom[0].data.shape[0] == 1, 'Only single item batches are supported'
cfg_key = str(self.phase) # either 'TRAIN' or 'TEST'
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
# the first set of _num_anchors channels are bg probs
# the second set are the fg probs, which we want
scores = bottom[0].data[:, self._num_anchors:, :, :]
bbox_deltas = bottom[1].data
im_info = bottom[2].data[0, :]
# 1. Generate proposals from transform deltas and shifted anchors
height, width = scores.shape[-2:]
self._height = height
self._width = width
# Enumerate all shifts
shift_x = np.arange(0, self._width) * self._feat_stride
shift_y = np.arange(0, self._height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
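        # (K = height * width of the feature map. Broadcasting the (1, A, 4)
        # anchor template against the (K, 1, 4) shifts yields every anchor at
        # every cell in one vectorized add; after the final reshape the flat
        # index is k * A + a, i.e. the anchor index a varies fastest.)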
A = self._num_anchors
K = shifts.shape[0]
anchors = self._anchors.reshape((1, A, 4)) + \
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
_, keep = clip_boxes(anchors, im_info[:2])
self._anchor_index_before_clip = keep
# Transpose and reshape predicted transform transformations to get them
# into the same order as the anchors:
#
# transform deltas will be (1, 4 * A, H, W) format
# transpose to (1, H, W, 4 * A)
# reshape to (1 * H * W * A, 4) where rows are ordered by (h, w, a)
# in slowest to fastest order
bbox_deltas = bbox_deltas.transpose((0, 2, 3, 1)).reshape((-1, 4))
# Same story for the scores:
#
# scores are (1, A, H, W) format
# transpose to (1, H, W, A)
# reshape to (1 * H * W * A, 1) where rows are ordered by (h, w, a)
scores = scores.transpose((0, 2, 3, 1)).reshape((-1, 1))
# Convert anchors into proposals via transform transformations
proposals = bbox_transform_inv(anchors, bbox_deltas)
# 2. clip predicted boxes to image
proposals, keep = clip_boxes(proposals, im_info[:2])
        # Record the corresponding index before and after clip
        # This step doesn't need unmap
        # We need it to decide whether to do back-propagation
self._proposal_index_before_clip = keep
# 3. remove predicted boxes with either height or width < threshold
# (NOTE: convert min_size to input image scale stored in im_info[2])
keep = filter_small_boxes(proposals, min_size * im_info[2])
proposals = proposals[keep, :]
scores = scores[keep]
self._ind_after_filter = keep
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
order = scores.ravel().argsort()[::-1]
if pre_nms_topN > 0:
order = order[:pre_nms_topN]
proposals = proposals[order, :]
scores = scores[order]
self._ind_after_sort = order
# 6. apply nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
keep = nms(np.hstack((proposals, scores)), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
# Output rois blob
# Our RPN implementation only supports a single input image, so all
# batch inds are 0
batch_inds = np.zeros((proposals.shape[0], 1), dtype=np.float32)
proposals = np.hstack((batch_inds, proposals.astype(np.float32, copy=False)))
self._proposal_index = keep
blobs = {
'rois': proposals
}
if str(self.phase) == 'TRAIN':
if cfg.TRAIN.MIX_INDEX:
all_rois_index = self._ind_after_filter[self._ind_after_sort[self._proposal_index]].reshape(1, len(keep))
blobs['proposal_index'] = all_rois_index
# Copy data to forward to top layer
for blob_name, blob in blobs.iteritems():
top[self._top_name_map[blob_name]].reshape(*blob.shape)
top[self._top_name_map[blob_name]].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
if propagate_down[1]:
bottom[1].diff.fill(0.0)
            # only consider non-zero top gradients to speed up the computation
top_non_zero_ind = np.unique(np.where(abs(top[0].diff[:, :]) > 0)[0])
proposal_index = np.asarray(self._proposal_index)
# unmap indexes to the original scale
unmap_val = self._ind_after_filter[self._ind_after_sort[proposal_index[top_non_zero_ind]]]
# not back propagate gradient if proposals/anchors are out of image boundary
# this is a 0/1 mask so we just multiply them when calculating bottom gradient
weight_out_proposal = np.in1d(unmap_val, self._proposal_index_before_clip)
weight_out_anchor = np.in1d(unmap_val, self._anchor_index_before_clip)
# unmap_val are arranged as (H * W * A) as stated in forward comment
# with A as the fastest dimension (which is different from caffe)
c = unmap_val % self._num_anchors
w = (unmap_val / self._num_anchors) % self._width
h = (unmap_val / self._num_anchors / self._width) % self._height
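            # e.g. with A = 9 anchors and a 63-wide feature map (illustrative
            # numbers), flat index 12345 decodes to anchor c = 12345 % 9 = 6,
            # column w = (12345 // 9) % 63 = 48 and row h = 12345 // (9 * 63) = 21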
# width and height should be in feature map scale
anchor_w = (self._anchors[c, 2] - self._anchors[c, 0])
anchor_h = (self._anchors[c, 3] - self._anchors[c, 1])
dfdx1 = top[0].diff[top_non_zero_ind, 1]
dfdy1 = top[0].diff[top_non_zero_ind, 2]
dfdx2 = top[0].diff[top_non_zero_ind, 3]
dfdy2 = top[0].diff[top_non_zero_ind, 4]
dfdxc = dfdx1 + dfdx2
dfdyc = dfdy1 + dfdy2
dfdw = 0.5 * (dfdx2 - dfdx1)
dfdh = 0.5 * (dfdy2 - dfdy1)
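            # Chain rule through the center/size parameterization
            # x1 = xc - w / 2, x2 = xc + w / 2:
            # df/dxc = df/dx1 + df/dx2 and df/dw = 0.5 * (df/dx2 - df/dx1),
            # and likewise for yc and h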
bottom[1].diff[0, 4*c, h, w] = \
dfdxc * anchor_w * weight_out_proposal * weight_out_anchor
bottom[1].diff[0, 4*c+1, h, w] = \
dfdyc * anchor_h * weight_out_proposal * weight_out_anchor
bottom[1].diff[0, 4*c+2, h, w] = \
dfdw * np.exp(bottom[1].data[0, 4*c+2, h, w]) * anchor_w * weight_out_proposal * weight_out_anchor
bottom[1].diff[0, 4*c+3, h, w] = \
dfdh * np.exp(bottom[1].data[0, 4*c+3, h, w]) * anchor_h * weight_out_proposal * weight_out_anchor
# if use gradient clip, constraint gradient inside [-thresh, thresh]
if self._use_clip:
bottom[1].diff[0, 4*c, h, w] = np.minimum(np.maximum(
bottom[1].diff[0, 4*c, h, w], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[0, 4*c+1, h, w] = np.minimum(np.maximum(
bottom[1].diff[0, 4*c+1, h, w], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[0, 4*c+2, h, w] = np.minimum(np.maximum(
bottom[1].diff[0, 4*c+2, h, w], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[0, 4*c+3, h, w] = np.minimum(np.maximum(
bottom[1].diff[0, 4*c+3, h, w], -self._clip_thresh), self._clip_thresh)
| 10,386 | 43.965368 | 121 | py |
MNC | MNC-master/lib/pylayer/mnc_data_layer.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import cv2
import numpy as np
import yaml
import caffe
from mnc_config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
class MNCDataLayer(caffe.Layer):
"""
Provide image, image w/h/scale, gt boxes/masks and mask info to upper layers
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str_)
self._num_classes = layer_params['num_classes']
self._name_to_top_map = {}
# data blob: holds a batch of N images, each with 3 channels
top[0].reshape(cfg.TRAIN.IMS_PER_BATCH, 3, max(cfg.TRAIN.SCALES), cfg.TRAIN.MAX_SIZE)
self._name_to_top_map['data'] = 0
        # note: asserting a parenthesized tuple would always be truthy
        assert cfg.TRAIN.HAS_RPN, 'Use RPN for this project'
# Just pseudo setup
top[1].reshape(1, 3)
self._name_to_top_map['im_info'] = 1
top[2].reshape(1, 4)
self._name_to_top_map['gt_boxes'] = 2
if cfg.MNC_MODE:
top[3].reshape(1, 21, 21)
self._name_to_top_map['gt_masks'] = 3
top[4].reshape(1, 3)
self._name_to_top_map['mask_info'] = 4
assert len(top) == len(self._name_to_top_map)
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self._get_next_minibatch()
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
# Reshape net's input blobs
top[top_ind].reshape(*blob.shape)
# Copy data into net's input blobs
top[top_ind].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def set_roidb(self, roidb):
"""Set the roidb to be used by this layer during training."""
self._roidb = roidb
self._shuffle_roidb_inds()
def set_maskdb(self, maskdb):
self._maskdb = maskdb
self._shuffle_roidb_inds()
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
if cfg.TRAIN.ASPECT_GROUPING:
widths = np.array([r['width'] for r in self._roidb])
heights = np.array([r['height'] for r in self._roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
inds = np.hstack((
np.random.permutation(horz_inds),
np.random.permutation(vert_inds)))
inds = np.reshape(inds, (-1, 2))
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1,))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_image_blob(self, roidb, scale_inds):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = 1 # len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(roidb['image'])
if roidb['flipped']:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _get_next_minibatch(self):
"""
Return the blobs to be used for the next minibatch.
"""
assert cfg.TRAIN.IMS_PER_BATCH == 1, 'Only single batch forwarding is supported'
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb):
self._shuffle_roidb_inds()
db_inds = self._perm[self._cur]
self._cur += 1
roidb = self._roidb[db_inds]
random_scale_inds = np.random.randint(0, high=len(cfg.TRAIN.SCALES), size=1)
im_blob, im_scales = self._get_image_blob(roidb, random_scale_inds)
gt_label = np.where(roidb['gt_classes'] != 0)[0]
gt_boxes = np.hstack((roidb['boxes'][gt_label, :] * im_scales[0],
roidb['gt_classes'][gt_label, np.newaxis])).astype(np.float32)
blobs = {
'data': im_blob,
'gt_boxes': gt_boxes,
'im_info': np.array([[im_blob.shape[2], im_blob.shape[3], im_scales[0]]], dtype=np.float32)
}
if cfg.MNC_MODE:
maskdb = self._maskdb[db_inds]
mask_list = maskdb['gt_masks']
mask_max_x = maskdb['mask_max'][0]
mask_max_y = maskdb['mask_max'][1]
gt_masks = np.zeros((len(mask_list), mask_max_y, mask_max_x))
mask_info = np.zeros((len(mask_list), 2))
for j in xrange(len(mask_list)):
mask = mask_list[j]
mask_x = mask.shape[1]
mask_y = mask.shape[0]
gt_masks[j, 0:mask_y, 0:mask_x] = mask
mask_info[j, 0] = mask_y
mask_info[j, 1] = mask_x
blobs['gt_masks'] = gt_masks
blobs['mask_info'] = mask_info
return blobs
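
# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original MNC code): the aspect
# grouping in _shuffle_roidb_inds shuffles horizontal and vertical images
# separately and reshapes the result to (-1, 2), so consecutive index pairs
# share an orientation (useful when two images form one minibatch). Assumes
# an even number of images per orientation group.
def _demo_aspect_grouping(widths, heights):
    horz_inds = np.where(widths >= heights)[0]
    vert_inds = np.where(widths < heights)[0]
    inds = np.hstack((np.random.permutation(horz_inds),
                      np.random.permutation(vert_inds)))
    inds = np.reshape(inds, (-1, 2))
    row_perm = np.random.permutation(np.arange(inds.shape[0]))
    return np.reshape(inds[row_perm, :], (-1,))


if __name__ == '__main__':
    w = np.array([640, 480, 640, 480])
    h = np.array([480, 640, 480, 640])
    print _demo_aspect_grouping(w, h)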
| 5,826 | 37.589404 | 103 | py |
MNC | MNC-master/lib/pylayer/proposal_target_layer.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import caffe
import yaml
import numpy as np
import numpy.random as npr
from mnc_config import cfg
from transform.bbox_transform import \
bbox_transform, bbox_compute_targets, \
scale_boxes, get_bbox_regression_label
from transform.anchors import generate_anchors
from transform.mask_transform import intersect_mask
from utils.cython_bbox import bbox_overlaps
class ProposalTargetLayer(caffe.Layer):
"""
Assign object detection proposals to ground-truth targets. Produces proposal
classification labels and bounding-box regression targets.
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str_)
self._anchors = generate_anchors()
self._num_anchors = self._anchors.shape[0]
self._num_classes = layer_params['num_classes']
self._bp_all = layer_params.get('bp_all', True)
self._top_name_map = {}
top[0].reshape(1, 5)
self._top_name_map['rois'] = 0
top[1].reshape(1, 1)
self._top_name_map['labels'] = 1
top[2].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_targets'] = 2
top[3].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_inside_weights'] = 3
top[4].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_outside_weights'] = 4
# Add mask-related information
if cfg.MNC_MODE:
top[5].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._top_name_map['mask_targets'] = 5
top[6].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._top_name_map['mask_weight'] = 6
top[7].reshape(1, 4)
self._top_name_map['gt_masks_info'] = 7
if cfg.TRAIN.MIX_INDEX:
top[8].reshape(1, 4)
self._top_name_map['fg_inds'] = 8
top[9].reshape(1, 4)
self._top_name_map['bg_inds'] = 9
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def forward(self, bottom, top):
# Proposal ROIs (0, x1, y1, x2, y2) coming from RPN
# (i.e., rpn.proposal_layer.ProposalLayer), or any other source
all_rois = bottom[0].data
# GT boxes (x1, y1, x2, y2, label)
gt_boxes = bottom[1].data
im_info = bottom[2].data[0, :]
im_scale = im_info[2]
# get original masks
if cfg.MNC_MODE:
gt_masks = bottom[3].data
mask_info = bottom[4].data
else:
gt_masks = None
mask_info = None
# Include ground-truth boxes in the set of candidate rois
zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
all_rois = np.vstack(
(all_rois, np.hstack((zeros, gt_boxes[:, :-1])))
)
# Sanity check: single batch only
assert np.all(all_rois[:, 0] == 0), \
'Only single item batches are supported'
num_images = 1
rois_per_image = cfg.TRAIN.BATCH_SIZE / num_images
# Sample rois with classification labels and bounding box regression targets
blobs, fg_inds, bg_inds, keep_inds = _sample_rois(
all_rois, gt_boxes, rois_per_image, self._num_classes, gt_masks, im_scale, mask_info)
self._keep_ind = keep_inds if self._bp_all else fg_inds
for blob_name, blob in blobs.iteritems():
top[self._top_name_map[blob_name]].reshape(*blob.shape)
top[self._top_name_map[blob_name]].data[...] = blob.astype(np.float32, copy=False)
if cfg.TRAIN.MIX_INDEX:
all_rois_index = bottom[5].data
fg_inds = fg_inds[fg_inds < all_rois_index.shape[1]].astype(int)
fg_inds = all_rois_index[0, fg_inds]
bg_inds = all_rois_index[0, bg_inds.astype(int)]
top[self._top_name_map['fg_inds']].reshape(*fg_inds.shape)
top[self._top_name_map['fg_inds']].data[...] = fg_inds
top[self._top_name_map['bg_inds']].reshape(*bg_inds.shape)
top[self._top_name_map['bg_inds']].data[...] = bg_inds
def backward(self, top, propagate_down, bottom):
if propagate_down[0]:
bottom[0].diff.fill(0.)
# Eliminate gt_inds from the keep inds
valid_inds = np.where(self._keep_ind < bottom[0].diff.shape[0])[0]
valid_bot_inds = self._keep_ind[valid_inds].astype(int)
bottom[0].diff[valid_bot_inds, :] = top[0].diff[valid_inds, :]
def _sample_rois(all_rois, gt_boxes, rois_per_image, num_classes, gt_masks, im_scale, mask_info):
"""
Generate a random sample of RoIs comprising
foreground and background examples.
"""
# overlaps: (rois x gt_boxes)
overlaps = bbox_overlaps(
np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),
np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# Sample foreground indexes
fg_inds = []
for i in xrange(len(cfg.TRAIN.FG_FRACTION)):
cur_inds = np.where((max_overlaps >= cfg.TRAIN.FG_THRESH_LO[i]) &
(max_overlaps <= cfg.TRAIN.FG_THRESH_HI[i]))[0]
        # npr.choice expects an integer sample size
        cur_rois_this_image = int(min(cur_inds.size, np.round(rois_per_image *
                                                              cfg.TRAIN.FG_FRACTION[i])))
if cur_inds.size > 0:
cur_inds = npr.choice(cur_inds, size=cur_rois_this_image, replace=False)
fg_inds = np.hstack((fg_inds, cur_inds))
fg_inds = np.unique(fg_inds)
fg_rois_per_image = fg_inds.size
# Sample background indexes according to number of foreground
bg_rois_per_this_image = rois_per_image - fg_rois_per_image
bg_inds = []
for i in xrange(len(cfg.TRAIN.BG_FRACTION)):
cur_inds = np.where((max_overlaps >= cfg.TRAIN.BG_THRESH_LO[i]) &
(max_overlaps <= cfg.TRAIN.BG_THRESH_HI[i]))[0]
        # npr.choice expects an integer sample size
        cur_rois_this_image = int(min(cur_inds.size, np.round(bg_rois_per_this_image *
                                                              cfg.TRAIN.BG_FRACTION[i])))
if cur_inds.size > 0:
cur_inds = npr.choice(cur_inds, size=cur_rois_this_image, replace=False)
bg_inds = np.hstack((bg_inds, cur_inds))
bg_inds = np.unique(bg_inds)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds).astype(int)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[fg_rois_per_image:] = 0
rois = all_rois[keep_inds]
bbox_target_data = bbox_compute_targets(
rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], normalize=True)
bbox_target_data = np.hstack((labels[:, np.newaxis], bbox_target_data))\
.astype(np.float32, copy=False)
bbox_targets, bbox_inside_weights = get_bbox_regression_label(
bbox_target_data, num_classes)
bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)
blobs = {
'rois': rois,
'labels': labels,
'bbox_targets': bbox_targets,
'bbox_inside_weights': bbox_inside_weights,
'bbox_outside_weights': bbox_outside_weights
}
if cfg.MNC_MODE:
scaled_rois = rois[:, 1:5] / float(im_scale)
# map to original image space
scaled_gt_boxes = gt_boxes[:, :4] / float(im_scale)
pos_masks = np.zeros((len(keep_inds), 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
top_mask_info = np.zeros((len(keep_inds), 12))
top_mask_info[len(fg_inds):, :] = -1
for i, val in enumerate(fg_inds):
gt_box = scaled_gt_boxes[gt_assignment[val]]
gt_box = np.around(gt_box).astype(int)
ex_box = np.around(scaled_rois[i]).astype(int)
gt_mask = gt_masks[gt_assignment[val]]
gt_mask_info = mask_info[gt_assignment[val]]
            # mask_info entries come from a float32 blob; cast before slicing
            gt_mask = gt_mask[0:int(gt_mask_info[0]), 0:int(gt_mask_info[1])]
# calculate mask regression targets
# (intersection of bounding box and gt mask)
ex_mask = intersect_mask(ex_box, gt_box, gt_mask)
pos_masks[i, ...] = ex_mask
top_mask_info[i, 0] = gt_assignment[val]
top_mask_info[i, 1] = gt_mask_info[0]
top_mask_info[i, 2] = gt_mask_info[1]
top_mask_info[i, 3] = labels[i]
top_mask_info[i, 4:8] = ex_box
top_mask_info[i, 8:12] = gt_box
mask_weight = np.zeros((rois.shape[0], 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
        # only box-level foreground ROIs receive mask regression supervision
mask_weight[0:len(fg_inds), :, :, :] = 1
blobs['mask_targets'] = pos_masks
blobs['mask_weight'] = mask_weight
blobs['gt_masks_info'] = top_mask_info
return blobs, fg_inds, bg_inds, keep_inds
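
# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original MNC code): the core of the
# stratified sampling in _sample_rois above. ROIs are binned by their max
# overlap with the ground truth, and each bin contributes at most a fixed
# fraction of the minibatch, sampled without replacement. The thresholds and
# fractions below are toy values, not the cfg.TRAIN settings.
def _demo_stratified_sample(max_overlaps, lo, hi, fraction, rois_per_image):
    inds = np.where((max_overlaps >= lo) & (max_overlaps <= hi))[0]
    n_keep = int(min(inds.size, np.round(rois_per_image * fraction)))
    if inds.size > 0:
        inds = npr.choice(inds, size=n_keep, replace=False)
    return inds


if __name__ == '__main__':
    overlaps = npr.rand(100)
    fg = _demo_stratified_sample(overlaps, 0.5, 1.0, 0.25, 64)
    bg = _demo_stratified_sample(overlaps, 0.0, 0.5, 0.75, 64)
    print 'sampled %d fg and %d bg rois' % (len(fg), len(bg))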
| 9,255 | 41.654378 | 97 | py |
MNC | MNC-master/lib/pylayer/mask_layer.py | # --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import caffe
import cv2
import numpy as np
from transform.mask_transform import mask_overlap
from mnc_config import cfg
class MaskLayer(caffe.Layer):
"""
    This layer takes sigmoid-predicted masks as input and assigns
    each one a segmentation-classifier label according to its
    region overlap with the ground truth
"""
def setup(self, bottom, top):
self._phase = str(self.phase)
self._top_name_map = {}
top[0].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._top_name_map['mask_proposal'] = 0
if self._phase == 'TRAIN':
top[1].reshape(1, 1)
self._top_name_map['mask_proposal_label'] = 1
def reshape(self, bottom, top):
"""
Reshaping happens during the call to forward
"""
pass
def forward(self, bottom, top):
if str(self.phase) == 'TRAIN':
blobs = self.forward_train(bottom, top)
elif str(self.phase) == 'TEST':
blobs = self.forward_test(bottom, top)
else:
print 'Unrecognized phase'
raise NotImplementedError
for blob_name, blob in blobs.iteritems():
top[self._top_name_map[blob_name]].reshape(*blob.shape)
top[self._top_name_map[blob_name]].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
if propagate_down[0]:
bottom[0].diff.fill(0.)
top_grad = top[0].diff.reshape(top[0].diff.shape[0], cfg.MASK_SIZE * cfg.MASK_SIZE)
bottom[0].diff[self.pos_sample, :] = top_grad[self.pos_sample, :]
def forward_train(self, bottom, top):
# Take sigmoid prediction as input
mask_pred = bottom[0].data
# get ground truth mask and labels
gt_masks = bottom[1].data
gt_masks_info = bottom[2].data
num_mask_pred = mask_pred.shape[0]
top_label = np.zeros((gt_masks_info.shape[0], 1))
# 2. Calculate region overlap
# Since the target gt mask may have different size
# We need to resize predicted masks into different sizes
mask_size = cfg.MASK_SIZE
for i in xrange(num_mask_pred):
# if the bounding box is itself background
if gt_masks_info[i][0] == -1:
top_label[i][0] = 0
continue
else:
info = gt_masks_info[i]
                # info entries come from a float32 blob; cast before indexing
                gt_mask = gt_masks[int(info[0])][0:int(info[1]), 0:int(info[2])]
ex_mask = mask_pred[i].reshape((mask_size, mask_size))
ex_box = np.round(info[4:8]).astype(int)
gt_box = np.round(info[8:12]).astype(int)
                # resize the predicted mask to the box size; note that
                # cv2.resize takes (width, height)
ex_mask = cv2.resize(ex_mask.astype(np.float32), (ex_box[2] - ex_box[0] + 1,
ex_box[3] - ex_box[1] + 1))
ex_mask = ex_mask >= cfg.BINARIZE_THRESH
top_label[i][0] = 0 if mask_overlap(ex_box, gt_box, ex_mask, gt_mask) < cfg.TRAIN.FG_SEG_THRESH else info[3]
# output continuous mask for MNC
resized_mask_pred = mask_pred.reshape((num_mask_pred, 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
self.pos_sample = np.where(top_label > 0)[0]
blobs = {
'mask_proposal': resized_mask_pred,
'mask_proposal_label': top_label
}
return blobs
def forward_test(self, bottom, top):
mask_pred = bottom[0].data
num_mask_pred = mask_pred.shape[0]
resized_mask_pred = mask_pred.reshape((num_mask_pred, 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
blobs = {
'mask_proposal': resized_mask_pred
}
return blobs
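
# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original MNC code): forward_train
# labels a predicted mask as foreground only if it overlaps the ground-truth
# mask well enough; the quantity compared against cfg.TRAIN.FG_SEG_THRESH is
# a mask-level IoU. The toy function below computes the plain IoU of two
# binary masks on a shared canvas (the real mask_overlap helper additionally
# takes the two boxes into account to place the masks in image space).
def _demo_mask_iou(mask_a, mask_b):
    inter = np.logical_and(mask_a, mask_b).sum()
    union = np.logical_or(mask_a, mask_b).sum()
    return float(inter) / union if union > 0 else 0.0


if __name__ == '__main__':
    a = np.zeros((4, 4), dtype=bool)
    b = np.zeros((4, 4), dtype=bool)
    a[:2, :2] = True  # 4 pixels
    b[:2, :] = True   # 8 pixels, 4 shared
    print 'mask IoU:', _demo_mask_iou(a, b)  # 4 / 8 = 0.5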
| 3,988 | 37.728155 | 124 | py |
MNC | MNC-master/lib/pylayer/stage_bridge_layer.py | # --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import caffe
import numpy as np
import yaml
from transform.bbox_transform import \
bbox_transform_inv, bbox_compute_targets, \
clip_boxes, get_bbox_regression_label
from transform.mask_transform import intersect_mask
from mnc_config import cfg
from utils.cython_bbox import bbox_overlaps
class StageBridgeLayer(caffe.Layer):
"""
This layer take input from bounding box prediction
and output a set of new rois after applying transformation
It will also provide mask/bbox regression targets
during training phase
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str_)
# bottom 0 is ~ n ROIs to train Fast RCNN
# bottom 1 is ~ n * 4(1+c) bbox prediction
# bottom 2 is ~ n * (1+c) bbox scores (seg classification)
self._phase = str(self.phase)
if self._phase == 'TRAIN':
self._use_clip = layer_params['use_clip']
self._clip_denominator = float(layer_params.get('clip_base', 64))
self._clip_thresh = 1.0 / self._clip_denominator
self._feat_stride = layer_params['feat_stride']
self._num_classes = layer_params['num_classes']
# meaning of top blobs speak for themselves
self._top_name_map = {}
if self._phase == 'TRAIN':
top[0].reshape(1, 5)
self._top_name_map['rois'] = 0
top[1].reshape(1, 1)
self._top_name_map['labels'] = 1
top[2].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._top_name_map['mask_targets'] = 2
top[3].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._top_name_map['mask_weight'] = 3
top[4].reshape(1, 4)
self._top_name_map['gt_mask_info'] = 4
top[5].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_targets'] = 5
top[6].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_inside_weights'] = 6
top[7].reshape(1, self._num_classes * 4)
self._top_name_map['bbox_outside_weights'] = 7
elif self._phase == 'TEST':
top[0].reshape(1, 5)
self._top_name_map['rois'] = 0
else:
print 'Unrecognized phase'
raise NotImplementedError
def reshape(self, bottom, top):
# reshape happens during forward
pass
def forward(self, bottom, top):
if str(self.phase) == 'TRAIN':
blobs = self.forward_train(bottom, top)
elif str(self.phase) == 'TEST':
blobs = self.forward_test(bottom, top)
else:
print 'Unrecognized phase'
raise NotImplementedError
for blob_name, blob in blobs.iteritems():
top[self._top_name_map[blob_name]].reshape(*blob.shape)
top[self._top_name_map[blob_name]].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""
Description:
        We need to implement backpropagation for 2 bottoms:
        the top diff is w.r.t. x_new, y_new, w_new, h_new
"""
deltas = bottom[1].data
dfdxc = top[0].diff[:, 1]
dfdyc = top[0].diff[:, 2]
dfdw = top[0].diff[:, 3]
dfdh = top[0].diff[:, 4]
W_old = bottom[0].data[:, 2] - bottom[0].data[:, 0]
H_old = bottom[0].data[:, 3] - bottom[0].data[:, 1]
if propagate_down[0]:
bottom[0].diff.fill(0.)
for ind, i in enumerate(self._keep_inds):
if i >= bottom[0].diff.shape[0] or self._bbox_reg_labels[i] == 0:
continue
delta_x = deltas[i, 4*self._bbox_reg_labels[i]]
delta_y = deltas[i, 4*self._bbox_reg_labels[i]+1]
delta_w = deltas[i, 4*self._bbox_reg_labels[i]+2]
delta_h = deltas[i, 4*self._bbox_reg_labels[i]+3]
bottom[0].diff[i, 1] = dfdxc[ind]
bottom[0].diff[i, 2] = dfdyc[ind]
bottom[0].diff[i, 3] = dfdw[ind] * (delta_x + np.exp(delta_w))
bottom[0].diff[i, 4] = dfdh[ind] * (delta_y + np.exp(delta_h))
if propagate_down[1]:
bottom[1].diff.fill(0.)
for ind, i in enumerate(self._keep_inds):
if i >= bottom[1].diff.shape[0] or i not in self._clip_keep or self._bbox_reg_labels[i] == 0:
continue
delta_w = deltas[i, 4*self._bbox_reg_labels[i]+2]
delta_h = deltas[i, 4*self._bbox_reg_labels[i]+3]
bottom[1].diff[i, 4*self._bbox_reg_labels[i]] = dfdxc[ind] * W_old[i]
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+1] = dfdyc[ind] * H_old[i]
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+2] = dfdw[ind] * np.exp(delta_w) * W_old[i]
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+3] = dfdh[ind] * np.exp(delta_h) * H_old[i]
if self._use_clip:
bottom[1].diff[i, 4*self._bbox_reg_labels[i]] = np.minimum(np.maximum(
bottom[1].diff[i, 4*self._bbox_reg_labels[i]], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+1] = np.minimum(np.maximum(
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+1], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+2] = np.minimum(np.maximum(
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+2], -self._clip_thresh), self._clip_thresh)
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+3] = np.minimum(np.maximum(
bottom[1].diff[i, 4*self._bbox_reg_labels[i]+3], -self._clip_thresh), self._clip_thresh)
def forward_train(self, bottom, top):
"""
During forward, we need to do several things:
1. Apply bounding box regression output which has highest
classification score to proposed ROIs
        2. Sample ROIs based on their current overlaps and assign
        labels to them
3. Make mask regression targets and positive/negative weights,
just like the proposal_target_layer
"""
rois = bottom[0].data
bbox_deltas = bottom[1].data
# Apply bounding box regression according to maximum segmentation score
seg_scores = bottom[2].data
self._bbox_reg_labels = seg_scores[:, 1:].argmax(axis=1) + 1
gt_boxes = bottom[3].data
gt_masks = bottom[4].data
im_info = bottom[5].data[0, :]
mask_info = bottom[6].data
        # select bbox_deltas according to the class with maximum segmentation score
artificial_deltas = np.zeros((rois.shape[0], 4))
for i in xrange(rois.shape[0]):
artificial_deltas[i, :] = bbox_deltas[i, 4*self._bbox_reg_labels[i]:4*(self._bbox_reg_labels[i]+1)]
artificial_deltas[self._bbox_reg_labels == 0, :] = 0
all_rois = np.zeros((rois.shape[0], 5))
all_rois[:, 0] = 0
all_rois[:, 1:5] = bbox_transform_inv(rois[:, 1:5], artificial_deltas)
zeros = np.zeros((gt_boxes.shape[0], 1), dtype=gt_boxes.dtype)
all_rois = np.vstack(
(all_rois, np.hstack((zeros, gt_boxes[:, :-1])))
)
all_rois[:, 1:5], self._clip_keep = clip_boxes(all_rois[:, 1:5], im_info[:2])
labels, rois_out, fg_inds, keep_inds, mask_targets, top_mask_info, bbox_targets, bbox_inside_weights = \
self._sample_output(all_rois, gt_boxes, im_info[2], gt_masks, mask_info)
bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)
self._keep_inds = keep_inds
mask_weight = np.zeros((rois_out.shape[0], 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
mask_weight[0:len(fg_inds), :, :, :] = 1
blobs = {
'rois': rois_out,
'labels': labels,
'mask_targets': mask_targets,
'mask_weight': mask_weight,
'gt_mask_info': top_mask_info,
'bbox_targets': bbox_targets,
'bbox_inside_weights': bbox_inside_weights,
'bbox_outside_weights': bbox_outside_weights
}
return blobs
def _sample_output(self, all_rois, gt_boxes, im_scale, gt_masks, mask_info):
overlaps = bbox_overlaps(
np.ascontiguousarray(all_rois[:, 1:5], dtype=np.float),
np.ascontiguousarray(gt_boxes[:, :4], dtype=np.float))
gt_assignment = overlaps.argmax(axis=1)
max_overlaps = overlaps.max(axis=1)
labels = gt_boxes[gt_assignment, 4]
# Sample foreground indexes
fg_inds = np.where(max_overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
bg_inds = np.where(max_overlaps < cfg.TRAIN.BBOX_THRESH)[0]
keep_inds = np.append(fg_inds, bg_inds).astype(int)
# Select sampled values from various arrays:
labels = labels[keep_inds]
# Clamp labels for the background RoIs to 0
labels[len(fg_inds):] = 0
rois = all_rois[keep_inds]
bbox_target_data = bbox_compute_targets(
rois[:, 1:5], gt_boxes[gt_assignment[keep_inds], :4], normalize=True)
bbox_target_data = np.hstack((labels[:, np.newaxis], bbox_target_data))\
.astype(np.float32, copy=False)
bbox_targets, bbox_inside_weights = get_bbox_regression_label(
bbox_target_data, self._num_classes)
scaled_rois = rois[:, 1:5] / float(im_scale)
scaled_gt_boxes = gt_boxes[:, :4] / float(im_scale)
pos_masks = np.zeros((len(keep_inds), 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
top_mask_info = np.zeros((len(keep_inds), 12))
top_mask_info[len(fg_inds):, :] = -1
for i, val in enumerate(fg_inds):
gt_box = scaled_gt_boxes[gt_assignment[val]]
gt_box = np.around(gt_box).astype(int)
ex_box = np.around(scaled_rois[i]).astype(int)
gt_mask = gt_masks[gt_assignment[val]]
gt_mask_info = mask_info[gt_assignment[val]]
            # gt_mask_info holds floats, so cast before slicing
            gt_mask = gt_mask[0:int(gt_mask_info[0]), 0:int(gt_mask_info[1])]
# regression targets is the intersection of bounding box and gt mask
ex_mask = intersect_mask(ex_box, gt_box, gt_mask)
pos_masks[i, ...] = ex_mask
top_mask_info[i, 0] = gt_assignment[val]
top_mask_info[i, 1] = gt_mask_info[0]
top_mask_info[i, 2] = gt_mask_info[1]
top_mask_info[i, 3] = labels[i]
top_mask_info[i, 4:8] = ex_box
top_mask_info[i, 8:12] = gt_box
return labels, rois, fg_inds, keep_inds, pos_masks, top_mask_info, bbox_targets, bbox_inside_weights
def forward_test(self, bottom, top):
rois = bottom[0].data
bbox_deltas = bottom[1].data
# get ~ n * 4(1+c) new rois
all_rois = bbox_transform_inv(rois[:, 1:5], bbox_deltas)
scores = bottom[2].data
im_info = bottom[3].data
# get highest scored category's bounding box regressor
score_max = scores.argmax(axis=1)
rois_out = np.zeros((rois.shape[0], 5))
# Single batch training
rois_out[:, 0] = 0
for i in xrange(len(score_max)):
rois_out[i, 1:5] = all_rois[i, 4*score_max[i]:4*(score_max[i]+1)]
rois_out[:, 1:5], _ = clip_boxes(rois_out[:, 1:5], im_info[0, :2])
blobs = {
'rois': rois_out
}
return blobs
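# Standalone sketch (not part of the original code) of the box arithmetic in
# forward_test above: pick the highest-scoring class per roi and decode its
# four regression values; all numbers are arbitrary demo values.
if __name__ == '__main__':
    rois = np.array([[0., 10., 10., 50., 50.]])   # (batch_ind, x1, y1, x2, y2)
    deltas = np.zeros((1, 8))                     # 2 classes x 4 coords
    deltas[0, 4:8] = [0.1, 0.1, 0.2, 0.2]         # regressor of class 1
    scores = np.array([[0.3, 0.7]])               # class 1 scores highest
    decoded = bbox_transform_inv(rois[:, 1:5], deltas)
    cls = scores.argmax(axis=1)[0]
    print decoded[0, 4 * cls:4 * (cls + 1)]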
| 11,685 | 44.648438 | 112 | py |
MNC | MNC-master/lib/pylayer/anchor_target_layer.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import yaml
import numpy as np
import caffe
from transform.anchors import generate_anchors
from utils.cython_bbox import bbox_overlaps
from utils.unmap import unmap
from mnc_config import cfg
from transform.bbox_transform import bbox_transform
class AnchorTargetLayer(caffe.Layer):
"""
Assign anchors to ground-truth targets. Produces anchor classification
labels and bounding-box regression targets.
"""
def setup(self, bottom, top):
self._anchors = generate_anchors()
self._num_anchors = self._anchors.shape[0]
layer_params = yaml.load(self.param_str_)
self._feat_stride = layer_params['feat_stride']
# allow boxes to sit over the edge by a small amount
self._allowed_border = layer_params.get('allowed_border', 0)
height, width = bottom[0].data.shape[-2:]
A = self._num_anchors
# labels
top[0].reshape(1, 1, A * height, width)
# bbox_targets
top[1].reshape(1, A * 4, height, width)
# bbox_inside_weights
top[2].reshape(1, A * 4, height, width)
# bbox_outside_weights
top[3].reshape(1, A * 4, height, width)
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward"""
pass
def forward(self, bottom, top):
# Algorithm:
#
# for each (H, W) location i
# generate 9 anchor boxes centered on cell i
# apply predicted transform deltas at cell i to each of the 9 anchors
# filter out-of-image anchors
# measure GT overlap
#
        # Output the regression targets referenced to the anchors
height, width = bottom[0].data.shape[-2:]
assert bottom[0].data.shape[0] == 1, 'Only single item batches are supported'
gt_boxes = bottom[1].data
im_info = bottom[2].data[0, :]
# 1. Generate proposals from shifted anchors
# note: unlike proposal layer, in this stage, no deltas involved
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
A = self._num_anchors
K = shifts.shape[0]
all_anchors = (self._anchors.reshape((1, A, 4)) +
shifts.reshape((1, K, 4)).transpose((1, 0, 2)))
all_anchors = all_anchors.reshape((K * A, 4))
total_anchors = int(K * A)
# only keep anchors inside the image
inds_inside = np.where(
(all_anchors[:, 0] >= -self._allowed_border) &
(all_anchors[:, 1] >= -self._allowed_border) &
(all_anchors[:, 2] < im_info[1] + self._allowed_border) & # width
(all_anchors[:, 3] < im_info[0] + self._allowed_border) # height
)[0]
# 2. For each anchor, we assign positive or negative
anchors = all_anchors[inds_inside, :]
# label: 1 is positive, 0 is negative, -1 is don't care
labels = np.empty((len(inds_inside), ), dtype=np.float32)
labels.fill(-1)
# overlaps between the anchors and the gt boxes
# overlaps (ex, gt)
overlaps = bbox_overlaps(
np.ascontiguousarray(anchors, dtype=np.float),
np.ascontiguousarray(gt_boxes, dtype=np.float))
argmax_overlaps = overlaps.argmax(axis=1)
max_overlaps = overlaps[np.arange(len(inds_inside)), argmax_overlaps]
gt_argmax_overlaps = overlaps.argmax(axis=0)
gt_max_overlaps = overlaps[gt_argmax_overlaps,
np.arange(overlaps.shape[1])]
gt_argmax_overlaps = np.where(overlaps == gt_max_overlaps)[0]
if not cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels first so that positive labels can clobber them
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
        # We assign two types of anchors as positive
# fg label: for each gt, anchor with highest overlap
labels[gt_argmax_overlaps] = 1
# fg label: above threshold IOU
labels[max_overlaps >= cfg.TRAIN.RPN_POSITIVE_OVERLAP] = 1
if cfg.TRAIN.RPN_CLOBBER_POSITIVES:
# assign bg labels last so that negative labels can clobber positives
labels[max_overlaps < cfg.TRAIN.RPN_NEGATIVE_OVERLAP] = 0
num_fg = int(cfg.TRAIN.RPN_FG_FRACTION * cfg.TRAIN.RPN_BATCHSIZE)
fg_inds = np.where(labels == 1)[0]
if len(fg_inds) > num_fg:
disable_inds = np.random.choice(
fg_inds, size=(len(fg_inds) - num_fg), replace=False)
labels[disable_inds] = -1
# subsample negative labels if we have too many
num_bg = cfg.TRAIN.RPN_BATCHSIZE - np.sum(labels == 1)
bg_inds = np.where(labels == 0)[0]
if len(bg_inds) > num_bg:
disable_inds = np.random.choice(
bg_inds, size=(len(bg_inds) - num_bg), replace=False)
labels[disable_inds] = -1
if cfg.TRAIN.MIX_INDEX:
bottom_fg = bottom[3].data
bottom_bg = bottom[4].data
unmapped_fg_ind = []
unmapped_bg_ind = []
for i in list(bottom_fg):
zal = np.where(i == inds_inside)[0]
if len(zal) > 0:
unmapped_fg_ind.append(zal[0])
for i in list(bottom_bg):
zal = np.where(i == inds_inside)[0]
if len(zal) > 0:
unmapped_bg_ind.append(zal[0])
labels[unmapped_bg_ind] = 0
labels[unmapped_fg_ind] = 1
bbox_targets = np.zeros((len(inds_inside), 4), dtype=np.float32)
bbox_targets = _compute_targets(anchors, gt_boxes[argmax_overlaps, :])
bbox_inside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
bbox_inside_weights[labels == 1, :] = np.array(cfg.TRAIN.RPN_BBOX_INSIDE_WEIGHTS)
bbox_outside_weights = np.zeros((len(inds_inside), 4), dtype=np.float32)
if cfg.TRAIN.RPN_POSITIVE_WEIGHT < 0:
# uniform weighting of examples (given non-uniform sampling)
num_examples = np.sum(labels >= 0)
positive_weights = np.ones((1, 4)) * 1.0 / num_examples
negative_weights = np.ones((1, 4)) * 1.0 / num_examples
else:
assert ((cfg.TRAIN.RPN_POSITIVE_WEIGHT > 0) &
(cfg.TRAIN.RPN_POSITIVE_WEIGHT < 1))
positive_weights = (cfg.TRAIN.RPN_POSITIVE_WEIGHT /
np.sum(labels == 1))
negative_weights = ((1.0 - cfg.TRAIN.RPN_POSITIVE_WEIGHT) /
np.sum(labels == 0))
bbox_outside_weights[labels == 1, :] = positive_weights
bbox_outside_weights[labels == 0, :] = negative_weights
# Currently all the indices are in the clipped index space
# we map up to original set of anchors
# In this process, we need to set clipped boxes as label -1, weights 0
labels = unmap(labels, total_anchors, inds_inside, fill=-1)
bbox_targets = unmap(bbox_targets, total_anchors, inds_inside, fill=0)
bbox_inside_weights = unmap(bbox_inside_weights, total_anchors, inds_inside, fill=0)
bbox_outside_weights = unmap(bbox_outside_weights, total_anchors, inds_inside, fill=0)
# labels
labels = labels.reshape((1, height, width, A)).transpose(0, 3, 1, 2)
labels = labels.reshape((1, 1, A * height, width))
top[0].reshape(*labels.shape)
top[0].data[...] = labels
# bbox_targets
bbox_targets = bbox_targets \
.reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
top[1].reshape(*bbox_targets.shape)
top[1].data[...] = bbox_targets
# bbox_inside_weights
bbox_inside_weights = bbox_inside_weights \
.reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
assert bbox_inside_weights.shape[2] == height
assert bbox_inside_weights.shape[3] == width
top[2].reshape(*bbox_inside_weights.shape)
top[2].data[...] = bbox_inside_weights
# bbox_outside_weights
bbox_outside_weights = bbox_outside_weights \
.reshape((1, height, width, A * 4)).transpose(0, 3, 1, 2)
assert bbox_outside_weights.shape[2] == height
assert bbox_outside_weights.shape[3] == width
top[3].reshape(*bbox_outside_weights.shape)
top[3].data[...] = bbox_outside_weights
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def _compute_targets(ex_rois, gt_rois):
"""
Compute bounding-box regression targets for an image.
Parameters:
-----------
ex_rois: ROIs from external source (selective search or RPN)
gt_rois: ground truth rois
Returns:
---------
    The regression targets for each anchor (applied when generating proposals)
"""
assert ex_rois.shape[0] == gt_rois.shape[0]
assert ex_rois.shape[1] == 4
assert gt_rois.shape[1] == 5
return bbox_transform(ex_rois, gt_rois[:, :4]).astype(np.float32, copy=False) | 9,757 | 40.879828 | 94 | py |
MNC | MNC-master/lib/pylayer/cfm_data_layer.py | # --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import cv2
import yaml
import scipy
import numpy as np
import numpy.random as npr
import caffe
from mnc_config import cfg
from utils.blob import prep_im_for_blob, im_list_to_blob
from transform.bbox_transform import get_bbox_regression_label, bbox_compute_targets
class CFMDataLayer(caffe.Layer):
"""
Provide image, image w/h/scale, gt boxes/masks and mask info to upper layers
"""
def setup(self, bottom, top):
layer_params = yaml.load(self.param_str_)
self._num_classes = layer_params['num_classes']
self._name_to_top_map = {}
self.input_mz = cfg.TEST.CFM_INPUT_MASK_SIZE
        # For the CFM architecture, we have ten entries since there is no intermediate layer
top[0].reshape(cfg.TRAIN.IMS_PER_BATCH, 3, max(cfg.TRAIN.SCALES), cfg.TRAIN.MAX_SIZE)
self._name_to_top_map['data'] = 0
top[1].reshape(1, 4)
self._name_to_top_map['rois'] = 1
top[2].reshape(1, 1, self.input_mz, self.input_mz)
self._name_to_top_map['masks'] = 2
top[3].reshape(1, 1)
self._name_to_top_map['box_label'] = 3
top[4].reshape(1, 1)
self._name_to_top_map['mask_label'] = 4
top[5].reshape(1, self._num_classes * 4)
self._name_to_top_map['bbox_targets'] = 5
top[6].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._name_to_top_map['mask_targets'] = 6
top[7].reshape(1, self._num_classes * 4)
self._name_to_top_map['bbox_inside_weights'] = 7
top[8].reshape(1, self._num_classes * 4)
self._name_to_top_map['bbox_outside_weights'] = 8
top[9].reshape(1, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)
self._name_to_top_map['mask_weight'] = 9
assert len(top) == len(self._name_to_top_map)
def reshape(self, bottom, top):
"""Reshaping happens during the call to forward."""
pass
def forward(self, bottom, top):
"""Get blobs and copy them into this layer's top blob vector."""
blobs = self._get_next_minibatch()
for blob_name, blob in blobs.iteritems():
top_ind = self._name_to_top_map[blob_name]
# Reshape net's input blobs
top[top_ind].reshape(*blob.shape)
# Copy data into net's input blobs
top[top_ind].data[...] = blob.astype(np.float32, copy=False)
def backward(self, top, propagate_down, bottom):
"""This layer does not propagate gradients."""
pass
def set_image_info(self, imdb, mean, std):
self.imdb = imdb
self._mean = mean
self._std = std
self._shuffle_roidb_inds()
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb."""
if cfg.TRAIN.ASPECT_GROUPING:
import PIL
num_images = len(self.imdb.image_index)
width_r = [PIL.Image.open(self.imdb.image_path_at(i)).size[0] for i in xrange(num_images)]
            height_r = [PIL.Image.open(self.imdb.image_path_at(i)).size[1] for i in xrange(num_images)]
widths = np.array([width_r[i] for i in xrange(len(width_r))])
heights = np.array([height_r[i] for i in xrange(len(height_r))])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
inds = np.hstack((
np.random.permutation(horz_inds),
np.random.permutation(vert_inds)))
inds = np.reshape(np.hstack((inds, inds+num_images)), (-1, 2))
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1,))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._cur = 0
def _get_image_blob(self, roidb, scale_inds, im_names):
"""Builds an input blob from the images in the roidb at the specified
scales.
"""
num_images = len(roidb)
processed_ims = []
im_scales = []
for i in xrange(num_images):
im = cv2.imread(im_names[i])
# here [0][0] is due to the nature of scipy.io.savemat
# since it will change True/False to [[1]] or [[0]] with shape (1,1)
            # so we check the flip flag in this roundabout way
if roidb[i]['Flip'][0][0]:
im = im[:, ::-1, :]
target_size = cfg.TRAIN.SCALES[scale_inds[i]]
im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, target_size,
cfg.TRAIN.MAX_SIZE)
im_scales.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, im_scales
def _get_next_minibatch(self):
"""
Return the blobs to be used for the next minibatch.
"""
if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._perm):
self._shuffle_roidb_inds()
db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH]
self._cur += cfg.TRAIN.IMS_PER_BATCH
total_imgs = self.imdb.num_images
roidbs = []
img_names = []
for db_ind in list(db_inds):
cache_dir = self.imdb.roidb_path_at(db_ind)
roidb = scipy.io.loadmat(cache_dir)
roidbs.append(roidb)
img_names.append(self.imdb.image_path_at(db_ind % total_imgs))
blobs = self._sample_blobs(roidbs, img_names)
return blobs
def _sample_blobs(self, roidbs, img_names):
random_scale_inds = np.random.randint(0, high=len(cfg.TRAIN.SCALES), size=cfg.TRAIN.IMS_PER_BATCH)
im_blob, im_scales = self._get_image_blob(roidbs, random_scale_inds, img_names)
rois_per_img = cfg.TRAIN.BATCH_SIZE / cfg.TRAIN.IMS_PER_BATCH
rois_blob = np.zeros((0, 5), dtype=np.float32)
masks_blob = np.zeros((0, 1, self.input_mz, self.input_mz))
box_labels_blob = np.zeros((0, 1))
mask_labels_blob = np.zeros((0, 1))
bbox_targets_blob = np.zeros((0, self._num_classes * 4))
mask_targets_blob = np.zeros((0, 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
bbox_inside_weights_blob = np.zeros((0, self._num_classes * 4))
bbox_outside_weights_blob = np.zeros((0, self._num_classes * 4))
mask_weights_blob = np.zeros((0, 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
for im_i, roidb in enumerate(roidbs):
# Sample positive/negative using box-level overlap
det_overlap = roidb['det_overlap']
num_gt = len(roidb['gt_classes'])
fg_det_inds = np.where(det_overlap >= cfg.TRAIN.FG_DET_THRESH)
keep_inds = []
for i in xrange(len(cfg.TRAIN.FRACTION_SAMPLE)):
cur_keep_inds = np.where((det_overlap >= cfg.TRAIN.THRESH_LO_SAMPLE[i]) &
(det_overlap <= cfg.TRAIN.THRESH_HI_SAMPLE[i]))[0]
                cur_rois_this_image = int(np.round(rois_per_img * cfg.TRAIN.FRACTION_SAMPLE[i]))
cur_rois_this_image = min(cur_rois_this_image, len(cur_keep_inds))
if cur_keep_inds.size > 0:
cur_keep_inds = npr.choice(cur_keep_inds, size=cur_rois_this_image, replace=False)
if i == 0:
keep_inds = cur_keep_inds
else:
keep_inds = np.unique(np.hstack((keep_inds, cur_keep_inds)))
fg_inds_det = keep_inds[np.in1d(keep_inds, fg_det_inds)]
bg_inds_det = keep_inds[np.in1d(keep_inds, fg_det_inds, invert=True)]
keep_inds = np.append(fg_inds_det, bg_inds_det).astype(int)
# Assign box-level label and mask-level label
input_box_labels = roidb['output_label'][keep_inds]
input_box_labels[len(fg_inds_det):] = 0
seg_overlap = roidb['seg_overlap'][keep_inds]
bg_inds_seg = np.where(seg_overlap < cfg.TRAIN.FG_SEG_THRESH)[0]
input_mask_labels = input_box_labels.copy()
input_mask_labels[bg_inds_seg] = 0
gt_classes = roidb['gt_classes']
input_masks = roidb['masks'][keep_inds, :, :]
input_boxes = roidb['boxes'][keep_inds, :] * im_scales[im_i]
mask_target = roidb['mask_targets']
mask_target = mask_target[keep_inds, :, :]
mask_resize = np.zeros((input_masks.shape[0], self.input_mz, self.input_mz))
for i in xrange(mask_target.shape[0]):
mask_resize[i, :, :] = cv2.resize(input_masks[i, :, :].astype(np.float), (self.input_mz, self.input_mz))
mask_resize = mask_resize >= cfg.BINARIZE_THRESH
mask_target_weights = np.zeros(mask_target.shape)
mask_target_weights[0:len(fg_inds_det), :, :] = 1
gt_boxes = roidb['boxes'][0:num_gt, :] * im_scales[im_i]
gt_assignment = roidb['gt_assignment'][:, 0]
bbox_target_data = bbox_compute_targets(input_boxes, gt_boxes[gt_assignment[keep_inds], :4], False)
# normalize targets
bbox_target_data = np.hstack((input_box_labels, bbox_target_data))\
.astype(np.float32, copy=False)
bbox_targets, bbox_inside_weights = get_bbox_regression_label(
bbox_target_data, self._num_classes)
for i in xrange(len(fg_inds_det)):
cls = gt_classes[gt_assignment[fg_inds_det[i]]][0]
if cls == 0:
continue
mean = self._mean
std = self._std
bbox_targets[i, cls*4:cls*4+4] -= mean[cls, :]
bbox_targets[i, cls*4:cls*4+4] /= std[cls, :]
bbox_outside_weights = np.array(bbox_inside_weights > 0).astype(np.float32)
input_boxes = np.hstack((im_i * np.ones((input_boxes.shape[0], 1)), input_boxes))
bz = input_boxes.shape[0]
rois_blob = np.vstack((rois_blob, input_boxes))
masks_blob = np.concatenate((masks_blob,
mask_resize.reshape(bz, 1, self.input_mz, self.input_mz)), axis=0)
box_labels_blob = np.concatenate((box_labels_blob, input_box_labels), axis=0)
mask_labels_blob = np.concatenate((mask_labels_blob, input_mask_labels), axis=0)
bbox_targets_blob = np.concatenate((bbox_targets_blob, bbox_targets), axis=0)
mask_targets_blob = np.concatenate((mask_targets_blob,
mask_target.reshape(bz, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)), axis=0)
bbox_inside_weights_blob = np.concatenate((bbox_inside_weights_blob, bbox_inside_weights), axis=0)
bbox_outside_weights_blob = np.concatenate((bbox_outside_weights_blob, bbox_outside_weights), axis=0)
mask_weights_blob = np.concatenate((mask_weights_blob,
mask_target_weights.reshape(bz, 1, cfg.MASK_SIZE, cfg.MASK_SIZE)), axis=0)
return {
'data': im_blob,
'rois': rois_blob,
'masks': masks_blob,
'box_label': box_labels_blob,
'mask_label': mask_labels_blob,
'bbox_targets': bbox_targets_blob,
'mask_targets': mask_targets_blob,
'bbox_inside_weights': bbox_inside_weights_blob,
'bbox_outside_weights': bbox_outside_weights_blob,
'mask_weight': mask_weights_blob
}
| 11,892 | 44.39313 | 122 | py |
MNC | MNC-master/lib/pylayer/__init__.py | 0 | 0 | 0 | py |
|
MNC | MNC-master/lib/utils/timer.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import time
class Timer(object):
"""A simple timer."""
def __init__(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
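# Illustrative usage sketch (not part of the original MNC code); the timed
# workload below is an arbitrary stand-in loop.
if __name__ == '__main__':
    timer = Timer()
    for _ in xrange(3):
        timer.tic()
        sum(i * i for i in xrange(100000))  # dummy workload
        timer.toc()
    # total_time accumulates across tic/toc pairs; average_time = total / calls
    print 'average time: {:.6f}s over {:d} calls'.format(timer.average_time, timer.calls)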
| 1,016 | 28.911765 | 77 | py |
MNC | MNC-master/lib/utils/voc_eval.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import xml.etree.ElementTree as ET
import os
import cPickle
import numpy as np
import cv2
import scipy.io as sio
from transform.mask_transform import mask_overlap
from mnc_config import cfg
def voc_ap(rec, prec, use_07_metric=False):
"""
Compute VOC AP given precision and recall. If use_07_metric is true, uses the
VOC 07 11 point method (default:False).
Args:
rec: recall
prec: precision
use_07_metric:
Returns:
ap: average precision
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap += p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_eval(detpath,
annopath,
imagesetfile,
classname,
cachedir,
ovthresh=0.5,
use_07_metric=False):
"""rec, prec, ap = voc_eval(detpath,
annopath,
imagesetfile,
classname,
[ovthresh],
[use_07_metric])
Top level function that does the PASCAL VOC evaluation.
detpath: Path to detections
detpath.format(classname) should produce the detection results file.
annopath: Path to annotations
annopath.format(imagename) should be the xml annotations file.
imagesetfile: Text file containing the list of images, one image per line.
classname: Category name (duh)
cachedir: Directory for caching the annotations
[ovthresh]: Overlap threshold (default = 0.5)
[use_07_metric]: Whether to use VOC07's 11 point AP computation
(default False)
"""
# assumes detections are in detpath.format(classname)
# assumes annotations are in annopath.format(imagename)
# assumes imagesetfile is a text file with each line an image name
# cachedir caches the annotations in a pickle file
# first load gt
if not os.path.isdir(cachedir):
os.mkdir(cachedir)
cachefile = os.path.join(cachedir, 'annots.pkl')
# read list of images
with open(imagesetfile, 'r') as f:
lines = f.readlines()
imagenames = [x.strip() for x in lines]
if not os.path.isfile(cachefile):
# load annots
recs = {}
for i, imagename in enumerate(imagenames):
recs[imagename] = parse_rec(annopath.format(imagename))
if i % 100 == 0:
print 'Reading annotation for {:d}/{:d}'.format(
i + 1, len(imagenames))
# save
print 'Saving cached annotations to {:s}'.format(cachefile)
with open(cachefile, 'w') as f:
cPickle.dump(recs, f)
else:
# load
with open(cachefile, 'r') as f:
recs = cPickle.load(f)
# extract gt objects for this class
class_recs = {}
npos = 0
for imagename in imagenames:
R = [obj for obj in recs[imagename] if obj['name'] == classname]
bbox = np.array([x['bbox'] for x in R])
difficult = np.array([x['difficult'] for x in R]).astype(np.bool)
det = [False] * len(R)
npos = npos + sum(~difficult)
class_recs[imagename] = {'bbox': bbox,
'difficult': difficult,
'det': det}
# read dets
detfile = detpath.format(classname)
with open(detfile, 'r') as f:
lines = f.readlines()
splitlines = [x.strip().split(' ') for x in lines]
image_ids = [x[0] for x in splitlines]
confidence = np.array([float(x[1]) for x in splitlines])
BB = np.array([[float(z) for z in x[2:]] for x in splitlines])
# sort by confidence
sorted_ind = np.argsort(-confidence)
sorted_scores = np.sort(-confidence)
BB = BB[sorted_ind, :]
image_ids = [image_ids[x] for x in sorted_ind]
# go down dets and mark TPs and FPs
nd = len(image_ids)
tp = np.zeros(nd)
fp = np.zeros(nd)
for d in range(nd):
R = class_recs[image_ids[d]]
bb = BB[d, :].astype(float)
ovmax = -np.inf
BBGT = R['bbox'].astype(float)
if BBGT.size > 0:
# compute overlaps
# intersection
ixmin = np.maximum(BBGT[:, 0], bb[0])
iymin = np.maximum(BBGT[:, 1], bb[1])
ixmax = np.minimum(BBGT[:, 2], bb[2])
iymax = np.minimum(BBGT[:, 3], bb[3])
iw = np.maximum(ixmax - ixmin + 1., 0.)
ih = np.maximum(iymax - iymin + 1., 0.)
inters = iw * ih
# union
uni = ((bb[2] - bb[0] + 1.) * (bb[3] - bb[1] + 1.) +
(BBGT[:, 2] - BBGT[:, 0] + 1.) *
(BBGT[:, 3] - BBGT[:, 1] + 1.) - inters)
overlaps = inters / uni
ovmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if ovmax > ovthresh:
if not R['difficult'][jmax]:
if not R['det'][jmax]:
tp[d] = 1.
R['det'][jmax] = 1
else:
fp[d] = 1.
else:
fp[d] = 1.
# compute precision recall
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(npos)
prec = tp / (tp + fp)
ap = voc_ap(rec, prec, use_07_metric)
return rec, prec, ap
def voc_eval_sds(det_file, seg_file, devkit_path, image_list, cls_name, cache_dir,
class_names, ov_thresh=0.5):
# 1. Check whether ground truth cache file exists
with open(image_list, 'r') as f:
lines = f.readlines()
image_names = [x.strip() for x in lines]
check_voc_sds_cache(cache_dir, devkit_path, image_names, class_names)
gt_cache = cache_dir + '/' + cls_name + '_mask_gt.pkl'
with open(gt_cache, 'rb') as f:
gt_pkl = cPickle.load(f)
# 2. Get predict pickle file for this class
with open(det_file, 'rb') as f:
boxes_pkl = cPickle.load(f)
with open(seg_file, 'rb') as f:
masks_pkl = cPickle.load(f)
# 3. Pre-compute number of total instances to allocate memory
num_image = len(image_names)
box_num = 0
for im_i in xrange(num_image):
box_num += len(boxes_pkl[im_i])
# 4. Re-organize all the predicted boxes
new_boxes = np.zeros((box_num, 5))
new_masks = np.zeros((box_num, cfg.MASK_SIZE, cfg.MASK_SIZE))
new_image = []
cnt = 0
for image_ind in xrange(len(image_names)):
boxes = boxes_pkl[image_ind]
masks = masks_pkl[image_ind]
num_instance = len(boxes)
for box_ind in xrange(num_instance):
new_boxes[cnt] = boxes[box_ind]
new_masks[cnt] = masks[box_ind]
new_image.append(image_names[image_ind])
cnt += 1
# 5. Rearrange boxes according to their scores
seg_scores = new_boxes[:, -1]
keep_inds = np.argsort(-seg_scores)
new_boxes = new_boxes[keep_inds, :]
new_masks = new_masks[keep_inds, :, :]
num_pred = new_boxes.shape[0]
# 6. Calculate t/f positive
fp = np.zeros((num_pred, 1))
tp = np.zeros((num_pred, 1))
for i in xrange(num_pred):
pred_box = np.round(new_boxes[i, :4]).astype(int)
pred_mask = new_masks[i]
pred_mask = cv2.resize(pred_mask.astype(np.float32), (pred_box[2] - pred_box[0] + 1, pred_box[3] - pred_box[1] + 1))
pred_mask = pred_mask >= cfg.BINARIZE_THRESH
image_index = new_image[keep_inds[i]]
if image_index not in gt_pkl:
fp[i] = 1
continue
gt_dict_list = gt_pkl[image_index]
# calculate max region overlap
cur_overlap = -1000
cur_overlap_ind = -1
for ind2, gt_dict in enumerate(gt_dict_list):
gt_mask_bound = np.round(gt_dict['mask_bound']).astype(int)
pred_mask_bound = pred_box
ov = mask_overlap(gt_mask_bound, pred_mask_bound, gt_dict['mask'], pred_mask)
if ov > cur_overlap:
cur_overlap = ov
cur_overlap_ind = ind2
if cur_overlap >= ov_thresh:
if gt_dict_list[cur_overlap_ind]['already_detect']:
fp[i] = 1
else:
tp[i] = 1
gt_dict_list[cur_overlap_ind]['already_detect'] = 1
else:
fp[i] = 1
# 7. Calculate precision
num_pos = 0
for key, val in gt_pkl.iteritems():
num_pos += len(val)
fp = np.cumsum(fp)
tp = np.cumsum(tp)
rec = tp / float(num_pos)
# avoid divide by zero in case the first matches a difficult gt
prec = tp / np.maximum(fp+tp, np.finfo(np.float64).eps)
ap = voc_ap(rec, prec, True)
return ap
def parse_rec(filename):
""" Parse a PASCAL VOC xml file """
tree = ET.parse(filename)
objects = []
for obj in tree.findall('object'):
obj_struct = {}
obj_struct['name'] = obj.find('name').text
obj_struct['pose'] = obj.find('pose').text
obj_struct['truncated'] = int(obj.find('truncated').text)
obj_struct['difficult'] = int(obj.find('difficult').text)
bbox = obj.find('bndbox')
obj_struct['bbox'] = [int(bbox.find('xmin').text),
int(bbox.find('ymin').text),
int(bbox.find('xmax').text),
int(bbox.find('ymax').text)]
objects.append(obj_struct)
return objects
def parse_inst(image_name, devkit_path):
"""
    Get corresponding masks, boxes, classes according to image name
Args:
image_name: input image name
devkit_path: root dir for devkit SDS
Returns:
roi/mask dictionary of this image
"""
gt_mask_im_name = os.path.join(devkit_path, 'inst',
image_name + '.mat')
gt_inst_mat = sio.loadmat(gt_mask_im_name)
gt_inst_data = gt_inst_mat['GTinst']['Segmentation'][0][0]
gt_mask_class_name = os.path.join(devkit_path, 'cls',
image_name + '.mat')
gt_cls_mat = sio.loadmat(gt_mask_class_name)
gt_cls_data = gt_cls_mat['GTcls']['Segmentation'][0][0]
unique_inst = np.unique(gt_inst_data)
# delete background pixels
background_ind = np.where(unique_inst == 0)[0]
unique_inst = np.delete(unique_inst, background_ind)
record = []
for inst_ind in xrange(unique_inst.shape[0]):
[r, c] = np.where(gt_inst_data == unique_inst[inst_ind])
        mask_bound = np.zeros(4, dtype=int)  # integer bounds so the slicing below works
mask_bound[0] = np.min(c)
mask_bound[1] = np.min(r)
mask_bound[2] = np.max(c)
mask_bound[3] = np.max(r)
mask = gt_inst_data[mask_bound[1]:mask_bound[3]+1, mask_bound[0]:mask_bound[2]+1]
mask = (mask == unique_inst[inst_ind])
mask_cls = gt_cls_data[mask_bound[1]:mask_bound[3]+1, mask_bound[0]:mask_bound[2]+1]
mask_cls = mask_cls[mask]
num_cls = np.unique(mask_cls)
assert num_cls.shape[0] == 1
cur_inst = num_cls[0]
record.append({
'mask': mask,
'mask_cls': cur_inst,
'mask_bound': mask_bound
})
return record
def check_voc_sds_cache(cache_dir, devkit_path, image_names, class_names):
"""
Args:
cache_dir: output directory for cached mask annotation
devkit_path: root directory of VOCdevkitSDS
image_names: used for parse image instances
class_names: VOC 20 class names
"""
if not os.path.isdir(cache_dir):
os.mkdir(cache_dir)
exist_cache = True
for cls_name in class_names:
if cls_name == '__background__':
continue
cache_name = os.path.join(cache_dir, cls_name + '_mask_gt.pkl')
if not os.path.isfile(cache_name):
exist_cache = False
break
if not exist_cache:
# load annotations:
# create a list with size classes
record_list = [{} for _ in xrange(21)]
for i, image_name in enumerate(image_names):
record = parse_inst(image_name, devkit_path)
for j, mask_dic in enumerate(record):
cls = mask_dic['mask_cls']
mask_dic['already_detect'] = False
if image_name not in record_list[cls]:
record_list[cls][image_name] = []
record_list[cls][image_name].append(mask_dic)
if i % 100 == 0:
print 'Reading annotation for {:d}/{:d}'.format(i + 1, len(image_names))
print 'Saving cached annotations...'
for cls_ind, name in enumerate(class_names):
if name == '__background__':
continue
cachefile = os.path.join(cache_dir, name + '_mask_gt.pkl')
with open(cachefile, 'w') as f:
cPickle.dump(record_list[cls_ind], f)
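# Tiny numeric sketch (not part of the original code): voc_ap on made-up
# precision/recall points, comparing the VOC07 11-point metric against the
# exact area-under-curve computation.
if __name__ == '__main__':
    rec = np.array([0.2, 0.4, 0.4, 0.8])
    prec = np.array([1.0, 0.8, 0.6, 0.5])
    print voc_ap(rec, prec, use_07_metric=True)
    print voc_ap(rec, prec, use_07_metric=False)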
| 13,824 | 34.088832 | 124 | py |
MNC | MNC-master/lib/utils/blob.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
"""Blob helper functions."""
import numpy as np
import random
import cv2
from utils.cython_bbox import bbox_overlaps
from mnc_config import cfg
def im_list_to_blob(ims):
"""
Convert a list of images into a network input.
Assumes images are already prepared (means subtracted, BGR order, ...).
"""
max_shape = np.array([im.shape for im in ims]).max(axis=0)
num_images = len(ims)
blob = np.zeros((num_images, max_shape[0], max_shape[1], 3),
dtype=np.float32)
for i in xrange(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
# Move channels (axis 3) to axis 1
# Axis order will become: (batch elem, channel, height, width)
channel_swap = (0, 3, 1, 2)
blob = blob.transpose(channel_swap)
return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
"""Mean subtract and scale an image for use in a blob."""
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
return im, im_scale
def prep_im_for_blob_cfm(im, input_scales):
"""Converts an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale_factors (list): list of image scales (relative to im) used
in the image pyramid
"""
im_orig = im.astype(np.float32, copy=True)
im_orig -= cfg.PIXEL_MEANS
im_shape = im_orig.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
processed_ims = []
im_scale_factors = []
for target_size in input_scales:
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than MAX_SIZE
if np.round(im_scale * im_size_max) > cfg.TEST.MAX_SIZE:
im_scale = float(cfg.TEST.MAX_SIZE) / float(im_size_max)
im = cv2.resize(im_orig, None, None, fx=im_scale, fy=im_scale,
interpolation=cv2.INTER_LINEAR)
im_scale_factors.append(im_scale)
processed_ims.append(im)
# Create a blob to hold the input images
blob = im_list_to_blob(processed_ims)
return blob, np.array(im_scale_factors)
def pred_rois_for_blob(im_rois, im_scales):
"""
    Convert rois to a network input blob,
    supporting multi-scale testing
"""
im_rois = im_rois.astype(np.float, copy=False)
if len(im_scales) > 1:
widths = im_rois[:, 2] - im_rois[:, 0] + 1
heights = im_rois[:, 3] - im_rois[:, 1] + 1
areas = widths * heights
scaled_areas = areas[:, np.newaxis] * (im_scales[np.newaxis, :] ** 2)
diff_areas = np.abs(scaled_areas - 224 * 224)
levels = diff_areas.argmin(axis=1)[:, np.newaxis]
else:
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
im_rois = im_rois * im_scales[levels]
rois_blob = np.hstack((levels.astype(np.float), im_rois))
return rois_blob
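# Illustrative sketch (not part of the original code): build a network input
# blob from two synthetic images; the image sizes and the 600/1000 scale
# settings (the usual VOC values) are demo choices.
if __name__ == '__main__':
    np.random.seed(0)
    ims = [(np.random.rand(h, w, 3) * 255).astype(np.uint8)
           for h, w in [(480, 640), (375, 500)]]
    prepped = []
    for im in ims:
        im, im_scale = prep_im_for_blob(im, cfg.PIXEL_MEANS, 600, 1000)
        prepped.append(im)
    blob = im_list_to_blob(prepped)
    # (2, 3, max_height, max_width), channels first
    print blob.shape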
| 3,732 | 33.564815 | 77 | py |
MNC | MNC-master/lib/utils/unmap.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
def unmap(data, count, inds, fill=0):
""" Unmap a subset of item (data) back to the original set of items (of
size count) """
if len(data.shape) == 1:
ret = np.empty((count, ), dtype=np.float32)
ret.fill(fill)
ret[inds] = data
else:
ret = np.empty((count, ) + data.shape[1:], dtype=np.float32)
ret.fill(fill)
ret[inds, :] = data
return ret
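# Minimal sketch (not part of the original code): unmap scatters values
# computed for a kept subset back into an array of the original size.
if __name__ == '__main__':
    data = np.array([1., 2., 3.])   # values for the kept items
    inds = np.array([0, 2, 4])      # their indices in the original set
    # yields [ 1. -1.  2. -1.  3. -1.]
    print unmap(data, 6, inds, fill=-1)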
| 745 | 31.434783 | 77 | py |
MNC | MNC-master/lib/utils/__init__.py | 0 | 0 | 0 | py |
|
MNC | MNC-master/lib/utils/vis_seg.py | # --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
import cPickle
import os
import cv2
from PIL import Image
from mnc_config import cfg
def vis_seg(img_names, cls_names, output_dir, gt_dir):
"""
    This function plots segmentation results to a specific directory
    Args:
        img_names: list of image names to visualize
"""
assert os.path.exists(output_dir)
# a list of dictionary
inst_dir = os.path.join(output_dir, 'SegInst')
cls_dir = os.path.join(output_dir, 'SegCls')
res_dir = os.path.join(output_dir, 'SegRes')
if not os.path.isdir(inst_dir):
os.mkdir(inst_dir)
if not os.path.isdir(cls_dir):
os.mkdir(cls_dir)
if not os.path.isdir(res_dir):
os.mkdir(res_dir)
res_list = _prepare_dict(img_names, cls_names, output_dir)
for img_ind, image_name in enumerate(img_names):
target_inst_file = os.path.join(inst_dir, image_name + '.jpg')
target_cls_file = os.path.join(cls_dir, image_name + '.jpg')
print image_name
gt_image = gt_dir + '/img/' + image_name + '.jpg'
img_data = cv2.imread(gt_image)
img_width = img_data.shape[1]
img_height = img_data.shape[0]
pred_dict = res_list[img_ind]
inst_img, cls_img = _convert_pred_to_image(img_width, img_height, pred_dict)
color_map = _get_voc_color_map()
inst_out_img = np.zeros((img_height, img_width, 3))
cls_out_img = np.zeros((img_height, img_width, 3))
for i in xrange(img_height):
for j in xrange(img_width):
inst_out_img[i][j] = color_map[inst_img[i][j]][::-1]
cls_out_img[i][j] = color_map[cls_img[i][j]][::-1]
cv2.imwrite(target_inst_file, inst_out_img)
cv2.imwrite(target_cls_file, cls_out_img)
background = Image.open(gt_image)
mask = Image.open(target_cls_file)
background = background.convert('RGBA')
mask = mask.convert('RGBA')
superimpose_image = Image.blend(background, mask, 0.8)
name = os.path.join(res_dir, image_name + '.png')
superimpose_image.save(name, 'PNG')
def _prepare_dict(img_names, cls_names, cache_dir, vis_thresh=0.5):
"""
Returns:
list, each list is a dictionary contains mask list, box list
"""
res_list = []
det_file = os.path.join(cache_dir, 'res_boxes.pkl')
with open(det_file, 'rb') as f:
det_pkl = cPickle.load(f)
seg_file = os.path.join(cache_dir, 'res_masks.pkl')
with open(seg_file, 'rb') as f:
seg_pkl = cPickle.load(f)
for img_ind, image_name in enumerate(img_names):
box_for_img = []
mask_for_img = []
cls_for_img = []
for cls_ind, cls_name in enumerate(cls_names):
if cls_name == '__background__' or len(det_pkl[cls_ind][img_ind]) == 0:
continue
det_for_img = det_pkl[cls_ind][img_ind]
seg_for_img = seg_pkl[cls_ind][img_ind]
keep_inds = np.where(det_for_img[:, -1] >= vis_thresh)[0]
for keep in keep_inds:
box_for_img.append(det_for_img[keep])
# TODO: remove this annoying 0
mask_for_img.append(seg_for_img[keep][0])
cls_for_img.append(cls_ind)
res_dict = {'image_name': image_name,
'cls_name': cls_for_img,
'boxes': box_for_img,
'masks': mask_for_img}
res_list.append(res_dict)
return res_list
def _convert_pred_to_image(img_width, img_height, pred_dict):
num_inst = len(pred_dict['boxes'])
inst_img = np.zeros((img_height, img_width))
cls_img = np.zeros((img_height, img_width))
for i in xrange(num_inst):
box = np.round(pred_dict['boxes'][i]).astype(int)
mask = pred_dict['masks'][i]
cls_num = pred_dict['cls_name'][i]
# clip box into image space
box[0] = min(max(box[0], 0), img_width - 1)
box[1] = min(max(box[1], 0), img_height - 1)
box[2] = min(max(box[2], 0), img_width - 1)
box[3] = min(max(box[3], 0), img_height - 1)
mask = cv2.resize(mask.astype(np.float32), (box[2]-box[0]+1, box[3]-box[1]+1))
mask = mask >= cfg.BINARIZE_THRESH
part1 = (i+1) * mask.astype(np.float32)
part2 = np.multiply(np.logical_not(mask), inst_img[box[1]:box[3]+1, box[0]:box[2]+1])
part3 = np.multiply(np.logical_not(mask), cls_img[box[1]:box[3]+1, box[0]:box[2]+1])
inst_img[box[1]:box[3]+1, box[0]:box[2]+1] = part1 + part2
cls_img[box[1]:box[3]+1, box[0]:box[2]+1] = cls_num * mask.astype(np.float32) + part3
# Plot bounding boxes simultaneously
cls_img[box[1]:box[3]+1, box[0]-1:box[0]+1] = 150
cls_img[box[1]:box[3]+1, box[2]-1:box[2]+1] = 150
cls_img[box[1]-1:box[1]+1, box[0]:box[2]+1] = 150
cls_img[box[3]-1:box[3]+1, box[0]:box[2]+1] = 150
inst_img = inst_img.astype(int)
cls_img = cls_img.astype(int)
return inst_img, cls_img
def _get_voc_color_map(n=256):
color_map = np.zeros((n, 3))
for i in xrange(n):
r = b = g = 0
cid = i
for j in xrange(0, 8):
r = np.bitwise_or(r, np.left_shift(np.unpackbits(np.array([cid], dtype=np.uint8))[-1], 7-j))
g = np.bitwise_or(g, np.left_shift(np.unpackbits(np.array([cid], dtype=np.uint8))[-2], 7-j))
b = np.bitwise_or(b, np.left_shift(np.unpackbits(np.array([cid], dtype=np.uint8))[-3], 7-j))
cid = np.right_shift(cid, 3)
color_map[i][0] = r
color_map[i][1] = g
color_map[i][2] = b
return color_map
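# Quick sketch (not part of the original code): the first entries of the
# PASCAL VOC color map produced by the bit-interleaving scheme above;
# index 0 is background, indices 1..20 are the VOC classes.
if __name__ == '__main__':
    cmap = _get_voc_color_map()
    print cmap[0:4]  # (0,0,0), (128,0,0), (0,128,0), (128,128,0)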
| 5,842 | 38.47973 | 104 | py |
MNC | MNC-master/lib/transform/anchors.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
# Verify that we compute the same anchors as Shaoqing's matlab implementation:
#
# >> load output/rpn_cachedir/faster_rcnn_VOC2007_ZF_stage1_rpn/anchors.mat
# >> anchors
#
# anchors =
#
# -83 -39 100 56
# -175 -87 192 104
# -359 -183 376 200
# -55 -55 72 72
# -119 -119 136 136
# -247 -247 264 264
# -35 -79 52 96
# -79 -167 96 184
# -167 -343 184 360
# array([[ -83., -39., 100., 56.],
# [-175., -87., 192., 104.],
# [-359., -183., 376., 200.],
# [ -55., -55., 72., 72.],
# [-119., -119., 136., 136.],
# [-247., -247., 264., 264.],
# [ -35., -79., 52., 96.],
# [ -79., -167., 96., 184.],
# [-167., -343., 184., 360.]])
def generate_anchors(base_size=16, ratios=[0.5, 1, 2],
scales=2**np.arange(3, 6)):
"""
Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, 15, 15) window.
"""
base_anchor = np.array([1, 1, base_size, base_size]) - 1
ratio_anchors = _ratio_enum(base_anchor, ratios)
anchors = np.vstack([_scale_enum(ratio_anchors[i, :], scales)
for i in xrange(ratio_anchors.shape[0])])
return anchors
def _whctrs(anchor):
"""
Return width, height, x center, and y center for an anchor (window).
"""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""
Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack((x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)))
return anchors
def _ratio_enum(anchor, ratios):
"""
Enumerate a set of anchors for each aspect ratio wrt an anchor.
"""
w, h, x_ctr, y_ctr = _whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _scale_enum(anchor, scales):
"""
Enumerate a set of anchors for each scale wrt an anchor.
"""
w, h, x_ctr, y_ctr = _whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def generate_shifted_anchors(anchors, height, width, feat_stride):
# Enumerate all shifted anchors:
#
# add A anchors (1, A, 4) to
# cell K shifts (K, 1, 4) to get
# shift anchors (K, A, 4)
# reshape to (K*A, 4) shifted anchors
shift_x = np.arange(0, width) * feat_stride
shift_y = np.arange(0, height) * feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y)
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
A = anchors.shape[0]
K = shifts.shape[0]
anchors = anchors.reshape((1, A, 4)) + \
shifts.reshape((1, K, 4)).transpose((1, 0, 2))
anchors = anchors.reshape((K * A, 4))
return anchors
if __name__ == '__main__':
import time
t = time.time()
a = generate_anchors()
print time.time() - t
print a
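    # Illustrative addition (not in the original demo): tile the 9 base
    # anchors over a hypothetical 2x3 feature map with stride 16.
    shifted = generate_shifted_anchors(a, 2, 3, 16)
    print shifted.shape  # (2 * 3 * 9, 4) = (54, 4)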
from IPython import embed
embed()
| 3,927 | 28.533835 | 78 | py |
MNC | MNC-master/lib/transform/bbox_transform.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
from utils.cython_bbox import bbox_overlaps
from mnc_config import cfg
def compute_targets(rois, overlaps, labels):
"""
Compute bounding-box regression targets for an image.
"""
# Indices of ground-truth ROIs
gt_inds = np.where(overlaps == 1)[0]
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
# Get IoU overlap each ex ROI and gt ROI
ex_gt_overlaps = bbox_overlaps(
np.ascontiguousarray(rois[ex_inds, :], dtype=np.float),
np.ascontiguousarray(rois[gt_inds, :], dtype=np.float))
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
targets[ex_inds, 0] = labels[ex_inds]
targets[ex_inds, 1:] = bbox_transform(ex_rois, gt_rois)
return targets
def bbox_transform(ex_rois, gt_rois):
"""
Compute bbox regression targets of external rois
with respect to gt rois
"""
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * ex_widths
ex_ctr_y = ex_rois[:, 1] + 0.5 * ex_heights
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_rois[:, 1] + 0.5 * gt_heights
targets_dx = (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = np.log(gt_widths / ex_widths)
targets_dh = np.log(gt_heights / ex_heights)
targets = np.vstack(
(targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets
def bbox_transform_inv(boxes, deltas):
"""
    Invert the bounding box transform:
    apply deltas to anchors to get the transformed proposals
"""
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
dx = deltas[:, 0::4]
dy = deltas[:, 1::4]
dw = deltas[:, 2::4]
dh = deltas[:, 3::4]
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w
# y2
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h
return pred_boxes
def clip_boxes(boxes, im_shape):
"""
Clip boxes inside image boundaries
"""
x1 = boxes[:, 0::4]
y1 = boxes[:, 1::4]
x2 = boxes[:, 2::4]
y2 = boxes[:, 3::4]
keep = np.where((x1 >= 0) & (x2 <= im_shape[1] - 1) & (y1 >= 0) & (y2 <= im_shape[0] - 1))[0]
clipped_boxes = np.zeros(boxes.shape, dtype=boxes.dtype)
# x1 >= 0
clipped_boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
clipped_boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
clipped_boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
clipped_boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return clipped_boxes, keep
def filter_small_boxes(boxes, min_size):
"""
Remove all boxes with any side smaller than min_size.
"""
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
keep = np.where((ws >= min_size) & (hs >= min_size))[0]
return keep
def scale_boxes(boxes, alpha):
"""
Scale boxes from w/h to alpha * w/h while keep center unchanged
Args:
boxes: a set of boxes specified using x1, y1, x2, y2
alpha: scaling factor
Returns:
boxes: boxes after applying scaling
"""
w = boxes[:, 2] - boxes[:, 0] + 1
h = boxes[:, 3] - boxes[:, 1] + 1
ctr_x = boxes[:, 0] + 0.5 * w
ctr_y = boxes[:, 1] + 0.5 * h
scaled_w = w * alpha
scaled_h = h * alpha
scaled_boxes = np.zeros(boxes.shape, dtype=boxes.dtype)
scaled_boxes[:, 0] = ctr_x - 0.5 * scaled_w
scaled_boxes[:, 1] = ctr_y - 0.5 * scaled_h
scaled_boxes[:, 2] = ctr_x + 0.5 * scaled_w
scaled_boxes[:, 3] = ctr_y + 0.5 * scaled_h
return scaled_boxes
def bbox_compute_targets(ex_rois, gt_rois, normalize):
"""
Compute bounding-box regression targets for an image
Parameters:
-----------
ex_rois: ROIs from external source (anchors or proposals)
gt_rois: ground truth ROIs
    normalize: whether to normalize the targets (RPN doesn't need normalization)
Returns:
-----------
    Regression targets relative to the anchors or proposals
"""
assert ex_rois.shape == gt_rois.shape
targets = bbox_transform(ex_rois, gt_rois)
if cfg.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED and normalize:
# Optionally normalize targets by a precomputed mean and std
targets = ((targets - np.array(cfg.TRAIN.BBOX_NORMALIZE_MEANS)) /
np.array(cfg.TRAIN.BBOX_NORMALIZE_STDS))
return targets.astype(np.float32, copy=False)
def get_bbox_regression_label(bbox_target_data, num_class):
"""Bounding-box regression targets (bbox_target_data) are stored in a
compact form N x (class, tx, ty, tw, th)
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets).
Returns:
bbox_target (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
assert bbox_target_data.shape[1] == 5
clss = bbox_target_data[:, 0]
bbox_targets = np.zeros((clss.size, 4 * num_class), dtype=np.float32)
bbox_inside_weights = np.zeros(bbox_targets.shape, dtype=np.float32)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = clss[ind]
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = cfg.TRAIN.BBOX_INSIDE_WEIGHTS
return bbox_targets, bbox_inside_weights
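# Illustrative, cfg-free sketch of the 4-of-4*K layout produced above
# (hypothetical numbers): with K=3 classes, a sample of class 2 occupies
# columns 8:12 of its 12-wide row; all other columns stay zero.
if __name__ == '__main__':
    _demo_row = np.zeros(4 * 3, dtype=np.float32)
    _demo_row[4 * 2:4 * 2 + 4] = [0.1, -0.2, 0.05, 0.3]
    print(_demo_row)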
| 6,973 | 33.186275 | 97 | py |
MNC | MNC-master/lib/transform/__init__.py | 0 | 0 | 0 | py |
|
MNC | MNC-master/lib/transform/mask_transform.py | # --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
import cv2
from mnc_config import cfg
from nms.nms_wrapper import nms
from utils.cython_bbox import bbox_overlaps
from nms.mv import mv
def mask_overlap(box1, box2, mask1, mask2):
"""
    Calculate the region IoU of two masks that live
    inside different boxes
    Returns:
        intersection over union of the two masks
"""
x1 = max(box1[0], box2[0])
y1 = max(box1[1], box2[1])
x2 = min(box1[2], box2[2])
y2 = min(box1[3], box2[3])
if x1 > x2 or y1 > y2:
return 0
w = x2 - x1 + 1
h = y2 - y1 + 1
# get masks in the intersection part
start_ya = y1 - box1[1]
start_xa = x1 - box1[0]
inter_maska = mask1[start_ya: start_ya + h, start_xa:start_xa + w]
start_yb = y1 - box2[1]
start_xb = x1 - box2[0]
inter_maskb = mask2[start_yb: start_yb + h, start_xb:start_xb + w]
assert inter_maska.shape == inter_maskb.shape
inter = np.logical_and(inter_maskb, inter_maska).sum()
union = mask1.sum() + mask2.sum() - inter
if union < 1.0:
return 0
return float(inter) / float(union)
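# Illustrative usage sketch (not part of the original module): two identical
# 2x2 masks sitting in identical boxes have IoU 1.0.
if __name__ == '__main__':
    _demo_box = [0, 0, 1, 1]
    _demo_mask = np.ones((2, 2), dtype=bool)
    print(mask_overlap(_demo_box, _demo_box, _demo_mask, _demo_mask))  # 1.0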
def intersect_mask(ex_box, gt_box, gt_mask):
"""
    Calculate the intersection of an external box and gt_box,
    and mask it according to gt_mask
Args:
ex_box: external ROIS
gt_box: ground truth boxes
        gt_mask: ground truth mask, not yet resized
Returns:
regression_target: logical numpy array
"""
x1 = max(ex_box[0], gt_box[0])
y1 = max(ex_box[1], gt_box[1])
x2 = min(ex_box[2], gt_box[2])
y2 = min(ex_box[3], gt_box[3])
if x1 > x2 or y1 > y2:
        return np.zeros((cfg.MASK_SIZE, cfg.MASK_SIZE), dtype=bool)  # empty intersection at mask resolution
w = x2 - x1 + 1
h = y2 - y1 + 1
ex_starty = y1 - ex_box[1]
ex_startx = x1 - ex_box[0]
gt_starty = y1 - gt_box[1]
gt_startx = x1 - gt_box[0]
inter_maskb = gt_mask[gt_starty: gt_starty + h, gt_startx: gt_startx + w]
regression_target = np.zeros((ex_box[3] - ex_box[1] + 1, ex_box[2] - ex_box[0] + 1))
regression_target[ex_starty: ex_starty + h, ex_startx: ex_startx + w] = inter_maskb
regression_target = regression_target.astype(np.float32)
regression_target = cv2.resize(regression_target, (cfg.MASK_SIZE, cfg.MASK_SIZE))
regression_target = regression_target >= cfg.BINARIZE_THRESH
return regression_target
def clip_masked_boxes(boxes, masks, im_shape):
"""
    Clip masked boxes to the image boundaries
"""
num_box = boxes.shape[0]
for i in xrange(num_box):
box = np.round(boxes[i]).astype(int)
mask = cv2.resize(masks[i, 0].astype(np.float32), (box[2] - box[0] + 1, box[3] - box[1] + 1))
clip_x1 = max(0, 0 - box[0])
clip_y1 = max(0, 0 - box[1])
clip_width = min(box[2], im_shape[1] - 1) - clip_x1
clip_height = min(box[3], im_shape[0] - 1) - clip_y1
clip_x2 = clip_x1 + clip_width
clip_y2 = clip_y1 + clip_height
mask = mask[clip_y1:clip_y2, clip_x1:clip_x2]
masks[i, 0] = cv2.resize(mask.astype(np.float32), (cfg.MASK_SIZE, cfg.MASK_SIZE))
box[0] = clip_x1
box[1] = clip_y1
box[2] = clip_x2
box[3] = clip_y2
boxes[i] = box
return boxes, masks
def mask_aggregation(boxes, masks, mask_weights, im_width, im_height):
"""
This function implements mask voting mechanism to give finer mask
n is the candidate boxes (masks) number
Args:
masks: All masks need to be aggregated (n x sz x sz)
mask_weights: class score associated with each mask (n x 1)
boxes: tight box enclose each mask (n x 4)
im_width, im_height: image information
TODO: Ensure mask size is sz x sz or tight box size
"""
assert boxes.shape[0] == len(masks) and boxes.shape[0] == mask_weights.shape[0]
im_mask = np.zeros((im_height, im_width))
for mask_ind in xrange(len(masks)):
        box = np.round(boxes[mask_ind]).astype(int)  # integer coordinates for slicing
mask = (masks[mask_ind] >= cfg.BINARIZE_THRESH).astype(float)
mask_weight = mask_weights[mask_ind]
im_mask[box[1]:box[3]+1, box[0]:box[2]+1] += mask * mask_weight
[r, c] = np.where(im_mask >= cfg.BINARIZE_THRESH)
if len(r) == 0 or len(c) == 0:
        min_y = int(np.ceil(im_height / 2))
        min_x = int(np.ceil(im_width / 2))
max_y = min_y
max_x = min_x
else:
min_y = np.min(r)
min_x = np.min(c)
max_y = np.max(r)
max_x = np.max(c)
clipped_mask = im_mask[min_y:max_y+1, min_x:max_x+1]
clipped_box = np.array((min_x, min_y, max_x, max_y), dtype=np.float32)
return clipped_mask, clipped_box
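# Illustrative, cfg-free sketch of the weighted accumulation performed above:
# two overlapping binary masks are blended by their normalized class scores,
# so the shared region accumulates the full weight of 1.0.
if __name__ == '__main__':
    _demo_canvas = np.zeros((4, 4))
    _demo_canvas[0:2, 0:2] += np.ones((2, 2)) * 0.7  # first mask, weight 0.7
    _demo_canvas[1:3, 1:3] += np.ones((2, 2)) * 0.3  # second mask, weight 0.3
    print(_demo_canvas)  # the shared cell at (1, 1) sums to 1.0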
def cpu_mask_voting(masks, boxes, scores, num_classes, max_per_image, im_width, im_height):
"""
Wrapper function for mask voting, note we already know the class of boxes and masks
Args:
masks: ~ n x mask_sz x mask_sz
boxes: ~ n x 4
scores: ~ n x 1
max_per_image: default would be 100
im_width: width of image
im_height: height of image
"""
    # apply per-class NMS, then sort to keep the top-scoring detections
scores = scores[:, 1:]
num_detect = boxes.shape[0]
res_mask = [[] for _ in xrange(num_detect)]
for i in xrange(num_detect):
box = np.round(boxes[i]).astype(int)
mask = cv2.resize(masks[i, 0].astype(np.float32), (box[2] - box[0] + 1, box[3] - box[1] + 1))
res_mask[i] = mask
# Intermediate results
sup_boxes = []
sup_masks = []
sup_scores = []
tobesort_scores = []
for i in xrange(num_classes - 1):
dets = np.hstack((boxes.astype(np.float32), scores[:, i:i+1]))
inds = nms(dets, cfg.TEST.MASK_MERGE_NMS_THRESH)
ind_boxes = boxes[inds]
ind_masks = masks[inds]
ind_scores = scores[inds, i]
order = ind_scores.ravel().argsort()[::-1]
num_keep = min(len(order), max_per_image)
order = order[0:num_keep]
sup_boxes.append(ind_boxes[order])
sup_masks.append(ind_masks[order])
sup_scores.append(ind_scores[order])
tobesort_scores.extend(ind_scores[order])
sorted_scores = np.sort(tobesort_scores)[::-1]
num_keep = min(len(sorted_scores), max_per_image)
thresh = sorted_scores[num_keep-1]
result_box = []
result_mask = []
for c in xrange(num_classes - 1):
cls_box = sup_boxes[c]
cls_score = sup_scores[c]
keep = np.where(cls_score >= thresh)[0]
new_sup_boxes = cls_box[keep]
num_sup_box = len(new_sup_boxes)
masks_ar = np.zeros((num_sup_box, 1, cfg.MASK_SIZE, cfg.MASK_SIZE))
boxes_ar = np.zeros((num_sup_box, 4))
for i in xrange(num_sup_box):
# Get weights according to their segmentation scores
cur_ov = bbox_overlaps(boxes.astype(np.float), new_sup_boxes[i, np.newaxis].astype(np.float))
cur_inds = np.where(cur_ov >= cfg.TEST.MASK_MERGE_IOU_THRESH)[0]
cur_weights = scores[cur_inds, c]
cur_weights = cur_weights / sum(cur_weights)
# Re-format mask when passing it to mask_aggregation
pass_mask = [res_mask[j] for j in list(cur_inds)]
# do mask aggregation
tmp_mask, boxes_ar[i] = mask_aggregation(boxes[cur_inds], pass_mask, cur_weights, im_width, im_height)
tmp_mask = cv2.resize(tmp_mask.astype(np.float32), (cfg.MASK_SIZE, cfg.MASK_SIZE))
masks_ar[i, 0] = tmp_mask
# make new array such that scores is the last dimension of boxes
boxes_scored_ar = np.hstack((boxes_ar, cls_score[keep, np.newaxis]))
result_box.append(boxes_scored_ar)
result_mask.append(masks_ar)
return result_box, result_mask
def gpu_mask_voting(masks, boxes, scores, num_classes, max_per_image, im_width, im_height):
"""
A wrapper function, note we already know the class of boxes and masks
Args:
masks: ~ 300 x 21 x 21
boxes: ~ 300 x 4
scores: ~ 300 x 1
max_per_image: default would be 100
        im_width: image width
        im_height: image height
"""
# Intermediate results
sup_boxes = []
sup_scores = []
tobesort_scores = []
for i in xrange(num_classes):
if i == 0:
sup_boxes.append([])
sup_scores.append([])
continue
dets = np.hstack((boxes.astype(np.float32), scores[:, i:i+1]))
inds = nms(dets, cfg.TEST.MASK_MERGE_NMS_THRESH)
ind_boxes = boxes[inds]
ind_scores = scores[inds, i]
num_keep = min(len(ind_scores), max_per_image)
sup_boxes.append(ind_boxes[0:num_keep, :])
sup_scores.append(ind_scores[0:num_keep])
tobesort_scores.extend(ind_scores[0:num_keep])
sorted_scores = np.sort(tobesort_scores)[::-1]
num_keep = min(len(sorted_scores), max_per_image)
thresh = sorted_scores[num_keep-1]
# inds array to record which mask should be aggregated together
candidate_inds = []
# weight for each element in the candidate inds
candidate_weights = []
# start position for candidate array
candidate_start = []
candidate_scores = []
class_bar = []
for c in xrange(num_classes):
if c == 0:
continue
cls_box = sup_boxes[c]
cls_score = sup_scores[c]
keep = np.where(cls_score >= thresh)[0]
new_sup_boxes = cls_box[keep]
num_sup_box = len(new_sup_boxes)
for i in xrange(num_sup_box):
cur_ov = bbox_overlaps(boxes.astype(np.float), new_sup_boxes[i, np.newaxis].astype(np.float))
cur_inds = np.where(cur_ov >= cfg.TEST.MASK_MERGE_IOU_THRESH)[0]
candidate_inds.extend(cur_inds)
cur_weights = scores[cur_inds, c]
cur_weights = cur_weights / sum(cur_weights)
candidate_weights.extend(cur_weights)
candidate_start.append(len(candidate_inds))
candidate_scores.extend(cls_score[keep])
class_bar.append(len(candidate_scores))
candidate_inds = np.array(candidate_inds, dtype=np.int32)
candidate_weights = np.array(candidate_weights, dtype=np.float32)
candidate_start = np.array(candidate_start, dtype=np.int32)
candidate_scores = np.array(candidate_scores, dtype=np.float32)
result_mask, result_box = mv(boxes.astype(np.float32), masks, candidate_inds, candidate_start, candidate_weights, im_height, im_width)
result_box = np.hstack((result_box, candidate_scores[:, np.newaxis]))
list_result_box = []
list_result_mask = []
# separate result mask into different classes
for i in xrange(num_classes - 1):
cls_start = class_bar[i - 1] if i > 0 else 0
cls_end = class_bar[i]
list_result_box.append(result_box[cls_start:cls_end, :])
list_result_mask.append(result_mask[cls_start:cls_end, :, :, :])
return list_result_mask, list_result_box
| 11,105 | 37.696864 | 138 | py |
MNC | MNC-master/lib/nms/py_cpu_nms.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
import numpy as np
def py_cpu_nms(dets, thresh):
"""Pure Python NMS baseline."""
x1 = dets[:, 0]
y1 = dets[:, 1]
x2 = dets[:, 2]
y2 = dets[:, 3]
scores = dets[:, 4]
areas = (x2 - x1 + 1) * (y2 - y1 + 1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
xx1 = np.maximum(x1[i], x1[order[1:]])
yy1 = np.maximum(y1[i], y1[order[1:]])
xx2 = np.minimum(x2[i], x2[order[1:]])
yy2 = np.minimum(y2[i], y2[order[1:]])
w = np.maximum(0.0, xx2 - xx1 + 1)
h = np.maximum(0.0, yy2 - yy1 + 1)
inter = w * h
ovr = inter / (areas[i] + areas[order[1:]] - inter)
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
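# Illustrative usage sketch (not part of the original module): of two heavily
# overlapping boxes, NMS at IoU threshold 0.5 keeps only the higher-scoring
# one; the distant third box survives untouched.
if __name__ == '__main__':
    _demo_dets = np.array([[0., 0., 9., 9., 0.9],
                           [1., 1., 10., 10., 0.8],
                           [50., 50., 60., 60., 0.7]])
    print(py_cpu_nms(_demo_dets, 0.5))  # expected: [0, 2]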
| 1,118 | 27.692308 | 77 | py |
MNC | MNC-master/lib/nms/nms_wrapper.py | # --------------------------------------------------------
# Multitask Network Cascade
# Modified from py-faster-rcnn (https://github.com/rbgirshick/py-faster-rcnn)
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
from mnc_config import cfg
from gpu_nms import gpu_nms
from cpu_nms import cpu_nms
def nms(dets, thresh):
"""Dispatch to either CPU or GPU NMS implementations."""
if dets.shape[0] == 0:
return []
if cfg.USE_GPU_NMS:
return gpu_nms(dets, thresh, device_id=cfg.GPU_ID)
else:
return cpu_nms(dets, thresh)
def apply_nms(all_boxes, thresh):
"""Apply non-maximum suppression to all predicted boxes output by the
test_net method.
"""
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
for cls_ind in xrange(num_classes):
for im_ind in xrange(num_images):
dets = all_boxes[cls_ind][im_ind]
            if len(dets) == 0:
                continue
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
return nms_boxes
def apply_nms_mask(all_boxes, all_masks, thresh):
num_classes = len(all_boxes)
num_images = len(all_boxes[0])
nms_boxes = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
nms_masks = [[[] for _ in xrange(num_images)]
for _ in xrange(num_classes)]
for cls_ind in xrange(num_classes):
for im_ind in xrange(num_images):
dets = all_boxes[cls_ind][im_ind]
masks = all_masks[cls_ind][im_ind]
            if len(dets) == 0:
                continue
keep = nms(dets, thresh)
if len(keep) == 0:
continue
nms_boxes[cls_ind][im_ind] = dets[keep, :].copy()
nms_masks[cls_ind][im_ind] = masks[keep, :].copy()
return nms_boxes, nms_masks
def apply_nms_mask_single(box, mask, thresh):
    if len(box) == 0:
        return box, mask
keep = nms(box, thresh)
if len(keep) == 0:
return box, mask
return box[keep, :].copy(), mask[keep, :].copy()
| 2,332 | 31.402778 | 77 | py |
MNC | MNC-master/lib/nms/__init__.py | 0 | 0 | 0 | py |
|
ContextsFair | ContextsFair-main/compute_results.py | import numpy as np
# from collections import defaultdict
if __name__ == '__main__':
datasets = ['Yelp', 'Gowalla']
samplings = [100]
models = ['GeoSoCa', 'LORE', 'CF', 'PFM']
top_n = [5, 10, 20]
for dataset in datasets:
for sampling in samplings:
for model in models:
if model in ['CF', 'PFM']:
fusions = ['main']
                elif dataset == 'Gowalla' and model == 'GeoSoCa':
fusions = ['mul', 'sum', 'local', 'local_1', 'local_2', 'local_3', 'w19', 'w37', 'w55', 'w73', 'w91']
else:
                    fusions = ['mul', 'sum', 'local', 'local_1', 'local_2', 'local_3', 'w117', 'w144', 'w171', 'w333', 'w414', 'w441', 'w711']
for fusion in fusions:
path = "results/" + dataset + "/" + model + "/" + model + "_" + fusion + "/"
# ['all', 'leisure', 'working', 'active', 'inactive']
user_groups = ['all', 'leisure', 'working']
for user_group in user_groups:
# load user group data: all, active, inactive, leisure, working
users_ids = set()
with open(f'./groups/user_groups/{dataset}/{user_group}.txt','r') as user_group_data:
for uid in user_group_data.readlines():
users_ids.add(uid.strip())
for topN in top_n:
all_precision, all_recall, all_nDCG, all_MAP, all_novel, all_diversity = [], [], [], [], [], []
result_file = open(path + "result_top_" + str(topN) + ".txt", 'r')
result_data = result_file.readlines()
for eachline in result_data:
cnt, uid, precision, recall, nDCG, MAP, novel, diversity = eachline.strip().split('\t')
precision, recall, MAP, NDCG = float(precision), float(recall), float(MAP), float(nDCG)
if uid in users_ids:
all_precision.append(precision)
all_recall.append(recall)
all_MAP.append(MAP)
all_nDCG.append(NDCG)
final_results = open(path + "result_mean_" + str(topN) + "_" + user_group + ".txt", 'w')
final_results.write("Precision\tRecall\tMAP\tNDCG\n")
final_results.write('\t'.join([str(round(np.mean(all_precision), 4)), str(round(np.mean(all_recall), 4)), str(round(np.mean(all_MAP), 4)), str(round(np.mean(all_nDCG), 4))]) + '\n')
result_file.close()
result_data.clear()
| 2,888 | 59.1875 | 209 | py |
flowseq | flowseq-master/flownmt/utils.py | import logging
import sys
from typing import Tuple, List
import torch
from torch._six import inf
def get_logger(name, level=logging.INFO, handler=sys.stdout,
formatter='%(asctime)s - %(name)s - %(levelname)s - %(message)s'):
logger = logging.getLogger(name)
    logger.setLevel(level)
formatter = logging.Formatter(formatter)
stream_handler = logging.StreamHandler(handler)
stream_handler.setLevel(level)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return logger
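# Illustrative usage sketch (not part of the original module): build a logger
# that writes INFO-level records to stdout using the default format string.
if __name__ == '__main__':
    _demo_log = get_logger('demo')
    _demo_log.info('hello from get_logger')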
def norm(p: torch.Tensor, dim: int):
"""Computes the norm over all dimensions except dim"""
if dim is None:
return p.norm()
elif dim == 0:
output_size = (p.size(0),) + (1,) * (p.dim() - 1)
return p.contiguous().view(p.size(0), -1).norm(dim=1).view(*output_size)
elif dim == p.dim() - 1:
output_size = (1,) * (p.dim() - 1) + (p.size(-1),)
return p.contiguous().view(-1, p.size(-1)).norm(dim=0).view(*output_size)
else:
return norm(p.transpose(0, dim), 0).transpose(0, dim)
def exponentialMovingAverage(original, shadow, decay_rate, init=False):
params = dict()
for name, param in shadow.named_parameters():
params[name] = param
for name, param in original.named_parameters():
shadow_param = params[name]
if init:
shadow_param.data.copy_(param.data)
else:
shadow_param.data.add_((1 - decay_rate) * (param.data - shadow_param.data))
def logPlusOne(x):
"""
compute log(x + 1) for small x
Args:
x: Tensor
Returns: Tensor
log(x+1)
"""
eps = 1e-4
mask = x.abs().le(eps).type_as(x)
return x.mul(x.mul(-0.5) + 1.0) * mask + (x + 1.0).log() * (1.0 - mask)
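# Illustrative numerical check (not part of the original module): inputs below
# eps use the second-order Taylor branch, larger ones fall through to the
# exact log(x + 1); both agree with torch.log1p.
if __name__ == '__main__':
    _demo_x = torch.tensor([1e-6, 1e-3, 0.5])
    print(logPlusOne(_demo_x))
    print(torch.log1p(_demo_x))  # reference values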
def gate(x1, x2):
return x1 * x2.sigmoid_()
def total_grad_norm(parameters, norm_type=2):
if isinstance(parameters, torch.Tensor):
parameters = [parameters]
parameters = list(filter(lambda p: p.grad is not None, parameters))
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(p.grad.data.abs().max() for p in parameters)
else:
total_norm = 0
for p in parameters:
param_norm = p.grad.data.norm(norm_type)
total_norm += param_norm.item() ** norm_type
total_norm = total_norm ** (1. / norm_type)
return total_norm
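# Illustrative usage sketch (not part of the original module): the global L2
# gradient norm of a tiny linear layer after a single backward pass.
if __name__ == '__main__':
    _demo_layer = torch.nn.Linear(3, 1)
    _demo_layer(torch.ones(2, 3)).sum().backward()
    print(total_grad_norm(_demo_layer.parameters()))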
def squeeze(x: torch.Tensor, mask: torch.Tensor, factor: int = 2) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
x: Tensor
input tensor [batch, length, features]
mask: Tensor
mask tensor [batch, length]
factor: int
squeeze factor (default 2)
Returns: Tensor1, Tensor2
squeezed x [batch, length // factor, factor * features]
squeezed mask [batch, length // factor]
"""
assert factor >= 1
if factor == 1:
        return x, mask
batch, length, features = x.size()
assert length % factor == 0
# [batch, length // factor, factor * features]
x = x.contiguous().view(batch, length // factor, factor * features)
mask = mask.view(batch, length // factor, factor).sum(dim=2).clamp(max=1.0)
return x, mask
def unsqueeze(x: torch.Tensor, factor: int = 2) -> torch.Tensor:
"""
Args:
x: Tensor
input tensor [batch, length, features]
factor: int
unsqueeze factor (default 2)
Returns: Tensor
squeezed tensor [batch, length * 2, features // 2]
"""
assert factor >= 1
if factor == 1:
return x
batch, length, features = x.size()
assert features % factor == 0
# [batch, length * factor, features // factor]
x = x.view(batch, length * factor, features // factor)
return x
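# Illustrative shape check (not part of the original module): squeeze followed
# by unsqueeze with the same factor restores the original tensor shape.
if __name__ == '__main__':
    _demo_x = torch.randn(2, 6, 4)
    _demo_mask = torch.ones(2, 6)
    _demo_y, _ = squeeze(_demo_x, _demo_mask, factor=2)  # [2, 3, 8]
    print(unsqueeze(_demo_y, factor=2).shape)  # torch.Size([2, 6, 4])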
def split(x: torch.Tensor, z1_features) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
x: Tensor
input tensor [batch, length, features]
z1_features: int
the number of features of z1
Returns: Tensor, Tensor
split tensors [batch, length, z1_features], [batch, length, features-z1_features]
"""
z1 = x[:, :, :z1_features]
z2 = x[:, :, z1_features:]
return z1, z2
def unsplit(xs: List[torch.Tensor]) -> torch.Tensor:
"""
Args:
xs: List[Tensor]
tensors to be combined
Returns: Tensor
combined tensor
"""
return torch.cat(xs, dim=2)
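# Illustrative round-trip check (not part of the original module): split and
# unsplit are exact inverses along the feature dimension.
if __name__ == '__main__':
    _demo_x = torch.randn(2, 5, 8)
    _demo_z1, _demo_z2 = split(_demo_x, 3)  # [2, 5, 3] and [2, 5, 5]
    print(torch.equal(unsplit([_demo_z1, _demo_z2]), _demo_x))  # True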
def make_positions(tensor, padding_idx):
"""Replace non-padding symbols with their position numbers.
Position numbers begin at padding_idx+1. Padding symbols are ignored.
"""
mask = tensor.ne(padding_idx).long()
return torch.cumsum(mask, dim=1) * mask
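# Illustrative usage sketch (not part of the original module): with padding
# index 0, padded positions stay 0 and real tokens count from 1.
if __name__ == '__main__':
    _demo_tokens = torch.tensor([[7, 7, 0, 0]])
    print(make_positions(_demo_tokens, padding_idx=0))  # tensor([[1, 2, 0, 0]])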
# def prepare_rnn_seq(rnn_input, lengths, batch_first=False):
# '''
# Args:
# rnn_input: [seq_len, batch, input_size]: tensor containing the features of the input sequence.
# lengths: [batch]: tensor containing the lengthes of the input sequence
# batch_first: If True, then the input and output tensors are provided as [batch, seq_len, feature].
# Returns:
# '''
#
# def check_decreasing(lengths):
# lens, order = torch.sort(lengths, dim=0, descending=True)
# if torch.ne(lens, lengths).sum() == 0:
# return None
# else:
# _, rev_order = torch.sort(order)
# return lens, order, rev_order
#
# check_res = check_decreasing(lengths)
#
# if check_res is None:
# lens = lengths
# rev_order = None
# else:
# lens, order, rev_order = check_res
# batch_dim = 0 if batch_first else 1
# rnn_input = rnn_input.index_select(batch_dim, order)
# lens = lens.tolist()
# seq = pack_padded_sequence(rnn_input, lens, batch_first=batch_first)
# return seq, rev_order
#
# def recover_rnn_seq(seq, rev_order, batch_first=False, total_length=None):
# output, _ = pad_packed_sequence(seq, batch_first=batch_first, total_length=total_length)
# if rev_order is not None:
# batch_dim = 0 if batch_first else 1
# output = output.index_select(batch_dim, rev_order)
# return output
#
#
# def recover_order(tensors, rev_order):
# if rev_order is None:
# return tensors
# recovered_tensors = [tensor.index_select(0, rev_order) for tensor in tensors]
# return recovered_tensors
#
#
# def decreasing_order(lengths, tensors):
# def check_decreasing(lengths):
# lens, order = torch.sort(lengths, dim=0, descending=True)
# if torch.ne(lens, lengths).sum() == 0:
# return None
# else:
# _, rev_order = torch.sort(order)
# return lens, order, rev_order
#
# check_res = check_decreasing(lengths)
#
# if check_res is None:
# lens = lengths
# rev_order = None
# ordered_tensors = tensors
# else:
# lens, order, rev_order = check_res
# ordered_tensors = [tensor.index_select(0, order) for tensor in tensors]
#
# return lens, ordered_tensors, rev_order
| 7,058 | 30.513393 | 108 | py |
flowseq | flowseq-master/flownmt/flownmt.py | import os
import json
import math
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.distributed as dist
from apex.parallel import DistributedDataParallel
from apex.parallel.distributed import flat_dist_call
from flownmt.modules import Encoder
from flownmt.modules import Posterior
from flownmt.modules import Decoder
from flownmt.modules import Prior
class FlowNMTCore(nn.Module):
"""
core module for flow nmt model
"""
def __init__(self, encoder: Encoder, prior: Prior, posterior: Posterior, decoder: Decoder):
super(FlowNMTCore, self).__init__()
self.encoder = encoder
self.prior = prior
self.posterior = posterior
self.decoder = decoder
def sync(self):
self.prior.sync()
def init(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
src_enc = self.encoder.init(src_sents, masks=src_masks, init_scale=init_scale)
z, _ = self.posterior.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=True, init_var=True)
self.prior.init(z, tgt_masks, src_enc, src_masks, init_scale=init_scale)
self.decoder.init(z, tgt_masks, src_enc, src_masks, init_scale=init_scale)
def init_posterior(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
src_enc = self.encoder.init(src_sents, masks=src_masks, init_scale=init_scale)
z, _ = self.posterior.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=True, init_var=False)
self.decoder.init(z, tgt_masks, src_enc, src_masks, init_scale=init_scale)
def init_prior(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
with torch.no_grad():
src_enc, _ = self.encoder(src_sents, masks=src_masks)
z, _ = self.posterior.init(tgt_sents, tgt_masks, src_enc, src_masks,
init_scale=init_scale, init_mu=False, init_var=True)
self.prior.init(z.squeeze(1), tgt_masks, src_enc, src_masks, init_scale=init_scale)
def sample_from_prior(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
nlengths: int = 1, nsamples: int = 1, tau: float = 0.0,
include_zero=False) \
-> Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""
sampling from prior distribution
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
nlengths: int (default 1)
number of length candidates
nsamples: int (default 1)
number of samples per src per length candidate
tau: float (default 0.0)
temperature
        Returns: (Tensor1, Tensor2, Tensor3), (Tensor4, Tensor5), (Tensor6, Tensor7, Tensor8)
Tensor1: samples from the prior [batch * nlengths * nsamples, tgt_length, nz]
Tensor2: log probabilities [batch * nlengths * nsamples]
Tensor3: target masks [batch * nlengths * nsamples, tgt_length]
Tensor4: lengths [batch * nlengths]
Tensor5: log probabilities of lengths [batch * nlengths]
Tensor6: source encoding with shape [batch * nlengths * nsamples, src_length, hidden_size]
Tensor7: tensor for global state [batch * nlengths * nsamples, hidden_size]
Tensor8: source masks with shape [batch * nlengths * nsamples, src_length]
"""
src_enc, ctx = self.encoder(src_sents, masks=src_masks)
# [batch, nsamples, tgt_length, nz]
return self.prior.sample(nlengths, nsamples, src_enc, ctx, src_masks, tau=tau,
include_zero=include_zero)
    def sample_from_posterior(self, tgt_sents: torch.Tensor, tgt_masks: torch.Tensor,
src_enc: torch.Tensor, src_masks: torch.Tensor,
nsamples: int = 1, random=True) -> Tuple[torch.Tensor, torch.Tensor]:
"""
sampling from posterior distribution
Args:
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
tgt_masks: Tensor [batch, tgt_length]
tensor for target masks
src_enc: Tensor [batch, src_length, hidden_size]
tensor for source encoding
src_masks: Tensor [batch, src_length] or None
tensor for source masks
nsamples: int
number of samples
random: bool
if True, perform random sampling. Otherwise, return mean.
Returns: Tensor1, Tensor2
Tensor1: samples from the posterior [batch, nsamples, tgt_length, nz]
Tensor2: log probabilities [batch, nsamples]
"""
return self.posterior.sample(tgt_sents, tgt_masks, src_enc, src_masks, nsamples=nsamples, random=random)
def reconstruct(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
src_enc, ctx = self.encoder(src_sents, masks=src_masks)
z, _ = self.sample_from_posterior(tgt_sents, tgt_masks, src_enc, src_masks, random=False)
z = z.squeeze(1)
recon, _ = self.decoder.decode(z, tgt_masks, src_enc, src_masks)
recon_err = self.decoder.loss(z, tgt_sents, tgt_masks, src_enc, src_masks)
loss_length = self.prior.length_loss(ctx, src_masks, tgt_masks)
lengths, log_probs = self.prior.predict_length(ctx, src_masks, topk=1)
return recon, recon_err, loss_length, lengths.squeeze(1), log_probs.squeeze(1) * -1.
def translate_argmax(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_tr: int = 1, tau: float = 0.0) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch, tgt_length]
Tensor2: lengths [batch]
"""
batch = src_sents.size(0)
# [batch * n_tr, tgt_length, nz]
(z, log_probs, tgt_masks), (lengths, _), (src, _, _) = self.sample_from_prior(src_sents, src_masks, nlengths=1, nsamples=n_tr, tau=tau)
if n_tr > 1:
nbatch, length, nz = z.size()
# [batch, n_tr, tgt_length, nz]
z = z.view(batch, n_tr, length, nz)
# [batch, n_tr]
log_probs = log_probs.view(batch, n_tr)
# [batch, n_tr, tgt_length]
tgt_masks = tgt_masks.view(batch, n_tr, length)
# [batch, n_tr, src_length, dim]
src = src.view(batch, n_tr, *src.size()[1:])
# [batch]
idx = log_probs.argmax(dim=1)
batch_idx = torch.arange(0, batch).long().to(idx.device)
# [batch, tgt_length, nz]
z = z[batch_idx, idx]
# [batch, tgt_length]
tgt_masks = tgt_masks[batch_idx, idx]
            # [batch, src_length, dim]
src = src[:, 0]
# [batch, tgt_length]
trans, _ = self.decoder.decode(z, tgt_masks, src, src_masks)
return trans, lengths
def translate_iw(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_len: int = 1, n_tr: int = 1,
tau: float = 0.0, k: int = 1) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_len: int (default 1)
number of length candidates
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
k: int (default 1)
number of samples for importance weighted sampling
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch, tgt_length]
Tensor2: lengths [batch]
"""
batch = src_sents.size(0)
# [batch * n_len * n_tr, tgt_length, nz]
(z, _, tgt_masks), \
(lengths, log_probs_length), \
(src, ctx, src_masks) = self.sample_from_prior(src_sents, src_masks,
nlengths=n_len, nsamples=n_tr,
tau=tau, include_zero=True)
# [batch, n_len]
lengths = lengths.view(batch, n_len)
log_probs_length = log_probs_length.view(batch, n_len)
# [batch * n_len * n_tr, tgt_length]
trans, _ = self.decoder.decode(z, tgt_masks, src, src_masks)
# [batch, n_len * n_tr, tgt_length]
trans_org = trans.view(batch, n_len * n_tr, trans.size(1))
# [batch * n_len * n_tr, k, tgt_length, nz]
z, log_probs_posterior = self.sample_from_posterior(trans, tgt_masks, src, src_masks, nsamples=k, random=True)
nbatch, _, length, nz = z.size()
if k > 1:
# [batch * n_len * n_tr, k, src_length, hidden_size]
src = src.unsqueeze(1) + src.new_zeros(nbatch, k, *src.size()[1:])
# [batch * n_len * n_tr * k, src_length, hidden_size]
src = src.view(nbatch * k, *src.size()[2:])
# [batch * n_len * n_tr, k, hidden_size]
ctx = ctx.unsqueeze(1) + ctx.new_zeros(nbatch, k, ctx.size(1))
# [batch * n_len * n_tr * k, hidden_size]
ctx = ctx.view(nbatch * k, ctx.size(2))
# [batch * n_len * n_tr, k, src_length]
src_masks = src_masks.unsqueeze(1) + src_masks.new_zeros(nbatch, k, src_masks.size(1))
# [batch * n_len * n_tr * k, src_length]
src_masks = src_masks.view(nbatch * k, src_masks.size(2))
# [batch * n_len * n_tr, k, tgt_length]
tgt_masks = tgt_masks.unsqueeze(1) + tgt_masks.new_zeros(nbatch, k, tgt_masks.size(1))
            # [batch * n_len * n_tr * k, tgt_length]
tgt_masks = tgt_masks.view(nbatch * k, tgt_masks.size(2))
# [batch * n_len * n_tr, k, tgt_length]
trans = trans.unsqueeze(1) + trans.new_zeros(nbatch, k, trans.size(1))
# [batch * n_len * n_tr * k, tgt_length]
trans = trans.view(nbatch * k, trans.size(2))
# [batch * n_len * n_tr * k, tgt_length, nz]
z = z.view(-1, length, nz)
# [batch * n_len * n_tr * k]
log_probs_prior, _ = self.prior.log_probability(z, tgt_masks, src, ctx, src_masks, length_loss=False)
# [batch * n_len * n_tr, k]
log_probs_prior = log_probs_prior.view(nbatch, k)
minus_log_prob_decode = self.decoder.loss(z, trans, tgt_masks, src, src_masks).view(nbatch, k)
log_iw = log_probs_prior - minus_log_prob_decode - log_probs_posterior
# [batch, n_len, n_tr]
nlprobs = math.log(k) - torch.logsumexp(log_iw, dim=1).view(batch, n_len, n_tr)
# [batch, n_len, n_tr]
nlprobs = nlprobs - log_probs_length.unsqueeze(2)
nlprobs = nlprobs / lengths.unsqueeze(2).float()
idx = nlprobs.view(batch, -1).argmin(dim=1)
batch_idx = torch.arange(0, batch).long().to(idx.device)
trans = trans_org[batch_idx, idx]
        lengths = lengths[batch_idx, idx // n_tr]  # integer division recovers the length-candidate index
return trans, lengths
def translate_sample(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_len: int = 1, n_tr: int = 1, tau: float = 0.0) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_len: int (default 1)
number of length candidates
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch * n_len * n_tr, tgt_length]
Tensor2: lengths [batch * n_len * n_tr]
"""
batch = src_sents.size(0)
# [batch * n_len * n_tr, tgt_length, nz]
(z, _, tgt_masks), \
(lengths, _), \
(src, _, src_masks) = self.sample_from_prior(src_sents, src_masks,
nlengths=n_len, nsamples=n_tr,
tau=tau, include_zero=False)
# [batch * n_len * n_tr, tgt_length]
trans, _ = self.decoder.decode(z, tgt_masks, src, src_masks)
# [batch, n_len]
lengths = lengths.view(batch, n_len, 1).expand(batch, n_len, n_tr).contiguous()
lengths = lengths.view(batch * n_len * n_tr)
return trans, lengths
    def reconstruct_loss(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
tgt_masks: Tensor [batch, tgt_length] or None
tensor for target masks
Returns: Tensor1, Tensor2
Tensor1: reconstruction error [batch]
Tensor2: length loss [batch]
"""
src_enc, ctx = self.encoder(src_sents, masks=src_masks)
z, _ = self.sample_from_posterior(tgt_sents, tgt_masks, src_enc, src_masks, random=False)
# [batch, tgt_length, nz]
z = z.squeeze(1)
loss_length = self.prior.length_loss(ctx, src_masks, tgt_masks)
recon_err = self.decoder.loss(z, tgt_sents, tgt_masks, src_enc, src_masks)
return recon_err, loss_length
    def translate_loss(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor,
nsamples: int = 1) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
tgt_masks: Tensor [batch, tgt_length] or None
tensor for target masks
nsamples: int
number of samples
Returns: Tensor1, Tensor2, Tensor3
Tensor1: reconstruction error [batch]
Tensor2: KL [batch]
Tensor3: length loss [batch]
"""
src_enc, ctx = self.encoder(src_sents, masks=src_masks)
z, log_probs_posterior = self.sample_from_posterior(tgt_sents, tgt_masks, src_enc, src_masks,
nsamples=nsamples, random=True)
batch, _, length, nz = z.size()
if nsamples > 1:
# [batch, nsamples, src_length, hidden_size]
src_enc = src_enc.unsqueeze(1) + src_enc.new_zeros(batch, nsamples, *src_enc.size()[1:])
# [batch * nsamples, src_length, hidden_size]
src_enc = src_enc.view(batch * nsamples, *src_enc.size()[2:])
# [batch, nsamples, hidden_size]
ctx = ctx.unsqueeze(1) + ctx.new_zeros(batch, nsamples, ctx.size(1))
ctx = ctx.view(batch * nsamples, ctx.size(2))
# [batch, nsamples, src_length]
src_masks = src_masks.unsqueeze(1) + src_masks.new_zeros(batch, nsamples, src_masks.size(1))
# [batch * nsamples, src_length]
src_masks = src_masks.view(batch * nsamples, src_masks.size(2))
# [batch, nsamples, tgt_length]
tgt_masks = tgt_masks.unsqueeze(1) + tgt_masks.new_zeros(batch, nsamples, tgt_masks.size(1))
            # [batch * nsamples, tgt_length]
tgt_masks = tgt_masks.view(batch * nsamples, tgt_masks.size(2))
# [batch, nsamples, tgt_length]
tgt_sents = tgt_sents.unsqueeze(1) + tgt_sents.new_zeros(batch, nsamples, tgt_sents.size(1))
tgt_sents = tgt_sents.view(batch * nsamples, tgt_sents.size(2))
# [batch * nsamples, tgt_length, nz]
z = z.view(-1, length, nz)
# [batch * nsamples] -> [batch, nsamples]
log_probs_prior, loss_length = self.prior.log_probability(z, tgt_masks, src_enc, ctx, src_masks, length_loss=True)
log_probs_prior = log_probs_prior.view(batch, nsamples)
loss_length = loss_length.view(batch, nsamples)
# [batch]
KL = (log_probs_posterior - log_probs_prior).mean(dim=1)
loss_length = loss_length.mean(dim=1)
# [batch * nsamples] -> [batch, nsamples] -> [batch]
recon_err = self.decoder.loss(z, tgt_sents, tgt_masks, src_enc, src_masks).view(batch, nsamples).mean(dim=1)
return recon_err, KL, loss_length
    def forward(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor, src_masks: torch.Tensor, tgt_masks: torch.Tensor,
nsamples: int = 1, only_recon_loss=False):
if only_recon_loss:
return self.reconstruct_loss(src_sents, tgt_sents, src_masks, tgt_masks)
else:
return self.translate_loss(src_sents, tgt_sents, src_masks, tgt_masks, nsamples=nsamples)
class FlowNMT(nn.Module):
"""
NMT model with Generative Flow.
"""
def __init__(self, core: FlowNMTCore):
super(FlowNMT, self).__init__()
self.core = core
self.length_unit = self.core.prior.length_unit
self.distribured_enabled = False
def _get_core(self):
return self.core.module if self.distribured_enabled else self.core
def sync(self):
core = self._get_core()
core.prior.sync()
def init(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
core = self._get_core()
core.init(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=init_scale)
def init_posterior(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
core = self._get_core()
core.init_posterior(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=init_scale)
def init_prior(self, src_sents, tgt_sents, src_masks, tgt_masks, init_scale=1.0):
core = self._get_core()
core.init_prior(src_sents, tgt_sents, src_masks, tgt_masks, init_scale=init_scale)
def reconstruct(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
return self._get_core().reconstruct(src_sents, tgt_sents, src_masks, tgt_masks)
def translate_argmax(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_tr: int = 1, tau: float = 0.0) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch, tgt_length]
Tensor2: lengths [batch]
"""
return self._get_core().translate_argmax(src_sents, src_masks, n_tr=n_tr, tau=tau)
def translate_iw(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_len: int = 1, n_tr: int = 1,
tau: float = 0.0, k: int = 1) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_len: int (default 1)
number of length candidates
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
k: int (default 1)
number of samples for importance weighted sampling
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch, tgt_length]
Tensor2: lengths [batch]
"""
return self._get_core().translate_iw(src_sents, src_masks, n_len=n_len, n_tr=n_tr,
tau=tau, k=k)
def translate_sample(self, src_sents: torch.Tensor, src_masks: torch.Tensor,
n_len: int = 1, n_tr: int = 1, tau: float = 0.0) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
n_len: int (default 1)
number of length candidates
n_tr: int (default 1)
number of translations per sentence per length candidate
tau: float (default 0.0)
temperature
Returns: Tensor1, Tensor2
Tensor1: tensor for translations [batch * n_len * n_tr, tgt_length]
Tensor2: lengths [batch * n_len * n_tr]
"""
return self._get_core().translate_sample(src_sents, src_masks, n_len=n_len, n_tr=n_tr, tau=tau)
    def reconstruct_error(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
tgt_masks: Tensor [batch, tgt_length] or None
tensor for target masks
Returns: Tensor1, Tensor2
Tensor1: reconstruction error [batch]
Tensor2: length loss [batch]
"""
return self.core(src_sents, tgt_sents, src_masks, tgt_masks, only_recon_loss=True)
    def loss(self, src_sents: torch.Tensor, tgt_sents: torch.Tensor,
src_masks: torch.Tensor, tgt_masks: torch.Tensor,
nsamples: int = 1, eval=False) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Args:
src_sents: Tensor [batch, src_length]
tensor for source sentences
tgt_sents: Tensor [batch, tgt_length]
tensor for target sentences
src_masks: Tensor [batch, src_length] or None
tensor for source masks
tgt_masks: Tensor [batch, tgt_length] or None
tensor for target masks
nsamples: int
number of samples
eval: bool
if eval, turn off distributed mode
Returns: Tensor1, Tensor2, Tensor3
Tensor1: reconstruction error [batch]
Tensor2: KL [batch]
Tensor3: length loss [batch]
"""
core = self._get_core() if eval else self.core
return core(src_sents, tgt_sents, src_masks, tgt_masks, nsamples=nsamples)
def init_distributed(self, rank, local_rank):
assert not self.distribured_enabled
self.distribured_enabled = True
print("Initializing Distributed, rank {}, local rank {}".format(rank, local_rank))
dist.init_process_group(backend='nccl', rank=rank)
torch.cuda.set_device(local_rank)
self.core = DistributedDataParallel(self.core)
def sync_params(self):
assert self.distribured_enabled
core = self._get_core()
flat_dist_call([param.data for param in core.parameters()], dist.all_reduce)
self.core.needs_refresh = True
def enable_allreduce(self):
assert self.distribured_enabled
self.core.enable_allreduce()
def disable_allreduce(self):
assert self.distribured_enabled
self.core.disable_allreduce()
def save(self, model_path):
model = {'core': self._get_core().state_dict()}
model_name = os.path.join(model_path, 'model.pt')
torch.save(model, model_name)
def save_core(self, path):
core = self._get_core()
model = {'prior': core.prior.state_dict(),
'encoder': core.encoder.state_dict(),
'decoder': core.decoder.state_dict(),
'posterior': core.posterior.state_dict()}
torch.save(model, path)
def load_core(self, path, device, load_prior=True):
model = torch.load(path, map_location=device)
core = self._get_core()
core.posterior.load_state_dict(model['posterior'])
core.encoder.load_state_dict(model['encoder'])
core.decoder.load_state_dict(model['decoder'])
if load_prior:
core.prior.load_state_dict(model['prior'])
@classmethod
def load(cls, model_path, device):
params = json.load(open(os.path.join(model_path, 'config.json'), 'r'))
flownmt = FlowNMT.from_params(params).to(device)
model_name = os.path.join(model_path, 'model.pt')
model = torch.load(model_name, map_location=device)
flownmt.core.load_state_dict(model['core'])
return flownmt
@classmethod
def from_params(cls, params: Dict) -> "FlowNMT":
src_vocab_size = params.pop('src_vocab_size')
tgt_vocab_size = params.pop('tgt_vocab_size')
embed_dim = params.pop('embed_dim')
latent_dim = params.pop('latent_dim')
hidden_size = params.pop('hidden_size')
max_src_length = params.pop('max_src_length')
max_tgt_length = params.pop('max_tgt_length')
src_pad_idx = params.pop('src_pad_idx')
tgt_pad_idx = params.pop('tgt_pad_idx')
share_embed = params.pop('share_embed')
tie_weights = params.pop('tie_weights')
# prior
prior_params = params.pop('prior')
prior_params['flow']['features'] = latent_dim
prior_params['flow']['src_features'] = latent_dim
prior_params['length_predictor']['features'] = latent_dim
prior_params['length_predictor']['max_src_length'] = max_src_length
prior = Prior.by_name(prior_params.pop('type')).from_params(prior_params)
        # encoder
encoder_params = params.pop('encoder')
encoder_params['vocab_size'] = src_vocab_size
encoder_params['embed_dim'] = embed_dim
encoder_params['padding_idx'] = src_pad_idx
encoder_params['latent_dim'] = latent_dim
encoder_params['hidden_size'] = hidden_size
encoder = Encoder.by_name(encoder_params.pop('type')).from_params(encoder_params)
# posterior
posterior_params = params.pop('posterior')
posterior_params['vocab_size'] = tgt_vocab_size
posterior_params['embed_dim'] = embed_dim
posterior_params['padding_idx'] = tgt_pad_idx
posterior_params['latent_dim'] = latent_dim
posterior_params['hidden_size'] = hidden_size
_shared_embed = encoder.embed if share_embed else None
posterior_params['_shared_embed'] = _shared_embed
posterior = Posterior.by_name(posterior_params.pop('type')).from_params(posterior_params)
# decoder
decoder_params = params.pop('decoder')
decoder_params['vocab_size'] = tgt_vocab_size
decoder_params['latent_dim'] = latent_dim
decoder_params['hidden_size'] = hidden_size
_shared_weight = posterior.tgt_embed.weight if tie_weights else None
decoder_params['_shared_weight'] = _shared_weight
decoder = Decoder.by_name(decoder_params.pop('type')).from_params(decoder_params)
return FlowNMT(FlowNMTCore(encoder, prior, posterior, decoder))
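# Descriptive note (added for readability, not part of the original module):
# FlowNMT.loss returns three per-sentence terms -- the reconstruction error
# from the decoder, the KL between the posterior q(z|x, y) and the flow-based
# prior p(z|x) estimated by sampling, and the length-prediction loss; the
# training objective sums these three terms.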
| 29,121 | 44.432137 | 154 | py |
flowseq | flowseq-master/flownmt/__init__.py | from flownmt.flownmt import FlowNMT
| 37 | 11.666667 | 35 | py |
flowseq | flowseq-master/flownmt/modules/__init__.py | from flownmt.modules.encoders import *
from flownmt.modules.posteriors import *
from flownmt.modules.decoders import *
from flownmt.modules.priors import *
| 156 | 30.4 | 40 | py |
flowseq | flowseq-master/flownmt/modules/decoders/simple.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.decoders.decoder import Decoder
from flownmt.nnet.attention import GlobalAttention
class SimpleDecoder(Decoder):
"""
Simple Decoder to predict translations from latent z
"""
def __init__(self, vocab_size, latent_dim, hidden_size, dropout=0.0, label_smoothing=0., _shared_weight=None):
super(SimpleDecoder, self).__init__(vocab_size, latent_dim,
label_smoothing=label_smoothing,
_shared_weight=_shared_weight)
self.attn = GlobalAttention(latent_dim, latent_dim, latent_dim, hidden_features=hidden_size)
ctx_features = latent_dim * 2
self.ctx_proj = nn.Sequential(nn.Linear(ctx_features, latent_dim), nn.ELU())
self.dropout = dropout
@overrides
def forward(self, z, src, src_mask):
ctx = self.attn(z, src, key_mask=src_mask.eq(0))
ctx = F.dropout(self.ctx_proj(torch.cat([ctx, z], dim=2)), p=self.dropout, training=self.training)
return self.readout(ctx)
@overrides
def init(self, z, mask, src, src_mask, init_scale=1.0):
with torch.no_grad():
self(z, src, src_mask)
@overrides
def decode(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
z: Tensor
latent code [batch, length, hidden_size]
mask: Tensor
mask [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor1, Tensor2
            Tensor1: decoded word index [batch, length]
Tensor2: log probabilities of decoding [batch]
"""
# [batch, length, vocab_size]
log_probs = F.log_softmax(self(z, src, src_mask), dim=2)
# [batch, length]
log_probs, dec = log_probs.max(dim=2)
dec = dec * mask.long()
# [batch]
log_probs = log_probs.mul(mask).sum(dim=1)
return dec, log_probs
@overrides
def loss(self, z: torch.Tensor, target: torch.Tensor, mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
z: Tensor
latent codes [batch, length, hidden_size]
target: LongTensor
target translations [batch, length]
mask: Tensor
masks for target sentence [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor
tensor for loss [batch]
"""
# [batch, length, vocab_size] -> [batch, vocab_size, length]
logits = self(z, src, src_mask).transpose(1, 2)
# [batch, length]
loss = self.criterion(logits, target).mul(mask)
return loss.sum(dim=1)
@classmethod
def from_params(cls, params: Dict) -> "SimpleDecoder":
return SimpleDecoder(**params)
SimpleDecoder.register('simple')
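# Illustrative shape sketch (assumes this package's dependencies -- torch,
# overrides and flownmt.nnet -- are importable; demo-only names start with an
# underscore): logits come out as [batch, tgt_length, vocab_size].
if __name__ == '__main__':
    _demo_dec = SimpleDecoder(vocab_size=11, latent_dim=8, hidden_size=16)
    _demo_z = torch.randn(2, 5, 8)
    _demo_src = torch.randn(2, 7, 8)
    _demo_src_mask = torch.ones(2, 7)
    print(_demo_dec(_demo_z, _demo_src, _demo_src_mask).shape)  # [2, 5, 11]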
| 3,347 | 33.875 | 138 | py |
flowseq | flowseq-master/flownmt/modules/decoders/transformer.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.decoders.decoder import Decoder
from flownmt.nnet.attention import MultiHeadAttention
from flownmt.nnet.transformer import TransformerDecoderLayer
from flownmt.nnet.positional_encoding import PositionalEncoding
class TransformerDecoder(Decoder):
"""
Decoder with Transformer
"""
def __init__(self, vocab_size, latent_dim, num_layers, hidden_size, heads, label_smoothing=0.,
dropout=0.0, dropword=0.0, max_length=100, _shared_weight=None):
super(TransformerDecoder, self).__init__(vocab_size, latent_dim,
label_smoothing=label_smoothing,
_shared_weight=_shared_weight)
self.pos_enc = PositionalEncoding(latent_dim, None, max_length + 1)
self.pos_attn = MultiHeadAttention(latent_dim, heads, dropout=dropout)
layers = [TransformerDecoderLayer(latent_dim, hidden_size, heads, dropout=dropout) for _ in range(num_layers)]
self.layers = nn.ModuleList(layers)
self.dropword = dropword # drop entire tokens
def forward(self, z, mask, src, src_mask):
z = F.dropout2d(z, p=self.dropword, training=self.training)
# [batch, length, latent_dim]
pos_enc = self.pos_enc(z) * mask.unsqueeze(2)
key_mask = mask.eq(0)
ctx = self.pos_attn(pos_enc, z, z, key_mask)
src_mask = src_mask.eq(0)
for layer in self.layers:
ctx = layer(ctx, key_mask, src, src_mask)
return self.readout(ctx)
@overrides
def init(self, z, mask, src, src_mask, init_scale=1.0):
with torch.no_grad():
return self(z, mask, src, src_mask)
@overrides
def decode(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
z: Tensor
latent code [batch, length, hidden_size]
mask: Tensor
mask [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor1, Tensor2
            Tensor1: decoded word index [batch, length]
Tensor2: log probabilities of decoding [batch]
"""
# [batch, length, vocab_size]
log_probs = F.log_softmax(self(z, mask, src, src_mask), dim=2)
# [batch, length]
log_probs, dec = log_probs.max(dim=2)
dec = dec * mask.long()
# [batch]
log_probs = log_probs.mul(mask).sum(dim=1)
return dec, log_probs
@overrides
def loss(self, z: torch.Tensor, target: torch.Tensor, mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
z: Tensor
latent codes [batch, length, hidden_size]
target: LongTensor
target translations [batch, length]
mask: Tensor
masks for target sentence [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor
tensor for loss [batch]
"""
# [batch, length, vocab_size] -> [batch, vocab_size, length]
logits = self(z, mask, src, src_mask).transpose(1, 2)
# [batch, length]
loss = self.criterion(logits, target).mul(mask)
return loss.sum(dim=1)
@classmethod
def from_params(cls, params: Dict) -> "TransformerDecoder":
return TransformerDecoder(**params)
TransformerDecoder.register('transformer')
| 3,889 | 35.018519 | 138 | py |
flowseq | flowseq-master/flownmt/modules/decoders/decoder.py | from typing import Dict, Tuple
import torch
import torch.nn as nn
from flownmt.nnet.criterion import LabelSmoothedCrossEntropyLoss
class Decoder(nn.Module):
"""
Decoder to predict translations from latent z
"""
_registry = dict()
def __init__(self, vocab_size, latent_dim, label_smoothing=0., _shared_weight=None):
super(Decoder, self).__init__()
self.readout = nn.Linear(latent_dim, vocab_size, bias=True)
if _shared_weight is not None:
self.readout.weight = _shared_weight
nn.init.constant_(self.readout.bias, 0.)
else:
self.reset_parameters(latent_dim)
if label_smoothing < 1e-5:
self.criterion = nn.CrossEntropyLoss(reduction='none')
        elif label_smoothing < 1.0:
self.criterion = LabelSmoothedCrossEntropyLoss(label_smoothing)
else:
raise ValueError('label smoothing should be less than 1.0.')
def reset_parameters(self, dim):
# nn.init.normal_(self.readout.weight, mean=0, std=dim ** -0.5)
nn.init.uniform_(self.readout.weight, -0.1, 0.1)
nn.init.constant_(self.readout.bias, 0.)
def init(self, z, mask, src, src_mask, init_scale=1.0):
raise NotImplementedError
def decode(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
z: Tensor
latent code [batch, length, hidden_size]
mask: Tensor
mask [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor1, Tensor2
            Tensor1: decoded word index [batch, length]
Tensor2: log probabilities of decoding [batch]
"""
raise NotImplementedError
def loss(self, z: torch.Tensor, target: torch.Tensor, mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
z: Tensor
latent codes [batch, length, hidden_size]
target: LongTensor
target translations [batch, length]
mask: Tensor
masks for target sentence [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor
tensor for loss [batch]
"""
raise NotImplementedError
@classmethod
def register(cls, name: str):
Decoder._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return Decoder._registry[name]
@classmethod
def from_params(cls, params: Dict) -> "Decoder":
raise NotImplementedError
Decoder.register('simple')
| 2,946 | 30.351064 | 138 | py |
flowseq | flowseq-master/flownmt/modules/decoders/__init__.py | from flownmt.modules.decoders.decoder import Decoder
from flownmt.modules.decoders.simple import SimpleDecoder
from flownmt.modules.decoders.rnn import RecurrentDecoder
from flownmt.modules.decoders.transformer import TransformerDecoder
| 237 | 46.6 | 67 | py |
flowseq | flowseq-master/flownmt/modules/decoders/rnn.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
from flownmt.modules.decoders.decoder import Decoder
from flownmt.nnet.attention import GlobalAttention
class RecurrentDecoder(Decoder):
"""
Decoder with Recurrent Neural Networks
"""
def __init__(self, vocab_size, latent_dim, rnn_mode, num_layers, hidden_size, bidirectional=True,
dropout=0.0, dropword=0.0, label_smoothing=0., _shared_weight=None):
super(RecurrentDecoder, self).__init__(vocab_size, latent_dim,
label_smoothing=label_smoothing,
_shared_weight=_shared_weight)
if rnn_mode == 'RNN':
RNN = nn.RNN
elif rnn_mode == 'LSTM':
RNN = nn.LSTM
elif rnn_mode == 'GRU':
RNN = nn.GRU
else:
raise ValueError('Unknown RNN mode: %s' % rnn_mode)
assert hidden_size % 2 == 0
# RNN for processing latent variables zs
if bidirectional:
self.rnn = RNN(latent_dim, hidden_size // 2, num_layers=num_layers, batch_first=True, bidirectional=True)
else:
self.rnn = RNN(latent_dim, hidden_size, num_layers=num_layers, batch_first=True, bidirectional=False)
self.attn = GlobalAttention(latent_dim, hidden_size, latent_dim, hidden_features=hidden_size)
self.ctx_proj = nn.Sequential(nn.Linear(latent_dim + hidden_size, latent_dim), nn.ELU())
self.dropout = dropout
self.dropout2d = nn.Dropout2d(dropword) if dropword > 0. else None # drop entire tokens
def forward(self, z, mask, src, src_mask):
lengths = mask.sum(dim=1).long()
if self.dropout2d is not None:
z = self.dropout2d(z)
packed_z = pack_padded_sequence(z, lengths, batch_first=True, enforce_sorted=False)
packed_enc, _ = self.rnn(packed_z)
enc, _ = pad_packed_sequence(packed_enc, batch_first=True, total_length=mask.size(1))
ctx = self.attn(enc, src, key_mask=src_mask.eq(0))
ctx = torch.cat([ctx, enc], dim=2)
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
return self.readout(ctx)
@overrides
def init(self, z, mask, src, src_mask, init_scale=1.0):
with torch.no_grad():
return self(z, mask, src, src_mask)
@overrides
def decode(self, z: torch.Tensor, mask: torch.Tensor, src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
z: Tensor
latent code [batch, length, hidden_size]
mask: Tensor
mask [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor1, Tensor2
Tenser1: decoded word index [batch, length]
Tensor2: log probabilities of decoding [batch]
"""
# [batch, length, vocab_size]
log_probs = F.log_softmax(self(z, mask, src, src_mask), dim=2)
# [batch, length]
log_probs, dec = log_probs.max(dim=2)
dec = dec * mask.long()
# [batch]
log_probs = log_probs.mul(mask).sum(dim=1)
return dec, log_probs
@overrides
def loss(self, z: torch.Tensor, target: torch.Tensor, mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
z: Tensor
latent codes [batch, length, hidden_size]
target: LongTensor
target translations [batch, length]
mask: Tensor
masks for target sentence [batch, length]
src: Tensor
src encoding [batch, src_length, hidden_size]
src_mask: Tensor
source mask [batch, src_length]
Returns: Tensor
tensor for loss [batch]
"""
# [batch, length, vocab_size] -> [batch, vocab_size, length]
logits = self(z, mask, src, src_mask).transpose(1, 2)
# [batch, length]
loss = self.criterion(logits, target).mul(mask)
return loss.sum(dim=1)
@classmethod
def from_params(cls, params: Dict) -> "RecurrentDecoder":
return RecurrentDecoder(**params)
RecurrentDecoder.register('rnn')
| 4,558 | 36.368852 | 138 | py |
flowseq | flowseq-master/flownmt/modules/priors/prior.py | import math
from typing import Dict, Tuple, Union
import torch
import torch.nn as nn
from flownmt.flows.nmt import NMTFlow
from flownmt.modules.priors.length_predictors import LengthPredictor
class Prior(nn.Module):
"""
    class for a Prior with an NMTFlow inside
"""
_registry = dict()
def __init__(self, flow: NMTFlow, length_predictor: LengthPredictor):
super(Prior, self).__init__()
assert flow.inverse, 'prior flow should have inverse mode'
self.flow = flow
self.length_unit = max(2, 2 ** (self.flow.levels - 1))
self.features = self.flow.features
self._length_predictor = length_predictor
self._length_predictor.set_length_unit(self.length_unit)
def sync(self):
self.flow.sync()
def predict_length(self, ctx: torch.Tensor, src_mask: torch.Tensor, topk: int = 1) -> Tuple[torch.LongTensor, torch.Tensor]:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
topk: int (default 1)
return top k length candidates for each src sentence
Returns: LongTensor1, Tensor2
LongTensor1: tensor for lengths [batch, topk]
Tensor2: log probs for each length [batch, topk]
"""
return self._length_predictor.predict(ctx, src_mask, topk=topk)
def length_loss(self, ctx: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
tgt_mask: Tensor
tensor for target mask [batch, tgt_length]
Returns: Tensor
tensor for loss [batch]
"""
return self._length_predictor.loss(ctx, src_mask, tgt_mask)
def decode(self, epsilon: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, src_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
"""
Args:
epsilon: Tensor
                epsilon [batch, tgt_length, nz]
tgt_mask: Tensor
tensor of target masks [batch, tgt_length]
src: Tensor
source encoding [batch, src_length, hidden_size]
src_mask: Tensor
tensor of source masks [batch, src_length]
Returns: Tensor1, Tensor2
Tensor1: decoded latent code z [batch, tgt_length, nz]
Tensor2: log probabilities [batch]
"""
# [batch, tgt_length, nz]
z, logdet = self.flow.fwdpass(epsilon, tgt_mask, src, src_mask)
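        # standard normal log-density: log N(eps; 0, I) = -0.5 * sum(eps^2 + log(2*pi));
        # the -0.5 factor and the flow log-determinant are applied below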
# [batch, tgt_length, nz]
log_probs = epsilon.mul(epsilon) + math.log(math.pi * 2.0)
# apply mask
log_probs = log_probs.mul(tgt_mask.unsqueeze(2))
# [batch]
log_probs = log_probs.view(z.size(0), -1).sum(dim=1).mul(-0.5) + logdet
return z, log_probs
def sample(self, nlengths: int, nsamples: int, src: torch.Tensor,
ctx: torch.Tensor, src_mask: torch.Tensor,
tau=0.0, include_zero=False) -> Tuple[Tuple[torch.Tensor, torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
"""
Args:
nlengths: int
number of lengths per sentence
nsamples: int
number of samples per sentence per length
src: Tensor
source encoding [batch, src_length, hidden_size]
ctx: Tensor
tensor for global state [batch, hidden_size]
src_mask: Tensor
tensor of masks [batch, src_length]
tau: float (default 0.0)
temperature of density
include_zero: bool (default False)
include zero sample
Returns: (Tensor1, Tensor2, Tensor3), (Tensor4, Tensor5), (Tensor6, Tensor7, Tensor8)
Tensor1: samples from the prior [batch * nlengths * nsamples, tgt_length, nz]
Tensor2: log probabilities [batch * nlengths * nsamples]
Tensor3: target masks [batch * nlengths * nsamples, tgt_length]
Tensor4: lengths [batch * nlengths]
Tensor5: log probabilities of lengths [batch * nlengths]
Tensor6: source encoding with shape [batch * nlengths * nsamples, src_length, hidden_size]
Tensor7: tensor for global state [batch * nlengths * nsamples, hidden_size]
Tensor8: source masks with shape [batch * nlengths * nsamples, src_length]
"""
batch = src.size(0)
batch_nlen = batch * nlengths
        # [batch, nlengths]
lengths, log_probs_length = self.predict_length(ctx, src_mask, topk=nlengths)
# [batch * nlengths]
log_probs_length = log_probs_length.view(-1)
lengths = lengths.view(-1)
max_length = lengths.max().item()
# [batch * nlengths, max_length]
tgt_mask = torch.arange(max_length).to(src.device).unsqueeze(0).expand(batch_nlen, max_length).lt(lengths.unsqueeze(1)).float()
# [batch * nlengths, nsamples, tgt_length, nz]
epsilon = src.new_empty(batch_nlen, nsamples, max_length, self.features).normal_()
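        # zero the noise at padded positions and scale by the temperature tau;
        # tau < 1 concentrates samples around the mode of the prior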
epsilon = epsilon.mul(tgt_mask.view(batch_nlen, 1, max_length, 1)) * tau
if include_zero:
epsilon[:, 0].zero_()
# [batch * nlengths * nsamples, tgt_length, nz]
epsilon = epsilon.view(-1, max_length, self.features)
if nsamples * nlengths > 1:
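            # tile src, ctx and the masks nlengths * nsamples times; adding
            # broadcasted zeros materializes the expansion (like expand + contiguous)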
# [batch, nlengths * nsamples, src_length, hidden_size]
src = src.unsqueeze(1) + src.new_zeros(batch, nlengths * nsamples, *src.size()[1:])
# [batch * nlengths * nsamples, src_length, hidden_size]
src = src.view(batch_nlen * nsamples, *src.size()[2:])
# [batch, nlengths * nsamples, hidden_size]
ctx = ctx.unsqueeze(1) + ctx.new_zeros(batch, nlengths * nsamples, ctx.size(1))
# [batch * nlengths * nsamples, hidden_size]
ctx = ctx.view(batch_nlen * nsamples, ctx.size(2))
# [batch, nlengths * nsamples, src_length]
src_mask = src_mask.unsqueeze(1) + src_mask.new_zeros(batch, nlengths * nsamples, src_mask.size(1))
# [batch * nlengths * nsamples, src_length]
src_mask = src_mask.view(batch_nlen * nsamples, src_mask.size(2))
# [batch * nlengths, nsamples, tgt_length]
tgt_mask = tgt_mask.unsqueeze(1) + tgt_mask.new_zeros(batch_nlen, nsamples, tgt_mask.size(1))
# [batch * nlengths * nsamples, tgt_length]
tgt_mask = tgt_mask.view(batch_nlen * nsamples, tgt_mask.size(2))
        # [batch * nlengths * nsamples, tgt_length, nz]
z, log_probs = self.decode(epsilon, tgt_mask, src, src_mask)
return (z, log_probs, tgt_mask), (lengths, log_probs_length), (src, ctx, src_mask)
def log_probability(self, z: torch.Tensor, tgt_mask: torch.Tensor,
src: torch.Tensor, ctx: torch.Tensor, src_mask: torch.Tensor,
length_loss: bool = True) -> Tuple[torch.Tensor, Union[torch.Tensor, None]]:
"""
Args:
z: Tensor
tensor of latent code [batch, length, nz]
tgt_mask: Tensor
tensor of target masks [batch, length]
src: Tensor
source encoding [batch, src_length, hidden_size]
ctx: Tensor
tensor for global state [batch, hidden_size]
src_mask: Tensor
tensor of source masks [batch, src_length]
length_loss: bool (default True)
compute loss of length
Returns: Tensor1, Tensor2
Tensor1: log probabilities of z [batch]
Tensor2: length loss [batch]
"""
# [batch]
loss_length = self.length_loss(ctx, src_mask, tgt_mask) if length_loss else None
# [batch, length, nz]
epsilon, logdet = self.flow.bwdpass(z, tgt_mask, src, src_mask)
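        # change of variables: Gaussian density of the pulled-back noise plus
        # the log-Jacobian of the inverse flow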
# [batch, tgt_length, nz]
log_probs = epsilon.mul(epsilon) + math.log(math.pi * 2.0)
# apply mask
log_probs = log_probs.mul(tgt_mask.unsqueeze(2))
log_probs = log_probs.view(z.size(0), -1).sum(dim=1).mul(-0.5) + logdet
return log_probs, loss_length
def init(self, z, tgt_mask, src, src_mask, init_scale=1.0):
return self.flow.bwdpass(z, tgt_mask, src, src_mask, init=True, init_scale=init_scale)
@classmethod
def register(cls, name: str):
Prior._registry[name] = cls
@classmethod
def by_name(cls, name: str):
return Prior._registry[name]
@classmethod
def from_params(cls, params: Dict) -> "Prior":
flow_params = params.pop('flow')
flow = NMTFlow.from_params(flow_params)
predictor_params = params.pop('length_predictor')
length_predictor = LengthPredictor.by_name(predictor_params.pop('type')).from_params(predictor_params)
return Prior(flow, length_predictor)
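# A minimal sampling sketch (not part of the original module; the params dict
# and the encoder outputs src/ctx/src_mask are illustrative assumptions):
#
#   prior = Prior.by_name('normal').from_params(params)
#   (z, log_probs, tgt_mask), _, _ = prior.sample(
#       nlengths=1, nsamples=5, src=src, ctx=ctx, src_mask=src_mask, tau=0.5)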
Prior.register('normal')
| 9,219 | 41.293578 | 186 | py |
flowseq | flowseq-master/flownmt/modules/priors/__init__.py | from flownmt.modules.priors.prior import Prior
from flownmt.modules.priors.length_predictors import *
| 102 | 33.333333 | 54 | py |
flowseq | flowseq-master/flownmt/modules/priors/length_predictors/diff_softmax.py | from overrides import overrides
from typing import Dict, Tuple
import torch
import torch.nn as nn
import torch.nn.functional as F
from flownmt.modules.priors.length_predictors.predictor import LengthPredictor
from flownmt.nnet.criterion import LabelSmoothedCrossEntropyLoss
class DiffSoftMaxLengthPredictor(LengthPredictor):
def __init__(self, features, max_src_length, diff_range, dropout=0.0, label_smoothing=0.):
super(DiffSoftMaxLengthPredictor, self).__init__()
self.max_src_length = max_src_length
self.range = diff_range
self.features = features
self.dropout = dropout
self.ctx_proj = None
self.diff = None
if label_smoothing < 1e-5:
self.criterion = nn.CrossEntropyLoss(reduction='none')
        elif label_smoothing < 1.0:
self.criterion = LabelSmoothedCrossEntropyLoss(label_smoothing)
else:
raise ValueError('label smoothing should be less than 1.0.')
def set_length_unit(self, length_unit):
self.length_unit = length_unit
self.ctx_proj = nn.Sequential(nn.Linear(self.features, self.features), nn.ELU(),
nn.Linear(self.features, self.features), nn.ELU())
self.diff = nn.Linear(self.features, 2 * self.range + 1)
self.reset_parameters()
def reset_parameters(self):
nn.init.constant_(self.ctx_proj[0].bias, 0.)
nn.init.constant_(self.ctx_proj[2].bias, 0.)
nn.init.uniform_(self.diff.weight, -0.1, 0.1)
nn.init.constant_(self.diff.bias, 0.)
def forward(self, ctx):
ctx = F.dropout(self.ctx_proj(ctx), p=self.dropout, training=self.training)
return self.diff(ctx)
@overrides
def loss(self, ctx: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor) -> torch.Tensor:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
tgt_mask: Tensor
tensor for target mask [batch, tgt_length]
Returns: Tensor
tensor for loss [batch]
"""
# [batch]
src_lengths = src_mask.sum(dim=1).long()
tgt_lengths = tgt_mask.sum(dim=1).long()
# [batch, 2 * range + 1]
logits = self(ctx)
# [1, 2 * range + 1]
mask = torch.arange(0, logits.size(1), device=logits.device).unsqueeze(0)
# [batch, 2 * range + 1]
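        # disallow candidate lengths that are not multiples of length_unit,
        # since the multi-scale flow requires divisible target lengths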
mask = (mask + src_lengths.unsqueeze(1) - self.range).fmod(self.length_unit).ne(0)
logits = logits.masked_fill(mask, float('-inf'))
# handle tgt < src - range
x = (tgt_lengths - src_lengths + self.range).clamp(min=0)
tgt = x + src_lengths - self.range
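        # round the clamped length up to the next multiple of length_unit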
res = tgt.fmod(self.length_unit)
padding = (self.length_unit - res).fmod(self.length_unit)
tgt = tgt + padding
# handle tgt > src + range
x = (tgt - src_lengths + self.range).clamp(max=2 * self.range)
tgt = x + src_lengths - self.range
tgt = tgt - tgt.fmod(self.length_unit)
x = tgt - src_lengths + self.range
loss_length = self.criterion(logits, x)
return loss_length
@overrides
def predict(self, ctx: torch.Tensor, src_mask:torch.Tensor, topk: int = 1) -> Tuple[torch.LongTensor, torch.Tensor]:
"""
Args:
ctx: Tensor
tensor [batch, features]
src_mask: Tensor
tensor for source mask [batch, src_length]
topk: int (default 1)
return top k length candidates for each src sentence
Returns: LongTensor1, Tensor2
LongTensor1: tensor for lengths [batch, topk]
            Tensor2: log probs for each length [batch, topk]
"""
# [batch]
src_lengths = src_mask.sum(dim=1).long()
# [batch, 2 * range + 1]
logits = self(ctx)
# [1, 2 * range + 1]
x = torch.arange(0, logits.size(1), device=logits.device).unsqueeze(0)
# [batch, 2 * range + 1]
tgt = x + src_lengths.unsqueeze(1) - self.range
mask = tgt.fmod(self.length_unit).ne(0)
logits = logits.masked_fill(mask, float('-inf'))
# [batch, 2 * range + 1]
log_probs = F.log_softmax(logits, dim=1)
# handle tgt length <= 0
mask = tgt.le(0)
log_probs = log_probs.masked_fill(mask, float('-inf'))
# [batch, topk]
log_probs, x = log_probs.topk(topk, dim=1)
lengths = x + src_lengths.unsqueeze(1) - self.range
return lengths, log_probs
@classmethod
def from_params(cls, params: Dict) -> 'DiffSoftMaxLengthPredictor':
return DiffSoftMaxLengthPredictor(**params)
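# A minimal usage sketch (not part of the original module; the constructor
# arguments and the ctx/src_mask tensors are illustrative assumptions):
#
#   predictor = DiffSoftMaxLengthPredictor(features=256, max_src_length=128,
#                                          diff_range=20)
#   predictor.set_length_unit(4)
#   # ctx: [batch, 256], src_mask: [batch, src_len]
#   lengths, log_probs = predictor.predict(ctx, src_mask, topk=3)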
DiffSoftMaxLengthPredictor.register('diff_softmax')
| 4,818 | 38.178862 | 120 | py |