repo (string, 2-99 chars) | file (string, 13-225 chars) | code (string, 0-18.3M chars) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (string, 1 class)
---|---|---|---|---|---|---
lanro-gym
|
lanro-gym-main/test/pybrobot_test.py
|
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.robots import Panda
def test_panda_robot_state_obs():
sim = PyBulletSimulation()
panda1 = Panda(sim, full_state=False, fixed_gripper=True)
panda2 = Panda(sim, full_state=False, fixed_gripper=False)
panda3 = Panda(sim, full_state=True, fixed_gripper=True)
panda4 = Panda(sim, full_state=True, fixed_gripper=False)
assert panda1.get_obs().size == 6
assert panda2.get_obs().size == 7
assert panda3.get_obs().size == 19
assert panda4.get_obs().size == 20
assert panda1.action_space.shape == (7, )
assert panda2.action_space.shape == (8, )
assert panda3.action_space.shape == (7, )
assert panda4.action_space.shape == (8, )
assert panda1.get_ee_position().shape == (3, )
assert panda1.get_ee_velocity().shape == (3, )
assert panda2.get_ee_position().shape == (3, )
assert panda2.get_ee_velocity().shape == (3, )
assert panda3.get_ee_position().shape == (3, )
assert panda3.get_ee_velocity().shape == (3, )
assert panda4.get_ee_position().shape == (3, )
assert panda4.get_ee_velocity().shape == (3, )
assert panda1.get_current_pos().shape == (7, )
assert panda2.get_current_pos().shape == (7, )
assert panda3.get_current_pos().shape == (7, )
assert panda4.get_current_pos().shape == (7, )
assert panda1.get_fingers_width() == 0.0
assert panda2.get_fingers_width() >= 0.0
assert panda3.get_fingers_width() == 0.0
assert panda4.get_fingers_width() >= 0.0
assert panda1.get_obs().shape == (6, )
assert panda2.get_obs().shape == (7, )
assert panda3.get_obs().shape == (19, )
assert panda4.get_obs().shape == (20, )
| 1,715 | 34.75 | 62 |
py
|
lanro-gym
|
lanro-gym-main/test/nl_env_test.py
|
import numpy as np
import gymnasium as gym
import lanro_gym
from lanro_gym.language_utils import parse_instructions
def check_instruction(env, obs):
instruction_representation = obs['instruction']
sentence = env.decode_instruction(instruction_representation)
instruction_representation2 = env.encode_instruction(sentence)
assert np.all(instruction_representation == instruction_representation2)
assert sentence == env.pad_instruction(env.task.current_instruction[0])
instruction_list = env.task.get_all_instructions()
word_list, max_instruction_len = parse_instructions(instruction_list)
instruction_space = env.observation_space['instruction']
assert instruction_space.high[-1] == len(word_list) + 1 # for <pad> token
assert instruction_space.shape[0] == max_instruction_len
def test_single_hi_env():
env = gym.make("PandaNLReach2HI-v0", render=False)
env.reset()
assert env.task.use_hindsight_instructions == True
assert env.task.use_action_repair == False
env.task.generate_hindsight_instruction(1)
assert len(env.task.hindsight_instruction)
assert len(env.task.get_all_instructions()) == 9
def test_single_hi_env_synonyms():
env = gym.make("PandaNLReach2SynonymsHI-v0", render=False)
env.reset()
assert env.task.use_hindsight_instructions == True
assert env.task.use_action_repair == False
env.task.generate_hindsight_instruction(1)
assert len(env.task.hindsight_instruction)
assert len(env.task.get_all_instructions()) == 18
def test_single_ar_env():
env = gym.make("PandaNLReach2AR-v0", render=False)
env.reset()
assert env.task.use_hindsight_instructions == False
assert env.task.use_action_repair == True
assert len(env.task.get_all_instructions()) == 171
def test_single_ar_env_synonyms():
env = gym.make("PandaNLReach2SynonymsAR-v0", render=False)
env.reset()
assert env.task.use_hindsight_instructions == False
assert env.task.use_action_repair == True
assert len(env.task.get_all_instructions()) == 666
def test_single_arn_env():
env = gym.make("PandaNLReach2ARN-v0", render=False)
env.reset()
assert env.task.use_hindsight_instructions == False
assert env.task.use_action_repair == True
assert len(env.task.get_all_instructions()) == 198
def test_single_arn_env_synonyms():
env = gym.make("PandaNLReach2SynonymsARN-v0", render=False)
env.reset()
assert env.task.use_hindsight_instructions == False
assert env.task.use_action_repair == True
assert len(env.task.get_all_instructions()) == 774
def test_nl_envs():
for robot in ['Panda']:
for lang_task in ['NLReach', 'NLPush', 'NLGrasp', 'NLLift']:
for obj_count in [2]:
for _mode in [
'', 'Color', 'Shape', 'Weight', 'Size', 'ColorShape', 'WeightShape', 'SizeShape',
'ColorShapeSize'
]:
for _obstype in ["", "PixelEgo", "PixelStatic"]:
for _use_syn in ["", "Synonyms"]:
for _hindsight_instr in ["", "HI"]:
for _action_repair in ["", "AR", "ARN", "ARD", "ARND"]:
id = f'{robot}{lang_task}{obj_count}{_mode}{_obstype}{_use_syn}{_hindsight_instr}{_action_repair}-v0'
env = gym.make(id, render=False)
obs, _ = env.reset()
check_instruction(env, obs)
env.close()
def test_pixel_envs():
for lang_task in ['NLReach', 'NLPush', 'NLGrasp', 'NLLift']:
for _pixel_obstype in ["PixelEgo", "PixelStatic"]:
env = gym.make(f"Panda{lang_task}2{_pixel_obstype}-v0")
obs, _ = env.reset()
img = obs['observation']
assert img.shape == (84, 84, 3)
assert env.observation_space['observation'].shape == (84, 84, 3)
assert env.observation_space['observation'].dtype == np.uint8
env.close()
| 4,105 | 39.653465 | 137 |
py
|
lanro-gym
|
lanro-gym-main/test/simulation_test.py
|
import numpy as np
from lanro_gym.simulation import PyBulletSimulation
def test_init_step_close():
sim = PyBulletSimulation()
sim.step()
sim.close()
def test_box_base_pos_orn():
sim = PyBulletSimulation()
body_name = "test_box"
sim.create_box(body_name, [0.5, 0.5, 0.5], 1.0, [0, 0, 0], [1, 0, 0, 0])
base_pos = sim.get_base_position(body_name)
base_orn = sim.get_base_orientation(body_name)
assert base_pos == (0, 0, 0)
assert base_orn == (0, 0, 0, 1)
assert sim._bodies_idx[body_name] == sim.get_object_id(body_name)
sim.close()
def test_cylinder_base_pos_orn():
sim = PyBulletSimulation()
body_name = "test_cylinder"
sim.create_cylinder(body_name, 0.5, 0.5, 1.0, [0, 0, 0], [1, 0, 0, 1])
base_pos = sim.get_base_position(body_name)
assert base_pos == (0, 0, 0)
assert sim._bodies_idx[body_name] == sim.get_object_id(body_name)
sim.close()
def test_sphere_base_pos_orn():
sim = PyBulletSimulation()
body_name = "test_sphere"
sim.create_sphere(body_name, 0.5, 1.0, [0, 0, 0], [1, 0, 0, 1])
base_pos = sim.get_base_position(body_name)
assert base_pos == (0, 0, 0)
assert sim._bodies_idx[body_name] == sim.get_object_id(body_name)
sim.close()
def test_delta_t():
sim = PyBulletSimulation()
assert sim.dt == 1 / 500. * 20
def test_euler_quat():
sim = PyBulletSimulation()
quat = [0, np.pi, 0, 0]
assert sim.get_euler_from_quaternion(quat) == (3.141592653589793, -0.0, 3.141592653589793)
euler = [0, np.pi, 0]
assert sim.get_quaternion_from_euler(euler) == (0.0, 1.0, 0.0, 6.123233995736766e-17)
def test_remove_body():
sim = PyBulletSimulation()
sim.create_sphere("remove_this", 0.5, 1.0, [0, 0, 0], [1, 0, 0, 1])
sim.remove_body("remove_this")
sim.remove_body("not_extant")
def test_set_base_pos():
sim = PyBulletSimulation()
sphere_id = sim.create_sphere("test", 0.5, 1.0, [0, 0, 0], [1, 0, 0, 1])
sim.set_base_pose("test", [2, 2, 2], [0, 0, 0, 0])
assert sim.get_base_position("test") == (2, 2, 2)
def test_get_link_state():
sim = PyBulletSimulation()
sphere_id = sim.create_sphere("test", 0.5, 1.0, [0, 0, 0], [1, 0, 0, 1])
assert sim.get_link_state("test", 0) == None
| 2,265 | 29.621622 | 94 |
py
|
lanro-gym
|
lanro-gym-main/test/utils_test.py
|
import numpy as np
from lanro_gym.env_utils import RGBCOLORS, SHAPES, TaskObject, valid_task_object_combination, dummys_not_goal_props
from lanro_gym.env_utils.object_properties import WEIGHTS
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.utils import goal_distance, scale_rgb, get_one_hot_list, get_prop_combinations, expand_enums, get_random_enum_with_exceptions
def test_get_prop_combinations():
stream = [RGBCOLORS.RED, SHAPES.CYLINDER]
combinations = get_prop_combinations(stream)
assert len(combinations) == 2
stream = [RGBCOLORS.RED, SHAPES.CYLINDER, RGBCOLORS.GREEN]
combinations = get_prop_combinations(stream)
assert len(combinations) == 4
stream = [RGBCOLORS.RED, SHAPES.CYLINDER, RGBCOLORS.GREEN, SHAPES.CUBE]
combinations = get_prop_combinations(stream)
assert len(combinations) == 8
def test_expand_enums():
expanded_enums = expand_enums([RGBCOLORS])
assert len(expanded_enums) == 12
expanded_enums = expand_enums([SHAPES])
assert len(expanded_enums) == 3
expanded_enums = expand_enums([WEIGHTS])
assert len(expanded_enums) == 2
expanded_enums = expand_enums([RGBCOLORS, SHAPES])
assert len(expanded_enums) == 15
expanded_enums = expand_enums([RGBCOLORS, SHAPES, WEIGHTS])
assert len(expanded_enums) == 17
def test_get_random_enum_with_exceptions():
assert get_random_enum_with_exceptions(RGBCOLORS, [RGBCOLORS.RED])[0] != RGBCOLORS.RED
assert get_random_enum_with_exceptions(SHAPES, [SHAPES.CUBE])[0] != SHAPES.CUBE
assert get_random_enum_with_exceptions(WEIGHTS, [WEIGHTS.HEAVY])[0] != WEIGHTS.HEAVY
def test_scale_rgb():
assert np.allclose(scale_rgb([255., 255., 255.]), [1, 1, 1])
assert np.allclose(scale_rgb([128., 128., 128.]), [.5019607, .5019607, .5019607])
assert np.allclose(scale_rgb([0., 0., 0.]), [0, 0, 0])
def test_get_one_hot_list():
one_hots = get_one_hot_list(1)
assert len(one_hots) == 1
assert np.all(one_hots[-1] == np.array([1.]))
one_hots = get_one_hot_list(2)
assert len(one_hots) == 2
assert np.all(one_hots[0] == np.array([1., 0.]))
assert np.all(one_hots[1] == np.array([0., 1.]))
one_hots = get_one_hot_list(10)
assert len(one_hots) == 10
assert np.all(one_hots[-1] == np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1]))
def test_goal_distance():
vec1 = np.array([1, 1, 1])
vec2 = np.array([2, 1.5, 2])
assert goal_distance(vec1, vec2) == np.linalg.norm(vec1 - vec2)
assert goal_distance(vec1, vec1) == 0
assert goal_distance(vec2, vec2) == 0
assert goal_distance(vec1, vec2) == 1.5
def test_valid_task_combinations():
sim = PyBulletSimulation()
task_obj1 = TaskObject(sim, RGBCOLORS.RED, SHAPES.CUBE)
task_obj2 = TaskObject(sim, SHAPES.CUBE, RGBCOLORS.RED)
assert not valid_task_object_combination(task_obj1, task_obj2)
assert not valid_task_object_combination(task_obj2, task_obj1)
task_obj3 = TaskObject(sim, RGBCOLORS.BLUE, SHAPES.CUBE)
assert valid_task_object_combination(task_obj1, task_obj3)
assert valid_task_object_combination(task_obj3, task_obj1)
task_obj4 = TaskObject(sim, SHAPES.CUBE, RGBCOLORS.BLUE)
assert valid_task_object_combination(task_obj2, task_obj3)
assert valid_task_object_combination(task_obj3, task_obj2)
task_obj5 = TaskObject(sim, RGBCOLORS.BLUE)
assert not valid_task_object_combination(task_obj5, task_obj3)
# valid, as we refer to the goal as "cube blue" when other object is not a cube
if task_obj5.get_shape() != SHAPES.CUBE:
assert valid_task_object_combination(task_obj3, task_obj5)
assert not valid_task_object_combination(task_obj5, task_obj4)
# valid, as we refer to the goal as "cube blue" when other object is not a cube
if task_obj5.get_shape() != SHAPES.CUBE:
assert valid_task_object_combination(task_obj4, task_obj5)
task_obj6 = TaskObject(sim, RGBCOLORS.YELLOW)
task_obj7 = TaskObject(sim, RGBCOLORS.BLUE)
assert valid_task_object_combination(task_obj6, task_obj7)
task_obj8 = TaskObject(sim, RGBCOLORS.YELLOW, SHAPES.CUBE)
task_obj9 = TaskObject(sim, RGBCOLORS.YELLOW, SHAPES.CUBOID)
assert valid_task_object_combination(task_obj8, task_obj9)
task_obj10 = TaskObject(sim, SHAPES.CUBOID, RGBCOLORS.YELLOW)
assert valid_task_object_combination(task_obj10, task_obj8)
assert not valid_task_object_combination(task_obj10, task_obj9)
def test_valid_task_combinations2():
sim = PyBulletSimulation()
task_obj1 = TaskObject(sim, RGBCOLORS.RED)
task_obj1._shape = SHAPES.CUBE
assert task_obj1.has_dummy_weight and task_obj1.has_dummy_size
task_obj2 = TaskObject(sim, SHAPES.CUBE)
task_obj2.color = RGBCOLORS.RED
assert task_obj2.has_dummy_weight and task_obj2.has_dummy_size
# instruction: ... red object
# red object (dummy == cube) and cube object (dummy == red)
assert not valid_task_object_combination(task_obj1, task_obj2)
task_obj3 = TaskObject(sim, SHAPES.CUBE)
task_obj3.color = RGBCOLORS.BLUE
# instruction: ... red object
# red object (dummy == cube) and cube object (dummy == blue)
assert valid_task_object_combination(task_obj1, task_obj3)
task_obj4 = TaskObject(sim, SHAPES.CUBE)
task_obj4.color = RGBCOLORS.BLUE
assert not valid_task_object_combination(task_obj3, task_obj4)
task_obj5 = TaskObject(sim, SHAPES.CUBOID)
task_obj5.color = RGBCOLORS.BLUE
assert valid_task_object_combination(task_obj4, task_obj5)
task_obj6 = TaskObject(sim, WEIGHTS.HEAVY)
task_obj6.color = RGBCOLORS.BLUE
assert valid_task_object_combination(task_obj5, task_obj6)
task_obj7 = TaskObject(sim, WEIGHTS.HEAVY)
task_obj7.color = RGBCOLORS.BLUE
assert not valid_task_object_combination(task_obj6, task_obj7)
def test_dummys_not_goal_primary():
sim = PyBulletSimulation()
task_obj1 = TaskObject(sim, primary=RGBCOLORS.RED, secondary=SHAPES.CUBE, onehot_idx=0)
task_obj2 = TaskObject(sim, primary=RGBCOLORS.RED, onehot_idx=0)
if task_obj2.get_shape() in [SHAPES.CUBE]:
# 0: red cube and red object (dummy in {cube})
assert not dummys_not_goal_props(task_obj1, task_obj2)
else:
# 1: red cube and red object (dummy in {cuboid, cylinder})
assert dummys_not_goal_props(task_obj1, task_obj2)
task_obj3 = TaskObject(sim, primary=RGBCOLORS.BLUE, onehot_idx=0)
# 1: red cube and blue dummy
assert dummys_not_goal_props(task_obj1, task_obj3)
task_obj4 = TaskObject(sim, primary=SHAPES.CUBE, onehot_idx=0)
if task_obj4.get_color() in [RGBCOLORS.RED]:
assert not dummys_not_goal_props(task_obj1, task_obj4)
else:
# 1: red cube and cube object (dummy in {green, blue})
assert dummys_not_goal_props(task_obj1, task_obj4)
task_obj5 = TaskObject(sim, primary=SHAPES.CUBOID, onehot_idx=0)
# 1: red cube and cuboid dummy
assert dummys_not_goal_props(task_obj1, task_obj5)
| 6,983 | 39.842105 | 140 |
py
|
lanro-gym
|
lanro-gym-main/test/env_utils/task_object_list.py
|
import numpy as np
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.env_utils import TaskObjectList, RGBCOLORS, SHAPES
def test_task_object_list_default():
sim = PyBulletSimulation()
obj_list = TaskObjectList(sim)
assert len(obj_list) == 3
assert obj_list[0].get_color() == RGBCOLORS.RED
assert obj_list[1].get_color() == RGBCOLORS.GREEN
assert obj_list[2].get_color() == RGBCOLORS.BLUE
task_obj_args = obj_list.get_task_obj_args({}, RGBCOLORS.RED, primary=True)
assert task_obj_args['primary'] == RGBCOLORS.RED
assert task_obj_args['onehot_idx'] == 9
task_obj_args = obj_list.get_task_obj_args({}, RGBCOLORS.RED, primary=False)
assert task_obj_args['secondary'] == RGBCOLORS.RED
assert task_obj_args['sec_onehot_idx'] == 9
def test_task_object_list_shape_mode():
sim = PyBulletSimulation()
obj_list = TaskObjectList(sim, shape_mode=True)
assert len(obj_list) == 24
assert obj_list[0].get_color() == RGBCOLORS.RED
assert obj_list[5].get_shape() == SHAPES.CYLINDER
expected_oh = np.array([0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 0., 0., 1., 0.])
assert np.all(obj_list[0].get_onehot() == expected_oh)
def test_task_object_list_color_mode():
sim = PyBulletSimulation()
obj_list = TaskObjectList(sim, color_mode=True)
assert len(obj_list) == 9
obj_props = obj_list.get_obj_properties()
assert len(obj_props) == 9
def test_task_object_list_weight_mode():
sim = PyBulletSimulation()
obj_list = TaskObjectList(sim, weight_mode=True)
assert len(obj_list) == 17
obj_props = obj_list.get_obj_properties()
assert len(obj_props) == 17
def test_task_object_list_size_mode():
sim = PyBulletSimulation()
obj_list = TaskObjectList(sim, size_mode=True)
assert len(obj_list) == 24
obj_props = obj_list.get_obj_properties()
assert len(obj_props) == 24
def test_task_object_list_sizeshape_mode():
sim = PyBulletSimulation()
obj_list = TaskObjectList(sim, shape_mode=True, size_mode=True)
assert len(obj_list) == 63
obj_props = obj_list.get_obj_properties()
assert len(obj_props) == 63
def test_task_object_list_colorshape_mode():
sim = PyBulletSimulation()
obj_list = TaskObjectList(sim, color_mode=True, shape_mode=True)
assert len(obj_list) == 66
obj_props = obj_list.get_obj_properties()
assert len(obj_props) == 66
def test_task_object_list_weightshape_mode():
sim = PyBulletSimulation()
obj_list = TaskObjectList(sim, weight_mode=True, shape_mode=True)
assert len(obj_list) == 50
obj_props = obj_list.get_obj_properties()
assert len(obj_props) == 50
def test_task_object_list_colorshapesize_mode():
sim = PyBulletSimulation()
obj_list = TaskObjectList(sim, color_mode=True, shape_mode=True, size_mode=True)
assert len(obj_list) == 141
obj_props = obj_list.get_obj_properties()
assert len(obj_props) == 141
| 2,970 | 33.149425 | 108 |
py
|
lanro-gym
|
lanro-gym-main/test/env_utils/task_object.py
|
import pytest
import numpy as np
from lanro_gym.env_utils.object_properties import WEIGHTS
from lanro_gym.simulation import PyBulletSimulation
from lanro_gym.env_utils import TaskObject, RGBCOLORS, SHAPES, SIZES, DUMMY
def test_task_object_primary():
sim = PyBulletSimulation()
task_obj = TaskObject(sim, primary=RGBCOLORS.RED, onehot_idx=0)
assert isinstance(task_obj.primary, RGBCOLORS)
assert isinstance(task_obj.secondary, DUMMY)
primary, _ = task_obj.get_properties()
assert task_obj.primary == primary
assert task_obj.get_color() == RGBCOLORS.RED
assert task_obj.get_shape() == SHAPES.CUBE
assert task_obj.get_size() == SIZES.MEDIUM
assert task_obj.get_weight() == WEIGHTS.LIGHT
task_obj_onehot = task_obj.get_onehot()
assert len(task_obj_onehot) == 20
assert np.all(
task_obj_onehot == np.array([0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 1., 0.]))
assert task_obj.onehot_idx_colors == 0
assert task_obj.obj_mass == 2
def test_task_object_primary2():
sim = PyBulletSimulation()
task_obj = TaskObject(sim, primary=SHAPES.CYLINDER, onehot_idx=2)
assert isinstance(task_obj.primary, SHAPES)
assert isinstance(task_obj.secondary, DUMMY)
primary, _ = task_obj.get_properties()
assert task_obj.primary == primary
assert task_obj.get_color() == RGBCOLORS.RED
assert task_obj.get_shape() == SHAPES.CYLINDER
assert task_obj.get_size() == SIZES.MEDIUM
assert task_obj.get_weight() == WEIGHTS.LIGHT
task_obj_onehot = task_obj.get_onehot()
assert len(task_obj_onehot) == 20
assert np.all(
task_obj_onehot == np.array([0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 1., 0.]))
assert task_obj.onehot_idx_colors == 9
assert task_obj.obj_mass == 2
def test_task_object_primary3():
sim = PyBulletSimulation()
task_obj = TaskObject(sim, primary=SHAPES.CYLINDER, onehot_idx=2, secondary=WEIGHTS.HEAVY, sec_onehot_idx=1)
assert isinstance(task_obj.primary, SHAPES)
assert isinstance(task_obj.secondary, WEIGHTS)
primary, _ = task_obj.get_properties()
assert task_obj.primary == primary
assert task_obj.get_color() == RGBCOLORS.RED
assert task_obj.get_shape() == SHAPES.CYLINDER
assert task_obj.get_size() == SIZES.MEDIUM
assert task_obj.get_weight() == WEIGHTS.HEAVY
task_obj_onehot = task_obj.get_onehot()
assert len(task_obj_onehot) == 20
assert np.all(
task_obj_onehot == np.array([0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 0., 0., 0., 1., 0., 1.]))
assert task_obj.onehot_idx_colors == 9
assert task_obj.obj_mass == 8
def test_task_object_with_secondary():
sim = PyBulletSimulation()
task_obj = TaskObject(sim, primary=RGBCOLORS.RED, secondary=SHAPES.CUBOID, onehot_idx=0, sec_onehot_idx=1)
assert isinstance(task_obj.primary, RGBCOLORS)
assert isinstance(task_obj.secondary, SHAPES)
primary, secondary = task_obj.get_properties()
assert task_obj.primary == primary
assert task_obj.secondary == secondary
assert task_obj.get_color() == RGBCOLORS.RED
assert task_obj.get_shape() == SHAPES.CUBOID
assert task_obj.get_size() == SIZES.MEDIUM
assert task_obj.get_weight() == WEIGHTS.LIGHT
task_obj_onehot = task_obj.get_onehot()
assert len(task_obj_onehot) == 20
assert np.all(
task_obj_onehot == np.array([0., 1., 0., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 1., 0., 1., 0.]))
assert task_obj.onehot_idx_colors == 0
assert task_obj.obj_mass == 2
def test_task_object_with_secondary_error():
sim = PyBulletSimulation()
with pytest.raises(ValueError):
TaskObject(sim, primary=SIZES.BIG, secondary=SIZES.BIG, onehot_idx=1, sec_onehot_idx=1)
def test_equality_objects():
sim = PyBulletSimulation()
task_obj1 = TaskObject(sim, primary=RGBCOLORS.RED, onehot_idx=1, secondary=SHAPES.CUBE)
task_obj2 = TaskObject(sim, primary=RGBCOLORS.RED, onehot_idx=1, secondary=SHAPES.CUBE)
assert task_obj1 == task_obj2
task_obj3 = TaskObject(sim, primary=RGBCOLORS.BLUE, onehot_idx=1, secondary=SHAPES.CUBOID)
assert task_obj1 != task_obj3
task_obj4 = TaskObject(sim, primary=RGBCOLORS.RED, onehot_idx=1, secondary=SHAPES.CUBE)
task_obj4._size = SIZES.BIG
task_obj5 = TaskObject(sim, primary=RGBCOLORS.RED, onehot_idx=1, secondary=SHAPES.CUBE)
task_obj5._size = SIZES.BIG
assert task_obj4 == task_obj5
task_obj6 = TaskObject(sim, primary=RGBCOLORS.RED, onehot_idx=1, secondary=SHAPES.CUBE)
task_obj6.color = RGBCOLORS.BLUE
task_obj6._size = SIZES.BIG
task_obj7 = TaskObject(sim, primary=RGBCOLORS.RED, onehot_idx=1, secondary=SHAPES.CUBE)
task_obj7._size = SIZES.SMALL
assert task_obj6 != task_obj7
task_obj8 = TaskObject(sim, primary=RGBCOLORS.BLUE, onehot_idx=1, secondary=WEIGHTS.HEAVY)
assert task_obj8 != task_obj1
| 4,961 | 38.696 | 118 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/setup.py
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'name': "Minicourse SBRC'2019",
'version': '0.2',
'description': 'Exploring hybrid multi-modal urban routes collected from tweets in São Paulo.',
'author': 'Diego Oliveira and Frances Santos',
'url': '--',
'download_url': '--',
'author_email': '[diego, francessantos]@lrc.ic.unicamp.br',
'install_requires': ['pandas', 'haversine', 'numpy', 'scipy', 'matplotlib', 'sklearn', 'hdbscan', 'xmltodict', 'beautifulsoup4', 'googlemaps', 'gmplot', 'seaborn', 'polyline']
}
setup(**config)
| 622 | 33.611111 | 179 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/common/address_keywords_extension_map.py
|
import re
'''
* Parses an address string to collect the relevant keywords.
*
* @param address - The address string.
* @param mode - `extend` (to add abbreviations) or `clean` (to remove common words).
'''
def parse_str(address, mode='clean'):
address_str = re.sub(r'[^a-zA-Z0-9\-]+', ' ', address).lower()
address_keywords = address_str.split()
if mode == 'extend':
extensions = list(map(lambda k: next((tp for tp in address_keywords_extensions if k in tp), []), address_keywords))
for e in extensions:
if len(e):
address_keywords.extend(e)
elif mode == 'clean':
address_keywords = [item for item in address_keywords if item not in address_stop_words]
return address_keywords
address_stop_words = ["alley","allee","aly","ally","anex","anx","annex","annx","arcade","arc","avenue","av","ave","aven","avenu","avn","avnue","bayou","bayoo","byu","beach","bch","bend","bnd","bluff","blf","bluf","bluffs","blfs","bottom","bot","btm","bottm","boulevard","blvd","boul","boulv","branch","br","brnch","bridge","brdge","brg","brook","brk","brooks","brks","burg","bg","burgs","bgs","bypass","byp","bypa","bypas","byps","camp","cp","cmp","canyon","canyn","cyn","cnyn","cape","cpe","causeway","cswy","causwa","center","cen","ctr","cent","centr","centre","cnter","cntr","centers","ctrs","circle","cir","circ","circl","crcl","crcle","circles","cirs","cliff","clf","cliffs","clfs","club","clb","common","cmn","commons","cmns","corner","cor","corners","cors","course","crse","court","ct","courts","cts","cove","cv","coves","cvs","creek","crk","crescent","cres","crsent","crsnt","crest","crst","crossing","xing","crssng","crossroad","xrd","crossroads","xrds","curve","curv","dale","dl","dam","dm","divide","div","dv","dvd","drive","dr","driv","drv","drives","drs","estate","est","estates","ests","expressway","exp","expy","expr","express","expw","extension","ext","extn","extnsn","extensions","exts","fall","falls","fls","ferry","fry","frry","field","fld","fields","flds","flat","flt","flats","flts","ford","frd","fords","frds","forest","frst","forests","forge","forg","frg","forges","frgs","fork","frk","forks","frks","fort","ft","frt","freeway","fwy","freewy","frway","frwy","garden","gdn","gardn","grden","grdn","gardens","gdns","grdns","gateway","gtwy","gatewy","gatway","gtway","glen","gln","glens","glns","green","grn","greens","grns","grove","grov","grv","groves","grvs","harbor","harb","hbr","harbr","hrbor","harbors","hbrs","haven","hvn","heights","ht","hts","highway","hwy","highwy","hiway","hiwy","hway","hill","hl","hills","hls","hollow","hllw","holw","hollows","holws","inlet","inlt","island","is","islnd","islands","iss","islnds","isle","isles","junction","jct","jction","jctn","junctn","juncton","junctions","jctns","jcts","key","ky","keys","kys","knoll","knl","knol","knolls","knls","lake","lk","lakes","lks","land","landing","lndg","lndng","lane","ln","light","lgt","lights","lgts","loaf","lf","lock","lck","locks","lcks","lodge","ldg","ldge","lodg","loop","loops","mall","manor","mnr","manors","mnrs","meadow","mdw","meadows","mdw","mdws","medows","mews","mill","ml","mills","mls","mission","missn","msn","mssn","motorway","mtwy","mount","mnt","mt","mountain","mntain","mtn","mntn","mountin","mtin","mountains","mntns","mtns","neck","nck","orchard","orch","orchrd","oval","ovl","overpass","opas","park","prk","parks","park","parkway","pkwy","parkwy","pkway","pky","parkways","pkwy","pkwys","pass","passage","psge","path","paths","pike","pikes","pine","pne","pines","pnes","place","pl","plain","pln","plains","plns","plaza","plz","plza","point","pt","points","pts","port","prt","ports","prts","prairie","pr","prr","radial","rad","radl","radiel","ramp","ranch","rnch","ranches","rnchs","rapid","rpd","rapids","rpds","rest","rst","ridge","rdg","rdge","ridges","rdgs","river","riv","rvr","rivr","road","rd","roads","rds","route","rte","row","rue","run","shoal","shl","shoals","shls","shore","shoar","shr","shores","shoars","shrs","skyway","skwy","spring","spg","spng","sprng","springs","spgs","spngs","sprngs","spur","spurs","spur","square","sq","sqr","sqre","squ","squares","sqrs","sqs","station","sta","statn","stn","stravenue","stra","strav","straven","stravn","strvn","strvnue","stream","strm","streme","street","st","strt","str","st
reets","sts","summit","smt","sumit","sumitt","terrace","ter","terr","throughway","trwy","trace","trce","traces","track","trak","tracks","trk","trks","trafficway","trfy","trail","trl","trails","trls","trailer","trlr","trlrs","tunnel","tunel","tunl","tunls","tunnels","tunnl","turnpike","trnpk","tpke","turnpk","underpass","upas","union","un","unions","uns","valley","vly","vally","vlly","valleys","vlys","viaduct","vdct","via","viadct","view","vw","views","vws","village","vill","vlg","villag","villg","villiage","villages","vlgs","ville","vl","vista","vis","vist","vst","vsta","walk","walks","walk","wall","way","wy","ways","well","wl","wells","wls"]
'''
* Map to extend address keywords, extracted from USPS.com Postal Explorer: C1 Street Suffix Abbreviations
*
* @param key - The key to retrieve extension options
'''
address_keywords_extensions = [
[
"alley",
"allee",
"aly",
"ally"
],
[
"anex",
"anx",
"annex",
"annx"
],
[
"arcade",
"arc"
],
[
"avenue",
"av",
"ave",
"aven",
"avenu",
"avn",
"avnue"
],
[
"bayou",
"bayoo",
"byu"
],
[
"beach",
"bch"
],
[
"bend",
"bnd"
],
[
"bluff",
"blf",
"bluf"
],
[
"bluffs",
"blfs"
],
[
"bottom",
"bot",
"btm",
"bottm"
],
[
"boulevard",
"blvd",
"boul",
"boulv"
],
[
"branch",
"br",
"brnch"
],
[
"bridge",
"brdge",
"brg"
],
[
"brook",
"brk"
],
[
"brooks",
"brks"
],
[
"burg",
"bg"
],
[
"burgs",
"bgs"
],
[
"bypass",
"byp",
"bypa",
"bypas",
"byps"
],
[
"camp",
"cp",
"cmp"
],
[
"canyon",
"canyn",
"cyn",
"cnyn"
],
[
"cape",
"cpe"
],
[
"causeway",
"cswy",
"causwa"
],
[
"center",
"cen",
"ctr",
"cent",
"centr",
"centre",
"cnter",
"cntr"
],
[
"centers",
"ctrs"
],
[
"circle",
"cir",
"circ",
"circl",
"crcl",
"crcle"
],
[
"circles",
"cirs"
],
[
"cliff",
"clf"
],
[
"cliffs",
"clfs"
],
[
"club",
"clb"
],
[
"common",
"cmn"
],
[
"commons",
"cmns"
],
[
"corner",
"cor"
],
[
"corners",
"cors"
],
[
"course",
"crse"
],
[
"court",
"ct"
],
[
"courts",
"cts"
],
[
"cove",
"cv"
],
[
"coves",
"cvs"
],
[
"creek",
"crk"
],
[
"crescent",
"cres",
"crsent",
"crsnt"
],
[
"crest",
"crst"
],
[
"crossing",
"xing",
"crssng"
],
[
"crossroad",
"xrd"
],
[
"crossroads",
"xrds"
],
[
"curve",
"curv"
],
[
"dale",
"dl"
],
[
"dam",
"dm"
],
[
"divide",
"div",
"dv",
"dvd"
],
[
"drive",
"dr",
"driv",
"drv"
],
[
"drives",
"drs"
],
[
"estate",
"est"
],
[
"estates",
"ests"
],
[
"expressway",
"exp",
"expy",
"expr",
"express",
"expw"
],
[
"extension",
"ext",
"extn",
"extnsn"
],
[
"extensions",
"exts"
],
[
"fall"
],
[
"falls",
"fls"
],
[
"ferry",
"fry",
"frry"
],
[
"field",
"fld"
],
[
"fields",
"flds"
],
[
"flat",
"flt"
],
[
"flats",
"flts"
],
[
"ford",
"frd"
],
[
"fords",
"frds"
],
[
"forest",
"frst",
"forests"
],
[
"forge",
"forg",
"frg"
],
[
"forges",
"frgs"
],
[
"fork",
"frk"
],
[
"forks",
"frks"
],
[
"fort",
"ft",
"frt"
],
[
"freeway",
"fwy",
"freewy",
"frway",
"frwy"
],
[
"garden",
"gdn",
"gardn",
"grden",
"grdn"
],
[
"gardens",
"gdns",
"grdns"
],
[
"gateway",
"gtwy",
"gatewy",
"gatway",
"gtway"
],
[
"glen",
"gln"
],
[
"glens",
"glns"
],
[
"green",
"grn"
],
[
"greens",
"grns"
],
[
"grove",
"grov",
"grv"
],
[
"groves",
"grvs"
],
[
"harbor",
"harb",
"hbr",
"harbr",
"hrbor"
],
[
"harbors",
"hbrs"
],
[
"haven",
"hvn"
],
[
"heights",
"ht",
"hts"
],
[
"highway",
"hwy",
"highwy",
"hiway",
"hiwy",
"hway"
],
[
"hill",
"hl"
],
[
"hills",
"hls"
],
[
"hollow",
"hllw",
"holw",
"hollows",
"holws"
],
[
"inlet",
"inlt"
],
[
"island",
"is",
"islnd"
],
[
"islands",
"iss",
"islnds"
],
[
"isle",
"isles"
],
[
"junction",
"jct",
"jction",
"jctn",
"junctn",
"juncton"
],
[
"junctions",
"jctns",
"jcts"
],
[
"key",
"ky"
],
[
"keys",
"kys"
],
[
"knoll",
"knl",
"knol"
],
[
"knolls",
"knls"
],
[
"lake",
"lk"
],
[
"lakes",
"lks"
],
[
"land"
],
[
"landing",
"lndg",
"lndng"
],
[
"lane",
"ln"
],
[
"light",
"lgt"
],
[
"lights",
"lgts"
],
[
"loaf",
"lf"
],
[
"lock",
"lck"
],
[
"locks",
"lcks"
],
[
"lodge",
"ldg",
"ldge",
"lodg"
],
[
"loop",
"loops"
],
[
"mall"
],
[
"manor",
"mnr"
],
[
"manors",
"mnrs"
],
[
"meadow",
"mdw"
],
[
"meadows",
"mdw",
"mdws",
"medows"
],
[
"mews"
],
[
"mill",
"ml"
],
[
"mills",
"mls"
],
[
"mission",
"missn",
"msn",
"mssn"
],
[
"motorway",
"mtwy"
],
[
"mount",
"mnt",
"mt"
],
[
"mountain",
"mntain",
"mtn",
"mntn",
"mountin",
"mtin"
],
[
"mountains",
"mntns",
"mtns"
],
[
"neck",
"nck"
],
[
"orchard",
"orch",
"orchrd"
],
[
"oval",
"ovl"
],
[
"overpass",
"opas"
],
[
"park",
"prk"
],
[
"parks",
"park"
],
[
"parkway",
"pkwy",
"parkwy",
"pkway",
"pky"
],
[
"parkways",
"pkwy",
"pkwys"
],
[
"pass"
],
[
"passage",
"psge"
],
[
"path",
"paths"
],
[
"pike",
"pikes"
],
[
"pine",
"pne"
],
[
"pines",
"pnes"
],
[
"place",
"pl"
],
[
"plain",
"pln"
],
[
"plains",
"plns"
],
[
"plaza",
"plz",
"plza"
],
[
"point",
"pt"
],
[
"points",
"pts"
],
[
"port",
"prt"
],
[
"ports",
"prts"
],
[
"prairie",
"pr",
"prr"
],
[
"radial",
"rad",
"radl",
"radiel"
],
[
"ramp"
],
[
"ranch",
"rnch",
"ranches",
"rnchs"
],
[
"rapid",
"rpd"
],
[
"rapids",
"rpds"
],
[
"rest",
"rst"
],
[
"ridge",
"rdg",
"rdge"
],
[
"ridges",
"rdgs"
],
[
"river",
"riv",
"rvr",
"rivr"
],
[
"road",
"rd"
],
[
"roads",
"rds"
],
[
"route",
"rte"
],
[
"row"
],
[
"rue"
],
[
"run"
],
[
"shoal",
"shl"
],
[
"shoals",
"shls"
],
[
"shore",
"shoar",
"shr"
],
[
"shores",
"shoars",
"shrs"
],
[
"skyway",
"skwy"
],
[
"spring",
"spg",
"spng",
"sprng"
],
[
"springs",
"spgs",
"spngs",
"sprngs"
],
[
"spur"
],
[
"spurs",
"spur"
],
[
"square",
"sq",
"sqr",
"sqre",
"squ"
],
[
"squares",
"sqrs",
"sqs"
],
[
"station",
"sta",
"statn",
"stn"
],
[
"stravenue",
"stra",
"strav",
"straven",
"stravn",
"strvn",
"strvnue"
],
[
"stream",
"strm",
"streme"
],
[
"street",
"st",
"strt",
"str"
],
[
"streets",
"sts"
],
[
"summit",
"smt",
"sumit",
"sumitt"
],
[
"terrace",
"ter",
"terr"
],
[
"throughway",
"trwy"
],
[
"trace",
"trce",
"traces"
],
[
"track",
"trak",
"tracks",
"trk",
"trks"
],
[
"trafficway",
"trfy"
],
[
"trail",
"trl",
"trails",
"trls"
],
[
"trailer",
"trlr",
"trlrs"
],
[
"tunnel",
"tunel",
"tunl",
"tunls",
"tunnels",
"tunnl"
],
[
"turnpike",
"trnpk",
"tpke",
"turnpk"
],
[
"underpass",
"upas"
],
[
"union",
"un"
],
[
"unions",
"uns"
],
[
"valley",
"vly",
"vally",
"vlly"
],
[
"valleys",
"vlys"
],
[
"viaduct",
"vdct",
"via",
"viadct"
],
[
"view",
"vw"
],
[
"views",
"vws"
],
[
"village",
"vill",
"vlg",
"villag",
"villg",
"villiage"
],
[
"villages",
"vlgs"
],
[
"ville",
"vl"
],
[
"vista",
"vis",
"vist",
"vst",
"vsta"
],
[
"walk"
],
[
"walks",
"walk"
],
[
"wall"
],
[
"way",
"wy"
],
[
"ways"
],
[
"well",
"wl"
],
[
"wells",
"wls"
]
]
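# Usage sketch (illustrative; the address below is hypothetical). 'clean' drops
# USPS street-suffix stop words, while 'extend' appends the abbreviation
# variants of any suffix it recognizes.
if __name__ == '__main__':
    print(parse_str('350 Fifth Avenue, New York', mode='clean'))
    # e.g. ['350', 'fifth', 'new', 'york']  ('avenue' is a stop word)
    print(parse_str('350 Fifth Avenue, New York', mode='extend'))
    # e.g. ['350', 'fifth', 'avenue', 'new', 'york', 'avenue', 'av', 'ave', ...]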
| 20,680 | 19.764056 | 4,205 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/common/distribution.py
|
import numpy as np
def next(matrix):
random = np.random.rand(*matrix.shape)
random = np.divide(random, matrix)
argmin = random.argmin()
return np.unravel_index(argmin, matrix.shape)
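# Usage sketch (illustrative): draw a matrix index with a likelihood that grows
# with the cell's weight -- dividing uniform noise by the weights makes the
# heaviest cells the most likely to hold the minimum.
if __name__ == '__main__':
    weights = np.array([[0.1, 0.7], [0.1, 0.1]])
    print(next(weights))  # (0, 1) on most draws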
| 186 | 25.714286 | 46 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/common/hashing.py
|
import hashlib
def md5(string):
m = hashlib.md5()
m.update(string.encode('utf-8'))
return m.hexdigest()
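# Usage sketch: hash an arbitrary string into a stable 32-character hex key.
if __name__ == '__main__':
    print(md5('hello'))  # 5d41402abc4b2a76b9719d911017c592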
| 116 | 18.5 | 36 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/common/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/common/env.py
|
import re
"""
* Reads configuration from the .env file.
*
* @param key: the key to look for in the .env file
* @param default: the default value to return in case key is not found
"""
def env(key=None, default=None, **kwargs):
config = {"filename": '.env'}
config.update(kwargs)
with open(config['filename']) as infile:
data = {}
for line in infile:
line = re.sub('#.*$', '', line).strip()
if not line:
continue
info = line.split('=')
if not key:
data[info[0]] = info[1]
continue
if info[0] == key:
return info[1]
if not key:
return data
return default
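# Usage sketch (file name and keys are hypothetical): given a dotenv-style file
# with lines such as 'GOOGLE_MAPS_KEY=abc123', read a single value or the whole
# mapping; '#' comments and blank lines are ignored.
if __name__ == '__main__':
    with open('.env.example', 'w') as outfile:
        outfile.write('GOOGLE_MAPS_KEY=abc123 # hypothetical key\n')
    print(env('GOOGLE_MAPS_KEY', filename='.env.example'))            # 'abc123'
    print(env('MISSING_KEY', default='n/a', filename='.env.example'))  # 'n/a'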
| 783 | 25.133333 | 71 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/filters/nyc_yellow_taxis.py
|
from haversine import haversine
import os, time, datetime as dt
import multiprocessing as mp
import uuid as IdGenerator
import pandas as pd
import numpy as np
# constant to convert miles to km
mile2km = 1.60934400
# constant to convert km to m
km2m = 1000
# constant to convert hours to seconds
h2s = 3600
# date format used in the original file
date_format = '%Y-%m-%d %H:%M:%S'
'''
* @param path: the path of the directory containing the data file (also used to save the output file)
* @param file: the name of the input data file
* @param **kwargs: allows the specification of custom values for fields, such as:
* min_lat
* max_lat
* min_lon
* max_lon
* min_timestamp (unix timestamp)
* max_timestamp (unix timestamp)
* max_speed (km/h)
* min_speed (km/h)
'''
def filter(path, file, **kwargs):
filename = os.path.join(path, file)
df = pd.read_csv(filename, header=0)
timezone = time.strftime("%z", time.gmtime())
timezone = int(timezone.replace('+', '')) / 100 * 60 * 60
df['pickup_unixdatetime'] = df['tpep_pickup_datetime'].map(lambda r: int(time.mktime(dt.datetime.strptime(r, date_format).timetuple())) + timezone)
df['dropoff_unixdatetime'] = df['tpep_dropoff_datetime'].map(lambda r: int(time.mktime(dt.datetime.strptime(r, date_format).timetuple())) + timezone)
df['time_elapsed'] = df['dropoff_unixdatetime'] - df['pickup_unixdatetime']
config = {
'min_lat': 40.632,
'max_lat': 40.849,
'min_lon': -74.060,
'max_lon': -73.762,
'min_timestamp': 1464739200,
'max_timestamp': 1467331199,
'max_speed': 100 * km2m / h2s,
'min_speed': 5 * km2m / h2s,
}
config.update(kwargs)
df = df[(df['pickup_latitude'] <= config['max_lat']) & (df['pickup_latitude'] >= config['min_lat']) & (df['dropoff_latitude'] <= config['max_lat']) & (df['dropoff_latitude'] >= config['min_lat'])]
df = df[(df['pickup_longitude'] <= config['max_lon']) & (df['pickup_longitude'] >= config['min_lon']) & (df['dropoff_longitude'] <= config['max_lon']) & (df['dropoff_longitude'] >= config['min_lon'])]
df = df[(df['dropoff_unixdatetime'] <= config['max_timestamp']) & (df['dropoff_unixdatetime'] >= config['min_timestamp']) & (df['pickup_unixdatetime'] <= config['max_timestamp']) & (df['pickup_unixdatetime'] >= config['min_timestamp'])]
cte = km2m / config['max_speed']
df = df[df['time_elapsed'] >= df['trip_distance'] * cte]
cte = mile2km * km2m / config['min_speed']
df = df[df['time_elapsed'] <= df['trip_distance'] * cte]
df = df[['tpep_pickup_datetime','pickup_latitude','pickup_longitude','tpep_dropoff_datetime','dropoff_latitude','dropoff_longitude']]
if 'split' in config.keys() and config['split']:
for frame in np.array_split(df, int(config['split'])):
filename = os.path.join(path, file[:-4] + '-' + IdGenerator.uuid4().hex + '.csv')
frame.to_csv(filename, index=False)
return
if 'overwrite' in config.keys() and not config['overwrite']:
filename = os.path.join(path, file[:-4] + '-' + IdGenerator.uuid4().hex + '.csv')
df.to_csv(filename, index=False)
return filename
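# Usage sketch (directory and file name are hypothetical): keep trips inside the
# default bounding box and June 2016 window, then write a filtered copy next to
# the original file and return its path.
if __name__ == '__main__':
    out = filter('data/raw/', 'yellow_tripdata_2016-06.csv', overwrite=False)
    print(out)  # e.g. data/raw/yellow_tripdata_2016-06-<uuid>.csv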
| 3,229 | 40.948052 | 240 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/filters/nyc_green_taxis.py
|
import os, time, datetime as dt
import multiprocessing as mp
import uuid as IdGenerator
import pandas as pd
import numpy as np
mile2km = 1.60934400
km2m = 1000
h2s = 3600
date_format = '%m/%d/%Y %I:%M:%S %p'
def filter(path, **kwargs):
if 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1:
pool = mp.Pool(int(kwargs['pool_size']))
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
pool.apply_async(filter_file, args=(path, file, kwargs))
pool.close()
pool.join()
else:
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
filter_file(path, file, kwargs)
def filter_file(path, file, config):
filename = os.path.join(path, file)
df = pd.read_csv(filename, header=0)
timezone = time.strftime("%z", time.gmtime())
timezone = int(timezone.replace('+', '')) / 100 * 60 * 60
df['pickup_unixdatetime'] = df['lpep_pickup_datetime'].map(lambda r: int(time.mktime(dt.datetime.strptime(r, date_format).timetuple())) + timezone)
df['dropoff_unixdatetime'] = df['Lpep_dropoff_datetime'].map(lambda r: int(time.mktime(dt.datetime.strptime(r, date_format).timetuple())) + timezone)
df['time_elapsed'] = df['dropoff_unixdatetime'] - df['pickup_unixdatetime']
if 'max_lat' in config.keys() and 'min_lat' in config.keys():
df = df[(df.Pickup_latitude <= config['max_lat']) & (df.Pickup_latitude >= config['min_lat']) & (df.Dropoff_latitude <= config['max_lat']) & (df.Dropoff_latitude >= config['min_lat'])]
if 'max_lon' in config.keys() and 'min_lon' in config.keys():
df = df[(df.Pickup_longitude <= config['max_lon']) & (df.Pickup_longitude >= config['min_lon']) & (df.Dropoff_longitude <= config['max_lon']) & (df.Dropoff_longitude >= config['min_lon'])]
if 'max_timestamp' in config.keys() and 'min_timestamp' in config.keys():
df = df[(df.dropoff_unixdatetime <= config['max_timestamp']) & (df.dropoff_unixdatetime >= config['min_timestamp']) & (df.pickup_unixdatetime <= config['max_timestamp']) & (df.pickup_unixdatetime >= config['min_timestamp'])]
if 'max_speed' not in config.keys():
config['max_speed'] = 100 * km2m / h2s
cte = mile2km * km2m / config['max_speed']
df = df[df['time_elapsed'] >= df['Trip_distance'] * cte]
if 'min_speed' not in config.keys():
config['min_speed'] = 5 * km2m / h2s
cte = mile2km * km2m / config['min_speed']
df = df[df['time_elapsed'] <= df['Trip_distance'] * cte]
df = df[['lpep_pickup_datetime','Pickup_longitude','Pickup_latitude','Lpep_dropoff_datetime','Dropoff_longitude','Dropoff_latitude']]
if 'split' in config.keys() and config['split']:
for frame in np.array_split(df, int(config['split'])):
filename = os.path.join(path, file[:-4] + '-' + IdGenerator.uuid4().hex + '.csv')
frame.to_csv(filename, index=False)
return
if 'overwrite' in config.keys() and not config['overwrite']:
filename = os.path.join(path, file[:-4] + '-' + IdGenerator.uuid4().hex + '.csv')
df.to_csv(filename, index=False)
| 3,258 | 48.378788 | 232 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/filters/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/bucketizer.py
|
import os
import matplotlib
import pandas as pd
from geopy import Point
import uuid as IdGenerator
from geopy import distance
import multiprocessing as mp
from math import sin, cos, atan2, floor, sqrt, radians
# @deprecated, use smaframework.analyzer.bucketwalk.filesystem
def histogram(path, layers, show=True, max_x=None, save_log=True, **kwargs):
if isinstance(layers, str):
layers = [layers]
for layer in layers:
if 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1:
pool = mp.Pool(int(kwargs['pool_size']))
result = pool.map(load_csv, [(path, file, layer) for file in os.listdir(path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
result = []
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
result.append(load_csv((path, file, layer)))
if len(result) == 0:
print('Layer %s empty!' % layer)
continue
frame = pd.concat(list(result))
if len(frame) == 0:
print('Layer %s empty!' % layer)
continue
frame = frame.groupby(['lat_bucket','lon_bucket','timestamp_bucket']).size()
maximum = frame.max()
frame = frame.map(lambda a: a / maximum)
if pd.__version__ >= '0.17.0':
frame.sort_values(ascending=False, inplace=True)
else:
frame.sort(ascending=False, inplace=True)
if len(frame) == 0:
print('Layer %s empty!' % layer)
continue
if save_log:
frame.to_csv('data/results/%s-bucket-histogram.log' % layer)
frame.index = [i for i in range(0, len(frame))]
plot = frame.plot(kind='line', label=layer + (' (max: %d)' % maximum))
if max_x:
plot.axis([0,max_x,0,maximum+1])
# plot.set_yscale('log', nonposy='clip')
plot.legend()
if show:
matplotlib.pyplot.show(block=True)
else:
fig = plot.get_figure()
fig.savefig('data/results/bucket-histogram.png')
# df1 = pd.read_csv(
# 'data/twitter-bucket-histogram.log',
# header=None,
# skiprows=1,
# low_memory=False,
# memory_map=True,
# names=['i','j','k','c']
# )['c']
# paddf = pd.DataFrame([0 for i in range(200610, 2000000)])
# df1 = pd.concat([df1, paddf])
# df1.index = [i for i in range(0,len(df1))]
# plot = df1.plot(kind='line', label='twitter: (200610 used buckets)', color='r')
# # maximum = df1.max()
# # print(maximum)
# plot.axis([0,500000,0,1200])
# # matplotlib.pyplot.yscale('log')
# plot.text(0, 420, '(0, 420)', color='r')
# # plot.plot(0, 420, 'ro')
# # plot.text(350000, 800, "twitter: \nyellow_taxis (1996165 used buckets)")
# # plot.plot(187, 10, 'ro')
# plot.legend()
# df2 = pd.read_csv(
# 'data/yellow_taxis-bucket-histogram.log',
# header=None,
# skiprows=1,
# low_memory=False,
# memory_map=True,
# names=['i','j','k','c']
# )['c']
# df2.index = [i for i in range(0,len(df2))]
# plot = df2.plot(kind='line', label='yellow_taxis: (1996165 used buckets)', color='b')
# # maximum = df2.max()
# # print(maximum)
# plot.axis([0,500000,0,1200])
# # matplotlib.pyplot.yscale('log')
# plot.text(0, 1130, '(0, 1130)', color='b')
# # plot.plot(0, 1130, 'bo')
# # plot.text(500686, 10, '10', color='b')
# # plot.plot(500686, 10, 'bo')
# plot.legend()
# matplotlib.pyplot.show(block=True)
# # main('data/buckets/', 'twitter', 16, False, 4)
# # main('data/buckets/', 'yellow_taxis', 16, False, 4)
def index(path, distance_precision, temporal_precision, layer, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
result = pool.map(load_csv, [(path, file, layer) for file in os.listdir(path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
result = []
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
result.append(load_csv((path, file, layer)))
frame = pd.concat(list(result))
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print('Data loaded...', flush=True)
# split layers
layer1 = frame[frame.layer == layer].groupby(['lat_bucket','lon_bucket','timestamp_bucket'])
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print('Layers split...', flush=True)
buckets_location = 'data/buckets/index/'
if not os.path.exists(buckets_location):
os.makedirs(buckets_location)
for name, g in layer1:
g.to_csv('data/buckets/index/%s-%d-%d-%d.csv' % (layer.replace('-', '_'), name[0], name[1], name[2]))
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print('Buckets indexed...', flush=True)
def load_csv(args):
path, file, layer = args
df = pd.read_csv(
os.path.join(path, file),
header=0,
low_memory=False,
memory_map=True,
index_col='id'
)
return df[(df.layer == layer)]
def bucketize(path, origin, distance_precision, time_precision, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
filelist = os.listdir(path)
for file in filelist:
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
if multiprocess:
pool.apply_async(bucketize_file, args=(path, file, origin, distance_precision, time_precision, kwargs))
else:
bucketize_file(path, file, origin, distance_precision, time_precision, kwargs)
if multiprocess:
pool.close()
pool.join()
def bucketize_file(path, file, origin, distance_precision, time_precision, config):
filename = os.path.join(path, file)
df = pd.read_csv(filename, header=0)
tdf = df[['lat', 'lon', 'timestamp']]
tdf.columns = ['lat_bucket', 'lon_bucket', 'timestamp_bucket']
df = pd.concat([df, tdf], axis=1)
fileid = IdGenerator.uuid4().hex
df['uid'] = df['uid'].map(lambda x: x + fileid)
df['lat_bucket'] = df['lat_bucket'].map(lambda x: floor(lat(origin, x) / distance_precision))
df['lon_bucket'] = df['lon_bucket'].map(lambda x: floor(lon(origin, x) / distance_precision))
df['timestamp_bucket'] = df['timestamp_bucket'].map(lambda x: floor((x - origin[2]) / time_precision))
buckets_location = 'data/buckets/'
if not os.path.exists(buckets_location):
os.makedirs(buckets_location)
df.to_csv(buckets_location + file, index=False)
def lat(origin, l):
p1 = Point("%f %f" % (origin[0], origin[1]))
p2 = Point("%f %f" % (l, origin[1]))
return distance.distance(p1, p2).meters
def lon(origin, l):
p1 = Point("%f %f" % (origin[0], origin[1]))
p2 = Point("%f %f" % (origin[0], l))
return distance.distance(p1, p2).meters
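# Usage sketch (paths, origin and precisions are hypothetical): cut every point
# into 500 m x 500 m x 1 h buckets measured from a fixed origin, then split the
# bucketized rows of one layer into per-bucket index files.
if __name__ == '__main__':
    origin = (40.632, -74.060, 1464739200)   # (lat, lon, unix timestamp)
    bucketize('data/extracted/', origin, 500, 3600, pool_size=4)
    index('data/buckets/', 500, 3600, 'twitter', pool_size=4, verbose=True)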
| 7,443 | 34.279621 | 164 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/routematcher.py
|
import smaframework.analyzer.bucketwalk.filesystem as BucketWalkFS
import smaframework.analyzer.magtools.mag as Mag
from haversine import haversine
from functools import partial
import multiprocessing as mp
import pandas as pd
"""
* @param trips - list of trips retrieved from Google with smaframework.extractor.google.transit
* @param index_path - path to the index used to store the data of busses and stops, created via smaframework.analyzer.bucketwalk.filesystem
* @return trips - the given list of trips, containing for each trip a list of route options and, for each option, a list of positions of the stops and changes
"""
def match(trips, index_path, **kwargs):
if 'pool_size' not in kwargs.keys() or kwargs['pool_size'] == 1:
return list(map(partial(match_trip, index_path), trips))
with mp.Pool(processes=kwargs['pool_size']) as pool:
return pool.map(partial(match_trip, index_path), trips)
def match_trip(index_path, trip):
trip_points = []
for route in trip:
route_points = []
for step in route:
if step['travel_mode'] == 'WALKING':
route_points.append(step['origin'])
route_points.append(step['destination'])
elif step['travel_mode'] == 'TRANSIT':
layers = ['nyc_subway'] if step['vehicle_type'] == 'SUBWAY' else ['bus', 'express_buss', 'lirr', 'path']
points = match_route(step['origin'], step['destination'], index_path, layers)
route_points.extend(points)
trip_points.append(route_points)
return trip_points
def match_route(origin, destination, index_path, layers):
dist = lambda p1, p2: haversine((p2['lat'], p2['lon']), p1)
origin = BucketWalkFS.closest(index_path, {"lat": origin[0], "lon": origin[1]}, partial(dist, origin), layers=layers)
destination = BucketWalkFS.closest(index_path, {"lat": destination[0], "lon": destination[1]}, partial(dist, destination), layers=layers)
frame = pd.merge(origin, destination, how='inner', on=['uid'])
origin = frame[['id_x', 'uid', 'timestamp_x', 'lat_x', 'lon_x', 'layer_x']]
origin.columns = ['id', 'uid', 'timestamp', 'lat', 'lon', 'layer']
destination = frame[['id_y', 'uid', 'timestamp_y', 'lat_y', 'lon_y', 'layer_y']]
destination.columns = ['id', 'uid', 'timestamp', 'lat', 'lon', 'layer']
return trace_path(origin, destination)
def trace_path(origin, destination):
paths = []
for i, row in origin.iterrows():
o = row['id']
d = destination['id'].loc[i]
nodes = Mag.nodes_by('uid', row['uid'])
ids = nodes['id']
nodes.to_csv('data/nodes.csv')
nodes = Mag.get_simple_path(ids, o, d)
if not nodes.empty:
paths.append(nodes)
path = min(paths, key=lambda p: len(p))
return path[['lat', 'lon']].values.tolist()  # .values replaces the removed DataFrame.as_matrix()
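# Usage sketch (coordinates are hypothetical): 'trips' follows the structure
# returned by smaframework.extractor.google.transit -- a list of trips, each a
# list of route options, each a list of steps with 'travel_mode', 'origin' and
# 'destination' entries.
if __name__ == '__main__':
    trips = [[[{'travel_mode': 'WALKING',
                'origin': (40.758, -73.985),
                'destination': (40.752, -73.977)}]]]
    print(match(trips, 'data/buckets/index/'))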
| 2,656 | 39.876923 | 152 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/fuzzymatcher.py
|
import os, json, math, re, gc, base64
import multiprocessing as mp
import pandas as pd
import numpy as np
from geopy import distance
from geopy import Point
import uuid as IdGenerator
from random import randint
import sklearn
from sklearn.cluster import DBSCAN, Birch, KMeans
import smaframework.tool.distribution as Distribution
import shapely, shapely.geometry, shapely.ops
from hdbscan import HDBSCAN
def _persistent_matches_formater(row, layer1, layer2):
if row['beta_%s_timestamp' % layer1] > row['alpha_%s_timestamp' % layer1]:
return [{
'lat': row['alpha_%s_lat' % layer1],
'lng': row['alpha_%s_lon' % layer1],
'timestamp': row['alpha_%s_timestamp' % layer1],
'%s_uid' % layer1: row['alpha_%s_uid' % layer1],
'%s_uid' % layer2: row['alpha_%s_uid' % layer2],
'distance': float("{:5.1f}".format(row['distance'])),
'score_spatial': float("{:1.4f}".format(row['alpha_score_spatial'])),
'score_temporal': float("{:1.4f}".format(row['alpha_score_temporal']))
}, {
'lat': row['beta_%s_lat' % layer1],
'lng': row['beta_%s_lon' % layer1],
'timestamp': row['beta_%s_timestamp' % layer1],
'%s_uid' % layer1: row['beta_%s_uid' % layer1],
'%s_uid' % layer2: row['beta_%s_uid' % layer2],
}]
else:
return [{
'lat': row['beta_%s_lat' % layer1],
'lng': row['beta_%s_lon' % layer1],
'timestamp': row['beta_%s_timestamp' % layer1],
'%s_uid' % layer1: row['beta_%s_uid' % layer1],
'%s_uid' % layer2: row['beta_%s_uid' % layer2],
'distance': float("{:5.1f}".format(row['distance'])),
'score_spatial': float("{:1.4f}".format(row['beta_score_spatial'])),
'score_temporal': float("{:1.4f}".format(row['beta_score_temporal']))
}, {
'lat': row['alpha_%s_lat' % layer1],
'lng': row['alpha_%s_lon' % layer1],
'timestamp': row['alpha_%s_timestamp' % layer1],
'%s_uid' % layer1: row['alpha_%s_uid' % layer1],
'%s_uid' % layer2: row['alpha_%s_uid' % layer2],
}]
def _persistent_matches_load_csv(args):
path, file, layer1, layer2 = args
df = pd.read_csv(os.path.join(path, file), header=0, low_memory=True, dtype={'twitter_uid': str, 'yellow_taxis_uid': str})
return df[['%s_uid' % layer1, '%s_uid' % layer2, '%s_lat' % layer1, '%s_lon' % layer1, '%s_timestamp' % layer1, 'score_spatial', 'score_temporal']]
def persistent_matches(key, path, layer1, layer2, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
results = pool.map(_persistent_matches_load_csv, [(path, file, layer1, layer2) for file in os.listdir(path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
results = []
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
results.append(_persistent_matches_load_csv((path, file, layer1, layer2)))
df = pd.concat(results)
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print("Loaded...", flush=True)
if pd.__version__ >= '0.17.0':
df.sort_values(by=['%s_uid' % layer1, '%s_uid' % layer2], inplace=True)
else:
df.sort(['%s_uid' % layer1, '%s_uid' % layer2], inplace=True)
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print("Concatenated...", flush=True)
df2 = df.shift(-1)
df = pd.concat([df, df2], axis=1)
df.columns = [
'alpha_%s_uid' % layer1,
'alpha_%s_uid' % layer2,
'alpha_%s_lat' % layer1,
'alpha_%s_lon' % layer1,
'alpha_%s_timestamp' % layer1,
'alpha_score_spatial',
'alpha_score_temporal',
'beta_%s_uid' % layer1,
'beta_%s_uid' % layer2,
'beta_%s_lat' % layer1,
'beta_%s_lon' % layer1,
'beta_%s_timestamp' % layer1,
'beta_score_spatial',
'beta_score_temporal',
]
df = df[(df['alpha_%s_uid' % layer1] == df['beta_%s_uid' % layer1]) & (df['alpha_%s_uid' % layer2] == df['beta_%s_uid' % layer2])]
df = df.drop_duplicates([
'alpha_%s_lat' % layer1,
'alpha_%s_lon' % layer1,
'beta_%s_lat' % layer1,
'beta_%s_lon' % layer1
])
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print("Filtered...", flush=True)
df['distance'] = df.apply(lambda r: distance.distance(Point("%f %f" % (r['alpha_%s_lat' % layer1], r['alpha_%s_lon' % layer1])), Point("%f %f" % (r['beta_%s_lat' % layer1], r['beta_%s_lon' % layer1]))).meters, axis=1)
df['time_elapsed'] = df.apply(lambda r: abs(r['alpha_%s_timestamp' % layer1] - r['beta_%s_timestamp' % layer1]), axis=1)
if 'min_distance' in kwargs.keys():
min_distance = kwargs['min_distance']
else:
min_distance = 500
if 'max_speed' in kwargs.keys():
max_speed = 1 / kwargs['max_speed']
else:
max_speed = 0.036 # (1 / 100 Km/h) ~= (1 / 27.8 m/s)
df = df[df['distance'] > min_distance]
df = df[df['time_elapsed'] > max_speed * df['distance']]
if pd.__version__ >= '0.17.0':
df.sort_values(by=['distance'], inplace=True)
else:
df.sort(['distance'], inplace=True)
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print("Distance Filter...", flush=True)
df = df.apply(lambda r: _persistent_matches_formater(r, layer1, layer2), axis=1)
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print("Mapped...", flush=True)
length = len(df)
if length == 0:
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print('Empty!')
return
result = json.dumps(df.tolist())
with open('templates/google-polyline.html', 'r') as file:
template = file.read()
template = template.replace('<?=LIST?>', result).replace('<?=KEY?>', key)
if 'filename' in kwargs.keys():
filename = kwargs['filename'] % length
else:
filename = 'persistent-matches-%d.html' % length
with open('data/results/' + filename, 'w+') as outfile:
outfile.write(template)
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print("Done!", flush=True)
def heatpoint(args):
filename, layer = args
df = pd.read_csv(filename, header=0)
spatial = df['score_spatial'].sum()
temporal = df['score_temporal'].sum()
lat = df['%s_lat' % layer].mean()
lon = df['%s_lon' % layer].mean()
return '{location: new google.maps.LatLng(%f, %f), weight: %f},' % (lat, lon, spatial + temporal)
def heatmap(key, path, layer, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
results = pool.map(heatpoint, [(os.path.join(path, file), layer) for file in os.listdir(path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
results = []
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
results.append(heatpoint((os.path.join(path, file), layer)))
results = '\n'.join(results)
with open('templates/google-heatmap.html', 'r') as file:
template = file.read()
template = template.replace('<?=LIST?>', results).replace('<?=KEY?>', key)
if 'filename' in kwargs.keys():
filename = kwargs['filename']
else:
filename = 'heatmap-fuzzymatcher-' + IdGenerator.uuid4().hex + '.html'
with open('data/results/' + filename, 'w+') as file:
file.write(template)
def collect_matches(col, spatial_scores, layer1, layer2):
count = 0
values = col.tolist()
for index in col.index.tolist():
if values[count] > 0:
spatial_scores.append((col.name, index, layer1['uid'][index], layer1['lat'][index], layer1['lon'][index], layer1['timestamp'][index], layer2['uid'][col.name], layer2['lat'][col.name], layer2['lon'][col.name], layer2['timestamp'][col.name], values[count]))
count = count + 1
def spatial_item(node, precision, df):
func = linear(precision)
d = df.apply(lambda row: func(dist(row['lat'], row['lon'], node['lat'], node['lon'])), axis=1)
d.columns = [node.index]
return d
def temporal_item(node, precision, df):
func = linear(precision)
d = df.apply(lambda row: func(abs(row['timestamp'] - node['timestamp'])), axis=1)
d.columns = [node.index]
return d
def linear(x0):
f = np.array([x0 for i in range(0, x0)])
g = (np.arange(0, x0 + 1) * -1) + x0
f = np.append(f, g)
def func(x):
x = int(x)
return (f[x] if x < len(f) else 0) / x0
return func
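# Illustrative sketch of the triangular decay produced by linear() (distances below are
# made-up, not from the original pipeline): the score stays at 1.0 up to the precision x0
# and falls linearly to 0 at 2 * x0.
def _example_linear_decay():
    score = linear(500)
    full = score(0)        # 1.0: inside the precision radius
    edge = score(500)      # 1.0: exactly at the precision radius
    half = score(750)      # 0.5: halfway through the decay
    gone = score(1200)     # 0.0: beyond 2 * x0
    return full, edge, half, gone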
def dist(alat, alon, blat, blon):
p1 = Point("%f %f" % (alat, alon))
p2 = Point("%f %f" % (blat, blon))
return distance.distance(p1, p2).meters
def analyse_cube(args):
path, file, l1, l2, distance_precision, temporal_precision, config = args
filename = os.path.join(path, file)
try:
layer, latb, lonb, timestampb = file.replace('.csv', '').split('-')
except Exception as e:
return None
latb = int(latb)
lonb = int(lonb)
timestampb = int(timestampb)
# load cube for layer1
layer1 = pd.read_csv(filename, header=0, low_memory=False, memory_map=True, index_col='id')
# load cubes for layer2
dfs = []
for i in [latb - 1, latb, latb + 1]:
for j in [lonb - 1, lonb, lonb + 1]:
for k in [timestampb - 1, timestampb, timestampb + 1]:
f = os.path.join(path, '%s-%d-%d-%d.csv' % (l2, i, j, k))
if not os.path.isfile(f):
continue
dfs.append(pd.read_csv(f, header=0, low_memory=False, memory_map=True, index_col='id'))
if len(dfs) == 0:
# print('File %s analysed with no matching bucket...' % file)
return None
layer2 = pd.concat(dfs)
# map distances
spatial_scores = layer1.apply(lambda node: spatial_item(node, distance_precision, layer2), axis=1)
spatial_scores = spatial_scores.loc[(spatial_scores.sum(axis=1) != 0), (spatial_scores.sum(axis=0) != 0)]
# get distance matches
ss = []
spatial_scores.apply(lambda col: collect_matches(col, ss, layer1, layer2))
if len(ss) == 0:
# print('File %s analysed with no matching distances...' % file)
return None
spatial_scores = pd.DataFrame(ss, columns=['source', 'target', l1 + '_uid', l1 + '_lat', l1 + '_lon', l1 + '_timestamp', l2 + '_uid', l2 + '_lat', l2 + '_lon', l2 + '_timestamp', 'score'])
# map times
temporal_scores = layer1.apply(lambda node: temporal_item(node, temporal_precision, layer2), axis=1)
temporal_scores = temporal_scores.loc[(temporal_scores.sum(axis=1) != 0), (temporal_scores.sum(axis=0) != 0)]
# get time matches
ts = []
temporal_scores.apply(lambda col: collect_matches(col, ts, layer1, layer2))
if len(ts) == 0:
# print('File %s analysed with no matching times...' % file)
return None
temporal_scores = pd.DataFrame(ts, columns=['source', 'target', l1 + '_uid', l1 + '_lat', l1 + '_lon', l1 + '_timestamp', l2 + '_uid', l2 + '_lat', l2 + '_lon', l2 + '_timestamp', 'score'])
# merge results
df = pd.merge(spatial_scores, temporal_scores, on=['source', 'target'], suffixes=['_spatial', '_temporal'])
df = df[['source', 'target', l1 + '_uid_spatial', l1 + '_lat_spatial', l1 + '_lon_spatial', l1 + '_timestamp_spatial', l2 + '_uid_spatial', l2 + '_lat_spatial', l2 + '_lon_spatial', l2 + '_timestamp_spatial', 'score_spatial', 'score_temporal']]
df.columns = ['source', 'target', l1 + '_uid', l1 + '_lat', l1 + '_lon', l1 + '_timestamp', l2 + '_uid', l2 + '_lat', l2 + '_lon', l2 + '_timestamp', 'score_spatial', 'score_temporal']
result_location = 'data/fuzzy-matches/'
if not os.path.exists(result_location):
try:
os.makedirs(result_location)
except Exception as e:
pass
if len(df.index):
df.to_csv('data/fuzzy-matches/%s-%s-%s.csv' % (l1, l2, IdGenerator.uuid4().hex), index=False)
def analyze(path, distance_precision, temporal_precision, l1, l2, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
pool.map(analyse_cube, [(path, file, l1, l2, distance_precision, temporal_precision, kwargs) for file in os.listdir(path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
analyse_cube((path, file, l1, l2, distance_precision, temporal_precision, kwargs))
def load_matches_csv(args):
path, file, counts = args
df = pd.read_csv(os.path.join(path, file), header=0, low_memory=False, memory_map=True)
counts.append(df.shape[0])
return df
def clusterer(path, layer, epss, epst, **kwargs):
# evaluate min_samples for clustering
min_samples = 20 if 'min_samples' not in kwargs.keys() else kwargs['min_samples']
# metric = 'seuclidean' if 'metric' not in kwargs.keys() else kwargs['metric']
# metric_params = None if 'metric_params' not in kwargs.keys() else kwargs['metric_params']
# algorithm for NN query {‘auto’, ‘ball_tree’, ‘kd_tree’, ‘brute’}
# nnalgorithm = 'ball_tree' if 'nnalgorithm' not in kwargs.keys() else kwargs['nnalgorithm']
# creating file for clusters
cluster_dir = 'data/fuzzy-matches/clusters/'
if not os.path.exists(cluster_dir):
os.makedirs(cluster_dir)
# load data
if 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1:
pool = mp.Pool(int(kwargs['pool_size']))
counts = mp.Manager().list()
result = pool.map(load_matches_csv, [(path, file, counts) for file in os.listdir(path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
kwargs['pool_size'] = 1
result = []
counts = []
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
result.append(load_matches_csv((path, file, counts)))
# organize data
frame = pd.concat(list(result))
frame.reset_index(inplace=True)
frame = frame[[layer + '_lat', layer + '_lon', layer + '_timestamp', 'score_spatial', 'score_temporal']]
frame.columns = ['lat', 'lon', 'timestamp', 'score_spatial', 'score_temporal']
# hash for recovery
cluster_hash = '-l%s-ms%d-epss%d-epst%d' % (layer, min_samples, epss, epst)
if os.path.exists('%stotalizer%s.json' % (cluster_dir, cluster_hash)):
return cluster_hash
# epss meters to degrees
earth_circumference = 2 * math.pi * distance.EARTH_RADIUS * 1000 # meters
epss = epss * 360 / earth_circumference
# using time scaling for ST-DBSCAN
min_time = frame['timestamp'].min()
frame['timestamp'] = (frame['timestamp'] - min_time) * epss / epst
eps = epss
# using space scaling for ST-DBSCAN
# min_lat = frame['lat'].min()
# min_lon = frame['lon'].min()
# frame['lat'] = (frame['lat'] - min_lat) * epst / epss
# frame['lon'] = (frame['lon'] - min_lon) * epst / epss
# eps = epst
### cluster data and separate in frame
clusterer = None
fname = 'data/results/hdbscan%s.csv' % cluster_hash
if os.path.isfile(fname):
print('INFO: loading clusters')
frame = pd.read_csv(fname)
else:
print('INFO: running ST-HDBSCAN')
clusterer = HDBSCAN(min_samples=min_samples).fit(frame[['lat', 'lon', 'timestamp']].as_matrix())
frame = pd.concat([frame, pd.DataFrame({'label': clusterer.labels_})], axis=1)
frame = frame[frame['label'] != -1]
frame.to_csv(fname)
fname = 'data/results/kmeans%s.csv' % cluster_hash
if os.path.isfile(fname):
print('INFO: loading plot data')
frame = pd.read_csv(fname)
else:
        print('INFO: running KMEANS for plotting')
n_clusters = int((frame['label'].max() - 1) * 0.1)
clusterer = KMeans(n_clusters=n_clusters, n_jobs=int(kwargs['pool_size'])).fit(frame[['lat', 'lon']].as_matrix())
frame = pd.concat([frame, pd.DataFrame({'label': clusterer.labels_})], axis=1)
frame.to_csv(fname)
frame = frame.groupby(by='label')
for label, df in frame:
if label == -1:
continue
df.to_csv('%s%d%s.csv' % (cluster_dir, label, cluster_hash))
# get metadata about clusters
totalizer = frame[['score_spatial', 'score_temporal']].mean()
totalizer['count'] = frame['lat'].agg('count')
totalizer = '{score_spatial: '+totalizer['score_spatial'].map(str)+', score_temporal: '+totalizer['score_temporal'].map(str)+', count: '+totalizer['count'].map(str)+'}'
totalizer = totalizer.str.cat(sep=',')
with open('%stotalizer%s.json' % (cluster_dir, cluster_hash), 'w+') as file:
file.write(totalizer)
return cluster_hash
def get_zones(key, path, layer, epss, epst, **kwargs):
results_dir = 'data/results/'
cluster_hash = clusterer(path, layer, epss, epst, **kwargs)
# draw regions
result = []
cluster_dir = 'data/fuzzy-matches/clusters/'
    regex = re.compile(r'^(\d+)%s\.csv$' % cluster_hash)
if 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1:
pool = mp.Pool(int(kwargs['pool_size']))
result = pool.map(Distribution.get_region, [pd.read_csv(cluster_dir + filename) for filename in os.listdir(cluster_dir) if regex.match(filename)])
pool.close()
pool.join()
else:
for filename in os.listdir(cluster_dir):
if regex.match(filename):
result.append(Distribution.get_region(pd.read_csv(cluster_dir + filename)))
    # create json for plotting on Google Maps
print('INFO: creating plot object')
regions = ''
for region in result:
df = '{lat: '+ region['lat'].map(str) +', lng: '+ region['lon'].map(str) +'}'
json = '[' + df.str.cat(sep=',') + ']'
regions = regions + json + ','
# create HTML file with plot and finish
with open('templates/google-shape.html', 'r') as file:
template = file.read()
with open('%stotalizer%s.json' % (cluster_dir, cluster_hash)) as file:
totalizer = file.read()
template = template.replace('<?=LIST?>', regions).replace('<?=KEY?>', key).replace('<?=DATA?>', totalizer)
if 'filename' in kwargs.keys():
filename = kwargs['filename']
else:
filename = 'regions-fuzzymatcher-' + IdGenerator.uuid4().hex + '.html'
with open(results_dir + filename, 'w+') as file:
file.write(template)
print(results_dir + filename)
| 19,407 | 39.517745 | 267 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/simulator.py
|
from math import floor
from geopy import Point, distance
import smaframework.tool.mag as Mag
import smaframework.tool.paralel as Paralel
def learn(path, layer, distance_precision=100, **kwargs):
pool_size = 1 if 'pool_size' not in kwargs.keys() else kwargs['pool_size']
nodes = Mag.nodes(path, layer, **kwargs)
nodes = Paralel.prepare(nodes[['timestamp', 'lat', 'lon']], pool_size=pool_size)
hour = 60 * 60
day = 24 * hour
week = 7 * day
weekdays = 5 * day
daytype_classifier = lambda timestamp: 'weekend' if timestamp % week > weekdays else 'weekday'
hourly_classifier = lambda timestamp: floor((timestamp % day) / hour)
nodes['day_type'] = nodes['timestamp'].map(daytype_classifier, meta=('day_type', str)).compute()
nodes['hour'] = nodes['timestamp'].map(hourly_classifier, meta=('hour', int)).compute()
min_lat = nodes['lat'].min().compute()
min_lon = nodes['lon'].min().compute()
origin = (min_lat, min_lon)
nodes['lat'] = nodes['lat'].map(lambda x: floor(lat(origin, x) / distance_precision)).compute()
nodes['lon'] = nodes['lon'].map(lambda x: floor(lon(origin, x) / distance_precision)).compute()
print(nodes.head(10))
def lat(origin, l):
p1 = Point("%f %f" % (origin[0], origin[1]))
p2 = Point("%f %f" % (l, origin[1]))
return distance.distance(p1, p2).meters
def lon(origin, l):
p1 = Point("%f %f" % (origin[0], origin[1]))
p2 = Point("%f %f" % (origin[0], l))
return distance.distance(p1, p2).meters
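# Illustrative sketch of the grid bucketing used in learn() (coordinates are made-up):
# lat()/lon() turn a coordinate into a metric offset from the origin, which is then
# divided by the spatial precision to obtain a bucket id.
def _example_grid_bucket(distance_precision=100):
    origin = (40.700000, -74.000000)
    north_offset = lat(origin, 40.709000)    # roughly 1 km north of the origin
    east_offset = lon(origin, -73.988000)    # roughly 1 km east of the origin
    return floor(north_offset / distance_precision), floor(east_offset / distance_precision)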
| 1,524 | 37.125 | 100 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/hybridrouter.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/bucketwalk/memory.py
|
from haversine import haversine
import itertools
def closest(index, point, dist=None, radius=1):
    if dist is None:
dist = haversine
key = hash_sample(point, index['hashing_dist'], index['origin'])
cube = get_cube(index, key, radius)
min_distance = float("inf")
closest = None
for i in cube:
distance = dist(point, index['points'][i])
if distance < min_distance:
min_distance = distance
closest = i
return (closest, min_distance)
def get_cube(index, key, radius):
ranges = map(lambda k: list(range(k - radius, k + radius + 1)), key)
keys = list(itertools.product(*ranges))
keys = map(lambda key: '-'.join([str(k) for k in key]), keys)
result = []
for k in keys:
if k not in index.keys():
continue
result.extend(index[k])
return result
def hash_sample(point, hashing_dist, origin):
if isinstance(hashing_dist, list):
return [int((point[i]-origin[i]) / hashing_dist[i]) for i in range(0, len(point))]
return [int((point[i]-origin[i]) / hashing_dist) for i in range(0, len(point))]
def in_memory(points, hashing_dist=0.005, origin=None):
if len(points) == 0:
return None
dimension = len(points[0])
    if origin is None:
origin = (0,) * dimension
keys = list(map(lambda point: hash_sample(point, hashing_dist, origin), points))
index = {
"hashing_dist": hashing_dist,
"dimension": dimension,
"origin": origin,
"points": points
}
for i in range(0, len(keys)):
key = '-'.join([str(k) for k in keys[i]])
if key not in index.keys():
index[key] = []
index[key].append(i)
return index
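# Illustrative usage sketch (coordinates are made-up): build the in-memory bucket index
# over (lat, lon) points and query the nearest neighbour of a probe point. The default
# metric is haversine, so the returned distance is in kilometres.
def _example_bucket_index():
    points = [(40.750, -73.990), (40.760, -73.980), (40.700, -74.000)]
    index = in_memory(points, hashing_dist=0.01)
    nearest, km = closest(index, (40.751, -73.991))
    return points[nearest], km    # expected: the first point, well under 1 km away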
| 1,745 | 26.28125 | 90 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/bucketwalk/filesystem.py
|
import os
import matplotlib
import pandas as pd
from geopy import Point
import uuid as IdGenerator
from geopy import distance
import multiprocessing as mp
from math import sin, cos, atan2, floor, sqrt, radians
import smaframework.tool.paralel as Paralel
from functools import partial
import itertools, json, sys
def histogram(path, layers, show=True, max_x=None, save_log=True, **kwargs):
if isinstance(layers, str):
layers = [layers]
for layer in layers:
if 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1:
pool = mp.Pool(int(kwargs['pool_size']))
result = pool.map(load_csv, [(path, file, layer) for file in os.listdir(path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
result = []
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
result.append(load_csv((path, file, layer)))
if len(result) == 0:
print('Layer %s empty!' % layer)
continue
frame = pd.concat(list(result))
if len(frame) == 0:
print('Layer %s empty!' % layer)
continue
frame = frame.groupby(['lat_bucket','lon_bucket','timestamp_bucket']).size()
maximum = frame.max()
frame = frame.map(lambda a: a / maximum)
if pd.__version__ >= '0.17.0':
frame.sort_values(ascending=False, inplace=True)
else:
frame.sort(ascending=False, inplace=True)
if len(frame) == 0:
print('Layer %s empty!' % layer)
continue
if save_log:
frame.to_csv('data/results/%s-bucket-histogram.log' % layer)
frame.index = [i for i in range(0, len(frame))]
plot = frame.plot(kind='line', label=layer + (' (max: %d)' % maximum))
if max_x:
plot.axis([0,max_x,0,maximum+1])
# plot.set_yscale('log', nonposy='clip')
plot.legend()
if show:
matplotlib.pyplot.show(block=True)
else:
fig = plot.get_figure()
fig.savefig('data/results/bucket-histogram.png')
# df1 = pd.read_csv(
# 'data/twitter-bucket-histogram.log',
# header=None,
# skiprows=1,
# low_memory=False,
# memory_map=True,
# names=['i','j','k','c']
# )['c']
# paddf = pd.DataFrame([0 for i in range(200610, 2000000)])
# df1 = pd.concat([df1, paddf])
# df1.index = [i for i in range(0,len(df1))]
# plot = df1.plot(kind='line', label='twitter: (200610 used buckets)', color='r')
# # maximum = df1.max()
# # print(maximum)
# plot.axis([0,500000,0,1200])
# # matplotlib.pyplot.yscale('log')
# plot.text(0, 420, '(0, 420)', color='r')
# # plot.plot(0, 420, 'ro')
# # plot.text(350000, 800, "twitter: \nyellow_taxis (1996165 used buckets)")
# # plot.plot(187, 10, 'ro')
# plot.legend()
# df2 = pd.read_csv(
# 'data/yellow_taxis-bucket-histogram.log',
# header=None,
# skiprows=1,
# low_memory=False,
# memory_map=True,
# names=['i','j','k','c']
# )['c']
# df2.index = [i for i in range(0,len(df2))]
# plot = df2.plot(kind='line', label='yellow_taxis: (1996165 used buckets)', color='b')
# # maximum = df2.max()
# # print(maximum)
# plot.axis([0,500000,0,1200])
# # matplotlib.pyplot.yscale('log')
# plot.text(0, 1130, '(0, 1130)', color='b')
# # plot.plot(0, 1130, 'bo')
# # plot.text(500686, 10, '10', color='b')
# # plot.plot(500686, 10, 'bo')
# plot.legend()
# matplotlib.pyplot.show(block=True)
# # main('data/buckets/', 'twitter', 16, False, 4)
# # main('data/buckets/', 'yellow_taxis', 16, False, 4)
def index(path, hashing_distance, origin, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
result = pool.map(load_csv, [(path, file) for file in os.listdir(path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
result = []
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
result.append(load_csv((path, file)))
frame = pd.concat(list(result))
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print('Data loaded...', flush=True)
frame = Paralel.map(frame, hash_df, hashing_distance, origin)
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print('Data hashed...', flush=True)
buckets_location = path + 'bucketwalk-index/'
if not os.path.exists(buckets_location):
os.makedirs(buckets_location)
frame = frame.groupby(by=[key + '_bucket' for key in sorted(hashing_distance.keys())])
for name, g in frame:
format_str = '-%d' * len(hashing_distance.keys())
format_str = format_str[1:]
format_str = '%s'+ format_str +'.csv'
format_params = [buckets_location]
format_params.extend(name)
format_params = tuple(format_params)
g.to_csv(format_str % format_params, mode='a')
if 'verbose' in kwargs.keys() and kwargs['verbose']:
print('Buckets indexed...', flush=True)
json.dump({
"hashing_distance": hashing_distance,
"origin": origin,
"path": buckets_location
}, open(buckets_location + 'metadata.json', 'w+'))
def load_csv(args):
path, file = args
return pd.read_csv(
os.path.join(path, file),
header=0,
low_memory=False,
memory_map=True,
index_col='id'
)
def hash_sample(hashing_distance, origin, point):
return int((point - origin) / hashing_distance)
def hash_df(params):
df, args, kwargs = (params)
hashing_distance, origin = args
for dimension in sorted(hashing_distance.keys()):
df['%s_bucket' % dimension] = df[dimension].map(partial(hash_sample, hashing_distance[dimension], origin[dimension]))
return df
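# Illustrative sketch of the bucket hashing (values are made-up): each dimension is shifted
# by its origin and divided by its hashing distance, so rows that are close in space and
# time end up sharing the same *_bucket triplet.
def _example_hash_df():
    df = pd.DataFrame({
        'lat': [40.7128, 40.7135],
        'lon': [-74.0060, -74.0055],
        'timestamp': [1500000000, 1500001800],
    })
    hashing_distance = {'lat': 0.01, 'lon': 0.01, 'timestamp': 3600}
    origin = {'lat': 40.0, 'lon': -75.0, 'timestamp': 1500000000}
    return hash_df((df, (hashing_distance, origin), {}))    # both rows share the same buckets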
def closest(index_path, point, dist, radius=1, **kwargs):
index = json.load(open(index_path, 'r'))
key = {}
for c in point.keys():
key[c] = hash_sample(index['hashing_distance'][c], index['origin'][c], point[c])
cube = get_cube(index, key, radius)
if 'layers' in kwargs.keys():
cube = cube[cube['layer'].isin(kwargs['layers'])]
cube['distance'] = cube[list(sorted(index['origin'].keys()))].apply(dist, axis=1)
minimum = cube['distance'].min()
return cube[cube['distance'] == minimum]
def get_cube(index, key, radius):
ranges = list(map(lambda i: list(range(key[i] - radius, key[i] + radius + 1)), [i for i in sorted(key.keys())]))
keys = list(itertools.product(*ranges))
keys = list(map(lambda key: '-'.join([str(k) for k in key]), keys))
result = []
for k in keys:
filename = index['path'] + k + '.csv'
if not os.path.isfile(filename):
continue
result.append(pd.read_csv(filename))
if len(result) == 0:
return pd.DataFrame()
return pd.concat(result, axis=0)
| 7,400 | 32.488688 | 164 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/bucketwalk/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/clustering/flow.py
|
import smaframework.tool.distribution as Distribution
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import DBSCAN
from hdbscan import HDBSCAN
import pandas as pd
import numpy as np
import sklearn, json
import uuid as IdGenerator  # needed for the default output file names below
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
def cluster_hdbscan(filename, origin_columns, destination_columns, **kwargs):
frame = pd.read_csv(filename, header=0, low_memory=True)
output_file = kwargs['output_file'] if 'output_file' in kwargs.keys() else 'data/results/flow-cluster-' + IdGenerator.uuid4().hex
pool_size = int(kwargs['pool_size']) if 'pool_size' in kwargs.keys() else 1
gmaps_key = kwargs['gmaps_key'] if 'gmaps_key' in kwargs.keys() else False
min_size = kwargs['min_size'] if 'min_size' in kwargs.keys() else int(len(frame)/1000)
frame = clusterize_hdbscan(frame, origin_columns, destination_columns, min_size, pool_size)
return summarize_data(frame, gmaps_key, output_file, origin_columns, destination_columns)
def cluster(filename, origin_columns, destination_columns, **kwargs):
frame = pd.read_csv(filename, header=0, low_memory=True)
min_samples = 15 if 'min_samples' not in kwargs.keys() else kwargs['min_samples']
nnalgorithm = 'ball_tree' if 'nnalgorithm' not in kwargs.keys() else kwargs['nnalgorithm'] # algorithm for NN query {‘auto’, ‘ball_tree’, ‘kd_tree’, ‘brute’}
output_file = kwargs['output_file'] if 'output_file' in kwargs.keys() else 'data/results/flow-cluster-' + IdGenerator.uuid4().hex
pool_size = int(kwargs['pool_size']) if 'pool_size' in kwargs.keys() else 1
gmaps_key = kwargs['gmaps_key'] if 'gmaps_key' in kwargs.keys() else False
if 'eps' in kwargs.keys():
eps_origin = kwargs['eps']
eps_destination = kwargs['eps']
else:
sharpener = len(frame) / 1000
eps_origin = select_eps(frame[origin_columns], min_samples) / sharpener
eps_destination = select_eps(frame[destination_columns], min_samples) / sharpener
print('INFO: eps(origin=%f, destination=%f) for file=%s' % (eps_origin, eps_destination, output_file))
frame = clusterize(frame, eps_origin, eps_destination, min_samples, origin_columns, destination_columns, nnalgorithm, pool_size)
return summarize_data(frame, gmaps_key, output_file, origin_columns, destination_columns, {
'min_samples': float(min_samples),
'eps_origin': float(eps_origin),
'eps_destination': float(eps_destination)
})
def summarize_data(frame, gmaps_key, output_file, origin_columns, destination_columns, metadata={}):
frame.to_csv(output_file + '.csv')
origin_frame = frame.groupby('labels_origin')
destination_frame = frame.groupby('labels_destination')
flow_frame = frame.groupby(['labels_origin', 'labels_destination'])
result = []
flows = []
for (group, df) in flow_frame:
if group[0] == -1 or group[1] == -1:
continue
origin = origin_frame.get_group(group[0])
origin_region = get_region(origin, origin_columns)
origin_centroid = origin.mean()
destination = destination_frame.get_group(group[1])
destination_region = get_region(destination, destination_columns)
destination_centroid = destination.mean()
item = {}
for key in origin_columns:
item[key] = origin_centroid[key]
for key in destination_columns:
item[key] = destination_centroid[key]
item['flow'] = len(df)
result.append(item)
if gmaps_key:
flow = {
'weight': len(df),
'origin_region_id': int(group[0]),
'destination_region_id': int(group[1]),
'origin_centroid': {
'lat': origin_centroid[origin_columns[0]],
'lng': origin_centroid[origin_columns[1]]
},
'destination_centroid': {
'lat': destination_centroid[destination_columns[0]],
'lng': destination_centroid[destination_columns[1]]
},
'origin_region': json.loads(origin_region),
'destination_region': json.loads(destination_region),
'link': [{
'lat': origin_centroid[origin_columns[0]],
'lng': origin_centroid[origin_columns[1]]
}, {
'lat': destination_centroid[destination_columns[0]],
'lng': destination_centroid[destination_columns[1]]
}]
}
flows.append(flow)
frame = pd.DataFrame(result)
if pd.__version__ >= '0.17.0':
flow_thershold = select_knee(frame['flow'].sort_values().values)
else:
flow_thershold = select_knee(frame['flow'].sort().values)
print('INFO: flow_thershold=%f for file=%s' % (flow_thershold, output_file))
frame = frame[frame['flow'] > flow_thershold]
if gmaps_key:
flows = list(filter(lambda flow: flow['weight'] >= flow_thershold, flows))
with open('templates/google-flow.html', 'r') as file:
template = file.read()
template = template.replace('<?=FLOWS?>', json.dumps(flows)).replace('<?=KEY?>', gmaps_key)
with open(output_file + '.html', 'w+') as outfile:
outfile.write(template)
with open(output_file + '.json', 'w+') as outfile:
json.dump(flows, outfile)
metadata['flow_thershold'] = float(flow_thershold)
with open(output_file + '.metadata.json', 'w+') as outfile:
json.dump(metadata, outfile)
return frame
def get_region(df, columns):
df = df[columns]
df.columns = ['lat', 'lon']
df = Distribution.get_region(df)
df = '{"lat": '+ df['lat'].map(str) +', "lng": '+ df['lon'].map(str) +', "teta": '+ df['teta'].map(str) +'}'
return '[' + df.str.cat(sep=',') + ']'
# from: https://www.quora.com/What-is-the-mathematical-characterization-of-a-%E2%80%9Cknee%E2%80%9D-in-a-curve
def select_knee(y):
try:
dy = np.gradient(y)
ddy = np.gradient(dy)
x = np.arange(len(y))
dx = np.gradient(x)
ddx = np.gradient(dx)
k = np.absolute(dx*ddy-dy*ddx) / np.power(dx*dx+dy*dy, 3/2)
dk = np.gradient(k)
return y[np.argmin(dk)]
except Exception as e:
print(len(y))
return y[int(len(y) / 2)]
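# Illustrative sketch of the knee heuristic on a synthetic sorted-distance curve (values
# are made-up): the curvature-based rule is intended to pick a threshold from around the
# bend of the "hockey stick" of sorted k-NN distances.
def _example_select_knee():
    flat = np.linspace(0.1, 0.5, 90)     # many small, slowly growing distances
    steep = np.linspace(0.6, 5.0, 10)    # few large distances after the bend
    return select_knee(np.concatenate([flat, steep]))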
def clusterize_hdbscan(frame, origin_columns, destination_columns, min_size, pool_size=1):
print('INFO: running HDBSCAN')
clusterer_origin = HDBSCAN(min_cluster_size=min_size).fit(frame[origin_columns].values)
clusterer_destination = HDBSCAN(min_cluster_size=min_size).fit(frame[destination_columns].values)
print('INFO: finished HDBSCAN with nclusters(origin=%d, destination=%d)' % (int(clusterer_origin.labels_.max()), int(clusterer_destination.labels_.max())))
return pd.concat([frame, pd.DataFrame({'labels_origin': clusterer_origin.labels_, 'labels_destination': clusterer_destination.labels_})], axis=1)
def clusterize(frame, eps_origin, eps_destination, min_samples, origin_columns, destination_columns, nnalgorithm='ball_tree', pool_size=1):
clusterer_origin = None
clusterer_destination = None
print('INFO: running DBSCAN')
if sklearn.__version__ > '0.15.2':
print("\033[93mWARNING: in case of high memory usage error, downgrade scikit: `pip install scikit-learn==0.15.2`\033[0m")
clusterer_origin = DBSCAN(eps=eps_origin, min_samples=min_samples, n_jobs=pool_size, algorithm=nnalgorithm).fit(frame[origin_columns].as_matrix())
clusterer_destination = DBSCAN(eps=eps_destination, min_samples=min_samples, n_jobs=pool_size, algorithm=nnalgorithm).fit(frame[destination_columns].as_matrix())
else:
clusterer_origin = DBSCAN(eps=eps_origin, min_samples=min_samples).fit(frame[origin_columns].as_matrix())
clusterer_destination = DBSCAN(eps=eps_destination, min_samples=min_samples).fit(frame[destination_columns].as_matrix())
print('INFO: finished DBSCAN with nclusters(origin=%d, destination=%d)' % (int(clusterer_origin.labels_.max()), int(clusterer_destination.labels_.max())))
return pd.concat([frame, pd.DataFrame({'labels_origin': clusterer_origin.labels_, 'labels_destination': clusterer_destination.labels_})], axis=1)
def select_eps(frame, min_samples):
nbrs = NearestNeighbors(n_neighbors=min_samples).fit(frame)
distances, indices = nbrs.kneighbors(frame)
distances = distances[:,distances.shape[1] - 1]
distances.sort()
return select_knee(distances)
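# Illustrative end-to-end sketch of the eps heuristic on synthetic data (not part of the
# original pipeline): for two tight blobs, the knee of the sorted k-NN distance curve gives
# a small eps candidate suitable for DBSCAN.
def _example_select_eps(min_samples=15, seed=0):
    rng = np.random.RandomState(seed)
    blob_a = rng.normal(loc=[0.0, 0.0], scale=0.01, size=(200, 2))
    blob_b = rng.normal(loc=[1.0, 1.0], scale=0.01, size=(200, 2))
    frame = pd.DataFrame(np.vstack([blob_a, blob_b]), columns=['lat', 'lon'])
    return select_eps(frame, min_samples)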
| 8,664 | 43.896373 | 169 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/clustering/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/magtools/heatmap.py
|
import os, json
import multiprocessing as mp
import pandas as pd
import numpy as np
def _heatmap_file(args):
filename, layer = args
df = pd.read_csv(filename, header=0)
df = df[df['layer'] == layer]
return df.apply(lambda r: '{location: new google.maps.LatLng(%f, %f)},' % (r['lat'], r['lon']), axis=1)
def _heatmap_bucket(args):
filename, layer = args
df = pd.read_csv(filename, header=0)
df = df[df['layer'] == layer]
lat = df['lat'].mean()
lon = df['lon'].mean()
weight = len(df)
return '{location: new google.maps.LatLng(%f, %f), weight: %f},' % (lat, lon, weight)
def layer(layer, key, path, **kwargs):
if 'buckets' in kwargs.keys() and kwargs['buckets']:
f = _heatmap_bucket
else:
f = _heatmap_file
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
results = pool.map(f, [(os.path.join(path, file), layer) for file in os.listdir(path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
results = []
for file in os.listdir(path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
results.append(f((os.path.join(path, file), layer)))
if 'buckets' not in kwargs.keys() or not kwargs['buckets']:
frame = pd.concat(results)
if len(frame) == 0:
return None
results = frame[0].tolist()
results = '\n'.join(results)
with open('templates/google-heatmap.html', 'r') as file:
template = file.read()
template = template.replace('<?=LIST?>', results).replace('<?=KEY?>', key)
if 'filename' in kwargs.keys():
filename = kwargs['filename']
else:
filename = 'heatmap-%s.html' % layer
with open('data/results/' + filename, 'w+') as file:
file.write(template)
| 2,006 | 33.603448 | 168 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/magtools/mag.py
|
import os, re
import pandas as pd
_mag = None
def load(path='data/mag/', **kwargs):
global _mag
if 'target' in kwargs.keys() and kwargs['target']:
_mag = kwargs['target']
else:
_mag = {}
if 'file_regex' not in kwargs.keys():
kwargs['file_regex'] = re.compile(r"^(.*)\.csv$")
if 'nodes' not in _mag.keys():
frames = []
nodes_path = os.path.join(path, 'nodes')
for file in os.listdir(nodes_path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
frames.append(pd.read_csv(os.path.join(nodes_path, file)))
_mag['nodes'] = pd.concat(frames, axis=0, ignore_index=True)
if 'edges' not in _mag.keys():
frames = []
edges_path = os.path.join(path, 'edges')
for file in os.listdir(edges_path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
frames.append(pd.read_csv(os.path.join(edges_path, file)))
_mag['edges'] = pd.concat(frames, axis=0, ignore_index=True)
return _mag
def nodes_by(prop, value, **kwargs):
path = 'data/mag/' if 'mag_dir' not in kwargs.keys() else kwargs['mag_dir']
if not _mag:
load(path, **kwargs)
frame = _mag['nodes']
return frame[frame[prop] == value]
def get_simple_path(ids, start=None, end=None, **kwargs):
path = 'data/mag/' if 'mag_dir' not in kwargs.keys() else kwargs['mag_dir']
if not _mag:
load(path, **kwargs)
frame = _mag['edges']
edges = frame[frame['source'].isin(ids) | frame['target'].isin(ids)]
if not start or not end:
return edges
edges.to_csv('data/edges.csv')
target = edges[edges['source'] == start]
path = []
while (not target['source'].empty) and target['source'].values[0] != end:
source = target
path.append(source)
target = edges[edges['source'] == source['target'].values[0]]
if not target['source'].empty:
frame = pd.concat(path, axis=0, ignore_index=True)
frame = pd.concat([frame['source'], frame['target']], axis=1).stack().reset_index(drop=True)
frame.drop_duplicates(inplace=True)
return _mag['nodes'][_mag['nodes']['id'].isin(frame)]
target = edges[edges['target'] == start]
path = []
while (not target['target'].empty) and target['target'].values[0] != end:
source = target
path.append(source)
target = edges[edges['target'] == source['source'].values[0]]
if target['target'].empty:
return pd.DataFrame()
frame = pd.concat(path, axis=0, ignore_index=True)
frame = pd.concat([frame['source'], frame['target']], axis=1).stack().reset_index(drop=True)
frame.drop_duplicates(inplace=True)
return _mag['nodes'][_mag['nodes']['id'].isin(frame)]
| 2,844 | 31.701149 | 100 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/magtools/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/hybrid_multimodal_router/router.py
|
from smaframework.common.address_keywords_extension_map import parse_str as parse_address_str
import smaframework.extractor.here.traffic as HereTrafficExtractor
import smaframework.extractor.google.directions as GoogleDirectionsExtractor
import smaframework.extractor.uber as UberExtractor
import numpy as np
import networkx as nx
import haversine
import time
def analyse_ways(app_id, app_code, ways):
result = []
for way in ways:
routes = []
for route in way:
route = analyse_route(app_id, app_code, route)
routes.append(route)
result.append(routes)
return result
def analyse_route(app_id, app_code, route):
resulting_route = []
for step in route:
if step['distance'] > 500:
observations = int(step['distance'] / 500) + 1
for i in range(observations, 0, -1):
origin = list(step['origin'])
destination = list(step['destination'])
destination = [abs(origin[0] - destination[0])/i + origin[0], abs(origin[1] - destination[1])/i + origin[1]]
step_ = step.copy()
step_['origin'] = origin
step_['destination'] = destination
step_['duration'] = step['duration'] / i
step_['distance'] = step['distance'] / i
o = [(destination[0] + origin[0])/2, (destination[1] + origin[1])/2]
traffic = HereTrafficExtractor.lat_lon_zoom(app_id, app_code, o[0], o[1])
step_['traffic'] = match_data(step['address_keywords'], traffic, True)
resulting_route.append(step_)
step['duration'] = step['duration'] - step_['duration']
step['distance'] = step['distance'] - step_['distance']
step['origin'] = destination
origin = destination
else:
o = [(step['destination'][0] + step['origin'][0])/2, (step['destination'][1] + step['origin'][1])/2]
traffic = HereTrafficExtractor.lat_lon_zoom(app_id, app_code, o[0], o[1])
step['traffic'] = match_data(step['address_keywords'], traffic, True)
resulting_route.append(step)
return resulting_route
def match_data(keywords, traffic, summary=False):
max_score = -1
best_observations = []
for k, observation in traffic['data'].items():
kws = parse_address_str(observation['DE'])
score = len(set(keywords).intersection(kws))
if score == max_score:
best_observations.append(observation)
elif score > max_score:
max_score = score
best_observations = [observation]
if not summary:
return best_observations
jfp = [o['JF'] for o in best_observations if o['QD'] == '+']
jfp = np.mean(jfp) if len(jfp) > 0 else 0
jfn = [o['JF'] for o in best_observations if o['QD'] == '-']
jfn = np.mean(jfn) if len(jfn) > 0 else 0
jf = max(jfp, jfn)
cnp = [o['CN'] for o in best_observations if o['QD'] == '+']
cnp = np.mean(cnp) if len(cnp) > 0 else 0
cnn = [o['CN'] for o in best_observations if o['QD'] == '-']
cnn = np.mean(cnn) if len(cnn) > 0 else 0
cn = max(cnp, cnn)
return {'JF': jf, 'CN': cn}
def merge_segments(driving_ways, **kwargs):
config = {}
config.update({'thershold': 5})
config.update(kwargs)
result = []
for way in driving_ways:
routes = []
for route in way:
route = merge_route_segments(route, config['thershold'])
routes.append(route)
result.append(routes)
return result
def merge_route_segments(route, thershold=5, merge=False):
result = []
previous = {'class': ''}
pointer = None
first_critical = None
last_critical = None
for step in route:
clazz = 'critical' if step['traffic']['JF'] >= thershold else 'non-critical'
s = None
if clazz == previous['class']:
s = {
'origin': previous['origin'],
'destination': step['destination'],
'duration': step['duration'] + previous['duration'],
'distance': step['distance'] + previous['distance'],
'class': clazz
}
result[pointer] = s
else:
s = {
'origin': step['origin'],
'destination': step['destination'],
'duration': step['duration'],
'distance': step['distance'],
'class': clazz
}
result.append(s)
pointer = len(result) - 1
if clazz == 'critical':
            if first_critical is None:
first_critical = pointer
last_critical = pointer
previous = s
if not merge or first_critical == last_critical:
return result
# merge intermediary regions
duration = 0
distance = 0
for i in range(first_critical, last_critical):
duration = duration + result[i]['duration']
distance = distance + result[i]['distance']
s = {
'origin': result[first_critical]['origin'],
'destination': result[last_critical]['destination'],
'duration': duration,
'distance': distance,
'class': 'critical'
}
result[first_critical : last_critical+1] = [s]
return result
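# Illustrative sketch on synthetic steps (not real API output): consecutive steps whose jam
# factor reaches the threshold collapse into a single 'critical' segment, everything else
# into 'non-critical' segments.
def _example_merge_route_segments():
    route = [
        {'origin': [0, 0], 'destination': [0, 1], 'duration': 60, 'distance': 400, 'traffic': {'JF': 2}},
        {'origin': [0, 1], 'destination': [0, 2], 'duration': 90, 'distance': 450, 'traffic': {'JF': 6}},
        {'origin': [0, 2], 'destination': [0, 3], 'duration': 80, 'distance': 420, 'traffic': {'JF': 7}},
        {'origin': [0, 3], 'destination': [0, 4], 'duration': 50, 'distance': 380, 'traffic': {'JF': 1}},
    ]
    segments = merge_route_segments(route, 5)
    # expected classes: non-critical, critical (170 s / 870 m merged), non-critical
    return [(s['class'], s['duration'], s['distance']) for s in segments]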
def select_best_transit_route(trip, score_function):
selected = None
min_cost = float("inf")
for route in trip:
cost = 0
for (i, step) in enumerate(route):
cost = cost + score_function(i, step)
if cost < min_cost:
min_cost = cost
selected = route
return selected
'''
* Get the available options to replace a set of trips from the same source to the same sink position.
*
* @param access_keys - The keys to access Uber and GoogleMaps APIs.
* @param trips - The set of trips to be evaluated.
'''
def get_available_options(access_keys, trips, **kwargs):
config = {
"uber_modality": 'uberX',
"prices": {
"TRANSIT": 2.50,
"WALKING": 0,
},
'score_function': lambda i, s: s['duration']
}
config.update(kwargs)
result = []
for driving_ways in trips:
result.append(get_trip_available_options(access_keys, driving_ways, **config))
return result
def _capsule(params):
(fn, params) = params
kwargs = {}
if isinstance(params[-1], dict):
kwargs = params[-1]
del params[-1]
return fn(*params, **kwargs)
'''
* Get the available options to replace a set of driving ways from the same source to the same sink position.
*
* PSEUDO-CODE:
*
* def get_hybrid_route(origin, destination):
* driving_way <- get_driving_way(origin, destination) # retrieves driving path using Google Directions
* transit_start_candidates <- new list()
* transit_end_candidates <- new list()
* options <- new list()
*
* foreach (index, step) in driving_way.steps:
* if step.length > 500:
* fragments <- split_step(step, 500) # split the step in 500m chunks
* splice(driving_way.steps, index, 1, fragments) # replace 1 position from the index with the specified list
* continue
*
* traffic = get_traffic_data(step.origin, step.destination) # consult the traffic data from HERE in the middle position between origin and destination, also performs the address-GPS matching using USPS address abbreviation dataset
* if is_congested(traffic):
* append(transit_start_candidates, step.origin)
* append(transit_end_candidates, step.destination)
*
* foreach (index, ts) in transit_start_candidates:
* option <- get_hpv_started_option(origin, ts, destination) # gets an HPV route from origin to TS and a transit route from TS to destination
* append(options, option)
*
* foreach (index, te) in transit_end_candidates:
* option <- get_hpv_ended_option(origin, te, destination) # gets a transit route from origin to TE and an HPV route from TE to destination
* append(options, option)
*
* foreach (index, ts) in transit_start_candidates:
* foreach (jindex, te) in transit_end_candidates:
* mixed_options <- get_mixed_option(origin, ts, te, destination) # gets four options where origin to TS is made by HPV or WALK, TE to destination is made by HPV or WALK and TS to TE is made by transit
* concat(options, mixed_options) # join lists
*
* return options
*
* @param access_keys - The keys to access Uber and GoogleMaps APIs.
* @param driving_ways - The set of driving ways to be evaluated.
'''
def get_trip_available_options(access_keys, driving_ways, **config):
start = driving_ways[0][0]['origin']
end = driving_ways[0][-1]['destination']
congested_times = []
for driving_way in driving_ways:
congested_time = 0
for segment in driving_way:
if segment['class'] == 'critical':
congested_time = congested_time + segment['duration']
congested_times.append(congested_time)
transit_starts = []
transit_ends = []
congested_time = 0
for (i, driving_way) in enumerate(driving_ways):
for segment in driving_way:
if segment['class'] == 'critical':
transit_starts.append({'position': segment['origin'], 'traffic': congested_time})
transit_ends.append({'position': segment['destination'], 'traffic': congested_times[i] - congested_time})
congested_time = congested_time + segment['duration']
options = get_taxi_started_options(access_keys, transit_starts, start, end, config)
options.extend(get_taxi_ended_options(access_keys, transit_ends, start, end, config))
options.extend(get_skip_traffic_only_options(access_keys, start, end, transit_starts, transit_ends, config))
full_taxi_trip = UberExtractor.estimate(access_keys['UBER_SERVER_TOKEN'], start, end, 1, config['uber_modality'])
full_taxi_step = [{
"address_keywords": [],
"duration": full_taxi_trip['duration'],
"congested_time": congested_time,
"wait": full_taxi_trip['wait'],
"travel_mode": "TAXI",
"vehicle_type": config['uber_modality'],
"origin": full_taxi_trip['origin'],
"distance": full_taxi_trip['distance'],
"destination": full_taxi_trip['destination'],
"price": full_taxi_trip['price']
}]
full_transit_trip = GoogleDirectionsExtractor.extract_single(access_keys['GOOGLE_MAPS_KEY'], start, end, int(time.time()), 'transit', config['prices'])
full_transit_trip = select_best_transit_route(full_transit_trip, config['score_function'])
options.append(full_taxi_step)
options.append(full_transit_trip)
return options
'''
* Choose one of the given options based on the score function minimization.
*
* @param options - The given options
* @param score_function - The function to evaluate the score of a step in the option. Receives as a params:
* * i - the step counter
* * data - the step data (price, duration, distance, mode, vehicle, origin, end)
'''
def choose(options, score_function):
min_score = float("inf")
for option in options:
current_score = 0
for i, step in enumerate(option):
current_score = current_score + score_function(i, step)
if current_score < min_score:
min_score = current_score
selected = option
return selected
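# Illustrative sketch on synthetic options: choose() minimizes the sum of the score function
# over the steps of an option; scoring by plain duration, the 20-minute option beats the
# 30-minute one. Note that an empty options list would leave nothing to return.
def _example_choose():
    options = [
        [{'duration': 600}, {'duration': 1200}],    # 30 minutes in total
        [{'duration': 500}, {'duration': 700}],     # 20 minutes in total
    ]
    return choose(options, lambda i, step: step['duration'])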
def get_skip_traffic_only_options(access_keys, start, end, transit_starts, transit_ends, config):
options = []
for ts in transit_starts:
for te in transit_ends:
transit_route = GoogleDirectionsExtractor.extract_single(access_keys['GOOGLE_MAPS_KEY'], ts['position'], te['position'], int(time.time()), 'transit', config['prices'])
transit_route = select_best_transit_route([summarize_steps(steps) for steps in transit_route], config['score_function'])
end_step = transit_route[-1]
start_step = transit_route[0]
length = len(transit_route)
if transit_route[-1]['travel_mode'] == 'WALKING':
end_nearest_stop = transit_route[-1]['origin']
if length > 1:
del transit_route[-1]
else:
end_nearest_stop = transit_route[-1]['destination']
if transit_route[0]['travel_mode'] == 'WALKING':
start_nearest_stop = transit_route[0]['destination']
if length > 1:
del transit_route[0]
else:
start_nearest_stop = transit_route[0]['origin']
if length == 1 and transit_route[0]['travel_mode'] == 'WALKING':
end_nearest_stop = start_nearest_stop
del transit_route[0]
start_taxi_segment = UberExtractor.estimate(access_keys['UBER_SERVER_TOKEN'], start, start_nearest_stop, 1, config['uber_modality'])
start_taxi_step = [{
"address_keywords": [],
"duration": start_taxi_segment['duration'],
"congested_time": ts['traffic'],
"wait": start_taxi_segment['wait'],
"travel_mode": "TAXI",
"vehicle_type": config['uber_modality'],
"origin": start_taxi_segment['origin'],
"distance": start_taxi_segment['distance'],
"destination": start_taxi_segment['destination'],
"price": start_taxi_segment['price']
}]
start_walk_segment = GoogleDirectionsExtractor.extract_single(access_keys['GOOGLE_MAPS_KEY'], start, start_nearest_stop, int(time.time()), 'walking', config['prices'])
start_walk_step = select_best_transit_route([summarize_steps(s) for s in start_walk_segment], config['score_function'])
end_taxi_segment = UberExtractor.estimate(access_keys['UBER_SERVER_TOKEN'], end_nearest_stop, end, 1, config['uber_modality'])
end_taxi_step = [{
"address_keywords": [],
"duration": end_taxi_segment['duration'],
"congested_time": te['traffic'],
"wait": end_taxi_segment['wait'],
"travel_mode": "TAXI",
"vehicle_type": config['uber_modality'],
"origin": end_taxi_segment['origin'],
"distance": end_taxi_segment['distance'],
"destination": end_taxi_segment['destination'],
"price": end_taxi_segment['price']
}]
end_walk_segment = GoogleDirectionsExtractor.extract_single(access_keys['GOOGLE_MAPS_KEY'], end_nearest_stop, end, int(time.time()), 'walking', config['prices'])
end_walk_step = select_best_transit_route([summarize_steps(s) for s in end_walk_segment], config['score_function'])
if start_walk_step and transit_route and end_walk_step:
options.append(summarize_steps(start_walk_step + transit_route + end_walk_step))
if start_walk_step and transit_route and end_taxi_step:
options.append(start_walk_step + transit_route + end_taxi_step)
if start_taxi_step and transit_route and end_walk_step:
options.append(start_taxi_step + transit_route + end_walk_step)
route = start_taxi_step + transit_route + end_taxi_step
if len(route) > 2:
options.append(route)
return options
def summarize_steps(steps):
result = []
current = {'travel_mode': '', 'phase': ''}
for (i, step) in enumerate(steps):
if current['travel_mode'] != step['travel_mode']:
if current['travel_mode'] != '':
result.append(current)
if current['phase'] == 'access' and step['travel_mode'] == 'TRANSIT':
current['next_mode'] = step['vehicle_type']
current = {
"address_keywords": [],
"duration": 0,
"wait": 0,
"travel_mode": step['travel_mode'],
"vehicle_type": step['vehicle_type'],
"origin": steps[0]['origin'],
"distance": 0,
"price": 0,
"destination": steps[-1]['destination'],
}
current['duration'] = current['duration'] + step['duration']
current['distance'] = current['distance'] + step['distance']
current['price'] = current['price'] + step['price']
current['wait'] = current['wait'] + step['wait']
current['phase'] = 'headway' if step['travel_mode'] != 'WALKING' else ('egress' if i == len(steps) - 1 else 'access')
if current['travel_mode'] != '':
result.append(current)
return result
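# Illustrative sketch on synthetic steps: consecutive steps with the same travel mode are
# collapsed into one summarized segment, durations/distances/prices/waits are accumulated,
# and each segment is labelled as access, headway or egress.
def _example_summarize_steps():
    steps = [
        {'travel_mode': 'WALKING', 'vehicle_type': 'WALKING', 'duration': 120, 'distance': 150,
         'price': 0, 'wait': 0, 'origin': [0, 0], 'destination': [0, 1]},
        {'travel_mode': 'TRANSIT', 'vehicle_type': 'SUBWAY', 'duration': 600, 'distance': 5000,
         'price': 2.5, 'wait': 180, 'origin': [0, 1], 'destination': [0, 2]},
        {'travel_mode': 'TRANSIT', 'vehicle_type': 'SUBWAY', 'duration': 300, 'distance': 2500,
         'price': 0, 'wait': 0, 'origin': [0, 2], 'destination': [0, 3]},
        {'travel_mode': 'WALKING', 'vehicle_type': 'WALKING', 'duration': 90, 'distance': 100,
         'price': 0, 'wait': 0, 'origin': [0, 3], 'destination': [0, 4]},
    ]
    return summarize_steps(steps)    # expected: walking access, subway headway, walking egress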
def get_taxi_ended_options(access_keys, transit_ends, start, end, config):
exchanges = len(transit_ends)
transit_trip_segments = GoogleDirectionsExtractor.extract(access_keys['GOOGLE_MAPS_KEY'], [start] * exchanges, [te['position'] for te in transit_ends], [int(time.time())] * exchanges, 'transit', config['prices'])
transit_ends_nearest_stops = []
for i in range(0, exchanges):
route = select_best_transit_route(transit_trip_segments[i], config['score_function'])
if route[-1]['travel_mode'] == 'WALKING':
transit_ends_nearest_stops.append(route[-1]['origin'])
else:
transit_ends_nearest_stops.append(route[-1]['destination'])
transit_trip_segments[i] = route
taxi_trip_segments = UberExtractor.extract(access_keys['UBER_SERVER_TOKEN'], transit_ends_nearest_stops, [end] * exchanges, [1]*exchanges, config['uber_modality'])
for i in range(0, exchanges):
if transit_trip_segments[i][-1]['travel_mode'] == 'WALKING':
del transit_trip_segments[i][-1]
taxi_step = {
"address_keywords": [],
"duration": taxi_trip_segments[i]['duration'],
"congested_time": transit_ends[i]['traffic'],
"wait": taxi_trip_segments[i]['wait'],
"travel_mode": "TAXI",
"vehicle_type": config['uber_modality'],
"origin": taxi_trip_segments[i]['origin'],
"distance": taxi_trip_segments[i]['distance'],
"destination": taxi_trip_segments[i]['destination'],
"price": taxi_trip_segments[i]['price']
}
transit_trip_segments[i].append(taxi_step)
return transit_trip_segments
def get_taxi_started_options(access_keys, transit_starts, start, end, config):
exchanges = len(transit_starts)
transit_trip_segments = GoogleDirectionsExtractor.extract(access_keys['GOOGLE_MAPS_KEY'], [ts['position'] for ts in transit_starts], [end] * exchanges, [int(time.time())] * exchanges, 'transit', config['prices'])
transit_starts_nearest_stops = []
for i in range(0, exchanges):
route = select_best_transit_route(transit_trip_segments[i], config['score_function'])
if route[0]['travel_mode'] == 'WALKING':
transit_starts_nearest_stops.append(route[0]['destination'])
else:
transit_starts_nearest_stops.append(route[0]['origin'])
transit_trip_segments[i] = route
taxi_trip_segments = UberExtractor.extract(access_keys['UBER_SERVER_TOKEN'], [start] * exchanges, transit_starts_nearest_stops, [1]*exchanges, config['uber_modality'])
for i in range(0, exchanges):
if transit_trip_segments[i][0]['travel_mode'] == 'WALKING':
transit_trip_segments[i] = transit_trip_segments[i][1:]
taxi_step = {
"address_keywords": [],
"duration": taxi_trip_segments[i]['duration'],
"wait": taxi_trip_segments[i]['wait'],
"congested_time": transit_starts[i]['traffic'],
"travel_mode": "TAXI",
"vehicle_type": config['uber_modality'],
"origin": taxi_trip_segments[i]['origin'],
"distance": taxi_trip_segments[i]['distance'],
"destination": taxi_trip_segments[i]['destination'],
"price": taxi_trip_segments[i]['price']
}
transit_trip_segments[i].insert(0, taxi_step)
return transit_trip_segments
| 20,794 | 40.424303 | 237 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/hybrid_multimodal_router/model.py
|
import math
from smaframework.tool.constants import miles2km
'''
* Evaluate the perceived time of travel.
* Based on Paper: Abrantes, P. A. L., & Wardman, M. R. (2011). Meta-analysis of UK values of travel time: An update. Transportation Research Part A: Policy and Practice, 45(1), 1–17. https://doi.org/10.1016/J.TRA.2010.08.003
*
* @param i - the step counter
* @param s - the step data
'''
def perceived_time(i, s):
if s['travel_mode'] == 'TAXI':
if 'congested_time' in s.keys():
score = (s['duration'] - s['congested_time']) + s['congested_time'] * 1.54 + s['wait'] * 1.7
else:
score = s['duration'] + s['wait'] * 1.7
elif s['travel_mode'] == 'WALKING':
# Based on Book Chapter: Ch5 Pg 5-11 - Transit Capacity and Quality of Service Manual, 3rd ed. describing predisposition to walk for reaching a rapid-transit mode
distance = (s['distance'] / 2) if s['phase'] == 'access' and s['next_mode'] in ['SUBWAY'] else s['distance']
score = s['duration'] * (1.65 / walkability(distance))
else:
score = s['duration'] * 0.78 + s['wait'] * 1.7
return score
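# Illustrative sketch with made-up steps: congested taxi seconds are inflated by 1.54 and
# waiting by 1.7, transit in-vehicle time is discounted to 0.78, and walking time is scaled
# by the inverse walkability of the distance covered.
def _example_perceived_time():
    taxi = {'travel_mode': 'TAXI', 'duration': 600, 'congested_time': 300, 'wait': 120}
    transit = {'travel_mode': 'TRANSIT', 'duration': 600, 'wait': 120}
    walk = {'travel_mode': 'WALKING', 'duration': 300, 'distance': 200, 'phase': 'egress'}
    # taxi: (600 - 300) + 300 * 1.54 + 120 * 1.7 = 966; transit: 600 * 0.78 + 120 * 1.7 = 672
    return perceived_time(0, taxi), perceived_time(1, transit), perceived_time(2, walk)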
'''
* Based on Report: Cycling and Walking: the grease in our mobility chain. Pg. 21 - https://www.researchgate.net/publication/311773579_Cycling_and_walking_the_grease_in_our_mobility_chain
* Interpolate curve for Acceptable walking distance between parking place and store using `fit cubic {0,1}, {290,0.8}, {435,0.6}, {520,0.4}, {675,0.2}, {1000,0}` on WolframAlpha.
* Note: covers all trip purposes, not only transit access.
*
* @param x - the walking distance in meters.
'''
def walking_acceptability(x):
y = 2.78409e-9 * pow(x, 3) - 4.03692e-6 * pow(x, 2) - 2.53449e-4 * x + 1.00038
return y if y > 0.01 else 0.01 # ensure non-zero division
'''
* Based on Paper: Yang, Y., & Diez-Roux, A. V. (2012). Walking Distance by Trip Purpose and Population Subgroups. American Journal of Preventive Medicine, 43(1), 11–19. https://doi.org/10.1016/J.AMEPRE.2012.03.015
* Note: covers all trip purposes, not only transit access.
*
* @param x - the walking distance in meters.
'''
def walking_distance_decay(x):
x = x / 1000 / miles2km # meters to miles
y = 0.98 * math.exp(-1.71 * x)
return y if y > 0.01 else 0.01 # ensure non-zero division
'''
* Based on Book Chapter: Ch4 Pg 4-18 - Transit Capacity and Quality of Service Manual, 3rd ed. https://www.researchgate.net/publication/293811979_Transit_Capacity_and_Quality_of_Service_Manual_3rd_ed
* Interpolate curve for Washington DC (low income) using `interpolating polynomial {0,1},{0.075,0.75},{0.15,0.5},{0.25,0.25},{0.45, 0}` or `fit exponential {0,1},{0.075,0.75},{0.15,0.5},{0.25,0.25},{0.45, 0}` on WolframAlpha.
* Note: specific for transit access.
*
* @param x - the walking distance in meters.
'''
def walkability(x, mode='exponential'):
x = x / 1000 / miles2km # meters to miles
if mode == 'exponential':
y = 1.0388 * math.exp(-5.36561 * x)
else:
y = -45.8554 * pow(x, 4) + 40.8289 * pow(x, 3) - 7.38095 * pow(x, 2) - 2.99008 * x + 1
return y if y > 0.01 else 0.01 # ensure non-zero division
'''
* Reshaped curve from Based on Book Chapter: Ch4 Pg 4-18 - Transit Capacity and Quality of Service Manual, 3rd ed. https://www.researchgate.net/publication/293811979_Transit_Capacity_and_Quality_of_Service_Manual_3rd_ed
* Fit curve for Washington DC (low income) using `fit exponential {0,1},{0.075,0.75},{0.15,0.5},{0.25,0.25},{0.45, 0}` on WolframAlpha.
* Use lambda and reshape the curve to start close to 0 and rise.
* Note: specific for transit access.
*
* @param x - the walking distance in meters.
'''
def walking_monetary_impact(x):
x = x / 1000 # meters to km
y = 0.001 * math.exp(5.36561 * x)
return y if y > 0.01 else 0.01 # ensure non-zero division
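# Illustrative comparison of the walking decay curves at a few made-up distances (meters):
# each curve decreases with distance and is floored at 0.01 so it can safely be used as a
# divisor in the perceived-time model.
def _example_walking_curves():
    return {
        meters: (
            round(walking_acceptability(meters), 3),
            round(walking_distance_decay(meters), 3),
            round(walkability(meters), 3),
        )
        for meters in (100, 400, 800)
    }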
'''
* Evaluate the perceived price per minute of trip.
*
* Uber price per minute extracted from: https://www.ridesharingdriver.com/how-much-does-uber-cost-uber-fare-estimator/
* Cost/Mile Cost/Minute Base Fare Booking Fee Minimum Fare
* UberX $0.90 $0.15 $0 $2.10 $5.60
* UberPool $0.85 $0.11 $0 $2.10 $5.60
* UberXL $1.55 $0.30 $1 $2.35 $8.35
* UberSelect $2.35 $0.40 $5 $2.35 $11.65
* UberBlack $3.55 $0.45 $8 n/a $15
* UberSUV $4.25 $0.55 $15 n/a $25
*
* TODO: evaluate price per minute on transit from real data.
*
* @param i - the step counter
* @param s - the step data
'''
def percived_price_per_minute(i, s):
# normalizer = 0
# if s['travel_mode'] == 'TAXI':
# normalizer = 0.15
# else:
# normalizer = 0.05 # TODO: evaluate from real data
from haversine import haversine
price = s['price'] if s['price'] > 0 else 2.5
duration = s['duration'] / 60
distance = haversine(tuple(s['origin']), tuple(s['destination']))
if distance == 0:
normalizer = float('inf')
else:
normalizer = duration * price / distance
# print(i, s['travel_mode'], duration, distance, price, normalizer)
return perceived_time(i, s) * normalizer
def perceived_score(coef, i, s):
score = coef * s['price'] + (1-coef) * percived_price_per_minute(i, s)
# print(i, s['travel_mode'], percived_price_per_minute(i, s), s['price'], score)
return score
| 5,509 | 44.53719 | 226 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/hybrid_multimodal_router/evaluator.py
|
import numpy as np
import pandas as pd
from haversine import haversine
import smaframework.analyzer.hybrid_multimodal_router.model as Model
def evaluate(trips, routes, group_id, profile=1):
frames = []
for route in routes:
if not isinstance(route, dict) or len(route['options']) == 0:
continue
metadatas = []
for option in route['options'][0]:
if not option:
continue
metadata = extract_metadata(option)
metadatas.append(metadata)
metadatas = pd.DataFrame(metadatas)
norm = metadatas[['duration', 'wait', 'congested_time', 'perceived_duration', 'cost', 'traversed_distance', 'walking_distance']].apply(lambda x: x / np.max(x))
metadatas = metadatas.join(norm, rsuffix='_norm')
metadatas['effective_cost'] = metadatas['duration'] * metadatas['cost']
metadatas['effective_cost_perceived'] = metadatas['perceived_duration'] * metadatas['cost']
metadatas['effective_cost_norm'] = metadatas['duration_norm'] * metadatas['cost_norm']
metadatas['effective_cost_perceived_norm'] = metadatas['perceived_duration_norm'] * metadatas['cost_norm']
metadatas['weight'] = trips[route['index']]['weight']
metadatas['origin_lat'] = trips[route['index']]['link'][0]['lat']
metadatas['origin_lon'] = trips[route['index']]['link'][0]['lng']
metadatas['destination_lat'] = trips[route['index']]['link'][1]['lat']
metadatas['destination_lon'] = trips[route['index']]['link'][1]['lng']
metadatas['group_id'] = group_id
distance = haversine(tuple(route['options'][0][0][0]['origin']), tuple(route['options'][0][0][-1]['destination']))
metadatas['distance'] = distance
frames.append(metadatas)
if len(frames) == 0:
return pd.DataFrame()
return pd.concat(frames, ignore_index=True)
'''
TAXI - only TAXI
TRANSIT - no TAXI
HYBRID - has TAXI and OTHER
WALKING - only WALKING
'''
def select_category(modes):
modes = list(set(modes))
if len(modes) == 1:
if modes[0] == 'uberX':
return 'TAXI'
elif modes[0] == 'WALKING':
return 'WALKING'
return 'TRANSIT'
return 'HYBRID' if 'uberX' in modes else 'TRANSIT'
def extract_metadata(option):
traversed_distance = 0
duration = 0
perceived_duration = 0
cost = 0
walking_distance = 0
modes = []
congested_time = 0
wait = 0
for (i, step) in enumerate(option):
traversed_distance = traversed_distance + abs(step['distance'])
duration = duration + abs(step['duration'])
cost = cost + abs(step['price'])
wait = wait + abs(step['wait'])
modes.append(step['vehicle_type'] if step['travel_mode'] in ['TRANSIT', 'TAXI'] else step['travel_mode'])
if step['travel_mode'] == 'WALKING':
walking_distance = walking_distance + abs(step['distance'])
if 'congested_time' in step.keys():
congested_time = congested_time + abs(step['congested_time'])
perceived_duration = perceived_duration + abs(Model.perceived_time(i, step))
return {
'traversed_distance': traversed_distance,
'duration': duration,
'perceived_duration': perceived_duration,
'cost': cost,
'walking_distance': walking_distance,
'modes': modes,
'category': select_category(modes),
'congested_time': congested_time,
'wait': wait,
}
| 3,570 | 33.669903 | 167 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/analyzer/hybrid_multimodal_router/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/tool/constants.py
|
earth_radius = 6371000.7
miles2km = 1.60934
| 51 | 16.333333 | 24 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/tool/distribution.py
|
import time, math
import numpy as np
import pandas as pd
import pylab as pl
import scipy.interpolate
def get_region(df, angle_step=5, **kwargs):
df = df.copy()
min_lat = df['lat'].min()
max_lat = df['lat'].max()
min_lon = df['lon'].min()
max_lon = df['lon'].max()
origin = ((max_lat - min_lat) / 2 + min_lat, (max_lon - min_lon) / 2 + min_lon)
df['teta'] = np.arctan2((df['lon'] - origin[1]), (df['lat'] - origin[0]))
df['r'] = (df['lat'] - origin[0]) * np.cos(df['teta'])
df['teta'] = np.round(df['teta'] * 180 / math.pi / angle_step) * angle_step
df = df[df['r'] == df.groupby('teta')['r'].transform(max)]
df = df.groupby('teta').max().reset_index()
if pd.__version__ >= '0.17.0':
return df[['lat', 'lon', 'teta']].sort_values(by='teta')
else:
return df[['lat', 'lon', 'teta']].sort('teta')
def _angle_to_point(point, centre):
'''calculate angle in 2-D between points and x axis'''
delta = point - centre
res = np.arctan(delta[1] / delta[0])
if delta[0] < 0:
res += np.pi
return res
def _draw_triangle(p1, p2, p3, **kwargs):
tmp = np.vstack((p1,p2,p3))
x,y = [x[0] for x in zip(tmp.transpose())]
pl.fill(x,y, **kwargs)
def area_of_triangle(p1, p2, p3):
'''calculate area of any triangle given co-ordinates of the corners'''
return np.linalg.norm(np.cross((p2 - p1), (p3 - p1)))/2.
def convex_hull(points, graphic=False, smidgen=0.0075):
'''
Calculate subset of points that make a convex hull around points
Recursively eliminates points that lie inside two neighbouring points until only convex hull is remaining.
:Parameters:
points : ndarray (2 x m)
array of points for which to find hull
graphic : bool
use pylab to show progress?
smidgen : float
offset for graphic number labels - useful values depend on your data range
:Returns:
hull_points : ndarray (2 x n)
convex hull surrounding points
'''
    points = points[['lat', 'lon']].values.T
if graphic:
pl.clf()
pl.plot(points[0], points[1], 'ro')
n_pts = points.shape[1]
assert(n_pts > 5)
centre = points.mean(1)
if graphic: pl.plot((centre[0],),(centre[1],),'bo')
angles = np.apply_along_axis(_angle_to_point, 0, points, centre)
pts_ord = points[:,angles.argsort()]
if graphic:
        for i in range(n_pts):
pl.text(pts_ord[0,i] + smidgen, pts_ord[1,i] + smidgen, \
'%d' % i)
pts = [x[0] for x in zip(pts_ord.transpose())]
prev_pts = len(pts) + 1
k = 0
while prev_pts > n_pts:
prev_pts = n_pts
n_pts = len(pts)
if graphic: pl.gca().patches = []
i = -2
while i < (n_pts - 2):
Aij = area_of_triangle(centre, pts[i], pts[(i + 1) % n_pts])
Ajk = area_of_triangle(centre, pts[(i + 1) % n_pts], \
pts[(i + 2) % n_pts])
Aik = area_of_triangle(centre, pts[i], pts[(i + 2) % n_pts])
if graphic:
_draw_triangle(centre, pts[i], pts[(i + 1) % n_pts], \
facecolor='blue', alpha = 0.2)
_draw_triangle(centre, pts[(i + 1) % n_pts], \
pts[(i + 2) % n_pts], \
facecolor='green', alpha = 0.2)
_draw_triangle(centre, pts[i], pts[(i + 2) % n_pts], \
facecolor='red', alpha = 0.2)
if Aij + Ajk < Aik:
if graphic: pl.plot((pts[i + 1][0],),(pts[i + 1][1],),'go')
del pts[i+1]
i += 1
n_pts = len(pts)
k += 1
df = pd.DataFrame(np.asarray(pts))
df.columns = ['lat', 'lon']
return df
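# A minimal usage sketch: convex_hull expects a DataFrame with 'lat'/'lon'
# columns (the ndarray wording above describes the internal representation)
# and needs more than 5 points. The coordinates below are hypothetical.
def _convex_hull_example():
    rng = np.random.RandomState(0)
    pts = pd.DataFrame({'lat': rng.rand(20), 'lon': rng.rand(20)})
    return convex_hull(pts)  # DataFrame of hull vertices ordered by angle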
def smoother_region(points):
x, y = np.array(points['lat'].tolist()), np.array(points['lon'].tolist())
nt = np.linspace(0, 1, 100)
t = np.zeros(x.shape)
t[1:] = np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)
t = np.cumsum(t)
t /= t[-1]
x2 = scipy.interpolate.spline(t, x, nt)
y2 = scipy.interpolate.spline(t, y, nt)
return pd.DataFrame({'lat': x2, 'lon': y2})
| 4,186 | 33.04065 | 110 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/tool/paralel.py
|
# import dask.dataframe as dd
import multiprocessing as mp
import numpy as np
import pandas as pd
# def prepare(df, **kwargs):
# if 'pool_size' in kwargs.keys():
# kwargs['npartitions'] = kwargs['pool_size']
# del kwargs['pool_size']
# elif 'npartitions' not in kwargs.keys() and 'chunksize' not in kwargs.keys():
# kwargs['npartitions'] = 1
# return dd.from_pandas(df, **kwargs)
def map(df, callback, *args, **kwargs):
if 'pool_size' not in kwargs.keys():
kwargs['pool_size'] = 1
    chunksize = max(1, len(df) // int(kwargs['pool_size']))
    groups = df.groupby(np.arange(len(df)) // chunksize)
    with mp.Pool(int(kwargs['pool_size'])) as pool:
result = pool.map(callback, [(df, args, kwargs) for g, df in groups])
return pd.concat(result, axis=0)
| 782 | 33.043478 | 83 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/tool/conversor.py
|
import numpy as np
import smaframework.tool.constants as Constants
def kmph2mps(speed):
'''
    * Converts a speed from Km/h to m/s.
*
* @param speed the speed to convert.
* @return float the converted speed.
'''
return speed / 3.6
def deg2rad(degree):
'''
    * Converts an angle from Deg to Rad.
*
* @param degree the angle to convert.
* @return float the converted angle.
'''
rad = degree * 2 * np.pi / 360
return rad
def meters2geodist(x, merge=True, lat=0):
'''
    * Converts a distance in meters to geo-coordinates.
*
* @param x the distance to convert.
* @param merge whether lat and long distances are meant to be merged (as the maximum) or return separately.
* @param lat the assumed reference latitude in degrees (default 0 deg).
* @return float the converted distance.
'''
R = Constants.earth_radius
dLat = x / R * 180 / np.pi
dLon = x / (R * np.cos(np.pi * lat / 180)) * 180 / np.pi
# print(dLat, dLon)
if merge:
return max([dLat, dLon])
return (dLat, dLon)
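# A small worked example with hypothetical inputs: 100 m at latitude 40 deg
# gives roughly (0.000899, 0.001173) degrees for (dLat, dLon); merge=True
# would return the larger of the two values.
def _meters2geodist_example():
    return meters2geodist(100, merge=False, lat=40)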
| 1,143 | 26.902439 | 116 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/tool/mag.py
|
import os
import math
import datetime
import pandas as pd
import uuid as IdGenerator
import multiprocessing as mp
def edges(path, edge_type=None, load_nodes=False, **kwargs):
edges_path = os.path.join(path, 'edges/')
if 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1:
pool = mp.Pool(int(kwargs['pool_size']))
result = pool.map(load_edges_csv, [(edges_path, file, edge_type) for file in os.listdir(edges_path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
result = []
for file in os.listdir(edges_path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
result.append(load_edges_csv((edges_path, file, edge_type)))
df = pd.concat(result)
if load_nodes:
df = load_nodes_for_edges(df, os.path.join(path, 'nodes/'), **kwargs)
return df
def nodes(path, layer=None, **kwargs):
nodes_path = os.path.join(path, 'nodes/')
if 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1:
pool = mp.Pool(int(kwargs['pool_size']))
result = pool.map(load_nodes_csv, [(nodes_path, file, layer) for file in os.listdir(nodes_path) if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
result = []
for file in os.listdir(nodes_path):
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
result.append(load_nodes_csv((nodes_path, file, layer)))
return pd.concat(result)
def load_nodes_csv(args):
path, file, layer = args
df = pd.read_csv(os.path.join(path, file), header=0)
if layer:
return df[(df.layer == layer)]
return df
def load_edges_csv(args):
path, file, edge_type = args
df = pd.read_csv(os.path.join(path, file), header=0)
if edge_type:
return df[(df.type == edge_type)]
return df
def load_nodes_for_edges(edges, path, **kwargs):
if 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1:
pool = mp.Pool(int(kwargs['pool_size']))
result = pool.map(load_nodes_csv, [(path, file, None) for file in os.listdir(path) if 'nodes_file_regex' not in kwargs.keys() or kwargs['nodes_file_regex'].match(file)])
pool.close()
pool.join()
else:
result = []
for file in os.listdir(path):
if 'nodes_file_regex' not in kwargs.keys() or kwargs['nodes_file_regex'].match(file):
result.append(load_nodes_csv((path, file, None)))
nodes = pd.concat(result)
return (edges
.merge(nodes, left_on='source', right_on='id', suffixes=('_edge', ''))
.merge(nodes, left_on='target', right_on='id', suffixes=('_source', '_target'))
.drop('id_source', axis=1)
.drop('id_target', axis=1))
def add_node(node, **kwargs):
if 'filename' not in kwargs.keys():
filename = 'symulated-' + datetime.datetime.now().strftime('%Y-%m-%d') + '.csv'
else:
filename = kwargs['filename']
filename = os.path.join('data/mag/nodes/', filename)
if not os.path.isfile(filename):
with open(filename, 'w+') as file:
file.writelines('id,uid,timestamp,lat,lon,layer')
node['id'] = IdGenerator.uuid4().hex
with open(filename, 'a+') as file:
file.write("\n%s,%s,%d,%f,%f,%s" % (node['id'], node['uid'],node['timestamp'],node['lat'],node['lon'],node['layer']))
return node['id']
def add_edge(edge, **kwargs):
if 'filename' not in kwargs.keys():
filename = 'symulated-' + datetime.datetime.now().strftime('%Y-%m-%d') + '.csv'
else:
filename = kwargs['filename']
filename = os.path.join('data/mag/edges/', filename)
if not os.path.isfile(filename):
with open(filename, 'w+') as file:
file.writelines('source,target,id,type')
edge['id'] = IdGenerator.uuid4().hex
with open(filename, 'a+') as file:
file.write("\n%s,%s,%s,%s" % (edge['source'], edge['target'],edge['id'],edge['type']))
return edge['id']
def uid_entries_distribution(path, layer, **kwargs):
df = nodes(path, layer, **kwargs)
df = df[['id', 'uid', 'timestamp']]
day = 24 * 60 * 60
df['timestamp'] = df.apply(lambda r: math.floor(r['timestamp'] / day), axis=1)
df = df.groupby(['uid', 'timestamp']).count()
per_uid_distribution = df['id'].reset_index()
uid_amount = per_uid_distribution.copy().groupby(['timestamp']).count().reset_index()
uid_amount = math.floor(uid_amount['uid'].mean())
per_uid_distribution = per_uid_distribution.groupby(['id']).count().reset_index()[['id', 'uid']]
sumation = per_uid_distribution['uid'].sum()
per_uid_distribution['uid'] = per_uid_distribution['uid'].map(lambda r: r / sumation).to_frame()
per_uid_distribution.columns = ['amount', 'probability']
return {
'amounts': per_uid_distribution['amount'].values,
'probabilities': per_uid_distribution['probability'].values,
'count': uid_amount
}
| 5,339 | 35.326531 | 182 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/tool/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/shapefile.py
|
import smaframework.analyzer.bucketwalk.memory as BucketWalk
from pyproj import Proj, transform
from haversine import haversine
import fiona, os, re
import uuid as IdGenerator
import pandas as pd
import numpy as np
import multiprocessing as mp
import json
def get_route(params):
feature, stops, inProj, outProj, kwargs = params
error = 0.05
route = []
for segment in feature['geometry']['coordinates']:
if any(isinstance(i, tuple) for i in segment):
for p in segment:
point = tuple(reversed(transform(inProj, outProj, p[0], p[1])))
route.append(point)
else:
point = tuple(reversed(transform(inProj, outProj, segment[0], segment[1])))
route.append(point)
if 'index' in kwargs.keys():
index = kwargs['index']
else:
index = BucketWalk.in_memory([stop['location'] for stop in stops])
line_stops = []
route = clean_route(route)
for p in route:
i, distance = BucketWalk.closest(index, p)
if distance < error:
line_stops.append(stops[i])
seen = []
remove = []
for i, s in enumerate(line_stops):
id_prop = 'stop_id' if 'stop_id' in s['properties'].keys() else 'station_id'
if s['properties'][id_prop] in seen:
remove.append(i)
else:
seen.append(s['properties'][id_prop])
for r in sorted(remove, reverse=True):
del line_stops[r]
return {
"path": route,
"stops": line_stops,
"properties": feature['properties']
}
def clean_route(route):
cleaned = []
for i in range(0, len(route)):
if i > 0 and route[i][0] == route[i-1][0] and route[i][1] == route[i-1][1]:
continue
cleaned.append(route[i])
if i > 0 and route[0][0] == route[len(cleaned) - 1][0] and route[0][1] == route[len(cleaned) - 1][1]:
del cleaned[len(cleaned) - 1]
return cleaned
def transit_routes(routes_filepath, stops_filepath, layer, inproj='epsg:2263', outproj='epsg:4326', **kwargs):
inProj = Proj(init=inproj, preserve_units = True)
outProj = Proj(init=outproj)
stops = [{
"location": tuple(reversed(transform(inProj, outProj, f['geometry']['coordinates'][0], f['geometry']['coordinates'][1]))),
"properties": f['properties']
} for f in fiona.open(stops_filepath)]
index = BucketWalk.in_memory([stop['location'] for stop in stops])
if 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1:
pool = mp.Pool(int(kwargs['pool_size']))
routes = pool.map(get_route, [(feature, stops, inProj, outProj, {"index": index}) for feature in fiona.open(routes_filepath)])
pool.close()
pool.join()
else:
routes = [get_route((feature, stops, inProj, outProj, {"index": index})) for feature in fiona.open(routes_filepath)]
if 'uid_property' not in kwargs.keys():
kwargs['uid_property'] = 'route_shor'
if 'geoid_property' not in kwargs.keys():
kwargs['geoid_property'] = 'GEOID'
df = []
for route in routes:
i = 0
for stop in route['stops']:
df.append([IdGenerator.uuid4().hex, route['properties'][kwargs['uid_property']], i, stop['location'][0], stop['location'][1], layer, stop['properties'][kwargs['geoid_property']]])
i = i + 1
df = pd.DataFrame.from_records(df, columns=['id', 'uid', 'timestamp', 'lat', 'lon', 'layer', 'geoid'])
if 'filename' not in kwargs.keys():
kwargs['filename'] = 'data/entries/%s.csv' % layer
if not os.path.exists('data/entries/'):
os.makedirs('data/entries/')
if 'batch_size' in kwargs.keys():
for g, frame in df.groupby(np.arange(len(df)) // kwargs['batch_size']):
frame.to_csv(re.sub(r'\.(.*)$', str(g) + r'.\1', kwargs['filename']), index=False)
else:
df.to_csv(kwargs['filename'], index=False)
return routes
| 3,961 | 33.452174 | 191 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/twitterstream.py
|
import tweepy, time, datetime, logging, os
from tweepy import OAuthHandler
import pandas as pd
import pyproj
import shapely
import shapely.ops as ops
from shapely.geometry.polygon import Polygon as ShapelyPolygon
from functools import partial
"""
Twitter data extractor. The available data collected by tweepy in the current version of the API is:
[
'_api',
'_json',
'created_at',
'id',
'id_str',
'text',
'truncated',
'entities',
'metadata',
'source',
'source_url',
'in_reply_to_status_id',
'in_reply_to_status_id_str',
'in_reply_to_user_id',
'in_reply_to_user_id_str',
'in_reply_to_screen_name',
'author',
'user',
'geo',
'coordinates',
'place',
'contributors',
'retweeted_status',
'is_quote_status',
'retweet_count',
'favorite_count',
'favorited',
'retweeted',
'lang'
]
"""
def extract(access, geocode, layer='twitter', **kwargs):
if 'consumer_key' in access.keys() and 'consumer_secret' in access.keys() and 'access_token' in access.keys() and 'access_secret' in access.keys():
auth = OAuthHandler(access['consumer_key'], access['consumer_secret'])
auth.set_access_token(access['access_token'], access['access_secret'])
if 'items_per_request' not in kwargs.keys():
kwargs['items_per_request'] = 100
if 'wait' not in kwargs.keys():
kwargs['wait'] = 60
if 'max_area' not in kwargs.keys():
kwargs['max_area'] = 200
api = tweepy.API(auth)
count = 0
total = 0
data = []
while True:
try:
collection = tweepy.Cursor(api.search, geocode=geocode).items(kwargs['items_per_request'])
for status in collection:
if status.coordinates:
data.append((status.user.screen_name + ':' + str(status.user.id), int(time.mktime(status.created_at.timetuple())), status.coordinates['coordinates'][1], status.coordinates['coordinates'][0], layer))
total = total + 1
elif status.place:
polygon = ShapelyPolygon(status.place.bounding_box.coordinates[0])
geom_area = ops.transform(
partial(
pyproj.transform,
pyproj.Proj(init='EPSG:4326'),
pyproj.Proj(
proj='aea',
lat1=polygon.bounds[1],
lat2=polygon.bounds[3])),
polygon)
if geom_area.area < kwargs['max_area']:
point = polygon.representative_point().xy
data.append((status.user.screen_name + ':' + str(status.user.id), int(time.mktime(status.created_at.timetuple())), point[1][0], point[0][0], layer))
# except tweepy.TweepError as e:
except:
logpath = 'data/logs/'
if not os.path.exists(logpath):
os.makedirs(logpath)
filename = datetime.datetime.fromtimestamp(time.time()).strftime(logpath + 'error_%Y%m%d.log')
logging.basicConfig(filename=filename, filemode='a+')
logging.exception("message")
if 'force' in kwargs.keys() and kwargs['force']:
time.sleep(5 * kwargs['wait'])
continue
if len(data) > 0:
frame = pd.DataFrame(data)
frame.columns = ['uid', 'timestamp', 'lat', 'lon', 'layer']
data = []
if 'filename' not in kwargs.keys():
kwargs['filename'] = 'data/entries/twitter.csv'
if not os.path.exists('data/entries/'):
os.makedirs('data/entries/')
header = not os.path.exists(kwargs['filename'])
with open(kwargs['filename'], 'a+') as file:
frame.to_csv(file, header=header, index=False)
count = count + 1
if 'limit' in kwargs.keys() and count >= kwargs['limit']:
break
time.sleep(kwargs['wait'])
| 4,250 | 34.425 | 218 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/openweathermap.py
|
import urllib, json, uuid, os, datetime, time, logging, random, math, traceback
import pandas as pd
from shapely.geometry import Point
def extract(access, region, layer='openweathermap', **kwargs):
if 'samples' not in kwargs.keys():
kwargs['samples'] = 3
if 'wait' not in kwargs.keys():
kwargs['wait'] = 3600
if 'api_version' not in kwargs.keys():
kwargs['api_version'] = '2.5'
if 'force' not in kwargs.keys():
kwargs['force'] = True
count = 0
minx, miny, maxx, maxy = region.bounds
while True:
try:
point = Point(random.uniform(minx, maxx), random.uniform(miny, maxy))
while not region.contains(point):
point = Point(random.uniform(minx, maxx), random.uniform(miny, maxy))
point = point.xy
content = urllib.request.urlopen("http://api.openweathermap.org/data/%s/weather?lat=%f&lon=%f&appid=%s" % (kwargs['api_version'], point[1][0], point[0][0], access)).read().decode('utf-8')
d = json.loads(content)
if 'filename' not in kwargs.keys():
kwargs['filename'] = 'data/entries/openweathermap.csv'
if not os.path.exists('data/entries/'):
os.makedirs('data/entries/')
header = not os.path.exists(kwargs['filename'])
with open(kwargs['filename'], 'a+') as file:
if header:
file.write(",".join([
'uid',
'timestamp',
'lat',
'lon',
'layer',
'weather',
'base',
'temperature',
'pressure',
'humidity',
'temp_min',
'temp_max',
'sea_level',
'grnd_level',
'wind_speed',
'wind_deg',
'cloudiness',
'sunrise',
'sunset',
'city_name'
]) + "\n")
name = None
if 'district' in kwargs.keys():
name = kwargs['district'].encode('utf-8')
elif 'name' in d.keys():
name = d['name'].encode('utf-8')
data = (
'openweathermap_%s' % uuid.uuid4().hex,
d['dt'] or math.floor(time.time()),
d['coord']['lat'] if 'coord' in d.keys() and 'lat' in d['coord'].keys() else None,
d['coord']['lon'] if 'coord' in d.keys() and 'lon' in d['coord'].keys() else None,
layer,
d['weather'][0]['description'] if 'weather' in d.keys() and len(d['weather']) > 0 else None,
d['base'] if 'base' in d.keys() else None,
d['main']['temp'] if 'main' in d.keys() and 'temp' in d['main'].keys() else None,
d['main']['pressure'] if 'main' in d.keys() and 'pressure' in d['main'].keys() else None,
d['main']['humidity'] if 'main' in d.keys() and 'humidity' in d['main'].keys() else None,
d['main']['temp_min'] if 'main' in d.keys() and 'temp_min' in d['main'].keys() else None,
d['main']['temp_max'] if 'main' in d.keys() and 'temp_max' in d['main'].keys() else None,
d['main']['sea_level'] if 'main' in d.keys() and 'sea_level' in d['main'].keys() else None,
d['main']['grnd_level'] if 'main' in d.keys() and 'grnd_level' in d['main'].keys() else None,
d['wind']['speed'] if 'wind' in d.keys() and 'speed' in d['wind'].keys() else None,
d['wind']['deg'] if 'wind' in d.keys() and 'deg' in d['wind'].keys() else None,
d['clouds']['all'] if 'clouds' in d.keys() and 'all' in d['clouds'].keys() else None,
d['sys']['sunrise'] if 'sys' in d.keys() and 'sunrise' in d['sys'].keys() else None,
d['sys']['sunset'] if 'sys' in d.keys() and 'sunset' in d['sys'].keys() else None,
name
)
file.write(",".join(map(lambda n: str(n), data)) + "\n")
count = count + 1
if 'limit' in kwargs.keys() and count >= kwargs['limit']:
break
            time.sleep(math.floor(kwargs['wait'] / kwargs['samples']))
except:
if 'force' in kwargs.keys() and kwargs['force']:
logpath = 'data/logs/'
if not os.path.exists(logpath):
os.makedirs(logpath)
filename = datetime.datetime.fromtimestamp(time.time()).strftime(logpath + 'error_%Y%m%d.log')
logging.basicConfig(filename=filename, filemode='a+')
logging.exception("message")
time.sleep(kwargs['wait'])
continue
else:
traceback.print_exc()
return
| 5,191 | 44.946903 | 199 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/uber.py
|
from smaframework.tool.constants import miles2km
import urllib.request
import json, math
import multiprocessing as mp
'''
* Estimate the duration and cost of a list of trips. Response provided using meters for distance, seconds for time and average cost for price.
*
* @param token - The Uber API token to be used in the request
* @param departures - The position (lat, lon) of departure of the trip
* @param arrivals - The position (lat, lon) of arrival of the trip
* @param seat_counts - The amount of travelers
* @param modality - The Uber modality (e.g. uberX, uberXL, POOL)
'''
def extract(token, departures, arrivals, seat_counts=None, modality='uberX', **kwargs):
if seat_counts == None:
seat_counts = [1] * len(departures)
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
trips = pool.map(_function_capsule, [(estimate, token, departures[i], arrivals[i], seat_counts[i], modality) for i in range(0, len(departures))])
pool.close()
pool.join()
else:
trips = []
for i in range(0, len(departures)):
trips.append(estimate(token, departures[i], arrivals[i], seat_counts[i], modality))
return trips
def _function_capsule(params):
fn, token, departure, arrival, seat_count, modality = params
return fn(token, departure, arrival, seat_count, modality)
'''
* Estimate the duration and cost of a trip. Response provided using meters for distance, seconds for time and average cost for price.
*
* @param token - The Uber API token to be used in the request
* @param departure - The position (lat, lon) of departure of the trip
* @param arrival - The position (lat, lon) of arrival of the trip
* @param seat_count - The amount of travelers
* @param modality - The Uber modality (e.g. uberX, uberXL, POOL)
'''
def estimate(token, departure, arrival, seat_count=1, modality='uberX'):
estimates = get_url(token, 'https://api.uber.com/v1.2/estimates/price?', {
"start_latitude": departure[0],
"start_longitude": departure[1],
"end_latitude": arrival[0],
"end_longitude": arrival[1],
"seat_count": seat_count,
})
trip = {}
for estimate in estimates['prices']:
if estimate['display_name'] == modality:
trip = {
"distance": math.ceil(estimate['distance'] * miles2km * 1000),
"duration": estimate['duration'],
"currency": estimate['currency_code'],
"price": math.ceil((estimate['low_estimate'] + estimate['high_estimate']) / 2)
}
break
waits = get_url(token, 'https://api.uber.com/v1.2/estimates/time?', {
"start_latitude": departure[0],
"start_longitude": departure[1]
})
for estimate in waits['times']:
if estimate['display_name'] == modality:
trip['wait'] = estimate['estimate']
break
trip['origin'] = departure
trip['destination'] = arrival
trip['phase'] = 'headway'
return trip
def get_url(token, url, params):
request = urllib.request.Request(url + urllib.parse.urlencode(params))
request.add_header('Authorization', 'Token %s' % token)
request.add_header('Accept-Language', 'en_US')
request.add_header('Content-Type', 'application/json')
response = urllib.request.urlopen(request).read().decode("utf-8")
return json.loads(response)
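# A minimal usage sketch, assuming a hypothetical server token and NYC
# coordinates; it performs live Uber API calls, so it is guarded and only
# runs when the module is executed directly.
if __name__ == '__main__':
    _token = 'YOUR_UBER_SERVER_TOKEN' # placeholder, not a real credential
    _trip = estimate(_token, (40.748817, -73.985428), (40.641311, -73.778139))
    print(_trip.get('duration'), _trip.get('price'), _trip.get('wait'))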
| 3,506 | 39.310345 | 153 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/csv.py
|
import os
import multiprocessing as mp
import numpy as np
import pandas as pd
import time
import datetime as dt
import uuid as IdGenerator
def extract_file(args):
file, source, dest, nodes, layer, config = args
filename = os.path.join(source, file)
df = pd.read_csv(filename, header=0)
if 'id' not in df.columns:
df['id'] = df.index.map(lambda r: int(r))
df = df[list(set().union(*nodes))]
cdf = pd.DataFrame()
for node in nodes:
tdf = df[node]
tdf.columns = ['uid', 'timestamp', 'lat', 'lon']
cdf = pd.concat([cdf, tdf])
df = cdf
df['layer'] = layer
if 'datetime_format' in config.keys() and config['datetime_format'] != '%u':
timezone = time.strftime("%z", time.gmtime())
timezone = int(timezone.replace('+', '')) / 100 * 60 * 60
df['timestamp'] = df['timestamp'].map(lambda r: int(time.mktime(dt.datetime.strptime(r, config['datetime_format']).timetuple())) + timezone)
else:
df['timestamp'] = df['timestamp'].map(lambda r: int(r))
if 'max_lat' in config.keys() and 'min_lat' in config.keys():
df = df[(df.lat <= config['max_lat']) & (df.lat >= config['min_lat'])]
if 'max_lon' in config.keys() and 'min_lon' in config.keys():
df = df[(df.lon <= config['max_lon']) & (df.lon >= config['min_lon'])]
if 'max_timestamp' in config.keys() and 'min_timestamp' in config.keys():
df = df[(df.timestamp <= config['max_timestamp']) & (df.timestamp >= config['min_timestamp'])]
file_id = IdGenerator.uuid4().hex
pd.options.mode.chained_assignment = None
df['uid'] = df['uid'].map(lambda x: str(x) + file_id)
pd.options.mode.chained_assignment = 'warn'
if not os.path.exists(dest):
os.makedirs(dest)
df.to_csv(os.path.join(dest, file), index=False)
def extract(source, dest, layer, nodes, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
filelist = os.listdir(source)
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
pool.map(extract_file, [(file, source, dest, nodes, layer, kwargs) for file in filelist if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file)])
pool.close()
pool.join()
else:
for file in filelist:
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
extract_file((file, source, dest, nodes, layer, kwargs))
| 2,523 | 36.117647 | 170 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/google/transit.py
|
import os
import numpy as np
import pandas as pd
import datetime as dt
import uuid as IdGenerator
import multiprocessing as mp
import urllib
import json
import time
def extract_url(params):
(app_key, departure, arrival, date, mode, kwargs) = params
kwargs["origin"] = '%f,%f' % departure
kwargs["destination"] = '%f,%f' % arrival
kwargs["key"] = app_key
kwargs["mode"] = mode
kwargs["departure_time"] = '%d' % time.mktime(date.timetuple())
url = 'https://maps.googleapis.com/maps/api/directions/json?' + urllib.parse.urlencode(kwargs)
response = urllib.request.urlopen(url).read()
response = json.loads(response.decode("utf-8"))
routes = []
for route in response['routes']:
r = []
for leg in route['legs']:
for step in leg['steps']:
r.append({
"travel_mode": step['travel_mode'],
"duration": step['duration']['value'], # seconds
"origin": (step['start_location']['lat'], step['start_location']['lng']),
"destination": (step['end_location']['lat'], step['end_location']['lng']),
"distance": step['distance']['value'], # meters
"vehicle_type": None if step['travel_mode'] != 'TRANSIT' else step['transit_details']['line']['vehicle']['type']
})
routes.append(r)
return routes
def extract(app_key, departures, arrivals, dates, mode, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
del kwargs['pool_size']
pool = mp.Pool(pool_size)
trips = pool.map(extract_url, [(app_key, departures[i], arrivals[i], dates[i], mode, kwargs) for i in range(0, len(departures))])
pool.close()
pool.join()
else:
trips = []
for i in range(0, len(departures)):
trips.append(extract_url((app_key, departures[i], arrivals[i], dates[i], mode, kwargs)))
return trips
# def get_lines(trips, **kwargs):
| 2,124 | 33.836066 | 137 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/google/directions.py
|
import numpy as np
import pandas as pd
import datetime as dt
import uuid as IdGenerator
import multiprocessing as mp
import os, urllib, json, time, re
from smaframework.common.address_keywords_extension_map import address_keywords_extensions
from smaframework.common.address_keywords_extension_map import parse_str as parse_address_str
'''
* Obtain the suggested routes for a given departure, arrival, date and mode. The params are given as a single tuple.
*
* @param app_key - The Google API key to perform the request.
* @param departure - The location (lat, lon) of departure.
* @param arrival - The location (lat, lon) of arrival.
* @param date - The departure time.
* @param mode - The travel mode (e.g., TRANSIT, WALKING, DRIVING).
* @param kwargs - Other optional params as a dict.
'''
def extract_url(params):
(app_key, departure, arrival, date, mode, prices, kwargs) = params
kwargs["origin"] = '%f,%f' % tuple(departure)
kwargs["destination"] = '%f,%f' % tuple(arrival)
kwargs["key"] = app_key
kwargs["mode"] = mode
kwargs["alternatives"] = "true"
kwargs["departure_time"] = str(date) if isinstance(date, int) else '%d' % time.mktime(date.timetuple())
kwargs["units"] = 'metric' if "units" not in kwargs.keys() else kwargs["units"]
url = 'https://maps.googleapis.com/maps/api/directions/json?' + urllib.parse.urlencode(kwargs)
response = urllib.request.urlopen(url).read().decode("utf-8")
response = json.loads(response)
if response['status'] == 'OVER_QUERY_LIMIT':
raise ValueError('Google says: You have exceeded your daily request quota for this API.')
routes = []
for route in response['routes']:
r = []
for leg in route['legs']:
arrival_time = int(kwargs["departure_time"])
for i, step in enumerate(leg['steps']):
step = parse_step(step, prices, arrival_time)
step['phase'] = 'headway' if mode != 'transit' or step['travel_mode'] == 'TRANSIT' else ('egress' if i == len(leg['steps']) - 1 else 'access')
if step['phase'] == 'access':
step['next_mode'] = leg['steps'][i+1]['transit_details']['line']['vehicle']['type']
arrival_time = arrival_time + step['duration'] + step['wait']
r.append(step)
routes.append(r)
return routes
'''
* Obtain the suggested routes for a given departure, arrival, date and mode.
*
* @param app_key - The Google API key to perform the request.
* @param departure - The location (lat, lon) of departure.
* @param arrival - The location (lat, lon) of arrival.
* @param date - The departure time.
* @param mode - The travel mode (e.g., TRANSIT, WALKING, DRIVING).
* @param kwargs - Other optional params as a dict.
'''
def extract_single(app_key, departure, arrival, date, mode, prices, **kwargs):
return extract_url((app_key, departure, arrival, date, mode, prices, kwargs))
'''
* Parses a step in a Google Route to collect relevant data.
*
* @param step - the object representing the step.
'''
def parse_step(step, prices={}, arrival_time=0):
address_keywords = []
    if step['travel_mode'] == 'DRIVING':
        matches = re.findall(r"<b>(.*?)</b>", step['html_instructions'])
        for m in matches:
            if len(m.split()) > 1:
                address_keywords.extend(parse_address_str(m))
vehicle_type = None if step['travel_mode'] != 'TRANSIT' else step['transit_details']['line']['vehicle']['type']
wait = step['transit_details']['departure_time']['value'] - arrival_time if step['travel_mode'] == 'TRANSIT' else 0
return {
"travel_mode": step['travel_mode'],
"duration": step['duration']['value'], # seconds
"wait": wait, # seconds
"origin": (step['start_location']['lat'], step['start_location']['lng']),
"destination": (step['end_location']['lat'], step['end_location']['lng']),
"distance": step['distance']['value'], # meters
"vehicle_type": vehicle_type,
"address_keywords": address_keywords,
"price": prices[step['travel_mode']] if step['travel_mode'] in prices.keys() else 0
}
'''
* Get the Google suggested routes for a list of departures, arrivals and dates.
*
* @param app_key - The Google API key to perform the request.
* @param departures - The list of locations (lat, lon) of departure.
* @param arrivals - The list of locations (lat, lon) of arrival.
* @param dates - The list of departure times.
* @param mode - The travel mode (e.g., TRANSIT, WALKING, DRIVING).
* @param kwargs - Other optional params as a dict.
'''
def extract(app_key, departures, arrivals, dates, mode, prices, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
del kwargs['pool_size']
pool = mp.Pool(pool_size)
trips = pool.map(extract_url, [(app_key, departures[i], arrivals[i], dates[i], mode, prices, kwargs) for i in range(0, len(departures))])
pool.close()
pool.join()
else:
trips = []
for i in range(0, len(departures)):
trips.append(extract_url((app_key, departures[i], arrivals[i], dates[i], mode, prices, kwargs)))
return trips
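# A minimal usage sketch, assuming a hypothetical API key, NYC coordinates and
# a simple price table; it issues a live Directions request, so it is guarded
# and only runs when the module is executed directly.
if __name__ == '__main__':
    _key = 'YOUR_GOOGLE_API_KEY' # placeholder, not a real credential
    _routes = extract_single(_key, (40.748817, -73.985428), (40.641311, -73.778139),
                             int(time.time()), 'transit', {'WALKING': 0, 'TRANSIT': 2.75})
    for _route in _routes:
        for _step in _route:
            print(_step['travel_mode'], _step['phase'], _step['duration'], _step['price'])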
| 5,497 | 42.984 | 158 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/google/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/here/transit.py
|
import os
import numpy as np
import pandas as pd
import time
import datetime as dt
import uuid as IdGenerator
import multiprocessing as mp
import urllib.request
import json
def extract_url(params):
(app_id, app_code, departure, arrival, date, modes_str) = params
query = {
"app_id": app_id,
"app_code": app_code,
"dep": '%f,%f' % departure,
"arr": '%f,%f' % arrival,
"time": "%s" % date.isoformat(),
"routing": modes_str
}
url = 'https://transit.cit.api.here.com/v3/route.json?' + urllib.parse.urlencode(query)
response = urllib.request.urlopen(url).read().decode("utf-8")
print(response)
    response = json.loads(response)
    return response
def extract(app_id, app_code, departures, arrivals, dates, modes_str, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
pool.map(extract_url, [(app_id, app_code, departures[i], arrivals[i], dates[i], modes_str) for i in range(0, len(departures))])
pool.close()
pool.join()
else:
for i in range(0, len(departures)):
extract_url((app_id, app_code, departures[i], arrivals[i], dates[i], modes_str))
| 1,289 | 30.463415 | 135 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/here/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/here/traffic.py
|
from smaframework.common.env import env
import urllib.request
import requests
import json
import math
APP_KEY = env('HERE_APP_ID')
APP_CODE = env('HERE_APP_CODE')
'''
* Run the request to collect traffic data.
*
* @param query - the params to be sent in the querystring
'''
def extract_url(query):
url = 'https://traffic.cit.api.here.com/traffic/6.2/flow.json?' + urllib.parse.urlencode(query)
response = urllib.request.urlopen(url).read().decode("utf-8")
return parse_response(json.loads(response))
'''
* Gets traffic data in a corridor.
*
* @param app_id - the APP_ID to be used in the request to HERE API
* @param app_code - the APP_CODE to be used in the request to HERE API
* @param path - a list of coordinates to form the corridor path
* @param width - the width of the corridor
* @param **kwargs - optional query params to be sent in the request
'''
def corridor(app_id, app_code, path, width, **kwargs):
kwargs["app_id"]= app_id
kwargs["app_code"]= app_code
kwargs["corridor"] = ';'.join(['%f,%f' % tuple(position) for position in path]) + (';%d' % width)
return extract_url(kwargs)
'''
* Gets traffic data using the Mercator Projection.
*
* @param app_id - the APP_ID to be used in the request to HERE API.
* @param app_code - the APP_CODE to be used in the request to HERE API.
* @param lat - the latitude of the desired traffic data.
* @param lon - the longitude of the desired traffic data.
* @param zoom - the zoom level of the desired traffic data from 0 to 21, where 0 is the whole earth and 21 is a specific location at building level (default=21).
'''
def lat_lon_zoom(app_id, app_code, lat, lon, zoom=21):
    latRad = lat * math.pi / 180
    n = math.pow(2, zoom)
    x = n * ((lon + 180) / 360)
    y = n * (1 - (math.log(math.tan(latRad) + 1 / math.cos(latRad)) / math.pi)) / 2
    params = {'app_code': app_code, 'app_id': app_id}
    request_url = 'https://traffic.cit.api.here.com/traffic/6.1/flow/json/%d/%d/%d' % (zoom, x, y)
    response = requests.get(request_url, params=params)
try:
return {
'data': parse_response(json.loads(response.content.decode('utf8'))),
'status_code': response.status_code
}
except Exception as e:
return {
'data': {},
'status_code': response.status_code
}
def point(lat, lon, r=50):
if not _validate_key():
return None
result = {}
prox = str(lat) + ',' + str(lon) + ',' + str(r)
params = {'app_code': APP_CODE, 'app_id': APP_KEY, 'prox': prox}
request_url = 'https://traffic.cit.api.here.com/traffic/6.1/flow.json'
response = requests.get(request_url, params=params)
result['status_code'] = response.status_code
resp_json = json.loads(response.content.decode('utf8'))
if 'error' in resp_json or 'Details' in resp_json:
result['data'] = resp_json
return result
result['data'] = parse_response(resp_json)
return result
def get_multiple_info_list(points, r=50):
result = []
for coords in points:
result.append(point(coords[0], coords[1], r))
return result
def _validate_key():
return APP_KEY and APP_CODE
'''
* Parse the response and keep only relevant data.
*
* @param resp_json - the object received from the call of the HERE API
'''
def parse_response(resp_json):
temp = {}
for i1 in range(0, len(resp_json['RWS'])):
resp_json_rws = resp_json['RWS'][i1]
for i2 in range(0, len(resp_json_rws['RW'])):
resp_json_rw = resp_json_rws['RW'][i2]
for i3 in range(0, len(resp_json_rw['FIS'])):
resp_json_fis = resp_json_rw['FIS'][i3]
                for i4 in range(0, len(resp_json_fis['FI'])):
resp_json_fi = resp_json_fis['FI'][i4]
resp_json_tmc = resp_json_fi['TMC']
resp_json_cf = resp_json_fi['CF'][0]
aux = {}
aux['DE'] = resp_json_tmc['DE']
aux['QD'] = resp_json_tmc['QD']
aux['JF'] = resp_json_cf['JF']
aux['CN'] = resp_json_cf['CN']
temp[str(resp_json_tmc['PC'])] = aux
return temp
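# A minimal usage sketch, assuming hypothetical HERE credentials and a short
# Manhattan corridor; it performs a live request, so it is guarded and only
# runs when the module is executed directly.
if __name__ == '__main__':
    _app_id, _app_code = 'YOUR_HERE_APP_ID', 'YOUR_HERE_APP_CODE' # placeholders
    _path = [(40.7590, -73.9845), (40.7484, -73.9857)] # hypothetical waypoints
    _flow = corridor(_app_id, _app_code, _path, 100) # corridor 100 m wide
    for _pc, _info in _flow.items():
        print(_pc, _info['DE'], _info['JF'])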
| 4,251 | 31.96124 | 162 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/tomtom/router.py
|
import urllib.request
import xmltodict, json, sys
from urllib.parse import quote
import smaframework.tool.conversor as Conversor
def parse(response):
if 'calculateRouteResponse' not in response.keys() or 'route' not in response['calculateRouteResponse'].keys():
return None
routes = response['calculateRouteResponse']['route']
if not isinstance(routes, list):
routes = [routes]
for route in routes:
points = route['leg']['points']['point']
for p in points:
p['@latitude'] = float(p['@latitude'])
p['@longitude'] = float(p['@longitude'])
pointer = 0
size = len(points)
instruction_indexes = []
for (index, instruction) in enumerate(route['guidance']['instructions']['instruction']):
for i in range(pointer, size):
instruction['routeOffsetInMeters'] = int(instruction['routeOffsetInMeters'])
instruction['travelTimeInSeconds'] = int(instruction['travelTimeInSeconds'])
instruction['point']['@latitude'] = float(instruction['point']['@latitude'])
instruction['point']['@longitude'] = float(instruction['point']['@longitude'])
if instruction['point']['@latitude'] == points[i]['@latitude'] and instruction['point']['@longitude'] == points[i]['@longitude']:
route['guidance']['instructions']['instruction'][index]['point']['@index'] = i
instruction_indexes.append(index)
pointer = i
break
if 'sections' not in route.keys():
route['sections'] = {
'section': []
}
continue
if not isinstance(route['sections']['section'], list):
route['sections']['section'] = [route['sections']['section']]
for (i, section) in enumerate(route['sections']['section']):
if section['sectionType'] != 'TRAFFIC':
continue
section['startPointIndex'] = int(section['startPointIndex'])
section['endPointIndex'] = int(section['endPointIndex'])
section['effectiveSpeed'] = Conversor.kmph2mps(float(section['effectiveSpeedInKmh']))
section['delayInSeconds'] = int(section['delayInSeconds'])
section['magnitudeOfDelay'] = int(section['magnitudeOfDelay'])
section['startPoint'] = points[section['startPointIndex']]
section['endPoint'] = points[section['endPointIndex']]
start = section['startPointIndex']
end = section['endPointIndex']
index = 0
if len(instruction_indexes) > 0:
index = min(instruction_indexes, key=lambda x: abs(x - start))
route['sections']['section'][i]['startInstructionIndex'] = index
if len(instruction_indexes) > 0:
index = min(instruction_indexes, key=lambda x: abs(x - end))
route['sections']['section'][i]['endInstructionIndex'] = index
return routes
def getRoute(origin, destination, key, maxAlternatives=0, parseData=True, log='debug', **kwargs):
'''
* Obtain the suggested routes for a given origin, and destination.
*
    * @param origin The location (lat, lon) of departure.
    * @param destination The location (lat, lon) of arrival.
* @param key The TomTom API key.
* @param maxAlternatives The maximum amount of route alternatives to be retrieved.
    * @param parseData Whether to parse the data according to a convenient format or return it raw.
* @param kwargs Other optional params as a dict.
* - travelMode (default: car; options: car, truck, taxi, bus, van, motorcycle, bicycle, pedestrian)
'''
config = {
'travelMode': 'car',
}
config.update(kwargs)
query = '%f,%f' % origin + ':%f,%f' % destination
query = quote(query)
    url = 'https://api.tomtom.com/routing/1/calculateRoute/%s?key=%s&sectionType=traffic&instructionsType=coded&maxAlternatives=%d&traffic=true&travelMode=%s' % (query, key, maxAlternatives, config['travelMode'])
response = ''
try:
response = urllib.request.urlopen(url).read().decode("utf-8")
response = xmltodict.parse(response)
if log == 'debug':
print('DEBUG TOMTOM: %s' % json.dumps(response))
except urllib.error.HTTPError as e:
error = xmltodict.parse(e.read())
print('ERROR TOMTOM URL: %s' % url, file=sys.stderr)
print('ERROR TOMTOM: %s' % error, file=sys.stderr)
return []
if 'error' in response['calculateRouteResponse'].keys():
return []
if parseData:
return parse(response)
return response
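# A minimal usage sketch, assuming a hypothetical TomTom key and two Berlin
# coordinates; it performs a live routing request, so it is guarded and only
# runs when the module is executed directly.
if __name__ == '__main__':
    _key = 'YOUR_TOMTOM_API_KEY' # placeholder, not a real credential
    _routes = getRoute((52.50931, 13.42936), (52.50274, 13.43872), _key, maxAlternatives=1)
    for _route in _routes or []:
        print(len(_route['leg']['points']['point']), 'points,',
              len(_route['sections']['section']), 'traffic sections')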
| 4,827 | 40.264957 | 212 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/extractor/tomtom/__init__.py
| 0 | 0 | 0 |
py
|
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/organizer/magify.py
|
import os
import multiprocessing as mp
import pandas as pd
import uuid as IdGenerator
def organize_file(filename, edge_type, config):
mag_location = 'data/mag/'
nodes_location = 'data/mag/nodes/'
edges_location = 'data/mag/edges/'
if not os.path.exists(mag_location):
os.makedirs(mag_location)
if not os.path.exists(nodes_location):
os.makedirs(nodes_location)
if not os.path.exists(edges_location):
os.makedirs(edges_location)
# read checkpoints
df = pd.read_csv(filename, header=0)
if 'columns' not in config.keys():
config['columns'] = ['id', 'uid', 'timestamp', 'lat', 'lon', 'layer']
# add ids to nodes
iddf = df.apply(lambda x: IdGenerator.uuid4().hex, axis = 1)
df['id'] = iddf
df = df[config['columns']]
# save nodes to disk
if len(df.index):
df.to_csv(nodes_location + IdGenerator.uuid4().hex + '.csv', index=False)
else:
return True
# organize and filter nodes to create edges
if pd.__version__ >= '0.17.0':
df.sort_values(by=['uid', 'timestamp'], ascending=[1,1], inplace=True)
else:
df.sort(['uid', 'timestamp'], ascending=[1,1], inplace=True)
df.reset_index(drop=True, inplace=True)
df = df[['id', 'uid']]
# match nodes to create edges
df2 = df.shift(-1)
df2.columns = ['id2','uid2']
df = pd.concat([df, df2], axis=1)
df = df[df.uid == df.uid2]
    # filter the matched node ids down to the edge endpoints
df = df[['id', 'id2']]
df.columns = ['source', 'target']
# add missing data to edges
iddf = df.apply(lambda x: IdGenerator.uuid4().hex, axis = 1)
df['id'] = iddf
df['type'] = edge_type
# save edges to disk
if len(df.index):
df.to_csv(edges_location + IdGenerator.uuid4().hex + '.csv', index=False)
return True
def organize(path, edge_type, **kwargs):
multiprocess = 'pool_size' in kwargs.keys() and int(kwargs['pool_size']) > 1
if multiprocess:
pool_size = int(kwargs['pool_size'])
pool = mp.Pool(pool_size)
filelist = os.listdir(path)
for file in filelist:
if 'file_regex' not in kwargs.keys() or kwargs['file_regex'].match(file):
if multiprocess:
pool.apply_async(organize_file, args=(os.path.join(path, file), edge_type, kwargs))
else:
organize_file(os.path.join(path, file), edge_type, kwargs)
if multiprocess:
pool.close()
pool.join()
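# A minimal usage sketch, assuming hypothetical entry CSVs under data/entries/;
# it writes node/edge CSVs under data/mag/ and is guarded so it only runs when
# the module is executed directly.
if __name__ == '__main__':
    import re
    organize('data/entries/', 'trip', pool_size=1, file_regex=re.compile(r'.*\.csv$'))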
| 2,469 | 29.875 | 99 |
py
|
hybrid-urban-routing-tutorial-sbrc
|
hybrid-urban-routing-tutorial-sbrc-master/smaframework/organizer/__init__.py
| 0 | 0 | 0 |
py
|
|
ERD
|
ERD-main/setup.py
|
#!/usr/bin/env python
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import platform
import shutil
import sys
import warnings
from setuptools import find_packages, setup
import torch
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
with open('README.md', encoding='utf-8') as f:
content = f.read()
return content
version_file = 'mmdet/version.py'
def get_version():
with open(version_file, 'r') as f:
exec(compile(f.read(), version_file, 'exec'))
return locals()['__version__']
def make_cuda_ext(name, module, sources, sources_cuda=[]):
define_macros = []
extra_compile_args = {'cxx': []}
if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
define_macros += [('WITH_CUDA', None)]
extension = CUDAExtension
extra_compile_args['nvcc'] = [
'-D__CUDA_NO_HALF_OPERATORS__',
'-D__CUDA_NO_HALF_CONVERSIONS__',
'-D__CUDA_NO_HALF2_OPERATORS__',
]
sources += sources_cuda
else:
print(f'Compiling {name} without CUDA')
extension = CppExtension
return extension(
name=f'{module}.{name}',
sources=[os.path.join(*module.split('.'), p) for p in sources],
define_macros=define_macros,
extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
"""Parse the package dependencies listed in a requirements file but strips
specific versioning information.
Args:
fname (str): path to requirements file
with_version (bool, default=False): if True include version specs
Returns:
List[str]: list of requirements items
CommandLine:
python -c "import setup; print(setup.parse_requirements())"
"""
import re
import sys
from os.path import exists
require_fpath = fname
def parse_line(line):
"""Parse information from a line in a requirements text file."""
if line.startswith('-r '):
# Allow specifying requirements in other files
target = line.split(' ')[1]
for info in parse_require_file(target):
yield info
else:
info = {'line': line}
if line.startswith('-e '):
info['package'] = line.split('#egg=')[1]
elif '@git+' in line:
info['package'] = line
else:
# Remove versioning from the package
pat = '(' + '|'.join(['>=', '==', '>']) + ')'
parts = re.split(pat, line, maxsplit=1)
parts = [p.strip() for p in parts]
info['package'] = parts[0]
if len(parts) > 1:
op, rest = parts[1:]
if ';' in rest:
# Handle platform specific dependencies
# http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
version, platform_deps = map(str.strip,
rest.split(';'))
info['platform_deps'] = platform_deps
else:
version = rest # NOQA
info['version'] = (op, version)
yield info
def parse_require_file(fpath):
with open(fpath, 'r') as f:
for line in f.readlines():
line = line.strip()
if line and not line.startswith('#'):
for info in parse_line(line):
yield info
def gen_packages_items():
if exists(require_fpath):
for info in parse_require_file(require_fpath):
parts = [info['package']]
if with_version and 'version' in info:
parts.extend(info['version'])
if not sys.version.startswith('3.4'):
# apparently package_deps are broken in 3.4
platform_deps = info.get('platform_deps')
if platform_deps is not None:
parts.append(';' + platform_deps)
item = ''.join(parts)
yield item
packages = list(gen_packages_items())
return packages
def add_mim_extension():
"""Add extra files that are required to support MIM into the package.
These files will be added by creating a symlink to the originals if the
package is installed in `editable` mode (e.g. pip install -e .), or by
copying from the originals otherwise.
"""
    # parse the installation mode
if 'develop' in sys.argv:
# installed by `pip install -e .`
if platform.system() == 'Windows':
# set `copy` mode here since symlink fails on Windows.
mode = 'copy'
else:
mode = 'symlink'
elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
# installed by `pip install .`
# or create source distribution by `python setup.py sdist`
mode = 'copy'
else:
return
filenames = ['tools', 'configs', 'demo', 'model-index.yml']
repo_path = osp.dirname(__file__)
mim_path = osp.join(repo_path, 'mmdet', '.mim')
os.makedirs(mim_path, exist_ok=True)
for filename in filenames:
if osp.exists(filename):
src_path = osp.join(repo_path, filename)
tar_path = osp.join(mim_path, filename)
if osp.isfile(tar_path) or osp.islink(tar_path):
os.remove(tar_path)
elif osp.isdir(tar_path):
shutil.rmtree(tar_path)
if mode == 'symlink':
src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
os.symlink(src_relpath, tar_path)
elif mode == 'copy':
if osp.isfile(src_path):
shutil.copyfile(src_path, tar_path)
elif osp.isdir(src_path):
shutil.copytree(src_path, tar_path)
else:
warnings.warn(f'Cannot copy file {src_path}.')
else:
raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
add_mim_extension()
setup(
name='mmdet',
version=get_version(),
description='OpenMMLab Detection Toolbox and Benchmark',
long_description=readme(),
long_description_content_type='text/markdown',
author='MMDetection Contributors',
author_email='[email protected]',
keywords='computer vision, object detection',
url='https://github.com/open-mmlab/mmdetection',
packages=find_packages(exclude=('configs', 'tools', 'demo')),
include_package_data=True,
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
],
license='Apache License 2.0',
install_requires=parse_requirements('requirements/runtime.txt'),
extras_require={
'all': parse_requirements('requirements.txt'),
'tests': parse_requirements('requirements/tests.txt'),
'build': parse_requirements('requirements/build.txt'),
'optional': parse_requirements('requirements/optional.txt'),
'mim': parse_requirements('requirements/mminstall.txt'),
},
ext_modules=[],
cmdclass={'build_ext': BuildExtension},
zip_safe=False)
| 7,887 | 34.692308 | 125 |
py
|
ERD
|
ERD-main/tools/test.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import warnings
from copy import deepcopy
from mmengine import ConfigDict
from mmengine.config import Config, DictAction
from mmengine.runner import Runner
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmdet.evaluation import DumpDetResults
from mmdet.registry import RUNNERS
from mmdet.utils import setup_cache_size_limit_of_dynamo
# TODO: support fuse_conv_bn and format_only
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--out',
type=str,
help='dump predictions to a pickle file for offline evaluation')
parser.add_argument(
'--show', action='store_true', help='show prediction results')
parser.add_argument(
'--show-dir',
help='directory where painted images will be saved. '
'If specified, it will be automatically saved '
'to the work_dir/timestamp/show_dir')
parser.add_argument(
'--wait-time', type=float, default=2, help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--tta', action='store_true')
# When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
# will pass the `--local-rank` parameter to `tools/train.py` instead
# of `--local_rank`.
parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# Reduce the number of repeated compilations and improve
# testing speed.
setup_cache_size_limit_of_dynamo()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.load_from = args.checkpoint
if args.show or args.show_dir:
cfg = trigger_visualization_hook(cfg, args)
if args.tta:
if 'tta_model' not in cfg:
warnings.warn('Cannot find ``tta_model`` in config, '
'we will set it as default.')
cfg.tta_model = dict(
type='DetTTAModel',
tta_cfg=dict(
nms=dict(type='nms', iou_threshold=0.5), max_per_img=100))
if 'tta_pipeline' not in cfg:
warnings.warn('Cannot find ``tta_pipeline`` in config, '
'we will set it as default.')
test_data_cfg = cfg.test_dataloader.dataset
while 'dataset' in test_data_cfg:
test_data_cfg = test_data_cfg['dataset']
cfg.tta_pipeline = deepcopy(test_data_cfg.pipeline)
flip_tta = dict(
type='TestTimeAug',
transforms=[
[
dict(type='RandomFlip', prob=1.),
dict(type='RandomFlip', prob=0.)
],
[
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape',
'img_shape', 'scale_factor', 'flip',
'flip_direction'))
],
])
cfg.tta_pipeline[-1] = flip_tta
cfg.model = ConfigDict(**cfg.tta_model, module=cfg.model)
cfg.test_dataloader.dataset.pipeline = cfg.tta_pipeline
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# add `DumpResults` dummy metric
if args.out is not None:
assert args.out.endswith(('.pkl', '.pickle')), \
'The dump file must be a pkl file.'
runner.test_evaluator.metrics.append(
DumpDetResults(out_file_path=args.out))
# start testing
runner.test()
if __name__ == '__main__':
main()
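# Example invocation (a sketch only; the config and checkpoint paths below are
# placeholders, not files known to ship with this repository):
#
#   python tools/test.py \
#       configs/some_model/some_config.py \
#       work_dirs/some_config/epoch_12.pth \
#       --out work_dirs/some_config/results.pkl \
#       --show-dir vis_results
#
# `--out` appends the DumpDetResults metric so predictions are pickled for
# offline evaluation, and `--show`/`--show-dir` enable the visualization hook.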
| 5,594 | 36.3 | 79 |
py
|
ERD
|
ERD-main/tools/train.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import logging
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.logging import print_log
from mmengine.registry import RUNNERS
from mmengine.runner import Runner
from mmdet.utils import setup_cache_size_limit_of_dynamo
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--amp',
action='store_true',
default=False,
help='enable automatic-mixed-precision training')
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
parser.add_argument(
'--resume',
nargs='?',
type=str,
const='auto',
help='If specify checkpoint path, resume from it, while if not '
'specify, try to auto resume from the latest checkpoint '
'in the work directory.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
# When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
# will pass the `--local-rank` parameter to `tools/train.py` instead
# of `--local_rank`.
parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
# Reduce the number of repeated compilations and improve
# training speed.
setup_cache_size_limit_of_dynamo()
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
# enable automatic-mixed-precision training
if args.amp is True:
optim_wrapper = cfg.optim_wrapper.type
if optim_wrapper == 'AmpOptimWrapper':
print_log(
'AMP training is already enabled in your config.',
logger='current',
level=logging.WARNING)
else:
assert optim_wrapper == 'OptimWrapper', (
'`--amp` is only supported when the optimizer wrapper type is '
f'`OptimWrapper` but got {optim_wrapper}.')
cfg.optim_wrapper.type = 'AmpOptimWrapper'
cfg.optim_wrapper.loss_scale = 'dynamic'
# enable automatically scaling LR
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
raise RuntimeError('Can not find "auto_scale_lr" or '
'"auto_scale_lr.enable" or '
'"auto_scale_lr.base_batch_size" in your'
' configuration file.')
# resume is determined in this priority: resume from > auto_resume
if args.resume == 'auto':
cfg.resume = True
cfg.load_from = None
elif args.resume is not None:
cfg.resume = True
cfg.load_from = args.resume
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# start training
runner.train()
if __name__ == '__main__':
main()
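# Example invocation (a sketch only; the config path is a placeholder):
#
#   python tools/train.py configs/some_model/some_config.py \
#       --work-dir work_dirs/some_config --amp --auto-scale-lr
#
# `--resume` with no value auto-resumes from the latest checkpoint in the work
# directory, while `--resume path/to/ckpt.pth` resumes from a specific file.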
| 4,770 | 34.604478 | 79 |
py
|
ERD
|
ERD-main/tools/deployment/test_torchserver.py
|
import os
from argparse import ArgumentParser
import mmcv
import requests
import torch
from mmengine.structures import InstanceData
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.structures import DetDataSample
def parse_args():
parser = ArgumentParser()
parser.add_argument('img', help='Image file')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument('model_name', help='The model name in the server')
parser.add_argument(
'--inference-addr',
default='127.0.0.1:8080',
help='Address and port of the inference server')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--score-thr', type=float, default=0.5, help='bbox score threshold')
parser.add_argument(
'--work-dir',
type=str,
default=None,
help='output directory to save drawn results.')
args = parser.parse_args()
return args
def align_ts_output(inputs, metainfo, device):
bboxes = []
labels = []
scores = []
for i, pred in enumerate(inputs):
bboxes.append(pred['bbox'])
labels.append(pred['class_label'])
scores.append(pred['score'])
pred_instances = InstanceData(metainfo=metainfo)
pred_instances.bboxes = torch.tensor(
bboxes, dtype=torch.float32, device=device)
pred_instances.labels = torch.tensor(
labels, dtype=torch.int64, device=device)
pred_instances.scores = torch.tensor(
scores, dtype=torch.float32, device=device)
ts_data_sample = DetDataSample(pred_instances=pred_instances)
return ts_data_sample
def main(args):
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
# test a single image
pytorch_results = inference_detector(model, args.img)
keep = pytorch_results.pred_instances.scores >= args.score_thr
pytorch_results.pred_instances = pytorch_results.pred_instances[keep]
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
# the dataset_meta is loaded from the checkpoint and
# then pass to the model in init_detector
visualizer.dataset_meta = model.dataset_meta
# show the results
img = mmcv.imread(args.img)
img = mmcv.imconvert(img, 'bgr', 'rgb')
pt_out_file = None
ts_out_file = None
if args.work_dir is not None:
os.makedirs(args.work_dir, exist_ok=True)
pt_out_file = os.path.join(args.work_dir, 'pytorch_result.png')
ts_out_file = os.path.join(args.work_dir, 'torchserve_result.png')
visualizer.add_datasample(
'pytorch_result',
img.copy(),
data_sample=pytorch_results,
draw_gt=False,
out_file=pt_out_file,
show=True,
wait_time=0)
url = 'http://' + args.inference_addr + '/predictions/' + args.model_name
with open(args.img, 'rb') as image:
response = requests.post(url, image)
metainfo = pytorch_results.pred_instances.metainfo
ts_results = align_ts_output(response.json(), metainfo, args.device)
visualizer.add_datasample(
'torchserve_result',
img,
data_sample=ts_results,
draw_gt=False,
out_file=ts_out_file,
show=True,
wait_time=0)
assert torch.allclose(pytorch_results.pred_instances.bboxes,
ts_results.pred_instances.bboxes)
assert torch.allclose(pytorch_results.pred_instances.labels,
ts_results.pred_instances.labels)
assert torch.allclose(pytorch_results.pred_instances.scores,
ts_results.pred_instances.scores)
if __name__ == '__main__':
args = parse_args()
main(args)
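# Example invocation (a sketch only; the image, config, checkpoint and model
# name are placeholders for a TorchServe instance already serving this model):
#
#   python tools/deployment/test_torchserver.py demo.jpg \
#       configs/some_model/some_config.py checkpoints/some_ckpt.pth some_model \
#       --inference-addr 127.0.0.1:8080 --score-thr 0.5 --work-dir out_vis
#
# The script visualizes both outputs and asserts that boxes, labels and scores
# from local PyTorch inference and TorchServe match.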
| 3,906 | 33.27193 | 77 |
py
|
ERD
|
ERD-main/tools/deployment/mmdet2torchserve.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
from mmengine.config import Config
from mmengine.utils import mkdir_or_exist
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file:
In MMDetection config format.
The contents vary for each task repository.
checkpoint_file:
In MMDetection checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mkdir_or_exist(output_folder)
config = Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmdet_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest)
def parse_args():
parser = ArgumentParser(
description='Convert MMDetection models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
help='If not None, used for naming the `{model_name}.mar`'
'file that will be created under `output_folder`.'
'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if package_model is None:
raise ImportError('`torch-model-archiver` is required.'
'Try: pip install torch-model-archiver')
mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
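# Example invocation (a sketch only; config/checkpoint paths are placeholders).
# Serving the resulting archive is done with TorchServe's own CLI, sketched
# here from its documented flags rather than anything defined in this script:
#
#   python tools/deployment/mmdet2torchserve.py \
#       configs/some_model/some_config.py checkpoints/some_ckpt.pth \
#       --output-folder model_store --model-name some_model
#
#   torchserve --start --model-store model_store --models some_model.mar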
| 3,748 | 32.473214 | 78 |
py
|
ERD
|
ERD-main/tools/deployment/mmdet_handler.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import numpy as np
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
threshold = 0.5
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
self.device = torch.device(self.map_location + ':' +
str(properties.get('gpu_id')) if torch.cuda.
is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_detector(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = inference_detector(self.model, data)
return results
def postprocess(self, data):
# Format output following the example ObjectDetectionHandler format
output = []
for data_sample in data:
pred_instances = data_sample.pred_instances
bboxes = pred_instances.bboxes.cpu().numpy().astype(
np.float32).tolist()
labels = pred_instances.labels.cpu().numpy().astype(
np.int32).tolist()
scores = pred_instances.scores.cpu().numpy().astype(
np.float32).tolist()
preds = []
for idx in range(len(labels)):
cls_score, bbox, cls_label = scores[idx], bboxes[idx], labels[
idx]
if cls_score >= self.threshold:
class_name = self.model.dataset_meta['classes'][cls_label]
result = dict(
class_label=cls_label,
class_name=class_name,
bbox=bbox,
score=cls_score)
preds.append(result)
output.append(preds)
return output
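# Rough shape of the handler response, inferred from `postprocess` above (one
# inner list per request image; the values shown are illustrative only):
#
#   [[{'class_label': 0, 'class_name': 'person',
#      'bbox': [x1, y1, x2, y2], 'score': 0.98}, ...], ...]
#
# Only detections with `score >= threshold` (0.5 here) are returned; this is
# the format consumed by `align_ts_output` in tools/deployment/test_torchserver.py.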
| 2,620 | 34.90411 | 79 |
py
|
ERD
|
ERD-main/tools/misc/get_image_metas.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image metas on a specific dataset.
Here is an example to run this script.
Example:
python tools/misc/get_image_metas.py ${CONFIG} \
--out ${OUTPUT FILE NAME}
"""
import argparse
import csv
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import dump, get
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
default='val',
choices=['train', 'val', 'test'],
help='Collect image metas from which dataset')
parser.add_argument(
'--out',
default='validation-image-metas.pkl',
help='The output image metas file name. The save dir is in the '
'same directory as `dataset.ann_file` path')
parser.add_argument(
'--nproc',
default=4,
type=int,
help='Processes used for get image metas')
args = parser.parse_args()
return args
def get_metas_from_csv_style_ann_file(ann_file):
data_infos = []
cp_filename = None
with open(ann_file, 'r') as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
if i == 0:
continue
img_id = line[0]
filename = f'{img_id}.jpg'
if filename != cp_filename:
data_infos.append(dict(filename=filename))
cp_filename = filename
return data_infos
def get_metas_from_txt_style_ann_file(ann_file):
with open(ann_file) as f:
lines = f.readlines()
i = 0
data_infos = []
while i < len(lines):
filename = lines[i].rstrip()
data_infos.append(dict(filename=filename))
skip_lines = int(lines[i + 2]) + 3
i += skip_lines
return data_infos
def get_image_metas(data_info, img_prefix):
filename = data_info.get('filename', None)
if filename is not None:
if img_prefix is not None:
filename = osp.join(img_prefix, filename)
img_bytes = get(filename)
img = mmcv.imfrombytes(img_bytes, flag='color')
shape = img.shape
meta = dict(filename=filename, ori_shape=shape)
else:
raise NotImplementedError('Missing `filename` in data_info')
return meta
def main():
args = parse_args()
    assert args.out.endswith('pkl'), 'The output file name must have a pkl suffix'
# load config files
cfg = Config.fromfile(args.config)
dataloader_cfg = cfg.get(f'{args.dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
print(f'{"-" * 5} Start Processing {"-" * 5}')
if ann_file.endswith('csv'):
data_infos = get_metas_from_csv_style_ann_file(ann_file)
elif ann_file.endswith('txt'):
data_infos = get_metas_from_txt_style_ann_file(ann_file)
else:
        suffix = ann_file.split('.')[-1]
        raise NotImplementedError('Annotation file must have a csv or txt '
                                  f'suffix, but got {suffix}')
print(f'Successfully load annotation file from {ann_file}')
print(f'Processing {len(data_infos)} images...')
pool = Pool(args.nproc)
# get image metas with multiple processes
image_metas = pool.starmap(
get_image_metas,
zip(data_infos, [img_prefix for _ in range(len(data_infos))]),
)
pool.close()
# save image metas
root_path = dataloader_cfg.dataset.ann_file.rsplit('/', 1)[0]
save_path = osp.join(root_path, args.out)
dump(image_metas, save_path, protocol=4)
print(f'Image meta file save to: {save_path}')
if __name__ == '__main__':
main()
| 3,935 | 30.238095 | 78 |
py
|
ERD
|
ERD-main/tools/misc/get_crowdhuman_id_hw.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Get image shape on CrowdHuman dataset.
Here is an example to run this script.
Example:
python tools/misc/get_crowdhuman_id_hw.py ${CONFIG} \
--dataset ${DATASET_TYPE}
"""
import argparse
import json
import logging
import os.path as osp
from multiprocessing import Pool
import mmcv
from mmengine.config import Config
from mmengine.fileio import dump, get, get_text
from mmengine.logging import print_log
def parse_args():
parser = argparse.ArgumentParser(description='Collect image metas')
parser.add_argument('config', help='Config file path')
parser.add_argument(
'--dataset',
choices=['train', 'val'],
help='Collect image metas from which dataset')
parser.add_argument(
'--nproc',
default=10,
type=int,
help='Processes used for get image metas')
args = parser.parse_args()
return args
def get_image_metas(anno_str, img_prefix):
id_hw = {}
anno_dict = json.loads(anno_str)
img_path = osp.join(img_prefix, f"{anno_dict['ID']}.jpg")
img_id = anno_dict['ID']
img_bytes = get(img_path)
img = mmcv.imfrombytes(img_bytes, backend='cv2')
id_hw[img_id] = img.shape[:2]
return id_hw
def main():
args = parse_args()
# get ann_file and img_prefix from config files
cfg = Config.fromfile(args.config)
dataset = args.dataset
dataloader_cfg = cfg.get(f'{dataset}_dataloader')
ann_file = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.ann_file)
img_prefix = osp.join(dataloader_cfg.dataset.data_root,
dataloader_cfg.dataset.data_prefix['img'])
# load image metas
print_log(
f'loading CrowdHuman {dataset} annotation...', level=logging.INFO)
anno_strs = get_text(ann_file).strip().split('\n')
pool = Pool(args.nproc)
# get image metas with multiple processes
id_hw_temp = pool.starmap(
get_image_metas,
zip(anno_strs, [img_prefix for _ in range(len(anno_strs))]),
)
pool.close()
# save image metas
id_hw = {}
for sub_dict in id_hw_temp:
id_hw.update(sub_dict)
data_root = osp.dirname(ann_file)
save_path = osp.join(data_root, f'id_hw_{dataset}.json')
print_log(
f'\nsaving "id_hw_{dataset}.json" in "{data_root}"',
level=logging.INFO)
dump(id_hw, save_path, file_format='json')
if __name__ == '__main__':
main()
| 2,492 | 27.329545 | 74 |
py
|
ERD
|
ERD-main/tools/misc/gen_coco_panoptic_test_info.py
|
import argparse
import os.path as osp
from mmengine.fileio import dump, load
def parse_args():
parser = argparse.ArgumentParser(
description='Generate COCO test image information '
'for COCO panoptic segmentation.')
parser.add_argument('data_root', help='Path to COCO annotation directory.')
args = parser.parse_args()
return args
def main():
args = parse_args()
data_root = args.data_root
val_info = load(osp.join(data_root, 'panoptic_val2017.json'))
test_old_info = load(osp.join(data_root, 'image_info_test-dev2017.json'))
# replace categories from image_info_test-dev2017.json
# with categories from panoptic_val2017.json which
# has attribute `isthing`.
test_info = test_old_info
test_info.update({'categories': val_info['categories']})
dump(test_info, osp.join(data_root,
'panoptic_image_info_test-dev2017.json'))
if __name__ == '__main__':
main()
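# Example invocation (a sketch only; the annotation directory is a placeholder):
#
#   python tools/misc/gen_coco_panoptic_test_info.py data/coco/annotations
#
# This writes `panoptic_image_info_test-dev2017.json` next to the input files.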
| 968 | 27.5 | 79 |
py
|
ERD
|
ERD-main/tools/misc/download_dataset.py
|
import argparse
import tarfile
from itertools import repeat
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tarfile import TarFile
from zipfile import ZipFile
import torch
from mmengine.utils.path import mkdir_or_exist
def parse_args():
parser = argparse.ArgumentParser(
description='Download datasets for training')
parser.add_argument(
'--dataset-name', type=str, help='dataset name', default='coco2017')
parser.add_argument(
'--save-dir',
type=str,
help='the dir to save dataset',
default='data/coco')
parser.add_argument(
'--unzip',
action='store_true',
help='whether unzip dataset or not, zipped files will be saved')
parser.add_argument(
'--delete',
action='store_true',
help='delete the download zipped files')
parser.add_argument(
        '--threads', type=int, help='number of threads', default=4)
args = parser.parse_args()
return args
def download(url, dir, unzip=True, delete=False, threads=1):
def download_one(url, dir):
f = dir / Path(url).name
if Path(url).is_file():
Path(url).rename(f)
elif not f.exists():
print(f'Downloading {url} to {f}')
torch.hub.download_url_to_file(url, f, progress=True)
if unzip and f.suffix in ('.zip', '.tar'):
print(f'Unzipping {f.name}')
if f.suffix == '.zip':
ZipFile(f).extractall(path=dir)
elif f.suffix == '.tar':
TarFile(f).extractall(path=dir)
if delete:
f.unlink()
print(f'Delete {f}')
dir = Path(dir)
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_one(*x), zip(url, repeat(dir)))
pool.close()
pool.join()
else:
for u in [url] if isinstance(url, (str, Path)) else url:
download_one(u, dir)
def download_objects365v2(url, dir, unzip=True, delete=False, threads=1):
def download_single(url, dir):
if 'train' in url:
saving_dir = dir / Path('train_zip')
mkdir_or_exist(saving_dir)
f = saving_dir / Path(url).name
unzip_dir = dir / Path('train')
mkdir_or_exist(unzip_dir)
elif 'val' in url:
saving_dir = dir / Path('val')
mkdir_or_exist(saving_dir)
f = saving_dir / Path(url).name
unzip_dir = dir / Path('val')
mkdir_or_exist(unzip_dir)
else:
raise NotImplementedError
if Path(url).is_file():
Path(url).rename(f)
elif not f.exists():
print(f'Downloading {url} to {f}')
torch.hub.download_url_to_file(url, f, progress=True)
if unzip and str(f).endswith('.tar.gz'):
print(f'Unzipping {f.name}')
tar = tarfile.open(f)
tar.extractall(path=unzip_dir)
if delete:
f.unlink()
print(f'Delete {f}')
# process annotations
full_url = []
for _url in url:
if 'zhiyuan_objv2_train.tar.gz' in _url or \
'zhiyuan_objv2_val.json' in _url:
full_url.append(_url)
elif 'train' in _url:
for i in range(51):
full_url.append(f'{_url}patch{i}.tar.gz')
elif 'val/images/v1' in _url:
for i in range(16):
full_url.append(f'{_url}patch{i}.tar.gz')
elif 'val/images/v2' in _url:
for i in range(16, 44):
full_url.append(f'{_url}patch{i}.tar.gz')
else:
raise NotImplementedError
dir = Path(dir)
if threads > 1:
pool = ThreadPool(threads)
pool.imap(lambda x: download_single(*x), zip(full_url, repeat(dir)))
pool.close()
pool.join()
else:
for u in full_url:
download_single(u, dir)
def main():
args = parse_args()
path = Path(args.save_dir)
if not path.exists():
path.mkdir(parents=True, exist_ok=True)
data2url = dict(
# TODO: Support for downloading Panoptic Segmentation of COCO
coco2017=[
'http://images.cocodataset.org/zips/train2017.zip',
'http://images.cocodataset.org/zips/val2017.zip',
'http://images.cocodataset.org/zips/test2017.zip',
'http://images.cocodataset.org/zips/unlabeled2017.zip',
'http://images.cocodataset.org/annotations/annotations_trainval2017.zip', # noqa
'http://images.cocodataset.org/annotations/stuff_annotations_trainval2017.zip', # noqa
'http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip', # noqa
'http://images.cocodataset.org/annotations/image_info_test2017.zip', # noqa
'http://images.cocodataset.org/annotations/image_info_unlabeled2017.zip', # noqa
],
lvis=[
'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa
'https://s3-us-west-2.amazonaws.com/dl.fbaipublicfiles.com/LVIS/lvis_v1_train.json.zip', # noqa
],
voc2007=[
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', # noqa
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', # noqa
'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCdevkit_08-Jun-2007.tar', # noqa
],
# Note: There is no download link for Objects365-V1 right now. If you
# would like to download Objects365-V1, please visit
        # http://www.objects365.org/ to contact the authors.
objects365v2=[
# training annotations
'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/zhiyuan_objv2_train.tar.gz', # noqa
# validation annotations
'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/zhiyuan_objv2_val.json', # noqa
# training url root
'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/train/', # noqa
# validation url root_1
'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/images/v1/', # noqa
# validation url root_2
'https://dorc.ks3-cn-beijing.ksyun.com/data-set/2020Objects365%E6%95%B0%E6%8D%AE%E9%9B%86/val/images/v2/' # noqa
])
url = data2url.get(args.dataset_name, None)
if url is None:
        print('Only COCO, VOC, LVIS and Objects365v2 are supported for now!')
return
if args.dataset_name == 'objects365v2':
download_objects365v2(
url,
dir=path,
unzip=args.unzip,
delete=args.delete,
threads=args.threads)
else:
download(
url,
dir=path,
unzip=args.unzip,
delete=args.delete,
threads=args.threads)
if __name__ == '__main__':
main()
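# Example invocation (a sketch only; the save directory is a placeholder):
#
#   python tools/misc/download_dataset.py --dataset-name coco2017 \
#       --save-dir data/coco --unzip --delete --threads 4
#
# `--delete` removes the downloaded archives after extraction; omit it to keep
# the zipped files alongside the unpacked data.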
| 7,177 | 35.810256 | 144 |
py
|
ERD
|
ERD-main/tools/misc/split_coco.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import numpy as np
from mmengine.fileio import dump, load
from mmengine.utils import mkdir_or_exist, track_parallel_progress
prog_description = '''K-Fold coco split.
To split coco data for semi-supervised object detection:
python tools/misc/split_coco.py
'''
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
'--data-root',
type=str,
help='The data root of coco dataset.',
default='./data/coco/')
parser.add_argument(
'--out-dir',
type=str,
help='The output directory of coco semi-supervised annotations.',
default='./data/coco/semi_anns/')
parser.add_argument(
'--labeled-percent',
type=float,
nargs='+',
help='The percentage of labeled data in the training set.',
default=[1, 2, 5, 10])
parser.add_argument(
'--fold',
type=int,
help='K-fold cross validation for semi-supervised object detection.',
default=5)
args = parser.parse_args()
return args
def split_coco(data_root, out_dir, percent, fold):
"""Split COCO data for Semi-supervised object detection.
Args:
data_root (str): The data root of coco dataset.
out_dir (str): The output directory of coco semi-supervised
annotations.
percent (float): The percentage of labeled data in the training set.
fold (int): The fold of dataset and set as random seed for data split.
"""
def save_anns(name, images, annotations):
sub_anns = dict()
sub_anns['images'] = images
sub_anns['annotations'] = annotations
sub_anns['licenses'] = anns['licenses']
sub_anns['categories'] = anns['categories']
sub_anns['info'] = anns['info']
mkdir_or_exist(out_dir)
dump(sub_anns, f'{out_dir}/{name}.json')
# set random seed with the fold
np.random.seed(fold)
ann_file = osp.join(data_root, 'annotations/instances_train2017.json')
anns = load(ann_file)
image_list = anns['images']
labeled_total = int(percent / 100. * len(image_list))
labeled_inds = set(
np.random.choice(range(len(image_list)), size=labeled_total))
labeled_ids, labeled_images, unlabeled_images = [], [], []
for i in range(len(image_list)):
if i in labeled_inds:
labeled_images.append(image_list[i])
labeled_ids.append(image_list[i]['id'])
else:
unlabeled_images.append(image_list[i])
# get all annotations of labeled images
labeled_ids = set(labeled_ids)
labeled_annotations, unlabeled_annotations = [], []
for ann in anns['annotations']:
if ann['image_id'] in labeled_ids:
labeled_annotations.append(ann)
else:
unlabeled_annotations.append(ann)
# save labeled and unlabeled
labeled_name = f'instances_train2017.{fold}@{percent}'
unlabeled_name = f'instances_train2017.{fold}@{percent}-unlabeled'
save_anns(labeled_name, labeled_images, labeled_annotations)
save_anns(unlabeled_name, unlabeled_images, unlabeled_annotations)
def multi_wrapper(args):
return split_coco(*args)
if __name__ == '__main__':
args = parse_args()
arguments_list = [(args.data_root, args.out_dir, p, f)
for f in range(1, args.fold + 1)
for p in args.labeled_percent]
track_parallel_progress(multi_wrapper, arguments_list, args.fold)
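# Example invocation and resulting file names (a sketch only; paths are
# placeholders):
#
#   python tools/misc/split_coco.py --data-root ./data/coco/ \
#       --out-dir ./data/coco/semi_anns/ --labeled-percent 10 --fold 5
#
# Per `save_anns` above, fold 1 at 10% labeled data produces:
#   instances_train2017.1@10.json
#   instances_train2017.1@10-unlabeled.json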
| 3,560 | 31.081081 | 78 |
py
|
ERD
|
ERD-main/tools/misc/print_config.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from mmengine import Config, DictAction
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--save-path',
default=None,
help='save path of whole config, suffixed with .py, .json or .yml')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
if args.save_path is not None:
save_path = args.save_path
suffix = os.path.splitext(save_path)[-1]
assert suffix in ['.py', '.json', '.yml']
if not os.path.exists(os.path.split(save_path)[0]):
os.makedirs(os.path.split(save_path)[0])
cfg.dump(save_path)
print(f'Config saving at {save_path}')
if __name__ == '__main__':
main()
| 1,797 | 28.47541 | 78 |
py
|
ERD
|
ERD-main/tools/model_converters/selfsup2mmdet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def moco_convert(src, dst):
"""Convert keys in pycls pretrained moco models to mmdet style."""
# load caffe model
moco_model = torch.load(src)
blobs = moco_model['state_dict']
# convert to pytorch style
state_dict = OrderedDict()
for k, v in blobs.items():
if not k.startswith('module.encoder_q.'):
continue
old_k = k
k = k.replace('module.encoder_q.', '')
state_dict[k] = v
print(old_k, '->', k)
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src self-supervised model path')
    parser.add_argument('dst', help='save path')
    parser.add_argument(
        '--selfsup',
        type=str,
        choices=['moco', 'swav'],
        help='self-supervised method used to train the checkpoint')
args = parser.parse_args()
if args.selfsup == 'moco':
moco_convert(args.src, args.dst)
elif args.selfsup == 'swav':
print('SWAV does not need to convert the keys')
if __name__ == '__main__':
main()
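# Example invocation (a sketch only; checkpoint names are placeholders):
#
#   python tools/model_converters/selfsup2mmdet.py \
#       moco_pretrained.pth moco_backbone_mmdet.pth --selfsup moco
#
# SwAV checkpoints need no key conversion and only print a notice.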
| 1,243 | 27.930233 | 74 |
py
|
ERD
|
ERD-main/tools/model_converters/publish_model.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
from mmengine.logging import print_log
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
parser.add_argument(
'--save-keys',
nargs='+',
type=str,
default=['meta', 'state_dict'],
help='keys to save in the published checkpoint')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file, save_keys=['meta', 'state_dict']):
checkpoint = torch.load(in_file, map_location='cpu')
# only keep `meta` and `state_dict` for smaller file size
ckpt_keys = list(checkpoint.keys())
for k in ckpt_keys:
if k not in save_keys:
print_log(
f'Key `{k}` will be removed because it is not in '
f'save_keys. If you want to keep it, '
f'please set --save-keys.',
logger='current')
checkpoint.pop(k, None)
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
if out_file.endswith('.pth'):
out_file_name = out_file[:-4]
else:
out_file_name = out_file
final_file = out_file_name + f'-{sha[:8]}.pth'
subprocess.Popen(['mv', out_file, final_file])
print_log(
f'The published model is saved at {final_file}.', logger='current')
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file, args.save_keys)
if __name__ == '__main__':
main()
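# Example invocation (a sketch only; file names are placeholders):
#
#   python tools/model_converters/publish_model.py \
#       work_dirs/some_config/epoch_12.pth some_model.pth
#
# The published checkpoint keeps only the keys listed in `--save-keys` and is
# renamed with the first 8 characters of its sha256 hash, e.g.
# `some_model-0123abcd.pth`.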
| 1,966 | 30.725806 | 78 |
py
|
ERD
|
ERD-main/tools/model_converters/regnet2mmdet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
def convert_stem(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('stem.conv', 'conv1')
new_key = new_key.replace('stem.bn', 'bn1')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_head(model_key, model_weight, state_dict, converted_names):
new_key = model_key.replace('head.fc', 'fc')
state_dict[new_key] = model_weight
converted_names.add(model_key)
print(f'Convert {model_key} to {new_key}')
def convert_reslayer(model_key, model_weight, state_dict, converted_names):
split_keys = model_key.split('.')
layer, block, module = split_keys[:3]
block_id = int(block[1:])
layer_name = f'layer{int(layer[1:])}'
block_name = f'{block_id - 1}'
if block_id == 1 and module == 'bn':
new_key = f'{layer_name}.{block_name}.downsample.1.{split_keys[-1]}'
elif block_id == 1 and module == 'proj':
new_key = f'{layer_name}.{block_name}.downsample.0.{split_keys[-1]}'
elif module == 'f':
if split_keys[3] == 'a_bn':
module_name = 'bn1'
elif split_keys[3] == 'b_bn':
module_name = 'bn2'
elif split_keys[3] == 'c_bn':
module_name = 'bn3'
elif split_keys[3] == 'a':
module_name = 'conv1'
elif split_keys[3] == 'b':
module_name = 'conv2'
elif split_keys[3] == 'c':
module_name = 'conv3'
new_key = f'{layer_name}.{block_name}.{module_name}.{split_keys[-1]}'
else:
raise ValueError(f'Unsupported conversion of key {model_key}')
print(f'Convert {model_key} to {new_key}')
state_dict[new_key] = model_weight
converted_names.add(model_key)
def convert(src, dst):
"""Convert keys in pycls pretrained RegNet models to mmdet style."""
# load caffe model
regnet_model = torch.load(src)
blobs = regnet_model['model_state']
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
for key, weight in blobs.items():
if 'stem' in key:
convert_stem(key, weight, state_dict, converted_names)
elif 'head' in key:
convert_head(key, weight, state_dict, converted_names)
elif key.startswith('s'):
convert_reslayer(key, weight, state_dict, converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
print(f'not converted: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
    parser.add_argument('src', help='src pycls RegNet model path')
parser.add_argument('dst', help='save path')
args = parser.parse_args()
convert(args.src, args.dst)
if __name__ == '__main__':
main()
| 3,063 | 32.67033 | 77 |
py
|
ERD
|
ERD-main/tools/model_converters/upgrade_model_version.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import re
import tempfile
from collections import OrderedDict
import torch
from mmengine import Config
def is_head(key):
valid_head_list = [
'bbox_head', 'mask_head', 'semantic_head', 'grid_head', 'mask_iou_head'
]
return any(key.startswith(h) for h in valid_head_list)
def parse_config(config_strings):
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
is_two_stage = True
is_ssd = False
is_retina = False
reg_cls_agnostic = False
if 'rpn_head' not in config.model:
is_two_stage = False
# check whether it is SSD
if config.model.bbox_head.type == 'SSDHead':
is_ssd = True
elif config.model.bbox_head.type == 'RetinaHead':
is_retina = True
elif isinstance(config.model['bbox_head'], list):
reg_cls_agnostic = True
elif 'reg_class_agnostic' in config.model.bbox_head:
reg_cls_agnostic = config.model.bbox_head \
.reg_class_agnostic
temp_file.close()
return is_two_stage, is_ssd, is_retina, reg_cls_agnostic
def reorder_cls_channel(val, num_classes=81):
# bias
if val.dim() == 1:
new_val = torch.cat((val[1:], val[:1]), dim=0)
# weight
else:
out_channels, in_channels = val.shape[:2]
# conv_cls for softmax output
if out_channels != num_classes and out_channels % num_classes == 0:
new_val = val.reshape(-1, num_classes, in_channels, *val.shape[2:])
new_val = torch.cat((new_val[:, 1:], new_val[:, :1]), dim=1)
new_val = new_val.reshape(val.size())
# fc_cls
elif out_channels == num_classes:
new_val = torch.cat((val[1:], val[:1]), dim=0)
# agnostic | retina_cls | rpn_cls
else:
new_val = val
return new_val
def truncate_cls_channel(val, num_classes=81):
# bias
if val.dim() == 1:
if val.size(0) % num_classes == 0:
new_val = val[:num_classes - 1]
else:
new_val = val
# weight
else:
out_channels, in_channels = val.shape[:2]
# conv_logits
if out_channels % num_classes == 0:
new_val = val.reshape(num_classes, in_channels, *val.shape[2:])[1:]
new_val = new_val.reshape(-1, *val.shape[1:])
# agnostic
else:
new_val = val
return new_val
def truncate_reg_channel(val, num_classes=81):
# bias
if val.dim() == 1:
# fc_reg | rpn_reg
if val.size(0) % num_classes == 0:
new_val = val.reshape(num_classes, -1)[:num_classes - 1]
new_val = new_val.reshape(-1)
# agnostic
else:
new_val = val
# weight
else:
out_channels, in_channels = val.shape[:2]
# fc_reg | rpn_reg
if out_channels % num_classes == 0:
new_val = val.reshape(num_classes, -1, in_channels,
*val.shape[2:])[1:]
new_val = new_val.reshape(-1, *val.shape[1:])
# agnostic
else:
new_val = val
return new_val
def convert(in_file, out_file, num_classes):
"""Convert keys in checkpoints.
There can be some breaking changes during the development of mmdetection,
and this tool is used for upgrading checkpoints trained with old versions
to the latest one.
"""
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
meta_info = checkpoint['meta']
is_two_stage, is_ssd, is_retina, reg_cls_agnostic = parse_config(
'#' + meta_info['config'])
if meta_info['mmdet_version'] <= '0.5.3' and is_retina:
upgrade_retina = True
else:
upgrade_retina = False
# MMDetection v2.5.0 unifies the class order in RPN
# if the model is trained in version<v2.5.0
# The RPN model should be upgraded to be used in version>=2.5.0
if meta_info['mmdet_version'] < '2.5.0':
upgrade_rpn = True
else:
upgrade_rpn = False
for key, val in in_state_dict.items():
new_key = key
new_val = val
if is_two_stage and is_head(key):
new_key = 'roi_head.{}'.format(key)
# classification
if upgrade_rpn:
m = re.search(
r'(conv_cls|retina_cls|rpn_cls|fc_cls|fcos_cls|'
r'fovea_cls).(weight|bias)', new_key)
else:
m = re.search(
r'(conv_cls|retina_cls|fc_cls|fcos_cls|'
r'fovea_cls).(weight|bias)', new_key)
if m is not None:
print(f'reorder cls channels of {new_key}')
new_val = reorder_cls_channel(val, num_classes)
# regression
if upgrade_rpn:
m = re.search(r'(fc_reg).(weight|bias)', new_key)
else:
m = re.search(r'(fc_reg|rpn_reg).(weight|bias)', new_key)
if m is not None and not reg_cls_agnostic:
print(f'truncate regression channels of {new_key}')
new_val = truncate_reg_channel(val, num_classes)
# mask head
m = re.search(r'(conv_logits).(weight|bias)', new_key)
if m is not None:
print(f'truncate mask prediction channels of {new_key}')
new_val = truncate_cls_channel(val, num_classes)
m = re.search(r'(cls_convs|reg_convs).\d.(weight|bias)', key)
# Legacy issues in RetinaNet since V1.x
# Use ConvModule instead of nn.Conv2d in RetinaNet
# cls_convs.0.weight -> cls_convs.0.conv.weight
if m is not None and upgrade_retina:
param = m.groups()[1]
new_key = key.replace(param, f'conv.{param}')
out_state_dict[new_key] = val
print(f'rename the name of {key} to {new_key}')
continue
m = re.search(r'(cls_convs).\d.(weight|bias)', key)
if m is not None and is_ssd:
print(f'reorder cls channels of {new_key}')
new_val = reorder_cls_channel(val, num_classes)
out_state_dict[new_key] = new_val
checkpoint['state_dict'] = out_state_dict
torch.save(checkpoint, out_file)
def main():
parser = argparse.ArgumentParser(description='Upgrade model version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
parser.add_argument(
'--num-classes',
type=int,
default=81,
help='number of classes of the original model')
args = parser.parse_args()
convert(args.in_file, args.out_file, args.num_classes)
if __name__ == '__main__':
main()
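# Example invocation (a sketch only; checkpoint names are placeholders):
#
#   python tools/model_converters/upgrade_model_version.py \
#       old_mmdet_v1_model.pth upgraded_model.pth --num-classes 81
#
# `--num-classes` should match the original model; the default of 81 reflects
# the old 80-class + background COCO convention.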
| 6,852 | 31.478673 | 79 |
py
|
ERD
|
ERD-main/tools/model_converters/detectron2_to_mmdet.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
from mmengine.fileio import load
from mmengine.runner import save_checkpoint
def convert(src: str, dst: str, prefix: str = 'd2_model') -> None:
"""Convert Detectron2 checkpoint to MMDetection style.
Args:
src (str): The Detectron2 checkpoint path, should endswith `pkl`.
dst (str): The MMDetection checkpoint path.
prefix (str): The prefix of MMDetection model, defaults to 'd2_model'.
"""
# load arch_settings
assert src.endswith('pkl'), \
'the source Detectron2 checkpoint should endswith `pkl`.'
d2_model = load(src, encoding='latin1').get('model')
assert d2_model is not None
# convert to mmdet style
dst_state_dict = OrderedDict()
for name, value in d2_model.items():
if not isinstance(value, torch.Tensor):
value = torch.from_numpy(value)
dst_state_dict[f'{prefix}.{name}'] = value
mmdet_model = dict(state_dict=dst_state_dict, meta=dict())
save_checkpoint(mmdet_model, dst)
print(f'Convert Detectron2 model {src} to MMDetection model {dst}')
def main():
parser = argparse.ArgumentParser(
description='Convert Detectron2 checkpoint to MMDetection style')
parser.add_argument('src', help='Detectron2 model path')
    parser.add_argument('dst', help='MMDetection model save path')
parser.add_argument(
'--prefix', default='d2_model', type=str, help='prefix of the model')
args = parser.parse_args()
convert(args.src, args.dst, args.prefix)
if __name__ == '__main__':
main()
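# Example invocation (a sketch only; the `.pkl` checkpoint path is a
# placeholder):
#
#   python tools/model_converters/detectron2_to_mmdet.py \
#       model_final_d2.pkl model_final_mmdet.pth --prefix d2_model
#
# Every converted weight keeps its Detectron2 name under the given prefix,
# i.e. a key `some.weight` is stored as `d2_model.some.weight`.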
| 1,653 | 32.755102 | 78 |
py
|
ERD
|
ERD-main/tools/model_converters/upgrade_ssd_version.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from collections import OrderedDict
import torch
from mmengine import Config
def parse_config(config_strings):
temp_file = tempfile.NamedTemporaryFile()
config_path = f'{temp_file.name}.py'
with open(config_path, 'w') as f:
f.write(config_strings)
config = Config.fromfile(config_path)
# check whether it is SSD
if config.model.bbox_head.type != 'SSDHead':
raise AssertionError('This is not a SSD model.')
def convert(in_file, out_file):
checkpoint = torch.load(in_file)
in_state_dict = checkpoint.pop('state_dict')
out_state_dict = OrderedDict()
meta_info = checkpoint['meta']
parse_config('#' + meta_info['config'])
for key, value in in_state_dict.items():
if 'extra' in key:
layer_idx = int(key.split('.')[2])
new_key = 'neck.extra_layers.{}.{}.conv.'.format(
layer_idx // 2, layer_idx % 2) + key.split('.')[-1]
elif 'l2_norm' in key:
new_key = 'neck.l2_norm.weight'
elif 'bbox_head' in key:
new_key = key[:21] + '.0' + key[21:]
else:
new_key = key
out_state_dict[new_key] = value
checkpoint['state_dict'] = out_state_dict
if torch.__version__ >= '1.6':
torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
else:
torch.save(checkpoint, out_file)
def main():
parser = argparse.ArgumentParser(description='Upgrade SSD version')
parser.add_argument('in_file', help='input checkpoint file')
parser.add_argument('out_file', help='output checkpoint file')
args = parser.parse_args()
convert(args.in_file, args.out_file)
if __name__ == '__main__':
main()
| 1,793 | 29.40678 | 78 |
py
|
ERD
|
ERD-main/tools/model_converters/detectron2pytorch.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
from collections import OrderedDict
import torch
from mmengine.fileio import load
arch_settings = {50: (3, 4, 6, 3), 101: (3, 4, 23, 3)}
def convert_bn(blobs, state_dict, caffe_name, torch_name, converted_names):
# detectron replace bn with affine channel layer
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_s'])
bn_size = state_dict[torch_name + '.weight'].size()
state_dict[torch_name + '.running_mean'] = torch.zeros(bn_size)
state_dict[torch_name + '.running_var'] = torch.ones(bn_size)
converted_names.add(caffe_name + '_b')
converted_names.add(caffe_name + '_s')
def convert_conv_fc(blobs, state_dict, caffe_name, torch_name,
converted_names):
state_dict[torch_name + '.weight'] = torch.from_numpy(blobs[caffe_name +
'_w'])
converted_names.add(caffe_name + '_w')
if caffe_name + '_b' in blobs:
state_dict[torch_name + '.bias'] = torch.from_numpy(blobs[caffe_name +
'_b'])
converted_names.add(caffe_name + '_b')
def convert(src, dst, depth):
"""Convert keys in detectron pretrained ResNet models to pytorch style."""
# load arch_settings
if depth not in arch_settings:
raise ValueError('Only support ResNet-50 and ResNet-101 currently')
block_nums = arch_settings[depth]
# load caffe model
caffe_model = load(src, encoding='latin1')
blobs = caffe_model['blobs'] if 'blobs' in caffe_model else caffe_model
# convert to pytorch style
state_dict = OrderedDict()
converted_names = set()
convert_conv_fc(blobs, state_dict, 'conv1', 'conv1', converted_names)
convert_bn(blobs, state_dict, 'res_conv1_bn', 'bn1', converted_names)
for i in range(1, len(block_nums) + 1):
for j in range(block_nums[i - 1]):
if j == 0:
convert_conv_fc(blobs, state_dict, f'res{i + 1}_{j}_branch1',
f'layer{i}.{j}.downsample.0', converted_names)
convert_bn(blobs, state_dict, f'res{i + 1}_{j}_branch1_bn',
f'layer{i}.{j}.downsample.1', converted_names)
for k, letter in enumerate(['a', 'b', 'c']):
convert_conv_fc(blobs, state_dict,
f'res{i + 1}_{j}_branch2{letter}',
f'layer{i}.{j}.conv{k+1}', converted_names)
convert_bn(blobs, state_dict,
f'res{i + 1}_{j}_branch2{letter}_bn',
f'layer{i}.{j}.bn{k + 1}', converted_names)
# check if all layers are converted
for key in blobs:
if key not in converted_names:
print(f'Not Convert: {key}')
# save checkpoint
checkpoint = dict()
checkpoint['state_dict'] = state_dict
torch.save(checkpoint, dst)
def main():
parser = argparse.ArgumentParser(description='Convert model keys')
parser.add_argument('src', help='src detectron model path')
parser.add_argument('dst', help='save path')
parser.add_argument('depth', type=int, help='ResNet model depth')
args = parser.parse_args()
convert(args.src, args.dst, args.depth)
if __name__ == '__main__':
main()
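# Example invocation (a sketch only; the Detectron checkpoint path is a
# placeholder):
#
#   python tools/model_converters/detectron2pytorch.py \
#       R-50.pkl resnet50_mmdet.pth 50
#
# `depth` must be 50 or 101; other depths raise a ValueError (see
# `arch_settings` above).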
| 3,594 | 41.797619 | 78 |
py
|
ERD
|
ERD-main/tools/dataset_converters/images2coco.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from mmengine.fileio import dump, list_from_file
from mmengine.utils import mkdir_or_exist, scandir, track_iter_progress
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser(
description='Convert images to coco format without annotations')
parser.add_argument('img_path', help='The root path of images')
parser.add_argument(
'classes', type=str, help='The text file name of storage class list')
parser.add_argument(
'out',
type=str,
help='The output annotation json file name, The save dir is in the '
'same directory as img_path')
parser.add_argument(
'-e',
'--exclude-extensions',
type=str,
nargs='+',
help='The suffix of images to be excluded, such as "png" and "bmp"')
args = parser.parse_args()
return args
def collect_image_infos(path, exclude_extensions=None):
img_infos = []
images_generator = scandir(path, recursive=True)
for image_path in track_iter_progress(list(images_generator)):
if exclude_extensions is None or (
exclude_extensions is not None
and not image_path.lower().endswith(exclude_extensions)):
image_path = os.path.join(path, image_path)
img_pillow = Image.open(image_path)
img_info = {
'filename': image_path,
'width': img_pillow.width,
'height': img_pillow.height,
}
img_infos.append(img_info)
return img_infos
def cvt_to_coco_json(img_infos, classes):
image_id = 0
coco = dict()
coco['images'] = []
coco['type'] = 'instance'
coco['categories'] = []
coco['annotations'] = []
image_set = set()
for category_id, name in enumerate(classes):
category_item = dict()
category_item['supercategory'] = str('none')
category_item['id'] = int(category_id)
category_item['name'] = str(name)
coco['categories'].append(category_item)
for img_dict in img_infos:
file_name = img_dict['filename']
assert file_name not in image_set
image_item = dict()
image_item['id'] = int(image_id)
image_item['file_name'] = str(file_name)
image_item['height'] = int(img_dict['height'])
image_item['width'] = int(img_dict['width'])
coco['images'].append(image_item)
image_set.add(file_name)
image_id += 1
return coco
def main():
args = parse_args()
    assert args.out.endswith(
        'json'), 'The output file name must have a json suffix'
# 1 load image list info
img_infos = collect_image_infos(args.img_path, args.exclude_extensions)
# 2 convert to coco format data
classes = list_from_file(args.classes)
coco_info = cvt_to_coco_json(img_infos, classes)
# 3 dump
save_dir = os.path.join(args.img_path, '..', 'annotations')
mkdir_or_exist(save_dir)
save_path = os.path.join(save_dir, args.out)
dump(coco_info, save_path)
print(f'save json file: {save_path}')
if __name__ == '__main__':
main()
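# Example invocation (a sketch only; the image folder and class-list file are
# placeholders):
#
#   python tools/dataset_converters/images2coco.py data/my_images classes.txt \
#       unlabeled.json -e png bmp
#
# The json is written to data/my_images/../annotations/unlabeled.json and
# contains image entries and categories but no annotations.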
| 3,193 | 30.009709 | 77 |
py
|
ERD
|
ERD-main/tools/dataset_converters/cityscapes.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import cityscapesscripts.helpers.labels as CSLabels
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
from mmengine.fileio import dump
from mmengine.utils import (Timer, mkdir_or_exist, track_parallel_progress,
track_progress)
def collect_files(img_dir, gt_dir):
suffix = 'leftImg8bit.png'
files = []
for img_file in glob.glob(osp.join(img_dir, '**/*.png')):
assert img_file.endswith(suffix), img_file
inst_file = gt_dir + img_file[
len(img_dir):-len(suffix)] + 'gtFine_instanceIds.png'
# Note that labelIds are not converted to trainId for seg map
segm_file = gt_dir + img_file[
len(img_dir):-len(suffix)] + 'gtFine_labelIds.png'
files.append((img_file, inst_file, segm_file))
assert len(files), f'No images found in {img_dir}'
print(f'Loaded {len(files)} images from {img_dir}')
return files
def collect_annotations(files, nproc=1):
print('Loading annotation images')
if nproc > 1:
images = track_parallel_progress(load_img_info, files, nproc=nproc)
else:
images = track_progress(load_img_info, files)
return images
def load_img_info(files):
img_file, inst_file, segm_file = files
inst_img = mmcv.imread(inst_file, 'unchanged')
# ids < 24 are stuff labels (filtering them first is about 5% faster)
unique_inst_ids = np.unique(inst_img[inst_img >= 24])
anno_info = []
for inst_id in unique_inst_ids:
# For non-crowd annotations, inst_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = inst_id // 1000 if inst_id >= 1000 else inst_id
label = CSLabels.id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
category_id = label.id
iscrowd = int(inst_id < 1000)
mask = np.asarray(inst_img == inst_id, dtype=np.uint8, order='F')
mask_rle = maskUtils.encode(mask[:, :, None])[0]
area = maskUtils.area(mask_rle)
# convert to COCO style XYWH format
bbox = maskUtils.toBbox(mask_rle)
# for json encoding
mask_rle['counts'] = mask_rle['counts'].decode()
anno = dict(
iscrowd=iscrowd,
category_id=category_id,
bbox=bbox.tolist(),
area=area.tolist(),
segmentation=mask_rle)
anno_info.append(anno)
video_name = osp.basename(osp.dirname(img_file))
img_info = dict(
# remove img_prefix for filename
file_name=osp.join(video_name, osp.basename(img_file)),
height=inst_img.shape[0],
width=inst_img.shape[1],
anno_info=anno_info,
segm_file=osp.join(video_name, osp.basename(segm_file)))
return img_info
def cvt_annotations(image_infos, out_json_name):
out_json = dict()
img_id = 0
ann_id = 0
out_json['images'] = []
out_json['categories'] = []
out_json['annotations'] = []
for image_info in image_infos:
image_info['id'] = img_id
anno_infos = image_info.pop('anno_info')
out_json['images'].append(image_info)
for anno_info in anno_infos:
anno_info['image_id'] = img_id
anno_info['id'] = ann_id
out_json['annotations'].append(anno_info)
ann_id += 1
img_id += 1
for label in CSLabels.labels:
if label.hasInstances and not label.ignoreInEval:
cat = dict(id=label.id, name=label.name)
out_json['categories'].append(cat)
if len(out_json['annotations']) == 0:
out_json.pop('annotations')
dump(out_json, out_json_name)
return out_json
def parse_args():
parser = argparse.ArgumentParser(
description='Convert Cityscapes annotations to COCO format')
parser.add_argument('cityscapes_path', help='cityscapes data path')
parser.add_argument('--img-dir', default='leftImg8bit', type=str)
parser.add_argument('--gt-dir', default='gtFine', type=str)
parser.add_argument('-o', '--out-dir', help='output path')
parser.add_argument(
'--nproc', default=1, type=int, help='number of process')
args = parser.parse_args()
return args
def main():
args = parse_args()
cityscapes_path = args.cityscapes_path
out_dir = args.out_dir if args.out_dir else cityscapes_path
mkdir_or_exist(out_dir)
img_dir = osp.join(cityscapes_path, args.img_dir)
gt_dir = osp.join(cityscapes_path, args.gt_dir)
set_name = dict(
train='instancesonly_filtered_gtFine_train.json',
val='instancesonly_filtered_gtFine_val.json',
test='instancesonly_filtered_gtFine_test.json')
for split, json_name in set_name.items():
print(f'Converting {split} into {json_name}')
with Timer(print_tmpl='It took {}s to convert Cityscapes annotation'):
files = collect_files(
osp.join(img_dir, split), osp.join(gt_dir, split))
image_infos = collect_annotations(files, nproc=args.nproc)
cvt_annotations(image_infos, osp.join(out_dir, json_name))
if __name__ == '__main__':
main()
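# Example invocation (a sketch only; the Cityscapes root is a placeholder):
#
#   python tools/dataset_converters/cityscapes.py data/cityscapes \
#       --img-dir leftImg8bit --gt-dir gtFine -o data/cityscapes/annotations \
#       --nproc 8
#
# This writes instancesonly_filtered_gtFine_{train,val,test}.json in COCO
# format under the output directory.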
| 5,270 | 33.227273 | 78 |
py
|
ERD
|
ERD-main/tools/dataset_converters/pascal_voc.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import xml.etree.ElementTree as ET
import numpy as np
from mmengine.fileio import dump, list_from_file
from mmengine.utils import mkdir_or_exist, track_progress
from mmdet.evaluation import voc_classes
label_ids = {name: i for i, name in enumerate(voc_classes())}
def parse_xml(args):
xml_path, img_path = args
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
bboxes = []
labels = []
bboxes_ignore = []
labels_ignore = []
for obj in root.findall('object'):
name = obj.find('name').text
label = label_ids[name]
difficult = int(obj.find('difficult').text)
bnd_box = obj.find('bndbox')
bbox = [
int(bnd_box.find('xmin').text),
int(bnd_box.find('ymin').text),
int(bnd_box.find('xmax').text),
int(bnd_box.find('ymax').text)
]
if difficult:
bboxes_ignore.append(bbox)
labels_ignore.append(label)
else:
bboxes.append(bbox)
labels.append(label)
if not bboxes:
bboxes = np.zeros((0, 4))
labels = np.zeros((0, ))
else:
bboxes = np.array(bboxes, ndmin=2) - 1
labels = np.array(labels)
if not bboxes_ignore:
bboxes_ignore = np.zeros((0, 4))
labels_ignore = np.zeros((0, ))
else:
bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
labels_ignore = np.array(labels_ignore)
annotation = {
'filename': img_path,
'width': w,
'height': h,
'ann': {
'bboxes': bboxes.astype(np.float32),
'labels': labels.astype(np.int64),
'bboxes_ignore': bboxes_ignore.astype(np.float32),
'labels_ignore': labels_ignore.astype(np.int64)
}
}
return annotation
def cvt_annotations(devkit_path, years, split, out_file):
if not isinstance(years, list):
years = [years]
annotations = []
for year in years:
filelist = osp.join(devkit_path,
f'VOC{year}/ImageSets/Main/{split}.txt')
if not osp.isfile(filelist):
print(f'filelist does not exist: {filelist}, '
f'skip voc{year} {split}')
return
img_names = list_from_file(filelist)
xml_paths = [
osp.join(devkit_path, f'VOC{year}/Annotations/{img_name}.xml')
for img_name in img_names
]
img_paths = [
f'VOC{year}/JPEGImages/{img_name}.jpg' for img_name in img_names
]
part_annotations = track_progress(parse_xml,
list(zip(xml_paths, img_paths)))
annotations.extend(part_annotations)
if out_file.endswith('json'):
annotations = cvt_to_coco_json(annotations)
dump(annotations, out_file)
return annotations
def cvt_to_coco_json(annotations):
image_id = 0
annotation_id = 0
coco = dict()
coco['images'] = []
coco['type'] = 'instance'
coco['categories'] = []
coco['annotations'] = []
image_set = set()
def addAnnItem(annotation_id, image_id, category_id, bbox, difficult_flag):
annotation_item = dict()
annotation_item['segmentation'] = []
seg = []
# bbox[] is x1,y1,x2,y2
# left_top
seg.append(int(bbox[0]))
seg.append(int(bbox[1]))
# left_bottom
seg.append(int(bbox[0]))
seg.append(int(bbox[3]))
# right_bottom
seg.append(int(bbox[2]))
seg.append(int(bbox[3]))
# right_top
seg.append(int(bbox[2]))
seg.append(int(bbox[1]))
annotation_item['segmentation'].append(seg)
xywh = np.array(
[bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]])
annotation_item['area'] = int(xywh[2] * xywh[3])
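        # difficult objects are exported as crowd regions so COCO-style eval can ignore them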
if difficult_flag == 1:
annotation_item['ignore'] = 0
annotation_item['iscrowd'] = 1
else:
annotation_item['ignore'] = 0
annotation_item['iscrowd'] = 0
annotation_item['image_id'] = int(image_id)
annotation_item['bbox'] = xywh.astype(int).tolist()
annotation_item['category_id'] = int(category_id)
annotation_item['id'] = int(annotation_id)
coco['annotations'].append(annotation_item)
return annotation_id + 1
for category_id, name in enumerate(voc_classes()):
category_item = dict()
category_item['supercategory'] = str('none')
category_item['id'] = int(category_id)
category_item['name'] = str(name)
coco['categories'].append(category_item)
for ann_dict in annotations:
file_name = ann_dict['filename']
ann = ann_dict['ann']
assert file_name not in image_set
image_item = dict()
image_item['id'] = int(image_id)
image_item['file_name'] = str(file_name)
image_item['height'] = int(ann_dict['height'])
image_item['width'] = int(ann_dict['width'])
coco['images'].append(image_item)
image_set.add(file_name)
bboxes = ann['bboxes'][:, :4]
labels = ann['labels']
for bbox_id in range(len(bboxes)):
bbox = bboxes[bbox_id]
label = labels[bbox_id]
annotation_id = addAnnItem(
annotation_id, image_id, label, bbox, difficult_flag=0)
bboxes_ignore = ann['bboxes_ignore'][:, :4]
labels_ignore = ann['labels_ignore']
for bbox_id in range(len(bboxes_ignore)):
bbox = bboxes_ignore[bbox_id]
label = labels_ignore[bbox_id]
annotation_id = addAnnItem(
annotation_id, image_id, label, bbox, difficult_flag=1)
image_id += 1
return coco
def parse_args():
parser = argparse.ArgumentParser(
description='Convert PASCAL VOC annotations to mmdetection format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('-o', '--out-dir', help='output path')
parser.add_argument(
'--out-format',
default='pkl',
choices=('pkl', 'coco'),
help='output format, "coco" indicates coco annotation format')
args = parser.parse_args()
return args
def main():
args = parse_args()
devkit_path = args.devkit_path
out_dir = args.out_dir if args.out_dir else devkit_path
mkdir_or_exist(out_dir)
years = []
if osp.isdir(osp.join(devkit_path, 'VOC2007')):
years.append('2007')
if osp.isdir(osp.join(devkit_path, 'VOC2012')):
years.append('2012')
if '2007' in years and '2012' in years:
years.append(['2007', '2012'])
if not years:
raise IOError(f'The devkit path {devkit_path} contains neither '
'"VOC2007" nor "VOC2012" subfolder')
out_fmt = f'.{args.out_format}'
if args.out_format == 'coco':
out_fmt = '.json'
for year in years:
if year == '2007':
prefix = 'voc07'
elif year == '2012':
prefix = 'voc12'
elif year == ['2007', '2012']:
prefix = 'voc0712'
for split in ['train', 'val', 'trainval']:
dataset_name = prefix + '_' + split
print(f'processing {dataset_name} ...')
cvt_annotations(devkit_path, year, split,
osp.join(out_dir, dataset_name + out_fmt))
if not isinstance(year, list):
dataset_name = prefix + '_test'
print(f'processing {dataset_name} ...')
cvt_annotations(devkit_path, year, 'test',
osp.join(out_dir, dataset_name + out_fmt))
print('Done!')
if __name__ == '__main__':
main()
| 7,917 | 32.129707 | 79 |
py
|
ERD
|
ERD-main/tools/analysis_tools/analyze_results.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from multiprocessing import Pool
import mmcv
import numpy as np
from mmengine.config import Config, DictAction
from mmengine.fileio import load
from mmengine.registry import init_default_scope
from mmengine.runner import Runner
from mmengine.structures import InstanceData, PixelData
from mmengine.utils import ProgressBar, check_file_exist, mkdir_or_exist
from mmdet.datasets import get_loading_pipeline
from mmdet.evaluation import eval_map
from mmdet.registry import DATASETS, RUNNERS
from mmdet.structures import DetDataSample
from mmdet.utils import replace_cfg_vals, update_data_root
from mmdet.visualization import DetLocalVisualizer
def bbox_map_eval(det_result, annotation, nproc=4):
"""Evaluate mAP of single image det result.
Args:
det_result (list[list]): [[cls1_det, cls2_det, ...], ...].
The outer list indicates images, and the inner list indicates
per-class detected bboxes.
annotation (dict): Ground truth annotations where keys of
annotations are:
- bboxes: numpy array of shape (n, 4)
- labels: numpy array of shape (n, )
- bboxes_ignore (optional): numpy array of shape (k, 4)
- labels_ignore (optional): numpy array of shape (k, )
nproc (int): Processes used for computing mAP.
Default: 4.
Returns:
float: mAP
"""
# use only bbox det result
if isinstance(det_result, tuple):
bbox_det_result = [det_result[0]]
else:
bbox_det_result = [det_result]
# mAP
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
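    # COCO-style IoU thresholds 0.50:0.05:0.95, each evaluated in its own worker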
processes = []
workers = Pool(processes=nproc)
for thr in iou_thrs:
p = workers.apply_async(eval_map, (bbox_det_result, [annotation]), {
'iou_thr': thr,
'logger': 'silent',
'nproc': 1
})
processes.append(p)
workers.close()
workers.join()
mean_aps = []
for p in processes:
mean_aps.append(p.get()[0])
return sum(mean_aps) / len(mean_aps)
class ResultVisualizer:
"""Display and save evaluation results.
Args:
show (bool): Whether to show the image. Default: True.
wait_time (float): Value of waitKey param. Default: 0.
score_thr (float): Minimum score of bboxes to be shown.
Default: 0.
runner (:obj:`Runner`): The runner of the visualization process.
"""
def __init__(self, show=False, wait_time=0, score_thr=0, runner=None):
self.show = show
self.wait_time = wait_time
self.score_thr = score_thr
self.visualizer = DetLocalVisualizer()
self.runner = runner
self.evaluator = runner.test_evaluator
def _save_image_gts_results(self,
dataset,
results,
performances,
out_dir=None,
task='det'):
"""Display or save image with groung truths and predictions from a
model.
Args:
dataset (Dataset): A PyTorch dataset.
results (list): Object detection or panoptic segmentation
results from test results pkl file.
            performances (dict): A dict that contains samples' indices
                in dataset and model's performance on them.
out_dir (str, optional): The filename to write the image.
Defaults: None.
task (str): The task to be performed. Defaults: 'det'
"""
mkdir_or_exist(out_dir)
for performance_info in performances:
index, performance = performance_info
data_info = dataset[index]
data_info['gt_instances'] = data_info['instances']
# calc save file path
filename = data_info['img_path']
fname, name = osp.splitext(osp.basename(filename))
save_filename = fname + '_' + str(round(performance, 3)) + name
out_file = osp.join(out_dir, save_filename)
if task == 'det':
gt_instances = InstanceData()
gt_instances.bboxes = results[index]['gt_instances']['bboxes']
gt_instances.labels = results[index]['gt_instances']['labels']
pred_instances = InstanceData()
pred_instances.bboxes = results[index]['pred_instances'][
'bboxes']
pred_instances.labels = results[index]['pred_instances'][
'labels']
pred_instances.scores = results[index]['pred_instances'][
'scores']
data_samples = DetDataSample()
data_samples.pred_instances = pred_instances
data_samples.gt_instances = gt_instances
elif task == 'seg':
gt_panoptic_seg = PixelData()
gt_panoptic_seg.sem_seg = results[index]['gt_seg_map']
pred_panoptic_seg = PixelData()
pred_panoptic_seg.sem_seg = results[index][
'pred_panoptic_seg']['sem_seg']
data_samples = DetDataSample()
data_samples.pred_panoptic_seg = pred_panoptic_seg
data_samples.gt_panoptic_seg = gt_panoptic_seg
img = mmcv.imread(filename, channel_order='rgb')
self.visualizer.add_datasample(
'image',
img,
data_samples,
show=self.show,
draw_gt=False,
pred_score_thr=self.score_thr,
out_file=out_file)
def evaluate_and_show(self,
dataset,
results,
topk=20,
show_dir='work_dir'):
"""Evaluate and show results.
Args:
dataset (Dataset): A PyTorch dataset.
results (list): Object detection or panoptic segmentation
results from test results pkl file.
topk (int): Number of the highest topk and
lowest topk after evaluation index sorting. Default: 20.
show_dir (str, optional): The filename to write the image.
Default: 'work_dir'
"""
self.visualizer.dataset_meta = dataset.metainfo
assert topk > 0
if (topk * 2) > len(dataset):
topk = len(dataset) // 2
good_dir = osp.abspath(osp.join(show_dir, 'good'))
bad_dir = osp.abspath(osp.join(show_dir, 'bad'))
if 'pred_panoptic_seg' in results[0].keys():
good_samples, bad_samples = self.panoptic_evaluate(
dataset, results, topk=topk)
self._save_image_gts_results(
dataset, results, good_samples, good_dir, task='seg')
self._save_image_gts_results(
dataset, results, bad_samples, bad_dir, task='seg')
elif 'pred_instances' in results[0].keys():
good_samples, bad_samples = self.detection_evaluate(
dataset, results, topk=topk)
self._save_image_gts_results(
dataset, results, good_samples, good_dir, task='det')
self._save_image_gts_results(
dataset, results, bad_samples, bad_dir, task='det')
else:
            raise KeyError("expect 'pred_panoptic_seg' or 'pred_instances' "
                           "in dict result")
def detection_evaluate(self, dataset, results, topk=20, eval_fn=None):
"""Evaluation for object detection.
Args:
dataset (Dataset): A PyTorch dataset.
results (list): Object detection results from test
results pkl file.
topk (int): Number of the highest topk and
lowest topk after evaluation index sorting. Default: 20.
eval_fn (callable, optional): Eval function, Default: None.
Returns:
tuple: A tuple contains good samples and bad samples.
                good_mAPs (dict[int, float]): A dict that contains good
                    samples' indices in dataset and model's
                    performance on them.
                bad_mAPs (dict[int, float]): A dict that contains bad
                    samples' indices in dataset and model's
                    performance on them.
"""
if eval_fn is None:
eval_fn = bbox_map_eval
else:
assert callable(eval_fn)
prog_bar = ProgressBar(len(results))
_mAPs = {}
data_info = {}
        for i, result in enumerate(results):
# self.dataset[i] should not call directly
# because there is a risk of mismatch
data_info = dataset.prepare_data(i)
data_info['bboxes'] = data_info['gt_bboxes'].tensor
data_info['labels'] = data_info['gt_bboxes_labels']
pred = result['pred_instances']
pred_bboxes = pred['bboxes'].cpu().numpy()
pred_scores = pred['scores'].cpu().numpy()
pred_labels = pred['labels'].cpu().numpy()
dets = []
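            # group predictions by class; each entry is an (n, 5) array of [x1, y1, x2, y2, score]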
for label in range(len(dataset.metainfo['classes'])):
index = np.where(pred_labels == label)[0]
pred_bbox_scores = np.hstack(
[pred_bboxes[index], pred_scores[index].reshape((-1, 1))])
dets.append(pred_bbox_scores)
mAP = eval_fn(dets, data_info)
_mAPs[i] = mAP
prog_bar.update()
# descending select topk image
_mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1]))
good_mAPs = _mAPs[-topk:]
bad_mAPs = _mAPs[:topk]
return good_mAPs, bad_mAPs
def panoptic_evaluate(self, dataset, results, topk=20):
"""Evaluation for panoptic segmentation.
Args:
dataset (Dataset): A PyTorch dataset.
results (list): Panoptic segmentation results from test
results pkl file.
topk (int): Number of the highest topk and
lowest topk after evaluation index sorting. Default: 20.
Returns:
tuple: A tuple contains good samples and bad samples.
            good_pqs (dict[int, float]): A dict that contains good
                samples' indices in dataset and model's
                performance on them.
            bad_pqs (dict[int, float]): A dict that contains bad
                samples' indices in dataset and model's
                performance on them.
"""
pqs = {}
prog_bar = ProgressBar(len(results))
for i in range(len(results)):
data_sample = {}
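            # merge ground truth from the dataset and predictions from the results into one sample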
for k in dataset[i].keys():
data_sample[k] = dataset[i][k]
for k in results[i].keys():
data_sample[k] = results[i][k]
self.evaluator.process([data_sample])
metrics = self.evaluator.evaluate(1)
pqs[i] = metrics['coco_panoptic/PQ']
prog_bar.update()
# descending select topk image
pqs = list(sorted(pqs.items(), key=lambda kv: kv[1]))
good_pqs = pqs[-topk:]
bad_pqs = pqs[:topk]
return good_pqs, bad_pqs
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet eval image prediction result for each')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'prediction_path', help='prediction path where test pkl result')
parser.add_argument(
'show_dir', help='directory where painted images will be saved')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--wait-time',
type=float,
default=0,
help='the interval of show (s), 0 is block')
parser.add_argument(
'--topk',
default=20,
type=int,
        help='number of saved images for the highest topk '
        'and lowest topk after index sorting')
parser.add_argument(
'--show-score-thr',
type=float,
default=0,
help='score threshold (default: 0.)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
check_file_exist(args.prediction_path)
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
init_default_scope(cfg.get('default_scope', 'mmdet'))
cfg.test_dataloader.dataset.test_mode = True
cfg.test_dataloader.pop('batch_size', 0)
if cfg.train_dataloader.dataset.type in ('MultiImageMixDataset',
'ClassBalancedDataset',
'RepeatDataset', 'ConcatDataset'):
cfg.test_dataloader.dataset.pipeline = get_loading_pipeline(
cfg.train_dataloader.dataset.dataset.pipeline)
else:
cfg.test_dataloader.dataset.pipeline = get_loading_pipeline(
cfg.train_dataloader.dataset.pipeline)
dataset = DATASETS.build(cfg.test_dataloader.dataset)
outputs = load(args.prediction_path)
cfg.work_dir = args.show_dir
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
result_visualizer = ResultVisualizer(args.show, args.wait_time,
args.show_score_thr, runner)
result_visualizer.evaluate_and_show(
dataset, outputs, topk=args.topk, show_dir=args.show_dir)
if __name__ == '__main__':
main()
| 14,578 | 35.538847 | 79 |
py
|
ERD
|
ERD-main/tools/analysis_tools/eval_metric.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmengine
from mmengine import Config, DictAction
from mmengine.evaluator import Evaluator
from mmengine.registry import init_default_scope
from mmdet.registry import DATASETS
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
init_default_scope(cfg.get('default_scope', 'mmdet'))
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
dataset = DATASETS.build(cfg.test_dataloader.dataset)
predictions = mmengine.load(args.pkl_results)
evaluator = Evaluator(cfg.val_evaluator)
evaluator.dataset_meta = dataset.metainfo
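    # evaluate the dumped predictions offline with the configured metric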
eval_results = evaluator.offline_evaluate(predictions)
print(eval_results)
if __name__ == '__main__':
main()
| 1,645 | 31.27451 | 78 |
py
|
ERD
|
ERD-main/tools/analysis_tools/benchmark.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
from mmengine import MMLogger
from mmengine.config import Config, DictAction
from mmengine.dist import init_dist
from mmengine.registry import init_default_scope
from mmengine.utils import mkdir_or_exist
from mmdet.utils.benchmark import (DataLoaderBenchmark, DatasetBenchmark,
InferenceBenchmark)
def parse_args():
parser = argparse.ArgumentParser(description='MMDet benchmark')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file')
parser.add_argument(
'--task',
choices=['inference', 'dataloader', 'dataset'],
default='dataloader',
        help='which task to benchmark')
parser.add_argument(
'--repeat-num',
type=int,
default=1,
help='number of repeat times of measurement for averaging the results')
parser.add_argument(
'--max-iter', type=int, default=2000, help='num of max iter')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
parser.add_argument(
'--num-warmup', type=int, default=5, help='Number of warmup')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
        'the inference speed')
parser.add_argument(
'--dataset-type',
choices=['train', 'val', 'test'],
default='test',
help='Benchmark dataset type. only supports train, val and test')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing '
'benchmark metrics')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def inference_benchmark(args, cfg, distributed, logger):
benchmark = InferenceBenchmark(
cfg,
args.checkpoint,
distributed,
args.fuse_conv_bn,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def dataloader_benchmark(args, cfg, distributed, logger):
benchmark = DataLoaderBenchmark(
cfg,
distributed,
args.dataset_type,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def dataset_benchmark(args, cfg, distributed, logger):
benchmark = DatasetBenchmark(
cfg,
args.dataset_type,
args.max_iter,
args.log_interval,
args.num_warmup,
logger=logger)
return benchmark
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
init_default_scope(cfg.get('default_scope', 'mmdet'))
distributed = False
if args.launcher != 'none':
init_dist(args.launcher, **cfg.get('env_cfg', {}).get('dist_cfg', {}))
distributed = True
log_file = None
if args.work_dir:
log_file = os.path.join(args.work_dir, 'benchmark.log')
mkdir_or_exist(args.work_dir)
logger = MMLogger.get_instance(
'mmdet', log_file=log_file, log_level='INFO')
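    # dispatch to inference_benchmark / dataloader_benchmark / dataset_benchmark based on --task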
benchmark = eval(f'{args.task}_benchmark')(args, cfg, distributed, logger)
benchmark.run(args.repeat_num)
if __name__ == '__main__':
main()
| 4,242 | 30.664179 | 79 |
py
|
ERD
|
ERD-main/tools/analysis_tools/optimize_anchors.py
|
# Copyright (c) OpenMMLab. All rights reserved.
"""Optimize anchor settings on a specific dataset.
This script provides two methods to optimize YOLO anchors: k-means
anchor clustering and differential evolution. You can use ``--algorithm k-means``
and ``--algorithm differential_evolution`` to switch between the two methods.
Example:
Use k-means anchor cluster::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
Use differential evolution to optimize anchors::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm differential_evolution \
--input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
"""
import argparse
import os.path as osp
import numpy as np
import torch
from mmengine.config import Config
from mmengine.fileio import dump
from mmengine.logging import MMLogger
from mmengine.registry import init_default_scope
from mmengine.utils import ProgressBar
from scipy.optimize import differential_evolution
from mmdet.registry import DATASETS
from mmdet.structures.bbox import (bbox_cxcywh_to_xyxy, bbox_overlaps,
bbox_xyxy_to_cxcywh)
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Optimize anchor parameters.')
parser.add_argument('config', help='Train config file path.')
parser.add_argument(
'--device', default='cuda:0', help='Device used for calculating.')
parser.add_argument(
'--input-shape',
type=int,
nargs='+',
default=[608, 608],
help='input image size')
parser.add_argument(
'--algorithm',
default='differential_evolution',
        help='Algorithm used for anchor optimizing. '
        'Support k-means and differential_evolution for YOLO.')
parser.add_argument(
'--iters',
default=1000,
type=int,
help='Maximum iterations for optimizer.')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='Path to save anchor optimize result.')
args = parser.parse_args()
return args
class BaseAnchorOptimizer:
"""Base class for anchor optimizer.
Args:
dataset (obj:`Dataset`): Dataset object.
input_shape (list[int]): Input image shape of the model.
Format in [width, height].
logger (obj:`logging.Logger`): The logger for logging.
device (str, optional): Device used for calculating.
Default: 'cuda:0'
out_dir (str, optional): Path to save anchor optimize result.
Default: None
"""
def __init__(self,
dataset,
input_shape,
logger,
device='cuda:0',
out_dir=None):
self.dataset = dataset
self.input_shape = input_shape
self.logger = logger
self.device = device
self.out_dir = out_dir
bbox_whs, img_shapes = self.get_whs_and_shapes()
ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape])
# resize to input shape
self.bbox_whs = bbox_whs / ratios
def get_whs_and_shapes(self):
"""Get widths and heights of bboxes and shapes of images.
Returns:
tuple[np.ndarray]: Array of bbox shapes and array of image
shapes with shape (num_bboxes, 2) in [width, height] format.
"""
self.logger.info('Collecting bboxes from annotation...')
bbox_whs = []
img_shapes = []
prog_bar = ProgressBar(len(self.dataset))
for idx in range(len(self.dataset)):
data_info = self.dataset.get_data_info(idx)
img_shape = np.array([data_info['width'], data_info['height']])
gt_instances = data_info['instances']
for instance in gt_instances:
bbox = np.array(instance['bbox'])
wh = bbox[2:4] - bbox[0:2]
img_shapes.append(img_shape)
bbox_whs.append(wh)
prog_bar.update()
print('\n')
bbox_whs = np.array(bbox_whs)
img_shapes = np.array(img_shapes)
self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.')
return bbox_whs, img_shapes
def get_zero_center_bbox_tensor(self):
"""Get a tensor of bboxes centered at (0, 0).
Returns:
Tensor: Tensor of bboxes with shape (num_bboxes, 4)
in [xmin, ymin, xmax, ymax] format.
"""
whs = torch.from_numpy(self.bbox_whs).to(
self.device, dtype=torch.float32)
bboxes = bbox_cxcywh_to_xyxy(
torch.cat([torch.zeros_like(whs), whs], dim=1))
return bboxes
def optimize(self):
raise NotImplementedError
def save_result(self, anchors, path=None):
anchor_results = []
for w, h in anchors:
anchor_results.append([round(w), round(h)])
        self.logger.info(f'Anchor optimize result: {anchor_results}')
if path:
json_path = osp.join(path, 'anchor_optimize_result.json')
dump(anchor_results, json_path)
self.logger.info(f'Result saved in {json_path}')
class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer):
r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet.
<https://github.com/AlexeyAB/darknet/blob/master/src/detector.c>`_.
Args:
num_anchors (int) : Number of anchors.
iters (int): Maximum iterations for k-means.
"""
def __init__(self, num_anchors, iters, **kwargs):
super(YOLOKMeansAnchorOptimizer, self).__init__(**kwargs)
self.num_anchors = num_anchors
self.iters = iters
def optimize(self):
anchors = self.kmeans_anchors()
self.save_result(anchors, self.out_dir)
def kmeans_anchors(self):
self.logger.info(
f'Start cluster {self.num_anchors} YOLO anchors with K-means...')
bboxes = self.get_zero_center_bbox_tensor()
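        # initialise cluster centers from randomly sampled ground-truth boxes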
cluster_center_idx = torch.randint(
0, bboxes.shape[0], (self.num_anchors, )).to(self.device)
assignments = torch.zeros((bboxes.shape[0], )).to(self.device)
cluster_centers = bboxes[cluster_center_idx]
if self.num_anchors == 1:
cluster_centers = self.kmeans_maximization(bboxes, assignments,
cluster_centers)
anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
return anchors
prog_bar = ProgressBar(self.iters)
for i in range(self.iters):
converged, assignments = self.kmeans_expectation(
bboxes, assignments, cluster_centers)
if converged:
self.logger.info(f'K-means process has converged at iter {i}.')
break
cluster_centers = self.kmeans_maximization(bboxes, assignments,
cluster_centers)
prog_bar.update()
print('\n')
avg_iou = bbox_overlaps(bboxes,
cluster_centers).max(1)[0].mean().item()
anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
self.logger.info(f'Anchor cluster finish. Average IOU: {avg_iou}')
return anchors
def kmeans_maximization(self, bboxes, assignments, centers):
"""Maximization part of EM algorithm(Expectation-Maximization)"""
new_centers = torch.zeros_like(centers)
for i in range(centers.shape[0]):
mask = (assignments == i)
if mask.sum():
new_centers[i, :] = bboxes[mask].mean(0)
return new_centers
def kmeans_expectation(self, bboxes, assignments, centers):
"""Expectation part of EM algorithm(Expectation-Maximization)"""
ious = bbox_overlaps(bboxes, centers)
closest = ious.argmax(1)
converged = (closest == assignments).all()
return converged, closest
class YOLODEAnchorOptimizer(BaseAnchorOptimizer):
"""YOLO anchor optimizer using differential evolution algorithm.
Args:
num_anchors (int) : Number of anchors.
        iters (int): Maximum iterations for differential evolution.
strategy (str): The differential evolution strategy to use.
Should be one of:
- 'best1bin'
- 'best1exp'
- 'rand1exp'
- 'randtobest1exp'
- 'currenttobest1exp'
- 'best2exp'
- 'rand2exp'
- 'randtobest1bin'
- 'currenttobest1bin'
- 'best2bin'
- 'rand2bin'
- 'rand1bin'
Default: 'best1bin'.
population_size (int): Total population size of evolution algorithm.
Default: 15.
        convergence_thr (float): Tolerance for convergence: the
            optimization stops when ``np.std(pop) <= abs(convergence_thr)
            + convergence_thr * np.abs(np.mean(population_energies))``.
            Default: 0.0001.
mutation (tuple[float]): Range of dithering randomly changes the
mutation constant. Default: (0.5, 1).
recombination (float): Recombination constant of crossover probability.
Default: 0.7.
"""
def __init__(self,
num_anchors,
iters,
strategy='best1bin',
population_size=15,
convergence_thr=0.0001,
mutation=(0.5, 1),
recombination=0.7,
**kwargs):
super(YOLODEAnchorOptimizer, self).__init__(**kwargs)
self.num_anchors = num_anchors
self.iters = iters
self.strategy = strategy
self.population_size = population_size
self.convergence_thr = convergence_thr
self.mutation = mutation
self.recombination = recombination
def optimize(self):
anchors = self.differential_evolution()
self.save_result(anchors, self.out_dir)
def differential_evolution(self):
bboxes = self.get_zero_center_bbox_tensor()
bounds = []
for i in range(self.num_anchors):
bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])])
result = differential_evolution(
func=self.avg_iou_cost,
bounds=bounds,
args=(bboxes, ),
strategy=self.strategy,
maxiter=self.iters,
popsize=self.population_size,
tol=self.convergence_thr,
mutation=self.mutation,
recombination=self.recombination,
updating='immediate',
disp=True)
self.logger.info(
f'Anchor evolution finish. Average IOU: {1 - result.fun}')
anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])]
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
return anchors
@staticmethod
def avg_iou_cost(anchor_params, bboxes):
assert len(anchor_params) % 2 == 0
anchor_whs = torch.tensor(
[[w, h]
for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to(
bboxes.device, dtype=bboxes.dtype)
anchor_boxes = bbox_cxcywh_to_xyxy(
torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1))
ious = bbox_overlaps(bboxes, anchor_boxes)
max_ious, _ = ious.max(1)
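        # lower cost corresponds to a higher mean best-matching IoU between boxes and anchors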
cost = 1 - max_ious.mean().item()
return cost
def main():
logger = MMLogger.get_current_instance()
args = parse_args()
cfg = args.config
cfg = Config.fromfile(cfg)
init_default_scope(cfg.get('default_scope', 'mmdet'))
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
input_shape = args.input_shape
assert len(input_shape) == 2
anchor_type = cfg.model.bbox_head.anchor_generator.type
assert anchor_type == 'YOLOAnchorGenerator', \
f'Only support optimize YOLOAnchor, but get {anchor_type}.'
base_sizes = cfg.model.bbox_head.anchor_generator.base_sizes
num_anchors = sum([len(sizes) for sizes in base_sizes])
train_data_cfg = cfg.train_dataloader
while 'dataset' in train_data_cfg:
train_data_cfg = train_data_cfg['dataset']
dataset = DATASETS.build(train_data_cfg)
if args.algorithm == 'k-means':
optimizer = YOLOKMeansAnchorOptimizer(
dataset=dataset,
input_shape=input_shape,
device=args.device,
num_anchors=num_anchors,
iters=args.iters,
logger=logger,
out_dir=args.output_dir)
elif args.algorithm == 'differential_evolution':
optimizer = YOLODEAnchorOptimizer(
dataset=dataset,
input_shape=input_shape,
device=args.device,
num_anchors=num_anchors,
iters=args.iters,
logger=logger,
out_dir=args.output_dir)
else:
raise NotImplementedError(
f'Only support k-means and differential_evolution, '
f'but get {args.algorithm}')
optimizer.optimize()
if __name__ == '__main__':
main()
| 13,631 | 34.592689 | 79 |
py
|
ERD
|
ERD-main/tools/analysis_tools/coco_occluded_separated_recall.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser
import mmengine
from mmengine.logging import print_log
from mmdet.datasets import CocoDataset
from mmdet.evaluation import CocoOccludedSeparatedMetric
def main():
parser = ArgumentParser(
description='Compute recall of COCO occluded and separated masks '
'presented in paper https://arxiv.org/abs/2210.10046.')
parser.add_argument('result', help='result file (pkl format) path')
parser.add_argument('--out', help='file path to save evaluation results')
parser.add_argument(
'--score-thr',
type=float,
default=0.3,
help='Score threshold for the recall calculation. Defaults to 0.3')
parser.add_argument(
'--iou-thr',
type=float,
default=0.75,
help='IoU threshold for the recall calculation. Defaults to 0.75.')
parser.add_argument(
'--ann',
default='data/coco/annotations/instances_val2017.json',
help='coco annotation file path')
args = parser.parse_args()
results = mmengine.load(args.result)
assert 'masks' in results[0]['pred_instances'], \
'The results must be predicted by instance segmentation model.'
metric = CocoOccludedSeparatedMetric(
ann_file=args.ann, iou_thr=args.iou_thr, score_thr=args.score_thr)
metric.dataset_meta = CocoDataset.METAINFO
for datasample in results:
metric.process(data_batch=None, data_samples=[datasample])
metric_res = metric.compute_metrics(metric.results)
if args.out is not None:
mmengine.dump(metric_res, args.out)
print_log(f'Evaluation results have been saved to {args.out}.')
if __name__ == '__main__':
main()
| 1,748 | 34.693878 | 77 |
py
|
ERD
|
ERD-main/tools/analysis_tools/get_flops.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import tempfile
from functools import partial
from pathlib import Path
import numpy as np
import torch
from mmengine.config import Config, DictAction
from mmengine.logging import MMLogger
from mmengine.model import revert_sync_batchnorm
from mmengine.registry import init_default_scope
from mmengine.runner import Runner
from mmengine.utils import digit_version
from mmdet.registry import MODELS
try:
from mmengine.analysis import get_model_complexity_info
from mmengine.analysis.print_helper import _format_size
except ImportError:
raise ImportError('Please upgrade mmengine >= 0.6.0')
def parse_args():
parser = argparse.ArgumentParser(description='Get a detector flops')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--num-images',
type=int,
default=100,
help='num images of calculate model flops')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def inference(args, logger):
    if digit_version(torch.__version__) < digit_version('1.12.0'):
logger.warning(
            'Some config files, such as configs/yolact and configs/detectors, '
'may have compatibility issues with torch.jit when torch<1.12. '
'If you want to calculate flops for these models, '
'please make sure your pytorch version is >=1.12.')
config_name = Path(args.config)
if not config_name.exists():
logger.error(f'{config_name} not found.')
cfg = Config.fromfile(args.config)
cfg.val_dataloader.batch_size = 1
cfg.work_dir = tempfile.TemporaryDirectory().name
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
init_default_scope(cfg.get('default_scope', 'mmdet'))
# TODO: The following usage is temporary and not safe
# use hard code to convert mmSyncBN to SyncBN. This is a known
# bug in mmengine, mmSyncBN requires a distributed environment,
# this question involves models like configs/strong_baselines
if hasattr(cfg, 'head_norm_cfg'):
cfg['head_norm_cfg'] = dict(type='SyncBN', requires_grad=True)
cfg['model']['roi_head']['bbox_head']['norm_cfg'] = dict(
type='SyncBN', requires_grad=True)
cfg['model']['roi_head']['mask_head']['norm_cfg'] = dict(
type='SyncBN', requires_grad=True)
result = {}
avg_flops = []
data_loader = Runner.build_dataloader(cfg.val_dataloader)
model = MODELS.build(cfg.model)
if torch.cuda.is_available():
model = model.cuda()
model = revert_sync_batchnorm(model)
model.eval()
_forward = model.forward
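    # FLOPs depend on the input shape, so average over the first num_images samples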
for idx, data_batch in enumerate(data_loader):
if idx == args.num_images:
break
data = model.data_preprocessor(data_batch)
result['ori_shape'] = data['data_samples'][0].ori_shape
result['pad_shape'] = data['data_samples'][0].pad_shape
if hasattr(data['data_samples'][0], 'batch_input_shape'):
result['pad_shape'] = data['data_samples'][0].batch_input_shape
model.forward = partial(_forward, data_samples=data['data_samples'])
outputs = get_model_complexity_info(
model,
None,
inputs=data['inputs'],
show_table=False,
show_arch=False)
avg_flops.append(outputs['flops'])
params = outputs['params']
result['compute_type'] = 'dataloader: load a picture from the dataset'
del data_loader
mean_flops = _format_size(int(np.average(avg_flops)))
params = _format_size(params)
result['flops'] = mean_flops
result['params'] = params
return result
def main():
args = parse_args()
logger = MMLogger.get_instance(name='MMLogger')
result = inference(args, logger)
split_line = '=' * 30
ori_shape = result['ori_shape']
pad_shape = result['pad_shape']
flops = result['flops']
params = result['params']
compute_type = result['compute_type']
if pad_shape != ori_shape:
print(f'{split_line}\nUse size divisor set input shape '
f'from {ori_shape} to {pad_shape}')
print(f'{split_line}\nCompute type: {compute_type}\n'
f'Input shape: {pad_shape}\nFlops: {flops}\n'
f'Params: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify '
'that the flops computation is correct.')
if __name__ == '__main__':
main()
| 5,026 | 34.907143 | 78 |
py
|
ERD
|
ERD-main/tools/analysis_tools/analyze_logs.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def cal_train_time(log_dicts, args):
for i, log_dict in enumerate(log_dicts):
print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}')
all_times = []
for epoch in log_dict.keys():
if args.include_outliers:
all_times.append(log_dict[epoch]['time'])
else:
all_times.append(log_dict[epoch]['time'][1:])
if not all_times:
raise KeyError(
                'Please reduce the log interval in the config so that '
'interval is less than iterations of one epoch.')
epoch_ave_time = np.array(list(map(lambda x: np.mean(x), all_times)))
slowest_epoch = epoch_ave_time.argmax()
fastest_epoch = epoch_ave_time.argmin()
std_over_epoch = epoch_ave_time.std()
print(f'slowest epoch {slowest_epoch + 1}, '
f'average time is {epoch_ave_time[slowest_epoch]:.4f} s/iter')
print(f'fastest epoch {fastest_epoch + 1}, '
f'average time is {epoch_ave_time[fastest_epoch]:.4f} s/iter')
print(f'time std over epochs is {std_over_epoch:.4f}')
print(f'average iter time: {np.mean(epoch_ave_time):.4f} s/iter\n')
def plot_curve(log_dicts, args):
if args.backend is not None:
plt.switch_backend(args.backend)
sns.set_style(args.style)
# if legend is None, use {filename}_{key} as legend
legend = args.legend
if legend is None:
legend = []
for json_log in args.json_logs:
for metric in args.keys:
legend.append(f'{json_log}_{metric}')
assert len(legend) == (len(args.json_logs) * len(args.keys))
metrics = args.keys
# TODO: support dynamic eval interval(e.g. RTMDet) when plotting mAP.
num_metrics = len(metrics)
for i, log_dict in enumerate(log_dicts):
epochs = list(log_dict.keys())
for j, metric in enumerate(metrics):
print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
if metric not in log_dict[epochs[int(args.eval_interval) - 1]]:
if 'mAP' in metric:
raise KeyError(
f'{args.json_logs[i]} does not contain metric '
f'{metric}. Please check if "--no-validate" is '
'specified when you trained the model. Or check '
f'if the eval_interval {args.eval_interval} in args '
'is equal to the eval_interval during training.')
raise KeyError(
f'{args.json_logs[i]} does not contain metric {metric}. '
'Please reduce the log interval in the config so that '
'interval is less than iterations of one epoch.')
if 'mAP' in metric:
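                # mAP is logged once per validation round, so plot it against epochs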
xs = []
ys = []
for epoch in epochs:
ys += log_dict[epoch][metric]
if log_dict[epoch][metric]:
xs += [epoch]
plt.xlabel('epoch')
plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')
else:
xs = []
ys = []
for epoch in epochs:
iters = log_dict[epoch]['step']
xs.append(np.array(iters))
ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
xs = np.concatenate(xs)
ys = np.concatenate(ys)
plt.xlabel('iter')
plt.plot(
xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
plt.legend()
if args.title is not None:
plt.title(args.title)
if args.out is None:
plt.show()
else:
print(f'save curve to: {args.out}')
plt.savefig(args.out)
plt.cla()
def add_plot_parser(subparsers):
parser_plt = subparsers.add_parser(
'plot_curve', help='parser for plotting curves')
parser_plt.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_plt.add_argument(
'--keys',
type=str,
nargs='+',
default=['bbox_mAP'],
help='the metric that you want to plot')
parser_plt.add_argument(
'--start-epoch',
type=str,
default='1',
help='the epoch that you want to start')
parser_plt.add_argument(
'--eval-interval',
type=str,
default='1',
help='the eval interval when training')
parser_plt.add_argument('--title', type=str, help='title of figure')
parser_plt.add_argument(
'--legend',
type=str,
nargs='+',
default=None,
help='legend of each plot')
parser_plt.add_argument(
'--backend', type=str, default=None, help='backend of plt')
parser_plt.add_argument(
'--style', type=str, default='dark', help='style of plt')
parser_plt.add_argument('--out', type=str, default=None)
def add_time_parser(subparsers):
parser_time = subparsers.add_parser(
'cal_train_time',
help='parser for computing the average time per training iteration')
parser_time.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_time.add_argument(
'--include-outliers',
action='store_true',
help='include the first value of every epoch when computing '
'the average time')
def parse_args():
parser = argparse.ArgumentParser(description='Analyze Json Log')
# currently only support plot curve and calculate average train time
subparsers = parser.add_subparsers(dest='task', help='task parser')
add_plot_parser(subparsers)
add_time_parser(subparsers)
args = parser.parse_args()
return args
def load_json_logs(json_logs):
# load and convert json_logs to log_dict, key is epoch, value is a sub dict
# keys of sub dict is different metrics, e.g. memory, bbox_mAP
# value of sub dict is a list of corresponding values of all iterations
log_dicts = [dict() for _ in json_logs]
for json_log, log_dict in zip(json_logs, log_dicts):
with open(json_log, 'r') as log_file:
epoch = 1
for i, line in enumerate(log_file):
log = json.loads(line.strip())
val_flag = False
# skip lines only contains one key
if not len(log) > 1:
continue
if epoch not in log_dict:
log_dict[epoch] = defaultdict(list)
for k, v in log.items():
if '/' in k:
log_dict[epoch][k.split('/')[-1]].append(v)
val_flag = True
elif val_flag:
continue
else:
log_dict[epoch][k].append(v)
if 'epoch' in log.keys():
epoch = log['epoch']
return log_dicts
def main():
args = parse_args()
json_logs = args.json_logs
for json_log in json_logs:
assert json_log.endswith('.json')
log_dicts = load_json_logs(json_logs)
eval(args.task)(log_dicts, args)
if __name__ == '__main__':
main()
| 7,576 | 34.740566 | 79 |
py
|
ERD
|
ERD-main/tools/analysis_tools/browse_dataset.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.registry import init_default_scope
from mmengine.utils import ProgressBar
from mmdet.models.utils import mask2ndarray
from mmdet.registry import DATASETS, VISUALIZERS
from mmdet.structures.bbox import BaseBoxes
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=float,
default=2,
help='the interval of show (s)')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# register all modules in mmdet into the registries
init_default_scope(cfg.get('default_scope', 'mmdet'))
dataset = DATASETS.build(cfg.train_dataloader.dataset)
visualizer = VISUALIZERS.build(cfg.visualizer)
visualizer.dataset_meta = dataset.metainfo
progress_bar = ProgressBar(len(dataset))
for item in dataset:
img = item['inputs'].permute(1, 2, 0).numpy()
data_sample = item['data_samples'].numpy()
gt_instances = data_sample.gt_instances
img_path = osp.basename(item['data_samples'].img_path)
out_file = osp.join(
args.output_dir,
osp.basename(img_path)) if args.output_dir is not None else None
img = img[..., [2, 1, 0]] # bgr to rgb
gt_bboxes = gt_instances.get('bboxes', None)
if gt_bboxes is not None and isinstance(gt_bboxes, BaseBoxes):
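            # unwrap box-type objects so the visualizer receives plain tensors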
gt_instances.bboxes = gt_bboxes.tensor
gt_masks = gt_instances.get('masks', None)
if gt_masks is not None:
masks = mask2ndarray(gt_masks)
gt_instances.masks = masks.astype(bool)
data_sample.gt_instances = gt_instances
visualizer.add_datasample(
osp.basename(img_path),
img,
data_sample,
draw_pred=False,
show=not args.not_show,
wait_time=args.show_interval,
out_file=out_file)
progress_bar.update()
if __name__ == '__main__':
main()
| 3,061 | 33.022222 | 78 |
py
|
ERD
|
ERD-main/tools/analysis_tools/confusion_matrix.py
|
import argparse
import os
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import MultipleLocator
from mmcv.ops import nms
from mmengine import Config, DictAction
from mmengine.fileio import load
from mmengine.registry import init_default_scope
from mmengine.utils import ProgressBar
from mmdet.evaluation import bbox_overlaps
from mmdet.registry import DATASETS
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(
description='Generate confusion matrix from detection results')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'prediction_path', help='prediction path where test .pkl result')
parser.add_argument(
'save_dir', help='directory where confusion matrix will be saved')
parser.add_argument(
'--show', action='store_true', help='show confusion matrix')
parser.add_argument(
'--color-theme',
default='plasma',
help='theme of the matrix color map')
parser.add_argument(
'--score-thr',
type=float,
default=0.3,
help='score threshold to filter detection bboxes')
parser.add_argument(
'--tp-iou-thr',
type=float,
default=0.5,
help='IoU threshold to be considered as matched')
parser.add_argument(
'--nms-iou-thr',
type=float,
default=None,
        help='nms IoU threshold, only applied when users want to change the '
'nms IoU threshold.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def calculate_confusion_matrix(dataset,
results,
score_thr=0,
nms_iou_thr=None,
tp_iou_thr=0.5):
"""Calculate the confusion matrix.
Args:
dataset (Dataset): Test or val dataset.
results (list[ndarray]): A list of detection results in each image.
score_thr (float|optional): Score threshold to filter bboxes.
Default: 0.
nms_iou_thr (float|optional): nms IoU threshold, the detection results
have done nms in the detector, only applied when users want to
change the nms IoU threshold. Default: None.
tp_iou_thr (float|optional): IoU threshold to be considered as matched.
Default: 0.5.
"""
num_classes = len(dataset.metainfo['classes'])
confusion_matrix = np.zeros(shape=[num_classes + 1, num_classes + 1])
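    # the extra row/column represents background: missed ground truths and false positives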
assert len(dataset) == len(results)
prog_bar = ProgressBar(len(results))
for idx, per_img_res in enumerate(results):
res_bboxes = per_img_res['pred_instances']
gts = dataset.get_data_info(idx)['instances']
analyze_per_img_dets(confusion_matrix, gts, res_bboxes, score_thr,
tp_iou_thr, nms_iou_thr)
prog_bar.update()
return confusion_matrix
def analyze_per_img_dets(confusion_matrix,
gts,
result,
score_thr=0,
tp_iou_thr=0.5,
nms_iou_thr=None):
"""Analyze detection results on each image.
Args:
confusion_matrix (ndarray): The confusion matrix,
has shape (num_classes + 1, num_classes + 1).
        gts (list[dict]): Ground truth instances of one image; each dict
            contains the keys ``bbox`` and ``bbox_label``.
        result (dict): Detection results of one image with the keys
            ``bboxes``, ``scores`` and ``labels``.
score_thr (float): Score threshold to filter bboxes.
Default: 0.
tp_iou_thr (float): IoU threshold to be considered as matched.
Default: 0.5.
nms_iou_thr (float|optional): nms IoU threshold, the detection results
have done nms in the detector, only applied when users want to
change the nms IoU threshold. Default: None.
"""
true_positives = np.zeros(len(gts))
gt_bboxes = []
gt_labels = []
for gt in gts:
gt_bboxes.append(gt['bbox'])
gt_labels.append(gt['bbox_label'])
gt_bboxes = np.array(gt_bboxes)
gt_labels = np.array(gt_labels)
unique_label = np.unique(result['labels'].numpy())
for det_label in unique_label:
mask = (result['labels'] == det_label)
det_bboxes = result['bboxes'][mask].numpy()
det_scores = result['scores'][mask].numpy()
        if nms_iou_thr:
            det_bboxes, _ = nms(
                det_bboxes, det_scores, nms_iou_thr, score_threshold=score_thr)
            # nms returns dets as [x1, y1, x2, y2, score]; keep the scores
            # aligned with the boxes that survived suppression
            det_scores = det_bboxes[:, -1]
ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes)
for i, score in enumerate(det_scores):
det_match = 0
if score >= score_thr:
for j, gt_label in enumerate(gt_labels):
if ious[i, j] >= tp_iou_thr:
det_match += 1
if gt_label == det_label:
true_positives[j] += 1 # TP
confusion_matrix[gt_label, det_label] += 1
if det_match == 0: # BG FP
confusion_matrix[-1, det_label] += 1
for num_tp, gt_label in zip(true_positives, gt_labels):
if num_tp == 0: # FN
confusion_matrix[gt_label, -1] += 1
def plot_confusion_matrix(confusion_matrix,
labels,
save_dir=None,
show=True,
title='Normalized Confusion Matrix',
color_theme='plasma'):
"""Draw confusion matrix with matplotlib.
Args:
confusion_matrix (ndarray): The confusion matrix.
labels (list[str]): List of class names.
save_dir (str|optional): If set, save the confusion matrix plot to the
given path. Default: None.
show (bool): Whether to show the plot. Default: True.
title (str): Title of the plot. Default: `Normalized Confusion Matrix`.
color_theme (str): Theme of the matrix color map. Default: `plasma`.
"""
# normalize the confusion matrix
per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis]
confusion_matrix = \
confusion_matrix.astype(np.float32) / per_label_sums * 100
num_classes = len(labels)
fig, ax = plt.subplots(
figsize=(0.5 * num_classes, 0.5 * num_classes * 0.8), dpi=180)
cmap = plt.get_cmap(color_theme)
im = ax.imshow(confusion_matrix, cmap=cmap)
plt.colorbar(mappable=im, ax=ax)
title_font = {'weight': 'bold', 'size': 12}
ax.set_title(title, fontdict=title_font)
label_font = {'size': 10}
plt.ylabel('Ground Truth Label', fontdict=label_font)
plt.xlabel('Prediction Label', fontdict=label_font)
# draw locator
xmajor_locator = MultipleLocator(1)
xminor_locator = MultipleLocator(0.5)
ax.xaxis.set_major_locator(xmajor_locator)
ax.xaxis.set_minor_locator(xminor_locator)
ymajor_locator = MultipleLocator(1)
yminor_locator = MultipleLocator(0.5)
ax.yaxis.set_major_locator(ymajor_locator)
ax.yaxis.set_minor_locator(yminor_locator)
# draw grid
ax.grid(True, which='minor', linestyle='-')
# draw label
ax.set_xticks(np.arange(num_classes))
ax.set_yticks(np.arange(num_classes))
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
ax.tick_params(
axis='x', bottom=False, top=True, labelbottom=False, labeltop=True)
plt.setp(
ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor')
    # draw confusion matrix values
for i in range(num_classes):
for j in range(num_classes):
ax.text(
j,
i,
'{}%'.format(
int(confusion_matrix[
i,
j]) if not np.isnan(confusion_matrix[i, j]) else -1),
ha='center',
va='center',
color='w',
size=7)
ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1
fig.tight_layout()
if save_dir is not None:
plt.savefig(
os.path.join(save_dir, 'confusion_matrix.png'), format='png')
if show:
plt.show()
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
init_default_scope(cfg.get('default_scope', 'mmdet'))
results = load(args.prediction_path)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
dataset = DATASETS.build(cfg.test_dataloader.dataset)
confusion_matrix = calculate_confusion_matrix(dataset, results,
args.score_thr,
args.nms_iou_thr,
args.tp_iou_thr)
plot_confusion_matrix(
confusion_matrix,
dataset.metainfo['classes'] + ('background', ),
save_dir=args.save_dir,
show=args.show,
color_theme=args.color_theme)
if __name__ == '__main__':
main()
| 9,900 | 35.135036 | 79 |
py
|
ERD
|
ERD-main/tools/analysis_tools/test_robustness.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
from mmengine.config import Config, DictAction
from mmengine.dist import get_dist_info
from mmengine.evaluator import DumpResults
from mmengine.fileio import dump
from mmengine.runner import Runner
from mmdet.engine.hooks.utils import trigger_visualization_hook
from mmdet.registry import RUNNERS
from tools.analysis_tools.robustness_eval import get_results
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--out',
type=str,
help='dump predictions to a pickle file for offline evaluation')
parser.add_argument(
'--corruptions',
type=str,
nargs='+',
default='benchmark',
choices=[
'all', 'benchmark', 'noise', 'blur', 'weather', 'digital',
'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise',
'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow',
'frost', 'fog', 'brightness', 'contrast', 'elastic_transform',
'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur',
'spatter', 'saturate'
],
help='corruptions')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument(
'--severities',
type=int,
nargs='+',
default=[0, 1, 2, 3, 4, 5],
help='corruption severity levels')
parser.add_argument(
'--summaries',
type=bool,
default=False,
help='Print summaries for every corruption and severity')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--wait-time', type=float, default=2, help='the interval of show (s)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--final-prints',
type=str,
nargs='+',
choices=['P', 'mPC', 'rPC'],
default='mPC',
help='corruption benchmark metric to print at the end')
parser.add_argument(
'--final-prints-aggregate',
type=str,
choices=['all', 'benchmark'],
default='benchmark',
help='aggregate all results or only those for benchmark corruptions')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
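# Example invocation (config and checkpoint paths are illustrative):
#   python tools/analysis_tools/test_robustness.py my_config.py my_checkpoint.pth \
#       --out robustness.pkl --corruptions benchmark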
def main():
args = parse_args()
assert args.out or args.show or args.show_dir, \
('Please specify at least one operation (save or show the results) '
'with the argument "--out", "--show" or "show-dir"')
# load config
cfg = Config.fromfile(args.config)
cfg.launcher = args.launcher
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
cfg.model.backbone.init_cfg.type = None
cfg.test_dataloader.dataset.test_mode = True
cfg.load_from = args.checkpoint
if args.show or args.show_dir:
cfg = trigger_visualization_hook(cfg, args)
# build the runner from config
if 'runner_type' not in cfg:
# build the default runner
runner = Runner.from_cfg(cfg)
else:
# build customized runner from the registry
# if 'runner_type' is set in the cfg
runner = RUNNERS.build(cfg)
# add `DumpResults` dummy metric
if args.out is not None:
assert args.out.endswith(('.pkl', '.pickle')), \
'The dump file must be a pkl file.'
runner.test_evaluator.metrics.append(
DumpResults(out_file_path=args.out))
if 'all' in args.corruptions:
corruptions = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
'saturate'
]
elif 'benchmark' in args.corruptions:
corruptions = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
elif 'noise' in args.corruptions:
corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
elif 'blur' in args.corruptions:
corruptions = [
'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
]
elif 'weather' in args.corruptions:
corruptions = ['snow', 'frost', 'fog', 'brightness']
elif 'digital' in args.corruptions:
corruptions = [
'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
]
elif 'holdout' in args.corruptions:
corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
elif 'None' in args.corruptions:
corruptions = ['None']
args.severities = [0]
else:
corruptions = args.corruptions
aggregated_results = {}
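    # aggregated_results is a nested dict: corruption name -> severity level ->
    # metric dict returned by runner.test(); severity 0 (clean data) is shared
    # across corruptions and therefore only evaluated once.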
for corr_i, corruption in enumerate(corruptions):
aggregated_results[corruption] = {}
for sev_i, corruption_severity in enumerate(args.severities):
# evaluate severity 0 (= no corruption) only once
if corr_i > 0 and corruption_severity == 0:
aggregated_results[corruption][0] = \
aggregated_results[corruptions[0]][0]
continue
test_loader_cfg = copy.deepcopy(cfg.test_dataloader)
# assign corruption and severity
if corruption_severity > 0:
corruption_trans = dict(
type='Corrupt',
corruption=corruption,
severity=corruption_severity)
# TODO: hard coded "1", we assume that the first step is
# loading images, which needs to be fixed in the future
test_loader_cfg.dataset.pipeline.insert(1, corruption_trans)
test_loader = runner.build_dataloader(test_loader_cfg)
runner.test_loop.dataloader = test_loader
# set random seeds
if args.seed is not None:
runner.set_randomness(args.seed)
# print info
print(f'\nTesting {corruption} at severity {corruption_severity}')
eval_results = runner.test()
if args.out:
eval_results_filename = (
osp.splitext(args.out)[0] + '_results' +
osp.splitext(args.out)[1])
aggregated_results[corruption][
corruption_severity] = eval_results
dump(aggregated_results, eval_results_filename)
rank, _ = get_dist_info()
if rank == 0:
eval_results_filename = (
osp.splitext(args.out)[0] + '_results' + osp.splitext(args.out)[1])
# print final results
print('\nAggregated results:')
prints = args.final_prints
aggregate = args.final_prints_aggregate
if cfg.dataset_type == 'VOCDataset':
get_results(
eval_results_filename,
dataset='voc',
prints=prints,
aggregate=aggregate)
else:
get_results(
eval_results_filename,
dataset='coco',
prints=prints,
aggregate=aggregate)
if __name__ == '__main__':
main()
| 9,120 | 37.004167 | 79 |
py
|
ERD
|
ERD-main/tools/analysis_tools/coco_error_analysis.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
from argparse import ArgumentParser
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def makeplot(rs, ps, outDir, class_name, iou_type):
cs = np.vstack([
np.ones((2, 3)),
np.array([0.31, 0.51, 0.74]),
np.array([0.75, 0.31, 0.30]),
np.array([0.36, 0.90, 0.38]),
np.array([0.50, 0.39, 0.64]),
np.array([1, 0.6, 0]),
])
areaNames = ['allarea', 'small', 'medium', 'large']
types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']
for i in range(len(areaNames)):
area_ps = ps[..., i, 0]
figure_title = iou_type + '-' + class_name + '-' + areaNames[i]
aps = [ps_.mean() for ps_ in area_ps]
ps_curve = [
ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps
]
ps_curve.insert(0, np.zeros(ps_curve[0].shape))
fig = plt.figure()
ax = plt.subplot(111)
for k in range(len(types)):
ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5)
ax.fill_between(
rs,
ps_curve[k],
ps_curve[k + 1],
color=cs[k],
label=str(f'[{aps[k]:.3f}]' + types[k]),
)
plt.xlabel('recall')
plt.ylabel('precision')
plt.xlim(0, 1.0)
plt.ylim(0, 1.0)
plt.title(figure_title)
plt.legend()
# plt.show()
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def autolabel(ax, rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
if height > 0 and height <= 1: # for percent values
text_label = '{:2.0f}'.format(height * 100)
else:
text_label = '{:2.0f}'.format(height)
ax.annotate(
text_label,
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords='offset points',
ha='center',
va='bottom',
fontsize='x-small',
)
def makebarplot(rs, ps, outDir, class_name, iou_type):
areaNames = ['allarea', 'small', 'medium', 'large']
types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']
fig, ax = plt.subplots()
x = np.arange(len(areaNames)) # the areaNames locations
width = 0.60 # the width of the bars
rects_list = []
figure_title = iou_type + '-' + class_name + '-' + 'ap bar plot'
for i in range(len(types) - 1):
type_ps = ps[i, ..., 0]
aps = [ps_.mean() for ps_ in type_ps.T]
rects_list.append(
ax.bar(
x - width / 2 + (i + 1) * width / len(types),
aps,
width / len(types),
label=types[i],
))
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Mean Average Precision (mAP)')
ax.set_title(figure_title)
ax.set_xticks(x)
ax.set_xticklabels(areaNames)
ax.legend()
# Add score texts over bars
for rects in rects_list:
autolabel(ax, rects)
# Save plot
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def get_gt_area_group_numbers(cocoEval):
areaRng = cocoEval.params.areaRng
areaRngStr = [str(aRng) for aRng in areaRng]
areaRngLbl = cocoEval.params.areaRngLbl
areaRngStr2areaRngLbl = dict(zip(areaRngStr, areaRngLbl))
areaRngLbl2Number = dict.fromkeys(areaRngLbl, 0)
for evalImg in cocoEval.evalImgs:
if evalImg:
for gtIgnore in evalImg['gtIgnore']:
if not gtIgnore:
aRngLbl = areaRngStr2areaRngLbl[str(evalImg['aRng'])]
areaRngLbl2Number[aRngLbl] += 1
return areaRngLbl2Number
def make_gt_area_group_numbers_plot(cocoEval, outDir, verbose=True):
areaRngLbl2Number = get_gt_area_group_numbers(cocoEval)
areaRngLbl = areaRngLbl2Number.keys()
if verbose:
print('number of annotations per area group:', areaRngLbl2Number)
# Init figure
fig, ax = plt.subplots()
x = np.arange(len(areaRngLbl)) # the areaNames locations
width = 0.60 # the width of the bars
figure_title = 'number of annotations per area group'
rects = ax.bar(x, areaRngLbl2Number.values(), width)
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Number of annotations')
ax.set_title(figure_title)
ax.set_xticks(x)
ax.set_xticklabels(areaRngLbl)
# Add score texts over bars
autolabel(ax, rects)
# Save plot
fig.tight_layout()
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def make_gt_area_histogram_plot(cocoEval, outDir):
n_bins = 100
areas = [ann['area'] for ann in cocoEval.cocoGt.anns.values()]
# init figure
figure_title = 'gt annotation areas histogram plot'
fig, ax = plt.subplots()
# Set the number of bins
ax.hist(np.sqrt(areas), bins=n_bins)
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_xlabel('Squareroot Area')
ax.set_ylabel('Number of annotations')
ax.set_title(figure_title)
# Save plot
fig.tight_layout()
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def analyze_individual_category(k,
cocoDt,
cocoGt,
catId,
iou_type,
areas=None):
nm = cocoGt.loadCats(catId)[0]
print(f'--------------analyzing {k + 1}-{nm["name"]}---------------')
ps_ = {}
dt = copy.deepcopy(cocoDt)
nm = cocoGt.loadCats(catId)[0]
imgIds = cocoGt.getImgIds()
dt_anns = dt.dataset['annotations']
select_dt_anns = []
for ann in dt_anns:
if ann['category_id'] == catId:
select_dt_anns.append(ann)
dt.dataset['annotations'] = select_dt_anns
dt.createIndex()
# compute precision but ignore superclass confusion
gt = copy.deepcopy(cocoGt)
child_catIds = gt.getCatIds(supNms=[nm['supercategory']])
for idx, ann in enumerate(gt.dataset['annotations']):
if ann['category_id'] in child_catIds and ann['category_id'] != catId:
gt.dataset['annotations'][idx]['ignore'] = 1
gt.dataset['annotations'][idx]['iscrowd'] = 1
gt.dataset['annotations'][idx]['category_id'] = catId
cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.maxDets = [100]
cocoEval.params.iouThrs = [0.1]
cocoEval.params.useCats = 1
if areas:
cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
[areas[0], areas[1]], [areas[1], areas[2]]]
cocoEval.evaluate()
cocoEval.accumulate()
ps_supercategory = cocoEval.eval['precision'][0, :, k, :, :]
ps_['ps_supercategory'] = ps_supercategory
# compute precision but ignore any class confusion
gt = copy.deepcopy(cocoGt)
for idx, ann in enumerate(gt.dataset['annotations']):
if ann['category_id'] != catId:
gt.dataset['annotations'][idx]['ignore'] = 1
gt.dataset['annotations'][idx]['iscrowd'] = 1
gt.dataset['annotations'][idx]['category_id'] = catId
cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.maxDets = [100]
cocoEval.params.iouThrs = [0.1]
cocoEval.params.useCats = 1
if areas:
cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
[areas[0], areas[1]], [areas[1], areas[2]]]
cocoEval.evaluate()
cocoEval.accumulate()
ps_allcategory = cocoEval.eval['precision'][0, :, k, :, :]
ps_['ps_allcategory'] = ps_allcategory
return k, ps_
def analyze_results(res_file,
ann_file,
res_types,
out_dir,
extraplots=None,
areas=None):
for res_type in res_types:
assert res_type in ['bbox', 'segm']
if areas:
assert len(areas) == 3, '3 integers should be specified as areas, \
representing 3 area regions'
directory = os.path.dirname(out_dir + '/')
if not os.path.exists(directory):
print(f'-------------create {out_dir}-----------------')
os.makedirs(directory)
cocoGt = COCO(ann_file)
cocoDt = cocoGt.loadRes(res_file)
imgIds = cocoGt.getImgIds()
for res_type in res_types:
res_out_dir = out_dir + '/' + res_type + '/'
res_directory = os.path.dirname(res_out_dir)
if not os.path.exists(res_directory):
print(f'-------------create {res_out_dir}-----------------')
os.makedirs(res_directory)
iou_type = res_type
cocoEval = COCOeval(
copy.deepcopy(cocoGt), copy.deepcopy(cocoDt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.iouThrs = [0.75, 0.5, 0.1]
cocoEval.params.maxDets = [100]
if areas:
cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
[areas[0], areas[1]],
[areas[1], areas[2]]]
cocoEval.evaluate()
cocoEval.accumulate()
ps = cocoEval.eval['precision']
ps = np.vstack([ps, np.zeros((4, *ps.shape[1:]))])
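        # The first axis of `ps` now holds 7 planes matching the error types
        # plotted above: AP@0.75 (C75), AP@0.5 (C50), AP@0.1 (Loc),
        # supercategory confusion ignored (Sim), all class confusion ignored
        # (Oth), background false positives removed (BG) and remaining false
        # negatives (FN). Planes 3-6 are filled in the per-category loop below.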
catIds = cocoGt.getCatIds()
recThrs = cocoEval.params.recThrs
with Pool(processes=48) as pool:
args = [(k, cocoDt, cocoGt, catId, iou_type, areas)
for k, catId in enumerate(catIds)]
analyze_results = pool.starmap(analyze_individual_category, args)
for k, catId in enumerate(catIds):
nm = cocoGt.loadCats(catId)[0]
print(f'--------------saving {k + 1}-{nm["name"]}---------------')
analyze_result = analyze_results[k]
assert k == analyze_result[0]
ps_supercategory = analyze_result[1]['ps_supercategory']
ps_allcategory = analyze_result[1]['ps_allcategory']
# compute precision but ignore superclass confusion
ps[3, :, k, :, :] = ps_supercategory
# compute precision but ignore any class confusion
ps[4, :, k, :, :] = ps_allcategory
# fill in background and false negative errors and plot
ps[ps == -1] = 0
ps[5, :, k, :, :] = ps[4, :, k, :, :] > 0
ps[6, :, k, :, :] = 1.0
makeplot(recThrs, ps[:, :, k], res_out_dir, nm['name'], iou_type)
if extraplots:
makebarplot(recThrs, ps[:, :, k], res_out_dir, nm['name'],
iou_type)
makeplot(recThrs, ps, res_out_dir, 'allclass', iou_type)
if extraplots:
makebarplot(recThrs, ps, res_out_dir, 'allclass', iou_type)
make_gt_area_group_numbers_plot(
cocoEval=cocoEval, outDir=res_out_dir, verbose=True)
make_gt_area_histogram_plot(cocoEval=cocoEval, outDir=res_out_dir)
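# Example invocation (result file name is illustrative):
#   python tools/analysis_tools/coco_error_analysis.py results.bbox.json work_dirs/error_analysis \
#       --ann data/coco/annotations/instances_val2017.json --types bbox --extraplots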
def main():
parser = ArgumentParser(description='COCO Error Analysis Tool')
parser.add_argument('result', help='result file (json format) path')
parser.add_argument('out_dir', help='dir to save analyze result images')
parser.add_argument(
'--ann',
default='data/coco/annotations/instances_val2017.json',
help='annotation file path')
parser.add_argument(
'--types', type=str, nargs='+', default=['bbox'], help='result types')
parser.add_argument(
'--extraplots',
action='store_true',
help='export extra bar/stat plots')
parser.add_argument(
'--areas',
type=int,
nargs='+',
default=[1024, 9216, 10000000000],
help='area regions')
args = parser.parse_args()
analyze_results(
args.result,
args.ann,
args.types,
out_dir=args.out_dir,
extraplots=args.extraplots,
areas=args.areas)
if __name__ == '__main__':
main()
| 12,389 | 35.441176 | 79 |
py
|
ERD
|
ERD-main/tools/analysis_tools/robustness_eval.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from argparse import ArgumentParser
import numpy as np
from mmengine.fileio import load
def print_coco_results(results):
def _print(result, ap=1, iouThr=None, areaRng='all', maxDets=100):
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap == 1 else '(AR)'
iouStr = '0.50:0.95' \
if iouThr is None else f'{iouThr:0.2f}'
iStr = f' {titleStr:<18} {typeStr} @[ IoU={iouStr:<9} | '
iStr += f'area={areaRng:>6s} | maxDets={maxDets:>3d} ] = {result:0.3f}'
print(iStr)
stats = np.zeros((12, ))
stats[0] = _print(results[0], 1)
stats[1] = _print(results[1], 1, iouThr=.5)
stats[2] = _print(results[2], 1, iouThr=.75)
stats[3] = _print(results[3], 1, areaRng='small')
stats[4] = _print(results[4], 1, areaRng='medium')
stats[5] = _print(results[5], 1, areaRng='large')
# TODO support recall metric
'''
stats[6] = _print(results[6], 0, maxDets=1)
stats[7] = _print(results[7], 0, maxDets=10)
stats[8] = _print(results[8], 0)
stats[9] = _print(results[9], 0, areaRng='small')
stats[10] = _print(results[10], 0, areaRng='medium')
stats[11] = _print(results[11], 0, areaRng='large')
'''
def get_coco_style_results(filename,
task='bbox',
metric=None,
prints='mPC',
aggregate='benchmark'):
assert aggregate in ['benchmark', 'all']
if prints == 'all':
prints = ['P', 'mPC', 'rPC']
elif isinstance(prints, str):
prints = [prints]
for p in prints:
assert p in ['P', 'mPC', 'rPC']
if metric is None:
metrics = [
'mAP',
'mAP_50',
'mAP_75',
'mAP_s',
'mAP_m',
'mAP_l',
]
elif isinstance(metric, list):
metrics = metric
else:
metrics = [metric]
for metric_name in metrics:
assert metric_name in [
'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
]
eval_output = load(filename)
num_distortions = len(list(eval_output.keys()))
results = np.zeros((num_distortions, 6, len(metrics)), dtype='float32')
for corr_i, distortion in enumerate(eval_output):
for severity in eval_output[distortion]:
for metric_j, metric_name in enumerate(metrics):
metric_dict = eval_output[distortion][severity]
new_metric_dict = {}
for k, v in metric_dict.items():
if '/' in k:
new_metric_dict[k.split('/')[-1]] = v
mAP = new_metric_dict['_'.join((task, metric_name))]
results[corr_i, severity, metric_j] = mAP
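    # P is the performance on clean data (severity 0), mPC is the mean
    # performance under corruption averaged over corruptions and severities
    # 1-5, and rPC = mPC / P is the relative performance under corruption.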
P = results[0, 0, :]
if aggregate == 'benchmark':
mPC = np.mean(results[:15, 1:, :], axis=(0, 1))
else:
mPC = np.mean(results[:, 1:, :], axis=(0, 1))
rPC = mPC / P
print(f'\nmodel: {osp.basename(filename)}')
if metric is None:
if 'P' in prints:
print(f'Performance on Clean Data [P] ({task})')
print_coco_results(P)
if 'mPC' in prints:
print(f'Mean Performance under Corruption [mPC] ({task})')
print_coco_results(mPC)
if 'rPC' in prints:
print(f'Relative Performance under Corruption [rPC] ({task})')
print_coco_results(rPC)
else:
if 'P' in prints:
print(f'Performance on Clean Data [P] ({task})')
for metric_i, metric_name in enumerate(metrics):
print(f'{metric_name:5} = {P[metric_i]:0.3f}')
if 'mPC' in prints:
print(f'Mean Performance under Corruption [mPC] ({task})')
for metric_i, metric_name in enumerate(metrics):
print(f'{metric_name:5} = {mPC[metric_i]:0.3f}')
if 'rPC' in prints:
print(f'Relative Performance under Corruption [rPC] ({task})')
for metric_i, metric_name in enumerate(metrics):
print(f'{metric_name:5} => {rPC[metric_i] * 100:0.1f} %')
return results
def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'):
assert aggregate in ['benchmark', 'all']
if prints == 'all':
prints = ['P', 'mPC', 'rPC']
elif isinstance(prints, str):
prints = [prints]
for p in prints:
assert p in ['P', 'mPC', 'rPC']
eval_output = load(filename)
num_distortions = len(list(eval_output.keys()))
results = np.zeros((num_distortions, 6, 20), dtype='float32')
for i, distortion in enumerate(eval_output):
for severity in eval_output[distortion]:
mAP = [
eval_output[distortion][severity][j]['ap']
for j in range(len(eval_output[distortion][severity]))
]
results[i, severity, :] = mAP
P = results[0, 0, :]
if aggregate == 'benchmark':
mPC = np.mean(results[:15, 1:, :], axis=(0, 1))
else:
mPC = np.mean(results[:, 1:, :], axis=(0, 1))
rPC = mPC / P
print(f'\nmodel: {osp.basename(filename)}')
if 'P' in prints:
print(f'Performance on Clean Data [P] in AP50 = {np.mean(P):0.3f}')
if 'mPC' in prints:
print('Mean Performance under Corruption [mPC] in AP50 = '
f'{np.mean(mPC):0.3f}')
if 'rPC' in prints:
print('Relative Performance under Corruption [rPC] in % = '
f'{np.mean(rPC) * 100:0.1f}')
return np.mean(results, axis=2, keepdims=True)
def get_results(filename,
dataset='coco',
task='bbox',
metric=None,
prints='mPC',
aggregate='benchmark'):
assert dataset in ['coco', 'voc', 'cityscapes']
if dataset in ['coco', 'cityscapes']:
results = get_coco_style_results(
filename,
task=task,
metric=metric,
prints=prints,
aggregate=aggregate)
elif dataset == 'voc':
if task != 'bbox':
print('Only bbox analysis is supported for Pascal VOC')
print('Will report bbox results\n')
if metric not in [None, ['AP'], ['AP50']]:
print('Only the AP50 metric is supported for Pascal VOC')
print('Will report AP50 metric\n')
results = get_voc_style_results(
filename, prints=prints, aggregate=aggregate)
return results
def get_distortions_from_file(filename):
eval_output = load(filename)
return get_distortions_from_results(eval_output)
def get_distortions_from_results(eval_output):
distortions = []
for i, distortion in enumerate(eval_output):
distortions.append(distortion.replace('_', ' '))
return distortions
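# Example invocation (uses the results file dumped by test_robustness.py; the
# file name is illustrative):
#   python tools/analysis_tools/robustness_eval.py robustness_results.pkl --dataset coco --prints P mPC rPC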
def main():
parser = ArgumentParser(description='Corruption Result Analysis')
parser.add_argument('filename', help='result file path')
parser.add_argument(
'--dataset',
type=str,
choices=['coco', 'voc', 'cityscapes'],
default='coco',
help='dataset type')
parser.add_argument(
'--task',
type=str,
nargs='+',
choices=['bbox', 'segm'],
default=['bbox'],
help='task to report')
parser.add_argument(
'--metric',
nargs='+',
choices=[
None, 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
'AR100', 'ARs', 'ARm', 'ARl'
],
default=None,
help='metric to report')
parser.add_argument(
'--prints',
type=str,
nargs='+',
choices=['P', 'mPC', 'rPC'],
default='mPC',
help='corruption benchmark metric to print')
parser.add_argument(
'--aggregate',
type=str,
choices=['all', 'benchmark'],
default='benchmark',
help='aggregate all results or only those \
for benchmark corruptions')
args = parser.parse_args()
for task in args.task:
get_results(
args.filename,
dataset=args.dataset,
task=task,
metric=args.metric,
prints=args.prints,
aggregate=args.aggregate)
if __name__ == '__main__':
main()
| 8,376 | 30.731061 | 79 |
py
|
ERD
|
ERD-main/projects/Detic/demo.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import os
import urllib
from argparse import ArgumentParser
from typing import Tuple
import mmcv
import torch
from mmengine.logging import print_log
from mmengine.utils import ProgressBar, scandir
from mmdet.apis import inference_detector, init_detector
from mmdet.registry import VISUALIZERS
from mmdet.utils import register_all_modules
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif',
'.tiff', '.webp')
def get_file_list(source_root: str) -> Tuple[list, dict]:
"""Get file list.
Args:
source_root (str): image or video source path
Return:
        source_file_path_list (list): A list of all source files.
source_type (dict): Source type: file or url or dir.
"""
is_dir = os.path.isdir(source_root)
is_url = source_root.startswith(('http:/', 'https:/'))
is_file = os.path.splitext(source_root)[-1].lower() in IMG_EXTENSIONS
source_file_path_list = []
if is_dir:
# when input source is dir
for file in scandir(source_root, IMG_EXTENSIONS, recursive=True):
source_file_path_list.append(os.path.join(source_root, file))
elif is_url:
# when input source is url
filename = os.path.basename(
urllib.parse.unquote(source_root).split('?')[0])
file_save_path = os.path.join(os.getcwd(), filename)
print(f'Downloading source file to {file_save_path}')
torch.hub.download_url_to_file(source_root, file_save_path)
source_file_path_list = [file_save_path]
elif is_file:
# when input source is single image
source_file_path_list = [source_root]
else:
print('Cannot find image file.')
source_type = dict(is_dir=is_dir, is_url=is_url, is_file=is_file)
return source_file_path_list, source_type
def parse_args():
parser = ArgumentParser()
parser.add_argument(
'img', help='Image path, include image file, dir and URL.')
parser.add_argument('config', help='Config file')
parser.add_argument('checkpoint', help='Checkpoint file')
parser.add_argument(
'--out-dir', default='./output', help='Path to output file')
parser.add_argument(
'--device', default='cuda:0', help='Device used for inference')
parser.add_argument(
'--show', action='store_true', help='Show the detection results')
parser.add_argument(
'--score-thr', type=float, default=0.3, help='Bbox score threshold')
parser.add_argument(
'--dataset', type=str, help='dataset name to load the text embedding')
parser.add_argument(
'--class-name', nargs='+', type=str, help='custom class names')
args = parser.parse_args()
return args
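# Example invocation (image, config and checkpoint paths are illustrative):
#   python projects/Detic/demo.py demo.jpg detic_config.py detic_checkpoint.pth \
#       --dataset lvis --score-thr 0.3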
def main():
args = parse_args()
# register all modules in mmdet into the registries
register_all_modules()
# build the model from a config file and a checkpoint file
model = init_detector(args.config, args.checkpoint, device=args.device)
if not os.path.exists(args.out_dir) and not args.show:
os.mkdir(args.out_dir)
# init visualizer
visualizer = VISUALIZERS.build(model.cfg.visualizer)
visualizer.dataset_meta = model.dataset_meta
# get file list
files, source_type = get_file_list(args.img)
from detic.utils import (get_class_names, get_text_embeddings,
reset_cls_layer_weight)
# class name embeddings
if args.class_name:
dataset_classes = args.class_name
elif args.dataset:
dataset_classes = get_class_names(args.dataset)
embedding = get_text_embeddings(
dataset=args.dataset, custom_vocabulary=args.class_name)
visualizer.dataset_meta['classes'] = dataset_classes
reset_cls_layer_weight(model, embedding)
# start detector inference
progress_bar = ProgressBar(len(files))
for file in files:
result = inference_detector(model, file)
img = mmcv.imread(file)
img = mmcv.imconvert(img, 'bgr', 'rgb')
if source_type['is_dir']:
filename = os.path.relpath(file, args.img).replace('/', '_')
else:
filename = os.path.basename(file)
out_file = None if args.show else os.path.join(args.out_dir, filename)
progress_bar.update()
visualizer.add_datasample(
filename,
img,
data_sample=result,
draw_gt=False,
show=args.show,
wait_time=0,
out_file=out_file,
pred_score_thr=args.score_thr)
if not args.show:
print_log(
f'\nResults have been saved at {os.path.abspath(args.out_dir)}')
if __name__ == '__main__':
main()
| 4,710 | 31.944056 | 78 |
py
|
ERD
|
ERD-main/projects/Detic/detic/detic_bbox_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Union
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.layers import multiclass_nms
from mmdet.models.roi_heads.bbox_heads import Shared2FCBBoxHead
from mmdet.models.utils import empty_instances
from mmdet.registry import MODELS
from mmdet.structures.bbox import get_box_tensor, scale_boxes
@MODELS.register_module(force=True) # avoid bug
class DeticBBoxHead(Shared2FCBBoxHead):
def __init__(self,
*args,
init_cfg: Optional[Union[dict, ConfigDict]] = None,
**kwargs) -> None:
super().__init__(*args, init_cfg=init_cfg, **kwargs)
# reconstruct fc_cls and fc_reg since input channels are changed
assert self.with_cls
cls_channels = self.num_classes
cls_predictor_cfg_ = self.cls_predictor_cfg.copy()
cls_predictor_cfg_.update(
in_features=self.cls_last_dim, out_features=cls_channels)
self.fc_cls = MODELS.build(cls_predictor_cfg_)
def _predict_by_feat_single(
self,
roi: Tensor,
cls_score: Tensor,
bbox_pred: Tensor,
img_meta: dict,
rescale: bool = False,
rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:
"""Transform a single image's features extracted from the head into
bbox results.
Args:
roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
last dimension 5 arrange as (batch_index, x1, y1, x2, y2).
cls_score (Tensor): Box scores, has shape
(num_boxes, num_classes + 1).
bbox_pred (Tensor): Box energies / deltas.
has shape (num_boxes, num_classes * 4).
img_meta (dict): image information.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.
Defaults to None
Returns:
:obj:`InstanceData`: Detection results of each image\
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
results = InstanceData()
if roi.shape[0] == 0:
return empty_instances([img_meta],
roi.device,
task_type='bbox',
instance_results=[results],
box_type=self.predict_box_type,
use_box_type=False,
num_classes=self.num_classes,
score_per_cls=rcnn_test_cfg is None)[0]
scores = cls_score
img_shape = img_meta['img_shape']
num_rois = roi.size(0)
num_classes = 1 if self.reg_class_agnostic else self.num_classes
roi = roi.repeat_interleave(num_classes, dim=0)
bbox_pred = bbox_pred.view(-1, self.bbox_coder.encode_size)
bboxes = self.bbox_coder.decode(
roi[..., 1:], bbox_pred, max_shape=img_shape)
if rescale and bboxes.size(0) > 0:
assert img_meta.get('scale_factor') is not None
scale_factor = [1 / s for s in img_meta['scale_factor']]
bboxes = scale_boxes(bboxes, scale_factor)
# Get the inside tensor when `bboxes` is a box type
bboxes = get_box_tensor(bboxes)
box_dim = bboxes.size(-1)
bboxes = bboxes.view(num_rois, -1)
if rcnn_test_cfg is None:
# This means that it is aug test.
# It needs to return the raw results without nms.
results.bboxes = bboxes
results.scores = scores
else:
det_bboxes, det_labels = multiclass_nms(
bboxes,
scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img,
box_dim=box_dim)
results.bboxes = det_bboxes[:, :-1]
results.scores = det_bboxes[:, -1]
results.labels = det_labels
return results
| 4,599 | 39.707965 | 76 |
py
|
ERD
|
ERD-main/projects/Detic/detic/utils.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn.functional as F
from mmengine.logging import print_log
from .text_encoder import CLIPTextEncoder
# download from
# https://github.com/facebookresearch/Detic/tree/main/datasets/metadata
DATASET_EMBEDDINGS = {
'lvis': 'datasets/metadata/lvis_v1_clip_a+cname.npy',
'objects365': 'datasets/metadata/o365_clip_a+cnamefix.npy',
'openimages': 'datasets/metadata/oid_clip_a+cname.npy',
'coco': 'datasets/metadata/coco_clip_a+cname.npy',
}
def get_text_embeddings(dataset=None,
custom_vocabulary=None,
prompt_prefix='a '):
assert (dataset is None) ^ (custom_vocabulary is None), \
'Either `dataset` or `custom_vocabulary` should be specified.'
if dataset:
if dataset in DATASET_EMBEDDINGS:
return DATASET_EMBEDDINGS[dataset]
else:
custom_vocabulary = get_class_names(dataset)
text_encoder = CLIPTextEncoder()
text_encoder.eval()
texts = [prompt_prefix + x for x in custom_vocabulary]
print_log(
f'Computing text embeddings for {len(custom_vocabulary)} classes.')
embeddings = text_encoder(texts).detach().permute(1, 0).contiguous().cpu()
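    # shape: (embed_dim, num_classes), i.e. the D x C layout expected by
    # reset_cls_layer_weight below.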
return embeddings
def get_class_names(dataset):
if dataset == 'coco':
from mmdet.datasets import CocoDataset
class_names = CocoDataset.METAINFO['classes']
elif dataset == 'cityscapes':
from mmdet.datasets import CityscapesDataset
class_names = CityscapesDataset.METAINFO['classes']
elif dataset == 'voc':
from mmdet.datasets import VOCDataset
class_names = VOCDataset.METAINFO['classes']
elif dataset == 'openimages':
from mmdet.datasets import OpenImagesDataset
class_names = OpenImagesDataset.METAINFO['classes']
elif dataset == 'lvis':
from mmdet.datasets import LVISV1Dataset
class_names = LVISV1Dataset.METAINFO['classes']
else:
raise TypeError(f'Invalid type for dataset name: {type(dataset)}')
return class_names
def reset_cls_layer_weight(model, weight):
    if isinstance(weight, str):
print_log(f'Resetting cls_layer_weight from file: {weight}')
zs_weight = torch.tensor(
np.load(weight),
dtype=torch.float32).permute(1, 0).contiguous() # D x C
else:
zs_weight = weight
zs_weight = torch.cat(
[zs_weight, zs_weight.new_zeros(
(zs_weight.shape[0], 1))], dim=1) # D x (C + 1)
zs_weight = F.normalize(zs_weight, p=2, dim=0)
zs_weight = zs_weight.to('cuda')
num_classes = zs_weight.shape[-1]
for bbox_head in model.roi_head.bbox_head:
bbox_head.num_classes = num_classes
del bbox_head.fc_cls.zs_weight
bbox_head.fc_cls.zs_weight = zs_weight
| 2,864 | 35.265823 | 78 |
py
|
ERD
|
ERD-main/projects/Detic/detic/detic_roi_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Sequence, Tuple
import torch
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.roi_heads import CascadeRoIHead
from mmdet.models.task_modules.samplers import SamplingResult
from mmdet.models.test_time_augs import merge_aug_masks
from mmdet.models.utils.misc import empty_instances
from mmdet.registry import MODELS
from mmdet.structures import SampleList
from mmdet.structures.bbox import bbox2roi, get_box_tensor
from mmdet.utils import ConfigType, InstanceList, MultiConfig
@MODELS.register_module(force=True) # avoid bug
class DeticRoIHead(CascadeRoIHead):
def init_mask_head(self, mask_roi_extractor: MultiConfig,
mask_head: MultiConfig) -> None:
"""Initialize mask head and mask roi extractor.
Args:
mask_head (dict): Config of mask in mask head.
mask_roi_extractor (:obj:`ConfigDict`, dict or list):
Config of mask roi extractor.
"""
self.mask_head = MODELS.build(mask_head)
if mask_roi_extractor is not None:
self.share_roi_extractor = False
self.mask_roi_extractor = MODELS.build(mask_roi_extractor)
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
def _refine_roi(self, x: Tuple[Tensor], rois: Tensor,
batch_img_metas: List[dict],
num_proposals_per_img: Sequence[int], **kwargs) -> tuple:
"""Multi-stage refinement of RoI.
Args:
x (tuple[Tensor]): List of multi-level img features.
rois (Tensor): shape (n, 5), [batch_ind, x1, y1, x2, y2]
batch_img_metas (list[dict]): List of image information.
num_proposals_per_img (sequence[int]): number of proposals
in each image.
Returns:
tuple:
- rois (Tensor): Refined RoI.
- cls_scores (list[Tensor]): Average predicted
cls score per image.
- bbox_preds (list[Tensor]): Bbox branch predictions
for the last stage of per image.
"""
# "ms" in variable names means multi-stage
ms_scores = []
for stage in range(self.num_stages):
bbox_results = self._bbox_forward(
stage=stage, x=x, rois=rois, **kwargs)
# split batch bbox prediction back to each image
cls_scores = bbox_results['cls_score'].sigmoid()
bbox_preds = bbox_results['bbox_pred']
rois = rois.split(num_proposals_per_img, 0)
cls_scores = cls_scores.split(num_proposals_per_img, 0)
ms_scores.append(cls_scores)
bbox_preds = bbox_preds.split(num_proposals_per_img, 0)
if stage < self.num_stages - 1:
bbox_head = self.bbox_head[stage]
refine_rois_list = []
for i in range(len(batch_img_metas)):
if rois[i].shape[0] > 0:
bbox_label = cls_scores[i][:, :-1].argmax(dim=1)
# Refactor `bbox_head.regress_by_class` to only accept
# box tensor without img_idx concatenated.
refined_bboxes = bbox_head.regress_by_class(
rois[i][:, 1:], bbox_label, bbox_preds[i],
batch_img_metas[i])
refined_bboxes = get_box_tensor(refined_bboxes)
refined_rois = torch.cat(
[rois[i][:, [0]], refined_bboxes], dim=1)
refine_rois_list.append(refined_rois)
rois = torch.cat(refine_rois_list)
# ms_scores aligned
# average scores of each image by stages
cls_scores = [
sum([score[i] for score in ms_scores]) / float(len(ms_scores))
for i in range(len(batch_img_metas))
] # aligned
return rois, cls_scores, bbox_preds
def _bbox_forward(self, stage: int, x: Tuple[Tensor],
rois: Tensor) -> dict:
"""Box head forward function used in both training and testing.
Args:
stage (int): The current stage in Cascade RoI Head.
x (tuple[Tensor]): List of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
dict[str, Tensor]: Usually returns a dictionary with keys:
- `cls_score` (Tensor): Classification scores.
- `bbox_pred` (Tensor): Box energies / deltas.
- `bbox_feats` (Tensor): Extract bbox RoI features.
"""
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
# do not support caffe_c4 model anymore
cls_score, bbox_pred = bbox_head(bbox_feats)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
return bbox_results
def predict_bbox(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
rpn_results_list: InstanceList,
rcnn_test_cfg: ConfigType,
rescale: bool = False,
**kwargs) -> InstanceList:
"""Perform forward propagation of the bbox head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
proposals = [res.bboxes for res in rpn_results_list]
proposal_scores = [res.scores for res in rpn_results_list]
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = bbox2roi(proposals)
if rois.shape[0] == 0:
return empty_instances(
batch_img_metas,
rois.device,
task_type='bbox',
box_type=self.bbox_head[-1].predict_box_type,
num_classes=self.bbox_head[-1].num_classes,
score_per_cls=rcnn_test_cfg is None)
# rois aligned
rois, cls_scores, bbox_preds = self._refine_roi(
x=x,
rois=rois,
batch_img_metas=batch_img_metas,
num_proposals_per_img=num_proposals_per_img,
**kwargs)
# score reweighting in centernet2
cls_scores = [(s * ps[:, None])**0.5
for s, ps in zip(cls_scores, proposal_scores)]
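        # keep only each box's maximum foreground score; every other class
        # entry is zeroed before the final per-class post-processing.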
cls_scores = [
s * (s == s[:, :-1].max(dim=1)[0][:, None]).float()
for s in cls_scores
]
# fast_rcnn_inference
results_list = self.bbox_head[-1].predict_by_feat(
rois=rois,
cls_scores=cls_scores,
bbox_preds=bbox_preds,
batch_img_metas=batch_img_metas,
rescale=rescale,
rcnn_test_cfg=rcnn_test_cfg)
return results_list
def _mask_forward(self, x: Tuple[Tensor], rois: Tensor) -> dict:
"""Mask head forward function used in both training and testing.
Args:
x (tuple[Tensor]): Tuple of multi-level img features.
rois (Tensor): RoIs with the shape (n, 5) where the first
column indicates batch id of each RoI.
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
"""
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], rois)
# do not support caffe_c4 model anymore
mask_preds = self.mask_head(mask_feats)
mask_results = dict(mask_preds=mask_preds)
return mask_results
def mask_loss(self, x, sampling_results: List[SamplingResult],
batch_gt_instances: InstanceList) -> dict:
"""Run forward function and calculate loss for mask head in training.
Args:
x (tuple[Tensor]): Tuple of multi-level img features.
            sampling_results (list[:obj:`SamplingResult`]): Sampling results.
batch_gt_instances (list[:obj:`InstanceData`]): Batch of
gt_instance. It usually includes ``bboxes``, ``labels``, and
``masks`` attributes.
Returns:
dict: Usually returns a dictionary with keys:
- `mask_preds` (Tensor): Mask prediction.
- `loss_mask` (dict): A dictionary of mask loss components.
"""
pos_rois = bbox2roi([res.pos_priors for res in sampling_results])
mask_results = self._mask_forward(x, pos_rois)
mask_loss_and_target = self.mask_head.loss_and_target(
mask_preds=mask_results['mask_preds'],
sampling_results=sampling_results,
batch_gt_instances=batch_gt_instances,
rcnn_train_cfg=self.train_cfg[-1])
mask_results.update(mask_loss_and_target)
return mask_results
def loss(self, x: Tuple[Tensor], rpn_results_list: InstanceList,
batch_data_samples: SampleList) -> dict:
"""Perform forward propagation and loss calculation of the detection
roi on the features of the upstream network.
Args:
x (tuple[Tensor]): List of multi-level img features.
rpn_results_list (list[:obj:`InstanceData`]): List of region
proposals.
batch_data_samples (list[:obj:`DetDataSample`]): The batch
data samples. It usually includes information such
as `gt_instance` or `gt_panoptic_seg` or `gt_sem_seg`.
Returns:
dict[str, Tensor]: A dictionary of loss components
"""
raise NotImplementedError
def predict_mask(self,
x: Tuple[Tensor],
batch_img_metas: List[dict],
results_list: List[InstanceData],
rescale: bool = False) -> List[InstanceData]:
"""Perform forward propagation of the mask head and predict detection
results on the features of the upstream network.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
batch_img_metas (list[dict]): List of image information.
results_list (list[:obj:`InstanceData`]): Detection results of
each image.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
Returns:
list[:obj:`InstanceData`]: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
- masks (Tensor): Has a shape (num_instances, H, W).
"""
bboxes = [res.bboxes for res in results_list]
mask_rois = bbox2roi(bboxes)
if mask_rois.shape[0] == 0:
results_list = empty_instances(
batch_img_metas,
mask_rois.device,
task_type='mask',
instance_results=results_list,
mask_thr_binary=self.test_cfg.mask_thr_binary)
return results_list
num_mask_rois_per_img = [len(res) for res in results_list]
aug_masks = []
mask_results = self._mask_forward(x, mask_rois)
mask_preds = mask_results['mask_preds']
# split batch mask prediction back to each image
mask_preds = mask_preds.split(num_mask_rois_per_img, 0)
aug_masks.append([m.sigmoid().detach() for m in mask_preds])
merged_masks = []
for i in range(len(batch_img_metas)):
aug_mask = [mask[i] for mask in aug_masks]
merged_mask = merge_aug_masks(aug_mask, batch_img_metas[i])
merged_masks.append(merged_mask)
results_list = self.mask_head.predict_by_feat(
mask_preds=merged_masks,
results_list=results_list,
batch_img_metas=batch_img_metas,
rcnn_test_cfg=self.test_cfg,
rescale=rescale,
activate_map=True)
return results_list
| 13,673 | 40.816514 | 78 |
py
|
ERD
|
ERD-main/projects/Detic/detic/zero_shot_classifier.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from mmdet.registry import MODELS
@MODELS.register_module(force=True) # avoid bug
class ZeroShotClassifier(nn.Module):
def __init__(
self,
in_features: int,
out_features: int, # num_classes
zs_weight_path: str,
zs_weight_dim: int = 512,
use_bias: float = 0.0,
norm_weight: bool = True,
norm_temperature: float = 50.0,
):
super().__init__()
num_classes = out_features
self.norm_weight = norm_weight
self.norm_temperature = norm_temperature
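        # A negative `use_bias` value enables a learnable classification bias
        # initialised to that value; zero or a positive value disables it.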
self.use_bias = use_bias < 0
if self.use_bias:
self.cls_bias = nn.Parameter(torch.ones(1) * use_bias)
self.linear = nn.Linear(in_features, zs_weight_dim)
if zs_weight_path == 'rand':
zs_weight = torch.randn((zs_weight_dim, num_classes))
nn.init.normal_(zs_weight, std=0.01)
else:
zs_weight = torch.tensor(
np.load(zs_weight_path),
dtype=torch.float32).permute(1, 0).contiguous() # D x C
zs_weight = torch.cat(
[zs_weight, zs_weight.new_zeros(
(zs_weight_dim, 1))], dim=1) # D x (C + 1)
if self.norm_weight:
zs_weight = F.normalize(zs_weight, p=2, dim=0)
if zs_weight_path == 'rand':
self.zs_weight = nn.Parameter(zs_weight)
else:
self.register_buffer('zs_weight', zs_weight)
assert self.zs_weight.shape[1] == num_classes + 1, self.zs_weight.shape
def forward(self, x, classifier=None):
'''
Inputs:
x: B x D'
            classifier: optional external class embeddings of shape C' x D
'''
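        # With norm_weight=True the logits are a temperature-scaled cosine
        # similarity between the projected feature and the class embeddings;
        # otherwise they reduce to a plain dot product.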
x = self.linear(x)
if classifier is not None:
zs_weight = classifier.permute(1, 0).contiguous() # D x C'
zs_weight = F.normalize(zs_weight, p=2, dim=0) \
if self.norm_weight else zs_weight
else:
zs_weight = self.zs_weight
if self.norm_weight:
x = self.norm_temperature * F.normalize(x, p=2, dim=1)
x = torch.mm(x, zs_weight)
if self.use_bias:
x = x + self.cls_bias
return x
| 2,324 | 30.418919 | 79 |
py
|
ERD
|
ERD-main/projects/Detic/detic/centernet_rpn_head.py
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
from typing import List, Sequence, Tuple
import torch
import torch.nn as nn
from mmcv.cnn import Scale
from mmengine import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.models.dense_heads import CenterNetUpdateHead
from mmdet.models.utils import multi_apply
from mmdet.registry import MODELS
INF = 1000000000
RangeType = Sequence[Tuple[int, int]]
@MODELS.register_module(force=True) # avoid bug
class CenterNetRPNHead(CenterNetUpdateHead):
"""CenterNetUpdateHead is an improved version of CenterNet in CenterNet2.
Paper link `<https://arxiv.org/abs/2103.07461>`_.
"""
def _init_layers(self) -> None:
"""Initialize layers of the head."""
self._init_reg_convs()
self._init_predictor()
def _init_predictor(self) -> None:
"""Initialize predictor layers of the head."""
self.conv_cls = nn.Conv2d(
self.feat_channels, self.num_classes, 3, padding=1)
self.conv_reg = nn.Conv2d(self.feat_channels, 4, 3, padding=1)
def forward(self, x: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]:
"""Forward features from the upstream network.
Args:
x (tuple[Tensor]): Features from the upstream network, each is
a 4D-tensor.
Returns:
tuple: A tuple of each level outputs.
- cls_scores (list[Tensor]): Box scores for each scale level, \
each is a 4D-tensor, the channel number is num_classes.
- bbox_preds (list[Tensor]): Box energies / deltas for each \
scale level, each is a 4D-tensor, the channel number is 4.
"""
res = multi_apply(self.forward_single, x, self.scales, self.strides)
return res
def forward_single(self, x: Tensor, scale: Scale,
stride: int) -> Tuple[Tensor, Tensor]:
"""Forward features of a single scale level.
Args:
x (Tensor): FPN feature maps of the specified stride.
scale (:obj:`mmcv.cnn.Scale`): Learnable scale module to resize
the bbox prediction.
stride (int): The corresponding stride for feature maps.
Returns:
tuple: scores for each class, bbox predictions of
input feature maps.
"""
for m in self.reg_convs:
x = m(x)
cls_score = self.conv_cls(x)
bbox_pred = self.conv_reg(x)
# scale the bbox_pred of different level
# float to avoid overflow when enabling FP16
bbox_pred = scale(bbox_pred).float()
# bbox_pred needed for gradient computation has been modified
# by F.relu(bbox_pred) when run with PyTorch 1.10. So replace
# F.relu(bbox_pred) with bbox_pred.clamp(min=0)
bbox_pred = bbox_pred.clamp(min=0)
if not self.training:
bbox_pred *= stride
return cls_score, bbox_pred # score aligned, box larger
def _predict_by_feat_single(self,
cls_score_list: List[Tensor],
bbox_pred_list: List[Tensor],
score_factor_list: List[Tensor],
mlvl_priors: List[Tensor],
img_meta: dict,
cfg: ConfigDict,
rescale: bool = False,
with_nms: bool = True) -> InstanceData:
"""Transform a single image's features extracted from the head into
bbox results.
Args:
cls_score_list (list[Tensor]): Box scores from all scale
levels of a single image, each item has shape
(num_priors * num_classes, H, W).
bbox_pred_list (list[Tensor]): Box energies / deltas from
all scale levels of a single image, each item has shape
(num_priors * 4, H, W).
score_factor_list (list[Tensor]): Score factor from all scale
levels of a single image, each item has shape
(num_priors * 1, H, W).
mlvl_priors (list[Tensor]): Each element in the list is
the priors of a single level in feature pyramid. In all
anchor-based methods, it has shape (num_priors, 4). In
all anchor-free methods, it has shape (num_priors, 2)
when `with_stride=True`, otherwise it still has shape
(num_priors, 4).
img_meta (dict): Image meta info.
cfg (mmengine.Config): Test / postprocessing configuration,
if None, test_cfg would be used.
rescale (bool): If True, return boxes in original image space.
Defaults to False.
with_nms (bool): If True, do nms before return boxes.
Defaults to True.
Returns:
:obj:`InstanceData`: Detection results of each image
after the post process.
Each item usually contains following keys.
- scores (Tensor): Classification scores, has a shape
(num_instance, )
- labels (Tensor): Labels of bboxes, has a shape
(num_instances, ).
- bboxes (Tensor): Has a shape (num_instances, 4),
the last dimension 4 arrange as (x1, y1, x2, y2).
"""
cfg = self.test_cfg if cfg is None else cfg
cfg = copy.deepcopy(cfg)
nms_pre = cfg.get('nms_pre', -1)
mlvl_bbox_preds = []
mlvl_valid_priors = []
mlvl_scores = []
mlvl_labels = []
for level_idx, (cls_score, bbox_pred, score_factor, priors) in \
enumerate(zip(cls_score_list, bbox_pred_list,
score_factor_list, mlvl_priors)):
assert cls_score.size()[-2:] == bbox_pred.size()[-2:]
dim = self.bbox_coder.encode_size
bbox_pred = bbox_pred.permute(1, 2, 0).reshape(-1, dim)
cls_score = cls_score.permute(1, 2,
0).reshape(-1, self.cls_out_channels)
heatmap = cls_score.sigmoid()
score_thr = cfg.get('score_thr', 0)
candidate_inds = heatmap > score_thr # 0.05
pre_nms_top_n = candidate_inds.sum() # N
pre_nms_top_n = pre_nms_top_n.clamp(max=nms_pre) # N
heatmap = heatmap[candidate_inds] # n
candidate_nonzeros = candidate_inds.nonzero() # n
box_loc = candidate_nonzeros[:, 0] # n
labels = candidate_nonzeros[:, 1] # n
bbox_pred = bbox_pred[box_loc] # n x 4
per_grids = priors[box_loc] # n x 2
if candidate_inds.sum().item() > pre_nms_top_n.item():
heatmap, top_k_indices = \
heatmap.topk(pre_nms_top_n, sorted=False)
labels = labels[top_k_indices]
bbox_pred = bbox_pred[top_k_indices]
per_grids = per_grids[top_k_indices]
bboxes = self.bbox_coder.decode(per_grids, bbox_pred)
# avoid invalid boxes in RoI heads
bboxes[:, 2] = torch.max(bboxes[:, 2], bboxes[:, 0] + 0.01)
bboxes[:, 3] = torch.max(bboxes[:, 3], bboxes[:, 1] + 0.01)
mlvl_bbox_preds.append(bboxes)
mlvl_valid_priors.append(priors)
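            # the square root of the heatmap value is used as the proposal
            # score, following CenterNet2.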
mlvl_scores.append(torch.sqrt(heatmap))
mlvl_labels.append(labels)
results = InstanceData()
results.bboxes = torch.cat(mlvl_bbox_preds)
results.scores = torch.cat(mlvl_scores)
results.labels = torch.cat(mlvl_labels)
return self._bbox_post_process(
results=results,
cfg=cfg,
rescale=rescale,
with_nms=with_nms,
img_meta=img_meta)
| 7,938 | 39.299492 | 79 |
py
|
ERD
|
ERD-main/projects/Detic/detic/__init__.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from .centernet_rpn_head import CenterNetRPNHead
from .detic_bbox_head import DeticBBoxHead
from .detic_roi_head import DeticRoIHead
from .zero_shot_classifier import ZeroShotClassifier
__all__ = [
'CenterNetRPNHead', 'DeticBBoxHead', 'DeticRoIHead', 'ZeroShotClassifier'
]
| 327 | 31.8 | 77 |
py
|
ERD
|
ERD-main/projects/Detic/detic/text_encoder.py
|
# Copyright (c) OpenMMLab. All rights reserved.
from typing import List, Union
import torch
import torch.nn as nn
class CLIPTextEncoder(nn.Module):
def __init__(self, model_name='ViT-B/32'):
super().__init__()
import clip
from clip.simple_tokenizer import SimpleTokenizer
self.tokenizer = SimpleTokenizer()
pretrained_model, _ = clip.load(model_name, device='cpu')
self.clip = pretrained_model
@property
def device(self):
return self.clip.device
@property
def dtype(self):
return self.clip.dtype
def tokenize(self,
texts: Union[str, List[str]],
context_length: int = 77) -> torch.LongTensor:
if isinstance(texts, str):
texts = [texts]
sot_token = self.tokenizer.encoder['<|startoftext|>']
eot_token = self.tokenizer.encoder['<|endoftext|>']
all_tokens = [[sot_token] + self.tokenizer.encode(text) + [eot_token]
for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
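        # captions longer than the context window keep a randomly chosen
        # contiguous chunk of `context_length` tokens instead of failing.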
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
st = torch.randint(len(tokens) - context_length + 1,
(1, ))[0].item()
tokens = tokens[st:st + context_length]
result[i, :len(tokens)] = torch.tensor(tokens)
return result
def forward(self, text):
text = self.tokenize(text)
text_features = self.clip.encode_text(text)
return text_features
| 1,605 | 30.490196 | 79 |
py
|
ERD
|
ERD-main/projects/Detic/configs/detic_centernet2_swin-b_fpn_4x_lvis-coco-in21k.py
|
_base_ = 'mmdet::common/lsj-200e_coco-detection.py'
custom_imports = dict(
imports=['projects.Detic.detic'], allow_failed_imports=False)
image_size = (1024, 1024)
batch_augments = [dict(type='BatchFixedSizePad', size=image_size)]
cls_layer = dict(
type='ZeroShotClassifier',
zs_weight_path='rand',
zs_weight_dim=512,
use_bias=0.0,
norm_weight=True,
norm_temperature=50.0)
reg_layer = [
dict(type='Linear', in_features=1024, out_features=1024),
dict(type='ReLU', inplace=True),
dict(type='Linear', in_features=1024, out_features=4)
]
num_classes = 22047
model = dict(
type='CascadeRCNN',
data_preprocessor=dict(
type='DetDataPreprocessor',
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
bgr_to_rgb=True,
pad_size_divisor=32,
batch_augments=batch_augments),
backbone=dict(
type='SwinTransformer',
embed_dims=128,
depths=[2, 2, 18, 2],
num_heads=[4, 8, 16, 32],
window_size=7,
mlp_ratio=4,
qkv_bias=True,
qk_scale=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.3,
patch_norm=True,
out_indices=(1, 2, 3),
with_cp=False),
neck=dict(
type='FPN',
in_channels=[256, 512, 1024],
out_channels=256,
start_level=0,
add_extra_convs='on_output',
num_outs=5,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d'),
relu_before_extra_convs=True),
rpn_head=dict(
type='CenterNetRPNHead',
num_classes=1,
in_channels=256,
stacked_convs=4,
feat_channels=256,
strides=[8, 16, 32, 64, 128],
conv_bias=True,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
loss_cls=dict(
type='GaussianFocalLoss',
pos_weight=0.25,
neg_weight=0.75,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
),
roi_head=dict(
type='DeticRoIHead',
num_stages=3,
stage_loss_weights=[1, 0.5, 0.25],
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign',
output_size=7,
sampling_ratio=0,
use_torchvision=True),
out_channels=256,
featmap_strides=[8, 16, 32],
# approximately equal to
# canonical_box_size=224, canonical_level=4 in D2
finest_scale=112),
bbox_head=[
dict(
type='DeticBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=num_classes,
cls_predictor_cfg=cls_layer,
reg_predictor_cfg=reg_layer,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='DeticBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=num_classes,
cls_predictor_cfg=cls_layer,
reg_predictor_cfg=reg_layer,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.05, 0.05, 0.1, 0.1]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0)),
dict(
type='DeticBBoxHead',
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=num_classes,
cls_predictor_cfg=cls_layer,
reg_predictor_cfg=reg_layer,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.033, 0.033, 0.067, 0.067]),
reg_class_agnostic=True,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))
],
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=0),
out_channels=256,
featmap_strides=[8, 16, 32],
# approximately equal to
# canonical_box_size=224, canonical_level=4 in D2
finest_scale=112),
mask_head=dict(
type='FCNMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
class_agnostic=True,
num_classes=num_classes,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=2000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=[
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.6,
neg_iou_thr=0.6,
min_pos_iou=0.6,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.7,
min_pos_iou=0.7,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False),
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.8,
neg_iou_thr=0.8,
min_pos_iou=0.8,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
mask_size=28,
pos_weight=-1,
debug=False)
]),
test_cfg=dict(
rpn=dict(
score_thr=0.0001,
nms_pre=1000,
max_per_img=256,
nms=dict(type='nms', iou_threshold=0.9),
min_bbox_size=0),
rcnn=dict(
score_thr=0.02,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=300,
mask_thr_binary=0.5)))
backend = 'pillow'
test_pipeline = [
dict(
type='LoadImageFromFile',
backend_args=_base_.backend_args,
imdecode_backend=backend),
dict(type='Resize', scale=(1333, 800), keep_ratio=True, backend=backend),
dict(
type='LoadAnnotations',
with_bbox=True,
with_mask=True,
poly2mask=False),
dict(
type='PackDetInputs',
meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
'scale_factor'))
]
train_dataloader = dict(batch_size=8, num_workers=4)
val_dataloader = dict(dataset=dict(pipeline=test_pipeline))
test_dataloader = val_dataloader
# Enable automatic-mixed-precision training with AmpOptimWrapper.
optim_wrapper = dict(
type='AmpOptimWrapper',
optimizer=dict(
type='SGD', lr=0.01 * 4, momentum=0.9, weight_decay=0.00004),
paramwise_cfg=dict(norm_decay_mult=0.))
param_scheduler = [
dict(
type='LinearLR',
start_factor=0.00025,
by_epoch=False,
begin=0,
end=4000),
dict(
type='MultiStepLR',
begin=0,
end=25,
by_epoch=True,
milestones=[22, 24],
gamma=0.1)
]
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
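# Rough sketch (not part of the original config) of the linear scaling rule
# that `auto_scale_lr` is commonly assumed to apply when it is enabled at
# launch time: scaled_lr = lr * actual_batch_size / base_batch_size.
# With the values above, lr = 0.01 * 4 = 0.04 at base_batch_size = 64
# (8 GPUs x 8 samples per GPU); running on e.g. 4 GPUs x 8 samples
# (batch size 32) would give 0.04 * 32 / 64 = 0.02.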
| 9,887 | 32.070234 | 79 |
py
|
ERD
|
ERD-main/projects/DiffusionDet/diffusiondet/loss.py
|
# Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Modified from https://github.com/ShoufaChen/DiffusionDet/blob/main/diffusiondet/loss.py # noqa
# This work is licensed under the CC-BY-NC 4.0 License.
# Users should be careful about adopting these features in any commercial matters. # noqa
# For more details, please refer to https://github.com/ShoufaChen/DiffusionDet/blob/main/LICENSE # noqa
from typing import List, Tuple, Union
import torch
import torch.nn as nn
from mmengine.config import ConfigDict
from mmengine.structures import InstanceData
from torch import Tensor
from mmdet.registry import MODELS, TASK_UTILS
from mmdet.structures.bbox import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh
from mmdet.utils import ConfigType
@TASK_UTILS.register_module()
class DiffusionDetCriterion(nn.Module):
def __init__(
self,
num_classes,
assigner: Union[ConfigDict, nn.Module],
deep_supervision=True,
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
alpha=0.25,
gamma=2.0,
reduction='sum',
loss_weight=2.0),
loss_bbox=dict(type='L1Loss', reduction='sum', loss_weight=5.0),
loss_giou=dict(type='GIoULoss', reduction='sum', loss_weight=2.0),
):
super().__init__()
self.num_classes = num_classes
if isinstance(assigner, nn.Module):
self.assigner = assigner
else:
self.assigner = TASK_UTILS.build(assigner)
self.deep_supervision = deep_supervision
self.loss_cls = MODELS.build(loss_cls)
self.loss_bbox = MODELS.build(loss_bbox)
self.loss_giou = MODELS.build(loss_giou)
def forward(self, outputs, batch_gt_instances, batch_img_metas):
batch_indices = self.assigner(outputs, batch_gt_instances,
batch_img_metas)
# Compute all the requested losses
loss_cls = self.loss_classification(outputs, batch_gt_instances,
batch_indices)
loss_bbox, loss_giou = self.loss_boxes(outputs, batch_gt_instances,
batch_indices)
losses = dict(
loss_cls=loss_cls, loss_bbox=loss_bbox, loss_giou=loss_giou)
if self.deep_supervision:
assert 'aux_outputs' in outputs
for i, aux_outputs in enumerate(outputs['aux_outputs']):
batch_indices = self.assigner(aux_outputs, batch_gt_instances,
batch_img_metas)
loss_cls = self.loss_classification(aux_outputs,
batch_gt_instances,
batch_indices)
loss_bbox, loss_giou = self.loss_boxes(aux_outputs,
batch_gt_instances,
batch_indices)
tmp_losses = dict(
loss_cls=loss_cls,
loss_bbox=loss_bbox,
loss_giou=loss_giou)
for name, value in tmp_losses.items():
losses[f's.{i}.{name}'] = value
return losses
def loss_classification(self, outputs, batch_gt_instances, indices):
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
target_classes_list = [
gt.labels[J] for gt, (_, J) in zip(batch_gt_instances, indices)
]
target_classes = torch.full(
src_logits.shape[:2],
self.num_classes,
dtype=torch.int64,
device=src_logits.device)
for idx in range(len(batch_gt_instances)):
target_classes[idx, indices[idx][0]] = target_classes_list[idx]
src_logits = src_logits.flatten(0, 1)
target_classes = target_classes.flatten(0, 1)
        # compute the focal classification loss, averaged over matched instances
num_instances = max(torch.cat(target_classes_list).shape[0], 1)
loss_cls = self.loss_cls(
src_logits,
target_classes,
) / num_instances
return loss_cls
def loss_boxes(self, outputs, batch_gt_instances, indices):
assert 'pred_boxes' in outputs
pred_boxes = outputs['pred_boxes']
target_bboxes_norm_list = [
gt.norm_bboxes_cxcywh[J]
for gt, (_, J) in zip(batch_gt_instances, indices)
]
target_bboxes_list = [
gt.bboxes[J] for gt, (_, J) in zip(batch_gt_instances, indices)
]
pred_bboxes_list = []
pred_bboxes_norm_list = []
for idx in range(len(batch_gt_instances)):
pred_bboxes_list.append(pred_boxes[idx, indices[idx][0]])
image_size = batch_gt_instances[idx].image_size
pred_bboxes_norm_list.append(pred_boxes[idx, indices[idx][0]] /
image_size)
pred_boxes_cat = torch.cat(pred_bboxes_list)
pred_boxes_norm_cat = torch.cat(pred_bboxes_norm_list)
target_bboxes_cat = torch.cat(target_bboxes_list)
target_bboxes_norm_cat = torch.cat(target_bboxes_norm_list)
if len(pred_boxes_cat) > 0:
num_instances = pred_boxes_cat.shape[0]
loss_bbox = self.loss_bbox(
pred_boxes_norm_cat,
bbox_cxcywh_to_xyxy(target_bboxes_norm_cat)) / num_instances
loss_giou = self.loss_giou(pred_boxes_cat,
target_bboxes_cat) / num_instances
else:
loss_bbox = pred_boxes.sum() * 0
loss_giou = pred_boxes.sum() * 0
return loss_bbox, loss_giou
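# --- Illustrative usage sketch, not part of the original file ---------------
# A minimal way to drive DiffusionDetCriterion in isolation, assuming mmdet is
# installed. The identity assigner below is a stand-in that matches prediction
# i to gt i; the real model uses DiffusionDetMatcher (defined further down).
# The gt field names (`norm_bboxes_cxcywh`, `image_size`) follow what
# loss_classification / loss_boxes above expect; the concrete shapes and
# values here are demo assumptions, not the ones produced by the real head.
def _demo_diffusiondet_criterion():
    from mmdet.utils import register_all_modules
    register_all_modules()  # register FocalLoss / L1Loss / GIoULoss builders
    class _IdentityAssigner(nn.Module):
        """Match prediction i to gt i (demo only)."""
        def forward(self, outputs, batch_gt_instances, batch_img_metas):
            return [(torch.arange(len(gt.labels)), torch.arange(len(gt.labels)))
                    for gt in batch_gt_instances]
    num_classes, num_proposals = 3, 5
    criterion = DiffusionDetCriterion(
        num_classes=num_classes,
        assigner=_IdentityAssigner(),
        deep_supervision=False)
    outputs = dict(
        pred_logits=torch.randn(1, num_proposals, num_classes),
        pred_boxes=torch.tensor([[[10., 10., 50., 50.]] * num_proposals]))
    # one gt box on a 100x100 image; normalized cxcywh derived from `bboxes`
    gt = InstanceData(
        labels=torch.tensor([1]),
        bboxes=torch.tensor([[12., 8., 48., 52.]]),
        norm_bboxes_cxcywh=torch.tensor([[0.30, 0.30, 0.36, 0.44]]),
        image_size=torch.tensor([[100., 100., 100., 100.]]))
    batch_img_metas = [dict(img_shape=(100, 100))]
    # returns dict(loss_cls=..., loss_bbox=..., loss_giou=...)
    return criterion(outputs, [gt], batch_img_metas)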
@TASK_UTILS.register_module()
class DiffusionDetMatcher(nn.Module):
"""This class computes an assignment between the targets and the
predictions of the network For efficiency reasons, the targets don't
include the no_object.
Because of this, in general, there are more predictions than targets. In
this case, we do a 1-to-k (dynamic) matching of the best predictions, while
the others are un-matched (and thus treated as non-objects).
"""
def __init__(self,
match_costs: Union[List[Union[dict, ConfigDict]], dict,
ConfigDict],
center_radius: float = 2.5,
candidate_topk: int = 5,
iou_calculator: ConfigType = dict(type='BboxOverlaps2D'),
**kwargs):
super().__init__()
self.center_radius = center_radius
self.candidate_topk = candidate_topk
if isinstance(match_costs, dict):
match_costs = [match_costs]
elif isinstance(match_costs, list):
assert len(match_costs) > 0, \
                'match_costs must not be an empty list.'
self.use_focal_loss = False
self.use_fed_loss = False
for _match_cost in match_costs:
if _match_cost.get('type') == 'FocalLossCost':
self.use_focal_loss = True
if _match_cost.get('type') == 'FedLoss':
self.use_fed_loss = True
raise NotImplementedError
self.match_costs = [
TASK_UTILS.build(match_cost) for match_cost in match_costs
]
self.iou_calculator = TASK_UTILS.build(iou_calculator)
def forward(self, outputs, batch_gt_instances, batch_img_metas):
assert 'pred_logits' in outputs and 'pred_boxes' in outputs
pred_logits = outputs['pred_logits']
pred_bboxes = outputs['pred_boxes']
batch_size = len(batch_gt_instances)
assert batch_size == pred_logits.shape[0] == pred_bboxes.shape[0]
batch_indices = []
for i in range(batch_size):
pred_instances = InstanceData()
pred_instances.bboxes = pred_bboxes[i, ...]
pred_instances.scores = pred_logits[i, ...]
gt_instances = batch_gt_instances[i]
img_meta = batch_img_metas[i]
indices = self.single_assigner(pred_instances, gt_instances,
img_meta)
batch_indices.append(indices)
return batch_indices
def single_assigner(self, pred_instances, gt_instances, img_meta):
with torch.no_grad():
gt_bboxes = gt_instances.bboxes
pred_bboxes = pred_instances.bboxes
num_gt = gt_bboxes.size(0)
if num_gt == 0: # empty object in key frame
valid_mask = pred_bboxes.new_zeros((pred_bboxes.shape[0], ),
dtype=torch.bool)
matched_gt_inds = pred_bboxes.new_zeros((gt_bboxes.shape[0], ),
dtype=torch.long)
return valid_mask, matched_gt_inds
valid_mask, is_in_boxes_and_center = \
self.get_in_gt_and_in_center_info(
bbox_xyxy_to_cxcywh(pred_bboxes),
bbox_xyxy_to_cxcywh(gt_bboxes)
)
cost_list = []
for match_cost in self.match_costs:
cost = match_cost(
pred_instances=pred_instances,
gt_instances=gt_instances,
img_meta=img_meta)
cost_list.append(cost)
pairwise_ious = self.iou_calculator(pred_bboxes, gt_bboxes)
cost_list.append((~is_in_boxes_and_center) * 100.0)
cost_matrix = torch.stack(cost_list).sum(0)
cost_matrix[~valid_mask] = cost_matrix[~valid_mask] + 10000.0
fg_mask_inboxes, matched_gt_inds = \
self.dynamic_k_matching(
cost_matrix, pairwise_ious, num_gt)
return fg_mask_inboxes, matched_gt_inds
def get_in_gt_and_in_center_info(
self, pred_bboxes: Tensor,
gt_bboxes: Tensor) -> Tuple[Tensor, Tensor]:
"""Get the information of which prior is in gt bboxes and gt center
priors."""
xy_target_gts = bbox_cxcywh_to_xyxy(gt_bboxes) # (x1, y1, x2, y2)
pred_bboxes_center_x = pred_bboxes[:, 0].unsqueeze(1)
pred_bboxes_center_y = pred_bboxes[:, 1].unsqueeze(1)
# whether the center of each anchor is inside a gt box
b_l = pred_bboxes_center_x > xy_target_gts[:, 0].unsqueeze(0)
b_r = pred_bboxes_center_x < xy_target_gts[:, 2].unsqueeze(0)
b_t = pred_bboxes_center_y > xy_target_gts[:, 1].unsqueeze(0)
b_b = pred_bboxes_center_y < xy_target_gts[:, 3].unsqueeze(0)
        # all four conditions hold -> prior center lies inside the gt box,
        # result shape [num_priors, num_gt]
is_in_boxes = ((b_l.long() + b_r.long() + b_t.long() +
b_b.long()) == 4)
is_in_boxes_all = is_in_boxes.sum(1) > 0 # [num_query]
# in fixed center
center_radius = 2.5
# Modified to self-adapted sampling --- the center size depends
# on the size of the gt boxes
# https://github.com/dulucas/UVO_Challenge/blob/main/Track1/detection/mmdet/core/bbox/assigners/rpn_sim_ota_assigner.py#L212 # noqa
b_l = pred_bboxes_center_x > (
gt_bboxes[:, 0] -
(center_radius *
(xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)
b_r = pred_bboxes_center_x < (
gt_bboxes[:, 0] +
(center_radius *
(xy_target_gts[:, 2] - xy_target_gts[:, 0]))).unsqueeze(0)
b_t = pred_bboxes_center_y > (
gt_bboxes[:, 1] -
(center_radius *
(xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)
b_b = pred_bboxes_center_y < (
gt_bboxes[:, 1] +
(center_radius *
(xy_target_gts[:, 3] - xy_target_gts[:, 1]))).unsqueeze(0)
is_in_centers = ((b_l.long() + b_r.long() + b_t.long() +
b_b.long()) == 4)
is_in_centers_all = is_in_centers.sum(1) > 0
is_in_boxes_anchor = is_in_boxes_all | is_in_centers_all
is_in_boxes_and_center = (is_in_boxes & is_in_centers)
return is_in_boxes_anchor, is_in_boxes_and_center
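    # Worked example (illustrative, not part of the original file) of the
    # adaptive center prior above: with gt_bboxes given in cxcywh format, a
    # prior center counts as "in center" along x when it falls inside
    # cx +/- center_radius * w. For a gt with cx = 30, w = 40 and
    # center_radius = 2.5 this is the interval [-70, 130], so wider boxes
    # accept priors from a proportionally wider region.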
def dynamic_k_matching(self, cost: Tensor, pairwise_ious: Tensor,
num_gt: int) -> Tuple[Tensor, Tensor]:
"""Use IoU and matching cost to calculate the dynamic top-k positive
targets."""
matching_matrix = torch.zeros_like(cost)
# select candidate topk ious for dynamic-k calculation
candidate_topk = min(self.candidate_topk, pairwise_ious.size(0))
topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
# calculate dynamic k for each gt
dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
for gt_idx in range(num_gt):
_, pos_idx = torch.topk(
cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False)
matching_matrix[:, gt_idx][pos_idx] = 1
del topk_ious, dynamic_ks, pos_idx
        # a prior matched to multiple gts keeps only its lowest-cost gt
        prior_match_gt_mask = matching_matrix.sum(1) > 1
if prior_match_gt_mask.sum() > 0:
_, cost_argmin = torch.min(cost[prior_match_gt_mask, :], dim=1)
matching_matrix[prior_match_gt_mask, :] *= 0
matching_matrix[prior_match_gt_mask, cost_argmin] = 1
        # keep reassigning until every gt has at least one matched prior
        while (matching_matrix.sum(0) == 0).any():
matched_query_id = matching_matrix.sum(1) > 0
cost[matched_query_id] += 100000.0
unmatch_id = torch.nonzero(
matching_matrix.sum(0) == 0, as_tuple=False).squeeze(1)
for gt_idx in unmatch_id:
pos_idx = torch.argmin(cost[:, gt_idx])
matching_matrix[:, gt_idx][pos_idx] = 1.0
if (matching_matrix.sum(1) > 1).sum() > 0:
_, cost_argmin = torch.min(cost[prior_match_gt_mask], dim=1)
matching_matrix[prior_match_gt_mask] *= 0
matching_matrix[prior_match_gt_mask, cost_argmin, ] = 1
assert not (matching_matrix.sum(0) == 0).any()
# get foreground mask inside box and center prior
fg_mask_inboxes = matching_matrix.sum(1) > 0
matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1)
return fg_mask_inboxes, matched_gt_inds
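# --- Illustrative sketch, not part of the original file ---------------------
# A torch-only toy run of the dynamic-k rule implemented in dynamic_k_matching
# above: each gt receives k = clamp(sum of its top-k IoUs, min=1) positive
# priors, chosen as its lowest-cost priors. Values are arbitrary; only `torch`
# (already imported above) is needed.
if __name__ == '__main__':
    torch.manual_seed(0)
    num_priors, num_gt, candidate_topk = 6, 2, 3
    pairwise_ious = torch.rand(num_priors, num_gt)
    cost = torch.rand(num_priors, num_gt)
    topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
    dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
    matching_matrix = torch.zeros_like(cost)
    for gt_idx in range(num_gt):
        _, pos_idx = torch.topk(
            cost[:, gt_idx], k=int(dynamic_ks[gt_idx]), largest=False)
        matching_matrix[pos_idx, gt_idx] = 1
    print('dynamic k per gt:', dynamic_ks.tolist())
    print('matching matrix:\n', matching_matrix)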
| 14,481 | 41.345029 | 142 |
py
|