# more-main/more_main.py
import argparse
from collections import defaultdict
import math
import time
from constants import (
COLOR_MEAN,
COLOR_STD,
DEPTH_MEAN,
DEPTH_STD,
GRASP_Q_GRASP_THRESHOLD,
GRASP_Q_PUSH_THRESHOLD,
GRIPPER_GRASP_WIDTH_PIXEL,
GRIPPER_PUSH_RADIUS_PIXEL,
IMAGE_PAD_WIDTH,
IS_REAL,
MCTS_EARLY_ROLLOUTS,
MCTS_MAX_LEVEL,
NUM_ROTATION,
PIXEL_SIZE,
PUSH_DISTANCE,
PUSH_DISTANCE_PIXEL,
TARGET_LOWER,
TARGET_UPPER,
WORKSPACE_LIMITS,
)
import numpy as np
from mcts_utils import MCTSHelper
from torchvision.transforms import functional as TF
import random
import cv2
import torch
from environment_sim import Environment
from models import reinforcement_net, PushNet
from mcts_main import SeachCollector
import utils
from mcts_network.search import MonteCarloTreeSearch
from mcts_network.nodes import PushSearchNode
from mcts_network.push import PushState
from mcts_utils import _sampled_prediction_precise
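# more_main.py drives the MORE pipeline: grasp the target directly when the
# grasp network's best Q value clears GRASP_Q_GRASP_THRESHOLD, otherwise run a
# Monte Carlo tree search over pushes (guided by the learned PushNet) and
# execute the best push found before trying again.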
@torch.no_grad()
def get_q(model, color_heightmap, depth_heightmap):
color_heightmap_pad = np.copy(color_heightmap)
depth_heightmap_pad = np.copy(depth_heightmap)
# Add extra padding (to handle rotations inside network)
color_heightmap_pad = np.pad(
color_heightmap_pad,
((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)),
"constant",
constant_values=0,
)
depth_heightmap_pad = np.pad(
depth_heightmap_pad, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
# Pre-process color image (scale and normalize)
image_mean = COLOR_MEAN
image_std = COLOR_STD
input_color_image = color_heightmap_pad.astype(float) / 255
for c in range(3):
input_color_image[:, :, c] = (input_color_image[:, :, c] - image_mean[c]) / image_std[c]
# Pre-process depth image (normalize)
image_mean = DEPTH_MEAN
image_std = DEPTH_STD
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1], 1)
input_depth_image = np.copy(depth_heightmap_pad)
input_depth_image[:, :, 0] = (input_depth_image[:, :, 0] - image_mean[0]) / image_std[0]
# Construct minibatch of size 1 (b,c,h,w)
input_color_image.shape = (
input_color_image.shape[0],
input_color_image.shape[1],
input_color_image.shape[2],
1,
)
input_depth_image.shape = (
input_depth_image.shape[0],
input_depth_image.shape[1],
input_depth_image.shape[2],
1,
)
input_color_data = torch.from_numpy(input_color_image.astype(np.float32)).permute(3, 2, 0, 1)
input_depth_data = torch.from_numpy(input_depth_image.astype(np.float32)).permute(3, 2, 0, 1)
# Helper
# mask of target object
# temp = cv2.cvtColor(color_heightmap_pad, cv2.COLOR_RGB2HSV)
# mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
# # mask of clearance of target object
# target_erode = cv2.filter2D(mask, -1, self.kernel_erode)
# clearance = np.zeros_like(mask)
# clearance[
# np.logical_and(
# np.logical_and(target_erode > 0, mask == 0), depth_heightmap_pad < DEPTH_MIN
# )
# ] = 255
# temp = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2HSV)
# mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
# mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
# mask_pad.shape = (
# mask_pad.shape[0],
# mask_pad.shape[1],
# mask_pad.shape[2],
# 1,
# )
# mask_pad = torch.from_numpy(mask_pad.astype(np.float32)).permute(3, 2, 0, 1)
# Pass input data through model
# output_prob = model(input_color_data, input_depth_data, True, -1, use_push=True, push_only=True)
output_prob = model(input_color_data, input_depth_data, True, -1)
# Return Q values (and remove extra padding)
for rotate_idx in range(len(output_prob)):
# output = torch.sigmoid(output_prob[rotate_idx])
output = output_prob[rotate_idx]
if rotate_idx == 0:
push_predictions = output.cpu().data.numpy()[
:, 0, :, :,
]
else:
push_predictions = np.concatenate(
(push_predictions, output.cpu().data.numpy()[:, 0, :, :,],), axis=0,
)
padding_width_start = IMAGE_PAD_WIDTH
padding_width_end = push_predictions[0].shape[0] - IMAGE_PAD_WIDTH
push_predictions = push_predictions[
:, padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
best_pix_ind = np.unravel_index(np.argmax(push_predictions), push_predictions.shape)
grasp_q_value = push_predictions[best_pix_ind]
return grasp_q_value, best_pix_ind, push_predictions
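# A minimal usage sketch (hypothetical call site; `env` and `model` are assumed
# to be set up as in __main__ below):
#   color, depth, _ = utils.get_true_heightmap(env)
#   q, (rot_idx, row, col), q_maps = get_q(model, color, depth)
# Assuming the model returns one map per rotation, q_maps has shape
# (NUM_ROTATION, IMAGE_SIZE, IMAGE_SIZE) after the padding is cropped, and
# (rot_idx, row, col) indexes its maximum.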
def filter_prediction(mask_heightmap, push_predictions):
kernel_collision = np.ones(
(GRIPPER_PUSH_RADIUS_PIXEL * 2, GRIPPER_GRASP_WIDTH_PIXEL), dtype=np.float32
)
kernel_right = np.zeros(
(
GRIPPER_PUSH_RADIUS_PIXEL * 2,
(PUSH_DISTANCE_PIXEL + round(GRIPPER_GRASP_WIDTH_PIXEL / 2)) * 2,
),
dtype=np.float32,
)
kernel_right[:, PUSH_DISTANCE_PIXEL + round(GRIPPER_GRASP_WIDTH_PIXEL / 2) :] = 1
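# The two kernels above implement geometric checks via 2D correlation
# (cv2.filter2D below): kernel_collision marks pixels whose gripper footprint
# would already overlap an object at the push start, while kernel_right keeps
# only pixels with object mass ahead of the push (to the right, since each
# prediction map is rotated so that pushes point along +x).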
num_rotations = push_predictions.shape[0]
for canvas_row in range(int(num_rotations / 4)):
for canvas_col in range(4):
rotate_idx = canvas_row * 4 + canvas_col
# rotate
pred_pad = utils.rotate(
push_predictions[rotate_idx], rotate_idx * (360.0 / num_rotations)
)
mask_pad = utils.rotate(
mask_heightmap, rotate_idx * (360.0 / num_rotations), is_mask=True
)
# filter collision
target_invalid = cv2.filter2D(mask_pad, -1, kernel_collision)
pred_pad[(target_invalid > 0)] = 0
# filter point to the right
target_invalid = cv2.filter2D(mask_pad, -1, kernel_right)
pred_pad[(target_invalid == 0)] = 0
# rotate back
pred_pad = utils.rotate(pred_pad, -rotate_idx * (360.0 / num_rotations))
push_predictions[rotate_idx] = pred_pad
return push_predictions
@torch.no_grad()
def sampled_prediction_precise(mcts_helper, env, model, color_image, mask_image):
actions = mcts_helper.sample_actions(None, color_image, mask_image)
out_q = _sampled_prediction_precise(env, model, actions, mask_image)
print(out_q)
final = actions[np.argmax(out_q)]
return final[0], final[1]
def sampled_prediction(mcts_helper, env, color_image, mask_image, push_predictions):
actions = mcts_helper.sample_actions(None, color_image, mask_image)
right = (1, 0)
new_push_predictions = np.zeros_like(push_predictions)
for action in actions:
action_start = (action[0][1], action[0][0])
action_end = (action[1][1], action[1][0])
current = (
action_end[0] - action_start[0],
action_end[1] - action_start[1],
)
dot = (
right[0] * current[0] + right[1] * current[1]
) # dot product between [x1, y1] and [x2, y2]
det = right[0] * current[1] - right[1] * current[0] # determinant
rot_angle = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)
rot_angle = math.degrees(rot_angle)
if rot_angle < 0:
rot_angle = 360 + rot_angle
rotate_idx = round(rot_angle / (360 / NUM_ROTATION))
if rotate_idx == NUM_ROTATION:
rotate_idx = 0
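# Worked example with NUM_ROTATION = 16: the bin width is 360 / 16 = 22.5 deg,
# so a push heading of 100 deg from +x maps to round(100 / 22.5) = 4, and a
# heading of 355 deg rounds to bin 16, which the wrap above sends back to 0.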
new_push_predictions[
rotate_idx,
action_start[1] - 3 : action_start[1] + 4,
action_start[0] - 3 : action_start[0] + 4,
] = push_predictions[
rotate_idx,
action_start[1] - 3 : action_start[1] + 4,
action_start[0] - 3 : action_start[0] + 4,
]
print(
np.max(
push_predictions[
rotate_idx,
action_start[1] - 3 : action_start[1] + 4,
action_start[0] - 3 : action_start[0] + 4,
]
)
)
# new_push_predictions[rotate_idx, action_start[1], action_start[0]] = push_predictions[rotate_idx, action_start[1], action_start[0]]
# best_locate = [rot_angle, action_start[1], action_start[0], action_end[1], action_end[0]]
# action_start = (best_locate[1], best_locate[2])
# rotated_color_image = utils.rotate(color_image, rot_angle)
# origin = mask_image.shape
# origin = ((origin[0] - 1) / 2, (origin[1] - 1) / 2)
# new_action_start = utils.rotate_point(origin, action_start, math.radians(rot_angle))
# new_action_start = (round(new_action_start[0]), round(new_action_start[1]))
# point_from = (int(new_action_start[1]), int(new_action_start[0]))
# point_to = (int(point_from[0] + PUSH_DISTANCE_PIXEL), int(point_from[1]))
# rotated_color_image = cv2.arrowedLine(
# rotated_color_image, point_from, point_to, (100, 200, 0), 2, tipLength=0.2,
# )
# cv2.imshow('before', color_image)
# cv2.imshow('after', rotated_color_image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
return new_push_predictions
@torch.no_grad()
def get_q_mask(model, mask_heightmap, env):
mask_heightmap = np.copy(mask_heightmap)
# relabel
mask_heightmap = utils.relabel_mask(env, mask_heightmap)
# focus on target, so make one extra channel
target_mask_img = np.zeros_like(mask_heightmap, dtype=np.uint8)
target_mask_img[mask_heightmap == 255] = 255
mask_heightmap = np.dstack((target_mask_img, mask_heightmap))
# Add extra padding (to handle rotations inside network)
mask_heightmap = np.pad(
mask_heightmap,
((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)),
"constant",
constant_values=0,
)
input_image = mask_heightmap.astype(float) / 255
# Construct minibatch of size 1 (b,c,h,w)
input_image.shape = (
input_image.shape[0],
input_image.shape[1],
input_image.shape[2],
1,
)
input_data = torch.from_numpy(input_image.astype(np.float32)).permute(3, 2, 0, 1)
# Pass input data through model
# output_prob = model(input_color_data, input_depth_data, True, -1, use_push=True, push_only=True)
output_prob = model(input_data, True, -1)
# Return Q values (and remove extra padding)
for rotate_idx in range(len(output_prob)):
# output = torch.sigmoid(output_prob[rotate_idx])
output = output_prob[rotate_idx]
if rotate_idx == 0:
push_predictions = output.cpu().data.numpy()[
:, 0, :, :,
]
else:
push_predictions = np.concatenate(
(push_predictions, output.cpu().data.numpy()[:, 0, :, :,],), axis=0,
)
padding_width_start = IMAGE_PAD_WIDTH
padding_width_end = push_predictions[0].shape[0] - IMAGE_PAD_WIDTH
push_predictions = push_predictions[
:, padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
best_pix_ind = np.unravel_index(np.argmax(push_predictions), push_predictions.shape)
grasp_q_value = push_predictions[best_pix_ind]
return grasp_q_value, best_pix_ind, push_predictions
def parse_args():
parser = argparse.ArgumentParser(description="Lifelong DQN")
parser.add_argument("--test_case", action="store", help="File for testing")
parser.add_argument("--test_cases", nargs="+", help="Files for testing")
parser.add_argument(
"--max_test_trials",
action="store",
type=int,
default=5,
help="maximum number of test runs per case/scenario",
)
parser.add_argument(
"--num_iter",
action="store",
type=int,
default=50,
)
parser.add_argument(
"--push_model",
action="store",
type=str,
default="logs_mcts/runs/2021-09-02-22-59-train-ratio-1-final/lifelong_model-20.pth",
)
parser.add_argument("--switch", action="store", type=int, help="Switch target")
args = parser.parse_args()
return args
if __name__ == "__main__":
# set seed
random.seed(1234)
torch.manual_seed(1234)
np.random.seed(1234)
# below is network with MCTS (MORE)
iteration = 0
args = parse_args()
case = args.test_case
cases = args.test_cases
switch = args.switch
if switch is not None:
print(f"Target ID has been switched to {switch}")
if cases:
repeat_num = len(cases)
else:
repeat_num = args.max_test_trials
cases = [case] * repeat_num
collector = SeachCollector(cases)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
env = Environment(gui=False)
env_sim = Environment(gui=False)
mcts_helper = MCTSHelper(env_sim, "logs_grasp/snapshot-post-020000.reinforcement.pth")
push_model = PushNet()
push_model.load_state_dict(torch.load(args.push_model)["model"])
push_model = push_model.to(device)
push_model.eval()
num_action_log = defaultdict(list)
for repeat_idx in range(repeat_num):
success = False
while not success:
env.reset()
env_sim.reset()
success = env.add_object_push_from_file(cases[repeat_idx])
success &= env_sim.add_object_push_from_file(cases[repeat_idx])
print(f"Reset environment of {repeat_idx}")
num_action = [0, 0, 0]
start_time = time.time()
while True:
num_action[0] += 1
color_image, depth_image, _ = utils.get_true_heightmap(env)
temp = cv2.cvtColor(color_image, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
print(f"Target on the table (value: {np.sum(mask) / 255})")
if np.sum(mask) / 255 < 10:
break
q_value, best_pix_ind, grasp_predictions = mcts_helper.get_grasp_q(
color_image, depth_image, post_checking=True
)
print(f"Max grasp Q value: {q_value}")
# record
collector.save_heightmaps(iteration, color_image, depth_image)
grasp_pred_vis = mcts_helper.get_prediction_vis(
grasp_predictions, color_image, best_pix_ind
)
collector.save_visualizations(iteration, grasp_pred_vis, "grasp")
# Grasp >>>>>
if q_value > GRASP_Q_GRASP_THRESHOLD:
best_rotation_angle = np.deg2rad(best_pix_ind[0] * (360.0 / NUM_ROTATION))
primitive_position = [
best_pix_ind[1] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
best_pix_ind[2] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
depth_image[best_pix_ind[1]][best_pix_ind[2]] + WORKSPACE_LIMITS[2][0],
]
if not IS_REAL:
success = env.grasp(primitive_position, best_rotation_angle)
else:
grasp_success = env.grasp(primitive_position, best_rotation_angle)
success = grasp_success
# record
reward_value = 1 if success else 0
collector.executed_action_log.append(
[
1, # grasp
primitive_position[0],
primitive_position[1],
primitive_position[2],
best_rotation_angle,
-1,
-1,
]
)
collector.label_value_log.append(reward_value)
collector.write_to_log("executed-action", collector.executed_action_log)
collector.write_to_log("label-value", collector.label_value_log)
iteration += 1
if success:
num_action[2] += 1
break
else:
continue
# Grasp <<<<<
# Search >>>>>
object_states = env.save_objects()
initial_state = PushState(
"root",
object_states,
q_value,
0,
mcts_helper,
push_model,
max_q=GRASP_Q_PUSH_THRESHOLD,
max_level=MCTS_MAX_LEVEL-1,
)
root = PushSearchNode(initial_state)
mcts = MonteCarloTreeSearch(root)
best_node = mcts.best_action(args.num_iter, MCTS_EARLY_ROLLOUTS, True)
print("best node:")
print(best_node.state.uid)
print(best_node.state.q_value)
print(best_node.prev_move)
print(len(root.children))
node = best_node
node_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
node_action = str(node.prev_move).split("_")
cv2.arrowedLine(
node_image,
(int(node_action[1]), int(node_action[0])),
(int(node_action[3]), int(node_action[2])),
(255, 0, 255),
2,
tipLength=0.4,
)
collector.save_predictions(iteration, node_image)
# Search <<<<<
# Push >>>>>
num_action[1] += 1
push_start = best_node.prev_move.pos0
push_end = best_node.prev_move.pos1
push_start = [
push_start[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
push_start[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
push_end = [
push_end[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
push_end[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
env.push(push_start, push_end)
# record
reward_value = 0
collector.executed_action_log.append(
[
0, # push
push_start[0],
push_start[1],
push_start[2],
push_end[0],
push_end[1],
push_end[2],
]
)
collector.label_value_log.append(reward_value)
collector.write_to_log("executed-action", collector.executed_action_log)
collector.write_to_log("label-value", collector.label_value_log)
iteration += 1
# clean up for memory
del initial_state
del mcts
del root
del best_node
del push_start
del push_end
mcts_helper.reset()
end_time = time.time()
collector.time_log.append(end_time - start_time)
collector.write_to_log("executed-time", collector.time_log)
print(num_action)
num_action_log[cases[repeat_idx]].append(num_action)
print(num_action_log)
total_case = 0
total_action = 0
for key in num_action_log:
this_total_case = 0
this_total_action = 0
this_total_push = 0
this_total_grasp = 0
this_total_success = 0
for trial in num_action_log[key]:
this_total_case += 1
this_total_action += trial[0]
this_total_push += trial[1]
this_total_grasp += (trial[0] - trial[1])
this_total_success += trial[2]
print(key, "this_case:", this_total_case, "this_action:", this_total_action,
"this_push:", this_total_push, "this_grasp:", this_total_grasp,
"average num", this_total_action/this_total_case, "average_grasp", this_total_success / this_total_grasp, "total_complete", this_total_success
)
total_case += len(num_action_log[key])
for re in num_action_log[key]:
total_action += re[0]
print(total_case, total_action, total_action / total_case)
# more-main/environment_sim.py
import time
import glob
import os
import pybullet as pb
import pybullet_data
from pybullet_utils import bullet_client
import numpy as np
import cameras
from constants import PIXEL_SIZE, WORKSPACE_LIMITS
class Environment:
def __init__(self, gui=True, time_step=1 / 240):
"""Creates environment with PyBullet.
Args:
gui: show environment with PyBullet's built-in display viewer
time_step: PyBullet physics simulation step speed. Default is 1 / 240.
"""
self.time_step = time_step
self.gui = gui
self.pixel_size = PIXEL_SIZE
self.obj_ids = {"fixed": [], "rigid": []}
self.agent_cams = cameras.RealSenseD455.CONFIG
self.oracle_cams = cameras.Oracle.CONFIG
self.bounds = WORKSPACE_LIMITS
self.home_joints = np.array([0, -0.8, 0.5, -0.2, -0.5, 0]) * np.pi
self.ik_rest_joints = np.array([0, -0.5, 0.5, -0.5, -0.5, 0]) * np.pi
self.drop_joints0 = np.array([0.5, -0.8, 0.5, -0.2, -0.5, 0]) * np.pi
self.drop_joints1 = np.array([1, -0.5, 0.5, -0.5, -0.5, 0]) * np.pi
# Start PyBullet.
self._pb = bullet_client.BulletClient(connection_mode=pb.GUI if gui else pb.DIRECT)
self._client_id = self._pb._client
self._pb.setAdditionalSearchPath(pybullet_data.getDataPath())
self._pb.setTimeStep(time_step)
if gui:
target = self._pb.getDebugVisualizerCamera()[11]
self._pb.resetDebugVisualizerCamera(
cameraDistance=1.5, cameraYaw=90, cameraPitch=-25, cameraTargetPosition=target,
)
@property
def is_static(self):
"""Return true if objects are no longer moving."""
v = [
np.linalg.norm(self._pb.getBaseVelocity(i, physicsClientId=self._client_id)[0])
for i in self.obj_ids["rigid"]
]
return all(np.array(v) < 5e-3)
@property
def info(self):
"""Environment info variable with object poses, dimensions, and colors."""
info = {} # object id : (position, rotation, dimensions)
for obj_ids in self.obj_ids.values():
for obj_id in obj_ids:
pos, rot = self._pb.getBasePositionAndOrientation(
obj_id, physicsClientId=self._client_id
)
dim = self._pb.getVisualShapeData(obj_id, physicsClientId=self._client_id)[0][3]
info[obj_id] = (pos, rot, dim)
return info
def add_object_id(self, obj_id, category="rigid"):
"""List of (fixed, rigid) objects in env."""
self.obj_ids[category].append(obj_id)
def remove_object_id(self, obj_id, category="rigid"):
"""List of (fixed, rigid) objects in env."""
self.obj_ids[category].remove(obj_id)
def save_objects(self):
"""Save states of all rigid objects. If this is unstable, could use saveBullet."""
success = False
while not success:
success = self.wait_static()
object_states = []
for obj in self.obj_ids["rigid"]:
pos, orn = self._pb.getBasePositionAndOrientation(obj)
linVel, angVel = self._pb.getBaseVelocity(obj)
object_states.append((pos, orn, linVel, angVel))
return object_states
def restore_objects(self, object_states):
"""Restore states of all rigid objects. If this is unstable, could use restoreState along with saveBullet."""
for idx, obj in enumerate(self.obj_ids["rigid"]):
pos, orn, linVel, angVel = object_states[idx]
self._pb.resetBasePositionAndOrientation(obj, pos, orn)
self._pb.resetBaseVelocity(obj, linVel, angVel)
success = self.wait_static()
return success
def wait_static(self, timeout=3):
"""Step simulator asynchronously until objects settle."""
self._pb.stepSimulation()
t0 = time.time()
while (time.time() - t0) < timeout:
if self.is_static:
return True
self._pb.stepSimulation()
print(f"Warning: Wait static exceeded {timeout} second timeout. Skipping.")
return False
def reset(self):
self.obj_ids = {"fixed": [], "rigid": []}
self.target_obj_id = -1
self._pb.resetSimulation()
self._pb.setGravity(0, 0, -9.8)
# Temporarily disable rendering to load scene faster.
if self.gui:
self._pb.configureDebugVisualizer(pb.COV_ENABLE_RENDERING, 0)
# Load workspace
self.plane = self._pb.loadURDF(
"plane.urdf", basePosition=(0, 0, -0.0005), useFixedBase=True,
)
self.workspace = self._pb.loadURDF(
"assets/workspace/workspace.urdf", basePosition=(0.5, 0, 0), useFixedBase=True,
)
self._pb.changeDynamics(
self.plane,
-1,
lateralFriction=1.1,
restitution=0.5,
linearDamping=0.5,
angularDamping=0.5,
)
self._pb.changeDynamics(
self.workspace,
-1,
lateralFriction=1.1,
restitution=0.5,
linearDamping=0.5,
angularDamping=0.5,
)
# Load UR5e
self.ur5e = self._pb.loadURDF(
"assets/ur5e/ur5e.urdf", basePosition=(0, 0, 0), useFixedBase=True,
)
self.ur5e_joints = []
for i in range(self._pb.getNumJoints(self.ur5e)):
info = self._pb.getJointInfo(self.ur5e, i)
joint_id = info[0]
joint_name = info[1].decode("utf-8")
joint_type = info[2]
if joint_name == "ee_fixed_joint":
self.ur5e_ee_id = joint_id
if joint_type == pb.JOINT_REVOLUTE:
self.ur5e_joints.append(joint_id)
self._pb.enableJointForceTorqueSensor(self.ur5e, self.ur5e_ee_id, 1)
self.setup_gripper()
# Move robot to home joint configuration.
success = self.go_home()
self.close_gripper()
self.open_gripper()
if not success:
print("Simulation is wrong!")
exit()
# Re-enable rendering.
if self.gui:
self._pb.configureDebugVisualizer(
self._pb.COV_ENABLE_RENDERING, 1, physicsClientId=self._client_id
)
def setup_gripper(self):
"""Load end-effector: gripper"""
ee_position, _ = self.get_link_pose(self.ur5e, self.ur5e_ee_id)
self.ee = self._pb.loadURDF(
"assets/ur5e/gripper/robotiq_2f_85.urdf",
ee_position,
self._pb.getQuaternionFromEuler((0, -np.pi / 2, 0)),
)
self.ee_tip_offset = 0.1625
self.gripper_angle_open = 0.03
self.gripper_angle_close = 0.8
self.gripper_angle_close_threshold = 0.73
self.gripper_mimic_joints = {
"left_inner_finger_joint": -1,
"left_inner_knuckle_joint": -1,
"right_outer_knuckle_joint": -1,
"right_inner_finger_joint": -1,
"right_inner_knuckle_joint": -1,
}
for i in range(self._pb.getNumJoints(self.ee)):
info = self._pb.getJointInfo(self.ee, i)
joint_id = info[0]
joint_name = info[1].decode("utf-8")
joint_type = info[2]
if joint_name == "finger_joint":
self.gripper_main_joint = joint_id
elif joint_name == "dummy_center_fixed_joint":
self.ee_tip_id = joint_id
elif "finger_pad_joint" in joint_name:
self._pb.changeDynamics(
self.ee, joint_id, lateralFriction=0.9
)
self.ee_finger_pad_id = joint_id
elif joint_type == pb.JOINT_REVOLUTE:
self.gripper_mimic_joints[joint_name] = joint_id
# Keep the joints static
self._pb.setJointMotorControl2(
self.ee, joint_id, pb.VELOCITY_CONTROL, targetVelocity=0, force=0,
)
self.ee_constraint = self._pb.createConstraint(
parentBodyUniqueId=self.ur5e,
parentLinkIndex=self.ur5e_ee_id,
childBodyUniqueId=self.ee,
childLinkIndex=-1,
jointType=pb.JOINT_FIXED,
jointAxis=(0, 0, 1),
parentFramePosition=(0, 0, 0),
childFramePosition=(0, 0, -0.02),
childFrameOrientation=pb.getQuaternionFromEuler((0, -np.pi / 2, 0)),
physicsClientId=self._client_id,
)
self._pb.changeConstraint(self.ee_constraint, maxForce=10000)
self._pb.enableJointForceTorqueSensor(self.ee, self.gripper_main_joint, 1)
# Set up mimic joints in robotiq gripper: left
c = self._pb.createConstraint(
self.ee,
self.gripper_main_joint,
self.ee,
self.gripper_mimic_joints["left_inner_finger_joint"],
jointType=pb.JOINT_GEAR,
jointAxis=[1, 0, 0],
parentFramePosition=[0, 0, 0],
childFramePosition=[0, 0, 0],
)
self._pb.changeConstraint(c, gearRatio=1, erp=0.8, maxForce=10000)
c = self._pb.createConstraint(
self.ee,
self.gripper_main_joint,
self.ee,
self.gripper_mimic_joints["left_inner_knuckle_joint"],
jointType=pb.JOINT_GEAR,
jointAxis=[1, 0, 0],
parentFramePosition=[0, 0, 0],
childFramePosition=[0, 0, 0],
)
self._pb.changeConstraint(c, gearRatio=-1, erp=0.8, maxForce=10000)
# Set up mimic joints in robotiq gripper: right
c = self._pb.createConstraint(
self.ee,
self.gripper_mimic_joints["right_outer_knuckle_joint"],
self.ee,
self.gripper_mimic_joints["right_inner_finger_joint"],
jointType=pb.JOINT_GEAR,
jointAxis=[1, 0, 0],
parentFramePosition=[0, 0, 0],
childFramePosition=[0, 0, 0],
)
self._pb.changeConstraint(c, gearRatio=1, erp=0.8, maxForce=10000)
c = self._pb.createConstraint(
self.ee,
self.gripper_mimic_joints["right_outer_knuckle_joint"],
self.ee,
self.gripper_mimic_joints["right_inner_knuckle_joint"],
jointType=pb.JOINT_GEAR,
jointAxis=[1, 0, 0],
parentFramePosition=[0, 0, 0],
childFramePosition=[0, 0, 0],
)
self._pb.changeConstraint(c, gearRatio=-1, erp=0.8, maxForce=10000)
# Set up mimic joints in robotiq gripper: connect left and right
c = self._pb.createConstraint(
self.ee,
self.gripper_main_joint,
self.ee,
self.gripper_mimic_joints["right_outer_knuckle_joint"],
jointType=pb.JOINT_GEAR,
jointAxis=[0, 1, 0],
parentFramePosition=[0, 0, 0],
childFramePosition=[0, 0, 0],
physicsClientId=self._client_id,
)
self._pb.changeConstraint(c, gearRatio=-1, erp=0.8, maxForce=1000)
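# The Robotiq 2F-85 is underactuated: one motor drives all finger joints. The
# JOINT_GEAR constraints above couple each passive joint to the driven one with
# gear ratios of +/-1, so commanding finger_joint (or its right-side mirror)
# moves the whole gripper consistently, approximating the real linkage.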
def step(self, pose0=None, pose1=None):
"""Execute action with specified primitive.
Args:
action: action to execute.
Returns:
obs, done
"""
if pose0 is not None and pose1 is not None:
success = self.push(pose0, pose1)
# Exit early if action times out.
if not success:
return {}, False
# Step simulator asynchronously until objects settle.
while not self.is_static:
self._pb.stepSimulation()
# Get RGB-D camera image observations.
obs = {"color": [], "depth": []}
for config in self.agent_cams:
color, depth, _ = self.render_camera(config)
obs["color"].append(color)
obs["depth"].append(depth)
return obs, True
def seed(self, seed=None):
self._random = np.random.RandomState(seed)
return seed
def render_camera(self, config):
"""Render RGB-D image with specified camera configuration."""
# OpenGL camera settings.
lookdir = np.float32([0, 0, 1]).reshape(3, 1)
updir = np.float32([0, -1, 0]).reshape(3, 1)
rotation = pb.getMatrixFromQuaternion(config["rotation"])
rotm = np.float32(rotation).reshape(3, 3)
lookdir = (rotm @ lookdir).reshape(-1)
updir = (rotm @ updir).reshape(-1)
lookat = config["position"] + lookdir
focal_len = config["intrinsics"][0, 0]
znear, zfar = config["zrange"]
viewm = pb.computeViewMatrix(config["position"], lookat, updir)
fovh = (config["image_size"][0] / 2) / focal_len
fovh = 180 * np.arctan(fovh) * 2 / np.pi
# Notes: 1) FOV is vertical FOV 2) aspect must be float
aspect_ratio = config["image_size"][1] / config["image_size"][0]
projm = pb.computeProjectionMatrixFOV(fovh, aspect_ratio, znear, zfar)
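# The FOV computation above is the pinhole model: with focal length f in pixels
# and image height H, the vertical field of view is fov = 2 * atan((H / 2) / f),
# converted to degrees because computeProjectionMatrixFOV expects degrees.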
# Render with OpenGL camera settings.
_, _, color, depth, segm = self._pb.getCameraImage(
width=config["image_size"][1],
height=config["image_size"][0],
viewMatrix=viewm,
projectionMatrix=projm,
shadow=0,
flags=pb.ER_SEGMENTATION_MASK_OBJECT_AND_LINKINDEX,
renderer=pb.ER_BULLET_HARDWARE_OPENGL,
)
# Get color image.
color_image_size = (config["image_size"][0], config["image_size"][1], 4)
color = np.array(color, dtype=np.uint8).reshape(color_image_size)
color = color[:, :, :3] # remove alpha channel
if config["noise"]:
color = np.int32(color)
color += np.int32(self._random.normal(0, 3, color.shape))
color = np.uint8(np.clip(color, 0, 255))
# Get depth image.
depth_image_size = (config["image_size"][0], config["image_size"][1])
zbuffer = np.array(depth).reshape(depth_image_size)
depth = zfar + znear - (2.0 * zbuffer - 1.0) * (zfar - znear)
depth = (2.0 * znear * zfar) / depth
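# The two lines above linearize the OpenGL depth buffer: for a buffer value
# z_b in [0, 1], NDC depth is z_n = 2 * z_b - 1, and metric depth follows as
# depth = 2 * znear * zfar / (zfar + znear - z_n * (zfar - znear)).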
if config["noise"]:
depth += self._random.normal(0, 0.003, depth_image_size)
# Get segmentation image.
segm = np.uint8(segm).reshape(depth_image_size)
return color, depth, segm
def __del__(self):
self._pb.disconnect()
def get_link_pose(self, body, link):
result = self._pb.getLinkState(body, link)
return result[4], result[5]
def add_objects(self, num_obj, workspace_limits):
"""Randomly dropped objects to the workspace"""
color_space = (
np.asarray(
[
[78.0, 121.0, 167.0], # blue
[89.0, 161.0, 79.0], # green
[156, 117, 95], # brown
[242, 142, 43], # orange
[237.0, 201.0, 72.0], # yellow
[186, 176, 172], # gray
[255.0, 87.0, 89.0], # red
[176, 122, 161], # purple
[118, 183, 178], # cyan
[255, 157, 167], # pink
]
)
/ 255.0
)
mesh_list = glob.glob("assets/blocks/*.urdf")
obj_mesh_ind = np.random.randint(0, len(mesh_list), size=num_obj)
obj_mesh_color = color_space[np.asarray(range(num_obj)) % 10, :]
# Add each object to robot workspace at x,y location and orientation (random or pre-loaded)
body_ids = []
with open("hard-cases/" + "temp.txt", "w") as out_file:
for object_idx in range(len(obj_mesh_ind)):
curr_mesh_file = mesh_list[obj_mesh_ind[object_idx]]
drop_x = (
(workspace_limits[0][1] - workspace_limits[0][0] - 0.2) * np.random.random_sample()
+ workspace_limits[0][0]
+ 0.1
)
drop_y = (
(workspace_limits[1][1] - workspace_limits[1][0] - 0.2) * np.random.random_sample()
+ workspace_limits[1][0]
+ 0.1
)
object_position = [drop_x, drop_y, 0.2]
object_orientation = [
2 * np.pi * np.random.random_sample(),
2 * np.pi * np.random.random_sample(),
2 * np.pi * np.random.random_sample(),
]
object_color = [
obj_mesh_color[object_idx][0],
obj_mesh_color[object_idx][1],
obj_mesh_color[object_idx][2],
1,
]
body_id = self._pb.loadURDF(
curr_mesh_file, object_position, self._pb.getQuaternionFromEuler(object_orientation),
flags=pb.URDF_ENABLE_SLEEPING
)
self._pb.changeVisualShape(body_id, -1, rgbaColor=object_color)
body_ids.append(body_id)
self.add_object_id(body_id)
self.wait_static()
out_file.write(
"%s %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e\n"
% (
curr_mesh_file,
object_color[0],
object_color[1],
object_color[2],
object_position[0],
object_position[1],
object_position[2],
object_orientation[0],
object_orientation[1],
object_orientation[2],
)
)
return body_ids, True
def add_object_push_from_file(self, file_name, switch=None):
success = True
# Read data
with open(file_name, "r") as preset_file:
file_content = preset_file.readlines()
num_obj = len(file_content)
obj_files = []
obj_mesh_colors = []
obj_positions = []
obj_orientations = []
for object_idx in range(num_obj):
file_content_curr_object = file_content[object_idx].split()
obj_file = os.path.join("assets", "blocks", file_content_curr_object[0])
obj_files.append(obj_file)
obj_positions.append(
[
float(file_content_curr_object[4]),
float(file_content_curr_object[5]),
float(file_content_curr_object[6]),
]
)
obj_orientations.append(
[
float(file_content_curr_object[7]),
float(file_content_curr_object[8]),
float(file_content_curr_object[9]),
]
)
obj_mesh_colors.append(
[
float(file_content_curr_object[1]),
float(file_content_curr_object[2]),
float(file_content_curr_object[3]),
]
)
# Switch color of the first and the second for augmentation
if switch is not None:
temp = obj_mesh_colors[0]
obj_mesh_colors[0] = obj_mesh_colors[switch]
obj_mesh_colors[switch] = temp
# Import objects
for object_idx in range(num_obj):
curr_mesh_file = obj_files[object_idx]
object_position = [
obj_positions[object_idx][0],
obj_positions[object_idx][1],
obj_positions[object_idx][2],
]
object_orientation = [
obj_orientations[object_idx][0],
obj_orientations[object_idx][1],
obj_orientations[object_idx][2],
]
object_color = [
obj_mesh_colors[object_idx][0],
obj_mesh_colors[object_idx][1],
obj_mesh_colors[object_idx][2],
1,
]
body_id = self._pb.loadURDF(
curr_mesh_file,
object_position,
self._pb.getQuaternionFromEuler(object_orientation),
flags=pb.URDF_ENABLE_SLEEPING
)
self._pb.changeVisualShape(body_id, -1, rgbaColor=object_color)
self.add_object_id(body_id)
if switch is not None:
if switch == object_idx:
self.target_obj_id = body_id
else:
if object_idx == 0:
self.target_obj_id = body_id
success &= self.wait_static()
success &= self.wait_static()
# give time to stop
for _ in range(5):
self._pb.stepSimulation()
return success
# ---------------------------------------------------------------------------
# Robot Movement Functions
# ---------------------------------------------------------------------------
def go_home(self):
return self.move_joints(self.home_joints)
def move_joints(self, target_joints, speed=0.01, timeout=3):
"""Move UR5e to target joint configuration."""
t0 = time.time()
while (time.time() - t0) < timeout:
current_joints = np.array(
[
self._pb.getJointState(self.ur5e, i, physicsClientId=self._client_id)[0]
for i in self.ur5e_joints
]
)
pos, _ = self.get_link_pose(self.ee, self.ee_tip_id)
if pos[2] < 0.005:
print(f"Warning: move_joints tip height is {pos[2]}. Skipping.")
return False
diff_joints = target_joints - current_joints
if all(np.abs(diff_joints) < 0.05):
# give time to stop
for _ in range(5):
self._pb.stepSimulation()
return True
# Move with constant velocity
norm = np.linalg.norm(diff_joints)
v = diff_joints / norm if norm > 0 else 0
step_joints = current_joints + v * speed
self._pb.setJointMotorControlArray(
bodyIndex=self.ur5e,
jointIndices=self.ur5e_joints,
controlMode=pb.POSITION_CONTROL,
targetPositions=step_joints,
positionGains=np.ones(len(self.ur5e_joints)),
)
self._pb.stepSimulation()
print(f"Warning: move_joints exceeded {timeout} second timeout. Skipping.")
return False
def move_ee_pose(self, pose, speed=0.01):
"""Move UR5e to target end effector pose."""
target_joints = self.solve_ik(pose)
return self.move_joints(target_joints, speed)
def solve_ik(self, pose):
"""Calculate joint configuration with inverse kinematics."""
joints = self._pb.calculateInverseKinematics(
bodyUniqueId=self.ur5e,
endEffectorLinkIndex=self.ur5e_ee_id,
targetPosition=pose[0],
targetOrientation=pose[1],
lowerLimits=[-6.283, -6.283, -3.141, -6.283, -6.283, -6.283],
upperLimits=[6.283, 6.283, 3.141, 6.283, 6.283, 6.283],
jointRanges=[12.566, 12.566, 6.282, 12.566, 12.566, 12.566],
restPoses=np.float32(self.ik_rest_joints).tolist(),
maxNumIterations=100,
residualThreshold=1e-5,
)
joints = np.array(joints, dtype=np.float32)
# joints[2:] = (joints[2:] + np.pi) % (2 * np.pi) - np.pi
return joints
def straight_move(self, pose0, pose1, rot, speed=0.01, max_force=300, detect_force=False, is_push=False):
"""Move every 1 cm, keep the move in a straight line instead of a curve. Keep level with rot"""
step_distance = 0.01 # every 1 cm
vec = np.float32(pose1) - np.float32(pose0)
length = np.linalg.norm(vec)
vec = vec / length
n_push = np.int32(np.floor(length / step_distance)) # every 1 cm
success = True
for n in range(n_push):
target = pose0 + vec * n * step_distance
success &= self.move_ee_pose((target, rot), speed)
if detect_force:
force = np.sum(
np.abs(np.array(self._pb.getJointState(self.ur5e, self.ur5e_ee_id)[2]))
)
if force > max_force:
target = target - vec * 2 * step_distance
self.move_ee_pose((target, rot), speed)
print(f"Force is {force}, exceed the max force {max_force}")
return False
if is_push:
speed /= 5
success &= self.move_ee_pose((pose1, rot), speed)
return success
def push(self, pose0, pose1, speed=0.002, verbose=True):
"""Execute pushing primitive.
Args:
pose0: SE(3) starting pose.
pose1: SE(3) ending pose.
speed: the speed of the planar push.
Returns:
success: robot movement success if True.
"""
# close the gripper
self.close_gripper(is_slow=False)
# Adjust push start and end positions.
pos0 = np.array(pose0, dtype=np.float32)
pos1 = np.array(pose1, dtype=np.float32)
pos0[2] += self.ee_tip_offset
pos1[2] += self.ee_tip_offset
vec = pos1 - pos0
length = np.linalg.norm(vec)
vec = vec / length
over0 = np.array((pos0[0], pos0[1], pos0[2] + 0.05))
over0h = np.array((pos0[0], pos0[1], pos0[2] + 0.2))
over1 = np.array((pos1[0], pos1[1], pos1[2] + 0.05))
over1h = np.array((pos1[0], pos1[1], pos1[2] + 0.2))
# Align against push direction.
theta = np.arctan2(vec[1], vec[0]) + np.pi / 2
rot = pb.getQuaternionFromEuler([np.pi / 2, np.pi / 2, theta])
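# theta is the push heading from arctan2 plus pi / 2, so the closed gripper is
# held side-on to the motion; reading the Euler triple as aligning the fingers
# perpendicular to the push direction is an interpretation of this code, not
# something documented in the repo.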
# Execute push.
success = self.move_joints(self.ik_rest_joints)
if success:
success = self.move_ee_pose((over0h, rot))
if success:
success = self.straight_move(over0h, over0, rot, detect_force=True)
if success:
success = self.straight_move(over0, pos0, rot, detect_force=True)
if success:
success = self.straight_move(pos0, pos1, rot, speed, detect_force=True, is_push=True)
if success:
success = self.straight_move(pos1, over1, rot, speed)
if success:
success = self.straight_move(over1, over1h, rot)
self.go_home()
if verbose:
print(f"Push from {pose0} to {pose1}, {success}")
return success
def grasp(self, pose, angle, speed=0.005):
"""Execute grasping primitive.
Args:
pose: SE(3) grasping pose.
angle: rotation angle
Returns:
success: robot movement success if True.
"""
# Handle unexpected behavior
self._pb.changeDynamics(
self.ee, self.ee_finger_pad_id, lateralFriction=0.9, spinningFriction=0.1
)
# Adjust grasp positions.
pos = np.array(pose, dtype=np.float32)
pos[2] = max(pos[2] - 0.04, self.bounds[2][0])
pos[2] += self.ee_tip_offset
# Align against grasp direction.
angle = ((angle) % np.pi) - np.pi / 2
rot = pb.getQuaternionFromEuler([np.pi / 2, np.pi / 2, -angle])
over = np.array((pos[0], pos[1], pos[2] + 0.2))
# Execute grasp.
self.open_gripper()
success = self.move_joints(self.ik_rest_joints)
if success:
success = self.move_ee_pose((over, rot))
if success:
success = self.straight_move(over, pos, rot, speed, detect_force=True)
if success:
self.close_gripper()
success = self.straight_move(pos, over, rot, speed)
success &= self.is_gripper_closed
if success:
success = self.move_joints(self.drop_joints1)
success &= self.is_gripper_closed
self.open_gripper(is_slow=True)
self.go_home()
print(f"Grasp at {pose}, the grasp {success}")
self._pb.changeDynamics(
self.ee, self.ee_finger_pad_id, lateralFriction=0.9
)
return success
def open_gripper(self, is_slow=False):
self._move_gripper(self.gripper_angle_open, is_slow=is_slow)
def close_gripper(self, is_slow=True):
self._move_gripper(self.gripper_angle_close, is_slow=is_slow)
@property
def is_gripper_closed(self):
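# Heuristic: the fingers are commanded to gripper_angle_close (0.8 rad); if the
# joint stopped short of gripper_angle_close_threshold (0.73 rad), something is
# blocking the fingers, so a reading below the threshold is taken as a
# successful hold.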
gripper_angle = self._pb.getJointState(
self.ee, self.gripper_main_joint, physicsClientId=self._client_id
)[0]
return gripper_angle < self.gripper_angle_close_threshold
def _move_gripper(self, target_angle, timeout=3, is_slow=False):
t0 = time.time()
prev_angle = self._pb.getJointState(
self.ee, self.gripper_main_joint, physicsClientId=self._client_id
)[0]
if is_slow:
self._pb.setJointMotorControl2(
self.ee,
self.gripper_main_joint,
pb.VELOCITY_CONTROL,
targetVelocity=1 if target_angle > 0.5 else -1,
maxVelocity=1 if target_angle > 0.5 else -1,
force=3,
physicsClientId=self._client_id,
)
self._pb.setJointMotorControl2(
self.ee,
self.gripper_mimic_joints["right_outer_knuckle_joint"],
pb.VELOCITY_CONTROL,
targetVelocity=1 if target_angle > 0.5 else -1,
maxVelocity=1 if target_angle > 0.5 else -1,
force=3,
physicsClientId=self._client_id,
)
for _ in range(10):
self._pb.stepSimulation()
while (time.time() - t0) < timeout:
current_angle = self._pb.getJointState(self.ee, self.gripper_main_joint)[0]
diff_angle = abs(current_angle - prev_angle)
if diff_angle < 1e-4:
break
prev_angle = current_angle
for _ in range(10):
self._pb.stepSimulation()
# maintain the angles
self._pb.setJointMotorControl2(
self.ee,
self.gripper_main_joint,
pb.POSITION_CONTROL,
targetPosition=target_angle,
force=3.1,
)
self._pb.setJointMotorControl2(
self.ee,
self.gripper_mimic_joints["right_outer_knuckle_joint"],
pb.POSITION_CONTROL,
targetPosition=target_angle,
force=3.1,
)
for _ in range(10):
self._pb.stepSimulation()
if __name__ == "__main__":
env = Environment()
env.reset()
print(pb.getPhysicsEngineParameters(env._client_id))
time.sleep(1)
# env.add_object_push_from_file("hard-cases/temp.txt", switch=None)
# push_start = [4.280000000000000471e-01, -3.400000000000000244e-02, 0.01]
# push_end = [5.020000000000000018e-01, -3.400000000000000244e-02, 0.01]
# env.push(push_start, push_end)
# time.sleep(1)
env.render_camera(env.oracle_cams[0])
for i in range(16):
best_rotation_angle = np.deg2rad(90 - i * (360.0 / 16))
primitive_position = [0.6, 0, 0.01]
primitive_position_end = [
primitive_position[0] + 0.1 * np.cos(best_rotation_angle),
primitive_position[1] + 0.1 * np.sin(best_rotation_angle),
0.01,
]
env.push(primitive_position, primitive_position_end, speed=0.0002)
env._pb.addUserDebugLine(primitive_position, primitive_position_end, lifeTime=0)
# angle = np.deg2rad(i * 360 / 16)
# pos = [0.5, 0, 0.05]
# env.grasp(pos, angle)
time.sleep(1)
# more-main/lifelong_trainer.py
import numpy as np
import utils
import torch
from models import PushNet, reinforcement_net
from dataset import LifelongDataset
import argparse
import time
import datetime
import cv2
from torchvision.transforms import ToPILImage
import os
from constants import (
GRIPPER_GRASP_INNER_DISTANCE,
GRIPPER_GRASP_INNER_DISTANCE_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
GRIPPER_GRASP_SAFE_WIDTH_PIXEL,
GRIPPER_GRASP_WIDTH_PIXEL,
PUSH_DISTANCE_PIXEL,
NUM_ROTATION,
)
from torch.utils.tensorboard import SummaryWriter
from torch.autograd import Variable
import log_utils
import torch_utils
def parse_args():
default_params = {
"lr": 1e-4,
"batch_size": 8,
"t_0": 50, # CosineAnnealing, start 1 21 61
"t_mult": 2, # CosineAnnealing, period 20 40
"eta_min": 1e-8, # CosineAnnealing, minimum lr
"epochs": 51, # CosineAnnealing, should end before warm start
# "lr": 1e-5,
# "batch_size": 28,
# "t_0": 20, # CosineAnnealing, start 1 21 61
# "t_mult": 2, # CosineAnnealing, period 20 40
# "eta_min": 1e-8, # CosineAnnealing, minimum lr
# "epochs": 21, # CosineAnnealing, should end before warm start
"loss_beta": 0.8,
"num_rotation": NUM_ROTATION,
}
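# With t_0 = 50, t_mult = 2, and epochs = 51, CosineAnnealingWarmRestarts
# completes one full annealing cycle over epochs 0-49 and training stops right
# after the first warm restart, matching the "should end before warm start"
# note above.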
parser = argparse.ArgumentParser(description="Train lifelong")
parser.add_argument(
"--lr",
action="store",
type=float,
default=default_params["lr"],
help="Enter the learning rate",
)
parser.add_argument(
"--batch_size",
action="store",
default=default_params["batch_size"],
type=int,
help="Enter the batchsize for training and testing",
)
parser.add_argument(
"--t_0",
action="store",
default=default_params["t_0"],
type=int,
help="The t_0 of CosineAnnealing",
)
parser.add_argument(
"--t_mult",
action="store",
default=default_params["t_mult"],
type=int,
help="The t_mult of CosineAnnealing",
)
parser.add_argument(
"--eta_min",
action="store",
default=default_params["eta_min"],
type=float,
help="The eta_min of CosineAnnealing",
)
parser.add_argument(
"--epochs",
action="store",
default=default_params["epochs"],
type=int,
help="Enter the epoch for training",
)
parser.add_argument(
"--loss_beta",
action="store",
default=default_params["loss_beta"],
type=float,
help="The beta of SmoothL1Loss",
)
parser.add_argument(
"--num_rotation",
action="store",
default=default_params["num_rotation"],
type=int,
help="Number of rotation",
)
parser.add_argument("--dataset_root", action="store", help="Enter the path to the dataset")
parser.add_argument(
"--pretrained_model", action="store", help="The path to the pretrained model"
)
parser.add_argument(
"--ratio",
action="store",
default=1,
type=float,
help="ratio of how many data we use",
)
args = parser.parse_args()
return args
def get_prediction_vis(predictions, color_heightmap, best_pix_ind, is_push=False):
canvas = None
num_rotations = predictions.shape[0]
for canvas_row in range(int(num_rotations / 4)):
tmp_row_canvas = None
for canvas_col in range(4):
rotate_idx = canvas_row * 4 + canvas_col
prediction_vis = predictions[rotate_idx, :, :].copy()
prediction_vis = np.clip(prediction_vis, 0, 1)
prediction_vis.shape = (predictions.shape[1], predictions.shape[2])
prediction_vis = cv2.applyColorMap(
(prediction_vis * 255).astype(np.uint8), cv2.COLORMAP_JET
)
if rotate_idx == best_pix_ind[0]:
prediction_vis = cv2.circle(
prediction_vis, (int(best_pix_ind[2]), int(best_pix_ind[1])), 7, (0, 0, 255), 2,
)
prediction_vis = utils.rotate(prediction_vis, rotate_idx * (360.0 / num_rotations))
if rotate_idx == best_pix_ind[0]:
center = np.array([[[int(best_pix_ind[2]), int(best_pix_ind[1])]]])
M = cv2.getRotationMatrix2D(
(prediction_vis.shape[1] // 2, prediction_vis.shape[0] // 2,),
rotate_idx * (360.0 / num_rotations),
1,
)
center = cv2.transform(center, M)
center = np.transpose(center[0])
if is_push:
point_from = (int(center[0]), int(center[1]))
point_to = (int(center[0] + PUSH_DISTANCE_PIXEL), int(center[1]))
prediction_vis = cv2.arrowedLine(
prediction_vis, point_from, point_to, (100, 255, 0), 2, tipLength=0.2,
)
else:
prediction_vis = cv2.rectangle(
prediction_vis,
(
max(0, int(center[0]) - GRIPPER_GRASP_INNER_DISTANCE // 2),
max(0, int(center[1]) - GRIPPER_GRASP_WIDTH_PIXEL // 2),
),
(
min(
prediction_vis.shape[1],
int(center[0]) + GRIPPER_GRASP_INNER_DISTANCE_PIXEL // 2,
),
min(
prediction_vis.shape[0],
int(center[1]) + GRIPPER_GRASP_WIDTH_PIXEL // 2,
),
),
(100, 255, 0),
1,
)
prediction_vis = cv2.rectangle(
prediction_vis,
(
max(0, int(center[0]) - GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2),
max(0, int(center[1]) - GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2),
),
(
min(
prediction_vis.shape[1],
int(center[0]) + GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2,
),
min(
prediction_vis.shape[0],
int(center[1]) + GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2,
),
),
(100, 100, 155),
1,
)
background_image = utils.rotate(color_heightmap, rotate_idx * (360.0 / num_rotations))
prediction_vis = (
0.5 * cv2.cvtColor(background_image, cv2.COLOR_RGB2BGR) + 0.5 * prediction_vis
).astype(np.uint8)
if tmp_row_canvas is None:
tmp_row_canvas = prediction_vis
else:
tmp_row_canvas = np.concatenate((tmp_row_canvas, prediction_vis), axis=1)
if canvas is None:
canvas = tmp_row_canvas
else:
canvas = np.concatenate((canvas, tmp_row_canvas), axis=0)
return canvas
class LifelongTrainer:
def __init__(self, args):
self.params = {
"lr": args.lr,
"batch_size": args.batch_size,
"t_0": args.t_0, # CosineAnnealing, start 0 4 12 28
"t_mult": args.t_mult, # CosineAnnealing, period 4 8 16
"eta_min": args.eta_min, # CosineAnnealing, minimum lr
"epochs": args.epochs, # CosineAnnealing, should end before warm start
"loss_beta": args.loss_beta,
"num_rotation": args.num_rotation,
"ratio": args.ratio,
}
self.dataset_root = args.dataset_root
self.pretrained_model = args.pretrained_model
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
log_dir_path = self.dataset_root[0: self.dataset_root.rfind('/')]
self.log_dir = os.path.join(log_dir_path, "runs")
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
timestamp_value = datetime.datetime.fromtimestamp(time.time())
time_name = timestamp_value.strftime("%Y-%m-%d-%H-%M")
self.log_dir = os.path.join(self.log_dir, time_name)
self.tb_logger = SummaryWriter(self.log_dir)
self.logger = log_utils.setup_logger(self.log_dir, "Lifelong")
def main(self):
model = PushNet(True)
# model = reinforcement_net(True)
model = model.to(self.device)
criterion = torch.nn.SmoothL1Loss(beta=self.params["loss_beta"], reduction="none")
optimizer = torch.optim.SGD(
[p for p in model.parameters() if p.requires_grad],
lr=self.params["lr"],
momentum=0.9,
weight_decay=2e-5,
)
# criterion = torch.nn.BCEWithLogitsLoss(reduction="none")
# optimizer = torch.optim.Adam([p for p in model.parameters() if p.requires_grad], lr=args.lr)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
optimizer,
T_0=self.params["t_0"],
T_mult=self.params["t_mult"],
eta_min=self.params["eta_min"],
last_epoch=-1,
verbose=False,
)
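# A sketch of the resulting schedule under the defaults above: the lr starts at
# 1e-4, follows half a cosine down to eta_min = 1e-8 across the first t_0 = 50
# epochs, and would then restart at 1e-4 with a t_0 * t_mult = 100 epoch
# period; with epochs = 51 only the first cycle is actually used.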
start_epoch = 0
if self.pretrained_model is not None:
checkpoint = torch.load(self.pretrained_model)
model.load_state_dict(checkpoint["model"], strict=False)
# optimizer.load_state_dict(checkpoint["optimizer"])
# lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
# start_epoch = checkpoint["epoch"] + 1
# prev_params = checkpoint["params"]
self.logger.info(f"Hyperparameters: {self.params}")
if self.pretrained_model is not None:
self.logger.info(f"Start from the pretrained model: {self.pretrained_model}")
# self.logger.info(f"Previous Hyperparameters: {prev_params}")
data_loader_train = self._get_data_loader(self.params["batch_size"], self.params["ratio"], shuffle=True)
for epoch in range(start_epoch, self.params["epochs"]):
# warmup start
if epoch < 0:
warmup_factor = 0.001
warmup_iters = min(1000, len(data_loader_train) - 1)
current_lr_scheduler = torch_utils.warmup_lr_scheduler(
optimizer, warmup_iters, warmup_factor
)
else:
current_lr_scheduler = lr_scheduler
train_loss = self._train_one_epoch(
model, criterion, optimizer, data_loader_train, current_lr_scheduler, epoch,
)
if epoch % 2 == 0 or (self.params["epochs"] - epoch) < 2:
save_state = {
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"params": self.params,
}
torch.save(save_state, os.path.join(self.log_dir, f"lifelong_model-{epoch}.pth"))
self.tb_logger.add_scalars("Epoch_Loss", {"train": train_loss}, epoch)
self.tb_logger.flush()
self.tb_logger.add_hparams(self.params, {"hparam/train": train_loss})
self.logger.info("Training completed!")
def _train_one_epoch(
self, model, criterion, optimizer, data_loader, lr_scheduler, epoch, print_freq=50,
):
model.train()
metric_logger = log_utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", log_utils.SmoothedValue(window_size=1, fmt="{value:.12f}"))
metric_logger.add_meter("loss", log_utils.SmoothedValue())
metric_logger.add_meter("error", log_utils.SmoothedValue())
header = "Epoch: [{}]".format(epoch)
losses = []
n_iter = 0
total_iters = len(data_loader)
for (
mask_images,
target_images,
weight_images,
best_locs
# for (
# color_images,
# depth_images,
# help_images,
# target_images,
# weight_images,
# best_loc,
) in metric_logger.log_every(data_loader, print_freq, self.logger, header):
# color_images = color_images.to(self.device, non_blocking=True)
# depth_images = depth_images.to(self.device, non_blocking=True)
# help_images = help_images.to(self.device, non_blocking=True)
mask_images = mask_images.to(self.device, non_blocking=True)
target_images = target_images.to(self.device, non_blocking=True)
weight_images = weight_images.to(self.device, non_blocking=True)
output_prob = model(mask_images)
# output_prob = model(color_images, depth_images, help_images)
# output_prob = model(
# color_images, depth_images, input_help_data=None, use_push=True, push_only=True
# )
errors = 0
for i in range(best_locs.size(0)):
error = (
output_prob[i, 0, best_locs[i][0], best_locs[i][1]]
- target_images[i, 0, best_locs[i][0], best_locs[i][1]]
)
error = error.abs()
errors += error
error = errors / best_locs.size(0)
loss = criterion(output_prob, target_images) * weight_images
loss = loss.sum() / target_images.size(0)
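# The criterion is element-wise (reduction="none"), so the SmoothL1 map is
# re-weighted per pixel by weight_images, presumably so labeled push locations
# outweigh the mostly-empty background of the target maps, then summed and
# normalized by batch size.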
optimizer.zero_grad()
loss.backward()
optimizer.step()
# log
log_loss = loss.item()
log_lr = optimizer.param_groups[0]["lr"]
log_error = error.item()
metric_logger.update(loss=log_loss, lr=log_lr, error=log_error)
self.tb_logger.add_scalar("Step/Train/Loss", log_loss, total_iters * epoch + n_iter)
self.tb_logger.add_scalar("Step/Train/Error", log_error, total_iters * epoch + n_iter)
self.tb_logger.add_scalar("Step/LR", log_lr, total_iters * epoch + n_iter)
losses.append(log_loss)
if epoch == 0:
lr_scheduler.step()
n_iter += 1
if epoch != 0:
lr_scheduler.step(epoch)
# color version
# push_predictions = output_prob[0][0].cpu().detach().numpy()
# color_img = ToPILImage()(color_images[0].cpu()).convert("RGB")
# color_img = np.array(color_img)
# color_img = color_img[:, :, ::-1].copy()
# center = np.unravel_index(np.argmax(push_predictions), push_predictions.shape)
# point_from = (int(center[1]), int(center[0]))
# point_to = (int(center[1] + PUSH_DISTANCE_PIXEL), int(center[0]))
# color_img = cv2.arrowedLine(
# color_img, point_from, point_to, (100, 255, 0), 2, tipLength=0.2,
# )
# cv2.imwrite(f"vis{epoch}_color.png", color_img)
# mask version
push_predictions = output_prob[0][0].cpu().detach().numpy()
center = np.unravel_index(np.argmax(push_predictions), push_predictions.shape)
push_predictions[push_predictions < 0] = 0
push_predictions = push_predictions / np.max(push_predictions)
push_predictions = np.clip(push_predictions, 0, 1)
push_predictions = cv2.applyColorMap(
(push_predictions * 255).astype(np.uint8), cv2.COLORMAP_JET
)
mask_image = ToPILImage()(mask_images[0].cpu())
mask_image = np.array(mask_image)
mask_image = np.array(mask_image[:, :, 1])
point_from = (int(center[1]), int(center[0]))
point_to = (int(center[1] + PUSH_DISTANCE_PIXEL), int(center[0]))
mask_image = cv2.arrowedLine(mask_image, point_from, point_to, 200, 2, tipLength=0.2,)
point_from = (int(best_locs[0][1]), int(best_locs[0][0]))
point_to = (point_from[0] + PUSH_DISTANCE_PIXEL, point_from[1])
if torch.max(target_images[0]) >= 1:
mask_image = cv2.arrowedLine(mask_image, point_from, point_to, 160, 3, tipLength=0.1,)
else:
mask_image = cv2.arrowedLine(mask_image, point_from, point_to, 100, 2, tipLength=0.1,)
prediction_vis = (
0.5 * cv2.cvtColor(mask_image, cv2.COLOR_RGB2BGR) + 0.5 * push_predictions
).astype(np.uint8)
cv2.imwrite(f"vis{epoch}_mask.png", prediction_vis)
return sum(losses) / len(losses)
def _get_data_loader(self, batch_size, ratio=1, shuffle=False):
"""Get data loader."""
dataset = LifelongDataset(self.dataset_root, ratio)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle, num_workers=0, drop_last=False
)
return data_loader
if __name__ == "__main__":
args = parse_args()
trainer = LifelongTrainer(args)
trainer.main()
# more-main/evaluate.py
import os
import numpy as np
import argparse
import glob
parser = argparse.ArgumentParser()
# parser.add_argument('--file_reward', action='store', type=str)
# parser.add_argument('--file_action', action='store', type=str)
parser.add_argument('--log', action='store', type=str)
parser.add_argument('--num', default=1, type=int, action='store')
args = parser.parse_args()
sub_roots = glob.glob(f"{args.log}/*")
sub_roots = sorted(sub_roots, key=lambda sub: sub.split('-')[-1])
total = []
completion = []
grasp_success = []
num_action = []
time = []
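# For each run directory the loop below reports: total actions, completions
# (rewarded actions, i.e. successful target retrievals), actions per retrieval,
# grasp success rate (rewards among actions whose first log field is 1, the
# grasp marker in more_main.py), and average per-trial time.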
for sub in sub_roots:
reward_file = os.path.join(sub, "transitions", "label-value.log.txt")
action_file = os.path.join(sub, "transitions", "executed-action.log.txt")
time_file = os.path.join(sub, "transitions", "executed-time.log.txt")
reward_log = np.loadtxt(reward_file, delimiter=' ')
print(sub)
print(f"total action is: {len(reward_log)}")
print(f"get the target object: {np.sum(reward_log == 1)}")
print(f"average number: {len(reward_log) / np.sum(reward_log == 1)}")
total.append(len(reward_log) / np.sum(reward_log == 1))
completion.append(np.sum(reward_log == 1))
num_action.append(len(reward_log) / np.sum(reward_log == 1))
action_log = np.loadtxt(action_file, delimiter=' ')
assert len(reward_log) == len(action_log)
action_log = action_log[:, 0]
print(f"grasp success: {np.sum(reward_log[action_log == 1]) / np.sum(action_log == 1)}")
print(reward_log[action_log == 1])
grasp_success.append(np.sum(reward_log[action_log == 1]) / np.sum(action_log == 1))
time_log = np.loadtxt(time_file, delimiter=' ')
average_time = np.sum(time_log) / len(time_log)
print(f'time: {time_log}, average: {average_time}')
time.append(average_time)
print(sum(total) / len(total))
print(f"completion: {completion}")
print(f"grasp_success: {grasp_success}")
print(f"num_action: {num_action}")
print(f"time: {time}")
# new = os.path.isfile(os.path.join(args.log, "transitions", "label-value.log.txt"))
# print(args.log)
# if new:
# reward_file = os.path.join(args.log, "transitions", "label-value.log.txt")
# action_file = os.path.join(args.log, "transitions", "executed-action.log.txt")
# reward_log = np.loadtxt(reward_file, delimiter=' ')
# print(f"total action is: {len(reward_log)}")
# print(f"get the target object: {np.sum(reward_log == 1)}")
# print(f"average number: {len(reward_log) / np.sum(reward_log == 1)}")
# action_log = np.loadtxt(action_file, delimiter=' ')
# assert len(reward_log) == len(action_log)
# action_log = action_log[:, 0]
# print(f"grasp success: {np.sum(reward_log[action_log == 1]) / np.sum(action_log == 1)}")
# print(reward_log[action_log == 1])
# else:
# reward_file = os.path.join(args.log, "transitions", "reward-value.log.txt")
# action_file = os.path.join(args.log, "transitions", "executed-action.log.txt")
# reward_log = np.loadtxt(reward_file, delimiter=' ')
# print(f"total action is: {len(reward_log)}")
# print(f"get the target object: {np.sum(reward_log == 1)}")
# print(f"average number: {len(reward_log) / np.sum(reward_log == 1)}")
# action_log = np.loadtxt(action_file, delimiter=' ')
# assert len(reward_log) == len(action_log)
# action_log = action_log[:, 0]
# print(f"grasp success: {np.sum(reward_log[action_log == 0]) / np.sum(action_log == 0)}")
# print(reward_log[action_log == 0])
# more-main/constants.py
import numpy as np
import math
IS_REAL = False
WORKSPACE_LIMITS = np.asarray([[0.276, 0.724], [-0.224, 0.224], [-0.0001, 0.4]])
# image
REAL_PIXEL_SIZE = 0.002
REAL_IMAGE_SIZE = 224
PIXEL_SIZE = 0.002
IMAGE_SIZE = 224
IMAGE_OBJ_CROP_SIZE = 60 # this is related to the IMAGE_SIZE and PIXEL_SIZE
IMAGE_PAD_SIZE = math.ceil(IMAGE_SIZE * math.sqrt(2) / 32) * 32 # 320
IMAGE_PAD_WIDTH = math.ceil((IMAGE_PAD_SIZE - IMAGE_SIZE) / 2) # 48
IMAGE_PAD_DIFF = IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH # 272
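# Why pad: rotating the 224 px heightmap needs up to 224 * sqrt(2) ~ 317 px to
# keep the corners in view, and rounding up to a multiple of 32 (hence 320) is
# presumably chosen to match the network's downsampling strides.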
# gripper
GRIPPER_GRASP_INNER_DISTANCE = 0.07
GRIPPER_GRASP_INNER_DISTANCE_PIXEL = math.ceil(GRIPPER_GRASP_INNER_DISTANCE / PIXEL_SIZE) # 35
GRIPPER_GRASP_OUTER_DISTANCE = 0.125
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL = math.ceil(GRIPPER_GRASP_OUTER_DISTANCE / PIXEL_SIZE) # 63
GRIPPER_GRASP_WIDTH = 0.022
GRIPPER_GRASP_WIDTH_PIXEL = math.ceil(GRIPPER_GRASP_WIDTH / PIXEL_SIZE) # 11
GRIPPER_GRASP_SAFE_WIDTH = 0.025
GRIPPER_GRASP_SAFE_WIDTH_PIXEL = math.ceil(GRIPPER_GRASP_SAFE_WIDTH / PIXEL_SIZE) # 13
GRIPPER_PUSH_RADIUS = 0.015
GRIPPER_PUSH_RADIUS_PIXEL = math.ceil(GRIPPER_PUSH_RADIUS / PIXEL_SIZE) # 8
GRIPPER_PUSH_RADIUS_SAFE_PIXEL = math.ceil(GRIPPER_PUSH_RADIUS_PIXEL * math.sqrt(2)) # 12
GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL = GRIPPER_PUSH_RADIUS_SAFE_PIXEL + 3 # 15
GRIPPER_PUSH_ADD_PIXEL = math.ceil(GRIPPER_GRASP_SAFE_WIDTH_PIXEL / 2) + 3 # 10
# PUSH_DISTANCE = 0.05 + PIXEL_SIZE * GRIPPER_PUSH_RADIUS_SAFE_PIXEL # 0.074
PUSH_DISTANCE = 0.1
PUSH_DISTANCE_PIXEL = math.ceil(PUSH_DISTANCE / PIXEL_SIZE) # 37
CONSECUTIVE_ANGLE_THRESHOLD = 0.2 # radians
CONSECUTIVE_DISTANCE_THRESHOLD = 0.05 # meters (5 cm)
GRASP_Q_PUSH_THRESHOLD = 1.0
GRASP_Q_GRASP_THRESHOLD = 0.8
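# mcts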
MCTS_ROLLOUTS = 50 # 300 for collection data
MCTS_EARLY_ROLLOUTS = -1 # 50 for collection data
MCTS_MAX_LEVEL = 3 # 4 for mcts, 3 for more
# MCTS_DISCOUNT_CONS = 0.8
MCTS_DISCOUNT = 0.5
MCTS_TOP = 10
MCTS_UCT_RATIO = np.sqrt(2)
# MCTS_STEP_COST = 0.2
NUM_ROTATION = 16
DEPTH_MIN = 0.01 # depth threshold below which a pixel does not count as a valid object
PUSH_BUFFER = 0.05
COLOR_SPACE = (
np.asarray(
[
[78, 121, 167], # blue
[89, 161, 79], # green
[156, 117, 95], # brown
[242, 142, 43], # orange
[237, 201, 72], # yellow
[186, 176, 172], # gray
[255, 87, 89], # red
[176, 122, 161], # purple
[118, 183, 178], # cyan
[255, 157, 167], # pink
]
)
/ 255.0
)
REAL_COLOR_SPACE = (
np.asarray(
[
[240,170,100], # orange
[150,200,200], # wood
[11, 200, 150], # green
[180, 200, 130], # yellow
[200, 100, 100], # red
[20, 190, 230], # blue
[20, 80, 150], # purple
]
)
/ 255.0
)
# NOTE: assume no single object type will have more than 1000 instances
MODEL_MASK_ID = {
"concave.urdf": 1000,
"cube.urdf": 2000,
"cylinder.urdf": 3000,
"half-cube.urdf": 4000,
"rect.urdf": 5000,
"triangle.urdf": 6000,
}
# norm
COLOR_MEAN = [0.0241, 0.0213, 0.0165]
COLOR_STD = [0.1122, 0.0988, 0.0819]
DEPTH_MEAN = [0.0019]
DEPTH_STD = [0.0091]
BINARY_IMAGE_MEAN = [0.0646, 0.0125]
BINARY_IMAGE_STD = [0.2410, 0.1113]
BINARY_OBJ_MEAN = [0.1900]
BINARY_OBJ_STD = [0.3707]
# pre training values
PUSH_Q = 0.25
GRASP_Q = 0.5
background_threshold = {
"low": np.array([0, 0, 120], np.uint8),
"high": np.array([255, 255, 255], np.uint8),
} # white
BG_THRESHOLD = {
"low": np.array([0, 0, 0], np.uint8),
"high": np.array([180, 255, 50], np.uint8),
} # black
# colors
real_purple_lower = np.array([100, 50, 120], np.uint8)
real_purple_upper = np.array([130, 200, 255], np.uint8)
# rgb(69, 108, 149) to hsv(105 137 149)
blue_lower = np.array([95, 87, 99], np.uint8)
blue_upper = np.array([115, 187, 199], np.uint8)
# rgb(79, 143, 70) to hsv(56 130 143)
green_lower = np.array([48, 80, 87], np.uint8)
green_upper = np.array([64, 180, 187], np.uint8)
# 11 97 131
brown_lower = np.array([8, 57, 91], np.uint8)
brown_upper = np.array([14, 137, 171], np.uint8)
# 15 209 206
orange_lower = np.array([12, 159, 156], np.uint8)
orange_upper = np.array([18, 255, 255], np.uint8)
# 23 177 202
yellow_lower = np.array([20, 127, 152], np.uint8)
yellow_upper = np.array([26, 227, 252], np.uint8)
# 158, 148, 146 to 5 19 158
gray_lower = np.array([0, 0, 108], np.uint8)
gray_upper = np.array([15, 56, 208], np.uint8)
# rgb(217, 74, 76) to 0 168 217
red_lower = np.array([0, 118, 172], np.uint8)
red_upper = np.array([10, 218, 255], np.uint8)
# rgb(148, 104, 136) to 158 76 148
purple_lower = np.array([148, 26, 98], np.uint8)
purple_upper = np.array([167, 126, 198], np.uint8)
# rgb(101, 156, 151) to 87 90 156
cyan_lower = np.array([77, 40, 106], np.uint8)
cyan_upper = np.array([97, 140, 206], np.uint8)
# rgb(216, 132, 141) to 177 99 216
pink_lower = np.array([168, 49, 166], np.uint8)
pink_upper = np.array([187, 149, 255], np.uint8)
colors_lower = [
blue_lower,
green_lower,
brown_lower,
orange_lower,
yellow_lower,
gray_lower,
red_lower,
purple_lower,
cyan_lower,
pink_lower,
]
colors_upper = [
blue_upper,
green_upper,
brown_upper,
orange_upper,
yellow_upper,
gray_upper,
red_upper,
purple_upper,
cyan_upper,
pink_upper,
]
# if IS_REAL:
# TARGET_LOWER = real_purple_lower
# TARGET_UPPER = real_purple_upper
# else:
TARGET_LOWER = blue_lower
TARGET_UPPER = blue_upper
REAL_TARGET_LOWER = real_purple_lower
REAL_TARGET_UPPER = real_purple_upper
# black background sim
# color_mean = [0.0235, 0.0195, 0.0163]
# color_std = [0.1233, 0.0975, 0.0857]
# depth_mean = [0.0022]
# depth_std = [0.0089]
# random sim
# color_mean = [0.0272, 0.0225, 0.0184]
# color_std = [0.1337, 0.1065, 0.0922]
# depth_mean = [0.0020]
# depth_std = [0.0073]
# # binary
# binary_mean = [0.2236]
# binary_std = [0.4167]
# used_binary_mean = [0.0635, 0.0289]
# used_binary_std = [0.2439, 0.1675]
# total_obj = 5
# resolution and padding resolution
# heightmap_resolution = 0.002
# resolution = 224
# resolution_pad = math.ceil(resolution * math.sqrt(2) / 32) * 32
# padding_width = math.ceil((resolution_pad - resolution) / 2)
# resolution_crop = 60
| 6,251 | 28.07907 | 95 |
py
|
more
|
more-main/collect_image_data.py
|
import time
import datetime
import os
import glob
import pybullet as p
import numpy as np
import cv2
import utils
from environment_sim import Environment
from constants import (
DEPTH_MIN,
GRIPPER_PUSH_RADIUS_PIXEL,
GRIPPER_PUSH_RADIUS_SAFE_PIXEL,
IMAGE_SIZE,
WORKSPACE_LIMITS,
REAL_COLOR_SPACE,
MODEL_MASK_ID,
)
class ImageDataCollector:
def __init__(self, start_iter=0, end_iter=2000, base_directory=None, seed=0):
self.rng = np.random.default_rng(seed)
self.depth_min = DEPTH_MIN
self.mesh_list = glob.glob("assets/blocks/*.urdf")
self.mesh_list = [mesh for mesh in self.mesh_list if mesh.split("/")[-1] in MODEL_MASK_ID]
# Create directory to save data
timestamp = time.time()
timestamp_value = datetime.datetime.fromtimestamp(timestamp)
if base_directory is None:
self.base_directory = os.path.join(
os.path.abspath("logs_image"), timestamp_value.strftime("%Y-%m-%d-%H-%M-%S")
)
else:
self.base_directory = base_directory
print("Creating data logging session: %s" % (self.base_directory))
self.color_heightmaps_directory = os.path.join(
self.base_directory, "data", "color-heightmaps"
)
self.depth_heightmaps_directory = os.path.join(
self.base_directory, "data", "depth-heightmaps"
)
self.mask_directory = os.path.join(self.base_directory, "data", "masks")
if not os.path.exists(self.color_heightmaps_directory):
os.makedirs(self.color_heightmaps_directory)
if not os.path.exists(self.depth_heightmaps_directory):
os.makedirs(self.depth_heightmaps_directory)
if not os.path.exists(self.mask_directory):
os.makedirs(self.mask_directory)
self.iter = start_iter
self.end_iter = end_iter
def reset_np_random(self, seed):
self.rng = np.random.default_rng(seed)
def save_heightmaps(
self,
iteration,
color_heightmap,
depth_heightmap,
):
color_heightmap = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2BGR)
cv2.imwrite(
os.path.join(self.color_heightmaps_directory, "%07d.color.png" % (iteration)),
color_heightmap,
)
depth_heightmap = np.round(depth_heightmap * 100000).astype(
np.uint16
) # Save depth in 1e-5 meters
cv2.imwrite(
os.path.join(self.depth_heightmaps_directory, "%07d.depth.png" % (iteration)),
depth_heightmap,
)
def save_masks(self, iteration, mask):
cv2.imwrite(os.path.join(self.mask_directory, "%07d.mask.png" % (iteration)), mask)
def add_objects(self, env, num_obj):
"""Randomly dropped objects to the workspace"""
obj_mesh_ind = self.rng.integers(0, len(self.mesh_list), size=num_obj)
# sim color
# obj_mesh_color = COLOR_SPACE[np.asarray(range(num_obj)) % len(COLOR_SPACE), :]
# real color
obj_mesh_color = self.rng.choice(REAL_COLOR_SPACE, size=4)
new_obj_mesh_color = []
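# jitter each sampled base color by up to +/-0.15 per channel, clamped to [0, 1]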
for object_color in obj_mesh_color:
new_obj_mesh_color.append([
max(0, min(1, object_color[0] + self.rng.random() * 0.3 - 0.15)),
max(0, min(1, object_color[1] + self.rng.random() * 0.3 - 0.15)),
max(0, min(1, object_color[2] + self.rng.random() * 0.3 - 0.15)),
])
obj_mesh_color = self.rng.choice(new_obj_mesh_color, size=num_obj)
# Add each object to robot workspace at x,y location and orientation (random or pre-loaded)
body_ids = []
body_mask_ids = []
for object_idx in range(len(obj_mesh_ind)):
curr_mesh_file = self.mesh_list[obj_mesh_ind[object_idx]]
# use the seeded generator (self.rng) so object placement is reproducible
drop_x = (
(WORKSPACE_LIMITS[0][1] - WORKSPACE_LIMITS[0][0] - 0.2) * self.rng.random()
+ WORKSPACE_LIMITS[0][0]
+ 0.1
)
drop_y = (
(WORKSPACE_LIMITS[1][1] - WORKSPACE_LIMITS[1][0] - 0.2) * self.rng.random()
+ WORKSPACE_LIMITS[1][0]
+ 0.1
)
object_position = [drop_x, drop_y, 0.2]
object_orientation = [
2 * np.pi * self.rng.random(),
2 * np.pi * self.rng.random(),
2 * np.pi * self.rng.random(),
]
object_color = [
obj_mesh_color[object_idx][0],
obj_mesh_color[object_idx][1],
obj_mesh_color[object_idx][2],
1,
]
body_id = p.loadURDF(
curr_mesh_file, object_position, p.getQuaternionFromEuler(object_orientation)
)
body_mask_ids.append(MODEL_MASK_ID[curr_mesh_file.split("/")[-1]])
p.changeVisualShape(body_id, -1, rgbaColor=object_color)
body_ids.append(body_id)
env.add_object_id(body_id)
env.wait_static()
return body_ids, body_mask_ids
def add_object_push_from_file(self, env, file_name):
body_ids = []
success = True
# Read data
with open(file_name, "r") as preset_file:
file_content = preset_file.readlines()
num_obj = len(file_content)
obj_files = []
obj_mesh_colors = []
obj_positions = []
obj_orientations = []
for object_idx in range(num_obj):
file_content_curr_object = file_content[object_idx].split()
obj_file = os.path.join("assets", "blocks", file_content_curr_object[0])
obj_files.append(obj_file)
obj_positions.append(
[
float(file_content_curr_object[4]),
float(file_content_curr_object[5]),
float(file_content_curr_object[6]),
]
)
obj_orientations.append(
[
float(file_content_curr_object[7]),
float(file_content_curr_object[8]),
float(file_content_curr_object[9]),
]
)
obj_mesh_colors.append(
[
float(file_content_curr_object[1]),
float(file_content_curr_object[2]),
float(file_content_curr_object[3]),
]
)
# real color, ignore the color in file
obj_mesh_color = self.rng.choice(REAL_COLOR_SPACE, size=4)
new_obj_mesh_color = []
for object_color in obj_mesh_color:
new_obj_mesh_color.append([
max(0.01, min(1, object_color[0] + self.rng.random() * 0.3 - 0.15)),
max(0.01, min(1, object_color[1] + self.rng.random() * 0.3 - 0.15)),
max(0.01, min(1, object_color[2] + self.rng.random() * 0.3 - 0.15)),
])
obj_mesh_colors = self.rng.choice(new_obj_mesh_color, size=num_obj)
body_mask_ids = []
# Import objects
for object_idx in range(num_obj):
curr_mesh_file = obj_files[object_idx]
object_position = [
obj_positions[object_idx][0],
obj_positions[object_idx][1],
obj_positions[object_idx][2],
]
object_orientation = [
obj_orientations[object_idx][0],
obj_orientations[object_idx][1],
obj_orientations[object_idx][2],
]
object_color = [
obj_mesh_colors[object_idx][0],
obj_mesh_colors[object_idx][1],
obj_mesh_colors[object_idx][2],
1,
]
body_id = p.loadURDF(
curr_mesh_file, object_position, p.getQuaternionFromEuler(object_orientation)
)
p.changeVisualShape(body_id, -1, rgbaColor=object_color)
body_ids.append(body_id)
body_mask_ids.append(MODEL_MASK_ID[curr_mesh_file.split("/")[-1]])
env.add_object_id(body_id)
success &= env.wait_static()
success &= env.wait_static()
return body_ids, body_mask_ids, success
def get_push_action(self, env, depth):
"""Find a target and plan a push; the robot pushes from left to right."""
depth_heightmap = np.copy(depth)
depth_heightmap[depth_heightmap <= self.depth_min] = 0
depth_heightmap[depth_heightmap > self.depth_min] = 1
y_indices = np.argwhere(depth_heightmap == 1)[:, 1] # Find the y range
if len(y_indices) == 0:
print("find Skip")
return None
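# sample a row (y) with probability proportional to its object-pixel count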
y_list_unique, y_list_count = np.unique(y_indices, return_counts=True)
y_list_dist = y_list_count / y_list_count.sum()
y = self.rng.choice(y_list_unique, p=y_list_dist)
x_indices = np.argwhere(depth_heightmap[:, y] == 1)[:, 0] # Find the x range
x_indices_left = np.argwhere(
depth_heightmap[:, max(0, y - GRIPPER_PUSH_RADIUS_PIXEL)] == 1
)[
:, 0
] # Find the x range
x_indices_right = np.argwhere(
depth_heightmap[:, min(y + GRIPPER_PUSH_RADIUS_PIXEL, IMAGE_SIZE - 1)] == 1
)[
:, 0
] # Find the x range
if len(x_indices) == 0:
print("Skip 1")
return None
x = x_indices.min()
if len(x_indices_left) != 0:
x = min(x, x_indices_left.min())
if len(x_indices_right) != 0:
x = min(x, x_indices_right.min())
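# back the start point off by the safe gripper radius so the gripper clears the object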
x = x - GRIPPER_PUSH_RADIUS_SAFE_PIXEL
if x <= 0:
print("Skip 2")
return None
safe_z_position = 0.01
return [
x * env.pixel_size + env.bounds[0][0],
y * env.pixel_size + env.bounds[1][0],
safe_z_position,
]
if __name__ == "__main__":
env = Environment(gui=False)
collector = ImageDataCollector(start_iter=0, end_iter=2000)
cases = sorted(glob.glob("hard-cases/*.txt"))
cases_idx = 0
num_cases = len(cases)
seed = 0
# multi_thread_start = 1800
# collector.iter += multi_thread_start
# multi_thread_end = collector.iter + 300
# seed += multi_thread_start
while collector.iter < collector.end_iter:
# if collector.iter > multi_thread_end:
# break
print(f"-----Collecting: {collector.iter + 1}/{collector.end_iter}-----")
collector.reset_np_random(seed)
env.seed(seed)
env.reset()
# add objects
# num_objs = collector.rng.integers(4, 12, size=1)[0]
# body_ids, body_mask_ids = collector.add_objects(env, num_objs)
body_ids, body_mask_ids, success = collector.add_object_push_from_file(env, cases[cases_idx])
cases_idx += 1
if cases_idx == num_cases:
cases_idx = 0
success = env.wait_static()
if success:
# record info0
_, depth0p, _ = utils.get_true_heightmap(env)
color0, depth0, segm0 = env.render_camera(env.agent_cams[0])
# save data
collector.save_heightmaps(collector.iter, color0, depth0)
new_segm = np.zeros_like(segm0, dtype=np.uint16)
for idx, body_id in enumerate(body_ids):
new_segm[segm0 == body_id] = body_mask_ids[idx] + body_id
print(np.unique(new_segm))
collector.save_masks(collector.iter, new_segm)
# push and save again
# action = collector.get_push_action(env, depth0p)
# if action is not None:
# action_end = [action[0] + PUSH_DISTANCE, action[1], action[2]]
# success = env.push(action, action_end)
# success &= env.wait_static()
# if success:
# # record info0
# # color0, depth0, segm0 = utils.get_true_heightmap(env)
# color0, depth0, segm0 = env.render_camera(env.agent_cams[0])
# # save data
# collector.save_heightmaps(collector.iter + collector.end_iter, color0, depth0)
# new_segm = np.zeros_like(segm0, dtype=np.uint16)
# for idx, body_id in enumerate(body_ids):
# new_segm[segm0 == body_id] = body_mask_ids[idx] + body_id
# print(np.unique(new_segm))
# collector.save_masks(collector.iter + collector.end_iter, new_segm)
collector.iter += 1
seed += 1
| 12,818 | 38.564815 | 101 |
py
|
more
|
more-main/ppn_main.py
|
import argparse
from collections import defaultdict
import math
import time
from constants import (
COLOR_MEAN,
COLOR_STD,
DEPTH_MEAN,
DEPTH_STD,
GRASP_Q_GRASP_THRESHOLD,
GRIPPER_GRASP_WIDTH_PIXEL,
GRIPPER_PUSH_RADIUS_PIXEL,
IMAGE_PAD_WIDTH,
IS_REAL,
NUM_ROTATION,
PIXEL_SIZE,
PUSH_DISTANCE_PIXEL,
TARGET_LOWER,
TARGET_UPPER,
WORKSPACE_LIMITS,
)
import numpy as np
from mcts_utils import MCTSHelper
import random
import cv2
import torch
from environment_sim import Environment
from models import reinforcement_net, PushNet
from mcts_main import SeachCollector
import utils
from mcts_utils import _sampled_prediction_precise
from mcts_main import SeachCollector
@torch.no_grad()
def get_q(model, color_heightmap, depth_heightmap):
color_heightmap_pad = np.copy(color_heightmap)
depth_heightmap_pad = np.copy(depth_heightmap)
# Add extra padding (to handle rotations inside network)
color_heightmap_pad = np.pad(
color_heightmap_pad,
((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)),
"constant",
constant_values=0,
)
depth_heightmap_pad = np.pad(
depth_heightmap_pad, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
# Pre-process color image (scale and normalize)
image_mean = COLOR_MEAN
image_std = COLOR_STD
input_color_image = color_heightmap_pad.astype(float) / 255
for c in range(3):
input_color_image[:, :, c] = (input_color_image[:, :, c] - image_mean[c]) / image_std[c]
# Pre-process depth image (normalize)
image_mean = DEPTH_MEAN
image_std = DEPTH_STD
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1], 1)
input_depth_image = np.copy(depth_heightmap_pad)
input_depth_image[:, :, 0] = (input_depth_image[:, :, 0] - image_mean[0]) / image_std[0]
# Construct minibatch of size 1 (b,c,h,w)
input_color_image.shape = (
input_color_image.shape[0],
input_color_image.shape[1],
input_color_image.shape[2],
1,
)
input_depth_image.shape = (
input_depth_image.shape[0],
input_depth_image.shape[1],
input_depth_image.shape[2],
1,
)
input_color_data = torch.from_numpy(input_color_image.astype(np.float32)).permute(3, 2, 0, 1)
input_depth_data = torch.from_numpy(input_depth_image.astype(np.float32)).permute(3, 2, 0, 1)
# Helper
# mask of target object
# temp = cv2.cvtColor(color_heightmap_pad, cv2.COLOR_RGB2HSV)
# mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
# # mask of clearance of target object
# target_erode = cv2.filter2D(mask, -1, self.kernel_erode)
# clearance = np.zeros_like(mask)
# clearance[
# np.logical_and(
# np.logical_and(target_erode > 0, mask == 0), depth_heightmap_pad < DEPTH_MIN
# )
# ] = 255
# temp = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2HSV)
# mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
# mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
# mask_pad.shape = (
# mask_pad.shape[0],
# mask_pad.shape[1],
# mask_pad.shape[2],
# 1,
# )
# mask_pad = torch.from_numpy(mask_pad.astype(np.float32)).permute(3, 2, 0, 1)
# Pass input data through model
# output_prob = model(input_color_data, input_depth_data, True, -1, use_push=True, push_only=True)
output_prob = model(input_color_data, input_depth_data, True, -1)
# Return Q values (and remove extra padding)
for rotate_idx in range(len(output_prob)):
# output = torch.sigmoid(output_prob[rotate_idx])
output = output_prob[rotate_idx]
if rotate_idx == 0:
push_predictions = output.cpu().data.numpy()[
:, 0, :, :,
]
else:
push_predictions = np.concatenate(
(push_predictions, output.cpu().data.numpy()[:, 0, :, :,],), axis=0,
)
padding_width_start = IMAGE_PAD_WIDTH
padding_width_end = push_predictions[0].shape[0] - IMAGE_PAD_WIDTH
push_predictions = push_predictions[
:, padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
best_pix_ind = np.unravel_index(np.argmax(push_predictions), push_predictions.shape)
grasp_q_value = push_predictions[best_pix_ind]
return grasp_q_value, best_pix_ind, push_predictions
def filter_prediction(mask_heightmap, push_predictions):
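"""For each rotation, zero out push candidates whose gripper footprint collides with an object, and those with nothing to push (no object to the right within the push distance)."""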
kernel_collision = np.ones(
(GRIPPER_PUSH_RADIUS_PIXEL * 2, GRIPPER_GRASP_WIDTH_PIXEL), dtype=np.float32
)
kernel_right = np.zeros(
(
GRIPPER_PUSH_RADIUS_PIXEL * 2,
(PUSH_DISTANCE_PIXEL + round(GRIPPER_GRASP_WIDTH_PIXEL / 2)) * 2,
),
dtype=np.float32,
)
kernel_right[:, PUSH_DISTANCE_PIXEL + round(GRIPPER_GRASP_WIDTH_PIXEL / 2) :] = 1
num_rotations = push_predictions.shape[0]
for canvas_row in range(int(num_rotations / 4)):
for canvas_col in range(4):
rotate_idx = canvas_row * 4 + canvas_col
# rotate
pred_pad = utils.rotate(
push_predictions[rotate_idx], rotate_idx * (360.0 / num_rotations)
)
mask_pad = utils.rotate(
mask_heightmap, rotate_idx * (360.0 / num_rotations), is_mask=True
)
# filter collision
target_invalid = cv2.filter2D(mask_pad, -1, kernel_collision)
pred_pad[(target_invalid > 0)] = 0
# # filter point to right
target_invalid = cv2.filter2D(mask_pad, -1, kernel_right)
pred_pad[(target_invalid == 0)] = 0
# rotate back
pred_pad = utils.rotate(pred_pad, -rotate_idx * (360.0 / num_rotations))
push_predictions[rotate_idx] = pred_pad
return push_predictions
@torch.no_grad()
def sampled_prediction_precise(mcts_helper, env, model, color_image, mask_image):
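"""Sample candidate push actions with the MCTS helper, score them with the push network, and return the best start and end pixels."""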
actions = mcts_helper.sample_actions(None, color_image, mask_image)
out_q = _sampled_prediction_precise(env, model, actions, mask_image)
print(out_q)
final = actions[np.argmax(out_q)]
return final[0], final[1]
def sampled_prediction(mcts_helper, env, color_image, mask_image, push_predictions):
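"""Keep Q predictions only in small windows around the sampled action start points, binned to the nearest of the NUM_ROTATION push directions."""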
actions = mcts_helper.sample_actions(None, color_image, mask_image)
right = (1, 0)
new_push_predictions = np.zeros_like(push_predictions)
for action in actions:
action_start = (action[0][1], action[0][0])
action_end = (action[1][1], action[1][0])
current = (
action_end[0] - action_start[0],
action_end[1] - action_start[1],
)
dot = (
right[0] * current[0] + right[1] * current[1]
) # dot product between [x1, y1] and [x2, y2]
det = right[0] * current[1] - right[1] * current[0] # determinant
rot_angle = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)
rot_angle = math.degrees(rot_angle)
if rot_angle < 0:
rot_angle = 360 + rot_angle
rotate_idx = round(rot_angle / (360 / NUM_ROTATION))
if rotate_idx == NUM_ROTATION:
rotate_idx = 0
new_push_predictions[
rotate_idx,
action_start[1] - 3 : action_start[1] + 4,
action_start[0] - 3 : action_start[0] + 4,
] = push_predictions[
rotate_idx,
action_start[1] - 3 : action_start[1] + 4,
action_start[0] - 3 : action_start[0] + 4,
]
print(
np.max(
push_predictions[
rotate_idx,
action_start[1] - 3 : action_start[1] + 4,
action_start[0] - 3 : action_start[0] + 4,
]
)
)
# new_push_predictions[rotate_idx, action_start[1], action_start[0]] = push_predictions[rotate_idx, action_start[1], action_start[0]]
# best_locate = [rot_angle, action_start[1], action_start[0], action_end[1], action_end[0]]
# action_start = (best_locate[1], best_locate[2])
# rotated_color_image = utils.rotate(color_image, rot_angle)
# origin = mask_image.shape
# origin = ((origin[0] - 1) / 2, (origin[1] - 1) / 2)
# new_action_start = utils.rotate_point(origin, action_start, math.radians(rot_angle))
# new_action_start = (round(new_action_start[0]), round(new_action_start[1]))
# point_from = (int(new_action_start[1]), int(new_action_start[0]))
# point_to = (int(point_from[0] + PUSH_DISTANCE_PIXEL), int(point_from[1]))
# rotated_color_image = cv2.arrowedLine(
# rotated_color_image, point_from, point_to, (100, 200, 0), 2, tipLength=0.2,
# )
# cv2.imshow('before', color_image)
# cv2.imshow('after', rotated_color_image)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
return new_push_predictions
@torch.no_grad()
def get_q_mask(model, mask_heightmap, env):
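"""Run the mask-based push network over all rotations; return the best Q value, its (rotation, y, x) index, and the full prediction maps."""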
mask_heightmap = np.copy(mask_heightmap)
# relabel
mask_heightmap = utils.relabel_mask(env, mask_heightmap)
# focus on target, so make one extra channel
target_mask_img = np.zeros_like(mask_heightmap, dtype=np.uint8)
target_mask_img[mask_heightmap == 255] = 255
mask_heightmap = np.dstack((target_mask_img, mask_heightmap))
# Add extra padding (to handle rotations inside network)
mask_heightmap = np.pad(
mask_heightmap,
((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)),
"constant",
constant_values=0,
)
input_image = mask_heightmap.astype(float) / 255
# Construct minibatch of size 1 (b,c,h,w)
input_image.shape = (
input_image.shape[0],
input_image.shape[1],
input_image.shape[2],
1,
)
input_data = torch.from_numpy(input_image.astype(np.float32)).permute(3, 2, 0, 1)
# Pass input data through model
# output_prob = model(input_color_data, input_depth_data, True, -1, use_push=True, push_only=True)
output_prob = model(input_data, True, -1)
# Return Q values (and remove extra padding)
for rotate_idx in range(len(output_prob)):
# output = torch.sigmoid(output_prob[rotate_idx])
output = output_prob[rotate_idx]
if rotate_idx == 0:
push_predictions = output.cpu().data.numpy()[
:, 0, :, :,
]
else:
push_predictions = np.concatenate(
(push_predictions, output.cpu().data.numpy()[:, 0, :, :,],), axis=0,
)
padding_width_start = IMAGE_PAD_WIDTH
padding_width_end = push_predictions[0].shape[0] - IMAGE_PAD_WIDTH
push_predictions = push_predictions[
:, padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
best_pix_ind = np.unravel_index(np.argmax(push_predictions), push_predictions.shape)
grasp_q_value = push_predictions[best_pix_ind]
return grasp_q_value, best_pix_ind, push_predictions
def parse_args():
parser = argparse.ArgumentParser(description="Lifelong DQN")
parser.add_argument("--test_case", action="store", help="File for testing")
parser.add_argument("--test_cases", nargs="+", help="Files for testing")
parser.add_argument(
"--max_test_trials",
action="store",
type=int,
default=5,
help="maximum number of test runs per case/scenario",
)
parser.add_argument(
"--num_iter",
action="store",
type=int,
default=50,
)
parser.add_argument(
"--push_model",
action="store",
type=str,
default="logs_mcts/runs/2021-09-02-22-59-train-ratio-1-final/lifelong_model-20.pth",
)
parser.add_argument("--switch", action="store", type=int, help="Switch target")
args = parser.parse_args()
return args
if __name__ == "__main__":
# set seed
random.seed(1234)
torch.manual_seed(1234)
np.random.seed(1234)
# network only?
iteration = 0
args = parse_args()
case = args.test_case
cases = args.test_cases
switch = args.switch
if switch is not None:
print(f"Target ID has been switched to {switch}")
if cases:
repeat_num = len(cases)
else:
repeat_num = args.max_test_trials
cases = [case] * repeat_num
collector = SeachCollector(cases)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
env = Environment(gui=False)
mcts_helper = MCTSHelper(env, "logs_grasp/snapshot-post-020000.reinforcement.pth")
push_model = PushNet()
push_model.load_state_dict(torch.load(args.push_model)["model"])
push_model = push_model.to(device)
push_model.eval()
num_action_log = defaultdict(list)
for repeat_idx in range(repeat_num):
success = False
while not success:
env.reset()
success = env.add_object_push_from_file(cases[repeat_idx])
print(f"Reset environment of {repeat_idx}")
num_action = [0, 0, 0]
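# num_action = [total actions, pushes, successful grasps]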
start_time = time.time()
while True:
num_action[0] += 1
color_image, depth_image, _ = utils.get_true_heightmap(env)
temp = cv2.cvtColor(color_image, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
print(f"Target on the table (value: {np.sum(mask) / 255})")
if np.sum(mask) / 255 < 10:
break
q_value, best_pix_ind, grasp_predictions = mcts_helper.get_grasp_q(
color_image, depth_image, post_checking=True
)
print(f"Max grasp Q value: {q_value}")
# record
collector.save_heightmaps(iteration, color_image, depth_image)
grasp_pred_vis = mcts_helper.get_prediction_vis(
grasp_predictions, color_image, best_pix_ind
)
collector.save_visualizations(iteration, grasp_pred_vis, "grasp")
# Grasp >>>>>
if q_value > GRASP_Q_GRASP_THRESHOLD:
best_rotation_angle = np.deg2rad(best_pix_ind[0] * (360.0 / NUM_ROTATION))
primitive_position = [
best_pix_ind[1] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
best_pix_ind[2] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
depth_image[best_pix_ind[1]][best_pix_ind[2]] + WORKSPACE_LIMITS[2][0],
]
if not IS_REAL:
success = env.grasp(primitive_position, best_rotation_angle)
else:
grasp_success = env.grasp(primitive_position, best_rotation_angle)
success = grasp_success
# record
reward_value = 1 if success else 0
collector.executed_action_log.append(
[
1, # grasp
primitive_position[0],
primitive_position[1],
primitive_position[2],
best_rotation_angle,
-1,
-1,
]
)
collector.label_value_log.append(reward_value)
collector.write_to_log("executed-action", collector.executed_action_log)
collector.write_to_log("label-value", collector.label_value_log)
iteration += 1
if success:
num_action[2] += 1
break
else:
continue
# Grasp <<<<<
# Push >>>>>
num_action[1] += 1
color_image, depth_image, mask_image = utils.get_true_heightmap(env)
start, end = sampled_prediction_precise(
mcts_helper, env, push_model, color_image, mask_image
)
primitive_position = [
start[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
start[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
primitive_position_end = [
end[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
end[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
env.push(primitive_position, primitive_position_end)
# Push <<<<<
# Push >>>>>
# num_action[1] += 1
# color_image, depth_image, mask_image = utils.get_true_heightmap(env)
# q_value, best_pix_ind, predictions = get_q_mask(push_model, mask_image, env)
# use same action space as mcts >>>>>
# predictions = sampled_prediction(mcts_helper, env, color_image, mask_image, predictions)
# best_pix_ind = np.unravel_index(np.argmax(predictions), predictions.shape)
# grasp_q_value = predictions[best_pix_ind]
# # <<<<<
# print(f"Push {q_value}")
# best_rotation_angle = np.deg2rad(90 - best_pix_ind[0] * (360.0 / NUM_ROTATION))
# primitive_position = [
# best_pix_ind[1] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
# best_pix_ind[2] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
# 0.01,
# ]
# primitive_position_end = [
# primitive_position[0] + PUSH_DISTANCE * np.cos(best_rotation_angle),
# primitive_position[1] + PUSH_DISTANCE * np.sin(best_rotation_angle),
# 0.01,
# ]
# env.push(primitive_position, primitive_position_end)
# pred_vis = mcts_helper.get_prediction_vis(
# predictions, color_image, best_pix_ind, is_push=True
# )
# cv2.imwrite(
# "vis.png", pred_vis,
# )
# input("wait")
# record
reward_value = 0
collector.executed_action_log.append(
[
0, # push
primitive_position[0],
primitive_position[1],
primitive_position[2],
primitive_position_end[0],
primitive_position_end[1],
primitive_position_end[2],
]
)
collector.label_value_log.append(reward_value)
collector.write_to_log("executed-action", collector.executed_action_log)
collector.write_to_log("label-value", collector.label_value_log)
iteration += 1
# Push <<<<<
end_time = time.time()
collector.time_log.append(end_time - start_time)
collector.write_to_log("executed-time", collector.time_log)
print(num_action)
num_action_log[cases[repeat_idx]].append(num_action)
print(num_action_log)
total_case = 0
total_action = 0
for key in num_action_log:
this_total_case = 0
this_total_action = 0
this_total_push = 0
this_total_grasp = 0
this_total_success = 0
for trial in num_action_log[key]:
this_total_case += 1
this_total_action += trial[0]
this_total_push += trial[1]
this_total_grasp += (trial[0] - trial[1])
this_total_success += trial[2]
print(key, "this_case:", this_total_case, "this_action:", this_total_action,
"this_push:", this_total_push, "this_grasp:", this_total_grasp,
"average num", this_total_action/this_total_case, "average_grasp", this_total_success / this_total_grasp, "total_complete", this_total_success
)
total_case += len(num_action_log[key])
for re in num_action_log[key]:
total_action += re[0]
print(total_case, total_action, total_action / total_case)
| 20,013 | 36.270019 | 155 |
py
|
more
|
more-main/train_maskrcnn.py
|
import torch
import torchvision
from dataset import SegmentationDataset
import log_utils
import torch_utils
import datetime
import argparse
import time
import os
from vision.coco_utils import get_coco_api_from_dataset
from vision.coco_eval import CocoEvaluator
import vision.transforms as T
import math
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
from torchvision.models.detection.rpn import AnchorGenerator
import sys
from PIL import Image
def get_model_instance_segmentation(num_classes):
# load an instance segmentation model pre-trained on COCO
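# custom RPN anchors: one size tuple per FPN level, three aspect ratios each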
anchor_sizes = ((32,), (64,), (128,), (256,), (256,))
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
anchor_generator = AnchorGenerator(sizes=anchor_sizes, aspect_ratios=aspect_ratios)
model = torchvision.models.detection.maskrcnn_resnet50_fpn(
pretrained=True, trainable_backbone_layers=5, rpn_anchor_generator=anchor_generator
)
# get number of input features for the classifier
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the pre-trained head with a new one
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)
# now get the number of input features for the mask classifier
in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels
hidden_layer = 512
# and replace the mask predictor with a new one
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes)
return model
def get_transform(train):
transforms = []
transforms.append(T.ToTensor())
if train:
transforms.append(T.RandomHorizontalFlip(0.5))
return T.Compose(transforms)
def get_data_loader(dataset_root, batch_size, is_real):
# use our dataset and defined transformations
dataset = SegmentationDataset(os.path.join(dataset_root, "train"), get_transform(train=True), is_real, background=os.path.join(dataset_root, "background.png"))
dataset_test = SegmentationDataset(os.path.join(dataset_root, "test"), get_transform(train=False), is_real, background=os.path.join(dataset_root, "background.png"))
# define training and validation data loaders
data_loader_train = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
shuffle=True,
num_workers=0,
drop_last=True,
collate_fn=torch_utils.collate_fn,
)
data_loader_test = torch.utils.data.DataLoader(
dataset_test,
batch_size=batch_size,
shuffle=False,
num_workers=0,
drop_last=False,
collate_fn=torch_utils.collate_fn,
)
return data_loader_train, data_loader_test
def train_one_epoch(model, optimizer, data_loader, device, epoch, logger, print_freq, resume=False):
"""
https://github.com/pytorch/vision/blob/master/references/detection/engine.py
"""
model.train()
metric_logger = log_utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", log_utils.SmoothedValue(window_size=1, fmt="{value:.8f}"))
header = "Epoch: [{}]".format(epoch)
lr_scheduler = None
if epoch == 0 and not resume:
warmup_factor = 1.0 / 1000
warmup_iters = min(1000, len(data_loader) - 1)
lr_scheduler = torch_utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
for images, targets in metric_logger.log_every(data_loader, print_freq, logger, header):
images = list(image.to(device) for image in images)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
loss_dict = model(images, targets)
losses = sum(loss for loss in loss_dict.values())
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = torch_utils.reduce_dict(loss_dict)
losses_reduced = sum(loss for loss in loss_dict_reduced.values())
loss_value = losses_reduced.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
losses.backward()
optimizer.step()
if lr_scheduler is not None:
lr_scheduler.step()
metric_logger.update(loss=losses_reduced, **loss_dict_reduced)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
return metric_logger
def _get_iou_types(model):
model_without_ddp = model
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
model_without_ddp = model.module
iou_types = ["bbox"]
if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
iou_types.append("segm")
if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
iou_types.append("keypoints")
return iou_types
@torch.no_grad()
def evaluate(model, data_loader, logger, device):
n_threads = torch.get_num_threads()
# FIXME remove this and make paste_masks_in_image run on the GPU
torch.set_num_threads(1)
cpu_device = torch.device("cpu")
model.eval()
metric_logger = log_utils.MetricLogger(delimiter=" ")
header = "Test:"
coco = get_coco_api_from_dataset(data_loader.dataset)
iou_types = _get_iou_types(model)
coco_evaluator = CocoEvaluator(coco, iou_types)
for images, targets in metric_logger.log_every(data_loader, 100, logger, header):
images = list(img.to(device) for img in images)
torch.cuda.synchronize()
model_time = time.time()
outputs = model(images)
outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
model_time = time.time() - model_time
res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
evaluator_time = time.time()
coco_evaluator.update(res)
evaluator_time = time.time() - evaluator_time
metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)
# gather the stats from all processes
coco_evaluator.synchronize_between_processes()
# accumulate predictions from all images
coco_evaluator.accumulate()
coco_evaluator.summarize()
torch.set_num_threads(n_threads)
return coco_evaluator
@torch.no_grad()
def test(model, data_loader, device):
import cv2
import numpy as np
model.eval()
print(len(data_loader.dataset))
count = 0
for images, targets in data_loader:
for i in range(len(images)):
print(targets[i]["image_id"])
image = images[i]
# target = targets[i]
# image = image.permute(1, 2, 0).numpy()
# image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# image *= 255
# image = image.astype(np.uint8)
# cv2.imwrite(str(i) + 'color.png', image)
# masks = target['masks']
# for mi, m in enumerate(masks):
# img = m.numpy()
# img *= 255
# print(np.max(img), np.min(img))
# img = img.astype(np.uint8)
# cv2.imwrite(str(i+mi) + 'mask.png', img)
# exit(5)
prediction = model([image.to(device)])
print(prediction[0]["scores"])
print(prediction[0]["labels"])
pred_mask = np.zeros((720, 1280), dtype=np.uint8)
if len(targets[i]["masks"]) != np.sum(prediction[0]["scores"].cpu().numpy() > 0.95):
for idx, mask in enumerate(prediction[0]["masks"]):
if prediction[0]["scores"][idx] > 0.95:
# if prediction[0]['scores'][idx] > 0.75:
img1 = mask[0].mul(255).byte().cpu().numpy()
img1[img1 > 80] = 255
img1[img1 <= 80] = 0
pred_mask[img1 > 80] = 255 - idx * 10
img1 = Image.fromarray(img1)
img1.save(str(prediction[0]["labels"][idx].cpu().item()) + "-" + str(idx) + "mask.png")
for idx, mask in enumerate(targets[i]["masks"]):
img2 = Image.fromarray(mask.mul(255).byte().cpu().numpy())
img2.save(str(idx) + "-" + str(targets[i]["labels"][idx].cpu().item()) + "target.png")
print(len(targets[i]["masks"]), len(prediction[0]["masks"] > 0.7))
img0 = Image.fromarray(image.mul(255).permute(1, 2, 0).byte().numpy())
img0.save(str(count) + "-" + str(idx) + "ori.png")
img0 = Image.fromarray(pred_mask)
img0.save(str(count) + "-" + str(idx) + "pred.png")
count += 1
exit()
def main(args):
data_loader, data_loader_test = get_data_loader(
args.dataset_root, args.batch_size, args.is_real
)
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
model = get_model_instance_segmentation(6+1)
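# 6 + 1: six block classes (matching MODEL_MASK_ID in constants.py) plus background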
if args.resume:
# state_dict = torch.load(os.path.join(args.dataset_root, "maskrcnn.pth"))
# keep = lambda k: 'box_predictor' not in k and 'mask_predictor' not in k
# keep = lambda k: 'rpn.head' not in k
# state_dict = {k: v for k, v in state_dict.items() if keep(k)}
# model.load_state_dict(state_dict, strict=False)
model.load_state_dict(torch.load(os.path.join(args.dataset_root, "maskrcnn.pth")))
model = model.to(device)
# construct an optimizer
params = [p for p in model.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=1e-4, momentum=0.9, weight_decay=1e-4)
# and a learning rate scheduler which decreases the learning rate by 10x every 1 epochs
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[3, 5], gamma=0.1)
log_dir = os.path.join(args.dataset_root, "runs")
timestamp_value = datetime.datetime.fromtimestamp(time.time())
time_name = timestamp_value.strftime("%Y-%m-%d-%H-%M")
log_dir = os.path.join(log_dir, time_name)
if not os.path.exists(log_dir):
os.makedirs(log_dir)
logger = log_utils.setup_logger(log_dir, "Mask R-CNN")
if args.test:
test(model, data_loader_test, device=device)
else:
for epoch in range(args.epochs):
# train for one epoch, printing every 10 iterations
train_one_epoch(
model,
optimizer,
data_loader,
device,
epoch,
logger,
print_freq=50,
resume=args.resume,
)
torch.save(model.state_dict(), os.path.join(args.dataset_root, f"maskrcnn{epoch}.pth"))
# update the learning rate
lr_scheduler.step()
# evaluate on the test dataset
evaluate(model, data_loader_test, logger, device=device)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Train foreground")
parser.add_argument(
"--dataset_root", dest="dataset_root", action="store", help="Enter the path to the dataset"
)
parser.add_argument("--is_real", dest="is_real", action="store_true", default=False, help="")
parser.add_argument(
"--epochs",
dest="epochs",
action="store",
type=int,
default=20,
help="Enter the epoch for training",
)
parser.add_argument(
"--batch_size",
dest="batch_size",
action="store",
type=int,
default=4,
help="Enter the batchsize for training and testing",
)
parser.add_argument(
"--test", dest="test", action="store_true", default=False, help="Testing and visualizing"
)
parser.add_argument(
"--resume",
dest="resume",
action="store_true",
default=False,
help="Enter the path to the dataset",
)
args = parser.parse_args()
if args.test:
args.resume = True
main(args)
| 12,144 | 36.254601 | 168 |
py
|
more
|
more-main/collect_train_grasp_data.py
|
import numpy as np
import time
import cv2
import utils
import datetime
import os
import glob
import argparse
from threading import Thread
import pybullet as p
import torch
from trainer import Trainer
from constants import (
TARGET_LOWER,
TARGET_UPPER,
DEPTH_MIN,
PUSH_DISTANCE,
NUM_ROTATION,
GRASP_Q_GRASP_THRESHOLD,
IMAGE_PAD_WIDTH,
IMAGE_PAD_DIFF,
IS_REAL,  # needed below; it was missing from this import list
)
from environment_sim import Environment
import multiprocessing as mp
from action_utils_mask import sample_actions, Predictor, from_maskrcnn
from train_maskrcnn import get_model_instance_segmentation
class GraspDataCollectorTrainer:
def __init__(self, args):
self.depth_min = DEPTH_MIN
# Create directory to save data
timestamp = time.time()
timestamp_value = datetime.datetime.fromtimestamp(timestamp)
self.base_directory = os.path.join(
os.path.abspath("logs_grasp"), timestamp_value.strftime("%Y-%m-%d-%H-%M-%S")
)
print("Creating data logging session: %s" % (self.base_directory))
self.color_heightmaps_directory = os.path.join(
self.base_directory, "data", "color-heightmaps"
)
self.depth_heightmaps_directory = os.path.join(
self.base_directory, "data", "depth-heightmaps"
)
self.mask_directory = os.path.join(self.base_directory, "data", "masks")
self.models_directory = os.path.join(self.base_directory, "models")
self.visualizations_directory = os.path.join(self.base_directory, "visualizations")
self.transitions_directory = os.path.join(self.base_directory, "transitions")
if not os.path.exists(self.color_heightmaps_directory):
os.makedirs(self.color_heightmaps_directory)
if not os.path.exists(self.depth_heightmaps_directory):
os.makedirs(self.depth_heightmaps_directory)
if not os.path.exists(self.mask_directory):
os.makedirs(self.mask_directory)
if not os.path.exists(self.models_directory):
os.makedirs(self.models_directory)
if not os.path.exists(self.visualizations_directory):
os.makedirs(self.visualizations_directory)
if not os.path.exists(self.transitions_directory):
os.makedirs(os.path.join(self.transitions_directory))
self.iter = args.start_iter
self.end_iter = args.end_iter
def save_heightmaps(self, iteration, color_heightmap, depth_heightmap, mode):
color_heightmap = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2BGR)
cv2.imwrite(
os.path.join(self.color_heightmaps_directory, "%06d.%s.color.png" % (iteration, mode)),
color_heightmap,
)
depth_heightmap = np.round(depth_heightmap * 100000).astype(
np.uint16
) # Save depth in 1e-5 meters
cv2.imwrite(
os.path.join(self.depth_heightmaps_directory, "%06d.%s.depth.png" % (iteration, mode)),
depth_heightmap,
)
def write_to_log(self, log_name, log):
np.savetxt(
os.path.join(self.transitions_directory, "%s.log.txt" % log_name), log, delimiter=" "
)
def save_model(self, iteration, model, name):
torch.save(
{"model": model.state_dict()},
os.path.join(self.models_directory, "snapshot-%06d.%s.pth" % (iteration, name)),
)
def save_backup_model(self, model, name):
torch.save(
{"model": model.state_dict()},
os.path.join(self.models_directory, "snapshot-backup.%s.pth" % (name)),
)
def save_visualizations(self, iteration, affordance_vis, name):
cv2.imwrite(
os.path.join(self.visualizations_directory, "%06d.%s.png" % (iteration, name)),
affordance_vis,
)
def add_objects(self, env, num_obj, workspace_limits):
"""Randomly dropped objects to the workspace"""
color_space = (
np.asarray(
[
[78.0, 121.0, 167.0], # blue
[89.0, 161.0, 79.0], # green
[156, 117, 95], # brown
[242, 142, 43], # orange
[237.0, 201.0, 72.0], # yellow
[186, 176, 172], # gray
[255.0, 87.0, 89.0], # red
[176, 122, 161], # purple
[118, 183, 178], # cyan
[255, 157, 167], # pink
]
)
/ 255.0
)
mesh_list = glob.glob("assets/blocks/*.urdf")
obj_mesh_ind = np.random.randint(0, len(mesh_list), size=num_obj)
obj_mesh_color = color_space[np.asarray(range(num_obj)) % 10, :]
# Add each object to robot workspace at x,y location and orientation (random or pre-loaded)
body_ids = []
for object_idx in range(len(obj_mesh_ind)):
curr_mesh_file = mesh_list[obj_mesh_ind[object_idx]]
drop_x = (
(workspace_limits[0][1] - workspace_limits[0][0] - 0.2) * np.random.random_sample()
+ workspace_limits[0][0]
+ 0.1
)
drop_y = (
(workspace_limits[1][1] - workspace_limits[1][0] - 0.2) * np.random.random_sample()
+ workspace_limits[1][0]
+ 0.1
)
object_position = [drop_x, drop_y, 0.2]
object_orientation = [
2 * np.pi * np.random.random_sample(),
2 * np.pi * np.random.random_sample(),
2 * np.pi * np.random.random_sample(),
]
object_color = [
obj_mesh_color[object_idx][0],
obj_mesh_color[object_idx][1],
obj_mesh_color[object_idx][2],
1,
]
body_id = p.loadURDF(
curr_mesh_file, object_position, p.getQuaternionFromEuler(object_orientation)
)
p.changeVisualShape(body_id, -1, rgbaColor=object_color)
body_ids.append(body_id)
env.add_object_id(body_id)
env.wait_static()
return body_ids, True
def add_object_push_from_file(self, env, file_name):
body_ids = []
success = True
# Read data
with open(file_name, "r") as preset_file:
file_content = preset_file.readlines()
num_obj = len(file_content)
obj_files = []
obj_mesh_colors = []
obj_positions = []
obj_orientations = []
for object_idx in range(num_obj):
file_content_curr_object = file_content[object_idx].split()
obj_file = os.path.join("assets", "blocks", file_content_curr_object[0])
obj_files.append(obj_file)
obj_positions.append(
[
float(file_content_curr_object[4]),
float(file_content_curr_object[5]),
float(file_content_curr_object[6]),
]
)
obj_orientations.append(
[
float(file_content_curr_object[7]),
float(file_content_curr_object[8]),
float(file_content_curr_object[9]),
]
)
obj_mesh_colors.append(
[
float(file_content_curr_object[1]),
float(file_content_curr_object[2]),
float(file_content_curr_object[3]),
]
)
# Import objects
for object_idx in range(num_obj):
curr_mesh_file = obj_files[object_idx]
object_position = [
obj_positions[object_idx][0],
obj_positions[object_idx][1],
obj_positions[object_idx][2],
]
object_orientation = [
obj_orientations[object_idx][0],
obj_orientations[object_idx][1],
obj_orientations[object_idx][2],
]
object_color = [
obj_mesh_colors[object_idx][0],
obj_mesh_colors[object_idx][1],
obj_mesh_colors[object_idx][2],
1,
]
body_id = p.loadURDF(
curr_mesh_file, object_position, p.getQuaternionFromEuler(object_orientation)
)
p.changeVisualShape(body_id, -1, rgbaColor=object_color)
body_ids.append(body_id)
env.add_object_id(body_id)
success &= env.wait_static()
success &= env.wait_static()
# give time to stop
for _ in range(5):
p.stepSimulation(env.client_id)
return body_ids, success
def remove_objects(self, env):
for body_id in env.obj_ids["rigid"]:
if p.getBasePositionAndOrientation(body_id)[0][0] < 0:
p.removeBody(body_id)
env.remove_object_id(body_id)
def main(self, args, env):
# TODO: workaround for a cv2.cvtColor / PyTorch DataLoader multi-threading bug
# mp.set_start_method("spawn")
num_obj = args.num_obj
heightmap_resolution = env.pixel_size
workspace_limits = env.bounds
random_seed = args.random_seed
force_cpu = False
method = "reinforcement"
push_rewards = args.push_rewards
future_reward_discount = args.future_reward_discount
experience_replay = args.experience_replay
explore_rate_decay = args.explore_rate_decay
grasp_only = args.grasp_only
is_real = IS_REAL
is_testing = args.is_testing
is_grasp_explore = args.is_grasp_explore
is_dipn = args.is_dipn
has_target = args.has_target
is_baseline = args.is_baseline
max_test_trials = args.max_test_trials # Maximum number of test runs per case/scenario
test_preset_cases = args.test_preset_cases
test_preset_file = os.path.abspath(args.test_preset_file) if test_preset_cases else None
load_snapshot = args.load_snapshot # Load pre-trained snapshot of model?
snapshot_file = os.path.abspath(args.snapshot_file) if load_snapshot else None
continue_logging = args.continue_logging # Continue logging from previous session
# Save visualizations of FCN predictions? Takes 0.6s per training step if set to True
save_visualizations = args.save_visualizations
# Set random seed
np.random.seed(random_seed)
# Initialize trainer
trainer = Trainer(
method,
push_rewards,
future_reward_discount,
is_testing,
load_snapshot,
snapshot_file,
force_cpu,
)
if is_dipn:
# Initialize Push Prediction
predictor = Predictor("logs_push/push_prediction_model-75.pth")
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Initialize Mask R-CNN
mask_model = get_model_instance_segmentation(2)
mask_model.load_state_dict(torch.load("logs_image/maskrcnn.pth"))
mask_model = mask_model.to(device)
mask_model.eval()
# Initialize variables for heuristic bootstrapping and exploration probability
no_change_count = [2, 2] if not is_testing else [0, 0]
explore_prob = 0.5 if not is_testing else 0.0
# Quick hack for nonlocal memory between threads in Python 2
nonlocal_variables = {
"executing_action": False,
"primitive_action": None,
"best_pix_ind": None,
"push_success": False,
"grasp_success": False,
"primitive_position": None,
"push_predictions": None,
"grasp_predictions": None,
}
# Parallel thread to process network output and execute actions
# -------------------------------------------------------------
def process_actions():
while True:
if nonlocal_variables["executing_action"]:
push_predictions = nonlocal_variables["push_predictions"]
grasp_predictions = nonlocal_variables["grasp_predictions"]
if has_target:
grasp_predictions = trainer.focus_on_target(
color_heightmap,
valid_depth_heightmap,
grasp_predictions,
TARGET_LOWER,
TARGET_UPPER,
)
# Determine whether grasping or pushing should be executed based on network predictions
best_push_conf = np.max(push_predictions)
best_grasp_conf = np.max(grasp_predictions)
if is_dipn:
chosen_best_grasp_conf = best_grasp_conf
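# np.sort(...)[:] keeps every element, so this is simply the sum of all grasp Q values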
best_grasp_confs = np.sum(np.sort(grasp_predictions.flatten())[:])
print(
f"Before Primitive confidence scores: {best_grasp_conf} (grasp) {best_grasp_confs} (grasp sum)"
)
rotate_idx = -1
if best_grasp_conf < GRASP_Q_GRASP_THRESHOLD:
old_best_grasp_conf = best_grasp_conf
old_best_grasp_confs = best_grasp_confs
mask_objs = from_maskrcnn(mask_model, color_heightmap, device, True)
# if len(mask_objs) > 1 or (len(mask_objs) == 1 and best_grasp_conf < 0.5):
if len(mask_objs) > 1:
(
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs,
before_rotated_action,
rotated_mask_objs,
) = sample_actions(
color_heightmap, valid_depth_heightmap, mask_objs, plot=True
)
if len(rotated_color_image) > 0:
(
generated_color_images,
generated_depth_images,
validations,
) = predictor.forward(
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs,
rotated_mask_objs,
True,
)
for idx in range(len(generated_color_images)):
if validations[idx]:
with torch.no_grad():
_, new_grasp_predictions = trainer.forward(
generated_color_images[idx],
generated_depth_images[idx],
is_volatile=True,
use_push=False,
)
if has_target:
new_grasp_predictions = trainer.focus_on_target(
generated_color_images[idx],
generated_depth_images[idx],
new_grasp_predictions,
TARGET_LOWER,
TARGET_UPPER,
)
predicted_value = np.max(new_grasp_predictions)
if chosen_best_grasp_conf < predicted_value:
rotate_idx = idx
chosen_best_grasp_conf = predicted_value
else:
predicted_values = np.sum(
np.sort(new_grasp_predictions.flatten())[:]
)
best_grasp_conf = np.max(new_grasp_predictions)
if (
best_grasp_confs < predicted_values
and old_best_grasp_conf < best_grasp_conf
):
best_grasp_confs = predicted_values
rotate_idx = idx
chosen_best_grasp_conf = best_grasp_conf
else:
print("Need to check, no action?")
input("wait")
if has_target:
if rotate_idx == -1:
rng = np.random.default_rng(random_seed)
if np.any(validations):
while True:
rotate_idx = rng.integers(
0, len(generated_color_images)
)
if validations[rotate_idx]:
break
else:
rotate_idx = rng.integers(0, len(generated_color_images))
generated_color_images[rotate_idx] = generated_color_images[
rotate_idx
][
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF,
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF,
:,
]
generated_depth_images[rotate_idx] = generated_depth_images[
rotate_idx
][
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF,
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF,
]
chosen_best_grasp_conf = -10
if (
best_grasp_confs * 0.9 < old_best_grasp_confs
and chosen_best_grasp_conf > 0.5
):
chosen_best_grasp_conf = -1
# if (
# rotate_idx == -1 or best_grasp_conf < old_best_grasp_conf * 1.1
# ) and best_grasp_conf > 0.5:
if rotate_idx == -1 or (
best_grasp_confs * 0.9 < old_best_grasp_confs
and chosen_best_grasp_conf > 0.5
):
nonlocal_variables["primitive_action"] = "grasp"
else:
overlay = color_heightmap
added_image = cv2.addWeighted(
generated_color_images[rotate_idx], 0.8, overlay, 0.2, 0
)
img = cv2.cvtColor(added_image, cv2.COLOR_RGB2BGR)
cv2.imwrite("predict.png", img)
img = generated_depth_images[rotate_idx]
img[img <= DEPTH_MIN] = 0
img[img > DEPTH_MIN] = 255
cv2.imwrite("predictgray.png", img)
img = cv2.cvtColor(
generated_color_images[rotate_idx], cv2.COLOR_RGB2BGR
)
cv2.imwrite("predictcolor.png", img)
nonlocal_variables["primitive_action"] = "push"
print(
"After Primitive confidence scores: %f (grasp) %f (grasp sum)"
% (chosen_best_grasp_conf, best_grasp_confs)
)
trainer.is_exploit_log.append([1])
else:
print(
"Primitive confidence scores: %f (push), %f (grasp)"
% (best_push_conf, best_grasp_conf)
)
nonlocal_variables["primitive_action"] = "grasp"
explore_actions = False
if not grasp_only:
if best_push_conf > best_grasp_conf:
nonlocal_variables["primitive_action"] = "push"
explore_actions = np.random.uniform() < explore_prob
if (
explore_actions
): # Exploitation (do best action) vs exploration (do other action)
print(
"Strategy: explore (exploration probability: %f)"
% (explore_prob)
)
nonlocal_variables["primitive_action"] = (
"push" if np.random.randint(0, 2) == 0 else "grasp"
)
else:
print(
"Strategy: exploit (exploration probability: %f)"
% (explore_prob)
)
trainer.is_exploit_log.append([0 if explore_actions else 1])
self.write_to_log("is-exploit", trainer.is_exploit_log)
use_heuristic = False
# Get pixel location and rotation with highest affordance prediction from
# heuristic algorithms (rotation, y, x)
if nonlocal_variables["primitive_action"] == "push":
if is_dipn:
predicted_value = best_grasp_conf
angle = rotated_angle[rotate_idx]
if angle < 0:
angle = 360 + angle
nonlocal_variables["best_pix_ind"] = (
int(round((angle) / (360 / NUM_ROTATION))),
before_rotated_action[rotate_idx][0],
before_rotated_action[rotate_idx][1],
)
else:
nonlocal_variables["best_pix_ind"] = np.unravel_index(
np.argmax(push_predictions), push_predictions.shape
)
predicted_value = np.max(push_predictions)
elif nonlocal_variables["primitive_action"] == "grasp":
if is_grasp_explore:
pow_law_exp = 1.5
q_lower_limit = 0.2
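# exploration: draw from a power-law over the sorted Q values, biased toward (but not
# always at) the maximum; only pixels with Q above q_lower_limit are candidates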
num_valid_samples = np.sum(grasp_predictions > q_lower_limit)
sorted_idx = np.argsort(grasp_predictions, axis=None)
rand_sample_idx = (
int(
np.round(
np.random.power(pow_law_exp, 1) * (num_valid_samples - 1)
)
)
+ sorted_idx.size
- num_valid_samples
)
nonlocal_variables["best_pix_ind"] = np.unravel_index(
sorted_idx[rand_sample_idx], grasp_predictions.shape
)
predicted_value = grasp_predictions[nonlocal_variables["best_pix_ind"]]
print(f"Explore grasp q value: {predicted_value} (grasp)")
else:
nonlocal_variables["best_pix_ind"] = np.unravel_index(
np.argmax(grasp_predictions), grasp_predictions.shape
)
predicted_value = np.max(grasp_predictions)
trainer.use_heuristic_log.append([1 if use_heuristic else 0])
self.write_to_log("use-heuristic", trainer.use_heuristic_log)
# Save predicted confidence value
trainer.predicted_value_log.append([predicted_value])
self.write_to_log("predicted-value", trainer.predicted_value_log)
# Compute 3D position of pixel
print(
"Action: %s at (%d, %d, %d)"
% (
nonlocal_variables["primitive_action"],
nonlocal_variables["best_pix_ind"][0],
nonlocal_variables["best_pix_ind"][1],
nonlocal_variables["best_pix_ind"][2],
)
)
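# convert the rotation bin index to a gripper angle in radians and the pixel to a 3D position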
best_rotation_angle = np.deg2rad(
nonlocal_variables["best_pix_ind"][0]
* (360.0 / trainer.model.num_rotations)
)
best_pix_x = nonlocal_variables["best_pix_ind"][1]
best_pix_y = nonlocal_variables["best_pix_ind"][2]
primitive_position = [
best_pix_x * heightmap_resolution + workspace_limits[0][0],
best_pix_y * heightmap_resolution + workspace_limits[1][0],
valid_depth_heightmap[best_pix_x][best_pix_y] + workspace_limits[2][0],
]
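# Pixel indices map to world coordinates through the heightmap resolution and
# the workspace origin; z is taken from the observed height at that pixel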
# If pushing, adjust start position, and make sure z value is safe and not too low
# or nonlocal_variables['primitive_action'] == 'place':
if nonlocal_variables["primitive_action"] == "push":
# safe_kernel_width = GRIPPER_PUSH_RADIUS_PIXEL
# local_region = valid_depth_heightmap[
# max(best_pix_x - safe_kernel_width, 0) : min(
# best_pix_x + safe_kernel_width + 1, valid_depth_heightmap.shape[0]
# ),
# max(best_pix_y - safe_kernel_width, 0) : min(
# best_pix_y + safe_kernel_width + 1, valid_depth_heightmap.shape[1]
# ),
# ]
# if local_region.size == 0:
# safe_z_position = 0.01
# else:
# safe_z_position = np.max(local_region) + 0.01
safe_z_position = 0.01
primitive_position[2] = safe_z_position
# Save executed primitive
if nonlocal_variables["primitive_action"] == "push":
trainer.executed_action_log.append(
[
0,
nonlocal_variables["best_pix_ind"][0],
nonlocal_variables["best_pix_ind"][1],
nonlocal_variables["best_pix_ind"][2],
]
) # 0 - push
elif nonlocal_variables["primitive_action"] == "grasp":
trainer.executed_action_log.append(
[
1,
nonlocal_variables["best_pix_ind"][0],
nonlocal_variables["best_pix_ind"][1],
nonlocal_variables["best_pix_ind"][2],
]
) # 1 - grasp
self.write_to_log("executed-action", trainer.executed_action_log)
# Visualize executed primitive, and affordances
if save_visualizations:
if not grasp_only:
push_pred_vis = trainer.get_prediction_vis(
push_predictions,
color_heightmap,
nonlocal_variables["best_pix_ind"],
)
self.save_visualizations(trainer.iteration, push_pred_vis, "push")
cv2.imwrite("visualization.push.png", push_pred_vis)
grasp_pred_vis = trainer.get_prediction_vis(
grasp_predictions, color_heightmap, nonlocal_variables["best_pix_ind"]
)
self.save_visualizations(trainer.iteration, grasp_pred_vis, "grasp")
cv2.imwrite("visualization.grasp.png", grasp_pred_vis)
# Initialize variables that influence reward
nonlocal_variables["push_success"] = False
nonlocal_variables["grasp_success"] = False
# Execute primitive
if nonlocal_variables["primitive_action"] == "push":
# primitive_position_end = [
# primitive_position[0] + PUSH_DISTANCE * np.cos(-best_rotation_angle),
# primitive_position[1] + PUSH_DISTANCE * np.sin(-best_rotation_angle),
# primitive_position[2],
# ]
if is_dipn:
primitive_position_end = [
primitive_position[0]
+ PUSH_DISTANCE * np.cos(-best_rotation_angle),
primitive_position[1]
+ PUSH_DISTANCE * np.sin(-best_rotation_angle),
primitive_position[2],
]
else:
primitive_position_end = [
primitive_position[0]
+ PUSH_DISTANCE * np.cos(-best_rotation_angle - 180),
primitive_position[1]
+ PUSH_DISTANCE * np.sin(-best_rotation_angle - 180),
primitive_position[2],
]
# Sim and real use the same push interface
nonlocal_variables["push_success"] = env.push(
primitive_position, primitive_position_end
)
print("Push successful: %r" % (nonlocal_variables["push_success"]))
elif nonlocal_variables["primitive_action"] == "grasp":
if not is_real:
nonlocal_variables["grasp_success"] = env.grasp(
primitive_position, best_rotation_angle
)
self.remove_objects(env)
else:
nonlocal_variables["grasp_success"] = env.grasp(
primitive_position, best_rotation_angle
)
print("Grasp successful: %r" % (nonlocal_variables["grasp_success"]))
nonlocal_variables["primitive_position"] = (best_pix_x, best_pix_y)
nonlocal_variables["executing_action"] = False
time.sleep(0.01)
action_thread = Thread(target=process_actions)
action_thread.daemon = True
action_thread.start()
exit_called = False
# Start main training/testing loop
if not is_real:
env.reset()
if test_preset_cases:
self.add_object_push_from_file(env, test_preset_file)
elif is_baseline:
hard_cases = glob.glob("hard-cases/*.txt")
self.add_object_push_from_file(env, hard_cases[trainer.iteration])
else:
self.add_objects(env, num_obj, workspace_limits)
while True:
print(
"\n%s iteration: %d" % ("Testing" if is_testing else "Training", trainer.iteration)
)
iteration_time_0 = time.time()
# Get latest RGB-D image
if not is_real:
color_heightmap, depth_heightmap, _ = utils.get_true_heightmap(env)
else:
color_heightmap, depth_heightmap = utils.get_real_heightmap(env)
valid_depth_heightmap = depth_heightmap.copy()
valid_depth_heightmap[np.isnan(valid_depth_heightmap)] = 0
valid_depth_heightmap = valid_depth_heightmap.astype(np.float32)
# Save RGB-D heightmaps
self.save_heightmaps(trainer.iteration, color_heightmap, valid_depth_heightmap, 0)
# Reset simulation or pause real-world training if table is empty
stuff_count = np.zeros(valid_depth_heightmap.shape)
stuff_count[valid_depth_heightmap > self.depth_min] = 1
print("Stuff on the table (value: %d)" % (np.sum(stuff_count)))
empty_threshold = 200
if is_testing and not is_real:
empty_threshold = 10
if is_baseline or has_target:
temp = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
print(f"Target on the table (value: {np.sum(mask) / 255})")
if np.sum(mask) / 255 < 50:
stuff_count = 0
if np.sum(stuff_count) < empty_threshold or (
no_change_count[0] + no_change_count[1] > 10
):
no_change_count = [0, 0]
print(
"Not enough objects in view (value: %d)! Repositioning objects."
% (np.sum(stuff_count))
)
if not is_real:
env.reset()
if is_baseline:
if np.random.uniform() < 0.2:
self.add_objects(env, np.random.randint(5) + 1, workspace_limits)
else:
self.add_object_push_from_file(env, hard_cases[trainer.iteration])
elif test_preset_cases:
self.add_object_push_from_file(env, test_preset_file)
else:
self.add_objects(env, num_obj, workspace_limits)
else:
print(
"Not enough stuff on the table (value: %d)! Flipping over bin of objects..."
% (np.sum(stuff_count))
)
input("Please maually reset scene")
if is_testing: # If at end of test run, re-load original weights (before test run)
trainer.loss_list = []
trainer.optimizer.zero_grad()
trainer.model.load_state_dict(torch.load(snapshot_file)["model"])
trainer.clearance_log.append([trainer.iteration])
self.write_to_log("clearance", trainer.clearance_log)
if is_testing and len(trainer.clearance_log) >= max_test_trials:
exit_called = True # Exit after training thread (backprop and saving labels)
if "prev_color_img" in locals():
# Detect changes
depth_diff = abs(depth_heightmap - prev_depth_heightmap)
depth_diff[np.isnan(depth_diff)] = 0
depth_diff[depth_diff > 0.3] = 0
depth_diff[depth_diff < 0.01] = 0
depth_diff[depth_diff > 0] = 1
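# depth_diff is binarized: changes below 1 cm are treated as sensor noise and
# changes above 30 cm as depth artifacts; the surviving pixels are counted
# against change_threshold to decide whether the last action moved anything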
change_threshold = 300
change_value = np.sum(depth_diff)
if prev_primitive_action == "push":
change_detected = change_value > change_threshold
elif prev_primitive_action == "grasp":
change_detected = prev_grasp_success
print("Change detected: %r (value: %d)" % (change_detected, change_value))
if change_detected:
if prev_primitive_action == "push":
no_change_count[0] = 0
elif prev_primitive_action == "grasp":
no_change_count[1] = 0
else:
if prev_primitive_action == "push":
no_change_count[0] += 1
elif prev_primitive_action == "grasp":
no_change_count[1] += 1
# Compute training labels
if is_baseline:
label_value, prev_reward_value = trainer.get_label_value_base(
prev_primitive_action,
prev_push_success,
prev_grasp_success,
change_detected,
prev_push_predictions,
prev_grasp_predictions,
color_heightmap,
valid_depth_heightmap,
use_push=(not grasp_only),
target=prev_primitive_position,
prev_color_img=prev_color_heightmap,
prev_depth_img=prev_valid_depth_heightmap,
)
else:
label_value, prev_reward_value = trainer.get_label_value(
prev_primitive_action,
prev_push_success,
prev_grasp_success,
change_detected,
prev_push_predictions,
prev_grasp_predictions,
color_heightmap,
valid_depth_heightmap,
prev_valid_depth_heightmap,
use_push=(not grasp_only),
)
trainer.label_value_log.append([label_value])
self.write_to_log("label-value", trainer.label_value_log)
trainer.reward_value_log.append([prev_reward_value])
self.write_to_log("reward-value", trainer.reward_value_log)
trainer.backprop(
prev_color_heightmap,
prev_valid_depth_heightmap,
prev_primitive_action,
prev_best_pix_ind,
label_value,
use_push=(not grasp_only),
)
del prev_color_img
nonlocal_variables["push_success"] = False
nonlocal_variables["grasp_success"] = False
nonlocal_variables["primitive_action"] = None
nonlocal_variables["best_pix_ind"] = None
continue
if not exit_called:
# Run forward pass with network to get affordances
with torch.no_grad():
push_predictions, grasp_predictions = trainer.forward(
color_heightmap,
valid_depth_heightmap,
is_volatile=True,
use_push=(not grasp_only),
)
nonlocal_variables["push_predictions"] = push_predictions
nonlocal_variables["grasp_predictions"] = grasp_predictions
# Execute best primitive action on robot in another thread
nonlocal_variables["executing_action"] = True
# Run training iteration in current thread (aka training thread)
if "prev_color_img" in locals():
# Detect changes
depth_diff = abs(depth_heightmap - prev_depth_heightmap)
depth_diff[np.isnan(depth_diff)] = 0
depth_diff[depth_diff > 0.3] = 0
depth_diff[depth_diff < 0.01] = 0
depth_diff[depth_diff > 0] = 1
change_threshold = 300
change_value = np.sum(depth_diff)
if prev_primitive_action == "push":
change_detected = change_value > change_threshold
elif prev_primitive_action == "grasp":
change_detected = prev_grasp_success
print("Change detected: %r (value: %d)" % (change_detected, change_value))
if change_detected:
if prev_primitive_action == "push":
no_change_count[0] = 0
elif prev_primitive_action == "grasp":
no_change_count[1] = 0
else:
if prev_primitive_action == "push":
no_change_count[0] += 1
elif prev_primitive_action == "grasp":
no_change_count[1] += 1
# Compute training labels
if is_baseline:
label_value, prev_reward_value = trainer.get_label_value_base(
prev_primitive_action,
prev_push_success,
prev_grasp_success,
change_detected,
prev_push_predictions,
prev_grasp_predictions,
color_heightmap,
valid_depth_heightmap,
use_push=(not grasp_only),
target=prev_primitive_position,
prev_color_img=prev_color_heightmap,
prev_depth_img=prev_valid_depth_heightmap,
)
else:
label_value, prev_reward_value = trainer.get_label_value(
prev_primitive_action,
prev_push_success,
prev_grasp_success,
change_detected,
prev_push_predictions,
prev_grasp_predictions,
color_heightmap,
valid_depth_heightmap,
prev_valid_depth_heightmap,
use_push=(not grasp_only),
)
trainer.label_value_log.append([label_value])
self.write_to_log("label-value", trainer.label_value_log)
trainer.reward_value_log.append([prev_reward_value])
self.write_to_log("reward-value", trainer.reward_value_log)
# Backpropagate
trainer.backprop(
prev_color_heightmap,
prev_valid_depth_heightmap,
prev_primitive_action,
prev_best_pix_ind,
label_value,
use_push=(not grasp_only),
)
# Adjust exploration probability
if not is_testing:
if is_baseline:
explore_prob = (
max(0.5 * np.power(0.9996, trainer.iteration), 0.1)
if explore_rate_decay
else 0.5
)
else:
explore_prob = (
max(0.5 * np.power(0.9998, trainer.iteration), 0.1)
if explore_rate_decay
else 0.5
)
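# For reference: with decay enabled, 0.5 * 0.9998**t starts at 0.5, falls to
# roughly 0.41 by iteration 1000, and reaches the 0.1 floor around iteration
# 8000 (the baseline's 0.9996 schedule decays about twice as fast)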
# Do sampling for experience replay
if experience_replay and not is_testing:
sample_primitive_action = prev_primitive_action
if sample_primitive_action == "push":
sample_primitive_action_id = 0
if method == "reinforcement":
sample_reward_value = 0 if prev_reward_value > 0 else 0.1
elif sample_primitive_action == "grasp":
sample_primitive_action_id = 1
if method == "reinforcement":
if is_baseline:
sample_reward_value = 0 if prev_reward_value == 10 else 10
else:
sample_reward_value = 0 if prev_reward_value == 1 else 1
# Get samples of the same primitive but with different results
if sample_primitive_action == "push" and sample_reward_value == 0.1:
# sample_ind = np.argwhere(np.asarray(trainer.executed_action_log)[:trainer.iteration - 1, 0] == sample_primitive_action_id)
sample_ind = np.argwhere(
np.logical_and(
np.asarray(trainer.reward_value_log)[: trainer.iteration - 1, 0]
> sample_reward_value,
np.asarray(trainer.executed_action_log)[: trainer.iteration - 1, 0]
== sample_primitive_action_id,
)
)
else:
sample_ind = np.argwhere(
np.logical_and(
np.asarray(trainer.reward_value_log)[: trainer.iteration - 1, 0]
== sample_reward_value,
np.asarray(trainer.executed_action_log)[: trainer.iteration - 1, 0]
== sample_primitive_action_id,
)
)
# don't care the reward
# sample_ind = np.argwhere(np.asarray(trainer.executed_action_log)[:trainer.iteration - 1, 0] == sample_primitive_action_id)
if sample_ind.size > 0:
# Find sample with highest surprise value
if method == "reinforcement":
sample_surprise_values = np.abs(
np.asarray(trainer.predicted_value_log)[sample_ind[:, 0]]
- np.asarray(trainer.label_value_log)[sample_ind[:, 0]]
)
sorted_surprise_ind = np.argsort(sample_surprise_values[:, 0])
sorted_sample_ind = sample_ind[sorted_surprise_ind, 0]
pow_law_exp = 2
rand_sample_ind = int(
np.round(np.random.power(pow_law_exp, 1) * (sample_ind.size - 1))
)
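# np.random.power(a, 1) samples from p(x) = a * x**(a - 1) on [0, 1), so with
# pow_law_exp = 2 the draw is biased toward 1, i.e. toward the end of the
# surprise-sorted index list: high-surprise samples are replayed more often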
sample_iteration = sorted_sample_ind[rand_sample_ind]
print(
"Experience replay: iteration %d (surprise value: %f)"
% (
sample_iteration,
sample_surprise_values[sorted_surprise_ind[rand_sample_ind]],
)
)
# Load sample RGB-D heightmap
sample_color_heightmap = cv2.imread(
os.path.join(
self.color_heightmaps_directory,
"%06d.0.color.png" % (sample_iteration),
)
)
sample_color_heightmap = cv2.cvtColor(
sample_color_heightmap, cv2.COLOR_BGR2RGB
)
sample_depth_heightmap = cv2.imread(
os.path.join(
self.depth_heightmaps_directory,
"%06d.0.depth.png" % (sample_iteration),
),
-1,
)
sample_depth_heightmap = sample_depth_heightmap.astype(np.float32) / 100000
# Compute forward pass with sample
with torch.no_grad():
sample_push_predictions, sample_grasp_predictions = trainer.forward(
sample_color_heightmap,
sample_depth_heightmap,
is_volatile=True,
use_push=(not grasp_only),
)
# Load next sample RGB-D heightmap
next_sample_color_heightmap = cv2.imread(
os.path.join(
self.color_heightmaps_directory,
"%06d.0.color.png" % (sample_iteration + 1),
)
)
next_sample_color_heightmap = cv2.cvtColor(
next_sample_color_heightmap, cv2.COLOR_BGR2RGB
)
next_sample_depth_heightmap = cv2.imread(
os.path.join(
self.depth_heightmaps_directory,
"%06d.0.depth.png" % (sample_iteration + 1),
),
-1,
)
next_sample_depth_heightmap = (
next_sample_depth_heightmap.astype(np.float32) / 100000
)
sample_reward_value = np.asarray(trainer.reward_value_log)[
sample_iteration, 0
]
sample_push_success = sample_reward_value > 0
sample_grasp_success = sample_reward_value == 1
sample_change_detected = sample_push_success
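# Logged rewards double as success flags when replaying: any positive reward
# implies the push changed the scene, and a reward of exactly 1 marks a
# successful grasp under the default (non-baseline) reward scheme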
if is_baseline:
sample_primitive_position = (
np.asarray(trainer.executed_action_log)[sample_iteration, 2:4]
).astype(int)
(
new_sample_label_value,
new_sample_reward_value,
) = trainer.get_label_value_base(
sample_primitive_action,
sample_push_success,
sample_grasp_success,
sample_change_detected,
sample_push_predictions,
sample_grasp_predictions,
next_sample_color_heightmap,
next_sample_depth_heightmap,
use_push=(not grasp_only),
target=sample_primitive_position,
prev_color_img=sample_color_heightmap,
prev_depth_img=sample_depth_heightmap,
)
else:
(
new_sample_label_value,
new_sample_reward_value,
) = trainer.get_label_value(
sample_primitive_action,
sample_push_success,
sample_grasp_success,
sample_change_detected,
sample_push_predictions,
sample_grasp_predictions,
next_sample_color_heightmap,
next_sample_depth_heightmap,
sample_depth_heightmap,
use_push=(not grasp_only),
)
# Get labels for sample and backpropagate
sample_best_pix_ind = (
np.asarray(trainer.executed_action_log)[sample_iteration, 1:4]
).astype(int)
# trainer.backprop(sample_color_heightmap, sample_depth_heightmap, sample_primitive_action, sample_best_pix_ind, trainer.label_value_log[sample_iteration])
trainer.backprop(
sample_color_heightmap,
sample_depth_heightmap,
sample_primitive_action,
sample_best_pix_ind,
new_sample_label_value,
use_push=(not grasp_only),
)
# Recompute prediction value and label for replay buffer
if sample_primitive_action == "push":
print(
"Surprise value from %f to %f"
% (
abs(
trainer.predicted_value_log[sample_iteration][0]
- trainer.label_value_log[sample_iteration][0]
),
abs(
np.max(
sample_push_predictions
- trainer.label_value_log[sample_iteration][0]
)
),
)
)
trainer.predicted_value_log[sample_iteration] = [
np.max(sample_push_predictions)
]
trainer.label_value_log[sample_iteration] = [new_sample_label_value]
trainer.reward_value_log[sample_iteration] = [new_sample_reward_value]
self.write_to_log("predicted-value", trainer.predicted_value_log)
self.write_to_log("reward-value", trainer.reward_value_log)
self.write_to_log("label-value", trainer.label_value_log)
elif sample_primitive_action == "grasp":
print(
"Surprise value from %f to %f"
% (
abs(
trainer.predicted_value_log[sample_iteration][0]
- trainer.label_value_log[sample_iteration][0]
),
abs(
np.max(
sample_grasp_predictions
- trainer.label_value_log[sample_iteration][0]
)
),
)
)
trainer.predicted_value_log[sample_iteration] = [
np.max(sample_grasp_predictions)
]
trainer.label_value_log[sample_iteration] = [new_sample_label_value]
trainer.reward_value_log[sample_iteration] = [new_sample_reward_value]
self.write_to_log("predicted-value", trainer.predicted_value_log)
self.write_to_log("reward-value", trainer.reward_value_log)
self.write_to_log("label-value", trainer.label_value_log)
print(
"Replay update: %f, %f, %f"
% (
trainer.predicted_value_log[sample_iteration][0],
trainer.label_value_log[sample_iteration][0],
trainer.reward_value_log[sample_iteration][0],
)
)
else:
print("Not enough prior training samples. Skipping experience replay.")
# Save model snapshot
if not is_testing:
# self.save_backup_model(trainer.model, method)
if trainer.iteration % 50 == 0:
self.save_model(trainer.iteration, trainer.model, method)
if trainer.use_cuda:
trainer.model = trainer.model.cuda()
# Sync both action thread and training thread
while nonlocal_variables["executing_action"]:
time.sleep(0.01)
if exit_called:
break
# Save information for next training step
prev_color_img = color_heightmap.copy()
prev_color_heightmap = color_heightmap.copy()
prev_depth_heightmap = depth_heightmap.copy()
prev_valid_depth_heightmap = valid_depth_heightmap.copy()
prev_push_success = nonlocal_variables["push_success"]
prev_grasp_success = nonlocal_variables["grasp_success"]
prev_primitive_action = nonlocal_variables["primitive_action"]
prev_primitive_position = nonlocal_variables["primitive_position"]
if grasp_only:
prev_push_predictions = 0
else:
prev_push_predictions = nonlocal_variables["push_predictions"].copy()
prev_grasp_predictions = nonlocal_variables["grasp_predictions"].copy()
prev_best_pix_ind = nonlocal_variables["best_pix_ind"]
trainer.iteration += 1
iteration_time_1 = time.time()
print("Time elapsed: %f" % (iteration_time_1 - iteration_time_0))
self.write_to_log("batch-loss", trainer.loss_log)
if trainer.iteration > args.end_iter:
exit_called = True
def post_train(args):
import log_utils
from collections import deque
"""
For grasp-only training, this offline-training can be used train the network as supervised learning. But, we didn't use it.
"""
# TODO only work for sim now
# ------------- Algorithm options -------------
method = "reinforcement"
# ------ Pre-loading and logging options ------
load_snapshot = args.load_snapshot # Load pre-trained snapshot of model?
snapshot_file = os.path.abspath(args.snapshot_file) if load_snapshot else None
continue_logging = args.continue_logging # Continue logging from previous session
logging_directory = (
os.path.abspath(args.logging_directory) if continue_logging else os.path.abspath("logs")
)
logger = log_utils.setup_logger(logging_directory)
loss_queue = deque(maxlen=500)
# Initialize trainer
trainer = Trainer(method, False, 0, False, load_snapshot, snapshot_file, False,)
# Find last executed iteration of pre-loaded log, and load execution info and RL variables
if continue_logging:
trainer.preload(os.path.join(logging_directory, "transitions"))
sample_ind = np.argwhere(
np.asarray(trainer.executed_action_log)[: trainer.iteration - 1, 0] == 1
)
sample_primitive_action = "grasp"
rng = np.random.default_rng()
for i in range(100000):
# rng.integers treats high as exclusive, so sample over the full index range
rand_sample_ind = rng.integers(low=0, high=len(sample_ind))
sample_iteration = sample_ind[rand_sample_ind][0]
# Load sample RGB-D heightmap
sample_color_heightmap = cv2.imread(
os.path.join(
logging_directory,
"data",
"color-heightmaps",
"%06d.0.color.png" % (sample_iteration),
)
)
sample_color_heightmap = cv2.cvtColor(sample_color_heightmap, cv2.COLOR_BGR2RGB)
sample_depth_heightmap = cv2.imread(
os.path.join(
logging_directory,
"data",
"depth-heightmaps",
"%06d.0.depth.png" % (sample_iteration),
),
-1,
)
sample_depth_heightmap = sample_depth_heightmap.astype(np.float32) / 100000
# Get labels for sample and backpropagate
sample_best_pix_ind = (
np.asarray(trainer.executed_action_log)[sample_iteration, 1:4]
).astype(int)
batch_loss = trainer.backprop(
sample_color_heightmap,
sample_depth_heightmap,
sample_primitive_action,
sample_best_pix_ind,
trainer.label_value_log[sample_iteration][0],
)
if batch_loss != -1:
loss_queue.append(batch_loss)
if i % 100 == 0:
logger.info(f"Iteration {i}: mean {np.mean(deque)}, median {np.median(deque)}")
if i % 500 == 0:
torch.save(
{"model": trainer.model.state_dict()},
os.path.join(
logging_directory, "models", "snapshot-post-%06d.%s.pth" % (i, "reinforcement")
),
)
print("Saved at iteration %f" % (i))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--start_iter",
dest="start_iter",
type=int,
action="store",
default=0,
help="index of start iteration",
)
parser.add_argument(
"--end_iter",
dest="end_iter",
type=int,
action="store",
default=20000,
help="index of end iteration",
)
parser.add_argument(
"--num_obj",
dest="num_obj",
type=int,
action="store",
default=10,
help="number of objects to add to simulation",
)
parser.add_argument(
"--random_seed",
dest="random_seed",
type=int,
action="store",
default=123,
help="random seed for simulation and neural net initialization",
)
parser.add_argument(
"--experience_replay",
dest="experience_replay",
action="store_true",
default=False,
help="use prioritized experience replay?",
)
parser.add_argument(
"--explore_rate_decay", dest="explore_rate_decay", action="store_true", default=False
)
parser.add_argument("--grasp_only", dest="grasp_only", action="store_true", default=False)
parser.add_argument("--is_testing", dest="is_testing", action="store_true", default=False)
parser.add_argument("--is_dipn", dest="is_dipn", action="store_true", default=False)
parser.add_argument("--has_target", dest="has_target", action="store_true", default=False)
parser.add_argument("--is_grasp_explore", action="store_true", default=False)
parser.add_argument("--is_baseline", dest="is_baseline", action="store_true", default=False)
parser.add_argument(
"--max_test_trials",
dest="max_test_trials",
type=int,
action="store",
default=30,
help="maximum number of test runs per case/scenario",
)
parser.add_argument(
"--test_preset_cases", dest="test_preset_cases", action="store_true", default=False
)
parser.add_argument(
"--test_preset_file", dest="test_preset_file", action="store", default="test-10-obj-01.txt"
)
parser.add_argument(
"--load_snapshot",
dest="load_snapshot",
action="store_true",
default=False,
help="load pre-trained snapshot of model?",
)
parser.add_argument(
"--push_rewards",
dest="push_rewards",
action="store_true",
default=False,
help="use immediate rewards (from change detection) for pushing?",
)
parser.add_argument(
"--future_reward_discount",
dest="future_reward_discount",
type=float,
action="store",
default=0.5,
)
parser.add_argument("--snapshot_file", dest="snapshot_file", action="store")
parser.add_argument(
"--continue_logging",
dest="continue_logging",
action="store_true",
default=False,
help="continue logging from previous session?",
)
parser.add_argument("--logging_directory", dest="logging_directory", action="store")
parser.add_argument(
"--save_visualizations",
dest="save_visualizations",
action="store_true",
default=False,
help="save visualizations of FCN predictions?",
)
args = parser.parse_args()
if not IS_REAL:
env = Environment(gui=True)
runner = GraspDataCollectorTrainer(args)
runner.main(args, env)
else:
env = EnvironmentReal()
runner = GraspDataCollectorTrainer(args)
runner.main(args, env)
# post_train(args)
| 67,276 | 46.646601 | 179 |
py
|
more
|
more-main/utils.py
|
import math
import numpy as np
import pybullet as p
import cv2
def get_heightmap(points, colors, bounds, pixel_size):
"""Get top-down (z-axis) orthographic heightmap image from 3D pointcloud.
Args:
points: HxWx3 float array of 3D points in world coordinates.
colors: HxWx3 uint8 array of values in range 0-255 aligned with points.
bounds: 3x2 float array of values (rows: X,Y,Z; columns: min,max) defining
region in 3D space to generate heightmap in world coordinates.
pixel_size: float defining size of each pixel in meters.
Returns:
heightmap: HxW float array of height (from lower z-bound) in meters.
colormap: HxWx3 uint8 array of backprojected color aligned with heightmap.
"""
width = int(np.round((bounds[0, 1] - bounds[0, 0]) / pixel_size))
height = int(np.round((bounds[1, 1] - bounds[1, 0]) / pixel_size))
heightmap = np.zeros((height, width), dtype=np.float32)
colormap = np.zeros((height, width, colors.shape[-1]), dtype=np.uint8)
# Filter out 3D points that are outside of the predefined bounds.
ix = (points[Ellipsis, 0] >= bounds[0, 0]) & (points[Ellipsis, 0] < bounds[0, 1])
iy = (points[Ellipsis, 1] >= bounds[1, 0]) & (points[Ellipsis, 1] < bounds[1, 1])
iz = (points[Ellipsis, 2] >= bounds[2, 0]) & (points[Ellipsis, 2] < bounds[2, 1])
valid = ix & iy & iz
points = points[valid]
colors = colors[valid]
# Sort 3D points by z-value, which works with array assignment to simulate
# z-buffering for rendering the heightmap image.
iz = np.argsort(points[:, -1])
points, colors = points[iz], colors[iz]
px = np.int32(np.floor((points[:, 0] - bounds[0, 0]) / pixel_size))
py = np.int32(np.floor((points[:, 1] - bounds[1, 0]) / pixel_size))
px = np.clip(px, 0, width - 1)
py = np.clip(py, 0, height - 1)
heightmap[px, py] = points[:, 2] - bounds[2, 0]
for c in range(colors.shape[-1]):
colormap[px, py, c] = colors[:, c]
return heightmap, colormap
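# A minimal usage sketch (hypothetical bounds/resolution): project a point
# cloud into a 0.448 m x 0.448 m workspace at 2 mm per pixel:
# bounds = np.array([[-0.224, 0.224], [-0.224, 0.224], [0.0, 0.3]])
# heightmap, colormap = get_heightmap(points, colors, bounds, pixel_size=0.002)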
def get_pointcloud(depth, intrinsics):
"""Get 3D pointcloud from perspective depth image.
Args:
depth: HxW float array of perspective depth in meters.
intrinsics: 3x3 float array of camera intrinsics matrix.
Returns:
points: HxWx3 float array of 3D points in camera coordinates.
"""
height, width = depth.shape
xlin = np.linspace(0, width - 1, width)
ylin = np.linspace(0, height - 1, height)
px, py = np.meshgrid(xlin, ylin)
px = (px - intrinsics[0, 2]) * (depth / intrinsics[0, 0])
py = (py - intrinsics[1, 2]) * (depth / intrinsics[1, 1])
points = np.float32([px, py, depth]).transpose(1, 2, 0)
return points
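# Back-projection follows the pinhole model: x = (u - cx) * z / fx and
# y = (v - cy) * z / fy, with intrinsics = [[fx, 0, cx], [0, fy, cy], [0, 0, 1]]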
def transform_pointcloud(points, transform):
"""Apply rigid transformation to 3D pointcloud.
Args:
points: HxWx3 float array of 3D points in camera coordinates.
transform: 4x4 float array representing a rigid transformation matrix.
Returns:
points: HxWx3 float array of transformed 3D points.
"""
padding = ((0, 0), (0, 0), (0, 1))
homogen_points = np.pad(points.copy(), padding, "constant", constant_values=1)
for i in range(3):
points[Ellipsis, i] = np.sum(transform[i, :] * homogen_points, axis=-1)
return points
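# Example (hypothetical pose): build the 4x4 camera-to-world transform from a
# 3x3 rotation matrix R and a translation vector t before calling this helper:
# transform = np.eye(4); transform[:3, :3] = R; transform[:3, 3] = t
# points_world = transform_pointcloud(points_cam, transform)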
def reconstruct_heightmaps(color, depth, configs, bounds, pixel_size):
"""Reconstruct top-down heightmap views from multiple 3D pointclouds."""
heightmaps, colormaps = [], []
for color, depth, config in zip(color, depth, configs):
intrinsics = config["intrinsics"]
xyz = get_pointcloud(depth, intrinsics)
position = np.array(config["position"]).reshape(3, 1)
rotation = p.getMatrixFromQuaternion(config["rotation"])
rotation = np.array(rotation).reshape(3, 3)
transform = np.eye(4)
transform[:3, :] = np.hstack((rotation, position))
xyz = transform_pointcloud(xyz, transform)
heightmap, colormap = get_heightmap(xyz, color, bounds, pixel_size)
heightmaps.append(heightmap)
colormaps.append(colormap)
return heightmaps, colormaps
def get_fuse_heightmaps(obs, configs, bounds, pixel_size):
"""Reconstruct orthographic heightmaps with segmentation masks."""
heightmaps, colormaps = reconstruct_heightmaps(
obs["color"], obs["depth"], configs, bounds, pixel_size
)
colormaps = np.float32(colormaps)
heightmaps = np.float32(heightmaps)
# Fuse maps from different views.
valid = np.sum(colormaps, axis=3) > 0
repeat = np.sum(valid, axis=0)
repeat[repeat == 0] = 1
cmap = np.sum(colormaps, axis=0) / repeat[Ellipsis, None]
cmap = np.uint8(np.round(cmap))
hmap = np.max(heightmaps, axis=0) # Max to handle occlusions.
return cmap, hmap
def get_true_heightmap(env):
"""Get RGB-D orthographic heightmaps and segmentation masks in simulation."""
# Capture near-orthographic RGB-D images and segmentation masks.
color, depth, segm = env.render_camera(env.oracle_cams[0])
# Combine color with masks for faster processing.
color = np.concatenate((color, segm[Ellipsis, None]), axis=2)
# Reconstruct real orthographic projection from point clouds.
hmaps, cmaps = reconstruct_heightmaps(
[color], [depth], env.oracle_cams, env.bounds, env.pixel_size
)
# Split color back into color and masks.
cmap = np.uint8(cmaps)[0, Ellipsis, :3]
hmap = np.float32(hmaps)[0, Ellipsis]
mask = np.int32(cmaps)[0, Ellipsis, 3:].squeeze()
return cmap, hmap, mask
def get_heightmap_from_real_image(color, depth, segm, env):
# Combine color with masks for faster processing.
color = np.concatenate((color, segm[Ellipsis, None]), axis=2)
# Reconstruct real orthographic projection from point clouds.
hmaps, cmaps = reconstruct_heightmaps(
[color], [depth], env.camera.configs, env.bounds, env.pixel_size
)
# Split color back into color and masks.
cmap = np.uint8(cmaps)[0, Ellipsis, :3]
hmap = np.float32(hmaps)[0, Ellipsis]
mask = np.uint8(cmaps)[0, Ellipsis, 3:].squeeze()
return cmap, hmap, mask
def relabel_mask(env, mask_image):
assert env.target_obj_id != -1
num_obj = 50
for i in np.unique(mask_image):
if i == env.target_obj_id:
mask_image[mask_image == i] = 255
elif i in env.obj_ids["rigid"]:
mask_image[mask_image == i] = num_obj
num_obj += 10
else:
mask_image[mask_image == i] = 0
mask_image = mask_image.astype(np.uint8)
return mask_image
def relabel_mask_real(masks):
"""Assume the target object is labeled to 255"""
mask_image = np.zeros_like(masks[0], dtype=np.uint8)
num_obj = 50
for idx, mask in enumerate(masks):
if idx == 0:
mask_image[mask == 255] = 255
else:
mask_image[mask == 255] = num_obj
num_obj += 10
mask_image = mask_image.astype(np.uint8)
return mask_image
def get_real_heightmap(env):
"""Get RGB-D orthographic heightmaps in real world."""
color, depth = env.get_camera_data()
cv2.imwrite("temp.png", cv2.cvtColor(color, cv2.COLOR_RGB2BGR))
# Reconstruct real orthographic projection from point clouds.
hmaps, cmaps = reconstruct_heightmaps(
[color], [depth], env.camera.configs, env.bounds, env.pixel_size
)
# Split color back into color and masks.
cmap = np.uint8(cmaps)[0, Ellipsis]
hmap = np.float32(hmaps)[0, Ellipsis]
return cmap, hmap
def rotate(image, angle, is_mask=False):
"""Rotate an image using cv2, counterclockwise in degrees"""
(h, w) = image.shape[:2]
center = (w // 2, h // 2)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
if is_mask:
rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_NEAREST)
else:
rotated = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_LINEAR)
return rotated
def rotate_point(origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
"""
ox, oy = origin
px, py = point
qx = ox + math.cos(angle) * (px - ox) - math.sin(angle) * (py - oy)
qy = oy + math.sin(angle) * (px - ox) + math.cos(angle) * (py - oy)
return qx, qy
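# Example (hypothetical pixel): rotate the point (120, 80) by 90 degrees
# counterclockwise around the center of a 224x224 image:
# qx, qy = rotate_point((111.5, 111.5), (120, 80), math.radians(90))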
# Get rotation matrix from euler angles
def euler2rotm(theta):
R_x = np.array(
[
[1, 0, 0],
[0, math.cos(theta[0]), -math.sin(theta[0])],
[0, math.sin(theta[0]), math.cos(theta[0])],
]
)
R_y = np.array(
[
[math.cos(theta[1]), 0, math.sin(theta[1])],
[0, 1, 0],
[-math.sin(theta[1]), 0, math.cos(theta[1])],
]
)
R_z = np.array(
[
[math.cos(theta[2]), -math.sin(theta[2]), 0],
[math.sin(theta[2]), math.cos(theta[2]), 0],
[0, 0, 1],
]
)
R = np.dot(R_z, np.dot(R_y, R_x))
return R
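# Composition order is R = R_z @ R_y @ R_x, i.e. the X rotation is applied
# first (extrinsic X-Y-Z, a.k.a. roll-pitch-yaw, convention)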
# Checks if a matrix is a valid rotation matrix.
def isRotm(R):
Rt = np.transpose(R)
shouldBeIdentity = np.dot(Rt, R)
I = np.identity(3, dtype=R.dtype)
n = np.linalg.norm(I - shouldBeIdentity)
return n < 1e-6
# Calculates rotation matrix to euler angles
def rotm2euler(R):
assert isRotm(R)
sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
singular = sy < 1e-6
if not singular:
x = math.atan2(R[2, 1], R[2, 2])
y = math.atan2(-R[2, 0], sy)
z = math.atan2(R[1, 0], R[0, 0])
else:
x = math.atan2(-R[1, 2], R[1, 1])
y = math.atan2(-R[2, 0], sy)
z = 0
return np.array([x, y, z])
def angle2rotm(angle, axis, point=None):
# Copyright (c) 2006-2018, Christoph Gohlke
sina = math.sin(angle)
cosa = math.cos(angle)
axis = axis / np.linalg.norm(axis)
# Rotation matrix around unit vector
R = np.diag([cosa, cosa, cosa])
R += np.outer(axis, axis) * (1.0 - cosa)
axis *= sina
R += np.array(
[[0.0, -axis[2], axis[1]], [axis[2], 0.0, -axis[0]], [-axis[1], axis[0], 0.0]],
dtype=np.float32,
)
M = np.identity(4)
M[:3, :3] = R
if point is not None:
# Rotation not around origin
point = np.array(point[:3], dtype=np.float64, copy=False)
M[:3, 3] = point - np.dot(R, point)
return M
def rotm2angle(R):
# From: euclideanspace.com
epsilon = 0.01 # Margin to allow for rounding errors
epsilon2 = 0.1 # Margin to distinguish between 0 and 180 degrees
assert isRotm(R)
if (
(abs(R[0][1] - R[1][0]) < epsilon)
and (abs(R[0][2] - R[2][0]) < epsilon)
and (abs(R[1][2] - R[2][1]) < epsilon)
):
# Singularity found
# First check for identity matrix which must have +1 for all terms in leading diagonal and zero in other terms
if (
(abs(R[0][1] + R[1][0]) < epsilon2)
and (abs(R[0][2] + R[2][0]) < epsilon2)
and (abs(R[1][2] + R[2][1]) < epsilon2)
and (abs(R[0][0] + R[1][1] + R[2][2] - 3) < epsilon2)
):
# this singularity is identity matrix so angle = 0
return [0, 1, 0, 0] # zero angle, arbitrary axis
# Otherwise this singularity is angle = 180
angle = np.pi
xx = (R[0][0] + 1) / 2
yy = (R[1][1] + 1) / 2
zz = (R[2][2] + 1) / 2
xy = (R[0][1] + R[1][0]) / 4
xz = (R[0][2] + R[2][0]) / 4
yz = (R[1][2] + R[2][1]) / 4
if (xx > yy) and (xx > zz): # R[0][0] is the largest diagonal term
if xx < epsilon:
x = 0
y = 0.7071
z = 0.7071
else:
x = np.sqrt(xx)
y = xy / x
z = xz / x
elif yy > zz: # R[1][1] is the largest diagonal term
if yy < epsilon:
x = 0.7071
y = 0
z = 0.7071
else:
y = np.sqrt(yy)
x = xy / y
z = yz / y
else: # R[2][2] is the largest diagonal term so base result on this
if zz < epsilon:
x = 0.7071
y = 0.7071
z = 0
else:
z = np.sqrt(zz)
x = xz / z
y = yz / z
return [angle, x, y, z] # Return 180 deg rotation
# As we have reached here there are no singularities so we can handle normally
s = np.sqrt(
(R[2][1] - R[1][2]) * (R[2][1] - R[1][2])
+ (R[0][2] - R[2][0]) * (R[0][2] - R[2][0])
+ (R[1][0] - R[0][1]) * (R[1][0] - R[0][1])
) # used to normalise
if abs(s) < 0.001:
s = 1
# Prevent divide by zero; should not happen if the matrix is orthogonal and
# should be caught by the singularity test above, but it is left in just in case
angle = np.arccos((R[0][0] + R[1][1] + R[2][2] - 1) / 2)
x = (R[2][1] - R[1][2]) / s
y = (R[0][2] - R[2][0]) / s
z = (R[1][0] - R[0][1]) / s
return [angle, x, y, z]
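# A minimal self-check sketch (values are illustrative, not part of the
# pipeline): verifies that the rotation helpers above round-trip correctly.
if __name__ == "__main__":
theta = np.array([0.1, 0.2, 0.3])
R = euler2rotm(theta)
assert isRotm(R)
assert np.allclose(rotm2euler(R), theta)
M = angle2rotm(0.7, np.array([0.0, 0.0, 1.0]))
angle, x, y, z = rotm2angle(M[:3, :3])
assert abs(angle - 0.7) < 1e-3 and abs(z - 1.0) < 1e-3
print("rotation utility round-trip checks passed")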
| 13,108 | 33.049351 | 117 |
py
|
more
|
more-main/dataset.py
|
from torch.utils.data.sampler import Sampler
import os
import math
import re
import numpy as np
import torch
import torch.utils.data
import cv2
import imutils
from torchvision.transforms import functional as TF
from PIL import Image
import random
from constants import (
IMAGE_OBJ_CROP_SIZE,
IMAGE_SIZE,
WORKSPACE_LIMITS,
PIXEL_SIZE,
PUSH_Q,
GRASP_Q,
COLOR_MEAN,
COLOR_STD,
DEPTH_MEAN,
DEPTH_STD,
BINARY_IMAGE_MEAN,
BINARY_IMAGE_STD,
BINARY_OBJ_MEAN,
BINARY_OBJ_STD,
DEPTH_MIN,
PUSH_DISTANCE,
GRIPPER_PUSH_RADIUS_PIXEL,
GRIPPER_PUSH_RADIUS_SAFE_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
GRIPPER_GRASP_INNER_DISTANCE_PIXEL,
IMAGE_PAD_WIDTH,
PUSH_DISTANCE_PIXEL,
GRIPPER_GRASP_WIDTH_PIXEL,
NUM_ROTATION,
IMAGE_PAD_DIFF,
)
from math import atan2, cos, sin, sqrt, pi, degrees
import glob
import pandas as pd
import utils
class LifelongEvalDataset(torch.utils.data.Dataset):
"""For lifelong learning"""
def __init__(self, env, actions, mask_image, is_real=False):
# relabel
if is_real:
mask_image = utils.relabel_mask_real(mask_image)
else:
mask_image = utils.relabel_mask(env, mask_image)
# focus on target, so make one extra channel
target_mask_img = np.zeros_like(mask_image, dtype=np.uint8)
target_mask_img[mask_image == 255] = 255
mask_heightmap = np.dstack((target_mask_img, mask_image))
mask_heightmap_pad = np.pad(
mask_heightmap,
((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)),
"constant",
constant_values=0,
)
self.mask_heightmap_pad = mask_heightmap_pad
self.actions = actions
def __getitem__(self, idx):
action = self.actions[idx]
action_start = (action[0][1], action[0][0])
action_end = (action[1][1], action[1][0])
current = (
action_end[0] - action_start[0],
action_end[1] - action_start[1],
)
right = (1, 0)
dot = (
right[0] * current[0] + right[1] * current[1]
) # dot product between [x1, y1] and [x2, y2]
det = right[0] * current[1] - right[1] * current[0] # determinant
rot_angle = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)
rot_angle = math.degrees(rot_angle)
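# atan2(det, dot) yields the signed angle from `right` to the push direction
# in (-180, 180]; rotating the mask by this angle canonicalizes every push
# candidate to point along +x (left to right), matching the training data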
mask_heightmap_rotated = utils.rotate(self.mask_heightmap_pad, rot_angle, is_mask=True)
input_image = mask_heightmap_rotated.astype(float) / 255
input_image.shape = (
input_image.shape[0],
input_image.shape[1],
input_image.shape[2],
)
with torch.no_grad():
rot_angle = torch.tensor(rot_angle)
input_data = torch.from_numpy(input_image.astype(np.float32)).permute(2, 0, 1)
return rot_angle, input_data
def __len__(self):
return len(self.actions)
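# Usage sketch (env, actions, and mask_image are assumed to come from the
# caller): batch all push candidates through a DataLoader in one pass:
# dataset = LifelongEvalDataset(env, actions, mask_image)
# loader = torch.utils.data.DataLoader(dataset, batch_size=len(actions))
# rot_angles, inputs = next(iter(loader))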
class LifelongDataset(torch.utils.data.Dataset):
"""For lifelong learning"""
one_threshold = 0.9
half_threshold = 0.8
def __init__(self, root, ratio=1):
self.root = root
self.color_imgs = []
self.depth_imgs = []
self.mask_imgs = []
self.best_locates = []
self.labels = []
self.weights = []
self.kernel_collision = np.ones(
(GRIPPER_PUSH_RADIUS_PIXEL * 2, GRIPPER_GRASP_WIDTH_PIXEL), dtype=np.float32
)
self.kernel_right = np.zeros(
(
GRIPPER_PUSH_RADIUS_PIXEL * 2,
(PUSH_DISTANCE_PIXEL + round(GRIPPER_GRASP_WIDTH_PIXEL / 2)) * 2,
),
dtype=np.float32,
)
self.kernel_right[:, PUSH_DISTANCE_PIXEL + round(GRIPPER_GRASP_WIDTH_PIXEL / 2) :] = 1
self.kernel_erode = np.ones((IMAGE_OBJ_CROP_SIZE, IMAGE_OBJ_CROP_SIZE))
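# As used in transforms() below: kernel_collision zeroes labels where the
# gripper footprint would overlap an obstacle at the push start, and
# kernel_right zeroes labels with nothing to push within one push distance to
# the right (pushes are canonicalized to point left-to-right)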
sub_roots = glob.glob(f"{root}/*")
sub_roots = sorted(sub_roots, key=lambda r: r[-3:])
if ratio != 1:
step = int(1 / ratio)
sub_roots = [sr for idx, sr in enumerate(sub_roots) if idx % step == 0]
num_cases = 0
for sub_root in sub_roots:
if "runs" in sub_root:
continue
# load all image files, sorting them to ensure that they are aligned
color_imgs = list(sorted(glob.glob(os.path.join(sub_root, "mcts", "color", "*.color.png"))))
if len(color_imgs) == 0:
continue
depth_imgs = list(sorted(glob.glob(os.path.join(sub_root, "mcts", "depth", "*.depth.png"))))
masks_imgs = list(sorted(glob.glob(os.path.join(sub_root, "mcts", "mask", "*.mask.png"))))
records = pd.read_csv(os.path.join(sub_root, "mcts", "records.csv"))
right = (1, 0)
label_adjust = []
weight_adjust = []
current_label_idx = -1
for row in records.itertuples():
action = list(re.split(r"\D+", row.action))
action = [int(a) for a in action if a.isnumeric()]
# if it is invalid move
if np.any(np.array(action) > IMAGE_SIZE - 1) or np.any(np.array(action) < 0):
continue
action_start = (action[0], action[1])
action_end = (action[2], action[3])
current = (
action_end[0] - action_start[0],
action_end[1] - action_start[1],
)
dot = (
right[0] * current[0] + right[1] * current[1]
) # dot product between [x1, y1] and [x2, y2]
det = right[0] * current[1] - right[1] * current[0] # determinant
rot_angle = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)
rot_angle = math.degrees(rot_angle)
# if row.num_visits < 5:
# continue
self.best_locates.append(
[rot_angle, action_start[1], action_start[0], action_end[1], action_end[0]]
)
self.color_imgs.append(color_imgs[row.image_idx])
self.depth_imgs.append(depth_imgs[row.image_idx])
self.mask_imgs.append(masks_imgs[row.image_idx])
label = float(row.label)
self.labels.append(label)
weight = row.num_visits if row.num_visits < 50 else 50
self.weights.append(weight)
# 2 for the best, 1 for good, 0.5 for ok, others are 0
# if current_label_idx == -1:
# current_label_idx = row.image_idx
# label_adjust = [label]
# elif current_label_idx != row.image_idx:
# label_adjust = self.adjust(label_adjust)
# self.labels.extend(label_adjust)
# current_label_idx = row.image_idx
# label_adjust = [label]
# else:
# label_adjust.append(label)
# if len(label_adjust) > 0:
# label_adjust = self.adjust(label_adjust)
# self.labels.extend(label_adjust)
num_cases += 1
print(
sum(np.array(self.labels) >= 1),
sum(np.array(self.labels) >= 0.5),
sum(np.array(self.labels) < 0.5),
)
print(f"Total image used: {len(self.color_imgs)} and total test cases used: {num_cases}")
assert len(self.color_imgs) == len(self.labels)
def adjust_label(self, label_adjust):
label_adjust = np.array(label_adjust)
one_idx = np.where(label_adjust > np.quantile(label_adjust, LifelongDataset.one_threshold))
half_idx = np.where(
label_adjust > np.quantile(label_adjust, LifelongDataset.half_threshold)
)
mean_idx = np.where(label_adjust < np.mean(label_adjust[label_adjust != 2]))
best_idx = np.argmax(label_adjust)
label_adjust[:] = 0
label_adjust[half_idx] = 0.5
label_adjust[one_idx] = 1
label_adjust[mean_idx] = 0
label_adjust[best_idx] = 2
label_adjust = list(label_adjust)
return label_adjust
def adjust_weight(self, label, weight_adjust):
# Mirrors adjust_label, but writes the tiers into the visit-count weights
label = np.array(label)
weight_adjust = np.array(weight_adjust, dtype=np.float32)
one_idx = np.where(label > np.quantile(label, LifelongDataset.one_threshold))
half_idx = np.where(
label > np.quantile(label, LifelongDataset.half_threshold)
)
mean_idx = np.where(label < np.mean(label[label != 2]))
best_idx = np.argmax(label)
weight_adjust[:] = 0
weight_adjust[half_idx] = 0.5
weight_adjust[one_idx] = 1
weight_adjust[mean_idx] = 0
weight_adjust[best_idx] = 2
return list(weight_adjust)
def __getitem__(self, idx):
# color image input
# color_img = cv2.imread(self.color_imgs[idx])
# color_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2RGB)
# depth image input
# depth_img = cv2.imread(self.depth_imgs[idx], cv2.IMREAD_UNCHANGED)
# depth_img = depth_img.astype(np.float32) / 100000 # translate to meters
# mask image input
mask_img = cv2.imread(self.mask_imgs[idx], cv2.IMREAD_UNCHANGED)
# mask_id = np.unique(mask_img)
# diff = -50
# for mask_i in mask_id:
# if mask_i != 255 and mask_i != 0:
# mask_img[mask_img == mask_i] = mask_i + diff
# diff += 10
rot_angle = self.best_locates[idx][0]
action_start = (self.best_locates[idx][1], self.best_locates[idx][2])
label = self.labels[idx]
weight = self.weights[idx]
# weight = self.weights[idx] * 10 if label >= 1 else 1
# if self.weights[idx] > 10 or label >= 1:
# weight = self.weights[idx] * self.weights[idx] / 500 * 10
# else:
# weight = self.weights[idx] * self.weights[idx] / 500
# target image
target_img = np.zeros_like(mask_img, dtype=np.float32)
# target_img[
# self.best_locates[idx][1] - 3 : self.best_locates[idx][1] + 4,
# self.best_locates[idx][2] - 3 : self.best_locates[idx][2] + 4,
# ] = (self.labels[idx] / 4 if self.labels[idx] > 0 else 0)
# target_img[
# self.best_locates[idx][1] - 1 : self.best_locates[idx][1] + 2,
# self.best_locates[idx][2] - 1 : self.best_locates[idx][2] + 2,
# ] = (self.labels[idx] / 2 if self.labels[idx] > 0 else 0)
# target_img[self.best_locates[idx][1], self.best_locates[idx][2]] = self.labels[idx]
# weight
weight_img = np.zeros_like(mask_img, dtype=np.float32)
# weight_img[
# self.best_locates[idx][1] - 3 : self.best_locates[idx][1] + 4,
# self.best_locates[idx][2] - 3 : self.best_locates[idx][2] + 4,
# ] = 1
# weight = self.weights[idx]
# weight_img[self.best_locates[idx][1], self.best_locates[idx][2]] = (
# weight * weight / 500
# )
# weight_img[self.best_locates[idx][1], self.best_locates[idx][2]] = (
# 100 if self.labels[idx] > 0 else 1
# )
# Post-process, collision checking
# target_invalid = np.logical_and(weight_img > 0, depth_img > DEPTH_MIN)
# target_img[target_invalid] = 0
# weight_img[target_invalid] = 0.1
# target_invalid = cv2.filter2D(depth_img, -1, self.kernel_collision)
# target_img[(target_invalid > DEPTH_MIN)] = 0
# weight_img[(target_invalid > DEPTH_MIN)] = 0.1
# color_img_pil = Image.fromarray(color_img)
# depth_img_pil = Image.fromarray(depth_img)
# focus on target, so make one extra channel
target_mask_img = np.zeros_like(mask_img, dtype=np.uint8)
target_mask_img[mask_img == 255] = 255
mask_img = np.dstack((target_mask_img, mask_img))
# mask_img_pil = Image.fromarray(mask_img)
# target_img_pil = Image.fromarray(target_img)
# weight_img_pil = Image.fromarray(weight_img)
mask_img_pil = mask_img
target_img_pil = target_img
weight_img_pil = weight_img
(
mask_img_pil,
target_img_pil,
weight_img_pil,
best_loc,
) = self.transforms(
mask_img_pil, target_img_pil, weight_img_pil, rot_angle, action_start, label, weight
)
# return (
# color_img_pil,
# depth_img_pil,
# mask_img_pil,
# help_img_pil,
# target_img_pil,
# weight_img_pil,
# best_loc,
# )
return (
mask_img_pil,
target_img_pil,
weight_img_pil,
best_loc,
)
def __len__(self):
return len(self.color_imgs)
@torch.no_grad()
def transforms(
self,
mask_heightmap,
target_heightmap,
weight_heightmap,
rot_angle,
action_start,
label,
weight,
):
# Add extra padding (to handle rotations inside network)
# color_heightmap_pad = TF.pad(
# color_heightmap, IMAGE_PAD_WIDTH, fill=0, padding_mode="constant"
# )
# depth_heightmap_pad = TF.pad(
# depth_heightmap, IMAGE_PAD_WIDTH, fill=0, padding_mode="constant"
# )
# mask_heightmap_pad = TF.pad(
# mask_heightmap, IMAGE_PAD_WIDTH, fill=0, padding_mode="constant"
# )
# target_heightmap_pad = TF.pad(
# target_heightmap, IMAGE_PAD_WIDTH, fill=0, padding_mode="constant"
# )
# weight_heightmap_pad = TF.pad(
# weight_heightmap, IMAGE_PAD_WIDTH, fill=0, padding_mode="constant"
# )
mask_heightmap_pad = np.pad(
mask_heightmap, ((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)), "constant", constant_values=0
)
target_heightmap_pad = np.pad(
target_heightmap, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
weight_heightmap_pad = np.pad(
weight_heightmap, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
# color_heightmap_pad = TF.rotate(color_heightmap_pad, rot_angle)
# depth_heightmap_pad = TF.rotate(depth_heightmap_pad, rot_angle)
# mask_heightmap_pad = TF.rotate(mask_heightmap_pad, rot_angle)
mask_heightmap_pad = utils.rotate(mask_heightmap_pad, rot_angle, is_mask=True)
# target_heightmap_pad = TF.rotate(target_heightmap_pad, rot_angle, resample=PIL.Image.BILINEAR)
# weight_heightmap_pad = TF.rotate(weight_heightmap_pad, rot_angle, resample=PIL.Image.BILINEAR)
# color_heightmap_pad = np.array(color_heightmap_pad)
# depth_heightmap_pad = np.array(depth_heightmap_pad)
# mask_heightmap_pad = np.array(mask_heightmap_pad)
# target_heightmap_pad = np.array(target_heightmap_pad)
# weight_heightmap_pad = np.array(weight_heightmap_pad)
# help_heightmap_pad = np.zeros_like(color_heightmap_pad)
action_start = (action_start[0] + IMAGE_PAD_WIDTH, action_start[1] + IMAGE_PAD_WIDTH)
origin = target_heightmap_pad.shape
origin = ((origin[0] - 1) / 2, (origin[1] - 1) / 2)
new_action_start = utils.rotate_point(origin, action_start, math.radians(rot_angle))
new_action_start = (round(new_action_start[0]), round(new_action_start[1]))
best_loc = torch.tensor(new_action_start)
# Post-process, make single pixel larger
target_heightmap_pad[
best_loc[0] - 1 : best_loc[0] + 2, best_loc[1] - 1 : best_loc[1] + 2,
] = label if label > 0 else 0
target_heightmap_pad[best_loc[0], best_loc[1]] = label if label > 0 else 0
weight_heightmap_pad[
best_loc[0] - 1 : best_loc[0] + 2, best_loc[1] - 1 : best_loc[1] + 2,
] = weight * 0.5
weight_heightmap_pad[best_loc[0], best_loc[1]] = weight
# Post-process, collision
target_invalid = cv2.filter2D(mask_heightmap_pad[:, :, 1], -1, self.kernel_collision)
target_heightmap_pad[(target_invalid > 0)] = 0
weight_heightmap_pad[(target_invalid > 0)] = 0.001
# Post-process, point to right
target_invalid = cv2.filter2D(mask_heightmap_pad[:, :, 1], -1, self.kernel_right)
target_heightmap_pad[(target_invalid == 0)] = 0
weight_heightmap_pad[(target_invalid == 0)] = 0.0001
# if np.max(target_heightmap_pad) >= 1:
# cv2.imshow("weight", (weight_heightmap_pad * 200 + 20).astype(np.uint8))
# cv2.imshow(
# f"target-{np.max(target_heightmap_pad)}",
# (target_heightmap_pad * 100 + 20).astype(np.uint8),
# )
# cv2.imshow("mask", mask_heightmap_pad)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# Helper
# mask of target object
# temp = cv2.cvtColor(color_heightmap_pad, cv2.COLOR_RGB2HSV)
# mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
# # mask of clearance of target object
# target_erode = cv2.filter2D(mask, -1, self.kernel_erode)
# clearance = np.zeros_like(mask)
# clearance[
# np.logical_and(
# np.logical_and(target_erode > 0, mask == 0), depth_heightmap_pad < DEPTH_MIN
# )
# ] = 255
# cv2.imshow("mask", mask)
# cv2.imshow("clearance", clearance)
# cv2.imshow("action", action)
# cv2.imshow("color", color_heightmap_pad)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# To tensor
# color_heightmap_pad = TF.to_tensor(color_heightmap_pad)
# depth_heightmap_pad = TF.to_tensor(depth_heightmap_pad)
target_heightmap_pad = TF.to_tensor(target_heightmap_pad)
weight_heightmap_pad = TF.to_tensor(weight_heightmap_pad)
mask_heightmap_pad = TF.to_tensor(mask_heightmap_pad)
# mask = TF.to_tensor(mask)
# clearance = TF.to_tensor(clearance)
# Normalize
# color_heightmap_pad = TF.normalize(color_heightmap_pad, COLOR_MEAN, COLOR_STD, inplace=True)
# depth_heightmap_pad = TF.normalize(depth_heightmap_pad, DEPTH_MEAN, DEPTH_STD, inplace=True)
# mask = TF.normalize(mask, BINARY_IMAGE_MEAN[1], BINARY_IMAGE_STD[1], inplace=True)
# clearance = TF.normalize(clearance, BINARY_IMAGE_MEAN[1], BINARY_IMAGE_STD[1], inplace=True)
return (
mask_heightmap_pad,
target_heightmap_pad,
weight_heightmap_pad,
best_loc,
)
class SegmentationDataset(torch.utils.data.Dataset):
"""
Create segmentation dataset for training Mask R-CNN.
One uses pre-defined color range to separate objects (assume the color in one image is unique).
One directly reads masks.
"""
def __init__(self, root, transforms, is_real=False, background=None):
self.root = root
self.transforms = transforms
self.is_real = is_real
# load all image files, sorting them to ensure that they are aligned
self.color_imgs = list(sorted(os.listdir(os.path.join(root, "color-heightmaps"))))
self.depth_imgs = list(sorted(os.listdir(os.path.join(root, "depth-heightmaps"))))
self.masks = list(sorted(os.listdir(os.path.join(root, "masks"))))
self.background = background
if self.background is not None:
self.background = cv2.imread(background)
def __getitem__(self, idx):
# load images
color_path = os.path.join(self.root, "color-heightmaps", self.color_imgs[idx])
# depth_path = os.path.join(self.root, "depth-heightmaps", self.depth_imgs[idx])
# color image input
color_img = cv2.imread(color_path)
color_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2RGB)
mask_path = os.path.join(self.root, "masks", self.masks[idx])
mask_img = cv2.imread(mask_path, cv2.IMREAD_UNCHANGED)
if self.background is not None:
# random background
color_img = cv2.cvtColor(color_img, cv2.COLOR_RGB2BGR)
# background = cv2.resize(self.background, color_img.shape[:2], interpolation=cv2.INTER_AREA)
color_img[mask_img == 0, :] = self.background[mask_img == 0, :]
color_img = color_img.astype(np.int16)
for channel in range(color_img.shape[2]): # R, G, B
c_random = np.random.rand(1)
c_random *= 30
c_random -= 15
c_random = c_random.astype(np.int16)
color_img[mask_img == 0, channel] = color_img[mask_img == 0, channel] + c_random
color_img = np.clip(color_img, 0, 255)
color_img = color_img.astype(np.uint8)
color_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2RGB)
# get masks
masks = []
labels = []
if self.is_real:
gray = cv2.cvtColor(color_img, cv2.COLOR_RGB2GRAY)
gray = gray.astype(np.uint8)
blurred = cv2.medianBlur(gray, 5)
thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
for c in cnts:
if cv2.contourArea(c) > 100:
mask = np.zeros(color_img.shape[:2], np.uint8)
cv2.drawContours(mask, [c], -1, (1), -1)
masks.append(mask)
# cv2.imshow('mask' + self.color_imgs[idx], mask)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
else:
for ci in np.unique(mask_img):
if ci != 0:
mask = mask_img == ci
if np.sum(mask) > 100:
masks.append(mask)
# NOTE: assumes a single object type has fewer than 1000 instances, so the mask id's thousands digit encodes the label
labels.append(ci // 1000)
num_objs = len(masks)
if num_objs > 0:
masks = np.stack(masks, axis=0)
# get bounding box coordinates for each mask
boxes = []
for i in range(num_objs):
pos = np.where(masks[i])
xmin = np.min(pos[1])
xmax = np.max(pos[1])
ymin = np.min(pos[0])
ymax = np.max(pos[0])
boxes.append([xmin, ymin, xmax, ymax])
if xmin == xmax or ymin == ymax:
num_objs = 0
# convert everything into a torch.Tensor
boxes = torch.as_tensor(boxes, dtype=torch.float32)
labels = torch.as_tensor(labels, dtype=torch.int64)
masks = torch.as_tensor(masks, dtype=torch.uint8)
image_id = torch.tensor([idx])
if num_objs > 0:
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
else:
area = torch.as_tensor([0], dtype=torch.float32)
# suppose all instances are not crowd
iscrowd = torch.zeros((num_objs,), dtype=torch.int64)
num_objs = torch.tensor(num_objs)
target = {}
target["boxes"] = boxes
target["labels"] = labels
target["masks"] = masks
target["image_id"] = image_id
target["area"] = area
target["iscrowd"] = iscrowd
target["num_obj"] = num_objs
if self.transforms is not None:
# img, target = self.transforms(img, target)
img, target = self.transforms(color_img, target)
return img, target
def __len__(self):
# return len(self.imgs)
return len(self.color_imgs)
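# Usage sketch (hypothetical root/transforms): the dataset plugs into a
# standard torchvision detection training loop:
# dataset = SegmentationDataset("logs/data", transforms=my_transforms)
# img, target = dataset[0] # target holds boxes, labels, masks, image_id, ...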
class ForegroundDataset(torch.utils.data.Dataset):
"""
Craete binary image, 1 means foreground, 0 means background.
For grasp, we care about the center of object, while considering the clearance of gripper.
For push, we know all pushs are from left to right.
This labeling approach is the as the in the function get_neg of trainer.py
"""
def __init__(self, root, num_rotations):
self.root = root
# load all image files, sorting them to ensure that they are aligned
self.color_imgs = list(sorted(os.listdir(os.path.join(root, "color-heightmaps"))))
self.depth_imgs = list(sorted(os.listdir(os.path.join(root, "depth-heightmaps"))))
self.num_rotations = num_rotations
self.push_large_kernel = np.ones((41, 41)) # hyperparameter
self.push_small_kernel = np.ones((13, 13)) # hyperparameter
self.grasp_kernel = np.ones((9, 9)) # hyperparameter
self.post_grasp_kernel = np.zeros(
(GRIPPER_GRASP_WIDTH_PIXEL, GRIPPER_GRASP_OUTER_DISTANCE_PIXEL)
)
diff = math.ceil(
(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL - GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2
)
self.post_grasp_kernel[:, 0:diff] = 1 # left
self.post_grasp_kernel[:, (GRIPPER_GRASP_OUTER_DISTANCE_PIXEL - diff) :] = 1 # right
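# post_grasp_kernel appears intended to mark the two finger-sweep strips just
# outside the inner grasp opening, so a grasp can be rejected when those
# strips are not clear of obstructions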
def __getitem__(self, idx):
# load images
color_path = os.path.join(self.root, "color-heightmaps", self.color_imgs[idx])
depth_path = os.path.join(self.root, "depth-heightmaps", self.depth_imgs[idx])
# color image input
color_img = cv2.imread(color_path)
color_img = cv2.cvtColor(color_img, cv2.COLOR_BGR2RGB)
color_img_pil = Image.fromarray(color_img)
# depth image input
depth_img = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED)
depth_img = depth_img.astype(np.float32) / 100000 # translate to meters
depth_img_pil = Image.fromarray(depth_img)
# binary push image target, we need boundary and some extra
push_depth_img = np.copy(depth_img)
push_depth_img[push_depth_img <= DEPTH_MIN] = 0
push_depth_img[push_depth_img > DEPTH_MIN] = 1
push_depth_large = cv2.filter2D(push_depth_img, -1, self.push_large_kernel)
push_depth_large[push_depth_large < 1] = 0
push_depth_large[push_depth_large > 1] = 1
push_depth_small = cv2.filter2D(push_depth_img, -1, self.push_small_kernel)
push_depth_small[push_depth_small < 1] = 0
push_depth_small[push_depth_small > 1] = 1
push_depth_final = push_depth_large - push_depth_small
push_depth_final[push_depth_final < 0] = 0
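        # the push target is the band between the two dilations: pixels within
        # the 41x41 kernel's reach of an object but outside the 13x13 kernel's
        # reach, i.e., a ring around the object boundary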
# prepare q values
push_depth_final[push_depth_final == 1] = PUSH_Q
push_depth_final[push_depth_final == 0] = 0
target_push_img_pil = Image.fromarray(push_depth_final)
# binary grasp image target, we need center part
grasp_depth_img = np.copy(depth_img)
grasp_depth_img[grasp_depth_img <= DEPTH_MIN] = -100
grasp_depth_img[grasp_depth_img > DEPTH_MIN] = 1
grasp_depth = cv2.filter2D(grasp_depth_img, -1, self.grasp_kernel)
grasp_depth[grasp_depth < 1] = 0
grasp_depth[grasp_depth > 1] = 1
# # focus on target
# color_mask = cv2.cvtColor(color_img, cv2.COLOR_RGB2HSV)
# color_mask = cv2.inRange(color_mask, TARGET_LOWER, TARGET_UPPER)
# grasp_depth[color_mask != 255] = 0
# prepare q values
grasp_depth[grasp_depth == 1] = GRASP_Q
grasp_depth[grasp_depth == 0] = 0
target_grasp_img_pil = Image.fromarray(grasp_depth)
color_img_pil, depth_img_pil, target_push_img_pil, target_grasp_img_pil = self.transforms(
color_img_pil, depth_img_pil, target_push_img_pil, target_grasp_img_pil
)
return color_img_pil, depth_img_pil, target_push_img_pil, target_grasp_img_pil
def __len__(self):
return len(self.color_imgs)
@torch.no_grad()
def transforms(
self, color_heightmap, depth_heightmap, target_push_heightmap, target_grasp_heightmap
):
# Add extra padding (to handle rotations inside network)
color_heightmap_pad = TF.pad(
color_heightmap, IMAGE_PAD_WIDTH, fill=0, padding_mode="constant"
)
depth_heightmap_pad = TF.pad(
depth_heightmap, IMAGE_PAD_WIDTH, fill=0, padding_mode="constant"
)
depth_heightmap_pad_push = TF.pad(
depth_heightmap, IMAGE_PAD_WIDTH, fill=-1, padding_mode="constant"
)
target_push_heightmap_pad = TF.pad(
target_push_heightmap, IMAGE_PAD_WIDTH, fill=0, padding_mode="constant"
)
target_grasp_heightmap_pad = TF.pad(
target_grasp_heightmap, IMAGE_PAD_WIDTH, fill=0, padding_mode="constant"
)
# Random rotate
# rotate_idx = random.randint(0, self.num_rotations - 1)
# rotate_theta = rotate_idx * (360 / self.num_rotations)
rotate_theta = random.random() * 360
color_heightmap_pad = TF.rotate(color_heightmap_pad, rotate_theta)
depth_heightmap_pad = TF.rotate(depth_heightmap_pad, rotate_theta)
depth_heightmap_pad_push = TF.rotate(depth_heightmap_pad_push, rotate_theta)
target_push_heightmap_pad = TF.rotate(target_push_heightmap_pad, rotate_theta)
target_grasp_heightmap_pad = TF.rotate(target_grasp_heightmap_pad, rotate_theta)
color_heightmap_pad = np.array(color_heightmap_pad)
depth_heightmap_pad = np.array(depth_heightmap_pad)
depth_heightmap_pad_push = np.array(depth_heightmap_pad_push)
target_push_heightmap_pad = np.array(target_push_heightmap_pad)
target_grasp_heightmap_pad = np.array(target_grasp_heightmap_pad)
        # Post-process for pushing: a pixel stays 1 only if there is something
        # on its right (based on the heightmap) to push; otherwise it would be
        # an empty push. It is also zeroed if the pushed-to area leaves the workspace.
x_y_idx = np.argwhere(target_push_heightmap_pad > 0)
for idx in x_y_idx:
x, y = tuple(idx)
area = depth_heightmap_pad[
max(0, x - math.ceil(GRIPPER_PUSH_RADIUS_PIXEL / 4)) : min(
depth_heightmap_pad.shape[0], x + math.ceil(GRIPPER_PUSH_RADIUS_PIXEL / 4) + 1
),
min(depth_heightmap_pad.shape[1], y + GRIPPER_PUSH_RADIUS_SAFE_PIXEL) : min(
depth_heightmap_pad.shape[1], y + math.ceil(PUSH_DISTANCE_PIXEL / 2) + 1
),
]
if np.sum(area > DEPTH_MIN) == 0:
target_push_heightmap_pad[x, y] = 0
else:
area = depth_heightmap_pad_push[
max(0, x - math.ceil(GRIPPER_PUSH_RADIUS_PIXEL / 2)) : min(
depth_heightmap_pad_push.shape[0],
x + math.ceil(GRIPPER_PUSH_RADIUS_PIXEL / 2) + 1,
),
min(
depth_heightmap_pad_push.shape[1] - 1,
y + PUSH_DISTANCE_PIXEL + math.ceil(0.05 / PIXEL_SIZE),
),
                ]  # 65 px is a hyperparameter: push distance + 5 cm (close to the workspace limits)
if np.sum(area < 0) > 0: # out of the workspace
target_push_heightmap_pad[x, y] = 0
        # Post-process for grasping: only pixels with clearance on both the left and right (based on the heightmap) stay 1
x_y_idx = np.argwhere(target_grasp_heightmap_pad > 0)
for idx in x_y_idx:
x, y = tuple(idx)
left_area = depth_heightmap_pad[
max(0, x - math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2)) : min(
depth_heightmap_pad.shape[0], x + math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2) + 1
),
max(0, y - math.ceil(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL / 2)) : max(
0, y - math.ceil(GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 2) + 1
),
]
right_area = depth_heightmap_pad[
max(0, x - math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2)) : min(
depth_heightmap_pad.shape[0], x + math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2) + 1
),
min(
depth_heightmap_pad.shape[1] - 1,
y + math.ceil(GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 2),
) : min(
depth_heightmap_pad.shape[1],
y + math.ceil(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL / 2) + 1,
),
]
if (
np.sum(left_area > DEPTH_MIN) > 0
and np.sum((left_area - depth_heightmap_pad[x, y]) > -0.05)
                > 0  # 0.05 m (5 cm) is a hyperparameter
) or (
np.sum(right_area > DEPTH_MIN) > 0
and np.sum((right_area - depth_heightmap_pad[x, y]) > -0.05) > 0
):
target_grasp_heightmap_pad[x, y] = 0
# To tensor
color_heightmap_pad = TF.to_tensor(color_heightmap_pad)
depth_heightmap_pad = TF.to_tensor(depth_heightmap_pad)
target_push_heightmap_pad = TF.to_tensor(target_push_heightmap_pad)
target_grasp_heightmap_pad = TF.to_tensor(target_grasp_heightmap_pad)
# Normalize
color_heightmap_pad = TF.normalize(color_heightmap_pad, COLOR_MEAN, COLOR_STD, inplace=True)
depth_heightmap_pad = TF.normalize(depth_heightmap_pad, DEPTH_MEAN, DEPTH_STD, inplace=True)
return (
color_heightmap_pad,
depth_heightmap_pad,
target_push_heightmap_pad,
target_grasp_heightmap_pad,
)
class PushPredictionMultiDatasetEvaluation(torch.utils.data.Dataset):
"""
Push Prediction Dataset for Evaluation
Input: Image, Action (x, y), Pose (x, y)
Output: Diff_x, Diff_y, Diff_angle
"""
def __init__(self, depth_imgs, actions, poses, binary_objs):
self.distance = PUSH_DISTANCE
self.workspace_limits = WORKSPACE_LIMITS
self.heightmap_resolution = PIXEL_SIZE
self.prev_depth_imgs = []
self.prev_poses = []
self.actions = []
self.binary_objs = []
# print("Total files", len(depth_imgs), len(actions), len(poses))
for i in range(len(actions)):
self.prev_depth_imgs.append(
depth_imgs[i][IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF]
)
self.prev_poses.append(poses[i])
self.actions.append(actions[i])
self.binary_objs.append(binary_objs[i])
# print("Used files", len(self.prev_depth_imgs), len(self.prev_poses), len(self.actions), len(self.binary_objs))
assert (
len(
set(
[
len(self.prev_depth_imgs),
len(self.prev_poses),
len(self.actions),
len(self.binary_objs),
]
)
)
== 1
)
def __getitem__(self, idx):
# depth image input
prev_depth_img = self.prev_depth_imgs[idx]
# number of objects
num_obj = len(self.prev_poses[idx])
# poses
prev_poses = self.prev_poses[idx]
# action
action_start = self.actions[idx]
action_end = np.array([action_start[0] + self.distance / PIXEL_SIZE, action_start[1]])
        # binary masks derived from the previous depth image
        # object channel
prev_depth_binary_img_obj = np.copy(prev_depth_img)
prev_depth_binary_img_obj[prev_depth_binary_img_obj <= DEPTH_MIN] = 0
prev_depth_binary_img_obj[prev_depth_binary_img_obj > DEPTH_MIN] = 1
temp = np.zeros((680, 680))
temp[228:452, 228:452] = prev_depth_binary_img_obj
prev_depth_binary_img_obj = temp[
int(action_start[0] + 228) - 40 : int(action_start[0] + 228) + 184,
int(action_start[1] + 228) - 112 : int(action_start[1] + 228) + 112,
]
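        # geometry note: the 224x224 heightmap sits at the center of a 680x680
        # canvas (228 px of padding per side), and a 224x224 window is cropped
        # so the push start lands at pixel (40, 112) of the crop, matching the
        # fixed action tensor built below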
# action
prev_depth_binary_img_action = np.zeros_like(prev_depth_img)
prev_depth_binary_img_action[
int(action_start[0]) : int(action_end[0]),
int(action_start[1])
- GRIPPER_PUSH_RADIUS_PIXEL : int(action_start[1])
+ GRIPPER_PUSH_RADIUS_PIXEL
+ 1,
] = 1
temp = np.zeros((680, 680))
temp[228:452, 228:452] = prev_depth_binary_img_action
prev_depth_binary_img_action = temp[
int(action_start[0] + 228) - 40 : int(action_start[0] + 228) + 184,
int(action_start[1] + 228) - 112 : int(action_start[1] + 228) + 112,
]
binary_objs = self.binary_objs[idx]
temp = np.zeros_like(binary_objs[:, :, 0:1])
        # centralize: shift poses into the crop frame, where the push start is (40, 112)
action_start_ori = action_start.copy()
action_end_ori = action_end.copy()
action_start[0] -= 40
action_start[1] -= 112
for pi in range(num_obj):
prev_poses[pi] = prev_poses[pi] - action_start
prev_poses = prev_poses.flatten()
prev_poses = torch.tensor(prev_poses, dtype=torch.float32)
action = torch.tensor(
[40.0, 112.0, 40.0 + self.distance / PIXEL_SIZE, 112.0], dtype=torch.float32
)
used_binary_img, binary_objs_total = self.transforms(
prev_depth_binary_img_obj, prev_depth_binary_img_action, binary_objs
)
return (
prev_poses,
action,
action_start_ori,
action_end_ori,
used_binary_img,
binary_objs_total,
num_obj,
)
def __len__(self):
return len(self.actions)
@torch.no_grad()
def transforms(self, prev_depth_binary_img_obj, prev_depth_binary_img_action, binary_objs):
prev_depth_binary_img_obj = TF.to_tensor(prev_depth_binary_img_obj)
prev_depth_binary_img_action = TF.to_tensor(prev_depth_binary_img_action)
used_binary_img = torch.cat(
(prev_depth_binary_img_obj, prev_depth_binary_img_action), dim=0
)
used_binary_img = TF.normalize(
used_binary_img, BINARY_IMAGE_MEAN, BINARY_IMAGE_STD, inplace=True
)
binary_objs_total = TF.to_tensor(binary_objs)
current_binary_mean = BINARY_OBJ_MEAN * binary_objs_total.size(0)
current_binary_std = BINARY_OBJ_STD * binary_objs_total.size(0)
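        # BINARY_OBJ_MEAN/STD hold single-channel statistics; repeating them
        # per stacked object mask lets TF.normalize handle a variable number of channels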
binary_objs_total = TF.normalize(
binary_objs_total, current_binary_mean, current_binary_std, inplace=True
)
return used_binary_img, binary_objs_total
class ClusterRandomSampler(Sampler):
"""Takes a dataset with cluster_indices property, cuts it into batch-sized chunks
Drops the extra items, not fitting into exact batches
Arguments:
data_source (Dataset): a Dataset to sample from. Should have a cluster_indices property
batch_size (int): a batch size that you would like to use later with Dataloader class
shuffle (bool): whether to shuffle the data or not
"""
def __init__(self, data_source, batch_size, shuffle=True):
self.data_source = data_source
self.batch_size = batch_size
self.shuffle = shuffle
self.num_after_batch = 0
for _, cluster_indices in self.data_source.cluster_indices.items():
self.num_after_batch += len(cluster_indices) // self.batch_size * self.batch_size
def flatten_list(self, lst):
return [item for sublist in lst for item in sublist]
def __iter__(self):
batch_lists = []
for _, cluster_indices in self.data_source.cluster_indices.items():
if self.shuffle:
random.shuffle(cluster_indices)
batches = [
cluster_indices[i : i + self.batch_size]
for i in range(0, len(cluster_indices), self.batch_size)
]
            # filter out the shorter batches
if len(batches[-1]) != self.batch_size:
batch_lists.append(batches[:-1])
else:
batch_lists.append(batches)
# flatten lists and shuffle the batches if necessary
# this works on batch level
lst = self.flatten_list(batch_lists)
if self.shuffle:
random.shuffle(lst)
# final flatten - produce flat list of indexes
lst = self.flatten_list(lst)
return iter(lst)
def __len__(self):
return self.num_after_batch
class PushPredictionMultiDataset(torch.utils.data.Dataset):
"""
Push Prediction Dataset for training Push prediction network.
The push distance is fixed, could be 5 or 10 cm.
    Track objects by color, so we assume each object has a unique color; however, this constraint is not needed in evaluation.
Input: Image, Action (x, y), Pose (x, y)
Output: Diff_x, Diff_y, Diff_angle
"""
def __init__(self, root, distance, is_padding=False, cutoff=None):
self.root = root
self.is_padding = is_padding
# load all image files, sorting them to ensure that they are aligned
prev_color_imgs = list(sorted(os.listdir(os.path.join(root, "prev-color-heightmaps"))))
prev_depth_imgs = list(sorted(os.listdir(os.path.join(root, "prev-depth-heightmaps"))))
prev_poses = list(sorted(os.listdir(os.path.join(root, "prev-poses"))))
next_color_imgs = list(sorted(os.listdir(os.path.join(root, "next-color-heightmaps"))))
next_depth_imgs = list(sorted(os.listdir(os.path.join(root, "next-depth-heightmaps"))))
next_poses = list(sorted(os.listdir(os.path.join(root, "next-poses"))))
actions = list(sorted(os.listdir(os.path.join(root, "actions"))))
self.distance = distance
self.workspace_limits = WORKSPACE_LIMITS
self.heightmap_resolution = PIXEL_SIZE
self.prev_color_imgs = []
self.prev_depth_imgs = []
self.prev_poses = []
self.actions = []
self.next_color_imgs = []
self.next_depth_imgs = []
self.next_poses = []
self.cluster_indices = {}
print(
"Total files",
len(prev_color_imgs),
len(prev_depth_imgs),
len(prev_poses),
len(actions),
len(next_color_imgs),
len(next_depth_imgs),
len(next_poses),
)
for i in range(len(actions)):
assert (
len(
set(
[
actions[i][:7],
prev_color_imgs[i][:7],
prev_depth_imgs[i][:7],
prev_poses[i][:7],
next_color_imgs[i][:7],
next_depth_imgs[i][:7],
next_poses[i][:7],
]
)
)
== 1
), (
actions[i][:7],
prev_color_imgs[i][:7],
prev_depth_imgs[i][:7],
prev_poses[i][:7],
next_color_imgs[i][:7],
next_depth_imgs[i][:7],
next_poses[i][:7],
)
if cutoff is not None:
if int(actions[i][:7]) > cutoff:
break
self.prev_color_imgs.append(prev_color_imgs[i])
self.prev_depth_imgs.append(prev_depth_imgs[i])
self.prev_poses.append(prev_poses[i])
self.actions.append(actions[i])
self.next_color_imgs.append(next_color_imgs[i])
self.next_depth_imgs.append(next_depth_imgs[i])
self.next_poses.append(next_poses[i])
            # create cluster indices, so that data with the same number of objects are grouped together
poses_path = os.path.join(self.root, "prev-poses", prev_poses[i])
with open(poses_path, "r") as file:
filedata = file.read()
poses_str = filedata.split(" ")
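            # each pose record holds 5 numbers per object; __getitem__ reads
            # fields 0 and 1 (x, y) and field 4 (rotation angle), the other
            # fields are unused here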
num_obj = len(poses_str) // 5
if num_obj in self.cluster_indices:
self.cluster_indices[num_obj].append(len(self.prev_poses) - 1)
else:
self.cluster_indices[num_obj] = [len(self.prev_poses) - 1]
print(
"Used files",
len(self.prev_color_imgs),
len(self.next_color_imgs),
len(self.prev_depth_imgs),
len(self.next_depth_imgs),
len(self.prev_poses),
len(self.next_poses),
len(self.actions),
)
assert (
len(
set(
[
len(self.prev_color_imgs),
len(self.next_color_imgs),
len(self.prev_depth_imgs),
len(self.next_depth_imgs),
len(self.prev_poses),
len(self.next_poses),
len(self.actions),
]
)
)
== 1
)
def __getitem__(self, idx):
# load data path
prev_color_path = os.path.join(
self.root, "prev-color-heightmaps", self.prev_color_imgs[idx]
)
prev_depth_path = os.path.join(
self.root, "prev-depth-heightmaps", self.prev_depth_imgs[idx]
)
prev_poses_path = os.path.join(self.root, "prev-poses", self.prev_poses[idx])
actions_path = os.path.join(self.root, "actions", self.actions[idx])
next_color_path = os.path.join(
self.root, "next-color-heightmaps", self.next_color_imgs[idx]
)
next_depth_path = os.path.join(
self.root, "next-depth-heightmaps", self.next_depth_imgs[idx]
)
next_poses_path = os.path.join(self.root, "next-poses", self.next_poses[idx])
# color image input
prev_color_img = cv2.imread(prev_color_path)
prev_color_img = cv2.cvtColor(prev_color_img, cv2.COLOR_BGR2RGB)
next_color_img = cv2.imread(next_color_path)
next_color_img = cv2.cvtColor(next_color_img, cv2.COLOR_BGR2RGB)
# depth image input
prev_depth_img = cv2.imread(prev_depth_path, cv2.IMREAD_UNCHANGED)
        prev_depth_img = prev_depth_img.astype(np.float32) / 100000  # translate to meters
next_depth_img = cv2.imread(next_depth_path, cv2.IMREAD_UNCHANGED)
        next_depth_img = next_depth_img.astype(np.float32) / 100000  # translate to meters
next_depth_img[next_depth_img < 0] = 0
# poses
with open(prev_poses_path, "r") as file:
filedata = file.read()
poses = filedata.split(" ")
num_obj = len(poses) // 5
prev_poses = []
for pi in range(num_obj):
x = (float(poses[pi * 5]) - self.workspace_limits[0][0]) / self.heightmap_resolution
y = (
float(poses[pi * 5 + 1]) - self.workspace_limits[1][0]
) / self.heightmap_resolution
angle_y = degrees(float(poses[pi * 5 + 4]))
prev_poses.extend([x, y, angle_y])
prev_poses = torch.tensor(prev_poses)
with open(next_poses_path, "r") as file:
filedata = file.read()
poses = filedata.split(" ")
assert len(poses) // 5 == num_obj
next_poses = []
for pi in range(num_obj):
x = (float(poses[pi * 5]) - self.workspace_limits[0][0]) / self.heightmap_resolution
y = (
float(poses[pi * 5 + 1]) - self.workspace_limits[1][0]
) / self.heightmap_resolution
angle_y = degrees(float(poses[pi * 5 + 4]))
next_poses.extend([x, y, angle_y])
next_poses = torch.tensor(next_poses)
# action
with open(actions_path, "r") as file:
filedata = file.read()
x, y, _ = filedata.split(" ")
x = (float(x) - self.workspace_limits[0][0]) / self.heightmap_resolution
y = (float(y) - self.workspace_limits[1][0]) / self.heightmap_resolution
action_start = torch.tensor([float(x), float(y)])
action_end = torch.tensor([float(x + self.distance / PIXEL_SIZE), float(y)])
        # binary masks derived from the previous depth image
        # object channel
prev_depth_binary_img_obj = np.copy(prev_depth_img)
prev_depth_binary_img_obj[prev_depth_binary_img_obj <= DEPTH_MIN] = 0
prev_depth_binary_img_obj[prev_depth_binary_img_obj > DEPTH_MIN] = 1
temp = np.zeros((680, 680))
temp[228:452, 228:452] = prev_depth_binary_img_obj
prev_depth_binary_img_obj = temp[
int(action_start[0] + 228) - 40 : int(action_start[0] + 228) + 184,
int(action_start[1] + 228) - 112 : int(action_start[1] + 228) + 112,
]
# action
prev_depth_binary_img_action = np.zeros_like(prev_depth_img)
prev_depth_binary_img_action[
int(action_start[0]) : int(action_end[0]),
int(action_start[1])
- GRIPPER_PUSH_RADIUS_PIXEL : int(action_start[1])
+ GRIPPER_PUSH_RADIUS_PIXEL
+ 1,
] = 1
temp = np.zeros((680, 680))
temp[228:452, 228:452] = prev_depth_binary_img_action
prev_depth_binary_img_action = temp[
int(action_start[0] + 228) - 40 : int(action_start[0] + 228) + 184,
int(action_start[1] + 228) - 112 : int(action_start[1] + 228) + 112,
]
        # TODO: assumes poses are ordered as blue, green, brown, orange, yellow
imgcolor = np.copy(prev_color_img)
imgcolor = imgcolor.astype(np.uint8)
temp = np.zeros((480, 480, 3), dtype=np.uint8)
temp[128 : (480 - 128), 128 : (480 - 128), :] = imgcolor
imgcolor = cv2.cvtColor(temp, cv2.COLOR_RGB2HSV)
binary_objs = []
for ci in range(num_obj):
crop = imgcolor[
int(prev_poses[ci * 3]) + 128 - 30 : int(prev_poses[ci * 3]) + 128 + 30,
int(prev_poses[ci * 3 + 1]) + 128 - 30 : int(prev_poses[ci * 3 + 1]) + 128 + 30,
:,
]
assert crop.shape[0] == 60 and crop.shape[1] == 60, (
self.prev_color_imgs[idx],
crop.shape,
)
mask = cv2.inRange(crop, colors_lower[ci], colors_upper[ci])
binary_objs.append(mask)
# delta poses
deltas = []
for pi in range(num_obj):
d_x = next_poses[pi * 3] - prev_poses[pi * 3]
d_y = next_poses[pi * 3 + 1] - prev_poses[pi * 3 + 1]
d_a = next_poses[pi * 3 + 2] - prev_poses[pi * 3 + 2]
if d_a < -180:
d_a = 360 + d_a
elif d_a > 180:
d_a = d_a - 360
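            # the wrap above maps the raw difference into [-180, 180]; e.g.,
            # prev=170 deg and next=-170 deg give d_a = -340, wrapped to +20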
assert abs(d_a) < 120, (
pi,
d_a,
self.prev_color_imgs[idx],
self.next_color_imgs[idx],
prev_poses,
next_poses,
)
deltas.extend([d_x, d_y, d_a])
deltas = torch.tensor(deltas, dtype=torch.float32)
        # centralize: shift poses into the crop frame, where the push start is (40, 112)
action_start_ori = torch.clone(action_start).detach()
action_end_ori = torch.clone(action_end).detach()
action_start[0] -= 40
action_start[1] -= 112
for pi in range(num_obj):
prev_poses[pi * 3 : pi * 3 + 2] = prev_poses[pi * 3 : pi * 3 + 2] - action_start
next_poses[pi * 3 : pi * 3 + 2] = next_poses[pi * 3 : pi * 3 + 2] - action_start
prev_poses_no_angle = []
for pi in range(num_obj):
prev_poses_no_angle.extend([prev_poses[pi * 3], prev_poses[pi * 3 + 1]])
next_poses_no_angle = []
for pi in range(num_obj):
next_poses_no_angle.extend([next_poses[pi * 3], next_poses[pi * 3 + 1]])
prev_poses = torch.tensor(prev_poses_no_angle, dtype=torch.float32)
next_poses = torch.tensor(next_poses_no_angle, dtype=torch.float32)
action = torch.tensor([40.0, 112.0, 40.0 + self.distance / PIXEL_SIZE, 112.0])
num_obj = torch.tensor(num_obj)
(
prev_color_img,
prev_depth_img,
next_color_img,
next_depth_img,
used_binary_img,
binary_objs_total,
) = self.transforms(
prev_color_img,
prev_depth_img,
next_color_img,
next_depth_img,
prev_depth_binary_img_obj,
prev_depth_binary_img_action,
binary_objs,
)
return (
prev_color_img,
prev_depth_img,
next_color_img,
next_depth_img,
used_binary_img,
prev_poses,
next_poses,
action,
deltas,
self.prev_color_imgs[idx],
self.next_color_imgs[idx],
action_start_ori,
action_end_ori,
binary_objs_total,
num_obj,
)
def __len__(self):
return len(self.actions)
@torch.no_grad()
def transforms(
self,
prev_color_img,
prev_depth_img,
next_color_img,
next_depth_img,
prev_depth_binary_img_obj,
prev_depth_binary_img_action,
binary_objs,
):
# To tensor
prev_color_img = TF.to_tensor(prev_color_img)
prev_depth_img = TF.to_tensor(prev_depth_img)
next_color_img = TF.to_tensor(next_color_img)
next_depth_img = TF.to_tensor(next_depth_img)
prev_depth_binary_img_obj = TF.to_tensor(prev_depth_binary_img_obj)
prev_depth_binary_img_action = TF.to_tensor(prev_depth_binary_img_action)
used_binary_img = torch.cat(
(prev_depth_binary_img_obj, prev_depth_binary_img_action), dim=0
)
used_binary_img = TF.normalize(
used_binary_img, BINARY_IMAGE_MEAN, BINARY_IMAGE_STD, inplace=True
)
binary_objs_total = TF.to_tensor(binary_objs[0])
for ci in range(1, len(binary_objs)):
temp = TF.to_tensor(binary_objs[ci])
temp = TF.normalize(temp, BINARY_OBJ_MEAN, BINARY_OBJ_STD, inplace=True)
binary_objs_total = torch.cat((binary_objs_total, temp), dim=0)
# Normalize
prev_color_img = TF.normalize(prev_color_img, COLOR_MEAN, COLOR_STD, inplace=True)
next_color_img = TF.normalize(next_color_img, COLOR_MEAN, COLOR_STD, inplace=True)
prev_depth_img = TF.normalize(prev_depth_img, DEPTH_MEAN, DEPTH_STD, inplace=True)
next_depth_img = TF.normalize(next_depth_img, DEPTH_MEAN, DEPTH_STD, inplace=True)
return (
prev_color_img,
prev_depth_img,
next_color_img,
next_depth_img,
used_binary_img,
binary_objs_total,
)
def drawAxis(img, p_, q_, colour, scale):
p = list(p_)
q = list(q_)
angle = atan2(p[1] - q[1], p[0] - q[0]) # angle in radians
hypotenuse = sqrt((p[1] - q[1]) * (p[1] - q[1]) + (p[0] - q[0]) * (p[0] - q[0]))
# Here we lengthen the arrow by a factor of scale
q[0] = p[0] - scale * hypotenuse * cos(angle)
q[1] = p[1] - scale * hypotenuse * sin(angle)
cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv2.LINE_AA)
# create the arrow hooks
p[0] = q[0] + 9 * cos(angle + pi / 4)
p[1] = q[1] + 9 * sin(angle + pi / 4)
cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv2.LINE_AA)
p[0] = q[0] + 9 * cos(angle - pi / 4)
p[1] = q[1] + 9 * sin(angle - pi / 4)
cv2.line(img, (int(p[0]), int(p[1])), (int(q[0]), int(q[1])), colour, 1, cv2.LINE_AA)
def getCenterOrientation(pts, img):
sz = len(pts)
data_pts = np.empty((sz, 2), dtype=np.float64)
for i in range(data_pts.shape[0]):
data_pts[i, 0] = pts[i, 0, 0]
data_pts[i, 1] = pts[i, 0, 1]
# Perform PCA analysis
mean = np.empty((0))
mean, eigenvectors, eigenvalues = cv2.PCACompute2(data_pts, mean)
# Store the center of the object
cntr = (int(mean[0, 0]), int(mean[0, 1]))
cv2.circle(img, cntr, 3, (255, 0, 255), 2)
p1 = (
cntr[0] + 0.02 * eigenvectors[0, 0] * eigenvalues[0, 0],
cntr[1] + 0.02 * eigenvectors[0, 1] * eigenvalues[0, 0],
)
p2 = (
cntr[0] - 0.02 * eigenvectors[1, 0] * eigenvalues[1, 0],
cntr[1] - 0.02 * eigenvectors[1, 1] * eigenvalues[1, 0],
)
drawAxis(img, cntr, p1, (0, 255, 0), 1)
drawAxis(img, cntr, p2, (255, 255, 0), 5)
# angle = math.atan2(eigenvectors[0, 1], eigenvectors[0, 0]) # orientation in radians
angle = atan2(eigenvectors[1, 1], eigenvectors[1, 0]) # orientation in radians
return cntr[0], cntr[1], angle
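# Illustrative usage of getCenterOrientation (a minimal sketch; `mask` is an
# assumed binary uint8 image and `vis` an assumed BGR image to draw on):
#   contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#   cx, cy, angle = getCenterOrientation(contours[0], vis)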
def compute_mean_std(dataset):
loader = torch.utils.data.DataLoader(dataset, batch_size=64, num_workers=4, shuffle=False)
color_mean = 0
color_std = 0
depth_mean = 0
depth_std = 0
num_samples = 0.0
for color, depth, _, _ in loader:
batch_samples = color.size(0)
color = color.view(batch_samples, color.size(1), -1)
color_mean += color.mean(2).sum(0)
color_std += color.std(2).sum(0)
depth = depth.view(batch_samples, depth.size(1), -1)
depth_mean += depth.mean(2).sum(0)
depth_std += depth.std(2).sum(0)
num_samples += batch_samples
color_mean /= num_samples
color_std /= num_samples
print(f"color mean: {color_mean}, color std: {color_std}")
depth_mean /= num_samples
depth_std /= num_samples
print(f"depth mean: {depth_mean}, depth std: {depth_std}")
# sampler = ClusterRandomSampler(dataset, batch_size=64)
# data_loader = torch.utils.data.DataLoader(
# dataset,
# batch_size=64,
# sampler=sampler,
# shuffle=False,
# num_workers=4,
# drop_last=True,
# )
# binary_image_mean = 0
# binary_image_std = 0
# binary_obj_mean = 0
# binary_obj_std = 0
# num_samples = 0
# for _, _, _, _, used_binary_img, _, _, _, _, _, _, _, _, binary_objs_total, _ in data_loader:
# batch_samples = used_binary_img.size(0)
# used_binary_img = used_binary_img.view(batch_samples, used_binary_img.size(1), -1)
# binary_image_mean += used_binary_img.mean(2).sum(0)
# binary_image_std += used_binary_img.std(2).sum(0)
# binary_objs_total = binary_objs_total.view(batch_samples, binary_objs_total.size(1), -1)
# binary_obj_mean += binary_objs_total.mean(2).mean(1).sum(0)
# binary_obj_std += binary_objs_total.std(2).mean(1).sum(0)
# num_samples += batch_samples
# binary_image_mean /= num_samples
# binary_image_std /= num_samples
# print(f"binary image mean: {binary_image_mean}, binary image std: {binary_image_std}")
# binary_obj_mean /= num_samples
# binary_obj_std /= num_samples
# print(f"binary obj mean: {binary_obj_mean}, binary obj std: {binary_obj_std}")
if __name__ == "__main__":
dataset = ForegroundDataset("logs_image/foreground/data", NUM_ROTATION)
# dataset = PushPredictionMultiDataset("logs_push/push-05/train", PUSH_DISTANCE)
compute_mean_std(dataset)
| 59,575 | 39.973865 | 139 |
py
|
more
|
more-main/old_utils.py
|
from collections import defaultdict, deque
import time
import datetime
import pickle
import torch.distributed as dist
import torch
import torch.nn as nn
import torch.nn.functional as F
# Cross entropy loss for 2D outputs
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, size_average=True):
super(CrossEntropyLoss2d, self).__init__()
self.nll_loss = nn.NLLLoss(weight, size_average)
def forward(self, inputs, targets):
return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
https://github.com/pytorch/vision/blob/master/references/detection/utils.py
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device="cuda")
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value
)
def is_dist_avail_and_initialized():
"""
https://github.com/pytorch/vision/blob/master/references/detection/utils.py
"""
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor):
"""
https://github.com/pytorch/vision/blob/master/references/detection/utils.py
"""
def f(x):
if x >= warmup_iters:
return 1
alpha = float(x) / warmup_iters
return warmup_factor * (1 - alpha) + alpha
return torch.optim.lr_scheduler.LambdaLR(optimizer, f)
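# Illustrative usage of warmup_lr_scheduler (a minimal sketch, mirroring the
# first-epoch warmup in train_push_prediction.py): build it with
# warmup_iters=min(1000, len(data_loader) - 1) and warmup_factor=0.001, then
# call its step() once per training iteration during epoch 0.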
class MetricLogger(object):
"""
https://github.com/pytorch/vision/blob/master/references/detection/utils.py
"""
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"]
)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
print(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print("{} Total time: {} ({:.4f} s / it)".format(header, total_time_str, total_time / len(iterable)))
def collate_fn(batch):
new_batch = list(filter(lambda b: b[1]["num_obj"].item() > 0, batch))
return tuple(zip(*new_batch))
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
    # serialize the object to a byte Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
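# Illustrative usage of all_gather (a minimal sketch; requires an initialized
# process group, otherwise it just returns [data]):
#   predictions = all_gather(local_predictions)  # list with one entry per rank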
| 9,084 | 31.216312 | 112 |
py
|
more
|
more-main/train_push_prediction.py
|
import torch
from torchvision import transforms as T
from push_net import PushPredictionNet
from dataset import PushPredictionMultiDataset, ClusterRandomSampler
import argparse
import time
import datetime
import os
import numpy as np
import cv2
from torch.utils.tensorboard import SummaryWriter
from constants import (
PUSH_DISTANCE,
COLOR_MEAN,
COLOR_STD,
)
import torch_utils
import log_utils
def parse_args():
default_params = {
"lr": 1e-3,
"batch_size": 64,
"t_0": 5, # CosineAnnealing, start 1 6 16 36 76
"t_mult": 2, # CosineAnnealing, period 5 10 20 40
"eta_min": 1e-8, # CosineAnnealing, minimum lr
"epochs": 76, # CosineAnnealing, should end before warm start
"loss_beta": 2,
"distance": PUSH_DISTANCE,
}
parser = argparse.ArgumentParser(description="Train Push Prediction")
parser.add_argument(
"--lr", action="store", default=default_params["lr"], type=float, help="The learning rate"
)
parser.add_argument(
"--batch_size",
action="store",
default=default_params["batch_size"],
type=int,
help="The batch size for training and testing",
)
parser.add_argument(
"--t_0",
action="store",
default=default_params["t_0"],
type=int,
help="The t_0 of CosineAnnealing",
)
parser.add_argument(
"--t_mult",
action="store",
default=default_params["t_mult"],
type=int,
help="The t_mult of CosineAnnealing",
)
parser.add_argument(
"--eta_min",
action="store",
default=default_params["eta_min"],
type=float,
help="The eta_min of CosineAnnealing",
)
parser.add_argument(
"--epochs",
action="store",
default=default_params["epochs"],
type=int,
help="The epoch for training, should end before warm start of CosineAnnealing",
)
parser.add_argument(
"--loss_beta",
action="store",
default=default_params["loss_beta"],
type=int,
help="The beta of SmoothL1Loss",
)
parser.add_argument(
"--distance",
action="store",
default=default_params["distance"],
type=float,
help="The distance of one push",
)
parser.add_argument(
"--dataset_root", action="store", required=True, help="The path to the dataset"
)
parser.add_argument(
"--pretrained_model", action="store", help="The path to the pretrained model"
)
parser.add_argument(
"--len_dataset",
action="store",
default=-1,
type=int,
help="The number of push dataset should be used",
)
parser.add_argument(
"--test", action="store_true", default=False, help="Testing and visualizing"
)
parser.add_argument("--verify", action="store_true", default=False, help="Verify the dataset")
parser.add_argument(
"--test_plot",
action="store_true",
default=False,
help="Testing with colorful visualization",
)
parser.add_argument(
"--symmetric_diff", action="store_true", default=False, help="Compute symmetric_diff"
)
args = parser.parse_args()
if (args.test or args.test_plot or args.symmetric_diff) and args.pretrained_model is None:
parser.error("--test, --test_plot, and --symmetric_diff require --pretrained_model.")
return args
class PushPredictionTrainer:
def __init__(self, args):
self.params = {
"lr": args.lr,
"batch_size": args.batch_size,
"t_0": args.t_0, # CosineAnnealing, start 0 4 12 28
"t_mult": args.t_mult, # CosineAnnealing, period 4 8 16
"eta_min": args.eta_min, # CosineAnnealing, minimum lr
"epochs": args.epochs, # CosineAnnealing, should end before warm start
"loss_beta": args.loss_beta,
"distance": args.distance,
}
self.dataset_root = args.dataset_root
self.pretrained_model = args.pretrained_model
self.len_dataset = args.len_dataset
self.test = args.test
self.verify = args.verify
self.test_plot = args.test_plot
self.symmetric_diff = args.symmetric_diff
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if not (self.test or self.verify or self.test_plot or self.symmetric_diff):
self.log_dir = os.path.join(self.dataset_root, "runs")
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
timestamp_value = datetime.datetime.fromtimestamp(time.time())
time_name = timestamp_value.strftime("%Y-%m-%d-%H-%M-%S")
self.log_dir = os.path.join(self.log_dir, time_name)
self.tb_logger = SummaryWriter(self.log_dir)
self.logger = log_utils.setup_logger(self.log_dir, "Push Prediction")
def main(self):
model = PushPredictionNet()
model = model.to(self.device)
criterion = torch.nn.SmoothL1Loss(beta=self.params["loss_beta"])
optimizer = torch.optim.SGD(
[p for p in model.parameters() if p.requires_grad],
        lr=self.params["lr"],
momentum=0.9,
weight_decay=1e-4,
)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
optimizer,
T_0=self.params["t_0"],
T_mult=self.params["t_mult"],
eta_min=self.params["eta_min"],
last_epoch=-1,
verbose=False,
)
start_epoch = 0
if self.pretrained_model is not None:
checkpoint = torch.load(self.pretrained_model)
model.load_state_dict(checkpoint["model"])
optimizer.load_state_dict(checkpoint["optimizer"])
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
start_epoch = checkpoint["epoch"] + 1
# prev_params = checkpoint["params"]
if self.test:
data_loader = self._get_data_loader("test", 1, shuffle=True, test=True)
criterion = torch.nn.MSELoss(reduction="none")
self._test(model, criterion, data_loader)
elif self.verify:
data_loader = self._get_data_loader("test", 1, test=True)
self._verify_dataset(model, data_loader)
elif self.test_plot:
data_loader = self._get_data_loader("test", 1, shuffle=True, test=True)
criterion = torch.nn.SmoothL1Loss(reduction="none")
self._test_plot(model, criterion, data_loader)
elif self.symmetric_diff:
data_loader = self._get_data_loader("test", 1, test=True)
criterion = torch.nn.MSELoss(reduction="none")
self._symmetric_diff(model, criterion, data_loader)
else:
self.logger.info(f"Hyperparameters: {self.params}")
if self.pretrained_model is not None:
self.logger.info(f"Start from the pretrained model: {self.pretrained_model}")
# self.logger.info(f"Previous Hyperparameters: {prev_params}")
data_loader_train = self._get_data_loader(
"train", self.params["batch_size"], shuffle=True
)
data_loader_test = self._get_data_loader("test", max(1, self.params["batch_size"] // 2))
for epoch in range(start_epoch, self.params["epochs"]):
# warmup start
if epoch == 0:
warmup_factor = 0.001
warmup_iters = min(1000, len(data_loader_train) - 1)
current_lr_scheduler = torch_utils.warmup_lr_scheduler(
optimizer, warmup_iters, warmup_factor
)
else:
current_lr_scheduler = lr_scheduler
train_loss = self._train_one_epoch(
model, criterion, optimizer, data_loader_train, current_lr_scheduler, epoch
)
evaluate_loss = self._evaluate(model, criterion, data_loader_test)
save_state = {
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"params": self.params,
}
torch.save(
save_state, os.path.join(self.log_dir, f"push_prediction_model-{epoch}.pth")
)
self.tb_logger.add_scalars(
"Epoch_Loss", {"train": train_loss, "test": evaluate_loss}, epoch
)
self.tb_logger.flush()
self.tb_logger.add_hparams(
self.params, {"hparam/train": train_loss, "hparam/test": evaluate_loss}
)
self.logger.info("Training completed!")
def _train_one_epoch(
self, model, criterion, optimizer, data_loader, lr_scheduler, epoch, print_freq=100
):
model.train()
metric_logger = log_utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", log_utils.SmoothedValue(window_size=1, fmt="{value:.8f}"))
metric_logger.add_meter("loss", log_utils.SmoothedValue())
header = "Epoch: [{}]".format(epoch)
losses = []
n_iter = 0
total_iters = len(data_loader)
for (
prev_color_img,
_,
_,
_,
used_binary_img,
prev_poses,
_,
action,
delta,
_,
_,
_,
_,
binary_objs_total,
num_obj,
) in metric_logger.log_every(data_loader, print_freq, self.logger, header):
used_binary_img_gpu = used_binary_img.to(
self.device, non_blocking=True, dtype=torch.float
)
prev_poses_gpu = prev_poses.to(self.device, non_blocking=True)
action_gpu = action.to(self.device, non_blocking=True)
binary_objs_total_gpu = binary_objs_total.to(self.device, non_blocking=True)
target_gpu = delta.to(self.device, non_blocking=True)
# forward
output = model(
prev_poses_gpu, action_gpu, used_binary_img_gpu, binary_objs_total_gpu, num_obj[0]
)
# get loss
loss = criterion(output, target_gpu)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# log
log_loss = loss.item()
log_lr = optimizer.param_groups[0]["lr"]
metric_logger.update(loss=log_loss, lr=log_lr)
self.tb_logger.add_scalar("Step/Loss/Train", log_loss, total_iters * epoch + n_iter)
self.tb_logger.add_scalar("Step/LR", log_lr, total_iters * epoch + n_iter)
losses.append(log_loss)
if epoch == 0:
lr_scheduler.step()
n_iter += 1
if epoch != 0:
lr_scheduler.step(epoch)
return sum(losses) / len(losses)
@torch.no_grad()
def _evaluate(self, model, criterion, data_loader, print_freq=20):
model.eval()
metric_logger = log_utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("loss", log_utils.SmoothedValue(window_size=len(data_loader)))
losses = []
header = "Test:"
for (
_,
_,
_,
_,
used_binary_img,
prev_poses,
_,
action,
delta,
_,
_,
_,
_,
binary_objs_total,
num_obj,
) in metric_logger.log_every(data_loader, print_freq, self.logger, header):
used_binary_img = used_binary_img.to(self.device, non_blocking=True, dtype=torch.float)
prev_poses = prev_poses.to(self.device, non_blocking=True)
action = action.to(self.device, non_blocking=True)
binary_objs_total = binary_objs_total.to(self.device, non_blocking=True)
target = delta.to(self.device, non_blocking=True)
output = model(prev_poses, action, used_binary_img, binary_objs_total, num_obj[0])
loss = criterion(output, target)
log_loss = loss.item()
metric_logger.update(loss=log_loss)
losses.append(log_loss)
return sum(losses) / len(losses)
def _get_data_loader(self, folder, batch_size, len_dataset=None, shuffle=False, test=False):
"""Get data loader, group data with the same number of objects.
        With ClusterRandomSampler, shuffle should be False; drop_last is not used, so it can be False.
"""
path = os.path.join(self.dataset_root, folder)
dataset = PushPredictionMultiDataset(path, self.params["distance"], False, len_dataset)
if not test:
sampler = ClusterRandomSampler(dataset, batch_size, shuffle)
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
shuffle=False,
num_workers=4,
drop_last=True,
)
else:
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle, num_workers=0, drop_last=False
)
return data_loader
# @torch.no_grad()
# def _compute_overlap(self, prev_poses, outputs, prev_color_imgs, mask_objs, num_obj):
# batch_size = prev_poses.size(0)
# overlaps = np.zeros(batch_size)
# prev_poses = prev_poses.numpy()
# outputs = outputs.numpy()
# num_obj = num_obj.item()
# mask_objs = mask_objs.numpy()
# inv_normalize = T.Normalize(
# mean=[
# -COLOR_MEAN[0] / COLOR_STD[0],
# -COLOR_MEAN[1] / COLOR_STD[1],
# -COLOR_MEAN[2] / COLOR_STD[2],
# ],
# std=[1 / COLOR_STD[0], 1 / COLOR_STD[1], 1 / COLOR_STD[2]],
# )
# for batch_idx in range(batch_size):
# ori_image = inv_normalize(prev_color_imgs[batch_idx])
# ori_image = ori_image.permute(1, 2, 0).numpy()
# ori_image[ori_image < 0] = 0
# ori_image *= 255
# ori_image = ori_image.astype(np.uint8)
# ori_image = cv2.cvtColor(ori_image, cv2.COLOR_RGB2GRAY)
# ori_image = cv2.threshold(ori_image, 50, 255, cv2.THRESH_BINARY)[1]
# new_image = np.zeros(ori_image.shape)
# for obj_idx in range(num_obj):
# mask = mask_objs[batch_idx][obj_idx]
# points = np.argwhere(mask == 255)
# points = np.expand_dims(points, axis=0)
# prev_pose = prev_poses[batch_idx][obj_idx * 2 : obj_idx * 2 + 2]
# output = outputs[batch_idx][obj_idx * 3 : obj_idx * 3 + 3]
# M = cv2.getRotationMatrix2D((prev_pose[0], prev_pose[1]), -output[2], 1)
# M[0, 2] += output[0]
# M[1, 2] += output[1]
# new_points = cv2.transform(points, M)[0]
# valid_points = np.logical_and(
# np.logical_and(new_points[:, 0] <= 223, new_points[:, 0] >= 0),
# np.logical_and(new_points[:, 1] <= 223, new_points[:, 1] >= 0),
# )
# new_points = tuple(np.transpose(new_points[valid_points]))
# new_image[new_points] = 255
# ori_area = np.sum(ori_image == 255)
# new_area = np.sum(new_image == 255)
# overlaps[batch_idx] = ori_area - new_area
# if overlaps[batch_idx] < 0:
# cv2.imshow("new", new_image)
# cv2.imshow("ori", ori_image)
# cv2.waitKey()
# cv2.destroyAllWindows()
# assert overlaps[batch_idx] >= 0, (ori_area, new_area)
# norm = np.linalg.norm(overlaps, ord=1)
# if norm == 0:
# overlaps = np.ones(batch_size)
# else:
# overlaps = overlaps / norm * batch_size
# overlaps += 1
# norm = np.linalg.norm(overlaps, ord=1)
# overlaps = overlaps / norm * batch_size
# overlaps = overlaps.reshape((batch_size, 1))
# overlaps = np.tile(overlaps, 3 * num_obj)
# return overlaps
@torch.no_grad()
def _test(self, model, criterion, data_loader):
import matplotlib.pyplot as plt
import math
from constants import colors_upper, colors_lower
torch.manual_seed(1)
inv_normalize = T.Normalize(
mean=[
-COLOR_MEAN[0] / COLOR_STD[0],
-COLOR_MEAN[1] / COLOR_STD[1],
-COLOR_MEAN[2] / COLOR_STD[2],
],
std=[1 / COLOR_STD[0], 1 / COLOR_STD[1], 1 / COLOR_STD[2]],
)
model.eval()
images = []
refs = []
for i, data in enumerate(data_loader):
(
prev_color_img,
prev_depth_img,
next_color_img,
next_depth_img,
used_binary_img,
prev_poses,
next_poses,
action,
delta,
prev_ref,
next_ref,
action_start_ori,
action_end_ori,
binary_objs_total,
num_obj,
) = data
prev_color_img = prev_color_img.to(self.device, non_blocking=True)
prev_depth_img = prev_depth_img.to(self.device, non_blocking=True)
next_color_img = next_color_img.to(self.device, non_blocking=True)
next_depth_img = next_depth_img.to(self.device, non_blocking=True)
used_binary_img = used_binary_img.to(self.device, non_blocking=True, dtype=torch.float)
prev_poses = prev_poses.to(self.device, non_blocking=True)
next_poses = next_poses.to(self.device, non_blocking=True)
action = action.to(self.device, non_blocking=True)
delta = delta.to(self.device, non_blocking=True)
action_start_ori = action_start_ori.to(self.device, non_blocking=True)
action_end_ori = action_end_ori.to(self.device, non_blocking=True)
binary_objs_total = binary_objs_total.to(self.device, non_blocking=True)
output = model(prev_poses, action, used_binary_img, binary_objs_total, num_obj[0])
target = delta
loss = criterion(output, target)
output = output[0].cpu().numpy()
target = target[0].cpu().numpy()
# output = target
output_xy = []
output_a = []
for num_idx in range(num_obj):
output_xy.append([output[num_idx * 3], output[num_idx * 3 + 1]])
output_a.append(output[num_idx * 3 + 2])
# no move
# output_xy.append([0, 0])
# output_a.append(0)
# move in the direction as the action
# output_xy.append([args.distance / 0.2, 0])
# output_a.append(0)
print(i)
print(prev_ref[0])
print(next_ref[0])
np.set_printoptions(precision=3, suppress=True)
print("output", output)
print("target", target)
print("action", action_start_ori.cpu().numpy())
print("loss", loss.cpu().numpy())
loss = loss.cpu().numpy()[0]
next_color_img = inv_normalize(next_color_img[0])
next_color_img = next_color_img.cpu().permute(1, 2, 0).numpy()
imdepth = next_color_img
imdepth[imdepth < 0] = 0
imdepth[imdepth > 0] = 255
imdepth = imdepth.astype(np.uint8)
imdepth = cv2.cvtColor(imdepth, cv2.COLOR_RGB2BGR)
prev_color_img = inv_normalize(prev_color_img[0])
prev_color_img = prev_color_img.cpu().permute(1, 2, 0).numpy()
imgcolor = prev_color_img
imgcolor[imgcolor < 0] = 0
imgcolor *= 255
imgcolor = imgcolor.astype(np.uint8)
imgcolor = cv2.GaussianBlur(imgcolor, (5, 5), 0)
imgcolor = cv2.cvtColor(imgcolor, cv2.COLOR_RGB2HSV)
prev_poses = prev_poses[0].cpu().numpy()
next_poses = next_poses[0].cpu().numpy()
action_start_ori = action_start_ori[0].cpu().numpy()
action_end_ori = action_end_ori[0].cpu().numpy()
action_start_ori_tile = np.tile(action_start_ori, num_obj[0])
action = action[0].cpu().numpy()
action_start_tile = np.tile(action[:2], num_obj[0])
prev_poses += action_start_ori_tile
prev_poses -= action_start_tile
next_poses += action_start_ori_tile
next_poses -= action_start_tile
print("prev poses", prev_poses)
print("next poses", next_poses)
for ci in range(num_obj):
color = cv2.inRange(imgcolor, colors_lower[ci], colors_upper[ci])
contours, _ = cv2.findContours(color, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
found = False
for contour in contours:
if cv2.contourArea(contour) > 100:
contours = contour
found = True
break
if not found:
continue
cv2.drawContours(
imdepth,
[contours],
-1,
(255 / int(num_obj) * (ci + 1), 255, 255 - 255 / int(num_obj) * (ci + 1)),
1,
)
cnt_rotated = rotate_contour(contours, -output_a[ci])
cnt_rotated_translated = cnt_rotated + [output_xy[ci][1], output_xy[ci][0]]
cnt_rotated_translated = np.rint(cnt_rotated_translated).astype(np.int32)
cv2.drawContours(
imdepth,
[cnt_rotated_translated],
-1,
(255 / int(num_obj) * (ci + 1), 150, 255 - 255 / int(num_obj) * (ci + 1)),
2,
)
for pi in range(num_obj):
cv2.circle(
imdepth,
(int(round(prev_poses[pi * 2 + 1])), int(round(prev_poses[pi * 2]))),
2,
(255, 0, 255),
-1,
)
cv2.circle(
imdepth,
(int(round(next_poses[pi * 2 + 1])), int(round(next_poses[pi * 2]))),
2,
(255, 255, 0),
-1,
)
# action
cv2.circle(
imdepth,
(int(round(action_start_ori[1])), int(round(action_start_ori[0]))),
5,
(255, 0, 0),
-1,
)
cv2.circle(
imdepth,
(int(round(action_end_ori[1])), int(round(action_end_ori[0]))),
5,
(0, 0, 255),
-1,
)
# if math.sqrt(loss[0]) > 5 or math.sqrt(loss[1]) > 5 or math.sqrt(loss[3]) > 5 or math.sqrt(loss[4]) > 5 or math.sqrt(loss[2]) > 5 or math.sqrt(loss[5]) > 5:
images.append(cv2.cvtColor(imdepth, cv2.COLOR_BGR2RGB))
refs.append(prev_ref[0])
if len(images) == 28:
for i in range(len(images)):
plt.subplot(math.ceil(len(images) / 7), 7, i + 1), plt.imshow(images[i], "gray")
plt.title(refs[i][:7])
plt.xticks([]), plt.yticks([])
plt.show()
# plt.savefig("test.png", dpi=600)
input_str = input("One more?")
if input_str == "y":
images = []
refs = []
else:
break
@torch.no_grad()
def _test_plot(self, model, criterion, data_loader):
import torchvision
import matplotlib.pyplot as plt
from PIL import Image, ImageStat
import math
from constants import colors_upper, colors_lower
torch.manual_seed(1)
inv_normalize = T.Normalize(
mean=[
-COLOR_MEAN[0] / COLOR_STD[0],
-COLOR_MEAN[1] / COLOR_STD[1],
-COLOR_MEAN[2] / COLOR_STD[2],
],
std=[1 / COLOR_STD[0], 1 / COLOR_STD[1], 1 / COLOR_STD[2]],
)
model.eval()
images = []
refs = []
for i, data in enumerate(data_loader):
(
prev_color_img,
prev_depth_img,
next_color_img,
next_depth_img,
used_binary_img,
prev_poses,
next_poses,
action,
delta,
prev_ref,
next_ref,
action_start_ori,
action_end_ori,
binary_objs_total,
num_obj,
) = data
prev_color_img = prev_color_img.to(self.device, non_blocking=True)
prev_depth_img = prev_depth_img.to(self.device, non_blocking=True)
next_color_img = next_color_img.to(self.device, non_blocking=True)
next_depth_img = next_depth_img.to(self.device, non_blocking=True)
used_binary_img = used_binary_img.to(self.device, non_blocking=True, dtype=torch.float)
prev_poses = prev_poses.to(self.device, non_blocking=True)
next_poses = next_poses.to(self.device, non_blocking=True)
action = action.to(self.device, non_blocking=True)
delta = delta.to(self.device, non_blocking=True)
action_start_ori = action_start_ori.to(self.device, non_blocking=True)
action_end_ori = action_end_ori.to(self.device, non_blocking=True)
binary_objs_total = binary_objs_total.to(self.device, non_blocking=True)
output = model(prev_poses, action, used_binary_img, binary_objs_total, num_obj[0])
target = delta
loss = criterion(output, target)
output = output[0].cpu().numpy()
target = target[0].cpu().numpy()
output_xy = []
output_a = []
for num_idx in range(num_obj):
output_xy.append([output[num_idx * 3], output[num_idx * 3 + 1]])
output_a.append(output[num_idx * 3 + 2])
print(i)
print(prev_ref[0])
print(next_ref[0])
np.set_printoptions(precision=3, suppress=True)
print("output", output)
print("target", target)
print("action", action_start_ori.cpu().numpy())
print("loss", loss.cpu().numpy())
loss = loss.cpu().numpy()[0]
# background
next_color_img = inv_normalize(next_color_img[0])
next_color_img = next_color_img.cpu().permute(1, 2, 0).numpy()
imnext = next_color_img
imnext[imnext < 0] = 0
imnext *= 255
imnext = imnext.astype(np.uint8)
imnext = cv2.cvtColor(imnext, cv2.COLOR_RGB2BGR)
prev_color_img = inv_normalize(prev_color_img[0])
prev_color_img = prev_color_img.cpu().permute(1, 2, 0).numpy()
imgcolor = prev_color_img
imgcolor[imgcolor < 0] = 0
imgcolor *= 255
imgcolor = imgcolor.astype(np.uint8)
imgcolor = cv2.GaussianBlur(imgcolor, (5, 5), 0)
imgcolorhsv = cv2.cvtColor(imgcolor, cv2.COLOR_RGB2HSV)
prev_poses = prev_poses[0].cpu().numpy()
next_poses = next_poses[0].cpu().numpy()
action_start_ori = action_start_ori[0].cpu().numpy()
action_end_ori = action_end_ori[0].cpu().numpy()
action_start_ori_tile = np.tile(action_start_ori, num_obj[0])
action = action[0].cpu().numpy()
action_start_tile = np.tile(action[:2], num_obj[0])
prev_poses += action_start_ori_tile
prev_poses -= action_start_tile
next_poses += action_start_ori_tile
next_poses -= action_start_tile
print("prev poses", prev_poses)
print("next poses", next_poses)
newimg = np.zeros_like(imnext)
for ci in range(num_obj):
color = cv2.inRange(imgcolorhsv, colors_lower[ci], colors_upper[ci])
if np.sum(color == 255) > 100:
points = np.argwhere(color == 255)
points = np.expand_dims(points, axis=0)
M = cv2.getRotationMatrix2D(
(prev_poses[ci * 2], prev_poses[ci * 2 + 1]), -output[ci * 3 + 2], 1
)
M[0, 2] += output[ci * 3]
M[1, 2] += output[ci * 3 + 1]
new_points = cv2.transform(points, M)
newimg[tuple(np.transpose(new_points))] = imgcolor[tuple(np.transpose(points))]
# action
cv2.arrowedLine(
imnext,
(action_start_ori[1], action_start_ori[0]),
(action_end_ori[1], action_end_ori[0]),
(255, 255, 255),
2,
tipLength=0.4,
)
cv2.arrowedLine(
imgcolor,
(action_start_ori[1], action_start_ori[0]),
(action_end_ori[1], action_end_ori[0]),
(255, 255, 255),
2,
tipLength=0.4,
)
newimg = cv2.medianBlur(newimg, 5)
newimg = cv2.cvtColor(newimg, cv2.COLOR_RGB2BGR)
newimg = cv2.addWeighted(newimg, 0.3, imnext, 0.7, 0)
images.append(imgcolor)
refs.append(prev_ref[0][3:7])
images.append(cv2.cvtColor(newimg, cv2.COLOR_BGR2RGB))
refs.append("prediction of " + str(prev_ref[0][3:7]))
if len(images) == 32:
for i in range(len(images)):
# cv2.imwrite(
# "figures/push-prediction-plot/" + refs[i] + ".png",
# cv2.cvtColor(images[i], cv2.COLOR_RGB2BGR),
# )
plt.subplot(math.ceil(len(images) / 8), 8, i + 1), plt.imshow(images[i], "gray")
plt.title(refs[i])
plt.xticks([]), plt.yticks([])
plt.show()
# plt.savefig('test.png', dpi=400)
input_str = input("One more?")
if input_str == "y":
images = []
refs = []
else:
break
@torch.no_grad()
def _verify_dataset(self, model, data_loader):
import torchvision
import matplotlib.pyplot as plt
from PIL import Image, ImageStat
import math
from constants import DEPTH_MEAN, DEPTH_STD
torch.manual_seed(1)
inv_normalize_color = T.Normalize(
mean=[
-COLOR_MEAN[0] / COLOR_STD[0],
-COLOR_MEAN[1] / COLOR_STD[1],
-COLOR_MEAN[2] / COLOR_STD[2],
],
std=[1 / COLOR_STD[0], 1 / COLOR_STD[1], 1 / COLOR_STD[2]],
)
inv_normalize_depth = T.Normalize(
mean=[-DEPTH_MEAN[0] / DEPTH_STD[0]], std=[1 / DEPTH_STD[0]]
)
model.eval()
for i, data in enumerate(data_loader):
(
prev_color_img,
prev_depth_img,
next_color_img,
next_depth_img,
used_binary_img,
prev_poses,
next_poses,
action,
delta,
prev_ref,
next_ref,
action_start_ori,
action_end_ori,
binary_objs_total,
num_obj,
) = data
binary_objs_total = binary_objs_total[0].numpy().astype(np.uint8)
num_obj = len(binary_objs_total)
for i in range(num_obj):
temp = binary_objs_total[i]
temp = cv2.cvtColor(temp, cv2.COLOR_RGB2BGR)
temp *= 255
temp = temp.astype(np.uint8)
cv2.imshow(str(i), temp)
np.set_printoptions(precision=3, suppress=True)
prev_poses = prev_poses[0].numpy().astype(int)
action = action[0].numpy().astype(int)
action_start_tile = np.tile(action[:2], num_obj)
print("prev poses", prev_poses)
img = inv_normalize_color(prev_color_img[0])
img = img.permute(1, 2, 0).numpy()
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img *= 255
img = img.astype(np.uint8)
cv2.imshow("prev color", img)
img = inv_normalize_depth(prev_depth_img[0])
img = img.permute(1, 2, 0).numpy()
cv2.imshow("prev depth", img)
img = used_binary_img[0, 0].numpy().astype(int)
img *= 255
img = img.astype(np.uint8)
for pi in range(num_obj):
cv2.circle(
img, (prev_poses[pi * 2 + 1], prev_poses[pi * 2]), 2, (120, 102, 255), -1
)
cv2.imshow("prev binary", img)
img = used_binary_img[0][1].numpy()
img *= 255
img = img.astype(np.uint8)
cv2.circle(img, (action[1], action[0]), 2, (120, 102, 255), -1)
cv2.circle(img, (action[3], action[2]), 2, (120, 102, 255), -1)
cv2.imshow("action", img)
img = inv_normalize_color(next_color_img[0])
img = img.permute(1, 2, 0).numpy()
img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
img *= 255
img = img.astype(np.uint8)
cv2.imshow("next color", img)
img = inv_normalize_depth(next_depth_img[0])
img = img.permute(1, 2, 0).numpy()
cv2.imshow("next depth", img)
action_start_ori = action_start_ori[0].numpy().astype(int)
action_end_ori = action_end_ori[0].numpy().astype(int)
action_start_ori_tile = np.tile(action_start_ori, num_obj)
prev_imdepth = prev_depth_img[0].cpu().permute(1, 2, 0).numpy()
prev_imdepth[prev_imdepth <= 0] = 0
prev_imdepth[prev_imdepth > 0] = 255
prev_imdepth = np.repeat(prev_imdepth, 3, axis=2)
prev_imdepth = prev_imdepth.astype(np.uint8)
cv2.circle(prev_imdepth, (action_start_ori[1], action_start_ori[0]), 5, (255, 0, 0), -1)
cv2.circle(prev_imdepth, (action_end_ori[1], action_end_ori[0]), 5, (0, 0, 255), -1)
prev_poses += action_start_ori_tile
prev_poses -= action_start_tile
for pi in range(num_obj):
cv2.circle(
prev_imdepth, (prev_poses[pi * 2 + 1], prev_poses[pi * 2]), 2, (255, 0, 255), -1
)
print("prev poses", prev_poses)
next_imdepth = next_depth_img[0].cpu().permute(1, 2, 0).numpy()
next_imdepth[next_imdepth <= 0] = 0
next_imdepth[next_imdepth > 0] = 255
next_imdepth = np.repeat(next_imdepth, 3, axis=2)
next_imdepth = next_imdepth.astype(np.uint8)
cv2.circle(next_imdepth, (action_start_ori[1], action_start_ori[0]), 5, (255, 0, 0), -1)
cv2.circle(next_imdepth, (action_end_ori[1], action_end_ori[0]), 5, (0, 0, 255), -1)
next_poses = next_poses[0].numpy().astype(int)
next_poses += action_start_ori_tile
next_poses -= action_start_tile
for pi in range(num_obj):
cv2.circle(
next_imdepth, (next_poses[pi * 2 + 1], next_poses[pi * 2]), 2, (255, 255, 0), -1
)
print("next poses", next_poses)
delta = delta[0].numpy()
print("delta", delta)
cv2.imshow("prev imdepth", prev_imdepth)
cv2.imshow("next imdepth", next_imdepth)
k = cv2.waitKey(0)
cv2.destroyAllWindows()
            if k == ord("q"):  # press 'q' to quit
break
@torch.no_grad()
def _symmetric_diff(self, model, criterion, data_loader):
import matplotlib.pyplot as plt
from matplotlib.ticker import PercentFormatter
from constants import (
colors_upper,
colors_lower,
)
inv_normalize = T.Normalize(
mean=[
-COLOR_MEAN[0] / COLOR_STD[0],
-COLOR_MEAN[1] / COLOR_STD[1],
-COLOR_MEAN[2] / COLOR_STD[2],
],
std=[1 / COLOR_STD[0], 1 / COLOR_STD[1], 1 / COLOR_STD[2]],
)
model.eval()
total_symmetric_difference = []
total_area = []
total_num = len(data_loader.dataset)
print(total_num)
for i, data in enumerate(data_loader):
(
prev_color_img,
prev_depth_img,
next_color_img,
next_depth_img,
used_binary_img,
prev_poses,
next_poses,
action,
delta,
prev_ref,
next_ref,
action_start_ori,
action_end_ori,
binary_objs_total,
num_obj,
) = data
prev_color_img = prev_color_img.to(self.device, non_blocking=True)
prev_depth_img = prev_depth_img.to(self.device, non_blocking=True)
next_color_img = next_color_img.to(self.device, non_blocking=True)
next_depth_img = next_depth_img.to(self.device, non_blocking=True)
used_binary_img = used_binary_img.to(self.device, non_blocking=True, dtype=torch.float)
prev_poses = prev_poses.to(self.device, non_blocking=True)
next_poses = next_poses.to(self.device, non_blocking=True)
action = action.to(self.device, non_blocking=True)
delta = delta.to(self.device, non_blocking=True)
action_start_ori = action_start_ori.to(self.device, non_blocking=True)
action_end_ori = action_end_ori.to(self.device, non_blocking=True)
binary_objs_total = binary_objs_total.to(self.device, non_blocking=True)
output = model(
prev_poses, action, used_binary_img, binary_objs_total, num_obj[0]
) # output = model(action, prev_poses)
target = delta
loss = criterion(output, target)
output = output[0].cpu().numpy()
target = target[0].cpu().numpy()
num_obj = num_obj[0].cpu().item()
output_xy = []
output_a = []
for num_idx in range(num_obj):
output_xy.append([output[num_idx * 3], output[num_idx * 3 + 1]])
output_a.append(output[num_idx * 3 + 2])
# no move
# output_xy.append([0, 0])
# output_a.append(0)
# move in the direction as the action
# if num_idx == 0:
# output_xy.append([(args.distance / 0.2), 0])
# output_a.append(0)
# else:
# output_xy.append([0, 0])
# output_a.append(0)
print(i)
print(prev_ref[0])
# print('output', output_x_y1.numpy(), output_a1.numpy(), output_x_y2.numpy(), output_a2.numpy())
# print('target', target.numpy())
# print('action', action_start_ori.cpu().numpy())
# print('loss', loss.cpu().numpy())
# ===== symmetric difference =====
prev_poses = prev_poses[0].cpu().numpy().astype(int)
action_start_ori = action_start_ori[0].cpu().numpy().astype(int)
action_start_ori_tile = np.tile(action_start_ori, num_obj)
action = action[0].cpu().numpy().astype(int)
action_start_tile = np.tile(action[:2], num_obj)
prev_poses += action_start_ori_tile
prev_poses -= action_start_tile
next_img = next_depth_img[0].cpu().permute(1, 2, 0).squeeze().numpy()
pred_img_colors = [np.zeros((320, 320), dtype=np.uint8) for i in range(num_obj)]
prev_color_img = inv_normalize(prev_color_img[0])
prev_color_img = prev_color_img.cpu().permute(1, 2, 0).numpy()
imgcolor = prev_color_img
imgcolor *= 255
imgcolor = imgcolor.astype(np.uint8)
imgcolor = cv2.medianBlur(imgcolor, 5)
imgcolor = cv2.cvtColor(imgcolor, cv2.COLOR_RGB2HSV)
# prediction
for ci in range(num_obj):
color = cv2.inRange(imgcolor, colors_lower[ci], colors_upper[ci])
points = np.argwhere(color == 255)
points = np.expand_dims(points, axis=0)
M = cv2.getRotationMatrix2D(
(prev_poses[ci * 2], prev_poses[ci * 2 + 1]), -output_a[ci], 1
)
M[0, 2] += output_xy[ci][0]
M[1, 2] += output_xy[ci][1]
points = cv2.transform(points, M)
                points[0, :, 0] += 48  # 48 == IMAGE_PAD_WIDTH
                points[0, :, 1] += 48
pred_img_colors[ci][tuple(np.transpose(points[0]))] = 255
pred_img_colors[ci] = pred_img_colors[ci][48 : (320 - 48), 48 : (320 - 48)]
pred_img_colors[ci] = cv2.medianBlur(pred_img_colors[ci], 5)
# ground truth
next_color_img = inv_normalize(next_color_img[0])
next_color_img = next_color_img.cpu().permute(1, 2, 0).numpy()
next_img_color = next_color_img
next_img_color[next_img_color < 0] = 0
next_img_color *= 255
next_img_color = next_img_color.astype(np.uint8)
imgcolor = cv2.cvtColor(next_img_color, cv2.COLOR_RGB2HSV)
next_img_colors = []
for ci in range(num_obj):
next_img_color = cv2.inRange(imgcolor, colors_lower[ci], colors_upper[ci])
next_img_colors.append(next_img_color)
total_area.append(np.sum(next_img_color == 255))
# intersection
for ci in range(num_obj):
intersection_color = np.zeros_like(next_img)
intersection_color[
np.logical_and(pred_img_colors[ci] == 255, next_img_colors[ci] == 255)
] = 255
union_color = np.zeros_like(next_img)
union_color[
np.logical_or(pred_img_colors[ci] == 255, next_img_colors[ci] == 255)
] = 255
diff_color = union_color - intersection_color
total_symmetric_difference.append(np.sum(diff_color == 255))
print(np.average(total_area))
print(np.std(total_area))
diff_union = np.array(total_symmetric_difference) / np.array(total_area)
print(np.average(diff_union))
print(np.std(diff_union))
np.savetxt("test.txt", diff_union)
plt.hist(diff_union, weights=np.ones(len(diff_union)) / len(diff_union), range=(0, 2))
plt.gca().yaxis.set_major_formatter(PercentFormatter(1))
# plt.show()
plt.savefig("test.png")
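# Note on the metric above: it is the per-object symmetric difference between
# the predicted and ground-truth masks, normalized by the ground-truth area:
# |pred ^ gt| / |gt| = (|pred U gt| - |pred n gt|) / |gt|.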
def cart2pol(x, y):
theta = np.arctan2(y, x)
rho = np.hypot(x, y)
return theta, rho
def pol2cart(theta, rho):
x = rho * np.cos(theta)
y = rho * np.sin(theta)
return x, y
def rotate_contour(cnt, angle):
M = cv2.moments(cnt)
cx = int(round(M["m10"] / M["m00"]))
cy = int(round(M["m01"] / M["m00"]))
cnt_norm = cnt - [cx, cy]
coordinates = cnt_norm[:, 0, :]
xs, ys = coordinates[:, 0], coordinates[:, 1]
thetas, rhos = cart2pol(xs, ys)
thetas = np.rad2deg(thetas)
thetas = (thetas + angle) % 360
# thetas = thetas + angle
thetas = np.deg2rad(thetas)
xs, ys = pol2cart(thetas, rhos)
cnt_norm[:, 0, 0] = xs
cnt_norm[:, 0, 1] = ys
cnt_rotated = cnt_norm + [cx, cy]
return cnt_rotated
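# A minimal usage sketch for the polar-coordinate helpers above (the square
# below is illustrative, not from the dataset): rotate a contour, given in
# OpenCV's (N, 1, 2) point layout, by 45 degrees about its centroid.
def _demo_rotate_contour():
    square = np.array([[[10, 10]], [[20, 10]], [[20, 20]], [[10, 20]]], dtype=np.float32)
    return rotate_contour(square, 45)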
if __name__ == "__main__":
args = parse_args()
trainer = PushPredictionTrainer(args)
trainer.main()
| 44,879 | 38.402985 | 170 |
py
|
more
|
more-main/generate_hard_cases.py
|
""" Han """
import numpy as np
from numpy.core.fromnumeric import shape
from shapely.geometry import Point, Polygon, LineString, MultiLineString
import matplotlib.pyplot as plt
from signal import signal, SIGINT
from constants import WORKSPACE_LIMITS, PUSH_DISTANCE
def handler(signal_received, frame):
# Handle any cleanup here
print("SIGINT or CTRL-C detected. Exiting gracefully")
exit(0)
""" Parameters to specify """
POLYGONS = {
"concave.urdf": [(0.045, -0.0225), (0.045, 0.0225), (-0.045, 0.0225), (-0.045, -0.0225)],
"triangle.urdf": [(0.0225, -0.045), (0.0225, 0.045), (-0.0225, 0)],
"cube.urdf": [(0.0225, -0.0225), (0.0225, 0.0225), (-0.0225, 0.0225), (-0.0225, -0.0225)],
"half-cube.urdf": [(0.01125, -0.0225), (0.01125, 0.0225), (-0.01125, 0.0225), (-0.01125, -0.0225)],
"rect.urdf": [(0.0225, -0.045), (0.0225, 0.045), (-0.0225, 0.045), (-0.0225, -0.045)],
"cylinder.urdf": [(0.022, -0.022), (0.022, 0.022), (-0.022, 0.022), (-0.022, -0.022)],
'concave.urdf cylinder.urdf concave.urdf': [(0.045, -0.05), (0.045, 0.05), (-0.045, 0.05), (-0.045, -0.05)],
# "concave.urdf": [(0.0225, -0.045), (0.0225, 0.045), (-0.0225, 0.045), (-0.0225, -0.045)],
# "triangle.urdf": [(0.045, 0.0225), (-0.045, 0.0225), (0.0, -0.0225)],
# "cube.urdf": [(0.0225, -0.0225), (0.0225, 0.0225), (-0.0225, 0.0225), (-0.0225, -0.0225)],
# "half-cube.urdf": [(0.0225, -0.01125), (0.0225, 0.01125), (-0.0225, 0.01125), (-0.0225, -0.01125)],
# "rect.urdf": [(0.045, -0.0225), (0.045, 0.0225), (-0.045, 0.0225), (-0.045, -0.0225)],
# "cylinder.urdf": [(0.022, -0.022), (0.022, 0.022), (-0.022, 0.022), (-0.022, -0.022)],
# 'concave.urdf cylinder.urdf concave.urdf': [(0.05, -0.045), (0.05, 0.045), (-0.05, 0.045), (-0.05, -0.045)],
# "half-cylinder.urdf": [(0.022, -0.011), (0.022, 0.011), (-0.022, 0.011), (-0.022, -0.011)],
}
CENTERS = {
"concave.urdf": (0.0, 0.0),
"triangle.urdf": (0.0, 0.0),
"cube.urdf": (0.0, 0.0),
"half-cube.urdf": (0.0, 0.0),
"rect.urdf": (0.0, 0.0),
"cylinder.urdf": (0.0, 0.0),
'concave.urdf cylinder.urdf concave.urdf': (0.0, 0.0),
# "half-cylinder.urdf": (0.0, 0.0),
}
heights = {
"concave.urdf": 0.024,
"triangle.urdf": 0.024,
"cube.urdf": 0.024,
"half-cube.urdf": 0.024,
"rect.urdf": 0.024,
"cylinder.urdf": 0.024,
'concave.urdf cylinder.urdf concave.urdf': 0.024,
# "half-cylinder.urdf": 0.024,
}
color_space = (
np.asarray(
[
[78.0, 121.0, 167.0], # blue
[89.0, 161.0, 79.0], # green
[156, 117, 95], # brown
[242, 142, 43], # orange
[237.0, 201.0, 72.0], # yellow
[186, 176, 172], # gray
[255.0, 87.0, 89.0], # red
[176, 122, 161], # purple
[118, 183, 178], # cyan
[255, 157, 167], # pink
]
)
/ 255.0
)
def generate_unit_scenario(shapes: list) -> np.ndarray:
"""Randomly generate challenging VPG test scenarios.
Params:
shapes (list): shapes of objects in the generated scene.
Return:
np.ndarray: each row represents the 2d pose for a shape.
"""
# Find polygons of all shapes
polys = [np.array(POLYGONS[s], dtype=np.float64) for s in shapes]
centers = [np.array(CENTERS[s], dtype=np.float64) for s in shapes]
configs = [[centers[0][0], centers[0][1], 0]]
# We start with an initial shape and build up
meta_poly = Polygon(polys[0])
# Iterate through all polygons and add them to meta_poly
for j, p in enumerate(polys):
if j == 0:
continue
# Randomly find an edge on meta_poly to attach polygon
coords = np.transpose(meta_poly.exterior.coords.xy)
matched = False
count = 0
while not matched:
count += 1
if count > 100:
return None
# Looking for an edge
index = np.random.randint(0, len(coords) - 1)
start_pt = coords[index]
            if np.random.uniform(0, 1) > 0.7:  # j > 0 always holds here; sometimes use the edge midpoint
start_next_pt = coords[index + 1]
ratio = 0.5
start_pt = np.array(
[
start_pt[0] + ratio * (start_next_pt[0] - start_pt[0]),
start_pt[1] + ratio * (start_next_pt[1] - start_pt[1]),
]
)
# Looking for an arbitrary rotation
# angle = np.random.randint(0, 4) * np.pi / 2
angle = np.random.randint(0, 2) * np.pi - np.pi
rotation_matrix = np.matrix(
[[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]
)
# Transitions to match the new polygon to the existing shape
center = np.copy(centers[j])
poly = np.copy(p)
center -= poly[0]
poly -= poly[0]
for i in range(len(poly)):
poly[i] = np.dot(rotation_matrix, np.transpose(poly[i]))
center = np.dot(rotation_matrix, np.transpose(center))
pt = np.random.randint(poly.shape[0])
center -= poly[pt]
poly -= poly[pt]
poly += start_pt
center += start_pt
if (
center[0, 0] + 0.5 < WORKSPACE_LIMITS[0][0] + PUSH_DISTANCE
or center[0, 0] + 0.5 > WORKSPACE_LIMITS[0][1] - PUSH_DISTANCE
or center[0, 1] < WORKSPACE_LIMITS[1][0] + PUSH_DISTANCE
or center[0, 1] > WORKSPACE_LIMITS[1][1] - PUSH_DISTANCE
):
continue
# Check if the generated pose suggests a hard case
suggested_poly = Polygon(poly)
if meta_poly.intersects(suggested_poly):
if (
type(meta_poly.intersection(suggested_poly)) is Polygon
and meta_poly.intersection(suggested_poly).area < 1e-15
):
meta_poly = meta_poly.union(suggested_poly)
configs.append([center[0, 0], center[0, 1], angle])
break
if meta_poly.touches(suggested_poly):
if (
type(meta_poly.intersection(suggested_poly)) is not Point
and meta_poly.intersection(suggested_poly).area < 1e-8
):
meta_poly = meta_poly.union(suggested_poly)
configs.append([center[0, 0], center[0, 1], angle])
break
# Finally, a random rotation for all objects
def my_rotate(origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
"""
ox, oy = origin
px, py = point
qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy)
qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy)
return [qx, qy]
angle = np.random.uniform(-np.pi, np.pi)
configs = [
my_rotate((0, 0), (config[0], config[1]), angle) + [config[2] + angle] for config in configs
]
# fig, ax = plt.subplots()
# ax.plot(*meta_poly.exterior.xy)
# ax.plot(*(np.transpose(configs)[:2]), "o")
# ax.set_aspect(1)
# plt.show()
return configs
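# A minimal sketch of exercising generate_unit_scenario on its own (the shape
# list is illustrative; names must exist in POLYGONS). The call returns None
# when no valid placement is found within 100 attempts, and generate() below
# also wraps it in try/except since shapely unions can occasionally fail.
def _demo_unit_scenario():
    configs = generate_unit_scenario(["cube.urdf", "rect.urdf", "triangle.urdf"])
    if configs is not None:
        for x, y, theta in configs:
            print("pose: x=%.3f y=%.3f theta=%.3f" % (x, y, theta))
    return configs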
def generate(shape_list, num_scenarios, num_shapes_min, num_shapes_max, color_space):
"""Randomly generate challenging VPG test scenarios. Output to txt.
Params:
shape_list (list): all available shapes.
num_scenarios (int): number of scenarios to be generated.
num_shapes_min, num_shapes_max: the range of number of objects in a scenario
"""
np.random.seed(121119)
num_generated = 0
while num_generated < num_scenarios:
print(num_generated)
num_objects = np.random.randint(num_shapes_min, num_shapes_max + 1)
selected_objects = np.random.choice(shape_list, size=num_objects)
num_combined = np.sum(selected_objects == 'concave.urdf cylinder.urdf concave.urdf')
# if 'concave.urdf cylinder.urdf concave.urdf' in selected_objects:
# idx = np.argwhere(selected_objects == 'concave.urdf cylinder.urdf concave.urdf')
# temp = selected_objects[0]
# selected_objects[0] = 'concave.urdf cylinder.urdf concave.urdf'
# selected_objects[idx] = temp
# else:
# continue
        if num_objects + num_combined * 2 > 12:  # each combined shape spawns two extra objects
            continue
try:
configs = generate_unit_scenario(selected_objects)
if configs is not None:
configs = [[round(c, 6) for c in config] for config in configs]
with open("hard-cases/" + f"{num_generated:06d}" + ".txt", "w") as out_file:
# color_idx = np.random.choice(len(color_space), 2)
# color_idx = list(color_idx) * num_objects
for i, obj in enumerate(selected_objects):
# color = color_space[color_idx[i]]
if i % len(color_space) == 0 and i != 0:
color = color_space[np.random.randint(1, 10)]
else:
color = color_space[i % len(color_space)]
if obj == 'concave.urdf cylinder.urdf concave.urdf':
print(obj)
out_file.write(
"%s %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e\n"
% (
"cylinder.urdf",
color[0],
color[1],
color[2],
configs[i][0] + 0.5,
configs[i][1],
heights[obj],
0,
0,
configs[i][2],
)
)
color = color_space[np.random.randint(1, 10)]
out_file.write(
"%s %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e\n"
% (
"concave.urdf",
color[0],
color[1],
color[2],
configs[i][0] + 0.5 + np.cos(configs[i][2]) * 0.024,
configs[i][1] + np.sin(configs[i][2]) * 0.024,
heights[obj],
0,
0,
configs[i][2] + np.pi / 2,
)
)
color = color_space[np.random.randint(1, 10)]
out_file.write(
"%s %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e\n"
% (
"concave.urdf",
color[0],
color[1],
color[2],
configs[i][0] + 0.5 - np.cos(configs[i][2]) * 0.024,
configs[i][1] - np.sin(configs[i][2]) * 0.024,
heights[obj],
0,
0,
configs[i][2] - np.pi / 2,
)
)
else:
out_file.write(
"%s %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e %.18e\n"
% (
obj,
color[0],
color[1],
color[2],
configs[i][0] + 0.5,
configs[i][1],
heights[obj],
0,
0,
configs[i][2],
)
)
num_generated += 1
        except Exception as e:
            print("scenario generation failed, retrying:", e)
continue
if __name__ == "__main__":
signal(SIGINT, handler)
shapes = [x for x in POLYGONS.keys() if "concave.urdf cylinder.urdf concave.urdf" not in x and "concave" not in x and "cylinder" not in x] * 6
shapes.extend([x for x in POLYGONS.keys() if "concave" in x and "concave.urdf cylinder.urdf concave.urdf" not in x] * 4)
shapes.extend([x for x in POLYGONS.keys() if "cylinder" in x and "concave.urdf cylinder.urdf concave.urdf" not in x] * 2)
shapes.extend([x for x in POLYGONS.keys() if "concave.urdf cylinder.urdf concave.urdf" in x] * 1)
print(shapes)
generate(shapes, 3000, 5, 12, color_space)
| 13,360 | 43.83557 | 146 |
py
|
more
|
more-main/torch_utils.py
|
import torch
import torch.distributed as dist
def warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor):
"""
https://github.com/pytorch/vision/blob/master/references/detection/utils.py
"""
def f(x):
if x >= warmup_iters:
return 1
alpha = float(x) / warmup_iters
return warmup_factor * (1 - alpha) + alpha
return torch.optim.lr_scheduler.LambdaLR(optimizer, f)
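# A minimal, self-contained sketch of the warmup schedule (the dummy parameter
# and step counts below are illustrative, not part of this repo's training):
# the learning rate ramps linearly from warmup_factor * base_lr to base_lr over
# warmup_iters scheduler steps, then stays at base_lr.
def _demo_warmup_lr_scheduler():
    param = torch.nn.Parameter(torch.zeros(1))
    optimizer = torch.optim.SGD([param], lr=0.01)
    scheduler = warmup_lr_scheduler(optimizer, warmup_iters=5, warmup_factor=0.1)
    lrs = []
    for _ in range(8):
        optimizer.step()
        scheduler.step()
        lrs.append(optimizer.param_groups[0]["lr"])
    return lrs  # ramps up to 0.01 after 5 steps and stays there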
def collate_fn(batch):
new_batch = list(filter(lambda b: b[1]["num_obj"].item() > 0, batch))
return tuple(zip(*new_batch))
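# Sketch of the intended use (the dataset and loader here are illustrative):
# each batch is a list of (image, target) pairs; samples whose target has no
# objects are dropped before the tuple-of-lists transpose.
#
# data_loader = torch.utils.data.DataLoader(
#     dataset, batch_size=4, shuffle=True, collate_fn=collate_fn
# )
# images, targets = next(iter(data_loader))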
def is_dist_avail_and_initialized():
"""
https://github.com/pytorch/vision/blob/master/references/detection/utils.py
"""
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
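# Sketch of typical use in a distributed training loop (loss names are
# illustrative): every process passes its own loss dict and gets back the
# values averaged across processes; with a single process the dict is
# returned unchanged.
#
# loss_dict = {"loss_cls": loss_cls, "loss_box": loss_box}
# loss_dict_reduced = reduce_dict(loss_dict)
# total_loss = sum(loss for loss in loss_dict_reduced.values())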
| 1,845 | 27.4 | 80 |
py
|
more
|
more-main/log_utils.py
|
from collections import defaultdict, deque
import datetime
import time
import logging
from termcolor import colored
import sys
import os
import torch
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
https://github.com/pytorch/vision/blob/master/references/detection/utils.py
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} (global_avg: {global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median, avg=self.avg, global_avg=self.global_avg, max=self.max, value=self.value
)
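# Minimal sketch of SmoothedValue: with window_size=4, the windowed statistics
# cover only the last four updates while global_avg covers all of them.
def _demo_smoothed_value():
    meter = SmoothedValue(window_size=4, fmt="{avg:.2f}")
    for v in [1.0, 2.0, 3.0, 4.0, 5.0]:
        meter.update(v)
    return meter.median, meter.avg, meter.global_avg  # (3.0, 3.5, 3.0)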
class MetricLogger(object):
"""https://github.com/pytorch/vision/blob/master/references/segmentation/utils.py"""
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, logger, header=None):
i = 0
if not header:
header = ""
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt="{avg:.4f}")
data_time = SmoothedValue(fmt="{avg:.4f}")
space_fmt = ":" + str(len(str(len(iterable)))) + "d"
if torch.cuda.is_available():
log_msg = self.delimiter.join(
[
header,
"[{0" + space_fmt + "}/{1}]",
"eta: {eta}",
"{meters}",
"time: {time}",
"data: {data}",
"max mem: {memory:.0f}",
]
)
else:
log_msg = self.delimiter.join(
[header, "[{0" + space_fmt + "}/{1}]", "eta: {eta}", "{meters}", "time: {time}", "data: {data}"]
)
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == (len(iterable) - 1):
eta_seconds = iter_time.avg * (len(iterable) - 1 - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
logger.info(
log_msg.format(
i,
len(iterable) - 1,
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB,
)
)
else:
logger.info(
log_msg.format(
i,
len(iterable) - 1,
eta=eta_string,
meters=str(self),
time=str(iter_time),
data=str(data_time),
)
)
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info("{} Total time: {}".format(header, total_time_str))
class _ColorfulFormatter(logging.Formatter):
"""https://github.com/facebookresearch/detectron2/blob/299c4b0dbab6fe5fb81d3870636cfd86fc334447/detectron2/utils/logger.py"""
def __init__(self, *args, **kwargs):
self._root_name = kwargs.pop("root_name") + "."
super().__init__(*args, **kwargs)
def formatMessage(self, record):
log = super().formatMessage(record)
if record.levelno == logging.WARNING:
prefix = colored("WARNING", "red", attrs=["blink"])
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
prefix = colored("ERROR", "red", attrs=["blink", "underline"])
elif record.levelno == logging.DEBUG:
prefix = colored("DEBUG", "grey")
else:
return log
return prefix + " " + log
def setup_logger(output_dir=None, name="Training"):
"""https://github.com/facebookresearch/detectron2/blob/299c4b0dbab6fe5fb81d3870636cfd86fc334447/detectron2/utils/logger.py"""
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
plain_formatter = logging.Formatter("[%(asctime)s] %(name)s %(levelname)s: %(message)s", datefmt="%m/%d %H:%M:%S")
color_formatter = _ColorfulFormatter(
colored("[%(asctime)s %(name)s]: ", "green") + "%(message)s",
datefmt="%m/%d %H:%M:%S",
root_name=name,
)
# stdout logging
ch = logging.StreamHandler(stream=sys.stdout)
ch.setFormatter(color_formatter)
logger.addHandler(ch)
# file logging
if output_dir is not None:
filename = os.path.join(output_dir, "log.txt")
fh = logging.FileHandler(filename)
fh.setFormatter(plain_formatter)
logger.addHandler(fh)
return logger
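# Minimal sketch of wiring the pieces together (the output directory, loader,
# and train_step below are illustrative):
#
# logger = setup_logger(output_dir="logs", name="Training")
# metric_logger = MetricLogger(delimiter="  ")
# for data in metric_logger.log_every(data_loader, 10, logger, header="Epoch [0]"):
#     loss = train_step(data)
#     metric_logger.update(loss=loss)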
| 6,646 | 32.741117 | 129 |
py
|
more
|
more-main/range_detector.py
|
import cv2
import argparse
from operator import xor
def callback(value):
pass
def setup_trackbars(range_filter):
cv2.namedWindow("Trackbars", 0)
for i in ["MIN", "MAX"]:
v = 0 if i == "MIN" else 255
for j in range_filter:
            if j == "H":
                # OpenCV hue range is [0, 179]
                cv2.createTrackbar("%s_%s" % (j, i), "Trackbars", v, 179, callback)
            else:
                cv2.createTrackbar("%s_%s" % (j, i), "Trackbars", v, 255, callback)
def get_arguments():
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--filter", required=True, help="Range filter. RGB or HSV")
ap.add_argument("-i", "--image", required=False, help="Path to the image")
ap.add_argument("-w", "--webcam", required=False, help="Use webcam", action="store_true")
ap.add_argument(
"-p",
"--preview",
required=False,
help="Show a preview of the image after applying the mask",
action="store_true",
)
args = vars(ap.parse_args())
if not xor(bool(args["image"]), bool(args["webcam"])):
ap.error("Please specify only one image source")
    if args["filter"].upper() not in ["RGB", "HSV"]:
        ap.error("Please specify a correct filter.")
return args
def get_trackbar_values(range_filter):
values = []
for i in ["MIN", "MAX"]:
for j in range_filter:
v = cv2.getTrackbarPos("%s_%s" % (j, i), "Trackbars")
values.append(v)
return values
def main():
args = get_arguments()
range_filter = args["filter"].upper()
if args["image"]:
print(args["image"])
image = cv2.imread(args["image"])
if range_filter == "RGB":
frame_to_thresh = image.copy()
else:
frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
else:
camera = cv2.VideoCapture(0)
setup_trackbars(range_filter)
while True:
if args["webcam"]:
ret, image = camera.read()
if not ret:
break
if range_filter == "RGB":
frame_to_thresh = image.copy()
else:
frame_to_thresh = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
v1_min, v2_min, v3_min, v1_max, v2_max, v3_max = get_trackbar_values(range_filter)
thresh = cv2.inRange(frame_to_thresh, (v1_min, v2_min, v3_min), (v1_max, v2_max, v3_max))
if args["preview"]:
preview = cv2.bitwise_and(image, image, mask=thresh)
cv2.imshow("Preview", preview)
else:
cv2.imshow("Original", image)
cv2.imshow("Thresh", thresh)
        if cv2.waitKey(1) & 0xFF == ord("q"):
break
if __name__ == "__main__":
main()
| 2,729 | 25.504854 | 97 |
py
|
more
|
more-main/action_utils_mask.py
|
import cv2
import imutils
import math
import random
from constants import (
    GRIPPER_PUSH_ADD_PIXEL,
    colors_lower,
    colors_upper,
    IMAGE_PAD_SIZE,
    IMAGE_SIZE,
    IMAGE_PAD_WIDTH,
    PUSH_DISTANCE,
    GRIPPER_PUSH_RADIUS_PIXEL,
    PIXEL_SIZE,
    DEPTH_MIN,
    CONSECUTIVE_DISTANCE_THRESHOLD,
    PUSH_BUFFER,
    IMAGE_PAD_DIFF,
    GRIPPER_GRASP_WIDTH_PIXEL,
)
import numpy as np
import torch
from dataset import PushPredictionMultiDatasetEvaluation
from push_net import PushPredictionNet
from train_maskrcnn import get_model_instance_segmentation
from torchvision.transforms import functional as TF
import copy
import utils
class Predictor:
"""
Predict and generate images after push actions.
Assume the color image and depth image are well matched.
We use the masks to generate new images, so the quality of mask is important.
The input to this forward function should be returned from the sample_actions.
"""
def __init__(self, snapshot):
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
push_model = PushPredictionNet()
state = torch.load(snapshot)["model"]
push_model.load_state_dict(state)
self.push_model = push_model.to(self.device)
self.push_model.eval()
    # only rotated_color_image and rotated_depth_image are padded to 320x320
@torch.no_grad()
def forward(
self,
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs,
rotated_mask_objs,
plot=False,
):
# get data
dataset = PushPredictionMultiDatasetEvaluation(
rotated_depth_image, rotated_action, rotated_center, rotated_binary_objs
)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=len(rotated_depth_image), shuffle=False, num_workers=0
)
(
prev_poses,
action,
action_start_ori,
action_end_ori,
used_binary_img,
binary_objs_total,
num_obj,
) = next(iter(data_loader))
prev_poses = prev_poses.to(self.device, non_blocking=True)
used_binary_img = used_binary_img.to(self.device, non_blocking=True, dtype=torch.float)
binary_objs_total = binary_objs_total.to(self.device, non_blocking=True)
action = action.to(self.device, non_blocking=True)
# get output
output = self.push_model(prev_poses, action, used_binary_img, binary_objs_total, num_obj[0])
output = output.cpu().numpy()
# generate new images
prev_poses_input = prev_poses.cpu().numpy().astype(int)
prev_poses = copy.deepcopy(prev_poses_input)
action_start_ori = action_start_ori.numpy().astype(int)
action_end_ori = action_end_ori.numpy().astype(int)
action_start_ori_tile = np.tile(action_start_ori, num_obj[0])
action_start = action[:, :2].cpu().numpy().astype(int)
action_start_tile = np.tile(action_start, num_obj[0])
generated_color_images = []
generated_depth_images = []
validations = []
for i in range(len(rotated_depth_image)):
i_output = output[i]
i_prev_poses = prev_poses[i]
i_action_start_ori_tile = action_start_ori_tile[i]
i_action_start_tile = action_start_tile[i]
i_prev_poses += i_action_start_ori_tile
i_prev_poses -= i_action_start_tile
i_rotated_angle = rotated_angle[i]
i_rotated_mask_objs = rotated_mask_objs[i]
color_image = rotated_color_image[i]
depth_image = rotated_depth_image[i]
# transform points and fill them into a black image
generated_color_image = np.zeros_like(color_image)
generated_depth_image = np.zeros_like(depth_image)
post_points_pad = []
post_new_points_pad = []
# for each object
valid = True
for pi in range(num_obj[i]):
                # if the push would move an object out of the boundary, skip this action
center = [
i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
]
center = np.array([[center]])
M = cv2.getRotationMatrix2D(
(
i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
),
-i_output[pi * 3 + 2],
1,
)
ori_M = M.copy()
M[0, 2] += i_output[pi * 3]
M[1, 2] += i_output[pi * 3 + 1]
new_center = cv2.transform(center, M)
new_center = np.transpose(new_center[0])
ori_center = cv2.transform(center, ori_M)
ori_center = np.transpose(ori_center[0])
M = cv2.getRotationMatrix2D(
(IMAGE_PAD_SIZE // 2, IMAGE_PAD_SIZE // 2), i_rotated_angle, 1,
)
new_center = [new_center[0][0], new_center[1][0]]
new_center = np.array([[new_center]])
new_center = cv2.transform(new_center, M)[0][0]
ori_center = [ori_center[0][0], ori_center[1][0]]
ori_center = np.array([[ori_center]])
ori_center = cv2.transform(ori_center, M)[0][0]
if (
new_center[1] - IMAGE_PAD_WIDTH > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or new_center[1] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
or new_center[0] - IMAGE_PAD_WIDTH > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or new_center[0] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
):
if not (
ori_center[1] - IMAGE_PAD_WIDTH > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or ori_center[1] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
or ori_center[0] - IMAGE_PAD_WIDTH > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or ori_center[0] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
):
valid = False
break
if valid:
for pi in range(num_obj[i]):
# # if the object is out of the boundary, then, we can skip this action
# if (
# i_prev_poses[pi * 2 + 1] + i_output[pi * 3 + 1]
# > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2 + 1] + i_output[pi * 3 + 1] < PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2] + i_output[pi * 3]
# > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2] + i_output[pi * 3] < PUSH_BUFFER / PIXEL_SIZE
# ):
# valid = False
# break
# find out transformation
mask = i_rotated_mask_objs[pi]
points = np.argwhere(mask == 255)
points = np.expand_dims(points, axis=0)
M = cv2.getRotationMatrix2D(
(
i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
),
-i_output[pi * 3 + 2],
1,
)
M[0, 2] += i_output[pi * 3]
M[1, 2] += i_output[pi * 3 + 1]
new_points = cv2.transform(points, M)
post_points_pad.append(list(np.transpose(points[0])))
post_new_points_pad.append(list(np.transpose(new_points[0])))
validations.append(valid)
if valid:
for pi in range(num_obj[i]):
post_new_points_pad[pi] = (
np.clip(post_new_points_pad[pi][0], 0, IMAGE_PAD_SIZE - 1),
np.clip(post_new_points_pad[pi][1], 0, IMAGE_PAD_SIZE - 1),
)
post_points_pad[pi] = (
np.clip(post_points_pad[pi][0], 0, IMAGE_PAD_SIZE - 1),
np.clip(post_points_pad[pi][1], 0, IMAGE_PAD_SIZE - 1),
)
generated_color_image[post_new_points_pad[pi]] = color_image[
post_points_pad[pi]
]
generated_depth_image[post_new_points_pad[pi]] = depth_image[
post_points_pad[pi]
]
if plot:
cv2.circle(
generated_color_image,
(i_prev_poses[pi * 2 + 1] + 48, i_prev_poses[pi * 2] + 48),
3,
(255, 255, 255),
-1,
)
if plot:
cv2.arrowedLine(
generated_color_image,
(action_start_ori[i][1] + 48, action_start_ori[i][0] + 48),
(action_end_ori[i][1] + 48, action_end_ori[i][0] + 48),
(255, 0, 255),
2,
tipLength=0.4,
)
generated_color_image = utils.rotate(generated_color_image, angle=-i_rotated_angle)
generated_depth_image = utils.rotate(generated_depth_image, angle=-i_rotated_angle)
generated_color_image = generated_color_image[
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, :
]
generated_depth_image = generated_depth_image[
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF
]
generated_color_image = cv2.medianBlur(generated_color_image, 5)
generated_depth_image = generated_depth_image.astype(np.float32)
generated_depth_image = cv2.medianBlur(generated_depth_image, 5)
generated_color_images.append(generated_color_image)
generated_depth_images.append(generated_depth_image)
return generated_color_images, generated_depth_images, validations
def get_sign_line(pose0, pose1, pose2):
"""
Line is from pose1 to pose2.
if value > 0, pose0 is on the left side of the line.
if value = 0, pose0 is on the same line.
if value < 0, pose0 is on the right side of the line.
"""
return (pose2[0] - pose1[0]) * (pose0[1] - pose1[1]) - (pose0[0] - pose1[0]) * (
pose2[1] - pose1[1]
)
def distance_to_line(pose0, pose1, pose2):
"""
Line is from pose1 to pose2.
"""
return abs(
(pose2[0] - pose1[0]) * (pose1[1] - pose0[1])
- (pose1[0] - pose0[0]) * (pose2[1] - pose1[1])
) / math.sqrt((pose2[0] - pose1[0]) ** 2 + (pose2[1] - pose1[1]) ** 2)
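# Quick numeric sketch of the two helpers above: for the line from (0, 0) to
# (0, 10), the point (5, 5) yields a negative sign (right side under this
# convention) and lies 5 pixels from the line.
def _demo_line_helpers():
    assert get_sign_line((5, 5), (0, 0), (0, 10)) < 0
    assert abs(distance_to_line((5, 5), (0, 0), (0, 10)) - 5.0) < 1e-9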
def adjust_push_start_point(
pose0, pose1, contour, distance=GRIPPER_PUSH_RADIUS_PIXEL, add_distance=GRIPPER_PUSH_ADD_PIXEL,
):
"""
Give two points, find the most left and right point on the contour within a given range based on pose1->pose0.
So the push will not collide with the contour
pose0: the center of contour
pose1: the point on the contour
"""
r = math.sqrt((pose1[0] - pose0[0]) ** 2 + (pose1[1] - pose0[1]) ** 2)
dx = round(distance / r * (pose0[1] - pose1[1]))
dy = round(distance / r * (pose1[0] - pose0[0]))
pose2 = (pose0[0] + dx, pose0[1] + dy)
pose3 = (pose1[0] + dx, pose1[1] + dy)
pose4 = (pose0[0] - dx, pose0[1] - dy)
pose5 = (pose1[0] - dx, pose1[1] - dy)
pose1_sign23 = get_sign_line(pose1, pose2, pose3)
pose1_sign45 = get_sign_line(pose1, pose4, pose5)
assert pose1_sign23 * pose1_sign45 < 0
center_distance = distance_to_line(pose1, pose2, pose4)
max_distance = 0
for p in range(0, len(contour)):
test_pose = contour[p][0]
test_pose_sign23 = get_sign_line(test_pose, pose2, pose3)
test_pose_sign45 = get_sign_line(test_pose, pose4, pose5)
# in the range, between two lines
if pose1_sign23 * test_pose_sign23 >= 0 and pose1_sign45 * test_pose_sign45 >= 0:
# is far enough
test_center_distance = distance_to_line(test_pose, pose2, pose4)
if test_center_distance >= center_distance:
# in the correct side
test_edge_distance = distance_to_line(test_pose, pose3, pose5)
if test_edge_distance < test_center_distance:
if test_center_distance > max_distance:
max_distance = test_center_distance
diff_distance = abs(max_distance - center_distance)
return math.ceil(diff_distance) + add_distance
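# Intuition for adjust_push_start_point: contour points that fall inside the
# gripper-width corridor along pose1->pose0 are swept, and the function returns
# how far the farthest such point sticks out beyond pose1 (rounded up, plus
# add_distance), so the push start can be backed off enough for the gripper to
# clear the object before contact.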
def get_orientation(pts):
sz = len(pts)
data_pts = np.empty((sz, 2), dtype=np.float64)
for i in range(data_pts.shape[0]):
data_pts[i, 0] = pts[i, 0, 0]
data_pts[i, 1] = pts[i, 0, 1]
# Perform PCA analysis
mean = np.empty((0))
mean, eigenvectors, eigenvalues = cv2.PCACompute2(data_pts, mean)
angle = math.atan2(eigenvectors[0, 1], eigenvectors[0, 0]) # orientation in radians
return angle
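# Minimal sketch: for an axis-aligned 20x10 rectangle contour the first
# principal component follows the long (x) edge, so get_orientation returns
# 0 or pi (the eigenvector's sign is arbitrary).
def _demo_get_orientation():
    rect = np.array([[[0, 0]], [[20, 0]], [[20, 10]], [[0, 10]]], dtype=np.float64)
    return get_orientation(rect)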
def is_close(prev_pose, this_pose):
dis = math.sqrt((this_pose[0] - prev_pose[0]) ** 2 + (this_pose[1] - prev_pose[1]) ** 2)
if dis < CONSECUTIVE_DISTANCE_THRESHOLD / PIXEL_SIZE:
return True
return False
def close_distance(prev_pose, this_pose):
dis = math.sqrt((this_pose[0] - prev_pose[0]) ** 2 + (this_pose[1] - prev_pose[1]) ** 2)
return dis
def sample_actions(
color_image,
depth_image,
mask_objs,
plot=False,
start_pose=None,
from_color=False,
prev_move=None,
):
"""
Sample actions around the objects, from the boundary to the center.
Assume there is no object in "black"
Output the rotated image, such that the push action is from left to right
"""
gray = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
gray = gray.astype(np.uint8)
if plot:
plot_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
blurred = cv2.medianBlur(gray, 5)
thresh = cv2.threshold(blurred, 20, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
# if the mask is in color format
if from_color:
ori_mask_objs = mask_objs[0]
ori_mask_obj_centers = mask_objs[1]
new_mask_objs = []
for idx, mask in enumerate(mask_objs[0]):
center = ori_mask_obj_centers[idx]
new_mask = np.copy(mask[0])
new_mask = new_mask.astype(np.uint8)
new_mask = cv2.cvtColor(new_mask, cv2.COLOR_RGB2GRAY)
new_mask = cv2.threshold(new_mask, 50, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
new_mask_pad = np.zeros((IMAGE_PAD_SIZE, IMAGE_PAD_SIZE), dtype=np.uint8)
if (
center[0] - 30 < 0
or center[0] + 30 >= IMAGE_PAD_SIZE
or center[1] - 30 < 0
or center[1] + 30 >= IMAGE_PAD_SIZE
):
return [], [], [], [], [], [], [], []
new_mask_pad[
center[0] - 30 : center[0] + 30, center[1] - 30 : center[1] + 30
] = new_mask
new_mask = new_mask_pad[
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
]
new_mask_objs.append(new_mask)
mask_objs = new_mask_objs
# find the contour of a single object
points_on_contour = []
points = []
four_idx = []
other_idx = []
priority_points_on_contour = []
priority_points = []
center = []
binary_objs = []
for oi in range(len(mask_objs)):
obj_cnt = cv2.findContours(mask_objs[oi], cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
obj_cnt = imutils.grab_contours(obj_cnt)
if len(obj_cnt) == 0:
return [], [], [], [], [], [], [], []
        # keep only the largest contour; Mask R-CNN can produce noisy masks
        obj_cnt = sorted(obj_cnt, key=cv2.contourArea)[-1]
if cv2.contourArea(obj_cnt) < 10:
return [], [], [], [], [], [], [], []
# get center
M = cv2.moments(obj_cnt)
cX = round(M["m10"] / M["m00"])
cY = round(M["m01"] / M["m00"])
center.append([cX, cY])
# get crop of each object
temp = np.zeros((IMAGE_PAD_SIZE, IMAGE_PAD_SIZE), dtype=np.uint8)
temp[
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
] = mask_objs[oi]
crop = temp[
cY + IMAGE_PAD_WIDTH - 30 : cY + IMAGE_PAD_WIDTH + 30,
cX + IMAGE_PAD_WIDTH - 30 : cX + IMAGE_PAD_WIDTH + 30,
]
assert crop.shape[0] == 60 and crop.shape[1] == 60, crop.shape
binary_objs.append(crop)
if plot:
cv2.circle(plot_image, (cX, cY), 3, (255, 255, 255), -1)
# get pca angle
angle = get_orientation(obj_cnt)
# get contour points
skip_num = len(obj_cnt) // 12 # 12 possible pushes for an object
skip_count = 0
diff_angle_limit_four = 0.3
target_diff_angles = np.array([0, np.pi, np.pi / 2, 3 * np.pi / 2])
# add the consecutive move
if prev_move:
prev_angle = math.atan2(
prev_move[1][1] - prev_move[0][1], prev_move[1][0] - prev_move[0][0]
)
pose = (cX - math.cos(prev_angle) * 2, cY - math.sin(prev_angle) * 2)
x = pose[0]
y = pose[1]
diff_x = cX - x
diff_y = cY - y
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_on_contour = (round(x), round(y))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt, add_distance=0)
test_point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
if is_close(prev_move[1], test_point):
if len(priority_points) > 0:
prev_dis = close_distance(prev_move[1], priority_points[0])
this_dis = close_distance(prev_move[1], test_point)
if this_dis < prev_dis:
priority_points_on_contour[0] = point_on_contour
priority_points[0] = point
else:
priority_points_on_contour.append(point_on_contour)
priority_points.append(point)
# add four directions to center of object
four_poses = [
(cX + math.cos(angle) * 2, cY + math.sin(angle) * 2),
(cX + math.cos(angle + np.pi / 2) * 2, cY + math.sin(angle + np.pi / 2) * 2),
(cX + math.cos(angle - np.pi / 2) * 2, cY + math.sin(angle - np.pi / 2) * 2),
(cX - math.cos(angle) * 2, cY - math.sin(angle) * 2),
]
for pose in four_poses:
x = pose[0]
y = pose[1]
diff_x = cX - x
diff_y = cY - y
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_on_contour = (round(x), round(y))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
points_on_contour.append(point_on_contour)
points.append(point)
four_idx.append(len(points) - 1)
for pi, p in enumerate(obj_cnt):
x = p[0][0]
y = p[0][1]
if x == cX or y == cY:
continue
diff_x = cX - x
diff_y = cY - y
test_angle = math.atan2(diff_y, diff_x)
should_append = False
# avoid four directions to center of object
if np.min(np.abs(abs(angle - test_angle) - target_diff_angles)) < diff_angle_limit_four:
should_append = False
skip_count = 0
elif skip_count == skip_num:
should_append = True
if should_append:
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_on_contour = (round(x), round(y))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
points_on_contour.append(point_on_contour)
points.append(point)
other_idx.append(len(points) - 1)
skip_count = 0
else:
skip_count += 1
# random actions, adding priority points at the end
# temp = list(zip(points_on_contour, points))
# random.shuffle(temp)
# points_on_contour, points = zip(*temp)
# points_on_contour = list(points_on_contour)
# points = list(points)
# points.extend(priority_points)
# points_on_contour.extend(priority_points_on_contour)
random.shuffle(four_idx)
random.shuffle(other_idx)
new_points = []
new_points_on_contour = []
for idx in other_idx:
new_points.append(points[idx])
new_points_on_contour.append(points_on_contour[idx])
for idx in four_idx:
new_points.append(points[idx])
new_points_on_contour.append(points_on_contour[idx])
new_points.extend(priority_points)
new_points_on_contour.extend(priority_points_on_contour)
points = new_points
points_on_contour = new_points_on_contour
priority_qualified = False
if plot:
# loop over the contours
for c in cnts:
cv2.drawContours(plot_image, [c], -1, (133, 137, 140), 2)
valid_points = []
for pi in range(len(points)):
# out of boundary
if (
points[pi][0] < 5
or points[pi][0] > IMAGE_SIZE - 5
or points[pi][1] < 5
or points[pi][1] > IMAGE_SIZE - 5
):
qualify = False
elif pi >= len(points) - len(priority_points):
temp = list(points[pi])
temp[0] = max(temp[0], 5)
temp[0] = min(temp[0], IMAGE_SIZE - 5)
temp[1] = max(temp[1], 5)
temp[1] = min(temp[1], IMAGE_SIZE - 5)
points[pi] = temp
qualify = True
priority_qualified = True
# clearance
elif (
np.sum(
thresh[
points[pi][1]
- GRIPPER_GRASP_WIDTH_PIXEL // 2 : points[pi][1]
+ GRIPPER_GRASP_WIDTH_PIXEL // 2
+ 1,
points[pi][0]
- GRIPPER_GRASP_WIDTH_PIXEL // 2 : points[pi][0]
+ GRIPPER_GRASP_WIDTH_PIXEL // 2
+ 1,
]
> 0
)
== 0
):
qualify = True
else:
qualify = False
if qualify:
if plot:
diff_x = points_on_contour[pi][0] - points[pi][0]
diff_y = points_on_contour[pi][1] - points[pi][1]
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_to = (
int(points[pi][0] + diff_x * PUSH_DISTANCE / PIXEL_SIZE / 2),
int(points[pi][1] + diff_y * PUSH_DISTANCE / PIXEL_SIZE / 2),
)
if pi < len(other_idx):
cv2.arrowedLine(
plot_image, points[pi], point_to, (0, 0, 255), 2, tipLength=0.2,
)
elif pi >= len(points) - len(priority_points):
cv2.arrowedLine(
plot_image, tuple(points[pi]), point_to, (0, 255, 0), 2, tipLength=0.2,
)
else:
cv2.arrowedLine(
plot_image, points[pi], point_to, (255, 0, 0), 2, tipLength=0.2,
)
valid_points.append([points[pi], points_on_contour[pi]])
if start_pose is not None:
spose = (start_pose[1], start_pose[0])
epose = (start_pose[3], start_pose[2])
valid_points = [[spose, epose]]
print(valid_points)
if plot:
cv2.imwrite("test.png", plot_image)
# rotate image
rotated_color_image = []
rotated_depth_image = []
rotated_mask_objs = []
rotated_angle = []
rotated_center = []
rotated_action = []
rotated_binary_objs_image = []
before_rotated_action = []
count = 0
for aidx, action in enumerate(valid_points):
# padding from 224 to 320
# color image
color_image_pad = np.zeros((IMAGE_PAD_SIZE, IMAGE_PAD_SIZE, 3), np.uint8)
color_image_pad[
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
] = color_image
# depth image
depth_image_pad = np.zeros((IMAGE_PAD_SIZE, IMAGE_PAD_SIZE), np.float32)
depth_image_pad[
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
] = depth_image
# compute rotation angle
down = (0, 1)
current = (action[1][0] - action[0][0], action[1][1] - action[0][1])
dot = (
down[0] * current[0] + down[1] * current[1]
) # dot product between [x1, y1] and [x2, y2]
det = down[0] * current[1] - down[1] * current[0] # determinant
angle = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)
angle = math.degrees(angle)
# rotate images
rotated_color = utils.rotate(color_image_pad, angle)
rotated_depth = utils.rotate(depth_image_pad, angle)
# rotate cropped object
if len(binary_objs) == 1:
# binary_objs_image = np.expand_dims(binary_objs[0], axis=-1)
binary_objs_image = binary_objs[0]
rotated_binary_objs = utils.rotate(binary_objs_image, angle, True)
rotated_binary_objs = np.expand_dims(rotated_binary_objs, axis=-1)
else:
binary_objs_image = np.stack(binary_objs, axis=-1)
rotated_binary_objs = utils.rotate(binary_objs_image, angle, True)
M = cv2.getRotationMatrix2D((IMAGE_SIZE / 2, IMAGE_SIZE / 2), angle, 1) # rotate by center
# rotate points
points = np.array(center)
points = np.concatenate((points, [action[0]]), axis=0)
points = np.expand_dims(points, axis=0)
points = cv2.transform(points, M)[0]
points_center = points[: len(center)]
# clearance check
clearance = cv2.cvtColor(rotated_color, cv2.COLOR_RGB2GRAY)
clearance = cv2.medianBlur(clearance, 5)
clearance = cv2.threshold(clearance, 20, 255, cv2.THRESH_BINARY)[1]
area = np.sum(
clearance[
max(
0, points[-1][1] + IMAGE_PAD_WIDTH - round(GRIPPER_GRASP_WIDTH_PIXEL / 2)
) : min(
IMAGE_PAD_SIZE,
points[-1][1] + IMAGE_PAD_WIDTH + round(GRIPPER_GRASP_WIDTH_PIXEL / 2) + 1,
),
max(0, points[-1][0] + IMAGE_PAD_WIDTH - GRIPPER_PUSH_RADIUS_PIXEL) : min(
IMAGE_PAD_SIZE, points[-1][0] + IMAGE_PAD_WIDTH + GRIPPER_PUSH_RADIUS_PIXEL + 1
),
]
> 0
)
if area > 0:
if not (priority_qualified and aidx == len(valid_points) - 1):
continue
rotated_color_image.append(rotated_color)
rotated_depth_image.append(rotated_depth)
rotated_angle.append(angle)
rotated_center.append(np.flip(points_center, 1))
rotated_action.append(np.flip(points[-1]))
rotated_binary_objs_image.append(rotated_binary_objs)
rotated_mask_obj = []
rotated_mask_centers = []
if from_color:
for idx, mask in enumerate(ori_mask_objs):
mask_color = mask[0]
mask_depth = mask[1]
rotated_mask_color = utils.rotate(mask_color, angle)
rotated_mask_depth = utils.rotate(mask_depth, angle)
rotated_mask = (rotated_mask_color, rotated_mask_depth)
rotated_mask_obj.append(rotated_mask)
rotated_mask_centers.append(
[
points_center[idx][1] + IMAGE_PAD_WIDTH,
points_center[idx][0] + IMAGE_PAD_WIDTH,
]
)
rotated_mask_objs.append((rotated_mask_obj, rotated_mask_centers))
else:
for mask in mask_objs:
mask = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
rotated_mask = utils.rotate(mask, angle, True)
rotated_mask_obj.append(rotated_mask)
rotated_mask_objs.append(rotated_mask_obj)
before_rotated_action.append(np.flip(action[0]))
# if plot:
# rotated_image = rotated_color.copy()
# rotated_image_gray = cv2.cvtColor(rotated_image, cv2.COLOR_RGB2GRAY)
# rotated_image_gray = rotated_image_gray.astype(np.uint8)
# rotated_image_gray = cv2.medianBlur(rotated_image_gray, 5)
# rotated_image = cv2.threshold(rotated_image_gray, 50, 255, cv2.THRESH_BINARY)[1]
# rotated_image = rotated_image[
# IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
# IMAGE_PAD_WIDTH : IMAGE_PAD_SIZE - IMAGE_PAD_WIDTH,
# ]
# for ci in range(len(points_center)):
# cY, cX = rotated_center[-1][ci]
# cv2.circle(rotated_image, (cX, cY), 3, (128), -1)
# y1, x1 = rotated_action[-1]
# cv2.arrowedLine(
# rotated_image,
# (x1, y1),
# (x1, y1 + int(PUSH_DISTANCE / PIXEL_SIZE)),
# (128),
# 2,
# tipLength=0.4,
# )
# cv2.circle(rotated_image, (x1, y1), 2, (200), -1)
# cv2.imwrite(str(count) + "test_rotated.png", rotated_image)
# count += 1
return (
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs_image,
before_rotated_action,
rotated_mask_objs,
)
def from_color_segm(color_image, plot=False):
"""
Use Pre-defined color to do instance segmentation and output masks in binary format.
"""
image = cv2.cvtColor(color_image, cv2.COLOR_RGB2HSV)
mask_objs = []
if plot:
pred_mask = np.zeros((224, 224), dtype=np.uint8)
for ci in range(4):
mask = cv2.inRange(image, colors_lower[ci], colors_upper[ci])
if np.sum(mask > 0):
mask_objs.append(mask)
if plot:
pred_mask[mask > 0] = 255 - ci * 20
cv2.imwrite(str(ci) + "mask.png", mask)
if plot:
cv2.imwrite("pred.png", pred_mask)
print("Mask R-CNN: %d objects detected" % len(mask_objs))
return mask_objs
@torch.no_grad()
def from_maskrcnn(model, color_image, device, plot=False):
"""
Use Mask R-CNN to do instance segmentation and output masks in binary format.
"""
model.eval()
image = color_image.copy()
image = TF.to_tensor(image)
prediction = model([image.to(device)])[0]
mask_objs = []
if plot:
pred_mask = np.zeros((IMAGE_SIZE, IMAGE_SIZE), dtype=np.uint8)
for idx, mask in enumerate(prediction["masks"]):
# NOTE: 0.98 can be tuned
if prediction["scores"][idx] > 0.98:
img = mask[0].mul(255).byte().cpu().numpy()
img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
if np.sum(img == 255) < 100:
continue
mask_objs.append(img)
if plot:
pred_mask[img > 0] = 255 - idx * 50
cv2.imwrite(str(idx) + "mask.png", img)
if plot:
cv2.imwrite("pred.png", pred_mask)
print("Mask R-CNN: %d objects detected" % len(mask_objs), prediction["scores"].cpu())
return mask_objs
if __name__ == "__main__":
# color_image = cv2.imread(
# "logs_grasp/mcts-2021-03-21-00-31-13/data/color-heightmaps/000019.0.color.png"
# )
color_image = cv2.imread("tree_plot/root.0-73_140_74_103.1-72_138_108_130.2-99_132_136_132.png")
# color_image_after = cv2.imread("logs_push/final-test/data/color_heightmaps/0002507.color.png")
# color_image = cv2.imread("logs/action_test/data/color-heightmaps/000004.0.color.png")
# color_image = cv2.imread(
# "logs_push/2021-01-24-16-07-43/data/color-heightmaps/000000.0.color.png"
# )
# color_image = cv2.imread("logs/vpg+&pp/p104/data/color-heightmaps/000001.0.color.png")
color_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2RGB)
# color_image_after = cv2.cvtColor(color_image_after, cv2.COLOR_BGR2RGB)
depth_image = cv2.imread(
"tree_plot/root.0-73_140_74_103.1-72_138_108_130.2-99_132_136_132-depth.png",
cv2.IMREAD_UNCHANGED,
)
# depth_image = cv2.imread("logs/real-maskrcnn/data/depth-heightmaps/000002.0.depth.png", cv2.IMREAD_UNCHANGED)
# depth_image = cv2.imread("logs/old/object-detection-data/data/depth-heightmaps/000001.0.depth.png", cv2.IMREAD_UNCHANGED)
# depth_image = cv2.imread(
# "logs_grasp/mcts-2021-03-21-00-31-13/data/depth-heightmaps/000019.0.depth.png",
# cv2.IMREAD_UNCHANGED,
# )
# depth_image = cv2.imread("logs/vpg+&pp/p104/data/depth-heightmaps/000001.0.depth.png", cv2.IMREAD_UNCHANGED)
depth_image = depth_image.astype(np.float32) / 100000
# with open('logs_push/final-test/data/actions/0002502.action.txt', 'r') as file:
# filedata = file.read()
# x, y = filedata.split(' ')
# start_pose = [x, y]
# cv2.imwrite('predicttruth.png', color_image_after)
# check diff of color image and depth image
# gray = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
# blurred = cv2.medianBlur(gray, 5)
# gray = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]
# depth_image[depth_image <= DEPTH_MIN] = 0
# depth_image[depth_image > DEPTH_MIN] = 255
# # depth_image = depth_image.astype(np.uint8)
# cv2.imshow('color', gray)
# cv2.imwrite('blackwhite', gray)
# diff = depth_image - gray
# diff[diff < 0] = 128
# cv2.imshow('diff', diff)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
# exit()
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
predictor = Predictor("logs_push/push_prediction_model-75.pth")
# trainer = Trainer(
# "reinforcement",
# 0,
# 0,
# True,
# True,
# "logs_grasp/power1.5graspnew/models/snapshot-post-020000.reinforcement.pth",
# False,
# )
model = get_model_instance_segmentation(2)
model.load_state_dict(torch.load("logs_image/maskrcnn.pth"))
model = model.to(device)
mask_objs = from_maskrcnn(model, color_image, device, True)
(
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs,
before_rotated_action,
rotated_mask_objs,
) = sample_actions(color_image, depth_image, mask_objs, True)
generated_color_images, generated_depth_images, validations = predictor.forward(
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs,
rotated_mask_objs,
True,
)
for idx, img in enumerate(generated_color_images):
overlay = color_image
# added_image = cv2.addWeighted(generated_color_images[idx], 0.8, overlay, 0.4, 0)
added_image = generated_color_images[idx].copy()
img = cv2.cvtColor(added_image, cv2.COLOR_RGB2BGR)
cv2.imwrite(str(idx) + "predict.png", img)
img = generated_depth_images[idx]
img[img <= DEPTH_MIN] = 0
img[img > DEPTH_MIN] = 255
cv2.imwrite(str(idx) + "predictgray.png", img)
# generated_color_images.append(color_image)
# generated_depth_images.append(depth_image)
# for idx, img in enumerate(generated_color_images):
# if idx + 1 == len(generated_color_images) or validations[idx]:
# _, grasp_predictions = trainer.forward(
# generated_color_images[idx], generated_depth_images[idx], is_volatile=True
# )
# grasp_predictions = trainer.focus_on_target(
# generated_color_images[idx], grasp_predictions
# )
# best_pix_ind = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
# predicted_value = np.max(grasp_predictions)
# grasp_pred_vis = trainer.get_prediction_vis(
# grasp_predictions, generated_color_images[idx], best_pix_ind
# )
# cv2.imwrite(str(idx) + "visualization.grasp.png", grasp_pred_vis)
# predicted_values = np.sum(np.sort(grasp_predictions.flatten())[:])
# print(idx, predicted_value, predicted_values)
# else:
# print("invalid")
# _, grasp_predictions = trainer.forward(
# color_image, depth_image, is_volatile=True
# )
# grasp_predictions = trainer.focus_on_target(
# color_image, depth_image, grasp_predictions, TARGET_LOWER, TARGET_UPPER
# )
# best_pix_ind = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
# predicted_value = np.max(grasp_predictions)
# grasp_pred_vis = trainer.get_prediction_vis(
# grasp_predictions, color_image, best_pix_ind
# )
# cv2.imwrite("visualization.grasp.png", grasp_pred_vis)
# predicted_values = np.sum(np.sort(grasp_predictions.flatten())[:])
# print(predicted_value, predicted_values)
| 39,123 | 40.933548 | 127 |
py
|
more
|
more-main/models.py
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from vision.backbone_utils import resnet_fpn_net
from constants import NUM_ROTATION
class PushNet(nn.Module):
"""
The DQN Network.
"""
def __init__(self, pre_train=False):
super().__init__()
        self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
self.pre_train = pre_train
self.num_rotations = NUM_ROTATION
# self.pushnet = FCN(2, 1).to(self.device)
self.pushnet = resnet_fpn_net(
"resnet34", trainable_layers=5, grasp=False, input_channels=2).to(self.device)
print("max_memory_allocated (MB):", torch.cuda.max_memory_allocated() / 2 ** 20)
print("memory_allocated (MB):", torch.cuda.memory_allocated() / 2 ** 20)
def forward(
self, input_data, is_volatile=False, specific_rotation=-1,
):
if self.pre_train:
output_probs = self.pushnet(input_data)
return output_probs
else:
if is_volatile:
with torch.no_grad():
output_prob = []
# Apply rotations to images
for rotate_idx in range(self.num_rotations):
rotate_theta = np.radians(rotate_idx * (360 / self.num_rotations))
# Compute sample grid for rotation BEFORE neural network
affine_mat_before = np.asarray(
[
[np.cos(-rotate_theta), np.sin(-rotate_theta), 0],
[-np.sin(-rotate_theta), np.cos(-rotate_theta), 0],
]
)
affine_mat_before.shape = (2, 3, 1)
affine_mat_before = (
torch.from_numpy(affine_mat_before)
.permute(2, 0, 1)
.float()
.to(self.device)
)
flow_grid_before = F.affine_grid(
affine_mat_before, input_data.size(), align_corners=True
)
# Rotate images clockwise
rotate_data = F.grid_sample(
input_data.to(self.device),
flow_grid_before,
mode="bilinear",
align_corners=True,
)
final_push_feat = self.pushnet(rotate_data)
# Compute sample grid for rotation AFTER branches
affine_mat_after = np.asarray(
[
[np.cos(rotate_theta), np.sin(rotate_theta), 0],
[-np.sin(rotate_theta), np.cos(rotate_theta), 0],
]
)
affine_mat_after.shape = (2, 3, 1)
affine_mat_after = (
torch.from_numpy(affine_mat_after)
.permute(2, 0, 1)
.float()
.to(self.device)
)
flow_grid_after = F.affine_grid(
affine_mat_after, final_push_feat.data.size(), align_corners=True
)
# Forward pass through branches, undo rotation on output predictions, upsample results
output_prob.append(
F.grid_sample(
final_push_feat,
flow_grid_after,
mode="bilinear",
align_corners=True,
),
)
return output_prob
else:
raise NotImplementedError
# self.output_prob = []
# # Apply rotations to images
# rotate_idx = specific_rotation
# rotate_theta = np.radians(rotate_idx * (360 / self.num_rotations))
# # Compute sample grid for rotation BEFORE branches
# affine_mat_before = np.asarray(
# [
# [np.cos(-rotate_theta), np.sin(-rotate_theta), 0],
# [-np.sin(-rotate_theta), np.cos(-rotate_theta), 0],
# ]
# )
# affine_mat_before.shape = (2, 3, 1)
# affine_mat_before = (
# torch.from_numpy(affine_mat_before).permute(2, 0, 1).float().to(self.device)
# )
# affine_mat_before.requires_grad_(False)
# flow_grid_before = F.affine_grid(
# affine_mat_before, input_color_data.size(), align_corners=True
# )
# # Rotate images clockwise
# rotate_color = F.grid_sample(
# input_color_data.to(self.device),
# flow_grid_before,
# mode="bilinear",
# align_corners=True,
# )
# rotate_depth = F.grid_sample(
# input_depth_data.to(self.device),
# flow_grid_before,
# mode="bilinear",
# align_corners=True,
# )
# input_data = torch.cat((rotate_color, rotate_depth), dim=1)
# # Pass intermediate features to net
# final_push_feat = self.pushnet(input_data)
# # Compute sample grid for rotation AFTER branches
# affine_mat_after = np.asarray(
# [
# [np.cos(rotate_theta), np.sin(rotate_theta), 0],
# [-np.sin(rotate_theta), np.cos(rotate_theta), 0],
# ]
# )
# affine_mat_after.shape = (2, 3, 1)
# affine_mat_after = (
# torch.from_numpy(affine_mat_after).permute(2, 0, 1).float().to(self.device)
# )
# affine_mat_after.requires_grad_(False)
# flow_grid_after = F.affine_grid(
# affine_mat_after.to(self.device),
# final_push_feat.data.size(),
# align_corners=True,
# )
# # Forward pass through branches, undo rotation on output predictions, upsample results
# self.output_prob.append(
# F.grid_sample(
# final_push_feat, flow_grid_after, mode="bilinear", align_corners=True
# )
# )
# return self.output_prob
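
# Minimal sketch (for illustration; not part of the original code) of the
# rotate-before / rotate-after trick used by both networks in this file:
# rotate the input by -theta, run a translation-equivariant FCN, then rotate
# the output back, which yields a Q map for actions executed at angle theta.
def _example_rotation_equivariant_eval(net, image, theta_deg):
    theta = np.radians(theta_deg)
    def _grid(angle, size):
        mat = torch.tensor(
            [
                [np.cos(angle), np.sin(angle), 0.0],
                [-np.sin(angle), np.cos(angle), 0.0],
            ],
            dtype=torch.float32,
        ).unsqueeze(0)
        return F.affine_grid(mat, size, align_corners=True)
    rotated = F.grid_sample(image, _grid(-theta, image.size()), align_corners=True)
    q = net(rotated)  # any fully convolutional network
    return F.grid_sample(q, _grid(theta, q.size()), align_corners=True)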
class reinforcement_net(nn.Module):
"""
The DQN Network.
graspnet is the Grasp Network.
pushnet is the Push Network for the DQN + GN method.
"""
def __init__(self, pre_train=False): # , snapshot=None
super(reinforcement_net, self).__init__()
self.device = torch.device("cuda")
self.pre_train = pre_train
self.num_rotations = NUM_ROTATION
if pre_train:
# self.pushnet = resnet_fpn_net(
# "resnet18", trainable_layers=5, grasp=False, input_channels=4
# ).to(self.device)
# self.pushnet = FCN(4, 1).to(self.device)
self.graspnet = resnet_fpn_net("resnet18", trainable_layers=5).to(self.device)
else:
# self.pushnet = resnet_fpn_net(
# "resnet18", trainable_layers=5, grasp=False, input_channels=4
# ).to(self.device)
# self.pushnet = FCN(4, 1).to(self.device)
self.graspnet = resnet_fpn_net("resnet18", trainable_layers=5).to(self.device)
print("max_memory_allocated (MB):", torch.cuda.max_memory_allocated() / 2 ** 20)
print("memory_allocated (MB):", torch.cuda.memory_allocated() / 2 ** 20)
def forward(
self,
input_color_data,
input_depth_data,
is_volatile=False,
specific_rotation=-1,
use_push=True,
push_only=False,
):
if self.pre_train:
input_data = torch.cat((input_color_data, input_depth_data), dim=1)
if use_push:
if push_only:
output_probs = self.pushnet(input_data)
else:
final_push_feat = self.pushnet(input_data)
final_grasp_feat = self.graspnet(input_data)
output_probs = (final_push_feat, final_grasp_feat)
else:
output_probs = self.graspnet(input_data)
return output_probs
else:
if is_volatile:
with torch.no_grad():
output_prob = []
# Apply rotations to images
for rotate_idx in range(self.num_rotations):
rotate_theta = np.radians(rotate_idx * (360 / self.num_rotations))
# Compute sample grid for rotation BEFORE neural network
affine_mat_before = np.asarray(
[
[np.cos(-rotate_theta), np.sin(-rotate_theta), 0],
[-np.sin(-rotate_theta), np.cos(-rotate_theta), 0],
]
)
affine_mat_before.shape = (2, 3, 1)
affine_mat_before = (
torch.from_numpy(affine_mat_before)
.permute(2, 0, 1)
.float()
.to(self.device)
)
flow_grid_before = F.affine_grid(
affine_mat_before, input_color_data.size(), align_corners=True
)
# Rotate images clockwise
rotate_color = F.grid_sample(
input_color_data.to(self.device),
flow_grid_before,
mode="nearest",
align_corners=True,
)
rotate_depth = F.grid_sample(
input_depth_data.to(self.device),
flow_grid_before,
mode="nearest",
align_corners=True,
)
input_data = torch.cat((rotate_color, rotate_depth), dim=1)
# Pass intermediate features to net
if use_push:
final_push_feat = self.pushnet(input_data)
if not push_only:
final_grasp_feat = self.graspnet(input_data)
else:
final_grasp_feat = self.graspnet(input_data)
# Compute sample grid for rotation AFTER branches
affine_mat_after = np.asarray(
[
[np.cos(rotate_theta), np.sin(rotate_theta), 0],
[-np.sin(rotate_theta), np.cos(rotate_theta), 0],
]
)
affine_mat_after.shape = (2, 3, 1)
affine_mat_after = (
torch.from_numpy(affine_mat_after)
.permute(2, 0, 1)
.float()
.to(self.device)
)
if use_push:
flow_grid_after = F.affine_grid(
affine_mat_after, final_push_feat.data.size(), align_corners=True
)
else:
flow_grid_after = F.affine_grid(
affine_mat_after, final_grasp_feat.data.size(), align_corners=True
)
# Forward pass through branches, undo rotation on output predictions, upsample results
if use_push:
if push_only:
output_prob.append(
F.grid_sample(
final_push_feat,
flow_grid_after,
mode="nearest",
align_corners=True,
),
)
else:
output_prob.append(
[
F.grid_sample(
final_push_feat,
flow_grid_after,
mode="nearest",
align_corners=True,
),
F.grid_sample(
final_grasp_feat,
flow_grid_after,
mode="nearest",
align_corners=True,
),
]
)
else:
output_prob.append(
[
None,
F.grid_sample(
final_grasp_feat,
flow_grid_after,
mode="nearest",
align_corners=True,
),
]
)
return output_prob
else:
self.output_prob = []
# Apply rotations to images
rotate_idx = specific_rotation
rotate_theta = np.radians(rotate_idx * (360 / self.num_rotations))
# Compute sample grid for rotation BEFORE branches
affine_mat_before = np.asarray(
[
[np.cos(-rotate_theta), np.sin(-rotate_theta), 0],
[-np.sin(-rotate_theta), np.cos(-rotate_theta), 0],
]
)
affine_mat_before.shape = (2, 3, 1)
affine_mat_before = (
torch.from_numpy(affine_mat_before).permute(2, 0, 1).float().to(self.device)
)
affine_mat_before.requires_grad_(False)
flow_grid_before = F.affine_grid(
affine_mat_before, input_color_data.size(), align_corners=True
)
# Rotate images clockwise
rotate_color = F.grid_sample(
input_color_data.to(self.device),
flow_grid_before,
mode="nearest",
align_corners=True,
)
rotate_depth = F.grid_sample(
input_depth_data.to(self.device),
flow_grid_before,
mode="nearest",
align_corners=True,
)
input_data = torch.cat((rotate_color, rotate_depth), dim=1)
# Pass intermediate features to net
final_push_feat = self.pushnet(input_data)
if not push_only:
final_grasp_feat = self.graspnet(input_data)
# Compute sample grid for rotation AFTER branches
affine_mat_after = np.asarray(
[
[np.cos(rotate_theta), np.sin(rotate_theta), 0],
[-np.sin(rotate_theta), np.cos(rotate_theta), 0],
]
)
affine_mat_after.shape = (2, 3, 1)
affine_mat_after = (
torch.from_numpy(affine_mat_after).permute(2, 0, 1).float().to(self.device)
)
affine_mat_after.requires_grad_(False)
flow_grid_after = F.affine_grid(
affine_mat_after.to(self.device),
final_push_feat.data.size(),
align_corners=True,
)
# Forward pass through branches, undo rotation on output predictions, upsample results
if push_only:
self.output_prob.append(
F.grid_sample(
final_push_feat, flow_grid_after, mode="nearest", align_corners=True
)
)
else:
self.output_prob.append(
[
F.grid_sample(
final_push_feat, flow_grid_after, mode="nearest", align_corners=True
),
F.grid_sample(
final_grasp_feat,
flow_grid_after,
mode="nearest",
align_corners=True,
),
]
)
return self.output_prob
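
# Usage sketch (hedged: the checkpoint path and dict key below are placeholders,
# and a CUDA device is assumed, as in the classes above): evaluating grasp Q maps
# over all rotations with reinforcement_net.
def _example_grasp_q(color_tensor, depth_tensor):
    net = reinforcement_net(pre_train=False)
    # net.load_state_dict(torch.load("<snapshot>.pth")["model"], strict=False)
    net.eval()
    output_prob = net(color_tensor, depth_tensor, is_volatile=True, use_push=False)
    # each entry is [None, grasp_q_map]; stack the grasp maps over rotations
    grasp_q = torch.stack([pair[1][0, 0] for pair in output_prob])
    best = torch.nonzero(grasp_q == grasp_q.max())[0]
    return best  # (rotation_idx, y, x)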

# ============================================================
# File: more-main/push_net.py
# ============================================================

import torch
import torch.nn as nn
from vision.backbone_utils import resent_backbone
from collections import OrderedDict
class PushPredictionNet(nn.Module):
    """Push prediction network: encodes per-object states, the push action, and
    scene images, and decodes a 3-DoF pose change for every object."""
    def __init__(self):
super().__init__()
# single object state encoder
self.single_state_encoder = nn.Sequential(
OrderedDict(
[
("single-state-encoder-fc1", nn.Linear(2, 8)),
("single-state-encoder-relu1", nn.ReLU(inplace=True)),
("single-state-encoder-fc2", nn.Linear(8, 16)),
("single-state-encoder-relu2", nn.ReLU(inplace=True)),
]
)
)
# single object image encoder
        self.single_image_encoder = resent_backbone(
"resnet10", pretrained=False, num_classes=64, input_channels=1
)
# Interactive transformation
self.interact = nn.Sequential(
OrderedDict(
[
("interact-fc1", nn.Linear(176, 256)),
("interact-relu1", nn.ReLU(inplace=True)),
("interact-fc2", nn.Linear(256, 256)),
("interact-relu2", nn.ReLU(inplace=True)),
("interact-fc3", nn.Linear(256, 256)),
("interact-relu3", nn.ReLU(inplace=True)),
]
)
)
# Direct transformation
self.dynamics = nn.Sequential(
OrderedDict(
[
("dynamics-fc1", nn.Linear(96, 256)),
("dynamics-relu1", nn.ReLU(inplace=True)),
("dynamics-fc2", nn.Linear(256, 256)),
("dynamics-relu2", nn.ReLU(inplace=True)),
]
)
)
# action encoder
self.action_encoder = nn.Sequential(
OrderedDict(
[
("action_encoder-fc1", nn.Linear(4, 8)),
("action_encoder-relu1", nn.ReLU(inplace=True)),
("action_encoder-fc2", nn.Linear(8, 16)),
("action_encoder-relu2", nn.ReLU(inplace=True)),
]
)
)
# global image encoder
self.image_encoder = resent_backbone(
"resnet10", pretrained=False, num_classes=512, input_channels=2
)
self.decoder = nn.Sequential(
OrderedDict(
[
("decoder-fc00", nn.Linear(768, 256)),
("decoder-relu00", nn.ReLU(inplace=True)),
("decoder-fc0", nn.Linear(256, 64)),
("decoder-relu0", nn.ReLU(inplace=True)),
("decoder-fc1", nn.Linear(64, 16)),
("decoder-relu1", nn.ReLU(inplace=True)),
("decoder-fc3", nn.Linear(16, 3)),
("decoder-relu3", nn.ReLU(inplace=True)),
("decoder-fc4", nn.Linear(3, 3)),
]
)
)
def forward(self, prev_poses, action, image, image_objs, num_objs):
# action
encoded_action = self.action_encoder(action)
# single object
encoded_info = []
for i in range(num_objs):
encoded_state = self.single_state_encoder(prev_poses[:, i * 2 : i * 2 + 2])
            encoded_image = self.single_image_encoder(image_objs[:, i : i + 1, :, :])
encoded_cat = torch.cat((encoded_state, encoded_image), dim=1)
encoded_info.append(encoded_cat)
# the environment
y = self.image_encoder(image)
# interact
z = None
for i in range(num_objs):
dy_input = torch.cat((encoded_action, encoded_info[i]), dim=1)
all_dynamics = self.dynamics(dy_input)
for j in range(1, num_objs):
idx = i + j
if idx >= num_objs:
idx = idx - num_objs
inter_input = torch.cat((dy_input, encoded_info[idx]), dim=1)
other = self.interact(inter_input)
all_dynamics = all_dynamics + other
de_input = torch.cat((y, all_dynamics), dim=1)
output = self.decoder(de_input)
if z is None:
z = output
else:
z = torch.cat((z, output), dim=1)
return z
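
# Shape sketch (illustrative; the crop size and channel semantics are
# assumptions, not repo constants): how PushPredictionNet is fed for a batch
# of B samples with num_objs objects per scene.
def _example_push_prediction_shapes():
    B, num_objs, S = 2, 3, 64
    net = PushPredictionNet()
    prev_poses = torch.zeros(B, num_objs * 2)  # (x, y) per object
    action = torch.zeros(B, 4)  # push start and end point
    image = torch.zeros(B, 2, S, S)  # global scene, 2 channels
    image_objs = torch.zeros(B, num_objs, S, S)  # one binary crop per object
    out = net(prev_poses, action, image, image_objs, num_objs)
    assert out.shape == (B, num_objs * 3)  # 3 values (pose change) per object
    return out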

# ============================================================
# File: more-main/collect_push_data.py
# ============================================================

import time
import datetime
import os
import glob
import pybullet as p
import numpy as np
import cv2
import utils
from environment import Environment
from constants import (
DEPTH_MIN,
PUSH_DISTANCE,
IMAGE_SIZE,
GRIPPER_PUSH_RADIUS_PIXEL,
GRIPPER_PUSH_RADIUS_SAFE_PIXEL,
)
class PushDataCollector:
def __init__(self, start_iter=0, end_iter=2000, base_directory=None, seed=0):
# Objects have heights of 0.05 meters, so center should be less than 0.035
self.height_upper = 0.035
self.depth_min = DEPTH_MIN
self.rng = np.random.default_rng(seed)
# Create directory to save data
timestamp = time.time()
timestamp_value = datetime.datetime.fromtimestamp(timestamp)
if base_directory is None:
self.base_directory = os.path.join(
os.path.abspath("logs_push"), timestamp_value.strftime("%Y-%m-%d-%H-%M-%S")
)
else:
self.base_directory = base_directory
print("Creating data logging session: %s" % (self.base_directory))
self.prev_color_heightmaps_directory = os.path.join(
self.base_directory, "data", "prev-color-heightmaps"
)
self.prev_depth_heightmaps_directory = os.path.join(
self.base_directory, "data", "prev-depth-heightmaps"
)
self.prev_pose_directory = os.path.join(self.base_directory, "data", "prev-poses")
self.next_color_heightmaps_directory = os.path.join(
self.base_directory, "data", "next-color-heightmaps"
)
self.next_depth_heightmaps_directory = os.path.join(
self.base_directory, "data", "next-depth-heightmaps"
)
self.next_pose_directory = os.path.join(self.base_directory, "data", "next-poses")
self.action_directory = os.path.join(self.base_directory, "data", "actions")
self.mask_directory = os.path.join(self.base_directory, "data", "masks")
if not os.path.exists(self.prev_color_heightmaps_directory):
os.makedirs(self.prev_color_heightmaps_directory)
if not os.path.exists(self.prev_depth_heightmaps_directory):
os.makedirs(self.prev_depth_heightmaps_directory)
if not os.path.exists(self.prev_pose_directory):
os.makedirs(self.prev_pose_directory)
if not os.path.exists(self.next_color_heightmaps_directory):
os.makedirs(self.next_color_heightmaps_directory)
if not os.path.exists(self.next_depth_heightmaps_directory):
os.makedirs(self.next_depth_heightmaps_directory)
if not os.path.exists(self.next_pose_directory):
os.makedirs(self.next_pose_directory)
if not os.path.exists(self.action_directory):
os.makedirs(self.action_directory)
if not os.path.exists(self.mask_directory):
os.makedirs(self.mask_directory)
self.iter = start_iter
self.end_iter = end_iter
def reset_np_random(self, seed):
self.rng = np.random.default_rng(seed)
def save_heightmaps(
self,
iteration,
prev_color_heightmap,
prev_depth_heightmap,
next_color_heightmap,
next_depth_heightmap,
):
color_heightmap = cv2.cvtColor(prev_color_heightmap, cv2.COLOR_RGB2BGR)
cv2.imwrite(
os.path.join(self.prev_color_heightmaps_directory, "%07d.color.png" % (iteration)),
color_heightmap,
)
depth_heightmap = np.round(prev_depth_heightmap * 100000).astype(
np.uint16
) # Save depth in 1e-5 meters
cv2.imwrite(
os.path.join(self.prev_depth_heightmaps_directory, "%07d.depth.png" % (iteration)),
depth_heightmap,
)
color_heightmap = cv2.cvtColor(next_color_heightmap, cv2.COLOR_RGB2BGR)
cv2.imwrite(
os.path.join(self.next_color_heightmaps_directory, "%07d.color.png" % (iteration)),
color_heightmap,
)
depth_heightmap = np.round(next_depth_heightmap * 100000).astype(
np.uint16
) # Save depth in 1e-5 meters
cv2.imwrite(
os.path.join(self.next_depth_heightmaps_directory, "%07d.depth.png" % (iteration)),
depth_heightmap,
)
def save_masks(self, iteration, mask):
cv2.imwrite(os.path.join(self.mask_directory, "%07d.mask.png" % (iteration)), mask)
def save_action(self, iteration, pose):
np.savetxt(
os.path.join(self.action_directory, "%07d.action.txt" % (iteration)), pose, fmt="%s"
)
def save_pose(self, iteration, pose0, pose1):
np.savetxt(
os.path.join(self.prev_pose_directory, "%07d.pose.txt" % (iteration)), pose0, fmt="%s"
)
np.savetxt(
os.path.join(self.next_pose_directory, "%07d.pose.txt" % (iteration)), pose1, fmt="%s"
)
def add_object_push_from_file(self, env, file_name):
body_ids = []
success = True
# Read data
with open(file_name, "r") as preset_file:
file_content = preset_file.readlines()
num_obj = len(file_content)
obj_files = []
obj_mesh_colors = []
obj_positions = []
obj_orientations = []
for object_idx in range(num_obj):
file_content_curr_object = file_content[object_idx].split()
obj_file = os.path.join("assets", "blocks", file_content_curr_object[0])
obj_files.append(obj_file)
obj_positions.append(
[
float(file_content_curr_object[4]),
float(file_content_curr_object[5]),
float(file_content_curr_object[6]),
]
)
obj_orientations.append(
[
float(file_content_curr_object[7]),
float(file_content_curr_object[8]),
float(file_content_curr_object[9]),
]
)
obj_mesh_colors.append(
[
float(file_content_curr_object[1]),
float(file_content_curr_object[2]),
float(file_content_curr_object[3]),
]
)
# Import objects
for object_idx in range(num_obj):
curr_mesh_file = obj_files[object_idx]
object_position = [
obj_positions[object_idx][0],
obj_positions[object_idx][1],
obj_positions[object_idx][2],
]
object_orientation = [
obj_orientations[object_idx][0],
obj_orientations[object_idx][1],
obj_orientations[object_idx][2],
]
object_color = [
obj_mesh_colors[object_idx][0],
obj_mesh_colors[object_idx][1],
obj_mesh_colors[object_idx][2],
1,
]
body_id = p.loadURDF(
curr_mesh_file, object_position, p.getQuaternionFromEuler(object_orientation)
)
p.changeVisualShape(body_id, -1, rgbaColor=object_color)
body_ids.append(body_id)
env.add_object_id(body_id)
success &= env.wait_static()
success &= env.wait_static()
# give time to stop
for _ in range(5):
p.stepSimulation(env.client_id)
return body_ids, success
def add_object_push(self, env):
"""Randomly dropped objects to the workspace"""
color_space = (
np.asarray(
[
[78.0, 121.0, 167.0], # blue
[89.0, 161.0, 79.0], # green
[156, 117, 95], # brown
[242, 142, 43], # orange
[237.0, 201.0, 72.0], # yellow
[186, 176, 172], # gray
[255.0, 87.0, 89.0], # red
[176, 122, 161], # purple
[118, 183, 178], # cyan
[255, 157, 167], # pink
]
)
/ 255.0
)
drop_height = 0.15
obj_num = self.rng.choice(
[1, 2, 3, 4, 5, 6, 7, 8, 9], p=[0.01, 0.04, 0.05, 0.15, 0.20, 0.20, 0.15, 0.1, 0.1]
)
mesh_list = glob.glob("assets/blocks/*.urdf")
obj_mesh_ind = self.rng.choice(mesh_list, obj_num)
obj_mesh_color = color_space[np.asarray(range(obj_num)), :]
obj_mesh_color_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8]
# Add each object to robot workspace at x,y location and orientation (random or pre-loaded)
body_ids = []
object_positions = []
object_orientations = []
success = True
for object_idx in range(len(obj_mesh_ind)):
curr_mesh_file = obj_mesh_ind[object_idx]
drop_x = 0.45 + self.rng.random() * 0.1
drop_y = -0.05 + self.rng.random() * 0.1
object_position = [drop_x, drop_y, drop_height]
object_orientation = [0, 0, 2 * np.pi * self.rng.random()]
adjust_angle = 2 * np.pi * self.rng.random()
object_color = [
obj_mesh_color[obj_mesh_color_ind[object_idx]][0],
obj_mesh_color[obj_mesh_color_ind[object_idx]][1],
obj_mesh_color[obj_mesh_color_ind[object_idx]][2],
1,
]
body_id = p.loadURDF(
curr_mesh_file, object_position, p.getQuaternionFromEuler(object_orientation)
)
p.changeVisualShape(body_id, -1, rgbaColor=object_color)
body_ids.append(body_id)
env.add_object_id(body_id)
success &= env.wait_static()
count = 0
while success:
success &= env.wait_static()
object_position, _ = p.getBasePositionAndOrientation(body_id)
if count > 20:
break
# if overlap
if object_position[2] > self.height_upper:
drop_x = np.cos(adjust_angle) * 0.01 + drop_x # 1 cm
drop_y = np.sin(adjust_angle) * 0.01 + drop_y
object_position = [drop_x, drop_y, drop_height]
p.resetBasePositionAndOrientation(
body_id, object_position, p.getQuaternionFromEuler(object_orientation)
)
else:
break
count += 1
if count > 20:
object_position = [drop_x, drop_y, self.height_upper + 0.01]
p.resetBasePositionAndOrientation(
body_id, object_position, p.getQuaternionFromEuler(object_orientation)
)
object_position, _ = p.getBasePositionAndOrientation(body_id)
object_positions.append(object_position)
object_orientations.append(object_orientation)
for idx in range(len(body_ids)):
p.resetBasePositionAndOrientation(
body_ids[idx],
object_positions[idx],
p.getQuaternionFromEuler(object_orientations[idx]),
)
success &= env.wait_static()
# give time to stop
for _ in range(5):
p.stepSimulation(env.client_id)
return body_ids, success
def is_valid(self, body_ids, env):
"""Decide randomly dropped objects in the valid state."""
for body_id in body_ids:
# Check height
object_position, object_orientation = p.getBasePositionAndOrientation(body_id)
if object_position[2] > self.height_upper:
print(f"Height is wrong. Skip! {object_position[2]} > {self.height_upper}")
return False
# Check range
if (
object_position[0] < env.bounds[0][0] + PUSH_DISTANCE / 2
or object_position[0] > env.bounds[0][1] - PUSH_DISTANCE / 2
or object_position[1] < env.bounds[1][0] + PUSH_DISTANCE / 2
or object_position[1] > env.bounds[1][1] - PUSH_DISTANCE / 2
):
print(f"Out of bounds. Skip! {object_position[0]}, {object_position[1]}")
return False
# Check orientation
object_orientation = p.getEulerFromQuaternion(object_orientation)
if abs(object_orientation[0]) > 1e-2 or abs(object_orientation[1]) > 1e-2:
print(f"Wrong orientation. Skip! {object_orientation}")
return False
return True
    def get_push_action(self, depth, env):
        """Pick a push start point from the depth map; the robot pushes from left
        to right. `env` supplies the workspace bounds and pixel size for the
        pixel-to-world conversion at the end."""
depth_heightmap = np.copy(depth)
depth_heightmap[depth_heightmap <= self.depth_min] = 0
depth_heightmap[depth_heightmap > self.depth_min] = 1
y_indices = np.argwhere(depth_heightmap == 1)[:, 1] # Find the y range
if len(y_indices) == 0:
print("find Skip")
return None
y_list_unique, y_list_count = np.unique(y_indices, return_counts=True)
y_list_dist = y_list_count / y_list_count.sum()
y = self.rng.choice(y_list_unique, p=y_list_dist)
x_indices = np.argwhere(depth_heightmap[:, y] == 1)[:, 0] # Find the x range
x_indices_left = np.argwhere(
depth_heightmap[:, max(0, y - GRIPPER_PUSH_RADIUS_PIXEL)] == 1
)[
:, 0
] # Find the x range
x_indices_right = np.argwhere(
depth_heightmap[:, min(y + GRIPPER_PUSH_RADIUS_PIXEL, IMAGE_SIZE - 1)] == 1
)[
:, 0
] # Find the x range
if len(x_indices) == 0:
print("Skip 1")
return None
x = x_indices.min()
if len(x_indices_left) != 0:
x = min(x, x_indices_left.min())
if len(x_indices_right) != 0:
x = min(x, x_indices_right.min())
x = x - GRIPPER_PUSH_RADIUS_SAFE_PIXEL
if x <= 0:
print("Skip 2")
return None
safe_z_position = 0.01
return [
x * env.pixel_size + env.bounds[0][0],
y * env.pixel_size + env.bounds[1][0],
safe_z_position,
]
def get_poses(self, body_ids):
poses = []
for body_id in body_ids:
pos, rot = p.getBasePositionAndOrientation(body_id)
rot = p.getEulerFromQuaternion(rot)
poses.append(pos[0])
poses.append(pos[1])
poses.append(rot[0])
poses.append(rot[1])
poses.append(rot[2])
return poses
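
# Data-layout sketch (hypothetical loader, mirroring the save_* methods above;
# file names follow the "%07d" convention and the pose files hold 5 values per
# object: x, y, roll, pitch, yaw):
def _example_load_sample(base_dir, i):
    name = "%07d" % i
    color0 = cv2.imread(
        os.path.join(base_dir, "data", "prev-color-heightmaps", name + ".color.png")
    )
    depth0 = (
        cv2.imread(
            os.path.join(base_dir, "data", "prev-depth-heightmaps", name + ".depth.png"),
            cv2.IMREAD_UNCHANGED,
        ).astype(np.float32)
        / 100000.0
    )  # depth was saved in units of 1e-5 meters
    action = np.loadtxt(os.path.join(base_dir, "data", "actions", name + ".action.txt"))
    poses0 = np.loadtxt(os.path.join(base_dir, "data", "prev-poses", name + ".pose.txt"))
    return color0, depth0, action, poses0.reshape(-1, 5)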
if __name__ == "__main__":
is_test = False
env = Environment(gui=False)
if is_test:
collector = PushDataCollector(start_iter=0, end_iter=2000)
cases = sorted(glob.glob("hard-cases-test/*.txt"))
else:
collector = PushDataCollector(start_iter=0, end_iter=200000)
cases = sorted(glob.glob("hard-cases/*.txt"))
cases_idx = 0
num_cases = len(cases)
if is_test:
seed = 200000
else:
seed = 0
# multi_thread_start = 160
# multi_thread_end = multi_thread_start + 40
# collector.iter += multi_thread_start
# seed += multi_thread_start
# cases_idx += multi_thread_start
while collector.iter < collector.end_iter:
# if collector.iter > multi_thread_end:
# break
print(f"-----Collecting: {collector.iter + 1}/{collector.end_iter}-----")
collector.reset_np_random(seed)
env.reset(use_gripper=False)
# add objects, some from hard cases and some from random cases
if collector.iter > collector.end_iter // 5:
body_ids, success = collector.add_object_push_from_file(env, cases[cases_idx])
cases_idx += 1
if cases_idx == num_cases:
cases_idx = 0
else:
body_ids, success = collector.add_object_push(env)
if success and collector.is_valid(body_ids, env):
# record info0
color0, depth0, segm0 = utils.get_true_heightmap(env)
poses0 = collector.get_poses(body_ids)
# push
            action = collector.get_push_action(depth0, env)
if action is not None:
action_end = [action[0] + PUSH_DISTANCE, action[1], action[2]]
success = env.push(action, action_end)
success &= env.wait_static()
success &= collector.is_valid(body_ids, env)
if success:
# record info1
color1, depth1, segm1 = utils.get_true_heightmap(env)
poses1 = collector.get_poses(body_ids)
# save data
collector.save_heightmaps(collector.iter, color0, depth0, color1, depth1)
collector.save_action(collector.iter, [action])
collector.save_pose(collector.iter, [poses0], [poses1])
# >>>>> save masks
# segm_ids = np.unique(segm1)
# for sid in segm_ids:
# if sid not in body_ids:
# segm1[segm1 == sid] = 0
# bidxs = []
# for bid in body_ids:
# bidxs.append(segm1 == bid)
# for idx, bidx in enumerate(bidxs):
# segm1[bidx] = idx + 1
# collector.save_masks(collector.iter, segm1)
# <<<<<
collector.iter += 1
seed += 1

# ============================================================
# File: more-main/mcts_utils.py
# ============================================================

from dataset import LifelongEvalDataset
import math
import random
import torch
from torchvision.transforms import functional as TF
import numpy as np
import cv2
import imutils
from models import reinforcement_net
from action_utils_mask import get_orientation, adjust_push_start_point
import utils
from constants import (
GRIPPER_PUSH_RADIUS_PIXEL,
GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL,
PIXEL_SIZE,
PUSH_DISTANCE_PIXEL,
TARGET_LOWER,
TARGET_UPPER,
IMAGE_PAD_WIDTH,
COLOR_MEAN,
COLOR_STD,
DEPTH_MEAN,
DEPTH_STD,
NUM_ROTATION,
GRIPPER_GRASP_INNER_DISTANCE_PIXEL,
GRIPPER_GRASP_WIDTH_PIXEL,
GRIPPER_GRASP_SAFE_WIDTH_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
IMAGE_PAD_WIDTH,
BG_THRESHOLD,
IMAGE_SIZE,
WORKSPACE_LIMITS,
PUSH_DISTANCE,
)
class MCTSHelper:
"""
Simulate the state after push actions.
Evaluation the grasp rewards.
"""
def __init__(self, env, grasp_model_path):
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Initialize Mask R-CNN
# self.mask_model = get_model_instance_segmentation(2)
# self.mask_model.load_state_dict(torch.load(mask_model_path))
# self.mask_model = self.mask_model.to(self.device)
# self.mask_model.eval()
# Initialize Grasp Q Evaluation
self.grasp_model = reinforcement_net()
self.grasp_model.load_state_dict(torch.load(grasp_model_path)["model"], strict=False)
self.grasp_model = self.grasp_model.to(self.device)
self.grasp_model.eval()
self.env = env
self.move_recorder = {}
self.simulation_recorder = {}
def reset(self):
self.move_recorder = {}
self.simulation_recorder = {}
# @torch.no_grad()
# def from_maskrcnn(self, color_image, plot=False):
# """
# Use Mask R-CNN to do instance segmentation and output masks in binary format.
# """
# image = color_image.copy()
# image = TF.to_tensor(image)
# prediction = self.mask_model([image.to(self.device)])[0]
# mask_objs = []
# if plot:
# pred_mask = np.zeros((IMAGE_SIZE, IMAGE_SIZE), dtype=np.uint8)
# for idx, mask in enumerate(prediction["masks"]):
# # NOTE: 0.98 can be tuned
# if prediction["scores"][idx] > 0.98:
# img = mask[0].mul(255).byte().cpu().numpy()
# img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# if np.sum(img == 255) < 100:
# continue
# mask_objs.append(img)
# if plot:
# pred_mask[img > 0] = 255 - idx * 50
# cv2.imwrite(str(idx) + "mask.png", img)
# if plot:
# cv2.imwrite("pred.png", pred_mask)
# print("Mask R-CNN: %d objects detected" % len(mask_objs), prediction["scores"].cpu())
# return mask_objs
# def sample_actions(
# self, object_states, color_image=None, mask_image=None, prev_move=None, plot=False
# ):
# """
# Sample actions around the objects, from the boundary to the center.
# Assume there is no object in "black"
# Output the rotated image, such that the push action is from left to right
# """
# # Retrieve information
# if color_image is None:
# self.env.restore_objects(object_states)
# color_image, _, mask_image = utils.get_true_heightmap(self.env)
# # Process mask into binary format
# masks = []
# for i in self.env.obj_ids["rigid"]:
# mask = np.where(mask_image == i, 255, 0).astype(np.uint8)
# masks.append(mask)
# if len(masks) == 0:
# return [], [], [], [], [], [], [], []
# gray = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
# gray = gray.astype(np.uint8)
# if plot:
# plot_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
# blurred = cv2.medianBlur(gray, 5)
# thresh = cv2.threshold(blurred, 20, 255, cv2.THRESH_BINARY)[1]
# cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# cnts = imutils.grab_contours(cnts)
# # find the contour of a single object
# points_on_contour = []
# points = []
# # four_idx = []
# other_idx = []
# # priority_points_on_contour = []
# # priority_points = []
# for oi in range(len(masks)):
# obj_cnt = cv2.findContours(masks[oi], cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
# obj_cnt = imutils.grab_contours(obj_cnt)
# obj_cnt = sorted(obj_cnt, key=lambda x: cv2.contourArea(x))
# if len(obj_cnt) == 0:
# continue
# else:
# obj_cnt = obj_cnt[-1]
# # if too small, then, we skip
# if cv2.contourArea(obj_cnt) < 10:
# continue
# # get center
# M = cv2.moments(obj_cnt)
# cX = round(M["m10"] / M["m00"])
# cY = round(M["m01"] / M["m00"])
# if plot:
# cv2.circle(plot_image, (cX, cY), 3, (255, 255, 255), -1)
# # get pca angle
# # angle = get_orientation(obj_cnt)
# # get contour points
# skip_num = len(obj_cnt) // 12 # 12 possible pushes for an object
# skip_count = 0
# # diff_angle_limit_four = 0.3
# # target_diff_angles = np.array([0, np.pi, np.pi / 2, 3 * np.pi / 2])
# # add the consecutive move
# # if prev_move:
# # prev_angle = math.atan2(
# # prev_move[1][1] - prev_move[0][1], prev_move[1][0] - prev_move[0][0]
# # )
# # pose = (cX - math.cos(prev_angle) * 2, cY - math.sin(prev_angle) * 2)
# # x = pose[0]
# # y = pose[1]
# # diff_x = cX - x
# # diff_y = cY - y
# # diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
# # diff_x /= diff_norm
# # diff_y /= diff_norm
# # point_on_contour = (round(x), round(y))
# # diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
# # point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
# # diff_mul = adjust_push_start_point(
# # (cX, cY), point_on_contour, obj_cnt, add_distance=0
# # )
# # test_point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
# # if is_close(prev_move[1], test_point):
# # if len(priority_points) > 0:
# # prev_dis = close_distance(prev_move[1], priority_points[0])
# # this_dis = close_distance(prev_move[1], test_point)
# # if this_dis < prev_dis:
# # priority_points_on_contour[0] = point_on_contour
# # priority_points[0] = point
# # else:
# # priority_points_on_contour.append(point_on_contour)
# # priority_points.append(point)
# # add four directions to center of object
# # four_poses = [
# # (cX + math.cos(angle) * 2, cY + math.sin(angle) * 2),
# # (cX + math.cos(angle + np.pi / 2) * 2, cY + math.sin(angle + np.pi / 2) * 2),
# # (cX + math.cos(angle - np.pi / 2) * 2, cY + math.sin(angle - np.pi / 2) * 2),
# # (cX - math.cos(angle) * 2, cY - math.sin(angle) * 2),
# # ]
# # for pose in four_poses:
# # x = pose[0]
# # y = pose[1]
# # diff_x = cX - x
# # diff_y = cY - y
# # diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
# # diff_x /= diff_norm
# # diff_y /= diff_norm
# # point_on_contour = (round(x), round(y))
# # diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
# # point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
# # points_on_contour.append(point_on_contour)
# # points.append(point)
# # four_idx.append(len(points) - 1)
# tested_angles = []
# for pi, p in enumerate(obj_cnt):
# x = p[0][0]
# y = p[0][1]
# if x == cX or y == cY:
# continue
# diff_x = cX - x
# diff_y = cY - y
# test_angle = math.atan2(diff_y, diff_x)
# should_append = False
# # avoid four directions to center of object
# # if (
# # np.min(np.abs(abs(angle - test_angle) - target_diff_angles))
# # < diff_angle_limit_four
# # ):
# # should_append = False
# # skip_count = 0
# if skip_count == skip_num:
# should_append = True
# if should_append:
# diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
# diff_x /= diff_norm
# diff_y /= diff_norm
# point_on_contour = (round(x), round(y))
# diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
# point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
# points_on_contour.append(point_on_contour)
# points.append(point)
# other_idx.append(len(points) - 1)
# skip_count = 0
# tested_angles.append(test_angle)
# else:
# skip_count += 1
# # random actions, adding priority points at the end
# # random.shuffle(four_idx)
# random.shuffle(other_idx)
# new_points = []
# new_points_on_contour = []
# for idx in other_idx:
# new_points.append(points[idx])
# new_points_on_contour.append(points_on_contour[idx])
# # for idx in four_idx:
# # new_points.append(points[idx])
# # new_points_on_contour.append(points_on_contour[idx])
# # new_points.extend(priority_points)
# # new_points_on_contour.extend(priority_points_on_contour)
# points = new_points
# points_on_contour = new_points_on_contour
# if plot:
# # loop over the contours
# for c in cnts:
# cv2.drawContours(plot_image, [c], -1, (133, 137, 140), 2)
# actions = []
# for pi in range(len(points)):
# # out of boundary
# if (
# points[pi][0] < 5
# or points[pi][0] > IMAGE_SIZE - 5
# or points[pi][1] < 5
# or points[pi][1] > IMAGE_SIZE - 5
# ):
# qualify = False
# # consecutive action
# # elif pi >= len(points) - len(priority_points):
# # qualify = True
# # clearance large
# elif (
# np.sum(
# thresh[
# max(0, points[pi][1] - GRIPPER_PUSH_RADIUS_SAFE_PIXEL) : min(
# IMAGE_SIZE, points[pi][1] + GRIPPER_PUSH_RADIUS_SAFE_PIXEL + 1
# ),
# max(0, points[pi][0] - GRIPPER_PUSH_RADIUS_SAFE_PIXEL) : min(
# IMAGE_SIZE, points[pi][0] + GRIPPER_PUSH_RADIUS_SAFE_PIXEL + 1
# ),
# ]
# > 0
# )
# == 0
# ):
# qualify = True
# # clearance small
# else:
# # compute rotation angle
# down = (0, 1)
# current = (
# points_on_contour[pi][0] - points[pi][0],
# points_on_contour[pi][1] - points[pi][1],
# )
# dot = (
# down[0] * current[0] + down[1] * current[1]
# ) # dot product between [x1, y1] and [x2, y2]
# det = down[0] * current[1] - down[1] * current[0] # determinant
# angle = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)
# angle = math.degrees(angle)
# crop = thresh[
# points[pi][1]
# - GRIPPER_PUSH_RADIUS_SAFE_PIXEL : points[pi][1]
# + GRIPPER_PUSH_RADIUS_SAFE_PIXEL
# + 1,
# points[pi][0]
# - GRIPPER_PUSH_RADIUS_SAFE_PIXEL : points[pi][0]
# + GRIPPER_PUSH_RADIUS_SAFE_PIXEL
# + 1,
# ]
# if crop.shape == (
# GRIPPER_PUSH_RADIUS_SAFE_PIXEL * 2 + 1,
# GRIPPER_PUSH_RADIUS_SAFE_PIXEL * 2 + 1,
# ):
# crop = utils.rotate(crop, angle)
# (h, w) = crop.shape
# crop_cy, crop_cx = (h // 2, w // 2)
# crop = crop[
# crop_cy
# - math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2) : crop_cy
# + math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2)
# + 1,
# crop_cx
# - GRIPPER_PUSH_RADIUS_PIXEL : crop_cx
# + GRIPPER_PUSH_RADIUS_PIXEL
# + 1,
# ]
# qualify = np.sum(crop > 0) == 0
# else:
# qualify = False
# if qualify:
# if plot:
# diff_x = points_on_contour[pi][0] - points[pi][0]
# diff_y = points_on_contour[pi][1] - points[pi][1]
# diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
# diff_x /= diff_norm
# diff_y /= diff_norm
# point_to = (
# int(points[pi][0] + diff_x * PUSH_DISTANCE / PIXEL_SIZE / 2),
# int(points[pi][1] + diff_y * PUSH_DISTANCE / PIXEL_SIZE / 2),
# )
# if pi < len(other_idx):
# cv2.arrowedLine(
# plot_image, points[pi], point_to, (0, 0, 255), 2, tipLength=0.2,
# )
# # elif pi >= len(points) - len(priority_points):
# # cv2.arrowedLine(
# # plot_image, tuple(points[pi]), point_to, (0, 255, 0), 2, tipLength=0.2,
# # )
# else:
# cv2.arrowedLine(
# plot_image, points[pi], point_to, (255, 0, 0), 2, tipLength=0.2,
# )
# push_start = (points[pi][1], points[pi][0])
# push_vector = np.array(
# [
# points_on_contour[pi][1] - points[pi][1],
# points_on_contour[pi][0] - points[pi][0],
# ]
# )
# unit_push = push_vector / np.linalg.norm(push_vector)
# push_end = (
# round(push_start[0] + unit_push[0] * PUSH_DISTANCE / PIXEL_SIZE),
# round(push_start[1] + unit_push[1] * PUSH_DISTANCE / PIXEL_SIZE),
# )
# actions.append([push_start, push_end])
# if plot:
# cv2.imwrite("test.png", plot_image)
# return actions
def check_valid(self, point, point_on_contour, thresh):
# out of boundary
if not (
GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
< point[0]
< IMAGE_SIZE - GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
) or not (
GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
< point[1]
< IMAGE_SIZE - GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
):
qualify = False
else:
# compute rotation angle
down = (0, 1)
current = (
point_on_contour[0] - point[0],
point_on_contour[1] - point[1],
)
dot = (
down[0] * current[0] + down[1] * current[1]
) # dot product between [x1, y1] and [x2, y2]
det = down[0] * current[1] - down[1] * current[0] # determinant
angle = math.atan2(det, dot) # atan2(y, x) or atan2(sin, cos)
angle = math.degrees(angle)
crop = thresh[
point[1]
- GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL : point[1]
+ GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
+ 1,
point[0]
- GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL : point[0]
+ GRIPPER_PUSH_RADIUS_SAFE_PAD_PIXEL
+ 1,
]
# test the rotated crop part
crop = utils.rotate(crop, angle, is_mask=True)
(h, w) = crop.shape
crop_cy, crop_cx = (h // 2, w // 2)
crop = crop[
crop_cy
- math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2)
- 1 : crop_cy
+ math.ceil(GRIPPER_GRASP_WIDTH_PIXEL / 2)
+ 2,
crop_cx - GRIPPER_PUSH_RADIUS_PIXEL - 1 : crop_cx + GRIPPER_PUSH_RADIUS_PIXEL + 2,
]
qualify = np.sum(crop > 0) == 0
return qualify
def global_adjust(self, point, point_on_contour, thresh):
for dis in [0.01, 0.02]:
dis = dis / PIXEL_SIZE
diff_x = point_on_contour[0] - point[0]
diff_y = point_on_contour[1] - point[1]
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
test_point = (round(point[0] - diff_x * dis), round(point[1] - diff_y * dis))
qualify = self.check_valid(test_point, point_on_contour, thresh)
if qualify:
return qualify, test_point
return False, None
def sample_actions(
self, object_states, color_image=None, mask_image=None, env=None, plot=False, masks=None
):
"""
Sample actions around the objects, from the boundary to the center.
Assume there is no object in "black"
Output the rotated image, such that the push action is from left to right
"""
if env is None:
env = self.env
# Retrieve information
if color_image is None:
env.restore_objects(object_states)
color_image, _, mask_image = utils.get_true_heightmap(env)
# Process mask into binary format
if masks is None:
masks = []
for i in env.obj_ids["rigid"]:
mask = np.where(mask_image == i, 255, 0).astype(np.uint8)
masks.append(mask)
if len(masks) == 0:
return None
gray = cv2.cvtColor(color_image, cv2.COLOR_RGB2GRAY)
gray = gray.astype(np.uint8)
if plot:
plot_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
blurred = cv2.medianBlur(gray, 5)
thresh = cv2.threshold(blurred, 20, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
cnts = imutils.grab_contours(cnts)
# find the contour of a single object
points_on_contour = []
points = []
four_idx = []
other_idx = []
for oi in range(len(masks)):
obj_cnt = cv2.findContours(masks[oi], cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
obj_cnt = imutils.grab_contours(obj_cnt)
obj_cnt = sorted(obj_cnt, key=lambda x: cv2.contourArea(x))
if len(obj_cnt) == 0:
continue
else:
obj_cnt = obj_cnt[-1]
# if too small, then, we skip
if cv2.contourArea(obj_cnt) < 10:
continue
# get center
M = cv2.moments(obj_cnt)
cX = round(M["m10"] / M["m00"])
cY = round(M["m01"] / M["m00"])
if plot:
cv2.circle(plot_image, (cX, cY), 3, (255, 255, 255), -1)
# get pca angle
angle = get_orientation(obj_cnt)
# get contour points
# skip_num = len(obj_cnt) // 12 # 12 possible pushes for an object
# skip_count = 0
            diff_angle_limit = 0.75  # radians, roughly 45 degrees
# target_diff_angles = np.array([0, np.pi, np.pi / 2, 3 * np.pi / 2])
target_diff_angles = []
# add four directions to center of object
four_poses = [
(cX + math.cos(angle) * 2, cY + math.sin(angle) * 2),
(cX + math.cos(angle + np.pi / 2) * 2, cY + math.sin(angle + np.pi / 2) * 2),
(cX + math.cos(angle - np.pi / 2) * 2, cY + math.sin(angle - np.pi / 2) * 2),
(cX - math.cos(angle) * 2, cY - math.sin(angle) * 2),
]
for pose in four_poses:
x = pose[0]
y = pose[1]
diff_x = cX - x
diff_y = cY - y
test_angle = math.atan2(diff_y, diff_x)
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_on_contour = (round(x), round(y))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
should_append = self.check_valid(point, point_on_contour, thresh)
if not should_append:
should_append, point = self.global_adjust(point, point_on_contour, thresh)
if should_append:
points_on_contour.append(point_on_contour)
points.append(point)
four_idx.append(len(points) - 1)
target_diff_angles.append(test_angle)
for pi, p in enumerate(obj_cnt):
x = p[0][0]
y = p[0][1]
if x == cX or y == cY:
continue
diff_x = cX - x
diff_y = cY - y
test_angle = math.atan2(diff_y, diff_x)
# avoid similar directions to center of object
if len(target_diff_angles) > 0:
test_target_diff_angles = np.abs(np.array(target_diff_angles) - test_angle)
should_append = (
np.min(test_target_diff_angles) > diff_angle_limit
and np.max(test_target_diff_angles) < math.pi * 2 - diff_angle_limit
)
else:
should_append = True
if should_append:
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_on_contour = (round(x), round(y))
diff_mul = adjust_push_start_point((cX, cY), point_on_contour, obj_cnt)
point = (round(x - diff_x * diff_mul), round(y - diff_y * diff_mul))
should_append = self.check_valid(point, point_on_contour, thresh)
if not should_append:
should_append, point = self.global_adjust(point, point_on_contour, thresh)
if should_append:
points_on_contour.append(point_on_contour)
points.append(point)
other_idx.append(len(points) - 1)
target_diff_angles.append(test_angle)
        # randomize action order (the final shuffle below mixes contour pushes and principal-direction pushes)
random.shuffle(four_idx)
random.shuffle(other_idx)
new_points = []
new_points_on_contour = []
for idx in other_idx:
new_points.append(points[idx])
new_points_on_contour.append(points_on_contour[idx])
for idx in four_idx:
new_points.append(points[idx])
new_points_on_contour.append(points_on_contour[idx])
points = new_points
points_on_contour = new_points_on_contour
idx_list = list(range(len(points)))
random.shuffle(idx_list)
new_points = []
new_points_on_contour = []
for idx in idx_list:
new_points.append(points[idx])
new_points_on_contour.append(points_on_contour[idx])
points = new_points
points_on_contour = new_points_on_contour
if plot:
# loop over the contours
for c in cnts:
cv2.drawContours(plot_image, [c], -1, (133, 137, 140), 2)
actions = []
for pi in range(len(points)):
if plot:
diff_x = points_on_contour[pi][0] - points[pi][0]
diff_y = points_on_contour[pi][1] - points[pi][1]
diff_norm = math.sqrt(diff_x ** 2 + diff_y ** 2)
diff_x /= diff_norm
diff_y /= diff_norm
point_to = (
int(points[pi][0] + diff_x * PUSH_DISTANCE / PIXEL_SIZE / 2),
int(points[pi][1] + diff_y * PUSH_DISTANCE / PIXEL_SIZE / 2),
)
if pi < len(other_idx):
cv2.arrowedLine(
plot_image, points[pi], point_to, (0, 0, 255), 2, tipLength=0.2,
)
else:
cv2.arrowedLine(
plot_image, points[pi], point_to, (255, 0, 0), 2, tipLength=0.2,
)
push_start = (points[pi][1], points[pi][0])
push_vector = np.array(
[
points_on_contour[pi][1] - points[pi][1],
points_on_contour[pi][0] - points[pi][0],
]
)
unit_push = push_vector / np.linalg.norm(push_vector)
push_end = (
round(push_start[0] + unit_push[0] * PUSH_DISTANCE / PIXEL_SIZE),
round(push_start[1] + unit_push[1] * PUSH_DISTANCE / PIXEL_SIZE),
)
actions.append([push_start, push_end])
if plot:
cv2.imwrite("test.png", plot_image)
return actions
def simulate(self, push_start, push_end, restore_states=None):
if restore_states is not None:
self.env.restore_objects(restore_states)
push_start = [
push_start[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
push_start[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
push_end = [
push_end[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
push_end[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
success = self.env.push(push_start, push_end, verbose=False)
if not success:
return None
self.env.wait_static()
object_states = self.env.save_objects()
# Check if all objects are still in workspace
for obj in object_states:
pos = obj[0]
if (
pos[0] < WORKSPACE_LIMITS[0][0]
or pos[0] > WORKSPACE_LIMITS[0][1]
or pos[1] < WORKSPACE_LIMITS[1][0]
or pos[1] > WORKSPACE_LIMITS[1][1]
):
return None
color_image, depth_image, mask_image = utils.get_true_heightmap(self.env)
return color_image, depth_image, mask_image, object_states
@torch.no_grad()
def get_grasp_q(self, color_heightmap, depth_heightmap, post_checking=False, is_real=False):
color_heightmap_pad = np.copy(color_heightmap)
depth_heightmap_pad = np.copy(depth_heightmap)
# Add extra padding (to handle rotations inside network)
color_heightmap_pad = np.pad(
color_heightmap_pad,
((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)),
"constant",
constant_values=0,
)
depth_heightmap_pad = np.pad(
depth_heightmap_pad, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
# Pre-process color image (scale and normalize)
image_mean = COLOR_MEAN
image_std = COLOR_STD
input_color_image = color_heightmap_pad.astype(float) / 255
for c in range(3):
input_color_image[:, :, c] = (input_color_image[:, :, c] - image_mean[c]) / image_std[c]
# Pre-process depth image (normalize)
image_mean = DEPTH_MEAN
image_std = DEPTH_STD
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1], 1)
input_depth_image = np.copy(depth_heightmap_pad)
input_depth_image[:, :, 0] = (input_depth_image[:, :, 0] - image_mean[0]) / image_std[0]
# Construct minibatch of size 1 (b,c,h,w)
input_color_image.shape = (
input_color_image.shape[0],
input_color_image.shape[1],
input_color_image.shape[2],
1,
)
input_depth_image.shape = (
input_depth_image.shape[0],
input_depth_image.shape[1],
input_depth_image.shape[2],
1,
)
input_color_data = torch.from_numpy(input_color_image.astype(np.float32)).permute(
3, 2, 0, 1
)
input_depth_data = torch.from_numpy(input_depth_image.astype(np.float32)).permute(
3, 2, 0, 1
)
# Pass input data through model
output_prob = self.grasp_model(input_color_data, input_depth_data, True, -1, False)
# Return Q values (and remove extra padding)
for rotate_idx in range(len(output_prob)):
if rotate_idx == 0:
grasp_predictions = (
output_prob[rotate_idx][1].cpu().data.numpy()[:, 0, :, :,]
)
else:
grasp_predictions = np.concatenate(
(
grasp_predictions,
output_prob[rotate_idx][1].cpu().data.numpy()[:, 0, :, :,],
),
axis=0,
)
# post process, only grasp one object, focus on blue object
temp = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
mask_bg = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask_bg_pad = np.pad(mask_bg, IMAGE_PAD_WIDTH, "constant", constant_values=255)
# focus on blue
for rotate_idx in range(len(grasp_predictions)):
grasp_predictions[rotate_idx][mask_pad != 255] = 0
padding_width_start = IMAGE_PAD_WIDTH
padding_width_end = grasp_predictions[0].shape[0] - IMAGE_PAD_WIDTH
# only grasp one object
kernel_big = np.ones(
(GRIPPER_GRASP_SAFE_WIDTH_PIXEL, GRIPPER_GRASP_INNER_DISTANCE_PIXEL), dtype=np.uint8
)
        # In the real world, the color/depth sensor and lighting make objects look
        # slightly smaller, so use looser thresholds there.
        if is_real:
threshold_big = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 5
threshold_small = (
GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 10
)
else:
threshold_big = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 10
threshold_small = (
GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 20
)
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1])
for rotate_idx in range(len(grasp_predictions)):
color_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
color_mask[color_mask == 0] = 1
color_mask[color_mask == 255] = 0
no_target_mask = color_mask
bg_mask = utils.rotate(mask_bg_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
no_target_mask[bg_mask == 255] = 0
# only grasp one object
invalid_mask = cv2.filter2D(no_target_mask, -1, kernel_big)
invalid_mask = utils.rotate(invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True)
grasp_predictions[rotate_idx][invalid_mask > threshold_small] = (
grasp_predictions[rotate_idx][invalid_mask > threshold_small] / 2
)
grasp_predictions[rotate_idx][invalid_mask > threshold_big] = 0
# collision checking, only work for one level
if post_checking:
mask = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask = 255 - mask
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
check_kernel = np.ones(
(GRIPPER_GRASP_WIDTH_PIXEL, GRIPPER_GRASP_OUTER_DISTANCE_PIXEL), dtype=np.uint8
)
left_bound = math.floor(
(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL - GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2
)
right_bound = (
math.ceil(
(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL + GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2
)
+ 1
)
check_kernel[:, left_bound:right_bound] = 0
for rotate_idx in range(len(grasp_predictions)):
object_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
invalid_mask = cv2.filter2D(object_mask, -1, check_kernel)
invalid_mask[invalid_mask > 5] = 255
invalid_mask = utils.rotate(
invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True
)
grasp_predictions[rotate_idx][invalid_mask > 128] = 0
grasp_predictions = grasp_predictions[
:, padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
best_pix_ind = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
grasp_q_value = grasp_predictions[best_pix_ind]
return grasp_q_value, best_pix_ind, grasp_predictions
def get_prediction_vis(self, predictions, color_heightmap, best_pix_ind, is_push=False):
canvas = None
num_rotations = predictions.shape[0]
for canvas_row in range(int(num_rotations / 4)):
tmp_row_canvas = None
for canvas_col in range(4):
rotate_idx = canvas_row * 4 + canvas_col
prediction_vis = predictions[rotate_idx, :, :].copy()
prediction_vis = np.clip(prediction_vis, 0, 1)
prediction_vis.shape = (predictions.shape[1], predictions.shape[2])
prediction_vis = cv2.applyColorMap(
(prediction_vis * 255).astype(np.uint8), cv2.COLORMAP_JET
)
if rotate_idx == best_pix_ind[0]:
prediction_vis = cv2.circle(
prediction_vis,
(int(best_pix_ind[2]), int(best_pix_ind[1])),
7,
(0, 0, 255),
2,
)
prediction_vis = utils.rotate(prediction_vis, rotate_idx * (360.0 / num_rotations))
if rotate_idx == best_pix_ind[0]:
center = np.array([[[int(best_pix_ind[2]), int(best_pix_ind[1])]]])
M = cv2.getRotationMatrix2D(
(prediction_vis.shape[1] // 2, prediction_vis.shape[0] // 2,),
rotate_idx * (360.0 / num_rotations),
1,
)
center = cv2.transform(center, M)
center = np.transpose(center[0])
if is_push:
point_from = (int(center[0]), int(center[1]))
point_to = (int(center[0] + PUSH_DISTANCE_PIXEL), int(center[1]))
prediction_vis = cv2.arrowedLine(
prediction_vis, point_from, point_to, (100, 255, 0), 2, tipLength=0.2,
)
else:
prediction_vis = cv2.rectangle(
prediction_vis,
(
max(0, int(center[0]) - GRIPPER_GRASP_INNER_DISTANCE_PIXEL // 2),
max(0, int(center[1]) - GRIPPER_GRASP_WIDTH_PIXEL // 2),
),
(
min(
prediction_vis.shape[1],
int(center[0]) + GRIPPER_GRASP_INNER_DISTANCE_PIXEL // 2,
),
min(
prediction_vis.shape[0],
int(center[1]) + GRIPPER_GRASP_WIDTH_PIXEL // 2,
),
),
(100, 255, 0),
1,
)
prediction_vis = cv2.rectangle(
prediction_vis,
(
max(0, int(center[0]) - GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2),
max(0, int(center[1]) - GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2),
),
(
min(
prediction_vis.shape[1],
int(center[0]) + GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2,
),
min(
prediction_vis.shape[0],
int(center[1]) + GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2,
),
),
(100, 100, 155),
1,
)
background_image = utils.rotate(
color_heightmap, rotate_idx * (360.0 / num_rotations)
)
prediction_vis = (
0.5 * cv2.cvtColor(background_image, cv2.COLOR_RGB2BGR) + 0.5 * prediction_vis
).astype(np.uint8)
if tmp_row_canvas is None:
tmp_row_canvas = prediction_vis
else:
tmp_row_canvas = np.concatenate((tmp_row_canvas, prediction_vis), axis=1)
if canvas is None:
canvas = tmp_row_canvas
else:
canvas = np.concatenate((canvas, tmp_row_canvas), axis=0)
return canvas
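
# Illustrative one-step lookahead (not code from the original repo; assumes a
# ready Environment `env` and a grasp checkpoint path): sample pushes, simulate
# each, and keep the push whose resulting state scores the best grasp Q.
def _example_one_step_lookahead(env, grasp_model_path):
    helper = MCTSHelper(env, grasp_model_path)
    states = env.save_objects()
    actions = helper.sample_actions(states)
    best_q, best_action = -1.0, None
    for push_start, push_end in actions or []:
        result = helper.simulate(push_start, push_end, restore_states=states)
        if result is None:  # push failed or an object left the workspace
            continue
        color, depth, _, _ = result
        q, _, _ = helper.get_grasp_q(color, depth, post_checking=True)
        if q > best_q:
            best_q, best_action = q, (push_start, push_end)
    env.restore_objects(states)  # undo simulation side effects
    return best_action, best_q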
@torch.no_grad()
def _sampled_prediction_precise(env, model, actions, mask_image):
model.pre_train = True
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
dataset = LifelongEvalDataset(env, actions, mask_image)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=len(actions), shuffle=False, num_workers=0, drop_last=False
)
rot_angle, input_data = next(iter(data_loader))
input_data = input_data.to(device)
# get output
output = model(input_data)
output = output.cpu().numpy()
rot_angle = rot_angle.numpy()
out_q = []
for idx, out in enumerate(output):
out = utils.rotate(out[0], -rot_angle[idx])
action = actions[idx]
q = np.max(
out[
action[0][0] + IMAGE_PAD_WIDTH - 3 : action[0][0] + IMAGE_PAD_WIDTH + 4,
action[0][1] + IMAGE_PAD_WIDTH - 3 : action[0][1] + IMAGE_PAD_WIDTH + 4,
]
)
out_q.append(q)
return out_q
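
# Usage sketch for _sampled_prediction_precise (hedged: the PushNet checkpoint
# path and dict key are placeholders; `env`, `actions`, and `mask_image` come
# from the surrounding pipeline):
def _example_score_actions(env, actions, mask_image, model_path="<push_net_snapshot>.pth"):
    from models import PushNet
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = PushNet(pre_train=True)
    model.load_state_dict(torch.load(model_path, map_location=device)["model"])
    model = model.to(device).eval()
    q_values = _sampled_prediction_precise(env, model, actions, mask_image)
    best_idx = int(np.argmax(q_values))  # pick the highest-Q push
    return actions[best_idx], q_values[best_idx]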
@torch.no_grad()
def from_maskrcnn(model, color_image, device, plot=False):
"""
Use Mask R-CNN to do instance segmentation and output masks in binary format.
Assume it works in real world
"""
image = color_image.copy()
image = TF.to_tensor(image)
prediction = model([image.to(device)])[0]
final_mask = np.zeros((720, 1280), dtype=np.uint8)
labels = {}
if plot:
pred_mask = np.zeros((720, 1280), dtype=np.uint8)
for idx, mask in enumerate(prediction["masks"]):
        # NOTE: the score threshold (0.7) can be tuned
        threshold = 0.7
if prediction["scores"][idx] > threshold:
# get mask
img = mask[0].mul(255).byte().cpu().numpy()
# img = cv2.GaussianBlur(img, (3, 3), 0)
img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
# too small
if np.sum(img == 255) < 100:
continue
            # skip if more than 75% of this mask overlaps already-assigned pixels
if np.sum(np.logical_and(final_mask > 0, img == 255)) > np.sum(img == 255) * 3 / 4:
continue
fill_pixels = np.logical_and(final_mask == 0, img == 255)
final_mask[fill_pixels] = idx + 1
labels[(idx + 1)] = prediction["labels"][idx].cpu().item()
if plot:
pred_mask[img > 0] = prediction["labels"][idx].cpu().item() * 10
cv2.imwrite(str(idx) + "mask.png", img)
if plot:
cv2.imwrite("pred.png", pred_mask)
print("Mask R-CNN: %d objects detected" % (len(np.unique(final_mask)) - 1), prediction["scores"].cpu())
return final_mask, labels
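
# Sketch (hypothetical helper): converting the labeled mask returned by
# from_maskrcnn into the list of per-object binary masks that
# MCTSHelper.sample_actions accepts through its `masks` argument.
def _example_split_instance_mask(final_mask):
    masks = []
    for instance_id in np.unique(final_mask):
        if instance_id == 0:  # background
            continue
        masks.append(np.where(final_mask == instance_id, 255, 0).astype(np.uint8))
    return masks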

# ============================================================
# File: more-main/train_foreground.py
# ============================================================

import torch
from models import reinforcement_net
from dataset import ForegroundDataset
import argparse
import time
import datetime
import os
from constants import PUSH_Q, GRASP_Q, NUM_ROTATION
from torch.utils.tensorboard import SummaryWriter
import log_utils
import torch_utils
def parse_args():
default_params = {
"lr": 1e-6,
"batch_size": 16,
"t_0": 5, # CosineAnnealing, start 1 6 16 36 76
"t_mult": 2, # CosineAnnealing, period 5 10 20 40
"eta_min": 1e-15, # CosineAnnealing, minimum lr
"epochs": 36, # CosineAnnealing, should end before warm start
"loss_beta": 1,
"num_rotation": NUM_ROTATION,
}
parser = argparse.ArgumentParser(description="Train foreground")
parser.add_argument(
"--lr",
action="store",
type=float,
default=default_params["lr"],
help="Enter the learning rate",
)
parser.add_argument(
"--batch_size",
action="store",
default=default_params["batch_size"],
type=int,
help="Enter the batchsize for training and testing",
)
parser.add_argument(
"--t_0",
action="store",
default=default_params["t_0"],
type=int,
help="The t_0 of CosineAnnealing",
)
parser.add_argument(
"--t_mult",
action="store",
default=default_params["t_mult"],
type=int,
help="The t_mult of CosineAnnealing",
)
parser.add_argument(
"--eta_min",
action="store",
default=default_params["eta_min"],
type=float,
help="The eta_min of CosineAnnealing",
)
parser.add_argument(
"--epochs",
action="store",
default=default_params["epochs"],
type=int,
help="Enter the epoch for training",
)
parser.add_argument(
"--loss_beta",
action="store",
default=default_params["loss_beta"],
type=int,
help="The beta of SmoothL1Loss",
)
parser.add_argument(
"--num_rotation",
action="store",
default=default_params["num_rotation"],
type=int,
help="Number of rotation",
)
parser.add_argument("--dataset_root", action="store", help="Enter the path to the dataset")
parser.add_argument(
"--pretrained_model", action="store", help="The path to the pretrained model"
)
parser.add_argument(
"--test", action="store_true", default=False, help="Testing and visualizing"
)
args = parser.parse_args()
return args
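# Example invocations (added sketch; the dataset paths are placeholders):
# python train_foreground.py --dataset_root logs/foreground --batch_size 16 --epochs 36
# python train_foreground.py --dataset_root logs/foreground --test \
#     --pretrained_model logs/foreground/runs/<time>/foreground_model-35.pth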
class ForegroundTrainer:
def __init__(self, args):
self.params = {
"lr": args.lr,
"batch_size": args.batch_size,
"t_0": args.t_0, # CosineAnnealing, start 0 4 12 28
"t_mult": args.t_mult, # CosineAnnealing, period 4 8 16
"eta_min": args.eta_min, # CosineAnnealing, minimum lr
"epochs": args.epochs, # CosineAnnealing, should end before warm start
"loss_beta": args.loss_beta,
"num_rotation": args.num_rotation,
}
self.dataset_root = args.dataset_root
self.pretrained_model = args.pretrained_model
self.test = args.test
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
if not self.test:
self.log_dir = os.path.join(self.dataset_root, "runs")
if not os.path.exists(self.log_dir):
os.makedirs(self.log_dir)
timestamp_value = datetime.datetime.fromtimestamp(time.time())
time_name = timestamp_value.strftime("%Y-%m-%d-%H-%M")
self.log_dir = os.path.join(self.log_dir, time_name)
self.tb_logger = SummaryWriter(self.log_dir)
self.logger = log_utils.setup_logger(self.log_dir, "Foreground")
def main(self):
model = reinforcement_net(True)
model = model.to(self.device)
criterion_push = torch.nn.SmoothL1Loss(beta=self.params["loss_beta"], reduction="none")
criterion_grasp = torch.nn.SmoothL1Loss(beta=self.params["loss_beta"], reduction="none")
optimizer = torch.optim.SGD(
[p for p in model.parameters() if p.requires_grad],
            lr=self.params["lr"],
momentum=0.9,
weight_decay=2e-5,
)
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(
optimizer,
T_0=self.params["t_0"],
T_mult=self.params["t_mult"],
eta_min=self.params["eta_min"],
last_epoch=-1,
verbose=False,
)
start_epoch = 0
if self.pretrained_model is not None:
checkpoint = torch.load(self.pretrained_model)
model.load_state_dict(checkpoint["model"], strict=False)
optimizer.load_state_dict(checkpoint["optimizer"])
lr_scheduler.load_state_dict(checkpoint["lr_scheduler"])
start_epoch = checkpoint["epoch"] + 1
# prev_params = checkpoint["params"]
if self.test:
data_loader = self._get_data_loader("test", 1, shuffle=False, test=True)
self._test(model, data_loader)
else:
self.logger.info(f"Hyperparameters: {self.params}")
if self.pretrained_model is not None:
self.logger.info(f"Start from the pretrained model: {self.pretrained_model}")
# self.logger.info(f"Previous Hyperparameters: {prev_params}")
data_loader_train = self._get_data_loader(
"train", self.params["batch_size"], shuffle=True
)
data_loader_test = self._get_data_loader("test", max(1, self.params["batch_size"] // 2))
for epoch in range(start_epoch, self.params["epochs"]):
# warmup start
if epoch == 0:
warmup_factor = 0.001
warmup_iters = min(1000, len(data_loader_train) - 1)
current_lr_scheduler = torch_utils.warmup_lr_scheduler(
optimizer, warmup_iters, warmup_factor
)
else:
current_lr_scheduler = lr_scheduler
train_loss = self._train_one_epoch(
model,
criterion_push,
criterion_grasp,
optimizer,
data_loader_train,
current_lr_scheduler,
epoch,
)
evaluate_loss = self._evaluate(
model, criterion_push, criterion_grasp, data_loader_test
)
save_state = {
"model": model.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
"params": self.params,
}
torch.save(save_state, os.path.join(self.log_dir, f"foreground_model-{epoch}.pth"))
self.tb_logger.add_scalars(
"Epoch_Loss", {"train": train_loss, "test": evaluate_loss}, epoch
)
self.tb_logger.flush()
self.tb_logger.add_hparams(
self.params, {"hparam/train": train_loss, "hparam/test": evaluate_loss}
)
self.logger.info("Training completed!")
def _train_one_epoch(
self,
model,
criterion_push,
criterion_grasp,
optimizer,
data_loader,
lr_scheduler,
epoch,
print_freq=50,
):
model.train()
metric_logger = log_utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("lr", log_utils.SmoothedValue(window_size=1, fmt="{value:.12f}"))
metric_logger.add_meter("loss", log_utils.SmoothedValue())
metric_logger.add_meter("grasp_loss", log_utils.SmoothedValue())
metric_logger.add_meter("push_loss", log_utils.SmoothedValue())
header = "Epoch: [{}]".format(epoch)
losses = []
n_iter = 0
total_iters = len(data_loader)
for (color_images, depth_images, push_targets, grasp_targets) in metric_logger.log_every(
data_loader, print_freq, self.logger, header
):
color_images = color_images.to(self.device, non_blocking=True)
depth_images = depth_images.to(self.device, non_blocking=True)
push_targets = push_targets.to(self.device, non_blocking=True)
grasp_targets = grasp_targets.to(self.device, non_blocking=True)
output = model(color_images, depth_images, use_push=False)
weights_push = torch.ones(push_targets.shape)
weights_grasp = torch.ones(grasp_targets.shape)
weights_push[push_targets > 0] = 2
weights_grasp[grasp_targets > 0] = 2
            loss_push = criterion_push(output[0], push_targets) * weights_push.to(self.device)
            loss_push = loss_push.sum() / push_targets.size(0)
            loss_grasp = criterion_grasp(output[1], grasp_targets) * weights_grasp.to(self.device)
            loss_grasp = loss_grasp.sum() / grasp_targets.size(0)
            optimizer.zero_grad()
            # backward on both heads; the push and grasp towers are separate,
            # so two backward calls are safe
            loss_push.backward()
            loss_grasp.backward()
loss = loss_push + loss_grasp
optimizer.step()
# log
log_loss = loss.item()
log_loss_push = loss_push.item()
log_loss_grasp = loss_grasp.item()
log_lr = optimizer.param_groups[0]["lr"]
metric_logger.update(
loss=log_loss, lr=log_lr, grasp_loss=log_loss_grasp, push_loss=log_loss_push
)
self.tb_logger.add_scalar("Step/Loss/Train", log_loss, total_iters * epoch + n_iter)
            self.tb_logger.add_scalar(
                "Step/Loss/Train/Push", log_loss_push, total_iters * epoch + n_iter
            )
            self.tb_logger.add_scalar(
                "Step/Loss/Train/Grasp", log_loss_grasp, total_iters * epoch + n_iter
            )
self.tb_logger.add_scalar("Step/LR", log_lr, total_iters * epoch + n_iter)
losses.append(log_loss)
if epoch == 0:
lr_scheduler.step()
n_iter += 1
if epoch != 0:
lr_scheduler.step(epoch)
return sum(losses) / len(losses)
@torch.no_grad()
def _evaluate(self, model, criterion_push, criterion_grasp, data_loader, print_freq=10):
model.eval()
metric_logger = log_utils.MetricLogger(delimiter=" ")
metric_logger.add_meter("loss", log_utils.SmoothedValue(window_size=len(data_loader)))
metric_logger.add_meter("grasp_loss", log_utils.SmoothedValue())
metric_logger.add_meter("push_loss", log_utils.SmoothedValue())
losses = []
header = "Test:"
for (color_images, depth_images, push_targets, grasp_targets) in metric_logger.log_every(
data_loader, print_freq, self.logger, header
):
color_images = color_images.to(self.device, non_blocking=True)
depth_images = depth_images.to(self.device, non_blocking=True)
push_targets = push_targets.to(self.device, non_blocking=True)
grasp_targets = grasp_targets.to(self.device, non_blocking=True)
output = model(color_images, depth_images, use_push=False)
weights_push = torch.ones(push_targets.shape)
weights_grasp = torch.ones(grasp_targets.shape)
weights_push[push_targets > 0] = 2
weights_grasp[grasp_targets > 0] = 2
            loss_push = criterion_push(output[0], push_targets) * weights_push.to(self.device)
            loss_push = loss_push.sum() / push_targets.size(0)
            loss_grasp = criterion_grasp(output[1], grasp_targets) * weights_grasp.to(self.device)
            loss_grasp = loss_grasp.sum() / grasp_targets.size(0)
loss = loss_push + loss_grasp
log_loss = loss.item()
log_loss_push = loss_push.item()
log_loss_grasp = loss_grasp.item()
metric_logger.update(loss=log_loss, grasp_loss=log_loss_grasp, push_loss=log_loss_push)
losses.append(log_loss)
return sum(losses) / len(losses)
def _get_data_loader(self, folder, batch_size, shuffle=False, test=False):
"""Get data loader."""
path = os.path.join(self.dataset_root, folder)
dataset = ForegroundDataset(path, self.params["num_rotation"])
if not test:
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle, num_workers=4, drop_last=False
)
else:
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle, num_workers=0, drop_last=False
)
return data_loader
@torch.no_grad()
def _test(self, model, data_loader):
import torchvision
import matplotlib.pyplot as plt
from PIL import Image, ImageStat
torch.manual_seed(1)
model.eval()
ite = iter(data_loader)
for _ in range(5):
color_img_pil, depth_img_pil, push_target_img_pil, grasp_target_img_pil = next(ite)
color_img_pil_train = color_img_pil.to(self.device)
depth_img_pil_train = depth_img_pil.to(self.device)
outputs = model(color_img_pil_train, depth_img_pil_train)
push = outputs[0][0].cpu()
grasp = outputs[1][0].cpu()
push *= 1 / PUSH_Q
push[push > 1] = 1
push[push < 0] = 0
grasp *= 1 / GRASP_Q
grasp[grasp > 1] = 1
grasp[grasp < 0] = 0
new_push = push.clone()
new_grasp = grasp.clone()
new_push[new_push > 0.5] = 1
new_push[new_push <= 0.5] = 0
new_grasp[new_grasp > 0.5] = 1
new_grasp[new_grasp <= 0.5] = 0
to_pil = torchvision.transforms.ToPILImage()
img1 = to_pil(color_img_pil[0])
img2 = to_pil(depth_img_pil[0])
img3 = to_pil(push_target_img_pil[0])
img4 = to_pil(grasp_target_img_pil[0])
img5 = to_pil(push)
img6 = to_pil(grasp)
img7 = to_pil(new_push)
img8 = to_pil(new_grasp)
titles = [
"Color",
"Depth",
"Target_push",
"Target_grasp",
"predicted push",
"predicted grasp",
"binary predicted push",
"binary predicted grasp",
]
images = [img1, img2, img3, img4, img5, img6, img7, img8]
for i in range(len(images)):
plt.subplot(2, 4, i + 1), plt.imshow(images[i], "gray")
plt.title(titles[i])
plt.xticks([]), plt.yticks([])
plt.show()
if __name__ == "__main__":
args = parse_args()
trainer = ForegroundTrainer(args)
trainer.main()
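# --- Added sketch: where the CosineAnnealingWarmRestarts restarts land ---
# With the defaults t_0=5 and t_mult=2 the cosine period doubles after every
# restart, so restarts fall at epochs 5, 5 + 10 = 15, and 15 + 20 = 35; the
# default epochs=36 therefore ends just after the third restart, matching the
# "should end before warm start" comment above.
def _restart_epochs(t_0=5, t_mult=2, n=3):
    restarts, period, total = [], t_0, 0
    for _ in range(n):
        total += period
        restarts.append(total)
        period *= t_mult
    return restarts  # [5, 15, 35]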
# def get_data_loader(dataset_root, batch_size):
# # use our dataset and defined transformations
# dataset = ForegroundDataset(dataset_root, 16)
# # dataset_test = ForegroundDataset(dataset_root, 16)
# # split the dataset in train and test set
# indices = torch.randperm(len(dataset)).tolist()
# start_point = 5
# dataset = torch.utils.data.Subset(dataset, indices[start_point:])
# dataset_test = torch.utils.data.Subset(dataset, indices[:start_point])
# # define training and validation data loaders
# data_loader = torch.utils.data.DataLoader(
# dataset, batch_size=batch_size, shuffle=True, num_workers=4, drop_last=True
# )
# data_loader_test = torch.utils.data.DataLoader(
# dataset_test, batch_size=batch_size, shuffle=False, num_workers=1
# )
# return data_loader, data_loader_test
# def train_one_epoch(
# model,
# criterion_push,
# criterion_grasp,
# optimizer,
# data_loader,
# device,
# epoch,
# print_freq,
# resume=False,
# ):
# """
# https://github.com/pytorch/vision/blob/master/references/detection/engine.py
# """
# model.train()
# metric_logger = utils.MetricLogger(delimiter=" ")
# metric_logger.add_meter("lr", utils.SmoothedValue(window_size=1, fmt="{value:.8f}"))
# header = "Epoch: [{}]".format(epoch)
# lr_scheduler = None
# if epoch == 0 and not resume:
# warmup_factor = 1.0 / 1000
# warmup_iters = min(1000, len(data_loader) - 1)
# lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor)
# for color_images, depth_images, push_targets, grasp_targets in metric_logger.log_every(
# data_loader, print_freq, header
# ):
# color_images = color_images.to(device)
# depth_images = depth_images.to(device)
# push_targets = push_targets.to(device)
# grasp_targets = grasp_targets.to(device)
# optimizer.zero_grad()
# output_probs = model(color_images, depth_images)
# weights = torch.ones(grasp_targets.shape)
#         # if it doesn't converge, just restart; expect the loss to drop below 100 soon and below 60 eventually
# weights[grasp_targets > 0] = 2
# loss1 = criterion_push(output_probs[0], push_targets)
# loss1 = loss1.sum() / push_targets.size(0)
# loss1.backward()
# loss2 = criterion_grasp(output_probs[1], grasp_targets) * weights.cuda()
# loss2 = loss2.sum() / grasp_targets.size(0)
# loss2.backward()
# losses = loss1 + loss2
# optimizer.step()
# if lr_scheduler is not None:
# lr_scheduler.step()
# metric_logger.update(loss=losses.cpu())
# metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# return metric_logger
# def main(args):
# data_loader, data_loader_test = get_data_loader(
# args.dataset_root, args.batch_size, args.fine_tuning_num
# )
# device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# model = reinforcement_net(True) # TODO: remove use_cuda in model, replace with device
# if args.resume:
# # model.load_state_dict(torch.load('data/pre_train/foreground_model.pth'))
# model.load_state_dict(torch.load(os.path.join(args.dataset_root, "foreground_model.pth")))
# criterion_push = torch.nn.SmoothL1Loss(reduction="none")
# criterion_grasp = torch.nn.SmoothL1Loss(reduction="none")
# # criterion_push = torch.nn.BCEWithLogitsLoss()
# # criterion_grasp = torch.nn.BCEWithLogitsLoss(pos_weight=torch.tensor(5))
# # construct an optimizer
# params = [p for p in model.parameters() if p.requires_grad]
# optimizer = torch.optim.SGD(params, lr=args.lr, momentum=0.9, weight_decay=2e-5)
# # optimizer = torch.optim.SGD(params, lr=1e-4, momentum=0.9, weight_decay=2e-5)
# # and a learning rate scheduler which decreases the learning rate by 10x every 1 epochs
# # lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.5)
# # for large dataset
# lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=9, gamma=0.5)
# # for small dataset, expect ~ 50 epochs
# # lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=20, gamma=0.5)
# for epoch in range(args.epochs):
# # train for one epoch, printing every 10 iterations
# train_one_epoch(
# model,
# criterion_push,
# criterion_grasp,
# optimizer,
# data_loader,
# device,
# epoch,
# print_freq=20,
# resume=args.resume,
# )
# # update the learning rate
# lr_scheduler.step()
# # evaluate on the test dataset
# # evaluate(model, criterion, data_loader_test, device=device)
# torch.save(model.state_dict(), os.path.join(args.dataset_root, "foreground_model.pth"))
# @torch.no_grad()
# def test():
# import torchvision
# import matplotlib.pyplot as plt
# from PIL import Image, ImageStat
# torch.manual_seed(2)
# # data_loader, data_loader_test = get_data_loader('data/pre_train/', 1)
# data_loader, data_loader_test = get_data_loader("logs/real-maskrcnn/data", 1)
# # data_loader, data_loader_test = get_data_loader('logs/final-pretrain/data', 1)
# device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# model = reinforcement_net(True)
# # model.load_state_dict(torch.load('data/pre_train/foreground_model.pth'))
# model.load_state_dict(torch.load("logs/random-pretrain/data/foreground_model.pth"))
# # model.load_state_dict(torch.load('logs/real-maskrcnn/data/foreground_model.pth'))
# # model.load_state_dict(torch.load('logs_push/final/data/foreground_model.pth'))
# model.eval().to(device)
# sig = torch.nn.Sigmoid()
# ite = iter(data_loader)
# for _ in range(6):
# color_img_pil, depth_img_pil, push_target_img_pil, grasp_target_img_pil = next(ite)
# color_img_pil_train = color_img_pil.to(device)
# depth_img_pil_train = depth_img_pil.to(device)
# outputs = model(color_img_pil_train, depth_img_pil_train)
# # push = sig(outputs[0][0]).cpu()
# # grasp = sig(outputs[1][0]).cpu()
# push = outputs[0][0].cpu()
# grasp = outputs[1][0].cpu()
# push *= 1 / PUSH_Q
# push[push > 1] = 1
# push[push < 0] = 0
# grasp *= 1 / GRASP_Q
# grasp[grasp > 1] = 1
# grasp[grasp < 0] = 0
# new_push = push.clone()
# new_grasp = grasp.clone()
# new_push[new_push > 0.5] = 1
# new_push[new_push <= 0.5] = 0
# new_grasp[new_grasp > 0.5] = 1
# new_grasp[new_grasp <= 0.5] = 0
# to_pil = torchvision.transforms.ToPILImage()
# img1 = to_pil(color_img_pil[0])
# img2 = to_pil(depth_img_pil[0])
# img3 = to_pil(push_target_img_pil[0])
# img4 = to_pil(grasp_target_img_pil[0])
# img5 = to_pil(push)
# img6 = to_pil(grasp)
# img7 = to_pil(new_push)
# img8 = to_pil(new_grasp)
# titles = [
# "Color",
# "Depth",
# "Target_push",
# "Target_grasp",
# "predicted push",
# "predicted grasp",
# "binary predicted push",
# "binary predicted grasp",
# ]
# images = [img1, img2, img3, img4, img5, img6, img7, img8]
# for i in range(len(images)):
# plt.subplot(2, 4, i + 1), plt.imshow(images[i], "gray")
# plt.title(titles[i])
# plt.xticks([]), plt.yticks([])
# plt.show()
# # plt.savefig('test_pre.png')
# if __name__ == "__main__":
# parser = argparse.ArgumentParser(description="Train foreground")
# parser.add_argument(
# "--dataset_root", dest="dataset_root", action="store", help="Enter the path to the dataset"
# )
# parser.add_argument(
# "--epochs",
# dest="epochs",
# action="store",
# type=int,
# default=30,
# help="Enter the epoch for training",
# )
# parser.add_argument(
# "--batch_size",
# dest="batch_size",
# action="store",
# type=int,
# default=16,
# help="Enter the batchsize for training and testing",
# )
# parser.add_argument(
# "--test", dest="test", action="store_true", default=False, help="Testing and visualizing"
# )
# parser.add_argument(
# "--lr", dest="lr", action="store", type=float, default=1e-6, help="Enter the learning rate"
# )
# parser.add_argument(
# "--real_fine_tuning", dest="real_fine_tuning", action="store_true", default=False, help=""
# )
# parser.add_argument(
# "--fine_tuning_num",
# dest="fine_tuning_num",
# action="store",
# type=int,
# default=16500,
# help="1500 action, one action contains 11 images",
# )
# parser.add_argument(
# "--resume",
# dest="resume",
# action="store_true",
# default=False,
# help="Enter the path to the dataset",
# )
# args = parser.parse_args()
# if args.resume:
# args.epochs = 10
# else:
# args.fine_tuning_num = None
# if args.test:
# test()
# else:
# main(args)
| 24,462 | 35.241481 | 111 |
py
|
more
|
more-main/push_predictor.py
|
import copy
import torch
import gc
import numpy as np
import cv2
from torchvision.transforms import functional as TF
import math
from push_net import PushPredictionNet
from models import reinforcement_net
from train_maskrcnn import get_model_instance_segmentation
from dataset import PushPredictionMultiDatasetEvaluation
from constants import (
DEPTH_MIN,
TARGET_LOWER,
TARGET_UPPER,
IS_REAL,
IMAGE_SIZE,
IMAGE_PAD_WIDTH,
PUSH_DISTANCE,
COLOR_MEAN,
COLOR_STD,
DEPTH_MEAN,
DEPTH_STD,
NUM_ROTATION,
GRIPPER_GRASP_INNER_DISTANCE_PIXEL,
GRIPPER_GRASP_WIDTH_PIXEL,
GRIPPER_GRASP_SAFE_WIDTH_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
IMAGE_PAD_DIFF,
PIXEL_SIZE,
IMAGE_PAD_SIZE,
PUSH_BUFFER,
GRASP_Q_GRASP_THRESHOLD,
BG_THRESHOLD,
COLOR_SPACE
)
from action_utils_mask import sample_actions as sample_actions_util
import imutils
import utils
class PushPredictor:
"""
Predict and generate images after push actions.
Assume the color image and depth image are well matched.
We use the masks to generate new images, so the quality of mask is important.
The input to this forward function should be returned from the sample_actions.
"""
def __init__(self):
self.device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# Initialize Mask R-CNN
self.mask_model = get_model_instance_segmentation(2)
self.mask_model.load_state_dict(torch.load("logs_image/maskrcnn.pth"))
self.mask_model = self.mask_model.to(self.device)
self.mask_model.eval()
# Initialize Push Prediction
self.push_model = PushPredictionNet()
self.push_model.load_state_dict(
torch.load("logs_push/push_prediction_model-75.pth")["model"]
)
self.push_model = self.push_model.to(self.device)
self.push_model.eval()
# Initialize Grasp Q Evaluation
self.grasp_model = reinforcement_net()
self.grasp_model.load_state_dict(
torch.load("logs_grasp/snapshot-post-020000.reinforcement.pth")["model"]
)
self.grasp_model = self.grasp_model.to(self.device)
self.grasp_model.eval()
self.move_recorder = {}
self.prediction_recorder = {}
def reset(self):
del self.move_recorder
del self.prediction_recorder
gc.collect()
self.move_recorder = {}
self.prediction_recorder = {}
@torch.no_grad()
def from_maskrcnn(self, color_image, depth_image, plot=False):
"""
Use Mask R-CNN to do instance segmentation and output masks in binary format.
"""
image = color_image.copy()
image = TF.to_tensor(image)
prediction = self.mask_model([image.to(self.device)])[0]
mask_objs = []
centers = []
blue_idx = -1
if plot:
pred_mask = np.zeros((IMAGE_SIZE, IMAGE_SIZE), dtype=np.uint8)
for idx, mask in enumerate(prediction["masks"]):
            # TODO: the score threshold (0.97 real / 0.98 sim) can be tuned
if IS_REAL:
threshold = 0.97
else:
threshold = 0.98
if prediction["scores"][idx] > threshold:
# get mask
img = mask[0].mul(255).byte().cpu().numpy()
img = cv2.GaussianBlur(img, (3, 3), 0)
img = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
if np.sum(img == 255) < 100:
continue
# get center
obj_cnt = cv2.findContours(img, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
obj_cnt = imutils.grab_contours(obj_cnt)
obj_cnt = sorted(obj_cnt, key=lambda x: cv2.contourArea(x))[
-1
] # the mask r cnn could give bad masks
M = cv2.moments(obj_cnt)
cX = round(M["m10"] / M["m00"])
cY = round(M["m01"] / M["m00"])
# get color and depth masks
color_mask = cv2.bitwise_and(color_image, color_image, mask=img)
temp = cv2.cvtColor(color_mask, cv2.COLOR_RGB2HSV)
temp = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
if np.sum(temp == 255) >= 100:
blue_idx = idx
depth_mask = cv2.bitwise_and(depth_image, depth_image, mask=img)
# get cropped masks
color_mask = np.pad(
color_mask,
(
(IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH),
(IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH),
(0, 0),
),
"constant",
constant_values=0,
)
color_mask = color_mask[
cY + IMAGE_PAD_WIDTH - 30 : cY + IMAGE_PAD_WIDTH + 30,
cX + IMAGE_PAD_WIDTH - 30 : cX + IMAGE_PAD_WIDTH + 30,
:,
]
depth_mask = np.pad(
depth_mask,
(
(IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH),
(IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH),
),
"constant",
constant_values=0,
)
depth_mask = depth_mask[
cY + IMAGE_PAD_WIDTH - 30 : cY + IMAGE_PAD_WIDTH + 30,
cX + IMAGE_PAD_WIDTH - 30 : cX + IMAGE_PAD_WIDTH + 30,
]
final_mask = (color_mask, depth_mask)
mask_objs.append(final_mask)
centers.append([cY + IMAGE_PAD_WIDTH, cX + IMAGE_PAD_WIDTH])
if plot:
pred_mask[img > 0] = 255 - idx * 20
cv2.imwrite(str(idx) + "mask.png", img)
if plot:
cv2.imwrite("pred.png", pred_mask)
print("Mask R-CNN: %d objects detected" % len(mask_objs), prediction["scores"].cpu())
if blue_idx != -1 and blue_idx != 0:
temp = mask_objs[0]
mask_objs[0] = mask_objs[blue_idx]
mask_objs[blue_idx] = temp
temp = centers[0]
centers[0] = centers[blue_idx]
centers[blue_idx] = temp
return mask_objs, centers
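    # Note (added): the swap above pins the blue target object at index 0, so
    # downstream consumers can treat mask_objs[0] and centers[0] as the target.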
def sample_actions(
self, color_image, depth_image, mask_objs, plot=False, start_pose=None, prev_move=None
):
"""
Sample actions around the objects, from the boundary to the center.
Assume there is no object in "black"
Output the rotated image, such that the push action is from left to right
"""
return sample_actions_util(
color_image,
depth_image,
mask_objs,
plot,
start_pose,
from_color=True,
prev_move=prev_move,
)
    # only rotated_color_image and rotated_depth_image are padded to 320x320
@torch.no_grad()
def predict(
self,
rotated_color_image,
rotated_depth_image,
rotated_action,
rotated_center,
rotated_angle,
rotated_binary_objs,
rotated_mask_objs,
plot=False,
):
# get data
dataset = PushPredictionMultiDatasetEvaluation(
rotated_depth_image, rotated_action, rotated_center, rotated_binary_objs
)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=len(rotated_depth_image), shuffle=False, num_workers=0
)
(
prev_poses,
action,
action_start_ori,
action_end_ori,
used_binary_img,
binary_objs_total,
num_obj,
) = next(iter(data_loader))
prev_poses = prev_poses.to(self.device, non_blocking=True)
used_binary_img = used_binary_img.to(self.device, non_blocking=True, dtype=torch.float)
binary_objs_total = binary_objs_total.to(self.device, non_blocking=True)
action = action.to(self.device, non_blocking=True)
# get output
output = self.push_model(prev_poses, action, used_binary_img, binary_objs_total, num_obj[0])
output = output.cpu().numpy()
# generate new images
prev_poses_input = prev_poses.cpu().numpy().astype(int)
prev_poses = copy.deepcopy(prev_poses_input)
action_start_ori = action_start_ori.numpy().astype(int)
action_end_ori = action_end_ori.numpy().astype(int)
action_start_ori_tile = np.tile(action_start_ori, num_obj[0])
action_start = action[:, :2].cpu().numpy().astype(int)
action_start_tile = np.tile(action_start, num_obj[0])
generated_color_images = []
generated_depth_images = []
generated_obj_masks = []
validations = []
for i in range(len(rotated_depth_image)):
i_output = output[i]
i_prev_poses = prev_poses[i]
i_action_start_ori_tile = action_start_ori_tile[i]
i_action_start_tile = action_start_tile[i]
i_prev_poses += i_action_start_ori_tile
i_prev_poses -= i_action_start_tile
i_rotated_angle = rotated_angle[i]
i_rotated_mask_objs, i_rotated_mask_obj_centers = rotated_mask_objs[i]
color_image = rotated_color_image[i]
depth_image = rotated_depth_image[i]
# transform points and fill them into a black image
generated_color_image = np.zeros_like(color_image)
generated_depth_image = np.zeros_like(depth_image)
obj_masks = []
obj_mask_centers = []
temp_obj_masks = []
temp_obj_mask_centers = []
# for each object
valid = True
for pi in range(num_obj[i]):
                # if the object would end up out of the boundary, we can skip this action
center = i_rotated_mask_obj_centers[pi]
center = np.array([[center]])
M = cv2.getRotationMatrix2D(
(
i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
),
-i_output[pi * 3 + 2],
1,
)
ori_M = M.copy()
M[0, 2] += i_output[pi * 3]
M[1, 2] += i_output[pi * 3 + 1]
new_center = cv2.transform(center, M)
new_center = np.transpose(new_center[0])
temp_obj_mask_centers.append(new_center)
ori_center = cv2.transform(center, ori_M)
ori_center = np.transpose(ori_center[0])
M = cv2.getRotationMatrix2D(
(IMAGE_PAD_SIZE // 2, IMAGE_PAD_SIZE // 2),
i_rotated_angle,
1,
)
new_center = [new_center[0][0], new_center[1][0]]
new_center = np.array([[new_center]])
new_center = cv2.transform(new_center, M)[0][0]
obj_mask_centers.append(new_center)
ori_center = [ori_center[0][0], ori_center[1][0]]
ori_center = np.array([[ori_center]])
ori_center = cv2.transform(ori_center, M)[0][0]
if (
new_center[1] - IMAGE_PAD_WIDTH
> IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or new_center[1] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
or new_center[0] - IMAGE_PAD_WIDTH
> IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or new_center[0] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
):
if not (
ori_center[1] - IMAGE_PAD_WIDTH
> IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or ori_center[1] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
or ori_center[0] - IMAGE_PAD_WIDTH
> IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
or ori_center[0] - IMAGE_PAD_WIDTH < PUSH_BUFFER / PIXEL_SIZE
):
valid = False
break
validations.append(valid)
if valid:
for pi in range(num_obj[i]):
                # the out-of-boundary check is done in the loop above; the old inline check is kept below for reference
# if (
# i_prev_poses[pi * 2 + 1] + i_output[pi * 3 + 1]
# > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2 + 1] + i_output[pi * 3 + 1] < PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2] + i_output[pi * 3]
# > IMAGE_SIZE - PUSH_BUFFER / PIXEL_SIZE
# or i_prev_poses[pi * 2] + i_output[pi * 3] < PUSH_BUFFER / PIXEL_SIZE
# ):
# valid = False
# break
# find out transformation
# mask
mask_color = i_rotated_mask_objs[pi][0]
mask_depth = i_rotated_mask_objs[pi][1]
rotated_color = utils.rotate(mask_color, i_output[pi * 3 + 2])
rotated_depth = utils.rotate(mask_depth, i_output[pi * 3 + 2])
temp_obj_masks.append((rotated_color, rotated_depth))
# center
# center = i_rotated_mask_obj_centers[pi]
# center = np.array([[center]])
# M = cv2.getRotationMatrix2D(
# (
# i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
# i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
# ),
# -i_output[pi * 3 + 2],
# 1,
# )
# M[0, 2] += i_output[pi * 3]
# M[1, 2] += i_output[pi * 3 + 1]
# new_center = cv2.transform(center, M)
# new_center = np.transpose(new_center[0])
# temp_obj_mask_centers.append(new_center)
# validations.append(valid)
# if valid:
for pi in range(num_obj[i]):
mask = temp_obj_masks[pi]
new_center = temp_obj_mask_centers[pi]
color = mask[0]
fill_color = np.nonzero(np.sum(color, axis=2))
fill_color_shift = (
np.clip(fill_color[0] + new_center[0] - 30, 0, IMAGE_PAD_SIZE - 1),
np.clip(fill_color[1] + new_center[1] - 30, 0, IMAGE_PAD_SIZE - 1)
)
generated_color_image[fill_color_shift] = color[fill_color]
depth = mask[1]
fill_depth = np.nonzero(depth)
fill_depth_shift = (
np.clip(fill_depth[0] + new_center[0] - 30, 0, IMAGE_PAD_SIZE - 1),
np.clip(fill_depth[1] + new_center[1] - 30, 0, IMAGE_PAD_SIZE - 1)
)
generated_depth_image[fill_depth_shift] = depth[fill_depth]
generated_obj_mask_color = utils.rotate(color, -i_rotated_angle)
generated_obj_mask_depth = utils.rotate(depth, -i_rotated_angle)
obj_masks.append((generated_obj_mask_color, generated_obj_mask_depth))
# M = cv2.getRotationMatrix2D(
# (IMAGE_PAD_SIZE // 2, IMAGE_PAD_SIZE // 2),
# i_rotated_angle,
# 1,
# )
# new_center = [new_center[0][0], new_center[1][0]]
# new_center = np.array([[new_center]])
# new_center = cv2.transform(new_center, M)[0][0]
# obj_mask_centers.append(new_center)
if plot:
cv2.circle(
generated_color_image,
(
i_prev_poses[pi * 2 + 1] + IMAGE_PAD_WIDTH,
i_prev_poses[pi * 2] + IMAGE_PAD_WIDTH,
),
3,
(255, 255, 255),
-1,
)
cv2.circle(
generated_color_image,
(
round(i_prev_poses[pi * 2 + 1] + i_output[pi * 3 + 1]) + IMAGE_PAD_WIDTH,
round(i_prev_poses[pi * 2] + i_output[pi * 3]) + IMAGE_PAD_WIDTH,
),
3,
(128, 255, 0),
-1,
)
if plot:
cv2.arrowedLine(
generated_color_image,
(
action_start_ori[i][1] + IMAGE_PAD_WIDTH,
action_start_ori[i][0] + IMAGE_PAD_WIDTH,
),
(
action_end_ori[i][1] + IMAGE_PAD_WIDTH,
action_end_ori[i][0] + IMAGE_PAD_WIDTH,
),
(255, 0, 255),
2,
tipLength=0.4,
)
generated_color_image = utils.rotate(generated_color_image, -i_rotated_angle)
generated_depth_image = utils.rotate(generated_depth_image, -i_rotated_angle)
generated_color_image = generated_color_image[
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, :
]
generated_depth_image = generated_depth_image[
IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF
]
generated_depth_image = generated_depth_image.astype(np.float32)
generated_color_images.append(generated_color_image)
generated_depth_images.append(generated_depth_image)
generated_obj_masks.append((obj_masks, obj_mask_centers))
return generated_color_images, generated_depth_images, generated_obj_masks, validations
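    # Note (added): for each object pi, the push-prediction output packs three
    # values at indices (pi * 3, pi * 3 + 1, pi * 3 + 2): two translation
    # offsets in pixels and a rotation in degrees, which the loop above applies
    # to the object's mask and center before rotating everything back by
    # -rotated_angle.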
@torch.no_grad()
def get_grasp_q(self, color_heightmap, depth_heightmap, post_checking=False):
color_heightmap_pad = np.copy(color_heightmap)
depth_heightmap_pad = np.copy(depth_heightmap)
        # repaint the target object with a light (cyan) color in the real-world setting
if IS_REAL:
temp = cv2.cvtColor(color_heightmap_pad, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
color_heightmap_pad[mask == 255] = [118, 183, 178] # cyan
# Add extra padding (to handle rotations inside network)
color_heightmap_pad = np.pad(
color_heightmap_pad,
((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)),
"constant",
constant_values=0,
)
depth_heightmap_pad = np.pad(
depth_heightmap_pad, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
# Pre-process color image (scale and normalize)
image_mean = COLOR_MEAN
image_std = COLOR_STD
input_color_image = color_heightmap_pad.astype(float) / 255
for c in range(3):
input_color_image[:, :, c] = (input_color_image[:, :, c] - image_mean[c]) / image_std[c]
# Pre-process depth image (normalize)
image_mean = DEPTH_MEAN
image_std = DEPTH_STD
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1], 1)
input_depth_image = np.copy(depth_heightmap_pad)
input_depth_image[:, :, 0] = (input_depth_image[:, :, 0] - image_mean[0]) / image_std[0]
# Construct minibatch of size 1 (b,c,h,w)
input_color_image.shape = (
input_color_image.shape[0],
input_color_image.shape[1],
input_color_image.shape[2],
1,
)
input_depth_image.shape = (
input_depth_image.shape[0],
input_depth_image.shape[1],
input_depth_image.shape[2],
1,
)
input_color_data = torch.from_numpy(input_color_image.astype(np.float32)).permute(
3, 2, 0, 1
)
input_depth_data = torch.from_numpy(input_depth_image.astype(np.float32)).permute(
3, 2, 0, 1
)
# Pass input data through model
output_prob = self.grasp_model(input_color_data, input_depth_data, True, -1, False)
# Return Q values (and remove extra padding)
for rotate_idx in range(len(output_prob)):
if rotate_idx == 0:
grasp_predictions = (
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
:,
:,
]
)
else:
grasp_predictions = np.concatenate(
(
grasp_predictions,
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
:,
:,
],
),
axis=0,
)
# post process, only grasp one object, focus on blue object
temp = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
mask_bg = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask_bg_pad = np.pad(mask_bg, IMAGE_PAD_WIDTH, "constant", constant_values=255)
# focus on blue
for rotate_idx in range(len(grasp_predictions)):
grasp_predictions[rotate_idx][mask_pad != 255] = 0
padding_width_start = IMAGE_PAD_WIDTH
padding_width_end = grasp_predictions[0].shape[0] - IMAGE_PAD_WIDTH
# only grasp one object
kernel_big = np.ones(
(GRIPPER_GRASP_SAFE_WIDTH_PIXEL, GRIPPER_GRASP_INNER_DISTANCE_PIXEL), dtype=np.uint8
)
        if IS_REAL:  # with the real color/depth sensor and lighting, objects appear slightly smaller
threshold_big = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 7
threshold_small = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 15
else:
threshold_big = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 10
threshold_small = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 20
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1])
for rotate_idx in range(len(grasp_predictions)):
color_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
color_mask[color_mask == 0] = 1
color_mask[color_mask == 255] = 0
no_target_mask = color_mask
bg_mask = utils.rotate(mask_bg_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
no_target_mask[bg_mask == 255] = 0
# only grasp one object
invalid_mask = cv2.filter2D(no_target_mask, -1, kernel_big)
invalid_mask = utils.rotate(invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True)
grasp_predictions[rotate_idx][invalid_mask > threshold_small] = (
grasp_predictions[rotate_idx][invalid_mask > threshold_small] / 2
)
grasp_predictions[rotate_idx][invalid_mask > threshold_big] = 0
# collision checking, only work for one level
if post_checking:
mask = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask = 255 - mask
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
check_kernel = np.ones(
(GRIPPER_GRASP_WIDTH_PIXEL, GRIPPER_GRASP_OUTER_DISTANCE_PIXEL), dtype=np.uint8
)
left_bound = math.floor(
(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL - GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2
)
right_bound = (
math.ceil(
(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL + GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2
)
+ 1
)
check_kernel[:, left_bound:right_bound] = 0
for rotate_idx in range(len(grasp_predictions)):
object_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
invalid_mask = cv2.filter2D(object_mask, -1, check_kernel)
invalid_mask[invalid_mask > 5] = 255
invalid_mask = utils.rotate(
invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True
)
grasp_predictions[rotate_idx][invalid_mask > 128] = 0
grasp_predictions = grasp_predictions[
:, padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
best_pix_ind = np.unravel_index(np.argmax(grasp_predictions), grasp_predictions.shape)
grasp_q_value = grasp_predictions[best_pix_ind]
return grasp_q_value, best_pix_ind, grasp_predictions
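    # Hypothetical usage of get_grasp_q (added sketch; `predictor` and aligned
    # heightmaps are assumed to exist):
    # q, best_pix, q_maps = predictor.get_grasp_q(color, depth, post_checking=True)
    # if q > GRASP_Q_GRASP_THRESHOLD:  # constant imported from constants above
    #     pass  # best_pix = (rotation index, row, col) of the chosen grasp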
def get_prediction_vis(self, predictions, color_heightmap, best_pix_ind):
canvas = None
num_rotations = predictions.shape[0]
for canvas_row in range(int(num_rotations / 4)):
tmp_row_canvas = None
for canvas_col in range(4):
rotate_idx = canvas_row * 4 + canvas_col
prediction_vis = predictions[rotate_idx, :, :].copy()
prediction_vis = np.clip(prediction_vis, 0, 1)
prediction_vis.shape = (predictions.shape[1], predictions.shape[2])
prediction_vis = cv2.applyColorMap(
(prediction_vis * 255).astype(np.uint8), cv2.COLORMAP_JET
)
if rotate_idx == best_pix_ind[0]:
prediction_vis = cv2.circle(
prediction_vis,
(int(best_pix_ind[2]), int(best_pix_ind[1])),
7,
(0, 0, 255),
2,
)
prediction_vis = utils.rotate(prediction_vis, rotate_idx * (360.0 / num_rotations))
if rotate_idx == best_pix_ind[0]:
center = np.array([[[int(best_pix_ind[2]), int(best_pix_ind[1])]]])
M = cv2.getRotationMatrix2D(
(
prediction_vis.shape[1] // 2,
prediction_vis.shape[0] // 2,
),
rotate_idx * (360.0 / num_rotations),
1,
)
center = cv2.transform(center, M)
center = np.transpose(center[0])
prediction_vis = cv2.rectangle(
prediction_vis,
(max(0, int(center[0]) - GRIPPER_GRASP_INNER_DISTANCE_PIXEL // 2), max(0, int(center[1]) - GRIPPER_GRASP_WIDTH_PIXEL // 2)),
(min(prediction_vis.shape[1], int(center[0]) + GRIPPER_GRASP_INNER_DISTANCE_PIXEL // 2), min(prediction_vis.shape[0], int(center[1]) + GRIPPER_GRASP_WIDTH_PIXEL // 2)),
(100, 255, 0),
1
)
prediction_vis = cv2.rectangle(
prediction_vis,
(max(0, int(center[0]) - GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2), max(0, int(center[1]) - GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2)),
(min(prediction_vis.shape[1], int(center[0]) + GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2), min(prediction_vis.shape[0], int(center[1]) + GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2)),
(100, 100, 155),
1,
)
background_image = utils.rotate(
color_heightmap, rotate_idx * (360.0 / num_rotations)
)
prediction_vis = (
0.5 * cv2.cvtColor(background_image, cv2.COLOR_RGB2BGR) + 0.5 * prediction_vis
).astype(np.uint8)
if tmp_row_canvas is None:
tmp_row_canvas = prediction_vis
else:
tmp_row_canvas = np.concatenate((tmp_row_canvas, prediction_vis), axis=1)
if canvas is None:
canvas = tmp_row_canvas
else:
canvas = np.concatenate((canvas, tmp_row_canvas), axis=0)
return canvas
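# --- Hypothetical end-to-end flow (added sketch; real images are assumed) ---
# predictor = PushPredictor()
# mask_objs, centers = predictor.from_maskrcnn(color_image, depth_image)
# samples = predictor.sample_actions(color_image, depth_image, mask_objs)
# The rotated outputs of sample_actions feed predictor.predict(), and the
# generated images are then re-scored with predictor.get_grasp_q().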
| 29,449 | 43.961832 | 198 |
py
|
more
|
more-main/trainer.py
|
import os
import numpy as np
import math
import cv2
import torch
from torch.autograd import Variable
from models import reinforcement_net
from scipy import ndimage
from constants import (
COLOR_MEAN,
COLOR_STD,
DEPTH_MEAN,
DEPTH_STD,
DEPTH_MIN,
IMAGE_PAD_WIDTH,
NUM_ROTATION,
GRIPPER_GRASP_SAFE_WIDTH_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
GRIPPER_GRASP_INNER_DISTANCE_PIXEL,
TARGET_LOWER,
TARGET_UPPER,
BG_THRESHOLD,
GRIPPER_GRASP_WIDTH_PIXEL,
IMAGE_PAD_SIZE,
IMAGE_PAD_DIFF,
IMAGE_SIZE,
)
import utils
class Trainer(object):
def __init__(
self,
method,
push_rewards,
future_reward_discount,
is_testing,
load_snapshot,
snapshot_file,
force_cpu,
is_baseline=False,
):
self.method = method
self.is_baseline = is_baseline
# Check if CUDA can be used
if torch.cuda.is_available() and not force_cpu:
print("CUDA detected. Running with GPU acceleration.")
self.use_cuda = True
elif force_cpu:
print("CUDA detected, but overriding with option '--cpu'. Running with only CPU.")
self.use_cuda = False
else:
print("CUDA is *NOT* detected. Running with only CPU.")
self.use_cuda = False
# Fully convolutional classification network for supervised learning
if self.method == "reinforcement":
self.model = reinforcement_net()
self.push_rewards = push_rewards
self.future_reward_discount = future_reward_discount
# Initialize Huber loss
self.push_criterion = torch.nn.SmoothL1Loss(reduction="none") # Huber loss
self.grasp_criterion = torch.nn.SmoothL1Loss(reduction="none") # Huber loss
if self.use_cuda:
self.push_criterion = self.push_criterion.cuda()
self.grasp_criterion = self.grasp_criterion.cuda()
# Load pre-trained model
if load_snapshot:
states = torch.load(snapshot_file)
if "model" in states:
self.model.load_state_dict(states["model"])
else:
self.model.load_state_dict(states)
print("Pre-trained model snapshot loaded from: %s" % (snapshot_file))
# Convert model from CPU to GPU
if self.use_cuda:
self.model = self.model.cuda()
# Set model to training mode
self.model.train()
# Initialize optimizer
self.iteration = 0
        # testing and baseline runs use lr 1e-5; only regular training uses 5e-5
        if is_testing or is_baseline:
            lr = 1e-5
        else:
            lr = 5e-5
        self.optimizer = torch.optim.SGD(
            self.model.parameters(), lr=lr, momentum=0.9, weight_decay=2e-5
        )
if is_baseline:
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
self.optimizer, step_size=5000, gamma=0.5
)
else:
self.lr_scheduler = torch.optim.lr_scheduler.StepLR(
self.optimizer, step_size=2000, gamma=0.5
)
# Initialize lists to save execution info and RL variables
self.executed_action_log = []
self.label_value_log = []
self.reward_value_log = []
self.predicted_value_log = []
self.use_heuristic_log = []
self.is_exploit_log = []
self.clearance_log = []
self.loss_log = []
if is_testing:
self.batch_size = 2
else:
self.batch_size = 12
self.loss_list = []
# Pre-load execution info and RL variables
def preload(self, transitions_directory):
self.executed_action_log = np.loadtxt(
os.path.join(transitions_directory, "executed-action.log.txt"), delimiter=" "
)
self.iteration = self.executed_action_log.shape[0] - 2
self.executed_action_log = self.executed_action_log[0 : self.iteration, :]
self.executed_action_log = self.executed_action_log.tolist()
self.label_value_log = np.loadtxt(
os.path.join(transitions_directory, "label-value.log.txt"), delimiter=" "
)
self.label_value_log = self.label_value_log[0 : self.iteration]
self.label_value_log.shape = (self.iteration, 1)
self.label_value_log = self.label_value_log.tolist()
self.predicted_value_log = np.loadtxt(
os.path.join(transitions_directory, "predicted-value.log.txt"), delimiter=" "
)
self.predicted_value_log = self.predicted_value_log[0 : self.iteration]
self.predicted_value_log.shape = (self.iteration, 1)
self.predicted_value_log = self.predicted_value_log.tolist()
self.reward_value_log = np.loadtxt(
os.path.join(transitions_directory, "reward-value.log.txt"), delimiter=" "
)
self.reward_value_log = self.reward_value_log[0 : self.iteration]
self.reward_value_log.shape = (self.iteration, 1)
self.reward_value_log = self.reward_value_log.tolist()
self.use_heuristic_log = np.loadtxt(
os.path.join(transitions_directory, "use-heuristic.log.txt"), delimiter=" "
)
self.use_heuristic_log = self.use_heuristic_log[0 : self.iteration]
self.use_heuristic_log.shape = (self.iteration, 1)
self.use_heuristic_log = self.use_heuristic_log.tolist()
self.is_exploit_log = np.loadtxt(
os.path.join(transitions_directory, "is-exploit.log.txt"), delimiter=" "
)
self.is_exploit_log = self.is_exploit_log[0 : self.iteration]
self.is_exploit_log.shape = (self.iteration, 1)
self.is_exploit_log = self.is_exploit_log.tolist()
self.clearance_log = np.loadtxt(
os.path.join(transitions_directory, "clearance.log.txt"), delimiter=" "
)
self.clearance_log.shape = (self.clearance_log.shape[0], 1)
self.clearance_log = self.clearance_log.tolist()
# Compute forward pass through model to compute affordances/Q
def forward(
self,
color_heightmap,
depth_heightmap,
is_volatile=False,
specific_rotation=-1,
use_push=True,
):
color_heightmap_pad = np.copy(color_heightmap)
depth_heightmap_pad = np.copy(depth_heightmap)
# Add extra padding (to handle rotations inside network)
color_heightmap_pad = np.pad(
color_heightmap_pad,
((IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (IMAGE_PAD_WIDTH, IMAGE_PAD_WIDTH), (0, 0)),
"constant",
constant_values=0,
)
depth_heightmap_pad = np.pad(
depth_heightmap_pad, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
# Pre-process color image (scale and normalize)
image_mean = COLOR_MEAN
image_std = COLOR_STD
input_color_image = color_heightmap_pad.astype(float) / 255
for c in range(3):
input_color_image[:, :, c] = (input_color_image[:, :, c] - image_mean[c]) / image_std[c]
# Pre-process depth image (normalize)
image_mean = DEPTH_MEAN
image_std = DEPTH_STD
depth_heightmap_pad.shape = (depth_heightmap_pad.shape[0], depth_heightmap_pad.shape[1], 1)
input_depth_image = np.copy(depth_heightmap_pad)
input_depth_image[:, :, 0] = (input_depth_image[:, :, 0] - image_mean[0]) / image_std[0]
# Construct minibatch of size 1 (b,c,h,w)
input_color_image.shape = (
input_color_image.shape[0],
input_color_image.shape[1],
input_color_image.shape[2],
1,
)
input_depth_image.shape = (
input_depth_image.shape[0],
input_depth_image.shape[1],
input_depth_image.shape[2],
1,
)
input_color_data = torch.from_numpy(input_color_image.astype(np.float32)).permute(
3, 2, 0, 1
)
input_depth_data = torch.from_numpy(input_depth_image.astype(np.float32)).permute(
3, 2, 0, 1
)
# Pass input data through model
output_prob = self.model(
input_color_data, input_depth_data, is_volatile, specific_rotation, use_push
)
if self.method == "reinforcement":
# Return Q values (and remove extra padding)
for rotate_idx in range(len(output_prob)):
if rotate_idx == 0:
if not use_push:
push_predictions = 0
grasp_predictions = (
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
]
)
else:
push_predictions = (
output_prob[rotate_idx][0]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
]
)
grasp_predictions = (
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
]
)
else:
if not use_push:
push_predictions = 0
grasp_predictions = np.concatenate(
(
grasp_predictions,
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
],
),
axis=0,
)
else:
push_predictions = np.concatenate(
(
push_predictions,
output_prob[rotate_idx][0]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
],
),
axis=0,
)
grasp_predictions = np.concatenate(
(
grasp_predictions,
output_prob[rotate_idx][1]
.cpu()
.data.numpy()[
:,
0,
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[0] - IMAGE_PAD_WIDTH
),
int(IMAGE_PAD_WIDTH) : int(
color_heightmap_pad.shape[1] - IMAGE_PAD_WIDTH
),
],
),
axis=0,
)
return push_predictions, grasp_predictions
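    # Note (added): when run with is_volatile=True the loop above stacks one Q
    # map per rotation, so push_predictions and grasp_predictions come back
    # with shape (num_rotations, IMAGE_SIZE, IMAGE_SIZE) once the padding is
    # cropped off.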
def focus_on_target(
self, color_heightmap, depth_heightmap, grasp_predictions, target_lower, target_upper
):
"""Should match push_predictor"""
# focus on target object
temp = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
mask_bg = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask_bg_pad = np.pad(mask_bg, IMAGE_PAD_WIDTH, "constant", constant_values=255)
# focus on blue
for rotate_idx in range(len(grasp_predictions)):
grasp_predictions[rotate_idx][mask != 255] = 0
padding_width_start = IMAGE_PAD_WIDTH
padding_width_end = grasp_predictions[0].shape[0] + IMAGE_PAD_WIDTH
# only grasp one object
kernel_big = np.ones(
(GRIPPER_GRASP_SAFE_WIDTH_PIXEL, GRIPPER_GRASP_INNER_DISTANCE_PIXEL), dtype=np.float32
)
threshold_big = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 10
threshold_small = GRIPPER_GRASP_SAFE_WIDTH_PIXEL * GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 20
depth_heightmap_pad = np.pad(
depth_heightmap, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
for rotate_idx in range(len(grasp_predictions)):
color_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
color_mask[color_mask == 0] = 1
color_mask[color_mask == 255] = 0
no_target_mask = color_mask
bg_mask = utils.rotate(mask_bg_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
no_target_mask[bg_mask == 255] = 0
# only grasp one object
invalid_mask = cv2.filter2D(no_target_mask, -1, kernel_big)
invalid_mask = utils.rotate(invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True)
invalid_mask = invalid_mask[
padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
grasp_predictions[rotate_idx][invalid_mask > threshold_small] = (
grasp_predictions[rotate_idx][invalid_mask > threshold_small] / 2
)
grasp_predictions[rotate_idx][invalid_mask > threshold_big] = 0
# collision checking
mask = cv2.inRange(temp, BG_THRESHOLD["low"], BG_THRESHOLD["high"])
mask = 255 - mask
mask_pad = np.pad(mask, IMAGE_PAD_WIDTH, "constant", constant_values=0)
check_kernel = np.ones(
(GRIPPER_GRASP_WIDTH_PIXEL, GRIPPER_GRASP_OUTER_DISTANCE_PIXEL), dtype=np.uint8
)
left_bound = math.ceil(
(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL - GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2
)
right_bound = (
math.ceil((GRIPPER_GRASP_OUTER_DISTANCE_PIXEL + GRIPPER_GRASP_INNER_DISTANCE_PIXEL) / 2)
+ 1
)
check_kernel[:, left_bound:right_bound] = 0
for rotate_idx in range(len(grasp_predictions)):
object_mask = utils.rotate(mask_pad, rotate_idx * (360.0 / NUM_ROTATION), True)
invalid_mask = cv2.filter2D(object_mask, -1, check_kernel)
invalid_mask[invalid_mask > 5] = 255
invalid_mask = utils.rotate(invalid_mask, -rotate_idx * (360.0 / NUM_ROTATION), True)
invalid_mask = invalid_mask[
padding_width_start:padding_width_end, padding_width_start:padding_width_end
]
grasp_predictions[rotate_idx][invalid_mask > 128] = 0
return grasp_predictions
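    # Note (added): the two kernel thresholds above implement a soft/hard
    # suppression: Q values are halved where non-target pixels under the
    # gripper footprint exceed threshold_small and zeroed past threshold_big;
    # the final loop zeroes poses whose gripper jaws would collide with any object.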
def get_label_value(
self,
primitive_action,
push_success,
grasp_success,
change_detected,
prev_push_predictions,
prev_grasp_predictions,
next_color_heightmap,
next_depth_heightmap,
prev_depth_heightmap,
use_push=True,
):
if self.method == "reinforcement":
# Compute current reward
current_reward = 0
if primitive_action == "push":
if change_detected:
current_reward = 0.0
elif primitive_action == "grasp":
if grasp_success:
current_reward = 1.0
# Compute future reward
            # this variant assigns no future reward
            future_reward = 0
if primitive_action == "push":
_, next_grasp_predictions = self.forward(
next_color_heightmap,
next_depth_heightmap,
is_volatile=True,
use_push=use_push,
)
if np.max(next_grasp_predictions) > np.max(prev_grasp_predictions) * 1.1:
current_reward = (
np.max(next_grasp_predictions) + np.max(prev_grasp_predictions)
) / 2
else:
future_reward = 0
print(
"Prediction:",
np.max(prev_grasp_predictions),
np.max(next_grasp_predictions),
)
delta_area = self.push_change_area(prev_depth_heightmap, next_depth_heightmap)
if delta_area > 300: # 300 can be changed
if current_reward < 0.8:
current_reward = 0.8
elif delta_area < -100: # -100 can be changed
current_reward = 0
future_reward = 0
print("Current reward: %f" % (current_reward))
print("Future reward: %f" % (future_reward))
if primitive_action == "push" and not self.push_rewards:
expected_reward = self.future_reward_discount * future_reward
print(
"Expected reward: %f + %f x %f = %f"
% (0.0, self.future_reward_discount, future_reward, expected_reward)
)
else:
expected_reward = current_reward + self.future_reward_discount * future_reward
print(
"Expected reward: %f + %f x %f = %f"
% (current_reward, self.future_reward_discount, future_reward, expected_reward)
)
return expected_reward, current_reward
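    # Worked example (added note): future_reward is fixed to 0 in this method,
    # so the printed formula collapses to expected = current + discount * 0;
    # e.g. a successful grasp with current_reward = 1.0 yields expected_reward = 1.0.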
def get_label_value_base(
self,
primitive_action,
push_success,
grasp_success,
change_detected,
prev_push_predictions,
prev_grasp_predictions,
next_color_heightmap,
next_depth_heightmap,
use_push=True,
target=None,
prev_color_img=None,
prev_depth_img=None,
):
"""As baseline label value function"""
if self.method == "reinforcement":
# Compute current reward
current_reward = 0
if primitive_action == "push":
if change_detected:
current_reward = 0.0
elif primitive_action == "grasp":
if grasp_success:
crop = prev_color_img[
max(0, target[0] - 2) : min(target[0] + 3, IMAGE_SIZE),
max(0, target[1] - 2) : min(target[1] + 3, IMAGE_SIZE),
]
crop = cv2.cvtColor(crop, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(crop, TARGET_LOWER, TARGET_UPPER)
if np.sum(mask) > 0:
current_reward = 20
else:
current_reward = 0
# Compute future reward
if not change_detected and not grasp_success:
future_reward = 0
else:
next_push_predictions, next_grasp_predictions = self.forward(
next_color_heightmap, next_depth_heightmap, is_volatile=True, use_push=use_push
)
target_next_grasp_predictions = self.focus_on_target(
next_color_heightmap,
next_depth_heightmap,
next_grasp_predictions,
TARGET_LOWER,
TARGET_UPPER,
)
target_prev_grasp_predictions = self.focus_on_target(
prev_color_img,
prev_depth_img,
prev_grasp_predictions,
TARGET_LOWER,
TARGET_UPPER,
)
future_reward = max(np.max(next_push_predictions), np.max(next_grasp_predictions))
if primitive_action == "push":
if (
np.max(target_next_grasp_predictions)
> np.max(target_prev_grasp_predictions) * 1.1
):
current_reward = 1
print(
"Prediction:",
np.max(prev_grasp_predictions),
np.max(next_grasp_predictions),
)
delta_area = self.push_change_area(prev_depth_img, next_depth_heightmap)
if delta_area > 300: # 300 can be changed
if current_reward < 1:
current_reward = 0.5
elif delta_area < -100: # -100 can be changed
current_reward = 0
future_reward = 0
print("Current reward: %f" % (current_reward))
print("Future reward: %f" % (future_reward))
if primitive_action == "push" and not self.push_rewards:
expected_reward = self.future_reward_discount * future_reward
print(
"Expected reward: %f + %f x %f = %f"
% (0.0, self.future_reward_discount, future_reward, expected_reward)
)
else:
expected_reward = current_reward + self.future_reward_discount * future_reward
print(
"Expected reward: %f + %f x %f = %f"
% (current_reward, self.future_reward_discount, future_reward, expected_reward)
)
return expected_reward, current_reward
def get_neg(self, depth_heightmap, label, best_pix_ind):
"""Should match train_foreground"""
depth_heightmap_pad = np.copy(depth_heightmap)
depth_heightmap_pad = np.pad(
depth_heightmap_pad, IMAGE_PAD_WIDTH, "constant", constant_values=0
)
depth_heightmap_pad = utils.rotate(
depth_heightmap_pad, best_pix_ind * (360.0 / NUM_ROTATION)
)
label = ndimage.rotate(
label, best_pix_ind * (360.0 / NUM_ROTATION), axes=(2, 1), reshape=False
)
label = np.round(label)
x_y_idx = np.argwhere(label > 0)
for idx in x_y_idx:
_, x, y = tuple(idx)
left_area = depth_heightmap_pad[
max(0, x - math.ceil(GRIPPER_GRASP_SAFE_WIDTH_PIXEL / 2)) : min(
depth_heightmap_pad.shape[0],
x + math.ceil(GRIPPER_GRASP_SAFE_WIDTH_PIXEL / 2) + 1,
),
max(0, y - math.ceil(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL / 2)) : max(
0, y - math.ceil(GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 2) + 1
),
]
right_area = depth_heightmap_pad[
max(0, x - math.ceil(GRIPPER_GRASP_SAFE_WIDTH_PIXEL / 2)) : min(
depth_heightmap_pad.shape[0],
x + math.ceil(GRIPPER_GRASP_SAFE_WIDTH_PIXEL / 2) + 1,
),
min(
depth_heightmap_pad.shape[1] - 1,
y + math.ceil(GRIPPER_GRASP_INNER_DISTANCE_PIXEL / 2),
) : min(
depth_heightmap_pad.shape[1],
y + math.ceil(GRIPPER_GRASP_OUTER_DISTANCE_PIXEL / 2) + 1,
),
]
if (
np.sum(left_area > DEPTH_MIN) > 0
and np.sum((left_area - depth_heightmap_pad[x, y]) > -0.05) > 0
) or (
np.sum(right_area > DEPTH_MIN) > 0
and np.sum((right_area - depth_heightmap_pad[x, y]) > -0.05) > 0
):
label[0, x, y] = 0
label = ndimage.rotate(
label, -best_pix_ind * (360.0 / NUM_ROTATION), axes=(2, 1), reshape=False
)
label = np.round(label)
return label
# Compute labels and backpropagate
def backprop(
self,
color_heightmap,
depth_heightmap,
primitive_action,
best_pix_ind,
label_value,
use_push=True,
):
if self.method == "reinforcement":
batch_lose = -1
# Compute labels
label = np.zeros((1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE))
action_area = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
action_area[best_pix_ind[1]][best_pix_ind[2]] = 1
tmp_label = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
tmp_label[action_area > 0] = label_value
label[0, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF] = tmp_label
# Compute label mask
label_weights = np.zeros(label.shape)
tmp_label_weights = np.zeros((IMAGE_SIZE, IMAGE_SIZE))
tmp_label_weights[action_area > 0] = 1
label_weights[
0, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF
] = tmp_label_weights
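# Only the single executed pixel carries loss weight; every other pixel
# is masked out of the backward pass.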
# Compute loss and backward pass
if len(self.loss_list) == 0:
self.optimizer.zero_grad()
if primitive_action == "grasp" and label_value > 0:
neg_loss = []
for i in range(self.model.num_rotations):
if i != best_pix_ind[0]:
neg_label = self.get_neg(depth_heightmap, label.copy(), i)
if (
neg_label[
0, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF, IMAGE_PAD_WIDTH:IMAGE_PAD_DIFF
][best_pix_ind[1]][best_pix_ind[2]]
== 0
):
_, _ = self.forward(
color_heightmap,
depth_heightmap,
is_volatile=False,
specific_rotation=i,
use_push=use_push,
)
loss = self.grasp_criterion(
self.model.output_prob[0][1].view(
1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE
),
torch.from_numpy(neg_label)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda(),
) * Variable(
torch.from_numpy(label_weights)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
)
loss = loss.sum()
neg_loss.append(loss)
if len(neg_loss) > 0:
self.loss_list.append(sum(neg_loss) / len(neg_loss))
if primitive_action == "push":
if not self.is_baseline:
if label_value > 0:
label_weights *= 2 # compensate for the smaller number of push samples
# Do forward pass with specified rotation (to save gradients)
_, _ = self.forward(
color_heightmap,
depth_heightmap,
is_volatile=False,
specific_rotation=best_pix_ind[0],
use_push=use_push,
)
if self.use_cuda:
loss = self.push_criterion(
self.model.output_prob[0][0].view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(
torch.from_numpy(label)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
),
) * Variable(
torch.from_numpy(label_weights)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda(),
requires_grad=False,
)
else:
loss = self.push_criterion(
self.model.output_prob[0][0].view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(torch.from_numpy(label).float()),
) * Variable(torch.from_numpy(label_weights).float(), requires_grad=False)
loss = loss.sum()
self.loss_list.append(loss)
if len(self.loss_list) >= self.batch_size:
total_loss = sum(self.loss_list)
print("Batch Loss:", total_loss.cpu().item())
self.loss_log.append([self.iteration, total_loss.cpu().item()])
mean_loss = total_loss / len(self.loss_list)
mean_loss.backward()
self.loss_list = []
# loss.backward()
loss_value = loss.cpu().data.numpy()
elif primitive_action == "grasp":
if label_value > 0:
if self.is_baseline:
label_weights *= 4
else:
label_weights *= 2
# Do forward pass with specified rotation (to save gradients)
_, _ = self.forward(
color_heightmap,
depth_heightmap,
is_volatile=False,
specific_rotation=best_pix_ind[0],
use_push=use_push,
)
if self.use_cuda:
loss1 = self.grasp_criterion(
self.model.output_prob[0][1].view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(
torch.from_numpy(label)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
),
) * Variable(
torch.from_numpy(label_weights)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
)
else:
loss1 = self.grasp_criterion(
self.model.output_prob[0][1].view(1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(torch.from_numpy(label).float()),
) * Variable(torch.from_numpy(label_weights).float())
loss1 = loss1.sum()
self.loss_list.append(loss1)
# loss.backward()
loss_value = loss1.detach().cpu().data.numpy()
opposite_rotate_idx = (
best_pix_ind[0] + self.model.num_rotations // 2
) % self.model.num_rotations
_, _ = self.forward(
color_heightmap,
depth_heightmap,
is_volatile=False,
specific_rotation=opposite_rotate_idx,
use_push=use_push,
)
if self.use_cuda:
loss2 = self.grasp_criterion(
self.model.output_prob[0][1].view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(
torch.from_numpy(label)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
),
) * Variable(
torch.from_numpy(label_weights)
.view(1, 1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE)
.float()
.cuda()
)
else:
loss2 = self.grasp_criterion(
self.model.output_prob[0][1].view(1, IMAGE_PAD_SIZE, IMAGE_PAD_SIZE),
Variable(torch.from_numpy(label).float()),
) * Variable(torch.from_numpy(label_weights).float())
loss2 = loss2.sum()
self.loss_list.append(loss2)
if len(self.loss_list) >= self.batch_size:
batch_size = len(self.loss_list)
total_loss = sum(self.loss_list)
mean_loss = total_loss / batch_size
mean_loss.backward()
self.loss_list = []
total_loss = total_loss.detach().cpu().item()
print("Batch Loss:", total_loss, batch_size)
self.loss_log.append([self.iteration, total_loss])
# loss.backward()
loss_value += loss2.detach().cpu().data.numpy()
loss_value = loss_value / 2
if len(self.loss_list) == 0:
print("Training loss: %f" % (loss_value.sum()))
self.optimizer.step()
self.lr_scheduler.step()
def get_prediction_vis(self, predictions, color_heightmap, best_pix_ind):
canvas = None
num_rotations = predictions.shape[0]
for canvas_row in range(int(num_rotations / 4)):
tmp_row_canvas = None
for canvas_col in range(4):
rotate_idx = canvas_row * 4 + canvas_col
prediction_vis = predictions[rotate_idx, :, :].copy()
# prediction_vis[prediction_vis < 0] = 0 # assume probability
# prediction_vis[prediction_vis > 1] = 1 # assume probability
prediction_vis = np.clip(prediction_vis, 0, 1)
prediction_vis.shape = (predictions.shape[1], predictions.shape[2])
prediction_vis = cv2.applyColorMap(
(prediction_vis * 255).astype(np.uint8), cv2.COLORMAP_JET
)
if rotate_idx == best_pix_ind[0]:
prediction_vis = cv2.circle(
prediction_vis,
(int(best_pix_ind[2]), int(best_pix_ind[1])),
7,
(0, 0, 255),
2,
)
prediction_vis = utils.rotate(prediction_vis, rotate_idx * (360.0 / num_rotations))
background_image = utils.rotate(
color_heightmap, rotate_idx * (360.0 / num_rotations)
)
prediction_vis = (
0.5 * cv2.cvtColor(background_image, cv2.COLOR_RGB2BGR) + 0.5 * prediction_vis
).astype(np.uint8)
if tmp_row_canvas is None:
tmp_row_canvas = prediction_vis
else:
tmp_row_canvas = np.concatenate((tmp_row_canvas, prediction_vis), axis=1)
if canvas is None:
canvas = tmp_row_canvas
else:
canvas = np.concatenate((canvas, tmp_row_canvas), axis=0)
return canvas
def push_change_area(self, prev_depth_img, next_depth_img):
kernel = np.ones((11, 11))
# kernel_num = np.ones((5, 5))
depth_img = np.copy(prev_depth_img)
depth_img_copy = np.copy(depth_img)
depth_img_copy[depth_img_copy <= DEPTH_MIN] = 0
depth_img_copy[depth_img_copy > DEPTH_MIN] = 1
prev_area = cv2.filter2D(depth_img_copy, -1, kernel)
prev_area[prev_area <= 1] = 0
prev_area[prev_area > 1] = 1
prev_area = np.sum(prev_area - depth_img_copy)
depth_img = np.copy(next_depth_img)
depth_img_copy = np.copy(depth_img)
depth_img_copy[depth_img_copy <= DEPTH_MIN] = 0
depth_img_copy[depth_img_copy > DEPTH_MIN] = 1
next_area = cv2.filter2D(depth_img_copy, -1, kernel)
next_area[next_area <= 1] = 0
next_area[next_area > 1] = 1
next_area = np.sum(next_area - depth_img_copy)
print("Prev Area %d" % (prev_area))
print("Next Area %d" % (next_area))
return next_area - prev_area
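# Interpretation sketch: each area is the size of the free band produced by
# dilating the occupancy mask with an 11x11 kernel, so a positive return
# value means the push spread objects apart (e.g. separating a tight
# cluster), while a negative value means they were pushed closer together.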
| 39,013 | 41.222944 | 100 |
py
|
more
|
more-main/mcts_main.py
|
"""Test"""
import glob
import gc
import os
import time
import datetime
import pybullet as p
import cv2
import numpy as np
from graphviz import Digraph
import argparse
import random
import torch
import pandas as pd
from mcts_utils import MCTSHelper
from mcts.search import MonteCarloTreeSearch
from mcts.nodes import PushSearchNode
from mcts.push import PushState
import utils
from constants import (
MCTS_EARLY_ROLLOUTS,
PIXEL_SIZE,
WORKSPACE_LIMITS,
TARGET_LOWER,
TARGET_UPPER,
NUM_ROTATION,
GRASP_Q_PUSH_THRESHOLD,
GRASP_Q_GRASP_THRESHOLD,
IS_REAL,
MCTS_MAX_LEVEL,
MCTS_ROLLOUTS,
)
from environment_sim import Environment
class SeachCollector:
def __init__(self, cases):
# Create directory to save data
timestamp = time.time()
timestamp_value = datetime.datetime.fromtimestamp(timestamp)
name = ""
for case in cases:
name = name + case.split("/")[-1].split(".")[0] + "-"
name = name[:-1]
self.base_directory = os.path.join(
os.path.abspath("logs_grasp"),
"mcts-" + timestamp_value.strftime("%Y-%m-%d-%H-%M-%S") + "-" + name,
)
print("Creating data logging session: %s" % (self.base_directory))
self.color_heightmaps_directory = os.path.join(
self.base_directory, "data", "color-heightmaps"
)
self.depth_heightmaps_directory = os.path.join(
self.base_directory, "data", "depth-heightmaps"
)
self.mask_directory = os.path.join(self.base_directory, "data", "masks")
self.prediction_directory = os.path.join(self.base_directory, "data", "predictions")
self.visualizations_directory = os.path.join(self.base_directory, "visualizations")
self.transitions_directory = os.path.join(self.base_directory, "transitions")
self.executed_action_log = []
self.label_value_log = []
self.consecutive_log = []
self.time_log = []
self.mcts_directory = os.path.join(self.base_directory, "mcts")
self.mcts_color_directory = os.path.join(self.base_directory, "mcts", "color")
self.mcts_depth_directory = os.path.join(self.base_directory, "mcts", "depth")
self.mcts_mask_directory = os.path.join(self.base_directory, "mcts", "mask")
self.mcts_child_image_directory = os.path.join(self.base_directory, "mcts", "child_image")
self.idx = 0
self.record_image_idx = []
self.record_action = []
self.record_label = []
self.record_num_visits = []
self.record_child_image_idx = []
self.record_data = {
"image_idx": self.record_image_idx,
"action": self.record_action,
"label": self.record_label,
"num_visits": self.record_num_visits,
"child_image_idx": self.record_child_image_idx,
}
if not os.path.exists(self.color_heightmaps_directory):
os.makedirs(self.color_heightmaps_directory)
if not os.path.exists(self.depth_heightmaps_directory):
os.makedirs(self.depth_heightmaps_directory)
if not os.path.exists(self.mask_directory):
os.makedirs(self.mask_directory)
if not os.path.exists(self.prediction_directory):
os.makedirs(self.prediction_directory)
if not os.path.exists(self.visualizations_directory):
os.makedirs(self.visualizations_directory)
if not os.path.exists(self.transitions_directory):
os.makedirs(os.path.join(self.transitions_directory))
if not os.path.exists(self.mcts_directory):
os.makedirs(os.path.join(self.mcts_directory))
if not os.path.exists(self.mcts_color_directory):
os.makedirs(os.path.join(self.mcts_color_directory))
if not os.path.exists(self.mcts_depth_directory):
os.makedirs(os.path.join(self.mcts_depth_directory))
if not os.path.exists(self.mcts_mask_directory):
os.makedirs(os.path.join(self.mcts_mask_directory))
if not os.path.exists(self.mcts_child_image_directory):
os.makedirs(os.path.join(self.mcts_child_image_directory))
def save_heightmaps(self, iteration, color_heightmap, depth_heightmap, mode=0):
color_heightmap = cv2.cvtColor(color_heightmap, cv2.COLOR_RGB2BGR)
cv2.imwrite(
os.path.join(self.color_heightmaps_directory, "%06d.%s.color.png" % (iteration, mode)),
color_heightmap,
)
depth_heightmap = np.round(depth_heightmap * 100000).astype(
np.uint16
) # Save depth in 1e-5 meters
cv2.imwrite(
os.path.join(self.depth_heightmaps_directory, "%06d.%s.depth.png" % (iteration, mode)),
depth_heightmap,
)
def write_to_log(self, log_name, log):
np.savetxt(
os.path.join(self.transitions_directory, "%s.log.txt" % log_name), log, delimiter=" "
)
def save_predictions(self, iteration, pred, name="push"):
cv2.imwrite(
os.path.join(self.prediction_directory, "%06d.png" % (iteration)), pred,
)
def save_visualizations(self, iteration, affordance_vis, name):
cv2.imwrite(
os.path.join(self.visualizations_directory, "%06d.%s.png" % (iteration, name)),
affordance_vis,
)
def _save_mcts_image(self, env, file_id, node, is_child=False):
env.restore_objects(node.state.object_states)
color_image, depth_image, mask_image = utils.get_true_heightmap(env)
mask_image = utils.relabel_mask(env, mask_image)
file_name = f"{file_id:06d}"
if is_child:
file_name += f"-{node.prev_move}"
# color
color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
if is_child:
cv2.imwrite(
os.path.join(self.mcts_child_image_directory, f"{file_name}.color.png"),
color_image,
)
else:
cv2.imwrite(
os.path.join(self.mcts_color_directory, f"{file_name}.color.png"), color_image,
)
# depth
depth_image = np.round(depth_image * 100000).astype(np.uint16) # Save depth in 1e-5 meters
if is_child:
cv2.imwrite(
os.path.join(self.mcts_child_image_directory, f"{file_name}.depth.png"),
depth_image,
)
else:
cv2.imwrite(
os.path.join(self.mcts_depth_directory, f"{file_name}.depth.png"), depth_image,
)
# mask
if is_child:
cv2.imwrite(
os.path.join(self.mcts_child_image_directory, f"{file_name}.mask.png"), mask_image,
)
else:
cv2.imwrite(
os.path.join(self.mcts_mask_directory, f"{file_name}.mask.png"), mask_image,
)
return file_name
def save_mcts_data(self, mcts_helper, env, root, best_action, best_idx):
backup_state = env.save_objects()
search_list = [root]
while len(search_list) > 0:
current_node = search_list.pop(0)
if current_node.has_children:
save_image = False
for i in range(len(current_node.children)):
child_node = current_node.children[i]
action = child_node.prev_move
# child_q = sum(sorted(child_node.q)[-MCTS_TOP:]) / min(child_node.n, MCTS_TOP)
# child_q = sum(child_node.q) / child_node.n
child_q = max(child_node.q)
self.record_image_idx.append(self.idx)
self.record_action.append(
[action.pos0[1], action.pos0[0], action.pos1[1], action.pos1[0]]
)
label = child_q
self.record_label.append(label)
self.record_num_visits.append(child_node.n)
child_idx = self._save_mcts_image(env, self.idx, child_node, is_child=True)
self.record_child_image_idx.append(child_idx)
save_image = True
if save_image:
self._save_mcts_image(env, self.idx, current_node, is_child=False)
self.idx += 1
search_list.extend(current_node.children)
df = pd.DataFrame(self.record_data, columns=list(self.record_data.keys()))
df.to_csv(os.path.join(self.mcts_directory, "records.csv"), index=False, header=True)
env.restore_objects(backup_state)
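# records.csv schema: image_idx (parent scene), action as
# (x0, y0, x1, y1) in image coordinates, label (max child Q),
# num_visits, and child_image_idx (scene after the push).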
def plot_mcts(self, env, root, iteration):
backup_state = env.save_objects()
files = glob.glob("tree_plot/*")
for f in files:
os.remove(f)
dot = Digraph(
"mcts",
filename=f"tree_plot/mcts{iteration}.gv",
node_attr={
"shape": "box",
"fontcolor": "white",
"fontsize": "3",
"labelloc": "b",
"fixedsize": "true",
},
)
search_list = [root]
while len(search_list) > 0:
current_node = search_list.pop(0)
node_name = current_node.state.uid
# node_name_label = f"Q: {(sum(sorted(current_node.q)[-MCTS_TOP:]) / min(current_node.n, MCTS_TOP)):.3f}, N: {current_node.n}, Grasp Q: {current_node.state.q_value:.3f}"
node_name_label = f"Q: {(sum(sorted(current_node.q)[-1:]) / min(current_node.n, 1)):.3f}, N: {current_node.n}, Grasp Q: {current_node.state.q_value:.3f}"
env.restore_objects(current_node.state.object_states)
color_image, depth_image, _ = utils.get_true_heightmap(env)
node_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
if current_node.prev_move is not None:
node_action = str(current_node.prev_move).split("_")
cv2.arrowedLine(
node_image,
(int(node_action[1]), int(node_action[0])),
(int(node_action[3]), int(node_action[2])),
(255, 0, 255),
2,
tipLength=0.4,
)
image_name = f"tree_plot/{node_name}.png"
cv2.imwrite(image_name, node_image)
depthimage_name = f"tree_plot/{node_name}-depth.png"
depth_image = np.round(depth_image * 100000).astype(
np.uint16
) # Save depth in 1e-5 meters
cv2.imwrite(depthimage_name, depth_image)
image_name = f"{node_name}.png"
image_size = str(
max(
0.6,
# sum(sorted(current_node.q)[-MCTS_TOP:]) / min(current_node.n, MCTS_TOP) * 2,
sum(sorted(current_node.q)[-1:]) / min(current_node.n, 1) * 2,
)
)
dot.node(
node_name,
label=node_name_label,
image=image_name,
width=image_size,
height=image_size,
)
if current_node.parent is not None:
node_parent_name = current_node.parent.state.uid
dot.edge(node_parent_name, node_name)
untracked_states = [current_node.state]
last_node_used = False
while len(untracked_states) > 0:
current_state = untracked_states.pop()
last_state_name = current_state.uid
if last_node_used:
actions = current_state.get_actions()
else:
if len(current_node.children) == 0:
actions = current_state.get_actions()
else:
actions = current_node.untried_actions
last_node_used = True
for _, move in enumerate(actions):
key = current_state.uid + str(move)
if key in current_state.mcts_helper.simulation_recorder:
(
object_states,
new_image_q,
) = current_state.mcts_helper.simulation_recorder[key]
node_name = f"{current_state.uid}.{current_state.level}-{move}"
node_name_label = f"Grasp Q: {new_image_q:.3f}"
env.restore_objects(object_states)
color_image, depth_image, _ = utils.get_true_heightmap(env)
node_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
node_action = str(move).split("_")
if len(node_action) > 1:
cv2.arrowedLine(
node_image,
(int(node_action[1]), int(node_action[0])),
(int(node_action[3]), int(node_action[2])),
(255, 0, 255),
2,
tipLength=0.4,
)
image_name = f"tree_plot/{node_name}.png"
cv2.imwrite(image_name, node_image)
depthimage_name = f"tree_plot/{node_name}-depth.png"
depth_image = np.round(depth_image * 100000).astype(
np.uint16
) # Save depth in 1e-5 meters
cv2.imwrite(depthimage_name, depth_image)
image_name = f"{node_name}.png"
image_size = str(max(0.6, new_image_q * 2))
dot.node(
node_name,
label=node_name_label,
image=image_name,
width=image_size,
height=image_size,
)
dot.edge(last_state_name, node_name)
new_state, _, _, _ = current_state.move(move)
if new_state is not None:
untracked_states.append(new_state)
search_list.extend(current_node.children)
dot.view()
env.restore_objects(backup_state)
# input("wait for key")
def parse_args():
parser = argparse.ArgumentParser(description="MCTS DIPN")
parser.add_argument("--test_case", action="store", help="File for testing")
parser.add_argument("--test_cases", nargs="+", help="Files for testing")
parser.add_argument(
"--max_test_trials",
action="store",
type=int,
default=5,
help="maximum number of test runs per case/scenario",
)
parser.add_argument("--switch", action="store", type=int, help="Switch target")
parser.add_argument("--plot", action="store_true")
parser.add_argument("--test", action="store_true", default=False)
args = parser.parse_args()
return args
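# Example invocation (hypothetical path; this matches the command built by
# collect_logs_mcts.py):
#   python mcts_main.py --test_case test-cases/test/case-01.txt --max_test_trials 5 --test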
if __name__ == "__main__":
# set seed
random.seed(1234)
torch.manual_seed(1234)
np.random.seed(1234)
iteration = 0
args = parse_args()
case = args.test_case
cases = args.test_cases
switch = args.switch
test = args.test
if switch is not None:
print(f"Target ID has been switched to {switch}")
if cases:
repeat_num = len(cases)
else:
repeat_num = args.max_test_trials
cases = [case] * repeat_num
collector = SeachCollector(cases)
env = Environment(gui=False)
env_sim = Environment(gui=False)
mcts_helper = MCTSHelper(env_sim, "logs_grasp/snapshot-post-020000.reinforcement.pth")
is_plot = args.plot
for repeat_idx in range(repeat_num):
if not IS_REAL:
success = False
while not success:
env.reset()
env_sim.reset()
success = env.add_object_push_from_file(cases[repeat_idx], switch)
success &= env_sim.add_object_push_from_file(cases[repeat_idx], switch)
print(f"Reset environment at iteration {iteration} of repeat times {repeat_idx}")
else:
print(f"Reset environment at iteration {iteration} of repeat times {repeat_idx}")
obj_num = input("Reset manually!!! Enter the number of objects")
# push_start = [4.120000000000000329e-01, -1.999999999999999001e-02, 1.000000000000000021e-02]
# push_end = [5.080000000000000071e-01, -4.800000000000001488e-02, 1.000000000000000021e-02]
# push_start = [4.879999999999999893e-01, -1.239999999999999991e-01, 1.000000000000000021e-02]
# push_end = [5.180000000000000160e-01, -2.799999999999999711e-02, 1.000000000000000021e-02]
# push_start = [
# 82 * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
# 76 * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
# 1.000000000000000021e-02,
# ]
# push_end = [
# 112 * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
# 116 * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
# 1.000000000000000021e-02,
# ]
# env_sim.push(push_start, push_end)
# env.push(push_start, push_end)
# push_start = [4.800000000000000377e-01, 4.400000000000001132e-02, 1.000000000000000021e-02]
# push_end = [5.800000000000000711e-01, 4.400000000000001132e-02, 1.000000000000000021e-02]
# env.push(push_start, push_end)
# input("test")
start_time = time.time()
while True:
color_image, depth_image, _ = utils.get_true_heightmap(env)
temp = cv2.cvtColor(color_image, cv2.COLOR_RGB2HSV)
mask = cv2.inRange(temp, TARGET_LOWER, TARGET_UPPER)
print(f"Target on the table (value: {np.sum(mask) / 255}) at iteration {iteration}")
if np.sum(mask) / 255 < 10:
break
q_value, best_pix_ind, grasp_predictions = mcts_helper.get_grasp_q(
color_image, depth_image, post_checking=True
)
print(f"Max grasp Q value: {q_value}")
# record
collector.save_heightmaps(iteration, color_image, depth_image)
grasp_pred_vis = mcts_helper.get_prediction_vis(
grasp_predictions, color_image, best_pix_ind
)
collector.save_visualizations(iteration, grasp_pred_vis, "grasp")
# Grasp >>>>>
if q_value > GRASP_Q_GRASP_THRESHOLD:
best_rotation_angle = np.deg2rad(best_pix_ind[0] * (360.0 / NUM_ROTATION))
primitive_position = [
best_pix_ind[1] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
best_pix_ind[2] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
depth_image[best_pix_ind[1]][best_pix_ind[2]] + WORKSPACE_LIMITS[2][0],
]
if not IS_REAL:
success = env.grasp(primitive_position, best_rotation_angle)
else:
grasp_success = env.grasp(primitive_position, best_rotation_angle)
success = grasp_success
# record
reward_value = 1 if success else 0
collector.executed_action_log.append(
[
1, # grasp
primitive_position[0],
primitive_position[1],
primitive_position[2],
best_rotation_angle,
-1,
-1,
]
)
collector.label_value_log.append(reward_value)
collector.write_to_log("executed-action", collector.executed_action_log)
collector.write_to_log("label-value", collector.label_value_log)
iteration += 1
if success:
break
else:
continue
# Grasp <<<<<
# Search >>>>>
object_states = env.save_objects()
initial_state = PushState(
"root",
object_states,
q_value,
0,
mcts_helper,
max_q=GRASP_Q_PUSH_THRESHOLD,
max_level=MCTS_MAX_LEVEL,
)
root = PushSearchNode(initial_state)
mcts = MonteCarloTreeSearch(root)
best_node = mcts.best_action(MCTS_ROLLOUTS, MCTS_EARLY_ROLLOUTS, test)
print("best node:")
print(best_node.state.uid)
print(best_node.state.q_value)
print(best_node.prev_move)
print(len(root.children))
node = best_node
# env.restore_objects(node.state.object_states)
# color_image, _, _ = utils.get_true_heightmap(env)
node_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)
node_action = str(node.prev_move).split("_")
cv2.arrowedLine(
node_image,
(int(node_action[1]), int(node_action[0])),
(int(node_action[3]), int(node_action[2])),
(255, 0, 255),
2,
tipLength=0.4,
)
collector.save_predictions(iteration, node_image)
# env.restore_objects(object_states)
# Search <<<<<
# Push >>>>>
push_start = best_node.prev_move.pos0
push_end = best_node.prev_move.pos1
push_start = [
push_start[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
push_start[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
push_end = [
push_end[0] * PIXEL_SIZE + WORKSPACE_LIMITS[0][0],
push_end[1] * PIXEL_SIZE + WORKSPACE_LIMITS[1][0],
0.01,
]
env.push(push_start, push_end)
# record
reward_value = 0
collector.executed_action_log.append(
[
0, # push
push_start[0],
push_start[1],
push_start[2],
push_end[0],
push_end[1],
push_end[2],
]
)
collector.label_value_log.append(reward_value)
collector.write_to_log("executed-action", collector.executed_action_log)
collector.write_to_log("label-value", collector.label_value_log)
iteration += 1
# Push <<<<<
# Plot
if is_plot:
collector.plot_mcts(env_sim, root, iteration)
# Save tree for training, BFS
# best_action = best_node.prev_move
# collector.record_image_idx.append(collector.idx)
# collector.record_action.append(
# [best_action.pos0[1], best_action.pos0[0], best_action.pos1[1], best_action.pos1[0]]
# )
# label = 2
# collector.record_label.append(label)
if not test:
collector.save_mcts_data(mcts_helper, env_sim, root, best_node.prev_move, collector.idx)
# clean up for memory
del initial_state
del mcts
del root
del best_node
del push_start
del push_end
mcts_helper.reset()
gc.collect()
end_time = time.time()
collector.time_log.append(end_time - start_time)
collector.write_to_log("executed-time", collector.time_log)
| 23,696 | 40.793651 | 183 |
py
|
more
|
more-main/collect_logs_mcts.py
|
import subprocess
import time
import glob
import logging
cases = glob.glob("test-cases/test/*") # glob.glob("test-cases/train/*")
cases = sorted(cases, reverse=False)
switches = [0] # [0,1,2,3,4]
logging.basicConfig(
filename="logs_grasp/collect.log",
filemode="w",
format="%(asctime)s - %(levelname)s - %(message)s",
datefmt="%d-%b-%y %H:%M:%S",
)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
logger.info(f"Starting collect mcts data...")
logger.info(f"Switch: {switches}")
logger.info(f"Cases {cases}")
commands = []
for switch in switches:
for case in cases:
file_path = case
c = ["python", "mcts_main.py", "--test_case", file_path, "--max_test_trials", "5", "--test"]
if switch != 0:
c.extend(["--switch", str(switch)])
commands.append(c)
print(c)
print("=================================================")
max_procs = 6
procs = []
names = []
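# Simple poll-based pool: launch up to max_procs runs of mcts_main.py at
# once, reap whichever finishes, and keep refilling until the queue drains.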
while len(commands) > 0:
if len(procs) < max_procs:
command = commands.pop(0)
print(f"Staring: {command}")
procs.append(subprocess.Popen(command))
names.append(command)
logger.info(f"Staring {command}")
for p in list(procs): # iterate over a copy since finished procs are popped
poll = p.poll()
if poll is not None:
idx = procs.index(p)
procs.pop(idx)
info = names.pop(idx)
print(f"End: {command}")
logger.info(f"End {info}")
time.sleep(5)
| 1,456 | 23.283333 | 100 |
py
|
more
|
more-main/mcts_network/nodes.py
|
"""Node for MCTS"""
import math
import numpy as np
from constants import (
MCTS_DISCOUNT,
)
class PushSearchNode:
"""MCTS search node for push prediction."""
def __init__(self, state=None, prev_move=None, parent=None):
self.state = state
self.prev_move = prev_move
self.parent = parent
self.children = []
self._number_of_visits = 1
self._results = [0]
self._untried_actions = None
@property
def untried_actions(self):
if self._untried_actions is None:
self._untried_actions = self.state.get_actions().copy()
return self._untried_actions
@property
def nq(self):
return self.prev_move.q_value
@property
def q(self):
return self._results
@property
def n(self):
return self._number_of_visits
@property
def has_children(self):
return len(self.children) > 0
@property
def is_fully_expanded(self):
for c in self.children:
if c.state is None:
return False
return True
def is_terminal_node(self):
return self.state.is_push_over() or not self.has_children
def pre_expand(self):
while len(self.untried_actions) > 0:
action = self.untried_actions.pop()
self.children.append(PushSearchNode(None, action, parent=self))
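# Children are expanded lazily: pre_expand() registers one placeholder
# node (state=None) per untried action, and expand() later simulates the
# push to fill in the resulting state, pruning the action if it fails.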
def expand(self):
result = self.parent.state.move(self.prev_move)
if result is None:
self.parent.state.remove_action(self.prev_move)
expanded = False
else:
next_state, _, _, _ = result
self.state = next_state
self.pre_expand()
expanded = True
return expanded
def rollout(self):
current_rollout_state = self.state
discount_accum = 1
results = [current_rollout_state.push_result]
is_consecutive_move = False
color_image = None
mask_image = None
while not current_rollout_state.is_push_over():
possible_moves = current_rollout_state.get_actions(color_image, mask_image)
if len(possible_moves) == 0:
break
action = self.rollout_policy(possible_moves)
# use PPN for rollout
# result = max(0, min(action.q_value, 1.2))
# results.append(result * discount_accum)
# break
result = current_rollout_state.move(action, is_consecutive_move=is_consecutive_move)
if result is None:
if current_rollout_state == self.state:
for ci, c in enumerate(self.children):
if c.prev_move == action:
self.children.pop(ci)
break
current_rollout_state.remove_action(action)
is_consecutive_move = False
else:
discount_accum *= MCTS_DISCOUNT
new_rollout_state, color_image, mask_image, in_recorder = result
current_rollout_state = new_rollout_state
results.append(current_rollout_state.push_result * discount_accum)
if in_recorder:
is_consecutive_move = False
else:
is_consecutive_move = True
return np.max(results)
def backpropagate(self, result):
self._number_of_visits += 1
self._results.append(result)
if self.parent:
self.parent.backpropagate(result * MCTS_DISCOUNT)
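# E.g. a rollout result r reaches the parent as r * MCTS_DISCOUNT and the
# grandparent as r * MCTS_DISCOUNT**2, biasing selection toward shorter
# push sequences.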
def best_child(self, top=3):
choices_weights = [
(sum(sorted(c.q)[-top:]) + 1 * max(0, min(c.nq, 1.2))) / c.n
for c in self.children
]
# if self.prev_move is None:
# temp = np.zeros((224, 224, 3))
# for idx, c in enumerate(self.children):
# node_action = str(c.prev_move).split("_")
# cv2.arrowedLine(
# temp,
# (int(node_action[1]), int(node_action[0])),
# (int(node_action[3]), int(node_action[2])),
# (255, 0, 255),
# 1,
# tipLength=0.1,
# )
# cv2.putText(temp, f'{choices_weights[idx]:.2f}', (int(node_action[1]), int(node_action[0]) + 5), cv2.FONT_HERSHEY_SIMPLEX,
# 0.5, (125, 125, 0), 1, cv2.LINE_AA)
# print(choices_weights)
# cv2.imshow('temp', temp)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
child_idx = np.argmax(choices_weights)
return self.children[child_idx], child_idx
def best_child_top(self):
choices_weights = [max(c.q) + 1 * max(0, min(c.nq, 1.2)) for c in self.children]
return self.children[np.argmax(choices_weights)]
def rollout_policy(self, possible_moves):
highest_q = -math.inf
best_move = None
for move in possible_moves:
if move.q_value > highest_q:
highest_q = move.q_value
best_move = move
return best_move
| 5,153 | 32.251613 | 141 |
py
|
more
|
more-main/mcts_network/push.py
|
"""Class for MCTS."""
import math
from constants import (
MCTS_MAX_LEVEL,
GRASP_Q_PUSH_THRESHOLD,
)
from mcts_utils import _sampled_prediction_precise
import utils
class PushMove:
"""Represent a move from start to end pose"""
def __init__(self, pos0, pos1, q_value):
self.pos0 = pos0
self.pos1 = pos1
self.q_value = q_value
def __str__(self):
return f"{self.pos0[0]}_{self.pos0[1]}_{self.pos1[0]}_{self.pos1[1]}"
def __repr__(self):
return f"start: {self.pos0} to: {self.pos1}"
def __eq__(self, other):
return self.pos0 == other.pos0 and self.pos1 == other.pos1
def __hash__(self):
return hash((self.pos0, self.pos1))
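# Example (illustrative):
#     >>> PushMove((10, 20), (30, 40), 0.7) == PushMove((10, 20), (30, 40), 0.1)
#     True
# Equality and hashing ignore q_value, so repeated pushes deduplicate.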
class PushState:
"""Use move_recorder and simulation_recorder from simulation.
move_recorder, Key is uid: '(key of this)'.
simulation_recorder, Key is the uid: '(key of this) + (level)' + (move).
"""
# TODO: how to get a good max_q, which could be used to decide an object is graspable
def __init__(
self,
uid,
object_states,
q_value,
level,
mcts_helper,
push_net,
max_q=GRASP_Q_PUSH_THRESHOLD,
max_level=MCTS_MAX_LEVEL,
prev_angle=None,
prev_move=None,
):
self.uid = uid
self.object_states = object_states
self.q_value = q_value
self.level = level
self.mcts_helper = mcts_helper
self.push_net = push_net
self.max_q = max_q
self.max_level = max_level
self.prev_angle = prev_angle
self.prev_move = prev_move
@property
def push_result(self):
"""Return the grasp q value"""
result = self.q_value
result = min(result, 1)
result = max(result, 0)
result *= 0.2
if self.q_value > self.max_q:
result += 1
return result
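# E.g. with max_q = 0.8: q_value = 0.9 scores 0.9 * 0.2 + 1 = 1.18, while
# q_value = 0.5 scores 0.10, so graspable states (bonus +1) always outrank
# non-graspable ones (capped at 0.2).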
def is_push_over(self):
"""Should stop the search"""
# stop if we reached the last defined level or the object can be grasped
if self.level == self.max_level or self.q_value > self.max_q:
return True
# if no legal actions
if self.uid in self.mcts_helper.move_recorder:
if len(self.mcts_helper.move_recorder[self.uid]) == 0:
return True
# if not over - no result
return False
def _move_result(self, move, is_consecutive_move=False):
"""Return the result after a move"""
key = self.uid + str(move)
if key not in self.mcts_helper.simulation_recorder:
result = self.mcts_helper.simulate(
move.pos0, move.pos1, self.object_states if not is_consecutive_move else None
)
if result is None:
return None
color_image, depth_image, mask_image, object_states = result
new_image_q, _, _ = self.mcts_helper.get_grasp_q(
color_image, depth_image, post_checking=True
)
self.mcts_helper.simulation_recorder[key] = object_states, new_image_q
in_recorder = False
else:
self.mcts_helper.env.restore_objects(self.object_states)
object_states, new_image_q = self.mcts_helper.simulation_recorder[key]
color_image, mask_image = None, None
in_recorder = True
return object_states, new_image_q, color_image, mask_image, in_recorder
def move(self, move, is_consecutive_move=False):
result = self._move_result(move, is_consecutive_move=is_consecutive_move)
if result is None:
return None
object_states, new_image_q, color_image, mask_image, in_recorder = result
push_angle = math.atan2(move.pos1[1] - move.pos0[1], move.pos1[0] - move.pos0[0])
move_in_image = ((move.pos0[1], move.pos0[0]), (move.pos1[1], move.pos1[0]))
return (
PushState(
f"{self.uid}.{self.level}-{move}",
object_states,
new_image_q,
self.level + 1,
self.mcts_helper,
self.push_net,
max_q=self.max_q,
max_level=self.max_level,
prev_angle=push_angle,
prev_move=move_in_image,
),
color_image,
mask_image,
in_recorder,
)
def get_actions(self, color_image=None, mask_image=None):
key = self.uid
if key not in self.mcts_helper.move_recorder:
# Retrieve information
if color_image is None:
self.mcts_helper.env.restore_objects(self.object_states)
color_image, _, mask_image = utils.get_true_heightmap(self.mcts_helper.env)
actions = self.mcts_helper.sample_actions(
self.object_states, color_image, mask_image, plot=False
)
out_q = _sampled_prediction_precise(
self.mcts_helper.env, self.push_net, actions, mask_image
)
moves = []
for idx, action in enumerate(actions):
moves.append(PushMove(action[0], action[1], out_q[idx]))
self.mcts_helper.move_recorder[key] = moves
else:
moves = self.mcts_helper.move_recorder[key]
return moves
def remove_action(self, move):
key = self.uid
if key in self.mcts_helper.move_recorder:
moves = self.mcts_helper.move_recorder[key]
if move in moves:
moves.remove(move)
| 5,567 | 31.946746 | 93 |
py
|
more
|
more-main/mcts_network/__init__.py
| 0 | 0 | 0 |
py
|
|
more
|
more-main/mcts_network/search.py
|
from tqdm import tqdm
class MonteCarloTreeSearch(object):
def __init__(self, node):
self.root = node
self.root.pre_expand()
def best_action(self, simulations_number, early_stop_number, eval=False):
early_stop_sign = False
stop_level = 1
for itr in tqdm(range(simulations_number)):
child_node = self._tree_policy()
reward = child_node.rollout()
child_node.backpropagate(reward)
if eval:
# stop early if a solution within one step has been found
if child_node.state.level == stop_level and child_node.state.push_result >= child_node.state.max_q:
early_stop_sign = True
if itr > early_stop_number and early_stop_sign:
break
if self.root.is_fully_expanded:
stop_level = 2 # TODO: good for now
# to select best child go for exploitation only
return self.root.best_child_top()
def _tree_policy(self):
current_node = self.root
while not current_node.is_terminal_node():
if current_node.has_children:
child_node, child_idx = current_node.best_child()
if child_node.state is None:
expanded = child_node.expand()
if expanded:
return child_node
else:
current_node.children.pop(child_idx)
else:
current_node = child_node
return current_node
| 1,585 | 37.682927 | 115 |
py
|
more
|
more-main/mcts/nodes.py
|
"""Node for MCTS"""
import numpy as np
from constants import (
MCTS_DISCOUNT,
MCTS_TOP,
MCTS_UCT_RATIO,
)
class PushSearchNode:
"""MCTS search node for push prediction."""
def __init__(self, state, prev_move=None, parent=None):
self.state = state
self.prev_move = prev_move
self.parent = parent
self.children = []
self._number_of_visits = 0
self._results = []
self._untried_actions = None
@property
def untried_actions(self):
if self._untried_actions is None:
self._untried_actions = self.state.get_actions().copy()
return self._untried_actions
@property
def q(self):
return self._results
@property
def n(self):
return self._number_of_visits
@property
def has_children(self):
return len(self.children) > 0
@property
def is_fully_expanded(self):
return len(self.untried_actions) == 0
def is_terminal_node(self):
return self.state.is_push_over() or (self.is_fully_expanded and not self.has_children)
def expand(self):
expanded = False
child_node = self
while len(self.untried_actions) > 0:
action = self.untried_actions.pop()
result = self.state.move(action)
if result is None:
self.state.remove_action(action)
else:
next_state, _, _, _ = result
child_node = PushSearchNode(next_state, action, parent=self)
self.children.append(child_node)
expanded = True
break
return expanded, child_node
def rollout(self):
current_rollout_state = self.state
discount_accum = 1
results = [current_rollout_state.push_result]
# discounts = []
# cost = 0
is_consecutive_move = False
color_image = None
mask_image = None
while not current_rollout_state.is_push_over():
possible_moves = current_rollout_state.get_actions(color_image, mask_image)
if len(possible_moves) == 0:
break
action = self.rollout_policy(possible_moves)
result = current_rollout_state.move(action, is_consecutive_move=is_consecutive_move)
if result is None:
if current_rollout_state == self.state:
if action in self.untried_actions:
self.untried_actions.remove(action)
current_rollout_state.remove_action(action)
is_consecutive_move = False
else:
# cost += MCTS_STEP_COST
discount_accum *= MCTS_DISCOUNT
new_rollout_state, color_image, mask_image, in_recorder = result
current_rollout_state = new_rollout_state
results.append(current_rollout_state.push_result * discount_accum)
# results.append(current_rollout_state.push_result - cost)
# discounts.append(discount_accum)
if in_recorder:
is_consecutive_move = False
else:
is_consecutive_move = True
# if len(results) > 0:
# result_idx = np.argmax(results)
# return results[result_idx] * discounts[result_idx], results[result_idx]
# else:
# return (
# current_rollout_state.push_result * discount_accum,
# current_rollout_state.push_result,
# )
return np.max(results)
def backpropagate(self, result):
self._number_of_visits += 1
# if high_q <= self.state.push_result:
# high_q = self.state.push_result
# result = high_q
# if result < self.state.push_result:
# result = self.state.push_result
self._results.append(result)
if self.parent:
# discount_factor = MCTS_DISCOUNT
self.parent.backpropagate(result * MCTS_DISCOUNT)
# self.parent.backpropagate(result - MCTS_STEP_COST)
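# UCT-style selection below: exploit the mean of each child's top-`top`
# returns and explore via c_param * sqrt(2 ln(N_parent) / N_child).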
def best_child(self, c_param=MCTS_UCT_RATIO, top=MCTS_TOP):
choices_weights = [
(sum(sorted(c.q)[-top:]) / min(c.n, top))
+ c_param * np.sqrt((2 * np.log(self.n) / c.n))
for c in self.children
]
return self.children[np.argmax(choices_weights)]
def best_child_top(self):
# choices_weights = [(sum(sorted(c.q)[-top:]) / min(c.n, top)) for c in self.children]
choices_weights = [max(c.q) for c in self.children]
return self.children[np.argmax(choices_weights)]
def rollout_policy(self, possible_moves):
return possible_moves[np.random.randint(len(possible_moves))]
| 4,727 | 33.510949 | 96 |
py
|
more
|
more-main/mcts/push.py
|
"""Class for MCTS."""
import math
from constants import (
MCTS_MAX_LEVEL,
GRASP_Q_PUSH_THRESHOLD,
)
class PushMove:
"""Represent a move from start to end pose"""
def __init__(self, pos0, pos1):
self.pos0 = pos0
self.pos1 = pos1
def __str__(self):
return f"{self.pos0[0]}_{self.pos0[1]}_{self.pos1[0]}_{self.pos1[1]}"
def __repr__(self):
return f"start: {self.pos0} to: {self.pos1}"
def __eq__(self, other):
return self.pos0 == other.pos0 and self.pos1 == other.pos1
def __hash__(self):
return hash((self.pos0, self.pos1))
class PushState:
"""Use move_recorder and simulation_recorder from simulation.
move_recorder, Key is uid: '(key of this)'.
simulation_recorder, Key is the uid: '(key of this) + (level)' + (move).
"""
# TODO: how to get a good max_q, which could be used to decide an object is graspable
def __init__(
self,
uid,
object_states,
q_value,
level,
mcts_helper,
max_q=GRASP_Q_PUSH_THRESHOLD,
max_level=MCTS_MAX_LEVEL,
prev_angle=None,
prev_move=None,
):
self.uid = uid
self.object_states = object_states
self.q_value = q_value
self.level = level
self.mcts_helper = mcts_helper
self.max_q = max_q
self.max_level = max_level
self.prev_angle = prev_angle
self.prev_move = prev_move
@property
def push_result(self):
"""Return the grasp q value"""
result = self.q_value
result = min(result, 1)
result = max(result, 0)
result *= 0.2
if self.q_value > self.max_q:
result += 1
return result
def is_push_over(self):
"""Should stop the search"""
# stop if we reached the last defined level or the object can be grasped
if self.level == self.max_level or self.q_value > self.max_q:
return True
# if no legal actions
if self.uid in self.mcts_helper.move_recorder:
if len(self.mcts_helper.move_recorder[self.uid]) == 0:
return True
# if not over - no result
return False
def _move_result(self, move, is_consecutive_move=False):
"""Return the result after a move"""
key = self.uid + str(move)
if key not in self.mcts_helper.simulation_recorder:
result = self.mcts_helper.simulate(
move.pos0, move.pos1, self.object_states if not is_consecutive_move else None
)
if result is None:
return None
color_image, depth_image, mask_image, object_states = result
new_image_q, _, _ = self.mcts_helper.get_grasp_q(
color_image, depth_image, post_checking=True
)
self.mcts_helper.simulation_recorder[key] = object_states, new_image_q
in_recorder = False
else:
self.mcts_helper.env.restore_objects(self.object_states)
object_states, new_image_q = self.mcts_helper.simulation_recorder[key]
color_image, mask_image = None, None
in_recorder = True
return object_states, new_image_q, color_image, mask_image, in_recorder
def move(self, move, is_consecutive_move=False):
result = self._move_result(move, is_consecutive_move=is_consecutive_move)
if result is None:
return None
object_states, new_image_q, color_image, mask_image, in_recorder = result
push_angle = math.atan2(move.pos1[1] - move.pos0[1], move.pos1[0] - move.pos0[0])
move_in_image = ((move.pos0[1], move.pos0[0]), (move.pos1[1], move.pos1[0]))
return (
PushState(
f"{self.uid}.{self.level}-{move}",
object_states,
new_image_q,
self.level + 1,
self.mcts_helper,
max_q=self.max_q,
max_level=self.max_level,
prev_angle=push_angle,
prev_move=move_in_image,
),
color_image,
mask_image,
in_recorder,
)
def get_actions(self, color_image=None, mask_image=None):
key = self.uid
if key not in self.mcts_helper.move_recorder:
actions = self.mcts_helper.sample_actions(
self.object_states, color_image, mask_image, plot=False
)
moves = []
for action in actions:
moves.append(PushMove(action[0], action[1]))
self.mcts_helper.move_recorder[key] = moves
else:
moves = self.mcts_helper.move_recorder[key]
return moves
def remove_action(self, move):
key = self.uid
if key in self.mcts_helper.move_recorder:
moves = self.mcts_helper.move_recorder[key]
if move in moves:
moves.remove(move)
| 4,979 | 31.337662 | 93 |
py
|
more
|
more-main/mcts/__init__.py
| 0 | 0 | 0 |
py
|
|
more
|
more-main/mcts/search.py
|
from tqdm import tqdm
class MonteCarloTreeSearch(object):
def __init__(self, node):
self.root = node
def best_action(self, simulations_number, early_stop_number, eval=False):
early_stop_sign = False
stop_level = 1
for itr in tqdm(range(simulations_number)):
child_node = self._tree_policy()
reward = child_node.rollout()
child_node.backpropagate(reward)
if eval:
# stop early if a solution within one step has been found
if child_node.state.level == stop_level and child_node.state.push_result > child_node.state.max_q:
early_stop_sign = True
if itr > early_stop_number and early_stop_sign:
break
if self.root.is_fully_expanded:
stop_level = 2 # TODO: good for now
# to select best child go for exploitation only
# return self.root.best_child(c_param=0.0)
return self.root.best_child_top()
def _tree_policy(self):
current_node = self.root
while not current_node.is_terminal_node():
if not current_node.is_fully_expanded:
expanded, node = current_node.expand()
if expanded:
return node
if current_node.has_children:
current_node = current_node.best_child()
return current_node
| 1,456 | 38.378378 | 114 |
py
|
more
|
more-main/vision/backbone_utils.py
|
from collections import OrderedDict
from torch import nn
from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork, LastLevelMaxPool
import torch.nn.functional as F
from torchvision.ops import misc as misc_nn_ops
from ._utils import IntermediateLayerGetter
from . import resnet
from constants import GRIPPER_GRASP_OUTER_DISTANCE_PIXEL, GRIPPER_GRASP_SAFE_WIDTH_PIXEL
class FCNHead(nn.Sequential):
def __init__(self, in_channels, channels, kernel_size=3, padding=1, last=True):
inter_channels = in_channels // 2
if last:
layers = [
nn.Conv2d(in_channels, inter_channels, kernel_size, padding=padding, bias=False),
nn.BatchNorm2d(inter_channels),
nn.ReLU(),
nn.Conv2d(inter_channels, channels, 1),
]
else:
layers = [
nn.Conv2d(in_channels, inter_channels, kernel_size, padding=padding, bias=False),
nn.BatchNorm2d(inter_channels),
nn.ReLU(),
nn.Conv2d(inter_channels, channels, 1),
nn.BatchNorm2d(channels),
nn.ReLU(),
]
super(FCNHead, self).__init__(*layers)
class BackboneWithFPNAndHeadPush(nn.Module):
"""
Adds an FPN on top of a model.
Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
extract a submodel that returns the feature maps specified in return_layers.
The same limitations of IntermediateLayerGetter apply here.
Arguments:
backbone (nn.Module)
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
in_channels_list (List[int]): number of channels for each feature map
that is returned, in the order they are present in the OrderedDict
out_channels (int): number of channels in the FPN.
Attributes:
out_channels (int): the number of channels in the FPN
"""
def __init__(self, backbone, return_layers, in_channels_list, out_channels, is_real=False):
super().__init__()
# self.backbone = backbone
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.fpn = FeaturePyramidNetwork(
in_channels_list=in_channels_list,
out_channels=out_channels,
extra_blocks=LastLevelMaxPool(),
)
self.conv0 = nn.Conv2d(256, 256, kernel_size=3, stride=1, bias=False)
self.bn0 = nn.BatchNorm2d(256)
self.conv1 = nn.Conv2d(256, 128, kernel_size=3, stride=1, bias=False)
self.bn1 = nn.BatchNorm2d(128)
self.conv2 = nn.Conv2d(128, 32, kernel_size=1, stride=1, bias=False)
self.bn2 = nn.BatchNorm2d(32)
self.conv3 = nn.Conv2d(32, 1, kernel_size=1, stride=1, bias=False)
# self.head = nn.Sequential(
# OrderedDict(
# [
# ("push-head-conv0", nn.Conv2d(1, 1, kernel_size=(1, 1), bias=False),),
# ("head-relu0", nn.ReLU(inplace=True)),
# ("push-head-conv1", nn.Conv2d(1, 1, kernel_size=(1, 1), bias=False),),
# ]
# )
# )
inplanes = 256 # the channels of 'out' layer.
final_out_channels = 1
self.classifier1 = FCNHead(inplanes, 64, last=False)
self.classifier2 = FCNHead(64, final_out_channels, last=True)
self.out_channels = out_channels
def forward(self, x):
input_shape_half = (x.shape[-2] // 2, x.shape[-1] // 2)
input_shape = x.shape[-2:]
# x = self.body(x)
# x = self.fpn(x)
# x = x["0"]
# x = self.classifier1(x)
# x = F.interpolate(x, size=input_shape_half, mode="nearest")
# x = self.classifier2(x)
# x = F.interpolate(x, size=input_shape, mode="nearest")
# x = self.backbone(x)
x = self.body(x)
x = self.fpn(x)
x = x["0"]
x = self.conv0(x)
x = self.bn0(x)
x = F.relu(x)
x = self.conv1(x)
x = self.bn1(x)
x = F.relu(x)
x = F.interpolate(x, size=input_shape_half, mode="bilinear", align_corners=True)
x = self.conv2(x)
x = self.bn2(x)
x = F.relu(x)
x = F.interpolate(x, size=input_shape, mode="bilinear", align_corners=True)
x = self.conv3(x)
return x
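# Shape sketch (illustrative, for a 1x4x224x224 input): the FPN "0" map is
# 1x256x56x56; the two unpadded 3x3 convs shrink it to 52x52, it is then
# upsampled to 112x112, reduced to 32 channels, upsampled to 224x224, and
# projected to a single-channel push quality map.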
class BackboneWithFPNAndHeadGrasp(nn.Module):
"""
Adds an FPN on top of a model.
Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
extract a submodel that returns the feature maps specified in return_layers.
The same limitations of IntermediateLayerGetter apply here.
Arguments:
backbone (nn.Module)
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
in_channels_list (List[int]): number of channels for each feature map
that is returned, in the order they are present in the OrderedDict
out_channels (int): number of channels in the FPN.
Attributes:
out_channels (int): the number of channels in the FPN
"""
def __init__(self, backbone, return_layers, in_channels_list, out_channels, is_real=False):
super().__init__()
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.fpn = FeaturePyramidNetwork(
in_channels_list=in_channels_list,
out_channels=out_channels,
extra_blocks=LastLevelMaxPool(),
)
self.head = nn.Sequential(
OrderedDict(
[
(
"grasp-head-conv000",
nn.Conv2d(
1,
1,
kernel_size=(
GRIPPER_GRASP_SAFE_WIDTH_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
),
padding=(
GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2,
),
bias=False,
),
),
("grasp-head-relu000", nn.ReLU(inplace=True)),
(
"grasp-head-conv0000",
nn.Conv2d(
1,
1,
kernel_size=(
GRIPPER_GRASP_SAFE_WIDTH_PIXEL,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL,
),
padding=(
GRIPPER_GRASP_SAFE_WIDTH_PIXEL // 2,
GRIPPER_GRASP_OUTER_DISTANCE_PIXEL // 2,
),
bias=False,
),
),
("grasp-head-relu0000", nn.ReLU(inplace=True)),
("grasp-head-conv1", nn.Conv2d(1, 1, kernel_size=(1, 1), bias=False),),
("grasp-head-relu1", nn.ReLU(inplace=True)),
("grasp-head-conv2", nn.Conv2d(1, 1, kernel_size=(1, 1), bias=False),),
]
)
)
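# The stacked gripper-sized kernels (GRIPPER_GRASP_SAFE_WIDTH_PIXEL x
# GRIPPER_GRASP_OUTER_DISTANCE_PIXEL) act as a learned gripper-shaped
# filter over the dense grasp quality map.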
inplanes = 256 # the channels of 'out' layer.
final_out_channels = 1
self.classifier1 = FCNHead(inplanes, 64, last=False)
self.classifier2 = FCNHead(64, final_out_channels, last=False)
self.out_channels = out_channels
def forward(self, x):
input_shape_half = (x.shape[-2] // 2, x.shape[-1] // 2)
input_shape = x.shape[-2:]
x = self.body(x)
x = self.fpn(x)
x = x["0"]
x = self.classifier1(x)
x = F.interpolate(x, size=input_shape_half, mode="nearest")
x = self.classifier2(x)
x = F.interpolate(x, size=input_shape, mode="nearest")
x = self.head(x)
return x
class BackboneWithFPN(nn.Module):
"""
Adds an FPN on top of a model.
Internally, it uses torchvision.models._utils.IntermediateLayerGetter to
extract a submodel that returns the feature maps specified in return_layers.
The same limitations of IntermediateLayerGetter apply here.
Arguments:
backbone (nn.Module)
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
in_channels_list (List[int]): number of channels for each feature map
that is returned, in the order they are present in the OrderedDict
out_channels (int): number of channels in the FPN.
Attributes:
out_channels (int): the number of channels in the FPN
"""
def __init__(self, backbone, return_layers, in_channels_list, out_channels):
super(BackboneWithFPN, self).__init__()
self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
self.fpn = FeaturePyramidNetwork(
in_channels_list=in_channels_list,
out_channels=out_channels,
extra_blocks=LastLevelMaxPool(),
)
self.out_channels = out_channels
def forward(self, x):
x = self.body(x)
x = self.fpn(x)
return x
def resnet_fpn_net(
backbone_name,
norm_layer=misc_nn_ops.FrozenBatchNorm2d,
trainable_layers=5,
grasp=True,
is_real=False,
pretrained=False,
input_channels=4,
):
"""
Constructs a specified ResNet backbone with FPN on top. Freezes the specified number of layers in the backbone.
Examples::
>>> from torchvision.models.detection.backbone_utils import resnet_fpn_backbone
>>> backbone = resnet_fpn_backbone('resnet50', pretrained=True, trainable_layers=3)
>>> # get some dummy image
>>> x = torch.rand(1,3,64,64)
>>> # compute the output
>>> output = backbone(x)
>>> print([(k, v.shape) for k, v in output.items()])
>>> # returns
>>> [('0', torch.Size([1, 256, 16, 16])),
>>> ('1', torch.Size([1, 256, 8, 8])),
>>> ('2', torch.Size([1, 256, 4, 4])),
>>> ('3', torch.Size([1, 256, 2, 2])),
>>> ('pool', torch.Size([1, 256, 1, 1]))]
Arguments:
backbone_name (string): resnet architecture. Possible values are 'ResNet', 'resnet18', 'resnet34', 'resnet50',
'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2'
norm_layer (torchvision.ops): it is recommended to use the default value. For details visit:
(https://github.com/facebookresearch/maskrcnn-benchmark/issues/267)
pretrained (bool): If True, returns a model with backbone pre-trained on Imagenet
trainable_layers (int): number of trainable (not frozen) resnet layers starting from final block.
Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable.
"""
backbone = resnet.__dict__[backbone_name](
pretrained=pretrained, norm_layer=norm_layer, input_channels=input_channels
)
# select layers that won't be frozen
assert trainable_layers <= 5 and trainable_layers >= 0
layers_to_train = ["layer4", "layer3", "layer2", "layer1", "conv1"][:trainable_layers]
# freeze layers only if pretrained backbone is used
for name, parameter in backbone.named_parameters():
if all([not name.startswith(layer) for layer in layers_to_train]):
parameter.requires_grad_(False)
return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
in_channels_stage2 = backbone.inplanes // 8
in_channels_list = [
in_channels_stage2,
in_channels_stage2 * 2,
in_channels_stage2 * 4,
in_channels_stage2 * 8,
]
out_channels = 256
if grasp:
return BackboneWithFPNAndHeadGrasp(
backbone, return_layers, in_channels_list, out_channels, is_real
)
else:
return BackboneWithFPNAndHeadPush(
backbone, return_layers, in_channels_list, out_channels, is_real
)
def resent_backbone(
backbone_name, pretrained, num_classes, input_channels, norm_layer=misc_nn_ops.FrozenBatchNorm2d
):
backbone = resnet.__dict__[backbone_name](
pretrained=pretrained,
input_channels=input_channels,
num_classes=num_classes,
norm_layer=norm_layer,
)
return backbone
| 13,140 | 38.821212 | 118 |
py
|
more
|
more-main/vision/_utils.py
|
from collections import OrderedDict
import torch
from torch import nn
from torch.jit.annotations import Dict
from torch.nn import functional as F
class IntermediateLayerGetter(nn.ModuleDict):
"""
Module wrapper that returns intermediate layers from a model
It has a strong assumption that the modules have been registered
into the model in the same order as they are used.
This means that one should **not** reuse the same nn.Module
twice in the forward if you want this to work.
Additionally, it is only able to query submodules that are directly
assigned to the model. So if `model` is passed, `model.feature1` can
be returned, but not `model.feature1.layer2`.
Arguments:
model (nn.Module): model on which we will extract the features
return_layers (Dict[name, new_name]): a dict containing the names
of the modules for which the activations will be returned as
the key of the dict, and the value of the dict is the name
of the returned activation (which the user can specify).
Examples::
>>> m = torchvision.models.resnet18(pretrained=True)
>>> # extract layer1 and layer3, giving them the names `feat1` and `feat2`
>>> new_m = torchvision.models._utils.IntermediateLayerGetter(m,
>>> {'layer1': 'feat1', 'layer3': 'feat2'})
>>> out = new_m(torch.rand(1, 3, 224, 224))
>>> print([(k, v.shape) for k, v in out.items()])
>>> [('feat1', torch.Size([1, 64, 56, 56])),
>>> ('feat2', torch.Size([1, 256, 14, 14]))]
"""
_version = 2
__annotations__ = {
"return_layers": Dict[str, str],
}
def __init__(self, model, return_layers):
if not set(return_layers).issubset([name for name, _ in model.named_children()]):
raise ValueError("return_layers are not present in model")
orig_return_layers = return_layers
return_layers = {str(k): str(v) for k, v in return_layers.items()}
layers = OrderedDict()
for name, module in model.named_children():
layers[name] = module
if name in return_layers:
del return_layers[name]
if not return_layers:
break
super(IntermediateLayerGetter, self).__init__(layers)
self.return_layers = orig_return_layers
def forward(self, x):
out = OrderedDict()
for name, module in self.items():
x = module(x)
if name in self.return_layers:
out_name = self.return_layers[name]
out[out_name] = x
return out
| 2,641 | 37.289855 | 89 |
py
|
more
|
more-main/vision/resnet.py
|
import torch
import torch.nn as nn
from torchvision.models.utils import load_state_dict_from_url
__all__ = [
"ResNet",
"resnet10",
"resnet18",
"resnet34",
"resnet50",
"resnet101",
"resnet152",
"resnext50_32x4d",
"resnext101_32x8d",
"wide_resnet50_2",
"wide_resnet101_2",
]
model_urls = {
"resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
"resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
"resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
"resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
"resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
"resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
"resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
"wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
"wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block,
layers,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
input_channels=3,
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
# color + depth = 4
self.conv1 = nn.Conv2d(
input_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
def resnet10(pretrained=False, progress=True, **kwargs):
r"""ResNet-10 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet10", BasicBlock, [1, 1, 1, 1], pretrained, progress, **kwargs)
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet18", Bottleneck, [2, 2, 2, 2], pretrained, progress, **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet34", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet50", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet101", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet152", Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 4
return _resnet("resnext50_32x4d", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 8
return _resnet("resnext101_32x8d", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet("wide_resnet50_2", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet("wide_resnet101_2", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
| 14,901 | 34.229314 | 107 |
py
|
more
|
more-main/vision/__init__.py
|
from .resnet import *
| 22 | 10.5 | 21 |
py
|
more
|
more-main/vision/coco_utils.py
|
import copy
import os
from PIL import Image
import torch
import torch.utils.data
import torchvision
from pycocotools import mask as coco_mask
from pycocotools.coco import COCO
class FilterAndRemapCocoCategories(object):
def __init__(self, categories, remap=True):
self.categories = categories
self.remap = remap
def __call__(self, image, target):
anno = target["annotations"]
anno = [obj for obj in anno if obj["category_id"] in self.categories]
if not self.remap:
target["annotations"] = anno
return image, target
anno = copy.deepcopy(anno)
for obj in anno:
obj["category_id"] = self.categories.index(obj["category_id"])
target["annotations"] = anno
return image, target
def convert_coco_poly_to_mask(segmentations, height, width):
masks = []
for polygons in segmentations:
rles = coco_mask.frPyObjects(polygons, height, width)
mask = coco_mask.decode(rles)
if len(mask.shape) < 3:
mask = mask[..., None]
mask = torch.as_tensor(mask, dtype=torch.uint8)
mask = mask.any(dim=2)
masks.append(mask)
if masks:
masks = torch.stack(masks, dim=0)
else:
masks = torch.zeros((0, height, width), dtype=torch.uint8)
return masks
class ConvertCocoPolysToMask(object):
def __call__(self, image, target):
w, h = image.size
image_id = target["image_id"]
image_id = torch.tensor([image_id])
anno = target["annotations"]
anno = [obj for obj in anno if obj['iscrowd'] == 0]
boxes = [obj["bbox"] for obj in anno]
# guard against no boxes via resizing
boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4)
boxes[:, 2:] += boxes[:, :2]
boxes[:, 0::2].clamp_(min=0, max=w)
boxes[:, 1::2].clamp_(min=0, max=h)
classes = [obj["category_id"] for obj in anno]
classes = torch.tensor(classes, dtype=torch.int64)
segmentations = [obj["segmentation"] for obj in anno]
masks = convert_coco_poly_to_mask(segmentations, h, w)
keypoints = None
if anno and "keypoints" in anno[0]:
keypoints = [obj["keypoints"] for obj in anno]
keypoints = torch.as_tensor(keypoints, dtype=torch.float32)
num_keypoints = keypoints.shape[0]
if num_keypoints:
keypoints = keypoints.view(num_keypoints, -1, 3)
keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
boxes = boxes[keep]
classes = classes[keep]
masks = masks[keep]
if keypoints is not None:
keypoints = keypoints[keep]
target = {}
target["boxes"] = boxes
target["labels"] = classes
target["masks"] = masks
target["image_id"] = image_id
if keypoints is not None:
target["keypoints"] = keypoints
# for conversion to coco api
area = torch.tensor([obj["area"] for obj in anno])
iscrowd = torch.tensor([obj["iscrowd"] for obj in anno])
target["area"] = area
target["iscrowd"] = iscrowd
return image, target
def _coco_remove_images_without_annotations(dataset, cat_list=None):
def _has_only_empty_bbox(anno):
return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno)
def _count_visible_keypoints(anno):
return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno)
min_keypoints_per_image = 10
def _has_valid_annotation(anno):
# if it's empty, there is no annotation
if len(anno) == 0:
return False
# if all boxes have close to zero area, there is no annotation
if _has_only_empty_bbox(anno):
return False
        # keypoint tasks have a slightly different criteria for considering
        # whether an annotation is valid
if "keypoints" not in anno[0]:
return True
# for keypoint detection tasks, only consider valid images those
# containing at least min_keypoints_per_image
if _count_visible_keypoints(anno) >= min_keypoints_per_image:
return True
return False
assert isinstance(dataset, torchvision.datasets.CocoDetection)
ids = []
for ds_idx, img_id in enumerate(dataset.ids):
ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None)
anno = dataset.coco.loadAnns(ann_ids)
if cat_list:
anno = [obj for obj in anno if obj["category_id"] in cat_list]
if _has_valid_annotation(anno):
ids.append(ds_idx)
dataset = torch.utils.data.Subset(dataset, ids)
return dataset
def convert_to_coco_api(ds):
coco_ds = COCO()
# annotation IDs need to start at 1, not 0, see torchvision issue #1530
ann_id = 1
dataset = {'images': [], 'categories': [], 'annotations': []}
categories = set()
for img_idx in range(len(ds)):
# find better way to get target
# targets = ds.get_annotations(img_idx)
img, targets = ds[img_idx]
if targets['num_obj'].item() == 0: continue
image_id = targets["image_id"].item()
img_dict = {}
img_dict['id'] = image_id
img_dict['height'] = img.shape[-2]
img_dict['width'] = img.shape[-1]
dataset['images'].append(img_dict)
bboxes = targets["boxes"]
bboxes[:, 2:] -= bboxes[:, :2]
bboxes = bboxes.tolist()
labels = targets['labels'].tolist()
areas = targets['area'].tolist()
iscrowd = targets['iscrowd'].tolist()
if 'masks' in targets:
masks = targets['masks']
# make masks Fortran contiguous for coco_mask
masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1)
if 'keypoints' in targets:
keypoints = targets['keypoints']
keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist()
num_objs = len(bboxes)
for i in range(num_objs):
ann = {}
ann['image_id'] = image_id
ann['bbox'] = bboxes[i]
ann['category_id'] = labels[i]
categories.add(labels[i])
ann['area'] = areas[i]
ann['iscrowd'] = iscrowd[i]
ann['id'] = ann_id
if 'masks' in targets:
ann["segmentation"] = coco_mask.encode(masks[i].numpy())
if 'keypoints' in targets:
ann['keypoints'] = keypoints[i]
ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3])
dataset['annotations'].append(ann)
ann_id += 1
dataset['categories'] = [{'id': i} for i in sorted(categories)]
coco_ds.dataset = dataset
coco_ds.createIndex()
return coco_ds
def get_coco_api_from_dataset(dataset):
for _ in range(10):
if isinstance(dataset, torchvision.datasets.CocoDetection):
break
if isinstance(dataset, torch.utils.data.Subset):
dataset = dataset.dataset
if isinstance(dataset, torchvision.datasets.CocoDetection):
return dataset.coco
return convert_to_coco_api(dataset)
class CocoDetection(torchvision.datasets.CocoDetection):
def __init__(self, img_folder, ann_file, transforms):
super(CocoDetection, self).__init__(img_folder, ann_file)
self._transforms = transforms
def __getitem__(self, idx):
img, target = super(CocoDetection, self).__getitem__(idx)
image_id = self.ids[idx]
target = dict(image_id=image_id, annotations=target)
if self._transforms is not None:
img, target = self._transforms(img, target)
return img, target
| 7,759 | 34.272727 | 83 |
py
|
more
|
more-main/vision/coco_eval.py
|
import json
import tempfile
import numpy as np
import copy
import time
import torch
import torch._six
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from collections import defaultdict
import old_utils as utils
class CocoEvaluator(object):
def __init__(self, coco_gt, iou_types):
assert isinstance(iou_types, (list, tuple))
coco_gt = copy.deepcopy(coco_gt)
self.coco_gt = coco_gt
self.iou_types = iou_types
self.coco_eval = {}
for iou_type in iou_types:
self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type)
self.img_ids = []
self.eval_imgs = {k: [] for k in iou_types}
def update(self, predictions):
img_ids = list(np.unique(list(predictions.keys())))
self.img_ids.extend(img_ids)
for iou_type in self.iou_types:
results = self.prepare(predictions, iou_type)
coco_dt = loadRes(self.coco_gt, results) if results else COCO()
coco_eval = self.coco_eval[iou_type]
coco_eval.cocoDt = coco_dt
coco_eval.params.imgIds = list(img_ids)
img_ids, eval_imgs = evaluate(coco_eval)
self.eval_imgs[iou_type].append(eval_imgs)
def synchronize_between_processes(self):
for iou_type in self.iou_types:
self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2)
create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type])
def accumulate(self):
for coco_eval in self.coco_eval.values():
coco_eval.accumulate()
def summarize(self):
for iou_type, coco_eval in self.coco_eval.items():
print("IoU metric: {}".format(iou_type))
coco_eval.summarize()
def prepare(self, predictions, iou_type):
if iou_type == "bbox":
return self.prepare_for_coco_detection(predictions)
elif iou_type == "segm":
return self.prepare_for_coco_segmentation(predictions)
elif iou_type == "keypoints":
return self.prepare_for_coco_keypoint(predictions)
else:
raise ValueError("Unknown iou type {}".format(iou_type))
def prepare_for_coco_detection(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"bbox": box,
"score": scores[k],
}
for k, box in enumerate(boxes)
]
)
return coco_results
def prepare_for_coco_segmentation(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
scores = prediction["scores"]
labels = prediction["labels"]
masks = prediction["masks"]
masks = masks > 0.5
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
rles = [
mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0]
for mask in masks
]
for rle in rles:
rle["counts"] = rle["counts"].decode("utf-8")
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
"segmentation": rle,
"score": scores[k],
}
for k, rle in enumerate(rles)
]
)
return coco_results
def prepare_for_coco_keypoint(self, predictions):
coco_results = []
for original_id, prediction in predictions.items():
if len(prediction) == 0:
continue
boxes = prediction["boxes"]
boxes = convert_to_xywh(boxes).tolist()
scores = prediction["scores"].tolist()
labels = prediction["labels"].tolist()
keypoints = prediction["keypoints"]
keypoints = keypoints.flatten(start_dim=1).tolist()
coco_results.extend(
[
{
"image_id": original_id,
"category_id": labels[k],
'keypoints': keypoint,
"score": scores[k],
}
for k, keypoint in enumerate(keypoints)
]
)
return coco_results
def convert_to_xywh(boxes):
xmin, ymin, xmax, ymax = boxes.unbind(1)
return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1)
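# Example (illustrative): a box given as corners [10., 20., 30., 60.] becomes
# [10., 20., 20., 40.], i.e. (x_min, y_min, width, height).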
def merge(img_ids, eval_imgs):
all_img_ids = utils.all_gather(img_ids)
all_eval_imgs = utils.all_gather(eval_imgs)
merged_img_ids = []
for p in all_img_ids:
merged_img_ids.extend(p)
merged_eval_imgs = []
for p in all_eval_imgs:
merged_eval_imgs.append(p)
merged_img_ids = np.array(merged_img_ids)
merged_eval_imgs = np.concatenate(merged_eval_imgs, 2)
# keep only unique (and in sorted order) images
merged_img_ids, idx = np.unique(merged_img_ids, return_index=True)
merged_eval_imgs = merged_eval_imgs[..., idx]
return merged_img_ids, merged_eval_imgs
def create_common_coco_eval(coco_eval, img_ids, eval_imgs):
img_ids, eval_imgs = merge(img_ids, eval_imgs)
img_ids = list(img_ids)
eval_imgs = list(eval_imgs.flatten())
coco_eval.evalImgs = eval_imgs
coco_eval.params.imgIds = img_ids
coco_eval._paramsEval = copy.deepcopy(coco_eval.params)
#################################################################
# From pycocotools, just removed the prints and fixed
# a Python3 bug about unicode not defined
#################################################################
# Ideally, pycocotools wouldn't have hard-coded prints
# so that we could avoid copy-pasting those two functions
def createIndex(self):
# create index
# print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns, catToImgs = defaultdict(list), defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
# print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
maskUtils = mask_util
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
# print('Loading and preparing results...')
# tic = time.time()
if isinstance(resFile, torch._six.string_classes):
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
    assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id + 1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]]
if 'segmentation' not in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2] * bb[3]
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if 'bbox' not in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id + 1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x2 - x1) * (y2 - y1)
ann['id'] = id + 1
ann['bbox'] = [x1, y1, x2 - x1, y2 - y1]
# print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
createIndex(res)
return res
def evaluate(self):
'''
Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
:return: None
'''
# tic = time.time()
# print('Running per image evaluation...')
p = self.params
# add backward compatibility if useSegm is specified in params
if p.useSegm is not None:
p.iouType = 'segm' if p.useSegm == 1 else 'bbox'
print('useSegm (deprecated) is not None. Running {} evaluation'.format(p.iouType))
# print('Evaluate annotation type *{}*'.format(p.iouType))
p.imgIds = list(np.unique(p.imgIds))
if p.useCats:
p.catIds = list(np.unique(p.catIds))
p.maxDets = sorted(p.maxDets)
self.params = p
self._prepare()
# loop through images, area range, max detection number
catIds = p.catIds if p.useCats else [-1]
if p.iouType == 'segm' or p.iouType == 'bbox':
computeIoU = self.computeIoU
elif p.iouType == 'keypoints':
computeIoU = self.computeOks
self.ious = {
(imgId, catId): computeIoU(imgId, catId)
for imgId in p.imgIds
for catId in catIds}
evaluateImg = self.evaluateImg
maxDet = p.maxDets[-1]
evalImgs = [
evaluateImg(imgId, catId, areaRng, maxDet)
for catId in catIds
for areaRng in p.areaRng
for imgId in p.imgIds
]
# this is NOT in the pycocotools code, but could be done outside
evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds))
self._paramsEval = copy.deepcopy(self.params)
# toc = time.time()
# print('DONE (t={:0.2f}s).'.format(toc-tic))
return p.imgIds, evalImgs
#################################################################
# end of straight copy from pycocotools, just removing the prints
#################################################################
| 12,012 | 33.421203 | 107 |
py
|
more
|
more-main/vision/transforms.py
|
import random
import torch
from torchvision.transforms import functional as F
def _flip_coco_person_keypoints(kps, width):
flip_inds = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15]
flipped_data = kps[:, flip_inds]
flipped_data[..., 0] = width - flipped_data[..., 0]
# Maintain COCO convention that if visibility == 0, then x, y = 0
inds = flipped_data[..., 2] == 0
flipped_data[inds] = 0
return flipped_data
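# Example (illustrative): with width = 100, a visible keypoint at x = 30 maps to
# x = 70, while paired left/right joints (e.g. indices 1 and 2) swap via flip_inds.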
class Compose(object):
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, image, target):
for t in self.transforms:
image, target = t(image, target)
return image, target
class RandomHorizontalFlip(object):
def __init__(self, prob):
self.prob = prob
def __call__(self, image, target):
if random.random() < self.prob and target['num_obj'].item() > 0:
height, width = image.shape[-2:]
image = image.flip(-1)
bbox = target["boxes"]
bbox[:, [0, 2]] = width - bbox[:, [2, 0]]
target["boxes"] = bbox
if "masks" in target:
target["masks"] = target["masks"].flip(-1)
return image, target
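# Example (illustrative): with width = 100, a box with [x1, x2] = [10, 30] becomes
# [70, 90], since new_x1 = width - old_x2 and new_x2 = width - old_x1.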
class ToTensor(object):
def __call__(self, image, target):
image = F.to_tensor(image)
return image, target
| 1,358 | 28.543478 | 74 |
py
|
more
|
more-main/test-cases/blender_case.py
|
import bpy
content = ""
for a in bpy.context.selected_objects:
    content += a.name + " "
content += str(a.location[0])+' '+str(a.location[1])+' '+str(a.location[2]) + " "
content += str(a.rotation_euler[0])+' '+str(a.rotation_euler[1])+' '+str(a.rotation_euler[2])
content += "\n"
with open("/home/mluser/search-YCB/test-cases/ycb/Output.txt", "w") as text_file:
text_file.write(content)
| 412 | 36.545455 | 97 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/plot_results.py
|
import random
import numpy as np
import time
import pickle
import matplotlib.pyplot as plt
import scipy.stats
def mean_confidence_interval(data, confidence=0.95):
"""
    Compute the mean and confidence interval of the input array-like data.
    :param data: (array-like)
    :param confidence: probability of the mean to lie in the interval
    :return: (tuple) mean, interval lower-endpoint, interval upper-endpoint
"""
a = 1.0 * np.array(data)
n = len(a)
m, se = np.mean(a), scipy.stats.sem(a)
h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)
return m, m-h, m+h
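# Illustrative example: mean_confidence_interval([1, 2, 3, 4, 5]) returns
# approximately (3.0, 1.04, 4.96) at the default 95% confidence level.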
def plot_instance_parameters(global_path, dataset_name):
instance_name_file = open(global_path + "/MCNF_solver/instance_files/" + dataset_name + "/instance_name_file.p", "rb" )
instance_name_list = pickle.load(instance_name_file)
instance_name_file.close()
nb_nodes_list = []
nb_arcs_list = []
nb_commodities_list = []
for instance_name in instance_name_list:
instance_file = open(global_path + "/MCNF_solver/instance_files/" + dataset_name + "/" + instance_name + ".p", "rb" )
graph, commodity_list = pickle.load(instance_file) # read an instance
instance_file.close()
nb_nodes_list.append(len(graph))
nb_arcs_list.append(sum(len(neighbor_dict) for neighbor_dict in graph))
nb_commodities_list.append(len(commodity_list))
# print the parameters for each instance of the dataset
nb_commodities_list = sorted(nb_commodities_list)
print(nb_commodities_list)
print(nb_nodes_list)
print(nb_arcs_list)
def plot_results(abscisse, results, algorithm_list, colors, formating, title, x_log=True, y_log=True, interval=True, x_label="Nb_nodes", y_label="Performance", legend_position="upper left"):
figure = plt.figure()
plt.rcParams.update({'font.size': 13})
if x_log : plt.xscale("log")
if y_log : plt.yscale("log")
ax = figure.gca()
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
plt.xticks([30, 50, 100, 200, 400], [30, 50, 100, 200, 400])
for algorithm_name in algorithm_list:
if interval:
            plt.plot(abscisse, results[algorithm_name][0], formating[algorithm_name], label=algorithm_name, color=colors[algorithm_name]) # plot the main curve
            plt.fill_between(abscisse, results[algorithm_name][1], results[algorithm_name][2], alpha=0.25, facecolor=colors[algorithm_name], edgecolor=colors[algorithm_name]) # plot the confidence interval around the main curve
ax.legend(loc=legend_position, framealpha=0.3)
else:
plt.plot(abscisse, results[algorithm_name], label=algorithm_name, color=colors[algorithm_name])
ax.legend(loc=legend_position, framealpha=0.3)
return figure
def plot_dataset(global_path, dataset_name, algorithm_list=None, x_label="Nb nodes", legend_position="upper left"):
    # This function reads the results of a dataset, aggregates the results of instances with the same parameters and calls the plotting function
result_file = open(global_path + "/MCNF_solver/instance_files/" + dataset_name + "/result_file.p", "rb" )
# result_file = open(global_path + "/MCNF_solver/instance_files_dynamic/" + dataset_name + "/result_file.p", "rb" )
result_dict = pickle.load(result_file)
result_file.close()
if algorithm_list is None:
algorithm_list = list(result_dict.keys())
# Color for each algorithm
colors = {"SRR" : '#1f77b4', "RR" : '#ff7f0e', "SRR congestion" : '#1f77b4',
"RR congestion" : '#ff7f0e', 'SA' : '#2ca02c', "MILP solver" : '#d62728',
"CSRR" : '#9467bd', "SRR unsorted" : "#000000", "RR sorted" : "#eeee00",
'SA 2' : '#2ca02c', 'VNS' : '#d62728', 'VNS 2' : '#d62728', 'Ant colony' : '#000000'}
# Line style for each algorithm
formating = {"SRR" : '-s', "RR" : '-^', "SRR congestion" : '-_',
"RR congestion" : '-', 'SA' : '-o', "MILP solver" : ':',
"CSRR" : '-', "SRR unsorted" : "--", "RR sorted" : "-d",
'SA 2' : '--', 'VNS' : '-o', 'VNS 2' : '--', 'Ant colony' : '-o'}
# Color for each algorithm
# colors = {"SRR arc node" : '#1f77b4', "SRR arc path" : '#ff7f0e', "SRR restricted" : '#ff7f0e',
# "B&B restricted medium" : '#2ca02c', 'Partial B&B restricted' : '#2ca02c', "SRR path-combination" : '#d62728',
# "SRR path-combination restricted" : '#d62728', 'SRR arc path no penalization' : '#ff7f0e', 'B&B restricted short' : '#2ca02c',
# 'B&B restricted long' : '#2ca02c', 'SRR path-combination no penalization' : '#d62728', 'SRR path-combination timestep' : '#9467bd',
# 'SRR arc node no penalization' : '#1f77b4', 'SRR path-combination commodity' : '#eeee00'}
# Line style for each algorithm
# formating = {"SRR arc node" : '-', "SRR arc path" : '-', "SRR restricted" : '-s',
# "B&B restricted medium" : '-', 'Partial B&B restricted' : '-o', "SRR path-combination" : '-',
# "SRR path-combination restricted" : '-s', 'SRR arc path no penalization' : '-o', 'B&B restricted short' : '-s',
# 'B&B restricted long' : '-o', 'SRR path-combination no penalization' : '-o', 'SRR path-combination timestep' : '-',
# 'SRR arc node no penalization' : '-o', 'SRR path-combination commodity' : '-'}
results_performance = {algorithm_name : ([], [], []) for algorithm_name in result_dict}
results_compututing_time = {algorithm_name : ([], [], []) for algorithm_name in result_dict}
results_total_overload = {algorithm_name : ([], [], []) for algorithm_name in result_dict}
for algorithm_name in algorithm_list:
temp_dict = {}
for instance_name in result_dict[algorithm_name]:
size = int(instance_name.split('_')[1]) # use for graph_scaling_dataset
# size = int(instance_name.split('_')[2]) # use for graph_scaling_dataset_random and commodity_scaling_dataset
if size not in temp_dict:
temp_dict[size] = []
temp_dict[size].extend(result_dict[algorithm_name][instance_name])
for size in sorted(list(temp_dict.keys())):
# result_list = [res if res[0] is not None else (0, 0, 10, 10**5, 10**4) for res in temp_dict[size]]
            result_list = [res if res[0] is not None else (2, 0, 1200) for res in temp_dict[size]]  # If an algorithm could not finish in time, we give it a bad result
performance_list, total_overload, computing_time_list = zip(*result_list)
# _, _, performance_list, total_overload_list, computing_time_list = zip(*result_list)
# total_overload_list = [x + 1 for x in total_overload_list]
# performance_list = [x - 1 for x in performance_list]
# Aggregation of the performance : mean and bound of the confidence interval
performance_mean, performance_low, performance_up = mean_confidence_interval(list(performance_list))
results_performance[algorithm_name][0].append(performance_mean)
results_performance[algorithm_name][1].append(max(10**-6, performance_low)) # prevent bad plotting in log scales
results_performance[algorithm_name][2].append(performance_up)
# Aggregation of the computing time : mean and bound of the confidence interval
computing_time_mean, computing_time_low, computing_time_up = mean_confidence_interval(list(computing_time_list))
results_compututing_time[algorithm_name][0].append(computing_time_mean)
results_compututing_time[algorithm_name][1].append(computing_time_low)
results_compututing_time[algorithm_name][2].append(computing_time_up)
# plt.plot([size]*len(total_overload_list), total_overload_list, '+', color=colors[algorithm_name])
# total_overload_mean, total_overload_low, total_overload_up = mean_confidence_interval(list(total_overload_list))
# results_total_overload[algorithm_name][0].append(total_overload_mean)
# results_total_overload[algorithm_name][1].append(max(1, total_overload_low))
# results_total_overload[algorithm_name][2].append(total_overload_up)
# abscisse = [182.23, 362.88, 685.2, 1038.48, 1615.56, 2462.05, 3512.71, 5048.89, 8138.71, 11644.12]
# abscisse = [63, 125.0, 234.0, 350.2, 540.3, 800.9, 1200.5, 1730.7, 2750.1, 3900.5]
abscisse = list(temp_dict.keys())
    # Call the plotting function for the different metrics (performance, computing time, ...)
performance_figure = plot_results(abscisse, results_performance, algorithm_list, colors, formating, "Performance vs number of nodes", x_label=x_label, y_label="Performance", legend_position=legend_position)
computing_time_figure = plot_results(abscisse, results_compututing_time, algorithm_list, colors, formating, "Computing time vs number of nodes", x_label=x_label, y_label="Computing time", legend_position=legend_position)
# total_overload_figure = plot_results(abscisse, results_total_overload, algorithm_list, colors, formating, "Total overload vs number of nodes", x_label=x_label, y_label="Total overload")
plt.show()
if __name__ == "__main__":
global_path = "/home/francois/Desktop"
# dataset_name = "graph_scaling_dataset"
# dataset_name = "graph_scaling_dataset_small_commodities"
# dataset_name = "graph_scaling_dataset_random"
dataset_name = "commodity_scaling_dataset"
# dataset_name = "small_instance_dataset"
# dataset_name = "graph_scaling_dataset_easy"
# dataset_name = "graph_scaling_dataset_hard"
# dataset_name = "graph_scaling_dataset_random"
# dataset_name = "commodity_scaling_dataset"
plot_instance_parameters(global_path, dataset_name)
algorithm_list = []
# algorithm_list.append("SRR arc node")
# algorithm_list.append("SRR arc path")
# algorithm_list.append("SRR arc node no penalization")
# algorithm_list.append("SRR arc path no penalization")
# algorithm_list.append("SRR restricted")
# algorithm_list.append("B&B restricted short")
# algorithm_list.append("B&B restricted medium")
# algorithm_list.append("B&B restricted long")
# algorithm_list.append("SRR path-combination")
# algorithm_list.append("SRR path-combination no penalization")
# algorithm_list.append("SRR path-combination timestep")
# algorithm_list.append("SRR path-combination commodity")
# algorithm_list.append("SRR path-combination restricted")
algorithm_list.append("SRR")
# algorithm_list.append("SRR unsorted")
# algorithm_list.append("SRR congestion")
algorithm_list.append("RR")
# algorithm_list.append("RR sorted")
# algorithm_list.append("RR congestion")
algorithm_list.append("CSRR")
algorithm_list.append("SA")
# algorithm_list.append("SA 2")
# algorithm_list.append("VNS")
# algorithm_list.append("VNS 2")
# algorithm_list.append("Ant colony")
# algorithm_list.append("MILP solver")
plot_dataset(global_path, dataset_name, algorithm_list, x_label="Nb nodes", legend_position="upper left")
| 11,270 | 53.449275 | 224 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/mcnf_do_test.py
|
import random
import numpy as np
import time
from instance_mcnf import generate_instance
from mcnf import *
from simulated_annealing import simulated_annealing_unsplittable_flows
from VNS_masri import VNS_masri
from ant_colony import ant_colony_optimiser
# Here you choose the setting of the instances and of the solvers
# Size of the graph
# size_list = [10]*10
# size_list = [3, 4, 5, 6, 7, 9, 10, 12, 13, 15]
size_list = [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 20]
# size_list = [30, 50, 70, 100, 130, 160, 200, 250, 300, 400]
size_list = np.array(size_list)
# size_list = size_list**2
# Capacity of the arcs of the graph
capacity_list = [10000] * len(size_list)
# capacity_list = [3] * len(size_list)
# capacity_list = [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]
# Threshold of actualisation of the heuristic
actulisation_threshold_list = None
# actulisation_threshold_list = 2 ** (np.arange(10) + 4)
# Upper bound on the size of the commodities
max_demand_list = [1500] * len(size_list)
# max_demand_list = [2] * len(size_list)
# max_demand_list = [int(np.sqrt(capa)) for capa in capacity_list]
test_list = []
for size, capacity, max_demand in zip(size_list, capacity_list, max_demand_list):
test_list += [("grid", (size, size, size, 2*size, capacity, capacity), {"max_demand" : max_demand, "smaller_commodities" : False})]
# test_list += [("grid", (size, size, size, 2*size, capacity, capacity), {"max_demand" : max_demand, "smaller_commodities" : True})]
# test_list += [("random_connected", (size, 5/size, int(size * 0.1), capacity), {"max_demand" : max_demand, "smaller_commodities" : False})]
# Choice of the tested algorithms
tested_algorithms = []
tested_algorithms.append("RR")
# tested_algorithms.append("RR sorted")
# tested_algorithms.append("RR congestion")
tested_algorithms.append("SRR")
# tested_algorithms.append("SRR unsorted")
# tested_algorithms.append("SRR congestion")
tested_algorithms.append("CSRR")
tested_algorithms.append("SA")
# tested_algorithms.append("SA 2")
# tested_algorithms.append("MILP solver")
# tested_algorithms.append("VNS")
# tested_algorithms.append("VNS 2")
# tested_algorithms.append("Ant colony")
results_dict = {algorithm_name : ([],[]) for algorithm_name in tested_algorithms}
i = -1
nb_commodity_list = []
nb_node_list = []
for graph_type, graph_generator_inputs, demand_generator_inputs in test_list:
i += 1
print("############################## ", i,"/",len(test_list))
# Instance generation
graph, commodity_list, initial_solution, origin_list = generate_instance(graph_type, graph_generator_inputs, demand_generator_inputs)
total_demand = sum([c[2] for c in commodity_list])
nb_nodes = len(graph)
nb_commodities = len(commodity_list)
print("total_demand is : ", total_demand)
print("nb_commodities = ", nb_commodities)
nb_commodity_list.append(len(commodity_list))
nb_node_list.append(nb_nodes)
    # Set the default actualisation threshold for the heuristic
if actulisation_threshold_list is None:
actualisation_threshold = None
else:
actualisation_threshold = actulisation_threshold_list[i]
    # Apply each algorithm listed in tested_algorithms
for algorithm_name in tested_algorithms:
print("Running {}".format(algorithm_name))
temp = time.time()
if algorithm_name == "RR" : a = randomized_rounding_heuristic(graph, commodity_list, actualisation=False, sorted_commodities=False)
if algorithm_name == "RR sorted" : a = randomized_rounding_heuristic(graph, commodity_list, actualisation=False)
if algorithm_name == "RR congestion" : a = randomized_rounding_heuristic(graph, commodity_list, actualisation=False, linear_objectif="congestion")
if algorithm_name == "SRR" : a = randomized_rounding_heuristic(graph, commodity_list)
if algorithm_name == "SRR unsorted" : a = randomized_rounding_heuristic(graph, commodity_list, sorted_commodities=False)
if algorithm_name == "SRR congestion" : a = randomized_rounding_heuristic(graph, commodity_list, linear_objectif="congestion")
if algorithm_name == "CSRR" : a = randomized_rounding_heuristic(graph, commodity_list, proof_constaint=True)
if algorithm_name == "SA" : a = simulated_annealing_unsplittable_flows(graph, commodity_list, nb_iterations= int(len(commodity_list)**1.5 * 2))
if algorithm_name == "SA 2" : a = simulated_annealing_unsplittable_flows(graph, commodity_list, nb_iterations= int(len(commodity_list)**1.5 * 6))
if algorithm_name == "MILP solver" : a = gurobi_unsplittable_flows(graph, commodity_list, time_limit=1000)
if algorithm_name == "VNS" : a = VNS_masri(graph, commodity_list, nb_iterations= 100)
if algorithm_name == "VNS 2" : a = VNS_masri(graph, commodity_list, nb_iterations= int(len(commodity_list)**1.5), amelioration=True)
if algorithm_name == "Ant colony" : a = ant_colony_optimiser(graph, commodity_list, nb_iterations=50)
commodity_path_list, total_overload = a
computing_time = time.time() - temp
results_dict[algorithm_name][0].append(total_overload / total_demand)
results_dict[algorithm_name][1].append(computing_time)
print("Performance = ", total_overload / total_demand)
print("Computing_time = ", computing_time)
use_graph = [{neighbor : 0 for neighbor in graph[node]} for node in range(len(graph))]
for commodity_index, path in enumerate(commodity_path_list):
update_graph_capacity(use_graph, path, -commodity_list[commodity_index][2])
overload_graph = [{neighbor : max(0, use_graph[node][neighbor] - graph[node][neighbor]) for neighbor in graph[node]} for node in range(len(graph))]
overload = sum([sum(dct.values()) for dct in overload_graph])
print("Overload = ", overload)
print()
| 5,855 | 47.8 | 155 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/simulated_annealing.py
|
import random
import numpy as np
import heapq as hp
import time
from k_shortest_path import k_shortest_path_algorithm, k_shortest_path_all_destination
def simulated_annealing_unsplittable_flows(graph, commodity_list, nb_iterations=10**5, nb_k_shortest_paths=10, verbose=0):
nb_nodes = len(graph)
nb_commodities = len(commodity_list)
# Set the initial/final temperature and the temperature decrement
T = T_init = 200
T_final = 1
dT = (T_final/T_init)**(1/nb_iterations)
all_distances = compute_all_distances(graph)
    log_values = - np.log(1 - np.arange(0, 1, 0.001))  # quantiles of an exponential(1) distribution, used below as random arc costs
use_graph = [{neighbor : 0 for neighbor in graph[node]} for node in range(nb_nodes)] # Will contain the total flow used on each arc
# Compute the k-shortest paths for each commodity and store them in possible_paths_per_commodity
shortest_paths_per_origin = {}
possible_paths_per_commodity = []
for commodity_index, commodity in enumerate(commodity_list):
origin, destination, demand = commodity
if origin not in shortest_paths_per_origin:
shortest_paths_per_origin[origin] = k_shortest_path_all_destination(graph, origin, nb_k_shortest_paths)
path_and_cost_list = shortest_paths_per_origin[origin][destination]
possible_paths_per_commodity.append([remove_cycle_from_path(path) for path, cost in path_and_cost_list])
solution = []
fitness = 0
    # Create an initial solution
for commodity_index, commodity in enumerate(commodity_list):
new_path_index = np.random.choice(len(possible_paths_per_commodity[commodity_index]))
new_path = possible_paths_per_commodity[commodity_index][new_path_index]
solution.append(new_path)
fitness += update_fitness_and_use_graph(use_graph, graph, [], new_path, commodity[2])
# Main loop
for iter_index in range(nb_iterations):
if verbose and iter_index %1000 == 0:
print(iter_index, fitness, end=' \r')
commodity_index = random.randint(0, nb_commodities - 1) # Choose the commodity which will change its path during the iteration
commodity = commodity_list[commodity_index]
nb_possible_paths = len(possible_paths_per_commodity[commodity_index])
if nb_possible_paths < 5 or random.random() < 0.:
if nb_possible_paths >= 10:
index_to_remove = np.random.choice(len(possible_paths_per_commodity[commodity_index]))
possible_paths_per_commodity[commodity_index].pop(index_to_remove)
# Create a new possible path for the commodity : this procedure considers the current overflow of the arcs
new_path = get_new_path(graph, use_graph, commodity, log_values, all_distances, T=3)
possible_paths_per_commodity[commodity_index].append(new_path)
else:
# Choose a random new_path for the commodity
new_path_index = np.random.choice(len(possible_paths_per_commodity[commodity_index]))
new_path = possible_paths_per_commodity[commodity_index][new_path_index]
# Change the path used by the commodity and modify the fitness accordingly
new_fitness = fitness + update_fitness_and_use_graph(use_graph, graph, solution[commodity_index], new_path, commodity[2])
old_path = solution[commodity_index]
solution[commodity_index] = new_path
proba = np.exp((fitness - new_fitness) / T)
T = T * dT
# Keep the new solution according to the simulated annealing rule or return to the old solution
if new_fitness <= fitness or random.random() < proba:
fitness = new_fitness
else:
solution[commodity_index] = old_path
update_fitness_and_use_graph(use_graph, graph, new_path, old_path, commodity[2]) # Modify use_graph
return solution, fitness
def update_fitness_and_use_graph(use_graph, graph, old_path, new_path, commodity_demand):
# This function makes the updates necessary to reflect the fact that a commodity uses new_path instead of old_path
    # To do so, it updates use_graph (total flow going through each arc) and computes the fitness change in delta_fitness
delta_fitness = 0
for i in range(len(old_path) - 1):
node1 = old_path[i]
node2 = old_path[i+1]
old_overload = max(use_graph[node1][node2] - graph[node1][node2], 0)
use_graph[node1][node2] -= commodity_demand
delta_fitness += max(use_graph[node1][node2] - graph[node1][node2], 0) - old_overload
for i in range(len(new_path) - 1):
node1 = new_path[i]
node2 = new_path[i+1]
old_overload = max(use_graph[node1][node2] - graph[node1][node2], 0)
use_graph[node1][node2] += commodity_demand
delta_fitness += max(use_graph[node1][node2] - graph[node1][node2], 0) - old_overload
return delta_fitness
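# Worked example (illustrative): with graph = [{1: 10}, {}] and use_graph = [{1: 8}, {}],
# update_fitness_and_use_graph(use_graph, graph, [], [0, 1], 5) pushes the flow on
# arc (0, 1) from 8 to 13 and returns the overload increase 3 = max(13 - 10, 0).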
def get_new_path(graph, use_graph, commodity, log_values, all_distances, T=1, overload_coeff=10**-2):
    # This function creates a random path for a commodity, biased toward short paths and arcs with low overflow
    # To do so, an A* algorithm is applied with random arc costs and a penalisation of the overflow
origin, destination, demand = commodity
    priority_q = [(0, origin, None)] # Priority queue storing the nodes to explore : encoded as a binary heap
parent_list = [None] * len(graph)
distances = [None] * len(graph)
while priority_q:
value, current_node, parent_node = hp.heappop(priority_q)
if distances[current_node] is None:
parent_list[current_node] = parent_node
distances[current_node] = value
if current_node == destination:
break
for neighbor in graph[current_node]:
if distances[neighbor] is None:
arc_cost = 1 # Each arc has length 1 to guide toward shorter paths
arc_cost += overload_coeff * max(0, demand + use_graph[current_node][neighbor] - graph[current_node][neighbor]) # Penalize the arc with high overflow
arc_cost += log_values[int(random.random() * 1000)] # Add some randomness to the arc cost
arc_cost += all_distances[neighbor][destination] # A* heuristic : distance to the target node
hp.heappush(priority_q, (value + arc_cost, neighbor, current_node))
# Compute a reverse path according to parent_list
path = [destination]
current_node = destination
while current_node != origin:
current_node = parent_list[current_node]
path.append(current_node)
path.reverse()
return path
def get_new_path_2(graph, use_graph, commodity, all_distances, T=1, overload_coeff=None):
origin, destination, demand = commodity
current_node = origin
new_path = [current_node]
if overload_coeff is None:
overload_coeff = 10**-3
i = 0
while current_node != destination:
i+=1
if i%20==0:
overload_coeff /= 10
neighbor_list = list(graph[current_node].keys())
arc_efficiency_list = []
l = []
for neighbor in neighbor_list:
if True or neighbor in graph[current_node] and graph[current_node][neighbor] > 0:
arc_efficiency_list.append(all_distances[neighbor][destination] + 1 + overload_coeff * max(0, demand + use_graph[current_node][neighbor] - graph[current_node][neighbor]))
l.append(neighbor)
arc_efficiency_list = np.array(arc_efficiency_list)
neighbor_list = l
proba = np.exp(- arc_efficiency_list * T)
proba = proba/sum(proba)
current_node = np.random.choice(neighbor_list, p=proba)
new_path.append(current_node)
return new_path
def compute_all_distances(graph):
nb_nodes = len(graph)
all_distances = []
unitary_graph = [{neighbor : 1 for neighbor in graph[node]} for node in range(nb_nodes)]
for initial_node in range(nb_nodes):
parent_list, distances = dijkstra(unitary_graph, initial_node)
for i in range(len(distances)):
if distances[i] is None:
distances[i] = 10.**10
all_distances.append(distances)
return all_distances
def dijkstra(graph, initial_node, destination_node=None):
priority_q = [(0, initial_node, None)]
parent_list = [None] * len(graph)
distances = [None] * len(graph)
while priority_q:
value, current_node, parent_node = hp.heappop(priority_q)
if distances[current_node] is None:
parent_list[current_node] = parent_node
distances[current_node] = value
if current_node == destination_node:
return parent_list, distances
for neighbor in graph[current_node]:
if distances[neighbor] is None:
hp.heappush(priority_q, (value + graph[current_node][neighbor], neighbor, current_node))
return parent_list, distances
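# Example (illustrative): for the unit-cost graph [{1: 1}, {2: 1}, {}],
# dijkstra(graph, 0) returns parent_list = [None, 0, 1] and distances = [0, 1, 2].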
def remove_cycle_from_path(path):
is_in_path = set()
new_path = []
for node in path:
if node in is_in_path:
while new_path[-1] != node:
node_to_remove = new_path.pop()
is_in_path.remove(node_to_remove)
else:
is_in_path.add(node)
new_path.append(node)
return new_path
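# Example: remove_cycle_from_path([0, 1, 2, 1, 3]) returns [0, 1, 3] because the
# cycle 1 -> 2 -> 1 is removed.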
if __name__ == "__main__":
pass
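    # Usage sketch on a toy instance (illustrative; assumes the k_shortest_path
    # module imported above is available). Graph format: graph[node][neighbor] = capacity,
    # commodity = (origin, destination, demand).
    toy_graph = [{1: 2, 2: 2}, {3: 2}, {3: 2}, {}]
    toy_commodities = [(0, 3, 1), (0, 3, 1), (0, 3, 1)]
    solution, overload = simulated_annealing_unsplittable_flows(toy_graph, toy_commodities, nb_iterations=2000)
    print("Chosen paths :", solution, "- total overload :", overload)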
| 9,393 | 38.974468 | 186 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/mcnf_continuous.py
|
import numpy as np
import random
import time
import heapq as hp
import gurobipy
from mcnf_heuristics import find_fitting_most_capacited_path, compute_all_shortest_path
def gurobi_overload_sum_solver(graph, commodity_list, use_graph=None, flow_upper_bound_graph=None, verbose=0, proof_constaint=False, return_model=False):
    # LP program that solves the multicommodity flow problem with the following objective function : minimize the sum of the arc overloads
nb_nodes = len(graph)
nb_commodities = len(commodity_list)
if use_graph is None:
use_graph = [{neighbor : 0 for neighbor in graph[node]} for node in range(nb_nodes)]
    # we aggregate the commodities by origin : this creates a super commodity
# this process does not change the results of the continuous solver
super_commodity_dict = {}
for origin, destination, demand in commodity_list:
if origin not in super_commodity_dict:
super_commodity_dict[origin] = {}
if destination not in super_commodity_dict[origin]:
super_commodity_dict[origin][destination] = 0
super_commodity_dict[origin][destination] += demand
arc_list = [(node, neighbor) for node in range(nb_nodes) for neighbor in graph[node]]
capacities = [graph[node][neighbor] for node, neighbor in arc_list]
commodities = super_commodity_dict.keys()
# Create optimization model
model = gurobipy.Model('netflow')
model.modelSense = gurobipy.GRB.MINIMIZE
model.Params.OutputFlag = verbose>1
# Create variables
flow_var = model.addVars(commodities, arc_list, obj=0, name="flow_var") # flow variables
overload_var = model.addVars(arc_list, obj=1, name="overload_var") # overload variables : we want to minimize their sum
# Arc capacity constraints :
capacity_constraint_dict = model.addConstrs((flow_var.sum('*', node, neighbor) + use_graph[node][neighbor] - overload_var[node, neighbor] <= graph[node][neighbor] for node, neighbor in arc_list), "cap")
if proof_constaint:
model.addConstrs(flow_var.sum('*', node, neighbor) <= flow_upper_bound_graph[node][neighbor] for node, neighbor in arc_list)
# Flow conservation constraints
for origin in super_commodity_dict:
for node in range(nb_nodes):
rhs = 0
if node == origin:
rhs += sum(super_commodity_dict[origin].values())
if node in super_commodity_dict[origin]:
rhs += -super_commodity_dict[origin][node]
model.addConstr((flow_var.sum(origin,node,'*') - flow_var.sum(origin,'*',node) == rhs), "node{}_{}".format(node, origin))
model.update()
if return_model:
return model, overload_var, flow_var, super_commodity_dict
# Launching the model
model.optimize()
    # Getting the results from the solver : the allocation of each super commodity and the remaining capacity of each arc
if model.status == gurobipy.GRB.Status.OPTIMAL:
overload_values = model.getAttr('x', overload_var)
if verbose : print("Result = ", sum(overload_values.values()))
if proof_constaint:
remaining_capacity_graph = [{neighbor : min(flow_upper_bound_graph[node][neighbor], graph[node][neighbor] + overload_values[(node, neighbor)] - use_graph[node][neighbor]) for neighbor in graph[node]} for node in range(nb_nodes)]
else:
remaining_capacity_graph = [{neighbor : graph[node][neighbor] + overload_values[(node, neighbor)] - use_graph[node][neighbor] for neighbor in graph[node]} for node in range(nb_nodes)]
allocation_graph_per_origin = {}
flow_values = model.getAttr('x', flow_var)
for origin in super_commodity_dict:
allocation_graph = [{} for node in range(nb_nodes)]
for node,neighbor in arc_list:
allocation_graph[node][neighbor] = flow_values[origin,node,neighbor]
allocation_graph_per_origin[origin] = allocation_graph
return allocation_graph_per_origin, remaining_capacity_graph
else:
print("Solver exit status : ", model.status)
def gurobi_congestion_solver(graph, commodity_list, use_graph=None, flow_upper_bound_graph=None, verbose=0, proof_constaint=False, return_model=False, second_objective=False):
# LP program that solves the multicommodity flow problem with the following objective function : minimize the maximum arc overload (i.e. the congestion)
nb_nodes = len(graph)
nb_commodities = len(commodity_list)
if use_graph is None:
use_graph = [{neighbor : 0 for neighbor in graph[node]} for node in range(nb_nodes)]
    # we aggregate the commodities by origin : this creates one super commodity per origin
    # this process does not change the result of the continuous solver
super_commodity_dict = {}
for origin, destination, demand in commodity_list:
if origin not in super_commodity_dict:
super_commodity_dict[origin] = {}
if destination not in super_commodity_dict[origin]:
super_commodity_dict[origin][destination] = 0
super_commodity_dict[origin][destination] += demand
arc_list = [(node, neighbor) for node in range(nb_nodes) for neighbor in graph[node]]
capacities = [graph[node][neighbor] for node, neighbor in arc_list]
commodities = super_commodity_dict.keys()
# Create optimization model
model = gurobipy.Model('netflow')
model.modelSense = gurobipy.GRB.MINIMIZE
model.Params.OutputFlag = verbose
# Create variables
flow_var = model.addVars(commodities, arc_list, obj=0, name="flow_var") # flow variables
overload_var = model.addVar(obj=1, name="overload_var") # overload variable
if second_objective:
        overload_var_sum = model.addVars(arc_list, obj=0, name="overload_var_sum") # secondary overload variables : their sum is minimized as a secondary objective
model.setObjectiveN(sum(overload_var_sum.values()),1)
model.addConstrs((flow_var.sum('*',node,neighbor) + use_graph[node][neighbor] <= graph[node][neighbor] * (1 + overload_var_sum[(node, neighbor)]) for node,neighbor in arc_list), "cap")
# Arc capacity constraints :
model.addConstrs((flow_var.sum('*',node,neighbor) + use_graph[node][neighbor] <= graph[node][neighbor] * overload_var for node,neighbor in arc_list), "cap")
if proof_constaint:
model.addConstrs(flow_var.sum('*',node,neighbor) <= flow_upper_bound_graph[node][neighbor] for node, neighbor in arc_list)
# Flow conservation constraints
for origin in super_commodity_dict:
for node in range(nb_nodes):
rhs = 0
if node == origin:
rhs += sum(super_commodity_dict[origin].values())
if node in super_commodity_dict[origin]:
rhs += -super_commodity_dict[origin][node]
model.addConstr((flow_var.sum(origin,node,'*') - flow_var.sum(origin,'*',node) == rhs), "node{}_{}".format(node, origin))
model.update()
if return_model:
return model, overload_var, flow_var, super_commodity_dict
# Launching the model
model.optimize()
# Getting the results from the solver : the allocation of each super commodity and the total necessary capacity
if model.status == gurobipy.GRB.Status.OPTIMAL:
overload_values = overload_var.X
if verbose : print("Result = ", overload_values)
if proof_constaint:
remaining_capacity_graph = [{neighbor : min(flow_upper_bound_graph[node][neighbor], graph[node][neighbor] * overload_values - use_graph[node][neighbor]) for neighbor in graph[node]} for node in range(nb_nodes)]
else:
remaining_capacity_graph = [{neighbor : graph[node][neighbor] * overload_values - use_graph[node][neighbor] for neighbor in graph[node]} for node in range(nb_nodes)]
allocation_graph_per_origin = {}
flow_values = model.getAttr('x', flow_var)
for origin in super_commodity_dict:
allocation_graph = [{} for node in range(nb_nodes)]
for node,neighbor in arc_list:
allocation_graph[node][neighbor] = flow_values[origin,node,neighbor]
allocation_graph_per_origin[origin] = allocation_graph
return allocation_graph_per_origin, remaining_capacity_graph
else:
print("Solver exit status : ", model.status)
if __name__ == "__main__":
pass
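    # Minimal usage sketch (not part of the original experiments; assumes a working
    # gurobipy installation with a valid licence). Two commodities of demand 3 are
    # routed from node 0 to node 2 on a small triangle graph whose arcs all have
    # capacity 5, so the solver should report zero overload.
    toy_graph = [{1: 5, 2: 5}, {2: 5}, {}]
    toy_commodity_list = [(0, 2, 3), (0, 2, 3)]
    allocation_graph_per_origin, remaining_capacity_graph = gurobi_overload_sum_solver(toy_graph, toy_commodity_list, verbose=1)
    print("remaining capacities : ", remaining_capacity_graph)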
| 8,387 | 43.617021 | 240 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/create_and_store_instances.py
|
import random
import numpy as np
import time
import pickle
from instance_mcnf import generate_instance
# Here you choose the setting of the instances
nb_repetitions = 100
nb_unique_exp = 10
# Size of the graph : controls the number of nodes and arcs
size_list = [10]*nb_unique_exp
# size_list = [5, 7, 8, 10, 12, 13, 14, 16, 18, 20]
# size_list = [3, 4, 5, 6, 7, 9, 10, 12, 13, 15]
# size_list = [30, 50, 70, 100, 130, 160, 200, 250, 300, 400]
# Capacity of the arcs of the graph
capacity_list = [1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]
# capacity_list = [10000] * nb_unique_exp
# capacity_list = [3] * nb_unique_exp
# Upper bound on the size of the commodities
# max_demand_list = [1500] * nb_unique_exp
# max_demand_list = [2] * nb_unique_exp
max_demand_list = [int(np.sqrt(capacity)) for capacity in capacity_list]
# Choose here the type of graph to be created: note that the size parameter does not have the same meaning for both types
instance_parameter_list = []
for size, capacity, max_demand in zip(size_list, capacity_list, max_demand_list):
instance_parameter_list.append(("grid", (size, size, size, 2*size, capacity, capacity), {"max_demand" : max_demand, "smaller_commodities" : False}))
# instance_parameter_list.append(("grid", (size, size, size, 2*size, capacity, capacity), {"max_demand" : max_demand, "smaller_commodities" : True}))
# instance_parameter_list += [("random_connected", (size, 5/size, int(size * 0.1), capacity), {"max_demand" : max_demand, "smaller_commodities" : False})]
# Complete with the path to the main directory
global_path = "/home/francois/Desktop/"
# Complete name of the directory that will contain the instances
# experience_string = "graph_scaling_dataset/"
# experience_string = "graph_scaling_dataset_small_commodities/"
# experience_string = "graph_scaling_dataset_random/"
experience_string = "commodity_scaling_dataset/"
# experience_string = "small_instance_dataset/"
instance_name_list = []
for graph_type, graph_generator_inputs, demand_generator_inputs in instance_parameter_list:
for repetition_index in range(nb_repetitions):
# Generate the graph and the commodity list
graph, commodity_list, initial_solution, origin_list = generate_instance(graph_type, graph_generator_inputs, demand_generator_inputs)
max_demand = demand_generator_inputs["max_demand"]
if graph_type == "grid":
size, _, _, _, capacity, _ = graph_generator_inputs
nb_nodes = size ** 2 + size
if graph_type == "random_connected":
nb_nodes, _, _, capacity = graph_generator_inputs
instance_name = graph_type + "_" + str(nb_nodes) + "_" + str(capacity) + "_" + str(max_demand) + "_" + str(repetition_index)
# Store the created instance
instance_file = open(global_path + "MCNF_solver/instance_files/" + experience_string + instance_name + ".p", "wb" )
pickle.dump((graph, commodity_list), instance_file)
instance_file.close()
instance_name_list.append(instance_name)
# Create a file containing the name of all the instances
instance_name_file = open(global_path + "MCNF_solver/instance_files/" + experience_string + "instance_name_file.p", "wb" )
pickle.dump(instance_name_list, instance_name_file)
instance_name_file.close()
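# Sanity check (a sketch, assuming the paths above are valid on this machine) :
# reload the last stored instance to verify that the pickle round-trips correctly.
check_file = open(global_path + "MCNF_solver/instance_files/" + experience_string + instance_name_list[-1] + ".p", "rb")
graph_check, commodity_list_check = pickle.load(check_file)
check_file.close()
assert len(graph_check) == len(graph) and len(commodity_list_check) == len(commodity_list)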
| 3,307 | 43.702703 | 158 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/ant_colony.py
|
import heapq as hp
import random
import numpy as np
import time
def ant_colony_optimiser(graph, commodity_list, nb_iterations, verbose=0):
nb_nodes = len(graph)
nb_commodities = len(commodity_list)
nb_edges = sum(len(neighbor_dict) for neighbor_dict in graph)
# Setting hyper-parameters
evaporation_factor = 0.9
reset_counter = 0
reset_threshold = 50
solution, solution_value = None, 10**15
best_solution, best_solution_value = None, 10**15
pheromone_trails_per_commodity = np.ones((nb_commodities, nb_edges)) * 0.5 # Initialize pheromone trails
all_distances = compute_all_distances(graph)
edge_list = [(node, neighbor) for node in range(nb_nodes) for neighbor in graph[node]]
    edge_index_dict = {edge : edge_index for edge_index, edge in enumerate(edge_list)}
use_graph = [{neighbor : 0 for neighbor in graph[node]} for node in range(nb_nodes)]
commodities_order = list(range(nb_commodities))
# Main loop
for iter_index in range(nb_iterations):
        if iter_index % 1 == 0 and verbose:
            print(iter_index, solution_value)
temp = time.time()
old_solution = solution
solution = [None]*nb_commodities
solution_value = 0
use_graph = [{neighbor : 0 for neighbor in graph[node]} for node in range(nb_nodes)]
# Create a new solution with the ant colony method
for commodity_index in commodities_order:
commodity = commodity_list[commodity_index]
path = compute_path(graph, use_graph, all_distances, pheromone_trails_per_commodity[commodity_index], edge_index_dict, commodity)
solution[commodity_index] = path
solution_value += update_fitness_and_use_graph(use_graph, graph, [], path, commodity[2])
# Apply local search
solution_value = local_search(solution, use_graph, graph, solution_value, commodity_list)
# Keep the solution if it is the best so far
if best_solution_value >= solution_value:
best_solution = solution
best_solution_value = solution_value
reset_counter = 0
else:
reset_counter += 1
# Update the pheromones
pheromone_trails_per_commodity *= evaporation_factor
update_pheromones(pheromone_trails_per_commodity, edge_index_dict, best_solution, evaporation_factor)
        # Reset the pheromones if no improvement is made for a large number of iterations
if reset_counter >= reset_threshold:
pheromone_trails_per_commodity = np.ones((nb_commodities, nb_edges)) * 0.5
solution = None
reset_counter = 0
return best_solution, best_solution_value
def update_pheromones(pheromone_trails_per_commodity, edge_index_dict, best_solution, evaporation_factor):
    # This function updates the pheromones of an ant colony optimiser in the hyper-cube framework
for commodity_index, path in enumerate(best_solution):
pheromone_trails = pheromone_trails_per_commodity[commodity_index]
for node_index in range(len(path) - 1):
node, neighbor = path[node_index], path[node_index + 1]
edge_index = edge_index_dict[node, neighbor]
pheromone_trails[edge_index] += (1 - evaporation_factor)
pheromone_trails[edge_index] = max(0.001, min(0.999, pheromone_trails[edge_index]))
def compute_path(graph, use_graph, all_distances, pheromone_trails, edge_index_dict, commodity, beta=2, gamma=3, p0=0.4, reset_length=25):
    # This function creates a new path for a commodity with the ant colony method : an ant performs a guided random walk on the graph
nb_nodes = len(graph)
origin, destination, demand = commodity
current_node = origin
visited = [False] * nb_nodes
visited[current_node] = True
path_with_cycles = [current_node]
while current_node != destination:
neighbor_list = [neighbor for neighbor in graph[current_node] if not visited[neighbor]]
if neighbor_list == []:
neighbor_list = [neighbor for neighbor in graph[current_node]]
# Compute all heuristic information necessary to choose the next arc
pheromone_array = np.array([pheromone_trails[edge_index_dict[current_node, neighbor]] for neighbor in neighbor_list])
pheromone_array = np.maximum(0.001, pheromone_array)
use_array = np.array([use_graph[current_node][neighbor] for neighbor in neighbor_list])
if np.sum(use_array) == 0:
use_heuristic = use_array + 1
else:
use_heuristic = np.maximum(0.001, 1 - use_array / np.sum(use_array))
distance_heuristic = np.array([all_distances[neighbor][destination] for neighbor in neighbor_list])
distance_heuristic = 1 / np.maximum(1, distance_heuristic)
proba_list = use_heuristic**beta * distance_heuristic**gamma * pheromone_array
# Choose the next arc
if random.random() < p0:
neighbor_index = np.argmax(proba_list)
else:
proba_list = proba_list / sum(proba_list)
neighbor_index = np.random.choice(len(neighbor_list), p=proba_list)
neighbor = neighbor_list[neighbor_index]
visited[neighbor] = True
path_with_cycles.append(neighbor)
current_node = neighbor
        # Reset the path if it becomes too long
        if len(path_with_cycles) > reset_length:
current_node = origin
visited = [False] * nb_nodes
visited[current_node] = True
path_with_cycles = [current_node]
# Cycle deletion
path = []
in_path = [False] * nb_nodes
    for node in path_with_cycles:
        if in_path[node]:
            while path[-1] != node:
                popped_node = path.pop()
                in_path[popped_node] = False
        else:
            path.append(node)
            in_path[node] = True
return path
def local_search(solution, use_graph, graph, solution_value, commodity_list):
counter = 0
while counter < 3:
arc = find_random_most_congested_arc(use_graph, graph)
commodities_on_congested_arc = find_commodities_on_arc(solution, arc)
best_congestion_reduction = 0
best_reduction_move = None
node1, node2 = arc
current_congestion = use_graph[node1][node2] / graph[node1][node2]
for commodity_index, node1_index in commodities_on_congested_arc:
origin, destination, demand = commodity_list[commodity_index]
path = solution[commodity_index]
if node1_index > 0 and node2 in graph[path[node1_index - 1]]:
# Try skip first node
node1_predecessor = path[node1_index - 1]
congestion_reduction = current_congestion - (use_graph[node1_predecessor][node2] + demand) / graph[node1_predecessor][node2]
if congestion_reduction > best_congestion_reduction :
best_congestion_reduction = congestion_reduction
best_reduction_move = (commodity_index, path[ : node1_index] + path[node1_index + 1 :])
if node1_index < len(path)-2 and path[node1_index + 2] in graph[node1]:
# Try skip second node
node2_successor = path[node1_index + 2]
congestion_reduction = current_congestion - (use_graph[node1][node2_successor] + demand) / graph[node1][node2_successor]
if congestion_reduction > best_congestion_reduction :
best_congestion_reduction = congestion_reduction
best_reduction_move = (commodity_index, path[ : node1_index + 1] + path[node1_index + 2 :])
            for added_node in [neighbor for neighbor in graph[node1] if node2 in graph[neighbor]]:
# Try add intermediate node
new_congestion1 = (use_graph[node1][added_node] + demand) / graph[node1][added_node]
new_congestion2 = (use_graph[added_node][node2] + demand) / graph[added_node][node2]
congestion_reduction = current_congestion - max(new_congestion1, new_congestion2)
if congestion_reduction > best_congestion_reduction :
best_congestion_reduction = congestion_reduction
best_reduction_move = (commodity_index, path[ : node1_index + 1]+ [added_node] + path[node1_index + 1 :])
        if best_congestion_reduction > 0:
            changed_commodity_index, new_path = best_reduction_move
            solution_value += update_fitness_and_use_graph(use_graph, graph, solution[changed_commodity_index], new_path, commodity_list[changed_commodity_index][2])
            solution[changed_commodity_index] = new_path
else:
counter += 1
return solution_value
def find_random_most_congested_arc(use_graph, graph):
nb_nodes = len(graph)
largest_congestion = 0
most_congested_arc_list = []
for node in range(nb_nodes):
for neighbor in graph[node]:
congestion = use_graph[node][neighbor] / graph[node][neighbor]
if congestion > largest_congestion:
largest_congestion = congestion
most_congested_arc_list = []
if congestion >= largest_congestion:
most_congested_arc_list.append((node, neighbor))
chosen_arc_index = np.random.choice(len(most_congested_arc_list))
return most_congested_arc_list[chosen_arc_index]
def find_commodities_on_arc(solution, arc):
commodities_on_arc = []
for commodity_index, path in enumerate(solution):
for node_index in range(len(path) - 1):
            if arc == (path[node_index], path[node_index + 1]):
commodities_on_arc.append((commodity_index, node_index))
break
return commodities_on_arc
def compute_all_distances(graph):
nb_nodes = len(graph)
all_distances = []
unitary_graph = [{neighbor : 1 for neighbor in graph[node]} for node in range(nb_nodes)]
for initial_node in range(nb_nodes):
parent_list, distances = dijkstra(unitary_graph, initial_node)
for i in range(len(distances)):
if distances[i] is None:
distances[i] = 10.**10
all_distances.append(distances)
return all_distances
def dijkstra(graph, initial_node, destination_node=None):
priority_q = [(0, initial_node, None)]
parent_list = [None] * len(graph)
distances = [None] * len(graph)
while priority_q:
value, current_node, parent_node = hp.heappop(priority_q)
if distances[current_node] is None:
parent_list[current_node] = parent_node
distances[current_node] = value
if current_node == destination_node:
return parent_list, distances
for neighbor in graph[current_node]:
if distances[neighbor] is None:
hp.heappush(priority_q, (value + graph[current_node][neighbor], neighbor, current_node))
return parent_list, distances
def update_fitness_and_use_graph(use_graph, graph, old_path, new_path, commodity_demand):
# This function makes the updates necessary to reflect the fact that a commodity uses new_path instead of old_path
    # To do so, it updates use_graph (the total flow going through each arc) and returns the resulting change of fitness as delta_fitness
delta_fitness = 0
for i in range(len(old_path) - 1):
node1 = old_path[i]
node2 = old_path[i+1]
old_overload = max(use_graph[node1][node2] - graph[node1][node2], 0)
use_graph[node1][node2] -= commodity_demand
delta_fitness += max(use_graph[node1][node2] - graph[node1][node2], 0) - old_overload
for i in range(len(new_path) - 1):
node1 = new_path[i]
node2 = new_path[i+1]
old_overload = max(use_graph[node1][node2] - graph[node1][node2], 0)
use_graph[node1][node2] += commodity_demand
delta_fitness += max(use_graph[node1][node2] - graph[node1][node2], 0) - old_overload
return delta_fitness
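if __name__ == "__main__":
    # Quick sanity check (a sketch, not from the original repository) : routing a
    # demand of 3 over two arcs of capacity 2 creates an overload of 1 on each arc,
    # so update_fitness_and_use_graph should return a fitness change of 2.
    toy_graph = [{1: 2}, {2: 2}, {}]
    toy_use_graph = [{1: 0}, {2: 0}, {}]
    delta_fitness = update_fitness_and_use_graph(toy_use_graph, toy_graph, [], [0, 1, 2], 3)
    assert delta_fitness == 2
    print("overload created :", delta_fitness)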
| 12,009 | 40.701389 | 157 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/launch_dataset_test.py
|
import random
import time
import pickle
from multiprocessing import Process, Manager
from instance_mcnf import generate_instance, mutate_instance
from mcnf import *
from VNS_masri import VNS_masri
from ant_colony import ant_colony_optimiser
from simulated_annealing import simulated_annealing_unsplittable_flows
def launch_dataset(global_path, dataset_name, algorithm_list, nb_repetitions, nb_workers, duration_before_timeout):
    # Launches all the algorithms to test on the instances present in the dataset directory
    # The number of times each algorithm is launched is decided with nb_repetitions
# Open the file containing the name of the instances
instance_name_file = open(global_path + "/MCNF_solver/instance_files/" + dataset_name + "/instance_name_file.p", "rb" )
instance_name_list = pickle.load(instance_name_file)
instance_name_file.close()
log_file = open(global_path + "/MCNF_solver/log_file.txt", 'w')
log_file.write("Start\n")
log_file.close()
manager = Manager()
result_dict = {algorithm_name : {instance_name : [None]*nb_repetitions for instance_name in instance_name_list} for algorithm_name in algorithm_list}
    worker_list = [] # Will contain the processes running in parallel. Each process runs one algorithm on one instance
computation_list = [(repetition_index, instance_index, instance_name, algorithm_name) for repetition_index in range(nb_repetitions)
for instance_index, instance_name in enumerate(instance_name_list)
for algorithm_name in algorithm_list]
while len(computation_list) + len(worker_list) > 0:
remaining_worker_list = []
for process, start_time, return_list, computation_info in worker_list:
repetition_index, instance_index, instance_name, algorithm_name = computation_info
if not process.is_alive():
                # If the process terminated on its own, store its results
result_dict[algorithm_name][instance_name][repetition_index] = return_list[0]
elif time.time() > start_time + duration_before_timeout:
# If the process used more than the maximum computing time, kill it and store an error result
process.terminate()
                result_dict[algorithm_name][instance_name][repetition_index] = (None, None, duration_before_timeout)
else:
# Let the worker continue
remaining_worker_list.append((process, start_time, return_list, computation_info))
worker_list = remaining_worker_list
        if len(worker_list) < nb_workers and len(computation_list) > 0: # If a worker slot is free and there are still experiments to launch
computation_info = computation_list.pop(0)
repetition_index, instance_index, instance_name, algorithm_name = computation_info
print_string = "repetition : {0}/{1}, instance : {2}/{3}, algorithm : {4}".format(repetition_index, nb_repetitions, instance_index, len(instance_name_list), algorithm_name)
instance_file_path = global_path + "/MCNF_solver/instance_files/" + dataset_name + "/" + instance_name + ".p"
return_list = manager.list()
            # Create a worker that launches an algorithm through the function launch_solver_on_instance
process = Process(target=launch_solver_on_instance, args=(instance_file_path, algorithm_name, print_string, global_path, return_list))
start_time = time.time()
process.start()
worker_list.append((process, start_time, return_list, computation_info))
# Write the results in a file
result_file = open(global_path + "/MCNF_solver/instance_files/" + dataset_name + "/result_file.p", "wb" )
pickle.dump(result_dict, result_file)
result_file.close()
def launch_solver_on_instance(instance_file_path, algorithm_name, print_string, global_path, return_list):
    # Launch the algorithm named algorithm_name on the instance stored in the file at instance_file_path
print(print_string)
# Read the instance in the instance file
instance_file = open(instance_file_path, "rb" )
graph, commodity_list = pickle.load(instance_file)
instance_file.close()
total_demand = sum([commodity[2] for commodity in commodity_list])
nb_commodities = len(commodity_list)
nb_nodes = len(graph)
temp = time.time()
# Launch the chosen algorithm
if algorithm_name == "RR" : a = randomized_rounding_heuristic(graph, commodity_list, actualisation=False, sorted_commodities=False)
if algorithm_name == "RR sorted" : a = randomized_rounding_heuristic(graph, commodity_list, actualisation=False)
if algorithm_name == "RR congestion" : a = randomized_rounding_heuristic(graph, commodity_list, actualisation=False, linear_objectif="congestion")
if algorithm_name == "SRR" : a = randomized_rounding_heuristic(graph, commodity_list)
if algorithm_name == "SRR unsorted" : a = randomized_rounding_heuristic(graph, commodity_list, sorted_commodities=False)
if algorithm_name == "SRR congestion" : a = randomized_rounding_heuristic(graph, commodity_list, linear_objectif="congestion")
if algorithm_name == "CSRR" : a = randomized_rounding_heuristic(graph, commodity_list, proof_constaint=True)
if algorithm_name == "SA" : a = simulated_annealing_unsplittable_flows(graph, commodity_list, nb_iterations= int(len(commodity_list)**1.5 * 2))
if algorithm_name == "SA 2" : a = simulated_annealing_unsplittable_flows(graph, commodity_list, nb_iterations= int(len(commodity_list)**1.5 * 6))
if algorithm_name == "MILP solver" : a = gurobi_unsplittable_flows(graph, commodity_list, time_limit=1000)
if algorithm_name == "VNS" : a = VNS_masri(graph, commodity_list, nb_iterations= 100)
if algorithm_name == "VNS 2" : a = VNS_masri(graph, commodity_list, nb_iterations= int(len(commodity_list)**1.5), amelioration=True)
if algorithm_name == "Ant colony" : a = ant_colony_optimiser(graph, commodity_list, nb_iterations=50)
computing_time = time.time() - temp
commodity_path_list, total_overload = a
performance = total_overload / total_demand
log_file = open(global_path + "/MCNF_solver/log_file.txt", 'a')
log_file.write("Finished : " + instance_file_path + ", " + print_string + "\n")
log_file.close()
return_list.append((performance, total_overload, computing_time))
if __name__ == "__main__":
# Set the path to the global directory
global_path = "/home/disc/f.lamothe"
# global_path = "/home/francois/Desktop"
# assert False, "Complete global_path with the path to the main directory"
    # Set the number of parallel workers and the timeout for each run
nb_workers = 15
duration_before_timeout = 2*60*60
settings_list = []
settings_list.append(("graph_scaling_dataset", ["RR", 'RR sorted', "SRR", 'SRR unsorted', "CSRR", "SA", "SA 2", "VNS", "VNS 2", "Ant colony"], 1))
settings_list.append(("graph_scaling_dataset_small_commodities", ["RR", 'RR sorted', "SRR", 'SRR unsorted', "CSRR", "SA", "SA 2", "VNS", "VNS 2", "Ant colony"], 1))
settings_list.append(("graph_scaling_dataset_random", ["RR", 'RR sorted', "SRR", 'SRR unsorted', "CSRR", "SA", "SA 2", "VNS", "VNS 2", "Ant colony"], 1))
settings_list.append(("commodity_scaling_dataset", ["RR", 'RR sorted', "SRR", 'SRR unsorted', "CSRR", "SA", "SA 2", "VNS", "VNS 2", "Ant colony"], 1))
settings_list.append(("small_instance_dataset", ["RR", "SRR", "MILP solver"], 1))
for dataset_name, algorithm_list, nb_repetitions in settings_list:
launch_dataset(global_path, dataset_name, algorithm_list, nb_repetitions, nb_workers, duration_before_timeout)
| 7,873 | 56.897059 | 184 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/mcnf.py
|
import heapq as hp
import random
import numpy as np
import time
import gurobipy
from mcnf_continuous import gurobi_congestion_solver, gurobi_overload_sum_solver
from mcnf_heuristics import find_fitting_most_capacited_path
def gurobi_unsplittable_flows(graph, commodity_list, verbose=0, time_limit=None):
# MILP model that solves the unsplittable flow problem with an arc-node formulation
nb_nodes = len(graph)
nb_edges = sum([len(d) for d in graph])
nb_commodities = len(commodity_list)
arcs = [(node, neighbor) for node in range(nb_nodes) for neighbor in graph[node]]
capacities = [graph[node][neighbor] for node, neighbor in arcs]
commodities = range(nb_commodities)
# Create optimization model
model = gurobipy.Model('netflow')
model.modelSense = gurobipy.GRB.MINIMIZE
model.Params.OutputFlag = verbose
model.Params.Method = 2
if time_limit is not None:
model.Params.TimeLimit = time_limit
# Create variables
flow = model.addVars(commodities, arcs, vtype=gurobipy.GRB.BINARY, name="flow") # flow variables
overload_var = model.addVars(arcs, obj=1, name="lambda") # overload variables : we want to minimize their sum
# Arc capacity constraints :
model.addConstrs((sum([flow[commodity_index, node, neighbor] * commodity[2] for commodity_index, commodity in enumerate(commodity_list)]) - overload_var[node, neighbor] <= graph[node][neighbor] for node,neighbor in arcs), "cap")
# Flow conservation constraints
for commodity_index, commodity in enumerate(commodity_list):
origin, destination, demand = commodity
# print(origin, destination)
for node in range(nb_nodes):
if node == destination:
rhs = -1
elif node == origin:
rhs = 1
else:
rhs = 0
model.addConstr((flow.sum(commodity_index,node,'*') - flow.sum(commodity_index,'*',node) == rhs), "node{}_{}".format(node, origin))
# Launching the model
model.update()
model.optimize()
# Getting the results from the solver : the allocation of each super commodity and the total necessary capacity
if model.status == gurobipy.GRB.Status.OPTIMAL or model.status == gurobipy.GRB.Status.TIME_LIMIT:
remaining_capacity_graph = [{neighbor : graph[node][neighbor] for neighbor in graph[node]} for node in range(nb_nodes)]
solution = model.getAttr('x', overload_var)
for node,neighbor in arcs:
remaining_capacity_graph[node][neighbor] += solution[node,neighbor]
commodity_path_list = []
solution = model.getAttr('x', flow)
for commodity_index in range(nb_commodities):
origin, destination, demand = commodity_list[commodity_index]
allocation_graph = [{} for node in range(nb_nodes)]
for node,neighbor in arcs:
allocation_graph[node][neighbor] = solution[commodity_index,node,neighbor]
path, path_capacity = find_fitting_most_capacited_path(allocation_graph, allocation_graph, origin, destination, 1)
commodity_path_list.append(path)
return commodity_path_list, model.objVal
else:
return None, model.objVal
def remove_cycle_from_path(path):
is_in_path = set()
new_path = []
for node in path:
if node in is_in_path:
while new_path[-1] != node:
node_to_remove = new_path.pop()
is_in_path.remove(node_to_remove)
else:
is_in_path.add(node)
new_path.append(node)
return new_path
def is_correct_path(graph, commodity, path):
# function that checks if a path is valid for a commodity in an instance
origin, destination, demand = commodity
is_correct = path[0] == origin and path[-1] == destination
for node_index in range(len(path)-1):
node, neighbor = path[node_index], path[node_index+1]
is_correct = is_correct and neighbor in graph[node]
if not is_correct:
break
return is_correct
def randomized_rounding_heuristic(graph, commodity_list, actualisation=True, actualisation_threshold=None, proof_constaint=False, sorted_commodities=True,
linear_objectif="overload_sum", verbose=0):
    # randomized rounding heuristic :
    # - it uses information retrieved from the continuous solution to create an unsplittable solution
    # - if actualisation = False and actualisation_threshold = None, no actualisation is done (as in Raghavan and Thompson's algorithm)
    # - actualisation_threshold : when that many commodities have their path fixed while they used several paths in the linear relaxation, the linear relaxation is actualised
    # - proof_constaint : tells the linear solver to add the constraint that makes this algorithm an approximation algorithm
    # - sorted_commodities : decides if the commodities are allocated in the graph in decreasing demand order
    # - linear_objectif : decides if we minimize the sum of the overloads or the maximum overload
nb_nodes = len(graph)
nb_commodities = len(commodity_list)
# Create the default actualisation_threshold
if actualisation_threshold is None:
if actualisation:
actualisation_threshold = nb_nodes * 0.25
else:
actualisation_threshold = nb_commodities + 1
counter = actualisation_threshold + 1
use_graph = [{neighbor : 0 for neighbor in graph[node]} for node in range(nb_nodes)] # record the capacity used by the assigned commodities
flow_upper_bound_graph = [{neighbor : graph[node][neighbor] for neighbor in graph[node]} for node in range(nb_nodes)]
commodity_path_list = [[] for i in range(nb_commodities)] # store the path of the commodities : the solution of the problem
commodities_order = list(range(nb_commodities))
random.shuffle(commodities_order)
if sorted_commodities:
        # Commodities are sorted by decreasing demand so that the biggest are assigned first and the smallest fill the gaps
commodities_order.sort(key= lambda commodity_index : commodity_list[commodity_index][2])
t = [0]*2
# main loop : 1 commodity is assigned in each iteration
while commodities_order != []:
        # at the beginning, or when the solution deviates too much from the previously computed continuous solution :
        # compute a new continuous solution with the unassigned commodities
if counter > actualisation_threshold:
temp = time.time()
if verbose : print()
counter = 0
remaining_commodities = [commodity_list[index] for index in commodities_order]
if linear_objectif == "overload_sum":
allocation_graph_per_origin, remaining_capacity_graph = gurobi_overload_sum_solver(graph, remaining_commodities, use_graph=use_graph, flow_upper_bound_graph=flow_upper_bound_graph, proof_constaint=proof_constaint, verbose=max(0,verbose-1))
elif linear_objectif == "congestion":
allocation_graph_per_origin, remaining_capacity_graph = gurobi_congestion_solver(graph, remaining_commodities, use_graph=use_graph, flow_upper_bound_graph=flow_upper_bound_graph, proof_constaint=proof_constaint, verbose=max(0,verbose-1))
else:
assert False, "Objectif not implemented, please check your spelling or contribute"
t[0] += time.time() - temp
temp = time.time()
# Select a commodity
commodity_index = commodities_order.pop()
origin, destination, demand = commodity_list[commodity_index]
allocation_graph = allocation_graph_per_origin[origin]
# Extract the paths of the commodity from the linear relaxation results
remaining_demand = demand
path_list = []
used_capacity_list = []
while remaining_demand > 10**-6:
path, path_capacity = find_fitting_most_capacited_path(allocation_graph, remaining_capacity_graph, origin, destination, demand)
if path is None or path_capacity <= 10**-5:
path, path_capacity = find_fitting_most_capacited_path(allocation_graph, remaining_capacity_graph, origin, destination, -10**10)
used_capacity = min(path_capacity, remaining_demand)
path_list.append(path)
used_capacity_list.append(used_capacity)
remaining_demand -= used_capacity
update_graph_capacity(allocation_graph, path, used_capacity)
update_graph_capacity(flow_upper_bound_graph, path, used_capacity)
if len(path_list) > 1:
counter += 1
# Choose a path for the commodity
proba_list = np.array(used_capacity_list) / sum(used_capacity_list)
chosen_path_index = np.random.choice(len(path_list), p=proba_list)
path = path_list[chosen_path_index]
        # allocate the commodity and update the capacities in the different graphs
update_graph_capacity(use_graph, path, -demand)
update_graph_capacity(remaining_capacity_graph, path, demand)
commodity_path_list[commodity_index] = path
t[1] += time.time() - temp
# a bit of printing
if len(commodities_order) % 100 == 0 or counter > actualisation_threshold:
if verbose : print(len(commodities_order), sum([commodity_list[index][2] for index in commodities_order]), end=" \r")
# compute metrics for the overload
overload_graph = [{neighbor : max(0, use_graph[node][neighbor] - graph[node][neighbor]) for neighbor in graph[node]} for node in range(len(graph))]
congestion_graph = [{neighbor : use_graph[node][neighbor] / graph[node][neighbor] for neighbor in graph[node] if graph[node][neighbor] > 0} for node in range(len(graph))]
total_overload = sum([sum(dct.values()) for dct in overload_graph])
if verbose :
print("total_overload is ", total_overload)
print("Congestion is ", max([max(list(dct.values())+[0]) for dct in congestion_graph]))
print(t)
return commodity_path_list, total_overload
def update_graph_capacity(graph, path, demand, reverse_graph=False):
    # decreases the capacities of the arcs of "graph" along the path "path" by the demand of a commodity
    # it also returns the amount of new overload created
    # negative demands are allowed so that capacities can be increased instead of decreased
new_overload = 0
for i in range(len(path)-1):
node = path[i]
neighbor = path[i+1]
if reverse_graph:
node, neighbor = neighbor, node
old_overload = - min(0, graph[node][neighbor])
graph[node][neighbor] -= demand
new_overload += - min(0, graph[node][neighbor]) - old_overload
return new_overload
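if __name__ == "__main__":
    # Small usage sketch (not part of the original experiments; assumes a working
    # gurobipy installation with a valid licence) : run the randomized rounding
    # heuristic on a toy instance where both commodities fit without overload.
    toy_graph = [{1: 5, 2: 5}, {2: 5}, {}]
    toy_commodity_list = [(0, 2, 3), (0, 2, 3)]
    toy_path_list, toy_overload = randomized_rounding_heuristic(toy_graph, toy_commodity_list)
    print("paths :", toy_path_list, ", total overload :", toy_overload)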
| 10,814 | 44.441176 | 255 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/mcnf_heuristics.py
|
import random
import numpy as np
import heapq as hp
def single_source_mcnf_preprocessing(reverse_graph, commodity_list):
commodity_path_list = [[] for c in commodity_list]
process_graph = [{neighbor : reverse_graph[node][neighbor] for neighbor in reverse_graph[node]} for node in range(len(reverse_graph))]
commodity_indices = sorted(list(range(len(commodity_list))), key = lambda x : commodity_list[x][2], reverse=True)
for commodity_index in commodity_indices:
origin, destination, demand = commodity_list[commodity_index]
path = commodity_path_list[commodity_index]
current_node = destination
while current_node != origin:
for neighbor in process_graph[current_node]:
if process_graph[current_node][neighbor] >= demand:
process_graph[current_node][neighbor] -= demand
path.append(current_node)
current_node = neighbor
break
else:
while path != []:
previous_node = path.pop()
process_graph[previous_node][current_node] += demand
current_node = previous_node
break
if current_node == origin:
path.append(origin)
path.reverse()
return process_graph, commodity_path_list
def allocate_biggest_commodities(reverse_graph, commodity_list, nb_max_failure=0):
commodity_path_list = [[] for c in commodity_list]
process_graph = [{neighbor : reverse_graph[neighbor][node] for neighbor in range(len(reverse_graph)) if node in reverse_graph[neighbor]} for node in range(len(reverse_graph))]
commodity_indices = sorted(list(range(len(commodity_list))), key = lambda x : commodity_list[x][2], reverse=True)
nb_failure = 0
for commodity_index in commodity_indices:
origin, destination, demand = commodity_list[commodity_index]
        path, path_capacity = find_fitting_most_capacited_path(process_graph, process_graph, origin, destination, -10**10)
        overload = update_graph_capacity(process_graph, path, demand)
commodity_path_list[commodity_index] = path
if overload > 0:
nb_failure += 1
if nb_failure > nb_max_failure:
return process_graph, commodity_path_list
return process_graph, commodity_path_list
def find_fitting_most_capacited_path(graph1, graph2, origin, destination, minimum_capacity):
# this is a dijkstra like algorithm that computes the most capacited path from the origin to the destination
# the best capacity is according to graph1 but only edges with capacities >= minimum_capacity in graph2 are taken into account
priority_q = [(-10.**10, origin, None)]
parent_list = [None] * len(graph1)
visited = [False]*len(graph1)
best_capacity = [None] * len(graph1)
while priority_q != []:
c, current_node, parent_node = hp.heappop(priority_q)
capa_of_current_node = -c
if not visited[current_node]:
visited[current_node] = True
parent_list[current_node] = parent_node
best_capacity[current_node] = capa_of_current_node
if current_node == destination:
break
for neighbor in graph1[current_node]:
if not visited[neighbor] and graph2[current_node][neighbor] >= minimum_capacity:
hp.heappush(priority_q, (-min(capa_of_current_node, graph1[current_node][neighbor]), neighbor, current_node))
if parent_list[destination] is None:
return None, None
path = [destination]
current_node = destination
while current_node != origin:
current_node = parent_list[current_node]
path.append(current_node)
path.reverse()
return path, best_capacity[destination]
def find_shortest_path(graph, origin, destination, minimum_capacity):
# this is a breadth first search algorithm
pile = [(origin, None)]
parent_list = [None] * len(graph)
visited = [0]*len(graph)
visited[origin] = 1
while pile != []:
current_node, parent_node = pile.pop(0)
parent_list[current_node] = parent_node
if current_node == destination:
break
neighbor_list = list(graph[current_node].keys())
random.shuffle(neighbor_list)
for neighbor in neighbor_list:
if not visited[neighbor] and graph[current_node][neighbor] >= minimum_capacity:
pile.append((neighbor, current_node))
visited[neighbor] = 1
if parent_list[destination] is None:
return
path = [destination]
current_node = destination
while current_node != origin:
current_node = parent_list[current_node]
path.append(current_node)
path.reverse()
return path
def find_shortest_path2(graph, origin, destination):
# this is a breadth first search algorithm
pile = [(origin, None)]
parent_list = [None] * len(graph)
visited = [0]*len(graph)
visited[origin] = 1
path_capacity = [None]*len(graph)
while pile != []:
current_node, parent_node = pile.pop(0)
parent_list[current_node] = parent_node
if current_node != origin:
path_capacity[current_node] = min(path_capacity[parent_node], graph[parent_node][current_node])
else:
path_capacity[origin] = 10**10
if current_node == destination:
break
neighbor_list = list(graph[current_node].keys())
random.shuffle(neighbor_list)
for neighbor in neighbor_list:
if not visited[neighbor] and graph[current_node][neighbor] > 0:
pile.append((neighbor, current_node))
visited[neighbor] = 1
if parent_list[destination] is None:
return None, None
path = [destination]
current_node = destination
while current_node != origin:
current_node = parent_list[current_node]
path.append(current_node)
path.reverse()
return path, path_capacity[destination]
def find_shortest_path_double_graph(graph1, graph2, origin, destination, minimum_capacity1, minimum_capacity2):
# this is a breadth first search algorithm where edges must verify capacity condition in the 2 graphs
pile = [(origin, None)]
parent_list = [None] * len(graph1)
visited = [0]*len(graph1)
visited[origin] = 1
while pile != []:
current_node, parent_node = pile.pop(0)
parent_list[current_node] = parent_node
if current_node == destination:
break
neighbor_list = list(graph1[current_node].keys())
random.shuffle(neighbor_list)
for neighbor in neighbor_list:
if not visited[neighbor] and graph1[current_node][neighbor] >= minimum_capacity1 and graph2[current_node][neighbor] >= minimum_capacity2:
pile.append((neighbor, current_node))
visited[neighbor] = 1
if parent_list[destination] is None:
return
path = [destination]
current_node = destination
while current_node != origin:
current_node = parent_list[current_node]
path.append(current_node)
path.reverse()
return path
def compute_all_shortest_path(graph, origin_list):
nb_nodes = len(graph)
all_shortest_path = {}
for origin in origin_list:
parent_list, distances = dijkstra(graph, origin)
shortest_path_list = [None for node in range(nb_nodes)]
shortest_path_list[origin] = [origin]
for node in range(nb_nodes):
if shortest_path_list[node] is None and parent_list[node] is not None:
compute_shortest_path(shortest_path_list, parent_list, node)
all_shortest_path[origin] = [(shortest_path_list[node], distances[node]) for node in range(nb_nodes)]
return all_shortest_path
def compute_shortest_path(shortest_path_list, parent_list, node):
parent = parent_list[node]
if shortest_path_list[parent] is None:
compute_shortest_path(shortest_path_list, parent_list, parent)
shortest_path_list[node] = shortest_path_list[parent] + [node]
def dijkstra(graph, initial_node, destination_node=None):
    priority_q = [(0, initial_node, None)]
parent_list = [None] * len(graph)
distances = [None] * len(graph)
while priority_q:
value, current_node, parent_node = hp.heappop(priority_q)
if distances[current_node] is None:
parent_list[current_node] = parent_node
distances[current_node] = value
if current_node == destination_node:
return parent_list, distances
for neighbor in graph[current_node]:
if distances[neighbor] is None:
hp.heappush(priority_q, (value + graph[current_node][neighbor], neighbor, current_node))
return parent_list, distances
def update_graph_capacity(graph, path, demand, reverse_graph=False):
overload = 0
for i in range(len(path)-1):
node = path[i]
neighbor = path[i+1]
if reverse_graph:
node, neighbor = neighbor, node
graph[node][neighbor] -= demand
overload += max(0, min(-graph[node][neighbor], demand))
return overload
def is_strongly_connected(graph):
nb_nodes = len(graph)
for initial_node in range(nb_nodes):
reachable = [False]*nb_nodes
reachable[initial_node] = True
pile = [initial_node]
nb_reachable = 1
while pile:
current_node = pile.pop()
for neighbor in graph[current_node]:
if not reachable[neighbor]:
reachable[neighbor] = True
pile.append(neighbor)
nb_reachable += 1
if nb_reachable < nb_nodes:
print(nb_reachable, nb_nodes)
return False
return True
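if __name__ == "__main__":
    # Usage sketch (a minimal example, not from the original repository) : most
    # capacited path on a toy graph. The arc 0 -> 2 has capacity 1 < 2 and is
    # filtered out, so the returned path is 0 -> 1 -> 2 with bottleneck capacity 3.
    toy_graph = [{1: 4, 2: 1}, {2: 3}, {}]
    toy_path, toy_capacity = find_fitting_most_capacited_path(toy_graph, toy_graph, 0, 2, 2)
    assert toy_path == [0, 1, 2] and toy_capacity == 3
    print(toy_path, toy_capacity)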
| 9,909 | 32.255034 | 179 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/instance_mcnf.py
|
import random
import heapq as hp
import numpy as np
import time
def generate_instance(graph_type, graph_generator_inputs, demand_generator_inputs):
    # this function generates an instance according to the requested characteristics :
    # - first a graph is generated : a grid graph or a random graph
    # - then commodities are created so that there exists a solution
    # - optional : remaining capacities are erased so that the capacities perfectly fit the solution
if graph_type == "grid":
reverse_graph_generator = generate_grid_reverse_graph
elif graph_type == "random":
reverse_graph_generator = generate_random_reverse_graph
elif graph_type == "random_connected":
reverse_graph_generator = generate_random_connected_reverse_graph
else:
assert False, "No generator for this type of graph is implemented, check your spelling or contribute"
# graph generation
reverse_graph, is_origin_list = reverse_graph_generator(*graph_generator_inputs)
origin_list = [node for node, is_origin in enumerate(is_origin_list) if is_origin]
# commodities generation
commodity_list, path_list = generate_demand(is_origin_list, reverse_graph, **demand_generator_inputs)
# the created graph was reversed so we reverse it
graph = [{neighbor : reverse_graph[neighbor][node] for neighbor in range(len(reverse_graph)) if node in reverse_graph[neighbor]} for node in range(len(reverse_graph))]
return graph, commodity_list, path_list, origin_list
def generate_grid_reverse_graph(nb_origins, nb_row_grid, nb_column_grid, nb_origin_connections, grid_link_capacity=15000, other_link_capacity=10000, local_connection_of_origin = False):
    # generates a grid graph with additional nodes connected to the grid and uniform capacities
# the graph is reversed
reverse_graph = []
for i in range(nb_row_grid * nb_column_grid + nb_origins):
reverse_graph.append({})
# generates the grid
for i in range(nb_row_grid):
for j in range(nb_column_grid):
reverse_graph[i + nb_row_grid * j][(i+1)%nb_row_grid + nb_row_grid * j] = grid_link_capacity
reverse_graph[i + nb_row_grid * j][(i-1)%nb_row_grid + nb_row_grid * j] = grid_link_capacity
reverse_graph[i + nb_row_grid * j][i + nb_row_grid * ((j+1)%nb_column_grid)] = grid_link_capacity
reverse_graph[i + nb_row_grid * j][i + nb_row_grid * ((j-1)%nb_column_grid)] = grid_link_capacity
# adding the additional nodes (the origins, i.e. the gateways/pops)
if local_connection_of_origin:
for d in range(nb_origins):
origin = d + nb_row_grid * nb_column_grid
square_size = int(np.ceil(np.sqrt(nb_origin_connections)))
i = random.randint(0, nb_row_grid-1)
j = random.randint(0, nb_column_grid-1)
count = 0
for k in range(square_size):
for l in range(square_size):
if count < nb_origin_connections:
reverse_graph[(i+k)%nb_row_grid + nb_row_grid * ((j+l)%nb_column_grid)][origin] = other_link_capacity
count += 1
else:
for d in range(nb_origins):
origin = d + nb_row_grid * nb_column_grid
for k in range(nb_origin_connections):
i = random.randint(0, nb_row_grid-1)
j = random.randint(0, nb_column_grid-1)
reverse_graph[i + nb_row_grid * j][origin] = other_link_capacity
is_origin_list = [0] * nb_row_grid * nb_column_grid + [1] * nb_origins
return reverse_graph, is_origin_list
def generate_random_reverse_graph(nb_nodes, edge_proba, nb_origins, arc_capacity):
# generates a random graph with uniform capacities
# the graph is reversed
reverse_graph = [{} for i in range(nb_nodes)]
is_origin_list = [0]*nb_nodes
for node in np.random.choice(nb_nodes, nb_origins, replace=False):
is_origin_list[node] = 1
for node in range(nb_nodes):
for neighbor in range(nb_nodes):
if node != neighbor and random.random() < edge_proba:
reverse_graph[node][neighbor] = arc_capacity
return reverse_graph, is_origin_list
def generate_random_connected_reverse_graph(nb_nodes, edge_proba, nb_origins, arc_capacity):
# generates a random graph with uniform capacities
# the graph is reversed
# the returned graph is always strongly connected
reverse_graph = [{} for i in range(nb_nodes)]
is_origin_list = [0]*nb_nodes
for node in np.random.choice(nb_nodes, nb_origins, replace=False):
is_origin_list[node] = 1
not_root_set = set(range(nb_nodes))
while len(not_root_set) > 0:
initial_node = random.choice(tuple(not_root_set))
reachable = [False]*nb_nodes
reachable[initial_node] = True
pile = [initial_node]
while pile:
current_node = pile.pop()
for neighbor in reverse_graph[current_node]:
if not reachable[neighbor]:
reachable[neighbor] = True
pile.append(neighbor)
unreachable_nodes = [node for node in range(nb_nodes) if not reachable[node]]
if len(unreachable_nodes) == 0:
not_root_set.remove(initial_node)
else:
chosen_node = random.choice(unreachable_nodes)
reverse_graph[initial_node][chosen_node] = arc_capacity
current_nb_edge = sum([len(d) for d in reverse_graph])
edge_proba -= current_nb_edge / (nb_nodes**2 - nb_nodes)
for node in range(nb_nodes):
for neighbor in range(nb_nodes):
if node != neighbor and random.random() < edge_proba:
reverse_graph[node][neighbor] = arc_capacity
return reverse_graph, is_origin_list
def generate_demand(is_origin_list, reverse_graph, random_filling_of_origins=True, random_paths=True, max_demand=1500, delete_residual_capacity=False,
smaller_commodities=False, verbose=0):
    # generates the commodities so that there exists a solution
    # To create one commodity :
    # - a random node is chosen and all the origins attainable from it are computed
    # - one origin is chosen at random together with a random path, and a commodity whose demand fits on the path is created
residual_graph = [{neighbor : reverse_graph[node][neighbor] for neighbor in reverse_graph[node]} for node in range(len(reverse_graph))]
commodity_list = []
path_list = []
possible_destination_nodes = 1 - np.array(is_origin_list)
i = 0
while True:
if i%100 == 0: print(i, end='\r')
i+=1
        # choosing a random non-origin node
destination = np.random.choice(len(is_origin_list), p=possible_destination_nodes / sum(possible_destination_nodes))
# getting all attainable origins
origin_list = get_availables_origins(residual_graph, destination, is_origin_list, random_paths)
        # handling the failure case where no origin is attainable
if origin_list == []:
possible_destination_nodes[destination] = 0
if sum(possible_destination_nodes) == 0:
if verbose:
print()
print("residual value is ",sum([sum(dct.values()) for dct in residual_graph]))
print("full value is ",sum([sum(dct.values()) for dct in reverse_graph]))
                if delete_residual_capacity:
for node, neighbor_dict in enumerate(reverse_graph):
reverse_graph[node] = {neighbor : neighbor_dict[neighbor] - residual_graph[node][neighbor] for neighbor in neighbor_dict}
return commodity_list, path_list
else:
continue
# choosing an origin
if random_filling_of_origins:
origin, path = origin_list[random.randint(0, len(origin_list)-1)]
else:
origin, path = min(origin_list, key=lambda x:x[0])
# allocating the commodity in the graph
min_remaining_capacity = min([residual_graph[path[node_index]][path[node_index+1]] for node_index in range(len(path)-1)])
if smaller_commodities:
used_capacity = random.randint(1, min(min_remaining_capacity, max_demand))
else:
used_capacity = min(min_remaining_capacity, random.randint(1, max_demand))
for node_index in range(len(path)-1):
residual_graph[path[node_index]][path[node_index+1]] -= used_capacity
commodity_list.append((origin, destination, used_capacity))
path.reverse()
path_list.append(path)
def get_availables_origins(residual_graph, initial_node, is_origin_list, random_paths):
    # look for all the origins attainable from the initial_node and a path to each of these origins
pile = [(initial_node, [initial_node])]
visited = [0]*len(residual_graph)
visited[initial_node] = 1
origin_list = []
while pile != []:
if random_paths:
current_node, path = pile.pop(random.randint(0, len(pile)-1))
else:
current_node, path = pile.pop(0)
for neighbor in residual_graph[current_node]:
if residual_graph[current_node][neighbor] > 0 and not visited[neighbor]:
visited[neighbor] = 1
if is_origin_list[neighbor]:
origin_list.append((neighbor, path + [neighbor]))
else:
pile.append((neighbor, path + [neighbor]))
return origin_list
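if __name__ == "__main__":
    # Quick usage sketch (a minimal example, not from the original experiments) :
    # generate a small grid instance. The grid parameters follow the convention used
    # in create_and_store_instances.py :
    # (nb_origins, nb_rows, nb_columns, nb_origin_connections, grid_capacity, origin_capacity).
    toy_graph, toy_commodities, toy_paths, toy_origins = generate_instance(
        "grid", (3, 3, 3, 6, 15, 10), {"max_demand": 5, "smaller_commodities": False})
    print(len(toy_graph), "nodes,", len(toy_commodities), "commodities,", len(toy_origins), "origins")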
| 9,547 | 42.009009 | 185 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/k_shortest_path.py
|
import heapq as hp
import random
import numpy as np
import time
def k_shortest_path_all_destination(graph, origin, k):
nb_nodes = len(graph)
parent_list, distances = dijkstra(graph, origin)
shortest_path_list = [[] for node in range(nb_nodes)]
shortest_path_list[origin].append(([origin], 0))
for node in range(nb_nodes):
if len(shortest_path_list[node]) == 0 and parent_list[node] is not None:
compute_shortest_path(graph, shortest_path_list, parent_list, node)
index_predecessor_shortest_path = [{neighbor : 0 for neighbor in range(nb_nodes) if node in graph[neighbor]} for node in range(nb_nodes)]
for node in range(nb_nodes):
if parent_list[node] is not None:
index_predecessor_shortest_path[node][parent_list[node]] = 1
for node in range(nb_nodes):
while parent_list[node] != None and len(shortest_path_list[node]) != k:
is_new_path_computed = compute_next_shortest_path(graph, shortest_path_list, index_predecessor_shortest_path, node)
if not is_new_path_computed:
break
return shortest_path_list
def k_shortest_path_all_destination_cost_difference(graph, origin, cost_difference):
nb_nodes = len(graph)
parent_list, distances = dijkstra(graph, origin)
shortest_path_list = [[] for node in range(nb_nodes)]
shortest_path_list[origin].append(([origin], 0))
for node in range(nb_nodes):
if len(shortest_path_list[node]) == 0 and parent_list[node] is not None:
compute_shortest_path(graph, shortest_path_list, parent_list, node)
index_predecessor_shortest_path = [{neighbor : 0 for neighbor in range(nb_nodes) if node in graph[neighbor]} for node in range(nb_nodes)]
for node in range(nb_nodes):
if parent_list[node] is not None:
index_predecessor_shortest_path[node][parent_list[node]] = 1
for node in range(nb_nodes):
while parent_list[node] != None and shortest_path_list[node][-1][1] < shortest_path_list[node][0][1] + cost_difference:
is_new_path_computed = compute_next_shortest_path(graph, shortest_path_list, index_predecessor_shortest_path, node)
if not is_new_path_computed:
break
return shortest_path_list
def k_shortest_path_algorithm(graph, origin, destination, k):
nb_nodes = len(graph)
parent_list, distances = dijkstra(graph, origin)
shortest_path_list = [[] for node in range(nb_nodes)]
shortest_path_list[origin].append(([origin], 0))
for node in range(nb_nodes):
if len(shortest_path_list[node]) == 0 and parent_list[node] is not None:
compute_shortest_path(graph, shortest_path_list, parent_list, node)
index_predecessor_shortest_path = [{neighbor : 0 for neighbor in range(nb_nodes) if node in graph[neighbor]} for node in range(nb_nodes)]
for node in range(nb_nodes):
if parent_list[node] is not None:
index_predecessor_shortest_path[node][parent_list[node]] = 1
for i in range(k-1):
compute_next_shortest_path(graph, shortest_path_list, index_predecessor_shortest_path, destination)
return shortest_path_list[destination]
def compute_shortest_path(graph, shortest_path_list, parent_list, node):
parent = parent_list[node]
if len(shortest_path_list[parent]) == 0:
compute_shortest_path(graph, shortest_path_list, parent_list, parent)
parent_path, parent_path_cost = shortest_path_list[parent][0]
shortest_path_list[node].append((parent_path + [node], parent_path_cost + graph[parent][node]))
def compute_next_shortest_path(graph, shortest_path_list, index_predecessor_shortest_path, node):
chosen_predecessor = None
for predecessor in index_predecessor_shortest_path[node]:
predecessor_index = index_predecessor_shortest_path[node][predecessor]
if predecessor_index is not None:
if len(shortest_path_list[predecessor]) <= predecessor_index:
is_new_path_computed = compute_next_shortest_path(graph, shortest_path_list, index_predecessor_shortest_path, predecessor)
if not is_new_path_computed:
continue
predecessor_path, predecessor_path_cost = shortest_path_list[predecessor][predecessor_index]
if chosen_predecessor is None or min_length > predecessor_path_cost + graph[predecessor][node]:
min_length = predecessor_path_cost + graph[predecessor][node]
chosen_predecessor = predecessor
if chosen_predecessor is not None:
predecessor_index = index_predecessor_shortest_path[node][chosen_predecessor]
predecessor_path, predecessor_path_cost = shortest_path_list[chosen_predecessor][predecessor_index]
shortest_path_list[node].append((predecessor_path + [node], predecessor_path_cost + graph[chosen_predecessor][node]))
index_predecessor_shortest_path[node][chosen_predecessor] += 1
return True
return False
def dijkstra(graph, initial_node, destination_node=None):
    priority_q = [(0, initial_node, None)]
parent_list = [None] * len(graph)
distances = [None] * len(graph)
while priority_q:
value, current_node, parent_node = hp.heappop(priority_q)
if distances[current_node] is None:
parent_list[current_node] = parent_node
distances[current_node] = value
if current_node == destination_node:
break
for neighbor in graph[current_node]:
if distances[neighbor] is None:
hp.heappush(priority_q, (value + graph[current_node][neighbor], neighbor, current_node))
return parent_list, distances
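if __name__ == "__main__":
    # Usage sketch (a minimal example, not from the original repository) : the 2
    # shortest paths from node 0 to node 3 in a small directed graph with arc costs.
    # Expected output : [0, 1, 2, 3] 3 then [0, 2, 3] 5.
    toy_graph = [{1: 1, 2: 4}, {2: 1, 3: 5}, {3: 1}, {}]
    for toy_path, toy_cost in k_shortest_path_algorithm(toy_graph, 0, 3, 2):
        print(toy_path, toy_cost)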
| 5,728 | 41.437037 | 141 |
py
|
randomized_rounding_paper_code
|
randomized_rounding_paper_code-master/VNS_masri.py
|
import heapq as hp
import random
import numpy as np
import time
import matplotlib.pyplot as plt
from k_shortest_path import k_shortest_path_algorithm, k_shortest_path_all_destination
from simulated_annealing import compute_all_distances
def VNS_masri(graph, commodity_list, nb_iterations, amelioration=False, verbose=0):
# Setting hyper-parameters
nb_modifications = 1
nb_modification_max = 3
nb_k_shortest_paths = 10
nb_commodities = len(commodity_list)
nb_nodes = len(graph)
total_demand = sum([c[2] for c in commodity_list])
# Compute the k-shortest paths for each commodity and store them in possible_paths_per_commodity
k_shortest_path_structure = {}
possible_paths_per_commodity = []
for commodity_index, commodity in enumerate(commodity_list):
origin, destination, demand = commodity
if origin not in k_shortest_path_structure:
k_shortest_path_structure[origin] = k_shortest_path_all_destination(graph, origin, nb_k_shortest_paths)
path_and_cost_list = k_shortest_path_structure[origin][destination]
possible_paths_per_commodity.append([path for path, path_cost in path_and_cost_list])
if verbose: print("possible_paths_per_commodity computed")
use_graph = [{neighbor : 0 for neighbor in graph[node]} for node in range(nb_nodes)]
all_distances = compute_all_distances(graph)
# Create an initial solution and compute its solution value
solution = []
solution_value = 0
for commodity_index, commodity in enumerate(commodity_list):
path_index = np.random.choice(len(possible_paths_per_commodity[commodity_index]))
path = possible_paths_per_commodity[commodity_index][path_index]
solution.append(path)
solution_value += update_fitness_and_use_graph(use_graph, graph, [], path, commodity[2])
# Main loop
for iter_index in range(nb_iterations):
if verbose:
print(iter_index, solution_value, nb_modifications, end=' \r')
# Make several modifications on the current solution and evaluate the new solution
new_solution_value, modification_list = make_modifications(graph, commodity_list, solution, use_graph, possible_paths_per_commodity, nb_modifications, solution_value)
# Keep the new solution if it has a smaller solution value
if new_solution_value < solution_value:
nb_modifications = 1
solution_value = new_solution_value
continue
# Apply local search on the new solution (skipped entirely when amelioration is True)
for local_search_index in range((not amelioration) * nb_commodities // 2):
modified_commodity_index = np.random.choice(nb_commodities)
# Create a new path for a commodity
new_path = create_new_path(graph, use_graph, commodity_list[modified_commodity_index], all_distances)
old_path = solution[modified_commodity_index]
solution[modified_commodity_index] = new_path
new_solution_value += update_fitness_and_use_graph(use_graph, graph, old_path, new_path, commodity_list[modified_commodity_index][2])
if new_solution_value < solution_value:
nb_modifications = 1
solution_value = new_solution_value
break
solution[modified_commodity_index] = old_path
new_solution_value += update_fitness_and_use_graph(use_graph, graph, new_path, old_path, commodity_list[modified_commodity_index][2])
else:
# If the local search ends without finding an improving solution, return to the old solution and change the size of the neighborhood
nb_modifications = min(nb_modification_max, nb_modifications + 1)
for commodity_index, old_path, new_path in modification_list:
solution[commodity_index] = old_path
update_fitness_and_use_graph(use_graph, graph, new_path, old_path, commodity_list[commodity_index][2])
return solution, solution_value
def make_modifications(graph, commodity_list, solution, use_graph, possible_paths_per_commodity, nb_modifications, solution_value):
# Make several modifications on the current solution and evaluate the solution obtained
new_solution_value = solution_value
modification_list = []
modified_commodity_list = np.random.choice(len(commodity_list), size=nb_modifications, replace=False)
for commodity_index in modified_commodity_list:
possible_paths = possible_paths_per_commodity[commodity_index]
old_path = solution[commodity_index]
new_path = possible_paths[random.randint(0, len(possible_paths)-1)]
solution[commodity_index] = new_path
new_solution_value += update_fitness_and_use_graph(use_graph, graph, old_path, new_path, commodity_list[commodity_index][2])
modification_list.append((commodity_index, old_path, new_path))
return new_solution_value, modification_list
def update_fitness_and_use_graph(use_graph, graph, old_path, new_path, commodity_demand):
# This function makes the updates necessary to reflect the fact that a commodity uses new_path instead of old_path
# To do so, it updates use_graph (the total flow going through each arc) and computes the resulting change in fitness (delta_fitness)
delta_fitness = 0
for i in range(len(old_path) - 1):
node1 = old_path[i]
node2 = old_path[i+1]
old_overload = max(use_graph[node1][node2] - graph[node1][node2], 0)
use_graph[node1][node2] -= commodity_demand
delta_fitness += max(use_graph[node1][node2] - graph[node1][node2], 0) - old_overload
for i in range(len(new_path) - 1):
node1 = new_path[i]
node2 = new_path[i+1]
old_overload = max(use_graph[node1][node2] - graph[node1][node2], 0)
use_graph[node1][node2] += commodity_demand
delta_fitness += max(use_graph[node1][node2] - graph[node1][node2], 0) - old_overload
return delta_fitness
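# A small worked example (illustrative values): if arc (0, 1) has capacity 10
# and already carries 5 units, routing a demand of 8 over path [0, 1] gives
#   use_graph = [{1: 5}, {}]; graph = [{1: 10}, {}]
#   update_fitness_and_use_graph(use_graph, graph, [], [0, 1], 8)  # -> 3
# i.e. the arc ends up overloaded by 13 - 10 = 3 units.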
def create_new_path(graph, use_graph, commodity, all_distances, q=0.5, remove_cycles=False, better_heuristic=False):
# Create a new path for a commodity using a guided random walk
origin, destination, demand = commodity
current_node = origin
path_with_cycles = [current_node]
while current_node != destination:
heuristic_information_list = []
neighbor_list = list(graph[current_node].keys())
# Computing the heuristic information for each possible neighbor
for neighbor in graph[current_node]:
x = graph[current_node][neighbor] - use_graph[current_node][neighbor]
if better_heuristic:
heuristic_information_list.append(1 / (1 + all_distances[neighbor][destination]) + 0.5 * (1 + x / (1 + abs(x)) ))
else:
heuristic_information_list.append(1 + 0.5 * (1 + x / (1 + abs(x)) ))
heuristic_information_list = np.array(heuristic_information_list)
# Choosing the next node of the path
if random.random() < q:
neighbor_index = np.argmax(heuristic_information_list)
else:
proba_list = heuristic_information_list/np.sum(heuristic_information_list)
neighbor_index = np.random.choice(len(neighbor_list), p=proba_list)
current_node = neighbor_list[neighbor_index]
path_with_cycles.append(current_node)
if remove_cycles:
# Cycle deletion
path = []
in_path = [False] * len(graph)
for node in path_with_cycles:
if in_path[node]:
while path[-1] != node:
popped_node = path.pop()
in_path[popped_node] = False
else:
path.append(node)
return path
else:
return path_with_cycles
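# A minimal end-to-end sketch (assumed toy instance; graphs are lists of
# {neighbor: capacity} dicts and commodities are (origin, destination, demand)):
# graph = [{1: 5, 2: 5}, {2: 5, 3: 5}, {3: 5}, {}]
# commodity_list = [(0, 3, 4), (0, 3, 3)]
# solution, overload = VNS_masri(graph, commodity_list, nb_iterations=100)
# solution[i] is the path routed for commodity i; overload is the total
# capacity violation of the final assignment.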
| 7,809 | 44.144509 | 174 |
py
|
RWP
|
RWP-main/utils.py
|
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torch.nn.functional as F
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models_imagenet
import numpy as np
import random
import os
import time
import models
import sys
import torch.utils.data as data
from torchvision.datasets.utils import download_url, check_integrity
import os.path
import pickle
from PIL import Image
def set_seed(seed=1):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
class Logger(object):
def __init__(self, fileN="Default.log"):
self.terminal = sys.stdout
self.log = open(fileN,"a")
def write(self,message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
self.terminal.flush()
self.log.flush()
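# Typical usage (this mirrors how the training scripts below redirect stdout):
# sys.stdout = Logger(os.path.join(log_dir, 'train.log'))
# print('logged to both the terminal and the file')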
################################ datasets #######################################
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from torch.utils.data import DataLoader, Subset
from torchvision.datasets import CIFAR10, CIFAR100, ImageFolder
class Cutout:
def __init__(self, size=16, p=0.5):
self.size = size
self.half_size = size // 2
self.p = p
def __call__(self, image):
if torch.rand([1]).item() > self.p:
return image
left = torch.randint(-self.half_size, image.size(1) - self.half_size, [1]).item()
top = torch.randint(-self.half_size, image.size(2) - self.half_size, [1]).item()
right = min(image.size(1), left + self.size)
bottom = min(image.size(2), top + self.size)
image[:, max(0, left): right, max(0, top): bottom] = 0
return image
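# A minimal sketch of wiring Cutout into a pipeline: it indexes (C, H, W)
# tensor dimensions, so it must be placed after ToTensor(), as in
# get_datasets_cutout below (normalize as defined there):
# transform = transforms.Compose([
#     transforms.RandomCrop(32, 4),
#     transforms.ToTensor(),
#     normalize,
#     Cutout(size=16, p=0.5),
# ])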
def get_datasets(args):
if args.datasets == 'CIFAR10':
print ('cifar10 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.datasets == 'CIFAR100':
print ('cifar100 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.datasets == 'ImageNet':
traindir = os.path.join('/home/datasets/ILSVRC2012/', 'train')
valdir = os.path.join('/home/datasets/ILSVRC2012/', 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers)
return train_loader, val_loader
def get_datasets_ddp(args):
if args.datasets == 'CIFAR10':
print ('cifar10 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
my_trainset = datasets.CIFAR10(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(my_trainset)
train_loader = torch.utils.data.DataLoader(my_trainset, batch_size=args.batch_size, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.datasets == 'CIFAR100':
print ('cifar100 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
my_trainset = datasets.CIFAR100(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]), download=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(my_trainset)
train_loader = torch.utils.data.DataLoader(my_trainset, batch_size=args.batch_size, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
return train_loader, val_loader
def get_datasets_cutout(args):
print ('cutout!')
if args.datasets == 'CIFAR10':
print ('cifar10 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
Cutout()
]), download=True),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.datasets == 'CIFAR100':
print ('cifar100 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
Cutout()
]), download=True),
batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
return train_loader, val_loader
def get_datasets_cutout_ddp(args):
if args.datasets == 'CIFAR10':
print ('cifar10 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
my_trainset = datasets.CIFAR10(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
Cutout()
]), download=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(my_trainset)
train_loader = torch.utils.data.DataLoader(my_trainset, batch_size=args.batch_size, sampler=train_sampler, drop_last=True, num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR10(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
elif args.datasets == 'CIFAR100':
print ('cifar100 dataset!')
normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
my_trainset = datasets.CIFAR100(root='./datasets/', train=True, transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
Cutout()
]), download=True)
train_sampler = torch.utils.data.distributed.DistributedSampler(my_trainset)
train_loader = torch.utils.data.DataLoader(my_trainset, batch_size=args.batch_size, sampler=train_sampler, drop_last=True, num_workers=args.workers, pin_memory=True)
val_loader = torch.utils.data.DataLoader(
datasets.CIFAR100(root='./datasets/', train=False, transform=transforms.Compose([
transforms.ToTensor(),
normalize,
])),
batch_size=128, shuffle=False,
num_workers=args.workers, pin_memory=True)
return train_loader, val_loader
def get_model(args):
print('Model: {}'.format(args.arch))
if args.datasets == 'ImageNet':
return models_imagenet.__dict__[args.arch]()
if args.datasets == 'CIFAR10':
num_classes = 10
elif args.datasets == 'CIFAR100':
num_classes = 100
model_cfg = getattr(models, args.arch)
return model_cfg.base(*model_cfg.args, num_classes=num_classes, **model_cfg.kwargs)
class SAM(torch.optim.Optimizer):
def __init__(self, params, base_optimizer, rho=0.05, adaptive=False, **kwargs):
assert rho >= 0.0, f"Invalid rho, should be non-negative: {rho}"
defaults = dict(rho=rho, adaptive=adaptive, **kwargs)
super(SAM, self).__init__(params, defaults)
self.base_optimizer = base_optimizer(self.param_groups, **kwargs)
self.param_groups = self.base_optimizer.param_groups
self.defaults.update(self.base_optimizer.defaults)
@torch.no_grad()
def first_step(self, zero_grad=False):
grad_norm = self._grad_norm()
for group in self.param_groups:
scale = group["rho"] / (grad_norm + 1e-12)
for p in group["params"]:
if p.grad is None: continue
self.state[p]["old_p"] = p.data.clone()
e_w = (torch.pow(p, 2) if group["adaptive"] else 1.0) * p.grad * scale.to(p)
p.add_(e_w) # climb to the local maximum "w + e(w)"
if zero_grad: self.zero_grad()
@torch.no_grad()
def second_step(self, zero_grad=False):
for group in self.param_groups:
for p in group["params"]:
if p.grad is None: continue
p.data = self.state[p]["old_p"] # get back to "w" from "w + e(w)"
self.base_optimizer.step() # do the actual "sharpness-aware" update
if zero_grad: self.zero_grad()
@torch.no_grad()
def step(self, closure=None):
assert closure is not None, "Sharpness Aware Minimization requires closure, but it was not provided"
closure = torch.enable_grad()(closure) # the closure should do a full forward-backward pass
self.first_step(zero_grad=True)
closure()
self.second_step()
def _grad_norm(self):
shared_device = self.param_groups[0]["params"][0].device # put everything on the same device, in case of model parallelism
norm = torch.norm(
torch.stack([
((torch.abs(p) if group["adaptive"] else 1.0) * p.grad).norm(p=2).to(shared_device)
for group in self.param_groups for p in group["params"]
if p.grad is not None
]),
p=2
)
return norm
def load_state_dict(self, state_dict):
super().load_state_dict(state_dict)
self.base_optimizer.param_groups = self.param_groups
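# A minimal training-step sketch for SAM (two forward/backward passes per
# step; model, criterion and train_loader are assumed to exist):
# optimizer = SAM(model.parameters(), torch.optim.SGD, rho=0.05, lr=0.1, momentum=0.9)
# for input, target in train_loader:
#     criterion(model(input), target).backward()
#     optimizer.first_step(zero_grad=True)   # climb to w + e(w)
#     criterion(model(input), target).backward()
#     optimizer.second_step(zero_grad=True)  # update at w with the perturbed gradient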
| 14,352 | 38.215847 | 173 |
py
|
RWP
|
RWP-main/train_rwp_parallel.py
|
import argparse
from torch.nn.modules.batchnorm import _BatchNorm
import os
import time
import numpy as np
import random
import sys
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from utils import *
# Parse arguments
parser = argparse.ArgumentParser(description='DDP RWP training')
parser.add_argument('--EXP', metavar='EXP', help='experiment name', default='SGD')
parser.add_argument('--arch', '-a', metavar='ARCH',
help='The architecture of the model')
parser.add_argument('--datasets', metavar='DATASETS', default='CIFAR10', type=str,
help='The training datasets')
parser.add_argument('--optimizer', metavar='OPTIMIZER', default='sgd', type=str,
help='The optimizer for training')
parser.add_argument('--schedule', metavar='SCHEDULE', default='step', type=str,
help='The schedule for training')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=200, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=128, type=int,
metavar='N', help='mini-batch size (default: 128)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=100, type=int,
metavar='N', help='print frequency (default: 100 iterations)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--wandb', dest='wandb', action='store_true',
help='use wandb to monitor statistics')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--half', dest='half', action='store_true',
help='use half-precision(16-bit) ')
parser.add_argument('--save-dir', dest='save_dir',
help='The directory used to save the trained models',
default='save_temp', type=str)
parser.add_argument('--log-dir', dest='log_dir',
help='The directory used to save the log',
default='save_temp', type=str)
parser.add_argument('--log-name', dest='log_name',
help='The log file name',
default='log', type=str)
parser.add_argument('--randomseed',
help='Randomseed for training and initialization',
type=int, default=1)
parser.add_argument('--cutout', dest='cutout', action='store_true',
help='use cutout data augmentation')
parser.add_argument('--alpha', default=0.5, type=float,
metavar='A', help='alpha for mixing gradients')
parser.add_argument('--gamma', default=0.01, type=float,
metavar='gamma', help='Perturbation magnitude gamma for RWP')
parser.add_argument("--local_rank", default=-1, type=int)
best_prec1 = 0
# Record training statistics
train_loss = []
train_err = []
test_loss = []
test_err = []
arr_time = []
args = parser.parse_args()
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
dist.init_process_group(backend='nccl')
args.world_size = torch.distributed.get_world_size()
args.workers = int((args.workers + args.world_size - 1) / args.world_size)
if args.local_rank == 0:
print ('world size: {} workers per GPU: {}'.format(args.world_size, args.workers))
device = torch.device("cuda", local_rank)
if args.wandb:
import wandb
wandb.init(project="TWA", entity="nblt")
date = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
wandb.run.name = args.EXP + date
def get_model_param_vec(model):
# Return the model parameters as a vector
vec = []
for name,param in model.named_parameters():
vec.append(param.data.detach().reshape(-1))
return torch.cat(vec, 0)
def get_model_grad_vec(model):
# Return the model gradient as a vector
vec = []
for name,param in model.named_parameters():
vec.append(param.grad.detach().reshape(-1))
return torch.cat(vec, 0)
def update_grad(model, grad_vec):
idx = 0
for name,param in model.named_parameters():
arr_shape = param.grad.shape
size = param.grad.numel()
param.grad.data = grad_vec[idx:idx+size].reshape(arr_shape).clone()
idx += size
def update_param(model, param_vec):
idx = 0
for name,param in model.named_parameters():
arr_shape = param.data.shape
size = param.data.numel()
param.data = param_vec[idx:idx+size].reshape(arr_shape).clone()
idx += size
def print_param_shape(model):
for name,param in model.named_parameters():
print (name, param.data.shape)
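# A small consistency sketch for the helpers above (model assumed to exist):
# flattening the parameters and writing them back should be a no-op.
# vec = get_model_param_vec(model)
# update_param(model, vec)
# assert torch.equal(get_model_param_vec(model), vec)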
def main():
global args, best_prec1, p0
global train_loss, train_err, test_loss, test_err, arr_time, running_weight
set_seed(args.randomseed)
# Check the save_dir exists or not
if args.local_rank == 0:
print ('save dir:', args.save_dir)
if not os.path.exists(args.save_dir):
os.makedirs(args.save_dir)
# Check the log_dir exists or not
if args.local_rank == 0:
print ('log dir:', args.log_dir)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
sys.stdout = Logger(os.path.join(args.log_dir, args.log_name))
# Define model
# model = torch.nn.DataParallel(get_model(args))
model = get_model(args).to(device)
model = DDP(model, device_ids=[local_rank], output_device=local_rank)
# print_param_shape(model)
# Optionally resume from a checkpoint
if args.resume:
# if os.path.isfile(args.resume):
if os.path.isfile(os.path.join(args.save_dir, args.resume)):
# model.load_state_dict(torch.load(os.path.join(args.save_dir, args.resume)))
print ("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
print ('from ', args.start_epoch)
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
print ("=> loaded checkpoint '{}' (epoch {})"
.format(args.evaluate, checkpoint['epoch']))
else:
print ("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Prepare Dataloader
print ('cutout:', args.cutout)
if args.cutout:
train_loader, val_loader = get_datasets_cutout_ddp(args)
else:
train_loader, val_loader = get_datasets_ddp(args)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().to(device)
if args.half:
model.half()
criterion.half()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)
# Double the training epochs since each iteration will consume two batches of data for calculating g and g_s
args.epochs = args.epochs * 2
if args.schedule == 'step':
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[int(args.epochs * 0.5), int(args.epochs * 0.75)], last_epoch=args.start_epoch - 1)
elif args.schedule == 'cosine':
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)
if args.evaluate:
validate(val_loader, model, criterion)
return
is_best = 0
print ('Start training: ', args.start_epoch, '->', args.epochs)
print ('gamma:', args.gamma)
print ('len(train_loader):', len(train_loader))
for epoch in range(args.start_epoch, args.epochs):
train_loader.sampler.set_epoch(epoch)
# train for one epoch
if args.local_rank == 0:
print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
train(train_loader, model, criterion, optimizer, epoch)
lr_scheduler.step()
if epoch % 2 == 0: continue
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
if args.local_rank == 0:
save_checkpoint({
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
}, is_best, filename=os.path.join(args.save_dir, 'model.th'))
if args.local_rank == 0:
print ('train loss: ', train_loss)
print ('train err: ', train_err)
print ('test loss: ', test_loss)
print ('test err: ', test_err)
print ('time: ', arr_time)
def train(train_loader, model, criterion, optimizer, epoch):
"""
Run one train epoch
"""
global train_loss, train_err, arr_time
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
total_loss, total_err = 0, 0
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target = target.to(device)
input_var = input.to(device)
target_var = target
if args.half:
input_var = input_var.half()
if args.local_rank % 2 == 1:
weight = args.alpha * 2
with torch.no_grad():
noise = []
for mp in model.parameters():
if len(mp.shape) > 1:
sh = mp.shape
sh_mul = np.prod(sh[1:])
temp = mp.view(sh[0], -1).norm(dim=1, keepdim=True).repeat(1, sh_mul).view(mp.shape)
temp = torch.normal(0, args.gamma*temp).to(mp.data.device)
else:
temp = torch.empty_like(mp, device=mp.data.device)
temp.normal_(0, args.gamma*(mp.view(-1).norm().item() + 1e-16))
noise.append(temp)
mp.data.add_(noise[-1])
else:
weight = (1 - args.alpha) * 2
# compute output
output = model(input_var)
loss = criterion(output, target_var) * weight
optimizer.zero_grad()
loss.backward()
if args.local_rank % 2 == 1:
# remove the noise to return to the original weights
with torch.no_grad():
for mp, n in zip(model.parameters(), noise):
mp.data.sub_(n)
optimizer.step()
total_loss += loss.item() * input_var.shape[0] / weight
total_err += (output.max(dim=1)[1] != target_var).sum().item()
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and (i % args.print_freq == 0 or i == len(train_loader) - 1):
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1))
if args.local_rank == 0:
print ('Total time for epoch [{0}] : {1:.3f}'.format(epoch, batch_time.sum))
tloss = total_loss / len(train_loader.dataset) * args.world_size
terr = total_err / len(train_loader.dataset) * args.world_size
train_loss.append(tloss)
train_err.append(terr)
print ('train loss | acc', tloss, 1 - terr)
if args.wandb:
wandb.log({"train loss": total_loss / len(train_loader.dataset)})
wandb.log({"train acc": 1 - total_err / len(train_loader.dataset)})
arr_time.append(batch_time.sum)
def validate(val_loader, model, criterion, add=True):
"""
Run evaluation
"""
global test_err, test_loss
total_loss = 0
total_err = 0
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
target = target.to(device)
input_var = input.to(device)
target_var = target.to(device)
if args.half:
input_var = input_var.half()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
output = output.float()
loss = loss.float()
total_loss += loss.item() * input_var.shape[0]
total_err += (output.max(dim=1)[1] != target_var).sum().item()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0 and add:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1))
if add:
print(' * Prec@1 {top1.avg:.3f}'
.format(top1=top1))
test_loss.append(total_loss / len(val_loader.dataset))
test_err.append(total_err / len(val_loader.dataset))
if args.wandb:
wandb.log({"test loss": total_loss / len(val_loader.dataset)})
wandb.log({"test acc": 1 - total_err / len(val_loader.dataset)})
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
"""
Save the training model
"""
torch.save(state, filename)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
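# A small worked example for accuracy(): both samples predict class 1, the
# targets are (1, 0), so top-1 precision is 50%.
# output = torch.tensor([[0.1, 0.9], [0.2, 0.8]])
# target = torch.tensor([1, 0])
# accuracy(output, target)  # -> [tensor(50.)]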
if __name__ == '__main__':
main()
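# Example launch (assumption: the torch.distributed.launch entry point, which
# supplies --local_rank; use an even process count so that perturbed and clean
# gradients are mixed across ranks):
# python -m torch.distributed.launch --nproc_per_node=2 train_rwp_parallel.py \
#     --arch resnet18 --datasets CIFAR10 --epochs 200 --alpha 0.5 --gamma 0.01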
| 16,489 | 34.310493 | 165 |
py
|
RWP
|
RWP-main/train_rwp_imagenet.py
|
import argparse
import os
import random
import shutil
import time
import warnings
import os
import numpy as np
import pickle
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
from utils import *
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('--alpha', default=0.5, type=float,
metavar='AA', help='alpha for mixing gradients')
parser.add_argument('--gamma', default=0.01, type=float,
metavar='GAMMA', help='gamma for noise')
parser.add_argument('-p', '--print-freq', default=1000, type=int,
metavar='N', help='print frequency (default: 1000)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
help='distributed backend')
parser.add_argument('--seed', default=42, type=int,
help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
help='GPU id to use.')
parser.add_argument('--save-dir', dest='save_dir',
help='The directory used to save the trained models',
default='save_temp', type=str)
parser.add_argument('--log-dir', dest='log_dir',
help='The directory used to save the log',
default='save_temp', type=str)
parser.add_argument('--log-name', dest='log_name',
help='The log file name',
default='log', type=str)
parser.add_argument('--multiprocessing-distributed', action='store_true',
help='Use multi-processing distributed training to launch '
'N processes per node, which has N GPUs. This is the '
'fastest way to use PyTorch for either single node or '
'multi node data parallel training')
best_acc1 = 0
param_vec = []
# Record training statistics
train_loss = []
train_acc = []
test_loss = []
test_acc = []
arr_time = []
def get_model_grad_vec(model):
# Return the model gradient as a vector
vec = []
for name,param in model.named_parameters():
vec.append(param.grad.detach().reshape(-1))
return torch.cat(vec, 0)
def update_grad(model, grad_vec):
idx = 0
for name,param in model.named_parameters():
arr_shape = param.grad.shape
size = param.grad.numel()
param.grad.data = grad_vec[idx:idx+size].reshape(arr_shape).clone()
idx += size
iters = 0
def get_model_param_vec(model):
# Return the model parameters as a vector
vec = []
for name,param in model.named_parameters():
vec.append(param.detach().cpu().reshape(-1).numpy())
return np.concatenate(vec, 0)
def main():
global train_loss, train_acc, test_loss, test_acc, arr_time
args = parser.parse_args()
print ('gamma:', args.gamma)
save_dir = 'save_' + args.arch
if not os.path.exists(save_dir):
os.makedirs(save_dir)
args.save_dir = save_dir
# Check the log_dir exists or not
# if args.rank == 0:
print ('log dir:', args.log_dir)
if not os.path.exists(args.log_dir):
os.makedirs(args.log_dir)
sys.stdout = Logger(os.path.join(args.log_dir, args.log_name))
print ('log dir:', args.log_dir)
if args.seed is not None:
random.seed(args.seed)
torch.manual_seed(args.seed)
cudnn.deterministic = True
warnings.warn('You have chosen to seed training. '
'This will turn on the CUDNN deterministic setting, '
'which can slow down your training considerably! '
'You may see unexpected behavior when restarting '
'from checkpoints.')
if args.gpu is not None:
warnings.warn('You have chosen a specific GPU. This will completely '
'disable data parallelism.')
if args.dist_url == "env://" and args.world_size == -1:
args.world_size = int(os.environ["WORLD_SIZE"])
args.distributed = args.world_size > 1 or args.multiprocessing_distributed
ngpus_per_node = torch.cuda.device_count()
if args.multiprocessing_distributed:
# Since we have ngpus_per_node processes per node, the total world_size
# needs to be adjusted accordingly
args.world_size = ngpus_per_node * args.world_size
# Use torch.multiprocessing.spawn to launch distributed processes: the
# main_worker process function
mp.spawn(main_worker, nprocs=ngpus_per_node, args=(ngpus_per_node, args))
else:
# Simply call main_worker function
main_worker(args.gpu, ngpus_per_node, args)
sample_idx = 0
def main_worker(gpu, ngpus_per_node, args):
global train_loss, train_acc, test_loss, test_acc, arr_time
global best_acc1, param_vec, sample_idx
args.gpu = gpu
if args.gpu is not None:
print("Use GPU: {} for training".format(args.gpu))
if args.distributed:
if args.dist_url == "env://" and args.rank == -1:
args.rank = int(os.environ["RANK"])
if args.multiprocessing_distributed:
# For multiprocessing distributed training, rank needs to be the
# global rank among all the processes
args.rank = args.rank * ngpus_per_node + gpu
dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
# Double the training epochs since each iteration will consume two batches of data for calculating g and g_s
args.epochs = args.epochs * 2
args.batch_size = args.batch_size * 2
if not torch.cuda.is_available():
print('using CPU, this will be slow')
elif args.distributed:
# For multiprocessing distributed, DistributedDataParallel constructor
# should always set the single device scope, otherwise,
# DistributedDataParallel will use all available devices.
if args.gpu is not None:
torch.cuda.set_device(args.gpu)
model.cuda(args.gpu)
# When using a single GPU per process and per
# DistributedDataParallel, we need to divide the batch size
# ourselves based on the total number of GPUs we have
args.batch_size = int(args.batch_size / ngpus_per_node)
args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
else:
model.cuda()
# DistributedDataParallel will divide and allocate batch_size to all
# available GPUs if device_ids are not set
model = torch.nn.parallel.DistributedDataParallel(model)
elif args.gpu is not None:
torch.cuda.set_device(args.gpu)
model = model.cuda(args.gpu)
else:
# DataParallel will divide and allocate batch_size to all available GPUs
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features)
model.cuda()
else:
model = torch.nn.DataParallel(model).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda(args.gpu)
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
if args.gpu is None:
checkpoint = torch.load(args.resume)
else:
# Map model to be loaded to specified single gpu.
loc = 'cuda:{}'.format(args.gpu)
checkpoint = torch.load(args.resume, map_location=loc)
args.start_epoch = checkpoint['epoch']
best_acc1 = checkpoint['best_acc1']
if args.gpu is not None:
# best_acc1 may be from a checkpoint from a different GPU
best_acc1 = best_acc1.to(args.gpu)
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
else:
train_sampler = None
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
validate(val_loader, model, criterion, args)
return
if not args.multiprocessing_distributed or (args.multiprocessing_distributed and args.rank % ngpus_per_node == 0):
torch.save(model.state_dict(), 'save_' + args.arch + '/' + str(sample_idx)+'.pt')
lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
print('current lr {:.5e}'.format(optimizer.param_groups[0]['lr']))
train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node)
lr_scheduler.step()
# evaluate on validation set
acc1 = validate(val_loader, model, criterion, args)
# remember best acc@1 and save checkpoint
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_acc1': best_acc1,
'optimizer' : optimizer.state_dict(),
}, is_best)
torch.save(model, os.path.join(args.save_dir, 'model.pt'))
print ('train loss: ', train_loss)
print ('train acc: ', train_acc)
print ('test loss: ', test_loss)
print ('test acc: ', test_acc)
print ('time: ', arr_time)
def train(train_loader, model, criterion, optimizer, epoch, args, ngpus_per_node):
global iters, param_vec, sample_idx
global train_loss, train_acc, test_loss, test_acc, arr_time
batch_time = AverageMeter('Time', ':6.3f')
data_time = AverageMeter('Data', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(train_loader),
[batch_time, data_time, losses, top1, top5],
prefix="Epoch: [{}]".format(epoch))
# switch to train mode
model.train()
end = time.time()
epoch_start = end
for i, (images, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
if args.rank % 2 == 1:
weight = args.alpha * 2
##################### grw #############################
noise = []
for mp in model.parameters():
if len(mp.shape) > 1:
sh = mp.shape
sh_mul = np.prod(sh[1:])
temp = mp.view(sh[0], -1).norm(dim=1, keepdim=True).repeat(1, sh_mul).view(mp.shape)
temp = torch.normal(0, args.gamma*temp).to(mp.data.device)
else:
temp = torch.empty_like(mp, device=mp.data.device)
temp.normal_(0, args.gamma*(mp.view(-1).norm().item() + 1e-16))
noise.append(temp)
mp.data.add_(noise[-1])
else:
weight = (1 - args.alpha) * 2
# compute output
output = model(images)
loss = criterion(output, target) * weight
optimizer.zero_grad()
loss.backward()
if args.rank % 2 == 1:
# remove the noise to return to the original weights
with torch.no_grad():
for mp, n in zip(model.parameters(), noise):
mp.data.sub_(n)
optimizer.step()
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item() / weight, images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# compute gradient and do SGD step
# optimizer.zero_grad()
# loss.backward()
# optimizer.step()
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
if i % args.print_freq == 0:
progress.display(i)
if i > 0 and i % 1000 == 0 and i < 5000:
sample_idx += 1
# torch.save(model.state_dict(), 'save_' + args.arch + '/'+str(sample_idx)+'.pt')
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if not args.multiprocessing_distributed or (args.multiprocessing_distributed
and args.rank % ngpus_per_node == 0):
sample_idx += 1
# torch.save(model.state_dict(), 'save_' + args.arch + '/'+str(sample_idx)+'.pt')
arr_time.append(time.time() - epoch_start)
train_loss.append(losses.avg)
train_acc.append(top1.avg)
def validate(val_loader, model, criterion, args):
global train_loss, train_acc, test_loss, test_acc, arr_time
batch_time = AverageMeter('Time', ':6.3f')
losses = AverageMeter('Loss', ':.4e')
top1 = AverageMeter('Acc@1', ':6.2f')
top5 = AverageMeter('Acc@5', ':6.2f')
progress = ProgressMeter(
len(val_loader),
[batch_time, losses, top1, top5],
prefix='Test: ')
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (images, target) in enumerate(val_loader):
if args.gpu is not None:
images = images.cuda(args.gpu, non_blocking=True)
if torch.cuda.is_available():
target = target.cuda(args.gpu, non_blocking=True)
# compute output
output = model(images)
loss = criterion(output, target)
# measure accuracy and record loss
acc1, acc5 = accuracy(output, target, topk=(1, 5))
losses.update(loss.item(), images.size(0))
top1.update(acc1[0], images.size(0))
top5.update(acc5[0], images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
# TODO: this should also be done with the ProgressMeter
print(' * Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
test_acc.append(top1.avg)
test_loss.append(losses.avg)
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
if __name__ == '__main__':
main()
| 21,710 | 36.890052 | 118 |
py
|
RWP
|
RWP-main/models/resnet.py
|
"""resnet in pytorch
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun.
Deep Residual Learning for Image Recognition
https://arxiv.org/abs/1512.03385v1
"""
import torch
import torch.nn as nn
class BasicBlock(nn.Module):
"""Basic Block for resnet 18 and resnet 34
"""
# BasicBlock and BottleNeck blocks
# have different output sizes;
# we use the class attribute expansion
# to distinguish between them
expansion = 1
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
#residual function
self.residual_function = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels * BasicBlock.expansion, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels * BasicBlock.expansion)
)
#shortcut
self.shortcut = nn.Sequential()
# if the shortcut output dimension does not match the residual function,
# use a 1*1 convolution to match the dimension
if stride != 1 or in_channels != BasicBlock.expansion * out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels * BasicBlock.expansion, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels * BasicBlock.expansion)
)
def forward(self, x):
return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
class BottleNeck(nn.Module):
"""Residual block for resnet over 50 layers
"""
expansion = 4
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
self.residual_function = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, stride=stride, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels * BottleNeck.expansion, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels * BottleNeck.expansion),
)
self.shortcut = nn.Sequential()
if stride != 1 or in_channels != out_channels * BottleNeck.expansion:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels * BottleNeck.expansion, stride=stride, kernel_size=1, bias=False),
nn.BatchNorm2d(out_channels * BottleNeck.expansion)
)
def forward(self, x):
return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x))
class ResNet(nn.Module):
def __init__(self, block, num_block, num_classes=100):
super().__init__()
self.in_channels = 64
self.conv1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True))
# we use a different input size than the original paper,
# so conv2_x's stride is 1
self.conv2_x = self._make_layer(block, 64, num_block[0], 1)
self.conv3_x = self._make_layer(block, 128, num_block[1], 2)
self.conv4_x = self._make_layer(block, 256, num_block[2], 2)
self.conv5_x = self._make_layer(block, 512, num_block[3], 2)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
def _make_layer(self, block, out_channels, num_blocks, stride):
"""make resnet layers(by layer i didnt mean this 'layer' was the
same as a neuron netowork layer, ex. conv layer), one layer may
contain more than one residual block
Args:
block: block type, basic block or bottle neck block
out_channels: output depth channel number of this layer
num_blocks: how many blocks per layer
stride: the stride of the first block of this layer
Return:
return a resnet layer
"""
# we have num_blocks blocks per layer; the stride of the first block
# may be 1 or 2, while all remaining blocks use stride 1
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, out_channels, stride))
self.in_channels = out_channels * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
output = self.conv1(x)
output = self.conv2_x(output)
output = self.conv3_x(output)
output = self.conv4_x(output)
output = self.conv5_x(output)
output = self.avg_pool(output)
output = output.view(output.size(0), -1)
output = self.fc(output)
return output
class resnet18:
base = ResNet
args = list()
kwargs = {'block': BasicBlock, 'num_block': [2, 2, 2, 2]}
# def resnet18():
# """ return a ResNet 18 object
# """
# kwargs = {}
# return ResNet(BasicBlock, [2, 2, 2, 2])
def resnet34():
""" return a ResNet 34 object
"""
return ResNet(BasicBlock, [3, 4, 6, 3])
def resnet50():
""" return a ResNet 50 object
"""
return ResNet(BottleNeck, [3, 4, 6, 3])
def resnet101():
""" return a ResNet 101 object
"""
return ResNet(BottleNeck, [3, 4, 23, 3])
def resnet152():
""" return a ResNet 152 object
"""
return ResNet(BottleNeck, [3, 8, 36, 3])
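# A minimal sketch of the holder convention consumed by get_model() in
# utils.py (num_classes is supplied by the caller):
# cfg = resnet18
# model = cfg.base(*cfg.args, num_classes=10, **cfg.kwargs)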
| 5,620 | 32.064706 | 118 |
py
|
RWP
|
RWP-main/models/vgg.py
|
"""
VGG model definition
ported from https://github.com/pytorch/vision/blob/master/torchvision/models/vgg.py
"""
import math
import torch.nn as nn
import torchvision.transforms as transforms
__all__ = ['VGG16', 'VGG16BN', 'VGG19', 'VGG19BN']
def make_layers(cfg, batch_norm=False):
layers = list()
in_channels = 3
for v in cfg:
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
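# For example, make_layers([64, 'M'], batch_norm=True) expands to
# Conv2d(3, 64, 3, padding=1) -> BatchNorm2d(64) -> ReLU -> MaxPool2d(2, 2).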
cfg = {
16: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
19: [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M',
512, 512, 512, 512, 'M'],
}
class VGG(nn.Module):
def __init__(self, num_classes=10, depth=16, batch_norm=False):
super(VGG, self).__init__()
self.features = make_layers(cfg[depth], batch_norm)
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Dropout(),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, num_classes),
)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
m.bias.data.zero_()
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class Base:
base = VGG
args = list()
kwargs = dict()
transform_train = transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, padding=4),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))
])
class VGG16(Base):
pass
class VGG16BN(Base):
kwargs = {'batch_norm': True}
class VGG19(Base):
kwargs = {'depth': 19}
class VGG19BN(Base):
kwargs = {'depth': 19, 'batch_norm': True}
| 2,502 | 25.913978 | 97 |
py
|
RWP
|
RWP-main/models/wide_resnet.py
|
"""
WideResNet model definition
ported from https://github.com/meliketoy/wide-resnet.pytorch/blob/master/networks/wide_resnet.py
"""
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
import math
__all__ = ['WideResNet28x10', 'WideResNet16x8']
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicUnit(nn.Module):
def __init__(self, channels: int, dropout: float):
super(BasicUnit, self).__init__()
self.block = nn.Sequential(OrderedDict([
("0_normalization", nn.BatchNorm2d(channels)),
("1_activation", nn.ReLU(inplace=True)),
("2_convolution", nn.Conv2d(channels, channels, (3, 3), stride=1, padding=1, bias=False)),
("3_normalization", nn.BatchNorm2d(channels)),
("4_activation", nn.ReLU(inplace=True)),
("5_dropout", nn.Dropout(dropout, inplace=True)),
("6_convolution", nn.Conv2d(channels, channels, (3, 3), stride=1, padding=1, bias=False)),
]))
def forward(self, x):
return x + self.block(x)
class DownsampleUnit(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride: int, dropout: float):
super(DownsampleUnit, self).__init__()
self.norm_act = nn.Sequential(OrderedDict([
("0_normalization", nn.BatchNorm2d(in_channels)),
("1_activation", nn.ReLU(inplace=True)),
]))
self.block = nn.Sequential(OrderedDict([
("0_convolution", nn.Conv2d(in_channels, out_channels, (3, 3), stride=stride, padding=1, bias=False)),
("1_normalization", nn.BatchNorm2d(out_channels)),
("2_activation", nn.ReLU(inplace=True)),
("3_dropout", nn.Dropout(dropout, inplace=True)),
("4_convolution", nn.Conv2d(out_channels, out_channels, (3, 3), stride=1, padding=1, bias=False)),
]))
self.downsample = nn.Conv2d(in_channels, out_channels, (1, 1), stride=stride, padding=0, bias=False)
def forward(self, x):
x = self.norm_act(x)
return self.block(x) + self.downsample(x)
class Block(nn.Module):
def __init__(self, in_channels: int, out_channels: int, stride: int, depth: int, dropout: float):
super(Block, self).__init__()
self.block = nn.Sequential(
DownsampleUnit(in_channels, out_channels, stride, dropout),
*(BasicUnit(out_channels, dropout) for _ in range(depth))
)
def forward(self, x):
return self.block(x)
class WideResNet(nn.Module):
def __init__(self, depth: int, width_factor: int, dropout: float, in_channels: int, num_classes: int):
super(WideResNet, self).__init__()
self.filters = [16, 1 * 16 * width_factor, 2 * 16 * width_factor, 4 * 16 * width_factor]
self.block_depth = (depth - 4) // (3 * 2)
self.f = nn.Sequential(OrderedDict([
("0_convolution", nn.Conv2d(in_channels, self.filters[0], (3, 3), stride=1, padding=1, bias=False)),
("1_block", Block(self.filters[0], self.filters[1], 1, self.block_depth, dropout)),
("2_block", Block(self.filters[1], self.filters[2], 2, self.block_depth, dropout)),
("3_block", Block(self.filters[2], self.filters[3], 2, self.block_depth, dropout)),
("4_normalization", nn.BatchNorm2d(self.filters[3])),
("5_activation", nn.ReLU(inplace=True)),
("6_pooling", nn.AvgPool2d(kernel_size=8)),
("7_flattening", nn.Flatten()),
("8_classification", nn.Linear(in_features=self.filters[3], out_features=num_classes)),
]))
self._initialize()
def _initialize(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight.data, mode="fan_in", nonlinearity="relu")
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.weight.data.zero_()
m.bias.data.zero_()
def forward(self, x):
return self.f(x)
class WideResNet28x10:
base = WideResNet
args = list()
kwargs = {'depth': 28, 'width_factor': 10, 'dropout': 0, 'in_channels': 3}
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
class WideResNet16x8:
base = WideResNet
args = list()
kwargs = {'depth': 16, 'width_factor': 8, 'dropout': 0, 'in_channels': 3}
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
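# Editor's sketch (not part of the original file): a quick shape sanity check.
# The AvgPool2d(kernel_size=8) in WideResNet assumes 32x32 (CIFAR-sized) inputs:
# two stride-2 blocks reduce 32 -> 8, and the 8x8 pool collapses it to 1x1.
if __name__ == "__main__":
    net = WideResNet(depth=16, width_factor=8, dropout=0.0, in_channels=3, num_classes=10)
    x = torch.randn(2, 3, 32, 32)
    print(net(x).shape)  # expected: torch.Size([2, 10])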
| 5,426 | 38.904412 | 114 |
py
|
RWP
|
RWP-main/models/__init__.py
|
from .resnet import *
from .vgg import *
from .wide_resnet import *
| 67 | 21.666667 | 26 |
py
|
F-SHARP
|
F-SHARP-main/measure_coflex.py
|
import numpy as np
import sys,os
from astropy.cosmology import Planck15
import pandas as pd
from astropy import units as u
from scipy import interpolate
from fastdist import fastdist
"""
Script: Measure the cosmic flexion and shear-flexion two-point correlation functions from a dataset.
Author: Evan J. Arena
Description:
.. We want to compute the two-point statistics on a dataset given
galaxy position, size, ellipticity, and flexion
.. We will compute the following statistics:
(i). All cosmic flexion-flexion correlators:
xi_FF_plus, xi_FF_minus,
xi_GG_plus, xi_GG_minus,
xi_FG_plus, xi_FG_minus, xi_GF_plus, xi_GF_minus
(ii). All cosmic shear-flexion correlators:
xi_gamF_plus, xi_gamF_minus, xi_Fgam_plus, xi_Fgam_minus
xi_Ggam_plus, xi_Ggam_minus, xi_gamG_plus, xi_gamG_minus,
(iii). All autovariances of each estimator
"""
class MeasureCF2P:
def __init__(self, survey, bin_combo, RA, DEC, F1, F2, G1, G2, a, eps1, eps2, w):
"""
We take as an input a galaxy "catalogue" containing the following parameters:
.. survey: a string denoting the name of the cosmological survey,
e.g. 'DES'
.. bin_combo: a string denoting the tomographic bin combination
e.g. '11' for bin combination (1,1), 'nontom' for nontomographic analysis
.. RA: Array of the right-ascention of each galaxy, in arcseconds
.. DEC: Array of the declination of each galaxy, in arcseconds
.. F1: Array of the F-flexion 1-component of each galaxy, in [arcsec]^-1
.. F2: Array of the F-flexion 2-component of each galaxy, in [arcsec]^-1
.. G1: Array of the G-flexion 1-component of each galaxy, in [arcsec]^-1
.. G2: Array of the G-flexion 2-component of each galaxy, in [arcsec]^-1
.. a: Array of the sizes of each galaxy, in arcseconds. Size is defined
              in terms of the quadrupole image moments:
a = sqrt(|Q11 + Q22|)
.. eps1: Array of the ellipticity 1-component of each galaxy*.
.. eps2: Array of the ellipticity 2-component of each galaxy*.
.. w: Array of weights for each galaxy.
        * Note that ellipticity is defined here as (a-b)/(a+b) * e^(2i*phi), where a and b
are the semi-major and semi-minor axes. Do not confuse the semi-major axis here
with the image size.
"""
self.survey = str(survey)
self.bin_combo = str(bin_combo)
self.x_list = RA
self.y_list = DEC
self.F1_list = F1
self.F2_list = F2
self.G1_list = G1
self.G2_list = G2
self.a_list = a
self.eps1_list = eps1
self.eps2_list = eps2
self.w_list = w
self.Ngals = len(self.w_list)
def measureTwoPoint(self):
"""
Measure all available two-point correlation functions.
"""
# First, calculate the scatter in intrinsic flexion and ellipticity
sigma_aF, sigma_aG, sigma_eps = self.get_intrinsic_scatter()
print('sigma_aF, sigma_aG, sigma_eps =', sigma_aF, sigma_aG, sigma_eps)
self.sigma_aF = sigma_aF
self.sigma_aG = sigma_aG
self.sigma_eps = sigma_eps
# Get flexion-flexion correlation functions first.
# .. Get angular separations and bins
theta_flexflex_list, flexflex_bins = self.theta_flexflex_bin()
# .. Get weights and two-point arrays in each bin
# .. .. flexflex_bins = array(wp_bins, xi_FF_plus_bins, ...)
flexflex_bins = self.get_binned_two_point_flexflex(theta_flexflex_list, flexflex_bins)
# .. Get two-point correlation functions and autovariances
flexflex = self.get_flexflex_corr(*flexflex_bins)
xi_FF_plus = flexflex[0]
xi_FF_plus_autoVar = flexflex[1]
xi_FF_minus = flexflex[2]
xi_FF_minus_autoVar = flexflex[3]
xi_GG_plus = flexflex[4]
xi_GG_plus_autoVar = flexflex[5]
xi_GG_minus = flexflex[6]
xi_GG_minus_autoVar = flexflex[7]
xi_FG_plus = flexflex[8]
xi_FG_plus_autoVar = flexflex[9]
xi_FG_minus = flexflex[10]
xi_FG_minus_autoVar = flexflex[11]
xi_GF_plus = flexflex[12]
xi_GF_plus_autoVar = flexflex[13]
xi_GF_minus = flexflex[14]
xi_GF_minus_autoVar = flexflex[15]
# .. Export flexion-flexion correlation functions to .pkl file
col_list = ['theta', 'xi_FF_plus', 'xi_FF_plus_autoVar', 'xi_FF_minus', 'xi_FF_minus_autoVar', 'xi_GG_plus', 'xi_GG_plus_autoVar', 'xi_GG_minus', 'xi_GG_minus_autoVar', 'xi_FG_plus', 'xi_FG_plus_autoVar', 'xi_FG_minus', 'xi_FG_minus_autoVar', 'xi_GF_plus', 'xi_GF_plus_autoVar', 'xi_GF_minus', 'xi_GF_minus_autoVar']
arrs = [theta_flexflex_list, xi_FF_plus, xi_FF_plus_autoVar, xi_FF_minus, xi_FF_minus_autoVar, xi_GG_plus, xi_GG_plus_autoVar, xi_GG_minus, xi_GG_minus_autoVar, xi_FG_plus, xi_FG_plus_autoVar, xi_FG_minus, xi_FG_minus_autoVar, xi_GF_plus, xi_GF_plus_autoVar, xi_GF_minus, xi_GF_minus_autoVar]
dat = {i:arrs[j] for i,j in zip(col_list, range(len(col_list)))}
out_frame = pd.DataFrame(data = dat, columns = col_list)
out_frame.to_pickle(self.survey+'/'+self.survey+'_Measure/flexion-flexion_two_point_measured_'+self.survey+'_bin_combo_'+self.bin_combo+'.pkl')
# Shear-flexion correlations
# .. Get angular separations and bins
theta_shearflex_list, shearflex_bins = self.theta_shearflex_bin()
# .. Get weights and two-point arrays in each bin
        # .. .. shearflex_bins = array(wp_bins, xi_gamF_plus_bins, ...)
shearflex_bins = self.get_binned_two_point_shearflex(theta_shearflex_list, shearflex_bins)
# .. Get two-point correlation functions
shearflex = self.get_shearflex_corr(*shearflex_bins)
xi_gamF_plus = shearflex[0]
xi_gamF_plus_autoVar = shearflex[1]
xi_gamF_minus = shearflex[2]
xi_gamF_minus_autoVar = shearflex[3]
xi_Fgam_plus = shearflex[4]
xi_Fgam_plus_autoVar = shearflex[5]
xi_Fgam_minus = shearflex[6]
xi_Fgam_minus_autoVar = shearflex[7]
xi_Ggam_plus = shearflex[8]
xi_Ggam_plus_autoVar = shearflex[9]
xi_Ggam_minus = shearflex[10]
xi_Ggam_minus_autoVar = shearflex[11]
xi_gamG_plus = shearflex[12]
xi_gamG_plus_autoVar = shearflex[13]
xi_gamG_minus = shearflex[14]
xi_gamG_minus_autoVar = shearflex[15]
# .. Export shear-flexion correlation functions to .pkl file
col_list = ['theta', 'xi_gamF_plus', 'xi_gamF_plus_autoVar', 'xi_gamF_minus', 'xi_gamF_minus_autoVar', 'xi_Fgam_plus', 'xi_Fgam_plus_autoVar', 'xi_Fgam_minus', 'xi_Fgam_minus_autoVar', 'xi_Ggam_plus', 'xi_Ggam_plus_autoVar', 'xi_Ggam_minus', 'xi_Ggam_minus_autoVar', 'xi_gamG_plus', 'xi_gamG_plus_autoVar', 'xi_gamG_minus', 'xi_gamG_minus_autoVar']
arrs = [theta_shearflex_list, xi_gamF_plus, xi_gamF_plus_autoVar, xi_gamF_minus, xi_gamF_minus_autoVar, xi_Fgam_plus, xi_Fgam_plus_autoVar, xi_Fgam_minus, xi_Fgam_minus_autoVar, xi_Ggam_plus, xi_Ggam_plus_autoVar, xi_Ggam_minus, xi_Ggam_minus_autoVar, xi_gamG_plus, xi_gamG_plus_autoVar, xi_gamG_minus, xi_gamG_minus_autoVar]
dat = {i:arrs[j] for i,j in zip(col_list, range(len(col_list)))}
out_frame = pd.DataFrame(data = dat, columns = col_list)
out_frame.to_pickle(self.survey+'/'+self.survey+'_Measure/shear-flexion_two_point_measured_'+self.survey+'_bin_combo_'+self.bin_combo+'.pkl')
def theta_flexflex_bin(self, theta_min=1, theta_max=100, N_theta=10):
"""
List of theta values for real-space cosmic flexion correlation functions
        Input angle values are in units of arcseconds
"""
theta_min = np.log10(theta_min)
theta_max = np.log10(theta_max)
theta_list = np.logspace(theta_min,theta_max,N_theta)
dtheta = np.log10(theta_list[1])-np.log10(theta_list[0])
bin_low_list = 10**(np.log10(theta_list)-dtheta/2)
bin_high_list = 10**(np.log10(theta_list)+dtheta/2)
bins = np.append(bin_low_list,bin_high_list[-1])
return theta_list, bins
def theta_shearflex_bin(self, theta_min=1/60, theta_max=10, N_theta=15):
"""
List of theta values for real-space cosmic shear-flexion correlation functions
        Input angle values are in units of arcminutes
"""
theta_min = np.log10(theta_min)
theta_max = np.log10(theta_max)
theta_list = np.logspace(theta_min,theta_max,N_theta)
dtheta = np.log10(theta_list[1])-np.log10(theta_list[0])
bin_low_list = 10**(np.log10(theta_list)-dtheta/2)
bin_high_list = 10**(np.log10(theta_list)+dtheta/2)
bins = np.append(bin_low_list,bin_high_list[-1])
return theta_list, bins
def get_binned_two_point_flexflex(self, theta_list, bins):
"""
Calculate the following quantities for each galaxy pair (i,j):
1. The product of galaxy weights:
w_p = w_i*w_j
2. The two-point correlations, e.g.
xi_FF_p/m = (F1_rot_i*F1_rot_j +/- F2_rot_i*F2_rot_j)
and then separate each galaxy pair into angular separation bins
defined by the function self.theta_bin().
Rather than perform this calculation for every single galaxy pair, i.e.
>>> for i in range(N_bins):
>>> .. for j in range(N_bins):
which is an O(N^2) operation, we turn this into an ~O(N) operation by
creating square grid cells with widths equal to the largest angular
separation we consider: np.max(theta_list).
"""
# Get the total number of bins
N_bins = len(theta_list)
# Define the arrays for two-point calculation
# .. Product of weights
wp_bins = [[] for _ in range(N_bins)]
# .. Auto-correlations
xi_FF_plus_bins = [[] for _ in range(N_bins)]
xi_FF_minus_bins = [[] for _ in range(N_bins)]
xi_FF_plus_autoVar_bins = [[] for _ in range(N_bins)]
xi_FF_minus_autoVar_bins = [[] for _ in range(N_bins)]
xi_GG_plus_bins = [[] for _ in range(N_bins)]
xi_GG_minus_bins = [[] for _ in range(N_bins)]
xi_GG_plus_autoVar_bins = [[] for _ in range(N_bins)]
xi_GG_minus_autoVar_bins = [[] for _ in range(N_bins)]
# .. Cross-correlations
xi_FG_plus_bins = [[] for _ in range(N_bins)]
xi_FG_minus_bins = [[] for _ in range(N_bins)]
xi_FG_plus_autoVar_bins = [[] for _ in range(N_bins)]
xi_FG_minus_autoVar_bins = [[] for _ in range(N_bins)]
xi_GF_plus_bins = [[] for _ in range(N_bins)]
xi_GF_minus_bins = [[] for _ in range(N_bins)]
xi_GF_plus_autoVar_bins = [[] for _ in range(N_bins)]
xi_GF_minus_autoVar_bins = [[] for _ in range(N_bins)]
# Get the width of each grid cell:
dg = np.max(bins)
# Let (Gx,Gy) denote the grid pairs. We can assign each galaxy, k, to a
# grid in the following way:
gx_list = np.zeros(self.Ngals)
gy_list = np.zeros(self.Ngals)
for k in range(self.Ngals):
gx = int((self.x_list[k]-self.x_list[0])/dg)
gy = int((self.y_list[k]-self.y_list[0])/dg)
gx_list[k] = gx
gy_list[k] = gy
# Next, we want to loop through every galaxy, i. We want to calculate
# the separation and the two-point statistics, between i and all other
# galaxies, j, that lie in either the same grid cell as i or a grid cell
# adjacent to it. Now, unless we account for the fact that there are
# multiple galaxies in each grid cell, the separation between galaxy
# i and i+1 will be calculated on both the 0th and 1st iterations.
# Therefore, we should create a running list containing galaxies already
# looped through and exclude them from the next iteration. We can do this
# simply by requiring j > i. This is fine because for each iteration (i),
# all of (i)'s pairs are identified.
# Create list of galaxy j:
j_list = np.arange(0, self.Ngals)
for i in range(self.Ngals):
# Get grid coordinates for galaxy i
gx_i = gx_list[i]
gy_i = gy_list[i]
# Get (RA,Dec) for galaxy i
x_i = self.x_list[i]
y_i = self.y_list[i]
# Get size for galaxy i
a_i = self.a_list[i]
a_i *= (u.arcsec)
a_i = a_i.to(u.rad).value
# Get weight of galaxy i
w_i = self.w_list[i]
# Get flexion of galaxy i
F1_i = self.F1_list[i]
F2_i = self.F2_list[i]
G1_i = self.G1_list[i]
G2_i = self.G2_list[i]
# Get galaxy pairs {j} associated with galaxy i
id = np.where((j_list > i) &
(gx_list <= gx_i+1) & (gx_list >= gx_i-1) &
(gy_list <= gy_i+1) & (gy_list >= gy_i-1))
# Positions of each galaxy j
x_j_list = self.x_list[id]
y_j_list = self.y_list[id]
# Get total number of galaxy js
N_j = len(x_j_list)
# Sizes
a_j_list = self.a_list[id]
# Weights
w_j_list = self.w_list[id]
# Flexions
F1_j_list = self.F1_list[id]
F2_j_list = self.F2_list[id]
G1_j_list = self.G1_list[id]
G2_j_list = self.G2_list[id]
# Calculate two-point for each pair (i,j)
for j in range(N_j):
# Get separation between (i,j)
theta_ij = self.theta_ij(x_i,y_i,x_j_list[j],y_j_list[j])
# Get polar angle between (i,j)
varPhi_ij = self.varPhi_ij(x_i,y_i,x_j_list[j],y_j_list[j])
# Calculate tangential and radial flexions for pair (i,j)
F1_rot_i = self.F1_rot(F1_i, F2_i, varPhi_ij)
F1_rot_j = self.F1_rot(F1_j_list[j], F2_j_list[j], varPhi_ij)
F2_rot_i = self.F2_rot(F1_i, F2_i, varPhi_ij)
F2_rot_j = self.F2_rot(F1_j_list[j], F2_j_list[j], varPhi_ij)
G1_rot_i = self.G1_rot(G1_i, G2_i, varPhi_ij)
G1_rot_j = self.G1_rot(G1_j_list[j], G2_j_list[j], varPhi_ij)
G2_rot_i = self.G2_rot(G1_i, G2_i, varPhi_ij)
G2_rot_j = self.G2_rot(G1_j_list[j], G2_j_list[j], varPhi_ij)
# Convert the flexions from 1/arcsec to 1/rad:
F1_rot_i /= (u.arcsec)
F1_rot_i = F1_rot_i.to(1/u.rad).value
F1_rot_j /= (u.arcsec)
F1_rot_j = F1_rot_j.to(1/u.rad).value
F2_rot_i /= (u.arcsec)
F2_rot_i = F2_rot_i.to(1/u.rad).value
F2_rot_j /= (u.arcsec)
F2_rot_j = F2_rot_j.to(1/u.rad).value
G1_rot_i /= (u.arcsec)
G1_rot_i = G1_rot_i.to(1/u.rad).value
G1_rot_j /= (u.arcsec)
G1_rot_j = G1_rot_j.to(1/u.rad).value
G2_rot_i /= (u.arcsec)
G2_rot_i = G2_rot_i.to(1/u.rad).value
G2_rot_j /= (u.arcsec)
G2_rot_j = G2_rot_j.to(1/u.rad).value
# Convert size to rad
a_j = a_j_list[j]*(u.arcsec)
a_j = a_j.to(u.rad).value
# Weight for each pair
wp_ij = w_i*w_j_list[j]
# Two-points for each pair
xi_FF_p_ij = wp_ij*(F1_rot_i*F1_rot_j + F2_rot_i*F2_rot_j)
xi_FF_m_ij = wp_ij*(F1_rot_i*F1_rot_j - F2_rot_i*F2_rot_j)
xi_GG_p_ij = wp_ij*(G1_rot_i*G1_rot_j + G2_rot_i*G2_rot_j)
xi_GG_m_ij = wp_ij*(G1_rot_i*G1_rot_j - G2_rot_i*G2_rot_j)
xi_FG_p_ij = wp_ij*(F1_rot_i*G1_rot_j + F2_rot_i*G2_rot_j)
xi_FG_m_ij = wp_ij*(F1_rot_i*G1_rot_j - F2_rot_i*G2_rot_j)
xi_GF_p_ij = wp_ij*(G1_rot_i*F1_rot_j + G2_rot_i*F2_rot_j)
xi_GF_m_ij = wp_ij*(G1_rot_i*F1_rot_j - G2_rot_i*F2_rot_j)
# Autovar for each pair
xi_FF_p_aV_ij = (wp_ij/(a_i*a_j))**2.
xi_FF_m_aV_ij = xi_FF_p_aV_ij
xi_GG_p_aV_ij = xi_FF_p_aV_ij
xi_GG_m_aV_ij = xi_FF_p_aV_ij
xi_FG_p_aV_ij = xi_FF_p_aV_ij
xi_FG_m_aV_ij = xi_FF_p_aV_ij
xi_GF_p_aV_ij = xi_FF_p_aV_ij
xi_GF_m_aV_ij = xi_FF_p_aV_ij
# Get the bin for each galaxy pair (i,j). It is simplest to use
# np.digitize(theta_ij, bins). This returns the bin number that
# theta_ij belongs to (digitize indexes at 1). If digitize returns 0,
# theta_ij is smaller than the smallest bin. If digitize returns the
# number = N_bins + 1, then theta_ij is larger than the largest bin. So
bin_ij = np.digitize(theta_ij, bins, right=True)
if (bin_ij > 0) & (bin_ij < N_bins + 1):
bin_index = bin_ij-1
wp_bins[bin_index].append(wp_ij)
xi_FF_plus_bins[bin_index].append(xi_FF_p_ij)
xi_FF_minus_bins[bin_index].append(xi_FF_m_ij)
xi_GG_plus_bins[bin_index].append(xi_GG_p_ij)
xi_GG_minus_bins[bin_index].append(xi_GG_m_ij)
xi_FG_plus_bins[bin_index].append(xi_FG_p_ij)
xi_FG_minus_bins[bin_index].append(xi_FG_m_ij)
xi_GF_plus_bins[bin_index].append(xi_GF_p_ij)
xi_GF_minus_bins[bin_index].append(xi_GF_m_ij)
xi_FF_plus_autoVar_bins[bin_index].append(xi_FF_p_aV_ij)
xi_FF_minus_autoVar_bins[bin_index].append(xi_FF_m_aV_ij)
xi_GG_plus_autoVar_bins[bin_index].append(xi_GG_p_aV_ij)
xi_GG_minus_autoVar_bins[bin_index].append(xi_GG_m_aV_ij)
xi_FG_plus_autoVar_bins[bin_index].append(xi_FG_p_aV_ij)
xi_FG_minus_autoVar_bins[bin_index].append(xi_FG_m_aV_ij)
xi_GF_plus_autoVar_bins[bin_index].append(xi_GF_p_aV_ij)
xi_GF_minus_autoVar_bins[bin_index].append(xi_GF_m_aV_ij)
return wp_bins, xi_FF_plus_bins, xi_FF_minus_bins, xi_GG_plus_bins, xi_GG_minus_bins, xi_FG_plus_bins, xi_FG_minus_bins, xi_GF_plus_bins, xi_GF_minus_bins, xi_FF_plus_autoVar_bins, xi_FF_minus_autoVar_bins, xi_GG_plus_autoVar_bins, xi_GG_minus_autoVar_bins, xi_FG_plus_autoVar_bins, xi_FG_minus_autoVar_bins, xi_GF_plus_autoVar_bins, xi_GF_minus_autoVar_bins,
def get_binned_two_point_shearflex(self, theta_list, bins):
"""
Calculate the following quantities for each galaxy pair (i,j):
1. The product of galaxy weights:
w_p = w_i*w_j
2. The two-point correlations, e.g.
xi_gamF_p/m = (gam1_rot_i*F1_rot_j +/- gam2_rot_i*F2_rot_j)
and then separate each galaxy pair into angular separation bins
defined by the function self.theta_bin().
Rather than perform this calculation for every single galaxy pair, i.e.
>>> for i in range(N_bins):
>>> .. for j in range(N_bins):
which is an O(N^2) operation, we turn this into an ~O(N) operation by
creating square grid cells with widths equal to the largest angular
separation we consider: np.max(theta_list).
"""
# First, get positions in arcminutes
x_list = self.x_list/60
y_list = self.y_list/60
# Get the total number of bins
N_bins = len(theta_list)
# Define the arrays for two-point calculation
wp_bins = [[] for _ in range(N_bins)]
xi_gamF_plus_bins = [[] for _ in range(N_bins)]
xi_gamF_minus_bins = [[] for _ in range(N_bins)]
xi_Fgam_plus_bins = [[] for _ in range(N_bins)]
xi_Fgam_minus_bins = [[] for _ in range(N_bins)]
xi_Ggam_plus_bins = [[] for _ in range(N_bins)]
xi_Ggam_minus_bins = [[] for _ in range(N_bins)]
xi_gamG_plus_bins = [[] for _ in range(N_bins)]
xi_gamG_minus_bins = [[] for _ in range(N_bins)]
xi_gamF_plus_autoVar_bins = [[] for _ in range(N_bins)]
xi_gamF_minus_autoVar_bins = [[] for _ in range(N_bins)]
xi_Fgam_plus_autoVar_bins = [[] for _ in range(N_bins)]
xi_Fgam_minus_autoVar_bins = [[] for _ in range(N_bins)]
xi_Ggam_plus_autoVar_bins = [[] for _ in range(N_bins)]
xi_Ggam_minus_autoVar_bins = [[] for _ in range(N_bins)]
xi_gamG_plus_autoVar_bins = [[] for _ in range(N_bins)]
xi_gamG_minus_autoVar_bins = [[] for _ in range(N_bins)]
# Get the width of each grid cell:
dg = np.max(bins)
# Let (Gx,Gy) denote the grid pairs. We can assign each galaxy, k, to a
# grid in the following way:
gx_list = np.zeros(self.Ngals)
gy_list = np.zeros(self.Ngals)
for k in range(self.Ngals):
gx = int((x_list[k]-x_list[0])/dg)
gy = int((y_list[k]-y_list[0])/dg)
gx_list[k] = gx
gy_list[k] = gy
# Next, we want to loop through every galaxy, i. We want to calculate
# the separation and the two-point statistics, between i and all other
# galaxies, j, that lie in either the same grid cell as i or a grid cell
# adjacent to it. Now, unless we account for the fact that there are
# multiple galaxies in each grid cell, the separation between galaxy
# i and i+1 will be calculated on both the 0th and 1st iterations.
# Therefore, we should create a running list containing galaxies already
# looped through and exclude them from the next iteration. We can do this
# simply by requiring j > i. This is fine because for each iteration (i),
# all of (i)'s pairs are identified.
# Create list of galaxy j:
j_list = np.arange(0, self.Ngals)
for i in range(self.Ngals):
# Get grid coordinates for galaxy i
gx_i = gx_list[i]
gy_i = gy_list[i]
# Get (RA,Dec) for galaxy i
x_i = x_list[i]
y_i = y_list[i]
# Get size for galaxy i
a_i = self.a_list[i]
a_i *= (u.arcsec)
a_i = a_i.to(u.rad).value
# Get weight of galaxy i
w_i = self.w_list[i]
# Get flexion of galaxy i
F1_i = self.F1_list[i]
F2_i = self.F2_list[i]
G1_i = self.G1_list[i]
G2_i = self.G2_list[i]
# Get ellipticity of galaxy i
eps1_i = self.eps1_list[i]
eps2_i = self.eps2_list[i]
# Get galaxy pairs {j} associated with galaxy i
id = np.where((j_list > i) &
(gx_list <= gx_i+1) & (gx_list >= gx_i-1) &
(gy_list <= gy_i+1) & (gy_list >= gy_i-1))
# Positions of each galaxy j
x_j_list = x_list[id]
y_j_list = y_list[id]
# Get total number of galaxy js
N_j = len(x_j_list)
# Sizes
a_j_list = self.a_list[id]
# Weights
w_j_list = self.w_list[id]
# Flexions
F1_j_list = self.F1_list[id]
F2_j_list = self.F2_list[id]
G1_j_list = self.G1_list[id]
G2_j_list = self.G2_list[id]
# Ellipticities
eps1_j_list = self.eps1_list[id]
eps2_j_list = self.eps2_list[id]
# Calculate two-point for each pair (i,j)
for j in range(N_j):
# Get separation between (i,j)
theta_ij = self.theta_ij(x_i,y_i,x_j_list[j],y_j_list[j])
# Get polar angle between (i,j)
varPhi_ij = self.varPhi_ij(x_i,y_i,x_j_list[j],y_j_list[j])
# Calculate tangential and radial flexions for pair (i,j)
F1_rot_i = self.F1_rot(F1_i, F2_i, varPhi_ij)
F1_rot_j = self.F1_rot(F1_j_list[j], F2_j_list[j], varPhi_ij)
F2_rot_i = self.F2_rot(F1_i, F2_i, varPhi_ij)
F2_rot_j = self.F2_rot(F1_j_list[j], F2_j_list[j], varPhi_ij)
G1_rot_i = self.G1_rot(G1_i, G2_i, varPhi_ij)
G1_rot_j = self.G1_rot(G1_j_list[j], G2_j_list[j], varPhi_ij)
G2_rot_i = self.G2_rot(G1_i, G2_i, varPhi_ij)
G2_rot_j = self.G2_rot(G1_j_list[j], G2_j_list[j], varPhi_ij)
# Calculate tangential and cross ellipticities for pair (i,j)
eps1_rot_i = self.eps1_rot(eps1_i, eps2_i, varPhi_ij)
eps1_rot_j = self.eps1_rot(eps1_j_list[j], eps2_j_list[j], varPhi_ij)
eps2_rot_i = self.eps2_rot(eps1_i, eps2_i, varPhi_ij)
eps2_rot_j = self.eps2_rot(eps1_j_list[j], eps2_j_list[j], varPhi_ij)
# Convert the flexions from 1/arcsec to 1/rad:
F1_rot_i /= (u.arcsec)
F1_rot_i = F1_rot_i.to(1/u.rad).value
F1_rot_j /= (u.arcsec)
F1_rot_j = F1_rot_j.to(1/u.rad).value
F2_rot_i /= (u.arcsec)
F2_rot_i = F2_rot_i.to(1/u.rad).value
F2_rot_j /= (u.arcsec)
F2_rot_j = F2_rot_j.to(1/u.rad).value
G1_rot_i /= (u.arcsec)
G1_rot_i = G1_rot_i.to(1/u.rad).value
G1_rot_j /= (u.arcsec)
G1_rot_j = G1_rot_j.to(1/u.rad).value
G2_rot_i /= (u.arcsec)
G2_rot_i = G2_rot_i.to(1/u.rad).value
G2_rot_j /= (u.arcsec)
G2_rot_j = G2_rot_j.to(1/u.rad).value
# Convert size to rad
a_j = a_j_list[j]*(u.arcsec)
a_j = a_j.to(u.rad).value
# Weight for each pair
wp_ij = w_i*w_j_list[j]
# Two-points for each pair
xi_epsF_p_ij = wp_ij*(eps1_rot_i*F1_rot_j + eps2_rot_i*F2_rot_j)
xi_epsF_m_ij = wp_ij*(eps1_rot_i*F1_rot_j - eps2_rot_i*F2_rot_j)
xi_Feps_p_ij = wp_ij*(F1_rot_i*eps1_rot_j + F2_rot_i*eps2_rot_j)
xi_Feps_m_ij = wp_ij*(F1_rot_i*eps1_rot_j - F2_rot_i*eps2_rot_j)
xi_Geps_p_ij = wp_ij*(G1_rot_i*eps1_rot_j + G2_rot_i*eps2_rot_j)
xi_Geps_m_ij = wp_ij*(G1_rot_i*eps1_rot_j - G2_rot_i*eps2_rot_j)
xi_epsG_p_ij = wp_ij*(eps1_rot_i*G1_rot_j + eps2_rot_i*G2_rot_j)
xi_epsG_m_ij = wp_ij*(eps1_rot_i*G1_rot_j - eps2_rot_i*G2_rot_j)
# Autovar for each pair
xi_epsF_p_aV_ij = (wp_ij/(a_j))**2.
xi_epsF_m_aV_ij = (wp_ij/(a_j))**2.
xi_Feps_p_aV_ij = (wp_ij/(a_i))**2.
xi_Feps_m_aV_ij = (wp_ij/(a_i))**2.
xi_Geps_p_aV_ij = (wp_ij/(a_i))**2.
xi_Geps_m_aV_ij = (wp_ij/(a_i))**2.
xi_epsG_p_aV_ij = (wp_ij/(a_j))**2.
xi_epsG_m_aV_ij = (wp_ij/(a_j))**2.
# Get the bin for each galaxy pair (i,j). It is simplest to use
# np.digitize(theta_ij, bins). This returns the bin number that
# theta_ij belongs to (digitize indexes at 1). If digitize returns 0,
# theta_ij is smaller than the smallest bin. If digitize returns the
# number = N_bins + 1, then theta_ij is larger than the largest bin. So
bin_ij = np.digitize(theta_ij, bins, right=True)
if (bin_ij > 0) & (bin_ij < N_bins + 1):
bin_index = bin_ij-1
wp_bins[bin_index].append(wp_ij)
xi_gamF_plus_bins[bin_index].append(xi_epsF_p_ij)
xi_gamF_minus_bins[bin_index].append(xi_epsF_m_ij)
xi_Fgam_plus_bins[bin_index].append(xi_Feps_p_ij)
xi_Fgam_minus_bins[bin_index].append(xi_Feps_m_ij)
xi_Ggam_plus_bins[bin_index].append(xi_Geps_p_ij)
xi_Ggam_minus_bins[bin_index].append(xi_Geps_m_ij)
xi_gamG_plus_bins[bin_index].append(xi_epsG_p_ij)
xi_gamG_minus_bins[bin_index].append(xi_epsG_m_ij)
xi_gamF_plus_autoVar_bins[bin_index].append(xi_epsF_p_aV_ij)
xi_gamF_minus_autoVar_bins[bin_index].append(xi_epsF_m_aV_ij)
xi_Fgam_plus_autoVar_bins[bin_index].append(xi_Feps_p_aV_ij)
xi_Fgam_minus_autoVar_bins[bin_index].append(xi_Feps_m_aV_ij)
xi_Ggam_plus_autoVar_bins[bin_index].append(xi_Geps_p_aV_ij)
xi_Ggam_minus_autoVar_bins[bin_index].append(xi_Geps_m_aV_ij)
xi_gamG_plus_autoVar_bins[bin_index].append(xi_epsG_p_aV_ij)
xi_gamG_minus_autoVar_bins[bin_index].append(xi_epsG_m_aV_ij)
return wp_bins, xi_gamF_plus_bins, xi_gamF_minus_bins, xi_Fgam_plus_bins, xi_Fgam_minus_bins, xi_Ggam_plus_bins, xi_Ggam_minus_bins, xi_gamG_plus_bins, xi_gamG_minus_bins, xi_gamF_plus_autoVar_bins, xi_gamF_minus_autoVar_bins, xi_Fgam_plus_autoVar_bins, xi_Fgam_minus_autoVar_bins, xi_Ggam_plus_autoVar_bins, xi_Ggam_minus_autoVar_bins, xi_gamG_plus_autoVar_bins, xi_gamG_minus_autoVar_bins
def get_flexflex_corr(self, wp_bins,
xi_FF_plus_bins, xi_FF_minus_bins,
xi_GG_plus_bins, xi_GG_minus_bins,
xi_FG_plus_bins, xi_FG_minus_bins,
xi_GF_plus_bins, xi_GF_minus_bins,
xi_FF_plus_autoVar_bins, xi_FF_minus_autoVar_bins,
xi_GG_plus_autoVar_bins, xi_GG_minus_autoVar_bins,
xi_FG_plus_autoVar_bins, xi_FG_minus_autoVar_bins,
xi_GF_plus_autoVar_bins, xi_GF_minus_autoVar_bins):
"""
Calculate the two-point correlation functions and their errors
within each bin.
"""
# Get number of bins
N_bins = len(wp_bins)
        # Initialize arrays for two-point functions and errors
xi_FF_plus_list = []
xi_FF_minus_list = []
xi_FF_plus_autoVar_list = []
xi_FF_minus_autoVar_list = []
xi_GG_plus_list = []
xi_GG_minus_list = []
xi_GG_plus_autoVar_list = []
xi_GG_minus_autoVar_list = []
xi_FG_plus_list = []
xi_FG_minus_list = []
xi_FG_plus_autoVar_list = []
xi_FG_minus_autoVar_list = []
xi_GF_plus_list = []
xi_GF_minus_list = []
xi_GF_plus_autoVar_list = []
xi_GF_minus_autoVar_list = []
for i in range(N_bins):
# Get number of pairs in bin i
Np = np.sum(wp_bins[i])
# Get two point correlation functions and autovariances
# .. FF
print('Np =', Np)
xi_FF_p = np.sum(xi_FF_plus_bins[i])/Np
xi_FF_p_aV = (self.sigma_aF**4./(2*Np**2.))*np.sum(xi_FF_plus_autoVar_bins[i])
xi_FF_m = np.sum(xi_FF_minus_bins[i])/Np
xi_FF_m_aV = (self.sigma_aF**4./(2*Np**2.))*np.sum(xi_FF_minus_autoVar_bins[i])
xi_FF_plus_list.append(xi_FF_p)
xi_FF_minus_list.append(xi_FF_m)
xi_FF_plus_autoVar_list.append(xi_FF_p_aV)
xi_FF_minus_autoVar_list.append(xi_FF_m_aV)
# .. GG
xi_GG_p = np.sum(xi_GG_plus_bins[i])/Np
xi_GG_p_aV = (self.sigma_aG**4./(2*Np**2.))*np.sum(xi_GG_plus_autoVar_bins[i])
xi_GG_m = np.sum(xi_GG_minus_bins[i])/Np
xi_GG_m_aV = (self.sigma_aG**4./(2*Np**2.))*np.sum(xi_GG_minus_autoVar_bins[i])
xi_GG_plus_list.append(xi_GG_p)
xi_GG_minus_list.append(xi_GG_m)
xi_GG_plus_autoVar_list.append(xi_GG_p_aV)
xi_GG_minus_autoVar_list.append(xi_GG_m_aV)
# .. FG
xi_FG_p = np.sum(xi_FG_plus_bins[i])/Np
xi_FG_p_aV = (self.sigma_aF**2.*self.sigma_aG**2./(2*Np**2.))*np.sum(xi_FG_plus_autoVar_bins[i])
xi_FG_m = np.sum(xi_FG_minus_bins[i])/Np
xi_FG_m_aV = (self.sigma_aF**2.*self.sigma_aG**2./(2*Np**2.))*np.sum(xi_FG_minus_autoVar_bins[i])
xi_FG_plus_list.append(xi_FG_p)
xi_FG_minus_list.append(xi_FG_m)
xi_FG_plus_autoVar_list.append(xi_FG_p_aV)
xi_FG_minus_autoVar_list.append(xi_FG_m_aV)
# .. GF
xi_GF_p = np.sum(xi_GF_plus_bins[i])/Np
xi_GF_p_aV = (self.sigma_aF**2.*self.sigma_aG**2./(2*Np**2.))*np.sum(xi_GF_plus_autoVar_bins[i])
xi_GF_m = np.sum(xi_GF_minus_bins[i])/Np
xi_GF_m_aV = (self.sigma_aF**2.*self.sigma_aG**2./(2*Np**2.))*np.sum(xi_GF_minus_autoVar_bins[i])
xi_GF_plus_list.append(xi_GF_p)
xi_GF_minus_list.append(xi_GF_m)
xi_GF_plus_autoVar_list.append(xi_GF_p_aV)
xi_GF_minus_autoVar_list.append(xi_GF_m_aV)
        return xi_FF_plus_list, xi_FF_plus_autoVar_list, xi_FF_minus_list, xi_FF_minus_autoVar_list, xi_GG_plus_list, xi_GG_plus_autoVar_list, xi_GG_minus_list, xi_GG_minus_autoVar_list, xi_FG_plus_list, xi_FG_plus_autoVar_list, xi_FG_minus_list, xi_FG_minus_autoVar_list, xi_GF_plus_list, xi_GF_plus_autoVar_list, xi_GF_minus_list, xi_GF_minus_autoVar_list
    def get_shearflex_corr(self, wp_bins,
                           xi_gamF_plus_bins, xi_gamF_minus_bins,
                           xi_Fgam_plus_bins, xi_Fgam_minus_bins,
                           xi_Ggam_plus_bins, xi_Ggam_minus_bins,
                           xi_gamG_plus_bins, xi_gamG_minus_bins,
                           xi_gamF_plus_autoVar_bins, xi_gamF_minus_autoVar_bins,
                           xi_Fgam_plus_autoVar_bins, xi_Fgam_minus_autoVar_bins,
                           xi_Ggam_plus_autoVar_bins, xi_Ggam_minus_autoVar_bins,
                           xi_gamG_plus_autoVar_bins, xi_gamG_minus_autoVar_bins):
"""
Calculate the two-point correlation functions and their errors
within each bin.
"""
# Get number of bins
N_bins = len(wp_bins)
        # Initialize arrays for two-point functions and errors
        xi_gamF_plus_list = []
        xi_gamF_minus_list = []
        xi_gamF_plus_autoVar_list = []
        xi_gamF_minus_autoVar_list = []
        xi_Fgam_plus_list = []
        xi_Fgam_minus_list = []
        xi_Fgam_plus_autoVar_list = []
        xi_Fgam_minus_autoVar_list = []
        xi_Ggam_plus_list = []
        xi_Ggam_minus_list = []
        xi_Ggam_plus_autoVar_list = []
        xi_Ggam_minus_autoVar_list = []
        xi_gamG_plus_list = []
        xi_gamG_minus_list = []
        xi_gamG_plus_autoVar_list = []
        xi_gamG_minus_autoVar_list = []
for i in range(N_bins):
# Get number of pairs in bin i
Np = np.sum(wp_bins[i])
# Get two point correlation functions and autovariances
# .. gamF
xi_gamF_p = np.sum(xi_gamF_plus_bins[i])/Np
xi_gamF_p_aV = (self.sigma_eps**2.*self.sigma_aF**2./(2*Np**2.))*np.sum(xi_gamF_plus_autoVar_bins[i])
xi_gamF_m = np.sum(xi_gamF_minus_bins[i])/Np
xi_gamF_m_aV = (self.sigma_eps**2.*self.sigma_aF**2./(2*Np**2.))*np.sum(xi_gamF_minus_autoVar_bins[i])
xi_gamF_plus_list.append(xi_gamF_p)
xi_gamF_minus_list.append(xi_gamF_m)
xi_gamF_plus_autoVar_list.append(xi_gamF_p_aV)
xi_gamF_minus_autoVar_list.append(xi_gamF_m_aV)
# .. Fgam
xi_Fgam_p = np.sum(xi_Fgam_plus_bins[i])/Np
xi_Fgam_p_aV = (self.sigma_eps**2.*self.sigma_aF**2./(2*Np**2.))*np.sum(xi_Fgam_plus_autoVar_bins[i])
xi_Fgam_m = np.sum(xi_Fgam_minus_bins[i])/Np
xi_Fgam_m_aV = (self.sigma_eps**2.*self.sigma_aF**2./(2*Np**2.))*np.sum(xi_Fgam_minus_autoVar_bins[i])
xi_Fgam_plus_list.append(xi_Fgam_p)
xi_Fgam_minus_list.append(xi_Fgam_m)
xi_Fgam_plus_autoVar_list.append(xi_Fgam_p_aV)
xi_Fgam_minus_autoVar_list.append(xi_Fgam_m_aV)
# .. Ggam
xi_Ggam_p = np.sum(xi_Ggam_plus_bins[i])/Np
xi_Ggam_p_aV = (self.sigma_eps**2.*self.sigma_aG**2./(2*Np**2.))*np.sum(xi_Ggam_plus_autoVar_bins[i])
xi_Ggam_m = np.sum(xi_Ggam_minus_bins[i])/Np
xi_Ggam_m_aV = (self.sigma_eps**2.*self.sigma_aG**2./(2*Np**2.))*np.sum(xi_Ggam_minus_autoVar_bins[i])
xi_Ggam_plus_list.append(xi_Ggam_p)
xi_Ggam_minus_list.append(xi_Ggam_m)
xi_Ggam_plus_autoVar_list.append(xi_Ggam_p_aV)
xi_Ggam_minus_autoVar_list.append(xi_Ggam_m_aV)
# .. gamG
xi_gamG_p = np.sum(xi_gamG_plus_bins[i])/Np
xi_gamG_p_aV = (self.sigma_eps**2.*self.sigma_aG**2./(2*Np**2.))*np.sum(xi_gamG_plus_autoVar_bins[i])
xi_gamG_m = np.sum(xi_gamG_minus_bins[i])/Np
xi_gamG_m_aV = (self.sigma_eps**2.*self.sigma_aG**2./(2*Np**2.))*np.sum(xi_gamG_minus_autoVar_bins[i])
xi_gamG_plus_list.append(xi_gamG_p)
xi_gamG_minus_list.append(xi_gamG_m)
xi_gamG_plus_autoVar_list.append(xi_gamG_p_aV)
xi_gamG_minus_autoVar_list.append(xi_gamG_m_aV)
        return xi_gamF_plus_list, xi_gamF_plus_autoVar_list, xi_gamF_minus_list, xi_gamF_minus_autoVar_list, xi_Fgam_plus_list, xi_Fgam_plus_autoVar_list, xi_Fgam_minus_list, xi_Fgam_minus_autoVar_list, xi_Ggam_plus_list, xi_Ggam_plus_autoVar_list, xi_Ggam_minus_list, xi_Ggam_minus_autoVar_list, xi_gamG_plus_list, xi_gamG_plus_autoVar_list, xi_gamG_minus_list, xi_gamG_minus_autoVar_list
def theta_ij(self, xi,yi,xj,yj):
"""Get the magnitude of the angular separation vector,
varTheta_ij = |varTheta_j - varTheta_i|,
between two galaxies i and j (in arcsec)
"""
varTheta = np.sqrt((xj-xi)**2.+(yj-yi)**2.)
return varTheta
def varPhi_ij(self, xi,yi,xj,yj):
"""Get the polar angle of the angular separation vector,
between two galaxies i and j (in radians)
"""
varPhi = np.arctan2((yj-yi), (xj-xi))
return varPhi
def F1_rot(self, F1, F2, varPhi):
"""Get the rotated 1-component of the F-flexion, defined as
F1_rot = -Re(Fe^(-i*varPhi)),
where F = F1 + iF2, and phi is the polar angle of the separation vector theta
"""
F1_r = -F1*np.cos(varPhi) - F2*np.sin(varPhi)
return F1_r
def F2_rot(self, F1, F2, varPhi):
"""Get the rotated 2-component of the F-flexion, defined as
F2_rot = -Im(Fe^(-i*varPhi)),
where F = F1 + iF2, and phi is the polar angle of the separation vector theta
"""
F2_r = -F2*np.cos(varPhi) + F1*np.sin(varPhi)
return F2_r
def G1_rot(self, G1, G2, varPhi):
"""Get the rotated 1-component of the G-flexion, defined as
        G1_rot = -Re(Ge^(-i*3*varPhi)),
where G = G1 + iG2, and phi is the polar angle of the separation vector theta
"""
G1_r = -G1*np.cos(3*varPhi) - G2*np.sin(3*varPhi)
return G1_r
def G2_rot(self, G1, G2, varPhi):
"""Get the rotated 2-component of the G-flexion, defined as
        G2_rot = -Im(Ge^(-i*3*varPhi)),
where G = G1 + iG2, and phi is the polar angle of the separation vector theta
"""
G2_r = -G2*np.cos(3*varPhi) + G1*np.sin(3*varPhi)
return G2_r
def eps1_rot(self, eps1, eps2, varPhi):
"""Get the rotated 1-component of the ellipticity, defined as
eps1_rot = -Re(eps*e^(-2i*varPhi)),
where eps = eps1 + ieps2, and phi is the polar angle of the separation vector theta
"""
eps1_r = -eps1*np.cos(2*varPhi) - eps2*np.sin(2*varPhi)
return eps1_r
def eps2_rot(self, eps1, eps2, varPhi):
"""Get the rotated 2-component of the ellipticity, defined as
        eps2_rot = -Im(eps*e^(-2i*varPhi)),
where eps = eps1 + ieps2, and phi is the polar angle of the separation vector theta
"""
eps2_r = -eps2*np.cos(2*varPhi) + eps1*np.sin(2*varPhi)
return eps2_r
def get_intrinsic_scatter(self):
"""Get the intrinsic flexion and intrinsic ellipticity, to be used in the
covariance/autovariance calculation.
"""
# Intrinsic first flexion
F = np.sqrt(self.F1_list**2. + self.F2_list**2.)
sigma_aF = np.sqrt(np.mean((self.a_list*F*self.w_list)**2.))
# Intrinsic second flexion
G = np.sqrt(self.G1_list**2. + self.G2_list**2.)
sigma_aG = np.sqrt(np.mean((self.a_list*G*self.w_list)**2.))
# Intrinsic ellipticity
eps = np.sqrt(self.eps1_list**2. + self.eps2_list**2.)
sigma_eps = np.sqrt(np.mean((eps*self.w_list)**2.))
return sigma_aF, sigma_aG, sigma_eps
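# Editor's check (not part of the original file): the rotation helpers above are
# the real/imaginary parts of -F e^{-i n varPhi} (n = 1 for F, 3 for G, 2 for eps);
# the assertions below verify that identity, and the last line illustrates the
# np.digitize(..., right=True) convention used when binning pairs.
if __name__ == "__main__":
    F1, F2, phi = 0.3, -0.2, 0.7
    F_rot = -(F1 + 1j * F2) * np.exp(-1j * phi)
    assert np.isclose(F_rot.real, -F1 * np.cos(phi) - F2 * np.sin(phi))  # F1_rot
    assert np.isclose(F_rot.imag, -F2 * np.cos(phi) + F1 * np.sin(phi))  # F2_rot
    bins = np.array([1.0, 10.0, 100.0])
    print(np.digitize(5.0, bins, right=True))  # -> 1, i.e. bin_index 0 after the -1 shift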
| 40,759 | 49.197044 | 399 |
py
|
F-SHARP
|
F-SHARP-main/coflex_twopoint.py
|
import numpy as np
import pandas as pd
from classy import Class
import pickle
import sys,os
import astropy
from astropy.cosmology import Planck15
from astropy import units as u
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
from scipy import interpolate
from scipy import integrate
from scipy import special
from scipy.signal import argrelextrema
class CoflexTwopoint:
def __init__(self, coflex_power, survey, bin_combo):
self.l_list = coflex_power['ell']
self.P_F_list = coflex_power['P_F']
self.P_kappa_F_list = coflex_power['P_kappa_F']
self.survey = str(survey)
self.bin_combo = str(bin_combo)
def getTwoPoint(self):
# First, interpolate all arrays so that they can be turned into callable functions
self.interpolateArrays()
        # Flexion-flexion correlations:
        # .. Get theta_list
        theta_flexflex_list = self.theta_flexflex_list()
        # .. Get two point correlation functions
# .. .. F-F autocorrelation
xi_FF_plus = self.two_point_corr_flexflex(theta_flexflex_list, 'FF_plus')
xi_FF_minus = self.two_point_corr_flexflex(theta_flexflex_list, 'FF_minus')
# .. .. F-G cross-correlation. Note: xi_FG_plus = -xi_FF_minus
xi_FG_plus = [-xi_FF_minus[i] for i in range(len(xi_FF_minus))]
xi_FG_minus = self.two_point_corr_flexflex(theta_flexflex_list, 'FG_minus')
# .. .. G-G cross correlation. Note: xi_GG_plus = xi_FF_plus
xi_GG_plus = xi_FF_plus
xi_GG_minus = self.two_point_corr_flexflex(theta_flexflex_list, 'GG_minus')
# .. Export flexion-flexion correlation functions to .pkl file
col_list = ['theta', 'xi_FF_plus', 'xi_FF_minus', 'xi_FG_plus', 'xi_FG_minus', 'xi_GG_plus', 'xi_GG_minus']
arrs = [theta_flexflex_list, xi_FF_plus, xi_FF_minus, xi_FG_plus, xi_FG_minus, xi_GG_plus, xi_GG_minus]
dat = {i:arrs[j] for i,j in zip(col_list, range(len(col_list)))}
out_frame = pd.DataFrame(data = dat, columns = col_list)
out_frame.to_pickle(self.survey+'/'+self.survey+'_Theory/flexion-flexion_two_point_'+self.survey+'_bin_combo_'+self.bin_combo+'.pkl')
# Shear-flexion correlations:
# .. Get theta_list
theta_shearflex_list = self.theta_shearflex_list()
# .. Get two point correlation functions
# .. .. gam-F cross-correlation
xi_gamF_plus = self.two_point_corr_shearflex(theta_shearflex_list, 'gamF_plus')
xi_gamF_minus = self.two_point_corr_shearflex(theta_shearflex_list, 'gamF_minus')
        # .. .. G-gam cross-correlation. Note: xi_Ggam_plus = xi_gamF_plus (same Bessel order and sign)
xi_Ggam_plus = xi_gamF_plus
xi_Ggam_minus = self.two_point_corr_shearflex(theta_shearflex_list, 'Ggam_minus')
        # .. Export shear-flexion correlation functions to .pkl file
col_list = ['theta', 'xi_gamF_plus', 'xi_gamF_minus', 'xi_Ggam_plus', 'xi_Ggam_minus']
arrs = [theta_shearflex_list, xi_gamF_plus, xi_gamF_minus, xi_Ggam_plus, xi_Ggam_minus]
dat = {i:arrs[j] for i,j in zip(col_list, range(len(col_list)))}
out_frame = pd.DataFrame(data = dat, columns = col_list)
out_frame.to_pickle(self.survey+'/'+self.survey+'_Theory/shear-flexion_two_point_'+self.survey+'_bin_combo_'+self.bin_combo+'.pkl')
def theta_flexflex_list(self, theta_min=1, theta_max=100, N_theta=100):
"""
List of theta values for real-space cosmic flexion correlation functions
        Input angle values are in units of arcseconds
"""
# Create logspace list of angular scale, in units of arcseconds
theta_min = np.log10(theta_min)
theta_max = np.log10(theta_max)
theta_list = np.logspace(theta_min,theta_max,N_theta)
dtheta = np.log10(theta_list[1])-np.log10(theta_list[0])
bin_low_list = 10**(np.log10(theta_list)-dtheta/2)
bin_high_list = 10**(np.log10(theta_list)+dtheta/2)
theta_max = np.log10(bin_high_list[-1])
theta_list = np.logspace(theta_min,theta_max,N_theta)
theta_list *= u.arcsec
return theta_list
def theta_shearflex_list(self, theta_min=1/60, theta_max=10., N_theta=100):
"""
List of theta values for real-space cosmic shear-flexion correlation functions
        Input angle values are in units of arcminutes
"""
# Create logspace list of angular scale, in units of arcseconds
theta_min = np.log10(theta_min)
theta_max = np.log10(theta_max)
theta_list = np.logspace(theta_min,theta_max,N_theta)
dtheta = np.log10(theta_list[1])-np.log10(theta_list[0])
bin_low_list = 10**(np.log10(theta_list)-dtheta/2)
bin_high_list = 10**(np.log10(theta_list)+dtheta/2)
theta_max = np.log10(bin_high_list[-1])
theta_list = np.logspace(theta_min,theta_max,N_theta)
theta_list *= u.arcmin
return theta_list
def interpolateArrays(self):
self.P_F_interpolate = interpolate.interp1d(self.l_list, self.P_F_list)
self.P_kappa_F_interpolate = interpolate.interp1d(self.l_list, self.P_kappa_F_list)
def P_F(self, ell):
return self.P_F_interpolate(ell)
def P_kappa_F(self, ell):
return self.P_kappa_F_interpolate(ell)
def two_point_corr_flexflex(self, theta_list, fields):
# First, convert the list of angles to radians
theta_list_rad = theta_list.to(u.rad).value
# Get parameters specific to the particular two-point correlation function.
# These include the order of the Bessel function for the Hankel transform,
# as well as the algebraic sign of the two-point correlation function.
if fields == 'FF_plus':
order = 0
sign = (+1)
elif fields == 'FF_minus':
order = 2
sign = (-1)
elif fields == 'FG_plus':
order = 2
sign = (+1)
elif fields == 'FG_minus':
order = 4
sign = (-1)
elif fields == 'GG_plus':
order = 0
sign = (+1)
elif fields == 'GG_minus':
order = 6
sign = (-1)
# Get two-point correlation function for each angular separation
xi_list_norm = []
for theta in theta_list_rad:
            # Get a densely sampled ell list
l_list = np.logspace(np.log10(np.min(self.l_list)), np.log10(np.max(self.l_list)), int(1e7))
# Get integrand of two-point correlation function
xi_integrand_unnorm = l_list * special.jv(order, l_list*theta)*self.P_F(l_list)
# Perform integrand renormalization.
ell_min_index = argrelextrema(xi_integrand_unnorm, np.less)[0]
ell_min = l_list[ell_min_index]
xi_integrand_min = xi_integrand_unnorm[ell_min_index]
id_min = np.where(xi_integrand_min < 0)
ell_min_1 = ell_min[id_min][0]
ell_max_index = argrelextrema(xi_integrand_unnorm, np.greater)[0]
ell_max = l_list[ell_max_index][0]
ell_max_1 = l_list[ell_max_index][1]
ell_max_2 = l_list[ell_max_index][2]
xi_integrand_norm = xi_integrand_unnorm*np.e**(-((l_list*(l_list+1))/ell_max_2**2.))
# Now we can integrate. We use Simpson's rule for fast integration
xi_integral_norm = integrate.simps(xi_integrand_norm, l_list, axis=-1)
# xi = 1/2pi times the integral, with the appropriate algebraic sign
xi_norm = sign*(1/(2*np.pi))*xi_integral_norm
xi_list_norm.append(xi_norm)
return xi_list_norm
def two_point_corr_shearflex(self, theta_list, fields):
# First, convert the list of angles to radians
theta_list_rad = theta_list.to(u.rad).value
# Get parameters specific to the particular two-point correlation function.
# These include the order of the Bessel function for the Hankel transform,
# as well as the algebraic sign of the two-point correlation function.
if fields == 'gamF_plus':
order = 1
sign = (-1)
elif fields == 'gamF_minus':
order = 3
sign = (+1)
elif fields == 'Ggam_plus':
order = 1
sign = (-1)
elif fields == 'Ggam_minus':
order = 5
sign = (-1)
# Get two-point correlation function for each angular separation
xi_list_norm = []
for theta in theta_list_rad:
            # Get a densely sampled ell list
l_list = np.logspace(np.log10(np.min(self.l_list)), np.log10(np.max(self.l_list)), int(1e7))
# Get integrand of two-point correlation function
xi_integrand_unnorm = l_list * special.jv(order, l_list*theta)*self.P_kappa_F(l_list)
# Perform integrand renormalization.
ell_min_index = argrelextrema(xi_integrand_unnorm, np.less)[0]
ell_min = l_list[ell_min_index]
xi_integrand_min = xi_integrand_unnorm[ell_min_index]
id_min = np.where(xi_integrand_min < 0)
ell_min_1 = ell_min[id_min][0]
ell_max_index = argrelextrema(xi_integrand_unnorm, np.greater)[0]
ell_max = l_list[ell_max_index][0]
ell_max_1 = l_list[ell_max_index][1]
ell_max_2 = l_list[ell_max_index][2]
xi_integrand_norm = xi_integrand_unnorm*np.e**(-((l_list*(l_list+1))/ell_max_2**2.))
# Now we can integrate. We use Simpson's rule for fast integration
xi_integral_norm = integrate.simps(xi_integrand_norm, l_list, axis=-1)
# xi = 1/2pi times the integral, with the appropriate algebraic sign
xi_norm = sign*(1/(2*np.pi))*xi_integral_norm
xi_list_norm.append(xi_norm)
return xi_list_norm
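# Editor's illustration (not part of the original file) of the log-spaced angular
# binning used by theta_flexflex_list: centers are evenly spaced in log10 and the
# edges sit half a log-bin to either side, so consecutive bins tile without gaps.
if __name__ == "__main__":
    theta = np.logspace(np.log10(1), np.log10(100), 10)
    dlog = np.log10(theta[1]) - np.log10(theta[0])
    edges_low = 10 ** (np.log10(theta) - dlog / 2)
    edges_high = 10 ** (np.log10(theta) + dlog / 2)
    assert np.allclose(edges_high[:-1], edges_low[1:])  # shared edges, no gaps
    print(edges_low[0], edges_high[-1])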
| 10,108 | 39.2749 | 142 |
py
|
F-SHARP
|
F-SHARP-main/coflex_power.py
|
import numpy as np
import pandas as pd
from classy import Class
import pickle
import sys,os
import astropy
from astropy.cosmology import FlatLambdaCDM
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
rc('text', usetex=True)
from scipy import interpolate
from scipy import integrate
class CoflexPower:
def __init__(self, z_list, nz_bin_i, nz_bin_j, survey, bin_combo):
self.z_list = z_list
self.Nz = len(z_list)
self.nz_bin_i = nz_bin_i
self.nz_bin_j = nz_bin_j
self.survey = str(survey)
self.bin_combo = str(bin_combo)
self.kmin = 1e-4
self.kmax_class = 1e3
self.kmax = 1e9
self.lmax = 1e11
self.l_list = np.logspace(np.log10(1),np.log10(self.lmax),100)
def getPower(self):
"""
Function to be called when running matter_power in order to generate
the nonlinear matter power spectrum. Exports to a pickle file an array of the form
k P_NL(k, z_1) P_NL(k, z_2) ... P_NL(k, z_n)
. . . .
. . . .
. . . .
"""
# First, set the cosmology
self.cosmology()
# Next, generate the nonlinear matter power spectrum using CLASS out to kmax_class
self.P_NL_class()
# Extrapolate the nonlinear matter power spectrum out to kmax
#k_P_NL = self.P_NL_extrapolated()
k_P_NL = self.P_NL_asymptotic()
# Next, interpolate all arrays so that they can be turned into callable functions
self.interpolateArrays()
# Get the cosmic flexion power spectra
P_kappa_F_list = []
P_F_list = []
for l in self.l_list:
pkapf = self.P_kappa_F(l)
pf = self.P_F(l)
P_kappa_F_list.append(pkapf)
P_F_list.append(pf)
## ..Export to a pandas dataframe
col_list = ['ell', 'P_kappa_F', 'P_F']
arrs = [self.l_list, P_kappa_F_list, P_F_list]
dat = {i:arrs[j] for i,j in zip(col_list, range(len(col_list)))}
out_frame = pd.DataFrame(data = dat, columns = col_list)
out_frame.to_pickle(self.survey+'/'+self.survey+'_Theory/power_spectra_'+self.survey+'_bin_combo_'+self.bin_combo+'.pkl')
# Get q(chi) lists
q_i_list = []
q_j_list = []
for comov in self.comov_list:
qi = self.q_i(comov)
qj = self.q_j(comov)
q_i_list.append(qi)
q_j_list.append(qj)
# ..Export to a pandas dataframe
col_list = ['comov', 'q_i', 'q_j']
arrs = [self.comov_list, q_i_list, q_j_list]
dat = {i:arrs[j] for i,j in zip(col_list, range(len(col_list)))}
out_frame = pd.DataFrame(data = dat, columns = col_list)
out_frame.to_pickle(self.survey+'/'+self.survey+'_Theory/lensing_efficiency_'+self.survey+'_bin_combo_'+self.bin_combo+'.pkl')
def cosmology(self):
"""
Using the Boltzmann code CLASS, generate the cosmology for
General Relativity with Planck18 parameters. We export the nonlinear
matter power spectrum up to kmax_class.
cosmology: Planck 18 TT,TE,EE+lowE+lensing (Table 2.)
"""
self.cosmo = Class()
lcdmpars = {'output': 'mPk',
'non linear': 'halofit',
'P_k_max_1/Mpc': self.kmax_class,
'z_max_pk': self.z_list[-1]+2,
'background_verbose': 1, #Info
'tau_reio': 0.0544,
'omega_cdm': 0.1200,
'sigma8': 0.8111,
'h': 0.6736,
'N_ur': 2.99-1.,
'N_ncdm': 1.,
'm_ncdm': 0.06,
'omega_b': 0.0224,
'n_s': 0.9649,
}
#'A_s': 2.204e-9,
self.cosmo.set(lcdmpars)
self.cosmo.compute()
print('Cosmology generated...')
def P_NL_class(self):
"""
The nonlinear matter power spectrum generated by the Boltzmann code CLASS.
Exports an array of the form:
k P_NL(k, z_1) P_NL(k, z_2) ... P_NL(k, z_n)
. . . .
. . . .
. . . .
"""
# First, generate a list of k values in units of h/Mpc
Nk = 1000
k_list = np.logspace(np.log10(self.kmin), np.log10(self.kmax_class), Nk)
# Generate empty 2D array for P_NL(k,z) of dimensions Nk x Nz
P_NL_list = np.zeros((Nk, self.Nz))
# Populate P_NL(k,z) array.
for z in range(self.Nz):
for k in range(Nk):
P_NL_list[k][z] = self.cosmo.pk(k_list[k], self.z_list[z])
# Combine k_list, and P_NL(k,z)
k_P_NL = np.column_stack((k_list, P_NL_list))
self.k_P_NL_class = k_P_NL
def P_NL_asymptotic(self):
"""
Take the nonlinear matter power spectrum that was generated by CLASS,
out to a k of kmax_class, and extrapolate to kmax. This is done for two reasons:
(i). We need to go out to essentially arbitrarily large k for purposes of integrating
the flexion power spectrum from k=0 to k=inf.
        (ii). For renormalization purposes: the matter power
        spectrum simply follows a power law beyond kmax_class, so we can
        trivially use linear extrapolation in log space out to arbitrarily large k.
The power law is [arXiv:0901.4576]
P_NL \propto k^(ns-4)
where ns = 0.96 is the index of the primordial power spectrum
Exports an array of the form:
k P_NL(k, z_1) P_NL(k, z_2) ... P_NL(k, z_n)
. . . .
. . . .
. . . .
"""
# Define primordial power spectrum index
ns = self.cosmo.n_s()
slope = ns-4
# Get k_list and P_NL(k,z) array from class
k_list_class = self.k_P_NL_class[:,0]
P_NL_class = self.k_P_NL_class[:,1:]
N_class = len(k_list_class)
# Generate list of k values for which P_NL will be extrapolated to
Nk_ext = 1000
k_list_ext = np.logspace(np.log10(self.kmax_class+1), np.log10(self.kmax), Nk_ext)
# Generate empty 2D array for P_NL(k,z) of dimensions Nk_ext x Nz
P_NL_ext = np.zeros((Nk_ext, self.Nz))
# Combine the two k lists:
k_list = np.append(k_list_class,k_list_ext)
# Combine the two P_NL arrays:
P_NL = np.row_stack((P_NL_class,P_NL_ext))
for z in range(self.Nz):
for k in range(Nk_ext):
P_star = np.log10(P_NL[(N_class+k)-1][z]) + (slope)*(np.log10(k_list[(N_class+k)]) - np.log10(k_list[(N_class+k)-1]))
P_star = 10**(P_star)
P_NL[(N_class)+k][z] = P_star
# Combine k_list, and P_NL(k,z)
k_P_NL = np.column_stack((k_list, P_NL))
        self.k_list = k_list
        self.P_NL = P_NL
        return k_P_NL
def z_to_comov(self):
"""
Returns comoving distance as a function of redshift
"""
Planck18 = FlatLambdaCDM(H0=100*0.674, Om0=((0.120+0.0224)/0.674**2.))
comov = Planck18.comoving_distance(self.z_list).value
return comov
def interpolateArrays(self):
"""
Interpolate all arrays in order to turn them into callable functions.
Replace redshift with comoving distance in the interpolators.
"""
# First, get conversion of redshift to comoving distance
self.comov_list = self.z_to_comov()
# Nonlinear matter power spectrum P_NL(comov,k)
self.P_NL_interpolate = interpolate.interp2d(self.comov_list, self.k_list, self.P_NL)
# n(comov) for bin_i and bin_j
self.nz_bin_i_interpolate = interpolate.interp1d(self.comov_list, self.nz_bin_i)
self.nz_bin_j_interpolate = interpolate.interp1d(self.comov_list, self.nz_bin_j)
# Scale factor:
a_list = 1/(1+self.z_list)
self.a_interpolate = interpolate.interp1d(self.comov_list, a_list)
def Pkz_NL(self, k, comov):
return self.P_NL_interpolate(comov, k)
def n_i(self, comov):
return self.nz_bin_i_interpolate(comov)
def n_j(self, comov):
return self.nz_bin_j_interpolate(comov)
def a(self, comov):
return self.a_interpolate(comov)
def q_i(self, comov):
"""
Lensing efficiency function for bin_i
"""
def integrand(comov_prime):
return self.n_i(comov_prime)*(comov_prime-comov)/comov_prime
integral = integrate.quad(integrand, comov, np.max(self.comov_list))[0]
q_i = (3/2)*self.cosmo.Omega_m()*(100*self.cosmo.h()/3e5)**2. * (comov/self.a(comov)) * integral
return q_i
def q_j(self, comov):
"""
Lensing efficiency function for bin_j
"""
def integrand(comov_prime):
return self.n_j(comov_prime)*(comov_prime-comov)/comov_prime
integral = integrate.quad(integrand, comov, np.max(self.comov_list))[0]
q_j = (3/2)*self.cosmo.Omega_m()*(100*self.cosmo.h()/3e5)**2. * (comov/self.a(comov)) * integral
return q_j
def P_F(self, ell):
"""
Cosmic flexion power spectrum as a function of angular wavenumber ell
"""
def integrand(comov):
return (self.q_i(comov)*self.q_j(comov)/comov**2.)*self.Pkz_NL((ell)/comov,comov)
integral = integrate.quad(integrand, np.min(self.comov_list), np.max(self.comov_list))
P_F = ell**2.*integral[0]
return P_F
def P_kappa_F(self, ell):
"""
Cosmic shear-flexion cross power spectrum as a function of angular wavenumber ell
"""
def integrand(comov):
return (self.q_i(comov)*self.q_j(comov)/comov**2.)*self.Pkz_NL((ell)/comov,comov)
integral = integrate.quad(integrand, np.min(self.comov_list), np.max(self.comov_list))
P_kap_F = ell*integral[0]
return P_kap_F
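# Editor's toy check (not part of the original file) of the log-space power-law
# extrapolation in P_NL_asymptotic: stepping log10(P) forward with a fixed slope
# reproduces P ~ k^slope exactly, which is what the extrapolation loop relies on.
if __name__ == "__main__":
    slope = 0.9649 - 4  # n_s - 4
    k = np.logspace(0, 2, 50)
    P = np.empty_like(k)
    P[0] = 1.0  # matches k[0]**slope since k[0] = 1
    for i in range(1, len(k)):
        P[i] = 10 ** (np.log10(P[i - 1]) + slope * (np.log10(k[i]) - np.log10(k[i - 1])))
    assert np.allclose(P, k ** slope)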
| 10,490 | 37.149091 | 134 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/SCLP.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from subroutines.SCLP_solution import SCLP_solution
from subroutines.SCLP_formulation import SCLP_formulation
from subroutines.SCLP_solver import SCLP_solver
from subroutines.utils import relative_to_project
class SCLP_settings():
def __init__(self, find_alt_line =True, tmp_path=None, file_name = None, memory_management= True, hot_start =False,
save_solution = False, check_final_solution=True, check_intermediate_solution=False, suppress_printing = False,
rewind_max_delta = 1, collect_plot_data=False, max_iteration = None):
self.find_alt_line = find_alt_line
self.hot_start = hot_start
self.file_name = file_name
self.save_solution = save_solution
self.check_final_solution = check_final_solution
self.check_intermediate_solution = check_intermediate_solution
self.memory_management = memory_management
self.max_iteration = max_iteration
self.suppress_printing = suppress_printing
self.rewind_max_delta = rewind_max_delta
self.collect_plot_data = collect_plot_data
if tmp_path is None:
self.tmp_path = relative_to_project('')
elif os.path.isdir(tmp_path):
self.tmp_path = tmp_path
else:
self.tmp_path = relative_to_project('')
#@profile
def SCLP(G, H, F, a, b, c, d, alpha, gamma, TT, settings = SCLP_settings(), tolerance=1E-11):
#
# solves the separated continuous linear program:
#
    # Find x,u
# max int_0^T (gamma + (T-t) c) u(t) + d x(t) dt
#
    #   s.t. int_0^t G u(s) ds + F x(t) <= alpha + a t
    #                           H u(t) <= b
    #                 u,x >= 0, 0 < t < T.
#
# and the dual separated continuous linear program:
#
# Find q,p
# min int_0^T (alpha + (T-t) a)' p(t) + b' q(t) dt
#
    #   s.t. int_0^t G' p(s) ds + H' q(t) >= gamma' + c' t
    #                           F' p(t) >= d'
    #                 p,q >= 0, 0 < t < T.
#
#
# Input parameters: G is KxJ array
# H is IxJ array
# F is KxL array
# alpha,a are K column vectors
# b is I column vector
# gamma,c is J row vector
# d is L row vector
# TT scalar, time horizon
# if TT = Inf problem is solved for all ranges of T
#
# MESSAGELEVEL = messagelevel
# GRAPHLEVEL = graphlevel
#
    #   TOL1 = tol1   numerical precision value rounded to zero
# TOL2 = tol2 numerical precision value for comparisons
#
# taxis,xaxis,qaxis axes lengths for plots (Inf allowed)
#
#
# Output parameters: t vector of the N+1 breakpoints
# x (K+L)x(N+1) array
# q (J+I)x(N+1) array
# u (J+I)xN array
# p (K+L)xN array
# firstbase The first basis in the solution base sequence
    #                  lastbase    The last basis in the solution base sequence
# pivots The sequence of pivots in the solution base sequence
# Obj scalar, objective value
# Err scalar, objective error |primal-dual|
# inters counts the number of intervals in the final solution
# pivots counts the number of pivots
    #                  flops    counts the number of floating point operations
# ecpu elapsed CPU time
#
# To create movie re-run with values of:
#
# taxis time range
# xaxis x range
# qaxis q range
#
formulation = SCLP_formulation(G, F, H, a, b, c, d, alpha, gamma, TT)
if not settings.hot_start:
# Initiate top level problem, by obtaining the boundary and first dictionary
# default constructor creates main parametric line
param_line = formulation.get_parametric_line(tolerance)
# calculate initial basis
solution = SCLP_solution(formulation, param_line.x_0, param_line.q_N, tolerance, settings)
# building Kset0 and JsetN
param_line.build_boundary_sets(solution.klist, solution.jlist)
else:
import pickle
print('Loading solution!')
if settings.file_name is not None:
solution_file_name = settings.tmp_path + '/' + settings.file_name + '_solution.dat'
line_file_name = settings.tmp_path + '/' + settings.file_name + '_param_line.dat'
else:
solution_file_name = settings.tmp_path + '/solution.dat'
line_file_name = settings.tmp_path + '/param_line.dat'
try:
solution = pickle.load(open(solution_file_name, 'rb'))
param_line = pickle.load(open(line_file_name,'rb'))
except IOError:
raise Exception('Solution files not found in: ' + settings.tmp_path)
param_line.theta_bar = TT
if settings.memory_management:
from subroutines.memory_manager import memory_manager
mm = memory_manager(formulation.K, formulation.J + formulation.L, formulation.I)
else:
mm = None
# Solve the problem, by a sequence of parametric steps
solution, STEPCOUNT, pivot_problem = SCLP_solver(solution, param_line, 'toplevel',
0, 0, dict(), settings, tolerance, settings.find_alt_line, mm)
# extract solution for output
is_ok = solution.update_state(param_line, check_state=settings.check_final_solution, tolerance = tolerance *10)
if pivot_problem['result'] > 0 or settings.save_solution or not is_ok:
print('Saving solution!')
solution.prepare_to_save()
import pickle
if settings.file_name is None:
file_name = settings.tmp_path + '/SCLP'
else:
file_name = settings.file_name
solution_file_name = file_name + '_' + str(STEPCOUNT) +'_solution.dat'
line_file_name = file_name + '_' + str(STEPCOUNT) + '_param_line.dat'
pickle.dump(solution, open(solution_file_name, 'wb'))
pickle.dump(param_line, open(line_file_name, 'wb'))
return solution, STEPCOUNT, param_line, pivot_problem['result']
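# Example (sketch, not executed): a minimal end-to-end call mirroring the
# usage in doe/doe.py; it assumes the SCLPsolver package is importable and
# all sizes, rates and the seed below are illustrative only.
#
# from .doe.data_generators.MCQN import generate_MCQN_data
# G, H, F, gamma, c, d, alpha, a, b, _, _, _ = generate_MCQN_data(1000, 10, 3)
# solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, 10.0)
# t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(False)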
| 6,369 | 39.833333 | 128 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/__init__.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 37.466667 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/__init__.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 37.466667 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/doe_utils.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
default_translation_table = {'sum_rate': 'sr', 'gdist':'gd', 'gdist_params':'gdp', 'h_rate': 'hr', 'hdist': 'hd', 'hdist_params' :'hdp',
'alpha_rate': 'alpr', 'alpha_dist': 'alpd', 'alpha_dist_params': 'alpdp', 'a_rate': 'ar', 'a_dist' : 'ad',
'a_dist_params': 'adp', 'cost_scale': 'cs', 'cost_dist': 'cd', 'cost_dist_params': 'cdp', 'gamma_rate': 'gmr',
'gamma_dist' : 'gmd', 'gamma_dist_params': 'gmdp', 'c_scale': 'ccs', 'c_dist' : 'ccd', 'c_dist_params': 'ccdp'}
def reverse_translation_table(table):
result = dict()
for k,v in table.items():
        result[v] = k
    return result
class path_utils:
def __init__(self, home_path):
if home_path is None:
self.home_path = os.path.expanduser('~/Box/SCLP comparison/data')
else:
self.home_path = home_path
        cplex_data_path = os.path.join(self.home_path, 'CPLEX')
if not os.path.exists(cplex_data_path):
os.makedirs(cplex_data_path)
def get_experiment_type_path(self, exp_type):
return self.home_path + '/' + exp_type
def get_experiment_path_old(self, exp_type, K, I, seed):
return self.get_experiment_type_path(exp_type) + '/K'+str(K)+'/I' + str(I)+ '/seed' + str(seed)
def get_experiment_path(self, exp_type, **kwargs):
path = self.get_experiment_type_path(exp_type)
if kwargs is not None:
for k,v in kwargs.items():
path += '/' + str(k) + str(v)
return path
def get_CPLEX_data_file_name(self, exp_type, translation_table = None, **kwargs):
path = os.path.join(self.home_path,'CPLEX', exp_type + '_')
if kwargs is not None:
kwargs = self.translate_param_names(translation_table, **kwargs)
for k,v in kwargs.items():
path += str(k) + str(v) + '_'
return path + 'data.dat'
def get_tmp_data_file_name(self, exp_type, translation_table = None, **kwargs):
path = self.home_path + '/tmp/' + exp_type + '_'
if kwargs is not None:
kwargs = self.translate_param_names(translation_table, **kwargs)
for k,v in kwargs.items():
path += str(k) + str(v) + '_'
return path
def get_CPLEX_data_path(self):
return os.path.join(self.home_path,'CPLEX')
def translate_param_names(self, translation_table=None, **kwargs):
if translation_table is None:
translation_table = default_translation_table
result = dict()
if kwargs is not None:
for k,v in kwargs.items():
if k in translation_table.keys():
result[translation_table[k]] = v
else:
result[k] = v
return result
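if __name__ == '__main__':
    # Example (sketch): build experiment paths and abbreviated CPLEX data
    # file names; '/tmp/sclp_data' is an illustrative, writable location.
    pu = path_utils('/tmp/sclp_data')  # also creates '/tmp/sclp_data/CPLEX'
    print(pu.get_experiment_path('MCQN', K=10, I=3, seed=1000))
    print(pu.get_CPLEX_data_file_name('MCQN', K=10, I=3, sum_rate=0.8))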
| 3,396 | 39.927711 | 140 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/results_producer.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import csv
def combine_results(python_results, cplex_results, discr, dual=False, xobj=False):
d = 'd' if dual else ''
if xobj:
prefix = 'cplex_x_'+d+str(discr)
else:
prefix = 'cplex_' + d + str(discr)
for pres in python_results:
for cres in cplex_results:
if cres['file'] == pres['file']:
if xobj:
pres[prefix+'_objective'] = pres['buffer_cost'] - cres['objective']
pres[prefix+ '_real_objective'] = cres['objective']
else:
pres[prefix+'_objective'] = cres['objective']
pres[prefix + '_real_objective'] = pres['buffer_cost'] - cres['objective']
pres[prefix+'_time'] = cres['time']
if xobj:
optimality_gap = cres['objective'] - pres['real_objective']
elif dual:
optimality_gap = cres['objective'] - pres['objective']
else:
optimality_gap = pres['objective'] - cres['objective']
pres[prefix + '_relative_objective'] = optimality_gap/abs(pres['objective'])
pres[prefix + '_real_relative_objective'] = optimality_gap / abs(pres['real_objective'])
pres[prefix + '_relative_time'] = cres['time'] / pres['time']
if dual:
pres['cplex_' + str(discr) + '_duality_gap'] = cres['objective'] - pres['cplex_'+str(discr)+'_objective']
pres['cplex_' + str(discr) + '_relative_gap'] = (cres['objective'] - pres['cplex_'+str(discr)+'_objective']) / pres['objective']
return python_results
def add_raw_tau(results, raw_tau):
for pres in results:
for cres in raw_tau:
if cres['file'] == pres['file']:
pres['raw_tau'] = str(cres['raw_tau'].tolist())[1:-1]
return results
def write_results_to_csv(results, res_file, overwrite=False, raw_tau = None):
if raw_tau is not None:
results = add_raw_tau(results, raw_tau)
if os.path.isfile(res_file) and not overwrite:
csvfile = open(res_file, "a", newline='')
reswriter = csv.writer(csvfile)
else:
csvfile = open(res_file, "w", newline='')
reswriter = csv.writer(csvfile)
reswriter.writerow(results[0].keys())
for res in results:
reswriter.writerow(res.values())
csvfile.close()
def read_results_from_csv(res_file):
with open(res_file, newline='') as csvfile:
reader = csv.DictReader(csvfile)
results = [row for row in reader]
return results
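if __name__ == '__main__':
    # Example (sketch): write two illustrative result rows to CSV and read
    # them back; the file name and the row fields are arbitrary.
    demo = [{'file': 'a.dat', 'objective': 1.0, 'time': 0.5},
            {'file': 'b.dat', 'objective': 2.0, 'time': 0.7}]
    write_results_to_csv(demo, 'demo_results.csv', overwrite=True)
    print(read_results_from_csv('demo_results.csv'))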
| 3,197 | 40 | 148 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/doe.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import numpy as np
from functools import partial
from math import sin, pi
from .data_generators.MCQN import generate_MCQN_data, perturb_MCQN_data
from .data_generators.reentrant import generate_reentrant_data
from .data_generators.write_CPLEX_dat import write_CPLEX_dat
from .data_generators.simple_reentrant import generate_simple_reentrant_data
from .doe_utils import path_utils
from ..SCLP import SCLP, SCLP_settings
def sin_uncertainty(h: float, height: float, width: float, amps: list, freqs: list, shifts: list, t: float):
"""Generate a sine-based continuous uncertainty function of time, centered around h
and evaluate it at t.
Parameters
----------
h: float
The true point.
    height: float
The total height of the uncertainty.
width: float
The width of the time interval.
amps: list
The amplitudes of each sine function.
freqs: list
The frequencies of each sine function.
shifts: list
The amount to shift the sin functions.
t: float
        The time at which to evaluate the function.
Returns
-------
float
The random function evaluated at t
"""
k = len(amps)
if k != len(freqs):
raise RuntimeError("amps and freqs parameters must have same length")
if k != len(shifts):
raise RuntimeError("amps and shifts parameters must have same length")
return h * (1 + 0.5*height) + 0.5 * sum([amps[i] * sin(freqs[i] * pi * t / width + shifts[i]) for i in range(k)])
def sin_uncertainty_low(h: float, height: float, width: float, amps: list, freqs: list, shifts: list, t: float):
"""Generate a sine-based continuous uncertainty function of time, centered around h
and evaluate it at t.
Parameters
----------
h: float
The true point.
    height: float
The total height of the uncertainty.
width: float
The width of the time interval.
amps: list
The amplitudes of each sine function.
freqs: list
The frequencies of each sine function.
shifts: list
The amount to shift the sin functions.
t: float
        The time at which to evaluate the function.
Returns
-------
float
The random function evaluated at t
"""
k = len(amps)
if k != len(freqs):
raise RuntimeError("amps and freqs parameters must have same length")
if k != len(shifts):
raise RuntimeError("amps and shifts parameters must have same length")
return h + 0.5 * sum([amps[i] * sin(freqs[i] * pi * t / width + shifts[i]) for i in range(k)])
def gen_uncertain_param(params: np.ndarray, domain: tuple, perturbation: tuple, budget: tuple = None,
uncertain_func = sin_uncertainty, k: int = 4, seed: int = None) -> np.ndarray:
"""Generate functions for producing the "uncertain" values of parameters.
This function takes a vector/matrix of parameters and
generates corresponding continuous functions that are
a random distance away from the original parameters.
The functions each take the time value from the domain
parameter and produce an output in the range.
The result will be an np.ndarray of such functions.
The shape of the resulting array is the same as
the shape of the input parameter.
Parameters
----------
params : np.ndarray of numbers
The parameters which will be randomized over time. For each 0 value, the 0 function will be generated.
domain : tuple of int
The time domain of the functions
perturbation : tuple of numbers
The relative amount to perturb the output range of the functions.
For example, (0, 0.1) will perturb the parameters 10% on the upside.
budget : tuple of numbers
The maximum total uncertainty per row of parameters at any time t.
Default is None.
uncertain_func: function
Function that generates uncertainty functions of time.
Must accept parameters h, height, width, amps, freqs, shifts, t.
Default is sin_uncertainty(h, height, width, amps, freqs, shifts, t).
k: int
The number of sine wave perturbations. Default is 4.
seed: int
Random number generator seed or None (default).
Returns
-------
np.ndarray
Functions from the domain to the range,
randomly perturbed from the input.
"""
if seed:
np.random.seed(seed)
shape = params.shape
if len(shape) > 1:
J = shape[1]
else:
J = shape[0]
left, right = domain
width = right - left
result = np.empty(shape, dtype=object)
perturb_low, perturb_high = perturbation
height = perturb_high - perturb_low
for index, h in np.ndenumerate(params):
if h == 0:
result[index] = lambda t: 0
else:
result[index] = partial(uncertain_func, h, height, width, [h*height/k] * k, range(1,k+1), np.random.uniform(0, 2*pi, k))
if budget is None:
return result
else:
param_rows = params.sum(axis=1)
b_result = np.empty(shape, dtype=object)
def excess(i, t):
r = []
for j in range(J):
f = result[i, j]
r.append((j, (f(t) - params[i, j])))
            return sorted(r, key=lambda x: x[1] / params[i, x[0]], reverse=True)
def budget_row(i, j, t):
excess_row = partial(excess, i)
            total = sum(x for _, x in excess_row(t))
for jj, diff in excess_row(t):
f = result[i, jj]
if total > 0 and diff > budget[i] * params[i, j]:
amt = (diff - budget[i] * params[i, j])
amt = min(total, amt)
total -= amt
else:
amt = 0
if j == jj:
return f(t) - amt
for i, p in np.ndenumerate(params):
b_result[i] = partial(budget_row, i[0], i[1])
return b_result
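# Example (sketch, not executed): gen_uncertain_param returns an array of
# callables, one per parameter entry; the matrix, domain and perturbation
# below are illustrative.
#
# params = np.array([[1.0, 2.0], [3.0, 0.0]])
# funcs = gen_uncertain_param(params, domain=(0, 10), perturbation=(0, 0.1), seed=1)
# vals = [[f(5.0) for f in row] for row in funcs]  # evaluate every entry at t = 5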
def run_experiment_series(exp_type, exp_num, K, I, T, settings, starting_seed = 1000, solver_settings = None,
use_adaptive_T = False, get_raw_tau = True, **kwargs):
failure_trials = 0
ps = {'K':K,'I':I,'T':T}
for k, v in kwargs.items():
ps[k] = v
for k,v in settings.items():
        if hasattr(v, '__name__'):
ps[k] = v.__name__[:4]
else:
ps[k] = str(v)
#pu = path_utils(os.path.expanduser('~/Box/SCLP comparison/data'))
pu = path_utils("C:/DataD/SCLP_data")
results = []
files = []
if get_raw_tau:
raw_tau = []
else:
raw_tau = None
for seed in range(starting_seed, starting_seed + exp_num):
ps['seed'] = seed
if exp_type == 'MCQN':
G, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, buffer_cost = generate_MCQN_data(seed, K, I, **settings)
elif exp_type == 'reentrant':
G, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, buffer_cost = generate_reentrant_data(seed, K, I, **settings)
elif exp_type == 'simple_reentrant':
G, H, F, gamma, c, d, alpha, a, b, TT, total_buffer_cost, buffer_cost = generate_simple_reentrant_data(seed, K, I, **settings)
else:
raise Exception('Undefined experiment type!')
if T is None:
if TT is None:
raise Exception('Undefined time horizon!')
else:
T = TT
if solver_settings is None:
solver_settings = SCLP_settings(find_alt_line=False)
solver_settings.file_name = pu.get_tmp_data_file_name(exp_type)
import time
start_time = time.time()
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, T, solver_settings)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT = solution.get_final_solution(False)
time_to_solve = time.time() - start_time
print(obj, err, solution.last_T, maxT)
print("--- %s seconds ---" % time_to_solve)
print("--- seed %s ---" % seed)
Tres = param_line.T
if res == 0 or use_adaptive_T:
if res != 0:
ps['T'] = 'adpt'
else:
ps['T'] = T
full_file_name = pu.get_CPLEX_data_file_name(exp_type, **ps)
write_CPLEX_dat(full_file_name, Tres, G, H, alpha, a, b, gamma, c, buffer_cost)
path, filename = os.path.split(full_file_name)
buf_cost = total_buffer_cost[0]*Tres+total_buffer_cost[1]*Tres*Tres/2.0
r = {'file': filename, 'seed': seed, 'result': res, 'objective': obj, 'time': time_to_solve,'steps': STEPCOUNT,
'intervals': NN, 'T': Tres, 'maxT': maxT, 'mean_tau': np.mean(tau), 'max_tau': np.max(tau), 'min_tau':np.min(tau),
'std_tau':np.std(tau), 'buffer_cost': buf_cost, 'real_objective':buf_cost - obj}
results.append(r)
if get_raw_tau:
raw_tau.append({'file': filename,'raw_tau':tau})
files.append(full_file_name)
else:
failure_trials +=1
return results, failure_trials, files, raw_tau
def run_experiment_perturbation(exp_type, exp_num, K, I, T, settings, rel_perturbation, symmetric, starting_seed = 1000, solver_settings = SCLP_settings(),
use_adaptive_T = False, get_raw_tau = True, **kwargs):
num_feasible = 0
true_objective = float("inf")
perturbed_obj_vals = list()
# 1. generate the "true" MCQN data
G0, H0, F0, gamma0, c0, d0, alpha0, a0, b0, TT0, buffer_cost0, xcost0 = generate_MCQN_data(starting_seed, K, I, **settings)
# 2. Solve using SCLP
solution0, STEPCOUNT0, param_line0, res0 = SCLP(G0, H0, F0, a0, b0, c0, d0, alpha0, gamma0, T, solver_settings)
t, x, q, u, p, pivots, obj, err, NN, tau, maxT= solution0.get_final_solution()
    true_objective = obj
for seed in range(starting_seed+1, starting_seed + 1 + exp_num):
print("\n\n\n==============")
print(seed)
print("==============\n\n\n")
# 3. Perturb the MCQN data
G, H, F, a, b, c, d, alpha, gamma = perturb_MCQN_data(seed, rel_perturbation, symmetric, G0, H0, F0, a0, b0, c0, d0, alpha0, gamma0)
# 4. Solve the SCLP with perturbed MCQN
solution, STEPCOUNT, param_line, res = SCLP(G, H, F, a, b, c, d, alpha, gamma, T, solver_settings)
# 5. Test
# a. Check if the "true" values are feasible under perturbation
is_feasible = solution.is_other_feasible(solution0)
num_feasible += int(is_feasible)
# b. Assuming feasibility, get the objective value of the "true" solution under the perturbation
if is_feasible:
perturbed_obj_vals.append(solution.other_objective(solution0))
return num_feasible, true_objective, perturbed_obj_vals
def run_experiment_randomized():
return 0
| 11,653 | 38.239057 | 155 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/data_generators/write_CPLEX_dat.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from scipy.io import mmwrite
from scipy.sparse import csr_matrix
import numpy as np
import os
def write_CPLEX_dat(file_name, T, G, H, alpha, a, b, gamma, c, buf_cost):
f1 = file_name + 'g.mtx'
mmwrite(f1, csr_matrix(G))
f2 = file_name + 'h.mtx'
mmwrite(f2, csr_matrix(H))
fout = open(file_name, 'w')
fout.write('T = {:f};\n'.format(T))
fout.write('VarDimension = {:d};\n'.format(G.shape[1]))
fout.write('ConstrDimensionH = {:d};\n'.format(H.shape[0]))
fout.write('ConstrDimensionG = {:d};\n'.format(G.shape[0]))
fout.write('G = {\n')
with open(f1,'r') as fp:
prev_line = fp.readline()
line = prev_line
while line:
if prev_line[0] != '%':
fout.write('<'+ line[:-1] +'>\n')
prev_line = line
line = fp.readline()
fp.close()
os.remove(f1)
fout.write( '};\n\n')
fout.write('H = {\n')
with open(f2, 'r') as fp:
prev_line = fp.readline()
line = prev_line
while line:
if prev_line[0] != '%':
fout.write('<' + line[:-1] + '>\n')
prev_line = line
line = fp.readline()
fp.close()
os.remove(f2)
fout.write('};\n\n')
fout.write('alpha = {\n')
for i in np.nonzero(alpha)[0]:
fout.write('<{:d} {:d} {:.16f}>\n'.format(i+1,1,alpha[i]))
fout.write('};\n\n')
fout.write('a = {\n')
for i in np.nonzero(a)[0]:
fout.write('<{:d} {:d} {:.16f}>\n'.format(i+1, 1, a[i]))
fout.write('};\n\n')
fout.write('b = {\n')
for i in np.nonzero(b)[0]:
fout.write('<{:d} {:d} {:.16f}>\n'.format(i+1, 1, b[i]))
fout.write('};\n\n')
fout.write('buf_cost = {\n')
for i in np.nonzero(buf_cost)[0]:
fout.write('<{:d} {:d} {:.16f}>\n'.format(i + 1, 1, buf_cost[i]))
fout.write('};\n\n')
fout.write('c = {\n')
for i in np.nonzero(c)[0]:
fout.write('<{:d} {:d} {:.16f}>\n'.format(i+1, 1, c[i]))
fout.write('};\n\n')
fout.write('gamma = {\n')
for i in np.nonzero(gamma)[0]:
fout.write('<{:d} {:d} {:.16f}>\n'.format(i+1, 1, gamma[i]))
fout.write('};\n\n')
fout.close()
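if __name__ == '__main__':
    # Example (sketch): dump a tiny 2-buffer, 1-server model to an OPL .dat
    # file, assuming scipy is installed; the file name and all numbers are
    # illustrative.
    G = np.eye(2)
    H = np.array([[0.5, 0.7]])
    write_CPLEX_dat('demo.dat', 10.0, G, H,
                    alpha=np.array([1.0, 2.0]), a=np.array([0.1, 0.0]),
                    b=np.array([1.0]), gamma=np.zeros(2),
                    c=np.array([2.0, 3.0]), buf_cost=np.array([2.0, 3.0]))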
| 2,761 | 33.962025 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/data_generators/reentrant.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# General Reentrant line:
# goes through machines in some random order.
# holding costs are general random.
# More initial fluid in buffer 1,
# random initial amounts in all the others buffers.
# We generate random problem instances as follows:
# system dynamics are:
# Fluid flows from step 1 to 2 ... to K and out.
# a(1) = 100.
# initial buffer levels are a(k)= ~U(0,40), k=2,...,K
# average processing times of step k are m(k)= ~U(0,3)
# average input rates are alpha(k)= ~U(0,0.01)
# steps k at machines i(k), k=1,....
# M(i,k) = m(k) if step k on machine i, 0 else.
# resource limits: b(i)=1, with sum_k M(i,k)*u(k) <= b(i).
# holding costs rates for fluid in buffer k:
# cost(k) = ~U(0,2) .
def generate_reentrant_data(seed, K, I, h_rate = 0.3, hdist = np.random.rand, hdist_params = (), first_alpha = 100, alpha_rate = 40,
alpha_dist = np.random.rand, alpha_dist_params = (), a_rate = 0.01, a_dist = np.random.rand, a_dist_params = (),
cost_scale = 2, cost_dist = np.random.rand, cost_dist_params = (), gamma_rate=0,
gamma_dist=np.random.rand, gamma_dist_params=(), c_scale = 0, c_dist = np.random.rand, c_dist_params = ()):
np.random.seed(seed)
b = np.ones(I)
G = np.eye(K) - np.diag(np.ones(K - 1), -1)
# construct random machine constituency matrix
cols = np.arange(K)
np.random.shuffle(cols)
H = np.zeros((I, K))
rows = np.concatenate((np.arange(I), np.random.choice(I, K-I, True)))
H[rows, cols] = h_rate * hdist(*hdist_params, K)
# initial fluid
alpha = alpha_rate * alpha_dist(*alpha_dist_params, K)
alpha[0] = first_alpha
# exogenous input rate
a = a_rate * a_dist(*a_dist_params, K)
F = np.empty((K, 0))
d = np.empty(0)
if gamma_rate == 0:
gamma = np.zeros(K)
else:
gamma = gamma_rate * gamma_dist(*gamma_dist_params, K)
if c_scale != 0:
c = c_scale * c_dist(*c_dist_params, K) * np.random.choice([-1, 1], K, True)
else:
c = np.zeros(K)
cost = None
if cost_scale != 0:
cost = cost_scale * cost_dist(*cost_dist_params, K)
# this produce negative and positive costs!
c += np.matmul(cost, G)
total_buffer_cost = (np.inner(cost, alpha), np.inner(cost, a))
else:
total_buffer_cost = (0, 0)
return G, H, F, gamma, c, d, alpha, a, b, None, total_buffer_cost, cost
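if __name__ == '__main__':
    # Example (sketch): a 10-buffer, 3-machine reentrant line; the seed and
    # sizes are illustrative.
    G, H, F, gamma, c, d, alpha, a, b, TT, total_cost, cost = \
        generate_reentrant_data(1000, 10, 3)
    print(G.shape, H.shape, alpha[0])  # alpha[0] == first_alpha == 100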
| 3,035 | 37.43038 | 140 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/data_generators/data_loader.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def load_data(path):
G = np.load(path + '/G.dat', allow_pickle=True)
F = np.load(path + '/F.dat', allow_pickle=True)
H = np.load(path + '/H.dat', allow_pickle=True)
a = np.hstack(np.load(path + '/a.dat', allow_pickle=True))
b = np.hstack(np.load(path + '/b.dat', allow_pickle=True))
c = np.hstack(np.load(path + '/c.dat', allow_pickle=True))
d = np.load(path + '/d.dat', allow_pickle=True)
if np.size(d) ==0:
d = np.empty(shape=(0))
if np.size(F) ==0:
F = np.empty(shape=(G.shape[0], 0))
alpha = np.hstack(np.load(path + '/alpha.dat', allow_pickle=True))
gamma = np.hstack(np.load(path + '/gamma.dat', allow_pickle=True))
return G, H, F, gamma, c, d, alpha, a, b, None
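# Example (sketch, not executed): load a previously dumped model; the
# directory below is illustrative and must contain G.dat, F.dat, H.dat,
# a.dat, b.dat, c.dat, d.dat, alpha.dat and gamma.dat.
#
# G, H, F, gamma, c, d, alpha, a, b, T = load_data('/tmp/sclp_model')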
| 1,329 | 40.5625 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/data_generators/MCQN.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
# General Multi - Class Queueing Network:
# buffers numbered k = 1, ..., K.
# Each buffer is served by one of the I machines,
# buffer k served by machine i(k).
# we generate random constituencies of the machines.
# average service time m(k) = ~U(0, 3).
# M(i, k) = m(k) if i(k) = i, 0 else.
# fluid from buffer k goes to buffer l in proportion
# p(k, l), where these are random, and sum to < 1,
# 1 - sum_l p(k, l) leaves the system.
#
# initial buffer levels are a(k) = ~U(0, 40), k = 1, ..., K
# random small input rate alpha(k) = ~U(0, 0.01)
# random holding costs cost(k) = ~U(0, 2).
# resource limits: b(i) = 1, with sum_k M(i, k) * u(k) <= b(i).
def generate_MCQN_data(seed, K, I, nz = 0.4, sum_rate=0.8, gdist=np.random.rand, gdist_params=(), h_rate = 0.6, hdist = np.random.rand,
hdist_params = (), alpha_rate = 40, alpha_dist = np.random.rand, alpha_dist_params = (), a_rate = 0.01, a_dist
= np.random.rand, a_dist_params = (), cost_scale = 2, cost_dist = np.random.rand, cost_dist_params = (),
gamma_rate = 0, gamma_dist=np.random.rand, gamma_dist_params=(), c_scale = 0, c_dist = np.random.rand, c_dist_params = ()):
np.random.seed(seed)
b = np.ones(I)
# transition probabilities
# ~nz of them are > 0,
# they sum up to ~sum_rate so ~1-sum_rate flows out.
if gdist is np.random.rand:
P = gdist(K,K)
else:
P = gdist(*gdist_params, (K,K))
P-= (1- nz) * np.ones((K,K)) + np.eye(K)
P[P < 0] = 0
P[0, K-1] += 0.1
coeff = (1/sum_rate - 1) * 2
P+= np.diag(np.full(K-1,0.1),-1)
    P /= np.outer(np.ones(K) + coeff * np.random.rand(K), P.sum(axis=0))
G = np.eye(K) - P
# construct random machine constituency matrix
cols = np.arange(K)
np.random.shuffle(cols)
H = np.zeros((I, K))
rows = np.concatenate((np.arange(I),np.random.choice(I,K-I,True)))
H[rows,cols] = h_rate * hdist(*hdist_params, K)
# initial fluid
alpha = alpha_rate * alpha_dist(*alpha_dist_params, K)
# exogenous input rate
a = a_rate * a_dist(*a_dist_params, K)
F = np.empty((K,0))
d = np.empty(0)
if gamma_rate==0:
gamma = np.zeros(K)
else:
gamma = gamma_rate * gamma_dist(*gamma_dist_params, K)
if c_scale != 0:
c = c_scale * c_dist(*c_dist_params, K) * np.random.choice([-1,1],K,True)
else:
c = np.zeros(K)
cost = None
if cost_scale != 0:
cost = cost_scale * cost_dist(*cost_dist_params, K)
#this produce negative and positive costs!
c += np.matmul(cost, G)
total_buffer_cost = (np.inner(cost, alpha),np.inner(cost, a))
else:
total_buffer_cost = (0,0)
return G,H,F,gamma,c,d,alpha,a,b,None,total_buffer_cost, cost
def perturb_MCQN_data(seed, rel_perturbation, symmetric, G0, H0, F0, a0, b0, c0, d0, alpha0, gamma0):
if seed:
np.random.seed(seed)
if symmetric:
lower, upper = -1, 1
else:
lower, upper = 0, 1
# F = 0
# H, a, b, alpha >= 0
G = G0 * (1 + rel_perturbation * np.random.uniform(lower, upper, size=G0.shape))
np.fill_diagonal(G, 1)
H = H0 * (1 - rel_perturbation * np.random.uniform(0, upper, size=H0.shape))
F = F0 * (1 + rel_perturbation * np.random.uniform(lower, upper, size=F0.shape))
a = a0 * (1 + rel_perturbation * np.random.uniform(0, upper, size=a0.shape))
b = b0 * (1 + rel_perturbation * np.random.uniform(0, upper, size=b0.shape))
c = c0 * (1 + rel_perturbation * np.random.uniform(lower, upper, size=c0.shape))
d = d0 * (1 + rel_perturbation * np.random.uniform(lower, upper, size=d0.shape))
alpha = alpha0 * (1 + rel_perturbation * np.random.uniform(0, upper, size=alpha0.shape))
gamma = gamma0 * (1 + rel_perturbation * np.random.uniform(lower, upper, size=gamma0.shape))
return G,H,F,a,b,c,d,alpha,gamma
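if __name__ == '__main__':
    # Example (sketch): generate a small MCQN instance and a 10% symmetric
    # perturbation of it; the seeds and sizes are illustrative.
    G, H, F, gamma, c, d, alpha, a, b, _, _, _ = generate_MCQN_data(1000, 10, 3)
    Gp, Hp, Fp, ap, bp, cp, dp, alphap, gammap = \
        perturb_MCQN_data(1001, 0.1, True, G, H, F, a, b, c, d, alpha, gamma)
    print(np.max(np.abs(Gp - G)))  # size of the applied perturbation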
| 4,490 | 38.743363 | 147 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/data_generators/WorkloadPlacement.py
|
# Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def generate_workload_placement_data(T, I, J, R, P, a, mu, x0, r, rprime):
"""Generate a random SCLP model of the Workload Placement problem.
For now, only CPU no RAM.
Suppose I = 2, J = 3
Then K = 6
H is I x K
H = [[1, 1, 1, 0, 0, 0], [0, 0, 0, 1, 1, 1]] representing all classes on server 1 in 1st row, etc.
suppose mu = (4, 5)
G is J x K = 3x6
G = [[-4, 0, 0, -5, 0, 0], [0, -4, 0, 0, -5, 0], [0, 0, -4 ,0, 0, -5]]
Fprime is J x K = 3x6
Fprime = [[1, 0, 0, 1, 0, 0], [0, 1, 0, 0, 1, 0], [0, 0, 1, 0, 0, 1]]
F is J x (K-J) = 3x3
F = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
alpha is J vector with initial number of jobs in each class
Parameters
----------
T : numeric
Time horizon
I : int
Number of servers
    J : int
Number of job classes
R : np.ndarray
1D array of `int` (size I) containing the total RAM on servers
P : np.ndarray
1D array of `int` (size I) containing the total CPU rates (tasks per unit time) on servers
a : np.ndarray
1D array of `int` (size J) containing the task arrival rates by class
mu : np.ndarray
1D array of `int` (size J) containing the task service rates by class
x0 : np.ndarray
2D array of `int` (size I x J) containing the initial buffer quantity for server, class
r : np.ndarray
1D array of `int` (size J) containing the run-independent RAM requirements by class
rprime : np.ndarray
1D array of `int` (size J) containing the run-dependent RAM requirements by class
Returns
-------
A tuple with the following values:
G
H
F
gamma
c
d
alpha
a
b
T
total_buffer_cost
cost
"""
# K number of buffers / flows
K = I * J
# H is composed of 2 parts: CPU constraints and eventually plus RAM constraints
H = np.array([np.repeat([ii == i for ii in range(I)], J) for i in range(I)], dtype="int")
# b CPU per server and eventually RAM
b = P.copy()
# G Task flows between buffers
G = np.array([[jj == j for jj in range(J)] * I for j in range(J)], dtype="int") * np.repeat(mu, J) * -1
# F relates buffers belong to each server
F = np.array([[jj == j for jj in range(J)] * (I-1) for j in range(J)], dtype="int")
# alpha initial values of tasks in buffers
alpha = x0.sum(axis=0)
# gamma unit cost per unit time of CPU of server and class
gamma = np.zeros(K)
# d is cost per unit time per task per server in buffer
d1 = np.ones(J)
d2 = np.ones(K-J)
d = np.matmul(d1, F) -d2
# cost ?
cost = np.ones(J) # cost per unit time processing by class, hard-coded to 1 for now
# c is cost per unit time of the buffers on the controls for the objective function
c = np.matmul(d1, G)
# total_buffer_cost is for inverse transformation when there is no F
# not needed for this model
total_buffer_cost = (0, 0)
return G,H,F,gamma,c,d,alpha,a,b,T,total_buffer_cost,cost
def generate_workload_placement_data_new(a1, a2, b1, c1, c2, tau1, tau2, alpha1, alpha2, normalize=True):
"""
Generate workload data, new format.
This function currently works for a single server with two queues for servicing two job classes.
Parameters
----------
a1 : float
task arrival rate of class 1 in tasks per unit time
a2 : float
task arrival rate of class 2 in tasks per unit time
b1 : float
cpu limit of server 1
c1 : float
cost per unit time of buffer 1
c2 : float
cost per unit time of buffer 2
tau1 : float
time to complete task of class 1
tau2 : float
time to complete task of class 2
alpha1 : float
initial quantity of buffer 1
alpha2 : float
initial quantity of buffer 2
normalize : bool
Generate a model where the decision variable is normalized to a fraction of the total server capacity (True),
or is the actual flow rate (False).
Default True.
Returns
-------
A tuple with the following values:
G
H
F
gamma
c
d
alpha
a
b
T
total_buffer_cost
cost
"""
a = np.array((a1,a2))
b = np.array((b1,))
d = np.empty(0)
alpha = np.array((alpha1, alpha2))
gamma = np.zeros(2)
mu1, mu2 = 1.0 / tau1, 1.0 / tau2
if normalize:
G = np.diag((1.0, 1.0))
H = np.array(((tau1, tau2),))
else:
G = np.diag((mu1, mu2))
H = np.array(((1.0, 1.0),))
F = np.empty((2, 0))
cost = np.array((c1, c2))
total_buffer_cost = (np.inner(cost, alpha), np.inner(cost, a))
c = np.matmul(cost, G)
T = None
return G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost
def generate_workload_placement_data_paper(a, b, c, tau, alpha, normalize=True):
"""
Generate workload data, new format.
This function currently works for a single server with two queues for servicing two job classes.
Parameters
----------
a : list of float
task arrival rates by job buffer (vector of size K)
b : list of float
cpu limits per server (vector of size I)
c : list of float
holding cost per unit time per buffer (vector of size K)
tau : list of float
mean time to complete tasks by job class (vector of size J)
alpha : alpha float
initial quantity of buffers (vector of size K)
normalize : bool
Generate a model where the decision variable is normalized to a fraction of the total server capacity (True),
or is the actual flow rate (False).
Default True.
Returns
-------
A tuple with the following values:
G
H
F
gamma
c
d
alpha
a
b
T
total_buffer_cost
cost
"""
I = len(b)
K = len(tau)
J = len(a)
a = np.array(a)
b = np.array(b)
d = np.empty(0)
alpha = np.array(alpha)
gamma = np.zeros(K)
tau = np.array(tau)
mu = np.divide(np.ones(shape=tau.shape), tau)
if normalize:
G = np.eye(K) # no flows between buffers yet
        H = np.array([[tau[j] if j % I == i else 0 for j in range(J)] for i in range(I)])
else:
G = np.diag(mu)
        H = np.array([[1 if j % I == i else 0 for j in range(J)] for i in range(I)])
F = np.empty((K, 0))
cost = np.array(c)
total_buffer_cost = (np.inner(cost, alpha), np.inner(cost, a))
c = np.matmul(cost, G)
T = None
return G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost
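if __name__ == '__main__':
    # Example (sketch): two job classes on a single server with normalized
    # controls; all rates, costs and initial levels are illustrative.
    G, H, F, gamma, c, d, alpha, a, b, T, total_buffer_cost, cost = \
        generate_workload_placement_data_new(0.5, 0.4, 1.0, 2.0, 1.0,
                                             0.1, 0.2, 10.0, 5.0)
    print(H)  # [[0.1 0.2]]: CPU fraction used per unit of processing rate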
| 7,323 | 29.139918 | 117 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/data_generators/__init__.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 576 | 37.466667 | 74 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/data_generators/simple_reentrant.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def generate_simple_reentrant_data(seed, K, I, h_rate1 = 0.08, h_rate2 = 0.045, hdist = np.random.rand, hdist_params = (), alpha_rate1 = 0.8, alpha_rate2 = 0.45,
alpha_dist = np.random.rand, alpha_dist_params = (), a_rate = 0.03, a_dist = np.random.rand, a_dist_params = (),
cost_scale = 0, cost_dist = np.random.rand, cost_dist_params = (), last_cost = None, gamma_rate=0,
gamma_dist=np.random.rand, gamma_dist_params=(), c_scale = 5, c_dist = np.random.rand, c_dist_params = ()):
np.random.seed(seed)
b = np.ones(I)
G = np.eye(K) - np.diag(np.ones(K - 1), -1)
# initial fluid
cf = round(15 / K, 6)
alpha_mean = np.arange(cf * (K+0.99), cf, -cf)
alpha = alpha_rate1 * alpha_mean + alpha_rate2 * alpha_mean * alpha_dist(*alpha_dist_params, K)
# exogenous input rate
a_mean = a_rate * alpha_mean
a = alpha_rate1 * a_mean + alpha_rate2 * a_mean * a_dist(*a_dist_params, K)
# construct random machine constituency matrix
cols = np.arange(K)
rows = np.repeat(np.arange(I), int(K/I))
H = np.zeros((I, K))
h_mean = 1/np.arange(1,K+1)
H[rows, cols] = h_rate1 * h_mean + h_rate2 * h_mean * hdist(*hdist_params, K)
F = np.empty((K, 0))
d = np.empty(0)
if gamma_rate == 0:
gamma = np.zeros(K)
else:
gamma = gamma_rate * gamma_dist(*gamma_dist_params, K)
if c_scale != 0:
c = c_scale * c_dist(*c_dist_params, K)
else:
c = np.zeros(K)
cost = None
if cost_scale != 0:
cost = cost_scale * cost_dist(*cost_dist_params, K)
if last_cost is not None:
cost[-1] = last_cost
# this produce negative and positive costs!
c += np.matmul(cost, G)
total_buffer_cost = (np.inner(cost, alpha), np.inner(cost, a))
else:
total_buffer_cost = (0, 0)
return G, H, F, gamma, c, d, alpha, a, b, 1.5 * K, total_buffer_cost, cost
| 2,576 | 39.904762 | 161 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/cplex_integration/run_cplex_experiments.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from doopl.factory import *
from os import listdir
from os.path import join, isfile, split
def run_cplex_experiments(data_dir, mod_file, files = None):
results = []
if files is None:
files = [join(data_dir, f) for f in listdir(data_dir) if isfile(join(data_dir, f))]
for dat_file in files:
with create_opl_model(model=mod_file, data=dat_file) as opl:
start_time = time.time()
if opl.run():
obj = opl.objective_value
time_to_solve = time.time() - start_time
path, filename = split(dat_file)
results.append({'file': filename, 'objective': obj, 'time': time_to_solve})
print("ExpName: " + filename + " OBJECTIVE: " + str(obj) + " Time: " + str(time_to_solve))
else:
print("No solution!")
return results
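# Example (sketch, not executed): solve every .dat file in a directory
# against one OPL model; the paths are illustrative and a working
# doopl/CPLEX installation is assumed.
#
# results = run_cplex_experiments('/tmp/sclp_data/CPLEX', 'mod_files/main.mod')
# for r in results:
#     print(r['file'], r['objective'], r['time'])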
| 1,558 | 40.026316 | 106 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/cplex_integration/benchSclp1.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from doopl.factory import *
import os
from os import listdir
from os.path import dirname, abspath, join, isfile
#from doe.doe_utils import path_utils
DATADIRm = join(dirname(abspath(__file__)), 'mod_files')
mod = join(DATADIRm, "main.mod")
#pu = path_utils(os.path.expanduser('~/Box/SCLP comparison/data'))
#DATADIRd = pu.get_CPLEX_data_path()
DATADIRd=os.path.expanduser('~/Box/SCLP comparison/data/CPLEX')
onlyfiles = [f for f in listdir(DATADIRd) if isfile(join(DATADIRd, f))]
print("Test: ", onlyfiles)
status = 127
curr = float("inf")
file = open("results.csv","w")
file.write("ExpName,OBJECTIVE,Time")
file.write('\n')
#print("ExpName","OBJECTIVE","Time", end='\n', file='results.csv')
#print(value, ..., sep=' ', end='\n', file=sys.stdout)
for i in onlyfiles:
dat = join(DATADIRd, i)
with create_opl_model(model=mod, data=dat) as opl:
start_time = time.time()
if opl.run():
obj = opl.objective_value
time_to_solve = time.time() - start_time
print("ExpName: " + str(i) + " OBJECTIVE: " + str(obj) + " Time: " + str(time_to_solve))
file.write(str(i))
file.write(',')
file.write(str(obj))
file.write(',')
file.write(str(time_to_solve))
file.write('\n')
else:
print("No solution!")
file.close()
| 2,069 | 30.846154 | 100 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/cplex_integration/doopl_test.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
# Source file provided under Apache License, Version 2.0, January 2004,
# http://www.apache.org/licenses/
# (c) Copyright IBM Corp. 2018
# --------------------------------------------------------------------------
"""
Shows how to do optimization workflows by running several models.
"""
from doopl.factory import *
import os
from os.path import dirname, abspath, join
DATADIR = join(dirname(abspath(__file__)), 'data')
mod = join(DATADIR, "mulprod.mod")
dat = join(DATADIR, "mulprod.dat")
status = 127
capFlour = 20
best = .0
curr = float("inf")
Capacity = pd.DataFrame({'name' : ['flour', 'eggs'], 'value' : [20, 40]})
while (best != curr):
best = curr
with create_opl_model(model=mod, data=dat) as opl:
opl.set_input("Capacity", Capacity)
print("Solve with capFlour = " + str(capFlour))
if opl.run():
curr = opl.objective_value
print("OBJECTIVE: " + str(curr))
else:
print("No solution!")
capFlour += 1
Capacity.update(pd.DataFrame({'value' : [capFlour]}))
| 1,703 | 31.769231 | 76 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/doe/robust/robust_reformulation.py
|
import numpy as np
#this function should get the original matrix H, the degree of perturbation (d), i.e. \tau_gal = d \tau, and the uncertainty budget
# and return two matrices - one with coefficients related to u_j and one with coefficients related to \alpha and \beta
def do_server_robust_reformulation(H, degree, budget, separate = True):
if separate:
H0_alpha = np.divide(H, H, where=H != 0, out=np.zeros_like(H))
H0_beta = np.diag(budget)
H1 = -np.diag(np.sum(H, axis=0) * degree)
H1_alpha = np.eye(H.shape[1])
H1_beta = H0_alpha.T
return H0_alpha, H0_beta, H1, H1_alpha, H1_beta
else:
ab_dim = np.sum(H.shape)
Hnew = np.zeros((ab_dim, ab_dim+ H.shape[1]))
Hnew[:H.shape[0],:H.shape[1]] = H
np.divide(H, H, where=H != 0, out=Hnew[:H.shape[0], H.shape[1]:2*H.shape[1]])
Hnew[:H.shape[0], 2*H.shape[1]:] = np.diag(budget)
Hnew[H.shape[0]:,:H.shape[1]] = -np.diag(np.sum(H, axis=0) * degree)
Hnew[H.shape[0]:, H.shape[1]:2 * H.shape[1]] = np.eye(H.shape[1])
Hnew[H.shape[0]:, 2*H.shape[1]:] = Hnew[:H.shape[0], H.shape[1]:2*H.shape[1]].T
return Hnew
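if __name__ == '__main__':
    # Example (sketch): robust counterpart blocks for a 2x3 constituency
    # matrix with a 20% perturbation degree and unit per-column budgets;
    # all numbers are illustrative.
    H = np.array([[0.5, 0.0, 0.3], [0.0, 0.7, 0.0]])
    H0_alpha, H0_beta, H1, H1_alpha, H1_beta = \
        do_server_robust_reformulation(H, 0.2, np.array([1.0, 1.0, 1.0]))
    print(H0_alpha.shape, H1.shape)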
| 1,170 | 49.913043 | 119 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/SCLP_solver.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .classification import classification
from .SCLP_pivot import SCLP_pivot
from .collision_info import collision_info
from .time_collision_resolver import reclassify, reclassify_ztau
#'#@profile
def SCLP_solver(solution, param_line, case, DEPTH, STEPCOUNT, ITERATION, settings, tolerance, find_alt_line=True, mm=None):
ITERATION[DEPTH] = 0
col_info = collision_info(case)
pivot_problem = {'result': 0}
solution.print_short_status(STEPCOUNT, DEPTH, ITERATION[DEPTH], 0, 0, case)
solution.clear_collision_stack()
rewind_required = False
while True:
if not rewind_required:
if solution.check_if_complete(param_line):
solution.print_short_status(STEPCOUNT, DEPTH, ITERATION[DEPTH], param_line.theta, param_line.theta, 'complete')
return solution, STEPCOUNT, pivot_problem
res = solution.update_state(param_line, settings.check_intermediate_solution, tolerance*100)
if not res:
return solution, STEPCOUNT, {'result': 1}
col_info, problem = classification(solution, param_line, tolerance)
if problem['result'] > 0:
ztau_ind = solution.get_ztau_ind()
if ztau_ind is not None:
new_col_info = reclassify_ztau(col_info, solution, param_line, ztau_ind, tolerance, DEPTH>0 or not solution.can_rewind())
if new_col_info is None:
rewind_required = True
else:
col_info = new_col_info
if new_col_info.N1 >= solution.last_collision.N1 and new_col_info.N2 <= solution.last_collision.N2 + solution.last_collision.Nnew:
rewind_required = True
else:
rewind_required = False
else:
rewind_required = True
else:
rewind_required = False
if rewind_required:
if DEPTH == 0:
lastCollision = solution.update_rewind()
resolved = False
up_theta = param_line.theta + col_info.delta + 0.04
while lastCollision is not None:
# rewinding to previous iteration
print('rewind... ')
param_line.backward_to(lastCollision.delta)
res = solution.update_state(param_line, settings.check_intermediate_solution, tolerance*10, up_rewind =True)
if not res:
return solution, STEPCOUNT, {'result': 1}
solution.print_status(STEPCOUNT, DEPTH, ITERATION[DEPTH], param_line.theta, lastCollision)
col_info, resolved = reclassify(lastCollision, solution, param_line, tolerance)
if not resolved:
lastCollision = solution.update_rewind()
else:
break
if not resolved:
if find_alt_line and not param_line.is_orthogonal() and param_line.theta > 0:
print('Unable to rewind... Trying to outflank!')
param_line.forward_to(col_info.delta)
main_theta_bar = param_line.theta_bar
param_line.theta_bar = param_line.theta - 0.02 - col_info.delta
print('Going backward by:', 0.02)
solution, STEPCOUNT, pivot_problem = SCLP_solver(solution, param_line, 'start', DEPTH,
STEPCOUNT,
ITERATION, settings, tolerance, mm=mm)
ort_line = param_line.get_orthogonal_line(0.04)
print('Going orthogonal to:', ort_line.theta_bar)
solution, STEPCOUNT, pivot_problem = SCLP_solver(solution, ort_line, 'start', DEPTH, STEPCOUNT, ITERATION, settings, tolerance, mm=mm)
if pivot_problem['result'] == 1:
print('Problem during orthogonal step!')
param_line.theta_bar = up_theta
print('Going forward to:', up_theta)
solution, STEPCOUNT, pivot_problem = SCLP_solver(solution, param_line, 'start', DEPTH, STEPCOUNT,
ITERATION, settings, tolerance, mm=mm)
ort_line.theta_bar = 0
ort_line.T = param_line.T
print('Returning to main line.')
#update T before going forward
solution, STEPCOUNT, pivot_problem = SCLP_solver(solution, ort_line, 'start', DEPTH, STEPCOUNT,
ITERATION, settings, tolerance, mm=mm)
param_line.theta_bar = max(main_theta_bar,param_line.T)
if pivot_problem['result'] == 1:
print('Problem during orthogonal step!')
return solution, STEPCOUNT, pivot_problem
rewind_required = False
continue
else:
pivot_problem['result'] = 1
return solution, STEPCOUNT, pivot_problem
else:
pivot_problem = {'result':1}
print('Rewind in subproblem not supported. Returning to main problem!')
return solution, STEPCOUNT, pivot_problem
if DEPTH == 0 and param_line.is_end(col_info.delta):
col_info.case = 'solved__'
solution.print_status(STEPCOUNT, DEPTH, ITERATION[DEPTH], param_line.theta, col_info)
solution.last_T = param_line.theta
solution.max_valid_T = param_line.theta + col_info.delta
param_line.forward_to_end()
return solution, STEPCOUNT, pivot_problem
STEPCOUNT = STEPCOUNT + 1
ITERATION[DEPTH] = ITERATION[DEPTH] + 1
if settings.max_iteration is not None:
if DEPTH == 0 and ITERATION[DEPTH] >= settings.max_iteration:
return solution, STEPCOUNT, pivot_problem
solution.print_status(STEPCOUNT, DEPTH, ITERATION[DEPTH], param_line.theta, col_info)
if DEPTH > 0 and param_line.is_end(col_info.delta):
print("Theta > 1....")
pivot_problem['result'] = 1
return solution, STEPCOUNT, pivot_problem
if col_info.case == 'Case i__':
solution.update_caseI(col_info)
rewind_required = False
elif col_info.case == 'Case ii_' or col_info.case == 'Case iii':
solution, STEPCOUNT, ITERATION, pivot_problem = SCLP_pivot(param_line.Kset_0, param_line.Jset_N, solution,
col_info, DEPTH, STEPCOUNT, ITERATION, settings,
tolerance)
while pivot_problem['result'] == 1: # theta > 1
print('Pivot problem: trying to resolve * ', col_info.tol_coeff, '...')
new_col_info, resolved = reclassify(col_info, solution, param_line, tolerance)
if resolved:
print('resolved!')
col_info = new_col_info
solution.print_status(STEPCOUNT, DEPTH, ITERATION[DEPTH], param_line.theta, col_info)
if col_info.case == 'Case i__':
solution.update_caseI(col_info)
pivot_problem['result'] = 0
elif col_info.case == 'Case ii_' or col_info.case == 'Case iii':
try:
solution, STEPCOUNT, ITERATION, pivot_problem = SCLP_pivot(param_line.Kset_0, param_line.Jset_N, solution,
col_info, DEPTH, STEPCOUNT, ITERATION, settings, tolerance)
except Exception as ex:
print('Exception during SCLP pivot:')
print(ex)
return solution, STEPCOUNT, {'result': 1}
else:
break
if pivot_problem['result'] == 1:
if DEPTH > 0:
return solution, STEPCOUNT, pivot_problem
else:
rewind_required = True
else:
rewind_required = False
if not rewind_required:
param_line.forward_to(col_info.delta)
if DEPTH == 0:
if mm is not None:
solution.clear_base_sequence(mm)
return solution, STEPCOUNT, pivot_problem
| 9,534 | 51.679558 | 158 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/classification.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .state_tools.calc_statecollide import calc_statecollide
from .collision_info import collision_info
from .time_collision_resolver import calc_timecollide, resolve_and_classify, reclassify
from .matlab_utils import find
#'#@profile
def classification(solution, param_line, tolerance, time_only=False):
    #identify the next collision and classify it
    #problem
    # result = 0 Ok
    # result = 1 state problem
# result = 2 time problem
# result = 3 state problem + time problem
# result = 4 compound problem
# result = 5 state problem + compound problem
# result = 6 time problem + compound problem
# result = 7 state problem + time problem + compound problem
NN = solution.NN
problem = {'result': 0, 'stateProblem': [], 'timeProblem': [], 'compoundProblem': {'result':0, 'data': []}}
Delta = 0
N1 = -1
N2 = NN
v1 = []
v2 = []
case = ''
#CC1, prob = calc_statecollide(solution.klist,solution.jlist, solution.state, tolerance)
if not time_only:
CC1, prob = calc_statecollide(solution.klist,solution.jlist, solution.state, solution.get_raw_dx(),
solution.get_raw_dq(), param_line, solution.loc_min_storage, solution.partial_states, tolerance)
#
problem['stateProblem'] = prob
else:
CC1 =[]
if solution.last_collision is None:
lastN1 = 0
lastN2 = 0
else:
lastN1 = solution.last_collision.N1
lastN2 = solution.last_collision.N2 + solution.last_collision.Nnew
CC2, prob = calc_timecollide(solution.state.tau, solution.state.dtau, lastN1, lastN2, tolerance)
problem['timeProblem'] = prob
if len(CC1) == 0 and len(CC2) == 0:
if problem['stateProblem']['result'] == 0 and problem['timeProblem']['result'] == 0:
case = 'complete'
Delta = np.inf
return collision_info(case, Delta, N1, N2, v1, v2), problem
else:
return collision_info('', Delta, N1, N2, v1, v2), problem
Didle = 0
if len(CC1) > 0 and len(CC2) > 0:
Didle = CC1[0] - CC2[0]
if abs(Didle) <= tolerance and abs(Didle) < CC1[0] and abs(Didle) < CC2[0]:
Didle = 0
if (len(CC1) > 0 and len(CC2) == 0) or Didle < 0:
if problem['stateProblem']['result'] >1:
problem['result'] = 1
return collision_info('', Delta, N1, N2, v1, v2), problem
case = 'Case iii'
Delta = CC1[0]
N1 = CC1[1]
N2 = CC1[1] + 1
if CC1[2] < 0:
v1 = CC1[2]
else:
v2 = CC1[2]
col_info = collision_info(case, Delta, N1, N2, v1, v2)
if problem['stateProblem']['result'] > 0:
col_info.had_resolution = True
if abs(Didle) <= 1000 * tolerance:
if problem['timeProblem']['result'] == 0 and len(CC2) >1:
col_info1, prob1 = resolve_and_classify(CC2[0], CC2[1], solution, param_line, 1, tolerance)
if prob1['result'] == 0:
col_info.alternative = col_info1
col_info.alternative.had_resolution = True
return col_info, problem
elif (len(CC1) == 0 and len(CC2) > 0) or Didle >= 0:
if problem['timeProblem']['result'] != 0:
problem['result'] = problem['result'] + 2
if problem['timeProblem']['result'] == 2:
tol2 = 10 * tolerance
inegDTAU = solution.state.dtau < -tol2
izerTAU = np.fabs(solution.state.tau) <= tol2
solution.store_ztau_ind(find(np.logical_and(izerTAU, inegDTAU)))
col_info = collision_info('', Delta, N1, N2, v1, v2)
else:
if CC2[0] == 0:
N1 = CC2[1][0] - 1
N2 = CC2[1][1] + 1
col_info = collision_info('Case i__', 0, N1, N2, [], [], None, 1)
else:
col_info, prob1 = resolve_and_classify(CC2[0], CC2[1], solution, param_line, 1, tolerance)
tol2 = 10 * tolerance
inegDTAU = solution.state.dtau < -tol2
izerTAU = np.fabs(solution.state.tau) <= tol2
solution.store_ztau_ind(find(np.logical_and(izerTAU, inegDTAU)))
problem['timeProblem'] = prob1
if prob1['result'] != 0:
problem['result'] = 2
col_info = collision_info('', Delta, N1, N2, v1, v2)
else:
col_info.had_resolution = col_info.had_resolution or prob['had_resolution']
if Didle == 0 and len(CC1) >1:
if not (col_info.N1 <= CC1[1] and CC1[1] <= col_info.N2):
print('time shrink as well as state hits zero elsewhere...')
col_info, resolved = reclassify(col_info, solution, tolerance, CC1[1])
# if not resolved:
# print('time shrink as well as state hits zero elsewhere...\n')
# problem['result'] = problem['result'] + 4
# problem['compoundProblem']['result'] = 1
# col_info = collision_info('', Delta, N1, N2, v1, v2)
if abs(Didle) <= 1000 * tolerance:
if len(CC1) > 1:
if col_info.case != '':
col_info.alternative = collision_info('Case iii', CC1[0], CC1[1], CC1[1] + 1,
*((CC1[2], None) if CC1[2] < 0 else (None, CC1[2])))
col_info.alternative.had_resolution = True
else:
col_info = collision_info('Case iii', CC1[0], CC1[1], CC1[1] + 1,
*((CC1[2], None) if CC1[2] < 0 else (None, CC1[2])))
col_info.had_resolution = True
return col_info, {'result': 0, 'stateProblem': [], 'timeProblem': [], 'compoundProblem': {'result':0, 'data': []}}
return col_info, problem
problem['result'] = 8
return None, problem
| 6,700 | 44.89726 | 135 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/parametric_line_ex.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from enum import Enum
from subroutines.parametric_line import parametric_line
class line_type(Enum):
SCLP_main = 0
SCLP_sub = 1
SCLP_orthogonal = 2
MCLP_dual = 3
MCLP_primal = 4
MCLP_both = 5
class parametric_line_ex(parametric_line):
def __init__(self, x_0, q_N, theta_bar, T=0, del_T=1, del_x_0=None, del_q_N=None, gamma=None, del_gamma=None, alpha=None,
del_alpha = None, xlambda = None, mu = None, Kset_0=None, Jset_N=None, B1=None, B2=None, ltype=line_type.SCLP_main):
super().__init__(x_0, q_N, theta_bar, T, del_T, del_x_0, del_q_N, Kset_0, Jset_N, B1, B2, ltype)
self._gamma = gamma
self._alpha = alpha
self._del_gamma = del_gamma
self._del_alpha = del_alpha
self._xlambda = xlambda
self._mu = mu
def is_main(self):
return self._ltype == line_type.SCLP_main
def is_sub(self):
return self._ltype == line_type.SCLP_sub
def is_orthogonal(self):
return self._ltype == line_type.SCLP_orthogonal
def is_SCLP(self):
return self._ltype in [line_type.SCLP_main, line_type.SCLP_sub, line_type.SCLP_orthogonal]
def is_MCLP_dual(self):
return self._ltype == line_type.MCLP_dual
def is_MCLP_primal(self):
return self._ltype == line_type.MCLP_primal
def is_MCLP_both(self):
return self._ltype == line_type.MCLP_both
def _forward_to(self, delta):
if not self.is_SCLP():
if self._gamma is not None and self._del_gamma is not None:
self._gamma += self._del_gamma * delta
if self._alpha is not None and self._del_alpha is not None:
self._alpha += self._del_alpha * delta
if self._xlambda is not None:
self._xlambda -= self._xlambda * delta
if self._mu is not None:
self._mu -= self._mu * delta
super()._forward_to(delta)
def _backward_to(self, delta):
if not self.is_SCLP():
if self._gamma is not None and self._del_gamma is not None:
self._gamma -= self._del_gamma * delta
if self._alpha is not None and self._del_alpha is not None:
self._alpha -= self._del_alpha * delta
if self._xlambda is not None:
self._xlambda += self._xlambda * delta
if self._mu is not None:
self._mu += self._mu * delta
        super()._backward_to(delta)
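# --- usage sketch (illustrative addition, not part of the original file) ---
# Minimal check of the line-type predicates; the array shapes here and the
# assumption that the base class constructor merely stores its arguments are ours.
if __name__ == '__main__':
    line = parametric_line_ex(np.zeros(2), np.zeros(2), 1.0, T=10.0,
                              ltype=line_type.MCLP_dual)
    assert line.is_MCLP_dual() and not line.is_SCLP()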
| 3,071 | 35.141176 | 133 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/SCLP_x0_solver.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .matlab_utils import find
from .classification import classification
from .SCLP_pivot import SCLP_pivot
from .collision_info import collision_info
from .parametric_line import parametric_line
from .SCLP_solver import SCLP_solver
#'#@profile
def SCLP_x0_solver(solution, param_line, target_x0, target_T, DEPTH, STEPCOUNT, ITERATION, settings, tolerance, find_alt_line=True, mm=None):
ITERATION[DEPTH] = 0
K_add_set = find(np.logical_and(param_line.x_0 == 0, target_x0 > 0))
pivot_problem = {'result': 0}
solution.print_short_status(STEPCOUNT, DEPTH, ITERATION[DEPTH], 0, 0, 'x0')
solution.clear_collision_stack()
source_T = param_line.T
for v1 in K_add_set:
col_info = collision_info('x0: ' + str(v1), 0, -1, 0, v1+1, [])
solution, STEPCOUNT, ITERATION, pivot_problem = SCLP_pivot(param_line.Kset_0, param_line.Jset_N, solution, col_info, DEPTH, STEPCOUNT,
ITERATION, settings, tolerance)
param_line = param_line.get_x0_parline(solution, v1+1, target_x0[v1])
res = solution.update_state(param_line, settings.check_intermediate_solution, tolerance * 100)
if not res:
            return STEPCOUNT, {'result': 1}
col_info, problem = classification(solution, param_line, tolerance)
theta = param_line.theta
if param_line.is_end(col_info.delta):
param_line.forward_to_end()
else:
param_line.forward_to(col_info.delta/2)
STEPCOUNT = STEPCOUNT + 1
ITERATION[DEPTH] = ITERATION[DEPTH] + 1
solution.print_short_status(STEPCOUNT, DEPTH, ITERATION[DEPTH], theta, param_line.theta, 'x0: ' + str(v1))
if abs(source_T - target_T) < tolerance:
param_line = parametric_line(param_line.x_0, param_line.q_N, 1, source_T, 0, target_x0 - param_line.x_0, None,
param_line.Kset_0, param_line._Jset_N)
else:
param_line = parametric_line(param_line.x_0, param_line.q_N, 1, source_T, target_T - source_T, target_x0 - param_line.x_0, None,
param_line.Kset_0, param_line._Jset_N)
solution, STEPCOUNT, pivot_problem = SCLP_solver(solution, param_line, 'update', DEPTH, STEPCOUNT, ITERATION, settings, tolerance, find_alt_line,
mm)
return STEPCOUNT, pivot_problem
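# --- call-pattern sketch (illustrative addition) ---
# Mirrors the invocation used by SCLP_solution.recalculate; `solution`,
# `param_line`, `settings`, `tolerance` and `mm` are assumed to be prepared
# by the caller:
#   STEPCOUNT, pivot_problem = SCLP_x0_solver(solution, param_line, new_x0,
#                                             new_T - t0, 0, 0, dict(), settings,
#                                             tolerance, settings.find_alt_line, mm)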
| 2,981 | 47.885246 | 149 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/time_collision_resolver.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .calc_order_ratio import calc_order_ratio
from .collision_info import collision_info
from .matlab_utils import find
from .equation_tools.eq_tools import get_time_ratio
import math
def get_shrinking_intervals(delta, rz, inegDTAU, tol1, tol2, tol_coeff, tol_coeff2, tolerance, shrinking_ind = None):
if abs(delta) < tol2 * tol_coeff2:
if delta <= 0:
print("Negative delta:", delta)
tol_coeff2 = 0.1
resolved = False
while tol_coeff2 >= 0.001 and tol_coeff <= 0.01 / tolerance:
print('immediate collision ... resolving * ', tol_coeff2, 'delta:', delta)
if abs(delta) < tol2 * tol_coeff2:
print('... fail!')
tol_coeff2 = tol_coeff2 * 0.1
continue
elif delta <= -tol2 * tol_coeff2:
problem = 1
return None, problem, tol_coeff, True
elif delta >= tol2 * tol_coeff2:
test = np.fabs(rz * delta - 1)
ind1 = np.logical_and(test < tol1 * tol_coeff, inegDTAU)
if shrinking_ind is not None:
ind1 = np.logical_or(ind1,shrinking_ind)
ishrink = find(ind1)
nn1 = np.min(ishrink)
nn2 = np.max(ishrink)
if len(ishrink) < nn2 - nn1:
if tol_coeff <= 0.01 / tolerance:
print('multiple location shrinks...', ishrink)
tol_coeff = 10 * tol_coeff
continue
else:
break
else:
problem = 0
return [nn1, nn2], problem, tol_coeff, True
if not resolved:
ct = 0.1
while ct >= tol_coeff2:
                test = np.fabs(rz * delta - 1)
ishrink = find(np.logical_and(test < tol1 * tol_coeff/ct, inegDTAU))
nn1 = np.min(ishrink)
nn2 = np.max(ishrink)
if len(ishrink) < nn2 - nn1:
print('multiple location shrinks!...', ishrink)
ct = 0.1 * ct
continue
else:
problem = 0
return [nn1, nn2], problem, tol_coeff, True
problem = 3
return None, problem, tol_coeff, True
elif delta <= -tol2 * tol_coeff2:
problem = 4
return None, problem, tol_coeff, True
elif delta >= tol2 * tol_coeff2:
test = np.fabs(rz * delta - 1)
ind1 = np.logical_and(test < tol1 * tol_coeff, inegDTAU)
if shrinking_ind is not None:
ind1 = np.logical_or(ind1, shrinking_ind)
ishrink = find(ind1)
if (ishrink.shape[0] == 0):
print('here!')
nn1 = np.min(ishrink)
nn2 = np.max(ishrink)
if len(ishrink) < nn2 - nn1:
print('multiple location shrinks...', ishrink)
tol_coeff = 10 * tol_coeff
problem = -1
return None, problem, tol_coeff, True
else:
problem = 0
return [nn1, nn2], problem, tol_coeff, False
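# --- ratio-test sketch (illustrative addition, standalone) ---
# With rz ~ -dtau/tau (see the commented-out code in calc_timecollide below),
# interval n collapses at step delta when tau_n + dtau_n*delta =
# tau_n*(1 - rz_n*delta) ~ 0, i.e. exactly when rz_n*delta ~ 1, which is the
# test applied above.
if __name__ == '__main__':
    tau = np.array([1.0, 0.5, 2.0])
    dtau = np.array([-2.0, -1.0, 0.5])
    rz = -dtau / tau   # [2., 2., -0.25]
    delta = 0.5        # the first two intervals hit zero together at this step
    print(np.fabs(rz * delta - 1) < 1e-9)  # [ True  True False]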
def resolve_and_classify(delta, rz, solution, param_line, tol_coeff0, tolerance, shrinking_ind = None):
problem = {'result': 0}
tol2 = 10 * tolerance
tol1 = tol2
tol_coeff = tol_coeff0
tol_coeff2 = 1
had_resolution = False
inegDTAU = solution.state.dtau < -tol2
while tol_coeff <= 0.01/tolerance:
if tol_coeff > 1:
print('trying to resolve * ',tol_coeff)
intervals, prob, tol_coeff, res = get_shrinking_intervals(delta, rz, inegDTAU, tol1, tol2, tol_coeff, tol_coeff2, tolerance, shrinking_ind)
if prob > 0:
problem['result'] = prob
return None, problem
else:
had_resolution = had_resolution or res
if prob == 0:
N1 = intervals[0] - 1
N2 = intervals[1] + 1
if prob == -1:
continue
if N1 == -1 and N2 == solution.NN:
problem['result'] = 5
print('Max tolerance coefficient reached ....')
return None, problem
col_info = classify_time_collision(delta, rz, tol_coeff, N1, N2, solution, param_line, tolerance)
if col_info is None:
had_resolution = True
else:
col_info.had_resolution = had_resolution
return col_info, problem
tol_coeff = 10 * tol_coeff
problem['result'] = 5
return None, problem
def reclassify(col_info, solution, param_line, tolerance, stateN=None):
if col_info.from_ztau:
res = solution.pivots.find_N1_N2_around(col_info.ztau_ind, col_info.N1-1)
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
if col != col_info:
col.from_ztau = True
col.ztau_ind = col_info.ztau_ind
return col, True
res = solution.pivots.find_N1_N2_around(col_info.ztau_ind, col_info.N1, col_info.N2 + 1)
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
if col != col_info:
col.from_ztau = True
col.ztau_ind = col_info.ztau_ind
return col, True
else:
if col_info.alternative is not None:
return col_info.alternative, True
return col_info, False
else:
tol_coeff = col_info.tol_coeff * 10
resolved = False
if col_info.case == 'Case iii':
if col_info.alternative is not None:
stateN = col_info.N1
col_info = col_info.alternative
new_col_info, problem = resolve_and_classify(col_info.delta, col_info.rz, solution, param_line, tol_coeff, tolerance)
if problem['result'] > 0:
return col_info, False
elif new_col_info.N1 <= stateN and stateN <= new_col_info.N2:
resolved = True
return new_col_info, resolved
else:
col_info = new_col_info
else:
return col_info, resolved
while tol_coeff <= 0.01/tolerance:
new_col_info, problem = resolve_and_classify(col_info.delta, col_info.rz, solution, param_line, tol_coeff, tolerance)
tol_coeff = tol_coeff * 10
if problem['result'] > 0:
break
if new_col_info != col_info:
col_info = new_col_info
if stateN is None:
resolved = True
break
elif col_info.N1 <= stateN and stateN <= col_info.N2:
resolved = True
break
if not resolved:
if col_info.alternative is not None:
return col_info.alternative, True
return col_info, resolved
def reclassify_ztau(col_info, solution, param_line, ztau_ind, tolerance, hard_find=False):
if len(ztau_ind) >= 3:
if ztau_ind[1] - ztau_ind[0] > 2:
if ztau_ind[-1] - ztau_ind[-2] > 2:
res = solution.pivots.find_N1_N2_around(ztau_ind[1:-1])
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
col.from_ztau = True
col.ztau_ind = ztau_ind
return col
res = solution.pivots.find_N1_N2_around(ztau_ind[1:])
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
col.from_ztau = True
col.ztau_ind = ztau_ind
return col
elif ztau_ind[-1] - ztau_ind[-2] > 2:
res = solution.pivots.find_N1_N2_around(ztau_ind[:-1])
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
col.from_ztau = True
col.ztau_ind = ztau_ind
return col
if (len(ztau_ind) > 3 and (max(ztau_ind) - min(ztau_ind) + 1)/len(ztau_ind) > 0.9) or hard_find:
for n in range(len(ztau_ind)):
res = solution.pivots.find_N1_N2_around(ztau_ind[n:])
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
col.from_ztau = True
col.ztau_ind = ztau_ind[n:]
return col
for n in range(1, len(ztau_ind)-1):
res = solution.pivots.find_N1_N2_around(ztau_ind[:-n])
if res is not None:
col = classify_time_collision(col_info.delta, col_info.rz, col_info.tol_coeff, res[0], res[1],
solution, param_line, tolerance)
if col is not None:
col.from_ztau = True
col.ztau_ind = ztau_ind[:-n]
return col
return None
def classify_time_collision(delta, rz, tol_coeff, N1, N2, solution, param_line, tolerance):
if N1 == -1 or N2 == solution.NN:
return collision_info('Case i__', delta, N1, N2, [], [], rz, tol_coeff)
else:
vlist = solution.pivots.get_out_difference(N1, N2)
if len(vlist) > 2:
print('More than two variables leave in time shrink ....')
return None
elif len(vlist) == 1:
return collision_info('Case i__', delta, N1, N2, [], [], rz, tol_coeff)
elif len(vlist) == 2:
case = 'Case ii_'
if N2 - N1 == 2:
return collision_info(case, delta, N1, N2, solution.pivots.outpivots[N1], solution.pivots.outpivots[N1+1], rz, tol_coeff)
else:
order_ratio, correct = calc_order_ratio(vlist[0], vlist[1], N1, N2, solution, param_line, delta / 2)
if abs(abs(order_ratio) - 1) < tolerance:
print('Tolerance in R unclear...')
if abs(order_ratio) < 1:
v1 = vlist[0]
v2 = vlist[1]
else:
v1 = vlist[1]
v2 = vlist[0]
if correct:
return collision_info(case, delta, N1, N2, v1, v2, rz, tol_coeff)
else:
return None
def calc_timecollide(TAU, DTAU, lastN1, lastN2, tolerance):
problem = {'result': 0, 'data': [], 'had_resolution': False, 'resolved_types':[]}
tol2 = 10 * tolerance
max_neg_coeff = 100000
tol_coeff = 1
min_tau = np.min(TAU)
if min_tau < -tol2:
print('negative interval length...', min_tau)
problem['had_resolution'] = True
tol2 = 10**math.ceil(math.log10(-min_tau))
if tol2 > tolerance * max_neg_coeff:
print('fail!')
problem['result'] = 1
            problem['data'] = find(TAU <= min_tau)
return [0], problem
print('resolved!')
# while np.any(inegTAU):
# print('negative interval length...', np.min(TAU))
# problem['had_resolution'] = True
# problem['resolved_types'].append(1)
# d = d * 10
# if d > max_neg_coeff:
# print('fail!')
# problem['result'] = 1
# problem['data'] = find(inegTAU)
# return [0], problem
# else:
# print('resolving * ', d)
# tol2 = 10 * tolerance * d
# iposTAU = TAU > tol2
# izerTAU = np.fabs(TAU) <= tol2
# inegTAU = TAU < -tol2
NN = TAU.shape[0]
iposTAU = TAU > tol2
izerTAU = np.fabs(TAU) <= tol2
inegTAU = TAU < -tol2
#iposDTAU = DTAU > tolerance
izerDTAU = np.fabs(DTAU) <= tol2
inegDTAU = DTAU < -tol2
# TODO: take this to cython
test1 = np.logical_and(izerTAU, inegDTAU)
zflag = np.any(test1)
if zflag:
problem['had_resolution'] = True
problem['resolved_types'].append(2)
ztau_ind = find(test1)
print('zero length interval shrinks:', ztau_ind, 'last N1:', lastN1, ' N2:', lastN2)
last_col_int = np.arange(lastN1 + 1, lastN2, dtype=int)
ind1 = len(np.intersect1d(ztau_ind, last_col_int, assume_unique=True)) == 0
# zmin = np.min(ztau_ind)
# zmax = np.max(ztau_ind)
# ztau_int= np.arange(zmin,zmax+1, dtype=int)
# ind2 = len(np.intersect1d(last_col_int, ztau_int , assume_unique=True)) != 0
# ind3 = (len(ztau_ind) + lastN2 - lastN1 - 1)/(zmax - zmin + 1) >= 1
if np.sum(izerTAU) == NN - 1:
locposTAU = find(iposTAU)[0]
if locposTAU > 0 and locposTAU < NN - 1:
if np.sum(DTAU[np.arange(0,locposTAU)]) < 0:
return [0, [0, locposTAU-1]], problem
elif np.sum(DTAU[np.arange(locposTAU + 1,NN)]) < 0:
return [0, [locposTAU + 1, NN-1]], problem
elif ind1:
# if last_case == 'rewind':
# zmin = np.min(ztau_ind)
# zmax = np.max(ztau_ind)
# if len(ztau_ind) / (zmax - zmin + 1) >= 0.75 and len(ztau_ind) > 3:
# if np.all(izerTAU[zmin: zmax+1]):
# print('trying to remove zero intervals...')
# rz = np.divide(-DTAU, TAU, where=inegDTAU, out=np.zeros_like(TAU))
# zz_ind = np.argmax(rz)
# zz = rz[zz_ind]
# return [1 / zz, zmin, zmax], problem
tol_coeff = 0.1
while tol_coeff >= 0.001 and zflag:
print('trying to resolve * ', tol_coeff, ' ...')
iposTAU = TAU > tol2 * tol_coeff
inegTAU = TAU < tol2 * tol_coeff
izerTAU = np.fabs(TAU) <= tol2 * tol_coeff
test1 = np.logical_and(izerTAU, inegDTAU)
zflag = np.any(test1)
if zflag:
if np.sum(izerTAU) == NN - 1:
locposTAU = find(iposTAU)[0]
if locposTAU > 0 and locposTAU < NN - 1:
if np.sum(DTAU[np.arange(0, locposTAU)]) < 0:
return [0, [0, locposTAU-1]], problem
elif np.sum(DTAU[np.arange(locposTAU + 1, NN)]) < 0:
return [0, [locposTAU + 1, NN-1]], problem
else:
break
tol_coeff = tol_coeff * 0.1
if zflag:
if lastN1 !=0 or lastN2 !=0:
print('zero length interval shrinks\n ')
problem['result'] = 2
problem['data'] = find(test1)
return [0], problem
else:
delta, rz = get_time_ratio(TAU, DTAU, inegTAU, inegDTAU, tol2 * tol_coeff)
if delta < 0:
return [], problem
else:
return [delta, rz], problem
# test3 = inegDTAU
# if not np.any(test3):
# return [], problem
# xTAU = TAU.copy()
# xTAU[inegTAU] = tol2 * tol_coeff
# rz = np.divide(-DTAU, xTAU, where=test3, out=np.zeros_like(TAU))
# zz_ind = np.argmax(rz)
# zz = rz[zz_ind]
# rz[ztau_ind] = zz
# return [1 / zz, rz], problem
else:
print('zero length interval shrinks\n ')
problem['result'] = 2
problem['data'] = find(test1)
return [0], problem
# TODO: take this to cython
test2 = np.logical_and(izerTAU, izerDTAU)
zflag = np.any(test2)
if zflag:
problem['had_resolution'] = True
problem['resolved_types'].append(5)
        # use test2 here (zero tau and zero dtau) - with test1 this condition is always true at this point
        if len(np.intersect1d(find(test2), np.arange(lastN1 + 1, lastN2, dtype=int), assume_unique=True)) == 0:
print('zero length interval does not expand')
tol_coeff = 0.1
while tol_coeff >= 0.001 and zflag:
print('trying to resolve * ', tol_coeff, ' ...')
inegTAU = TAU < tol2 * tol_coeff
iposTAU = TAU > tol2 * tol_coeff
izerTAU = np.fabs(TAU) <= tol2 * tol_coeff
test2 = np.logical_and(izerTAU, izerDTAU)
zflag = np.any(test2)
tol_coeff = tol_coeff * 0.1
if zflag:
print('zero length interval does not expand... trying to ignore')
# problem['result'] = 5
# problem['data'] = find(test2)
# return [], problem
else:
problem['data'] = find(test2)
problem['result'] = 5
print('zero length interval does not expand\n')
#TODO: here is the source of potential bug!!!
return [0], problem
delta, rz = get_time_ratio(TAU, DTAU, inegTAU, inegDTAU, tol2 * tol_coeff)
if delta < 0:
return [], problem
else:
return [delta, rz], problem
# #test3 = np.logical_and(iposTAU, inegDTAU)
# #inegDTAU = DTAU < -tol2 * tol_coeff
# test3 = inegDTAU
# if not np.any(test3):
# return [], problem
#
# xTAU = TAU.copy()
# xTAU[inegTAU] = tol2 * tol_coeff
# rz = np.divide(-DTAU, xTAU, where=test3, out=np.zeros_like(TAU))
# zz_ind = np.argmax(rz)
# zz = rz[zz_ind]
# return [1/zz, rz], problem
| 19,195 | 42.826484 | 147 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/collision_info.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class collision_info():
def __init__(self, case, delta=None, N1=None, N2=None, v1=None, v2=None, rz = None, tol_coeff=1,
had_resolution=False, stable_iteration = True):
self._N1 = N1
self._N2 = N2
self._rz = rz
self._tol_coeff = tol_coeff
if v1 is None:
self._v1 = []
else:
self._v1 = v1
if v2 is None:
self._v2 = []
else:
self._v2 = v2
self._case = case
self._delta = delta
self._Nnew = None
self._rewind_info = None
self._had_resolution = had_resolution
self._ztau_ind = None
self.from_ztau = False
self.alternative = None
self.stable_iteration = stable_iteration
@property
def N1(self):
return self._N1
@property
def N2(self):
return self._N2
@property
def v1(self):
return self._v1
@property
def v2(self):
return self._v2
@property
def case(self):
return self._case
@case.setter
def case(self, value):
self._case = value
def __eq__(self, other):
return self._N1 == other.N1 and self._N2 == other.N2 and self._v1 == other.v1 and self._v2 == other.v2 and self._case == other.case
@property
def delta(self):
return self._delta
@delta.setter
def delta(self, value):
self._delta = value
@property
def Nnew(self):
return self._Nnew
@Nnew.setter
def Nnew(self, value):
self._Nnew = value
@property
def rewind_info(self):
return self._rewind_info
@rewind_info.setter
def rewind_info(self, value):
self._rewind_info = value
@property
def had_resolution(self):
return self._had_resolution
@had_resolution.setter
def had_resolution(self, value):
self._had_resolution = value
@property
def rz(self):
return self._rz
@property
def tol_coeff(self):
return self._tol_coeff
@property
def ztau_ind(self):
return self._ztau_ind
@ztau_ind.setter
def ztau_ind(self, value):
self._ztau_ind = value
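# --- construction sketch (illustrative addition) ---
# Mirrors how the classification code builds a 'Case iii' record; a None
# argument for v1/v2 is normalized to an empty list by the constructor.
if __name__ == '__main__':
    ci = collision_info('Case iii', 0.1, N1=3, N2=4, v1=7, v2=None)
    ci.had_resolution = True
    print(ci.case, ci.delta, ci.v1, ci.v2)  # Case iii 0.1 7 []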
| 2,784 | 22.803419 | 139 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/matrix_constructor.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import math
class matrix_constructor():
def __init__(self, data, indexes, row_num, align=0, col_num=None, dtype=np.double):
if col_num is None:
col_num = row_num
self._matrix = np.zeros(shape=(row_num, col_num), dtype=dtype, order='C')
self._align = align
self._dtype = dtype
if self._align == 0:
self._left = math.floor(row_num/2)
elif self._align == -1:
self._left = 0
elif self._align == 1:
self._left = col_num - 1
else:
raise Exception('Incorrect alignment!')
if data is not None:
self._matrix[indexes,self._left] = data
self._right = self._left + 1
else:
if self._align == 1:
self._left = col_num
self._right = col_num
else:
self._right = self._left
#'#@profile
def remove(self, from_, to_):
if from_ <= 0:
self._left += to_
elif to_ >= self._right - self._left:
self._right -= to_ - from_
else:
if self._align == 0:
if self._left > self._matrix.shape[1] - self._right:
self._matrix[:, self._left + from_:self._right + from_ - to_] = self._matrix[:, self._left + to_:self._right]
self._right -= to_ - from_
else:
self._matrix[:, self._left - from_ + to_:self._left + to_] = self._matrix[:, self._left:self._left + from_]
self._left = self._left - from_ + to_
elif self._align == -1:
self._matrix[:, from_:self._right + from_ - to_] = self._matrix[:,to_:self._right]
self._right -= to_ - from_
elif self._align == 1:
self._matrix[:, self._left - from_ + to_:self._left+ to_] = self._matrix[:,self._left:self._left + from_]
self._left = self._left - from_ + to_
#'#@profile
def get_sub_matrix(self, from_, to_, copy=True):
if from_ <= 0:
ret = self._matrix[:, self._left:self._left +to_]
elif to_ >= self._right - self._left:
ret = self._matrix[:, self._left + from_:self._right]
else:
ret = self._matrix[:, self._left + from_:self._left + to_]
if copy:
return ret.copy()
else:
return ret
    def _increase_col_num(self, where = 0, min_num = None):
        # min_num may be omitted (append/prepend call without it); grow by at least one column then
        if min_num is None:
            min_num = 1
        col_num = max(math.floor(self._matrix.shape[1] / 2), min_num * 2)
mat = np.zeros(shape=(self._matrix.shape[0], self._matrix.shape[1] + col_num), dtype=self._dtype, order='C')
if self._align == -1:
mat[:,:self._right] = self._matrix[:,:self._right]
elif self._align == 1:
new_left = mat.shape[1] -(self._right-self._left)
mat[:,new_left:] = self._matrix[:,self._left:]
self._right = mat.shape[1]
self._left = new_left
else:
new_left = math.floor((mat.shape[1] - (self._right-self._left))/2)
new_right = new_left + self._right - self._left
mat[:, new_left:new_right] = self._matrix[:,self._left:self._right]
self._right = new_right
self._left = new_left
self._matrix = mat
#'#@profile
def append(self, dtuple):
if self._right == self._matrix.shape[1]:
self._increase_col_num(1)
self._matrix[dtuple[1],self._right] = dtuple[0]
self._right +=1
#'#@profile
def prepend(self, dtuple):
if self._left == 0:
self._increase_col_num(-1)
self._left -=1
self._matrix[dtuple[1], self._left] = dtuple[0]
#'#@profile
def replace_matrix(self, from_, to_, matrix):
col_num = matrix.shape[1]
if from_ <= 0:
self._left += to_
if col_num > self._left:
self._increase_col_num(-1, col_num)
self._matrix[:, self._left - col_num:self._left] = matrix
self._left -= col_num
elif to_ >= self._right - self._left:
self._right -= to_ - from_
if col_num > self._matrix.shape[1] - self._right:
self._increase_col_num(1, col_num)
self._matrix[:, self._right:self._right + col_num] = matrix
self._right += col_num
else:
nnew = from_ - to_ + col_num
if self._align == 0:
t = self._left > self._matrix.shape[1] - self._right
if (t and nnew > 0) or (not t and nnew < 0):
if nnew > self._left:
self._increase_col_num(-1, nnew)
self._matrix[:, self._left - nnew:self._left + to_ - col_num] = self._matrix[:, self._left:self._left + from_]
self._matrix[:, self._left + to_ - col_num: self._left + to_] = matrix
self._left -= nnew
else:
if nnew > self._matrix.shape[1] - self._right:
self._increase_col_num(1, nnew)
self._matrix[:, self._left + from_ + col_num: self._right + nnew] = self._matrix[:, self._left +to_: self._right]
self._matrix[:, self._left + from_: self._left + from_ + col_num] = matrix
self._right += nnew
elif self._align == -1:
if nnew > self._matrix.shape[1] - self._right:
self._increase_col_num(1, nnew)
self._matrix[:, from_+ col_num : self._right + nnew] = self._matrix[:,to_: self._right]
self._matrix[:, from_: from_ + col_num] = matrix
self._right += nnew
elif self._align == 1:
if nnew > self._left:
self._increase_col_num(-1, nnew)
self._matrix[:, self._left - nnew:self._left + to_- col_num] = self._matrix[:, self._left:self._left + from_]
self._matrix[:, self._left + to_- col_num: self._left + to_] = matrix
self._left -= nnew
#'#@profile
def replace(self, from_, to_, data, indexes):
if from_ <= 0:
self._left += to_
if self._left == 0:
self._increase_col_num(-1, 1)
self._matrix[:, self._left - 1] = 0
self._matrix[indexes, self._left - 1] = data
self._left -= 1
elif to_ >= self._right - self._left:
self._right -= to_ - from_
if self._right == self._matrix.shape[1]:
self._increase_col_num(1, 1)
self._matrix[:, self._right] = 0
self._matrix[indexes, self._right] = data
self._right += 1
else:
nnew = from_ - to_ + 1
if self._align == 0:
t = self._left > self._matrix.shape[1] - self._right
if (t and nnew > 0) or (not t and nnew < 0):
if nnew > self._left:
self._increase_col_num(-1, nnew)
self._matrix[:, self._left - nnew:self._left + to_ - 1] = self._matrix[:, self._left:self._left + from_]
self._matrix[:, self._left + to_ - 1] = 0
self._matrix[indexes, self._left + to_ - 1] = data
self._left -= nnew
else:
if nnew > self._matrix.shape[1] - self._right:
self._increase_col_num(1, nnew)
self._matrix[:, self._left + from_ + 1: self._right + nnew] = self._matrix[:, self._left +to_: self._right]
self._matrix[:, self._left + from_] = 0
self._matrix[indexes, self._left + from_] = data
self._right += nnew
elif self._align == -1:
if nnew > self._matrix.shape[1] - self._right:
self._increase_col_num(1, nnew)
self._matrix[:, from_+ 1 : self._right + nnew] = self._matrix[:,to_: self._right]
self._matrix[:, from_] = 0
self._matrix[indexes, from_] = data
self._right += nnew
elif self._align == 1:
if nnew > self._left:
self._increase_col_num(-1, nnew)
self._matrix[:, self._left - nnew:self._left + to_- 1] = self._matrix[:, self._left:self._left + from_]
self._matrix[:, self._left + to_ - 1] = 0
self._matrix[indexes, self._left + to_- 1] = data
self._left -= nnew
#'#@profile
def get_matrix(self):
return self._matrix[:,self._left:self._right]
def get_raw_matrix(self):
return self._matrix, self._left, self._right
def get_vector(self, pos):
if self._left + pos >= self._right:
return None
else:
return self._matrix[:,self._left + pos]
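# --- usage sketch (illustrative addition) ---
# matrix_constructor keeps a sliding window [left:right) inside a preallocated
# array so that columns can be appended or prepended without copying each time.
if __name__ == '__main__':
    mc = matrix_constructor(np.array([1.0, 2.0]), [0, 1], row_num=2, align=0, col_num=4)
    mc.append((np.array([3.0, 4.0]), [0, 1]))
    mc.prepend((np.array([5.0, 6.0]), [0, 1]))
    print(mc.get_matrix())  # columns [5 6], [1 2], [3 4]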
| 9,513 | 43.251163 | 133 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/SCLP_subproblem.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .generic_SCLP_solution import generic_SCLP_solution
from .parametric_line import parametric_line
from .prepare_subproblem_data import prepare_subproblem_basis
from .collision_info import collision_info
#'#@profile
def SCLP_subproblem(new_basis, v1,v2,Kset_0, Jset_N,
AAN1,AAN2, totalK, totalJ, DEPTH, STEPCOUNT, ITERATION, settings, tolerance):
# Excluding the k's and j's which are > 0
rates_LP_form, pbaseB1red, pbaseB2red = prepare_subproblem_basis(new_basis, Kset_0, Jset_N, v1, v2, AAN1, AAN2)
# The starting solution
solution = generic_SCLP_solution(rates_LP_form, settings, totalK=totalK, totalJ=totalJ)
# prepare the boundaries
param_line = parametric_line.get_subproblem_parametric_line(new_basis, solution, v1, v2, AAN1, AAN2, pbaseB1red,
pbaseB2red)
# performing the left and right first pivots
# the right pivot:
K_0 = []
J_N = []
if pbaseB2red is not None:
if not isinstance(v1, list):
if v1 > 0:
K_0 = [v1]
else:
J_N = [-v1]
if not isinstance(v2, list):
if v2 < 0:
J_N.append(-v2)
from .SCLP_pivot import SCLP_pivot
col_info = collision_info('', 0, 0,1,[],v1)
solution, STEPCOUNT, ITERATION, pivot_problem = SCLP_pivot(np.asarray(K_0, dtype=np.int32),np.asarray(J_N, dtype=np.int32),
solution,col_info, DEPTH, STEPCOUNT, ITERATION, settings, tolerance)
if pivot_problem['result'] == 1:
print('Problem during right pivot...')
return solution, STEPCOUNT, ITERATION, pivot_problem
# the left pivot:
K_0 = []
J_N = []
if pbaseB1red is not None:
if not isinstance(v2, list):
if v2 > 0:
K_0 = [v2]
else:
J_N = [-v2]
if not isinstance(v1, list):
if v1 > 0:
K_0.append(v1)
from .SCLP_pivot import SCLP_pivot
col_info = collision_info('', 0, -1,0,v2,[])
solution, STEPCOUNT, ITERATION, pivot_problem = SCLP_pivot(np.asarray(K_0, dtype=np.int32),np.asarray(J_N, dtype=np.int32),
solution,col_info, DEPTH, STEPCOUNT, ITERATION, settings, tolerance)
if pivot_problem['result'] == 1:
print('Problem during left pivot...')
return solution, STEPCOUNT, ITERATION, pivot_problem
#############################################
# solving the subproblem
from .SCLP_solver import SCLP_solver
solution, STEPCOUNT, pivot_problem = SCLP_solver(solution, param_line, 'sub_prob', DEPTH, STEPCOUNT, ITERATION, settings, tolerance)
#############################################
return solution, STEPCOUNT, ITERATION, pivot_problem
| 3,540 | 43.2625 | 136 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/SCLP_base_sequence.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .get_new_dict import get_new_dict
from .lp_tools.pivot import pivot_mn
class SCLP_base_sequence():
def __init__(self, basis):
self._bases = [basis]
self._num_bases = 1
@property
def bases(self):
return self._bases
@property
def num_bases(self):
return self._num_bases
def insert_basis(self, basis, place):
if self._bases[place] is None:
self._bases[place] = basis
self._num_bases += 1
def remove_bases(self, N1, N2, pivots, bases_mm, Nadd = 0):
rem_bases = [self._bases.pop(i) for i in range(N2-1, N1, -1)]
rem_places = [N2-i-1 for i,v in enumerate(rem_bases) if v is not None]
rem_bases = [v for v in rem_bases if v is not None]
bases_mm.add(rem_bases)
if len(rem_bases) == self._num_bases:
last_place = rem_places[-1]
first_place = rem_places[0]
if N1 >= 0:
if N2 <= len(pivots):
if N2 - last_place < first_place - N1:
self._bases[N1+1] = get_new_dict(rem_bases[-1], last_place, N2, pivots)
else:
self._bases[N1] = get_new_dict(rem_bases[0], first_place, N1, pivots)
else:
self._bases[N1] = get_new_dict(rem_bases[0], first_place, N1, pivots)
else:
self._bases[N1 + 1] = get_new_dict(rem_bases[-1], last_place, N2, pivots)
self._num_bases = 1
else:
self._num_bases -= len(rem_bases)
#this for a case when called from solution.rewind
if Nadd > 0:
self._bases[N1+1:N1+1] = [None] * Nadd
def replace_bases(self, N1, N2, Nnew, AAN1, AAN2, bases_mm):
rem_bases = [self._bases.pop(i) for i in range(N2-1, N1, -1) if self._bases[i] is not None]
bases_mm.add(rem_bases)
Nadd = Nnew - (N2 - N1 - 1 - len(rem_bases))
if Nadd > 0:
self._bases[N1+1:N1+1] = [None] * Nadd
else:
del self._bases[N1+1:N1+1-Nadd]
if len(rem_bases) == self._num_bases:
if AAN1 is not None:
self._bases[N1] = AAN1
else:
self._bases[N1 + Nnew + 1] = AAN2
self._num_bases = 1
else:
self._num_bases -= len(rem_bases)
def get_basis_at(self, place, pivots):
return self.get_nearby_basis(place, place, pivots)
def get_next_basis(self, basis, place, pivots, preserve = True):
exist_basis = self._bases[place+1]
if not preserve and exist_basis is not None:
return exist_basis
else:
return pivot_mn(basis, pivots[place][0], pivots[place][1])
def get_nearby_basis_at0(self):
return next((i,b) for i, b in enumerate(self._bases) if b is not None)
def get_nearby_basis_atN(self):
return next((-i,b) for i, b in enumerate(reversed(self._bases)) if b is not None)
def get_nearby_basis(self, N1, N2, pivots):
basis_N1 = self._bases[N1]
if basis_N1 is not None:
return basis_N1, N1
elif self._bases[N2] is not None:
return self._bases[N2], N2
else:
res1 = next(((i,b) for i, b in enumerate(self._bases[N2+1:]) if b is not None), (2*len(self._bases), None))
res2 = next(((i, b) for i, b in enumerate(reversed(self._bases[:N2])) if b is not None), (2*len(self._bases), None))
if res1[0] <= res2[0]:
#print('New1:', N2 + res1[0] + 1)
old_place = N2 + res1[0] + 1
if N1<old_place<N2:
self._bases[old_place] = None
self._num_bases -= 1
return get_new_dict(res1[1], old_place, N2, pivots, False), N2
else:
return get_new_dict(res1[1], old_place, N2, pivots), N2
else:
#print('New2:', N2 - res2[0] - 1)
old_place = N2 - res2[0] - 1
if N1 < old_place < N2:
self._bases[old_place] = None
self._num_bases -= 1
return get_new_dict(res2[1], old_place, N2, pivots, False), N2
else:
return get_new_dict(res2[1], old_place, N2, pivots), N2
# rewrite
def clear_base_sequense(self, numBasesToRemove, maxBases, NN):
pass
def keep_only_one(self):
n, b = self.get_nearby_basis_atN()
self._bases = [None] * len(self._bases)
self._bases[n-1] = b
self._num_bases = 1
def check_places(self, places):
if len(places) != self._num_bases:
raise Exception('Num bases')
else:
for p in places:
if self._bases[p] is None:
raise Exception('Not same')
def check_bases_num(self):
bases_num = sum([1 for basis in self._bases if basis is not None])
if self._num_bases != bases_num:
print('here')
| 5,615 | 37.731034 | 128 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/utils.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
def relative_to_project(file_path):
if os.path.isabs(file_path):
return file_path
else:
proj = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
return os.path.join(proj, file_path)
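# --- usage sketch (illustrative addition) ---
# Absolute paths pass through unchanged; relative ones resolve against the
# project root one level above this subroutines/ directory.
if __name__ == '__main__':
    print(relative_to_project('/tmp/abs.txt'))    # -> /tmp/abs.txt
    print(relative_to_project('data/model.dat'))  # -> <project root>/data/model.dat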
| 833 | 35.26087 | 96 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/solution_state.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class solution_state():
__slots__ = ['dx','dq', 'x','q','del_x','del_q','_tau','dtau','_max_tau_size','_tau_size']
def __init__(self, max_tau_size, JJ, KK):
self._max_tau_size = max_tau_size
self._tau = np.zeros(self._max_tau_size, dtype=np.double, order='C')
#TODO: further improvement can be provided if we reuse states between iterations
self.x = np.zeros((KK, self._max_tau_size+1), dtype=np.float64, order='C')
self.del_x = np.zeros((KK, self._max_tau_size+1), dtype=np.float64, order='C')
self.q = np.zeros((JJ, self._max_tau_size+1), dtype=np.float64, order='C')
self.del_q = np.zeros((JJ, self._max_tau_size+1), dtype=np.float64, order='C')
self._tau_size = 0
self.dtau = None
def update_tau(self, col_info, initT = 0, backward = False):
if self.dtau is None:
self._tau_size = 1
self._tau[0] = initT
else:
if backward:
#TODO understand when we need this and complete
self._tau[:self._tau_size] -= self.dtau * col_info.delta
else:
self._tau[:self._tau_size] += self.dtau * col_info.delta
if col_info.Nnew != 0:
if col_info.Nnew + self._tau_size > self._max_tau_size:
self._enlarge((col_info.Nnew + self._tau_size)*2)
new_size = self._tau_size+col_info.Nnew
self._tau[col_info.N2 + col_info.Nnew:new_size] = self._tau[col_info.N2:self._tau_size]
self._tau_size = new_size
if col_info.Nnew > 0:
self._tau[col_info.N2:col_info.N2 + col_info.Nnew] = 0.
def _enlarge(self, size):
self._max_tau_size = size
tau = np.zeros(self._max_tau_size, dtype=np.double, order='C')
tau[:self._tau_size] = self._tau[:self._tau_size]
self._tau = tau
self.reserve_memory_for_states()
@property
def tau(self):
return self._tau[:self._tau_size]
@tau.setter
def tau(self, value):
self._tau_size = value.shape[0]
if self._tau_size > self._max_tau_size:
self._max_tau_size = self._tau_size * 2
self._tau = np.zeros(self._max_tau_size, dtype=np.double, order='C')
self.reserve_memory_for_states()
self._tau[:self._tau_size] = value
def reserve_memory_for_states(self):
self.x = np.zeros((self.x.shape[0], self._max_tau_size + 1), dtype=np.float64, order='C')
self.del_x = np.zeros((self.del_x.shape[0], self._max_tau_size + 1), dtype=np.float64, order='C')
self.q = np.zeros((self.q.shape[0], self._max_tau_size + 1), dtype=np.float64, order='C')
self.del_q = np.zeros((self.del_q.shape[0], self._max_tau_size + 1), dtype=np.float64, order='C')
| 3,443 | 44.92 | 107 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/bases_memory_manager.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
class bases_memory_manager():
def __init__(self, pl, dl, max_bases = 3):
self.max_bases = max_bases
self._bases = []
self.ps = np.zeros(pl, dtype=np.int32, order='C')
self.ds = np.zeros(dl, dtype=np.int32, order='C')
def add(self, bases):
for i in range(min(len(bases), self.max_bases - len(self._bases))):
self._bases.append(bases[i])
def pop(self):
if len(self._bases) > 0:
return self._bases.pop(0)
else:
return None
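# --- usage sketch (illustrative addition) ---
# A tiny pool that recycles at most max_bases discarded basis objects.
if __name__ == '__main__':
    mm = bases_memory_manager(pl=3, dl=2, max_bases=2)
    mm.add(['b1', 'b2', 'b3'])           # only the first two fit the pool
    print(mm.pop(), mm.pop(), mm.pop())  # b1 b2 None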
| 1,123 | 32.058824 | 75 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/get_new_dict.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .lp_tools.pivot import pivot_mn
from .lp_tools.LP_formulation import LP_formulation
def get_new_dict(dct, oldPlace, newPlace, pivots, preserve = True):
if isinstance(oldPlace, list):
oldPlace = oldPlace[0]
dct = dct[0]
if preserve:
tmp_dict = LP_formulation(np.empty_like(dct.simplex_dict), None, None)
else:
tmp_dict = None
if oldPlace < newPlace:
for i in range(oldPlace,newPlace):
dct = pivot_mn(dct, pivots[i][0], pivots[i][1], tmp_dict)
tmp_dict= None
elif newPlace < oldPlace:
for i in range(oldPlace-1, newPlace-1, -1):
dct = pivot_mn(dct, pivots[i][1], pivots[i][0], tmp_dict)
tmp_dict = None
return dct
| 1,333 | 36.055556 | 78 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/SCLP_solution.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .generic_SCLP_solution import generic_SCLP_solution
from .calc_objective import calc_objective
from .calc_controls import calc_controls
from .solution_state import solution_state
from .lp_tools.LP_formulation import solve_LP_in_place
from .parametric_line import parametric_line
import itertools
class SCLP_solution(generic_SCLP_solution):
plot_width = 800
plot_height = 400
def __init__(self, formulation, x_0, q_N, tolerance, solver_settings):
LP_form, ps, ds = formulation.formulate_ratesLP(x_0, q_N)
LP_form, err = solve_LP_in_place(LP_form, ps, ds, tolerance)
if err['result'] != 0:
raise Exception(err['message'])
super().__init__(LP_form, solver_settings, formulation.K + formulation.L, formulation.J + formulation.I)
self._formulation = formulation
self._final_T = 0
self.last_T = 0
self.max_valid_T = 0
self._is_final = False
self._u, self._p, self._t, self._obj, self._err= None, None, None, None, None
if solver_settings.collect_plot_data:
self.plot_data = []
else:
self.plot_data = None
@property
def formulation(self):
return self._formulation
def __getstate__(self):
return self._problem_dims, self._pivots, self._base_sequence, self._dx, self._dq, self._last_collision, self._col_info_stack, self._klist, self._jlist, self._formulation
def __setstate__(self, state):
self._problem_dims, self._pivots, self._base_sequence, self._dx, self._dq, self._last_collision, self._col_info_stack, self._klist, self._jlist, self._formulation = state
self.plot_data = None
        # bases hold LP_formulation objects (see SCLP_base_sequence), so size the
        # scratch matrix from the first simplex dictionary
        self.tmp_matrix = np.zeros_like(self._base_sequence.bases[0].simplex_dict)
        # solution_state needs sizes to preallocate; start minimal - the tau
        # setter re-reserves the state matrices on the first real assignment
        self._state = solution_state(1, self._problem_dims.JJ, self._problem_dims.KK)
def update_state(self, param_line, check_state=False, tolerance=0, up_rewind=False):
res = super().update_state(param_line, check_state, tolerance, up_rewind)
if res and self.plot_data is not None:
self.plot_data.append({'T': param_line.T, 'tau': self._state.tau, 'dtau': self._state.dtau})
return res
def get_final_solution(self, preserve = True):
if not self._is_final:
self._extract_final_solution(preserve)
return self._t, self._state.x, self._state.q, self._u, self._p, self.pivots, self._obj, self._err, self.NN, self._state.tau, self.max_valid_T
def _extract_final_solution(self, preserve = True):
self._u, self._p = calc_controls(self, self._problem_dims.JJ, self._problem_dims.KK, preserve)
self._t = np.cumsum(np.hstack((0, self._state.tau)))
self._final_T = self._t[-1]
self._obj, self._err = calc_objective(self._formulation, self._u, self._state.x, self._p, self._state.q, self._state.tau)
self._is_final = True
def check_final_solution(self, tolerance):
is_ok = True
if np.any(self._state.tau < -tolerance):
n = np.argmin(self._state.tau)
print('Negative tau!', n, self._state.tau[n])
is_ok = False
if np.any(self._state.x < -tolerance*10):
n,i = np.unravel_index(np.argmin(self._state.x),self._state.x.shape)
print('Negative primal state!',n,i, self._state.x[n,i])
is_ok = False
if np.any(self._state.q < -tolerance*10):
print('Negative dual state!')
is_ok = False
return is_ok
def is_other_feasible(self, other_sol, tolerance=1E-11):
t,x,q,u,p,pivots,obj,err,NN,tau, maxT = other_sol.get_final_solution()
# now we calculate important values at all points of the time partition, i.e. for t=t_0,...,t_N
slack_u = np.vstack(self._formulation.b) - np.dot(self._formulation.H, u[:self._formulation.J, :]) # b - Hu(t)
int_u = np.cumsum(u[:self._formulation.J,:]*tau, axis=1) # \int_0^t u(s) ds
slack_dx = np.outer(self._formulation.a, np.cumsum(tau)) - np.dot(self._formulation.G, int_u) # at - G\int_0^t u(s) ds
if self._formulation.L > 0:
# this for the case when F \ne \emptyset (not our case)
slack_x0 = np.vstack(self._formulation.alpha) - np.dot(self._formulation.F, x[self._formulation.K:, 0])
real_dx = np.dot(self._formulation.F, np.cumsum(other_sol.state.dx[self._formulation.K:, :] * tau, axis=1))
slack_dx = slack_dx - real_dx
else:
slack_x0 = np.vstack(self._formulation.alpha) # x^0 = \alpha (our case)
slack_x = slack_dx + slack_x0 # x(t) = \alpha + at - G\int_0^t u(s) (our case)
return np.all(slack_x >= -tolerance*10) and np.all(slack_u >= -tolerance*10) and np.all(slack_x0 >= -tolerance*10) # changed '>' to '>=' probably this was a problem
def other_objective(self, other_sol):
t,x,q,u,p,pivots,obj,err,NN,tau, maxT = other_sol.get_final_solution()
TT = t[NN]
part1 = np.dot(np.dot(self._formulation.gamma, u[:self._formulation.J, :]), tau)
ddtau = tau * (t[:-1] + t[1:]) / 2
part2 = np.dot(np.dot(self._formulation.c, u[:self._formulation.J, :]), tau * TT - ddtau)
if self._formulation.L == 0:
part3 = 0
else:
part3 = np.dot(np.dot(self._formulation.d, ((x[self._formulation.K:, :-1] + x[self._formulation.K:, 1:]) / 2)), tau)
return part1 + part2 + part3
def truncate_at(self, t0):
        self._t = np.cumsum(np.hstack((0, self._state.tau)))
        self._final_T = self._t[-1]
        if t0 < self._final_T:
            #TODO: check last_breakpoint
            last_breakpoint = np.where(self._t <= t0)[0][-1]
            delta_t = t0 - self._t[last_breakpoint]
self._base_sequence.remove_bases(-1, last_breakpoint, self._pivots, self.bases_mm)
self._pivots.remove_pivots(-1, last_breakpoint)
self._state.tau=self._state.tau[last_breakpoint:]
self._state.dtau = self._state.dtau[last_breakpoint:]
self._state.del_x = self._state.del_x[:, last_breakpoint:]
self._state.del_q = self._state.del_q[:, last_breakpoint:]
self._state.x = self._state.x[:,last_breakpoint:]
self._state.q = self._state.q[:, last_breakpoint:]
self._dx.remove(0, last_breakpoint)
self._dq.remove(0, last_breakpoint)
self._state.dx = self._dx.get_matrix()
self._state.dq = self._dq.get_matrix()
self.loc_min_storage.update_caseI(-1, last_breakpoint, self._state.dx, self._state.dq)
self._state.x[:, 0] += self._state.dx[:, 0] * delta_t
self._state.x[self._state.dx[:, 0]==0, 0] = 0
self._state.q[:, 0] += self._state.dq[:, 0] * delta_t
def recalculate(self, param_line, t0, new_T, new_x0, settings, tolerance, mm = None):
if t0 >= self._final_T:
print('!!!')
if new_T <= t0:
print('!!!')
self._is_final = False
if new_x0 is not None:
if new_T is None:
new_T = param_line.T
from .SCLP_x0_solver import SCLP_x0_solver
STEPCOUNT, pivot_problem = SCLP_x0_solver(self, param_line, new_x0, new_T - t0, 0, 0, dict(),
settings, tolerance, settings.find_alt_line, mm)
is_ok = self.update_state(param_line, check_state=settings.check_final_solution,
tolerance=tolerance * 10)
return STEPCOUNT, pivot_problem
else:
self.truncate_at(t0)
if new_T is not None:
from .SCLP_solver import SCLP_solver
param_line = parametric_line(np.ascontiguousarray(self._state.x[:, 0]), param_line.q_N, new_T-param_line.T, param_line.T-t0, del_T =1)
param_line.build_boundary_sets(self.klist, self.jlist)
self._state.reserve_memory_for_states()
solution, STEPCOUNT, pivot_problem = SCLP_solver(self, param_line, 'update', 0, 0, dict(), settings, tolerance, settings.find_alt_line, mm)
is_ok = solution.update_state(param_line, check_state=settings.check_final_solution,
tolerance=tolerance * 10)
return STEPCOUNT, pivot_problem
else:
return 0, {'result': 0}
def plot_history(self, plt):
if self.plot_data is not None:
if self._final_T == 0:
last_T = sum(self._state.tau)
else:
last_T = self._final_T
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.plot([0,last_T], [last_T,0])
prev_T = 0
prev_dtau = self.plot_data[0]['dtau']
xstarts = [0]
prev_tau = self.plot_data[0]['tau']
yticks = []
for r, dt_entry in enumerate(self.plot_data[1:]):
y1 = last_T - prev_T
y2 = last_T - dt_entry['T']
ax1.plot([0, xstarts[-1]],[y1,y1], color='k')
xends = np.cumsum(prev_tau + prev_tau * prev_dtau * (y1-y2))
print(r)
for i in range(len(xstarts)):
print(xstarts[i], y1, xends[i], y2)
ax1.plot([xstarts[i],xends[i]], [y1, y2], color='k')
prev_T = dt_entry['T']
xstarts = np.cumsum(dt_entry['tau'])
prev_dtau = dt_entry['dtau']
prev_tau = dt_entry['tau']
yticks.append(y1)
# set the x-spine (see below for more info on `set_position`)
ax1.spines['left'].set_position('zero')
# turn off the right spine/ticks
ax1.spines['right'].set_color('none')
ax1.yaxis.tick_left()
# set the y-spine
ax1.spines['bottom'].set_position('zero')
# turn off the top spine/ticks
ax1.spines['top'].set_color('none')
ax1.xaxis.tick_bottom()
ax1.set_xticks(xends)
ax1.set_yticks(yticks)
ax1.set_yticklabels(list(range(len(self.plot_data))))
plt.setp(ax1.get_xticklabels(), rotation=30)
return plt
return None
def show_buffer_status(self):
from bokeh.io import output_file, show
from bokeh.plotting import figure
from bokeh.palettes import Dark2_5 as line_palette
# Plots of buffers status: piecewise linear graphs where:
# t = [0,t1,...,Tres] vector containing time partition
# X = (12,len(t)) matrix representing quantities at each of 12 buffers at each timepoint
self.get_final_solution()
number_of_buffers = self.formulation.K
output_file("buffer_status.html")
plot_line = figure(plot_width=self.plot_width, plot_height=self.plot_height)
# create a color iterator
colors = itertools.cycle(line_palette)
# add a line renderer
for i, color in zip(range(number_of_buffers), colors):
            plot_line.line(self._t, self._state.x[i], line_width=2, line_color=color)
show(plot_line)
return None
def show_server_utilization(self):
import pandas as pd
from bokeh.core.property.dataspec import value
from bokeh.io import output_file, show
from bokeh.layouts import gridplot
from bokeh.plotting import figure
from bokeh.palettes import Category20
# Plot of time_slots utilization: 4 barcharts where each bar can contain up to 12 colors. Colors are according to kind of tasks running on server
# we have 12 kinds of tasks (number of columns in H) and 4 time_slots (number of rows in H)
# if specific task (j) can run on the specific server (k) then we have H[k,j] > 0
# otherwise H[k,j] == 0 and we cannot run specific task on specific server
# U is a (16,len(t)-1) matrix where we interesting only on first (12,len(t)-1) part
# U[j,n] * H[k,j] indicate how many capacity of server k took task j at time period t[n]...t[n+1]
# we need for each server k create barchart where width of bar is length of time period
# and total height is sum(U[n,j] * H[k,j]) for all j this height splitted by different colors according to j (up to 12)
#self.extract_final_solution()
number_of_buffers = self.formulation.K
number_of_servers = self.formulation.I
time_horizon = 150
        number_of_time_slots = len(self._t) - 1
output_file('server_utilization.html')
tasks = ['task ' + str(i) for i in range(1, len(self.formulation.H[0]) + 1)]
new_legend_tasks = {}
new_t = np.zeros(2 * number_of_time_slots)
        new_t[0] = self._t[1] / 2
        new_t[1:-1] = np.repeat(self._t[1:-1], 2)
        new_t[-1] = self._t[-1]
data = {'t': new_t}
new_matrix = np.zeros((number_of_buffers, 2 * number_of_time_slots))
p = {}
network_graph_tasks_indices = []
network_graph_server_indices = []
network_graph_tasks_server_hash = {}
max_y_value = 1
for k in range(number_of_servers): # servers
for j in range(number_of_buffers): # tasks
for ti in range(0, number_of_time_slots): # time slices
new_matrix[j, 2 * ti] = self._u[j, ti] * self.formulation.H[k, j]
new_matrix[j, 2 * ti + 1] = self._u[j, ti] * self.formulation.H[k, j]
if self.formulation.H[k, j] > 0:
new_legend_tasks[j] = 'task ' + str(j + 1)
network_graph_tasks_indices.append(j + 1)
network_graph_server_indices.append(len(tasks) + k + 1)
network_graph_tasks_server_hash[j + 1] = self.formulation.H[k, j]
data['task ' + str(j + 1)] = new_matrix[j].tolist()
df = pd.DataFrame(data)
p[k] = figure(x_range=(0, time_horizon * 1.2), y_range=(0, max_y_value), plot_width=self.plot_width,
plot_height=self.plot_height, title='Server ' + str(k) + ' Utilization')
p[k].varea_stack(stackers=tasks, x='t', color=Category20[number_of_buffers],
legend=[value(x) for x in tasks], source=df)
# reverse the legend entries to match the stacked order
for j in reversed(range(number_of_buffers)):
if self.formulation.H[k, j] == 0:
del p[k].legend[0].items[j]
p[k].legend[0].items.reverse()
grid = gridplot([[p[0], p[1]], [p[2], p[3]]])
show(grid)
return None
| 15,467 | 47.3375 | 178 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/calc_controls.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .matlab_utils import find
from .sparse_matrix_constructor import sparse_matrix_constructor
#'#@profile
def calc_controls(solution, JJ, KK, preserve = True):
new_dict = solution.get_basis_at(0)
u = sparse_matrix_constructor(None, None, JJ)
p = sparse_matrix_constructor(None, None, KK)
if preserve:
new_dict = new_dict.copy()
for place in range(solution.NN):
klist2 = find(new_dict.dual_name > 0)
jlist1 = find(new_dict.prim_name < 0)
kn2 = new_dict.dual_name[klist2]
jn1 = -new_dict.prim_name[jlist1]
u.append(sparse_matrix_constructor(new_dict.simplex_dict[jlist1+1,0].copy(), jn1-1, JJ))
p.append(sparse_matrix_constructor(new_dict.simplex_dict[0,klist2+1].copy(), kn2-1, KK))
if place < solution.NN - 1:
new_dict = solution.get_next_basis_for_solution(new_dict, place, preserve)
return u.get_matrix(), p.get_matrix()
| 1,505 | 39.702703 | 96 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/prepare_subproblem_data.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .matlab_utils import find
from .lp_tools.LP_formulation import LP_formulation, get_dx_names, get_dq_names
#'#@profile
def prepare_subproblem_basis(basis, Kset_0, Jset_N, v1, v2, AAN1, AAN2):
    """Build the reduced LP formulation for a subproblem by dropping the
    primal (K) and dual (J) names shared by the adjacent bases AAN1/AAN2,
    while keeping the colliding variables v1 and v2."""
    pbaseB1red = None
    pbaseB2red = None
if AAN1 is None:
Kex1 = np.intersect1d(get_dx_names(basis), Kset_0, assume_unique=True)
Kexclude = np.intersect1d(Kex1, get_dx_names(AAN2), assume_unique=True)
Jexclude = -np.intersect1d(get_dq_names(basis), get_dq_names(AAN2), assume_unique=True)
elif AAN2 is None:
Kexclude = np.intersect1d(get_dx_names(basis), get_dx_names(AAN1), assume_unique=True)
Jex1 = np.intersect1d(get_dq_names(basis), np.asarray([-v for v in Jset_N]), assume_unique=True)
Jexclude = -np.intersect1d(Jex1, get_dq_names(AAN1), assume_unique=True)
else:
Kexclude = np.intersect1d(get_dx_names(AAN1), get_dx_names(AAN2), assume_unique=True)
Jexclude = -np.intersect1d(get_dq_names(AAN1), get_dq_names(AAN2), assume_unique=True)
if not isinstance(v1, list):
Kexclude = Kexclude[Kexclude != v1]
Jexclude = Jexclude[Jexclude != -v1]
if not isinstance(v2, list):
Kexclude = Kexclude[Kexclude != v2]
Jexclude = Jexclude[Jexclude != -v2]
if AAN1 is not None:
pbaseB1red = AAN1.prim_name[np.logical_not(np.in1d(AAN1.prim_name, Kexclude, assume_unique=True))]
if AAN2 is not None:
pbaseB2red = AAN2.prim_name[np.logical_not(np.in1d(AAN2.prim_name, Kexclude, assume_unique=True))]
lKDDin = np.logical_not(np.in1d(basis.prim_name, Kexclude, assume_unique=True))
lJDDin = np.logical_not(np.in1d(basis.dual_name, -Jexclude, assume_unique=True))
DDred = np.ascontiguousarray(basis.simplex_dict[find(np.hstack(([True], lKDDin)))[:, None], find(np.hstack(([True], lJDDin)))])
return LP_formulation(DDred, np.ascontiguousarray(basis.prim_name[lKDDin]), np.ascontiguousarray(basis.dual_name[lJDDin])), pbaseB1red, pbaseB2red
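
# A minimal, self-contained illustration (hypothetical index sets) of the
# exclusion idiom used above: drop the names shared by both neighbouring
# bases and keep the complementary entries.
if __name__ == '__main__':
    prim_name = np.array([1, 3, 5, 7])
    shared = np.array([3, 7])  # names excluded because both bases contain them
    keep = np.logical_not(np.in1d(prim_name, shared, assume_unique=True))
    print(prim_name[keep])     # -> [1 5]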
| 2,604 | 48.150943 | 150 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/SCLP_pivot.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from .collision_info import collision_info
from .generic_SCLP_solution import generic_SCLP_solution
from .lp_tools.LP_formulation import solve_ratesLP, get_value_by_name, get_dx_names, get_dq_names, solve_simple_caseII
from .SCLP_subproblem import SCLP_subproblem
from .pivot_storage import pivot_storage
#from ..SCLP import SCLP_settings
#'#@profile
def SCLP_pivot(Kset_0:np.ndarray, Jset_N:np.ndarray, solution:generic_SCLP_solution,
col_info:collision_info, DEPTH:int, STEPCOUNT:int, ITERATION:dict, settings:dict, tolerance:float):
"""Perform an SCLP pivot operation on the basis in the simplex-like algorithm.
Parameters
----------
Kset_0: np.ndarray
Array of K indexes of {k: x^0_k > 0} where x^0 is at t=0.
Jset_N: np.ndarray
Array of J indexes of {j: q^N_j > 0} where q^N is at t=T.
solution: generic_SCLP_solution
The previous solution.
col_info: collision_info
Description of the collision that occurred.
DEPTH: int
Depth within the recursion starting at 0.
STEPCOUNT: int
Counter of how many steps the algorithm has taken overall.
ITERATION: dict
Counter of how many steps the algorithm has taken for each specific problem or subproblem.
settings: SCLP_settings
Solver settings.
tolerance: float
The numerical tolerance for floating point comparisons.
Returns
-------
generic_SCLP_solution, int, dict, dict
New solution, STEPCOUNT, ITERATION, pivot_problem
"""
pivot_problem = {'result': 0}
v1 = col_info.v1
v2 = col_info.v2
if col_info.N1 == -1:
AAN1 = None
AAN2 = solution.get_basis_at(col_info.N2)
Jset = get_dq_names(AAN2)
Kset = Kset_0
        if not isinstance(v1, list):
            Jset = Jset[Jset != v1]
            if v1 > 0:
                Kset = Kset_0.copy()
                Kset = np.append(Kset, v1).astype(np.int32)
        else:
            print('v1', v1)
new_basis, lp_pivots, err = solve_ratesLP(AAN2, Kset, Jset, solution.bases_mm, tolerance)
pp21 = lp_pivots.in_
pp22 = lp_pivots.out_
if len(pp21) == 0 and len(pp22) == 0:
print('Basis B2 is optimal')
pivot_problem['result'] = 1
return solution, STEPCOUNT, ITERATION, pivot_problem
piv1 = pivot_storage(list(pp21), list(pp22))
elif col_info.N2 == solution.NN:
AAN1 = solution.get_basis_at(col_info.N1)
AAN2 = None
Kset = get_dx_names(AAN1)
Jset = -Jset_N.copy()
        if not isinstance(v2, list):
            Kset = Kset[Kset != v2]
            if v2 < 0:
                Jset = np.append(Jset, v2).astype(np.int32)
        else:
            print('v2', v2)
new_basis, lp_pivots, err = solve_ratesLP(AAN1, Kset, Jset, solution.bases_mm, tolerance)
pp11 = lp_pivots.out_
pp12 = lp_pivots.in_
if len(pp11) == 0 and len(pp12) == 0:
pivot_problem['result'] = 1
print('Basis B1 is optimal')
return solution, STEPCOUNT, ITERATION, pivot_problem
piv1 = pivot_storage(list(pp11), list(pp12))
else:
AAN1, AAN2 = solution.get_bases(col_info.N1, col_info.N2)
        #this is for the collision case III
if isinstance(v1, list) or isinstance(v2, list):
#vv = get_pivot(AAN1, AAN2, True)
vv = solution.pivots.outpivots[col_info.N1]
out_diff = {vv}
if isinstance(v2, list):
v2 = vv
else:
v1 = vv
else:
out_diff = {v1, v2}
Kset = get_dx_names(AAN1)
Kset = Kset[Kset != v2]
Jset = get_dq_names(AAN2)
Jset = Jset[Jset != v1]
in_diff = solution.pivots.get_in_difference(col_info.N1,col_info.N2)
if col_info.N2 - col_info.N1 == 2:
in_diff_list = list(in_diff)
ok, new_basis, lp_pivots, err = solve_simple_caseII(AAN2, Kset, Jset, solution.bases_mm, v1, in_diff_list)
if ok == 1:
in_diff_list.remove(lp_pivots[1])
piv1 = pivot_storage([v2, v1], in_diff_list + [lp_pivots[1]])
solution.update_from_basis(col_info, piv1, AAN1, AAN2, new_basis)
return solution, STEPCOUNT, ITERATION, pivot_problem
else:
# TBD getting a new basis, in_out_pivot, error
# need to work on pivots
new_basis, lp_pivots, err = solve_ratesLP(AAN2, Kset, Jset, solution.bases_mm, tolerance)
# if col_info.N2 - col_info.N1 == 2:
# in_diff_list = list(in_diff)
# ok, prim_vars, dual_vars, i, j = partial_solve_caseII(AAN2, Kset, Jset, solution.bases_mm, v1, in_diff_list)
# if ok == 0:
# new_basis, lp_pivots, err = solve_ratesLP(AAN2, Kset, Jset, solution.bases_mm, tolerance, build_sign=False)
# else:
# prim_name, dual_name = AAN2.prim_name.copy(), AAN2.dual_name.copy()
# in_diff_list.remove(prim_name[i])
# piv1 = pivot_storage([v2, v1], in_diff_list + [prim_name[i]])
# tmp = dual_name[j]
# dual_name[j] = prim_name[i]
# prim_name[i] = tmp
# solution.update_from_partial(col_info, piv1, AAN1, AAN2, prim_vars, dual_vars, prim_name, dual_name)
# return solution, STEPCOUNT, ITERATION, pivot_problem
# else:
# new_basis, lp_pivots, err = solve_ratesLP(AAN2, Kset, Jset, solution.bases_mm, tolerance)
# Pivots in and out of the new basis from the previous basis
#
# pp1[12] indexes that were part of the pivot during this step from left
# pp2[12] indexes that were part of the pivot during this step from right
# If the lengths are 1, then they are neighbours
#
# Note: values are in new_basis and AAN2(old) (class LP_form)
# simplex dictionary with first row, first column, also names of variables
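            #
            # For example (hypothetical names): if the old basis had primal
            # names {1, 2, 3} and the new one has {1, 2, 5}, then pp21 = {5}
            # entered and pp22 = {3} left; single-element in/out sets mean
            # the two bases are neighbours and no subproblem recursion is needed.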
pp21 = lp_pivots.in_.copy() # pp21 = lp_pivots.in_.copy() - new_basis.prim_zvars
pp22 = lp_pivots.out_.copy()
#instead of solution.pivots.get_out_difference
# if out_diff != solution.pivots.get_out_difference(col_info.N1, col_info.N2):
# print('aaa')
lp_pivots.extr(out_diff, set(in_diff))
pp11 = lp_pivots.in_ # pp11 = lp_pivots.in_ - new_basis.prim_zvars
pp12 = lp_pivots.out_
if len(pp11) == 0 and len(pp12) == 0:
pivot_problem['result'] = 1
print('Basis B1 is optimal')
return solution, STEPCOUNT, ITERATION, pivot_problem
elif len(pp21) == 0 and len(pp22) == 0:
print('Basis B2 is optimal')
pivot_problem['result'] = 1
return solution, STEPCOUNT, ITERATION, pivot_problem
piv1 = pivot_storage(list(pp11) + list(pp21), list(pp12) + list(pp22))
objective = new_basis.simplex_dict[0, 0]
if objective == np.inf or objective == -np.inf:
pivot_problem['result'] = 1
if col_info.N1 == -1:
print('*** beyond this primal problem is unbounded, dual is infeasible')
cases = 'unbound_'
elif col_info.N2 == solution.NN:
print('*** beyond this primal problem is infeasible, dual is unbounded')
cases = 'infeas__'
else:
print('*** infeasibility in middle of base sequence')
return solution, STEPCOUNT, ITERATION, pivot_problem
i1 = 1
i2 = 1
if col_info.N1 >= 0:
i1 = len(pp11)
# check that positive dq not jumping to 0
if i1 == 1:
v = pp11.pop()
if v < 0:
if get_value_by_name(new_basis, v, False) > 0:
print('Positive dq jumping to 0!')
pivot_problem['result'] = 1
return solution, STEPCOUNT, ITERATION, pivot_problem
if col_info.N2 < solution.NN:
i2 = len(pp21)
# check that positive dx not jumping to 0
if i2 == 1:
v = pp21.pop()
if v > 0:
if get_value_by_name(new_basis, v, True) > 0:
print('Positive dx jumping to 0!')
pivot_problem['result'] = 1
return solution, STEPCOUNT, ITERATION, pivot_problem
if i1 == 1 and i2 == 1:
solution.update_from_basis(col_info, piv1, AAN1, AAN2, new_basis)
return solution, STEPCOUNT, ITERATION, pivot_problem
else:
sub_solution, STEPCOUNT, ITERATION, pivot_problem =\
SCLP_subproblem(new_basis, v1, v2, Kset_0, Jset_N, AAN1, AAN2, solution.totalK, solution.totalJ,
DEPTH+1, STEPCOUNT, ITERATION, settings, tolerance)
if pivot_problem['result'] == 0:
solution.update_from_subproblem(col_info, sub_solution.pivots, AAN1, AAN2)
return solution, STEPCOUNT, ITERATION, pivot_problem
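
# Caller-side sketch (names follow the signature above): a nonzero
# pivot_problem['result'] signals that the pivot failed (neighbouring basis
# already optimal, or an infeasible/unbounded dictionary) and the collision
# should be treated as unresolved:
#
#     solution, STEPCOUNT, ITERATION, pivot_problem = SCLP_pivot(
#         Kset_0, Jset_N, solution, col_info, DEPTH, STEPCOUNT, ITERATION,
#         settings, tolerance)
#     if pivot_problem['result'] != 0:
#         pass  # e.g. fall back / report; handled by the caller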
| 9,779 | 43.253394 | 125 |
py
|
SCLPsolver
|
SCLPsolver-master/SCLPsolver/subroutines/calc_objective.py
|
# Copyright 2020 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
def calc_objective(formulation, u, x, p, q, tau):
    NN = len(tau)
    # breakpoints t_0, ..., t_NN of the time partition; TT is the horizon T
    t = np.cumsum(np.hstack((0., tau)))
    TT = t[NN]
    # primal objective: u is piecewise constant, x is piecewise linear
    part1 = np.dot(np.dot(formulation.gamma, u[:formulation.J,:]), tau)
    # ddtau[n] = integral of t over the n-th interval = tau_n*(t_{n-1}+t_n)/2
    ddtau = tau*(t[:-1] + t[1:])/2
    part2 = np.dot(np.dot(formulation.c, u[:formulation.J,:]), tau * TT - ddtau)
    if formulation.L == 0:
        part3 = 0
    else:
        # trapezoidal rule for the piecewise-linear states
        part3 = np.dot(np.dot(formulation.d, ((x[formulation.K:, :-1]+x[formulation.K:, 1:])/ 2)),tau)
    primobjective = part1 + part2 + part3
    # dual objective, built symmetrically from the dual controls p and q
    part4 = np.dot(np.dot(formulation.alpha,p[:formulation.K,:]),tau)
    part5 = np.dot(np.dot(formulation.a,p[:formulation.K,:]), ddtau)
    if formulation.I == 0:
        part6 = 0
    else:
        part6 = np.dot(np.dot(formulation.b, ((q[formulation.J:, :-1]+q[formulation.J:, 1:])/ 2)),tau)
    dualobjective = part4 + part5 + part6
    # report the midpoint and use the duality gap as the error estimate
    obj = (primobjective + dualobjective) / 2
    err = abs(dualobjective - primobjective)
    return obj, err
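
# A minimal self-checking sketch (hypothetical one-buffer data; the
# `formulation` stand-in only carries the attributes read above):
if __name__ == '__main__':
    from types import SimpleNamespace
    f = SimpleNamespace(J=1, K=1, L=0, I=0,
                        gamma=np.array([1.0]), c=np.array([0.0]),
                        alpha=np.array([1.0]), a=np.array([0.0]),
                        d=None, b=None)
    u = np.array([[1.0]])   # constant control on a single interval
    p = np.array([[1.0]])   # constant dual control
    x = np.zeros((1, 2))    # unused here since L == 0
    q = np.zeros((1, 2))    # unused here since I == 0
    tau = np.array([2.0])   # one interval of length 2
    obj, err = calc_objective(f, u, x, p, q, tau)
    print(obj, err)         # -> 2.0 0.0 (primal and dual objectives agree)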
| 1,560 | 38.025 | 103 |
py
|